diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/alpha/kernel/entry.S linux.22-ac2/arch/alpha/kernel/entry.S
--- linux.vanilla/arch/alpha/kernel/entry.S 2003-06-14 00:11:26.000000000 +0100
+++ linux.22-ac2/arch/alpha/kernel/entry.S 2003-06-29 16:10:35.000000000 +0100
@@ -690,6 +690,7 @@
 .end entSys
 .globl ret_from_fork
+#if CONFIG_SMP
 .align 3
 .ent ret_from_fork
 ret_from_fork:
@@ -697,6 +698,9 @@
 mov $17,$16
 jsr $31,schedule_tail
 .end ret_from_fork
+#else
+ret_from_fork = ret_from_sys_call
+#endif
 .align 3
 .ent reschedule
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/alpha/kernel/process.c linux.22-ac2/arch/alpha/kernel/process.c
--- linux.vanilla/arch/alpha/kernel/process.c 2003-08-28 16:45:26.000000000 +0100
+++ linux.22-ac2/arch/alpha/kernel/process.c 2003-08-09 16:06:26.000000000 +0100
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -74,9 +75,6 @@
 cpu_idle(void)
 {
 /* An endless idle loop with no priority at all. */
- current->nice = 20;
- current->counter = -100;
-
 while (1) {
 /* FIXME -- EV6 and LCA45 know how to power down the CPU. */
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/alpha/kernel/smp.c linux.22-ac2/arch/alpha/kernel/smp.c
--- linux.vanilla/arch/alpha/kernel/smp.c 2003-06-14 00:11:26.000000000 +0100
+++ linux.22-ac2/arch/alpha/kernel/smp.c 2003-06-29 16:10:35.000000000 +0100
@@ -81,6 +81,7 @@
 int smp_num_probed; /* Internal processor count */
 int smp_num_cpus = 1; /* Number that came online. */
 int smp_threads_ready; /* True once the per process idle is forked. */
+unsigned long cache_decay_ticks;
 int __cpu_number_map[NR_CPUS];
 int __cpu_logical_map[NR_CPUS];
@@ -155,11 +156,6 @@
 {
 int cpuid = hard_smp_processor_id();
- if (current != init_tasks[cpu_number_map(cpuid)]) {
- printk("BUG: smp_calling: cpu %d current %p init_tasks[cpu_number_map(cpuid)] %p\n",
- cpuid, current, init_tasks[cpu_number_map(cpuid)]);
- }
-
 DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));
 /* Turn on machine checks. */
@@ -217,9 +213,6 @@
 DBGS(("smp_callin: commencing CPU %d current %p\n",
 cpuid, current));
- /* Setup the scheduler for this processor. */
- init_idle();
-
 /* ??? This should be in init_idle. */
 atomic_inc(&init_mm.mm_count);
 current->active_mm = &init_mm;
@@ -449,14 +442,11 @@
 if (idle == &init_task)
 panic("idle process is init_task for CPU %d", cpuid);
- idle->processor = cpuid;
- idle->cpus_runnable = 1 << cpuid; /* we schedule the first task manually */
+ init_idle(idle, cpuid);
+ unhash_process(idle);
+
 __cpu_logical_map[cpunum] = cpuid;
 __cpu_number_map[cpuid] = cpunum;
-
- del_from_runqueue(idle);
- unhash_process(idle);
- init_tasks[cpunum] = idle;
 DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
 cpuid, idle->state, idle->flags));
@@ -563,13 +553,10 @@
 __cpu_number_map[boot_cpuid] = 0;
 __cpu_logical_map[0] = boot_cpuid;
- current->processor = boot_cpuid;
 smp_store_cpu_info(boot_cpuid);
 smp_setup_percpu_timer(boot_cpuid);
- init_idle();
-
 /* ??? This should be in init_idle. */
 atomic_inc(&init_mm.mm_count);
 current->active_mm = &init_mm;
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/alpha/kernel/srmcons.c linux.22-ac2/arch/alpha/kernel/srmcons.c
--- linux.vanilla/arch/alpha/kernel/srmcons.c 2003-06-14 00:11:26.000000000 +0100
+++ linux.22-ac2/arch/alpha/kernel/srmcons.c 2003-06-29 16:10:35.000000000 +0100
@@ -260,7 +260,7 @@
 spin_lock_irqsave(&srmconsp->lock, flags);
- if (tty->count == 1) {
+ if (atomic_read(&tty->count) == 1) {
 srmconsp->tty = NULL;
 del_timer(&srmconsp->timer);
 }
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/alpha/mm/fault.c linux.22-ac2/arch/alpha/mm/fault.c
--- linux.vanilla/arch/alpha/mm/fault.c 2002-11-29 21:27:11.000000000 +0000
+++ linux.22-ac2/arch/alpha/mm/fault.c 2003-06-29 16:10:35.000000000 +0100
@@ -122,8 +122,6 @@
 goto bad_area;
 if (vma->vm_start <= address)
 goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
 if (expand_stack(vma, address))
 goto bad_area;
 /*
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/arm/config.in linux.22-ac2/arch/arm/config.in
--- linux.vanilla/arch/arm/config.in 2003-08-28 16:45:27.000000000 +0100
+++ linux.22-ac2/arch/arm/config.in 2003-07-06 13:38:19.000000000 +0100
@@ -722,6 +722,7 @@
 bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
 dep_bool ' Debug memory allocations' CONFIG_DEBUG_SLAB $CONFIG_DEBUG_KERNEL
 dep_bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ $CONFIG_DEBUG_KERNEL
+dep_bool ' Morse code panics' CONFIG_PANIC_MORSE $CONFIG_DEBUG_KERNEL $CONFIG_PC_KEYB
 dep_bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK $CONFIG_DEBUG_KERNEL
 dep_bool ' Wait queue debugging' CONFIG_DEBUG_WAITQ $CONFIG_DEBUG_KERNEL
 dep_bool ' Verbose BUG() reporting (adds 70K)' CONFIG_DEBUG_BUGVERBOSE $CONFIG_DEBUG_KERNEL
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/arm/mm/fault-common.c linux.22-ac2/arch/arm/mm/fault-common.c
--- linux.vanilla/arch/arm/mm/fault-common.c 2003-08-28 16:45:27.000000000 +0100
+++ linux.22-ac2/arch/arm/mm/fault-common.c 2003-07-06 13:38:42.000000000 +0100
@@ -254,7 +254,7 @@
 goto survive;
 check_stack:
- if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+ if (!expand_stack(vma, addr))
 goto good_area;
 out:
 return fault;
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/cris/drivers/serial.c linux.22-ac2/arch/cris/drivers/serial.c
--- linux.vanilla/arch/cris/drivers/serial.c 2003-08-28 16:45:27.000000000 +0100
+++ linux.22-ac2/arch/cris/drivers/serial.c 2003-07-14 12:37:10.000000000 +0100
@@ -4502,7 +4502,7 @@
 printk("[%d] rs_close ttyS%d, count = %d\n", current->pid, info->line, info->count);
 #endif
- if ((tty->count == 1) && (info->count != 1)) {
+ if ((atomic_read(&tty->count) == 1) && (info->count != 1)) {
 /*
 * Uh, oh. tty->count is 1, which means that the tty
 * structure will be freed. Info->count should always
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/boot/setup.S linux.22-ac2/arch/i386/boot/setup.S
--- linux.vanilla/arch/i386/boot/setup.S 2002-08-03 16:08:20.000000000 +0100
+++ linux.22-ac2/arch/i386/boot/setup.S 2003-06-29 16:10:33.000000000 +0100
@@ -45,6 +45,10 @@
 * New A20 code ported from SYSLINUX by H. Peter Anvin.
AMD Elan bugfixes * by Robert Schwebel, December 2001 * + * BIOS Enhanced Disk Drive support + * by Matt Domsch October 2002 + * conformant to T13 Committee www.t13.org + * projects 1572D, 1484D, 1386D, 1226DT */ #include @@ -53,6 +57,7 @@ #include #include #include +#include #include /* Signature words to ensure LILO loaded us right */ @@ -543,6 +548,70 @@ done_apm_bios: #endif +#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) +# Do the BIOS Enhanced Disk Drive calls +# This consists of two calls: +# int 13h ah=41h "Check Extensions Present" +# int 13h ah=48h "Get Device Parameters" +# +# A buffer of size EDDMAXNR*(EDDEXTSIZE+EDDPARMSIZE) is reserved for our use +# in the empty_zero_page at EDDBUF. The first four bytes of which are +# used to store the device number, interface support map and version +# results from fn41. The following 74 bytes are used to store +# the results from fn48. Starting from device 80h, fn41, then fn48 +# are called and their results stored in EDDBUF+n*(EDDEXTSIZE+EDDPARMIZE). +# Then the pointer is incremented to store the data for the next call. +# This repeats until either a device doesn't exist, or until EDDMAXNR +# devices have been stored. +# The one tricky part is that ds:si always points four bytes into +# the structure, and the fn41 results are stored at offsets +# from there. This removes the need to increment the pointer for +# every store, and leaves it ready for the fn48 call. +# A second one-byte buffer, EDDNR, in the empty_zero_page stores +# the number of BIOS devices which exist, up to EDDMAXNR. +# In setup.c, copy_edd() stores both empty_zero_page buffers away +# for later use, as they would get overwritten otherwise. +# This code is sensitive to the size of the structs in edd.h +edd_start: + # %ds points to the bootsector + # result buffer for fn48 + movw $EDDBUF+EDDEXTSIZE, %si # in ds:si, fn41 results + # kept just before that + movb $0, (EDDNR) # zero value at EDDNR + movb $0x80, %dl # BIOS device 0x80 + +edd_check_ext: + movb $CHECKEXTENSIONSPRESENT, %ah # Function 41 + movw $EDDMAGIC1, %bx # magic + int $0x13 # make the call + jc edd_done # no more BIOS devices + + cmpw $EDDMAGIC2, %bx # is magic right? + jne edd_next # nope, next... + + movb %dl, %ds:-4(%si) # store device number + movb %ah, %ds:-3(%si) # store version + movw %cx, %ds:-2(%si) # store extensions + incb (EDDNR) # note that we stored something + +edd_get_device_params: + movw $EDDPARMSIZE, %ds:(%si) # put size + movb $GETDEVICEPARAMETERS, %ah # Function 48 + int $0x13 # make the call + # Don't check for fail return + # it doesn't matter. + movw %si, %ax # increment si + addw $EDDPARMSIZE+EDDEXTSIZE, %ax + movw %ax, %si + +edd_next: + incb %dl # increment to next device + cmpb $EDDMAXNR, (EDDNR) # Out of space? + jb edd_check_ext # keep looping + +edd_done: +#endif + # Now we want to move to protected mode ... 
cmpw $0, %cs:realmode_swtch jz rmodeswtch_normal diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/config.in linux.22-ac2/arch/i386/config.in --- linux.vanilla/arch/i386/config.in 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/config.in 2003-08-13 14:33:27.000000000 +0100 @@ -56,6 +56,7 @@ define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n define_bool CONFIG_X86_PPRO_FENCE y define_bool CONFIG_X86_F00F_WORKS_OK n + define_bool CONFIG_X86_HAS_TSC n else define_bool CONFIG_X86_WP_WORKS_OK y define_bool CONFIG_X86_INVLPG y @@ -72,6 +73,7 @@ define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_PPRO_FENCE y define_bool CONFIG_X86_F00F_WORKS_OK n + define_bool CONFIG_X86_HAS_TSC n fi if [ "$CONFIG_M586" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 5 @@ -79,6 +81,7 @@ define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_PPRO_FENCE y define_bool CONFIG_X86_F00F_WORKS_OK n + define_bool CONFIG_X86_HAS_TSC n fi if [ "$CONFIG_M586TSC" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 5 @@ -194,6 +197,30 @@ bool 'Machine Check Exception' CONFIG_X86_MCE +mainmenu_option next_comment +comment 'CPU Frequency scaling' +bool 'CPU Frequency scaling' CONFIG_CPU_FREQ +if [ "$CONFIG_CPU_FREQ" = "y" ]; then + bool ' CPU frequency table helpers' CONFIG_CPU_FREQ_TABLE + define_bool CONFIG_CPU_FREQ_PROC_INTF y + comment 'CPUFreq governors' + bool ' "userspace" for userspace frequency scaling' CONFIG_CPU_FREQ_GOV_USERSPACE + define_bool CONFIG_CPU_FREQ_24_API y + comment 'CPUFreq processor drivers' + dep_tristate ' AMD Mobile K6-2/K6-3 PowerNow!' CONFIG_X86_POWERNOW_K6 $CONFIG_CPU_FREQ_TABLE + dep_tristate ' AMD Mobile Athlon/Duron K7 PowerNow!' CONFIG_X86_POWERNOW_K7 $CONFIG_CPU_FREQ_TABLE + if [ "$CONFIG_MELAN" = "y" ]; then + dep_tristate ' AMD Elan' CONFIG_ELAN_CPUFREQ $CONFIG_CPU_FREQ_TABLE + fi + dep_tristate ' VIA Cyrix III Longhaul' CONFIG_X86_LONGHAUL $CONFIG_CPU_FREQ_TABLE + dep_tristate ' Intel Speedstep (ICH)' CONFIG_X86_SPEEDSTEP_ICH $CONFIG_CPU_FREQ_TABLE + dep_tristate ' Intel Pentium-M Enhanced SpeedStep' CONFIG_X86_SPEEDSTEP_CENTRINO $CONFIG_CPU_FREQ_TABLE + dep_tristate ' Intel Pentium 4 clock modulation' CONFIG_X86_P4_CLOCKMOD $CONFIG_CPU_FREQ_TABLE + tristate ' Transmeta LongRun' CONFIG_X86_LONGRUN + tristate ' Cyrix MediaGX/NatSemi Geode Suspend Modulation' CONFIG_X86_GX_SUSPMOD +fi +endmenu + tristate 'Toshiba Laptop support' CONFIG_TOSHIBA tristate 'Dell laptop support' CONFIG_I8K @@ -201,6 +228,10 @@ tristate '/dev/cpu/*/msr - Model-specific register support' CONFIG_X86_MSR tristate '/dev/cpu/*/cpuid - CPU information support' CONFIG_X86_CPUID +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + tristate 'BIOS Enhanced Disk Drive calls determine boot disk (EXPERIMENTAL)' CONFIG_EDD +fi + choice 'High Memory Support' \ "off CONFIG_NOHIGHMEM \ 4GB CONFIG_HIGHMEM4G \ @@ -231,6 +262,7 @@ define_bool CONFIG_X86_IO_APIC y fi else + bool 'Clustered APIC support' CONFIG_X86_CLUSTERED_APIC bool 'Multi-node NUMA system support' CONFIG_X86_NUMA if [ "$CONFIG_X86_NUMA" = "y" ]; then #Platform Choices @@ -292,6 +324,8 @@ bool 'ISA bus support' CONFIG_ISA fi +tristate 'NatSemi SCx200 support' CONFIG_SCx200 + source drivers/pci/Config.in bool 'EISA support' CONFIG_EISA @@ -324,6 +358,8 @@ tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC +bool 'Kernel .config support' CONFIG_IKCONFIG + bool 'Power Management support' CONFIG_PM dep_tristate ' Advanced Power Management BIOS 
support' CONFIG_APM $CONFIG_PM @@ -468,12 +504,13 @@ bool 'Kernel debugging' CONFIG_DEBUG_KERNEL if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then bool ' Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW + bool ' Compile the kernel with frame pointers' CONFIG_FRAME_POINTER bool ' Debug high memory support' CONFIG_DEBUG_HIGHMEM bool ' Debug memory allocations' CONFIG_DEBUG_SLAB bool ' Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ + bool ' Morse code panics' CONFIG_PANIC_MORSE bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK - bool ' Compile the kernel with frame pointers' CONFIG_FRAME_POINTER fi endmenu diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/defconfig linux.22-ac2/arch/i386/defconfig --- linux.vanilla/arch/i386/defconfig 2002-11-29 21:27:11.000000000 +0000 +++ linux.22-ac2/arch/i386/defconfig 2003-07-31 13:48:29.000000000 +0100 @@ -56,6 +56,7 @@ # CONFIG_MICROCODE is not set # CONFIG_X86_MSR is not set # CONFIG_X86_CPUID is not set +# CONFIG_EDD is not set CONFIG_NOHIGHMEM=y # CONFIG_HIGHMEM4G is not set # CONFIG_HIGHMEM64G is not set @@ -607,6 +608,8 @@ CONFIG_AGP_SIS=y CONFIG_AGP_ALI=y # CONFIG_AGP_SWORKS is not set +CONFIG_AGP_ATI=y +CONFIG_AGP_NVIDIA=y CONFIG_DRM=y # CONFIG_DRM_OLD is not set diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/dmi_scan.c linux.22-ac2/arch/i386/kernel/dmi_scan.c --- linux.vanilla/arch/i386/kernel/dmi_scan.c 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/dmi_scan.c 2003-08-28 18:31:51.000000000 +0100 @@ -437,37 +437,6 @@ } /* - * The Intel 440GX hall of shame. - * - * On many (all we have checked) of these boxes the $PIRQ table is wrong. - * The MP1.4 table is right however and so SMP kernels tend to work. 
- */ - -#ifdef CONFIG_PCI -extern int broken_440gx_bios; -extern unsigned int pci_probe; -#endif -static __init int broken_pirq(struct dmi_blacklist *d) -{ - printk(KERN_INFO " *** Possibly defective BIOS detected (irqtable)\n"); - printk(KERN_INFO " *** Many BIOSes matching this signature have incorrect IRQ routing tables.\n"); - printk(KERN_INFO " *** If you see IRQ problems, in paticular SCSI resets and hangs at boot\n"); - printk(KERN_INFO " *** contact your hardware vendor and ask about updates.\n"); - printk(KERN_INFO " *** Building an SMP kernel may evade the bug some of the time.\n"); -#ifdef CONFIG_X86_IO_APIC - { - extern int skip_ioapic_setup; - skip_ioapic_setup = 0; - } -#endif -#ifdef CONFIG_PCI - broken_440gx_bios = 1; - pci_probe |= PCI_BIOS_IRQ_SCAN; -#endif - return 0; -} - -/* * ASUS K7V-RM has broken ACPI table defining sleep modes */ @@ -871,77 +840,17 @@ MATCH(DMI_BOARD_NAME, "AL440LX"), NO_MATCH, NO_MATCH } }, - /* Problem Intel 440GX bioses */ - - { broken_pirq, "SABR1 Bios", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "Intel Corporation"), - MATCH(DMI_BIOS_VERSION,"SABR1"), - NO_MATCH, NO_MATCH - } }, - { broken_pirq, "l44GX Bios", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "Intel Corporation"), - MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0066.P07"), - NO_MATCH, NO_MATCH - } }, - { broken_pirq, "IBM xseries 370", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "IBM"), - MATCH(DMI_BIOS_VERSION,"MMKT33AUS"), - NO_MATCH, NO_MATCH - } }, - { broken_pirq, "l44GX Bios", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "Intel Corporation"), - MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0094.P10"), - NO_MATCH, NO_MATCH - } }, - { broken_pirq, "l44GX Bios", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "Intel Corporation"), - MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0115.P12"), - NO_MATCH, NO_MATCH - } }, - { broken_pirq, "l44GX Bios", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "Intel Corporation"), - MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0120.P12"), - NO_MATCH, NO_MATCH - } }, - { broken_pirq, "l44GX Bios", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "Intel Corporation"), - MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0125.P13"), - NO_MATCH, NO_MATCH - } }, - { broken_pirq, "l44GX Bios", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "Intel Corporation"), - MATCH(DMI_BIOS_VERSION,"C440GX0.86B"), - NO_MATCH, NO_MATCH - } }, - { broken_pirq, "l44GX Bios", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "Intel Corporation"), - MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0133.P14"), - NO_MATCH, NO_MATCH - } }, - { broken_pirq, "l44GX Bios", { /* Bad $PIR */ - MATCH(DMI_BIOS_VENDOR, "Intel Corporation"), - MATCH(DMI_BIOS_VERSION,"L440GX0"), - NO_MATCH, NO_MATCH - } }, - - /* Intel in disgiuse - In this case they can't hide and they don't run - too well either... 
*/ - { broken_pirq, "Dell PowerEdge 8450", { /* Bad $PIR */ - MATCH(DMI_PRODUCT_NAME, "Dell PowerEdge 8450"), + { init_ints_after_s1, "Toshiba Satellite 4030cdt", { /* Reinitialization of 8259 is needed after S1 resume */ + MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), NO_MATCH, NO_MATCH, NO_MATCH } }, - + { broken_acpi_Sx, "ASUS K7V-RM", { /* Bad ACPI Sx table */ MATCH(DMI_BIOS_VERSION,"ASUS K7V-RM ACPI BIOS Revision 1003A"), MATCH(DMI_BOARD_NAME, ""), NO_MATCH, NO_MATCH } }, - { init_ints_after_s1, "Toshiba Satellite 4030cdt", { /* Reinitialization of 8259 is needed after S1 resume */ - MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), - NO_MATCH, NO_MATCH, NO_MATCH - } }, - { print_if_true, KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.", { MATCH(DMI_SYS_VENDOR, "IBM"), MATCH(DMI_BIOS_VERSION, "1AET38WW (1.01b)"), diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/edd.c linux.22-ac2/arch/i386/kernel/edd.c --- linux.vanilla/arch/i386/kernel/edd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/edd.c 2003-06-29 16:10:34.000000000 +0100 @@ -0,0 +1,672 @@ +/* + * linux/arch/i386/kernel/edd.c + * Copyright (C) 2002 Dell Computer Corporation + * by Matt Domsch + * + * BIOS Enhanced Disk Drive Services (EDD) + * conformant to T13 Committee www.t13.org + * projects 1572D, 1484D, 1386D, 1226DT + * + * This code takes information provided by BIOS EDD calls + * fn41 - Check Extensions Present and + * fn48 - Get Device Parametes with EDD extensions + * made in setup.S, copied to safe structures in setup.c, + * and presents it in /proc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* + * TODO: + * - move edd.[ch] to better locations if/when one is decided + * - keep current with 2.5 EDD code changes + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Matt Domsch "); +MODULE_DESCRIPTION("proc interface to BIOS EDD information"); +MODULE_LICENSE("GPL"); + +#define EDD_VERSION "0.09 2003-Jan-22" +#define EDD_DEVICE_NAME_SIZE 16 +#define REPORT_URL "http://domsch.com/linux/edd30/results.html" + +#define left (count - (p - page) - 1) + +static struct proc_dir_entry *bios_dir; + +struct attr_entry { + struct proc_dir_entry *entry; + struct list_head node; +}; + +struct edd_device { + char name[EDD_DEVICE_NAME_SIZE]; + struct edd_info *info; + struct proc_dir_entry *dir; + struct list_head attr_list; +}; + +static struct edd_device *edd_devices[EDDMAXNR]; + +struct edd_attribute { + char *name; + int (*show)(char *page, char **start, off_t off, + int count, int *eof, void *data); + int (*test) (struct edd_device * edev); +}; + +#define EDD_DEVICE_ATTR(_name,_show,_test) \ +struct edd_attribute edd_attr_##_name = { \ + .name = __stringify(_name), \ + .show = _show, \ + .test = _test, \ +}; + +static inline struct edd_info * +edd_dev_get_info(struct edd_device *edev) +{ + return edev->info; +} + +static inline void +edd_dev_set_info(struct edd_device *edev, struct edd_info *info) +{ + edev->info = info; +} + +static int +proc_calc_metrics(char *page, char **start, off_t off, + int count, int *eof, int len) +{ + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + return len; +} + +static int +edd_dump_raw_data(char *b, int count, void *data, int length) +{ + char *orig_b = b; + char hexbuf[80], ascbuf[20], *h, *a, c; + unsigned char *p = data; + unsigned long column = 0; + int length_printed = 0, d; + const char maxcolumn = 16; + while (length_printed < length && count > 0) { + h = hexbuf; + a = ascbuf; + for (column = 0; + column < maxcolumn && length_printed < length; column++) { + h += sprintf(h, "%02x ", (unsigned char) *p); + if (!isprint(*p)) + c = '.'; + else + c = *p; + a += sprintf(a, "%c", c); + p++; + length_printed++; + } + /* pad out the line */ + for (; column < maxcolumn; column++) { + h += sprintf(h, " "); + a += sprintf(a, " "); + } + d = snprintf(b, count, "%s\t%s\n", hexbuf, ascbuf); + b += d; + count -= d; + } + return (b - orig_b); +} + +static int +edd_show_host_bus(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + int i; + + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + for (i = 0; i < 4; i++) { + if (isprint(info->params.host_bus_type[i])) { + p += snprintf(p, left, "%c", info->params.host_bus_type[i]); + } else { + p += snprintf(p, left, " "); + } + } + + if (!strncmp(info->params.host_bus_type, "ISA", 3)) { + p += snprintf(p, left, "\tbase_address: %x\n", + info->params.interface_path.isa.base_address); + } else if (!strncmp(info->params.host_bus_type, "PCIX", 4) || + !strncmp(info->params.host_bus_type, "PCI", 3)) { + p += snprintf(p, left, + "\t%02x:%02x.%d channel: %u\n", + info->params.interface_path.pci.bus, + info->params.interface_path.pci.slot, + info->params.interface_path.pci.function, + info->params.interface_path.pci.channel); + } else if (!strncmp(info->params.host_bus_type, "IBND", 4) || + !strncmp(info->params.host_bus_type, "XPRS", 4) || + 
!strncmp(info->params.host_bus_type, "HTPT", 4)) { + p += snprintf(p, left, + "\tTBD: %llx\n", + info->params.interface_path.ibnd.reserved); + + } else { + p += snprintf(p, left, "\tunknown: %llx\n", + info->params.interface_path.unknown.reserved); + } + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_interface(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + int i; + + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + for (i = 0; i < 8; i++) { + if (isprint(info->params.interface_type[i])) { + p += snprintf(p, left, "%c", info->params.interface_type[i]); + } else { + p += snprintf(p, left, " "); + } + } + if (!strncmp(info->params.interface_type, "ATAPI", 5)) { + p += snprintf(p, left, "\tdevice: %u lun: %u\n", + info->params.device_path.atapi.device, + info->params.device_path.atapi.lun); + } else if (!strncmp(info->params.interface_type, "ATA", 3)) { + p += snprintf(p, left, "\tdevice: %u\n", + info->params.device_path.ata.device); + } else if (!strncmp(info->params.interface_type, "SCSI", 4)) { + p += snprintf(p, left, "\tid: %u lun: %llu\n", + info->params.device_path.scsi.id, + info->params.device_path.scsi.lun); + } else if (!strncmp(info->params.interface_type, "USB", 3)) { + p += snprintf(p, left, "\tserial_number: %llx\n", + info->params.device_path.usb.serial_number); + } else if (!strncmp(info->params.interface_type, "1394", 4)) { + p += snprintf(p, left, "\teui: %llx\n", + info->params.device_path.i1394.eui); + } else if (!strncmp(info->params.interface_type, "FIBRE", 5)) { + p += snprintf(p, left, "\twwid: %llx lun: %llx\n", + info->params.device_path.fibre.wwid, + info->params.device_path.fibre.lun); + } else if (!strncmp(info->params.interface_type, "I2O", 3)) { + p += snprintf(p, left, "\tidentity_tag: %llx\n", + info->params.device_path.i2o.identity_tag); + } else if (!strncmp(info->params.interface_type, "RAID", 4)) { + p += snprintf(p, left, "\tidentity_tag: %x\n", + info->params.device_path.raid.array_number); + } else if (!strncmp(info->params.interface_type, "SATA", 4)) { + p += snprintf(p, left, "\tdevice: %u\n", + info->params.device_path.sata.device); + } else { + p += snprintf(p, left, "\tunknown: %llx %llx\n", + info->params.device_path.unknown.reserved1, + info->params.device_path.unknown.reserved2); + } + + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +/** + * edd_show_raw_data() - unparses EDD information, returned to user-space + * + * Returns: number of bytes written, or 0 on failure + */ +static int +edd_show_raw_data(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + int i, warn_padding = 0, nonzero_path = 0, + len = sizeof (*info) - 4; + uint8_t checksum = 0, c = 0; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) + len = info->params.length; + + p += snprintf(p, left, "int13 fn48 returned data:\n\n"); + p += edd_dump_raw_data(p, left, ((char *) info) + 4, len); + + /* Spec violation. Adaptec AIC7899 returns 0xDDBE + here, when it should be 0xBEDD. + */ + p += snprintf(p, left, "\n"); + if (info->params.key == 0xDDBE) { + p += snprintf(p, left, + "Warning: Spec violation. 
Key should be 0xBEDD, is 0xDDBE\n"); + } + + if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) { + goto out; + } + + for (i = 30; i <= 73; i++) { + c = *(((uint8_t *) info) + i + 4); + if (c) + nonzero_path++; + checksum += c; + } + + if (checksum) { + p += snprintf(p, left, + "Warning: Spec violation. Device Path checksum invalid.\n"); + } + + if (!nonzero_path) { + p += snprintf(p, left, "Error: Spec violation. Empty device path.\n"); + goto out; + } + + for (i = 0; i < 4; i++) { + if (!isprint(info->params.host_bus_type[i])) { + warn_padding++; + } + } + for (i = 0; i < 8; i++) { + if (!isprint(info->params.interface_type[i])) { + warn_padding++; + } + } + + if (warn_padding) { + p += snprintf(p, left, + "Warning: Spec violation. Padding should be 0x20.\n"); + } + +out: + p += snprintf(p, left, "\nPlease check %s\n", REPORT_URL); + p += snprintf(p, left, "to see if this device has been reported. If not,\n"); + p += snprintf(p, left, "please send the information requested there.\n"); + + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_version(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%02x\n", info->version); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_extensions(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + if (info->interface_support & EDD_EXT_FIXED_DISK_ACCESS) { + p += snprintf(p, left, "Fixed disk access\n"); + } + if (info->interface_support & EDD_EXT_DEVICE_LOCKING_AND_EJECTING) { + p += snprintf(p, left, "Device locking and ejecting\n"); + } + if (info->interface_support & EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT) { + p += snprintf(p, left, "Enhanced Disk Drive support\n"); + } + if (info->interface_support & EDD_EXT_64BIT_EXTENSIONS) { + p += snprintf(p, left, "64-bit extensions\n"); + } + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_info_flags(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + if (info->params.info_flags & EDD_INFO_DMA_BOUNDRY_ERROR_TRANSPARENT) + p += snprintf(p, left, "DMA boundry error transparent\n"); + if (info->params.info_flags & EDD_INFO_GEOMETRY_VALID) + p += snprintf(p, left, "geometry valid\n"); + if (info->params.info_flags & EDD_INFO_REMOVABLE) + p += snprintf(p, left, "removable\n"); + if (info->params.info_flags & EDD_INFO_WRITE_VERIFY) + p += snprintf(p, left, "write verify\n"); + if (info->params.info_flags & EDD_INFO_MEDIA_CHANGE_NOTIFICATION) + p += snprintf(p, left, "media change notification\n"); + if (info->params.info_flags & EDD_INFO_LOCKABLE) + p += snprintf(p, left, "lockable\n"); + if (info->params.info_flags & EDD_INFO_NO_MEDIA_PRESENT) + p += snprintf(p, left, "no media present\n"); + if (info->params.info_flags & EDD_INFO_USE_INT13_FN50) + p += snprintf(p, left, "use int13 fn50\n"); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_default_cylinders(char *page, char **start, off_t 
off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%x\n", info->params.num_default_cylinders); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_default_heads(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%x\n", info->params.num_default_heads); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_default_sectors_per_track(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%x\n", info->params.sectors_per_track); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_sectors(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%llx\n", info->params.number_of_sectors); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_has_default_cylinders(struct edd_device *edev) +{ + struct edd_info *info = edd_dev_get_info(edev); + if (!edev || !info) + return 0; + return info->params.num_default_cylinders > 0; +} + +static int +edd_has_default_heads(struct edd_device *edev) +{ + struct edd_info *info = edd_dev_get_info(edev); + if (!edev || !info) + return 0; + return info->params.num_default_heads > 0; +} + +static int +edd_has_default_sectors_per_track(struct edd_device *edev) +{ + struct edd_info *info = edd_dev_get_info(edev); + if (!edev || !info) + return 0; + return info->params.sectors_per_track > 0; +} + +static int +edd_has_edd30(struct edd_device *edev) +{ + struct edd_info *info = edd_dev_get_info(edev); + int i, nonzero_path = 0; + char c; + + if (!edev || !info) + return 0; + + if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) { + return 0; + } + + for (i = 30; i <= 73; i++) { + c = *(((uint8_t *) info) + i + 4); + if (c) { + nonzero_path++; + break; + } + } + if (!nonzero_path) { + return 0; + } + + return 1; +} + +static EDD_DEVICE_ATTR(raw_data, edd_show_raw_data, NULL); +static EDD_DEVICE_ATTR(version, edd_show_version, NULL); +static EDD_DEVICE_ATTR(extensions, edd_show_extensions, NULL); +static EDD_DEVICE_ATTR(info_flags, edd_show_info_flags, NULL); +static EDD_DEVICE_ATTR(sectors, edd_show_sectors, NULL); +static EDD_DEVICE_ATTR(default_cylinders, edd_show_default_cylinders, + edd_has_default_cylinders); +static EDD_DEVICE_ATTR(default_heads, edd_show_default_heads, + edd_has_default_heads); +static EDD_DEVICE_ATTR(default_sectors_per_track, + edd_show_default_sectors_per_track, + edd_has_default_sectors_per_track); +static EDD_DEVICE_ATTR(interface, edd_show_interface,edd_has_edd30); +static EDD_DEVICE_ATTR(host_bus, edd_show_host_bus, edd_has_edd30); + +static struct edd_attribute *def_attrs[] = { + &edd_attr_raw_data, + &edd_attr_version, + &edd_attr_extensions, + &edd_attr_info_flags, + &edd_attr_sectors, + &edd_attr_default_cylinders, + 
&edd_attr_default_heads, + &edd_attr_default_sectors_per_track, + &edd_attr_interface, + &edd_attr_host_bus, + NULL, +}; + +static inline void +edd_device_unregister(struct edd_device *edev) +{ + struct list_head *pos, *next; + struct attr_entry *ae; + + list_for_each_safe(pos, next, &edev->attr_list) { + ae = list_entry(pos, struct attr_entry, node); + remove_proc_entry(ae->entry->name, edev->dir); + list_del(&ae->node); + kfree(ae); + } + + remove_proc_entry(edev->dir->name, bios_dir); +} + +static int +edd_populate_dir(struct edd_device *edev) +{ + struct edd_attribute *attr; + struct attr_entry *ae; + int i; + int error = 0; + + for (i = 0; (attr=def_attrs[i]); i++) { + if (!attr->test || (attr->test && attr->test(edev))) { + ae = kmalloc(sizeof (*ae), GFP_KERNEL); + if (ae == NULL) { + error = 1; + break; + } + INIT_LIST_HEAD(&ae->node); + ae->entry = + create_proc_read_entry(attr->name, 0444, + edev->dir, attr->show, + edd_dev_get_info(edev)); + if (ae->entry == NULL) { + error = 1; + break; + } + list_add(&ae->node, &edev->attr_list); + } + } + + if (error) + return error; + + return 0; +} + +static int +edd_make_dir(struct edd_device *edev) +{ + int error=1; + + edev->dir = proc_mkdir(edev->name, bios_dir); + if (edev->dir != NULL) { + edev->dir->mode = (S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO); + error = edd_populate_dir(edev); + } + return error; +} + +static int +edd_device_register(struct edd_device *edev, int i) +{ + int error; + + if (!edev) + return 1; + memset(edev, 0, sizeof (*edev)); + INIT_LIST_HEAD(&edev->attr_list); + edd_dev_set_info(edev, &edd[i]); + snprintf(edev->name, EDD_DEVICE_NAME_SIZE, "int13_dev%02x", + edd[i].device); + error = edd_make_dir(edev); + return error; +} + +/** + * edd_init() - creates /proc tree of EDD data + * + * This assumes that eddnr and edd were + * assigned in setup.c already. + */ +static int __init +edd_init(void) +{ + unsigned int i; + int rc = 0; + struct edd_device *edev; + + printk(KERN_INFO "BIOS EDD facility v%s, %d devices found\n", + EDD_VERSION, eddnr); + + if (!eddnr) { + printk(KERN_INFO "EDD information not available.\n"); + return 1; + } + + bios_dir = proc_mkdir("bios", NULL); + if (bios_dir == NULL) + return 1; + + for (i = 0; i < eddnr && i < EDDMAXNR && !rc; i++) { + edev = kmalloc(sizeof (*edev), GFP_KERNEL); + if (!edev) { + rc = 1; + break; + } + + rc = edd_device_register(edev, i); + if (rc) { + break; + } + edd_devices[i] = edev; + } + + if (rc) { + for (i = 0; i < eddnr && i < EDDMAXNR; i++) { + if ((edev = edd_devices[i])) { + edd_device_unregister(edev); + kfree(edev); + } + } + + remove_proc_entry(bios_dir->name, NULL); + } + + return rc; +} + +static void __exit +edd_exit(void) +{ + int i; + struct edd_device *edev; + + for (i = 0; i < eddnr && i < EDDMAXNR; i++) { + if ((edev = edd_devices[i])) { + edd_device_unregister(edev); + kfree(edev); + } + } + + remove_proc_entry(bios_dir->name, NULL); +} + +module_init(edd_init); +module_exit(edd_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/elanfreq.c linux.22-ac2/arch/i386/kernel/elanfreq.c --- linux.vanilla/arch/i386/kernel/elanfreq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/elanfreq.c 2003-07-31 14:49:39.000000000 +0100 @@ -0,0 +1,286 @@ +/* + * elanfreq: cpufreq driver for the AMD ELAN family + * + * (c) Copyright 2002 Robert Schwebel + * + * Parts of this code are (c) Sven Geggus + * + * All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel + * + */ + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */ +#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */ + +/* Module parameter */ +static int max_freq; + +struct s_elan_multiplier { + int clock; /* frequency in kHz */ + int val40h; /* PMU Force Mode register */ + int val80h; /* CPU Clock Speed Register */ +}; + +/* + * It is important that the frequencies + * are listed in ascending order here! + */ +struct s_elan_multiplier elan_multiplier[] = { + {1000, 0x02, 0x18}, + {2000, 0x02, 0x10}, + {4000, 0x02, 0x08}, + {8000, 0x00, 0x00}, + {16000, 0x00, 0x02}, + {33000, 0x00, 0x04}, + {66000, 0x01, 0x04}, + {99000, 0x01, 0x05} +}; + +static struct cpufreq_frequency_table elanfreq_table[] = { + {0, 1000}, + {1, 2000}, + {2, 4000}, + {3, 8000}, + {4, 16000}, + {5, 33000}, + {6, 66000}, + {7, 99000}, + {0, CPUFREQ_TABLE_END}, +}; + + +/** + * elanfreq_get_cpu_frequency: determine current cpu speed + * + * Finds out at which frequency the CPU of the Elan SOC runs + * at the moment. Frequencies from 1 to 33 MHz are generated + * the normal way, 66 and 99 MHz are called "Hyperspeed Mode" + * and have the rest of the chip running with 33 MHz. + */ + +static unsigned int elanfreq_get_cpu_frequency(void) +{ + u8 clockspeed_reg; /* Clock Speed Register */ + + local_irq_disable(); + outb_p(0x80,REG_CSCIR); + clockspeed_reg = inb_p(REG_CSCDR); + local_irq_enable(); + + if ((clockspeed_reg & 0xE0) == 0xE0) { return 0; } + + /* Are we in CPU clock multiplied mode (66/99 MHz)? */ + if ((clockspeed_reg & 0xE0) == 0xC0) { + if ((clockspeed_reg & 0x01) == 0) { + return 66000; + } else { + return 99000; + } + } + + /* 33 MHz is not 32 MHz... */ + if ((clockspeed_reg & 0xE0)==0xA0) + return 33000; + + return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000); +} + + +/** + * elanfreq_set_cpu_frequency: Change the CPU core frequency + * @cpu: cpu number + * @freq: frequency in kHz + * + * This function takes a frequency value and changes the CPU frequency + * according to this. Note that the frequency has to be checked by + * elanfreq_validatespeed() for correctness! + * + * There is no return value. + */ + +static void elanfreq_set_cpu_state (unsigned int state) { + + struct cpufreq_freqs freqs; + + freqs.old = elanfreq_get_cpu_frequency(); + freqs.new = elan_multiplier[state].clock; + freqs.cpu = 0; /* elanfreq.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",elan_multiplier[state].clock); + + + /* + * Access to the Elan's internal registers is indexed via + * 0x22: Chip Setup & Control Register Index Register (CSCI) + * 0x23: Chip Setup & Control Register Data Register (CSCD) + * + */ + + /* + * 0x40 is the Power Management Unit's Force Mode Register. 
+ * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency) + */ + + local_irq_disable(); + outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */ + outb_p(0x00,REG_CSCDR); + local_irq_enable(); /* wait till internal pipelines and */ + udelay(1000); /* buffers have cleaned up */ + + local_irq_disable(); + + /* now, set the CPU clock speed register (0x80) */ + outb_p(0x80,REG_CSCIR); + outb_p(elan_multiplier[state].val80h,REG_CSCDR); + + /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */ + outb_p(0x40,REG_CSCIR); + outb_p(elan_multiplier[state].val40h,REG_CSCDR); + udelay(10000); + local_irq_enable(); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +}; + + +/** + * elanfreq_validatespeed: test if frequency range is valid + * + * This function checks if a given frequency range in kHz is valid + * for the hardware supported by the driver. + */ + +static int elanfreq_verify (struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); +} + +static int elanfreq_target (struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate)) + return -EINVAL; + + elanfreq_set_cpu_state(newstate); + + return 0; +} + + +/* + * Module init and exit code + */ + +static int elanfreq_cpu_init(struct cpufreq_policy *policy) +{ + struct cpuinfo_x86 *c = cpu_data; + unsigned int i; + + /* capability check */ + if ((c->x86_vendor != X86_VENDOR_AMD) || + (c->x86 != 4) || (c->x86_model!=10)) + return -ENODEV; + + /* max freq */ + if (!max_freq) + max_freq = elanfreq_get_cpu_frequency(); + + /* table init */ + for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { + if (elanfreq_table[i].frequency > max_freq) + elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; + } + + /* cpuinfo and default policy values */ + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + policy->cur = elanfreq_get_cpu_frequency(); + + return cpufreq_frequency_table_cpuinfo(policy, &elanfreq_table[0]);; +} + + +#ifndef MODULE +/** + * elanfreq_setup - elanfreq command line parameter parsing + * + * elanfreq command line parameter. Use: + * elanfreq=66000 + * to set the maximum CPU frequency to 66 MHz. Note that in + * case you do not give this boot parameter, the maximum + * frequency will fall back to _current_ CPU frequency which + * might be lower. If you build this as a module, use the + * max_freq module parameter instead. 
+ */ +static int __init elanfreq_setup(char *str) +{ + max_freq = simple_strtoul(str, &str, 0); + return 1; +} +__setup("elanfreq=", elanfreq_setup); +#endif + + +static struct cpufreq_driver elanfreq_driver = { + .verify = elanfreq_verify, + .target = elanfreq_target, + .init = elanfreq_cpu_init, + .name = "elanfreq", +}; + + +static int __init elanfreq_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + + /* Test if we have the right hardware */ + if ((c->x86_vendor != X86_VENDOR_AMD) || + (c->x86 != 4) || (c->x86_model!=10)) + { + printk(KERN_INFO "elanfreq: error: no Elan processor found!\n"); + return -ENODEV; + } + + return cpufreq_register_driver(&elanfreq_driver); +} + + +static void __exit elanfreq_exit(void) +{ + cpufreq_unregister_driver(&elanfreq_driver); +} + + +MODULE_PARM (max_freq, "i"); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Robert Schwebel , Sven Geggus "); +MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs"); + +module_init(elanfreq_init); +module_exit(elanfreq_exit); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/entry.S linux.22-ac2/arch/i386/kernel/entry.S --- linux.vanilla/arch/i386/kernel/entry.S 2003-06-14 00:11:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/entry.S 2003-06-29 16:10:34.000000000 +0100 @@ -79,7 +79,7 @@ exec_domain = 16 need_resched = 20 tsk_ptrace = 24 -processor = 52 +cpu = 32 ENOSYS = 38 @@ -184,9 +184,11 @@ ENTRY(ret_from_fork) +#if CONFIG_SMP pushl %ebx call SYMBOL_NAME(schedule_tail) addl $4, %esp +#endif GET_CURRENT(%ebx) testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS jne tracesys_exit @@ -645,8 +647,8 @@ .long SYMBOL_NAME(sys_tkill) .long SYMBOL_NAME(sys_sendfile64) .long SYMBOL_NAME(sys_ni_syscall) /* 240 reserved for futex */ - .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_setaffinity */ - .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_getaffinity */ + .long SYMBOL_NAME(sys_sched_setaffinity) + .long SYMBOL_NAME(sys_sched_getaffinity) .long SYMBOL_NAME(sys_ni_syscall) /* sys_set_thread_area */ .long SYMBOL_NAME(sys_ni_syscall) /* sys_get_thread_area */ .long SYMBOL_NAME(sys_ni_syscall) /* 245 sys_io_setup */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/gx-suspmod.c linux.22-ac2/arch/i386/kernel/gx-suspmod.c --- linux.vanilla/arch/i386/kernel/gx-suspmod.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/gx-suspmod.c 2003-07-31 14:49:39.000000000 +0100 @@ -0,0 +1,510 @@ +/* + * Cyrix MediaGX and NatSemi Geode Suspend Modulation + * (C) 2002 Zwane Mwaikambo + * (C) 2002 Hiroshi Miura + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation + * + * The author(s) of this software shall not be held liable for damages + * of any nature resulting due to the use of this software. This + * software is provided AS-IS with no warranties. + * + * Theoritical note: + * + * (see Geode(tm) CS5530 manual (rev.4.1) page.56) + * + * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0 + * are based on Suspend Moduration. + * + * Suspend Modulation works by asserting and de-asserting the SUSP# pin + * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP# + * the CPU enters an idle state. GX1 stops its core clock when SUSP# is + * asserted then power consumption is reduced. 
+ * + * Suspend Modulation's OFF/ON duration are configurable + * with 'Suspend Modulation OFF Count Register' + * and 'Suspend Modulation ON Count Register'. + * These registers are 8bit counters that represent the number of + * 32us intervals which the SUSP# pin is asserted/de-asserted to the + * processor. + * + * These counters define a ratio which is the effective frequency + * of operation of the system. + * + * On Count + * F_eff = Fgx * ---------------------- + * On Count + Off Count + * + * 0 <= On Count, Off Count <= 255 + * + * From these limits, we can get register values + * + * on_duration + off_duration <= MAX_DURATION + * off_duration = on_duration * (stock_freq - freq) / freq + * + * on_duration = (freq * DURATION) / stock_freq + * off_duration = DURATION - on_duration + * + * + *--------------------------------------------------------------------------- + * + * ChangeLog: + * Dec. 11, 2002 Hiroshi Miura + * - rewrite for Cyrix MediaGX Cx5510/5520 and + * NatSemi Geode Cs5530(A). + * + * Jul. ??, 2002 Zwane Mwaikambo + * - cs5530_mod patch for 2.4.19-rc1. + * + *--------------------------------------------------------------------------- + * + * Todo + * Test on machines with 5510, 5530, 5530A + */ + +/************************************************************************ + * Suspend Modulation - Definitions * + ************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* PCI config registers, all at F0 */ +#define PCI_PMER1 0x80 /* power management enable register 1 */ +#define PCI_PMER2 0x81 /* power management enable register 2 */ +#define PCI_PMER3 0x82 /* power management enable register 3 */ +#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */ +#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */ +#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */ +#define PCI_MODON 0x95 /* suspend modulation ON counter register */ +#define PCI_SUSCFG 0x96 /* suspend configuration register */ + +/* PMER1 bits */ +#define GPM (1<<0) /* global power management */ +#define GIT (1<<1) /* globally enable PM device idle timers */ +#define GTR (1<<2) /* globally enable IO traps */ +#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */ +#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */ + +/* SUSCFG bits */ +#define SUSMOD (1<<0) /* enable/disable suspend modulation */ +/* the belows support only with cs5530 (after rev.1.2)/cs5530A */ +#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */ + /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */ +#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */ +/* the belows support only with cs5530A */ +#define PWRSVE_ISA (1<<3) /* stop ISA clock */ +#define PWRSVE (1<<4) /* active idle */ + +struct gxfreq_params { + u8 on_duration; + u8 off_duration; + u8 pci_suscfg; + u8 pci_pmer1; + u8 pci_pmer2; + u8 pci_rev; + struct pci_dev *cs55x0; +}; + +static struct gxfreq_params *gx_params; +static int stock_freq; + +/* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ +static int pci_busclk = 0; +MODULE_PARM(pci_busclk, "i"); + +/* maximum duration for which the cpu may be suspended + * (32us * MAX_DURATION). If no parameter is given, this defaults + * to 255. + * Note that this leads to a maximum of 8 ms(!) 
where the CPU clock + * is suspended -- processing power is just 0.39% of what it used to be, + * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ +static int max_duration = 255; +MODULE_PARM(max_duration, "i"); + +/* For the default policy, we want at least some processing power + * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV) + */ +#define POLICY_MIN_DIV 20 + + +/* DEBUG + * Define it if you want verbose debug output + */ + +#define SUSPMOD_DEBUG 1 + +#ifdef SUSPMOD_DEBUG +#define dprintk(msg...) printk(KERN_DEBUG "cpufreq:" msg) +#else +#define dprintk(msg...) do { } while(0) +#endif + +/** + * we can detect a core multipiler from dir0_lsb + * from GX1 datasheet p.56, + * MULT[3:0]: + * 0000 = SYSCLK multiplied by 4 (test only) + * 0001 = SYSCLK multiplied by 10 + * 0010 = SYSCLK multiplied by 4 + * 0011 = SYSCLK multiplied by 6 + * 0100 = SYSCLK multiplied by 9 + * 0101 = SYSCLK multiplied by 5 + * 0110 = SYSCLK multiplied by 7 + * 0111 = SYSCLK multiplied by 8 + * of 33.3MHz + **/ +static int gx_freq_mult[16] = { + 4, 10, 4, 6, 9, 5, 7, 8, + 0, 0, 0, 0, 0, 0, 0, 0 +}; + + +/**************************************************************** + * Low Level chipset interface * + ****************************************************************/ +static struct pci_device_id gx_chipset_tbl[] __initdata = { + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID }, + { 0, }, +}; + +/** + * gx_detect_chipset: + * + **/ +static __init struct pci_dev *gx_detect_chipset(void) +{ + struct pci_dev *gx_pci = NULL; + + /* check if CPU is a MediaGX or a Geode. */ + if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) && + (current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { + printk(KERN_INFO "gx-suspmod: error: no MediaGX/Geode processor found!\n"); + return NULL; + } + + /* detect which companion chip is used */ + while ((gx_pci = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) { + if ((pci_match_device (gx_chipset_tbl, gx_pci)) != NULL) { + return gx_pci; + } + } + + dprintk(KERN_INFO "gx-suspmod: error: no supported chipset found!\n"); + return NULL; +} + +/** + * gx_get_cpuspeed: + * + * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs. + */ +static int gx_get_cpuspeed(void) +{ + if ((gx_params->pci_suscfg & SUSMOD) == 0) + return stock_freq; + + return (stock_freq * gx_params->on_duration) + / (gx_params->on_duration + gx_params->off_duration); +} + +/** + * gx_validate_speed: + * determine current cpu speed + * +**/ + +static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration) +{ + unsigned int i; + u8 tmp_on, tmp_off; + int old_tmp_freq = stock_freq; + int tmp_freq; + + *on_duration=1; + *off_duration=0; + + for (i=max_duration; i>0; i--) { + tmp_on = ((khz * i) / stock_freq) & 0xff; + tmp_off = i - tmp_on; + tmp_freq = (stock_freq * tmp_on) / i; + /* if this relation is closer to khz, use this. If it's equal, + * prefer it, too - lower latency */ + if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) { + *on_duration = tmp_on; + *off_duration = tmp_off; + old_tmp_freq = tmp_freq; + } + } + + return old_tmp_freq; +} + + +/** + * gx_set_cpuspeed: + * set cpu speed in khz. 
+ **/ + +static void gx_set_cpuspeed(unsigned int khz) +{ + u8 suscfg, pmer1; + unsigned int new_khz; + unsigned long flags; + struct cpufreq_freqs freqs; + + + freqs.cpu = 0; + freqs.old = gx_get_cpuspeed(); + + new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration); + + freqs.new = new_khz; + + if (new_khz == stock_freq) { /* if new khz == 100% of CPU speed, it is special case */ + local_irq_save(flags); + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, (gx_params->pci_suscfg & ~(SUSMOD))); + pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &(gx_params->pci_suscfg)); + local_irq_restore(flags); + dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n"); + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + return; + } + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + local_irq_save(flags); + switch (gx_params->cs55x0->device) { + case PCI_DEVICE_ID_CYRIX_5530_LEGACY: + pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP; + /* FIXME: need to test other values -- Zwane,Miura */ + pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */ + pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */ + pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1); + + if (gx_params->pci_rev < 0x10) { /* CS5530(rev 1.2, 1.3) */ + suscfg = gx_params->pci_suscfg | SUSMOD; + } else { /* CS5530A,B.. */ + suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE; + } + break; + case PCI_DEVICE_ID_CYRIX_5520: + case PCI_DEVICE_ID_CYRIX_5510: + suscfg = gx_params->pci_suscfg | SUSMOD; + break; + default: + local_irq_restore(flags); + dprintk("fatal: try to set unknown chipset.\n"); + return; + } + + pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration); + pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration); + + pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg); + pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg); + + local_irq_restore(flags); + + gx_params->pci_suscfg = suscfg; + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", + gx_params->on_duration * 32, gx_params->off_duration * 32); + dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); +} + +/**************************************************************** + * High level functions * + ****************************************************************/ + +/* + * cpufreq_gx_verify: test if frequency range is valid + * + * This function checks if a given frequency range in kHz is valid + * for the hardware supported by the driver. + */ + +static int cpufreq_gx_verify(struct cpufreq_policy *policy) +{ + unsigned int tmp_freq = 0; + u8 tmp1, tmp2; + + if (!stock_freq || !policy) + return -EINVAL; + + policy->cpu = 0; + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + + /* it needs to be assured that at least one supported frequency is + * within policy->min and policy->max. If it is not, policy->max + * needs to be increased until one freuqency is supported. + * policy->min may not be decreased, though. This way we guarantee a + * specific processing capacity. 
+ */ + tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); + if (tmp_freq < policy->min) + tmp_freq += stock_freq / max_duration; + policy->min = tmp_freq; + if (policy->min > policy->max) + policy->max = tmp_freq; + tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2); + if (tmp_freq > policy->max) + tmp_freq -= stock_freq / max_duration; + policy->max = tmp_freq; + if (policy->max < policy->min) + policy->max = policy->min; + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + + return 0; +} + +/* + * cpufreq_gx_target: + * + */ +static int cpufreq_gx_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + u8 tmp1, tmp2; + unsigned int tmp_freq; + + if (!stock_freq || !policy) + return -EINVAL; + + policy->cpu = 0; + + tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2); + while (tmp_freq < policy->min) { + tmp_freq += stock_freq / max_duration; + tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); + } + while (tmp_freq > policy->max) { + tmp_freq -= stock_freq / max_duration; + tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); + } + + gx_set_cpuspeed(tmp_freq); + + return 0; +} + +static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) +{ + int maxfreq, curfreq; + + if (!policy || policy->cpu != 0) + return -ENODEV; + + /* determine maximum frequency */ + if (pci_busclk) { + maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; + } else if (cpu_khz) { + maxfreq = cpu_khz; + } else { + maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; + } + stock_freq = maxfreq; + curfreq = gx_get_cpuspeed(); + + dprintk("cpu max frequency is %d.\n", maxfreq); + dprintk("cpu current frequency is %dkHz.\n",curfreq); + + /* setup basic struct for cpufreq API */ + policy->cpu = 0; + + if (max_duration < POLICY_MIN_DIV) + policy->min = maxfreq / max_duration; + else + policy->min = maxfreq / POLICY_MIN_DIV; + policy->max = maxfreq; + policy->cur = curfreq; + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + policy->cpuinfo.min_freq = maxfreq / max_duration; + policy->cpuinfo.max_freq = maxfreq; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + return 0; +} + +/* + * cpufreq_gx_init: + * MediaGX/Geode GX initialize cpufreq driver + */ +static struct cpufreq_driver gx_suspmod_driver = { + .verify = cpufreq_gx_verify, + .target = cpufreq_gx_target, + .init = cpufreq_gx_cpu_init, + .name = "gx-suspmod", +}; + +static int __init cpufreq_gx_init(void) +{ + int ret; + struct gxfreq_params *params; + struct pci_dev *gx_pci; + u32 class_rev; + + /* Test if we have the right hardware */ + if ((gx_pci = gx_detect_chipset()) == NULL) + return -ENODEV; + + /* check whether module parameters are sane */ + if (max_duration > 0xff) + max_duration = 0xff; + + dprintk("geode suspend modulation available.\n"); + + params = kmalloc(sizeof(struct gxfreq_params), GFP_KERNEL); + if (params == NULL) + return -ENOMEM; + memset(params, 0, sizeof(struct gxfreq_params)); + + params->cs55x0 = gx_pci; + gx_params = params; + + /* keep cs55x0 configurations */ + pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg)); + pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1)); + pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); + pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); + pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); + pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev); + params->pci_rev = 
class_rev && 0xff; + + if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) { + kfree(params); + return ret; /* register error! */ + } + + return 0; +} + +static void __exit cpufreq_gx_exit(void) +{ + cpufreq_unregister_driver(&gx_suspmod_driver); + kfree(gx_params); +} + +MODULE_AUTHOR ("Hiroshi Miura "); +MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); +MODULE_LICENSE ("GPL"); + +module_init(cpufreq_gx_init); +module_exit(cpufreq_gx_exit); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/head.S linux.22-ac2/arch/i386/kernel/head.S --- linux.vanilla/arch/i386/kernel/head.S 2003-06-14 00:11:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/head.S 2003-08-13 14:29:16.000000000 +0100 @@ -34,7 +34,7 @@ #define X86_HARD_MATH CPU_PARAMS+6 #define X86_CPUID CPU_PARAMS+8 #define X86_CAPABILITY CPU_PARAMS+12 -#define X86_VENDOR_ID CPU_PARAMS+28 +#define X86_VENDOR_ID CPU_PARAMS+36 /* * swapper_pg_dir is the main page directory, address 0x00101000 @@ -445,4 +445,15 @@ .quad 0x00409a0000000000 /* 0x48 APM CS code */ .quad 0x00009a0000000000 /* 0x50 APM CS 16 code (16 bit) */ .quad 0x0040920000000000 /* 0x58 APM DS data */ + /* Segments used for calling PnP BIOS */ + .quad 0x00c09a0000000000 /* 0x60 32-bit code */ + .quad 0x00809a0000000000 /* 0x68 16-bit code */ + .quad 0x0080920000000000 /* 0x70 16-bit data */ + .quad 0x0080920000000000 /* 0x78 16-bit data */ + .quad 0x0080920000000000 /* 0x80 16-bit data */ + .quad 0x0000000000000000 /* 0x88 not used */ + .quad 0x0000000000000000 /* 0x90 not used */ + .quad 0x0000000000000000 /* 0x98 not used */ + /* Per CPU segments */ .fill NR_CPUS*4,8,0 /* space for TSS's and LDT's */ + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/i386_ksyms.c linux.22-ac2/arch/i386/kernel/i386_ksyms.c --- linux.vanilla/arch/i386/kernel/i386_ksyms.c 2003-06-14 00:11:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/i386_ksyms.c 2003-08-08 13:56:09.000000000 +0100 @@ -28,6 +28,7 @@ #include #include #include +#include extern void dump_thread(struct pt_regs *, struct user *); extern spinlock_t rtc_lock; @@ -49,6 +50,7 @@ EXPORT_SYMBOL(drive_info); #endif +extern unsigned long cpu_khz; extern unsigned long get_cmos_time(void); /* platform dependent support */ @@ -71,6 +73,7 @@ EXPORT_SYMBOL(pm_idle); EXPORT_SYMBOL(pm_power_off); EXPORT_SYMBOL(get_cmos_time); +EXPORT_SYMBOL(cpu_khz); EXPORT_SYMBOL(apm_info); EXPORT_SYMBOL(gdt); EXPORT_SYMBOL(empty_zero_page); @@ -130,7 +133,9 @@ EXPORT_SYMBOL(cpu_data); EXPORT_SYMBOL(kernel_flag_cacheline); EXPORT_SYMBOL(smp_num_cpus); +EXPORT_SYMBOL(smp_num_siblings); EXPORT_SYMBOL(cpu_online_map); +EXPORT_SYMBOL_GPL(cpu_sibling_map); EXPORT_SYMBOL_NOVERS(__write_lock_failed); EXPORT_SYMBOL_NOVERS(__read_lock_failed); @@ -180,3 +185,8 @@ #ifdef CONFIG_MULTIQUAD EXPORT_SYMBOL(xquad_portio); #endif + +#ifdef CONFIG_EDD_MODULE +EXPORT_SYMBOL(edd); +EXPORT_SYMBOL(eddnr); +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/i387.c linux.22-ac2/arch/i386/kernel/i387.c --- linux.vanilla/arch/i386/kernel/i387.c 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/i387.c 2003-08-01 17:49:13.000000000 +0100 @@ -248,7 +248,7 @@ * FXSR floating point environment conversions. 
*/ -static inline int convert_fxsr_to_user( struct _fpstate *buf, +static int convert_fxsr_to_user( struct _fpstate *buf, struct i387_fxsave_struct *fxsave ) { unsigned long env[7]; @@ -270,13 +270,18 @@ to = &buf->_st[0]; from = (struct _fpxreg *) &fxsave->st_space[0]; for ( i = 0 ; i < 8 ; i++, to++, from++ ) { - if ( __copy_to_user( to, from, sizeof(*to) ) ) + unsigned long *t = (unsigned long *)to; + unsigned long *f = (unsigned long *)from; + + if (__put_user(*f, t) || + __put_user(*(f + 1), t + 1) || + __put_user(from->exponent, &to->exponent)) return 1; } return 0; } -static inline int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave, +static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave, struct _fpstate *buf ) { unsigned long env[7]; @@ -299,7 +304,12 @@ to = (struct _fpxreg *) &fxsave->st_space[0]; from = &buf->_st[0]; for ( i = 0 ; i < 8 ; i++, to++, from++ ) { - if ( __copy_from_user( to, from, sizeof(*from) ) ) + unsigned long *t = (unsigned long *)to; + unsigned long *f = (unsigned long *)from; + + if (__get_user(*t, f) || + __get_user(*(t + 1), f + 1) || + __get_user(to->exponent, &from->exponent)) return 1; } return 0; @@ -321,7 +331,7 @@ return 1; } -static inline int save_i387_fxsave( struct _fpstate *buf ) +static int save_i387_fxsave( struct _fpstate *buf ) { struct task_struct *tsk = current; int err = 0; @@ -371,7 +381,7 @@ sizeof(struct i387_fsave_struct) ); } -static inline int restore_i387_fxsave( struct _fpstate *buf ) +static int restore_i387_fxsave( struct _fpstate *buf ) { int err; struct task_struct *tsk = current; @@ -389,7 +399,7 @@ if ( HAVE_HWFP ) { if ( cpu_has_fxsr ) { - err = restore_i387_fxsave( buf ); + err = restore_i387_fxsave( buf ); } else { err = restore_i387_fsave( buf ); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/io_apic.c linux.22-ac2/arch/i386/kernel/io_apic.c --- linux.vanilla/arch/i386/kernel/io_apic.c 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/io_apic.c 2003-08-28 22:12:42.000000000 +0100 @@ -44,7 +44,7 @@ unsigned int int_dest_addr_mode = APIC_DEST_LOGICAL; unsigned char int_delivery_mode = dest_LowestPrio; - +extern unsigned int xapic_support; /* * # of IRQ routing registers @@ -169,6 +169,14 @@ { struct IO_APIC_route_entry entry; unsigned long flags; + + /* Check delivery_mode to be sure we're not clearing an SMI pin */ + *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); + *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin); + if (entry.delivery_mode == dest_SMI){ + printk(KERN_INFO "apic %i pin %i is an SMI pin!\n", apic, pin); + return; + } /* * Disable it in the IO-APIC irq-routing table: @@ -190,6 +198,105 @@ clear_IO_APIC_pin(apic, pin); } +static void set_ioapic_affinity (unsigned int irq, unsigned long mask) +{ + unsigned long flags; + + /* + * Only the first 8 bits are valid. + */ + mask = mask << 24; + spin_lock_irqsave(&ioapic_lock, flags); + __DO_ACTION(1, = mask, ) + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +#ifndef CONFIG_SMP + +void send_IPI_self(int vector) +{ + unsigned int cfg; + + /* + * Wait for idle. + */ + apic_wait_icr_idle(); + cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL; + /* + * Send the IPI. The write to APIC_ICR fires this off. 
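+ * (APIC_DEST_SELF selects the "self" destination shorthand, so no
+ * target APIC ID needs to be written to ICR2 beforehand.)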
+ */ + apic_write_around(APIC_ICR, cfg); +} +#endif + + +#if CONFIG_SMP + +typedef struct { + unsigned int cpu; + unsigned long timestamp; +} ____cacheline_aligned irq_balance_t; + +static irq_balance_t irq_balance[NR_IRQS] __cacheline_aligned + = { [ 0 ... NR_IRQS-1 ] = { 0, 0 } }; + +extern unsigned long irq_affinity [NR_IRQS]; + +#endif + +#define IDLE_ENOUGH(cpu,now) \ + (idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1)) + +#define IRQ_ALLOWED(cpu,allowed_mask) \ + ((1 << cpu) & (allowed_mask)) + +static unsigned long move(int curr_cpu, unsigned long allowed_mask, unsigned long now, int direction) +{ + int search_idle = 1; + int cpu = curr_cpu; + + goto inside; + + do { + if (unlikely(cpu == curr_cpu)) + search_idle = 0; +inside: + if (direction == 1) { + cpu++; + if (cpu >= smp_num_cpus) + cpu = 0; + } else { + cpu--; + if (cpu == -1) + cpu = smp_num_cpus-1; + } + } while (!IRQ_ALLOWED(cpu,allowed_mask) || + (search_idle && !IDLE_ENOUGH(cpu,now))); + + return cpu; +} + +static inline void balance_irq(int irq) +{ +#if CONFIG_SMP + irq_balance_t *entry = irq_balance + irq; + unsigned long now = jiffies; + + if (unlikely(entry->timestamp != now)) { + unsigned long allowed_mask; + int random_number; + + rdtscl(random_number); + random_number &= 1; + + allowed_mask = cpu_online_map & irq_affinity[irq]; + entry->timestamp = now; + entry->cpu = move(entry->cpu, allowed_mask, now, random_number); + set_ioapic_affinity(irq, apicid_to_phys_cpu_present(entry->cpu)); + } +#endif +} + /* * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to * specific CPU-side IRQs. @@ -783,7 +890,7 @@ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.ID); printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.delivery_type); printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.LTS); - if (reg_00.__reserved_0 || reg_00.__reserved_1 || reg_00.__reserved_2) + if (reg_00.__reserved_1 || reg_00.__reserved_2) UNEXPECTED_IO_APIC(); printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); @@ -794,7 +901,8 @@ (reg_01.entries != 0x1f) && /* dual Xeon boards */ (reg_01.entries != 0x22) && /* bigger Xeon boards */ (reg_01.entries != 0x2E) && - (reg_01.entries != 0x3F) + (reg_01.entries != 0x3F) && + (reg_01.entries != 0x03) /* Opteron */ ) UNEXPECTED_IO_APIC(); @@ -806,6 +914,7 @@ (reg_01.version != 0x10) && /* oldest IO-APICs */ (reg_01.version != 0x11) && /* Pentium/Pro IO-APICs */ (reg_01.version != 0x13) && /* Xeon IO-APICs */ + (reg_01.version != 0x14) && /* SiS */ (reg_01.version != 0x20) /* Intel P64H (82806 AA) */ ) UNEXPECTED_IO_APIC(); @@ -1099,7 +1208,8 @@ old_id = mp_ioapics[apic].mpc_apicid; - if (mp_ioapics[apic].mpc_apicid >= apic_broadcast_id) { + if (!xapic_support && + (mp_ioapics[apic].mpc_apicid >= apic_broadcast_id)) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", apic, mp_ioapics[apic].mpc_apicid); printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", @@ -1113,7 +1223,8 @@ * 'stuck on smp_invalidate_needed IPI wait' messages. * I/O APIC IDs no longer have any meaning for xAPICs and SAPICs. */ - if ((clustered_apic_mode != CLUSTERED_APIC_XAPIC) && + if (!xapic_support && + (clustered_apic_mode != CLUSTERED_APIC_XAPIC) && (phys_id_present_map & (1 << mp_ioapics[apic].mpc_apicid))) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", apic, mp_ioapics[apic].mpc_apicid); @@ -1155,12 +1266,15 @@ spin_unlock_irqrestore(&ioapic_lock, flags); /* - * Sanity check + * Sanity check. 
Note that only the lowest-order 4 bits + * are verified by the test below. This is for handling + * chipsets that implement 8-bit-wide modifications to + * the ID while only providing 4-bit-wide data on reads. */ spin_lock_irqsave(&ioapic_lock, flags); *(int *)®_00 = io_apic_read(apic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); - if (reg_00.ID != mp_ioapics[apic].mpc_apicid) + if ((reg_00.ID ^ mp_ioapics[apic].mpc_apicid) & 0xf) panic("could not set ID!\n"); else printk(" ok.\n"); @@ -1248,6 +1362,7 @@ */ static void ack_edge_ioapic_irq(unsigned int irq) { + balance_irq(irq); if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED)) == (IRQ_PENDING | IRQ_DISABLED)) mask_IO_APIC_irq(irq); @@ -1287,6 +1402,7 @@ unsigned long v; int i; + balance_irq(irq); /* * It appears there is an erratum which affects at least version 0x11 * of I/O APIC (that's the 82093AA and cores integrated into various @@ -1343,38 +1459,6 @@ static void mask_and_ack_level_ioapic_irq (unsigned int irq) { /* nothing */ } -#ifndef CONFIG_SMP - -void send_IPI_self(int vector) -{ - unsigned int cfg; - - /* - * Wait for idle. - */ - apic_wait_icr_idle(); - cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL; - /* - * Send the IPI. The write to APIC_ICR fires this off. - */ - apic_write_around(APIC_ICR, cfg); -} - -#endif /* CONFIG_SMP */ - -static void set_ioapic_affinity (unsigned int irq, unsigned long mask) -{ - unsigned long flags; - /* - * Only the first 8 bits are valid. - */ - mask = mask << 24; - - spin_lock_irqsave(&ioapic_lock, flags); - __DO_ACTION(1, = mask, ) - spin_unlock_irqrestore(&ioapic_lock, flags); -} - /* * Level and edge triggered IO-APIC interrupts need different handling, * so we use two separate IRQ descriptors. Edge triggered IRQs can be diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/irq.c linux.22-ac2/arch/i386/kernel/irq.c --- linux.vanilla/arch/i386/kernel/irq.c 2002-11-29 21:27:11.000000000 +0000 +++ linux.22-ac2/arch/i386/kernel/irq.c 2003-06-29 16:10:34.000000000 +0100 @@ -1090,7 +1090,7 @@ static struct proc_dir_entry * smp_affinity_entry [NR_IRQS]; -static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL }; +unsigned long irq_affinity [NR_IRQS] = { [0 ... 
NR_IRQS-1] = ~0UL }; static int irq_affinity_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/ldt.c linux.22-ac2/arch/i386/kernel/ldt.c --- linux.vanilla/arch/i386/kernel/ldt.c 2001-10-17 22:46:29.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/ldt.c 2003-06-29 16:10:34.000000000 +0100 @@ -12,37 +12,139 @@ #include #include #include +#include #include #include #include #include +#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */ +static void flush_ldt(void *mm) +{ + if (current->active_mm) + load_LDT(¤t->active_mm->context); +} +#endif + +static int alloc_ldt(mm_context_t *pc, int mincount, int reload) +{ + void *oldldt; + void *newldt; + int oldsize; + + if (mincount <= pc->size) + return 0; + oldsize = pc->size; + mincount = (mincount+511)&(~511); + if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE) + newldt = vmalloc(mincount*LDT_ENTRY_SIZE); + else + newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL); + + if (!newldt) + return -ENOMEM; + + if (oldsize) + memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE); + + oldldt = pc->ldt; + memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE); + wmb(); + pc->ldt = newldt; + pc->size = mincount; + if (reload) { + load_LDT(pc); +#ifdef CONFIG_SMP + if (current->mm->cpu_vm_mask != (1< PAGE_SIZE) + vfree(oldldt); + else + kfree(oldldt); + } + return 0; +} + +static inline int copy_ldt(mm_context_t *new, mm_context_t *old) +{ + int err = alloc_ldt(new, old->size, 0); + if (err < 0) { + printk(KERN_WARNING "ldt allocation failed\n"); + new->size = 0; + return err; + } + memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE); + return 0; +} + +/* + * we do not have to muck with descriptors here, that is + * done in switch_mm() as needed. + */ +int init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + struct mm_struct * old_mm; + int retval = 0; + + init_MUTEX(&mm->context.sem); + mm->context.size = 0; + old_mm = current->mm; + if (old_mm && old_mm->context.size > 0) { + down(&old_mm->context.sem); + retval = copy_ldt(&mm->context, &old_mm->context); + up(&old_mm->context.sem); + } + return retval; +} + /* - * read_ldt() is not really atomic - this is not a problem since - * synchronization of reads and writes done to the LDT has to be - * assured by user-space anyway. Writes are atomic, to protect - * the security checks done on new descriptors. + * No need to lock the MM as we are the last user + * Do not touch the ldt register, we are already + * in the next thread. 
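+ *
+ * The ldt buffer may have been allocated with either vmalloc() or
+ * kmalloc() (alloc_ldt() above switches to vmalloc() once the table
+ * no longer fits in a page), so it is freed with the matching
+ * allocator below.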
*/ +void destroy_context(struct mm_struct *mm) +{ + if (mm->context.size) { + if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE) + vfree(mm->context.ldt); + else + kfree(mm->context.ldt); + mm->context.size = 0; + } +} + static int read_ldt(void * ptr, unsigned long bytecount) { int err; unsigned long size; struct mm_struct * mm = current->mm; - err = 0; - if (!mm->context.segments) - goto out; + if (!mm->context.size) + return 0; + if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) + bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; - size = LDT_ENTRIES*LDT_ENTRY_SIZE; + down(&mm->context.sem); + size = mm->context.size*LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; - err = size; - if (copy_to_user(ptr, mm->context.segments, size)) + err = 0; + if (copy_to_user(ptr, mm->context.ldt, size)) err = -EFAULT; -out: - return err; + up(&mm->context.sem); + if (err < 0) + return err; + if (size != bytecount) { + /* zero-fill the rest */ + clear_user(ptr+size, bytecount-size); + } + return bytecount; } static int read_default_ldt(void * ptr, unsigned long bytecount) @@ -53,7 +155,7 @@ err = 0; address = &default_ldt[0]; - size = sizeof(struct desc_struct); + size = 5*sizeof(struct desc_struct); if (size > bytecount) size = bytecount; @@ -88,24 +190,14 @@ goto out; } - /* - * the GDT index of the LDT is allocated dynamically, and is - * limited by MAX_LDT_DESCRIPTORS. - */ - down_write(&mm->mmap_sem); - if (!mm->context.segments) { - void * segments = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE); - error = -ENOMEM; - if (!segments) + down(&mm->context.sem); + if (ldt_info.entry_number >= mm->context.size) { + error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1); + if (error < 0) goto out_unlock; - memset(segments, 0, LDT_ENTRIES*LDT_ENTRY_SIZE); - wmb(); - mm->context.segments = segments; - mm->context.cpuvalid = 1UL << smp_processor_id(); - load_LDT(mm); } - lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.segments); + lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt); /* Allow LDTs to be cleared by the user. */ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { @@ -143,7 +235,7 @@ error = 0; out_unlock: - up_write(&mm->mmap_sem); + up(&mm->context.sem); out: return error; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/longhaul.c linux.22-ac2/arch/i386/kernel/longhaul.c --- linux.vanilla/arch/i386/kernel/longhaul.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/longhaul.c 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,635 @@ +/* + * (C) 2001-2003 Dave Jones. + * (C) 2002 Padraig Brady. + * + * Licensed under the terms of the GNU GPL License version 2. + * Based upon datasheets & sample CPUs kindly provided by VIA. + * + * VIA have currently 3 different versions of Longhaul. + * + * +---------------------+----------+---------------------------------+ + * | Marketing name | Codename | longhaul version / features. | + * +---------------------+----------+---------------------------------+ + * | Samuel/CyrixIII | C5A | v1 : multipliers only | + * | Samuel2/C3 | C3E/C5B | v1 : multiplier only | + * | Ezra | C5C | v2 : multipliers & voltage | + * | Ezra-T | C5M/C5N | v3 : multipliers, voltage & FSB | + * +---------------------+----------+---------------------------------+ + * + * BIG FAT DISCLAIMER: Work in progress code. 
Possibly *dangerous* + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "longhaul.h" + +#define DEBUG + +#ifdef DEBUG +#define dprintk(msg...) printk(msg) +#else +#define dprintk(msg...) do { } while(0) +#endif + +#define PFX "longhaul: " + +static unsigned int numscales=16, numvscales; +static int minvid, maxvid; +static int can_scale_voltage; +static int vrmrev; + + +/* Module parameters */ +static int dont_scale_voltage; +static unsigned int fsb; + +#define __hlt() __asm__ __volatile__("hlt": : :"memory") + +/* + * Clock ratio tables. + * The eblcr ones specify the ratio read from the CPU. + * The clock_ratio ones specify what to write to the CPU. + */ + +/* VIA C3 Samuel 1 & Samuel 2 (stepping 0)*/ +static int __initdata longhaul1_clock_ratio[16] = { + -1, /* 0000 -> RESERVED */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + -1, /* 0011 -> RESERVED */ + -1, /* 0100 -> RESERVED */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + -1, /* 1110 -> RESERVED */ + -1, /* 1111 -> RESERVED */ +}; + +static int __initdata samuel1_eblcr[16] = { + 50, /* 0000 -> RESERVED */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + -1, /* 0011 -> RESERVED */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + -1, /* 0111 -> RESERVED */ + -1, /* 1000 -> RESERVED */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + -1, /* 1100 -> RESERVED */ + 75, /* 1101 -> 7.5x */ + -1, /* 1110 -> RESERVED */ + 65, /* 1111 -> 6.5x */ +}; + +/* VIA C3 Samuel2 Stepping 1->15 & VIA C3 Ezra */ +static int __initdata longhaul2_clock_ratio[16] = { + 100, /* 0000 -> 10.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 90, /* 0011 -> 9.0x */ + 95, /* 0100 -> 9.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 120, /* 1111 -> 12.0x */ +}; + +static int __initdata samuel2_eblcr[16] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 110, /* 0111 -> 11.0x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 130, /* 1110 -> 13.0x */ + 65, /* 1111 -> 6.5x */ +}; + +static int __initdata ezra_eblcr[16] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 95, /* 0111 -> 9.5x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 65, /* 1111 -> 6.5x */ +}; + +/* VIA C5M. 
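(Ezra-T).  Longhaul v3 adds a fifth ratio bit (SoftBusRatio4), hence the 32-entry tables below; entries 16-31 are selected when bit 27 of the power-on MSR is set, see longhaul_get_cpu_mult().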
*/ +static int __initdata longhaul3_clock_ratio[32] = { + 100, /* 0000 -> 10.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 90, /* 0011 -> 9.0x */ + 95, /* 0100 -> 9.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 120, /* 1111 -> 12.0x */ + + -1, /* 0000 -> RESERVED (10.0x) */ + 110, /* 0001 -> 11.0x */ + 120, /* 0010 -> 12.0x */ + -1, /* 0011 -> RESERVED (9.0x)*/ + 105, /* 0100 -> 10.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 135, /* 0111 -> 13.5x */ + 140, /* 1000 -> 14.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 1010 -> 16.0x */ + 130, /* 1011 -> 13.0x */ + 145, /* 1100 -> 14.5x */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + -1, /* 1111 -> RESERVED (12.0x) */ +}; + +static int __initdata c5m_eblcr[32] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 95, /* 0111 -> 9.5x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 65, /* 1111 -> 6.5x */ + + -1, /* 0000 -> RESERVED (9.0x) */ + 110, /* 0001 -> 11.0x */ + 120, /* 0010 -> 12.0x */ + -1, /* 0011 -> RESERVED (10.0x)*/ + 135, /* 0100 -> 13.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 105, /* 0111 -> 10.5x */ + 130, /* 1000 -> 13.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 1010 -> 16.0x */ + 140, /* 1011 -> 14.0x */ + -1, /* 1100 -> RESERVED (12.0x) */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + 145, /* 1111 -> 14.5x */ +}; + +/* Voltage scales. Div by 1000 to get actual voltage. */ +static int __initdata vrm85scales[32] = { + 1250, 1200, 1150, 1100, 1050, 1800, 1750, 1700, + 1650, 1600, 1550, 1500, 1450, 1400, 1350, 1300, + 1275, 1225, 1175, 1125, 1075, 1825, 1775, 1725, + 1675, 1625, 1575, 1525, 1475, 1425, 1375, 1325, +}; + +static int __initdata mobilevrmscales[32] = { + 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, + 1600, 1550, 1500, 1450, 1500, 1350, 1300, -1, + 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, + 1075, 1050, 1025, 1000, 975, 950, 925, -1, +}; + +/* Clock ratios multiplied by 10 */ +static int clock_ratio[32]; +static int eblcr_table[32]; +static int voltage_table[32]; +static unsigned int highest_speed, lowest_speed; /* kHz */ +static int longhaul_version; +static struct cpufreq_frequency_table *longhaul_table; + + +static int longhaul_get_cpu_fsb (void) +{ + unsigned int eblcr_fsb_table[] = { 66, 133, 100, -1 }; + unsigned long invalue=0,lo, hi; + + if (fsb == 0) { + rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); + invalue = (lo & (1<<18|1<<19)) >>18; + return eblcr_fsb_table[invalue]; + } else { + return fsb; + } +} + + +static int longhaul_get_cpu_mult (void) +{ + unsigned long invalue=0,lo, hi; + + rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); + invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22; + if (longhaul_version==3) { + if (lo & (1<<27)) + invalue+=16; + } + return eblcr_table[invalue]; +} + + +/** + * longhaul_set_cpu_frequency() + * @clock_ratio_index : bitpattern of the new multiplier. 
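+ *   (clock_ratio[] holds the multiplier scaled by ten and fsb is in
+ *   MHz, so the resulting speed in kHz is ratio * fsb * 100; for
+ *   example a 7.5x multiplier on a 133 MHz FSB gives
+ *   75 * 133 * 100 = 997500 kHz.)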
+ * + * Sets a new clock ratio, and -if applicable- a new Front Side Bus + */ + +static void longhaul_setstate (unsigned int clock_ratio_index) +{ + int vidindex, i; + struct cpufreq_freqs freqs; + union msr_longhaul longhaul; + union msr_bcr2 bcr2; + + if (clock_ratio[clock_ratio_index] == -1) + return; + + if (((clock_ratio[clock_ratio_index] * fsb * 100) > highest_speed) || + ((clock_ratio[clock_ratio_index] * fsb * 100) < lowest_speed)) + return; + + freqs.old = longhaul_get_cpu_mult() * longhaul_get_cpu_fsb() * 100; + freqs.new = clock_ratio[clock_ratio_index] * fsb * 100; + freqs.cpu = 0; /* longhaul.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + dprintk (KERN_INFO PFX "FSB:%d Mult(x10):%d\n", + fsb * 100, clock_ratio[clock_ratio_index]); + + switch (longhaul_version) { + case 1: + rdmsrl (MSR_VIA_BCR2, bcr2.val); + /* Enable software clock multiplier */ + bcr2.bits.ESOFTBF = 1; + bcr2.bits.CLOCKMUL = clock_ratio_index; + wrmsrl (MSR_VIA_BCR2, bcr2.val); + + __hlt(); + + /* Disable software clock multiplier */ + rdmsrl (MSR_VIA_BCR2, bcr2.val); + bcr2.bits.ESOFTBF = 0; + wrmsrl (MSR_VIA_BCR2, bcr2.val); + break; + + case 2: + rdmsrl (MSR_VIA_LONGHAUL, longhaul.val); + longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; + longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; + longhaul.bits.EnableSoftBusRatio = 1; + /* We must program the revision key only with values we + * know about, not blindly copy it from 0:3 */ + longhaul.bits.RevisionKey = 1; + + if (can_scale_voltage) { + /* PB: TODO fix this up */ + vidindex = (((highest_speed-lowest_speed) / (fsb/2)) - + ((highest_speed-((clock_ratio[clock_ratio_index] * fsb * 100)/1000)) / (fsb/2))); + for (i=0;i<32;i++) { + dprintk (KERN_INFO "VID hunting. Looking for %d, found %d\n", + minvid+(vidindex*25), voltage_table[i]); + if (voltage_table[i]==(minvid + (vidindex * 25))) + break; + } + if (i==32) + goto bad_voltage; + + dprintk (KERN_INFO PFX "Desired vid index=%d\n", i); +#if 0 + longhaul.bits.SoftVID = i; + longhaul.bits.EnableSoftVID = 1; +#endif + } +/* FIXME: Do voltage and freq seperatly like we do in powernow-k7 */ +bad_voltage: + wrmsrl (MSR_VIA_LONGHAUL, longhaul.val); + __hlt(); + + rdmsrl (MSR_VIA_LONGHAUL, longhaul.val); + longhaul.bits.EnableSoftBusRatio = 0; + if (can_scale_voltage) + longhaul.bits.EnableSoftVID = 0; + longhaul.bits.RevisionKey = 1; + wrmsrl (MSR_VIA_LONGHAUL, longhaul.val); + break; + + case 3: + rdmsrl (MSR_VIA_LONGHAUL, longhaul.val); + longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; + longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; + longhaul.bits.EnableSoftBusRatio = 1; + /* We must program the revision key only with values we + * know about, not blindly copy it from 0:3 */ + longhaul.bits.RevisionKey = 3; /* SoftVID & SoftBSEL */ + + wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + __hlt(); + + rdmsrl (MSR_VIA_LONGHAUL, longhaul.val); + longhaul.bits.EnableSoftBusRatio = 0; + longhaul.bits.RevisionKey = 3; + wrmsrl (MSR_VIA_LONGHAUL, longhaul.val); + break; + } + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +} + + +static int __init longhaul_get_ranges (void) +{ + unsigned long invalue; + unsigned int minmult=0, maxmult=0; + unsigned int multipliers[32]= { + 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65, + -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 }; + unsigned int j, k = 0; + union msr_longhaul longhaul; + + switch (longhaul_version) { + case 1: + /* Ugh, Longhaul v1 didn't have the min/max MSRs. 
+ Assume min=3.0x & max = whatever we booted at. */ + minmult = 30; + maxmult = longhaul_get_cpu_mult(); + break; + + case 2 ... 3: + rdmsrl (MSR_VIA_LONGHAUL, longhaul.val); + + invalue = longhaul.bits.MaxMHzBR; + if (longhaul.bits.MaxMHzBR4) + invalue += 16; + maxmult=multipliers[invalue]; + +#if 0 + invalue = longhaul.bits.MinMHzBR; + if (longhaul.bits.MinMHzBR4); + invalue += 16; + minmult = multipliers[invalue]; +#else + minmult = 30; /* as per spec */ +#endif + break; + } + + highest_speed = maxmult * fsb * 100; + lowest_speed = minmult * fsb * 100; + dprintk (KERN_INFO PFX "MinMult(x10)=%d MaxMult(x10)=%d\n", + minmult, maxmult); + dprintk (KERN_INFO PFX "Lowestspeed=%d Highestspeed=%d\n", + lowest_speed, highest_speed); + + longhaul_table = kmalloc((numscales + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL); + if(!longhaul_table) + return -ENOMEM; + + for (j=0; (j maxmult) || ((unsigned int)clock_ratio[j] < minmult)) + continue; + longhaul_table[k].frequency= clock_ratio[j] * fsb * 100; + longhaul_table[k].index = (j << 8); + k++; + } + + longhaul_table[k].frequency = CPUFREQ_TABLE_END; + if (!k) { + kfree (longhaul_table); + return -EINVAL; + } + + return 0; +} + + +static void __init longhaul_setup_voltagescaling(void) +{ + union msr_longhaul longhaul; + + rdmsrl (MSR_VIA_LONGHAUL, longhaul.val); + + if (!(longhaul.bits.RevisionID & 1)) + return; + + minvid = longhaul.bits.MinimumVID; + maxvid = longhaul.bits.MaximumVID; + vrmrev = longhaul.bits.VRMRev; + + if (minvid == 0 || maxvid == 0) { + printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " + "Voltage scaling disabled.\n", + minvid/1000, minvid%1000, maxvid/1000, maxvid%1000); + return; + } + + if (minvid == maxvid) { + printk (KERN_INFO PFX "Claims to support voltage scaling but min & max are " + "both %d.%03d. Voltage scaling disabled\n", + maxvid/1000, maxvid%1000); + return; + } + + if (vrmrev==0) { + dprintk (KERN_INFO PFX "VRM 8.5 : "); + memcpy (voltage_table, vrm85scales, sizeof(voltage_table)); + numvscales = (voltage_table[maxvid]-voltage_table[minvid])/25; + } else { + dprintk (KERN_INFO PFX "Mobile VRM : "); + memcpy (voltage_table, mobilevrmscales, sizeof(voltage_table)); + numvscales = (voltage_table[maxvid]-voltage_table[minvid])/5; + } + + /* Current voltage isn't readable at first, so we need to + set it to a known value. The spec says to use maxvid */ + longhaul.bits.RevisionKey = longhaul.bits.RevisionID; /* FIXME: This is bad. 
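Writing RevisionID straight back into RevisionKey is exactly the blind 0:3 copy that the comments in longhaul_setstate() warn against.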
*/ + longhaul.bits.EnableSoftVID = 1; + longhaul.bits.SoftVID = maxvid; + wrmsrl (MSR_VIA_LONGHAUL, longhaul.val); + + minvid = voltage_table[minvid]; + maxvid = voltage_table[maxvid]; + + dprintk ("Min VID=%d.%03d Max VID=%d.%03d, %d possible voltage scales\n", + maxvid/1000, maxvid%1000, minvid/1000, minvid%1000, numvscales); + + can_scale_voltage = 1; +} + + +static int longhaul_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, longhaul_table); +} + + +static int longhaul_target (struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int table_index = 0; + unsigned int new_clock_ratio = 0; + + if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index)) + return -EINVAL; + + new_clock_ratio = longhaul_table[table_index].index & 0xFF; + + longhaul_setstate(new_clock_ratio); + + return 0; +} + +static int longhaul_cpu_init (struct cpufreq_policy *policy) +{ + struct cpuinfo_x86 *c = cpu_data; + int ret; + + switch (c->x86_model) { + case 6: /* VIA C3 Samuel C5A */ + longhaul_version=1; + memcpy (clock_ratio, longhaul1_clock_ratio, sizeof(longhaul1_clock_ratio)); + memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr)); + break; + + case 7: /* C5B / C5C */ + switch (c->x86_mask) { + case 0: + longhaul_version=1; + memcpy (clock_ratio, longhaul1_clock_ratio, sizeof(longhaul1_clock_ratio)); + memcpy (eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr)); + break; + case 1 ... 15: + longhaul_version=2; + memcpy (clock_ratio, longhaul2_clock_ratio, sizeof(longhaul2_clock_ratio)); + memcpy (eblcr_table, ezra_eblcr, sizeof(ezra_eblcr)); + break; + } + break; + + case 8: /* C5M/C5N */ + return -ENODEV; // Waiting on updated docs from VIA before this is usable + longhaul_version=3; + numscales=32; + memcpy (clock_ratio, longhaul3_clock_ratio, sizeof(longhaul3_clock_ratio)); + memcpy (eblcr_table, c5m_eblcr, sizeof(c5m_eblcr)); + break; + } + + printk (KERN_INFO PFX "VIA CPU detected. Longhaul version %d supported\n", + longhaul_version); + + if ((longhaul_version==2 || longhaul_version==3) && (dont_scale_voltage==0)) + longhaul_setup_voltagescaling(); + + ret = longhaul_get_ranges(); + if (ret != 0) + return ret; + + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + policy->cur = (unsigned int) (longhaul_get_cpu_fsb() * longhaul_get_cpu_mult() * 100); + + return cpufreq_frequency_table_cpuinfo(policy, longhaul_table); +} + +static struct cpufreq_driver longhaul_driver = { + .verify = longhaul_verify, + .target = longhaul_target, + .init = longhaul_cpu_init, + .name = "longhaul", +}; + +static int __init longhaul_init (void) +{ + struct cpuinfo_x86 *c = cpu_data; + + if ((c->x86_vendor != X86_VENDOR_CENTAUR) || (c->x86 !=6) ) + return -ENODEV; + + switch (c->x86_model) { + case 6 ... 7: + return cpufreq_register_driver(&longhaul_driver); + case 8: + return -ENODEV; + default: + printk (KERN_INFO PFX "Unknown VIA CPU. 
Contact davej@codemonkey.org.uk\n"); + } + + return -ENODEV; +} + +static void __exit longhaul_exit (void) +{ + cpufreq_unregister_driver(&longhaul_driver); + kfree(longhaul_table); +} + +MODULE_PARM (dont_scale_voltage, "i"); + +MODULE_AUTHOR ("Dave Jones "); +MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); +MODULE_LICENSE ("GPL"); + +module_init(longhaul_init); +module_exit(longhaul_exit); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/longhaul.h linux.22-ac2/arch/i386/kernel/longhaul.h --- linux.vanilla/arch/i386/kernel/longhaul.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/longhaul.h 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,49 @@ +/* + * longhaul.h + * (C) 2003 Dave Jones. + * + * Licensed under the terms of the GNU GPL License version 2. + * + * VIA-specific information + */ + +union msr_bcr2 { + struct { + unsigned Reseved:19, // 18:0 + ESOFTBF:1, // 19 + Reserved2:3, // 22:20 + CLOCKMUL:4, // 26:23 + Reserved3:5; // 31:27 + } bits; + unsigned long val; +}; + +union msr_longhaul { + struct { + unsigned RevisionID:4, // 3:0 + RevisionKey:4, // 7:4 + EnableSoftBusRatio:1, // 8 + EnableSoftVID:1, // 9 + EnableSoftBSEL:1, // 10 + Reserved:3, // 11:13 + SoftBusRatio4:1, // 14 + VRMRev:1, // 15 + SoftBusRatio:4, // 19:16 + SoftVID:5, // 24:20 + Reserved2:3, // 27:25 + SoftBSEL:2, // 29:28 + Reserved3:2, // 31:30 + MaxMHzBR:4, // 35:32 + MaximumVID:5, // 40:36 + MaxMHzFSB:2, // 42:41 + MaxMHzBR4:1, // 43 + Reserved4:4, // 47:44 + MinMHzBR:4, // 51:48 + MinimumVID:5, // 56:52 + MinMHzFSB:2, // 58:57 + MinMHzBR4:1, // 59 + Reserved5:4; // 63:60 + } bits; + unsigned long long val; +}; + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/longrun.c linux.22-ac2/arch/i386/kernel/longrun.c --- linux.vanilla/arch/i386/kernel/longrun.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/longrun.c 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,284 @@ +/* + * (C) 2002 - 2003 Dominik Brodowski + * + * Licensed under the terms of the GNU GPL License version 2. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +static struct cpufreq_driver longrun_driver; + +/** + * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz + * values into per cent values. 
In TMTA microcode, the following is valid: + * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) + */ +static unsigned int longrun_low_freq, longrun_high_freq; + + +/** + * longrun_get_policy - get the current LongRun policy + * @policy: struct cpufreq_policy where current policy is written into + * + * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS + * and MSR_TMTA_LONGRUN_CTRL + */ +static void longrun_get_policy(struct cpufreq_policy *policy) +{ + u32 msr_lo, msr_hi; + + rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); + if (msr_lo & 0x01) + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + else + policy->policy = CPUFREQ_POLICY_POWERSAVE; + + rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + msr_lo &= 0x0000007F; + msr_hi &= 0x0000007F; + + policy->min = longrun_low_freq + msr_lo * + ((longrun_high_freq - longrun_low_freq) / 100); + policy->max = longrun_low_freq + msr_hi * + ((longrun_high_freq - longrun_low_freq) / 100); + policy->cpu = 0; +} + + +/** + * longrun_set_policy - sets a new CPUFreq policy + * @policy - new policy + * + * Sets a new CPUFreq policy on LongRun-capable processors. This function + * has to be called with cpufreq_driver locked. + */ +static int longrun_set_policy(struct cpufreq_policy *policy) +{ + u32 msr_lo, msr_hi; + u32 pctg_lo, pctg_hi; + + if (!policy) + return -EINVAL; + + pctg_lo = (policy->min - longrun_low_freq) / + ((longrun_high_freq - longrun_low_freq) / 100); + pctg_hi = (policy->max - longrun_low_freq) / + ((longrun_high_freq - longrun_low_freq) / 100); + + if (pctg_hi > 100) + pctg_hi = 100; + if (pctg_lo > pctg_hi) + pctg_lo = pctg_hi; + + /* performance or economy mode */ + rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); + msr_lo &= 0xFFFFFFFE; + switch (policy->policy) { + case CPUFREQ_POLICY_PERFORMANCE: + msr_lo |= 0x00000001; + break; + case CPUFREQ_POLICY_POWERSAVE: + break; + } + wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); + + /* lower and upper boundary */ + rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + msr_lo &= 0xFFFFFF80; + msr_hi &= 0xFFFFFF80; + msr_lo |= pctg_lo; + msr_hi |= pctg_hi; + wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + + return 0; +} + + +/** + * longrun_verify_poliy - verifies a new CPUFreq policy + * + * Validates a new CPUFreq policy. This function has to be called with + * cpufreq_driver locked. + */ +static int longrun_verify_policy(struct cpufreq_policy *policy) +{ + if (!policy) + return -EINVAL; + + policy->cpu = 0; + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + if (policy->policy == CPUFREQ_POLICY_GOVERNOR) + return -EINVAL; + + return 0; +} + + +/** + * longrun_determine_freqs - determines the lowest and highest possible core frequency + * + * Determines the lowest and highest possible core frequencies on this CPU. + * This is necessary to calculate the performance percentage according to + * TMTA rules: + * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq) + */ +static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, + unsigned int *high_freq) +{ + u32 msr_lo, msr_hi; + u32 save_lo, save_hi; + u32 eax, ebx, ecx, edx; + struct cpuinfo_x86 *c = cpu_data; + + if (!low_freq || !high_freq) + return -EINVAL; + + if (cpu_has(c, X86_FEATURE_LRTI)) { + /* if the LongRun Table Interface is present, the + * detection is a bit easier: + * For minimum frequency, read out the maximum + * level (msr_hi), write that into "currently + * selected level", and read out the frequency. 
+ * For maximum frequency, read out level zero. + */ + /* minimum */ + rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi); + wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi); + rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); + *low_freq = msr_lo * 1000; /* to kHz */ + + /* maximum */ + wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi); + rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); + *high_freq = msr_lo * 1000; /* to kHz */ + + if (*low_freq > *high_freq) + *low_freq = *high_freq; + return 0; + } + + /* set the upper border to the value determined during TSC init */ + *high_freq = (cpu_khz / 1000); + *high_freq = *high_freq * 1000; + + /* get current borders */ + rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + save_lo = msr_lo & 0x0000007F; + save_hi = msr_hi & 0x0000007F; + + /* if current perf_pctg is larger than 90%, we need to decrease the + * upper limit to make the calculation more accurate. + */ + cpuid(0x80860007, &eax, &ebx, &ecx, &edx); + if (ecx > 90) { + /* set to 0 to 80 perf_pctg */ + msr_lo &= 0xFFFFFF80; + msr_hi &= 0xFFFFFF80; + msr_lo |= 0; + msr_hi |= 80; + wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + + /* read out current core MHz and current perf_pctg */ + cpuid(0x80860007, &eax, &ebx, &ecx, &edx); + + /* restore values */ + wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); + } + + /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) + * eqals + * low_freq * ( 1 - perf_pctg) = (cur_freq - high_freq * perf_pctg) + * + * high_freq * perf_pctg is stored tempoarily into "ebx". + */ + ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */ + + if ((ecx > 95) || (ecx == 0) || (eax < ebx)) + return -EIO; + + edx = (eax - ebx) / (100 - ecx); + *low_freq = edx * 1000; /* back to kHz */ + + if (*low_freq > *high_freq) + *low_freq = *high_freq; + + return 0; +} + + +static int longrun_cpu_init(struct cpufreq_policy *policy) +{ + int result = 0; + + /* capability check */ + if (policy->cpu != 0) + return -ENODEV; + + /* detect low and high frequency */ + result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq); + if (result) + return result; + + /* cpuinfo and default policy values */ + policy->cpuinfo.min_freq = longrun_low_freq; + policy->cpuinfo.max_freq = longrun_high_freq; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + longrun_get_policy(policy); + + return 0; +} + + +static struct cpufreq_driver longrun_driver = { + .verify = longrun_verify_policy, + .setpolicy = longrun_set_policy, + .init = longrun_cpu_init, + .name = "longrun", +}; + + +/** + * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver + * + * Initializes the LongRun support. 
+ */ +static int __init longrun_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + + if (c->x86_vendor != X86_VENDOR_TRANSMETA || + !cpu_has(c, X86_FEATURE_LONGRUN)) + return -ENODEV; + + return cpufreq_register_driver(&longrun_driver); +} + + +/** + * longrun_exit - unregisters LongRun support + */ +static void __exit longrun_exit(void) +{ + cpufreq_unregister_driver(&longrun_driver); +} + + +MODULE_AUTHOR ("Dominik Brodowski "); +MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe processors."); +MODULE_LICENSE ("GPL"); + +module_init(longrun_init); +module_exit(longrun_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/Makefile linux.22-ac2/arch/i386/kernel/Makefile --- linux.vanilla/arch/i386/kernel/Makefile 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/Makefile 2003-07-31 15:37:00.000000000 +0100 @@ -14,7 +14,8 @@ O_TARGET := kernel.o -export-objs := mca.o mtrr.o msr.o cpuid.o microcode.o i386_ksyms.o time.o setup.o +export-objs := mca.o mtrr.o msr.o cpuid.o microcode.o i386_ksyms.o time.o \ + setup.o speedstep-lib.o obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o vm86.o \ ptrace.o i8259.o ioport.o ldt.o setup.o time.o sys_i386.o \ @@ -43,5 +44,16 @@ obj-$(CONFIG_X86_LOCAL_APIC) += mpparse.o apic.o nmi.o obj-$(CONFIG_X86_IO_APIC) += io_apic.o obj-$(CONFIG_X86_VISWS_APIC) += visws_apic.o +obj-$(CONFIG_EDD) += edd.o +obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o +obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o +obj-$(CONFIG_X86_LONGHAUL) += longhaul.o +obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o +obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o speedstep-lib.o +obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o +obj-$(CONFIG_X86_LONGRUN) += longrun.o +obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o +obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o + include $(TOPDIR)/Rules.make diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/mpparse.c linux.22-ac2/arch/i386/kernel/mpparse.c --- linux.vanilla/arch/i386/kernel/mpparse.c 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/mpparse.c 2003-08-28 16:49:32.000000000 +0100 @@ -78,6 +78,7 @@ unsigned char clustered_apic_mode = CLUSTERED_APIC_NONE; unsigned int apic_broadcast_id = APIC_BROADCAST_ID_APIC; #endif +unsigned int xapic_support = 0; unsigned char raw_phys_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; /* @@ -238,6 +239,8 @@ return; } ver = m->mpc_apicver; + if (APIC_XAPIC_SUPPORT(ver)) + xapic_support = 1; logical_cpu_present_map |= 1 << (num_processors-1); phys_cpu_present_map |= apicid_to_phys_cpu_present(m->mpc_apicid); @@ -587,15 +590,6 @@ } - printk("Enabling APIC mode: "); - if(clustered_apic_mode == CLUSTERED_APIC_NUMAQ) - printk("Clustered Logical. "); - else if(clustered_apic_mode == CLUSTERED_APIC_XAPIC) - printk("Physical. "); - else - printk("Flat. 
"); - printk("Using %d I/O APICs\n",nr_ioapics); - if (!num_processors) printk(KERN_ERR "SMP mptable: no processors registered!\n"); return num_processors; @@ -831,6 +825,34 @@ BUG(); printk("Processors: %d\n", num_processors); + printk("xAPIC support %s present\n", (xapic_support?"is":"is not")); + +#ifdef CONFIG_X86_CLUSTERED_APIC + /* + * Switch to Physical destination mode in case of generic + * more than 8 CPU system, which has xAPIC support + */ +#define FLAT_APIC_CPU_MAX 8 + if ((clustered_apic_mode == CLUSTERED_APIC_NONE) && + (xapic_support) && + (num_processors > FLAT_APIC_CPU_MAX)) { + clustered_apic_mode = CLUSTERED_APIC_XAPIC; + apic_broadcast_id = APIC_BROADCAST_ID_XAPIC; + int_dest_addr_mode = APIC_DEST_PHYSICAL; + int_delivery_mode = dest_Fixed; + esr_disable = 1; + } +#endif + + printk("Enabling APIC mode: "); + if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) + printk("Clustered Logical. "); + else if (clustered_apic_mode == CLUSTERED_APIC_XAPIC) + printk("Physical. "); + else + printk("Flat. "); + printk("Using %d I/O APICs\n",nr_ioapics); + /* * Only use the first configuration found. */ @@ -977,7 +999,14 @@ processor.mpc_type = MP_PROCESSOR; processor.mpc_apicid = id; - processor.mpc_apicver = 0x10; /* TBD: lapic version */ + + /* + * mp_register_lapic_address() which is called before the + * current function does the fixmap of FIX_APIC_BASE. + * Read in the correct APIC version from there + */ + processor.mpc_apicver = apic_read(APIC_LVR); + processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/p4-clockmod.c linux.22-ac2/arch/i386/kernel/p4-clockmod.c --- linux.vanilla/arch/i386/kernel/p4-clockmod.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/p4-clockmod.c 2003-06-29 16:10:34.000000000 +0100 @@ -0,0 +1,271 @@ +/* + * Pentium 4/Xeon CPU on demand clock modulation/speed scaling + * (C) 2002 - 2003 Dominik Brodowski + * (C) 2002 Zwane Mwaikambo + * (C) 2002 Arjan van de Ven + * (C) 2002 Tora T. Engstad + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * The author(s) of this software shall not be held liable for damages + * of any nature resulting due to the use of this software. This + * software is provided AS-IS with no warranties. 
+ * + * Date Errata Description + * 20020525 N44, O17 12.5% or 25% DC causes lockup + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define PFX "cpufreq: " + +/* + * Duty Cycle (3bits), note DC_DISABLE is not specified in + * intel docs i just use it to mean disable + */ +enum { + DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT, + DC_64PT, DC_75PT, DC_88PT, DC_DISABLE +}; + +#define DC_ENTRIES 8 + + +static int has_N44_O17_errata[NR_CPUS]; +static int stock_freq; + + +static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) +{ + u32 l, h; + unsigned long cpus_allowed; + struct cpufreq_freqs freqs; + int hyperthreading = 0; + int affected_cpu_map = 0; + int sibling = 0; + + if (!cpu_online(cpu) || (newstate > DC_DISABLE) || + (newstate == DC_RESV)) + return -EINVAL; + + /* switch to physical CPU where state is to be changed*/ + cpus_allowed = current->cpus_allowed; + + /* only run on CPU to be set, or on its sibling */ + affected_cpu_map = 1 << cpu; +#ifdef CONFIG_X86_HT + hyperthreading = ((cpu_has_ht) && (smp_num_siblings == 2)); + if (hyperthreading) { + sibling = cpu_sibling_map[cpu]; + affected_cpu_map |= (1 << sibling); + } +#endif + set_cpus_allowed(current, affected_cpu_map); + BUG_ON(!(affected_cpu_map & (1 << smp_processor_id()))); + + /* get current state */ + rdmsr(MSR_IA32_THERM_CONTROL, l, h); + if (l & 0x10) { + l = l >> 1; + l &= 0x7; + } else + l = DC_DISABLE; + + if (l == newstate) { + set_cpus_allowed(current, cpus_allowed); + return 0; + } else if (l == DC_RESV) { + printk(KERN_ERR PFX "BIG FAT WARNING: currently in invalid setting\n"); + } + + /* notifiers */ + freqs.old = stock_freq * l / 8; + freqs.new = stock_freq * newstate / 8; + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + if (hyperthreading) { + freqs.cpu = sibling; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + + rdmsr(MSR_IA32_THERM_STATUS, l, h); +#if 0 + if (l & 0x01) + printk(KERN_DEBUG PFX "CPU#%d currently thermal throttled\n", cpu); +#endif + if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT)) + newstate = DC_38PT; + + rdmsr(MSR_IA32_THERM_CONTROL, l, h); + if (newstate == DC_DISABLE) { + /* printk(KERN_INFO PFX "CPU#%d disabling modulation\n", cpu); */ + wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); + } else { + /* printk(KERN_INFO PFX "CPU#%d setting duty cycle to %d%%\n", + cpu, ((125 * newstate) / 10)); */ + /* bits 63 - 5 : reserved + * bit 4 : enable/disable + * bits 3-1 : duty cycle + * bit 0 : reserved + */ + l = (l & ~14); + l = l | (1<<4) | ((newstate & 0x7)<<1); + wrmsr(MSR_IA32_THERM_CONTROL, l, h); + } + + set_cpus_allowed(current, cpus_allowed); + + /* notifiers */ + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + if (hyperthreading) { + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + + return 0; +} + + +static struct cpufreq_frequency_table p4clockmod_table[] = { + {DC_RESV, CPUFREQ_ENTRY_INVALID}, + {DC_DFLT, 0}, + {DC_25PT, 0}, + {DC_38PT, 0}, + {DC_50PT, 0}, + {DC_64PT, 0}, + {DC_75PT, 0}, + {DC_88PT, 0}, + {DC_DISABLE, 0}, + {DC_RESV, CPUFREQ_TABLE_END}, +}; + + +static int cpufreq_p4_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = DC_RESV; + + if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate)) + return -EINVAL; + + cpufreq_p4_setdc(policy->cpu, p4clockmod_table[newstate].index); + + 
return 0; +} + + +static int cpufreq_p4_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); +} + + +static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) +{ + struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; + int cpuid = 0; + unsigned int i; + + /* Errata workaround */ + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; + switch (cpuid) { + case 0x0f07: + case 0x0f0a: + case 0x0f11: + case 0x0f12: + has_N44_O17_errata[policy->cpu] = 1; + } + + /* get frequency */ + if (!stock_freq) { + if (cpu_khz) + stock_freq = cpu_khz; + else { + printk(KERN_INFO PFX "unknown core frequency - please use module parameter 'stock_freq'\n"); + return -EINVAL; + } + } + + /* table init */ + for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { + if ((i<2) && (has_N44_O17_errata[policy->cpu])) + p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; + else + p4clockmod_table[i].frequency = (stock_freq * i)/8; + } + + /* cpuinfo and default policy values */ + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + policy->cpuinfo.transition_latency = 1000; + policy->cur = stock_freq; + + return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); +} + + +static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy) +{ + return cpufreq_p4_setdc(policy->cpu, DC_DISABLE); +} + +static struct cpufreq_driver p4clockmod_driver = { + .verify = cpufreq_p4_verify, + .target = cpufreq_p4_target, + .init = cpufreq_p4_cpu_init, + .exit = cpufreq_p4_cpu_exit, + .name = "p4-clockmod", +}; + + +static int __init cpufreq_p4_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + + /* + * THERM_CONTROL is architectural for IA32 now, so + * we can rely on the capability checks + */ + if (c->x86_vendor != X86_VENDOR_INTEL) + return -ENODEV; + + if (!test_bit(X86_FEATURE_ACPI, c->x86_capability) || + !test_bit(X86_FEATURE_ACC, c->x86_capability)) + return -ENODEV; + + printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n"); + + return cpufreq_register_driver(&p4clockmod_driver); +} + + +static void __exit cpufreq_p4_exit(void) +{ + cpufreq_unregister_driver(&p4clockmod_driver); +} + + +MODULE_PARM(stock_freq, "i"); + +MODULE_AUTHOR ("Zwane Mwaikambo "); +MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); +MODULE_LICENSE ("GPL"); + +module_init(cpufreq_p4_init); +module_exit(cpufreq_p4_exit); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/pci-dma.c linux.22-ac2/arch/i386/kernel/pci-dma.c --- linux.vanilla/arch/i386/kernel/pci-dma.c 2002-11-29 21:27:11.000000000 +0000 +++ linux.22-ac2/arch/i386/kernel/pci-dma.c 2003-06-29 16:10:34.000000000 +0100 @@ -19,7 +19,7 @@ void *ret; int gfp = GFP_ATOMIC; - if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff)) + if (hwdev == NULL || hwdev->dma_mask < 0xffffffff) gfp |= GFP_DMA; ret = (void *)__get_free_pages(gfp, get_order(size)); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/pci-irq.c linux.22-ac2/arch/i386/kernel/pci-irq.c --- linux.vanilla/arch/i386/kernel/pci-irq.c 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/pci-irq.c 2003-09-09 19:18:09.000000000 +0100 @@ -23,7 +23,6 @@ #define PIRQ_VERSION 0x0100 int broken_hp_bios_irq9; -int broken_440gx_bios; static struct irq_routing_table *pirq_table; @@ -46,6 +45,11 @@ int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new); }; +struct irq_router_handler { + u16 vendor; + int 
(*probe)(struct irq_router *r, struct pci_dev *router, u16 device); +}; + /* * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table. */ @@ -257,111 +261,221 @@ } /* - * PIRQ routing for SiS 85C503 router used in several SiS chipsets - * According to the SiS 5595 datasheet (preliminary V1.0, 12/24/1997) - * the related registers work as follows: - * - * general: one byte per re-routable IRQ, + * PIRQ routing for SiS 85C503 router used in several SiS chipsets. + * We have to deal with the following issues here: + * - vendors have different ideas about the meaning of link values + * - some onboard devices (integrated in the chipset) have special + * links and are thus routed differently (i.e. not via PCI INTA-INTD) + * - different revision of the router have a different layout for + * the routing registers, particularly for the onchip devices + * + * For all routing registers the common thing is we have one byte + * per routeable link which is defined as: * bit 7 IRQ mapping enabled (0) or disabled (1) - * bits [6:4] reserved + * bits [6:4] reserved (sometimes used for onchip devices) * bits [3:0] IRQ to map to * allowed: 3-7, 9-12, 14-15 * reserved: 0, 1, 2, 8, 13 * - * individual registers in device config space: + * The config-space registers located at 0x41/0x42/0x43/0x44 are + * always used to route the normal PCI INT A/B/C/D respectively. + * Apparently there are systems implementing PCI routing table using + * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D. + * We try our best to handle both link mappings. + * + * Currently (2003-05-21) it appears most SiS chipsets follow the + * definition of routing registers from the SiS-5595 southbridge. + * According to the SiS 5595 datasheets the revision id's of the + * router (ISA-bridge) should be 0x01 or 0xb0. * - * 0x41/0x42/0x43/0x44: PCI INT A/B/C/D - bits as in general case + * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1. + * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets. + * They seem to work with the current routing code. However there is + * some concern because of the two USB-OHCI HCs (original SiS 5595 + * had only one). YMMV. * - * 0x61: IDEIRQ: bits as in general case - but: - * bits [6:5] must be written 01 - * bit 4 channel-select primary (0), secondary (1) + * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1: * - * 0x62: USBIRQ: bits as in general case - but: - * bit 4 OHCI function disabled (0), enabled (1) + * 0x61: IDEIRQ: + * bits [6:5] must be written 01 + * bit 4 channel-select primary (0), secondary (1) + * + * 0x62: USBIRQ: + * bit 6 OHCI function disabled (0), enabled (1) * - * 0x6a: ACPI/SCI IRQ - bits as in general case + * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved + * + * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved + * + * We support USBIRQ (in addition to INTA-INTD) and keep the + * IDE, ACPI and DAQ routing untouched as set by the BIOS. + * + * Currently the only reported exception is the new SiS 65x chipset + * which includes the SiS 69x southbridge. Here we have the 85C503 + * router revision 0x04 and there are changes in the register layout + * mostly related to the different USB HCs with USB 2.0 support. * - * 0x7e: Data Acq. Module IRQ - bits as in general case + * Onchip routing for router rev-id 0x04 (try-and-error observation) * - * Apparently there are systems implementing PCI routing table using both - * link values 0x01-0x04 and 0x41-0x44 for PCI INTA..D, but register offsets - * like 0x62 as link values for USBIRQ e.g. 
So there is no simple - * "register = offset + pirq" relation. - * Currently we support PCI INTA..D and USBIRQ and try our best to handle - * both link mappings. - * IDE/ACPI/DAQ mapping is currently unsupported (left untouched as set by BIOS). + * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs + * bit 6-4 are probably unused, not like 5595 */ -static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +#define PIRQ_SIS_IRQ_MASK 0x0f +#define PIRQ_SIS_IRQ_DISABLE 0x80 +#define PIRQ_SIS_USB_ENABLE 0x40 +#define PIRQ_SIS_DETECT_REGISTER 0x40 + +/* return value: + * -1 on error + * 0 for PCI INTA-INTD + * 0 or enable bit mask to check or set for onchip functions + */ +static inline int pirq_sis5595_onchip(int pirq, int *reg) { - u8 x; - int reg = pirq; + int ret = -1; + *reg = pirq; switch(pirq) { - case 0x01: - case 0x02: - case 0x03: - case 0x04: - reg += 0x40; - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x62: - pci_read_config_byte(router, reg, &x); - if (reg != 0x62) - break; - if (!(x & 0x40)) - return 0; - break; - case 0x61: - case 0x6a: - case 0x7e: - printk(KERN_INFO "SiS pirq: advanced IDE/ACPI/DAQ mapping not yet implemented\n"); - return 0; - default: - printk(KERN_INFO "SiS router pirq escape (%d)\n", pirq); - return 0; + case 0x01: + case 0x02: + case 0x03: + case 0x04: + *reg += 0x40; + case 0x41: + case 0x42: + case 0x43: + case 0x44: + ret = 0; + break; + + case 0x62: + ret = PIRQ_SIS_USB_ENABLE; /* documented for 5595 */ + break; + + case 0x61: + case 0x6a: + case 0x7e: + printk(KERN_INFO "SiS pirq: IDE/ACPI/DAQ mapping not implemented: (%u)\n", + (unsigned) pirq); + /* fall thru */ + default: + printk(KERN_INFO "SiS router unknown request: (%u)\n", + (unsigned) pirq); + break; + } + return ret; +} + +/* return value: + * -1 on error + * 0 for PCI INTA-INTD + * 0 or enable bit mask to check or set for onchip functions + */ +static inline int pirq_sis96x_onchip(int pirq, int *reg) +{ + int ret = -1; + + *reg = pirq; + switch(pirq) { + case 0x01: + case 0x02: + case 0x03: + case 0x04: + *reg += 0x40; + case 0x41: + case 0x42: + case 0x43: + case 0x44: + case 0x60: + case 0x61: + case 0x62: + case 0x63: + ret = 0; + break; + + default: + printk(KERN_INFO "SiS router unknown request: (%u)\n", + (unsigned) pirq); + break; } - return (x & 0x80) ? 0 : (x & 0x0f); + return ret; +} + + +static int pirq_sis5595_get(struct pci_dev *router, struct pci_dev *dev, int pirq) +{ + u8 x; + int reg, check; + + check = pirq_sis5595_onchip(pirq, ®); + if (check < 0) + return 0; + + pci_read_config_byte(router, reg, &x); + if (check != 0 && !(x & check)) + return 0; + + return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK); } -static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +static int pirq_sis96x_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 x; - int reg = pirq; + int reg, check; + + check = pirq_sis96x_onchip(pirq, ®); + if (check < 0) + return 0; + + pci_read_config_byte(router, reg, &x); + if (check != 0 && !(x & check)) + return 0; + + return (x & PIRQ_SIS_IRQ_DISABLE) ? 
0 : (x & PIRQ_SIS_IRQ_MASK); +} + +static int pirq_sis5595_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + u8 x; + int reg, set; + + set = pirq_sis5595_onchip(pirq, ®); + if (set < 0) + return 0; + + x = (irq & PIRQ_SIS_IRQ_MASK); + if (x == 0) + x = PIRQ_SIS_IRQ_DISABLE; + else + x |= set; - switch(pirq) { - case 0x01: - case 0x02: - case 0x03: - case 0x04: - reg += 0x40; - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x62: - x = (irq&0x0f) ? (irq&0x0f) : 0x80; - if (reg != 0x62) - break; - /* always mark OHCI enabled, as nothing else knows about this */ - x |= 0x40; - break; - case 0x61: - case 0x6a: - case 0x7e: - printk(KERN_INFO "advanced SiS pirq mapping not yet implemented\n"); - return 0; - default: - printk(KERN_INFO "SiS router pirq escape (%d)\n", pirq); - return 0; - } pci_write_config_byte(router, reg, x); return 1; } +static int pirq_sis96x_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) +{ + u8 x; + int reg, set; + + set = pirq_sis96x_onchip(pirq, ®); + if (set < 0) + return 0; + + x = (irq & PIRQ_SIS_IRQ_MASK); + if (x == 0) + x = PIRQ_SIS_IRQ_DISABLE; + else + x |= set; + + pci_write_config_byte(router, reg, x); + + return 1; +} + + /* * VLSI: nibble offset 0x74 - educated guess due to routing table and * config space of VLSI 82C534 PCI-bridge/router (1004:0102) @@ -454,96 +568,263 @@ return pcibios_set_irq_routing(bridge, pin, irq); } -static struct irq_router pirq_bios_router = - { "BIOS", 0, 0, NULL, pirq_bios_set }; - #endif -static struct irq_router pirq_routers[] = { - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371FB_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371MX, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, pirq_piix_get, pirq_piix_set }, - { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_0, pirq_piix_get, pirq_piix_set }, - - { "ALI", PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pirq_ali_get, pirq_ali_set }, - - { "ITE", PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_IT8330G_0, pirq_ite_get, pirq_ite_set }, - - { "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, pirq_via_get, pirq_via_set }, - { "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, pirq_via_get, pirq_via_set }, - { "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, pirq_via_get, pirq_via_set }, - - { "OPTI", PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C700, pirq_opti_get, pirq_opti_set }, - - { "NatSemi", 
PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, pirq_cyrix_get, pirq_cyrix_set }, - { "SIS", PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, pirq_sis_get, pirq_sis_set }, - { "VLSI 82C534", PCI_VENDOR_ID_VLSI, PCI_DEVICE_ID_VLSI_82C534, pirq_vlsi_get, pirq_vlsi_set }, - { "ServerWorks", PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4, - pirq_serverworks_get, pirq_serverworks_set }, - { "ServerWorks", PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5, - pirq_serverworks_get, pirq_serverworks_set }, - { "AMD756 VIPER", PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B, - pirq_amd756_get, pirq_amd756_set }, - { "AMD766", PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413, - pirq_amd756_get, pirq_amd756_set }, - { "AMD768", PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7443, - pirq_amd756_get, pirq_amd756_set }, - { "default", 0, 0, NULL, NULL } -}; +static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + /* We must not touch 440GX even if we have tables. 440GX has + different IRQ routing weirdness */ + if(pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82450GX, NULL)) + return 0; + switch(device) + { + case PCI_DEVICE_ID_INTEL_82371FB_0: + case PCI_DEVICE_ID_INTEL_82371SB_0: + case PCI_DEVICE_ID_INTEL_82371AB_0: + case PCI_DEVICE_ID_INTEL_82371MX: + case PCI_DEVICE_ID_INTEL_82443MX_0: + case PCI_DEVICE_ID_INTEL_82801AA_0: + case PCI_DEVICE_ID_INTEL_82801AB_0: + case PCI_DEVICE_ID_INTEL_82801BA_0: + case PCI_DEVICE_ID_INTEL_82801BA_10: + case PCI_DEVICE_ID_INTEL_82801CA_0: + case PCI_DEVICE_ID_INTEL_82801CA_12: + case PCI_DEVICE_ID_INTEL_82801DB_0: + case PCI_DEVICE_ID_INTEL_82801E_0: + case PCI_DEVICE_ID_INTEL_82801EB_0: + case PCI_DEVICE_ID_INTEL_ESB_0: + r->name = "PIIX/ICH"; + r->get = pirq_piix_get; + r->set = pirq_piix_set; + return 1; + } + return 0; +} -static struct irq_router *pirq_router; +static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + /* FIXME: We should move some of the quirk fixup stuff here */ + switch(device) + { + case PCI_DEVICE_ID_VIA_82C586_0: + case PCI_DEVICE_ID_VIA_82C596: + case PCI_DEVICE_ID_VIA_82C686: + case PCI_DEVICE_ID_VIA_8231: + /* FIXME: add new ones for 8233/5 */ + r->name = "VIA"; + r->get = pirq_via_get; + r->set = pirq_via_set; + return 1; + } + return 0; +} + +static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_VLSI_82C534: + r->name = "VLSI 82C534"; + r->get = pirq_vlsi_get; + r->set = pirq_vlsi_set; + return 1; + } + return 0; +} + + +static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_SERVERWORKS_OSB4: + case PCI_DEVICE_ID_SERVERWORKS_CSB5: + r->name = "ServerWorks"; + r->get = pirq_serverworks_get; + r->set = pirq_serverworks_set; + return 1; + } + return 0; +} + +static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + u8 reg; + u16 devid; + + if (device != PCI_DEVICE_ID_SI_503) + return 0; + + /* + * In case of a SiS south bridge, we need to detect which of the two + * kinds of routing tables we have seen so far (5595 and 96x) is in use. + * Since they maintain the same device ID, we need to poke + * the PCI configuration space to find the router type we are + * dealing with. + */ + + /* + * Factoid: writing bit6 of register 0x40 of the router config space + * will make the SB show up as 0x096x in the device id. 
Note, + we need to restore register 0x40 after the device id poke. + */ + + pci_read_config_byte(router, PIRQ_SIS_DETECT_REGISTER, &reg); + pci_write_config_byte(router, PIRQ_SIS_DETECT_REGISTER, reg | (1 << 6)); + pci_read_config_word(router, PCI_DEVICE_ID, &devid); + pci_write_config_byte(router, PIRQ_SIS_DETECT_REGISTER, reg); + + if ((devid & 0xfff0) == 0x0960) { + r->name = "SIS96x"; + r->get = pirq_sis96x_get; + r->set = pirq_sis96x_set; + DBG("PCI: Detecting SiS router at %02x:%02x : SiS096x detected\n", + router->bus->number, router->devfn); + } else { + r->name = "SIS5595"; + r->get = pirq_sis5595_get; + r->set = pirq_sis5595_set; + DBG("PCI: Detecting SiS router at %02x:%02x : SiS5595 detected\n", + router->bus->number, router->devfn); + } + return 1; +} + +static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_CYRIX_5520: + r->name = "NatSemi"; + r->get = pirq_cyrix_get; + r->set = pirq_cyrix_set; + return 1; + } + return 0; +} + +static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_OPTI_82C700: + r->name = "OPTI"; + r->get = pirq_opti_get; + r->set = pirq_opti_set; + return 1; + } + return 0; +} + +static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_ITE_IT8330G_0: + r->name = "ITE"; + r->get = pirq_ite_get; + r->set = pirq_ite_set; + return 1; + } + return 0; +} + +static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_AL_M1533: + r->name = "ALI"; + r->get = pirq_ali_get; + r->set = pirq_ali_set; + return 1; + /* Should add 156x some day */ + } + return 0; +} + +static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) +{ + switch(device) + { + case PCI_DEVICE_ID_AMD_VIPER_740B: + r->name = "AMD756"; + break; + case PCI_DEVICE_ID_AMD_VIPER_7413: + r->name = "AMD766"; + break; + case PCI_DEVICE_ID_AMD_VIPER_7443: + r->name = "AMD768"; + break; + default: + return 0; + } + r->get = pirq_amd756_get; + r->set = pirq_amd756_set; + return 1; +} + +static __initdata struct irq_router_handler pirq_routers[] = { + { PCI_VENDOR_ID_INTEL, intel_router_probe }, + { PCI_VENDOR_ID_AL, ali_router_probe }, + { PCI_VENDOR_ID_ITE, ite_router_probe }, + { PCI_VENDOR_ID_VIA, via_router_probe }, + { PCI_VENDOR_ID_OPTI, opti_router_probe }, + { PCI_VENDOR_ID_SI, sis_router_probe }, + { PCI_VENDOR_ID_CYRIX, cyrix_router_probe }, + { PCI_VENDOR_ID_VLSI, vlsi_router_probe }, + { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe }, + { PCI_VENDOR_ID_AMD, amd_router_probe }, + /* Someone with docs needs to add the ATI Radeon IGP */ + { 0, NULL } +}; +static struct irq_router pirq_router; static struct pci_dev *pirq_router_dev; -static void __init pirq_find_router(void) +/* + * FIXME: should we have an option to say "generic for + * chipset" ? 
+ */ + +static void __init pirq_find_router(struct irq_router *r) { struct irq_routing_table *rt = pirq_table; - struct irq_router *r; + struct irq_router_handler *h; #ifdef CONFIG_PCI_BIOS if (!rt->signature) { printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n"); - pirq_router = &pirq_bios_router; + r->set = pirq_bios_set; + r->name = "BIOS"; return; } #endif + /* Default unless a driver reloads it */ + r->name = "default"; + r->get = NULL; + r->set = NULL; + DBG("PCI: Attempting to find IRQ router for %04x:%04x\n", rt->rtr_vendor, rt->rtr_device); - /* fall back to default router if nothing else found */ - pirq_router = &pirq_routers[ARRAY_SIZE(pirq_routers) - 1]; - pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn); if (!pirq_router_dev) { DBG("PCI: Interrupt router not found at %02x:%02x\n", rt->rtr_bus, rt->rtr_devfn); return; } - for(r=pirq_routers; r->vendor; r++) { - /* Exact match against router table entry? Use it! */ - if (r->vendor == rt->rtr_vendor && r->device == rt->rtr_device) { - pirq_router = r; + for( h = pirq_routers; h->vendor; h++) { + /* First look for a router match */ + if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device)) + break; + /* Fall back to a device match */ + if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device)) break; - } - /* Match against router device entry? Use it as a fallback */ - if (r->vendor == pirq_router_dev->vendor && r->device == pirq_router_dev->device) { - pirq_router = r; - } } printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n", - pirq_router->name, + pirq_router.name, pirq_router_dev->vendor, pirq_router_dev->device, pirq_router_dev->slot_name); @@ -572,7 +853,7 @@ int i, pirq, newirq; int irq = 0; u32 mask; - struct irq_router *r = pirq_router; + struct irq_router *r = &pirq_router; struct pci_dev *dev2; char *msg = NULL; @@ -685,17 +966,14 @@ void __init pcibios_irq_init(void) { DBG("PCI: IRQ init\n"); - if (broken_440gx_bios) - pirq_table = NULL; - else - pirq_table = pirq_find_routing_table(); + pirq_table = pirq_find_routing_table(); #ifdef CONFIG_PCI_BIOS if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) pirq_table = pcibios_get_irq_routing_table(); #endif if (pirq_table) { pirq_peer_trick(); - pirq_find_router(); + pirq_find_router(&pirq_router); if (pirq_table->exclusive_irqs) { int i; for (i=0; i<16; i++) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/powernow-k6.c linux.22-ac2/arch/i386/kernel/powernow-k6.c --- linux.vanilla/arch/i386/kernel/powernow-k6.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/powernow-k6.c 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,234 @@ +/* + * This file was based upon code in Powertweak Linux (http://powertweak.sf.net) + * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski. + * + * Licensed under the terms of the GNU GPL License version 2. + * + * BIG FAT DISCLAIMER: Work in progress code. 
Possibly *dangerous* + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long + as it is unused */ + +static unsigned int busfreq; /* FSB, in 10 kHz */ +static unsigned int max_multiplier; + + +/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ +static struct cpufreq_frequency_table clock_ratio[] = { + {45, /* 000 -> 4.5x */ 0}, + {50, /* 001 -> 5.0x */ 0}, + {40, /* 010 -> 4.0x */ 0}, + {55, /* 011 -> 5.5x */ 0}, + {20, /* 100 -> 2.0x */ 0}, + {30, /* 101 -> 3.0x */ 0}, + {60, /* 110 -> 6.0x */ 0}, + {35, /* 111 -> 3.5x */ 0}, + {0, CPUFREQ_TABLE_END} +}; + + +/** + * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier + * + * Returns the current setting of the frequency multiplier. Core clock + * speed is frequency of the Front-Side Bus multiplied with this value. + */ +static int powernow_k6_get_cpu_multiplier(void) +{ + u64 invalue = 0; + u32 msrval; + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue=inl(POWERNOW_IOPORT + 0x8); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + return clock_ratio[(invalue >> 5)&7].index; +} + + +/** + * powernow_k6_set_state - set the PowerNow! multiplier + * @best_i: clock_ratio[best_i] is the target multiplier + * + * Tries to change the PowerNow! multiplier + */ +static void powernow_k6_set_state (unsigned int best_i) +{ + unsigned long outvalue=0, invalue=0; + unsigned long msrval; + struct cpufreq_freqs freqs; + + if (clock_ratio[best_i].index > max_multiplier) { + printk(KERN_ERR "cpufreq: invalid target frequency\n"); + return; + } + + freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); + freqs.new = busfreq * clock_ratio[best_i].index; + freqs.cpu = 0; /* powernow-k6.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* we now need to transform best_i to the BVC format, see AMD#23446 */ + + outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5); + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue=inl(POWERNOW_IOPORT + 0x8); + invalue = invalue & 0xf; + outvalue = outvalue | invalue; + outl(outvalue ,(POWERNOW_IOPORT + 0x8)); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return; +} + + +/** + * powernow_k6_verify - verifies a new CPUfreq policy + * @policy: new policy + * + * Policy must be within lowest and highest possible CPU Frequency, + * and at least one possible state must be within min and max. 
+ */ +static int powernow_k6_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); +} + + +/** + * powernow_k6_setpolicy - sets a new CPUFreq policy + * @policy - new policy + * + * sets a new CPUFreq policy + */ +static int powernow_k6_target (struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_target(policy, &clock_ratio[0], target_freq, relation, &newstate)) + return -EINVAL; + + powernow_k6_set_state(newstate); + + return 0; +} + + +static int powernow_k6_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int i; + + if (policy->cpu != 0) + return -ENODEV; + + /* get frequencies */ + max_multiplier = powernow_k6_get_cpu_multiplier(); + busfreq = cpu_khz / max_multiplier; + + /* table init */ + for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + if (clock_ratio[i].index > max_multiplier) + clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; + else + clock_ratio[i].frequency = busfreq * clock_ratio[i].index; + } + + /* cpuinfo and default policy values */ + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + policy->cur = busfreq * max_multiplier; + + return cpufreq_frequency_table_cpuinfo(policy, &clock_ratio[0]); +} + + +static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) +{ + unsigned int i; + for (i=0; i<8; i++) { + if (i==max_multiplier) + powernow_k6_set_state(i); + } + return 0; +} + + +static struct cpufreq_driver powernow_k6_driver = { + .verify = powernow_k6_verify, + .target = powernow_k6_target, + .init = powernow_k6_cpu_init, + .exit = powernow_k6_cpu_exit, + .name = "powernow-k6", +}; + + +/** + * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver + * + * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported + * devices, -EINVAL or -ENOMEM on problems during initiatization, and zero + * on success. + */ +static int __init powernow_k6_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + + if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) || + ((c->x86_model != 12) && (c->x86_model != 13))) + return -ENODEV; + + if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) { + printk("cpufreq: PowerNow IOPORT region already used.\n"); + return -EIO; + } + + if (cpufreq_register_driver(&powernow_k6_driver)) { + release_region (POWERNOW_IOPORT, 16); + return -EINVAL; + } + + return 0; +} + + +/** + * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support + * + * Unregisters AMD K6-2+ / K6-3+ PowerNow! support. + */ +static void __exit powernow_k6_exit(void) +{ + cpufreq_unregister_driver(&powernow_k6_driver); + release_region (POWERNOW_IOPORT, 16); +} + + +MODULE_AUTHOR ("Arjan van de Ven , Dave Jones , Dominik Brodowski "); +MODULE_DESCRIPTION ("PowerNow! driver for AMD K6-2+ / K6-3+ processors."); +MODULE_LICENSE ("GPL"); + +module_init(powernow_k6_init); +module_exit(powernow_k6_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/powernow-k7.c linux.22-ac2/arch/i386/kernel/powernow-k7.c --- linux.vanilla/arch/i386/kernel/powernow-k7.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/powernow-k7.c 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,410 @@ +/* + * AMD K7 Powernow driver. + * (C) 2003 Dave Jones on behalf of SuSE Labs. + * + * Licensed under the terms of the GNU GPL License version 2. 
+ * Based upon datasheets & sample CPUs kindly provided by AMD. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + * + * Errata 5: Processor may fail to execute a FID/VID change in presence of interrupt. + * - We cli/sti on stepping A0 CPUs around the FID/VID transition. + * Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect. + * - We disable half multipliers if ACPI is used on A0 stepping CPUs. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "powernow-k7.h" + +#define DEBUG + +#ifdef DEBUG +#define dprintk(msg...) printk(msg) +#else +#define dprintk(msg...) do { } while(0) +#endif + +#define PFX "powernow: " + + +struct psb_s { + u8 signature[10]; + u8 tableversion; + u8 flags; + u16 settlingtime; + u8 reserved1; + u8 numpst; +}; + +struct pst_s { + u32 cpuid; + u8 fsbspeed; + u8 maxfid; + u8 startvid; + u8 numpstates; +}; + + +/* divide by 1000 to get VID. */ +static int mobile_vid_table[32] = { + 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, + 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0, + 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, + 1075, 1050, 1024, 1000, 975, 950, 925, 0, +}; + +/* divide by 10 to get FID. */ +static int fid_codes[32] = { + 110, 115, 120, 125, 50, 55, 60, 65, + 70, 75, 80, 85, 90, 95, 100, 105, + 30, 190, 40, 200, 130, 135, 140, 210, + 150, 225, 160, 165, 170, 180, -1, -1, +}; + +static struct cpufreq_frequency_table *powernow_table; + +static unsigned int can_scale_bus; +static unsigned int can_scale_vid; +static unsigned int minimum_speed=-1; +static unsigned int maximum_speed; +static unsigned int number_scales; +static unsigned int fsb; +static unsigned int latency; +static char have_a0; + + +static int check_powernow(void) +{ + struct cpuinfo_x86 *c = cpu_data; + unsigned int maxei, eax, ebx, ecx, edx; + + if (c->x86_vendor != X86_VENDOR_AMD) { + printk (KERN_INFO PFX "AMD processor not detected.\n"); + return 0; + } + + if (c->x86 !=6) { + printk (KERN_INFO PFX "This module only works with AMD K7 CPUs\n"); + return 0; + } + + printk (KERN_INFO PFX "AMD K7 CPU detected.\n"); + + if ((c->x86_model == 6) && (c->x86_mask == 0)) { + printk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n"); + have_a0 = 1; + } + + /* Get maximum capabilities */ + maxei = cpuid_eax (0x80000000); + if (maxei < 0x80000007) { /* Any powernow info ? */ + printk (KERN_INFO PFX "No powernow capabilities detected\n"); + return 0; + } + + cpuid(0x80000007, &eax, &ebx, &ecx, &edx); + printk (KERN_INFO PFX "PowerNOW! Technology present. 
Can scale: "); + + if (edx & 1 << 1) { + printk ("frequency"); + can_scale_bus=1; + } + + if ((edx & (1 << 1 | 1 << 2)) == 0x6) + printk (" and "); + + if (edx & 1 << 2) { + printk ("voltage"); + can_scale_vid=1; + } + + if (!(edx & (1 << 1 | 1 << 2))) { + printk ("nothing.\n"); + return 0; + } + + printk (".\n"); + return 1; +} + + +static int get_ranges (unsigned char *pst) +{ + unsigned int j, speed; + u8 fid, vid; + + powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL); + if (!powernow_table) + return -ENOMEM; + memset(powernow_table, 0, (sizeof(struct cpufreq_frequency_table) * (number_scales + 1))); + + for (j=0 ; j < number_scales; j++) { + fid = *pst++; + + powernow_table[j].frequency = fsb * fid_codes[fid] * 100; + powernow_table[j].index = fid; /* lower 8 bits */ + + speed = fsb * (fid_codes[fid]/10); + if ((fid_codes[fid] % 10)==5) { + speed += fsb/2; +#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) + if (have_a0 == 1) + powernow_table[j].frequency = CPUFREQ_ENTRY_INVALID; +#endif + } + + dprintk (KERN_INFO PFX " FID: 0x%x (%d.%dx [%dMHz])\t", fid, + fid_codes[fid] / 10, fid_codes[fid] % 10, speed); + + if (speed < minimum_speed) + minimum_speed = speed; + if (speed > maximum_speed) + maximum_speed = speed; + + vid = *pst++; + powernow_table[j].index |= (vid << 8); /* upper 8 bits */ + dprintk ("VID: 0x%x (%d.%03dV)\n", vid, mobile_vid_table[vid]/1000, + mobile_vid_table[vid]%1000); + } + dprintk ("\n"); + + powernow_table[number_scales].frequency = CPUFREQ_TABLE_END; + powernow_table[number_scales].index = 0; + + return 0; +} + + +static void change_FID(int fid) +{ + union msr_fidvidctl fidvidctl; + + if (fidvidctl.bits.FID != fid) { + rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + fidvidctl.bits.SGTC = latency; + fidvidctl.bits.FID = fid; + fidvidctl.bits.FIDC = 1; + wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + } +} + + +static void change_VID(int vid) +{ + union msr_fidvidctl fidvidctl; + + if (fidvidctl.bits.VID != vid) { + rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + fidvidctl.bits.VID = vid; + fidvidctl.bits.VIDC = 1; + wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + } +} + + +static void change_speed (unsigned int index) +{ + u8 fid, vid; + struct cpufreq_freqs freqs; + union msr_fidvidstatus fidvidstatus; + int cfid; + + /* fid are the lower 8 bits of the index we stored into + * the cpufreq frequency table in powernow_decode_bios, + * vid are the upper 8 bits. + */ + + fid = powernow_table[index].index & 0xFF; + vid = (powernow_table[index].index & 0xFF00) >> 8; + + freqs.cpu = 0; + + rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val); + cfid = fidvidstatus.bits.CFID; + freqs.old = fsb * fid_codes[cfid] * 100; + freqs.new = powernow_table[index].frequency; + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* Now do the magic poking into the MSRs. 
*/ + + if (have_a0 == 1) /* A0 errata 5 */ + local_irq_disable(); + + if (freqs.old > freqs.new) { + /* Going down, so change FID first */ + change_FID(fid); + change_VID(vid); + } else { + /* Going up, so change VID first */ + change_VID(vid); + change_FID(fid); + } + + + if (have_a0 == 1) + local_irq_enable(); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +} + + +static int powernow_decode_bios (int maxfid, int startvid) +{ + struct psb_s *psb; + struct pst_s *pst; + struct cpuinfo_x86 *c = cpu_data; + unsigned int i, j; + unsigned char *p; + unsigned int etuple; + unsigned int ret; + + etuple = cpuid_eax(0x80000001); + etuple &= 0xf00; + etuple |= (c->x86_model<<4)|(c->x86_mask); + + for (i=0xC0000; i < 0xffff0 ; i+=16) { + + p = phys_to_virt(i); + + if (memcmp(p, "AMDK7PNOW!", 10) == 0){ + dprintk (KERN_INFO PFX "Found PSB header at %p\n", p); + psb = (struct psb_s *) p; + dprintk (KERN_INFO PFX "Table version: 0x%x\n", psb->tableversion); + if (psb->tableversion != 0x12) { + printk (KERN_INFO PFX "Sorry, only v1.2 tables supported right now\n"); + return -ENODEV; + } + + dprintk (KERN_INFO PFX "Flags: 0x%x (", psb->flags); + if ((psb->flags & 1)==0) { + dprintk ("Mobile"); + } else { + dprintk ("Desktop"); + } + dprintk (" voltage regulator)\n"); + + latency = psb->settlingtime; + dprintk (KERN_INFO PFX "Settling Time: %d microseconds.\n", psb->settlingtime); + dprintk (KERN_INFO PFX "Has %d PST tables. (Only dumping ones relevant to this CPU).\n", psb->numpst); + + p += sizeof (struct psb_s); + + pst = (struct pst_s *) p; + + for (i = 0 ; i < psb->numpst; i++) { + pst = (struct pst_s *) p; + number_scales = pst->numpstates; + + if ((etuple == pst->cpuid) && (maxfid==pst->maxfid) && (startvid==pst->startvid)) + { + dprintk (KERN_INFO PFX "PST:%d (@%p)\n", i, pst); + dprintk (KERN_INFO PFX " cpuid: 0x%x\t", pst->cpuid); + dprintk ("fsb: %d\t", pst->fsbspeed); + dprintk ("maxFID: 0x%x\t", pst->maxfid); + dprintk ("startvid: 0x%x\n", pst->startvid); + + fsb = pst->fsbspeed; + ret = get_ranges ((char *) pst + sizeof (struct pst_s)); + return ret; + + } else { + p = (char *) pst + sizeof (struct pst_s); + for (j=0 ; j < number_scales; j++) + p+=2; + } + } + return -EINVAL; + } + p++; + } + + return -ENODEV; +} + + +static int powernow_target (struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate; + + if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, relation, &newstate)) + return -EINVAL; + + change_speed(newstate); + + return 0; +} + + +static int powernow_verify (struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, powernow_table); +} + + +static int __init powernow_cpu_init (struct cpufreq_policy *policy) +{ + union msr_fidvidstatus fidvidstatus; + int result; + + if (policy->cpu != 0) + return -ENODEV; + + rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val); + + result = powernow_decode_bios(fidvidstatus.bits.MFID, fidvidstatus.bits.SVID); + if (result) + return result; + + printk (KERN_INFO PFX "Minimum speed %d MHz. 
Maximum speed %d MHz.\n", + minimum_speed, maximum_speed); + + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + policy->cpuinfo.transition_latency = latency; + policy->cur = maximum_speed; + + return cpufreq_frequency_table_cpuinfo(policy, powernow_table); +} + +static struct cpufreq_driver powernow_driver = { + .verify = powernow_verify, + .target = powernow_target, + .init = powernow_cpu_init, + .name = "powernow-k7", +}; + +static int __init powernow_init (void) +{ + if (check_powernow()==0) + return -ENODEV; + return cpufreq_register_driver(&powernow_driver); +} + + +static void __exit powernow_exit (void) +{ + cpufreq_unregister_driver(&powernow_driver); + if (powernow_table) + kfree(powernow_table); +} + +MODULE_AUTHOR ("Dave Jones "); +MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors."); +MODULE_LICENSE ("GPL"); + +module_init(powernow_init); +module_exit(powernow_exit); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/powernow-k7.h linux.22-ac2/arch/i386/kernel/powernow-k7.h --- linux.vanilla/arch/i386/kernel/powernow-k7.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/powernow-k7.h 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,44 @@ +/* + * $Id: powernow-k7.h,v 1.2 2003/02/10 18:26:01 davej Exp $ + * (C) 2003 Dave Jones. + * + * Licensed under the terms of the GNU GPL License version 2. + * + * AMD-specific information + * + */ + +union msr_fidvidctl { + struct { + unsigned FID:5, // 4:0 + reserved1:3, // 7:5 + VID:5, // 12:8 + reserved2:3, // 15:13 + FIDC:1, // 16 + VIDC:1, // 17 + reserved3:2, // 19:18 + FIDCHGRATIO:1, // 20 + reserved4:11, // 31-21 + SGTC:20, // 32:51 + reserved5:12; // 63:52 + } bits; + unsigned long long val; +}; + +union msr_fidvidstatus { + struct { + unsigned CFID:5, // 4:0 + reserved1:3, // 7:5 + SFID:5, // 12:8 + reserved2:3, // 15:13 + MFID:5, // 20:16 + reserved3:11, // 31:21 + CVID:5, // 36:32 + reserved4:3, // 39:37 + SVID:5, // 44:40 + reserved5:3, // 47:45 + MVID:5, // 52:48 + reserved6:11; // 63:53 + } bits; + unsigned long long val; +}; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/process.c linux.22-ac2/arch/i386/kernel/process.c --- linux.vanilla/arch/i386/kernel/process.c 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/process.c 2003-06-29 16:10:34.000000000 +0100 @@ -124,15 +124,12 @@ void cpu_idle (void) { /* endless idle loop with no priority at all */ - init_idle(); - current->nice = 20; - current->counter = -100; while (1) { void (*idle)(void) = pm_idle; if (!idle) idle = default_idle; - while (!current->need_resched) + if (!current->need_resched) idle(); schedule(); check_pgt_cache(); @@ -187,7 +184,7 @@ } /* we will leave sorting out the final value when we are ready to reboot, since we might not - have set up boot_cpu_id or smp_num_cpu */ + have set up boot_cpu_physical_apicid or smp_num_cpu */ break; #endif } @@ -466,23 +463,6 @@ } /* - * No need to lock the MM as we are the last user - */ -void release_segments(struct mm_struct *mm) -{ - void * ldt = mm->context.segments; - - /* - * free the LDT - */ - if (ldt) { - mm->context.segments = NULL; - clear_LDT(); - vfree(ldt); - } -} - -/* * Create a kernel thread */ int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) @@ -535,45 +515,19 @@ void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { - void * ldt = dead_task->mm->context.segments; - // temporary debugging check - if (ldt) { - 
printk("WARNING: dead process %8s still has LDT? <%p>\n", - dead_task->comm, ldt); + if (dead_task->mm->context.size) { + printk("WARNING: dead process %8s still has LDT? <%p/%d>\n", + dead_task->comm, + dead_task->mm->context.ldt, + dead_task->mm->context.size); BUG(); } } - release_x86_irqs(dead_task); } /* - * we do not have to muck with descriptors here, that is - * done in switch_mm() as needed. - */ -void copy_segments(struct task_struct *p, struct mm_struct *new_mm) -{ - struct mm_struct * old_mm; - void *old_ldt, *ldt; - - ldt = NULL; - old_mm = current->mm; - if (old_mm && (old_ldt = old_mm->context.segments) != NULL) { - /* - * Completely new LDT, we initialize it from the parent: - */ - ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE); - if (!ldt) - printk(KERN_WARNING "ldt allocation failed\n"); - else - memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE); - } - new_mm->context.segments = ldt; - new_mm->context.cpuvalid = ~0UL; /* valid on all CPU's - they can't have stale data */ -} - -/* * Save a segment. */ #define savesegment(seg,value) \ @@ -698,15 +652,17 @@ asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs)); /* - * Restore %fs and %gs. + * Restore %fs and %gs if needed. */ - loadsegment(fs, next->fs); - loadsegment(gs, next->gs); + if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) { + loadsegment(fs, next->fs); + loadsegment(gs, next->gs); + } /* * Now maybe reload the debug registers */ - if (next->debugreg[7]){ + if (unlikely(next->debugreg[7])) { loaddebug(next, 0); loaddebug(next, 1); loaddebug(next, 2); @@ -716,7 +672,7 @@ loaddebug(next, 7); } - if (prev->ioperm || next->ioperm) { + if (unlikely(prev->ioperm || next->ioperm)) { if (next->ioperm) { /* * 4 cachelines copy ... not good, but not that diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/setup.c linux.22-ac2/arch/i386/kernel/setup.c --- linux.vanilla/arch/i386/kernel/setup.c 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/setup.c 2003-08-28 16:49:32.000000000 +0100 @@ -120,6 +120,7 @@ #include #include #include +#include /* * Machine setup.. */ @@ -211,6 +212,8 @@ #define KERNEL_START (*(unsigned long *) (PARAM+0x214)) #define INITRD_START (*(unsigned long *) (PARAM+0x218)) #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c)) +#define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) +#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) #define COMMAND_LINE ((char *) (PARAM+2048)) #define COMMAND_LINE_SIZE 256 @@ -218,6 +221,7 @@ #define RAMDISK_PROMPT_FLAG 0x8000 #define RAMDISK_LOAD_FLAG 0x4000 + #ifdef CONFIG_VISWS char visws_board_type = -1; char visws_board_rev = -1; @@ -715,6 +719,23 @@ return 0; } +#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) +unsigned char eddnr; +struct edd_info edd[EDDMAXNR]; +/** + * copy_edd() - Copy the BIOS EDD information + * from empty_zero_page into a safe place. + * + */ +static inline void copy_edd(void) +{ + eddnr = EDD_NR; + memcpy(edd, EDD_BUF, sizeof(edd)); +} +#else +#define copy_edd() do {} while (0) +#endif + /* * Do NOT EVER look at the BIOS memory size location. * It does not work on many machines. 
@@ -1151,6 +1172,7 @@ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); #endif setup_memory_region(); + copy_edd(); if (!MOUNT_ROOT_RDONLY) root_mountflags &= ~MS_RDONLY; @@ -1976,6 +1998,37 @@ #endif +static void __init init_c3(struct cpuinfo_x86 *c) +{ + u32 lo, hi; + + /* Test for Centaur Extended Feature Flags presence */ + if (cpuid_eax(0xC0000000) >= 0xC0000001) { + /* store Centaur Extended Feature Flags as + * word 5 of the CPU capability bit array + */ + c->x86_capability[5] = cpuid_edx(0xC0000001); + } + + switch (c->x86_model) { + case 6 ... 8: /* Cyrix III family */ + rdmsr (MSR_VIA_FCR, lo, hi); + lo |= (1<<1 | 1<<7); /* Report CX8 & enable PGE */ + wrmsr (MSR_VIA_FCR, lo, hi); + + set_bit(X86_FEATURE_CX8, c->x86_capability); + set_bit(X86_FEATURE_3DNOW, c->x86_capability); + + /* fall through */ + + case 9: /* Nehemiah */ + default: + get_model_name(c); + display_cacheinfo(c); + break; + } +} + static void __init init_centaur(struct cpuinfo_x86 *c) { enum { @@ -2114,23 +2167,7 @@ break; case 6: - switch (c->x86_model) { - case 6 ... 8: /* Cyrix III family */ - rdmsr (MSR_VIA_FCR, lo, hi); - lo |= (1<<1 | 1<<7); /* Report CX8 & enable PGE */ - wrmsr (MSR_VIA_FCR, lo, hi); - - set_bit(X86_FEATURE_CX8, &c->x86_capability); - set_bit(X86_FEATURE_3DNOW, &c->x86_capability); - - /* fall through */ - - case 9: /* Nehemiah */ - default: - get_model_name(c); - display_cacheinfo(c); - break; - } + init_c3(c); break; } } @@ -2765,10 +2802,16 @@ /* Intel-defined flags: level 0x00000001 */ if ( c->cpuid_level >= 0x00000001 ) { - cpuid(0x00000001, &tfms, &junk, &junk, - &c->x86_capability[0]); + u32 capability, excap; + cpuid(0x00000001, &tfms, &junk, &excap, &capability); + c->x86_capability[0] = capability; + c->x86_capability[4] = excap; c->x86 = (tfms >> 8) & 15; c->x86_model = (tfms >> 4) & 15; + if (c->x86 == 0xf) { + c->x86 += (tfms >> 20) & 0xff; + c->x86_model += ((tfms >> 16) & 0xF) << 4; + } c->x86_mask = tfms & 15; } else { /* Have CPUID level 0 only - unheard of */ @@ -2967,17 +3010,18 @@ * applications want to get the raw CPUID data, they should access * /dev/cpu//cpuid instead. 
*/ + extern int phys_proc_id[NR_CPUS]; static char *x86_cap_flags[] = { /* Intel-defined */ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx", - "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL, + "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", /* AMD-defined */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, "mmxext", NULL, + NULL, NULL, NULL, "mp", NULL, NULL, "mmxext", NULL, NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow", /* Transmeta-defined */ @@ -2987,7 +3031,20 @@ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* Other (Linux-defined) */ - "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL, + "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + + /* Intel-defined (#2) */ + "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "tm2", + "est", NULL, "cid", NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + + /* VIA/Cyrix/Centaur-defined */ + NULL, NULL, "xstore", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -3024,6 +3081,11 @@ /* Cache size */ if (c->x86_cache_size >= 0) seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); + +#ifdef CONFIG_SMP + seq_printf(m, "physical id\t: %d\n",phys_proc_id[n]); + seq_printf(m, "siblings\t: %d\n",smp_num_siblings); +#endif /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */ fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu); @@ -3126,11 +3188,12 @@ set_tss_desc(nr,t); gdt_table[__TSS(nr)].b &= 0xfffffdff; load_TR(nr); - load_LDT(&init_mm); + load_LDT(&init_mm.context); - /* - * Clear all 6 debug registers: - */ + /* Clear %fs and %gs. */ + asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs"); + + /* Clear all 6 debug registers: */ #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) ); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/smpboot.c linux.22-ac2/arch/i386/kernel/smpboot.c --- linux.vanilla/arch/i386/kernel/smpboot.c 2003-08-28 16:45:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/smpboot.c 2003-08-28 16:50:35.000000000 +0100 @@ -58,7 +58,7 @@ /* Number of siblings per CPU package */ int smp_num_siblings = 1; -int __initdata phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */ +int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */ /* Bitmask of currently online CPUs */ unsigned long cpu_online_map; @@ -365,7 +365,7 @@ * (This works even if the APIC is not enabled.) */ phys_id = GET_APIC_ID(apic_read(APIC_ID)); - cpuid = current->processor; + cpuid = cpu(); if (test_and_set_bit(cpuid, &cpu_online_map)) { printk("huh, phys CPU#%d, CPU#%d already present??\n", phys_id, cpuid); @@ -435,6 +435,7 @@ */ smp_store_cpu_info(cpuid); + disable_APIC_timer(); /* * Allow the master to continue. 
*/ @@ -443,7 +444,7 @@ /* * Synchronize the TSC with the BP */ - if (cpu_has_tsc) + if (cpu_has_tsc > 1) synchronize_tsc_ap(); } @@ -465,6 +466,7 @@ smp_callin(); while (!atomic_read(&smp_commenced)) rep_nop(); + enable_APIC_timer(); /* * low-memory mappings have been cleared, flush them from * the local TLBs too. @@ -803,16 +805,13 @@ if (!idle) panic("No idle process for CPU %d", cpu); - idle->processor = cpu; - idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */ + init_idle(idle, cpu); map_cpu_to_boot_apicid(cpu, apicid); idle->thread.eip = (unsigned long) start_secondary; - del_from_runqueue(idle); unhash_process(idle); - init_tasks[cpu] = idle; /* start_eip had better be page-aligned! */ start_eip = setup_trampoline(); @@ -925,6 +924,7 @@ } cycles_t cacheflush_time; +unsigned long cache_decay_ticks; static void smp_tune_scheduling (void) { @@ -958,9 +958,13 @@ cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth; } + cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000; + printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n", (long)cacheflush_time/(cpu_khz/1000), ((long)cacheflush_time*100/(cpu_khz/1000)) % 100); + printk("task migration cache decay timeout: %ld msecs.\n", + (cache_decay_ticks + 1) * 1000 / HZ); } /* @@ -1026,8 +1030,7 @@ map_cpu_to_boot_apicid(0, boot_cpu_apicid); global_irq_holder = 0; - current->processor = 0; - init_idle(); + current->cpu = 0; smp_tune_scheduling(); /* @@ -1219,7 +1222,7 @@ /* * Synchronize the TSC with the AP */ - if (cpu_has_tsc && cpucount) + if (cpu_has_tsc > 1 && cpucount) synchronize_tsc_bp(); smp_done: diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/smp.c linux.22-ac2/arch/i386/kernel/smp.c --- linux.vanilla/arch/i386/kernel/smp.c 2003-06-14 00:11:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/smp.c 2003-06-29 16:10:34.000000000 +0100 @@ -496,13 +496,23 @@ * it goes straight through and wastes no time serializing * anything. Worst case is that we lose a reschedule ... */ - void smp_send_reschedule(int cpu) { send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR); } /* + * this function sends a reschedule IPI to all (other) CPUs. + * This should only be used if some 'global' task became runnable, + * such as a RT task, that must be handled now. The first CPU + * that manages to grab the task will run it. + */ +void smp_send_reschedule_all(void) +{ + send_IPI_allbutself(RESCHEDULE_VECTOR); +} + +/* * Structure and data for smp_call_function(). This is designed to minimise * static memory requirements. It also looks cleaner. */ @@ -553,7 +563,7 @@ spin_lock(&call_lock); call_data = &data; - wmb(); + mb(); /* Send a message to all other CPUs and wait for them to respond */ send_IPI_allbutself(CALL_FUNCTION_VECTOR); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/speedstep-centrino.c linux.22-ac2/arch/i386/kernel/speedstep-centrino.c --- linux.vanilla/arch/i386/kernel/speedstep-centrino.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/speedstep-centrino.c 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,382 @@ +/* + * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium + * M (part of the Centrino chipset). + * + * Despite the "SpeedStep" in the name, this is almost entirely unlike + * traditional SpeedStep. 
+ * + * Modelled on speedstep.c + * + * Copyright (C) 2003 Jeremy Fitzhardinge + * + * WARNING WARNING WARNING + * + * This driver manipulates the PERF_CTL MSR, which is only somewhat + * documented. While it seems to work on my laptop, it has not been + * tested anywhere else, and it may not work for you, do strange + * things or simply crash. + */ + +#include +#include +#include +#include +#include /* BACKPORT: for strcmp */ +#include +#include +#include + +#define PFX "speedstep-centrino: " +#define MAINTAINER "Jeremy Fitzhardinge " + +/*#define CENTRINO_DEBUG*/ + +#ifdef CENTRINO_DEBUG +#define dprintk(msg...) printk(msg) +#else +#define dprintk(msg...) do { } while(0) +#endif + +struct cpu_model +{ + const char *model_name; + unsigned max_freq; /* max clock in kHz */ + + struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */ +}; + +/* Operating points for current CPU */ +static const struct cpu_model *centrino_model; + +/* Computes the correct form for IA32_PERF_CTL MSR for a particular + frequency/voltage operating point; frequency in MHz, volts in mV. + This is stored as "index" in the structure. */ +#define OP(mhz, mv) \ + { \ + .frequency = (mhz) * 1000, \ + .index = (((mhz)/100) << 8) | ((mv - 700) / 16) \ + } + +/* + * These voltage tables were derived from the Intel Pentium M + * datasheet, document 25261202.pdf, Table 5. I have verified they + * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium + * M. + */ + +/* Ultra Low Voltage Intel Pentium M processor 900MHz */ +static struct cpufreq_frequency_table op_900[] = +{ + OP(600, 844), + OP(800, 988), + OP(900, 1004), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Low Voltage Intel Pentium M processor 1.10GHz */ +static struct cpufreq_frequency_table op_1100[] = +{ + OP( 600, 956), + OP( 800, 1020), + OP( 900, 1100), + OP(1000, 1164), + OP(1100, 1180), + { .frequency = CPUFREQ_TABLE_END } +}; + + +/* Low Voltage Intel Pentium M processor 1.20GHz */ +static struct cpufreq_frequency_table op_1200[] = +{ + OP( 600, 956), + OP( 800, 1004), + OP( 900, 1020), + OP(1000, 1100), + OP(1100, 1164), + OP(1200, 1180), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.30GHz */ +static struct cpufreq_frequency_table op_1300[] = +{ + OP( 600, 956), + OP( 800, 1260), + OP(1000, 1292), + OP(1200, 1356), + OP(1300, 1388), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.40GHz */ +static struct cpufreq_frequency_table op_1400[] = +{ + OP( 600, 956), + OP( 800, 1180), + OP(1000, 1308), + OP(1200, 1436), + OP(1400, 1484), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.50GHz */ +static struct cpufreq_frequency_table op_1500[] = +{ + OP( 600, 956), + OP( 800, 1116), + OP(1000, 1228), + OP(1200, 1356), + OP(1400, 1452), + OP(1500, 1484), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.60GHz */ +static struct cpufreq_frequency_table op_1600[] = +{ + OP( 600, 956), + OP( 800, 1036), + OP(1000, 1164), + OP(1200, 1276), + OP(1400, 1420), + OP(1600, 1484), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.70GHz */ +static struct cpufreq_frequency_table op_1700[] = +{ + OP( 600, 956), + OP( 800, 1004), + OP(1000, 1116), + OP(1200, 1228), + OP(1400, 1308), + OP(1700, 1484), + { .frequency = CPUFREQ_TABLE_END } +}; +#undef OP + +#define _CPU(max, name) \ + { "Intel(R) Pentium(R) M processor " name "MHz", (max)*1000, op_##max } +#define CPU(max) _CPU(max, #max) + +/* CPU models, their operating 
frequency range, and freq/voltage + operating points */ +static const struct cpu_model models[] = +{ + _CPU( 900, " 900"), + CPU(1100), + CPU(1200), + CPU(1300), + CPU(1400), + CPU(1500), + CPU(1600), + CPU(1700), + { 0, } +}; +#undef CPU + +/* Extract clock in kHz from PERF_CTL value */ +static unsigned extract_clock(unsigned msr) +{ + msr = (msr >> 8) & 0xff; + return msr * 100000; +} + +/* Return the current CPU frequency in kHz */ +static unsigned get_cur_freq(void) +{ + unsigned l, h; + + rdmsr(MSR_IA32_PERF_STATUS, l, h); + return extract_clock(l); +} + +static int centrino_cpu_init(struct cpufreq_policy *policy) +{ + unsigned freq; + + if (policy->cpu != 0 || centrino_model == NULL) + return -ENODEV; + + freq = get_cur_freq(); + + policy->policy = (freq == centrino_model->max_freq) ? + CPUFREQ_POLICY_PERFORMANCE : + CPUFREQ_POLICY_POWERSAVE; + policy->cpuinfo.transition_latency = 10; /* 10uS transition latency */ + policy->cur = freq; + + dprintk(KERN_INFO PFX "centrino_cpu_init: policy=%d cur=%dkHz\n", + policy->policy, policy->cur); + + return cpufreq_frequency_table_cpuinfo(policy, centrino_model->op_points); +} + +/** + * centrino_verify - verifies a new CPUFreq policy + * @freq: new policy + * + * Limit must be within this model's frequency range at least one + * border included. + */ +static int centrino_verify (struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, centrino_model->op_points); +} + +/** + * centrino_setpolicy - set a new CPUFreq policy + * @policy: new policy + * + * Sets a new CPUFreq policy. + */ +static int centrino_target (struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = 0; + unsigned int msr, oldmsr, h; + struct cpufreq_freqs freqs; + + if (centrino_model == NULL) + return -ENODEV; + + if (cpufreq_frequency_table_target(policy, centrino_model->op_points, target_freq, + relation, &newstate)) + return -EINVAL; + + msr = centrino_model->op_points[newstate].index; + rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); + + if (msr == (oldmsr & 0xffff)) + return 0; + + /* Hm, old frequency can either be the last value we put in + PERF_CTL, or whatever it is now. The trouble is that TM2 + can change it behind our back, which means we never get to + see the speed change. Reading back the current speed would + tell us something happened, but it may leave the things on + the notifier chain confused; we therefore stick to using + the last programmed speed rather than the current speed for + "old". + + TODO: work out how the TCC interrupts work, and try to + catch the CPU changing things under us. + */ + freqs.cpu = 0; + freqs.old = extract_clock(oldmsr); + freqs.new = extract_clock(msr); + + dprintk(KERN_INFO PFX "target=%dkHz old=%d new=%d msr=%04x\n", + target_freq, freqs.old, freqs.new, msr); + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* all but 16 LSB are "reserved", so treat them with + care */ + oldmsr &= ~0xffff; + msr &= 0xffff; + oldmsr |= msr; + + wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +static struct cpufreq_driver centrino_driver = { + .name = "centrino", /* should be speedstep-centrino, + but there's a 16 char limit */ + .init = centrino_cpu_init, + .verify = centrino_verify, + .target = centrino_target, +}; + + +/** + * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver + * + * Initializes the Enhanced SpeedStep support. 
Returns -ENODEV on + * unsupported devices, -ENOENT if there's no voltage table for this + * particular CPU model, -EINVAL on problems during initiatization, + * and zero on success. + * + * This is quite picky. Not only does the CPU have to advertise the + * "est" flag in the cpuid capability flags, we look for a specific + * CPU model and stepping, and we need to have the exact model name in + * our voltage tables. That is, be paranoid about not releasing + * someone's valuable magic smoke. + */ +static int __init centrino_init(void) +{ + struct cpuinfo_x86 *cpu = cpu_data; + const struct cpu_model *model; + unsigned l, h; + int dummy, ecx; + + /* backport info: we can't use cpu_has here, as cpuid(1) isn't + * stored in 2.4 + */ + cpuid(1,&dummy,&dummy,&ecx,&dummy); + if (!(ecx & (1<<7))) + return -ENODEV; + + /* Only Intel Pentium M stepping 5 for now - add new CPUs as + they appear after making sure they use PERF_CTL in the same + way. */ + if (cpu->x86_vendor != X86_VENDOR_INTEL || + cpu->x86 != 6 || + cpu->x86_model != 9 || + cpu->x86_mask != 5) { + printk(KERN_INFO PFX "found unsupported CPU with Enhanced SpeedStep: " + "send /proc/cpuinfo to " MAINTAINER "\n"); + return -ENODEV; + } + + /* Check to see if Enhanced SpeedStep is enabled, and try to + enable it if not. */ + rdmsr(MSR_IA32_MISC_ENABLE, l, h); + + if (!(l & (1<<16))) { + l |= (1<<16); + wrmsr(MSR_IA32_MISC_ENABLE, l, h); + + /* check to see if it stuck */ + rdmsr(MSR_IA32_MISC_ENABLE, l, h); + if (!(l & (1<<16))) { + printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n"); + return -ENODEV; + } + } + + for(model = models; model->model_name != NULL; model++) + if (strcmp(cpu->x86_model_id, model->model_name) == 0) + break; + if (model->model_name == NULL) { + printk(KERN_INFO PFX "no support for CPU model \"%s\": " + "send /proc/cpuinfo to " MAINTAINER "\n", + cpu->x86_model_id); + return -ENOENT; + } + + centrino_model = model; + + printk(KERN_INFO PFX "found \"%s\": max frequency: %dkHz\n", + model->model_name, model->max_freq); + + return cpufreq_register_driver(¢rino_driver); +} + +static void __exit centrino_exit(void) +{ + cpufreq_unregister_driver(¢rino_driver); +} + +MODULE_AUTHOR ("Jeremy Fitzhardinge "); +MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors."); +MODULE_LICENSE ("GPL"); + +module_init(centrino_init); +module_exit(centrino_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/speedstep-ich.c linux.22-ac2/arch/i386/kernel/speedstep-ich.c --- linux.vanilla/arch/i386/kernel/speedstep-ich.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/speedstep-ich.c 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,363 @@ +/* + * (C) 2001 Dave Jones, Arjan van de ven. + * (C) 2002 - 2003 Dominik Brodowski + * + * Licensed under the terms of the GNU GPL License version 2. + * Based upon reverse engineered information, and on Intel documentation + * for chipsets ICH2-M and ICH3-M. + * + * Many thanks to Ducrot Bruno for finding and fixing the last + * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler + * for extensive testing. + * + * BIG FAT DISCLAIMER: Work in progress code. 
Possibly *dangerous* + */ + + +/********************************************************************* + * SPEEDSTEP - DEFINITIONS * + *********************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include "speedstep-lib.h" + + +/* speedstep_chipset: + * It is necessary to know which chipset is used. As accesses to + * this device occur at various places in this module, we need a + * static struct pci_dev * pointing to that device. + */ +static struct pci_dev *speedstep_chipset_dev; + + +/* speedstep_processor + */ +static unsigned int speedstep_processor = 0; + + +/* + * There are only two frequency states for each processor. Values + * are in kHz for the time being. + */ +static struct cpufreq_frequency_table speedstep_freqs[] = { + {SPEEDSTEP_HIGH, 0}, + {SPEEDSTEP_LOW, 0}, + {0, CPUFREQ_TABLE_END}, +}; + + +/* DEBUG + * Define it if you want verbose debug output, e.g. for bug reporting + */ +//#define SPEEDSTEP_DEBUG + +#ifdef SPEEDSTEP_DEBUG +#define dprintk(msg...) printk(msg) +#else +#define dprintk(msg...) do { } while(0) +#endif + + +/** + * speedstep_set_state - set the SpeedStep state + * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) + * + * Tries to change the SpeedStep state. + */ +static void speedstep_set_state (unsigned int state, unsigned int notify) +{ + u32 pmbase; + u8 pm2_blk; + u8 value; + unsigned long flags; + struct cpufreq_freqs freqs; + + if (!speedstep_chipset_dev || (state > 0x1)) + return; + + freqs.old = speedstep_get_processor_frequency(speedstep_processor); + freqs.new = speedstep_freqs[state].frequency; + freqs.cpu = 0; /* speedstep.c is UP only driver */ + + if (notify) + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* get PMBASE */ + pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); + if (!(pmbase & 0x01)) + { + printk(KERN_ERR "cpufreq: could not find speedstep register\n"); + return; + } + + pmbase &= 0xFFFFFFFE; + if (!pmbase) { + printk(KERN_ERR "cpufreq: could not find speedstep register\n"); + return; + } + + /* Disable IRQs */ + local_irq_save(flags); + + /* read state */ + value = inb(pmbase + 0x50); + + dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + + /* write new state */ + value &= 0xFE; + value |= state; + + dprintk(KERN_DEBUG "cpufreq: writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); + + /* Disable bus master arbitration */ + pm2_blk = inb(pmbase + 0x20); + pm2_blk |= 0x01; + outb(pm2_blk, (pmbase + 0x20)); + + /* Actual transition */ + outb(value, (pmbase + 0x50)); + + /* Restore bus master arbitration */ + pm2_blk &= 0xfe; + outb(pm2_blk, (pmbase + 0x20)); + + /* check if transition was successful */ + value = inb(pmbase + 0x50); + + /* Enable IRQs */ + local_irq_restore(flags); + + dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + + if (state == (value & 0x1)) { + dprintk (KERN_INFO "cpufreq: change to %u MHz succeeded\n", (speedstep_get_processor_frequency(speedstep_processor) / 1000)); + } else { + printk (KERN_ERR "cpufreq: change failed - I/O error\n"); + } + + if (notify) + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return; +} + + +/** + * speedstep_activate - activate SpeedStep control in the chipset + * + * Tries to activate the SpeedStep status and control registers. + * Returns -EINVAL on an unsupported chipset, and zero on success. 
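[Editorial note] speedstep_set_state() above derives all of its I/O ports from one PCI config read: dword 0x40 of the ICH bridge holds the power-management I/O base, with bit 0 merely flagging an I/O-space mapping. The state bit then lives at PMBASE+0x50 bit 0, and the bus-master arbitration bit that the driver toggles around the transition at PMBASE+0x20 bit 0. A small arithmetic-only sketch with a made-up register value (no hardware access):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pmbase_reg = 0x00001001;	/* hypothetical PCI config dword 0x40 */
	uint32_t pmbase;

	if (!(pmbase_reg & 0x01)) {		/* bit 0 must indicate an I/O mapping */
		fprintf(stderr, "no I/O-mapped PM block\n");
		return 1;
	}
	pmbase = pmbase_reg & 0xFFFFFFFE;
	printf("SpeedStep state bit at I/O port 0x%04x (bit 0)\n", pmbase + 0x50);
	printf("arbitration-disable bit at I/O port 0x%04x (bit 0)\n", pmbase + 0x20);
	return 0;
}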
+ */ +static int speedstep_activate (void) +{ + u16 value = 0; + + if (!speedstep_chipset_dev) + return -EINVAL; + + pci_read_config_word(speedstep_chipset_dev, + 0x00A0, &value); + if (!(value & 0x08)) { + value |= 0x08; + dprintk(KERN_DEBUG "cpufreq: activating SpeedStep (TM) registers\n"); + pci_write_config_word(speedstep_chipset_dev, + 0x00A0, value); + } + + return 0; +} + + +/** + * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic + * + * Detects PIIX4, ICH2-M and ICH3-M so far. The pci_dev points to + * the LPC bridge / PM module which contains all power-management + * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected + * chipset, or zero on failure. + */ +static unsigned int speedstep_detect_chipset (void) +{ + speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82801DB_12, + PCI_ANY_ID, + PCI_ANY_ID, + NULL); + if (speedstep_chipset_dev) + return 4; /* 4-M */ + + speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82801CA_12, + PCI_ANY_ID, + PCI_ANY_ID, + NULL); + if (speedstep_chipset_dev) + return 3; /* 3-M */ + + + speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82801BA_10, + PCI_ANY_ID, + PCI_ANY_ID, + NULL); + if (speedstep_chipset_dev) { + /* speedstep.c causes lockups on Dell Inspirons 8000 and + * 8100 which use a pretty old revision of the 82815 + * host brige. Abort on these systems. + */ + static struct pci_dev *hostbridge; + u8 rev = 0; + + hostbridge = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82815_MC, + PCI_ANY_ID, + PCI_ANY_ID, + NULL); + + if (!hostbridge) + return 2; /* 2-M */ + + pci_read_config_byte(hostbridge, PCI_REVISION_ID, &rev); + if (rev < 5) { + dprintk(KERN_INFO "cpufreq: hostbridge does not support speedstep\n"); + speedstep_chipset_dev = NULL; + return 0; + } + + return 2; /* 2-M */ + } + + return 0; +} + + +/** + * speedstep_setpolicy - set a new CPUFreq policy + * @policy: new policy + * + * Sets a new CPUFreq policy. + */ +static int speedstep_target (struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) + return -EINVAL; + + speedstep_set_state(newstate, 1); + + return 0; +} + + +/** + * speedstep_verify - verifies a new CPUFreq policy + * @freq: new policy + * + * Limit must be within speedstep_low_freq and speedstep_high_freq, with + * at least one border included. + */ +static int speedstep_verify (struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); +} + + +static int speedstep_cpu_init(struct cpufreq_policy *policy) +{ + int result = 0; + unsigned int speed; + + /* capability check */ + if (policy->cpu != 0) + return -ENODEV; + + /* detect low and high frequency */ + result = speedstep_get_freqs(speedstep_processor, + &speedstep_freqs[SPEEDSTEP_LOW].frequency, + &speedstep_freqs[SPEEDSTEP_HIGH].frequency, + &speedstep_set_state); + if (result) + return result; + + /* get current speed setting */ + speed = speedstep_get_processor_frequency(speedstep_processor); + if (!speed) + return -EIO; + + dprintk(KERN_INFO "cpufreq: currently at %s speed setting - %i MHz\n", + (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", + (speed / 1000)); + + /* cpuinfo and default policy values */ + policy->policy = (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? 
+ CPUFREQ_POLICY_POWERSAVE : CPUFREQ_POLICY_PERFORMANCE; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + policy->cur = speed; + + return cpufreq_frequency_table_cpuinfo(policy, &speedstep_freqs[0]); +} + + +static struct cpufreq_driver speedstep_driver = { + .name = "speedstep", + .verify = speedstep_verify, + .target = speedstep_target, + .init = speedstep_cpu_init, +}; + + +/** + * speedstep_init - initializes the SpeedStep CPUFreq driver + * + * Initializes the SpeedStep support. Returns -ENODEV on unsupported + * devices, -EINVAL on problems during initiatization, and zero on + * success. + */ +static int __init speedstep_init(void) +{ + /* detect processor */ + speedstep_processor = speedstep_detect_processor(); + if (!speedstep_processor) + return -ENODEV; + + /* detect chipset */ + if (!speedstep_detect_chipset()) { + printk(KERN_INFO "cpufreq: Intel(R) SpeedStep(TM) for this chipset not (yet) available.\n"); + return -ENODEV; + } + + /* activate speedstep support */ + if (speedstep_activate()) + return -EINVAL; + + return cpufreq_register_driver(&speedstep_driver); +} + + +/** + * speedstep_exit - unregisters SpeedStep support + * + * Unregisters SpeedStep support. + */ +static void __exit speedstep_exit(void) +{ + cpufreq_unregister_driver(&speedstep_driver); +} + + +MODULE_AUTHOR ("Dave Jones , Dominik Brodowski "); +MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges."); +MODULE_LICENSE ("GPL"); + +module_init(speedstep_init); +module_exit(speedstep_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/speedstep-lib.c linux.22-ac2/arch/i386/kernel/speedstep-lib.c --- linux.vanilla/arch/i386/kernel/speedstep-lib.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/speedstep-lib.c 2003-07-31 14:49:39.000000000 +0100 @@ -0,0 +1,275 @@ +/* + * (C) 2002 - 2003 Dominik Brodowski + * + * Licensed under the terms of the GNU GPL License version 2. + * + * Library for common functions for Intel SpeedStep v.1 and v.2 support + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include +#include +#include +#include +#include +#include + +#include +#include "speedstep-lib.h" + + +/* DEBUG + * Define it if you want verbose debug output, e.g. for bug reporting + */ +//#define SPEEDSTEP_DEBUG + +#ifdef SPEEDSTEP_DEBUG +#define dprintk(msg...) printk(msg) +#else +#define dprintk(msg...) 
do { } while(0) +#endif + +/********************************************************************* + * GET PROCESSOR CORE SPEED IN KHZ * + *********************************************************************/ + +static unsigned int pentium3_get_frequency (unsigned int processor) +{ + /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ + struct { + unsigned int ratio; /* Frequency Multiplier (x10) */ + u8 bitmap; /* power on configuration bits + [27, 25:22] (in MSR 0x2a) */ + } msr_decode_mult [] = { + { 30, 0x01 }, + { 35, 0x05 }, + { 40, 0x02 }, + { 45, 0x06 }, + { 50, 0x00 }, + { 55, 0x04 }, + { 60, 0x0b }, + { 65, 0x0f }, + { 70, 0x09 }, + { 75, 0x0d }, + { 80, 0x0a }, + { 85, 0x26 }, + { 90, 0x20 }, + { 100, 0x2b }, + { 0, 0xff } /* error or unknown value */ + }; + + /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ + struct { + unsigned int value; /* Front Side Bus speed in MHz */ + u8 bitmap; /* power on configuration bits [18: 19] + (in MSR 0x2a) */ + } msr_decode_fsb [] = { + { 66, 0x0 }, + { 100, 0x2 }, + { 133, 0x1 }, + { 0, 0xff} + }; + + u32 msr_lo, msr_tmp; + int i = 0, j = 0; + + /* read MSR 0x2a - we only need the low 32 bits */ + rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); + dprintk(KERN_DEBUG "speedstep-lib: P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + msr_tmp = msr_lo; + + /* decode the FSB */ + msr_tmp &= 0x00c0000; + msr_tmp >>= 18; + while (msr_tmp != msr_decode_fsb[i].bitmap) { + if (msr_decode_fsb[i].bitmap == 0xff) + return 0; + i++; + } + + /* decode the multiplier */ + if (processor == SPEEDSTEP_PROCESSOR_PIII_C_EARLY) + msr_lo &= 0x03c00000; + else + msr_lo &= 0x0bc00000; + msr_lo >>= 22; + while (msr_lo != msr_decode_mult[j].bitmap) { + if (msr_decode_mult[j].bitmap == 0xff) + return 0; + j++; + } + + return (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100); +} + + +static unsigned int pentium4_get_frequency(void) +{ + u32 msr_lo, msr_hi; + + rdmsr(0x2c, msr_lo, msr_hi); + + dprintk(KERN_DEBUG "speedstep-lib: P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); + + msr_lo >>= 24; + return (msr_lo * 100000); +} + + +unsigned int speedstep_get_processor_frequency(unsigned int processor) +{ + switch (processor) { + case SPEEDSTEP_PROCESSOR_P4M: + return pentium4_get_frequency(); + case SPEEDSTEP_PROCESSOR_PIII_T: + case SPEEDSTEP_PROCESSOR_PIII_C: + case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: + return pentium3_get_frequency(processor); + default: + return 0; + }; + return 0; +} +EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency); + + +/********************************************************************* + * DETECT SPEEDSTEP-CAPABLE PROCESSOR * + *********************************************************************/ + +unsigned int speedstep_detect_processor (void) +{ + struct cpuinfo_x86 *c = cpu_data; + u32 ebx, msr_lo, msr_hi; + + if ((c->x86_vendor != X86_VENDOR_INTEL) || + ((c->x86 != 6) && (c->x86 != 0xF))) + return 0; + + if (c->x86 == 0xF) { + /* Intel Mobile Pentium 4-M + * or Intel Mobile Pentium 4 with 533 MHz FSB */ + if (c->x86_model != 2) + return 0; + + if ((c->x86_mask != 4) && /* B-stepping [M-P4-M] */ + (c->x86_mask != 7) && /* C-stepping [M-P4-M] */ + (c->x86_mask != 9)) /* D-stepping [M-P4-M or M-P4/533] */ + return 0; + + ebx = cpuid_ebx(0x00000001); + ebx &= 0x000000FF; + if ((ebx != 0x0e) && (ebx != 0x0f)) + return 0; + + return SPEEDSTEP_PROCESSOR_P4M; + } + + switch (c->x86_model) { + case 0x0B: /* Intel PIII [Tualatin] */ + /* cpuid_ebx(1) is 0x04 for desktop PIII, + 0x06 for mobile 
PIII-M */ + ebx = cpuid_ebx(0x00000001); + + ebx &= 0x000000FF; + if (ebx != 0x06) + return 0; + + /* So far all PIII-M processors support SpeedStep. See + * Intel's 24540640.pdf of June 2003 + */ + + return SPEEDSTEP_PROCESSOR_PIII_T; + + case 0x08: /* Intel PIII [Coppermine] */ + + /* all mobile PIII Coppermines have FSB 100 MHz + * ==> sort out a few desktop PIIIs. */ + rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); + dprintk(KERN_DEBUG "cpufreq: Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); + msr_lo &= 0x00c0000; + if (msr_lo != 0x0080000) + return 0; + + /* + * If the processor is a mobile version, + * platform ID has bit 50 set + * it has SpeedStep technology if either + * bit 56 or 57 is set + */ + rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); + dprintk(KERN_DEBUG "cpufreq: Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi); + if ((msr_hi & (1<<18)) && (msr_hi & (3<<24))) { + if (c->x86_mask == 0x01) + return SPEEDSTEP_PROCESSOR_PIII_C_EARLY; + else + return SPEEDSTEP_PROCESSOR_PIII_C; + } + + default: + return 0; + } +} +EXPORT_SYMBOL_GPL(speedstep_detect_processor); + + +/********************************************************************* + * DETECT SPEEDSTEP SPEEDS * + *********************************************************************/ + +unsigned int speedstep_get_freqs(unsigned int processor, + unsigned int *low_speed, + unsigned int *high_speed, + void (*set_state) (unsigned int state, + unsigned int notify) + ) +{ + unsigned int prev_speed; + unsigned int ret = 0; + unsigned long flags; + + if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) + return EINVAL; + + /* get current speed */ + prev_speed = speedstep_get_processor_frequency(processor); + if (!prev_speed) + return EIO; + + local_irq_save(flags); + + /* switch to low state */ + set_state(SPEEDSTEP_LOW, 0); + *low_speed = speedstep_get_processor_frequency(processor); + if (!*low_speed) { + ret = EIO; + goto out; + } + + /* switch to high state */ + set_state(SPEEDSTEP_HIGH, 0); + *high_speed = speedstep_get_processor_frequency(processor); + if (!*high_speed) { + ret = EIO; + goto out; + } + + if (*low_speed == *high_speed) { + ret = ENODEV; + goto out; + } + + /* switch to previous state, if necessary */ + if (*high_speed != prev_speed) + set_state(SPEEDSTEP_LOW, 0); + + out: + local_irq_restore(flags); + return (ret); +} +EXPORT_SYMBOL_GPL(speedstep_get_freqs); + +MODULE_AUTHOR ("Dominik Brodowski "); +MODULE_DESCRIPTION ("Library for Intel SpeedStep 1 or 2 cpufreq drivers."); +MODULE_LICENSE ("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/speedstep-lib.h linux.22-ac2/arch/i386/kernel/speedstep-lib.h --- linux.vanilla/arch/i386/kernel/speedstep-lib.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/speedstep-lib.h 2003-07-31 14:49:39.000000000 +0100 @@ -0,0 +1,41 @@ +/* + * (C) 2002 - 2003 Dominik Brodowski + * + * Licensed under the terms of the GNU GPL License version 2. + * + * Library for common functions for Intel SpeedStep v.1 and v.2 support + * + * BIG FAT DISCLAIMER: Work in progress code. 
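[Editorial note] pentium3_get_frequency() in speedstep-lib.c above recovers the core clock from MSR_IA32_EBL_CR_POWERON (0x2a): bits 19:18 select the front-side bus and bits 27,25:22 the multiplier, each via a lookup table. A worked example on one made-up register value, mirroring the driver's masks and shifts but abbreviating the table walk:

#include <stdio.h>

int main(void)
{
	unsigned int msr_lo = 0x02c80000;	/* hypothetical: FSB bits = 0x2, ratio bits = 0x0b */
	unsigned int fsb_bits = (msr_lo & 0x000c0000) >> 18;
	unsigned int mult_bits = (msr_lo & 0x0bc00000) >> 22;
	/* shortened lookups; the driver walks its full msr_decode_fsb/_mult tables */
	unsigned int fsb = (fsb_bits == 0x2) ? 100 : (fsb_bits == 0x1) ? 133 : 66;
	unsigned int ratio = (mult_bits == 0x0b) ? 60 : 0;	/* 60 = 6.0x multiplier */

	printf("FSB %u MHz, multiplier %u.%u -> %u kHz\n",
	       fsb, ratio / 10, ratio % 10, ratio * fsb * 100);
	return 0;
}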
Possibly *dangerous* + */ + + + +/* processors */ + +#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */ +#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */ +#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */ +#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M with 100 MHz FSB */ + +/* speedstep states -- only two of them */ + +#define SPEEDSTEP_HIGH 0x00000000 +#define SPEEDSTEP_LOW 0x00000001 + + +/* detect a speedstep-capable processor */ +extern unsigned int speedstep_detect_processor (void); + +/* detect the current speed (in khz) of the processor */ +extern unsigned int speedstep_get_processor_frequency(unsigned int processor); + + +/* detect the low and high speeds of the processor. The callback + * set_state"'s first argument is either SPEEDSTEP_HIGH or + * SPEEDSTEP_LOW; the second argument is zero so that no + * cpufreq_notify_transition calls are initiated. + */ +extern unsigned int speedstep_get_freqs(unsigned int processor, + unsigned int *low_speed, + unsigned int *high_speed, + void (*set_state) (unsigned int state, unsigned int notify)); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/time.c linux.22-ac2/arch/i386/kernel/time.c --- linux.vanilla/arch/i386/kernel/time.c 2003-06-14 00:11:27.000000000 +0100 +++ linux.22-ac2/arch/i386/kernel/time.c 2003-06-29 16:10:34.000000000 +0100 @@ -55,6 +55,7 @@ #include #include #include +#include #include #include @@ -833,6 +834,49 @@ return 0; } +#ifdef CONFIG_CPU_FREQ +static unsigned int ref_freq = 0; +static unsigned long loops_per_jiffy_ref = 0; + +#ifndef CONFIG_SMP +static unsigned long fast_gettimeoffset_ref = 0; +static unsigned long cpu_khz_ref = 0; +#endif + +static int +time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + + if (!ref_freq) { + ref_freq = freq->old; + loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy; +#ifndef CONFIG_SMP + fast_gettimeoffset_ref = fast_gettimeoffset_quotient; + cpu_khz_ref = cpu_khz; +#endif + } + + if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || + (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { + cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); +#ifndef CONFIG_SMP + if (use_tsc) { + fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq); + cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new); + } +#endif + } + + return 0; +} + +static struct notifier_block time_cpufreq_notifier_block = { + .notifier_call = time_cpufreq_notifier +}; +#endif + void __init time_init(void) { extern int x86_udelay_tsc; @@ -901,6 +945,9 @@ "0" (eax), "1" (edx)); printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000); } +#if defined(CONFIG_CPU_FREQ) + cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); +#endif } } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/kernel/traps.c linux.22-ac2/arch/i386/kernel/traps.c --- linux.vanilla/arch/i386/kernel/traps.c 2002-11-29 21:27:11.000000000 +0000 +++ linux.22-ac2/arch/i386/kernel/traps.c 2003-06-29 16:10:34.000000000 +0100 @@ -284,6 +284,20 @@ void die(const char * str, struct pt_regs * regs, long err) { +#ifdef CONFIG_PNPBIOS + if (regs->xcs == 0x60 || regs->xcs == 0x68) + { + extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; + extern u32 pnp_bios_is_utter_crap; + pnp_bios_is_utter_crap = 1; + 
printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n"); + __asm__ volatile( + "movl %0, %%esp\n\t" + "jmp %1\n\t" + : "=a" (pnp_bios_fault_esp), "=b" (pnp_bios_fault_eip)); + panic("do_trap: can't hit this"); + } +#endif console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/Makefile linux.22-ac2/arch/i386/Makefile --- linux.vanilla/arch/i386/Makefile 2003-06-14 00:11:27.000000000 +0100 +++ linux.22-ac2/arch/i386/Makefile 2003-06-29 16:10:34.000000000 +0100 @@ -53,11 +53,11 @@ endif ifdef CONFIG_MPENTIUMIII -CFLAGS += -march=i686 +CFLAGS += $(call check_gcc,-march=pentium3,-march=i686) endif ifdef CONFIG_MPENTIUM4 -CFLAGS += -march=i686 +CFLAGS += $(call check_gcc,-march=pentium4,-march=i686) endif ifdef CONFIG_MK6 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/math-emu/fpu_system.h linux.22-ac2/arch/i386/math-emu/fpu_system.h --- linux.vanilla/arch/i386/math-emu/fpu_system.h 2000-12-29 22:07:20.000000000 +0000 +++ linux.22-ac2/arch/i386/math-emu/fpu_system.h 2003-09-01 13:54:30.000000000 +0100 @@ -20,7 +20,7 @@ of the stack frame of math_emulate() */ #define SETUP_DATA_AREA(arg) FPU_info = (struct info *) &arg -#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.segments)[(s) >> 3]) +#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) #define SEG_D_SIZE(x) ((x).b & (3 << 21)) #define SEG_G_BIT(x) ((x).b & (1 << 23)) #define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/mm/fault.c linux.22-ac2/arch/i386/mm/fault.c --- linux.vanilla/arch/i386/mm/fault.c 2002-11-29 21:27:11.000000000 +0000 +++ linux.22-ac2/arch/i386/mm/fault.c 2003-06-29 16:10:34.000000000 +0100 @@ -76,9 +76,7 @@ return 1; check_stack: - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, start) == 0) + if (!expand_stack(vma, start)) goto good_area; bad_area: diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/i386/mm/init.c linux.22-ac2/arch/i386/mm/init.c --- linux.vanilla/arch/i386/mm/init.c 2003-06-14 00:11:27.000000000 +0100 +++ linux.22-ac2/arch/i386/mm/init.c 2003-06-29 16:10:34.000000000 +0100 @@ -510,7 +510,15 @@ if (!mem_map) BUG(); - +#ifdef CONFIG_HIGHMEM + /* check that fixmap and pkmap do not overlap */ + if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) { + printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n"); + printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n", + PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START); + BUG(); + } +#endif set_max_mapnr_init(); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ia64/kernel/entry.S linux.22-ac2/arch/ia64/kernel/entry.S --- linux.vanilla/arch/ia64/kernel/entry.S 2003-08-28 16:45:28.000000000 +0100 +++ linux.22-ac2/arch/ia64/kernel/entry.S 2003-07-06 13:40:02.000000000 +0100 @@ -1203,7 +1203,7 @@ data8 ia64_ni_syscall data8 ia64_ni_syscall // 1245 data8 ia64_ni_syscall - data8 ia64_ni_syscall + data8 sys_semtimedop data8 ia64_ni_syscall data8 ia64_ni_syscall data8 ia64_ni_syscall // 1250 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ia64/mm/fault.c linux.22-ac2/arch/ia64/mm/fault.c --- linux.vanilla/arch/ia64/mm/fault.c 2003-08-28 16:45:28.000000000 +0100 +++ 
linux.22-ac2/arch/ia64/mm/fault.c 2003-07-06 13:40:02.000000000 +0100 @@ -154,8 +154,6 @@ check_expansion: if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) { - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (rgn_index(address) != rgn_index(vma->vm_start) || rgn_offset(address) >= RGN_MAP_LIMIT) goto bad_area; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/m68k/kernel/ptrace.c linux.22-ac2/arch/m68k/kernel/ptrace.c --- linux.vanilla/arch/m68k/kernel/ptrace.c 2002-08-03 16:08:20.000000000 +0100 +++ linux.22-ac2/arch/m68k/kernel/ptrace.c 2003-06-29 16:10:38.000000000 +0100 @@ -133,14 +133,9 @@ ret = ptrace_attach(child); goto out_tsk; } - ret = -ESRCH; - if (!(child->ptrace & PT_PTRACED)) - goto out_tsk; - if (child->state != TASK_STOPPED) { - if (request != PTRACE_KILL) - goto out_tsk; - } - if (child->p_pptr != current) + + ret = ptrace_check_attach(child, request == PTRACE_KILL); + if (ret < 0) goto out_tsk; switch (request) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/mips/mm/fault.c linux.22-ac2/arch/mips/mm/fault.c --- linux.vanilla/arch/mips/mm/fault.c 2003-08-28 16:45:29.000000000 +0100 +++ linux.22-ac2/arch/mips/mm/fault.c 2003-08-28 16:53:58.000000000 +0100 @@ -112,8 +112,6 @@ goto bad_area; if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/mips64/mm/fault.c linux.22-ac2/arch/mips64/mm/fault.c --- linux.vanilla/arch/mips64/mm/fault.c 2003-08-28 16:45:30.000000000 +0100 +++ linux.22-ac2/arch/mips64/mm/fault.c 2003-08-28 16:55:19.000000000 +0100 @@ -135,8 +135,6 @@ goto bad_area; if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/parisc/config.in linux.22-ac2/arch/parisc/config.in --- linux.vanilla/arch/parisc/config.in 2003-08-28 16:45:30.000000000 +0100 +++ linux.22-ac2/arch/parisc/config.in 2003-07-30 21:18:35.000000000 +0100 @@ -47,12 +47,9 @@ bool 'Symmetric multi-processing support' CONFIG_SMP bool 'Chassis LCD and LED support' CONFIG_CHASSIS_LCD_LED -bool 'Kernel Debugger support' CONFIG_KWDB -# define_bool CONFIG_KWDB n - bool 'U2/Uturn I/O MMU' CONFIG_IOMMU_CCIO bool 'VSC/GSC/HSC bus support' CONFIG_GSC -dep_bool ' Lasi I/O support' CONFIG_GSC_LASI $CONFIG_GSC +dep_bool ' Asp/Lasi I/O support' CONFIG_GSC_LASI $CONFIG_GSC dep_bool ' Wax I/O support' CONFIG_GSC_WAX $CONFIG_GSC dep_bool 'EISA support' CONFIG_EISA $CONFIG_GSC @@ -194,6 +191,7 @@ #bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ +bool 'Debug spinlocks' CONFIG_DEBUG_SPINLOCK endmenu source crypto/Config.in diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/parisc/kernel/lasimap.map linux.22-ac2/arch/parisc/kernel/lasimap.map --- linux.vanilla/arch/parisc/kernel/lasimap.map 2000-12-05 20:29:39.000000000 +0000 +++ linux.22-ac2/arch/parisc/kernel/lasimap.map 1970-01-01 01:00:00.000000000 +0100 @@ -1,322 +0,0 @@ -# HP 712 kernel keymap. This uses 7 modifier combinations. 
- -keymaps 0-2,4-5,8,12 -# ie, plain, Shift, AltGr, Control, Control+Shift, Alt and Control+Alt - - -# Change the above line into -# keymaps 0-2,4-6,8,12 -# in case you want the entries -# altgr control keycode 83 = Boot -# altgr control keycode 111 = Boot -# below. -# -# In fact AltGr is used very little, and one more keymap can -# be saved by mapping AltGr to Alt (and adapting a few entries): -# keycode 100 = Alt -# -keycode 1 = F9 F19 Console_21 - control keycode 1 = F9 - alt keycode 1 = Console_9 - control alt keycode 1 = Console_9 -keycode 2 = -keycode 3 = F5 F15 Console_17 - control keycode 3 = F5 - alt keycode 3 = Console_5 - control alt keycode 3 = Console_5 -keycode 4 = F3 F13 Console_15 - control keycode 4 = F3 - alt keycode 4 = Console_3 - control alt keycode 4 = Console_3 -keycode 5 = F1 F11 Console_13 - control keycode 5 = F1 - alt keycode 5 = Console_1 - control alt keycode 5 = Console_1 -keycode 6 = F2 F12 Console_14 - control keycode 6 = F2 - alt keycode 6 = Console_2 - control alt keycode 6 = Console_2 -keycode 7 = F12 F12 Console_24 - control keycode 7 = F12 - alt keycode 7 = Console_12 - control alt keycode 7 = Console_12 -keycode 8 = -keycode 9 = F10 F20 Console_22 - control keycode 9 = F10 - alt keycode 9 = Console_10 - control alt keycode 9 = Console_10 -keycode 10 = F8 F18 Console_20 - control keycode 10 = F8 - alt keycode 10 = Console_8 - control alt keycode 10 = Console_8 -keycode 11 = F6 F16 Console_18 - control keycode 11 = F6 - alt keycode 11 = Console_6 - control alt keycode 11 = Console_6 -keycode 12 = F4 F14 Console_16 - control keycode 12 = F4 - alt keycode 12 = Console_4 - control alt keycode 12 = Console_4 -keycode 13 = Tab Tab - alt keycode 13 = Meta_Tab -keycode 14 = grave asciitilde - control keycode 14 = nul - alt keycode 14 = Meta_grave -keycode 15 = -keycode 16 = -keycode 17 = Alt -keycode 18 = Shift -keycode 19 = -keycode 20 = Control -keycode 21 = q -keycode 22 = one exclam exclam -keycode 23 = -keycode 24 = -keycode 25 = -keycode 26 = z -keycode 27 = s -keycode 28 = a - altgr keycode 28 = Hex_A -keycode 29 = w -keycode 30 = two at at -keycode 31 = -keycode 32 = -keycode 33 = c - altgr keycode 46 = Hex_C -keycode 34 = x -keycode 35 = d - altgr keycode 35 = Hex_D -keycode 36 = e - altgr keycode 36 = Hex_E -keycode 37 = four dollar -keycode 38 = three numbersign -keycode 39 = -keycode 40 = -keycode 41 = -keycode 42 = v -keycode 43 = f - altgr keycode 43 = Hex_F -keycode 44 = t -keycode 45 = r -keycode 46 = five percent -keycode 47 = -keycode 48 = -keycode 49 = n -keycode 50 = b - altgr keycode 50 = Hex_B -keycode 51 = h -keycode 52 = g -keycode 53 = y -keycode 54 = six asciicircum -keycode 55 = -keycode 56 = -keycode 57 = -keycode 58 = m -keycode 59 = j -keycode 60 = u -keycode 61 = seven ampersand -keycode 62 = eight asterisk asterisk -keycode 63 = -keycode 64 = -keycode 65 = comma less - alt keycode 65 = Meta_comma -keycode 66 = k -keycode 67 = i -keycode 68 = o -keycode 69 = zero parenright bracketright -keycode 70 = nine parenleft bracketleft -keycode 71 = -keycode 72 = -keycode 73 = period greater - control keycode 73 = Compose - alt keycode 73 = Meta_period -keycode 74 = slash question - control keycode 74 = Delete - alt keycode 53 = Meta_slash -keycode 75 = l -keycode 76 = semicolon colon - alt keycode 39 = Meta_semicolon -keycode 77 = p -keycode 78 = minus underscore -keycode 79 = -keycode 80 = -keycode 81 = -keycode 82 = apostrophe quotedbl - control keycode 82 = Control_g - alt keycode 40 = Meta_apostrophe -keycode 83 = -keycode 84 = 
bracketleft braceleft - control keycode 84 = Escape - alt keycode 26 = Meta_bracketleft -keycode 85 = equal plus -keycode 86 = -keycode 87 = -keycode 88 = Caps_Lock -keycode 88 = -keycode 89 = -keycode 89 = -keycode 89 = -keycode 90 = Return - alt keycode 90 = Meta_Control_m -keycode 91 = bracketright braceright asciitilde - control keycode 91 = Control_bracketright - alt keycode 91 = Meta_bracketright -keycode 92 = -keycode 93 = backslash bar - control keycode 43 = Control_backslash - alt keycode 43 = Meta_backslash -keycode 94 = -keycode 95 = -keycode 96 = -keycode 97 = -keycode 98 = -keycode 99 = -keycode 100 = -keycode 101 = -keycode 102 = BackSpace -keycode 103 = -keycode 104 = -keycode 105 = KP_1 - alt keycode 105 = Ascii_1 - altgr keycode 105 = Hex_1 -keycode 106 = -keycode 107 = KP_4 - alt keycode 107 = Ascii_4 - altgr keycode 107 = Hex_4 -keycode 108 = KP_7 - alt keycode 108 = Ascii_7 - altgr keycode 108 = Hex_7 -keycode 109 = -keycode 110 = -keycode 111 = -keycode 112 = KP_0 - alt keycode 82 = Ascii_0 - altgr keycode 82 = Hex_0 -keycode 113 = KP_Period -keycode 114 = KP_2 - alt keycode 114 = Ascii_2 - altgr keycode 114 = Hex_2 -keycode 115 = KP_5 - alt keycode 115 = Ascii_5 - altgr keycode 115 = Hex_5 -keycode 116 = KP_6 - alt keycode 116 = Ascii_6 - altgr keycode 116 = Hex_6 -keycode 117 = KP_8 - alt keycode 117 = Ascii_8 - altgr keycode 117 = Hex_8 -keycode 118 = Escape -keycode 119 = -keycode 120 = F11 -keycode 121 = KP_Add -keycode 122 = KP_3 - alt keycode 122 = Ascii_3 - altgr keycode 122 = Hex_3 -keycode 123 = KP_Subtract -keycode 124 = KP_Multiply -keycode 125 = KP_9 - alt keycode 125 = Ascii_9 - altgr keycode 125 = Hex_9 -keycode 126 = -# 131!! -keycode 127 = F7 F17 Console_19 - control keycode 127 = F7 - alt keycode 127 = Console_7 - control alt keycode 127 = Console_7 - -string F1 = "\033[[A" -string F2 = "\033[[B" -string F3 = "\033[[C" -string F4 = "\033[[D" -string F5 = "\033[[E" -string F6 = "\033[17~" -string F7 = "\033[18~" -string F8 = "\033[19~" -string F9 = "\033[20~" -string F10 = "\033[21~" -string F11 = "\033[23~" -string F12 = "\033[24~" -string F13 = "\033[25~" -string F14 = "\033[26~" -string F15 = "\033[28~" -string F16 = "\033[29~" -string F17 = "\033[31~" -string F18 = "\033[32~" -string F19 = "\033[33~" -string F20 = "\033[34~" -string Find = "\033[1~" -string Insert = "\033[2~" -string Remove = "\033[3~" -string Select = "\033[4~" -string Prior = "\033[5~" -string Next = "\033[6~" -string Macro = "\033[M" -string Pause = "\033[P" -compose '`' 'A' to 'À' -compose '`' 'a' to 'à' -compose '\'' 'A' to 'Á' -compose '\'' 'a' to 'á' -compose '^' 'A' to 'Â' -compose '^' 'a' to 'â' -compose '~' 'A' to 'Ã' -compose '~' 'a' to 'ã' -compose '"' 'A' to 'Ä' -compose '"' 'a' to 'ä' -compose 'O' 'A' to 'Å' -compose 'o' 'a' to 'å' -compose '0' 'A' to 'Å' -compose '0' 'a' to 'å' -compose 'A' 'A' to 'Å' -compose 'a' 'a' to 'å' -compose 'A' 'E' to 'Æ' -compose 'a' 'e' to 'æ' -compose ',' 'C' to 'Ç' -compose ',' 'c' to 'ç' -compose '`' 'E' to 'È' -compose '`' 'e' to 'è' -compose '\'' 'E' to 'É' -compose '\'' 'e' to 'é' -compose '^' 'E' to 'Ê' -compose '^' 'e' to 'ê' -compose '"' 'E' to 'Ë' -compose '"' 'e' to 'ë' -compose '`' 'I' to 'Ì' -compose '`' 'i' to 'ì' -compose '\'' 'I' to 'Í' -compose '\'' 'i' to 'í' -compose '^' 'I' to 'Î' -compose '^' 'i' to 'î' -compose '"' 'I' to 'Ï' -compose '"' 'i' to 'ï' -compose '-' 'D' to 'Ð' -compose '-' 'd' to 'ð' -compose '~' 'N' to 'Ñ' -compose '~' 'n' to 'ñ' -compose '`' 'O' to 'Ò' -compose '`' 'o' to 'ò' -compose '\'' 'O' to 'Ó' 
-compose '\'' 'o' to 'ó' -compose '^' 'O' to 'Ô' -compose '^' 'o' to 'ô' -compose '~' 'O' to 'Õ' -compose '~' 'o' to 'õ' -compose '"' 'O' to 'Ö' -compose '"' 'o' to 'ö' -compose '/' 'O' to 'Ø' -compose '/' 'o' to 'ø' -compose '`' 'U' to 'Ù' -compose '`' 'u' to 'ù' -compose '\'' 'U' to 'Ú' -compose '\'' 'u' to 'ú' -compose '^' 'U' to 'Û' -compose '^' 'u' to 'û' -compose '"' 'U' to 'Ü' -compose '"' 'u' to 'ü' -compose '\'' 'Y' to 'Ý' -compose '\'' 'y' to 'ý' -compose 'T' 'H' to 'Þ' -compose 't' 'h' to 'þ' -compose 's' 's' to 'ß' -compose '"' 'y' to 'ÿ' -compose 's' 'z' to 'ß' -compose 'i' 'j' to 'ÿ' diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ppc/8xx_io/uart.c linux.22-ac2/arch/ppc/8xx_io/uart.c --- linux.vanilla/arch/ppc/8xx_io/uart.c 2003-08-28 16:45:30.000000000 +0100 +++ linux.22-ac2/arch/ppc/8xx_io/uart.c 2003-07-22 18:27:08.000000000 +0100 @@ -1679,7 +1679,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ppc/kernel/entry.S linux.22-ac2/arch/ppc/kernel/entry.S --- linux.vanilla/arch/ppc/kernel/entry.S 2003-08-28 16:45:30.000000000 +0100 +++ linux.22-ac2/arch/ppc/kernel/entry.S 2003-07-22 18:27:30.000000000 +0100 @@ -260,7 +260,9 @@ .globl ret_from_fork ret_from_fork: +#ifdef CONFIG_SMP bl schedule_tail +#endif lwz r0,TASK_PTRACE(r2) andi. r0,r0,PT_TRACESYS bnel- syscall_trace diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ppc/kernel/idle.c linux.22-ac2/arch/ppc/kernel/idle.c --- linux.vanilla/arch/ppc/kernel/idle.c 2003-08-28 16:45:30.000000000 +0100 +++ linux.22-ac2/arch/ppc/kernel/idle.c 2003-07-22 18:27:30.000000000 +0100 @@ -46,9 +46,6 @@ do_power_save = 1; /* endless loop with no priority at all */ - current->nice = 20; - current->counter = -100; - init_idle(); for (;;) { #ifdef CONFIG_SMP if (!do_power_save) { @@ -64,13 +61,12 @@ } } #endif +#ifdef CONFIG_6xx if (do_power_save && !current->need_resched) +#endif /* CONFIG_6xx */ power_save(); - - if (current->need_resched) { - schedule(); - check_pgt_cache(); - } + schedule(); + check_pgt_cache(); } return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ppc/kernel/mk_defs.c linux.22-ac2/arch/ppc/kernel/mk_defs.c --- linux.vanilla/arch/ppc/kernel/mk_defs.c 2003-08-28 16:45:30.000000000 +0100 +++ linux.22-ac2/arch/ppc/kernel/mk_defs.c 2003-06-29 16:15:03.000000000 +0100 @@ -34,8 +34,7 @@ /*DEFINE(KERNELBASE, KERNELBASE);*/ DEFINE(STATE, offsetof(struct task_struct, state)); DEFINE(NEXT_TASK, offsetof(struct task_struct, next_task)); - DEFINE(COUNTER, offsetof(struct task_struct, counter)); - DEFINE(PROCESSOR, offsetof(struct task_struct, processor)); + DEFINE(PROCESSOR, offsetof(struct task_struct, cpu)); DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending)); DEFINE(THREAD, offsetof(struct task_struct, thread)); DEFINE(MM, offsetof(struct task_struct, mm)); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ppc/kernel/ppc_defs.h linux.22-ac2/arch/ppc/kernel/ppc_defs.h --- linux.vanilla/arch/ppc/kernel/ppc_defs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/arch/ppc/kernel/ppc_defs.h 2003-06-29 16:10:37.000000000 +0100 @@ -0,0 
+1,81 @@ +/* + * WARNING! This file is automatically generated - DO NOT EDIT! + */ +#define STATE 0 +#define NEXT_TASK 76 +#define PROCESSOR 32 +#define SIGPENDING 8 +#define THREAD 608 +#define MM 84 +#define ACTIVE_MM 88 +#define TASK_STRUCT_SIZE 1520 +#define KSP 0 +#define PGDIR 16 +#define LAST_SYSCALL 20 +#define PT_REGS 8 +#define PT_TRACESYS 2 +#define TASK_FLAGS 4 +#define TASK_PTRACE 24 +#define NEED_RESCHED 20 +#define THREAD_FPR0 24 +#define THREAD_FPSCR 284 +#define THREAD_VR0 288 +#define THREAD_VRSAVE 816 +#define THREAD_VSCR 800 +#define TASK_UNION_SIZE 8192 +#define STACK_FRAME_OVERHEAD 16 +#define INT_FRAME_SIZE 192 +#define GPR0 16 +#define GPR1 20 +#define GPR2 24 +#define GPR3 28 +#define GPR4 32 +#define GPR5 36 +#define GPR6 40 +#define GPR7 44 +#define GPR8 48 +#define GPR9 52 +#define GPR10 56 +#define GPR11 60 +#define GPR12 64 +#define GPR13 68 +#define GPR14 72 +#define GPR15 76 +#define GPR16 80 +#define GPR17 84 +#define GPR18 88 +#define GPR19 92 +#define GPR20 96 +#define GPR21 100 +#define GPR22 104 +#define GPR23 108 +#define GPR24 112 +#define GPR25 116 +#define GPR26 120 +#define GPR27 124 +#define GPR28 128 +#define GPR29 132 +#define GPR30 136 +#define GPR31 140 +#define _NIP 144 +#define _MSR 148 +#define _CTR 156 +#define _LINK 160 +#define _CCR 168 +#define _MQ 172 +#define _XER 164 +#define _DAR 180 +#define _DSISR 184 +#define _DEAR 180 +#define _ESR 184 +#define ORIG_GPR3 152 +#define RESULT 188 +#define TRAP 176 +#define CLONE_VM 256 +#define MM_PGD 12 +#define CPU_SPEC_ENTRY_SIZE 32 +#define CPU_SPEC_PVR_MASK 0 +#define CPU_SPEC_PVR_VALUE 4 +#define CPU_SPEC_FEATURES 12 +#define CPU_SPEC_SETUP 28 +#define NUM_USER_SEGMENTS 8 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ppc/kernel/smp.c linux.22-ac2/arch/ppc/kernel/smp.c --- linux.vanilla/arch/ppc/kernel/smp.c 2003-08-28 16:45:30.000000000 +0100 +++ linux.22-ac2/arch/ppc/kernel/smp.c 2003-07-22 18:27:44.000000000 +0100 @@ -294,8 +294,6 @@ cpu_callin_map[0] = 1; current->processor = 0; - init_idle(); - for (i = 0; i < NR_CPUS; i++) { prof_counter[i] = 1; prof_multiplier[i] = 1; @@ -351,7 +349,8 @@ p = init_task.prev_task; if (!p) panic("No idle task for CPU %d", i); - del_from_runqueue(p); + init_idle(p, i); + unhash_process(p); init_tasks[i] = p; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ppc/mm/fault.c linux.22-ac2/arch/ppc/mm/fault.c --- linux.vanilla/arch/ppc/mm/fault.c 2003-08-28 16:45:31.000000000 +0100 +++ linux.22-ac2/arch/ppc/mm/fault.c 2003-07-22 18:27:44.000000000 +0100 @@ -141,42 +141,40 @@ goto bad_area; if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (!is_write) - goto bad_area; - - /* - * N.B. The rs6000/xcoff ABI allows programs to access up to - * a few hundred bytes below the stack pointer. - * The kernel signal delivery code writes up to about 1.5kB - * below the stack pointer (r1) before decrementing it. - * The exec code can write slightly over 640kB to the stack - * before setting the user r1. Thus we allow the stack to - * expand to 1MB without further checks. 
- */ - if (address + 0x100000 < vma->vm_end) { - /* get user regs even if this fault is in kernel mode */ - struct pt_regs *uregs = current->thread.regs; - if (uregs == NULL) - goto bad_area; - - /* - * A user-mode access to an address a long way below - * the stack pointer is only valid if the instruction - * is one which would update the stack pointer to the - * address accessed if the instruction completed, - * i.e. either stwu rs,n(r1) or stwux rs,r1,rb - * (or the byte, halfword, float or double forms). - * - * If we don't check this then any write to the area - * between the last mapped region and the stack will - * expand the stack rather than segfaulting. - */ - if (address + 2048 < uregs->gpr[1] - && (!user_mode(regs) || !store_updates_sp(regs))) - goto bad_area; - } + if (!is_write) + goto bad_area; + + /* + * N.B. The rs6000/xcoff ABI allows programs to access up to + * a few hundred bytes below the stack pointer. + * The kernel signal delivery code writes up to about 1.5kB + * below the stack pointer (r1) before decrementing it. + * The exec code can write slightly over 640kB to the stack + * before setting the user r1. Thus we allow the stack to + * expand to 1MB without further checks. + */ + if (address + 0x100000 < vma->vm_end) { + /* get user regs even if this fault is in kernel mode */ + struct pt_regs *uregs = current->thread.regs; + if (uregs == NULL) + goto bad_area; + + /* + * A user-mode access to an address a long way below + * the stack pointer is only valid if the instruction + * is one which would update the stack pointer to the + * address accessed if the instruction completed, + * i.e. either stwu rs,n(r1) or stwux rs,r1,rb + * (or the byte, halfword, float or double forms). + * + * If we don't check this then any write to the area + * between the last mapped region and the stack will + * expand the stack rather than segfaulting. 
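[Editorial note] The re-indented block above keeps the rs6000/xcoff stack heuristic: once a fault lands more than 1MB below the end of the stack vma, it may only grow the stack if it sits within the 2048-byte slop below the user stack pointer, or if it comes from a user-mode instruction that itself updates r1 (the stwu/stwux forms). A compact sketch of that predicate with made-up addresses, illustrative only:

#include <stdio.h>

static int may_grow_stack(unsigned long address, unsigned long usp,
			  int user_mode, int store_updates_sp)
{
	if (address + 2048 >= usp)
		return 1;			/* inside the slop the ABI allows below r1 */
	return user_mode && store_updates_sp;	/* far below r1: only for r1-updating stores */
}

int main(void)
{
	printf("%d\n", may_grow_stack(0x7fff0000UL, 0x7fff0400UL, 1, 0));	/* 1 */
	printf("%d\n", may_grow_stack(0x7f000000UL, 0x7fff0400UL, 1, 0));	/* 0 */
	printf("%d\n", may_grow_stack(0x7f000000UL, 0x7fff0400UL, 1, 1));	/* 1 */
	return 0;
}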
+ */ + if (address + 2048 < uregs->gpr[1] + && (!user_mode(regs) || !store_updates_sp(regs))) + goto bad_area; + } if (expand_stack(vma, address)) goto bad_area; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/ppc64/kernel/sys_ppc32.c linux.22-ac2/arch/ppc64/kernel/sys_ppc32.c --- linux.vanilla/arch/ppc64/kernel/sys_ppc32.c 2003-08-28 16:45:31.000000000 +0100 +++ linux.22-ac2/arch/ppc64/kernel/sys_ppc32.c 2003-07-31 14:34:30.000000000 +0100 @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390/defconfig linux.22-ac2/arch/s390/defconfig --- linux.vanilla/arch/s390/defconfig 2003-08-28 16:45:31.000000000 +0100 +++ linux.22-ac2/arch/s390/defconfig 2003-07-07 16:05:42.000000000 +0100 @@ -34,6 +34,8 @@ CONFIG_FAST_IRQ=y CONFIG_MACHCHK_WARNING=y CONFIG_CHSC=y +CONFIG_QDIO=m +# CONFIG_QDIO_PERF_STATS is not set CONFIG_IPL=y CONFIG_IPL_TAPE=y # CONFIG_IPL_VM is not set @@ -125,6 +127,14 @@ # CONFIG_CHANDEV=y CONFIG_HOTPLUG=y +CONFIG_QETH=m + +# +# Gigabit Ethernet default settings +# +# CONFIG_QETH_IPV6 is not set +CONFIG_QETH_VLAN=y +# CONFIG_QETH_PERF_STATS is not set CONFIG_CTC=m CONFIG_IUCV=m diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390/kernel/asm-offsets.c linux.22-ac2/arch/s390/kernel/asm-offsets.c --- linux.vanilla/arch/s390/kernel/asm-offsets.c 2002-08-03 16:08:22.000000000 +0100 +++ linux.22-ac2/arch/s390/kernel/asm-offsets.c 2003-06-29 16:10:42.000000000 +0100 @@ -26,7 +26,7 @@ DEFINE(__TASK_need_resched, offsetof(struct task_struct, need_resched),); DEFINE(__TASK_ptrace, offsetof(struct task_struct, ptrace),); - DEFINE(__TASK_processor, offsetof(struct task_struct, processor),); + DEFINE(__TASK_processor, offsetof(struct task_struct, cpu),); return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390/kernel/bitmap.S linux.22-ac2/arch/s390/kernel/bitmap.S --- linux.vanilla/arch/s390/kernel/bitmap.S 2000-05-12 19:41:44.000000000 +0100 +++ linux.22-ac2/arch/s390/kernel/bitmap.S 2003-06-29 16:10:42.000000000 +0100 @@ -35,3 +35,21 @@ .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 + .globl _sb_findmap +_sb_findmap: + .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390/kernel/entry.S linux.22-ac2/arch/s390/kernel/entry.S --- linux.vanilla/arch/s390/kernel/entry.S 2003-06-14 00:11:29.000000000 +0100 +++ linux.22-ac2/arch/s390/kernel/entry.S 2003-06-29 16:10:42.000000000 +0100 @@ -254,13 +254,14 @@ ret_from_fork: basr %r13,0 l %r13,.Lentry_base-.(%r13) # setup base pointer to &entry_base + # not saving R14 here because we go to sysc_return ultimately + l %r1,BASED(.Lschedtail) + basr %r14,%r1 # call schedule_tail (unlock stuff) GET_CURRENT # load pointer to task_struct to R9 stosm 
24(%r15),0x03 # reenable interrupts sr %r0,%r0 # child returns 0 st %r0,SP_R2(%r15) # store return value (change R2 on stack) - l %r1,BASED(.Lschedtail) - la %r14,BASED(sysc_return) - br %r1 # call schedule_tail, return to sysc_return + b BASED(sysc_return) # # clone, fork, vfork, exec and sigreturn need glue, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390/kernel/process.c linux.22-ac2/arch/s390/kernel/process.c --- linux.vanilla/arch/s390/kernel/process.c 2003-06-14 00:11:29.000000000 +0100 +++ linux.22-ac2/arch/s390/kernel/process.c 2003-06-29 16:10:42.000000000 +0100 @@ -50,15 +50,11 @@ * The idle loop on a S390... */ -int cpu_idle(void *unused) +int cpu_idle(void) { psw_t wait_psw; unsigned long reg; - /* endless idle loop with no priority at all */ - init_idle(); - current->nice = 20; - current->counter = -100; while (1) { if (current->need_resched) { schedule(); @@ -94,7 +90,7 @@ { struct task_struct *tsk = current; - printk("CPU: %d %s\n", tsk->processor, print_tainted()); + printk("CPU: %d %s\n", tsk->cpu, print_tainted()); printk("Process %s (pid: %d, task: %08lx, ksp: %08x)\n", current->comm, current->pid, (unsigned long) tsk, tsk->thread.ksp); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390/kernel/setup.c linux.22-ac2/arch/s390/kernel/setup.c --- linux.vanilla/arch/s390/kernel/setup.c 2003-08-28 16:45:31.000000000 +0100 +++ linux.22-ac2/arch/s390/kernel/setup.c 2003-07-06 20:11:57.000000000 +0100 @@ -276,9 +276,9 @@ static int __init conmode_setup(char *str) { -#if defined(CONFIG_HWC_CONSOLE) - if (strncmp(str, "hwc", 4) == 0) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) + SET_CONSOLE_SCLP; #endif #if defined(CONFIG_TN3215_CONSOLE) if (strncmp(str, "3215", 5) == 0) @@ -310,8 +310,8 @@ */ cpcmd("TERM CONMODE 3215", NULL, 0); if (ptr == NULL) { -#if defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif return; } @@ -320,16 +320,16 @@ SET_CONSOLE_3270; #elif defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; -#elif defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#elif defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } else if (strncmp(ptr + 8, "3215", 4) == 0) { #if defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; #elif defined(CONFIG_TN3270_CONSOLE) SET_CONSOLE_3270; -#elif defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#elif defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } } else if (MACHINE_IS_P390) { @@ -339,8 +339,8 @@ SET_CONSOLE_3270; #endif } else { -#if defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } } @@ -383,21 +383,25 @@ /* * Reboot, halt and power_off stubs. They just call _machine_restart, - * _machine_halt or _machine_power_off. + * _machine_halt or _machine_power_off after making sure that all pending + * printks reached their destination. 
*/ void machine_restart(char *command) { + console_unblank(); _machine_restart(command); } void machine_halt(void) { + console_unblank(); _machine_halt(); } void machine_power_off(void) { + console_unblank(); _machine_power_off(); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390/kernel/smp.c linux.22-ac2/arch/s390/kernel/smp.c --- linux.vanilla/arch/s390/kernel/smp.c 2002-11-29 21:27:13.000000000 +0000 +++ linux.22-ac2/arch/s390/kernel/smp.c 2003-06-29 16:10:42.000000000 +0100 @@ -38,7 +38,7 @@ #include /* prototypes */ -extern int cpu_idle(void * unused); +extern int cpu_idle(void); extern __u16 boot_cpu_addr; extern volatile int __cpu_logical_map[]; @@ -56,6 +56,7 @@ spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; unsigned long cpu_online_map; +unsigned long cache_decay_ticks; /* * Setup routine for controlling SMP activation @@ -468,7 +469,7 @@ { int curr_cpu; - current->processor = 0; + current->cpu = 0; smp_num_cpus = 1; cpu_online_map = 1; for (curr_cpu = 0; @@ -509,7 +510,7 @@ pfault_init(); #endif /* cpu_idle will call schedule for us */ - return cpu_idle(NULL); + return cpu_idle(); } /* @@ -547,12 +548,9 @@ idle = init_task.prev_task; if (!idle) panic("No idle process for CPU %d",cpu); - idle->processor = cpu; - idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */ + init_idle(idle, cpu); - del_from_runqueue(idle); unhash_process(idle); - init_tasks[cpu] = idle; cpu_lowcore = get_cpu_lowcore(cpu); cpu_lowcore->save_area[15] = idle->thread.ksp; @@ -604,6 +602,8 @@ panic("Couldn't request external interrupt 0x1202"); smp_count_cpus(); memset(lowcore_ptr,0,sizeof(lowcore_ptr)); + + cache_decay_ticks = (200 * HZ) / 1000; /* Is 200ms ok? Robus? XXX */ /* * Initialize the logical to physical CPU number mapping diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390/kernel/traps.c linux.22-ac2/arch/s390/kernel/traps.c --- linux.vanilla/arch/s390/kernel/traps.c 2002-11-29 21:27:13.000000000 +0000 +++ linux.22-ac2/arch/s390/kernel/traps.c 2003-06-29 16:10:42.000000000 +0100 @@ -142,7 +142,7 @@ * We can't print the backtrace of a running process. It is * unreliable at best and can cause kernel oopses. 
*/ - if (task_has_cpu(tsk)) + if (tsk->state == TASK_RUNNING) return; show_trace((unsigned long *) tsk->thread.ksp); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390x/defconfig linux.22-ac2/arch/s390x/defconfig --- linux.vanilla/arch/s390x/defconfig 2003-08-28 16:45:31.000000000 +0100 +++ linux.22-ac2/arch/s390x/defconfig 2003-07-07 16:05:42.000000000 +0100 @@ -35,6 +35,8 @@ CONFIG_FAST_IRQ=y CONFIG_MACHCHK_WARNING=y CONFIG_CHSC=y +CONFIG_QDIO=m +# CONFIG_QDIO_PERF_STATS is not set CONFIG_IPL=y CONFIG_IPL_TAPE=y # CONFIG_IPL_VM is not set diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390x/kernel/asm-offsets.c linux.22-ac2/arch/s390x/kernel/asm-offsets.c --- linux.vanilla/arch/s390x/kernel/asm-offsets.c 2002-08-03 16:08:22.000000000 +0100 +++ linux.22-ac2/arch/s390x/kernel/asm-offsets.c 2003-06-29 16:10:43.000000000 +0100 @@ -26,7 +26,7 @@ DEFINE(__TASK_need_resched, offsetof(struct task_struct, need_resched),); DEFINE(__TASK_ptrace, offsetof(struct task_struct, ptrace),); - DEFINE(__TASK_processor, offsetof(struct task_struct, processor),); + DEFINE(__TASK_processor, offsetof(struct task_struct, cpu),); return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390x/kernel/bitmap.S linux.22-ac2/arch/s390x/kernel/bitmap.S --- linux.vanilla/arch/s390x/kernel/bitmap.S 2001-02-13 22:13:44.000000000 +0000 +++ linux.22-ac2/arch/s390x/kernel/bitmap.S 2003-06-29 16:10:43.000000000 +0100 @@ -35,3 +35,22 @@ .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 + .globl _sb_findmap +_sb_findmap: + .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390x/kernel/entry.S linux.22-ac2/arch/s390x/kernel/entry.S --- linux.vanilla/arch/s390x/kernel/entry.S 2003-08-28 16:45:31.000000000 +0100 +++ linux.22-ac2/arch/s390x/kernel/entry.S 2003-06-29 16:10:43.000000000 +0100 @@ -240,11 +240,11 @@ # .globl ret_from_fork ret_from_fork: + brasl %r14,schedule_tail GET_CURRENT # load pointer to task_struct to R9 stosm 48(%r15),0x03 # reenable interrupts xc SP_R2(8,%r15),SP_R2(%r15) # child returns 0 - larl %r14,sysc_return - jg schedule_tail # return to sysc_return + j sysc_return # # clone, fork, vfork, exec and sigreturn need glue, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390x/kernel/linux32.c linux.22-ac2/arch/s390x/kernel/linux32.c --- linux.vanilla/arch/s390x/kernel/linux32.c 2003-08-28 16:45:31.000000000 +0100 +++ linux.22-ac2/arch/s390x/kernel/linux32.c 2003-07-14 12:57:01.000000000 +0100 @@ -4499,7 +4499,7 @@ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) { /* Result is out of bounds. 
*/ - do_munmap(current->mm, addr, len); + do_munmap(current->mm, addr, len, 1); error = -ENOMEM; } up_write(&current->mm->mmap_sem); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390x/kernel/process.c linux.22-ac2/arch/s390x/kernel/process.c --- linux.vanilla/arch/s390x/kernel/process.c 2003-06-14 00:11:29.000000000 +0100 +++ linux.22-ac2/arch/s390x/kernel/process.c 2003-06-29 16:10:43.000000000 +0100 @@ -55,10 +55,6 @@ psw_t wait_psw; unsigned long reg; - /* endless idle loop with no priority at all */ - init_idle(); - current->nice = 20; - current->counter = -100; while (1) { if (current->need_resched) { schedule(); @@ -91,7 +87,7 @@ { struct task_struct *tsk = current; - printk("CPU: %d %s\n", tsk->processor, print_tainted()); + printk("CPU: %d %s\n", tsk->cpu, print_tainted()); printk("Process %s (pid: %d, task: %016lx, ksp: %016lx)\n", current->comm, current->pid, (unsigned long) tsk, tsk->thread.ksp); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390x/kernel/setup.c linux.22-ac2/arch/s390x/kernel/setup.c --- linux.vanilla/arch/s390x/kernel/setup.c 2003-08-28 16:45:31.000000000 +0100 +++ linux.22-ac2/arch/s390x/kernel/setup.c 2003-07-06 20:12:22.000000000 +0100 @@ -164,9 +164,9 @@ static int __init conmode_setup(char *str) { -#if defined(CONFIG_HWC_CONSOLE) - if (strncmp(str, "hwc", 4) == 0) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) + SET_CONSOLE_SCLP; #endif #if defined(CONFIG_TN3215_CONSOLE) if (strncmp(str, "3215", 5) == 0) @@ -198,8 +198,8 @@ */ cpcmd("TERM CONMODE 3215", NULL, 0); if (ptr == NULL) { -#if defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif return; } @@ -208,16 +208,16 @@ SET_CONSOLE_3270; #elif defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; -#elif defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#elif defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } else if (strncmp(ptr + 8, "3215", 4) == 0) { #if defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; #elif defined(CONFIG_TN3270_CONSOLE) SET_CONSOLE_3270; -#elif defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#elif defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } } else if (MACHINE_IS_P390) { @@ -227,8 +227,8 @@ SET_CONSOLE_3270; #endif } else { -#if defined(CONFIG_HWC_CONSOLE) - SET_CONSOLE_HWC; +#if defined(CONFIG_SCLP_CONSOLE) + SET_CONSOLE_SCLP; #endif } } @@ -271,21 +271,25 @@ /* * Reboot, halt and power_off stubs. They just call _machine_restart, - * _machine_halt or _machine_power_off. + * _machine_halt or _machine_power_off after making sure that all pending + * printks reached their destination.
*/ void machine_restart(char *command) { + console_unblank(); _machine_restart(command); } void machine_halt(void) { + console_unblank(); _machine_halt(); } void machine_power_off(void) { + console_unblank(); _machine_power_off(); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390x/kernel/smp.c linux.22-ac2/arch/s390x/kernel/smp.c --- linux.vanilla/arch/s390x/kernel/smp.c 2003-06-14 00:11:29.000000000 +0100 +++ linux.22-ac2/arch/s390x/kernel/smp.c 2003-06-29 16:10:43.000000000 +0100 @@ -38,7 +38,7 @@ #include /* prototypes */ -extern int cpu_idle(void * unused); +extern int cpu_idle(void); extern __u16 boot_cpu_addr; extern volatile int __cpu_logical_map[]; @@ -56,6 +56,7 @@ spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; unsigned long cpu_online_map; +unsigned long cache_decay_ticks; /* * Setup routine for controlling SMP activation @@ -451,7 +452,7 @@ { int curr_cpu; - current->processor = 0; + current->cpu = 0; smp_num_cpus = 1; cpu_online_map = 1; for (curr_cpu = 0; @@ -491,7 +492,7 @@ pfault_init(); #endif /* cpu_idle will call schedule for us */ - return cpu_idle(NULL); + return cpu_idle(); } /* @@ -529,12 +530,9 @@ idle = init_task.prev_task; if (!idle) panic("No idle process for CPU %d",cpu); - idle->processor = cpu; - idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */ + init_idle(idle, cpu); - del_from_runqueue(idle); unhash_process(idle); - init_tasks[cpu] = idle; cpu_lowcore = get_cpu_lowcore(cpu); cpu_lowcore->save_area[15] = idle->thread.ksp; @@ -588,6 +586,8 @@ smp_count_cpus(); memset(lowcore_ptr,0,sizeof(lowcore_ptr)); + cache_decay_ticks = (200 * HZ) / 1000; /* Is 200ms ok? Robus? XXX */ + /* * Initialize the logical to physical CPU number mapping */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/s390x/kernel/traps.c linux.22-ac2/arch/s390x/kernel/traps.c --- linux.vanilla/arch/s390x/kernel/traps.c 2002-11-29 21:27:13.000000000 +0000 +++ linux.22-ac2/arch/s390x/kernel/traps.c 2003-06-29 16:10:43.000000000 +0100 @@ -144,7 +144,7 @@ * We can't print the backtrace of a running process. It is * unreliable at best and can cause kernel oopses. */ - if (task_has_cpu(tsk)) + if (tsk->state == TASK_RUNNING) return; show_trace((unsigned long *) tsk->thread.ksp); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/sh/mm/fault.c linux.22-ac2/arch/sh/mm/fault.c --- linux.vanilla/arch/sh/mm/fault.c 2003-08-28 16:45:31.000000000 +0100 +++ linux.22-ac2/arch/sh/mm/fault.c 2003-07-22 18:28:03.000000000 +0100 @@ -76,8 +76,6 @@ return 1; check_stack: - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (expand_stack(vma, start) == 0) goto good_area; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/sparc/kernel/sunos_ioctl.c linux.22-ac2/arch/sparc/kernel/sunos_ioctl.c --- linux.vanilla/arch/sparc/kernel/sunos_ioctl.c 2000-09-07 16:32:00.000000000 +0100 +++ linux.22-ac2/arch/sparc/kernel/sunos_ioctl.c 2003-06-29 16:10:35.000000000 +0100 @@ -39,8 +39,12 @@ { int ret = -EBADF; - if (fd >= SUNOS_NR_OPEN || !fcheck(fd)) + read_lock(&current->files->file_lock); + if (fd >= SUNOS_NR_OPEN || !fcheck(fd)) { + read_unlock(&current->files->file_lock); goto out; + } + read_unlock(&current->files->file_lock); /* First handle an easy compat. case for tty ldisc.
*/ if(cmd == TIOCSETD) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/sparc/mm/fault.c linux.22-ac2/arch/sparc/mm/fault.c --- linux.vanilla/arch/sparc/mm/fault.c 2003-06-14 00:11:29.000000000 +0100 +++ linux.22-ac2/arch/sparc/mm/fault.c 2003-06-29 16:10:35.000000000 +0100 @@ -249,8 +249,6 @@ goto bad_area; if(vma->vm_start <= address) goto good_area; - if(!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if(expand_stack(vma, address)) goto bad_area; /* @@ -496,8 +494,6 @@ goto bad_area; if(vma->vm_start <= address) goto good_area; - if(!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if(expand_stack(vma, address)) goto bad_area; good_area: diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/sparc64/kernel/sunos_ioctl32.c linux.22-ac2/arch/sparc64/kernel/sunos_ioctl32.c --- linux.vanilla/arch/sparc64/kernel/sunos_ioctl32.c 2000-08-05 02:16:11.000000000 +0100 +++ linux.22-ac2/arch/sparc64/kernel/sunos_ioctl32.c 2003-06-29 16:10:39.000000000 +0100 @@ -100,8 +100,12 @@ if(fd >= SUNOS_NR_OPEN) goto out; - if(!fcheck(fd)) + read_lock(&current->files->file_lock); + if(!fcheck(fd)) { + read_unlock(&current->files->file_lock); goto out; + } + read_unlock(&current->files->file_lock); if(cmd == TIOCSETD) { mm_segment_t old_fs = get_fs(); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/sparc64/mm/fault.c linux.22-ac2/arch/sparc64/mm/fault.c --- linux.vanilla/arch/sparc64/mm/fault.c 2002-11-29 21:27:13.000000000 +0000 +++ linux.22-ac2/arch/sparc64/mm/fault.c 2003-06-29 16:10:39.000000000 +0100 @@ -373,8 +373,6 @@ if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (!(fault_code & FAULT_CODE_WRITE)) { /* Non-faulting loads shouldn't expand stack.
*/ insn = get_fault_insn(regs, insn); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/arch/sparc64/solaris/timod.c linux.22-ac2/arch/sparc64/solaris/timod.c --- linux.vanilla/arch/sparc64/solaris/timod.c 2002-08-03 16:08:22.000000000 +0100 +++ linux.22-ac2/arch/sparc64/solaris/timod.c 2003-06-29 16:10:39.000000000 +0100 @@ -149,7 +149,9 @@ struct socket *sock; SOLD("wakeing socket"); + read_lock(&current->files->file_lock); sock = &current->files->fd[fd]->f_dentry->d_inode->u.socket_i; + read_unlock(&current->files->file_lock); wake_up_interruptible(&sock->wait); read_lock(&sock->sk->callback_lock); if (sock->fasync_list && !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) @@ -163,7 +165,9 @@ struct sol_socket_struct *sock; SOLD("queuing primsg"); + read_lock(&current->files->file_lock); sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data; + read_unlock(&current->files->file_lock); it->next = sock->pfirst; sock->pfirst = it; if (!sock->plast) @@ -177,7 +181,9 @@ struct sol_socket_struct *sock; SOLD("queuing primsg at end"); + read_lock(&current->files->file_lock); sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data; + read_unlock(&current->files->file_lock); it->next = NULL; if (sock->plast) sock->plast->next = it; @@ -355,7 +361,11 @@ (int (*)(int, unsigned long *))SYS(socketcall); int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int) = (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int))SYS(sendto); - filp = current->files->fd[fd]; + read_lock(&current->files->file_lock); + filp = fcheck(fd); + read_unlock(&current->files->file_lock); + if (!filp) + return -EBADF; ino = filp->f_dentry->d_inode; sock = (struct sol_socket_struct *)filp->private_data; SOLD("entry"); @@ -636,7 +646,11 @@ SOLD("entry"); SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p)); - filp = current->files->fd[fd]; + read_lock(&current->files->file_lock); + filp = fcheck(fd); + read_unlock(&current->files->file_lock); + if (!filp) + return -EBADF; ino = filp->f_dentry->d_inode; sock = (struct sol_socket_struct *)filp->private_data; SOLDD(("%p %p\n", sock->pfirst, sock->pfirst ? sock->pfirst->next : NULL)); @@ -847,7 +861,9 @@ lock_kernel(); if(fd >= NR_OPEN) goto out; - filp = current->files->fd[fd]; + read_lock(&current->files->file_lock); + filp = fcheck(fd); + read_unlock(&current->files->file_lock); if(!filp) goto out; ino = filp->f_dentry->d_inode; @@ -914,7 +930,9 @@ lock_kernel(); if(fd >= NR_OPEN) goto out; - filp = current->files->fd[fd]; + read_lock(&current->files->file_lock); + filp = fcheck(fd); + read_unlock(&current->files->file_lock); if(!filp) goto out; ino = filp->f_dentry->d_inode; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/CREDITS linux.22-ac2/CREDITS --- linux.vanilla/CREDITS 2003-08-28 16:45:26.000000000 +0100 +++ linux.22-ac2/CREDITS 2003-08-28 16:56:47.000000000 +0100 @@ -2603,6 +2603,12 @@ S: 7000 Stuttgart 50 S: Germany +N: Andrew Rodland +E: arodland@linuxguru.net +D: That crazy morse code thing.
+P: D2B1 5215 B1B9 18E0 B6AD 6ADD 4373 165F 1770 BD5C +S: Pennsylvania, USA + N: Christoph Rohland E: hans-christoph.rohland@sap.com E: ch.rohland@gmx.net diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/00-INDEX linux.22-ac2/Documentation/00-INDEX --- linux.vanilla/Documentation/00-INDEX 2001-08-27 15:44:15.000000000 +0100 +++ linux.22-ac2/Documentation/00-INDEX 2003-06-29 16:10:46.000000000 +0100 @@ -52,6 +52,8 @@ - directory with information on the CD-ROM drivers that Linux has. computone.txt - info on Computone Intelliport II/Plus Multiport Serial Driver +cpufreq + - describes the CPU frequency and voltage scaling support cpqarray.txt - info on using Compaq's SMART2 Intelligent Disk Array Controllers. devices.txt diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/cciss.txt linux.22-ac2/Documentation/cciss.txt --- linux.vanilla/Documentation/cciss.txt 2003-08-28 16:45:26.000000000 +0100 +++ linux.22-ac2/Documentation/cciss.txt 2003-08-13 14:46:11.000000000 +0100 @@ -127,3 +127,55 @@ access these devices too, as if the array controller were merely a SCSI controller in the same way that we are allowing it to access SCSI tape drives. +Monitor Threads +--------------- + +For multipath configurations (achieved via a higher level driver, such +as the "md" driver) it is important that failure of a controller is detected. +Ordinarily, the driver is entirely interrupt driven. If a failure occurs +in such a way that the processor cannot receive interrupts from an adapter, +the driver will wait forever for i/o's to complete. In a multipath +configuration this is undesirable, as the md driver relies on i/o's being +reported as failed by the low level driver to trigger failing over to an +alternate controller. The monitor threads allow the driver to detect such +situations and report outstanding i/o's as having failed so that recovery +actions such as switching to an alternate controller can occur. The monitor +threads periodically send a trivial "no-operation" command down to +the controllers and expect them to complete within a reasonable (short) +time period. The firmware on the adapter is designed such that no matter +how busy the adapter is serving i/o, it can respond quickly to a +"no-operation" command. In the event that a deadline elapses before a no +operation command completes, all outstanding commands on that controller +are reported back to the upper layers as having failed, and any new commands +sent to the controller are immediately reported back as failed. + +To enable the monitor threads, the compile time option must be enabled +(via the usual linux kernel configuration) and the monitor thread must +be enabled at runtime as well. A system may have many adapters, but +perhaps only a single pair operating in a multipath configuration. +In this way, it is possible to run monitoring threads only for those +adapters which require it. + +To start a monitoring thread on the first cciss adapter, "cciss0", with +a polling interval of 30 seconds, execute the following command: + + echo "monitor 30" > /proc/driver/cciss/cciss0 + +To change the polling interval to, say, 60 seconds: + + echo "monitor 60" > /proc/driver/cciss/cciss0 + +(Note, the change will not take effect until the previous polling +interval elapses.)
+ +To disable the monitoring thread, set the polling interval to 0 seconds: + + echo "monitor 0" > /proc/driver/cciss/cciss0 + +(Again, the monitoring thread will not exit until the previous polling +interval elapses.) + +The minimum monitoring period is 10 seconds, and the maximum monitoring +period is 3600 seconds (1 hour). The no-operation command must complete +within 5 seconds of submission in all cases or the controller will be presumed +failed. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/Changes linux.22-ac2/Documentation/Changes --- linux.vanilla/Documentation/Changes 2002-11-29 21:27:11.000000000 +0000 +++ linux.22-ac2/Documentation/Changes 2003-07-31 14:47:50.000000000 +0100 @@ -56,7 +56,9 @@ o e2fsprogs 1.25 # tune2fs o jfsutils 1.0.12 # fsck.jfs -V o reiserfsprogs 3.6.3 # reiserfsck -V 2>&1|grep reiserfsprogs +o xfsprogs 2.1.0 # xfs_db -V o pcmcia-cs 3.1.21 # cardmgr -V +o quota-tools 3.09 # quota -V o PPP 2.4.0 # pppd --version o isdn4k-utils 3.1pre1 # isdnctrl 2>&1|grep version @@ -190,6 +192,17 @@ versions of mkreiserfs, resize_reiserfs, debugreiserfs and reiserfsck. These utils work on both i386 and alpha platforms. +Xfsprogs +-------- + +The latest version of xfsprogs contains mkfs.xfs, xfs_db, and the +xfs_repair utilities, among others, for the XFS filesystem. It is +architecture independent and any version from 2.0.0 onward should +work correctly with this version of the XFS kernel code. For the new +(v2) log format that has better support for stripe-size aligning on +LVM and MD devices, at least xfsprogs 2.1.0 is needed. + + Pcmcia-cs --------- @@ -197,6 +210,14 @@ kernel source. Pay attention when you recompile your kernel ;-). Also, be sure to upgrade to the latest pcmcia-cs release. +Quota-tools +----------- + +Support for 32 bit uid's and gid's is required if you want to use +the newer version 2 quota format. Quota-tools version 3.07 and +newer have this support. Use the recommended version or newer +from the table above. + Intel IA32 microcode -------------------- @@ -327,6 +348,10 @@ ------------- o +Xfsprogs +-------- +o + LVM toolset ----------- o @@ -335,6 +360,10 @@ --------- o +Quota-tools +---------- +o + Jade ---- o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/Configure.help linux.22-ac2/Documentation/Configure.help --- linux.vanilla/Documentation/Configure.help 2003-08-28 16:45:26.000000000 +0100 +++ linux.22-ac2/Documentation/Configure.help 2003-09-09 19:05:21.000000000 +0100 @@ -262,6 +262,12 @@ If you don't have this computer, you may safely say N. +Clustered APIC support +CONFIG_X86_CLUSTERED_APIC + This option is required to support systems with more than 8 logical CPUs. + + If you don't have such a computer, you may safely say N. + IO-APIC support on uniprocessors CONFIG_X86_UP_IOAPIC An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an @@ -464,8 +470,14 @@ The initial RAM disk is a RAM disk that is loaded by the boot loader (loadlin or lilo) and that is mounted as root before the normal boot procedure. It is typically used to load modules needed to mount the - "real" root file system, etc. See - for details. + "real" root file system, etc. + + Due to a problem elsewhere in the kernel, initial RAM disks _must_ + have the file system on them created with a 1024 byte block size. + If any other value is used, the kernel will be unable to mount the + RAM disk at boot time, causing a kernel panic. + + See for details.
Embed root filesystem ramdisk into the kernel CONFIG_EMBEDDED_RAMDISK @@ -1141,6 +1153,15 @@ Say Y here if you have an IDE controller which uses any of these chipsets: CMD643, CMD646 and CMD648. +Compaq Triflex IDE support +CONFIG_BLK_DEV_TRIFLEX + Say Y here if you have a Compaq Triflex IDE controller, such + as those commonly found on Compaq Pentium-Pro systems + + If you want to compile it as a module, say M here and read + . The module will be called + triflex.o. + CY82C693 chipset support CONFIG_BLK_DEV_CY82C693 This driver adds detection and support for the CY82C693 chipset @@ -1210,11 +1231,26 @@ This is a driver for the OPTi 82C621 EIDE controller. Please read the comments at the top of . +National SCx200 chipset support +CONFIG_BLK_DEV_SC1200 + This driver adds support for the built in IDE on the National + SCx200 series of embedded x86 "Geode" systems + + If you want to compile it as a module, say M here and read + . The module will be called + sc1200.o. + ServerWorks OSB4/CSB5 chipset support CONFIG_BLK_DEV_SVWKS This driver adds PIO/(U)DMA support for the ServerWorks OSB4/CSB5 chipsets. +SGI IOC4 chipset support +CONFIG_BLK_DEV_SGIIOC4 + This driver adds PIO & MultiMode DMA-2 support for the SGI IOC4 + chipset. Please say Y here, if you have an Altix System from + Silicon Graphics Inc. + Intel PIIXn chipsets support CONFIG_BLK_DEV_PIIX This driver adds PIO mode setting and tuning for all PIIX IDE @@ -1241,20 +1277,23 @@ If unsure, say N. -PROMISE PDC20246/PDC20262/PDC20265/PDC20267/PDC20268 support +PROMISE PDC20246/PDC20262/PDC20265/PDC20267 support CONFIG_BLK_DEV_PDC202XX_OLD - Promise Ultra33 or PDC20246 - Promise Ultra66 or PDC20262 - Promise Ultra100 or PDC20265/PDC20267/PDC20268 + Promise Ultra 33 [PDC20246] + Promise Ultra 66 [PDC20262] + Promise FastTrak 66 [PDC20263] + Promise MB Ultra 100 [PDC20265] + Promise Ultra 100 [PDC20267] This driver adds up to 4 more EIDE devices sharing a single - interrupt. This add-on card is a bootable PCI UDMA controller. Since + interrupt. This device is a bootable PCI UDMA controller. Since multiple cards can be installed and there are BIOS ROM problems that - happen if the BIOS revisions of all installed cards (three-max) do + happen if the BIOS revisions of all installed cards (max of three) do not match, the driver attempts to do dynamic tuning of the chipset - at boot-time for max-speed. Ultra33 BIOS 1.25 or newer is required + at boot-time for max speed. Ultra33 BIOS 1.25 or newer is required for more than one card. This card may require that you say Y to - "Special UDMA Feature". + "Special UDMA Feature" to force UDMA mode for connected UDMA capable + disk drives. If you say Y here, you need to say Y to "Use DMA by default when available" as well. @@ -1264,7 +1303,30 @@ If unsure, say N. -Special UDMA Feature +PROMISE PDC202{68|69|70|71|75|76|77} support +CONFIG_BLK_DEV_PDC202XX_NEW + Promise Ultra 100 TX2 [PDC20268] + Promise Ultra 133 PTX2 [PDC20269] + Promise FastTrak LP/TX2/TX4 [PDC20270] + Promise FastTrak TX2000 [PDC20271] + Promise MB Ultra 133 [PDC20275] + Promise MB FastTrak 133 [PDC20276] + Promise FastTrak 133 [PDC20277] + + This driver adds up to 4 more EIDE devices sharing a single + interrupt. This device is a bootable PCI UDMA controller. Since + multiple cards can be installed and there are BIOS ROM problems that + happen if the BIOS revisions of all installed cards (max of five) do + not match, the driver attempts to do dynamic tuning of the chipset + at boot-time for max speed. 
Ultra33 BIOS 1.25 or newer is required + for more than one card. + + If you say Y here, you need to say Y to "Use DMA by default when + available" as well. + + If unsure, say N. + +Override-Enable UDMA for Promise Controllers CONFIG_PDC202XX_BURST This option causes the pdc202xx driver to enable UDMA modes on the PDC202xx even when the PDC202xx BIOS has not done so. @@ -1274,14 +1336,24 @@ used successfully on a PDC20265/Ultra100, allowing use of UDMA modes when the PDC20265 BIOS has been disabled (for faster boot up). - Please read the comments at the top of - . - If unsure, say N. -Special FastTrak Feature +Use FastTrak RAID capable device as plain IDE controller CONFIG_PDC202XX_FORCE - For FastTrak enable overriding BIOS. + Setting this option causes the kernel to use your Promise IDE disk + controller as an ordinary IDE controller, rather than as a FastTrak + RAID controller. RAID is a system for using multiple physical disks + as one virtual disk. + + You need to say Y here if you have a PDC20276 IDE interface but either + you do not have a RAID disk array, or you wish to use the Linux + internal RAID software (/dev/mdX). + + You need to say N here if you wish to use your Promise controller to + control a FastTrak RAID disk array, and you you must also say Y to + CONFIG_BLK_DEV_ATARAID_PDC. + + If unsure, say Y. SiS5513 chipset support CONFIG_BLK_DEV_SIS5513 @@ -1856,6 +1928,20 @@ want), say M here and read . The module will be called lvm-mod.o. +Device-mapper support +CONFIG_BLK_DEV_DM + Device-mapper is a low level volume manager. It works by allowing + people to specify mappings for ranges of logical sectors. Various + mapping types are available, in addition people may write their own + modules containing custom mappings if they wish. + + Higher level volume managers such as LVM2 use this driver. + + If you want to compile this as a module, say M here and read + . The module will be called dm-mod.o. + + If unsure, say N. + Multiple devices driver support (RAID and LVM) CONFIG_MD Support multiple physical spindles through a single logical device. @@ -3217,6 +3303,71 @@ If you want to compile it as a module, say M here and read . If unsure, say `N'. +AH/ESP match support (EXPERIMENTAL) +CONFIG_IP6_NF_MATCH_AHESP + This module allows one to match AH and ESP packets. + + If you want to compile it as a module, say M here and read + . The modules will be called + ip6t_ah.o and ip6t_esp.o. + + If unsure, say 'N'. + +Routing header match support +CONFIG_IP6_NF_MATCH_RT + rt matching allows you to match packets based on the routing + header of the packet. + + If you want to compile it as a module, say M here and read + . The module will be called + ip6t_rt.o. + + If unsure, say 'N'. + +Hop-by-hop and Dst opts header match support +CONFIG_IP6_NF_MATCH_OPTS + This allows one to match packets based on the hop-by-hop + and destination options headers of a packet. + + If you want to compile it as a module, say M here and read + . The modules will be called + ip6t_hbh.o and ip6t_dst.o. + + If unsure, say 'N'. + +Fragmentation header match support +CONFIG_IP6_NF_MATCH_FRAG + frag matching allows you to match packets based on the fragmentation + header of the packet. + + If you want to compile it as a module, say M here and read + . The module will be called + ip6t_frag.o. + + If unsure, say 'N'. + +HL match support +CONFIG_IP6_NF_MATCH_HL + HL matching allows you to match packets based on the hop + limit of the packet. + + If you want to compile it as a module, say M here and read + . 
The module will be called + ip6t_hl.o. + + If unsure, say 'N'. + +IPv6 Extension Headers Match (EXPERIMENTAL) +CONFIG_IP6_NF_MATCH_IPV6HEADER + This module allows one to match packets based upon + the ipv6 extension headers. + + If you want to compile it as a module, say M here and read + . The module will be called + ip6t_ipv6header.o. + + If unsure, say 'N'. + SYN flood protection CONFIG_SYN_COOKIES Normal TCP/IP networking is open to an attack known as "SYN @@ -3706,11 +3857,10 @@ Generic SiS support CONFIG_AGP_SIS - This option gives you AGP support for the GLX component of the "soon - to be released" XFree86 4.x on Silicon Integrated Systems [SiS] - chipsets. + This option gives you AGP support for the GLX component of + XFree86 4.x on Silicon Integrated Systems [SiS] chipsets. - Note that 5591/5592 AGP chipsets are NOT supported. + The 5591/5592 AGP bridge is only generically supported. You should say Y here if you use XFree86 3.3.6 or 4.x and want to use GLX or DRI. If unsure, say N. @@ -4886,12 +5036,11 @@ messages. Most people will want to say N here. If unsure, you will also want to say N. -Matrox unified accelerated driver CONFIG_FB_MATROX - Say Y here if you have a Matrox Millennium, Millennium II, Mystique, - Mystique 220, Productiva G100, Mystique G200, Millennium G200, - Matrox G400, G450 or G550 card in your box. At this time, support for - the G-series digital output is almost non-existant. + Say Y here if you have a Matrox Millennium, Matrox Millennium II, + Matrox Mystique, Matrox Mystique 220, Matrox Productiva G100, Matrox + Mystique G200, Matrox Millennium G200, Matrox Marvel G200 video, + Matrox G400, G450 or G550 card in your box. This driver is also available as a module ( = code which can be inserted and removed from the running kernel whenever you want). @@ -4902,7 +5051,6 @@ module load time. The parameters look like "video=matrox:XXX", and are described in . -Matrox Millennium I/II support CONFIG_FB_MATROX_MILLENIUM Say Y here if you have a Matrox Millennium or Matrox Millennium II video card. If you select "Advanced lowlevel driver options" below, @@ -4910,7 +5058,6 @@ packed pixel, 24 bpp packed pixel and 32 bpp packed pixel. You can also use font widths different from 8. -Matrox Mystique support CONFIG_FB_MATROX_MYSTIQUE Say Y here if you have a Matrox Mystique or Matrox Mystique 220 video card. If you select "Advanced lowlevel driver options" below, @@ -4971,7 +5118,6 @@ If you compile it as module, it will create a module named i2c-matroxfb.o. -Matrox G400 second head support CONFIG_FB_MATROX_MAVEN WARNING !!! This support does not work with G450 !!! @@ -5109,19 +5255,36 @@ SIS acceleration CONFIG_FB_SIS - This is the frame buffer device driver for the SiS 630 and 640 Super - Socket 7 UMA cards. Specs available at . + This is the frame buffer device driver for SiS VGA controllers. + + This driver is required for SiS DRM. + + See for detailed + documentation. + + Hardware specs available at . SIS 630/540/730 support CONFIG_FB_SIS_300 - This is the frame buffer device driver for the SiS 630 and related - Super Socket 7 UMA cards. Specs available at - . + This is the frame buffer device driver for SiS 300/305/630/730 VGA + controllers. + + This driver is required for SiS DRM. + + See for detailed + documentation. + + Hardware specs available at . SIS 315H/315 support CONFIG_FB_SIS_315 - This is the frame buffer device driver for the SiS 315 graphics - card. Specs available at . 
+ This is the frame buffer device driver for SiS 315 (including 315H + and 315PRO), 650, M650, 651, 740 and Xabre VGA controllers. + + See for detailed + documentation. + + Hardware specs available at . IMS Twin Turbo display support CONFIG_FB_IMSTT @@ -5453,6 +5616,19 @@ replacement for kerneld.) Say Y here and read about configuring it in . +Kernel .config file saved in kernel image +CONFIG_IKCONFIG + This option enables the complete Linux kernel ".config" file contents + to be saved in the kernel (zipped) image file. It provides + documentation of which kernel options are used in a running kernel or + in an on-disk kernel. It can be extracted from the kernel image file + with a script and used as input to rebuild the current kernel or to + build another kernel. Since the kernel image is zipped, using this + option adds approximately 8 KB to a kernel image file. + This option is not available as a module. If you want a separate + file to save the kernel's .config contents, use 'installkernel' or 'cp' + or a similar tool, or just save it in '/lib/modules/'. + ARP daemon support CONFIG_ARPD Normally, the kernel maintains an internal cache which maps IP @@ -6915,7 +7091,7 @@ CONFIG_CISS_SCSI_TAPE When enabled (Y), this option allows SCSI tape drives and SCSI medium changers (tape robots) to be accessed via a Compaq 5xxx array - controller. (See Documentation/cciss.txt for more details.) + controller. (See for more details.) "SCSI support" and "SCSI tape support" must also be enabled for this option to work. @@ -6923,6 +7099,15 @@ When this option is disabled (N), the SCSI portion of the driver is not compiled. +Enable monitor thread +CONFIG_CISS_MONITOR_THREAD + Intended for use with multipath configurations (see the md driver). + This option allows a per-adapter monitoring thread to periodically + poll the adapter to detect failure modes in which the processor + is unable to receive interrupts from the adapter, thus enabling + fail-over to an alternate adapter in such situations. See + for more details. + QuickNet Internet LineJack/PhoneJack support CONFIG_PHONE_IXJ Say M if you have a telephony card manufactured by Quicknet @@ -7087,6 +7272,16 @@ architecture is based on LSI Logic's Message Passing Interface (MPI) specification. +Maximum number of scatter gather entries +CONFIG_FUSION_MAX_SGE + This option allows you to specify the maximum number of scatter- + gather entries per I/O. The driver defaults to 40, a reasonable number + for most systems. However, the user may increase this up to 128. + Increasing this parameter will require significantly more memory + on a per controller instance. Increasing the parameter is not + necessary (or recommended) unless the user will be running + large I/O's via the raw interface. + Fusion MPT enhanced SCSI error reporting [optional] module CONFIG_FUSION_ISENSE The isense module (roughly stands for Interpret SENSE data) is @@ -7354,6 +7549,27 @@ there should be no noticeable performance impact as long as you have logging turned off. +QDIO base support for IBM S/390 and zSeries +CONFIG_QDIO + This driver provides the Queued Direct I/O base support for the + IBM S/390 (G5 and G6) and eServer zSeries (z800 and z900). + + For details please refer to the documentation provided by IBM at + + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called qdio.o. If you want to compile it as a + module, say M here and read . + + If unsure, say Y. 
+ +Performance statistics for QDIO base support +CONFIG_QDIO_PERF_STATS + Say Y here to get performance statistics in /proc/qdio_perf + + If unsure, say N. + SGI WD93C93 SCSI Driver CONFIG_SCSI_SGIWD93 Say Y here to support the on-board WD93C93 SCSI controller found (a) @@ -7704,9 +7920,7 @@ Adapters. Consult the SCSI-HOWTO, available from , and the files and - for more information. If this - driver does not work correctly without modification, please contact - the author, Leonard N. Zubkoff, by email to lnz@dandelion.com. + for more information. You can also build this driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), @@ -8627,16 +8841,28 @@ say M here and read . The module will be called AM53C974.o. -AMI MegaRAID support +AMI MegaRAID support (old driver) CONFIG_SCSI_MEGARAID This driver supports the AMI MegaRAID 418, 428, 438, 466, 762, 490 - and 467 SCSI host adapters. + and 467 SCSI host adapters. This is the old and very heavily tested + driver but lacks features like clustering. If you want to compile this driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . The module will be called megaraid.o. +AMI MegaRAID support (new driver) +CONFIG_SCSI_MEGARAID2 + This driver supports the AMI MegaRAID 418, 428, 438, 466, 762, 490 + and 467 SCSI host adapters. This is the newer, less tested but more + featureful driver. + + If you want to compile this driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read . The module + will be called megaraid2.o. + Intel/ICP (former GDT SCSI Disk Array) RAID Controller support CONFIG_SCSI_GDTH Formerly called GDT SCSI Disk Array Controller Support. @@ -8979,6 +9205,17 @@ Say Y if you really want or need the debugging output, everyone else says N. +CONFIG_IEEE1394_OUI_DB + If you say Y here, then an OUI list (vendor unique ID's) will be + compiled into the ieee1394 module. This doesn't really do much + except being able to display the vendor of a hardware node. The + downside is that it adds about 300k to the size of the module, + or kernel (depending on whether you compile ieee1394 as a + module, or static in the kernel). + + This option is not needed for userspace programs like gscanbus + to show this information. + Network device support CONFIG_NETDEVICES You can say N here if you don't intend to connect your Linux box to @@ -9377,7 +9614,7 @@ Aironet 4500/4800 I365 broken support CONFIG_AIRONET4500_I365 If you have a PCMCIA Aironet 4500/4800 card which you want to use - without the standard PCMCIA cardservices provided by the pcmcia-cs + without the standard PCMCIA card services provided by the pcmcia-cs package, say Y here. This is not recommended, so say N. Aironet 4500/4800 PCMCIA support @@ -10456,6 +10693,26 @@ The module will be called dscc4.o. For general information about modules read . +PCISYNC feature +CONFIG_DSCC4_PCISYNC + Due to Etinc's design choice for its PCISYNC cards, some operations + are only allowed on specific ports of the DSCC4. This option is the + only way for the driver to know that it shouldn't return a success + code for these operations. + + Please say Y if your card is an Etinc PCISYNC. + +Hard reset support +CONFIG_DSCC4_PCI_RST + Various DSCC4 bugs forbid any reliable software reset of the asic. + As a replacement, some vendors provide a way to assert the PCI #RST + pin of DSCC4 through the GPIO port of the card.
If you choose Y, the + driver will make use of this feature before module removal (i.e. rmmod). + This feature is known to exist on Commtech's cards. + Contact your manufacturer for details. + + Say Y if your card supports this feature. + LanMedia Corp. serial boards (SSI/V.35, T1/E1, HSSI, T3) CONFIG_LANMEDIA This is a driver for the following Lan Media family of serial @@ -10804,6 +11061,15 @@ If unsure, say N here. +Raw HDLC Ethernet device support +CONFIG_HDLC_RAW_ETH + Say Y to this option if you want generic HDLC driver to support + raw HDLC Ethernet device emulation over WAN (Wide Area Network) + connections. + You will need it for Ethernet over HDLC bridges. + + If unsure, say N here. + Cisco HDLC support CONFIG_HDLC_CISCO Say Y to this option if you want generic HDLC driver to support @@ -10818,13 +11084,6 @@ If unsure, say N here. -Frame-Relay bridging support -CONFIG_HDLC_FR_BRIDGE - Say Y to this option if you want generic HDLC driver to support - bridging LAN frames over Frame-Relay links. - - If unsure, say N here. - Synchronous Point-to-Point Protocol (PPP) support CONFIG_HDLC_PPP Say Y to this option if you want generic HDLC driver to support @@ -11195,33 +11454,60 @@ The safe and default value for this is N. -SysKonnect SK-98xx and SK-95xx Gigabit Ethernet Adapter family support +Marvell Yukon / SysKonnect SK-98xx and SK-95xx Gigabit Ethernet Adapter family support CONFIG_SK98LIN - Say Y here if you have a SysKonnect SK-98xx or SK-95xx Gigabit - Ethernet Server Adapter. The following adapters are supported by - this driver: - - SK-9521 10/100/1000Base-T Adapter - - SK-9821 Gigabit Ethernet 1000Base-T Server Adapter - - SK-9822 Gigabit Ethernet 1000Base-T Dual Port Server Adapter - - SK-9841 Gigabit Ethernet 1000Base-LX Server Adapter - - SK-9842 Gigabit Ethernet 1000Base-LX Dual Port Server Adapter - - SK-9843 Gigabit Ethernet 1000Base-SX Server Adapter - - SK-9844 Gigabit Ethernet 1000Base-SX Dual Port Server Adapter - - SK-9861 Gigabit Ethernet 1000Base-SX Server Adapter - - SK-9862 Gigabit Ethernet 1000Base-SX Dual Port Server Adapter - - SK-9871 Gigabit Ethernet 1000Base-ZX Server Adapter - - SK-9872 Gigabit Ethernet 1000Base-ZX Dual Port Server Adapter + Say Y here if you have a Marvell Yukon or SysKonnect SK-98xx/SK-95xx + compliant Gigabit Ethernet Adapter.
The following adapters are supported + by this driver: + - 3Com 3C940 Gigabit LOM Ethernet Adapter + - 3Com 3C941 Gigabit LOM Ethernet Adapter + - SK-9821 Gigabit Ethernet Server Adapter (SK-NET GE-T) + - SK-9822 Gigabit Ethernet Server Adapter (SK-NET GE-T dual link) + - SK-9841 Gigabit Ethernet Server Adapter (SK-NET GE-LX) + - SK-9842 Gigabit Ethernet Server Adapter (SK-NET GE-LX dual link) + - SK-9843 Gigabit Ethernet Server Adapter (SK-NET GE-SX) + - SK-9844 Gigabit Ethernet Server Adapter (SK-NET GE-SX dual link) + - SK-9861 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition) + - SK-9862 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition dual link) + - SK-9871 Gigabit Ethernet Server Adapter (SK-NET GE-ZX) + - SK-9872 Gigabit Ethernet Server Adapter (SK-NET GE-ZX dual link) + - Allied Telesyn AT-2970SX Gigabit Ethernet Adapter + - Allied Telesyn AT-2970LX Gigabit Ethernet Adapter + - Allied Telesyn AT-2970TX Gigabit Ethernet Adapter + - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter + - Allied Telesyn AT-2971T Gigabit Ethernet Adapter + - Allied Telesyn AT-2970SX/2SC Gigabit Ethernet Adapter + - Allied Telesyn AT-2970LX/2SC Gigabit Ethernet Adapter + - Allied Telesyn AT-2970TX/2TX Gigabit Ethernet Adapter + - Marvell RDK-8001 Adapter + - Marvell RDK-8002 Adapter + - Marvell RDK-8003 Adapter + - Marvell RDK-8004 Adapter + - Marvell RDK-8006 Adapter + - Marvell RDK-8007 Adapter + - Marvell RDK-8008 Adapter + - Marvell RDK-8009 Adapter + - Marvell RDK-8011 Adapter + - Marvell RDK-8012 Adapter + - SK-9521 V2.0 10/100/1000Base-T Adapter - SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter - SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter - SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter - SK-9851 V2.0 Gigabit Ethernet 1000Base-SX Adapter - SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter - SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter + - SK-9521 10/100/1000Base-T Adapter + - DGE-530T Gigabit Ethernet Adapter + - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter + - N-Way PCI-Bus Giga-Card 1000/100/10Mbps(L) + - EG1032 v2 Instant Gigabit Network Adapter + - EG1064 v2 Instant Gigabit Network Adapter The adapters support Jumbo Frames. The dual link adapters support link-failover and dual port features. - The V2.0 adapters support the scatter-gather functionality with - sendfile(). Read Documentation/networking/sk98lin.txt for information about + Both Marvell Yukon and SysKonnect SK-98xx/SK-95xx adapters support + the scatter-gather functionality with sendfile(). Please refer to + Documentation/networking/sk98lin.txt for more information about optional driver parameters. Questions concerning this driver may be addressed to: linux@syskonnect.de @@ -11231,6 +11517,122 @@ say M here and read Documentation/modules.txt. This is recommended. The module will be called sk98lin.o. 
+Marvell Yukon Chipset +CONFIG_SK98LIN_T1 + This driver supports: + + - 3Com 3C940 Gigabit LOM Ethernet Adapter + - 3Com 3C941 Gigabit LOM Ethernet Adapter + + Questions concerning this driver may be addressed to: + linux@syskonnect.de + +SysKonnect SK98xx Support +CONFIG_SK98LIN_T2 + This driver supports: + + - SK-9821 Gigabit Ethernet Server Adapter (SK-NET GE-T) + - SK-9822 Gigabit Ethernet Server Adapter (SK-NET GE-T dual link) + - SK-9841 Gigabit Ethernet Server Adapter (SK-NET GE-LX) + - SK-9842 Gigabit Ethernet Server Adapter (SK-NET GE-LX dual link) + - SK-9843 Gigabit Ethernet Server Adapter (SK-NET GE-SX) + - SK-9844 Gigabit Ethernet Server Adapter (SK-NET GE-SX dual link) + - SK-9861 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition) + - SK-9862 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition dual link) + - SK-9871 Gigabit Ethernet Server Adapter (SK-NET GE-ZX) + - SK-9872 Gigabit Ethernet Server Adapter (SK-NET GE-ZX dual link) + + Questions concerning this driver may be addressed to: + linux@syskonnect.de + +SysKonnect SK98xx Support +CONFIG_SK98LIN_T3 + This driver supports: + + - Allied Telesyn AT-2970SX Gigabit Ethernet Adapter + - Allied Telesyn AT-2970LX Gigabit Ethernet Adapter + - Allied Telesyn AT-2970TX Gigabit Ethernet Adapter + - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter + - Allied Telesyn AT-2971T Gigabit Ethernet Adapter + - Allied Telesyn AT-2970SX/2SC Gigabit Ethernet Adapter + - Allied Telesyn AT-2970LX/2SC Gigabit Ethernet Adapter + - Allied Telesyn AT-2970TX/2TX Gigabit Ethernet Adapter + + Questions concerning this driver may be addressed to: + linux@syskonnect.de + +Marvell Yukon Chipset +CONFIG_SK98LIN_T4 + This driver supports: + + - Marvell RDK-8001 Adapter + - Marvell RDK-8002 Adapter + - Marvell RDK-8003 Adapter + - Marvell RDK-8004 Adapter + - Marvell RDK-8006 Adapter + - Marvell RDK-8007 Adapter + - Marvell RDK-8008 Adapter + - Marvell RDK-8009 Adapter + - Marvell RDK-8011 Adapter + - Marvell RDK-8012 Adapter + + Questions concerning this driver may be addressed to: + linux@syskonnect.de + +Marvell Yukon Chipset +CONFIG_SK98LIN_T5 + This driver supports: + + - SK-9521 V2.0 10/100/1000Base-T Adapter + - SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter + - SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter + - SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter + - SK-9851 V2.0 Gigabit Ethernet 1000Base-SX Adapter + - SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter + - SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter + - SK-9521 10/100/1000Base-T Adapter + + Questions concerning this driver may be addressed to: + linux@syskonnect.de + +Marvell Yukon Chipset +CONFIG_SK98LIN_T6 + This driver supports: + + - DGE-530T Gigabit Ethernet Adapter + + Questions concerning this driver may be addressed to: + linux@syskonnect.de + +Marvell Yukon Chipset +CONFIG_SK98LIN_T7 + This driver supports: + + - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter + + Questions concerning this driver may be addressed to: + linux@syskonnect.de + +Marvell Yukon Chipset +CONFIG_SK98LIN_T8 + This driver supports: + + - N-Way PCI-Bus Giga-Card 1000/100/10Mbps(L) + + Questions concerning this driver may be addressed to: + linux@syskonnect.de + +Marvell Yukon Chipset +CONFIG_SK98LIN_T9 + This driver supports: + + - EG1032 v2 Instant Gigabit Network Adapter + - EG1064 v2 Instant Gigabit Network Adapter + + Questions concerning this driver may be addressed to: + linux@syskonnect.de + + Sun GEM support CONFIG_SUNGEM Support for the Sun GEM chip, aka Sun 
GigabitEthernet/P 2.0. See also @@ -12066,6 +12468,11 @@ . The module will be called apricot.o. +Broadcom BCM4400 support +CONFIG_NET_BCM4400 + This adds support for the Broadcom 4400 series of Ethernet drivers + using the v2.0.0 drivers. See Documentation/networking/bcm4400.txt + Generic DECchip & DIGITAL EtherWORKS PCI/EISA CONFIG_DE4X5 This is support for the DIGITAL series of PCI/EISA Ethernet cards. @@ -12101,6 +12508,15 @@ module, say M here and read as well as . +New Tulip bus configuration +CONFIG_TULIP_MWI + This configures your Tulip card specifically for the card and + system cache line size type you are using. + + This is experimental code, not yet tested on many boards. + + If unsure, say N. + Use PCI shared memory for NIC registers CONFIG_TULIP_MMIO Use PCI shared memory for the NIC registers, rather than going through @@ -12843,6 +13259,22 @@ The module will be called mcdx.o. If you want to compile it as a module, say M here and read . +Goldstar R420 CDROM support +CONFIG_GSCD + If this is your CD-ROM drive, say Y here. As described in the file + , you might have to change a setting + in the file before compiling the + kernel. Please read the file . + + If you say Y here, you should also say Y or M to "ISO 9660 CD-ROM + file system support" below, because that's the file system used on + CD-ROMs. + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called gscd.o. If you want to compile it as a + module, say M here and read . + Matsushita/Panasonic/Creative, Longshine, TEAC CD-ROM support CONFIG_SBPCD This driver supports most of the drives which use the Panasonic or @@ -12937,22 +13369,6 @@ The module will be called sonycd535.o. If you want to compile it as a module, say M here and read . -Goldstar R420 CD-ROM support -CONFIG_GSCD - If this is your CD-ROM drive, say Y here. As described in the file - , you might have to change a setting - in the file before compiling the - kernel. Please read the file . - - If you say Y here, you should also say Y or M to "ISO 9660 CD-ROM - file system support" below, because that's the file system used on - CD-ROMs. - - This driver is also available as a module ( = code which can be - inserted in and removed from the running kernel whenever you want). - The module will be called gscd.o. If you want to compile it as a - module, say M here and read . - Philips/LMS CM206 CD-ROM support CONFIG_CM206 If you have a Philips/LMS CD-ROM drive cm206 in combination with a @@ -13017,14 +13433,9 @@ Quota support CONFIG_QUOTA - If you say Y here, you will be able to set per user limits for disk - usage (also called disk quotas). Currently, it works only for the - ext2 file system. You need additional software in order to use quota support (you can download sources from ). For further details, read the Quota mini-HOWTO, available from - . Probably the quota - support is only useful for multi user systems. If unsure, say N. VFS v0 quota format support CONFIG_QFMT_V2 @@ -13032,6 +13443,24 @@ need this functionality say Y here. Note that you will need latest quota utilities for new quota format with this kernel. +Compatible quota interfaces +CONFIG_QIFACE_COMPAT + This option will enable old quota interface in kernel. + If you have old quota tools (version <= 3.04) and you don't want to + upgrade them say Y here. + +Original quota interface +CONFIG_QIFACE_V1 + This is the oldest quota interface. It was used for old quota format. 
+ If you have old quota tools and you use old quota format choose this + interface (if unsure, this interface is the best one to choose). + +VFS v0 quota interface +CONFIG_QIFACE_V2 + This quota interface was used by VFS v0 quota format. If you need + support for VFS v0 quota format (eg. you're using quota on ReiserFS) + and you don't want to upgrade quota tools, choose this interface. + Memory Technology Device (MTD) support CONFIG_MTD Memory Technology Devices are flash, RAM and similar chips, often @@ -13092,14 +13521,15 @@ . The module will be called redboot.o +Command line partition table parsing CONFIG_MTD_CMDLINE_PARTS Allow generic configuration of the MTD paritition tables via the kernel command line. Multiple flash resources are supported for hardware where - different kinds of flash memory are available. + different kinds of flash memory are available. You will still need the parsing functions to be called by the driver - for your particular device. It won't happen automatically. The - SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for + for your particular device. It won't happen automatically. The + SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for example. The format for the command line is as follows: @@ -13108,12 +13538,12 @@ := :[,] := [@offset][][ro] := unique id used in mapping driver/device - := standard linux memsize OR "-" to denote all + := standard linux memsize OR "-" to denote all remaining space := (NAME) - Due to the way Linux handles the command line, no spaces are - allowed in the partition definition, including mtd id's and partition + Due to the way Linux handles the command line, no spaces are + allowed in the partition definition, including mtd id's and partition names. Examples: @@ -13124,8 +13554,19 @@ Same flash, but 2 named partitions, the first one being read-only: mtdparts=sa1100:256k(ARMboot)ro,-(root) + If compiled as a module, it will be called cmdlinepart.o. + If unsure, say 'N'. +MTD concatenating support +CONFIG_MTD_CONCAT + Support for concatenating several MTD devices into a single + (virtual) one. This allows you to have -for example- a JFFS(2) + file system spanning multiple physical flash chips. If unsure, + say 'Y'. + + If compiled as a module, it will be called mtdconcat.o. + ARM Firmware Suite flash layout / partition parsing CONFIG_MTD_AFS_PARTS The ARM Firmware Suite allows the user to divide flash devices into @@ -13293,6 +13734,7 @@ If you wish to support CFI devices on a physical bus which is 32 bits wide, say 'Y'. +Support 64-bit buswidth CONFIG_MTD_CFI_B8 If you wish to support CFI devices on a physical bus which is 64 bits wide, say 'Y'. @@ -13312,6 +13754,7 @@ If your flash chips are interleaved in fours - i.e. you have four flash chips addressed by each bus cycle, then say 'Y'. +Support 8-chip flash interleave CONFIG_MTD_CFI_I8 If your flash chips are interleaved in eights - i.e. you have eight flash chips addressed by each bus cycle, then say 'Y'. @@ -13348,6 +13791,14 @@ provides support for one of those command sets, used on chips chips including the AMD Am29LV320. +Support for ST (Advanced Architecture) flash chips +CONFIG_MTD_CFI_STAA + The Common Flash Interface defines a number of different command + sets which a CFI-compliant chip may claim to implement. This code + provides support for one of those command sets. + + If compiled as a module, it will be called cfi_cmdset_0020.o. 
+ CFI support for Intel/Sharp Standard Commands CONFIG_MTD_CFI_INTELSTD The Common Flash Interface defines a number of different command @@ -13380,11 +13831,6 @@ . The module will be called amd_flash.o -CONFIG_MTD_CFI_STAA - The Common Flash Interface defines a number of different command - sets which a CFI-compliant chip may claim to implement. This code - provides support for one of those command sets. - Support for RAM chips in bus mapping CONFIG_MTD_RAM This option enables basic support for RAM chips accessed through @@ -13532,6 +13978,14 @@ D-Box 2 board. If you have one of these boards and would like to use the flash chips on it, say 'Y'. +CFI Flash devices mapped on IBM Redwood +CONFIG_MTD_REDWOOD + This enables access routines for the flash chips on the IBM + Redwood board. If you have one of these boards and would like to + use the flash chips on it, say 'Y'. + + If compiled as a module, it will be called redwood.o. + CFI Flash device mapped on the XScale IQ80310 board CONFIG_MTD_IQ80310 This enables access routines for the flash chips on the Intel XScale @@ -13664,6 +14118,20 @@ CONFIG_MTD_NAND_SPIA If you had to ask, you don't have one. Say 'N'. +SmartMediaCard on autronix autcpu12 board +CONFIG_MTD_NAND_AUTCPU12 + This enables the driver for the autronix autcpu12 board to + access the SmartMediaCard. + + If compiled as a module, it will be called autcpu12.o. + +Support for Cirrus Logic EBD7312 evaluation board +CONFIG_MTD_NAND_EDB7312 + This enables the driver for the Cirrus Logic EBD7312 evaluation + board to access the onboard NAND Flash. + + If compiled as a module, it will be called edb7312.o. + M-Systems Disk-On-Chip 1000 support CONFIG_MTD_DOC1000 This provides an MTD device driver for the M-Systems DiskOnChip @@ -13793,6 +14261,12 @@ is only really useful if you are developing on this driver or suspect a possible hardware or driver bug. If unsure say N. +DEC MS02-NV NVRAM module support +CONFIG_MTD_MS02NV + Support for NVRAM module on DECstation. + + If compiled as a module, it will be called ms02-nv.o. + Use extra onboard system memory as MTD device CONFIG_MTD_SLRAM If your CPU cannot cache all of the physical memory in your machine, @@ -14201,12 +14675,11 @@ The module will be called powermate.o. If you want to compile it as a module, say M here and read . -Aiptek HyperPen tablet support +Aiptek 6000U/8000U tablet support CONFIG_USB_AIPTEK - Say Y here if you want to use the USB version of the Aiptek HyperPen - Digital Tablet (models 4000U, 5000U, 6000U, 8000U, and 12000U.) - Make sure to say Y to "Mouse support" (CONFIG_INPUT_MOUSEDEV) and/or - "Event interface support" (CONFIG_INPUT_EVDEV) as well. + Say Y here if you want to use the USB version of the Aiptek 6000U/8000U + tablet. Make sure to say Y to "Event interface support" + (CONFIG_INPUT_EVDEV) as well. This driver is also available as a module ( = code which can be inserted in and removed from the running kernel whenever you want). @@ -14918,7 +15391,7 @@ This code is also available as a module ( = code which can be inserted in and removed from the running kernel whenever you want). - The module will be called ax8817x.o. If you want to compile it as a + The module will be called catc.o. If you want to compile it as a module, say M here and read . USB Kodak DC-2xx Camera support @@ -15712,7 +16185,7 @@ debugging output from the driver. This is unlike previous versions of the driver, where enabling this option would turn on debugging output automatically. 
- + Example: mount -t befs /dev/hda2 /mnt -o debug @@ -16295,7 +16768,7 @@ say M here and read . If unsure, say N. -Apple HFS file system support +Apple Macintosh file system support CONFIG_HFS_FS If you say Y here, you will be able to mount Macintosh-formatted floppy disks and hard drive partitions with full read-write access. @@ -16308,7 +16781,7 @@ compile it as a module, say M here and read . -Apple HFS+ (Extended HFS) file system support +Apple Extended file system support (EXPERIMENTAL) CONFIG_HFSPLUS_FS If you say Y here, you will be able to mount extended format Macintosh-formatted hard drive partitions with full read-write access. @@ -16501,6 +16974,78 @@ Say Y here if you want to try writing to UFS partitions. This is experimental, so you should back up your UFS partitions beforehand. +XFS filesystem support +CONFIG_XFS_FS + XFS is a high performance journaling filesystem which originated + on the SGI IRIX platform. It is completely multi-threaded, can + support large files and large filesystems, extended attributes, + variable block sizes, is extent based, and makes extensive use of + Btrees (directories, extents, free space) to aid both performance + and scalability. + + Refer to the documentation at + for complete details. This implementation is on-disk compatible + with the IRIX version of XFS. + + If you want to compile this file system as a module ( = code which + can be inserted in and removed from the running kernel whenever you + want), say M here and read . The + module will be called xfs.o. Be aware, however, that if the file + system of your root partition is compiled as a module, you'll need + to use an initial ramdisk (initrd) to boot. + +Quota support +CONFIG_XFS_QUOTA + If you say Y here, you will be able to set limits for disk usage on + a per user and/or per group basis under XFS. XFS considers quota + information as filesystem metadata and uses journaling to provide a + higher level guarantee of consistency. The on-disk data format for + quota is also compatible with the IRIX version of XFS, allowing a + filesystem to be migrated between Linux and IRIX without any need + for conversion. + + If unsure, say N. More comprehensive documentation can be found in + README.quota in the xfsprogs package. XFS quota can be used either + with or without the generic quota support enabled (CONFIG_QUOTA) - + they are completely independent subsystems. + +Realtime support (EXPERIMENTAL) +CONFIG_XFS_RT + If you say Y here you will be able to mount and use XFS filesystems + which contain a realtime subvolume. The realtime subvolume is a + separate area of disk space where only file data is stored. The + realtime subvolume is designed to provide very deterministic + data rates suitable for media streaming applications. + + See the xfs man page in section 5 for a bit more information. + + This feature is unsupported at this time, is not yet fully + functional, and may cause serious problems. + + If unsure, say N. + +Debugging support (EXPERIMENTAL) +CONFIG_XFS_DEBUG + Say Y here to get an XFS build with many debugging features, + including ASSERT checks, function wrappers around macros, + and extra sanity-checking functions in various code paths. + + Note that the resulting code will be HUGE and SLOW, and probably + not useful unless you are debugging a particular problem. + + Say N unless you are an XFS developer, or play one on TV. + +Pagebuf debugging support (EXPERIMENTAL) +CONFIG_PAGEBUF_DEBUG + Say Y here to get an XFS build which may help you debug pagebuf + problems. 
Enabling this option will attach tracing information + to pagebufs, which can be read with the kdb kernel debugger. + + Note that you will also have to enable the sysctl in + /proc/sys/vm/pagebuf/debug for this to work. + + Say N unless you're interested in debugging pagebuf. + Advanced partition selection CONFIG_PARTITION_ADVANCED Say Y here if you would like to use hard disks under Linux which @@ -16550,7 +17095,7 @@ Say Y here if you would like to use hard disks under Linux which were partitioned on a Macintosh. -Windows Logical Disk Manager (Dynamic Disk) support (EXPERIMENTAL) +Windows Logical Disk Manager (Dynamic Disk) support CONFIG_LDM_PARTITION Say Y here if you would like to use hard disks under Linux which were partitioned using Windows 2000's or XP's Logical Disk Manager. @@ -16565,8 +17110,7 @@ Normal partitions are now called Basic Disks under Windows 2000 and XP. - Technical documentation to accompany this driver is available from: - . + For a fuller description read . If unsure, say N. @@ -16639,8 +17183,9 @@ Intel EFI GUID partition support CONFIG_EFI_PARTITION Say Y here if you would like to use hard disks under Linux which - were partitioned using EFI GPT. Presently only useful on the - IA-64 platform. + were partitioned using EFI GPT. This is the default partition + scheme on IA64, and can be used on other platforms when + large block device (64-bit block address) support is desired. Ultrix partition table support CONFIG_ULTRIX_PARTITION @@ -17482,17 +18027,36 @@ HIL keyboard support CONFIG_HIL The "Human Interface Loop" is a older, 8-channel USB-like controller - used in Hewlett Packard PA-RISC based machines. There are a few - cases where it is seen on PC/MAC architectures as well, usually also - manufactured by HP. This driver is based off MACH and BSD drivers, - and implements support for a keyboard attached to the HIL port. + used in several Hewlett Packard models. This driver is based off + MACH and BSD drivers, and implements support for a keyboard attached + to the HIL port, but not for any other types of HIL input devices + like mice or tablets. However, it has been thoroughly tested and is + stable. + Full support for the USB-like functions and non-keyboard channels of - the HIL is not provided for in this driver. There are vestiges of - mouse support in the driver, but it is probably not working. The - necessary hardware documentation to fully support the HIL controller - and interface it to the linux-input API is lacking. + the HIL is currently being added to the PA-RISC port and will + be backported to work on the m68k port as well. + + Enable this option if you intend to use a HIL keyboard as your + primary keyboard and/or do not wish to test the new HIL driver. - Enable this option if you intend to use a HIL keyboard. +HP System Device Controller support +CONFIG_HP_SDC + This option enables support for the "System Device Controller", + an i8042 carrying microcode to manage a few miscellaneous devices + on some Hewlett Packard systems. The SDC itself contains a 10ms + resolution timer/clock capable of delivering interrupts on a periodic + and one-shot basis. The SDC may also be connected to a battery-backed + real-time clock, a basic audio waveform generator, and an HP-HIL + Master Link Controller serving up to seven input devices. + + By itself this option is rather useless, but enabling it will + enable selection of drivers for the abovementioned devices.
+ It is, however, incompatible with the old, reliable HIL keyboard + driver, and the new HIL driver is experimental, so if you plan to + use a HIL keyboard as your primary keyboard, you may wish to + keep using that driver until the new HIL drivers have had more + testing. Include IOP (IIfx/Quadra 9x0) ADB driver CONFIG_ADB_IOP @@ -17690,6 +18254,27 @@ If you want to compile this driver as a module, say M here and read . The module will be called pcxx.o. +Cyclades-PC300 support +CONFIG_PC300 + This is a driver for the Cyclades-PC300 synchronous communication + boards. These boards provide synchronous serial interfaces to your + Linux box (interfaces currently available are RS-232/V.35, X.21 and + T1/E1). If you wish to support Multilink PPP, please select the + option below this one and read the file README.mlppp provided by PC300 + package. + + If you want to compile this as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read Documentation/modules.txt. The module will be + called pc300.o. + + If you haven't heard about it, it's safe to say N. + +Cyclades-PC300 Sync TTY (to MLPPP) support +CONFIG_PC300_MLPPP + Say 'Y' to this option if you are planning to use Multilink PPP over the + PC300 synchronous communication boards. + SDL RISCom/8 card support CONFIG_RISCOM8 This is a driver for the SDL Communications RISCom/8 multiport card, @@ -17809,6 +18394,42 @@ read . The module will be called istallion.o. +PDC software console support +CONFIG_PDC_CONSOLE + Saying Y here will enable the software based PDC console to be + used as the system console. This is useful for machines in + which the hardware based console has not been written yet. The + following steps must be competed to use the PDC console: + + 1. create the device entry (mknod /dev/ttyB0 c 60 0) + 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0 + 3. Add device ttyB0 to /etc/securetty (if you want to log on as + root on this console.) + 4. Change the kernel command console parameter to: console=ttyB0 + +Serial MUX support +CONFIG_SERIAL_MUX + Saying Y here will enable the hardware MUX serial driver for + the Nova and K Class systems. Due to limitations in the 2.4 + serial console driver, the Serial MUX shares the same device + as the PDC software console (Instructions for creating the + /dev/ttyB0 device is listed in the PDC software console + support help). Hopefully the Serial MUX code will share the + /dev/ttyS0 code in new serial console code for 2.6. + +PDC software console support +CONFIG_PDC_CONSOLE + Saying Y here will enable the software based PDC console to be + used as the system console. This is useful for machines in + which the hardware based console has not been written yet. The + following steps must be competed to use the PDC console: + + 1. create the device entry (mknod /dev/ttyB0 c 11 0) + 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0 + 3. Add device ttyB0 to /etc/securetty (if you want to log on as + root on this console.) + 4. Change the kernel command console parameter to: console=ttyB0 + Microgate SyncLink adapter support CONFIG_SYNCLINK Provides support for the SyncLink ISA and PCI multiprotocol serial @@ -17958,6 +18579,14 @@ doing that; to actually get it to happen you need to pass the option "console=lp0" to the kernel at boot time. + Note that kernel messages can get lost if the printer is out of + paper (or off, or unplugged, or too busy..), but this behaviour + can be changed. 
See drivers/char/lp.c (do this at your own risk). + + Note that kernel messages can get lost if the printer is out of + paper (or off, or unplugged, or too busy..), but this behaviour + can be changed. See drivers/char/lp.c (do this at your own risk). + If the printer is out of paper (or off, or unplugged, or too busy..) the kernel will stall until the printer is ready again. By defining CONSOLE_LP_STRICT to 0 (at your own risk) you @@ -18259,6 +18888,15 @@ it as a module, say M here and read . The module will be called i2c-proc.o. +Powermac Keywest I2C interface +CONFIG_I2C_KEYWEST + This supports the use of the I2C interface in the combo-I/O + chip on recent Apple machines. Say Y if you have such a machine. + + This driver is also available as a module. If you want to compile + it as a module, say M here and read Documentation/modules.txt. + The module will be called i2c-keywest.o. + Bus Mouse Support CONFIG_BUSMOUSE Say Y here if your machine has a bus mouse as opposed to a serial @@ -18768,6 +19406,13 @@ selected, the module will be called i810.o. AGP support is required for this driver to work. +Intel 830M, 845G, 852GM, 855GM, 865G +CONFIG_DRM_I830 + Choose this option if you have a system that has Intel 830M, 845G, + 852GM, 855GM or 865G integrated graphics. If M is selected, the + module will be called i830.o. AGP support is required for this driver + to work. + Matrox G200/G400/G450 CONFIG_DRM_MGA Choose this option if you have a Matrox G200, G400 or G450 graphics @@ -19013,6 +19658,30 @@ particular, many Toshiba laptops require this for correct operation of the AC module. +ACPI Relaxed AML Checking +CONFIG_ACPI_RELAXED_AML + If you say `Y' here, the ACPI interpreter will relax its checking + for valid AML and will ignore some AML mistakes, such as off-by-one + errors in region sizes. Some laptps may require this option. In + particular, many Toshiba laptops require this for correct operation + of the AC module. + +ACPI Relaxed AML Checking +CONFIG_ACPI_RELAXED_AML + If you say `Y' here, the ACPI interpreter will relax its checking + for valid AML and will ignore some AML mistakes, such as off-by-one + errors in region sizes. Some laptps may require this option. In + particular, many Toshiba laptops require this for correct operation + of the AC module. + +ACPI Relaxed AML Checking +CONFIG_ACPI_RELAXED_AML + If you say `Y' here, the ACPI interpreter will relax its checking + for valid AML and will ignore some AML mistakes, such as off-by-one + errors in region sizes. Some laptops may require this option. In + particular, many Toshiba laptops require this for correct operation + of the AC module. + ACPI Bus Manager CONFIG_ACPI_BUSMGR The ACPI Bus Manager enumerates devices in the ACPI namespace, and @@ -19561,6 +20230,15 @@ . The module will be called cpuid.o +x86 BIOS Enhanced Disk Drive support +CONFIG_EDD + Say Y or M here if you want to enable BIOS Enhanced Disk Drive + Services real mode BIOS calls to determine which disk + BIOS tries boot from. This information is then exported via /proc. + + This option is experimental, but believed to be safe, + and most disk controller BIOS vendors do not yet implement this feature. + SBC-60XX Watchdog Timer CONFIG_60XX_WDT This driver can be used with the watchdog timer found on some @@ -21158,17 +21836,6 @@ feature. See and for more information. -PPP filtering for ISDN -CONFIG_IPPP_FILTER - Say Y here if you want to be able to filter the packets passing over - IPPP interfaces. 
This allows you to control which packets count as - activity (i.e. which packets will reset the idle timer or bring up - a demand-dialled link) and which packets are to be dropped entirely. - You need to say Y here if you wish to use the pass-filter and - active-filter options to ipppd. - - If unsure, say N. - Support generic MP (RFC 1717) CONFIG_ISDN_MPP With synchronous PPP enabled, it is possible to increase throughput @@ -21540,6 +22207,11 @@ This enables HiSax support for the HFC-S+, HFC-SP and HFC-PCMCIA cards. This code is not finished yet. +Formula-n enter:now PCI card (EXPERIMENTAL) +CONFIG_HISAX_ENTERNOW_PCI + This enables HiSax support for the Formula-n enter:now PCI + ISDN card. + Am7930 CONFIG_HISAX_AMD7930 This enables HiSax support for the AMD7930 chips on some SPARCs. @@ -22017,7 +22689,6 @@ SCO Module (SCO links) RFCOMM Module (RFCOMM protocol) BNEP Module (BNEP protocol) - CMTP Module (CMTP protocol) Say Y here to enable Linux Bluetooth support and to build BlueZ Core layer. @@ -22072,15 +22743,6 @@ Say Y here to compile BNEP support into the kernel or say M to compile it as module (bnep.o). -CMTP protocol support -CONFIG_BLUEZ_CMTP - CMTP (CAPI Message Transport Protocol) is a transport layer - for CAPI messages. CMTP is required for the Bluetooth Common - ISDN Access Profile. - - Say Y here to compile CMTP support into the kernel or say M to - compile it as module (cmtp.o). - BNEP multicast filter support CONFIG_BLUEZ_BNEP_MC_FILTER This option enables the multicast filter support for BNEP. @@ -24238,24 +24900,28 @@ system console. Available only if 3270 support is compiled in statically. -Support for HWC line mode terminal -CONFIG_HWC - Include support for IBM HWC line-mode terminals. - -Console on HWC line mode terminal -CONFIG_HWC_CONSOLE - Include support for using an IBM HWC line-mode terminal as the Linux +Support for SCLP +CONFIG_SCLP + Include support for the IBM SCLP interface to the service element. + +Support for SCLP line mode terminal +CONFIG_SCLP_TTY + Include support for IBM SCLP line-mode terminals. + +Support for console on SCLP line mode terminal +CONFIG_SCLP_CONSOLE + Include support for using an IBM SCLP line-mode terminal as a Linux system console. -Control Program Identification -CONFIG_HWC_CPI - Allows for Control Program Identification via the HWC interface, - i.e. provides a mean to pass an OS instance name (system name) - to the machine. - - This option should only be selected as a module since the - system name has to be passed as module parameter. The module - will be called hwc_cpi.o. +Control-Program Identification +CONFIG_SCLP_CPI + This option enables the hardware console interface for system + identification. This is commonly used for workload management and + gives you a nice name for the system on the service element. + Please select this option as a module since built-in operation is + completely untested. + You should only select this option if you know what you are doing, + need this feature and intend to run your kernel in LPAR. S/390 tape device support CONFIG_S390_TAPE @@ -24351,20 +25017,6 @@ enabled, you'll be able to toggle chpids logically offline and online. Even if you don't understand what this means, you should say "Y". -Process warning machine checks -CONFIG_MACHCHK_WARNING - Select this option if you want the machine check handler on IBM S/390 or - zSeries to process warning machine checks (e.g. on power failures). - If unsure, say "Y". 
- -Use chscs for Common I/O -CONFIG_CHSC - Select this option if you want the s390 common I/O layer to use information - obtained by channel subsystem calls. This will enable Linux to process link - failures and resource accessibility events. Moreover, if you have procfs - enabled, you'll be able to toggle chpids logically offline and online. Even - if you don't understand what this means, you should say "Y". - Kernel support for 31 bit ELF binaries CONFIG_S390_SUPPORT Select this option if you want to enable your system kernel to @@ -24433,12 +25085,40 @@ a debugging option; you probably do not want to set it unless you are an S390 port maintainer. +Gigabit Ethernet device support +CONFIG_QETH + This driver supports the IBM S/390 and zSeries OSA Express adapters + in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN + interfaces in QDIO and HIPER mode. + + For details please refer to the documentation provided by IBM at + + + This driver is also available as a module (code which can be + inserted in and removed from the running kernel whenever you + want). If you want to compile it as a module, say 'M' here and + read file Documentation/modules.txt. + +IPv6 support for gigabit ethernet +CONFIG_QETH_IPV6 + If CONFIG_QETH is switched on, this option will include IPv6 + support in the qeth device driver. + +VLAN support for gigabit ethernet +CONFIG_QETH_VLAN + If CONFIG_QETH is switched on, this option will include IEEE + 802.1q VLAN support in the qeth device driver. + +Performance statistics in /proc +CONFIG_QETH_PERF_STATS + When switched on, this option will add a file in the proc-fs + (/proc/qeth_perf_stats) containing performance statistics. It + may slightly impact performance, so this is only recommended for + internal tuning of the device driver. + # # ARM options # -# CML2 transition note: CML1 asks ARCH_ARCA5K, then has ARCH_A5K and ARCH_ARK -# as subquestions. CML2 asks the subquestions in the armtype menu and makes -# ARCH_ARCA5K a derived symbol. ARM System type CONFIG_ARCH_ARCA5K This selects what ARM system you wish to build the kernel for. It @@ -24735,6 +25415,46 @@ Say Y if you want support for the ARM1020 processor. Otherwise, say N. +Disable I-Cache +CONFIG_CPU_ICACHE_DISABLE + Say Y here to disable the processor instruction cache. Unless + you have a reason not to or are unsure, say N. + +Disable D-Cache +CONFIG_CPU_DCACHE_DISABLE + Say Y here to disable the processor data cache. Unless + you have a reason not to or are unsure, say N. + +Force write through D-cache +CONFIG_CPU_DCACHE_WRITETHROUGH + Say Y here to use the data cache in write-through mode. Unless you + specifically require this or are unsure, say N. + +Round robin I and D cache replacement algorithm +CONFIG_CPU_CACHE_ROUND_ROBIN + Say Y here to use the predictable round-robin cache replacement + policy. Unless you specifically require this or are unsure, say N. + +Disable branch prediction +CONFIG_CPU_BPREDICT_DISABLE + Say Y here to disable branch prediction. If unsure, say N. + +Compressed boot loader in ROM/flash +CONFIG_ZBOOT_ROM + Say Y here if you intend to execute your compressed kernel image (zImage) + directly from ROM or flash. If unsure, say N. + +Compressed ROM boot loader base address +CONFIG_ZBOOT_ROM_TEXT + The base address for zImage. Unless you have special requirements, you + should not change this value. 
+ +Compressed ROM boot loader BSS address +CONFIG_ZBOOT_ROM_BSS + The base address of 64KiB of read/write memory, which must be available + while the decompressor is running. Unless you have special requirements, + you should not change this value. + Support StrongARM SA-110 processor CONFIG_CPU_SA110 The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and @@ -25250,6 +25970,44 @@ brave people. System crashes and other bad things are likely to occur if you use this driver. If in doubt, select N. +Tieman Voyager USB Braille display support (EXPERIMENTAL) +CONFIG_USB_BRLVGER + Say Y here if you want to use the Voyager USB Braille display from + Tieman. See for more + information. + + This code is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called brlvger.o. If you want to compile it as + a module, say M here and read . + +KB Gear JamStudio tablet support +CONFIG_USB_KBTAB + Say Y here if you want to use the USB version of the KB Gear + JamStudio tablet. Make sure to say Y to "Mouse support" + (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support" + (CONFIG_INPUT_EVDEV) as well. + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called kbtab.o. If you want to compile it as a + module, say M here and read . + +USB Inside Out Edgeport Serial Driver (TI devices) +CONFIG_USB_SERIAL_EDGEPORT_TI + Say Y here if you want to use any of the devices from Inside Out + Networks (Digi) that are not supported by the io_edgeport driver. + This includes the Edgeport/1 device. + + This code is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called io_ti.o. If you want to compile it + as a module, say M here and read . + +USB Keyspan MPR Firmware +CONFIG_USB_SERIAL_KEYSPAN_MPR + Say Y here to include firmware for the Keyspan MPR converter. + Winbond W83977AF IrDA Device Driver CONFIG_WINBOND_FIR Say Y here if you want to build IrDA support for the Winbond @@ -25320,7 +26078,7 @@ CONFIG_ALI_FIR Say Y here if you want to build support for the ALi M5123 FIR Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C, - M1535, M1535D, M1535+, M1535D Sourth Bridge. This driver supports + M1535, M1535D, M1535+, M1535D South Bridge. This driver supports SIR, MIR and FIR (4Mbps) speeds. If you want to compile it as a module, say M here and read @@ -26497,6 +27255,11 @@ best used in conjunction with the NMI watchdog so that spinlock deadlocks are also debuggable. +Additional run-time checks +CONFIG_CHECKING + Enables some internal consistency checks for kernel debugging. + You should normally say N. + Read-write spinlock debugging CONFIG_DEBUG_RWLOCK If you say Y here then read-write lock processing will count how many @@ -26516,6 +27279,14 @@ of the BUG call as well as the EIP and oops trace. This aids debugging but costs about 70-100K of memory. +Morse code panics +CONFIG_PANIC_MORSE + Say Y here to receive panic messages in morse code on your keyboard LEDs, and + optionally the PC speaker, if available. + The kernel param "panicblink" controls this feature, set it to 0 to disable, + 1 for LEDs only, 2 for pc speaker, or 3 for both. If you disable this option, + then you will receive a steady blink on the LEDs instead. 
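 As a rough illustration only: a kernel boot parameter such as "panicblink" is normally wired up through the 2.4 __setup() mechanism. The handler below is a hypothetical sketch, not the code added by this patch; the variable and function names are invented.

  #include <linux/init.h>
  #include <linux/kernel.h>

  /* 0 = off, 1 = LEDs only, 2 = PC speaker, 3 = both */
  static int panicblink = 3;

  static int __init panicblink_setup(char *str)
  {
          panicblink = simple_strtoul(str, NULL, 0);
          return 1;
  }

  __setup("panicblink=", panicblink_setup);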
+ Include kgdb kernel debugger CONFIG_KGDB Include in-kernel hooks for kgdb, the Linux kernel source level @@ -26561,9 +27332,11 @@ U2/Uturn I/O MMU CONFIG_IOMMU_CCIO - Say Y here to enable DMA management routines for the first - generation of PA-RISC cache-coherent machines. Programs the - U2/Uturn chip in "Virtual Mode" and use the I/O MMU. + The U2/UTurn is a bus converter with io mmu present in the Cxxx, D, + J, K, and R class machines. Compiling this driver into the kernel will + not hurt anything, removing it will reduce your kernel by about 14k. + + If unsure, say Y. LBA/Elroy PCI support CONFIG_PCI_LBA @@ -26977,20 +27750,13 @@ Say Y if you want support for the ARM926T processor. Otherwise, say N. -Support CPU clock change (EXPERIMENTAL) -CONFIG_CPU_FREQ - CPU clock scaling allows you to change the clock speed of the - running CPU on the fly. This is a nice method to save battery power, - because the lower the clock speed, the less power the CPU - consumes. Note that this driver doesn't automatically change the CPU - clock speed, you need some userland tools (which still have to be - written) to implement the policy. If you don't understand what this - is all about, it's safe to say 'N'. - SiS CONFIG_DRM_SIS - Choose this option if you have a SIS graphics card. AGP support is - required for this driver to work. + Choose this option if you have a SIS 300 series graphics card or + VGA controller (300, 305, 540, 630, 730). + + AGP support as well as the SiS framebuffer driver are required + for this driver to work. Etrax Ethernet slave support (over lp0/1) CONFIG_ETRAX_ETHERNET_LPSLAVE @@ -27000,7 +27766,7 @@ Slave has its own LEDs CONFIG_ETRAX_ETHERNET_LPSLAVE_HAS_LEDS - Enable if the slave has it's own LEDs. + Enable if the slave has its own LEDs. ATA/IDE support CONFIG_ETRAX_IDE @@ -27170,6 +27936,12 @@ If unsure, say N. +Hotplug firmware loading support (EXPERIMENTAL) +CONFIG_FW_LOADER + This option is provided for the case where no in-kernel-tree modules require + hotplug firmware loading support, but a module built outside the kernel tree + does. + NatSemi SCx200 support CONFIG_SCx200 This provides basic support for the National Semiconductor SCx200 @@ -27331,7 +28103,7 @@ If compiled as a module, it will be called uclinux.o. NatSemi SCx200 I2C using GPIO pins -CONFIG_SCx200_GPIO +CONFIG_SCx200_I2C Enable the use of two GPIO pins of a SCx200 processor as an I2C bus. If you don't know what to do here, say N. @@ -27393,7 +28165,229 @@ This option is provided for the case where no in-kernel-tree modules require CRC32 functions, but a module built outside the kernel tree does. Such modules that use library CRC32 functions - require that you say M or Y here. + require M here. + +CONFIG_CPU_FREQ + Clock scaling allows you to change the clock speed of CPUs on the + fly. This is a nice method to save battery power on notebooks, + because the lower the clock speed, the less power the CPU consumes. + + For more information, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_CPU_FREQ_TABLE + Many CPUFreq drivers use these helpers, so only say N here if + the CPUFreq driver of your choice doesn't need these helpers. + + If in doubt, say Y. + + Wolfson AC97 Touchscreen support (EXPERIMENTAL) +CONFIG_SOUND_WM97XX + Say Y here to support the Wolfson WM9705 and WM9712 touchscreen + controllers. These controllers are mainly found in PDA's + i.e. Dell Axim and Toshiba e740 + + This is experimental code. 
+ Please see Documentation/wolfson-touchscreen.txt for + a complete list of parameters. + + In order to use this driver, a char device called wm97xx with a major + number of 10 and minor number 16 will have to be created under + /dev/touchscreen. + + e.g. + mknod /dev/touchscreen/wm97xx c 10 16 + + If you want to compile this as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here. The module will be called ac97_plugin_wm97xx.o. + + If unsure, say N. + + +CONFIG_CPU_FREQ_24_API + This enables the /proc/sys/cpu/ sysctl interface for controlling + CPUFreq, as known from the 2.4.-kernel patches for CPUFreq. 2.5 + uses /proc/cpufreq instead. Please note that some drivers do not + work well with the 2.4. /proc/sys/cpu sysctl interface, so if in + doubt, say N here. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_X86_POWERNOW_K6 + This adds the CPUFreq driver for mobile AMD K6-2+ and mobile + AMD K6-3+ processors. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_X86_POWERNOW_K7 + This adds the CPUFreq driver for mobile AMD Athlon/Duron + K7 processors. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_X86_P4_CLOCKMOD + This adds the CPUFreq driver for Intel Pentium 4 / XEON + processors. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_ELAN_CPUFREQ + This adds the CPUFreq driver for AMD Elan SC400 and SC410 + processors. + + You need to specify the processor maximum speed as boot + parameter: elanfreq=maxspeed (in kHz) or as module + parameter "max_freq". + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_X86_LONGHAUL + This adds the CPUFreq driver for VIA Samuel/CyrixIII, + VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T + processors. + + If you do not want to scale the Front Side Bus or voltage, + pass the module parameter "dont_scale_fsb=1" or + "dont_scale_voltage=1". Additionally, it is advised that + you pass the current Front Side Bus speed (in MHz) to + this module as module parameter "current_fsb", e.g. + "current_fsb=133" for a Front Side Bus speed of 133 MHz. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_X86_SPEEDSTEP_ICH + This adds the CPUFreq driver for certain mobile Intel Pentium III + (Coppermine), all mobile Intel Pentium III-M (Tulaatin) and all + mobile Intel Pentium 4 P4-Ms on chipsets with an Intel ICH2, ICH3, + or ICH4 southbridge. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_X86_SPEEDSTEP_CENTRINO + This adds the CPUFreq driver for Enhanced SpeedStep enabled + mobile CPUs. This means Intel Pentium M (Centrino) CPUs. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_X86_LONGRUN + This adds the CPUFreq driver for Transmeta Crusoe processors which + support LongRun. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +CONFIG_X86_GX_SUSPMOD + This adds the CPUFreq driver for NatSemi Geode processors which + support suspend modulation. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. 
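 For reference, driver module parameters such as the longhaul "dont_scale_fsb"/"current_fsb" or the elanfreq "max_freq" mentioned above are normally declared through the 2.4 MODULE_PARM interface. A minimal, hypothetical sketch (the rest of the driver is omitted):

  #include <linux/module.h>

  static int dont_scale_fsb;        /* 1 = leave the Front Side Bus alone */
  static int current_fsb;           /* current Front Side Bus speed, MHz  */

  MODULE_PARM(dont_scale_fsb, "i");
  MODULE_PARM(current_fsb, "i");
  MODULE_PARM_DESC(dont_scale_fsb, "Don't scale the Front Side Bus");
  MODULE_PARM_DESC(current_fsb, "Current Front Side Bus speed in MHz");

 The parameters are then passed at load time, e.g. "modprobe longhaul current_fsb=133" as in the help text above.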
+ +CONFIG_CPU_FREQ_GOV_USERSPACE + Enable this cpufreq governor when you either want to set the + CPU frequency manually or when an userspace programm shall + be able to set the CPU dynamically, like on LART + ( http://www.lart.tudelft.nl/ ) + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say Y. + +Chassis LCD and LED support +CONFIG_CHASSIS_LCD_LED + Say Y here if you want to enable support for the Heartbeat, + Disk/Network activities LEDs on some PA-RISC machines, + or support for the LCD that can be found on recent material. + + This has nothing to do with LED State support for A, J and E class. + + If unsure, say Y. + +VSC/GSC/HSC bus support +CONFIG_GSC + The VSC, GSC and HSC busses were used from the earliest 700-series + workstations up to and including the C360/J2240 workstations. They + were also used in servers from the E-class to the K-class. They + are not found in B1000, C3000, J5000, A500, L1000, N4000 and upwards. + If in doubt, say "Y". + +Wax I/O support +CONFIG_GSC_WAX + Say Y here to support the Wax multifunction chip found in some + older systems, including B/C/D/R class and 715/64, 715/80 and + 715/100. Wax includes an EISA adapter, a serial port (not always + used), a HIL interface chip and is also known to be used as the + GSC bridge for an X.25 GSC card. + +GSCtoPCI/Dino PCI support +CONFIG_GSC_DINO + Say Y here to support the Dino & Cujo GSC to PCI bridges found in + machines from the B132 to the C360, the J2240 and the A180. Some + GSC/HSC cards (eg gigabit & dual 100 Mbit Ethernet) have a Dino on + the card, and you also need to say Y here if you have such a card. + Note that Dino also supplies one of the serial ports on certain + machines. If in doubt, say Y. + +HPET timers +CONFIG_HPET_TIMER + Use the IA-PC HPET (High Precision Event Timer) to manage + time in preference to the PIT and RTC, if a HPET is + present. The HPET provides a stable time base on SMP + systems, unlike the RTC, but it is more expensive to access, + as it is off-chip. You can find the HPET spec at + . + + If unsure, say Y. + +IOMMU support +CONFIG_GART_IOMMU + Support the K8 IOMMU. Needed to run systems with more than 4GB of memory + properly with 32-bit PCI devices that do not support DAC (Double Address + Cycle). The IOMMU can be turned off at runtime with the iommu=off parameter. + Normally the kernel will take the right choice by itself. + If unsure say Y + +Debug __init statements +CONFIG_INIT_DEBUG + Fill __init and __initdata at the end of boot. This helps debugging + invalid uses of __init and __initdata after initialization. + +Force IOMMU to on +CONFIG_IOMMU_DEBUG + Force the IOMMU to on even when you have less than 4GB of memory and add + debugging code. + Can be disabled at boot time with iommu=noforce. + +IOMMU leak tracing +CONFIG_IOMMU_LEAK + Add a simple leak tracer to the IOMMU code. This is useful when you + are debugging a buggy device driver that leaks IOMMU mappings. + +pSeries Hypervisor Virtual Console support +CONFIG_HVC_CONSOLE + pSeries machines when partitioned support a hypervisor virtual + console. This driver allows each pSeries partition to have a console + which is accessed via the HMC. CONFIG_CRYPTO This option provides the core Cryptographic API. 
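 To make the CONFIG_INIT_DEBUG entry above concrete: the option fills (poisons) code and data carrying the __init/__initdata annotations once booting has finished. A trivial, hypothetical example of such annotations, not taken from this patch:

  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/errno.h>

  static int boot_speed __initdata = 133;    /* discarded after boot */

  static int __init mydrv_init(void)
  {
          /* may use boot_speed here, but never after initialization */
          return boot_speed ? 0 : -ENODEV;
  }

  module_init(mydrv_init);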
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/cpu-freq/core.txt linux.22-ac2/Documentation/cpu-freq/core.txt --- linux.vanilla/Documentation/cpu-freq/core.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/cpu-freq/core.txt 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,94 @@ + CPU frequency and voltage scaling code in the Linux(TM) kernel + + + L i n u x C P U F r e q + + C P U F r e q C o r e + + + Dominik Brodowski + David Kimdon + + + + Clock scaling allows you to change the clock speed of the CPUs on the + fly. This is a nice method to save battery power, because the lower + the clock speed, the less power the CPU consumes. + + +Contents: +--------- +1. CPUFreq core and interfaces +2. CPUFreq notifiers + +1. General Information +======================= + +The CPUFreq core code is located in linux/kernel/cpufreq.c. This +cpufreq code offers a standardized interface for the CPUFreq +architecture drivers (those pieces of code that do actual +frequency transitions), as well as to "notifiers". These are device +drivers or other part of the kernel that need to be informed of +policy changes (ex. thermal modules like ACPI) or of all +frequency changes (ex. timing code) or even need to force certain +speed limits (like LCD drivers on ARM architecture). Additionally, the +kernel "constant" loops_per_jiffy is updated on frequency changes +here. + +Reference counting is done by cpufreq_get_cpu and cpufreq_put_cpu, +which make sure that the cpufreq processor driver is correctly +registered with the core, and will not be unloaded until +cpufreq_put_cpu is called. + +2. CPUFreq notifiers +==================== + +CPUFreq notifiers conform to the standard kernel notifier interface. +See linux/include/linux/notifier.h for details on notifiers. + +There are two different CPUFreq notifiers - policy notifiers and +transition notifiers. + + +2.1 CPUFreq policy notifiers +---------------------------- + +These are notified when a new policy is intended to be set. Each +CPUFreq policy notifier is called three times for a policy transition: + +1.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if + they see a need for this - may it be thermal considerations or + hardware limitations. + +2.) During CPUFREQ_INCOMPATIBLE only changes may be done in order to avoid + hardware failure. + +3.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy + - if two hardware drivers failed to agree on a new policy before this + stage, the incompatible hardware shall be shut down, and the user + informed of this. + +The phase is specified in the second argument to the notifier. + +The third argument, a void *pointer, points to a struct cpufreq_policy +consisting of five values: cpu, min, max, policy and max_cpu_freq. min +and max are the lower and upper frequencies (in kHz) of the new +policy, policy the new policy, cpu the number of the affected CPU; and +max_cpu_freq the maximum supported CPU frequency. This value is given +for informational purposes only. + + +2.2 CPUFreq transition notifiers +-------------------------------- + +These are notified twice when the CPUfreq driver switches the CPU core +frequency and this change has any external implications. + +The second argument specifies the phase - CPUFREQ_PRECHANGE or +CPUFREQ_POSTCHANGE. 
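 A minimal sketch of a transition notifier, assuming the core exports cpufreq_register_notifier() and a CPUFREQ_TRANSITION_NOTIFIER list as in the mainline cpufreq code; the cpufreq_freqs fields used here are described immediately below, and all "freq_transition" names are invented:

  #include <linux/kernel.h>
  #include <linux/notifier.h>
  #include <linux/cpufreq.h>

  static int freq_transition(struct notifier_block *nb, unsigned long phase,
                             void *data)
  {
          struct cpufreq_freqs *freqs = data;

          if (phase == CPUFREQ_POSTCHANGE)
                  printk(KERN_INFO "cpu %u: %u kHz -> %u kHz\n",
                         freqs->cpu, freqs->old, freqs->new);
          return 0;
  }

  static struct notifier_block freq_transition_nb = {
          .notifier_call = freq_transition,
  };

  /* during driver initialization:
   *      cpufreq_register_notifier(&freq_transition_nb,
   *                                CPUFREQ_TRANSITION_NOTIFIER);
   */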
+ +The third argument is a struct cpufreq_freqs with the following +values: +cpu - number of the affected CPU +old - old frequency +new - new frequency diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/cpu-freq/cpu-drivers.txt linux.22-ac2/Documentation/cpu-freq/cpu-drivers.txt --- linux.vanilla/Documentation/cpu-freq/cpu-drivers.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/cpu-freq/cpu-drivers.txt 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,210 @@ + CPU frequency and voltage scaling code in the Linux(TM) kernel + + + L i n u x C P U F r e q + + C P U D r i v e r s + + - information for developers - + + + Dominik Brodowski + + + + Clock scaling allows you to change the clock speed of the CPUs on the + fly. This is a nice method to save battery power, because the lower + the clock speed, the less power the CPU consumes. + + +Contents: +--------- +1. What To Do? +1.1 Initialization +1.2 Per-CPU Initialization +1.3 verify +1.4 target or setpolicy? +1.5 target +1.6 setpolicy +2. Frequency Table Helpers + + + +1. What To Do? +============== + +So, you just got a brand-new CPU / chipset with datasheets and want to +add cpufreq support for this CPU / chipset? Great. Here are some hints +on what is necessary: + + +1.1 Initialization +------------------ + +First of all, in an __initcall level 7 (module_init()) or later +function check whether this kernel runs on the right CPU and the right +chipset. If so, register a struct cpufreq_driver with the CPUfreq core +using cpufreq_register_driver() + +What shall this struct cpufreq_driver contain? + +cpufreq_driver.name - The name of this driver. + +cpufreq_driver.owner - THIS_MODULE; + +cpufreq_driver.init - A pointer to the per-CPU initialization + function. + +cpufreq_driver.verify - A pointer to a "verification" function. + +cpufreq_driver.setpolicy _or_ +cpufreq_driver.target - See below on the differences. + +And optionally + +cpufreq_driver.exit - A pointer to a per-CPU cleanup function. + +cpufreq_driver.attr - A pointer to a NULL-terminated list of + "struct freq_attr" which allow to + export values to sysfs. + + +1.2 Per-CPU Initialization +-------------------------- + +Whenever a new CPU is registered with the device model, or after the +cpufreq driver registers itself, the per-CPU initialization function +cpufreq_driver.init is called. It takes a struct cpufreq_policy +*policy as argument. What to do now? + +If necessary, activate the CPUfreq support on your CPU. + +Then, the driver must fill in the following values: + +policy->cpuinfo.min_freq _and_ +policy->cpuinfo.max_freq - the minimum and maximum frequency + (in kHz) which is supported by + this CPU +policy->cpuinfo.transition_latency the time it takes on this CPU to + switch between two frequencies (if + appropriate, else specify + CPUFREQ_ETERNAL) + +policy->cur The current operating frequency of + this CPU (if appropriate) +policy->min, +policy->max, +policy->policy and, if necessary, +policy->governor must contain the "default policy" for + this CPU. A few moments later, + cpufreq_driver.verify and either + cpufreq_driver.setpolicy or + cpufreq_driver.target is called with + these values. + +For setting some of these values, the frequency table helpers might be +helpful. See the section 2 for more information on them. 
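 A minimal sketch of such a per-CPU init routine for a hypothetical chip with a 100-400 MHz range (the function name and the frequencies are invented; only the fields listed above are filled in):

  #include <linux/cpufreq.h>

  static int mychip_cpu_init(struct cpufreq_policy *policy)
  {
          policy->cpuinfo.min_freq = 100000;               /* in kHz */
          policy->cpuinfo.max_freq = 400000;               /* in kHz */
          policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

          policy->cur = 400000;                 /* current speed, kHz */
          policy->min = policy->cpuinfo.min_freq;
          policy->max = policy->cpuinfo.max_freq;
          policy->policy = CPUFREQ_POLICY_PERFORMANCE;

          return 0;
  }

 This routine would be plugged into cpufreq_driver.init as described in section 1.1.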
+ + +1.3 verify +------------ + +When the user decides a new policy (consisting of +"policy,governor,min,max") shall be set, this policy must be validated +so that incompatible values can be corrected. For verifying these +values, a frequency table helper and/or the +cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned +int min_freq, unsigned int max_freq) function might be helpful. See +section 2 for details on frequency table helpers. + +You need to make sure that at least one valid frequency (or operating +range) is within policy->min and policy->max. If necessary, increase +policy->max fist, and only if this is no solution, decreas policy->min. + + +1.4 target or setpolicy? +---------------------------- + +Most cpufreq drivers or even most cpu frequency scaling algorithms +only allow the CPU to be set to one frequency. For these, you use the +->target call. + +Some cpufreq-capable processors switch the frequency between certain +limits on their own. These shall use the ->setpolicy call + + +1.4. target +------------- + +The target call has three arguments: struct cpufreq_policy *policy, +unsigned int target_frequency, unsigned int relation. + +The CPUfreq driver must set the new frequency when called here. The +actual frequency must be determined using the following rules: + +- keep close to "target_freq" +- policy->min <= new_freq <= policy->max (THIS MUST BE VALID!!!) +- if relation==CPUFREQ_REL_L, try to select a new_freq higher than or equal + target_freq. ("L for lowest, but no lower than") +- if relation==CPUFREQ_REL_H, try to select a new_freq lower than or equal + target_freq. ("H for highest, but no higher than") + +Here again the frequency table helper might assist you - see section 3 +for details. + + +1.5 setpolicy +--------------- + +The setpolicy call only takes a struct cpufreq_policy *policy as +argument. You need to set the lower limit of the in-processor or +in-chipset dynamic frequency switching to policy->min, the upper limit +to policy->max, and -if supported- select a performance-oriented +setting when policy->policy is CPUFREQ_POLICY_PERFORMANCE, and a +powersaving-oriented setting when CPUFREQ_POLICY_POWERSAVE. Also check +the reference implementation in arch/i386/kernel/cpu/cpufreq/longrun.c + + + +2. Frequency Table Helpers +========================== + +As most cpufreq processors only allow for being set to a few specific +frequencies, a "frequency table" with some functions might assist in +some work of the processor driver. Such a "frequency table" consists +of an array of struct cpufreq_freq_table entries, with any value in +"index" you want to use, and the corresponding frequency in +"frequency". At the end of the table, you need to add a +cpufreq_freq_table entry with frequency set to CPUFREQ_TABLE_END. And +if you want to skip one entry in the table, set the frequency to +CPUFREQ_ENTRY_INVALID. The entries don't need to be in ascending +order. + +By calling cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); +the cpuinfo.min_freq and cpuinfo.max_freq values are detected, and +policy->min and policy->max are set to the same values. This is +helpful for the per-CPU initialization stage. + +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); +assures that at least one valid frequency is within policy->min and +policy->max, and all other criteria are met. This is helpful for the +->verify call. 
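 Continuing the hypothetical driver sketched above, a frequency table and the matching ->verify implementation could look like this (the frequencies are invented; the helper is the one declared just above):

  #include <linux/cpufreq.h>

  static struct cpufreq_frequency_table mychip_freqs[] = {
          { 0, 100000 },                   /* index 0, 100000 kHz */
          { 1, 200000 },                   /* index 1, 200000 kHz */
          { 2, CPUFREQ_ENTRY_INVALID },    /* skipped entry       */
          { 3, 400000 },                   /* index 3, 400000 kHz */
          { 0, CPUFREQ_TABLE_END },
  };

  static int mychip_verify(struct cpufreq_policy *policy)
  {
          return cpufreq_frequency_table_verify(policy, mychip_freqs);
  }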
+ +int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int target_freq, + unsigned int relation, + unsigned int *index); + +is the corresponding frequency table helper for the ->target +stage. Just pass the values to this function, and the unsigned int +index returns the number of the frequency table entry which contains +the frequency the CPU shall be set to. PLEASE NOTE: This is not the +"index" which is in this cpufreq_table_entry.index, but instead +cpufreq_table[index]. So, the new frequency is +cpufreq_table[index].frequency, and the value you stored into the +frequency table "index" field is +cpufreq_table[index].index. + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/cpu-freq/governors.txt linux.22-ac2/Documentation/cpu-freq/governors.txt --- linux.vanilla/Documentation/cpu-freq/governors.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/cpu-freq/governors.txt 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,155 @@ + CPU frequency and voltage scaling code in the Linux(TM) kernel + + + L i n u x C P U F r e q + + C P U F r e q G o v e r n o r s + + - information for users and developers - + + + Dominik Brodowski + + + + Clock scaling allows you to change the clock speed of the CPUs on the + fly. This is a nice method to save battery power, because the lower + the clock speed, the less power the CPU consumes. + + +Contents: +--------- +1. What is a CPUFreq Governor? + +2. Governors In the Linux Kernel +2.1 Performance +2.2 Powersave +2.3 Userspace + +3. The Governor Interface in the CPUfreq Core + + + +1. What Is A CPUFreq Governor? +============================== + +Most cpufreq drivers (in fact, all except one, longrun) or even most +cpu frequency scaling algorithms only offer the CPU to be set to one +frequency. In order to offer dynamic frequency scaling, the cpufreq +core must be able to tell these drivers of a "target frequency". So +these specific drivers will be transformed to offer a "->target" +call instead of the existing "->setpolicy" call. For "longrun", all +stays the same, though. + +How to decide what frequency within the CPUfreq policy should be used? +That's done using "cpufreq governors". Two are already in this patch +-- they're the already existing "powersave" and "performance" which +set the frequency statically to the lowest or highest frequency, +respectively. At least two more such governors will be ready for +addition in the near future, but likely many more as there are various +different theories and models about dynamic frequency scaling +around. Using such a generic interface as cpufreq offers to scaling +governors, these can be tested extensively, and the best one can be +selected for each specific use. + +Basically, it's the following flow graph: + +CPU can be set to switch independetly | CPU can only be set + within specific "limits" | to specific frequencies + + "CPUfreq policy" + consists of frequency limits (policy->{min,max}) + and CPUfreq governor to be used + / \ + / \ + / the cpufreq governor decides + / (dynamically or statically) + / what target_freq to set within + / the limits of policy->{min,max} + / \ + / \ + Using the ->setpolicy call, Using the ->target call, + the limits and the the frequency closest + "policy" is set. to target_freq is set. + It is assured that it + is within policy->{min,max} + + +2. 
Governors In the Linux Kernel +================================ + +2.1 Performance +--------------- + +The CPUfreq governor "performance" sets the CPU statically to the +highest frequency within the borders of scaling_min_freq and +scaling_max_freq. + + +2.1 Powersave +------------- + +The CPUfreq governor "powersave" sets the CPU statically to the +lowest frequency within the borders of scaling_min_freq and +scaling_max_freq. + + +2.2 Userspace +------------- + +The CPUfreq governor "userspace" allows the user, or any userspace +program running with UID "root", to set the CPU to a specific frequency +by making a sysfs file "scaling_setspeed" available in the CPU-device +directory. + + + +3. The Governor Interface in the CPUfreq Core +============================================= + +A new governor must register itself with the CPUfreq core using +"cpufreq_register_governor". The struct cpufreq_governor, which has to +be passed to that function, must contain the following values: + +governor->name - A unique name for this governor +governor->governor - The governor callback function +governor->owner - .THIS_MODULE for the governor module (if + appropriate) + +The governor->governor callback is called with the current (or to-be-set) +cpufreq_policy struct for that CPU, and an unsigned int event. The +following events are currently defined: + +CPUFREQ_GOV_START: This governor shall start its duty for the CPU + policy->cpu +CPUFREQ_GOV_STOP: This governor shall end its duty for the CPU + policy->cpu +CPUFREQ_GOV_LIMITS: The limits for CPU policy->cpu have changed to + policy->min and policy->max. + +If you need other "events" externally of your driver, _only_ use the +cpufreq_governor_l(unsigned int cpu, unsigned int event) call to the +CPUfreq core to ensure proper locking. + + +The CPUfreq governor may call the CPU processor driver using one of +these two functions: + +int cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); + +int __cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); + +target_freq must be within policy->min and policy->max, of course. +What's the difference between these two functions? When your governor +still is in a direct code path of a call to governor->governor, the +per-CPU cpufreq lock is still held in the cpufreq core, and there's +no need to lock it again (in fact, this would cause a deadlock). So +use __cpufreq_driver_target only in these cases. In all other cases +(for example, when there's a "daemonized" function that wakes up +every second), use cpufreq_driver_target to lock the cpufreq per-CPU +lock before the command is passed to the cpufreq processor driver. + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/cpu-freq/index.txt linux.22-ac2/Documentation/cpu-freq/index.txt --- linux.vanilla/Documentation/cpu-freq/index.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/cpu-freq/index.txt 2003-06-29 16:10:47.000000000 +0100 @@ -0,0 +1,56 @@ + CPU frequency and voltage scaling code in the Linux(TM) kernel + + + L i n u x C P U F r e q + + + + + Dominik Brodowski + + + + Clock scaling allows you to change the clock speed of the CPUs on the + fly. This is a nice method to save battery power, because the lower + the clock speed, the less power the CPU consumes. 
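 Referring back to the governor interface described in governors.txt above, a minimal governor sketch might look like the following (the governor name and all symbols are invented, error handling is omitted, and the relation constant follows the naming used in cpu-drivers.txt, section 1.4):

  #include <linux/module.h>
  #include <linux/cpufreq.h>

  static int pin_to_max(struct cpufreq_policy *policy, unsigned int event)
  {
          switch (event) {
          case CPUFREQ_GOV_START:
          case CPUFREQ_GOV_LIMITS:
                  /* still in the governor->governor code path, so the
                   * per-CPU lock is held: use the unlocked variant */
                  __cpufreq_driver_target(policy, policy->max, CPUFREQ_REL_H);
                  break;
          case CPUFREQ_GOV_STOP:
                  break;
          }
          return 0;
  }

  static struct cpufreq_governor gov_pin_to_max = {
          .name     = "pin_to_max",
          .governor = pin_to_max,
          .owner    = THIS_MODULE,
  };

  /* in the module's init function:
   *      cpufreq_register_governor(&gov_pin_to_max);
   */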
+ + + +Documents in this directory: +---------------------------- +core.txt - General description of the CPUFreq core and + of CPUFreq notifiers + +cpu-drivers.txt - How to implement a new cpufreq processor driver + +governors.txt - What are cpufreq governors and how to + implement them? + +index.txt - File index, Mailing list and Links (this document) + +user-guide.txt - User Guide to CPUFreq + + +Mailing List +------------ +There is a CPU frequency changing CVS commit and general list where +you can report bugs, problems or submit patches. To post a message, +send an email to cpufreq@www.linux.org.uk, to subscribe go to +http://www.linux.org.uk/mailman/listinfo/cpufreq. Previous post to the +mailing list are available to subscribers at +http://www.linux.org.uk/mailman/private/cpufreq/. + + +Links +----- +the FTP archives: +* ftp://ftp.linux.org.uk/pub/linux/cpufreq/ + +how to access the CVS repository: +* http://cvs.arm.linux.org.uk/ + +the CPUFreq Mailing list: +* http://www.linux.org.uk/mailman/listinfo/cpufreq + +Clock and voltage scaling for the SA-1100: +* http://www.lart.tudelft.nl/projects/scaling diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/cpu-freq/user-guide.txt linux.22-ac2/Documentation/cpu-freq/user-guide.txt --- linux.vanilla/Documentation/cpu-freq/user-guide.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/cpu-freq/user-guide.txt 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,182 @@ + CPU frequency and voltage scaling code in the Linux(TM) kernel + + + L i n u x C P U F r e q + + U S E R G U I D E + + + Dominik Brodowski + + + + Clock scaling allows you to change the clock speed of the CPUs on the + fly. This is a nice method to save battery power, because the lower + the clock speed, the less power the CPU consumes. + + +Contents: +--------- +1. Supported Architectures and Processors +1.1 ARM +1.2 x86 +1.3 sparc64 +1.4 ppc +1.5 SuperH + +2. "Policy" / "Governor"? +2.1 Policy +2.2 Governor + +3. How to change the CPU cpufreq policy and/or speed +3.1 Preferred interface: sysfs +3.2 Deprecated interfaces + + + +1. Supported Architectures and Processors +========================================= + +1.1 ARM +------- + +The following ARM processors are supported by cpufreq: + +ARM Integrator +ARM-SA1100 +ARM-SA1110 + + +1.2 x86 +------- + +The following processors for the x86 architecture are supported by cpufreq: + +AMD Elan - SC400, SC410 +AMD mobile K6-2+ +AMD mobile K6-3+ +AMD mobile Duron +AMD mobile Athlon +Cyrix Media GXm +Intel mobile PIII and Intel mobile PIII-M on certain chipsets +Intel Pentium 4, Intel Xeon +Intel Pentium M (Centrino) +National Semiconductors Geode GX +Transmeta Crusoe +VIA Cyrix 3 / C3 +various processors on some ACPI 2.0-compatible systems [*] + +[*] Only if "ACPI Processor Performance States" are available +to the ACPI<->BIOS interface. + + +1.3 sparc64 +----------- + +The following processors for the sparc64 architecture are supported by +cpufreq: + +UltraSPARC-III + + +1.4 ppc +------- + +Several "PowerBook" and "iBook2" notebooks are supported. + + +1.5 SuperH +---------- + +The following SuperH processors are supported by cpufreq: + +SH-3 +SH-4 + + +2. "Policy" / "Governor" ? +========================== + +Some CPU frequency scaling-capable processor switch between various +frequencies and operating voltages "on the fly" without any kernel or +user involvement. 
This guarantees very fast switching to a frequency +which is high enough to serve the user's needs, but low enough to save +power. + + +2.1 Policy +---------- + +On these systems, all you can do is select the lower and upper +frequency limit as well as whether you want more aggressive +power-saving or more instantly available processing power. + + +2.2 Governor +------------ + +On all other cpufreq implementations, these boundaries still need to +be set. Then, a "governor" must be selected. Such a "governor" decides +what speed the processor shall run within the boundaries. One such +"governor" is the "userspace" governor. This one allows the user - or +a yet-to-implement userspace program - to decide what specific speed +the processor shall run at. + + +3. How to change the CPU cpufreq policy and/or speed +==================================================== + +3.1 Preferred Interface: sysfs +------------------------------ + +The preferred interface is located in the sysfs filesystem. If you +mounted it at /sys, the cpufreq interface is located in a subdirectory +"cpufreq" within the cpu-device directory +(e.g. /sys/devices/system/cpu/cpu0/cpufreq/ for the first CPU). + +cpuinfo_min_freq : this file shows the minimum operating + frequency the processor can run at(in kHz) +cpuinfo_max_freq : this file shows the maximum operating + frequency the processor can run at(in kHz) +scaling_driver : this file shows what cpufreq driver is + used to set the frequency on this CPU + +scaling_available_governors : this file shows the CPUfreq governors + available in this kernel. You can see the + currently activated governor in + +scaling_governor, and by "echoing" the name of another + governor you can change it. Please note + that some governors won't load - they only + work on some specific architectures or + processors. +scaling_min_freq and +scaling_max_freq show the current "policy limits" (in + kHz). By echoing new values into these + files, you can change these limits. + + +If you have selected the "userspace" governor which allows you to +set the CPU operating frequency to a specific value, you can read out +the current frequency in + +scaling_setspeed. By "echoing" a new frequency into this + you can change the speed of the CPU, + but only within the limits of + scaling_min_freq and scaling_max_freq. + + +3.2 Deprecated Interfaces +------------------------- + +Depending on your kernel configuration, you might find the following +cpufreq-related files: +/proc/cpufreq +/proc/sys/cpu/*/speed +/proc/sys/cpu/*/speed-min +/proc/sys/cpu/*/speed-max + +These are files for deprecated interfaces to cpufreq, which offer far +less functionality. Because of this, these interfaces aren't described +here. + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/DocBook/atascsi-dev.tmpl linux.22-ac2/Documentation/DocBook/atascsi-dev.tmpl --- linux.vanilla/Documentation/DocBook/atascsi-dev.tmpl 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/DocBook/atascsi-dev.tmpl 2003-07-07 16:11:09.000000000 +0100 @@ -0,0 +1,60 @@ + + + + + ATA-SCSI Developer's Guide + + + + Jeff + Garzik + + + + + 2003 + Jeff Garzik + + + + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. 
+ + + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + + + + + + + + libata Library +!Edrivers/scsi/libata.c + + + + libata Internals +!Idrivers/scsi/libata.c + + + + ata_piix Internals +!Idrivers/scsi/ata_piix.c + + + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/DocBook/Makefile linux.22-ac2/Documentation/DocBook/Makefile --- linux.vanilla/Documentation/DocBook/Makefile 2002-11-29 21:27:11.000000000 +0000 +++ linux.22-ac2/Documentation/DocBook/Makefile 2003-07-07 16:11:09.000000000 +0100 @@ -2,7 +2,7 @@ kernel-api.sgml parportbook.sgml kernel-hacking.sgml \ kernel-locking.sgml via-audio.sgml mousedrivers.sgml sis900.sgml \ deviceiobook.sgml procfs-guide.sgml tulip-user.sgml \ - journal-api.sgml + journal-api.sgml atascsi-dev.sgml PS := $(patsubst %.sgml, %.ps, $(BOOKS)) PDF := $(patsubst %.sgml, %.pdf, $(BOOKS)) @@ -79,6 +79,12 @@ $(TOPDIR)/scripts/docgen $(TOPDIR)/arch/i386/kernel/mca.c \ mcabook.sgml +atascsi-dev.sgml: atascsi-dev.tmpl $(TOPDIR)/drivers/scsi/libata.c \ + $(TOPDIR)/drivers/scsi/ata_piix.c + $(TOPDIR)/scripts/docgen $(TOPDIR)/drivers/scsi/libata.c \ + $(TOPDIR)/drivers/scsi/ata_piix.c \ + < atascsi-dev.tmpl > atascsi-dev.sgml + videobook.sgml: videobook.tmpl $(TOPDIR)/drivers/media/video/videodev.c $(TOPDIR)/scripts/docgen $(TOPDIR)/drivers/media/video/videodev.c \ videobook.sgml diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/DriverFixers linux.22-ac2/Documentation/DriverFixers --- linux.vanilla/Documentation/DriverFixers 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/DriverFixers 2003-06-29 16:10:47.000000000 +0100 @@ -0,0 +1,75 @@ +People who fix drivers as a business - ie for money. (No recommendation, +business association or other relationship implied. This for the benefit of +American lawyers is just a list of people who have asked to be listed - nothing +more). 
+ +Companies Who Will Do Small Contract Work +----------------------------------------- + +Company: BitWizard +Contact: Rogier Wolff +E-Mail: R.E.Wolff@BitWizard.nl + +Company: Caederus +Contact: Justin Mitchell +E-Mail: info@caederus.com +Location: Swansea, Wales, UK +URL: http://www.caederus.com/ + +Company: Calsoft Inc +Contact: Anupam Bhide +E-Mail: anupam@calsoftinc.com +URL: http://www.calsoftinc.com +Location: Pune, India + +Company: Hansen Partnership Inc +Contact: James Bottomley +E-Mail: James.Bottomley@HansenPartnership.com +Location: 1, Partridge Square, Oswego, Illinois 60543, USA + +Company: Linking +Contact: Elmer Joandi +E-Mail: elmer@linkingsoft.com + +Company: Penguru Consulting, LLC +Contact: Komron Takmil +E-Mail: komron@penguru.net +Location: Salt Lake City, UT USA + +Company: 7Chips +Contact: Vadim Lebedev +E-Mail: vadim@7chips.com +Location: Paris, France +Notes: Experienced in Linux and uClinux on x86/ARM/Motorola + +Company: Weinigel Ingenjörsbyrå AB +Contact: Christer Weinigel +E-Mail: christer@weinigel.se +Location: Stockholm, Sweden + +Company: WildOpenSource +Contact: Martin Hicks +E-Mail: info@wildopensource.com + + +Companies Only Interested In Larger ($10000+) Jobs +-------------------------------------------------- + + + +Companies Only Interested In Very Large ($100000+) Jobs +------------------------------------------------------- + + +To be added to the list: email giving the +following information + +Company: CompanyName [Required] +Contact: ContactName [Required] +E-Mail: An email address [Required] +URL: Web site [Optional] +Location: Area/Country [Optional] +Telephone: Contact phone number [Optional] +Speciality: Any specific speciality [Optional] +Notes: Any other notes (eg certifications, specialities) + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/filesystems/00-INDEX linux.22-ac2/Documentation/filesystems/00-INDEX --- linux.vanilla/Documentation/filesystems/00-INDEX 2002-11-29 21:27:11.000000000 +0000 +++ linux.22-ac2/Documentation/filesystems/00-INDEX 2003-06-29 16:10:44.000000000 +0100 @@ -48,3 +48,5 @@ - info on using the VFAT filesystem used in Windows NT and Windows 95 vfs.txt - Overview of the Virtual File System +xfs.txt + - info and mount options for the XFS filesystem. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/filesystems/xfs.txt linux.22-ac2/Documentation/filesystems/xfs.txt --- linux.vanilla/Documentation/filesystems/xfs.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/filesystems/xfs.txt 2003-06-29 16:10:45.000000000 +0100 @@ -0,0 +1,124 @@ + +The SGI XFS Filesystem +====================== + +XFS is a high performance journaling filesystem which originated +on the SGI IRIX platform. It is completely multi-threaded, can +support large files and large filesystems, extended attributes, +variable block sizes, is extent based, and makes extensive use of +Btrees (directories, extents, free space) to aid both performance +and scalability. + +Refer to the documentation at http://oss.sgi.com/projects/xfs/ +for further details. This implementation is on-disk compatible +with the IRIX version of XFS. + + +Options +======= + +When mounting an XFS filesystem, the following options are accepted. + + biosize=size + Sets the preferred buffered I/O size (default size is 64K). + "size" must be expressed as the logarithm (base2) of the + desired I/O size. + Valid values for this option are 14 through 16, inclusive + (i.e. 
16K, 32K, and 64K bytes). On machines with a 4K + pagesize, 13 (8K bytes) is also a valid size. + The preferred buffered I/O size can also be altered on an + individual file basis using the ioctl(2) system call. + + dmapi + Enable the DMAPI (Data Management API) event callouts. + Use with the "mtpt" option. + + irixsgid + Do not inherit the ISGID bit on subdirectories of ISGID + directories, if the process creating the subdirectory + is not a member of the parent directory group ID. + This matches IRIX behavior. + + logbufs=value + Set the number of in-memory log buffers. Valid numbers range + from 2-8 inclusive. + The default value is 8 buffers for filesystems with a + blocksize of 64K, 4 buffers for filesystems with a blocksize + of 32K, 3 buffers for filesystems with a blocksize of 16K + and 2 buffers for all other configurations. Increasing the + number of buffers may increase performance on some workloads + at the cost of the memory used for the additional log buffers + and their associated control structures. + + logbsize=value + Set the size of each in-memory log buffer. + Size may be specified in bytes, or in kilobytes with a "k" suffix. + Valid sizes for version 1 and version 2 logs are 16384 (16k) and + 32768 (32k). Valid sizes for version 2 logs also include + 65536 (64k), 131072 (128k) and 262144 (256k). + The default value for machines with more than 32MB of memory + is 32768, machines with less memory use 16384 by default. + + logdev=device and rtdev=device + Use an external log (metadata journal) and/or real-time device. + An XFS filesystem has up to three parts: a data section, a log + section, and a real-time section. The real-time section is + optional, and the log section can be separate from the data + section or contained within it. + + mtpt=mountpoint + Use with the "dmapi" option. The value specified here will be + included in the DMAPI mount event, and should be the path of + the actual mountpoint that is used. + + noalign + Data allocations will not be aligned at stripe unit boundaries. + + noatime + Access timestamps are not updated when a file is read. + + norecovery + The filesystem will be mounted without running log recovery. + If the filesystem was not cleanly unmounted, it is likely to + be inconsistent when mounted in "norecovery" mode. + Some files or directories may not be accessible because of this. + Filesystems mounted "norecovery" must be mounted read-only or + the mount will fail. + + osyncisosync + Make O_SYNC writes implement true O_SYNC. WITHOUT this option, + Linux XFS behaves as if an "osyncisdsync" option is used, + which will make writes to files opened with the O_SYNC flag set + behave as if the O_DSYNC flag had been used instead. + This can result in better performance without compromising + data safety. + However if this option is not in effect, timestamp updates from + O_SYNC writes can be lost if the system crashes. + If timestamp updates are critical, use the osyncisosync option. + + quota/usrquota/uqnoenforce + User disk quota accounting enabled, and limits (optionally) + enforced. + + grpquota/gqnoenforce + Group disk quota accounting enabled and limits (optionally) + enforced. + + sunit=value and swidth=value + Used to specify the stripe unit and width for a RAID device or + a stripe volume. "value" must be specified in 512-byte block + units. 
+ If this option is not specified and the filesystem was made on + a stripe volume or the stripe width or unit were specified for + the RAID device at mkfs time, then the mount system call will + restore the value from the superblock. For filesystems that + are made directly on RAID devices, these options can be used + to override the information in the superblock if the underlying + disk layout changes after the filesystem has been created. + The "swidth" option is required if the "sunit" option has been + specified, and must be a multiple of the "sunit" value. + + nouuid + Don't check for double mounted file systems using the file system uuid. + This is useful to mount LVM snapshot volumes. + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/firmware_class/firmware_sample_driver.c linux.22-ac2/Documentation/firmware_class/firmware_sample_driver.c --- linux.vanilla/Documentation/firmware_class/firmware_sample_driver.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/firmware_class/firmware_sample_driver.c 2003-07-31 14:09:15.000000000 +0100 @@ -0,0 +1,121 @@ +/* + * firmware_sample_driver.c - + * + * Copyright (c) 2003 Manuel Estrada Sainz + * + * Sample code on how to use request_firmware() from drivers. + * + * Note that register_firmware() is currently useless. + * + */ + +#include +#include +#include +#include + +#include "linux/firmware.h" + +#define WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE +#ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE +char __init inkernel_firmware[] = "let's say that this is firmware\n"; +#endif + +static char ghost_device[] = "ghost0"; + +static void sample_firmware_load(char *firmware, int size) +{ + u8 buf[size+1]; + memcpy(buf, firmware, size); + buf[size] = '\0'; + printk("firmware_sample_driver: firmware: %s\n", buf); +} + +static void sample_probe_default(void) +{ + /* uses the default method to get the firmware */ + const struct firmware *fw_entry; + printk("firmware_sample_driver: a ghost device got inserted :)\n"); + + if(request_firmware(&fw_entry, "sample_driver_fw", ghost_device)!=0) + { + printk(KERN_ERR + "firmware_sample_driver: Firmware not available\n"); + return; + } + + sample_firmware_load(fw_entry->data, fw_entry->size); + + release_firmware(fw_entry); + + /* finish setting up the device */ +} +static void sample_probe_specific(void) +{ + /* Uses some specific hotplug support to get the firmware from + * userspace directly into the hardware, or via some sysfs file */ + + /* NOTE: This currently doesn't work */ + + printk("firmware_sample_driver: a ghost device got inserted :)\n"); + + if(request_firmware(NULL, "sample_driver_fw", ghost_device)!=0) + { + printk(KERN_ERR + "firmware_sample_driver: Firmware load failed\n"); + return; + } + + /* request_firmware blocks until userspace finished, so at + * this point the firmware should be already in the device */ + + /* finish setting up the device */ +} +static void sample_probe_async_cont(const struct firmware *fw, void *context) +{ + if(!fw){ + printk(KERN_ERR + "firmware_sample_driver: firmware load failed\n"); + return; + } + + printk("firmware_sample_driver: device pointer \"%s\"\n", + (char *)context); + sample_firmware_load(fw->data, fw->size); +} +static void sample_probe_async(void) +{ + /* Let's say that I can't sleep */ + int error; + error = request_firmware_nowait (THIS_MODULE, + "sample_driver_fw", ghost_device, + "my device pointer", + sample_probe_async_cont); + if(error){ + printk(KERN_ERR + 
"firmware_sample_driver:" + " request_firmware_nowait failed\n"); + } +} + +static int sample_init(void) +{ +#ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE + register_firmware("sample_driver_fw", inkernel_firmware, + sizeof(inkernel_firmware)); +#endif + /* since there is no real hardware insertion I just call the + * sample probe functions here */ + sample_probe_specific(); + sample_probe_default(); + sample_probe_async(); + return 0; +} +static void __exit sample_exit(void) +{ +} + +module_init (sample_init); +module_exit (sample_exit); + +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/firmware_class/hotplug-script linux.22-ac2/Documentation/firmware_class/hotplug-script --- linux.vanilla/Documentation/firmware_class/hotplug-script 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/firmware_class/hotplug-script 2003-07-31 14:09:15.000000000 +0100 @@ -0,0 +1,16 @@ +#!/bin/sh + +# Simple hotplug script sample: +# +# Both $DEVPATH and $FIRMWARE are already provided in the environment. + +HOTPLUG_FW_DIR=/usr/lib/hotplug/firmware/ + +echo 1 > /sysfs/$DEVPATH/loading +cat $HOTPLUG_FW_DIR/$FIRMWARE > /sysfs/$DEVPATH/data +echo 0 > /sysfs/$DEVPATH/loading + +# To cancel the load in case of error: +# +# echo -1 > /sysfs/$DEVPATH/loading +# diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/firmware_class/README linux.22-ac2/Documentation/firmware_class/README --- linux.vanilla/Documentation/firmware_class/README 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/firmware_class/README 2003-07-31 14:09:15.000000000 +0100 @@ -0,0 +1,58 @@ + + request_firmware() hotplug interface: + ------------------------------------ + Copyright (C) 2003 Manuel Estrada Sainz + + Why: + --- + + Today, the most extended way to use firmware in the Linux kernel is linking + it statically in a header file. Which has political and technical issues: + + 1) Some firmware is not legal to redistribute. + 2) The firmware occupies memory permanently, even though it often is just + used once. + 3) Some people, like the Debian crowd, don't consider some firmware free + enough and remove entire drivers (e.g.: keyspan). + + about in-kernel persistence: + --------------------------- + Under some circumstances, as explained below, it would be interesting to keep + firmware images in non-swappable kernel memory or even in the kernel image + (probably within initramfs). + + Note that this functionality has not been implemented. + + - Why OPTIONAL in-kernel persistence may be a good idea sometimes: + + - If the device that needs the firmware is needed to access the + filesystem. When upon some error the device has to be reset and the + firmware reloaded, it won't be possible to get it from userspace. + e.g.: + - A diskless client with a network card that needs firmware. + - The filesystem is stored in a disk behind an scsi device + that needs firmware. + - Replacing buggy DSDT/SSDT ACPI tables on boot. + Note: this would require the persistent objects to be included + within the kernel image, probably within initramfs. + + And the same device can be needed to access the filesystem or not depending + on the setup, so I think that the choice on what firmware to make + persistent should be left to userspace. + + - Why register_firmware()+__init can be useful: + - For boot devices needing firmware. 
+ - To make the transition easier: + The firmware can be declared __init and register_firmware() + called on module_init. Then the firmware is warranted to be + there even if "firmware hotplug userspace" is not there yet or + it doesn't yet provide the needed firmware. + Once the firmware is widely available in userspace, it can be + removed from the kernel. Or made optional (CONFIG_.*_FIRMWARE). + + In either case, if firmware hotplug support is there, it can move the + firmware out of kernel memory into the real filesystem for later + usage. + + Note: If persistence is implemented on top of initramfs, + register_firmware() may not be appropriate. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/i386/zero-page.txt linux.22-ac2/Documentation/i386/zero-page.txt --- linux.vanilla/Documentation/i386/zero-page.txt 1999-08-30 18:47:02.000000000 +0100 +++ linux.22-ac2/Documentation/i386/zero-page.txt 2003-06-29 16:10:46.000000000 +0100 @@ -31,6 +31,7 @@ 0x1e0 unsigned long ALT_MEM_K, alternative mem check, in Kb 0x1e8 char number of entries in E820MAP (below) +0x1e9 unsigned char number of entries in EDDBUF (below) 0x1f1 char size of setup.S, number of sectors 0x1f2 unsigned short MOUNT_ROOT_RDONLY (if !=0) 0x1f4 unsigned short size of compressed kernel-part in the @@ -66,6 +67,7 @@ 0x220 4 bytes (setup.S) 0x224 unsigned short setup.S heap end pointer 0x2d0 - 0x600 E820MAP +0x600 - 0x7D4 EDDBUF (setup.S) 0x800 string, 2K max COMMAND_LINE, the kernel commandline as copied using CL_OFFSET. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/networking/00-INDEX linux.22-ac2/Documentation/networking/00-INDEX --- linux.vanilla/Documentation/networking/00-INDEX 2003-06-14 00:11:26.000000000 +0100 +++ linux.22-ac2/Documentation/networking/00-INDEX 2003-08-13 14:10:39.000000000 +0100 @@ -97,8 +97,8 @@ sis900.txt - SiS 900/7016 Fast Ethernet device driver info. sk98lin.txt - - SysKonnect SK-98xx and SK-98xx Gigabit Ethernet Adapter family - driver info. + - Marvell Yukon Chipset / SysKonnect SK-98xx compliant Gigabit + Ethernet Adapter family driver info skfp.txt - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info. smc9.txt diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/networking/generic-hdlc.txt linux.22-ac2/Documentation/networking/generic-hdlc.txt --- linux.vanilla/Documentation/networking/generic-hdlc.txt 2003-06-14 00:11:26.000000000 +0100 +++ linux.22-ac2/Documentation/networking/generic-hdlc.txt 2003-06-29 16:10:45.000000000 +0100 @@ -1,11 +1,13 @@ -Generic HDLC layer for Linux kernel 2.4/2.5 +Generic HDLC layer Krzysztof Halasa -May, 2001 +January, 2003 Generic HDLC layer currently supports: -- Frame Relay (ANSI, CCITT and no LMI), with ARP support (no InARP), -- raw HDLC (IPv4 only), +- Frame Relay (ANSI, CCITT and no LMI), with ARP support (no InARP). + Normal (routed) and Ethernet-bridged (Ethernet device emulation) + interfaces can share a single PVC. +- raw HDLC - either IP (IPv4) interface or Ethernet device emulation. - Cisco HDLC, - PPP (uses syncppp.c), - X.25 (uses X.25 routines). @@ -15,6 +17,10 @@ - RISCom/N2 by SDL Communications Inc. - and others, some not in the official kernel. +Ethernet device emulation (using HDLC or Frame-Relay PVC) is compatible +with IEEE 802.1Q (VLANs) and 802.1D (Ethernet bridging). + + Make sure the hdlc.o and the hardware driver are loaded. 
It should create a number of "hdlc" (hdlc0 etc) network devices, one for each WAN port. You'll need the "sethdlc" utility, get it from: @@ -32,8 +38,10 @@ sethdlc hdlc0 cisco interval 10 timeout 25 or sethdlc hdlc0 rs232 clock ext - sethdlc fr lmi ansi - sethdlc create 99 + sethdlc hdlc0 fr lmi ansi + sethdlc hdlc0 create 99 + ifconfig hdlc0 up + ifconfig pvc0 localIP pointopoint remoteIP In Frame Relay mode, ifconfig master hdlc device up (without assigning any IP address to it) before using pvc devices. @@ -58,6 +66,9 @@ no-parity / crc16 / crc16-pr0 (CRC16 with preset zeros) / crc32-itu crc16-itu (CRC16 with ITU-T polynomial) / crc16-itu-pr0 - sets parity +* hdlc-eth - Ethernet device emulation using HDLC. Parity and encoding + as above. + * cisco - sets Cisco HDLC mode (IP, IPv6 and IPX supported) interval - time in seconds between keepalive packets timeout - time in seconds after last received keepalive packet before @@ -77,7 +88,12 @@ n392 - error threshold - both user and network n393 - monitored events count - both user and network -* create | delete n - FR only - adds / deletes PVC interface with DLCI #n. +Frame-Relay only: +* create n | delete n - adds / deletes PVC interface with DLCI #n. + Newly created interface will be named pvc0, pvc1 etc. + +* create ether n | delete ether n - adds a device for Ethernet-bridged + frames. The device will be named pvceth0, pvceth1 etc. @@ -104,11 +120,11 @@ If you have a problem with N2 or C101 card, you can issue the "private" -command to see port's packet descriptor rings: +command to see port's packet descriptor rings (in kernel logs): sethdlc hdlc0 private -The hardware driver have to be build with CONFIG_HDLC_DEBUG_RINGS. +The hardware driver has to be build with CONFIG_HDLC_DEBUG_RINGS. Attaching this info to bug reports would be helpful. Anyway, let me know if you have problems using this. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/networking/sk98lin.txt linux.22-ac2/Documentation/networking/sk98lin.txt --- linux.vanilla/Documentation/networking/sk98lin.txt 2003-06-14 00:11:26.000000000 +0100 +++ linux.22-ac2/Documentation/networking/sk98lin.txt 2003-08-13 14:10:39.000000000 +0100 @@ -1,11 +1,11 @@ -(C)Copyright 1999-2002 SysKonnect GmbH. +(C)Copyright 1999-2003 Marvell(R). All rights reserved =========================================================================== -sk98lin.txt created 19-Dec-2002 +sk98lin.txt created 07-Aug-2003 -Readme File for sk98lin v6.02 -SysKonnect SK-98xx Gigabit Ethernet Adapter family driver for LINUX +Readme File for sk98lin v6.15 +Marvell Yukon/SysKonnect SK-98xx Gigabit Ethernet Adapter family driver for LINUX This file contains 1 Overview @@ -27,8 +27,9 @@ 1 Overview =========== -The sk98lin driver supports the SysKonnect SK-98xx and SK-95xx family -on Linux. It has been tested with Linux on Intel/x86 machines. +The sk98lin driver supports the Marvell Yukon and SysKonnect +SK-98xx/SK-95xx compliant Gigabit Ethernet Adapter on Linux. It has +been tested with Linux on Intel/x86 machines. *** @@ -75,8 +76,8 @@ To integrate the driver permanently into the kernel, proceed as follows: 1. Select the menu "Network device support" and then "Ethernet(1000Mbit)" -2. Mark "SysKonnect SK-98xx and SK-95xx Gigabit Ethernet Adapter - family support" with (*) +2. Mark "Marvell Yukon Chipset / SysKonnect SK-98xx family support" + with (*) 3. Build a new kernel when the configuration of the above options is finished. 4. Install the new kernel. @@ -87,8 +88,8 @@ 1. 
Enable 'loadable module support' in the kernel. 2. For automatic driver start, enable the 'Kernel module loader'. 3. Select the menu "Network device support" and then "Ethernet(1000Mbit)" -4. Mark "SysKonnect SK-98xx and SK-95xx Gigabit Ethernet Adapter - family support" with (M) +4. Mark "Marvell Yukon Chipset / SysKonnect SK-98xx family support" + with (M) 5. Execute the command "make modules". 6. Execute the command "make modules_install". The appropiate modules will be installed. @@ -100,21 +101,23 @@ To load the module manually, proceed as follows: 1. Enter "modprobe sk98lin". -2. If the SysKonnect SK-98xx adapter is installed in your computer and you - have a /proc file system, execute the command: +2. If a Marvell Yukon or SysKonnect SK-98xx adapter is installed in + your computer and you have a /proc file system, execute the command: "ls /proc/net/sk98lin/" This should produce an output containing a line with the following format: eth0 eth1 ... which indicates that your adapter has been found and initialized. - NOTE 1: If you have more than one SysKonnect SK-98xx adapter installed, - the adapters will be listed as 'eth0', 'eth1', 'eth2', etc. - For each adapter, repeat steps 3 and 4 below. - - NOTE 2: If you have other Ethernet adapters installed, your SysKonnect - SK-98xx adapter will be mapped to the next available number, - e.g. 'eth1'. The mapping is executed automatically. + NOTE 1: If you have more than one Marvell Yukon or SysKonnect SK-98xx + adapter installed, the adapters will be listed as 'eth0', + 'eth1', 'eth2', etc. + For each adapter, repeat steps 3 and 4 below. + + NOTE 2: If you have other Ethernet adapters installed, your Marvell + Yukon or SysKonnect SK-98xx adapter will be mapped to the + next available number, e.g. 'eth1'. The mapping is executed + automatically. The module installation message (displayed either in a system log file or on the console) prints a line for each adapter found containing the corresponding 'ethX'. @@ -141,7 +144,7 @@ Use 'ping ' to verify the connection to other computers on your network. 5. To check the adapter configuration view /proc/net/sk98lin/[devicename]. - For example by executing: + For example by executing: "cat /proc/net/sk98lin/eth0" Unload the module @@ -184,7 +187,7 @@ unload and reload the driver. The syntax of the driver parameters is: - modprobe sk98lin parameter=value1[,value2[,value3...]] + modprobe sk98lin parameter=value1[,value2[,value3...]] where value1 refers to the first adapter, value2 to the second etc. @@ -198,7 +201,7 @@ to FULL, and on the second adapter to HALF. Then, you must enter: - modprobe sk98lin AutoNeg=On,Off DupCap=Full,Half + modprobe sk98lin AutoNeg_A=On,Off DupCap_A=Full,Half NOTE: The number of adapters that can be configured this way is limited in the driver (file skge.c, constant SK_MAX_CARD_PARAM). @@ -283,6 +286,101 @@ 4.2 Adapter Parameters ----------------------- +Connection Type +--------------- +Parameter: ConType +Values: Auto, 100FD, 100HD, 10FD, 10HD +Default: Auto + +The parameter 'ConType' is a combination of all five per-port parameters +within one single parameter. This simplifies the configuration of both ports +of an adapter card! The different values of this variable reflect the most +meaningful combinations of port parameters. 
+ +The following table shows the values of 'ConType' and the corresponding +combinations of the per-port parameters: + + ConType | DupCap AutoNeg FlowCtrl Role Speed + ----------+------------------------------------------------------ + Auto | Both On SymOrRem Auto Auto + 100FD | Full Off None Auto (ignored) 100 + 100HD | Half Off None Auto (ignored) 100 + 10FD | Full Off None Auto (ignored) 10 + 10HD | Half Off None Auto (ignored) 10 + +Stating any other port parameter together with this 'ConType' variable +will result in a merged configuration of those settings. This due to +the fact, that the per-port parameters (e.g. Speed_? ) have a higher +priority than the combined variable 'ConType'. + +NOTE: This parameter is always used on both ports of the adapter card. + +Interrupt Moderation +-------------------- +Parameter: Moderation +Values: None, Static, Dynamic +Default: None + +Interrupt moderation is employed to limit the maxmimum number of interrupts +the driver has to serve. That is, one or more interrupts (which indicate any +transmit or receive packet to be processed) are queued until the driver +processes them. When queued interrupts are to be served, is determined by the +'IntsPerSec' parameter, which is explained later below. + +Possible modes: + + -- None - No interrupt moderation is applied on the adapter card. + Therefore, each transmit or receive interrupt is served immediately + as soon as it appears on the interrupt line of the adapter card. + + -- Static - Interrupt moderation is applied on the adapter card. + All transmit and receive interrupts are queued until a complete + moderation interval ends. If such a moderation interval ends, all + queued interrupts are processed in one big bunch without any delay. + The term 'static' reflects the fact, that interrupt moderation is + always enabled, regardless how much network load is currently + passing via a particular interface. In addition, the duration of + the moderation interval has a fixed length that never changes while + the driver is operational. + + -- Dynamic - Interrupt moderation might be applied on the adapter card, + depending on the load of the system. If the driver detects that the + system load is too high, the driver tries to shield the system against + too much network load by enabling interrupt moderation. If - at a later + time - the CPU utilizaton decreases again (or if the network load is + negligible) the interrupt moderation will automatically be disabled. + +Interrupt moderation should be used when the driver has to handle one or more +interfaces with a high network load, which - as a consequence - leads also to a +high CPU utilization. When moderation is applied in such high network load +situations, CPU load might be reduced by 20-30%. + +NOTE: The drawback of using interrupt moderation is an increase of the round- +trip-time (RTT), due to the queueing and serving of interrupts at dedicated +moderation times. + +Interrupts per second +--------------------- +Parameter: IntsPerSec +Values: 30...40000 (interrupts per second) +Default: 2000 + +This parameter is only used, if either static or dynamic interrupt moderation +is used on a network adapter card. Using this paramter if no moderation is +applied, will lead to no action performed. + +This parameter determines the length of any interrupt moderation interval. +Assuming that static interrupt moderation is to be used, an 'IntsPerSec' +parameter value of 2000 will lead to an interrupt moderation interval of +500 microseconds. 
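
As a rough illustration of the arithmetic above (a stand-alone user-space
sketch, not code from the sk98lin driver itself; the variable names are
made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int ints_per_sec = 2000;  /* example IntsPerSec value   */
            unsigned int interval_us = 1000000 / ints_per_sec;

            printf("IntsPerSec=%u -> moderation interval = %u us\n",
                   ints_per_sec, interval_us); /* prints 500 us for 2000     */
            return 0;
    }

The same arithmetic gives a 10000 us (10 ms) interval for the value of 100
interrupts per second mentioned in the NOTE below.
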
+ +NOTE: The duration of the moderation interval is to be chosen with care. +At first glance, selecting a very long duration (e.g. only 100 interrupts per +second) seems to be meaningful, but the increase of packet-processing delay +is tremendous. On the other hand, selecting a very short moderation time might +compensate the use of any moderation being applied. + + Preferred Port -------------- Parameter: PrefPort @@ -365,10 +463,10 @@ 6 VLAN and Link Aggregation Support (IEEE 802.1, 802.1q, 802.3ad) ================================================================== -The SysKonnect Linux drivers are able to support VLAN and Link Aggregation -according to IEEE standards 802.1, 802.1q, and 802.3ad. These features are -only available after installation of open source modules available on the -Internet: +The Marvell Yukon/SysKonnect Linux drivers are able to support VLAN and +Link Aggregation according to IEEE standards 802.1, 802.1q, and 802.3ad. +These features are only available after installation of open source +modules available on the Internet: For VLAN go to: http://scry.wanfear.com/~greear/vlan.html For Link Aggregation go to: http://www.st.rim.or.jp/~yumo @@ -459,208 +557,13 @@ support for help (linux@syskonnect.de). When contacting our technical support, please ensure that the following information is available: -- System Manufacturer and Model -- Boards in your system +- System Manufacturer and HW Informations (CPU, Memory... ) +- PCI-Boards in your system - Distribution - Kernel version - Driver version *** -8 History -========== -VERSION 6.00 (In-Kernel version) -New Features: -- Support for SK-98xx V2.0 adapters -- Support for gmac -- Support for kernel 2.4.x and kernel 2.2.x -- Zerocopy support for kernel 2.4.x with sendfile() -- Support for scatter-gather functionality with sendfile() -- Speed support for SK-98xx V2.0 adapters -- New ProcFs entries -- New module parameters -Problems fixed: -- ProcFS initialization -- csum packet error -- Ierror/crc counter error (#10767) -- rx_too_long counter error (#10751) -Known limitations: -- None - -VERSION 4.11 -New Features: -- none -Problems fixed: -- Error statistic counter fix (#10620) -- RLMT-Fixes (#10659, #10639, #10650) -- LM80 sensor initialization fix (#10623) -- SK-CSUM memory fixes (#10610). 
-Known limitations: -- None - -VERSION 4.10 -New Features: -- New ProcFs entries -Problems fixed: -- Corrected some printk's -Known limitations: -- None - -VERSION 4.09 -New Features: -- IFF_RUNNING support (link status) -- New ProcFs entries -Problems fixed: -- too long counters -- too short counters -- Kernel error compilation -Known limitations: -- None - -VERSION 4.06 (In-Kernel version) -Problems fixed: -- MTU init problems - -VERSION 4.04 -Problems fixed: -- removed VLAN error messages - -VERSION 4.02 (In-Kernel version) -New Features: -- Add Kernel 2.4 changes -Known limitations: -- None - -VERSION 4.01 (In-Kernel version) -Problems fixed: -- Full statistics support for DualNet mode -Known limitations: -- None - -VERSION 4.00 (In-Kernel version) -Problems fixed: -- Memory leak found -New Features: -- Proc filesystem integration -- DualNet functionality integrated -- Rlmt networks added -Known limitations: -- statistics partially incorrect in DualNet mode - -VERSION 3.04 (In-Kernel version) -Problems fixed: -- Driver start failed on UltraSPARC -- Rx checksum calculation for big endian machines did not work -- Jumbo frames were counted as input-errors in netstat - -VERSION 3.03 (Standalone version) -Problems fixed: -- Compilation did not find script "printver.sh" if "." not in PATH -Known limitations: -- None - -VERSION 3.02 (In-Kernel version) -Problems fixed: -- None -New Features: -- Integration in Linux kernel source (2.2.14 and 2.3.29) -Known limitations: -- None - -VERSION 3.01 -Problems fixed: -- None -New Features: -- Full source release -Known limitations: -- None - -VERSION 3.00 -Problems fixed: -- None -New Features: -- Support for 1000Base-T adapters (SK-9821 and SK-9822) -Known limitations: -- None - -VERSION 1.07 -Problems fixed: -- RlmtMode parameter value strings were wrong (#10437) -- Driver sent too many RLMT frames (#10439) -- Driver did not recognize network segmentation (#10440) -- RLMT switched too often on segmented network (#10441) -Known limitations: -- None - -VERSION 1.06 -Problems fixed: -- System panic'ed after some time when running with - RlmtMode=CheckOtherLink or RlmtMode=CheckSeg (#10421) - Panic message: "Kernel panic: skput: over ... dev: eth0" -- Driver did not switch back to default port when connected - back-to-back (#10422). 
-Changes: -- RlmtMode parameter names have changed -New features: -- There is now a version for ALPHA processors -Known limitations: -- None - -VERSION 1.05 -Problems fixed: -- Driver failed to load on kernels with version information - for module symbols enabled -Known limitations: -- None - -VERSION 1.04 -Problems fixed: -- Large frame support does work now (no autonegotiation - support for large frames, just manually selectable) -New Features: -- Receive checksumming in hardware -- Performance optimizations - Some numbers (on two PII-400 machines, back-to-back): - netpipe: 300 MBit/sec, with large frames: 470 MBit/sec - ttcp: 38 MByte/sec, with large frames: 60 MByte/sec - ttcp (UDP send): 66 MByte/sec, with large frames: 106 MByte/sec -Known limitations: -- None - -VERSION 1.03 -Problems fixed: -- Unloading with "rmmod" caused segmentation fault (#10415) -- The link LED flickered from time to time, if no link was - established (#10402) -- Installation problems with RedHat 6.0 (#10409) -New Features: -- Connection state ouput at "network connection up" -Known limitations: -- None - -VERSION 1.02 -Problems fixed: -- Failed with multiple adapters -- Failed with Single Port adapters -- Startup string was only displayed if adapter found -- No link could be established on certain switches when the switches were - rebooted. (#10377) -Known limitations: -- Segmentation fault at "rmmod" with kernel 2.2.3 on some machines - -VERSION 1.01 -Problems fixed: -- Sensor status was not set back to 'ok' after 'warning/error'. (#10386) -Changes: -- improved parallelism in driver - -VERSION 1.00 -Known limitations: -- not tested with all kernel versions (I don't have that much time :-) -- only x86 version available (if you need others, ask for it) -- source code not completely available ***End of Readme File*** - - - diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/s390/TAPE linux.22-ac2/Documentation/s390/TAPE --- linux.vanilla/Documentation/s390/TAPE 2001-07-25 22:12:01.000000000 +0100 +++ linux.22-ac2/Documentation/s390/TAPE 1970-01-01 01:00:00.000000000 +0100 @@ -1,122 +0,0 @@ -Channel attached Tape device driver - ------------------------------WARNING----------------------------------------- -This driver is considered to be EXPERIMENTAL. Do NOT use it in -production environments. Feel free to test it and report problems back to us. ------------------------------------------------------------------------------ - -The LINUX for zSeries tape device driver manages channel attached tape drives -which are compatible to IBM 3480 or IBM 3490 magnetic tape subsystems. This -includes various models of these devices (for example the 3490E). - - -Tape driver features - -The device driver supports a maximum of 128 tape devices. -No official LINUX device major number is assigned to the zSeries tape device -driver. It allocates major numbers dynamically and reports them on system -startup. -Typically it will get major number 254 for both the character device front-end -and the block device front-end. - -The tape device driver needs no kernel parameters. All supported devices -present are detected on driver initialization at system startup or module load. -The devices detected are ordered by their subchannel numbers. The device with -the lowest subchannel number becomes device 0, the next one will be device 1 -and so on. - - -Tape character device front-end - -The usual way to read or write to the tape device is through the character -device front-end. 
The zSeries tape device driver provides two character devices -for each physical device -- the first of these will rewind automatically when -it is closed, the second will not rewind automatically. - -The character device nodes are named /dev/rtibm0 (rewinding) and /dev/ntibm0 -(non-rewinding) for the first device, /dev/rtibm1 and /dev/ntibm1 for the -second, and so on. - -The character device front-end can be used as any other LINUX tape device. You -can write to it and read from it using LINUX facilities such as GNU tar. The -tool mt can be used to perform control operations, such as rewinding the tape -or skipping a file. - -Most LINUX tape software should work with either tape character device. - - -Tape block device front-end - -The tape device may also be accessed as a block device in read-only mode. -This could be used for software installation in the same way as it is used with -other operation systems on the zSeries platform (and most LINUX -distributions are shipped on compact disk using ISO9660 filesystems). - -One block device node is provided for each physical device. These are named -/dev/btibm0 for the first device, /dev/btibm1 for the second and so on. -You should only use the ISO9660 filesystem on LINUX for zSeries tapes because -the physical tape devices cannot perform fast seeks and the ISO9660 system is -optimized for this situation. - - -Tape block device example - -In this example a tape with an ISO9660 filesystem is created using the first -tape device. ISO9660 filesystem support must be built into your system kernel -for this. -The mt command is used to issue tape commands and the mkisofs command to -create an ISO9660 filesystem: - -- create a LINUX directory (somedir) with the contents of the filesystem - mkdir somedir - cp contents somedir - -- insert a tape - -- ensure the tape is at the beginning - mt -f /dev/ntibm0 rewind - -- set the blocksize of the character driver. The blocksize 2048 bytes - is commonly used on ISO9660 CD-Roms - mt -f /dev/ntibm0 setblk 2048 - -- write the filesystem to the character device driver - mkisofs -o /dev/ntibm0 somedir - -- rewind the tape again - mt -f /dev/ntibm0 rewind - -- Now you can mount your new filesystem as a block device: - mount -t iso9660 -o ro,block=2048 /dev/btibm0 /mnt - -TODO List - - - Driver has to be stabelized still - -BUGS - -This driver is considered BETA, which means some weaknesses may still -be in it. -If an error occurs which cannot be handled by the code you will get a -sense-data dump.In that case please do the following: - -1. set the tape driver debug level to maximum: - echo 6 >/proc/s390dbf/tape/level - -2. re-perform the actions which produced the bug. (Hopefully the bug will - reappear.) - -3. get a snapshot from the debug-feature: - cat /proc/s390dbf/tape/hex_ascii >somefile - -4. Now put the snapshot together with a detailed description of the situation - that led to the bug: - - Which tool did you use? - - Which hardware do you have? - - Was your tape unit online? - - Is it a shared tape unit? - -5. 
Send an email with your bug report to: - mailto:Linux390@de.ibm.com - - diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/sched-coding.txt linux.22-ac2/Documentation/sched-coding.txt --- linux.vanilla/Documentation/sched-coding.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/sched-coding.txt 2003-06-29 16:10:47.000000000 +0100 @@ -0,0 +1,129 @@ + Reference for various scheduler-related methods in the O(1) scheduler + Robert Love , MontaVista Software + + +Note most of these methods are local to kernel/sched.c - this is by design. +The scheduler is meant to be self-contained and abstracted away. This document +is primarily for understanding the scheduler, not interfacing to it. Some of +the discussed interfaces, however, are general process/scheduling methods. +They are typically defined in include/linux/sched.h. + + +Main Scheduling Methods +----------------------- + +void load_balance(runqueue_t *this_rq, int idle) + Attempts to pull tasks from one cpu to another to balance cpu usage, + if needed. This method is called explicitly if the runqueues are + inbalanced or periodically by the timer tick. Prior to calling, + the current runqueue must be locked and interrupts disabled. + +void schedule() + The main scheduling function. Upon return, the highest priority + process will be active. + + +Locking +------- + +Each runqueue has its own lock, rq->lock. When multiple runqueues need +to be locked, lock acquires must be ordered by ascending &runqueue value. + +A specific runqueue is locked via + + task_rq_lock(task_t pid, unsigned long *flags) + +which disables preemption, disables interrupts, and locks the runqueue pid is +running on. Likewise, + + task_rq_unlock(task_t pid, unsigned long *flags) + +unlocks the runqueue pid is running on, restores interrupts to their previous +state, and reenables preemption. + +The routines + + double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) + +and + + double_rq_unlock(runqueue_t *rq1, runqueue_t rq2) + +safely lock and unlock, respectively, the two specified runqueues. They do +not, however, disable and restore interrupts. Users are required to do so +manually before and after calls. + + +Values +------ + +MAX_PRIO + The maximum priority of the system, stored in the task as task->prio. + Lower priorities are higher. Normal (non-RT) priorities range from + MAX_RT_PRIO to (MAX_PRIO - 1). +MAX_RT_PRIO + The maximum real-time priority of the system. Valid RT priorities + range from 0 to (MAX_RT_PRIO - 1). +MAX_USER_RT_PRIO + The maximum real-time priority that is exported to user-space. Should + always be equal to or less than MAX_RT_PRIO. Setting it less allows + kernel threads to have higher priorities than any user-space task. +MIN_TIMESLICE +MAX_TIMESLICE + Respectively, the minimum and maximum timeslices (quanta) of a process. + +Data +---- + +struct runqueue + The main per-CPU runqueue data structure. +struct task_struct + The main per-process data structure. + + +General Methods +--------------- + +cpu_rq(cpu) + Returns the runqueue of the specified cpu. +this_rq() + Returns the runqueue of the current cpu. +task_rq(task) + Returns the runqueue which holds the specified task. +cpu_curr(cpu) + Returns the task currently running on the given cpu. +rt_task(task) + Returns true if task is real-time, false if not. +task_cpu(task) + + +Process Control Methods +----------------------- + +void set_user_nice(task_t *p, long nice) + Sets the "nice" value of task p to the given value. 
+int setscheduler(pid_t pid, int policy, struct sched_param *param) + Sets the scheduling policy and parameters for the given pid. +void set_cpus_allowed(task_t *p, unsigned long new_mask) + Sets a given task's CPU affinity and migrates it to a proper cpu. + Callers must have a valid reference to the task and assure the + task not exit prematurely. No locks can be held during the call. +set_task_state(tsk, state_value) + Sets the given task's state to the given value. +set_current_state(state_value) + Sets the current task's state to the given value. +void set_tsk_need_resched(struct task_struct *tsk) + Sets need_resched in the given task. +void clear_tsk_need_resched(struct task_struct *tsk) + Clears need_resched in the given task. +void set_need_resched() + Sets need_resched in the current task. +void set_task_cpu(task, cpu) + Sets task->cpu to cpu on SMP. Noop on UP. +void clear_need_resched() + Clears need_resched in the current task. +int need_resched() + Returns true if need_resched is set in the current task, false + otherwise. +yield() + Place the current process at the end of the runqueue and call schedule. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/sched-design.txt linux.22-ac2/Documentation/sched-design.txt --- linux.vanilla/Documentation/sched-design.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/sched-design.txt 2003-06-29 16:10:47.000000000 +0100 @@ -0,0 +1,165 @@ + Goals, Design and Implementation of the + new ultra-scalable O(1) scheduler + + + This is an edited version of an email Ingo Molnar sent to + lkml on 4 Jan 2002. It describes the goals, design, and + implementation of Ingo's new ultra-scalable O(1) scheduler. + Last Updated: 18 April 2002. + + +Goal +==== + +The main goal of the new scheduler is to keep all the good things we know +and love about the current Linux scheduler: + + - good interactive performance even during high load: if the user + types or clicks then the system must react instantly and must execute + the user tasks smoothly, even during considerable background load. + + - good scheduling/wakeup performance with 1-2 runnable processes. + + - fairness: no process should stay without any timeslice for any + unreasonable amount of time. No process should get an unjustly high + amount of CPU time. + + - priorities: less important tasks can be started with lower priority, + more important tasks with higher priority. + + - SMP efficiency: no CPU should stay idle if there is work to do. + + - SMP affinity: processes which run on one CPU should stay affine to + that CPU. Processes should not bounce between CPUs too frequently. + + - plus additional scheduler features: RT scheduling, CPU binding. + +and the goal is also to add a few new things: + + - fully O(1) scheduling. Are you tired of the recalculation loop + blowing the L1 cache away every now and then? Do you think the goodness + loop is taking a bit too long to finish if there are lots of runnable + processes? This new scheduler takes no prisoners: wakeup(), schedule(), + the timer interrupt are all O(1) algorithms. There is no recalculation + loop. There is no goodness loop either. + + - 'perfect' SMP scalability. With the new scheduler there is no 'big' + runqueue_lock anymore - it's all per-CPU runqueues and locks - two + tasks on two separate CPUs can wake up, schedule and context-switch + completely in parallel, without any interlocking. All + scheduling-relevant data is structured for maximum scalability. 
+ + - better SMP affinity. The old scheduler has a particular weakness that + causes the random bouncing of tasks between CPUs if/when higher + priority/interactive tasks, this was observed and reported by many + people. The reason is that the timeslice recalculation loop first needs + every currently running task to consume its timeslice. But when this + happens on eg. an 8-way system, then this property starves an + increasing number of CPUs from executing any process. Once the last + task that has a timeslice left has finished using up that timeslice, + the recalculation loop is triggered and other CPUs can start executing + tasks again - after having idled around for a number of timer ticks. + The more CPUs, the worse this effect. + + Furthermore, this same effect causes the bouncing effect as well: + whenever there is such a 'timeslice squeeze' of the global runqueue, + idle processors start executing tasks which are not affine to that CPU. + (because the affine tasks have finished off their timeslices already.) + + The new scheduler solves this problem by distributing timeslices on a + per-CPU basis, without having any global synchronization or + recalculation. + + - batch scheduling. A significant proportion of computing-intensive tasks + benefit from batch-scheduling, where timeslices are long and processes + are roundrobin scheduled. The new scheduler does such batch-scheduling + of the lowest priority tasks - so nice +19 jobs will get + 'batch-scheduled' automatically. With this scheduler, nice +19 jobs are + in essence SCHED_IDLE, from an interactiveness point of view. + + - handle extreme loads more smoothly, without breakdown and scheduling + storms. + + - O(1) RT scheduling. For those RT folks who are paranoid about the + O(nr_running) property of the goodness loop and the recalculation loop. + + - run fork()ed children before the parent. Andrea has pointed out the + advantages of this a few months ago, but patches for this feature + do not work with the old scheduler as well as they should, + because idle processes often steal the new child before the fork()ing + CPU gets to execute it. + + +Design +====== + +the core of the new scheduler are the following mechanizms: + + - *two*, priority-ordered 'priority arrays' per CPU. There is an 'active' + array and an 'expired' array. The active array contains all tasks that + are affine to this CPU and have timeslices left. The expired array + contains all tasks which have used up their timeslices - but this array + is kept sorted as well. The active and expired array is not accessed + directly, it's accessed through two pointers in the per-CPU runqueue + structure. If all active tasks are used up then we 'switch' the two + pointers and from now on the ready-to-go (former-) expired array is the + active array - and the empty active array serves as the new collector + for expired tasks. + + - there is a 64-bit bitmap cache for array indices. Finding the highest + priority task is thus a matter of two x86 BSFL bit-search instructions. + +the split-array solution enables us to have an arbitrary number of active +and expired tasks, and the recalculation of timeslices can be done +immediately when the timeslice expires. Because the arrays are always +access through the pointers in the runqueue, switching the two arrays can +be done very quickly. + +this is a hybride priority-list approach coupled with roundrobin +scheduling and the array-switch method of distributing timeslices. + + - there is a per-task 'load estimator'. 
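
As a rough illustration of the two-array-plus-bitmap mechanism described
above (generic C, not the actual sched.c code; all names here are invented
for the sketch):

    /* Toy model: one bit per priority level, bit p set means queue p is
     * non-empty.  Lower p means higher priority, as in the text above.     */
    struct prio_array {
            unsigned long long bitmap;          /* 64 priority levels        */
            /* struct list_head queue[64];  the queues are omitted here      */
    };

    struct toy_runqueue {
            struct prio_array *active, *expired;
            struct prio_array arrays[2];
    };

    int best_prio(const struct prio_array *a)
    {
            if (!a->bitmap)
                    return -1;                  /* nothing runnable           */
            /* find-first-set is a single bit-search instruction on x86
             * (BSFL); __builtin_ffsll() is the GCC/clang builtin for it.     */
            return __builtin_ffsll((long long)a->bitmap) - 1;
    }

    void array_switch(struct toy_runqueue *rq)
    {
            /* the O(1) "array switch": expired becomes active, just a
             * pointer swap, no recalculation loop                           */
            struct prio_array *tmp = rq->active;
            rq->active = rq->expired;
            rq->expired = tmp;
    }
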
+ +one of the toughest things to get right is good interactive feel during +heavy system load. While playing with various scheduler variants i found +that the best interactive feel is achieved not by 'boosting' interactive +tasks, but by 'punishing' tasks that want to use more CPU time than there +is available. This method is also much easier to do in an O(1) fashion. + +to establish the actual 'load' the task contributes to the system, a +complex-looking but pretty accurate method is used: there is a 4-entry +'history' ringbuffer of the task's activities during the last 4 seconds. +This ringbuffer is operated without much overhead. The entries tell the +scheduler a pretty accurate load-history of the task: has it used up more +CPU time or less during the past N seconds. [the size '4' and the interval +of 4x 1 seconds was found by lots of experimentation - this part is +flexible and can be changed in both directions.] + +the penalty a task gets for generating more load than the CPU can handle +is a priority decrease - there is a maximum amount to this penalty +relative to their static priority, so even fully CPU-bound tasks will +observe each other's priorities, and will share the CPU accordingly. + +the SMP load-balancer can be extended/switched with additional parallel +computing and cache hierarchy concepts: NUMA scheduling, multi-core CPUs +can be supported easily by changing the load-balancer. Right now it's +tuned for my SMP systems. + +i skipped the prev->mm == next->mm advantage - no workload i know of shows +any sensitivity to this. It can be added back by sacrificing O(1) +schedule() [the current and one-lower priority list can be searched for a +that->mm == current->mm condition], but costs a fair number of cycles +during a number of important workloads, so i wanted to avoid this as much +as possible. + +- the SMP idle-task startup code was still racy and the new scheduler +triggered this. So i streamlined the idle-setup code a bit. We do not call +into schedule() before all processors have started up fully and all idle +threads are in place. + +- the patch also cleans up a number of aspects of sched.c - moves code +into other areas of the kernel where it's appropriate, and simplifies +certain code paths and data constructs. As a result, the new scheduler's +code is smaller than the old one. + + Ingo diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/sonypi.txt linux.22-ac2/Documentation/sonypi.txt --- linux.vanilla/Documentation/sonypi.txt 2003-08-28 16:45:26.000000000 +0100 +++ linux.22-ac2/Documentation/sonypi.txt 2003-08-28 22:12:00.000000000 +0100 @@ -8,7 +8,9 @@ Copyright (C) 2000 Andrew Tridgell This driver enables access to the Sony Programmable I/O Control Device which -can be found in many (all ?) Sony Vaio laptops. +can be found in many Sony Vaio laptops. Some newer Sony laptops (seems to be +limited to new FX series laptops, at least the FX501 and the FX702) lack a +sonypi device and are not supported at all by this driver. 
It will give access (through a user space utility) to some events those laptops generate, like: @@ -96,6 +98,7 @@ SONYPI_THUMBPHRASE_MASK 0x0200 SONYPI_MEYE_MASK 0x0400 SONYPI_MEMORYSTICK_MASK 0x0800 + SONYPI_BATTERY_MASK 0x1000 useinput: if set (which is the default) jogdial events are forwarded to the input subsystem as mouse wheel diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/video4linux/meye.txt linux.22-ac2/Documentation/video4linux/meye.txt --- linux.vanilla/Documentation/video4linux/meye.txt 2003-06-14 00:11:26.000000000 +0100 +++ linux.22-ac2/Documentation/video4linux/meye.txt 2003-08-28 22:11:47.000000000 +0100 @@ -16,6 +16,23 @@ MJPEG hardware grabbing is supported via a private API (see below). +Hardware supported: +------------------- + +This driver supports the 'second' version of the MotionEye camera :) + +The first version was connected directly on the video bus of the Neomagic +video card and is unsupported. + +The second one, made by Kawasaki Steel is fully supported by this +driver (PCI vendor/device is 0x136b/0xff01) + +The third one, present in recent (more or less last year) Picturebooks +(C1M* models), is not supported. The manufacturer has given the specs +to the developers under a NDA (which allows the develoment of a GPL +driver however), but things are not moving very fast (see +http://r-engine.sourceforge.net/) (PCI vendor/device is 0x10cf/0x2011). + Driver options: --------------- diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/vm/overcommit-accounting linux.22-ac2/Documentation/vm/overcommit-accounting --- linux.vanilla/Documentation/vm/overcommit-accounting 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/vm/overcommit-accounting 2003-06-29 16:10:46.000000000 +0100 @@ -0,0 +1,70 @@ +* This describes the overcommit management facility in the latest kernel + tree (FIXME: actually it also describes the stuff that isnt yet done) + +The Linux kernel supports four overcommit handling modes + +0 - Heuristic overcommit handling. Obvious overcommits of + address space are refused. Used for a typical system. It + ensures a seriously wild allocation fails while allowing + overcommit to reduce swap usage + +1 - No overcommit handling. Appropriate for some scientific + applications + +2 - (NEW) strict overcommit. The total address space commit + for the system is not permitted to exceed swap + half ram. + In almost all situations this means a process will not be + killed while accessing pages but only by malloc failures + that are reported back by the kernel mmap/brk code. + +3 - (NEW) paranoid overcommit The total address space commit + for the system is not permitted to exceed swap. The machine + will never kill a process accessing pages it has mapped + except due to a bug (ie report it!) + +Gotchas +------- + +The C language stack growth does an implicit mremap. If you want absolute +guarantees and run close to the edge you MUST mmap your stack for the +largest size you think you will need. For typical stack usage is does +not matter much but its a corner case if you really really care + +In modes 2 and 3 the MAP_NORESERVE flag is ignored. 
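
As a rough sketch of the admission rule behind modes 2 and 3 above
(illustrative C only; the constant and variable names are invented and this
is not the code from this patch):

    /* All quantities are in pages. */
    enum { OVERCOMMIT_GUESS = 0, OVERCOMMIT_ALWAYS = 1,
           OVERCOMMIT_STRICT = 2, OVERCOMMIT_PARANOID = 3 };

    int may_commit(int mode, unsigned long committed, unsigned long request,
                   unsigned long total_swap, unsigned long total_ram)
    {
            unsigned long limit;

            switch (mode) {
            case OVERCOMMIT_ALWAYS:                     /* mode 1            */
                    return 1;                           /* never refuse      */
            case OVERCOMMIT_STRICT:                     /* mode 2            */
                    limit = total_swap + total_ram / 2; /* swap + half ram   */
                    break;
            case OVERCOMMIT_PARANOID:                   /* mode 3            */
                    limit = total_swap;                 /* swap only         */
                    break;
            default:                                    /* mode 0, heuristic */
                    /* only obviously wild requests are refused; what counts
                     * as "wild" (e.g. one mapping bigger than ram + swap)
                     * is an assumption made for this sketch                 */
                    return request <= total_ram + total_swap;
            }
            return committed + request <= limit;
    }

The "How It Works" rules below define what counts towards the committed
total used in such a check.
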
+ + +How It Works +------------ + +The overcommit is based on the following rules + +For a file backed map + SHARED or READ-only - 0 cost (the file is the map not swap) + PRIVATE WRITABLE - size of mapping per instance + +For an anonymous or /dev/zero map + SHARED - size of mapping + PRIVATE READ-only - 0 cost (but of little use) + PRIVATE WRITABLE - size of mapping per instance + +Additional accounting + Pages made writable copies by mmap + shmfs memory drawn from the same pool + +Status +------ + +o We account mmap memory mappings +o We account mprotect changes in commit +o We account mremap changes in size +o We account brk +o We account munmap +o We report the commit status in /proc +o Account and check on fork +o Review stack handling/building on exec +o SHMfs accounting +o Implement actual limit enforcement + +To Do +----- +o Account ptrace pages (this is hard) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Documentation/wolfson-touchscreen.txt linux.22-ac2/Documentation/wolfson-touchscreen.txt --- linux.vanilla/Documentation/wolfson-touchscreen.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/Documentation/wolfson-touchscreen.txt 2003-07-31 14:00:07.000000000 +0100 @@ -0,0 +1,178 @@ + +Wolfson Microelectronics WM9705 and WM9712 Touchscreen Controllers +=================================================================== + +The WM9705 and WM9712 are high performance AC97 audio codecs with built +in touchscreen controllers that are mainly found in portable devices. +i.e. Dell Axim and Toshiba e740. + +This driver uses the AC97 link controller for all communication with the +codec and can be either built into the kernel or built as a module. + +Build Instructions +================== + +The driver will be built into the kernel if "sound card support" = y and +"wolfson AC97 touchscreen support" = y in the kernel sound configuration. + +To build as a module, "wolfson AC97 touchscreen support" = m +in the kernel sound configuration. + + +Driver Features +=============== + + * supports WM9705, WM9712 + * polling mode + * coordinate polling + * adjustable rpu/dpp settings + * adjustable pressure current + * adjustable sample settle delay + * 4 and 5 wire touchscreens (5 wire is WM9712 only) + * pen down detection + * power management + * AUX ADC sampling + + +Driver Usage +============ + +In order to use this driver, a char device called wm97xx with a major +number of 10 and minor number 16 will have to be created under +/dev/touchscreen. + +e.g. +mknod /dev/touchscreen/wm97xx c 10 16 + + +Driver Parameters +================= +The driver can accept several parameters for fine tuning the touchscreen. +However, the syntax is different between module options and passing the +options in the kernel command line. + +e.g. + +rpu=1 (module) +rpu:1 (kernel command line) + + +1. Codec sample mode. (mode) + + The WM9712 can sample touchscreen data in 3 different operating + modes. i.e. polling, coordinate and continous. + + Polling:- The driver polls the codec and issues 3 seperate commands + over the AC97 link to read X,Y and pressure. + + Coordinate: - The driver polls the codec and only issues 1 command over + the AC97 link to read X,Y and pressure. This mode has + strict timing requirements and may drop samples if + interrupted. However, it is less demanding on the AC97 + link. Note: this mode requires a larger delay than polling + mode. + + Continuous:- The codec automatically samples X,Y and pressure and then + sends the data over the AC97 link in slots. 
This is then + same method used by the codec when recording audio. + + Set mode = 0 for polling, 1 for coordinate and 2 for continuous. + + Default mode = 0 + + + +2. WM9712 Internal pull up for pen detect. (rpu) + + Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive) + i.e. pull up resistance = 64k Ohms / rpu. + + Adjust this value if you are having problems with pen detect not + detecting any down events. + + Set rpu = value + + Default rpu = 1 + + + +3. WM9705 Pen detect comparator threshold. (pdd) + + 0 to Vmid in 15 steps, 0 = use zero power comparator with Vmid threshold + i.e. 1 = Vmid/15 threshold + 15 = Vmid/1 threshold + + Adjust this value if you are having problems with pen detect not + detecting any down events. + + Set pdd = value + + Default pdd = 0 + + + +4. Set current used for pressure measurement. (pil) + + Set pil = 2 to use 400uA + pil = 1 to use 200uA and + pil = 0 to disable pressure measurement. + + This is used to increase the range of values returned by the adc + when measureing touchpanel pressure. + + Default pil = 0 + + + +5. WM9712 Set 5 wire touchscreen mode. (five_wire) + + Set five_wire = 1 to enable 5 wire mode on the WM9712. + + Default five_wire = 0 + + NOTE: Five wire mode does not allow for readback of pressure. + + + +6. ADC sample delay. (delay) + + For accurate touchpanel measurements, some settling time may be + required between the switch matrix applying a voltage across the + touchpanel plate and the ADC sampling the signal. + + This delay can be set by setting delay = n, where n is the array + position of the delay in the array delay_table below. + Long delays > 1ms are supported for completeness, but are not + recommended. + + Default delay = 4 + + wm_delay uS AC97 link frames + ==================================== + 0 21 1 + 1 42 2 + 2 84 4 + 3 167 8 + 4 333 16 + 5 667 32 + 6 1000 48 + 7 1333 64 + 8 2000 96 + 9 2667 128 + 10 3333 160 + 11 4000 192 + 12 4667 224 + 13 5333 256 + 14 6000 288 + 15 0 0 (No delay, switch matrix always on) + + + +Contact +======= + +Further information about the WM9705 and WM9712 can be found on the +Wolfson Website. 
http://www.wolfsonmicro.com + +Please report bugs to liam.girdwood@wolfsonmicro.com or + linux@wolfsonmicro.com diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/acpi/tables.c linux.22-ac2/drivers/acpi/tables.c --- linux.vanilla/drivers/acpi/tables.c 2003-08-28 16:45:33.000000000 +0100 +++ linux.22-ac2/drivers/acpi/tables.c 2003-08-28 16:59:45.000000000 +0100 @@ -261,10 +261,17 @@ /* Map the DSDT header via the pointer in the FADT */ if (id == ACPI_DSDT) { - struct acpi_table_fadt *fadt = (struct acpi_table_fadt *) *header; + struct fadt_descriptor_rev2 *fadt = (struct fadt_descriptor_rev2 *) *header; + + if (fadt->header.revision == 3 && fadt->Xdsdt) { + *header = (void *) __acpi_map_table(fadt->Xdsdt, + sizeof(struct acpi_table_header)); + } else if (fadt->V1_dsdt) { + *header = (void *) __acpi_map_table(fadt->V1_dsdt, + sizeof(struct acpi_table_header)); + } else + *header = 0; - *header = (void *) __acpi_map_table(fadt->dsdt_addr, - sizeof(struct acpi_table_header)); if (!*header) { printk(KERN_WARNING PREFIX "Unable to map DSDT\n"); return -ENODEV; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/block/cciss.c linux.22-ac2/drivers/block/cciss.c --- linux.vanilla/drivers/block/cciss.c 2003-08-28 16:45:33.000000000 +0100 +++ linux.22-ac2/drivers/block/cciss.c 2003-08-13 14:46:11.000000000 +0100 @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -109,8 +110,19 @@ #define CCISS_DMA_MASK 0xFFFFFFFFFFFFFFFF /* 64 bit DMA */ +#ifdef CONFIG_CISS_MONITOR_THREAD +static int cciss_monitor(void *ctlr); +static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd, + unsigned long count, int (*cciss_monitor)(void *), int *rc); +#else +#define cciss_monitor(x) +#define kill_monitor_thead(x) +#endif + static ctlr_info_t *hba[MAX_CTLR]; +static u32 heartbeat_timer = 0; + static struct proc_dir_entry *proc_cciss; static void do_cciss_request(request_queue_t *q); @@ -188,7 +200,11 @@ "Current # commands on controller: %d\n" "Max Q depth since init: %d\n" "Max # commands on controller since init: %d\n" - "Max SG entries since init: %d\n\n", + "Max SG entries since init: %d\n" + MONITOR_PERIOD_PATTERN + MONITOR_DEADLINE_PATTERN + MONITOR_STATUS_PATTERN + "\n", h->devname, h->product_name, (unsigned long)h->board_id, @@ -196,7 +212,10 @@ (unsigned int)h->intr, h->num_luns, h->Qdepth, h->commands_outstanding, - h->maxQsinceinit, h->max_outstanding, h->maxSG); + h->maxQsinceinit, h->max_outstanding, h->maxSG, + MONITOR_PERIOD_VALUE(h), + MONITOR_DEADLINE_VALUE(h), + CTLR_STATUS(h)); pos += size; len += size; cciss_proc_tape_report(ctlr, buffer, &pos, &len); @@ -231,10 +250,8 @@ { unsigned char cmd[80]; int len; -#ifdef CONFIG_CISS_SCSI_TAPE ctlr_info_t *h = (ctlr_info_t *) data; int rc; -#endif if (count > sizeof(cmd)-1) return -EINVAL; @@ -244,6 +261,7 @@ len = strlen(cmd); if (cmd[len-1] == '\n') cmd[--len] = '\0'; + # ifdef CONFIG_CISS_SCSI_TAPE if (strcmp("engage scsi", cmd)==0) { rc = cciss_engage_scsi(h->ctlr); @@ -254,6 +272,10 @@ /* might be nice to have "disengage" too, but it's not safely possible. (only 1 module use count, lock issues.) 
*/ # endif + + if (START_MONITOR_THREAD(h, cmd, count, cciss_monitor, &rc) == 0) + return rc; + return -EINVAL; } @@ -407,7 +429,7 @@ printk(KERN_DEBUG "cciss_open %x (%x:%x)\n", inode->i_rdev, ctlr, dsk); #endif /* CCISS_DEBUG */ - if (ctlr > MAX_CTLR || hba[ctlr] == NULL) + if (ctlr > MAX_CTLR || hba[ctlr] == NULL || !CTLR_IS_ALIVE(hba[ctlr])) return -ENXIO; /* * Root is allowed to open raw volume zero even if its not configured @@ -1107,7 +1129,8 @@ size_t size, unsigned int use_unit_num, unsigned int log_unit, - __u8 page_code ) + __u8 page_code, + __u8 cmdtype) { ctlr_info_t *h = hba[ctlr]; CommandList_struct *c; @@ -1131,6 +1154,9 @@ } c->Header.Tag.lower = c->busaddr; /* tag is phys addr of cmd */ /* Fill in Request block */ + c->Request.CDB[0] = cmd; + c->Request.Type.Type = cmdtype; + if (cmdtype == TYPE_CMD) { switch (cmd) { case CISS_INQUIRY: /* If the logical unit number is 0 then, this is going @@ -1150,11 +1176,9 @@ c->Request.CDB[2] = page_code; } c->Request.CDBLen = 6; - c->Request.Type.Type = TYPE_CMD; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; /* Read */ c->Request.Timeout = 0; /* Don't time out */ - c->Request.CDB[0] = CISS_INQUIRY; c->Request.CDB[4] = size & 0xFF; break; case CISS_REPORT_LOG: @@ -1163,11 +1187,9 @@ So we have nothing to write. */ c->Request.CDBLen = 12; - c->Request.Type.Type = TYPE_CMD; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; /* Read */ c->Request.Timeout = 0; /* Don't time out */ - c->Request.CDB[0] = CISS_REPORT_LOG; c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0xFF; @@ -1178,18 +1200,38 @@ hba[ctlr]->drv[log_unit].LunID; c->Header.LUN.LogDev.Mode = 1; c->Request.CDBLen = 10; - c->Request.Type.Type = TYPE_CMD; /* It is a command. 
*/ c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; /* Read */ c->Request.Timeout = 0; /* Don't time out */ - c->Request.CDB[0] = CCISS_READ_CAPACITY; break; default: printk(KERN_WARNING "cciss: Unknown Command 0x%x sent attempted\n", cmd); cmd_free(h, c, 1); return IO_ERROR; - }; + } + } else if (cmdtype == TYPE_MSG) { + switch (cmd) { + case 3: /* No-Op message */ + c->Request.CDBLen = 1; + c->Request.Type.Attribute = ATTR_SIMPLE; + c->Request.Type.Direction = XFER_WRITE; + c->Request.Timeout = 0; + c->Request.CDB[0] = cmd; + break; + default: + printk(KERN_WARNING + "cciss%d: unknown message type %d\n", + ctlr, cmd); + cmd_free(h, c, 1); + return IO_ERROR; + } + } else { + printk(KERN_WARNING + "cciss%d: unknown command type %d\n", ctlr, cmdtype); + cmd_free(h, c, 1); + return IO_ERROR; + } /* Fill in the scatter gather information */ if (size > 0) { @@ -1352,7 +1394,7 @@ } return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, - sizeof(ReportLunData_struct), 0, 0, 0 ); + sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD); if (return_code == IO_OK) { listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0])); @@ -1451,7 +1493,7 @@ memset(size_buff, 0, sizeof(ReadCapdata_struct)); return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, size_buff, sizeof(ReadCapdata_struct), 1, - logvol, 0 ); + logvol, 0, TYPE_CMD); if (return_code == IO_OK) { total_size = (0xff & (unsigned int) size_buff->total_size[0]) << 24; @@ -1482,7 +1524,7 @@ /* Execute the command to read the disk geometry */ memset(inq_buff, 0, sizeof(InquiryData_struct)); return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff, - sizeof(InquiryData_struct), 1, logvol ,0xC1 ); + sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD); if (return_code == IO_OK) { if (inq_buff->data_byte[8] == 0xFF) { printk(KERN_WARNING @@ -1590,7 +1632,8 @@ } memset(size_buff, 0, sizeof(ReadCapdata_struct)); return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, size_buff, - sizeof( ReadCapdata_struct), 1, logvol, 0 ); + sizeof( ReadCapdata_struct), 1, logvol, 0, + TYPE_CMD); if (return_code == IO_OK) { total_size = (0xff & (unsigned int)(size_buff->total_size[0])) << 24; @@ -1619,7 +1662,7 @@ /* Execute the command to read the disk geometry */ memset(inq_buff, 0, sizeof(InquiryData_struct)); return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff, - sizeof(InquiryData_struct), 1, logvol ,0xC1 ); + sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD); if (return_code == IO_OK) { if (inq_buff->data_byte[8] == 0xFF) { printk(KERN_WARNING "cciss: reading geometry failed, " @@ -2236,6 +2279,15 @@ goto startio; } + /* make sure controller is alive. */ + if (!CTLR_IS_ALIVE(h)) { + printk(KERN_WARNING "cciss%d: I/O quit ", h->ctlr); + blkdev_dequeue_request(creq); + complete_buffers(creq->bh, 0); + end_that_request_last(creq); + return; + } + if (( c = cmd_alloc(h, 1)) == NULL) goto startio; @@ -2833,7 +2885,174 @@ kfree(hba[i]); hba[i]=NULL; } +#ifdef CONFIG_CISS_MONITOR_THREAD +static void fail_all_cmds(unsigned long ctlr) +{ + /* If we get here, the board is apparently dead. */ + ctlr_info_t *h = hba[ctlr]; + CommandList_struct *c; + unsigned long flags; + + printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr); + h->alive = 0; /* the controller apparently died... */ + + spin_lock_irqsave(&io_request_lock, flags); + + pci_disable_device(h->pdev); /* Make sure it is really dead. 
*/ + + /* move everything off the request queue onto the completed queue */ + while( (c = h->reqQ) != NULL ) { + removeQ(&(h->reqQ), c); + h->Qdepth--; + addQ (&(h->cmpQ), c); + } + + /* Now, fail everything on the completed queue with a HW error */ + while( (c = h->cmpQ) != NULL ) { + removeQ(&h->cmpQ, c); + c->err_info->CommandStatus = CMD_HARDWARE_ERR; + if (c->cmd_type == CMD_RWREQ) { + complete_command(h, c, 0); + } else if (c->cmd_type == CMD_IOCTL_PEND) + complete(c->waiting); +# ifdef CONFIG_CISS_SCSI_TAPE + else if (c->cmd_type == CMD_SCSI) + complete_scsi_command(c, 0, 0); +# endif + } + spin_unlock_irqrestore(&io_request_lock, flags); + return; +} +static int cciss_monitor(void *ctlr) +{ + /* If the board fails, we ought to detect that. So we periodically + send down a No-Op message and expect it to complete quickly. If it + doesn't, then we assume the board is dead, and fail all commands. + This is useful mostly in a multipath configuration, so that failover + will happen. */ + + int rc; + ctlr_info_t *h = (ctlr_info_t *) ctlr; + unsigned long flags; + u32 current_timer; + + daemonize(); + exit_files(current); + reparent_to_init(); + + printk("cciss%d: Monitor thread starting.\n", h->ctlr); + + /* only listen to signals if the HA was loaded as a module. */ +#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM)) + siginitsetinv(¤t->blocked, SHUTDOWN_SIGS); + sprintf(current->comm, "ccissmon%d", h->ctlr); + h->monitor_thread = current; + + init_timer(&h->watchdog); + h->watchdog.function = fail_all_cmds; + h->watchdog.data = (unsigned long) h->ctlr; + while (1) { + /* check heartbeat timer */ + current_timer = readl(&h->cfgtable->HeartBeat); + current_timer &= 0x0fffffff; + if (heartbeat_timer == current_timer) { + fail_all_cmds(h->ctlr); + break; + } + else + heartbeat_timer = current_timer; + + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(h->monitor_period * HZ); + h->watchdog.expires = jiffies + HZ * h->monitor_deadline; + add_timer(&h->watchdog); + /* send down a trivial command (no op message) to ctlr */ + rc = sendcmd_withirq(3, h->ctlr, NULL, 0, 0, 0, 0, TYPE_MSG); + del_timer(&h->watchdog); + if (!CTLR_IS_ALIVE(h)) + break; + if (signal_pending(current)) { + printk(KERN_WARNING "%s received signal.\n", + current->comm); + break; + } + if (h->monitor_period == 0) /* zero period means exit thread */ + break; + } + printk(KERN_INFO "%s exiting.\n", current->comm); + spin_lock_irqsave(&io_request_lock, flags); + h->monitor_started = 0; + h->monitor_thread = NULL; + spin_unlock_irqrestore(&io_request_lock, flags); + return 0; +} +static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd, + unsigned long count, int (*cciss_monitor)(void *), int *rc) +{ + unsigned long flags; + unsigned int new_period, old_period, new_deadline, old_deadline; + + if (strncmp("monitor", cmd, 7) == 0) { + new_period = simple_strtol(cmd + 8, NULL, 10); + spin_lock_irqsave(&io_request_lock, flags); + new_deadline = h->monitor_deadline; + spin_unlock_irqrestore(&io_request_lock, flags); + } else if (strncmp("deadline", cmd, 8) == 0) { + new_deadline = simple_strtol(cmd + 9, NULL, 10); + spin_lock_irqsave(&io_request_lock, flags); + new_period = h->monitor_period; + spin_unlock_irqrestore(&io_request_lock, flags); + } else + return -1; + if (new_period != 0 && new_period < CCISS_MIN_PERIOD) + new_period = CCISS_MIN_PERIOD; + if (new_period > CCISS_MAX_PERIOD) + new_period = CCISS_MAX_PERIOD; + if (new_deadline >= new_period) { + new_deadline = new_period - 5; 
+ printk(KERN_INFO "setting deadline to %d\n", new_deadline); + } + spin_lock_irqsave(&io_request_lock, flags); + if (h->monitor_started != 0) { + old_period = h->monitor_period; + old_deadline = h->monitor_deadline; + h->monitor_period = new_period; + h->monitor_deadline = new_deadline; + spin_unlock_irqrestore(&io_request_lock, flags); + if (new_period == 0) { + printk(KERN_INFO "cciss%d: stopping monitor thread\n", + h->ctlr); + *rc = count; + return 0; + } + if (new_period != old_period) + printk(KERN_INFO "cciss%d: adjusting monitor thread " + "period from %d to %d seconds\n", + h->ctlr, old_period, new_period); + if (new_deadline != old_deadline) + printk(KERN_INFO "cciss%d: adjusting monitor thread " + "deadline from %d to %d seconds\n", + h->ctlr, old_deadline, new_deadline); + *rc = count; + return 0; + } + h->monitor_started = 1; + h->monitor_period = new_period; + h->monitor_deadline = new_deadline; + spin_unlock_irqrestore(&io_request_lock, flags); + kernel_thread(cciss_monitor, h, 0); + *rc = count; + return 0; +} +static void kill_monitor_thread(ctlr_info_t *h) +{ + if (h->monitor_thread) + send_sig(SIGKILL, h->monitor_thread, 1); +} +#else +#define kill_monitor_thread(h) +#endif /* * This is it. Find all the controllers and register them. I really hate * stealing all these major device numbers. @@ -2861,6 +3080,7 @@ sprintf(hba[i]->devname, "cciss%d", i); hba[i]->ctlr = i; hba[i]->pdev = pdev; + ASSERT_CTLR_ALIVE(hba[i]); if (register_blkdev(MAJOR_NR+i, hba[i]->devname, &cciss_fops)) { printk(KERN_ERR "cciss: Unable to get major number " @@ -2993,14 +3213,17 @@ "already be removed \n"); return; } - /* Turn board interrupts off and send the flush cache command */ - /* sendcmd will turn off interrupt, and send the flush... - * To write all data in the battery backed cache to disks */ + kill_monitor_thread(hba[i]); + /* no sense in trying to flush a dead board's cache. */ + if (CTLR_IS_ALIVE(hba[i])) { + /* Turn board interrupts off and flush the cache */ + /* write all data in the battery backed cache to disks */ memset(flush_buf, 0, 4); - return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4,0,0,0, NULL); - if (return_code != IO_OK) { + return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, + 4, 0, 0, 0, NULL); + if (return_code != IO_OK) printk(KERN_WARNING - "Error Flushing cache on controller %d\n", i); + "cciss%d: Error flushing cache\n", i); } free_irq(hba[i]->intr, hba[i]); pci_set_drvdata(pdev, NULL); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/block/cciss.h linux.22-ac2/drivers/block/cciss.h --- linux.vanilla/drivers/block/cciss.h 2003-08-28 16:45:33.000000000 +0100 +++ linux.22-ac2/drivers/block/cciss.h 2003-09-01 13:54:21.000000000 +0100 @@ -91,6 +91,40 @@ #ifdef CONFIG_CISS_SCSI_TAPE void *scsi_ctlr; /* ptr to structure containing scsi related stuff */ #endif +#ifdef CONFIG_CISS_MONITOR_THREAD + struct timer_list watchdog; + struct task_struct *monitor_thread; + unsigned int monitor_period; + unsigned int monitor_deadline; + unsigned char alive; + unsigned char monitor_started; +#define CCISS_MIN_PERIOD 10 +#define CCISS_MAX_PERIOD 3600 +#define CTLR_IS_ALIVE(h) (h->alive) +#define ASSERT_CTLR_ALIVE(h) { h->alive = 1; \ + h->monitor_period = 0; \ + h->monitor_started = 0; } +#define MONITOR_STATUS_PATTERN "Status: %s\n" +#define CTLR_STATUS(h) CTLR_IS_ALIVE(h) ? 
"operational" : "failed" +#define MONITOR_PERIOD_PATTERN "Monitor thread period: %d\n" +#define MONITOR_PERIOD_VALUE(h) (h->monitor_period) +#define MONITOR_DEADLINE_PATTERN "Monitor thread deadline: %d\n" +#define MONITOR_DEADLINE_VALUE(h) (h->monitor_deadline) +#define START_MONITOR_THREAD(h, cmd, count, cciss_monitor, rc) \ + start_monitor_thread(h, cmd, count, cciss_monitor, rc) +#else + +#define MONITOR_PERIOD_PATTERN "%s" +#define MONITOR_PERIOD_VALUE(h) "" +#define MONITOR_DEADLINE_PATTERN "%s" +#define MONITOR_DEADLINE_VALUE(h) "" +#define MONITOR_STATUS_PATTERN "%s\n" +#define CTLR_STATUS(h) "" +#define CTLR_IS_ALIVE(h) (1) +#define ASSERT_CTLR_ALIVE(h) +#define START_MONITOR_THREAD(a,b,c,d,rc) (*rc == 0) + +#endif }; /* Defining the diffent access_menthods */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/block/Config.in linux.22-ac2/drivers/block/Config.in --- linux.vanilla/drivers/block/Config.in 2002-11-29 21:27:13.000000000 +0000 +++ linux.22-ac2/drivers/block/Config.in 2003-08-13 14:46:11.000000000 +0100 @@ -36,6 +36,7 @@ dep_tristate 'Compaq SMART2 support' CONFIG_BLK_CPQ_DA $CONFIG_PCI dep_tristate 'Compaq Smart Array 5xxx support' CONFIG_BLK_CPQ_CISS_DA $CONFIG_PCI dep_mbool ' SCSI tape drive support for Smart Array 5xxx' CONFIG_CISS_SCSI_TAPE $CONFIG_BLK_CPQ_CISS_DA $CONFIG_SCSI +dep_mbool ' Enable monitor thread' CONFIG_CISS_MONITOR_THREAD $CONFIG_BLK_CPQ_CISS_DA dep_tristate 'Mylex DAC960/DAC1100 PCI RAID Controller support' CONFIG_BLK_DEV_DAC960 $CONFIG_PCI dep_tristate 'Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)' CONFIG_BLK_DEV_UMEM $CONFIG_PCI $CONFIG_EXPERIMENTAL diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/block/DAC960.c linux.22-ac2/drivers/block/DAC960.c --- linux.vanilla/drivers/block/DAC960.c 2002-11-29 21:27:13.000000000 +0000 +++ linux.22-ac2/drivers/block/DAC960.c 2003-06-29 16:09:59.000000000 +0100 @@ -1133,6 +1133,26 @@ DAC960PU/PD/PL 3.51 and above DAC960PU/PD/PL/P 2.73 and above */ +#if defined(__alpha__) + /* + DEC Alpha machines were often equipped with DAC960 cards that were + OEMed from Mylex, and had their own custom firmware. Version 2.70, + the last custom FW revision to be released by DEC for these older + controllers, appears to work quite well with this driver. 
+ + Cards tested successfully were several versions each of the PD and + PU, called by DEC the KZPSC and KZPAC, respectively, and having + the Manufacturer Numbers (from Mylex), usually on a sticker on the + back of the board, of: + + KZPSC D040347 (1ch) or D040348 (2ch) or D040349 (3ch) + KZPAC D040395 (1ch) or D040396 (2ch) or D040397 (3ch) + */ +# define FIRMWARE_27x "2.70" +#else +# define FIRMWARE_27x "2.73" +#endif + if (Enquiry2.FirmwareID.MajorVersion == 0) { Enquiry2.FirmwareID.MajorVersion = @@ -1152,7 +1172,7 @@ (Controller->FirmwareVersion[0] == '3' && strcmp(Controller->FirmwareVersion, "3.51") >= 0) || (Controller->FirmwareVersion[0] == '2' && - strcmp(Controller->FirmwareVersion, "2.73") >= 0))) + strcmp(Controller->FirmwareVersion, FIRMWARE_27x) >= 0))) { DAC960_Failure(Controller, "FIRMWARE VERSION VERIFICATION"); DAC960_Error("Firmware Version = '%s'\n", Controller, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/block/elevator.c linux.22-ac2/drivers/block/elevator.c --- linux.vanilla/drivers/block/elevator.c 2003-06-14 00:11:29.000000000 +0100 +++ linux.22-ac2/drivers/block/elevator.c 2003-06-29 16:09:59.000000000 +0100 @@ -27,6 +27,25 @@ #include #include + +static int compatible(struct request *req, struct buffer_head *bh, + request_queue_t *q, int rw, + int count, int max_sectors) +{ + if (q->head_active) + return 0; + if (req->waiting) + return 0; + if (req->rq_dev != bh->b_rdev) + return 0; + if (req->cmd != rw) + return 0; + if (req->nr_sectors + count > max_sectors) + return 0; + return 1; +} + + /* * This is a bit tricky. It's given that bh and rq are for the same * device, but the next request might of course not be. Run through @@ -83,22 +102,38 @@ struct list_head *entry = &q->queue_head; unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE; struct request *__rq; - int backmerge_only = 0; + + /* + * Quick one-entry cache of last merge + * nb. we do no latency accounting this way. 
+ */ + + if (q->last_request) { + struct request *__rq = q->last_request; - while (!backmerge_only && (entry = entry->prev) != head) { + if (compatible(__rq, bh, q, rw, count, max_sectors)) { + if (__rq->sector + __rq->nr_sectors == bh->b_rsector) { + *req = __rq; + return ELEVATOR_BACK_MERGE; + } + } + } + + + while ((entry = entry->prev) != head) { __rq = blkdev_entry_to_request(entry); /* * we can't insert beyond a zero sequence point */ if (__rq->elevator_sequence <= 0) - backmerge_only = 1; + break; if (__rq->waiting) continue; if (__rq->rq_dev != bh->b_rdev) continue; - if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head) && !backmerge_only) + if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head)) *req = __rq; if (__rq->cmd != rw) continue; @@ -108,7 +143,7 @@ ret = ELEVATOR_BACK_MERGE; *req = __rq; break; - } else if (__rq->sector - count == bh->b_rsector && !backmerge_only) { + } else if (__rq->sector - count == bh->b_rsector) { ret = ELEVATOR_FRONT_MERGE; __rq->elevator_sequence--; *req = __rq; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/block/ll_rw_blk.c linux.22-ac2/drivers/block/ll_rw_blk.c --- linux.vanilla/drivers/block/ll_rw_blk.c 2003-08-28 16:45:33.000000000 +0100 +++ linux.22-ac2/drivers/block/ll_rw_blk.c 2003-07-22 18:30:53.000000000 +0100 @@ -379,8 +379,12 @@ { if (q->plugged) { q->plugged = 0; - if (!list_empty(&q->queue_head)) + if (!list_empty(&q->queue_head)) { + if (q->last_request == + blkdev_entry_next_request(&q->queue_head)) + q->last_request = NULL; q->request_fn(q); + } } } @@ -528,6 +532,7 @@ q->plug_tq.routine = &generic_unplug_device; q->plug_tq.data = q; q->plugged = 0; + q->last_request = NULL; q->can_throttle = 0; /* @@ -867,6 +872,7 @@ * inserted at elevator_merge time */ list_add(&req->queue, insert_here); + q->last_request = req; } /* @@ -889,24 +895,26 @@ if (q->can_throttle) oversized_batch = blk_oversized_queue_batch(q); - rl->count++; - /* - * paranoia check - */ - if (req->cmd == READ || req->cmd == WRITE) - rl->pending[req->cmd]--; - if (rl->pending[READ] > q->nr_requests) - printk("blk: reads: %u\n", rl->pending[READ]); - if (rl->pending[WRITE] > q->nr_requests) - printk("blk: writes: %u\n", rl->pending[WRITE]); - if (rl->pending[READ] + rl->pending[WRITE] > q->nr_requests) - printk("blk: r/w: %u + %u > %u\n", rl->pending[READ], rl->pending[WRITE], q->nr_requests); - list_add(&req->queue, &rl->free); - if (rl->count >= q->batch_requests && !oversized_batch) { + if (q->last_request == req) + q->last_request = NULL; + rl->count++; + /* + * paranoia check + */ + if (req->cmd == READ || req->cmd == WRITE) + rl->pending[req->cmd]--; + if (rl->pending[READ] > q->nr_requests) + printk("blk: reads: %u\n", rl->pending[READ]); + if (rl->pending[WRITE] > q->nr_requests) + printk("blk: writes: %u\n", rl->pending[WRITE]); + if (rl->pending[READ] + rl->pending[WRITE] > q->nr_requests) + printk("blk: r/w: %u + %u > %u\n", rl->pending[READ], rl->pending[WRITE], q->nr_requests); + list_add(&req->queue, &rl->free); + if (rl->count >= q->batch_requests && !oversized_batch) { smp_mb(); if (waitqueue_active(&q->wait_for_requests)) wake_up(&q->wait_for_requests); - } + } } } @@ -942,6 +950,7 @@ req->bhtail = next->bhtail; req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; list_del(&next->queue); + q->last_request = req; /* One last thing: we have removed a request, so we now have one less expected IO to complete for accounting purposes. 
*/ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/cdrom/cdrom.c linux.22-ac2/drivers/cdrom/cdrom.c --- linux.vanilla/drivers/cdrom/cdrom.c 2002-11-29 21:27:13.000000000 +0000 +++ linux.22-ac2/drivers/cdrom/cdrom.c 2003-06-29 16:10:14.000000000 +0100 @@ -467,6 +467,10 @@ if ((fp->f_mode & FMODE_WRITE) && !CDROM_CAN(CDC_DVD_RAM)) return -EROFS; + + /* If the device is opened O_EXCL but there are other openers, return busy */ + if ( (fp->f_flags & O_EXCL) && (cdi->use_count>0) ) + return -EBUSY; /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/agp/agpgart_be.c linux.22-ac2/drivers/char/agp/agpgart_be.c --- linux.vanilla/drivers/char/agp/agpgart_be.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/agp/agpgart_be.c 2003-08-17 16:46:08.000000000 +0100 @@ -4735,6 +4735,412 @@ #endif /* CONFIG_AGP_HP_ZX1 */ +#ifdef CONFIG_AGP_ATI +static aper_size_info_lvl2 ati_generic_sizes[7] = +{ + {2048, 524288, 0x0000000c}, + {1024, 262144, 0x0000000a}, + {512, 131072, 0x00000008}, + {256, 65536, 0x00000006}, + {128, 32768, 0x00000004}, + {64, 16384, 0x00000002}, + {32, 8192, 0x00000000} +}; + +static gatt_mask ati_generic_masks[] = +{ + {0x00000001, 0} +}; + +typedef struct _ati_page_map { + unsigned long *real; + unsigned long *remapped; +} ati_page_map; + +static struct _ati_generic_private { + volatile u8 *registers; + ati_page_map **gatt_pages; + int num_tables; +} ati_generic_private; + +static int ati_create_page_map(ati_page_map *page_map) +{ + int i, err = 0; + + page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); + if (page_map->real == NULL) + return -ENOMEM; + + SetPageReserved(virt_to_page(page_map->real)); + /* + * fredi - WARNING: added looking at the changes during + * 2.4.20. I dont know if it's needed though. + */ +#ifdef CONFIG_X86 + err = change_page_attr(virt_to_page(page_map->real), 1, PAGE_KERNEL_NOCACHE); +#endif + CACHE_FLUSH(); + page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), + PAGE_SIZE); + if (page_map->remapped == NULL || err) { + ClearPageReserved(virt_to_page(page_map->real)); + free_page((unsigned long) page_map->real); + page_map->real = NULL; + return -ENOMEM; + } + CACHE_FLUSH(); + + for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) + page_map->remapped[i] = agp_bridge.scratch_page; + + return 0; +} + +static void ati_free_page_map(ati_page_map *page_map) +{ + /* + * fredi - WARNING: added looking at the changes during + * 2.4.20. I dont know if it's needed though. 
+ */ +#ifdef CONFIG_X86 + change_page_attr(virt_to_page(page_map->real), 1, PAGE_KERNEL); +#endif + iounmap(page_map->remapped); + ClearPageReserved(virt_to_page(page_map->real)); + free_page((unsigned long) page_map->real); +} + +static void ati_free_gatt_pages(void) +{ + int i; + ati_page_map **tables; + ati_page_map *entry; + + tables = ati_generic_private.gatt_pages; + for(i = 0; i < ati_generic_private.num_tables; i++) { + entry = tables[i]; + if (entry != NULL) { + if (entry->real != NULL) + ati_free_page_map(entry); + kfree(entry); + } + } + kfree(tables); +} + +static int ati_create_gatt_pages(int nr_tables) +{ + ati_page_map **tables; + ati_page_map *entry; + int retval = 0; + int i; + + tables = kmalloc((nr_tables + 1) * sizeof(ati_page_map *), + GFP_KERNEL); + if (tables == NULL) + return -ENOMEM; + + memset(tables, 0, sizeof(ati_page_map *) * (nr_tables + 1)); + for (i = 0; i < nr_tables; i++) { + entry = kmalloc(sizeof(ati_page_map), GFP_KERNEL); + if (entry == NULL) { + retval = -ENOMEM; + break; + } + memset(entry, 0, sizeof(ati_page_map)); + tables[i] = entry; + retval = ati_create_page_map(entry); + if (retval != 0) break; + } + ati_generic_private.num_tables = nr_tables; + ati_generic_private.gatt_pages = tables; + + if (retval != 0) ati_free_gatt_pages(); + + return retval; +} + +/* + *Since we don't need contigious memory we just try + * to get the gatt table once + */ + +#define GET_PAGE_DIR_OFF(addr) (addr >> 22) +#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ + GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr)) +#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) +#undef GET_GATT +#define GET_GATT(addr) (ati_generic_private.gatt_pages[\ + GET_PAGE_DIR_IDX(addr)]->remapped) + +static int ati_insert_memory(agp_memory * mem, + off_t pg_start, int type) +{ + int i, j, num_entries; + unsigned long *cur_gatt; + unsigned long addr; + + num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries; + + if (type != 0 || mem->type != 0) + return -EINVAL; + + if ((pg_start + mem->page_count) > num_entries) + return -EINVAL; + + j = pg_start; + while (j < (pg_start + mem->page_count)) { + addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + cur_gatt = GET_GATT(addr); + if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) + return -EBUSY; + j++; + } + + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + cur_gatt = GET_GATT(addr); + cur_gatt[GET_GATT_OFF(addr)] = + agp_bridge.mask_memory(mem->memory[i], mem->type); + } + agp_bridge.tlb_flush(mem); + return 0; +} + +static int ati_remove_memory(agp_memory * mem, off_t pg_start, + int type) +{ + int i; + unsigned long *cur_gatt; + unsigned long addr; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr; + cur_gatt = GET_GATT(addr); + cur_gatt[GET_GATT_OFF(addr)] = + (unsigned long) agp_bridge.scratch_page; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static int ati_create_gatt_table(void) +{ + aper_size_info_lvl2 *value; + ati_page_map page_dir; + unsigned long addr; + int retval; + u32 temp; + int i; + aper_size_info_lvl2 *current_size; + + value = A_SIZE_LVL2(agp_bridge.current_size); + retval = ati_create_page_map(&page_dir); + if (retval != 0) + return retval; + + retval = ati_create_gatt_pages(value->num_entries / 1024); + if (retval != 0) { + 
ati_free_page_map(&page_dir); + return retval; + } + + agp_bridge.gatt_table_real = (u32 *)page_dir.real; + agp_bridge.gatt_table = (u32 *)page_dir.remapped; + agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real); + + /* Write out the size register */ + current_size = A_SIZE_LVL2(agp_bridge.current_size); + + if ((agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS100) || + (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS200) || + (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS250)) { + pci_read_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, &temp); + temp = (((temp & ~(0x0000000e)) | current_size->size_value) + | 0x00000001); + pci_write_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, temp); + pci_read_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, &temp); + } else { + pci_read_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, &temp); + temp = (((temp & ~(0x0000000e)) | current_size->size_value) + | 0x00000001); + pci_write_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, temp); + pci_read_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, &temp); + } + + /* + * Get the address for the gart region. + * This is a bus address even on the alpha, b/c its + * used to program the agp master not the cpu + */ + pci_read_config_dword(agp_bridge.dev, ATI_APBASE, &temp); + addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + agp_bridge.gart_bus_addr = addr; + + /* Calculate the agp offset */ + for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { + page_dir.remapped[GET_PAGE_DIR_OFF(addr)] = + virt_to_bus(ati_generic_private.gatt_pages[i]->real); + page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001; + } + + return 0; +} + +static int ati_free_gatt_table(void) +{ + ati_page_map page_dir; + + page_dir.real = (unsigned long *)agp_bridge.gatt_table_real; + page_dir.remapped = (unsigned long *)agp_bridge.gatt_table; + + ati_free_gatt_pages(); + ati_free_page_map(&page_dir); + return 0; +} + +static int ati_fetch_size(void) +{ + int i; + u32 temp; + aper_size_info_lvl2 *values; + + if ((agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS100) || + (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS200) || + (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS250)) { + pci_read_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, &temp); + } else { + pci_read_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, &temp); + } + + temp = (temp & 0x0000000e); + values = A_SIZE_LVL2(agp_bridge.aperture_sizes); + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int ati_configure(void) +{ + u32 temp; + + /* Get the memory mapped registers */ + pci_read_config_dword(agp_bridge.dev, ATI_GART_MMBASE_ADDR, &temp); + temp = (temp & 0xfffff000); + ati_generic_private.registers = (volatile u8 *) ioremap(temp, 4096); + + if ((agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS100) || + (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS200) || + (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS250)) { + pci_write_config_dword(agp_bridge.dev, ATI_RS100_IG_AGPMODE, 0x20000); + } else { + pci_write_config_dword(agp_bridge.dev, ATI_RS300_IG_AGPMODE, 0x20000); + } + + /* address to map too */ + /* + pci_read_config_dword(agp_bridge.dev, ATI_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + printk(KERN_INFO "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr); + */ + OUTREG32(ati_generic_private.registers, 
ATI_GART_FEATURE_ID, 0x60000); + + /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */ + pci_read_config_dword(agp_bridge.dev, 4, &temp); + pci_write_config_dword(agp_bridge.dev, 4, temp | (1<<14)); + + /* Write out the address of the gatt table */ + OUTREG32(ati_generic_private.registers, ATI_GART_BASE, + agp_bridge.gatt_bus_addr); + + /* Flush the tlb */ + OUTREG32(ati_generic_private.registers, ATI_GART_CACHE_CNTRL, 1); + return 0; +} + +static void ati_cleanup(void) +{ + aper_size_info_lvl2 *previous_size; + u32 temp; + + previous_size = A_SIZE_LVL2(agp_bridge.previous_size); + + /* Write back the previous size and disable gart translation */ + if ((agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS100) || + (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS200) || + (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS250)) { + pci_read_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, &temp); + temp = ((temp & ~(0x0000000f)) | previous_size->size_value); + pci_write_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, temp); + } else { + pci_read_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, &temp); + temp = ((temp & ~(0x0000000f)) | previous_size->size_value); + pci_write_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, temp); + } + iounmap((void *) ati_generic_private.registers); +} + +static void ati_tlbflush(agp_memory * mem) +{ + OUTREG32(ati_generic_private.registers, ATI_GART_CACHE_CNTRL, 1); +} + +static unsigned long ati_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + return addr | agp_bridge.masks[0].mask; +} + +static int __init ati_generic_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = ati_generic_masks; + agp_bridge.aperture_sizes = (void *) ati_generic_sizes; + agp_bridge.size_type = LVL2_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = (void *) &ati_generic_private; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = ati_configure; + agp_bridge.fetch_size = ati_fetch_size; + agp_bridge.cleanup = ati_cleanup; + agp_bridge.tlb_flush = ati_tlbflush; + agp_bridge.mask_memory = ati_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = ati_create_gatt_table; + agp_bridge.free_gatt_table = ati_free_gatt_table; + agp_bridge.insert_memory = ati_insert_memory; + agp_bridge.remove_memory = ati_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} +#endif /* CONFIG_AGP_ATI */ + /* per-chipset initialization data. 
* note -- all chipsets for a single vendor MUST be grouped together */ @@ -5123,6 +5529,12 @@ "Via", "Apollo Pro KT400", via_generic_setup }, + { PCI_DEVICE_ID_VIA_CLE266, + PCI_VENDOR_ID_VIA, + VIA_CLE266, + "Via", + "CLE266", + via_generic_setup }, { PCI_DEVICE_ID_VIA_P4M266, PCI_VENDOR_ID_VIA, VIA_APOLLO_P4M266, @@ -5167,6 +5579,51 @@ hp_zx1_setup }, #endif +#ifdef CONFIG_AGP_ATI + { PCI_DEVICE_ID_ATI_RS100, + PCI_VENDOR_ID_ATI, + ATI_RS100, + "ATI", + "IGP320/M", + ati_generic_setup }, + { PCI_DEVICE_ID_ATI_RS200, + PCI_VENDOR_ID_ATI, + ATI_RS200, + "ATI", + "IGP330/340/345/350/M", + ati_generic_setup }, + { PCI_DEVICE_ID_ATI_RS250, + PCI_VENDOR_ID_ATI, + ATI_RS250, + "ATI", + "IGP7000/M", + ati_generic_setup }, + { PCI_DEVICE_ID_ATI_RS300_100, + PCI_VENDOR_ID_ATI, + ATI_RS300_100, + "ATI", + "IGP9100/M", + ati_generic_setup }, + { PCI_DEVICE_ID_ATI_RS300_133, + PCI_VENDOR_ID_ATI, + ATI_RS300_133, + "ATI", + "IGP9100/M", + ati_generic_setup }, + { PCI_DEVICE_ID_ATI_RS300_166, + PCI_VENDOR_ID_ATI, + ATI_RS300_166, + "ATI", + "IGP9100/M", + ati_generic_setup }, + { PCI_DEVICE_ID_ATI_RS300_200, + PCI_VENDOR_ID_ATI, + ATI_RS300_200, + "ATI", + "IGP9100/M", + ati_generic_setup }, +#endif /* CONFIG_AGP_ATI */ + { 0, }, /* dummy final entry, always present */ }; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/agp/agp.h linux.22-ac2/drivers/char/agp/agp.h --- linux.vanilla/drivers/char/agp/agp.h 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/agp/agp.h 2003-07-31 13:48:29.000000000 +0100 @@ -292,6 +292,30 @@ #ifndef PCI_DEVICE_ID_AL_M1671_0 #define PCI_DEVICE_ID_AL_M1671_0 0x1671 #endif +#ifndef PCI_VENDOR_ID_ATI +#define PCI_VENDOR_ID_ATI 0x1002 +#endif +#ifndef PCI_DEVICE_ID_ATI_RS100 +#define PCI_DEVICE_ID_ATI_RS100 0xcab0 +#endif +#ifndef PCI_DEVICE_ID_ATI_RS200 +#define PCI_DEVICE_ID_ATI_RS200 0xcab2 +#endif +#ifndef PCI_DEVICE_ID_ATI_RS250 +#define PCI_DEVICE_ID_ATI_RS250 0xcab3 +#endif +#ifndef PCI_DEVICE_ID_ATI_RS300_100 +#define PCI_DEVICE_ID_ATI_RS300_100 0x5830 +#endif +#ifndef PCI_DEVICE_ID_ATI_RS300_133 +#define PCI_DEVICE_ID_ATI_RS300_133 0x5831 +#endif +#ifndef PCI_DEVICE_ID_ATI_RS300_166 +#define PCI_DEVICE_ID_ATI_RS300_166 0x5832 +#endif +#ifndef PCI_DEVICE_ID_ATI_RS300_200 +#define PCI_DEVICE_ID_ATI_RS300_200 0x5833 +#endif /* intel register */ #define INTEL_APBASE 0x10 @@ -475,4 +499,18 @@ #define HP_ZX1_PDIR_BASE 0x320 #define HP_ZX1_CACHE_FLUSH 0x428 +/* ATI register */ +#define ATI_APBASE 0x10 +#define ATI_GART_MMBASE_ADDR 0x14 +#define ATI_RS100_APSIZE 0xac +#define ATI_RS300_APSIZE 0xf8 +#define ATI_RS100_IG_AGPMODE 0xb0 +#define ATI_RS300_IG_AGPMODE 0xfc + +#define ATI_GART_FEATURE_ID 0x00 +#define ATI_GART_BASE 0x04 +#define ATI_GART_CACHE_SZBASE 0x08 +#define ATI_GART_CACHE_CNTRL 0x0c +#define ATI_GART_CACHE_ENTRY_CNTRL 0x10 + #endif /* _AGP_BACKEND_PRIV_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/alim6117_wdt.c linux.22-ac2/drivers/char/alim6117_wdt.c --- linux.vanilla/drivers/char/alim6117_wdt.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/alim6117_wdt.c 2003-08-28 22:44:13.000000000 +0100 @@ -0,0 +1,446 @@ +/** + * ALi M6117 Watchdog timer driver. + * + * (c) Copyright 2003 Federico Bareilles , + * Instituto Argentino de Radio Astronomia (IAR). 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * The author does NOT admit liability nor provide warranty for any + * of this software. This material is provided "AS-IS" in the hope + * that it may be useful for others. + * + * Based on alim1535_wdt.c by Alan Cox and other WDT by several + * authors... + * + * ALi (Acer Labs) M6117 is an i386 that has the watchdog timer + * built in. Watchdog uses a 32.768KHz clock with a 24 bits + * counter. The timer ranges is from 30.5u sec to 512 sec with + * resolution 30.5u sec. When the timer times out; a system reset, + * NMI or IRQ may happen. This can be decided by the user's + * programming. + **/ + +#define ALI_WDT_VERSION "0.01c" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OUR_NAME "alim6117_wdt" + +/* Port definitions: */ +#define M6117_PORT_INDEX 0x22 +#define M6117_PORT_DATA 0x23 +/* YES, the two unused ports of 8259: + * 0020-003f : pic1 + * + * The 8259 Interrup Controller uses four port addresses (0x20 through + * 0x23). Although IBM documentation indicates that these four port + * addresses are reserved for the 8259, only the two lower ports (0x20 + * and 0x21) ar documented as usable by programers. The two ports + * (0x22 and 0x23) are used only when reprogramming the 8259 for + * special dedicated systems that operate in modes which are not + * compatible with normal IBM PC operation (this case). + **/ + +/* Index for ALI M6117: */ +#define ALI_LOCK_REGISTER 0x13 +#define ALI_WDT 0x37 +#define ALI_WDT_SELECT 0x38 +#define ALI_WDT_DATA0 0x39 +#define ALI_WDT_DATA1 0x3a +#define ALI_WDT_DATA2 0x3b +#define ALI_WDT_CTRL 0x3c + +/* Time out generates signal select: */ +#define WDT_SIGNAL_IRQ3 0x10 +#define WDT_SIGNAL_IRQ4 0x20 +#define WDT_SIGNAL_IRQ5 0x30 +#define WDT_SIGNAL_IRQ6 0x40 +#define WDT_SIGNAL_IRQ7 0x50 +#define WDT_SIGNAL_IRQ9 0x60 +#define WDT_SIGNAL_IRQ10 0x70 +#define WDT_SIGNAL_IRQ11 0x80 +#define WDT_SIGNAL_IRQ12 0x90 +#define WDT_SIGNAL_IRQ14 0xa0 +#define WDT_SIGNAL_IRQ15 0xb0 +#define WDT_SIGNAL_NMI 0xc0 +#define WDT_SIGNAL_SRSET 0xd0 +/* set signal to use: */ +#define WDT_SIGNAL WDT_SIGNAL_SRSET + +/* ALI_WD_TIME_FACTOR is 1000000/30.5 */ +#define ALI_WD_TIME_FACTOR 32787 /* (from seconds to ALi counter) */ + +#ifdef CONFIG_WATCHDOG_NOWAYOUT +static int nowayout = 1; +#else +static int nowayout = 0; +#endif + +static unsigned long wdt_is_open; +static char ali_expect_close; +static int wdt_timeout = 60; +static int wdt_run = 0; +static spinlock_t ali_lock; /* Needed for 2.6/pre-empt to avoid clashing + index writes */ + +MODULE_PARM(nowayout, "i"); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)"); + +MODULE_PARM(wdt_timeout, "i"); +MODULE_PARM_DESC(wdt_timeout,"Watchdog timeout in seconds"); + + +static int alim6117_read(int index) +{ + outb(index, M6117_PORT_INDEX); + return inb(M6117_PORT_DATA); +} + +static void alim6117_write(int index, int data) +{ + outb(index, M6117_PORT_INDEX); + outb(data, M6117_PORT_DATA); +} + +static void alim6117_ulock_conf_register(void) +{ + alim6117_write(ALI_LOCK_REGISTER, 0xc5); +} + +static void alim6117_lock_conf_register(void) +{ + alim6117_write(ALI_LOCK_REGISTER, 0x00); +} + +static void alim6117_set_timeout(int time) +{ + u32 timeout_bits; + + 
timeout_bits = time * ALI_WD_TIME_FACTOR; + alim6117_write(ALI_WDT_DATA0, timeout_bits & 0xff); + alim6117_write(ALI_WDT_DATA1, (timeout_bits & 0xff00) >> 8); + alim6117_write(ALI_WDT_DATA2, (timeout_bits & 0xff0000) >> 16); + + return; +} + +static void alim6117_wdt_disable(void) +{ + int val = alim6117_read(ALI_WDT); + + val &= 0xbf; /* 1011|1111 */ + alim6117_write(ALI_WDT, val); +} + +static void alim6117_wdt_enable(void) +{ + int val = alim6117_read(ALI_WDT); + + val |= 0x40; /* 0100|0000 */ + alim6117_write(ALI_WDT, val); +} + +static void alim6117_wdt_signal_select(int signal) +{ + int val = alim6117_read(ALI_WDT_SELECT); + + val &= 0xf0; + val |= signal; + alim6117_write(ALI_WDT_SELECT, val); +} + +static void ali_wdt_ping(void) +{ + int val; + + /* if no run, no ping; wdt start when ping it. */ + if (wdt_run) { + spin_lock(&ali_lock); + alim6117_ulock_conf_register(); + val = alim6117_read(ALI_WDT); + val &= ~0x40; /* 0100|0000 */ + alim6117_write(ALI_WDT, val); + val |= 0x40; /* 0100|0000 */ + alim6117_write(ALI_WDT, val); + alim6117_lock_conf_register(); + spin_unlock(&ali_lock); + /* + printk(KERN_INFO OUR_NAME ": WDT ping...\n"); + */ + } else { + printk(KERN_WARNING OUR_NAME ": WDT is stopped\n"); + } +} + +static void ali_wdt_start(void) +{ + spin_lock(&ali_lock); + alim6117_ulock_conf_register(); + alim6117_wdt_disable(); + alim6117_set_timeout(wdt_timeout); + alim6117_wdt_signal_select(WDT_SIGNAL); + alim6117_wdt_enable(); + alim6117_lock_conf_register(); + spin_unlock(&ali_lock); + wdt_run = 1; +} + +static void ali_wdt_stop(void) +{ + int val; + if ( wdt_run ) { + spin_lock(&ali_lock); + alim6117_ulock_conf_register(); + val = alim6117_read(ALI_WDT); + val &= ~0x40; /* 0100|0000 */ + alim6117_write(ALI_WDT, val); + alim6117_lock_conf_register(); + spin_unlock(&ali_lock); + wdt_run = 0; + /* + printk(KERN_INFO OUR_NAME ": WDT stop...\n"); + */ + } +} + +/** + * ali_wdt_notify_sys: + * @this: our notifier block + * @code: the event being reported + * @unused: unused + * + * Our notifier is called on system shutdowns. We want to turn the timer + * off at reboot otherwise the machine will reboot again during memory + * test or worse yet during the following fsck. + * + */ + +static int ali_wdt_notify_sys(struct notifier_block *this, + unsigned long code, void *unused) +{ + if (code == SYS_DOWN || code == SYS_HALT) { + /* Turn the timer off */ + ali_wdt_stop(); + } + return NOTIFY_DONE; +} + +/** + * ali_write - writes to ALi watchdog + * @file: file handle to the watchdog + * @data: user address of data + * @len: length of data + * @ppos: pointer to the file offset + * + * Handle a write to the ALi watchdog. Writing to the file pings + * the watchdog and resets it. Writing the magic 'V' sequence allows + * the next close to turn off the watchdog. 
+ */ + +static ssize_t ali_write(struct file *file, const char *data, + size_t len, loff_t * ppos) +{ + /* Can't seek (pwrite) on this device */ + if (ppos != &file->f_pos) + return -ESPIPE; + + /* Check if we've got the magic character 'V' and reload the timer */ + if (len) { + size_t i; + + ali_expect_close = 0; + + /* scan to see wether or not we got the magic character */ + for (i = 0; i != len; i++) { + u8 c; + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + ali_expect_close = 42; + } + ali_wdt_ping(); + return 1; + } + return 0; +} + +/** + * ali_ioctl - handle watchdog ioctls + * @inode: inode of the device + * @file: file handle to the device + * @cmd: watchdog command + * @arg: argument pointer + * + * Handle the watchdog ioctls supported by the ALi driver. + */ + +static int ali_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + int options; + + static struct watchdog_info ident = { + .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, + .firmware_version = 0, + .identity = "ALi M6117 WDT", + }; + + switch (cmd) { + case WDIOC_KEEPALIVE: + ali_wdt_ping(); + return 0; + case WDIOC_SETTIMEOUT: + if (get_user(options, (int *) arg)) + return -EFAULT; + if (options < 1 || options > 512) + return -EFAULT; + wdt_timeout = options; + ali_wdt_start(); + case WDIOC_GETTIMEOUT: + return put_user(wdt_timeout, (int *) arg); + case WDIOC_GETSUPPORT: + if (copy_to_user((struct watchdog_info *) arg, &ident, sizeof(ident))) + return -EFAULT; + return 0; + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0, (int *) arg); + case WDIOC_SETOPTIONS: + if (get_user(options, (int *) arg)) + return -EFAULT; + if (options & WDIOS_DISABLECARD) { + ali_wdt_stop(); + return 0; + } + if (options & WDIOS_ENABLECARD) { + ali_wdt_start(); + return 0; + } + return -EINVAL; + + default: + return -ENOTTY; + + } +} + +/** + * ali_open - handle open of ali watchdog + * @inode: inode of device + * @file: file handle to device + * + * Open the ALi watchdog device. Ensure only one person opens it + * at a time. Also start the watchdog running. + */ + +static int ali_open(struct inode *inode, struct file *file) +{ + if(test_and_set_bit(0, &wdt_is_open)) + return -EBUSY; + ali_wdt_start(); + + return 0; +} + +/** + * ali_release - close an ALi watchdog + * @inode: inode from VFS + * @file: file from VFS + * + * Close the ALi watchdog device. Actual shutdown of the timer + * only occurs if the magic sequence has been set or nowayout is + * disabled. + */ + +static int ali_release(struct inode *inode, struct file *file) +{ + if (ali_expect_close == 42 && !nowayout) { + ali_wdt_stop(); + } else { + printk(KERN_CRIT OUR_NAME + ": Unexpected close, not stopping watchdog!\n"); + } + ali_expect_close = 0; + clear_bit(0, &wdt_is_open); + + return 0; +} + +static struct file_operations ali_fops = { + .owner = THIS_MODULE, + .write = ali_write, + .ioctl = ali_ioctl, + .open = ali_open, + .release = ali_release, +}; + +static struct miscdevice ali_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &ali_fops, +}; + +/* + * The WDT needs to learn about soft shutdowns in order to turn the + * timebomb registers off. 
+ */ + +static struct notifier_block ali_notifier = { + .notifier_call = ali_wdt_notify_sys, + .next = NULL, + .priority = 0 +}; + +static int __init alim6117_init(void) +{ + if (wdt_timeout < 1 || wdt_timeout > 512){ + printk(KERN_ERR OUR_NAME + ": Timeout out of range (0 < wdt_timeout <= 512)\n"); + return -EIO; + } + + spin_lock_init(&ali_lock); + + if (misc_register(&ali_miscdev) != 0) { + printk(KERN_ERR OUR_NAME + ": cannot register watchdog device node.\n"); + return -EIO; + } + + register_reboot_notifier(&ali_notifier); + + printk(KERN_INFO "WDT driver for ALi M6117 v(" + ALI_WDT_VERSION ") initialising.\n"); + + return 0; +} + +static void __exit alim6117_exit(void) +{ + misc_deregister(&ali_miscdev); + unregister_reboot_notifier(&ali_notifier); + + ali_wdt_stop(); /* Stop the timer */ +} + +module_init(alim6117_init); +module_exit(alim6117_exit); + +MODULE_AUTHOR("Federico Bareilles "); +MODULE_DESCRIPTION("Driver for watchdog timer in ALi M6117 chip."); +MODULE_LICENSE("GPL"); +MODULE_SUPPORTED_DEVICE("watchdog"); + +EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/amd76x_pm.c linux.22-ac2/drivers/char/amd76x_pm.c --- linux.vanilla/drivers/char/amd76x_pm.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/amd76x_pm.c 2003-08-28 22:16:15.000000000 +0100 @@ -474,7 +474,7 @@ } #endif - +#ifdef AMD76X_POS /* * Activate sleep state via its ACPI register (PM1_CNT). */ @@ -489,8 +489,6 @@ outw(regshort, amd76x_pm_cfg.slp_reg); } - -#ifdef AMD76X_POS /* * Wrapper function to activate POS sleep state. */ @@ -577,16 +575,18 @@ int found; /* Find northbridge */ - found = pci_module_init(&amd_nb_driver); - if (found < 0) { + found = pci_register_driver(&amd_nb_driver); + if (found <= 0) { printk(KERN_ERR "amd76x_pm: Could not find northbridge\n"); + pci_unregister_driver(&amd_nb_driver); return 1; } /* Find southbridge */ - found = pci_module_init(&amd_sb_driver); - if (found < 0) { + found = pci_register_driver(&amd_sb_driver); + if (found <= 0) { printk(KERN_ERR "amd76x_pm: Could not find southbridge\n"); + pci_unregister_driver(&amd_sb_driver); pci_unregister_driver(&amd_nb_driver); return 1; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/amd7xx_tco.c linux.22-ac2/drivers/char/amd7xx_tco.c --- linux.vanilla/drivers/char/amd7xx_tco.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/amd7xx_tco.c 2003-07-31 14:36:31.000000000 +0100 @@ -1,6 +1,6 @@ /* * AMD 766/768 TCO Timer Driver - * (c) Copyright 2002 Zwane Mwaikambo + * (c) Copyright 2002 Zwane Mwaikambo * All Rights Reserved. 
* * Parts from; @@ -34,35 +34,48 @@ #include #include -#define AMDTCO_MODULE_VER "build 20020601" +#define AMDTCO_MODULE_VER "build 20021116" #define AMDTCO_MODULE_NAME "amd7xx_tco" #define PFX AMDTCO_MODULE_NAME ": " -#define MAX_TIMEOUT 38 /* max of 38 seconds */ +#define MAX_TIMEOUT 38 /* max of 38 seconds, although the system will only + * reset itself after the second timeout */ /* pmbase registers */ -#define GLOBAL_SMI_REG 0x2a -#define TCO_EN (1 << 1) /* bit 1 in global SMI register */ #define TCO_RELOAD_REG 0x40 /* bits 0-5 are current count, 6-7 are reserved */ #define TCO_INITVAL_REG 0x41 /* bits 0-5 are value to load, 6-7 are reserved */ #define TCO_TIMEOUT_MASK 0x3f +#define TCO_STATUS1_REG 0x44 #define TCO_STATUS2_REG 0x46 #define NDTO_STS2 (1 << 1) /* we're interested in the second timeout */ #define BOOT_STS (1 << 2) /* will be set if NDTO_STS2 was set before reboot */ #define TCO_CTRL1_REG 0x48 #define TCO_HALT (1 << 11) +#define NO_REBOOT (1 << 10) /* in DevB:3x48 */ -static char banner[] __initdata = KERN_INFO PFX AMDTCO_MODULE_VER; +static char banner[] __initdata = KERN_INFO PFX AMDTCO_MODULE_VER "\n"; static int timeout = 38; static u32 pmbase; /* PMxx I/O base */ static struct pci_dev *dev; static struct semaphore open_sem; -spinlock_t amdtco_lock; /* only for device access */ +static spinlock_t amdtco_lock; /* only for device access */ static int expect_close = 0; MODULE_PARM(timeout, "i"); MODULE_PARM_DESC(timeout, "range is 0-38 seconds, default is 38"); +static inline u8 seconds_to_ticks(int seconds) +{ + /* the internal timer is stored as ticks which decrement + * every 0.6 seconds */ + return (seconds * 10) / 6; +} + +static inline int ticks_to_seconds(u8 ticks) +{ + return (ticks * 6) / 10; +} + static inline int amdtco_status(void) { u16 reg; @@ -81,28 +94,19 @@ static inline void amdtco_ping(void) { - u8 reg; - - spin_lock(&amdtco_lock); - reg = inb(pmbase+TCO_RELOAD_REG); - outb(1 | reg, pmbase+TCO_RELOAD_REG); - spin_unlock(&amdtco_lock); + outb(1, pmbase+TCO_RELOAD_REG); } static inline int amdtco_gettimeout(void) { - return inb(TCO_RELOAD_REG) & TCO_TIMEOUT_MASK; + u8 reg = inb(pmbase+TCO_RELOAD_REG) & TCO_TIMEOUT_MASK; + return ticks_to_seconds(reg); } static inline void amdtco_settimeout(unsigned int timeout) { - u8 reg; - - spin_lock(&amdtco_lock); - reg = inb(pmbase+TCO_INITVAL_REG); - reg |= timeout & TCO_TIMEOUT_MASK; + u8 reg = seconds_to_ticks(timeout) & TCO_TIMEOUT_MASK; outb(reg, pmbase+TCO_INITVAL_REG); - spin_unlock(&amdtco_lock); } static inline void amdtco_global_enable(void) @@ -110,9 +114,12 @@ u16 reg; spin_lock(&amdtco_lock); - reg = inw(pmbase+GLOBAL_SMI_REG); - reg |= TCO_EN; - outw(reg, pmbase+GLOBAL_SMI_REG); + + /* clear NO_REBOOT on DevB:3x48 p97 */ + pci_read_config_word(dev, 0x48, ®); + reg &= ~NO_REBOOT; + pci_write_config_word(dev, 0x48, reg); + spin_unlock(&amdtco_lock); } @@ -150,10 +157,12 @@ if (timeout > MAX_TIMEOUT) timeout = MAX_TIMEOUT; + amdtco_disable(); amdtco_settimeout(timeout); amdtco_global_enable(); + amdtco_enable(); amdtco_ping(); - printk(KERN_INFO PFX "Watchdog enabled, timeout = %d/%d seconds", + printk(KERN_INFO PFX "Watchdog enabled, timeout = %ds of %ds\n", amdtco_gettimeout(), timeout); return 0; @@ -202,7 +211,7 @@ case WDIOC_GETTIMEOUT: return put_user(amdtco_gettimeout(), (int *)arg); - + case WDIOC_SETOPTIONS: if (copy_from_user(&tmp, (int *)arg, sizeof tmp)) return -EFAULT; @@ -225,7 +234,7 @@ printk(KERN_INFO PFX "Watchdog disabled\n"); } else { amdtco_ping(); - printk(KERN_CRIT PFX "Unexpected 
close!, timeout in %d seconds)\n", timeout); + printk(KERN_CRIT PFX "Unexpected close!, timeout in %d seconds\n", timeout); } up(&open_sem); @@ -253,10 +262,9 @@ } #endif amdtco_ping(); - return len; } - return 0; + return len; } @@ -291,7 +299,6 @@ }; static struct pci_device_id amdtco_pci_tbl[] __initdata = { - /* AMD 766 PCI_IDs here */ { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, }, { 0, } }; @@ -361,6 +368,9 @@ if (ints[0] > 0) timeout = ints[1]; + if (!timeout || timeout > 38) + timeout = MAX_TIMEOUT; + return 1; } @@ -370,7 +380,7 @@ module_init(amdtco_init); module_exit(amdtco_exit); -MODULE_AUTHOR("Zwane Mwaikambo "); +MODULE_AUTHOR("Zwane Mwaikambo "); MODULE_DESCRIPTION("AMD 766/768 TCO Timer Driver"); MODULE_LICENSE("GPL"); EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/amiserial.c linux.22-ac2/drivers/char/amiserial.c --- linux.vanilla/drivers/char/amiserial.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/amiserial.c 2003-06-29 16:10:02.000000000 +0100 @@ -1544,7 +1544,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/cd1865/cd1865.c linux.22-ac2/drivers/char/cd1865/cd1865.c --- linux.vanilla/drivers/char/cd1865/cd1865.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/cd1865/cd1865.c 2003-06-29 16:10:04.000000000 +0100 @@ -0,0 +1,2913 @@ +/* -*- linux-c -*- */ +/* + * This code was modified from + * specialix.c -- specialix IO8+ multiport serial driver. + * + * Copyright (C) 1997 Roger Wolff (R.E.Wolff@BitWizard.nl) + * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com) + * Modifications (C) 2002 Telford Tools, Inc. (martillo@telfordtools.com) + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#define VERSION "2.11" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cdsiolx.h" +#include "../cd1865.h" /* will move all files up one level */ +#include "siolx.h" +#include "plx9060.h" + +#define SIOLX_NORMAL_MAJOR 254 /* One is needed */ +#define SIOLX_ID 0x10 +#define CD186x_MSMR 0x61 /* modem/timer iack */ +#define CD186x_TSMR 0x62 /* tx iack */ +#define CD186x_RSMR 0x63 /* rx iack */ + +/* Configurable options: */ + +/* Am I paranoid or not ? ;-) */ +#define SIOLX_PARANOIA_CHECK + +/* Do I trust the IRQ from the card? 
(enabeling it doesn't seem to help) + When the IRQ routine leaves the chip in a state that is keeps on + requiring attention, the timer doesn't help either. */ +#undef SIOLX_TIMER +/* + * The following defines are mostly for testing purposes. But if you need + * some nice reporting in your syslog, you can define them also. + */ +#undef SIOLX_REPORT_FIFO +#undef SIOLX_REPORT_OVERRUN + +#ifdef CONFIG_SIOLX_RTSCTS /* may need to set this */ +#define SIOLX_CRTSCTS(bla) 1 +#else +#define SIOLX_CRTSCTS(tty) C_CRTSCTS(tty) +#endif + +/* Used to be outb (0xff, 0x80); */ +#define short_pause() udelay (1) + +#define SIOLX_LEGAL_FLAGS \ + (ASYNC_HUP_NOTIFY | ASYNC_SAK | ASYNC_SPLIT_TERMIOS | \ + ASYNC_SPD_HI | ASYNC_SPEED_VHI | ASYNC_SESSION_LOCKOUT | \ + ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP) + +#ifndef MIN +#define MIN(a,b) ((a) < (b) ? (a) : (b)) +#endif + +DECLARE_TASK_QUEUE(tq_siolx); + +#undef RS_EVENT_WRITE_WAKEUP +#define RS_EVENT_WRITE_WAKEUP 0 + +#define SIOLX_TYPE_NORMAL 1 +#define SIOLX_TYPE_CALLOUT 2 + +#define BD_8000P 1 +#define BD_16000P 2 +#define BD_8000C 3 +#define BD_16000C 4 +#define BD_MAX BD_16000C + +static struct siolx_board *SiolxIrqRoot[SIOLX_NUMINTS]; + +static char *sio16_board_type[] = +{ + "unknown", + " 8000P ", + "16000P ", + " 8000C ", + "16000C " +}; +static struct tty_driver siolx_driver, siolx_callout_driver; +static int siolx_refcount; +static unsigned char * tmp_buf; +static DECLARE_MUTEX(tmp_buf_sem); +static unsigned long baud_table[] = +{ + 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, + 9600, 19200, 38400, 57600, 115200, 0, +}; +static int siolx_debug = 0; /* turns on lots of */ + /* debugging messages*/ +static int siolx_major = SIOLX_NORMAL_MAJOR; +#ifdef MODULE +static int siolx_minorstart = 256; +#endif +static int siolx_vendor_id = PCI_VENDOR_ID_PLX; +static int siolx_device_id = PCI_DEVICE_ID_PLX_9060SD; +static int siolx_subsystem_vendor = AURASUBSYSTEM_VENDOR_ID; +static int siolx_subsystem_pci_device = AURASUBSYSTEM_MPASYNCPCI; +static int siolx_subsystem_cpci_device = AURASUBSYSTEM_MPASYNCcPCI; +static int siolx_bhindex = SIOLX_BH; /* if this softinterrupt slot is filled */ + +MODULE_PARM(siolx_vendor_id, "i"); +MODULE_PARM(siolx_device_id, "i"); +#ifdef MODULE +MODULE_PARM(siolx_minorstart, "i"); +#endif +MODULE_PARM(siolx_major, "i"); +MODULE_PARM(siolx_subsystem_vendor, "i"); +MODULE_PARM(siolx_subsystem_pci_device, "i"); +MODULE_PARM(siolx_subsystem_cpci_device, "i"); +MODULE_PARM(siolx_bhindex, "i"); + +static struct siolx_board *siolx_board_root; +static struct siolx_board *siolx_board_last; +static struct siolx_port *siolx_port_root; +static struct siolx_port *siolx_port_last; +static unsigned int NumSiolxPorts; +static struct tty_struct **siolx_table; /* make dynamic */ +static struct termios **siolx_termios; +static struct termios **siolx_termios_locked; +static int siolx_driver_registered; +static int siolx_callout_driver_registered; + +#ifdef SIOLX_TIMER +static struct timer_list missed_irq_timer; +static void siolx_interrupt(int irq, void * dev_id, struct pt_regs * regs); +#endif + +extern struct tty_driver *get_tty_driver(kdev_t device); + +static inline int port_No_by_chip (struct siolx_port const * port) +{ + return SIOLX_PORT(port->boardport); +} + +/* Describe the current board and port configuration */ + +static int siolx_read_proc(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct siolx_port *port = siolx_port_root; + off_t begin = 0; + int len = 0; + unsigned int typeno; + 
char *revision = "$Revision: 1.11 $"; + + len += sprintf(page, "SIOLX Version %s. %s\n", VERSION, revision); + len += sprintf(page+len, "TTY MAJOR = %d, CUA MAJOR = %d.\n", + siolx_driver.major, siolx_callout_driver.major); + + for (port = siolx_port_root; port != NULL; port = port->next_by_global_list) + { + typeno = port->board->boardtype; + if(typeno > BD_MAX) + { + typeno = 0; + } + len += sprintf(page+len, + "%3.3d: bd %2.2d: %s: ch %d: pt %2.2d/%d: tp %4.4d%c: bs %2.2d: sl %2.2d: ir %2.2d: fl %c%c%c%c%c\n", + siolx_driver.minor_start + port->driverport, + port->board->boardnumber, + sio16_board_type[typeno], + port->board->chipnumber, + port->boardport, + port_No_by_chip(port), /* port relative to chip */ + port->board->chiptype, + port->board->chiprev, + port->board->pdev.bus->number, + PCI_SLOT(port->board->pdev.devfn), + port->board->irq, + (port->flags & ASYNC_INITIALIZED) ? 'I' : ' ', + (port->flags & ASYNC_CALLOUT_ACTIVE) ? 'D' : ' ', + (port->flags & ASYNC_NORMAL_ACTIVE) ? 'T' : ' ', + (port->flags & ASYNC_CLOSING) ? 'C' : ' ', + port->board->reario ? 'R' : ' '); + if (len+begin > off+count) + { + goto done; + } + if (len+begin < off) + { + begin += len; + len = 0; + } + } + *eof = 1; + done: + if (off >= len+begin) + { + return 0; + } + *start = page + (off-begin); + return ((count < begin+len-off) ? count : begin+len-off); +} + +#ifndef MODULE +static int GetMinorStart(void) /* minor start can be determined on fly when driver linked to kernel */ +{ + struct tty_driver *ttydriver; + int minor_start = 0; + kdev_t device; + + device = MKDEV(siolx_major, minor_start); + while(ttydriver = get_tty_driver(device), ttydriver != NULL) + { + minor_start += ttydriver->num; + device = MKDEV(TTY_MAJOR, minor_start); + } + return minor_start; + +} +#endif + +/* only once per board chain */ +void SiolxResetBoard(struct siolx_board * bp, struct pci_dev *pdev) +{ + register unsigned int regvalue; + unsigned char savedvalue; + /* + * Yuch. Here's the deal with the reset bits in the + * ECNTL register of the 9060SD. + * + * It appears that LCLRST resets the PLX local configuration + * registers (not the PCI configuration registers) to their + * default values. We need to use LCLRST because it + * is the command (I think) that pulls the local reset + * line on the local bus side of the 9060SD. + * + * Unfortunately, by resetting the PLX local configuration + * registers, we can't use the damn board. So we must + * reinitialize them. The easiest way to do that is to run + * the LDREG command. Unfortunately, it has the side effect + * of reinitializing the PCI configuration registers. It seems, + * however that only the value stowed in ILINE gets choked; all + * of the others seem to be properly preserved. + * + * So, what the code does now is to get a copy of ILINE by + * hand, and then restore it after reloading the registers. 
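The off/begin/len bookkeeping in siolx_read_proc above follows the usual 2.4 read_proc contract: entries are formatted one after another, and only the slice starting at off, at most count bytes, is handed back to the reader, with *eof set once the end is reached. The standalone sketch below is not part of the patch; the names and the single-buffer assumption are illustrative only, but the windowing arithmetic is the same.

/* Minimal sketch of read_proc-style output windowing, assuming the
 * whole report fits in one preformatted buffer. */
#include <stdio.h>

static long window_slice(char *page, long len, long off, long count,
                         char **start, int *eof)
{
        if (off >= len) {               /* reader is already past the end */
                *eof = 1;
                return 0;
        }
        if (off + count >= len)
                *eof = 1;               /* this read reaches the end */
        *start = page + off;
        return (count < len - off) ? count : len - off;
}

int main(void)
{
        char page[256];
        char *start;
        int eof = 0;
        long len = sprintf(page, "line one\nline two\nline three\n");
        long off = 0;

        while (!eof) {
                long n = window_slice(page, len, off, 10, &start, &eof);
                printf("read %ld bytes: %.*s\n", n, (int)n, start);
                off += n;
        }
        return 0;
}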
+ */ + + bp->pdev = *pdev; + bp->plx_vaddr = (unsigned long) ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if(bp->plx_vaddr) + { + regvalue = readl(bp->plx_vaddr + PLX_ECNTL); + regvalue &= ~PLX_ECNTLLDREG; + regvalue |= PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &savedvalue); + regvalue |= PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, savedvalue); + regvalue |= PLX_ECNTLINITSTAT; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + writel(0, bp->plx_vaddr + PLX_ICSR); + } +} + +void SiolxShutdownBoard(struct siolx_board * bp) +{ + register unsigned int regvalue; + unsigned char savedvalue; + struct pci_dev *pdev; + + if(bp->chipnumber == 0) /* only shutdown first in a chain */ + { + pdev = &bp->pdev; + + writel(0, bp->plx_vaddr + PLX_ICSR); + regvalue = readl(bp->plx_vaddr + PLX_ECNTL); + regvalue &= ~PLX_ECNTLLDREG; + regvalue |= PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &savedvalue); + regvalue |= PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, savedvalue); + regvalue |= PLX_ECNTLINITSTAT; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + writel(0, bp->plx_vaddr + PLX_ICSR); + iounmap((void*)bp->plx_vaddr); + bp->plx_vaddr = 0; + } +} + +static inline int siolx_paranoia_check(struct siolx_port const * port, + kdev_t device, const char *routine) +{ +#ifdef SIOLX_PARANOIA_CHECK + static const char *badmagic = + KERN_ERR "siolx: Warning: bad siolx port magic number for device %s in %s\n"; + static const char *badinfo = + KERN_ERR "siolx: Warning: null siolx port for device %s in %s\n"; + + if (!port) + { + printk(badinfo, kdevname(device), routine); + return 1; + } + if (port->magic != SIOLX_MAGIC) + { + printk(badmagic, kdevname(device), routine); + return 1; + } +#endif + return 0; +} + + +/* + * + * Service functions for siolx Aurora Asynchronous Adapter driver. 
+ * + */ + +/* Get board number from pointer */ +static inline int board_No (struct siolx_board * bp) +{ + return bp->boardnumber; /* note same for all chips/boards in a chain */ +} + + +/* Get port number from pointer */ +static inline int port_No (struct siolx_port const * port) +{ + return port->driverport; /* offset from minor start */ +} + +/* Get pointer to board from pointer to port */ +static inline struct siolx_board * port_Board(struct siolx_port const * port) +{ + return port->board; /* same for ports on both chips on a board */ +} + + +/* Input Byte from CL CD186x register */ +static inline unsigned char siolx_in(struct siolx_board * bp, unsigned short reg) +{ + return readb (bp->base + reg); +} + + +/* Output Byte to CL CD186x register */ +static inline void siolx_out(struct siolx_board * bp, unsigned short reg, + unsigned char val) +{ + writeb(val, bp->base + reg); +} + + +/* Wait for Channel Command Register ready */ +static int siolx_wait_CCR(struct siolx_board * bp) +{ + unsigned long delay; + + for (delay = SIOLX_CCR_TIMEOUT; delay; delay--) + { + udelay(1); + if (!siolx_in(bp, CD186x_CCR)) + { + return 0; + } + } + printk(KERN_ERR "siolx:board %d: timeout waiting for CCR.\n", board_No(bp)); + return -1; +} + +/* Wait for ready */ +static int siolx_wait_GIVR(struct siolx_board * bp) +{ + unsigned long delay; + + for (delay = SIOLX_CCR_TIMEOUT; delay; delay--) + { + udelay(1); + if (siolx_in(bp, CD186x_GIVR) == (unsigned char) 0xff) + { + return 0; + } + } + printk(KERN_ERR "siolx: board %d: timeout waiting for GIVR.\n", board_No(bp)); + return -1; +} + +static inline void siolx_release_io_range(struct siolx_board * bp) +{ + if((bp->chipnumber == 0) && bp->vaddr) /* only release from first board in a chain */ + { + iounmap((void*)bp->vaddr); + bp->vaddr = 0; + } +} + +/* Must be called with enabled interrupts */ + +static inline void siolx_long_delay(unsigned long delay) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(delay); +} + +/* Reset and setup CD186x chip */ +static int siolx_init_CD186x(struct siolx_board * bp) +{ + unsigned long flags; + int scaler; + int rv = 1; + int rev; + int chip; + + save_flags(flags); /* not sure of need to turn off ints */ + cli(); + if(siolx_wait_CCR(bp)) + { + restore_flags(flags); + return 0; /* Wait for CCR ready */ + } + siolx_out(bp, CD186x_CAR, 0); + siolx_out(bp, CD186x_GIVR, 0); + siolx_out(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */ + if(siolx_wait_GIVR(bp)) + { + restore_flags(flags); + return 0; + } + sti(); + siolx_long_delay(HZ/20); /* Delay 0.05 sec */ + cli(); + siolx_out(bp, CD186x_GIVR, SIOLX_ID | (bp->chipnumber ? 0x80 : 0)); /* Set ID for this chip */ +#if 0 + siolx_out(bp, CD186x_GICR, 0); /* Clear all bits */ +#endif + scaler = SIOLX_OSCFREQ/1000; + siolx_out(bp, CD186x_PPRH, scaler >> 8); + siolx_out(bp, CD186x_PPRL, scaler & 0xff); + + /* Chip revcode pkgtype + GFRCR SRCR bit 7 + CD180 rev B 0x81 0 + CD180 rev C 0x82 0 + CD1864 rev A 0x82 1 + CD1865 rev A 0x83 1 -- Do not use!!! Does not work. + CD1865 rev B 0x84 1 + -- Thanks to Gwen Wang, Cirrus Logic. 
+ */ + + switch (siolx_in(bp, CD186x_GFRCR)) + { + case 0x82: + chip = 1864; + rev='A'; + break; + case 0x83: + chip = 1865; + rev='A'; + break; + case 0x84: + chip = 1865; + rev='B'; + break; + case 0x85: + chip = 1865; + rev='C'; + break; /* Does not exist at this time */ + default: + chip=-1; + rev='x'; + break; + } + +#if SIOLX_DEBUG > 2 + printk (KERN_DEBUG " GFCR = 0x%02x\n", siolx_in(bp, CD186x_GFRCR) ); +#endif + + siolx_out(bp, CD186x_MSMR, CD186x_MRAR); /* load up match regs with address regs */ + siolx_out(bp, CD186x_TSMR, CD186x_TRAR); + siolx_out(bp, CD186x_RSMR, CD186x_RRAR); + +#if 0 + DEBUGPRINT((KERN_ALERT "match reg values are msmr %x, tsmr %x, rsmr %x.\n", + siolx_in(bp, CD186x_MSMR), + siolx_in(bp, CD186x_TSMR), + siolx_in(bp, CD186x_RSMR))); +#endif + + siolx_out(bp, CD186x_SRCR, SRCR_AUTOPRI | SRCR_GLOBPRI | SRCR_REGACKEN); + /* Setting up prescaler. We need 4 ticks per 1 ms */ + + printk(KERN_INFO"siolx: CD%4.4d%c detected at 0x%lx, IRQ %d, on Aurora asynchronous adapter board %d, chip number %d.\n", + chip, rev, bp->base, bp->irq, board_No(bp), bp->chipnumber); + + bp->chiptype = chip; + bp->chiprev = rev; + + restore_flags(flags); + return rv; +} + + +#ifdef SIOLX_TIMER +void missed_irq (unsigned long data) +{ + if (siolx_in ((struct siolx_board *)data, CD186x_SRSR) & + (SRSR_RREQint | + SRSR_TREQint | + SRSR_MREQint)) + { + printk (KERN_INFO "Missed interrupt... Calling int from timer. \n"); + siolx_interrupt (((struct siolx_board *)data)->irq, + NULL, NULL); + } + missed_irq_timer.expires = jiffies + HZ; + add_timer (&missed_irq_timer); +} +#endif + +/* Main probing routine, also sets irq. */ +static int siolx_probe(struct siolx_board *bp) +{ + unsigned char val1, val2; + + /* Are the I/O ports here ? */ + siolx_out(bp, CD186x_PPRL, 0x5a); + short_pause (); + val1 = siolx_in(bp, CD186x_PPRL); + + siolx_out(bp, CD186x_PPRL, 0xa5); + short_pause (); + val2 = siolx_in(bp, CD186x_PPRL); + + if ((val1 != 0x5a) || (val2 != 0xa5)) + { + printk(KERN_INFO + "siolx: cd serial chip not found at base %ld.\n", + bp->base); + return 1; + } + + /* Reset CD186x */ + if (!siolx_init_CD186x(bp)) + { + return -EIO; + } + +#ifdef SIOLX_TIMER + init_timer (&missed_irq_timer); + missed_irq_timer.function = missed_irq; + missed_irq_timer.data = (unsigned long) bp; + missed_irq_timer.expires = jiffies + HZ; + add_timer (&missed_irq_timer); +#endif + return 0; +} + +/* + * + * Interrupt processing routines. + * */ + +static inline void siolx_mark_event(struct siolx_port * port, int event) +{ + /* + * I'm not quite happy with current scheme all serial + * drivers use their own BH routine. + * It seems this easily can be done with one BH routine + * serving for all serial drivers. + * For now I must introduce another one - SIOLX_BH. + * Still hope this will be changed in near future. + * -- Dmitry. + */ + /* I use a module parameter that can be set at module + * load time so that this driver can be downloaded into + * a kernel where the value of SIOLX_BX has been allocated + * to something else. 
This kludge was not necessary + * in the ASLX driver because AURORA_BH had already + * been allocated for the sparc and there was no + * similar driver for x86 while the ASLX driver probably + * will not work for the SPARC and is not guaranteed to + * do so (at some point I should clean this situation up) -- Joachim*/ + set_bit(event, &port->event); + queue_task(&port->tqueue, &tq_siolx); + mark_bh(siolx_bhindex); +} + +static inline struct siolx_port * siolx_get_port(struct siolx_board * bp, + unsigned char const * what) +{ + unsigned char channel; + struct siolx_port * port; + + channel = siolx_in(bp, CD186x_GICR) >> GICR_CHAN_OFF; + if (channel < CD186x_NCH) + { + port = bp->portlist; + while(port) + { + if(channel == 0) + { + break; + } + port = port->next_by_board; + --channel; + } + + if(port && (port->flags & ASYNC_INITIALIZED)) /* port should be opened */ + { + return port; + } + } + printk(KERN_INFO "sx%d: %s interrupt from invalid port %d\n", + board_No(bp), what, channel); + return NULL; +} + + +static inline void siolx_receive_exc(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char status; + unsigned char ch; + + if (!(port = siolx_get_port(bp, "Receive"))) + return; + + tty = port->tty; + if (tty->flip.count >= TTY_FLIPBUF_SIZE) + { + printk(KERN_INFO "sx%d: port %d: Working around flip buffer overflow.\n", + board_No(bp), port_No(port)); + return; + } + +#ifdef SIOLX_REPORT_OVERRUN + status = siolx_in(bp, CD186x_RCSR); + if (status & RCSR_OE) + { + port->overrun++; +#if SIOLX_DEBUG + printk(KERN_DEBUG "sx%d: port %d: Overrun. Total %ld overruns.\n", + board_No(bp), port_No(port), port->overrun); +#endif + } + status &= port->mark_mask; +#else + status = siolx_in(bp, CD186x_RCSR) & port->mark_mask; +#endif + ch = siolx_in(bp, CD186x_RDR); + if (!status) + { + return; + } + if (status & RCSR_TOUT) + { + printk(KERN_INFO "siolx: board %d: chip %d: port %d: Receiver timeout. Hardware problems ?\n", + board_No(bp), bp->chipnumber, port_No(port)); + return; + + } + else if (status & RCSR_BREAK) + { +#ifdef SIOLX_DEBUG + printk(KERN_DEBUG "siolx: board %d: chip %d: port %d: Handling break...\n", + board_No(bp), bp->chipnumber, port_No(port)); +#endif + *tty->flip.flag_buf_ptr++ = TTY_BREAK; + if (port->flags & ASYNC_SAK) + { + do_SAK(tty); + } + + } + else if (status & RCSR_PE) + { + *tty->flip.flag_buf_ptr++ = TTY_PARITY; + } + else if (status & RCSR_FE) + { + *tty->flip.flag_buf_ptr++ = TTY_FRAME; + } + + else if (status & RCSR_OE) + { + *tty->flip.flag_buf_ptr++ = TTY_OVERRUN; + } + + else + { + *tty->flip.flag_buf_ptr++ = 0; + } + + *tty->flip.char_buf_ptr++ = ch; + tty->flip.count++; + queue_task(&tty->flip.tqueue, &tq_timer); +} + + +static inline void siolx_receive(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char count; + + if (!(port = siolx_get_port(bp, "Receive"))) + return; + + tty = port->tty; + + count = siolx_in(bp, CD186x_RDCR); + +#ifdef SIOLX_REPORT_FIFO + port->hits[count > 8 ? 
9 : count]++; +#endif + + while (count--) + { + if (tty->flip.count >= TTY_FLIPBUF_SIZE) + { + printk(KERN_INFO "siolx: board %d: chip %d: port %d: Working around flip buffer overflow.\n", + board_No(bp), bp->chipnumber, port_No(port)); + break; + } + *tty->flip.char_buf_ptr++ = siolx_in(bp, CD186x_RDR); + *tty->flip.flag_buf_ptr++ = 0; + tty->flip.count++; + } + queue_task(&tty->flip.tqueue, &tq_timer); +} + +static inline void siolx_transmit(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char count; + + if (!(port = siolx_get_port(bp, "Transmit"))) + return; + + tty = port->tty; + + if(port->IER & IER_TXEMPTY) + { + /* FIFO drained */ +#if 0 + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); +#endif + port->IER &= ~IER_TXEMPTY; + siolx_out(bp, CD186x_IER, port->IER); + return; + } + + if(((port->xmit_cnt <= 0) && !port->break_length) || + tty->stopped || tty->hw_stopped) + { +#if 0 + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); +#endif + port->IER &= ~IER_TXRDY; + siolx_out(bp, CD186x_IER, port->IER); + return; + } + + if (port->break_length) + { + if (port->break_length > 0) + { + if (port->COR2 & COR2_ETC) + { + siolx_out(bp, CD186x_TDR, CD186x_C_ESC); + siolx_out(bp, CD186x_TDR, CD186x_C_SBRK); + port->COR2 &= ~COR2_ETC; + } + count = MIN(port->break_length, 0xff); + siolx_out(bp, CD186x_TDR, CD186x_C_ESC); + siolx_out(bp, CD186x_TDR, CD186x_C_DELAY); + siolx_out(bp, CD186x_TDR, count); + if (!(port->break_length -= count)) + { + port->break_length--; + } + } + else + { + siolx_out(bp, CD186x_TDR, CD186x_C_ESC); + siolx_out(bp, CD186x_TDR, CD186x_C_EBRK); + siolx_out(bp, CD186x_COR2, port->COR2); + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_CORCHG2); + port->break_length = 0; + } + return; + } + + count = CD186x_NFIFO; + do + { + siolx_out(bp, CD186x_TDR, port->xmit_buf[port->xmit_tail++]); + port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE-1); + if (--port->xmit_cnt <= 0) + { + break; + } + } while (--count > 0); + + if (port->xmit_cnt <= 0) + { +#if 0 + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); +#endif + port->IER &= ~IER_TXRDY; + siolx_out(bp, CD186x_IER, port->IER); + } + if (port->xmit_cnt <= port->wakeup_chars) + { + siolx_mark_event(port, RS_EVENT_WRITE_WAKEUP); + } +} + + +static inline void siolx_check_modem(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char mcr; + +#ifdef SIOLX_DEBUG + printk (KERN_DEBUG "Modem intr. "); +#endif + if (!(port = siolx_get_port(bp, "Modem"))) + { + return; + } + + tty = port->tty; + + mcr = siolx_in(bp, CD186x_MCR); + DEBUGPRINT((KERN_ALERT "mcr = %02x.\n", mcr)); + + if ((mcr & MCR_CDCHG)) + { +#ifdef SIOLX_DEBUG + DEBUGPRINT((KERN_DEBUG "CD just changed... 
")); +#endif + if (siolx_in(bp, CD186x_MSVR) & MSVR_CD) + { +#ifdef SIOLX_DEBUG + DEBUGPRINT(( "Waking up guys in open.\n")); +#endif + wake_up_interruptible(&port->open_wait); /* note no linefeed in previous print */ + } + else if (!((port->flags & ASYNC_CALLOUT_ACTIVE) && + (port->flags & ASYNC_CALLOUT_NOHUP))) + { +#ifdef SIOLX_DEBUG + DEBUGPRINT(( "Sending HUP.\n")); /* note no linefeed in previous print */ +#endif + MOD_INC_USE_COUNT; + if (schedule_task(&port->tqueue_hangup) == 0) + { + MOD_DEC_USE_COUNT; + } + } + else + { +#ifdef SIOLX_DEBUG + DEBUGPRINT(("Don't need to send HUP.\n")); /* note no linefeed in previous print */ +#endif + } + } + +#ifdef SIOLX_BRAIN_DAMAGED_CTS + if (mcr & MCR_CTSCHG) + { + if (siolx_in(bp, CD186x_MSVR) & MSVR_CTS) + { + tty->hw_stopped = 0; + port->IER |= IER_TXRDY; + if (port->xmit_cnt <= port->wakeup_chars) + siolx_mark_event(port, RS_EVENT_WRITE_WAKEUP); + } + else + { + tty->hw_stopped = 1; + port->IER &= ~IER_TXRDY; + } + siolx_out(bp, CD186x_IER, port->IER); + } + if (mcr & MCR_DSSXHG) + { + if (siolx_in(bp, CD186x_MSVR) & MSVR_DSR) + { + tty->hw_stopped = 0; + port->IER |= IER_TXRDY; + if (port->xmit_cnt <= port->wakeup_chars) + { + siolx_mark_event(port, RS_EVENT_WRITE_WAKEUP); + } + } + else + { + tty->hw_stopped = 1; + port->IER &= ~IER_TXRDY; + } + siolx_out(bp, CD186x_IER, port->IER); + } +#endif /* SIOLX_BRAIN_DAMAGED_CTS */ + + /* Clear change bits */ + siolx_out(bp, CD186x_MCR, 0); +} + +/* The main interrupt processing routine */ +static void siolx_interrupt(int irq, void * dev_id, struct pt_regs * regs) +{ + unsigned char status; + unsigned char rcsr; + struct siolx_board *bp; + + if((irq < 0) || (irq >= SIOLX_NUMINTS)) + { + printk(KERN_ALERT "siolx: bad interrupt value %i.\n", irq); + return; + } + /* walk through all the cards on the interrupt that occurred. */ + for(bp = SiolxIrqRoot[irq]; bp != NULL; bp = bp->next_by_interrupt) + + { + while((readl(bp->intstatus) & PLX_ICSRINTACTIVE) != 0) /* work on on board */ + { + status = siolx_in(bp, CD186x_SRSR); + + if(status & SRSR_RREQint) + { + siolx_in(bp, CD186x_RRAR); + rcsr = siolx_in(bp, CD186x_RCSR); + if(rcsr == 0) + { + siolx_receive(bp); + } + else + { + siolx_receive_exc(bp); + } + } + else if (status & SRSR_TREQint) + { + siolx_in(bp, CD186x_TRAR); + siolx_transmit(bp); + } + else if (status & SRSR_MREQint) + { + siolx_in(bp, CD186x_MRAR); + siolx_check_modem(bp); + } + siolx_out(bp, CD186x_EOIR, 1); /* acknowledge the interrupt */ + bp = bp->next_by_chain; /* go to next chip on card -- maybe this one */ + } /* it does not matter if bp changes all in a chain have same next by interrupt */ + } +} + + +/* + * Setting up port characteristics. + * Must be called with disabled interrupts + */ +static void siolx_change_speed(struct siolx_board *bp, struct siolx_port *port) +{ + struct tty_struct *tty; + unsigned long baud; + long tmp; + unsigned char cor1 = 0, cor3 = 0; + unsigned char mcor1 = 0, mcor2 = 0; + static int again; + + tty = port->tty; + + if(!tty || !tty->termios) + { + return; + } + + port->IER = 0; + port->COR2 = 0; + /* Select port on the board */ + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + + /* The Siolx board doens't implement the RTS lines. + They are used to set the IRQ level. Don't touch them. 
*/ + /* Must check how to apply these to sio16 boards */ + if (SIOLX_CRTSCTS(tty)) + { + port->MSVR = (MSVR_DTR | (siolx_in(bp, CD186x_MSVR) & MSVR_RTS)); + } + else + { + port->MSVR = (siolx_in(bp, CD186x_MSVR) & MSVR_RTS); + } +#ifdef DEBUG_SIOLX + DEBUGPRINT((KERN_DEBUG "siolx: got MSVR=%02x.\n", port->MSVR)); +#endif + baud = C_BAUD(tty); + + if (baud & CBAUDEX) + { + baud &= ~CBAUDEX; + if((baud < 1) || (baud > 2)) + { + port->tty->termios->c_cflag &= ~CBAUDEX; + } + else + { + baud += 15; + } + } + if (baud == 15) + { + if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) + { + baud ++; + } + if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) + { + baud += 2; + } + } + + + if (!baud_table[baud]) + { + /* Drop DTR & exit */ +#ifdef SIOLX_DEBUG + DEBUGPRINT((KERN_DEBUG "siolx: Dropping DTR... Hmm....\n")); +#endif + if (!SIOLX_CRTSCTS (tty)) + { + port->MSVR &= ~ MSVR_DTR; + siolx_out(bp, CD186x_MSVR, port->MSVR ); + } +#ifdef DEBUG_SIOLX + else + { + DEBUGPRINT((KERN_DEBUG "siolx: Can't drop DTR: no DTR.\n")); + } +#endif + return; + } + else + { + /* Set DTR on */ + if (!SIOLX_CRTSCTS (tty)) + { + port ->MSVR |= MSVR_DTR; + } + } + + /* + * Now we must calculate some speed depended things + */ + + /* Set baud rate for port */ + tmp = port->custom_divisor ; + if(tmp) + { + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: Using custom baud rate divisor %ld. \n" + "This is an untested option, please be carefull.\n", + board_No(bp), + bp->chipnumber, + port_No(port), tmp)); + } + else + { + tmp = (((SIOLX_OSCFREQ + baud_table[baud]/2) / baud_table[baud] + + CD186x_TPC/2) / CD186x_TPC); + } + + if ((tmp < 0x10) && time_before(again, jiffies)) + { + again = jiffies + HZ * 60; + /* Page 48 of version 2.0 of the CL-CD1865 databook */ + if (tmp >= 12) + { + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d:Baud rate divisor is %ld. \n" + "Performance degradation is possible.\n" + "Read siolx.txt for more info.\n", + board_No(bp), bp->chipnumber, + port_No (port), tmp)); + } else + { + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: Baud rate divisor is %ld. \n" + " Warning: overstressing Cirrus chip. " + " This might not work.\n" + " Read siolx.txt for more info.\n", + board_No(bp), bp->chipnumber, port_No (port), tmp)); + } + } + + siolx_out(bp, CD186x_RBPRH, (tmp >> 8) & 0xff); + siolx_out(bp, CD186x_TBPRH, (tmp >> 8) & 0xff); + siolx_out(bp, CD186x_RBPRL, tmp & 0xff); + siolx_out(bp, CD186x_TBPRL, tmp & 0xff); + + if (port->custom_divisor) + { + baud = (SIOLX_OSCFREQ + port->custom_divisor/2) / port->custom_divisor; + baud = ( baud + 5 ) / 10; + } else + { + baud = (baud_table[baud] + 5) / 10; /* Estimated CPS */ + } + + /* Two timer ticks seems enough to wakeup something like SLIP driver */ + tmp = ((baud + HZ/2) / HZ) * 2 - CD186x_NFIFO; + port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ? + SERIAL_XMIT_SIZE - 1 : tmp); + + /* Receiver timeout will be transmission time for 1.5 chars */ + tmp = (SIOLX_TPS + SIOLX_TPS/2 + baud/2) / baud; + tmp = (tmp > 0xff) ? 
0xff : tmp; + siolx_out(bp, CD186x_RTPR, tmp); + + switch (C_CSIZE(tty)) + { + case CS5: + cor1 |= COR1_5BITS; + break; + case CS6: + cor1 |= COR1_6BITS; + break; + case CS7: + cor1 |= COR1_7BITS; + break; + case CS8: + cor1 |= COR1_8BITS; + break; + } + + if (C_CSTOPB(tty)) + { + cor1 |= COR1_2SB; + } + + cor1 |= COR1_IGNORE; + if (C_PARENB(tty)) + { + cor1 |= COR1_NORMPAR; + if (C_PARODD(tty)) + { + cor1 |= COR1_ODDP; + } + if (I_INPCK(tty)) + { + cor1 &= ~COR1_IGNORE; + } + } + /* Set marking of some errors */ + port->mark_mask = RCSR_OE | RCSR_TOUT; + if (I_INPCK(tty)) + { + port->mark_mask |= RCSR_FE | RCSR_PE; + } + if (I_BRKINT(tty) || I_PARMRK(tty)) + { + port->mark_mask |= RCSR_BREAK; + } + if (I_IGNPAR(tty)) + { + port->mark_mask &= ~(RCSR_FE | RCSR_PE); + } + if (I_IGNBRK(tty)) + { + port->mark_mask &= ~RCSR_BREAK; + if (I_IGNPAR(tty)) + { + /* Real raw mode. Ignore all */ + port->mark_mask &= ~RCSR_OE; + } + } + /* Enable Hardware Flow Control */ + if (C_CRTSCTS(tty)) + { +#ifdef SIOLX_BRAIN_DAMAGED_CTS + port->IER |= IER_DSR | IER_CTS; + mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD; + mcor2 |= MCOR2_DSROD | MCOR2_CTSOD; + tty->hw_stopped = !(siolx_in(bp, CD186x_MSVR) & (MSVR_CTS|MSVR_DSR)); +#else + port->COR2 |= COR2_CTSAE; +#endif + } + /* Enable Software Flow Control. FIXME: I'm not sure about this */ + /* Some people reported that it works, but I still doubt it */ + if (I_IXON(tty)) + { + port->COR2 |= COR2_TXIBE; + cor3 |= (COR3_FCT | COR3_SCDE); + if (I_IXANY(tty)) + { + port->COR2 |= COR2_IXM; + } + siolx_out(bp, CD186x_SCHR1, START_CHAR(tty)); + siolx_out(bp, CD186x_SCHR2, STOP_CHAR(tty)); + siolx_out(bp, CD186x_SCHR3, START_CHAR(tty)); + siolx_out(bp, CD186x_SCHR4, STOP_CHAR(tty)); + } + if (!C_CLOCAL(tty)) + { + /* Enable CD check */ + port->IER |= IER_CD; + mcor1 |= MCOR1_CDZD; + mcor2 |= MCOR2_CDOD; + } + + if (C_CREAD(tty)) + { + /* Enable receiver */ + port->IER |= IER_RXD; + } + + /* Set input FIFO size (1-8 bytes) */ + cor3 |= SIOLX_RXFIFO; + /* Setting up CD186x channel registers */ + siolx_out(bp, CD186x_COR1, cor1); + siolx_out(bp, CD186x_COR2, port->COR2); + siolx_out(bp, CD186x_COR3, cor3); + /* Make CD186x know about registers change */ + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_CORCHG1 | CCR_CORCHG2 | CCR_CORCHG3); + /* Setting up modem option registers */ +#ifdef DEBUG_SIOLX + DEBUGPRINT((KERN_ALERT "siolx: Mcor1 = %02x, mcor2 = %02x.\n", mcor1, mcor2)); +#endif + siolx_out(bp, CD186x_MCOR1, mcor1); + siolx_out(bp, CD186x_MCOR2, mcor2); + /* Enable CD186x transmitter & receiver */ + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_TXEN | CCR_RXEN); + /* Enable interrupts */ + siolx_out(bp, CD186x_IER, port->IER); + /* And finally set the modem lines... 
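The baud rate divisor in siolx_change_speed above is built from two rounded integer divisions, oscillator ticks per bit first and then the chip's clock prescale, which is why small results trigger the overstress warning. The standalone worked example below reproduces that shape; OSCFREQ and TPC here are placeholder values chosen for illustration, not the driver's real constants.

/* Worked example of the rounded divisor computation (placeholder values). */
#include <stdio.h>

#define OSCFREQ 33000000L       /* placeholder oscillator frequency, Hz */
#define TPC     16L             /* placeholder clocks per bit time */

static long divisor_for(long baud)
{
        /* same shape as the driver: oscillator Hz -> bit clocks,
         * then bit clocks -> divisor, both divisions rounded */
        return ((OSCFREQ + baud / 2) / baud + TPC / 2) / TPC;
}

int main(void)
{
        long bauds[] = { 9600, 19200, 38400, 115200 };
        int i;

        for (i = 0; i < 4; i++)
                printf("%6ld baud -> divisor %ld\n",
                       bauds[i], divisor_for(bauds[i]));
        return 0;
}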
*/ + siolx_out(bp, CD186x_MSVR, port->MSVR); +} + + +/* Must be called with interrupts enabled */ +static int siolx_setup_port(struct siolx_board *bp, struct siolx_port *port) +{ + unsigned long flags; + + if (port->flags & ASYNC_INITIALIZED) + { + return 0; + } + + if (!port->xmit_buf) + { + /* We may sleep in get_free_page() */ + unsigned long tmp; + + if (!(tmp = get_free_page(GFP_KERNEL))) + { + return -ENOMEM; + } + + if (port->xmit_buf) + { + free_page(tmp); + return -ERESTARTSYS; + } + port->xmit_buf = (unsigned char *) tmp; + } + + save_flags(flags); cli(); + + if (port->tty) + { + clear_bit(TTY_IO_ERROR, &port->tty->flags); + } + + port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; + siolx_change_speed(bp, port); + port->flags |= ASYNC_INITIALIZED; + + restore_flags(flags); + return 0; +} + + +/* Must be called with interrupts disabled */ +static void siolx_shutdown_port(struct siolx_board *bp, struct siolx_port *port) +{ + struct tty_struct *tty; + + if (!(port->flags & ASYNC_INITIALIZED)) + { + return; + } + +#ifdef SIOLX_REPORT_OVERRUN + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: Total %ld overruns were detected.\n", + board_No(bp), bp->chipnumber, port_No(port), port->overrun)); +#endif +#ifdef SIOLX_REPORT_FIFO + { + int i; + + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: FIFO hits [ ", + board_No(bp), bp->chipnumber, port_No(port))); + for (i = 0; i < 10; i++) + { + DEBUGPRINT(("%ld ", port->hits[i])); + } + DEBUGPRINT(("].\n")); + } +#endif + if (port->xmit_buf) + { + free_page((unsigned long) port->xmit_buf); + port->xmit_buf = NULL; + } + + /* Select port */ + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + + if (!(tty = port->tty) || C_HUPCL(tty)) + { + /* Drop DTR */ + siolx_out(bp, CD186x_MSVDTR, 0); + } + + /* Reset port */ + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_SOFTRESET); + /* Disable all interrupts from this port */ + port->IER = 0; + siolx_out(bp, CD186x_IER, port->IER); + + if (tty) + { + set_bit(TTY_IO_ERROR, &tty->flags); + } + port->flags &= ~ASYNC_INITIALIZED; + + /* + * If this is the last opened port on the board + * shutdown whole board + */ + MOD_DEC_USE_COUNT; +} + + +static int block_til_ready(struct tty_struct *tty, struct file * filp, + struct siolx_port *port) +{ + DECLARE_WAITQUEUE(wait, current); + struct siolx_board *bp = port_Board(port); + int retval; + int do_clocal = 0; + int CD; + + /* + * If the device is in the middle of being closed, then block + * until it's done, and then try again. + */ + if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) + { + interruptible_sleep_on(&port->close_wait); + if (port->flags & ASYNC_HUP_NOTIFY) + { + return -EAGAIN; + } + else + { + return -ERESTARTSYS; + } + } + + /* + * If this is a callout device, then just make sure the normal + * device isn't being used. + */ + if (tty->driver.subtype == SIOLX_TYPE_CALLOUT) + { + if (port->flags & ASYNC_NORMAL_ACTIVE) + { + return -EBUSY; + } + if ((port->flags & ASYNC_CALLOUT_ACTIVE) && + (port->flags & ASYNC_SESSION_LOCKOUT) && + (port->session != current->session)) + { + return -EBUSY; + } + if ((port->flags & ASYNC_CALLOUT_ACTIVE) && + (port->flags & ASYNC_PGRP_LOCKOUT) && + (port->pgrp != current->pgrp)) + { + return -EBUSY; + } + port->flags |= ASYNC_CALLOUT_ACTIVE; + return 0; + } + + /* + * If non-blocking mode is set, or the port is not enabled, + * then make the check up front and then exit. 
+ */ + if ((filp->f_flags & O_NONBLOCK) || + (tty->flags & (1 << TTY_IO_ERROR))) + { + if (port->flags & ASYNC_CALLOUT_ACTIVE) + { + return -EBUSY; + } + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (port->flags & ASYNC_CALLOUT_ACTIVE) + { + if (port->normal_termios.c_cflag & CLOCAL) + { + do_clocal = 1; + } + } + else + { + if (C_CLOCAL(tty)) + { + do_clocal = 1; + } + } + + /* + * Block waiting for the carrier detect and the line to become + * free (i.e., not in use by the callout). While we are in + * this loop, info->count is dropped by one, so that + * rs_close() knows when to free things. We restore it upon + * exit, either normal or abnormal. + */ + retval = 0; + add_wait_queue(&port->open_wait, &wait); + cli(); + if (!tty_hung_up_p(filp)) + { + port->count--; + } + sti(); + port->blocked_open++; + while (1) + { + cli(); + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + CD = siolx_in(bp, CD186x_MSVR) & MSVR_CD; + if (!(port->flags & ASYNC_CALLOUT_ACTIVE)) + { + if (SIOLX_CRTSCTS (tty)) + { + /* Activate RTS */ + port->MSVR |= MSVR_DTR; + siolx_out (bp, CD186x_MSVR, port->MSVR); + } + else + { + /* Activate DTR */ + port->MSVR |= MSVR_DTR; + siolx_out (bp, CD186x_MSVR, port->MSVR); + } + } + sti(); + set_current_state(TASK_INTERRUPTIBLE); + if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) + { + if (port->flags & ASYNC_HUP_NOTIFY) + { + retval = -EAGAIN; + } + else + { + retval = -ERESTARTSYS; + } + break; + } + if (!(port->flags & ASYNC_CALLOUT_ACTIVE) && + !(port->flags & ASYNC_CLOSING) && + (do_clocal || CD)) + { + break; + } + if (signal_pending(current)) + { + retval = -ERESTARTSYS; + break; + } + schedule(); + } + current->state = TASK_RUNNING; + remove_wait_queue(&port->open_wait, &wait); + if (!tty_hung_up_p(filp)) + { + port->count++; + } + port->blocked_open--; + if (retval) + { + return retval; + } + + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; +} + +static inline struct siolx_port *siolx_portstruc(register int line) +{ + register struct siolx_port *pp; + + line -= siolx_driver.minor_start; + for(pp = siolx_port_root; (pp != NULL) && (line >= 0); --line, pp = pp->next_by_global_list) + { + if(line == 0) + { + return pp; + } + } + return NULL; +} + + +static int siolx_open(struct tty_struct * tty, struct file * filp) +{ + int error; + struct siolx_port * port; + struct siolx_board * bp; + unsigned long flags; + + port = siolx_portstruc(MINOR(tty->device)); + + if(port == NULL) + { + return -ENODEV; + } + bp = port->board; + if(bp == NULL) + { + return -ENODEV; + } + +#ifdef DEBUG_SIOLX + printk (KERN_DEBUG "Board = %d, bp = %p, port = %p, portno = %d.\n", + bp->boardnumber, bp, port, siolx_portstruc(MINOR(tty->device))); +#endif + + if (siolx_paranoia_check(port, tty->device, "siolx_open")) + return -ENODEV; + + MOD_INC_USE_COUNT; + + port->count++; + tty->driver_data = port; + port->tty = tty; + + if ((error = siolx_setup_port(bp, port))) + return error; + + if ((error = block_til_ready(tty, filp, port))) + return error; + + if ((port->count == 1) && (port->flags & ASYNC_SPLIT_TERMIOS)) { + if (tty->driver.subtype == SIOLX_TYPE_NORMAL) + *tty->termios = port->normal_termios; + else + *tty->termios = port->callout_termios; + save_flags(flags); cli(); + siolx_change_speed(bp, port); + restore_flags(flags); + } + + port->session = current->session; + port->pgrp = current->pgrp; + return 0; +} + + +static void siolx_close(struct tty_struct * tty, struct file * filp) +{ + struct siolx_port *port = (struct siolx_port *) tty->driver_data; + 
struct siolx_board *bp; + unsigned long flags; + unsigned long timeout; + + if (!port || siolx_paranoia_check(port, tty->device, "close")) + return; + + save_flags(flags); cli(); + if (tty_hung_up_p(filp)) { + restore_flags(flags); + return; + } + + bp = port_Board(port); + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { + printk(KERN_ERR "sx%d: siolx_close: bad port count;" + " tty->count is 1, port count is %d\n", + board_No(bp), port->count); + port->count = 1; + } + if (--port->count < 0) { + printk(KERN_ERR "sx%d: siolx_close: bad port count for tty%d: %d\n", + board_No(bp), port_No(port), port->count); + port->count = 0; + } + if (port->count) { + restore_flags(flags); + return; + } + port->flags |= ASYNC_CLOSING; + /* + * Save the termios structure, since this port may have + * separate termios for callout and dialin. + */ + if (port->flags & ASYNC_NORMAL_ACTIVE) + port->normal_termios = *tty->termios; + if (port->flags & ASYNC_CALLOUT_ACTIVE) + port->callout_termios = *tty->termios; + /* + * Now we wait for the transmit buffer to clear; and we notify + * the line discipline to only process XON/XOFF characters. + */ + tty->closing = 1; + if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) + tty_wait_until_sent(tty, port->closing_wait); + /* + * At this point we stop accepting input. To do this, we + * disable the receive line status interrupts, and tell the + * interrupt driver to stop checking the data ready bit in the + * line status register. + */ + port->IER &= ~IER_RXD; + if (port->flags & ASYNC_INITIALIZED) { + port->IER &= ~IER_TXRDY; + port->IER |= IER_TXEMPTY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + /* + * Before we drop DTR, make sure the UART transmitter + * has completely drained; this is especially + * important if there is a transmit FIFO! 
+ */ + timeout = jiffies+HZ; + while(port->IER & IER_TXEMPTY) { + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(port->timeout); + if (time_after(jiffies, timeout)) { + printk (KERN_INFO "siolx: Timeout waiting for close\n"); + break; + } + } + + } + siolx_shutdown_port(bp, port); + if (tty->driver.flush_buffer) + tty->driver.flush_buffer(tty); + if (tty->ldisc.flush_buffer) + tty->ldisc.flush_buffer(tty); + tty->closing = 0; + port->event = 0; + port->tty = 0; + if (port->blocked_open) { + if (port->close_delay) { + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(port->close_delay); + } + wake_up_interruptible(&port->open_wait); + } + port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE| + ASYNC_CLOSING); + wake_up_interruptible(&port->close_wait); + restore_flags(flags); +} + + +static int siolx_write(struct tty_struct * tty, int from_user, + const unsigned char *buf, int count) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + int c, total = 0; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_write")) + return 0; + + bp = port_Board(port); + + if (!tty || !port->xmit_buf || !tmp_buf) + return 0; + + save_flags(flags); + if (from_user) { + down(&tmp_buf_sem); + while (1) { + c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, + SERIAL_XMIT_SIZE - port->xmit_head)); + if (c <= 0) + break; + + c -= copy_from_user(tmp_buf, buf, c); + if (!c) { + if (!total) + total = -EFAULT; + break; + } + + cli(); + c = MIN(c, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, + SERIAL_XMIT_SIZE - port->xmit_head)); + memcpy(port->xmit_buf + port->xmit_head, tmp_buf, c); + port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1); + port->xmit_cnt += c; + restore_flags(flags); + + buf += c; + count -= c; + total += c; + } + up(&tmp_buf_sem); + } else { + while (1) { + cli(); + c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, + SERIAL_XMIT_SIZE - port->xmit_head)); + if (c <= 0) { + restore_flags(flags); + break; + } + memcpy(port->xmit_buf + port->xmit_head, buf, c); + port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1); + port->xmit_cnt += c; + restore_flags(flags); + + buf += c; + count -= c; + total += c; + } + } + + cli(); + if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped && + !(port->IER & IER_TXRDY)) { + port->IER |= IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + } + restore_flags(flags); + return total; +} + + +static void siolx_put_char(struct tty_struct * tty, unsigned char ch) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_put_char")) + return; + + if (!tty || !port->xmit_buf) + return; + + save_flags(flags); cli(); + + if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { + restore_flags(flags); + return; + } + + port->xmit_buf[port->xmit_head++] = ch; + port->xmit_head &= SERIAL_XMIT_SIZE - 1; + port->xmit_cnt++; + restore_flags(flags); +} + + +static void siolx_flush_chars(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_flush_chars")) + return; + + if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || + !port->xmit_buf) + return; + + save_flags(flags); cli(); + port->IER |= IER_TXRDY; + siolx_out(port_Board(port), CD186x_CAR, port_No_by_chip(port)); + siolx_out(port_Board(port), 
CD186x_IER, port->IER); + restore_flags(flags); +} + + +static int siolx_write_room(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + int ret; + + if (siolx_paranoia_check(port, tty->device, "siolx_write_room")) + return 0; + + ret = SERIAL_XMIT_SIZE - port->xmit_cnt - 1; + if (ret < 0) + ret = 0; + return ret; +} + + +static int siolx_chars_in_buffer(struct tty_struct *tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + + if (siolx_paranoia_check(port, tty->device, "siolx_chars_in_buffer")) + return 0; + + return port->xmit_cnt; +} + + +static void siolx_flush_buffer(struct tty_struct *tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_flush_buffer")) + return; + + save_flags(flags); cli(); + port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; + restore_flags(flags); + + wake_up_interruptible(&tty->write_wait); + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) + (tty->ldisc.write_wakeup)(tty); +} + + +static int siolx_get_modem_info(struct siolx_port * port, unsigned int *value) +{ + struct siolx_board * bp; + unsigned char status; + unsigned int result; + unsigned long flags; + + bp = port_Board(port); + save_flags(flags); cli(); + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + status = siolx_in(bp, CD186x_MSVR); + restore_flags(flags); +#ifdef DEBUG_SIOLX + printk (KERN_DEBUG "Got msvr[%d] = %02x, car = %d.\n", + port_No(port), status, siolx_in (bp, CD186x_CAR)); + printk (KERN_DEBUG "siolx_port = %p, port = %p\n", siolx_port, port); +#endif + if (SIOLX_CRTSCTS(port->tty)) { + result = /* (status & MSVR_RTS) ? */ TIOCM_DTR /* : 0) */ + | ((status & MSVR_DTR) ? TIOCM_RTS : 0) + | ((status & MSVR_CD) ? TIOCM_CAR : 0) + |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */ + | ((status & MSVR_CTS) ? TIOCM_CTS : 0); + } else { + result = /* (status & MSVR_RTS) ? */ TIOCM_RTS /* : 0) */ + | ((status & MSVR_DTR) ? TIOCM_DTR : 0) + | ((status & MSVR_CD) ? TIOCM_CAR : 0) + |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */ + | ((status & MSVR_CTS) ? TIOCM_CTS : 0); + } + put_user(result,(unsigned int *) value); + return 0; +} + + +static int siolx_set_modem_info(struct siolx_port * port, unsigned int cmd, + unsigned int *value) +{ + int error; + unsigned int arg; + unsigned long flags; + struct siolx_board *bp = port_Board(port); + + error = verify_area(VERIFY_READ, value, sizeof(int)); + if (error) + return error; + + get_user(arg, (unsigned long *) value); + switch (cmd) { + case TIOCMBIS: + /* if (arg & TIOCM_RTS) + port->MSVR |= MSVR_RTS; */ + /* if (arg & TIOCM_DTR) + port->MSVR |= MSVR_DTR; */ + + if (SIOLX_CRTSCTS(port->tty)) { + if (arg & TIOCM_RTS) + port->MSVR |= MSVR_DTR; + } else { + if (arg & TIOCM_DTR) + port->MSVR |= MSVR_DTR; + } + break; + case TIOCMBIC: + /* if (arg & TIOCM_RTS) + port->MSVR &= ~MSVR_RTS; */ + /* if (arg & TIOCM_DTR) + port->MSVR &= ~MSVR_DTR; */ + if (SIOLX_CRTSCTS(port->tty)) { + if (arg & TIOCM_RTS) + port->MSVR &= ~MSVR_DTR; + } else { + if (arg & TIOCM_DTR) + port->MSVR &= ~MSVR_DTR; + } + break; + case TIOCMSET: + /* port->MSVR = (arg & TIOCM_RTS) ? (port->MSVR | MSVR_RTS) : + (port->MSVR & ~MSVR_RTS); */ + /* port->MSVR = (arg & TIOCM_DTR) ? (port->MSVR | MSVR_DTR) : + (port->MSVR & ~MSVR_DTR); */ + if (SIOLX_CRTSCTS(port->tty)) { + port->MSVR = (arg & TIOCM_RTS) ? 
+ (port->MSVR | MSVR_DTR) : + (port->MSVR & ~MSVR_DTR); + } else { + port->MSVR = (arg & TIOCM_DTR) ? + (port->MSVR | MSVR_DTR): + (port->MSVR & ~MSVR_DTR); + } + break; + default: + return -EINVAL; + } + save_flags(flags); cli(); + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_MSVR, port->MSVR); + restore_flags(flags); + return 0; +} + + +static inline void siolx_send_break(struct siolx_port * port, unsigned long length) +{ + struct siolx_board *bp = port_Board(port); + unsigned long flags; + + save_flags(flags); cli(); + port->break_length = SIOLX_TPS / HZ * length; + port->COR2 |= COR2_ETC; + port->IER |= IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_COR2, port->COR2); + siolx_out(bp, CD186x_IER, port->IER); + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_CORCHG2); + siolx_wait_CCR(bp); + restore_flags(flags); +} + + +static inline int siolx_set_serial_info(struct siolx_port * port, + struct serial_struct * newinfo) +{ + struct serial_struct tmp; + struct siolx_board *bp = port_Board(port); + int change_speed; + unsigned long flags; + int error; + + error = verify_area(VERIFY_READ, (void *) newinfo, sizeof(tmp)); + if (error) + return error; + + if (copy_from_user(&tmp, newinfo, sizeof(tmp))) + return -EFAULT; + +#if 0 + if ((tmp.irq != bp->irq) || + (tmp.port != bp->base) || + (tmp.type != PORT_CIRRUS) || + (tmp.baud_base != (SIOLX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC) || + (tmp.custom_divisor != 0) || + (tmp.xmit_fifo_size != CD186x_NFIFO) || + (tmp.flags & ~SIOLX_LEGAL_FLAGS)) + return -EINVAL; +#endif + + change_speed = ((port->flags & ASYNC_SPD_MASK) != + (tmp.flags & ASYNC_SPD_MASK)); + change_speed |= (tmp.custom_divisor != port->custom_divisor); + + if (!capable(CAP_SYS_ADMIN)) { + if ((tmp.close_delay != port->close_delay) || + (tmp.closing_wait != port->closing_wait) || + ((tmp.flags & ~ASYNC_USR_MASK) != + (port->flags & ~ASYNC_USR_MASK))) + return -EPERM; + port->flags = ((port->flags & ~ASYNC_USR_MASK) | + (tmp.flags & ASYNC_USR_MASK)); + port->custom_divisor = tmp.custom_divisor; + } else { + port->flags = ((port->flags & ~ASYNC_FLAGS) | + (tmp.flags & ASYNC_FLAGS)); + port->close_delay = tmp.close_delay; + port->closing_wait = tmp.closing_wait; + port->custom_divisor = tmp.custom_divisor; + } + if (change_speed) { + save_flags(flags); cli(); + siolx_change_speed(bp, port); + restore_flags(flags); + } + return 0; +} + + +static inline int siolx_get_serial_info(struct siolx_port * port, + struct serial_struct * retinfo) +{ + struct serial_struct tmp; + struct siolx_board *bp = port_Board(port); + int error; + + error = verify_area(VERIFY_WRITE, (void *) retinfo, sizeof(tmp)); + if (error) + return error; + + memset(&tmp, 0, sizeof(tmp)); + tmp.type = PORT_CIRRUS; + tmp.line = port->driverport; + tmp.port = bp->base; + tmp.irq = bp->irq; + tmp.flags = port->flags; + tmp.baud_base = (SIOLX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC; + tmp.close_delay = port->close_delay * HZ/100; + tmp.closing_wait = port->closing_wait * HZ/100; + tmp.custom_divisor = port->custom_divisor; + tmp.xmit_fifo_size = CD186x_NFIFO; + if (copy_to_user(retinfo, &tmp, sizeof(tmp))) + return -EFAULT; + return 0; +} + + +static int siolx_ioctl(struct tty_struct * tty, struct file * filp, + unsigned int cmd, unsigned long arg) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + int error; + int retval; + + if (siolx_paranoia_check(port, tty->device, "siolx_ioctl")) + return -ENODEV; + + switch (cmd) { + case TCSBRK: /* 
SVID version: non-zero arg --> no break */ + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + if (!arg) + siolx_send_break(port, HZ/4); /* 1/4 second */ + return 0; + case TCSBRKP: /* support for POSIX tcsendbreak() */ + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + siolx_send_break(port, arg ? arg*(HZ/10) : HZ/4); + return 0; + case TIOCGSOFTCAR: + error = verify_area(VERIFY_WRITE, (void *) arg, sizeof(long)); + if (error) + return error; + put_user(C_CLOCAL(tty) ? 1 : 0, + (unsigned long *) arg); + return 0; + case TIOCSSOFTCAR: + get_user(arg, (unsigned long *) arg); + tty->termios->c_cflag = + ((tty->termios->c_cflag & ~CLOCAL) | + (arg ? CLOCAL : 0)); + return 0; + case TIOCMGET: + error = verify_area(VERIFY_WRITE, (void *) arg, + sizeof(unsigned int)); + if (error) + return error; + return siolx_get_modem_info(port, (unsigned int *) arg); + case TIOCMBIS: + case TIOCMBIC: + case TIOCMSET: + return siolx_set_modem_info(port, cmd, (unsigned int *) arg); + case TIOCGSERIAL: + return siolx_get_serial_info(port, (struct serial_struct *) arg); + case TIOCSSERIAL: + return siolx_set_serial_info(port, (struct serial_struct *) arg); + default: + return -ENOIOCTLCMD; + } + return 0; +} + + +static void siolx_throttle(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_throttle")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + + /* Use DTR instead of RTS ! */ + if (SIOLX_CRTSCTS (tty)) + { + port->MSVR &= ~MSVR_DTR; + } + else + { + /* Auch!!! I think the system shouldn't call this then. */ + /* Or maybe we're supposed (allowed?) to do our side of hw + handshake anyway, even when hardware handshake is off. + When you see this in your logs, please report.... */ + printk (KERN_ERR "sx%d: Need to throttle, but can't (hardware hs is off)\n", + port_No (port)); + } + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + if (I_IXOFF(tty)) + { + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_SSCH2); + siolx_wait_CCR(bp); + } + siolx_out(bp, CD186x_MSVR, port->MSVR); + restore_flags(flags); +} + + +static void siolx_unthrottle(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_unthrottle")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + /* XXXX Use DTR INSTEAD???? */ + if (SIOLX_CRTSCTS(tty)) { + port->MSVR |= MSVR_DTR; + } /* Else clause: see remark in "siolx_throttle"... 
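siolx_write, siolx_put_char and siolx_write_room above all share the same transmit-ring arithmetic: SERIAL_XMIT_SIZE is a power of two, one slot stays empty so a full ring can be told from an empty one, and each copy is clipped both to the free space and to the bytes left before the buffer wraps. The minimal standalone sketch below reproduces that calculation with a small illustrative ring; it is not part of the patch.

/* Sketch of the transmit-ring free-space calculation. */
#include <stdio.h>

#define XMIT_SIZE 16            /* illustrative ring size, power of two */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        int head = 13, tail = 3;        /* example ring state */
        int want = 10;                  /* bytes the caller wants to queue */

        /* the driver keeps xmit_cnt explicitly; derived here for brevity */
        int cnt = (head - tail) & (XMIT_SIZE - 1);
        int room = XMIT_SIZE - cnt - 1; /* total free slots, one kept empty */
        int to_end = XMIT_SIZE - head;  /* contiguous bytes before the wrap */
        int chunk = MIN(want, MIN(room, to_end));

        printf("cnt=%d room=%d to_end=%d -> copy %d bytes now, wrap for the rest\n",
               cnt, room, to_end, chunk);
        return 0;
}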
*/ + + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + if (I_IXOFF(tty)) { + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_SSCH1); + siolx_wait_CCR(bp); + } + siolx_out(bp, CD186x_MSVR, port->MSVR); + restore_flags(flags); +} + + +static void siolx_stop(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_stop")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + port->IER &= ~IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + restore_flags(flags); +} + + +static void siolx_start(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_start")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) { + port->IER |= IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + } + restore_flags(flags); +} + + +/* + * This routine is called from the scheduler tqueue when the interrupt + * routine has signalled that a hangup has occurred. The path of + * hangup processing is: + * + * serial interrupt routine -> (scheduler tqueue) -> + * do_siolx_hangup() -> tty->hangup() -> siolx_hangup() + * + */ +static void do_siolx_hangup(void *private_) +{ + struct siolx_port *port = (struct siolx_port *) private_; + struct tty_struct *tty; + + tty = port->tty; + if (tty) + { + tty_hangup(tty); /* FIXME: module removal race here */ + } + MOD_DEC_USE_COUNT; +} + + +static void siolx_hangup(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + + if (siolx_paranoia_check(port, tty->device, "siolx_hangup")) + return; + + bp = port_Board(port); + + siolx_shutdown_port(bp, port); + port->event = 0; + port->count = 0; + port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE); + port->tty = 0; + wake_up_interruptible(&port->open_wait); +} + + +static void siolx_set_termios(struct tty_struct * tty, struct termios * old_termios) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_set_termios")) + return; + + if (tty->termios->c_cflag == old_termios->c_cflag && + tty->termios->c_iflag == old_termios->c_iflag) + return; + + save_flags(flags); cli(); + siolx_change_speed(port_Board(port), port); + restore_flags(flags); + + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + siolx_start(tty); + } +} + + +static void do_siolx_bh(void) +{ + run_task_queue(&tq_siolx); +} + + +static void do_softint(void *private_) +{ + struct siolx_port *port = (struct siolx_port *) private_; + struct tty_struct *tty; + + if(!(tty = port->tty)) + return; + + if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &port->event)) { + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) + (tty->ldisc.write_wakeup)(tty); + wake_up_interruptible(&tty->write_wait); + } +} + +static int siolx_finish_init_drivers(void) +{ + register struct siolx_board *bp; + register unsigned int count; + unsigned int maxport; + struct siolx_port *port; + struct siolx_port *lastport; + int error; + + bp = siolx_board_root; + + while(bp) + { + 
if(bp->chipnumber == 0) + { + maxport = SIOLX_NPORT; + } + else if((bp->boardtype == BD_16000C) && bp->reario) /* must be second chip of 16000C */ + { + maxport = SIOLX_NPORT/2; + } + else + { + maxport = SIOLX_NPORT; /* must be second chip of 16000P */ + } + + port = NULL; /* probably unnecessary */ + lastport = NULL; + for(count = 0; count < maxport; ++count) + { + port = (struct siolx_port*)kmalloc(sizeof(struct siolx_port), GFP_KERNEL); + if(port == NULL) + { + printk(KERN_ALERT + "siolx: Failed to create port structure on board %p.\n", bp); + break; /* no memory available */ + } + memset(port, 0, sizeof(struct siolx_port)); + + port->callout_termios = siolx_callout_driver.init_termios; + port->normal_termios = siolx_driver.init_termios; + port->magic = SIOLX_MAGIC; + port->tqueue.routine = do_softint; + port->tqueue.data = port; + port->tqueue_hangup.routine = do_siolx_hangup; + port->tqueue_hangup.data = port; + port->close_delay = 50 * HZ/100; + port->closing_wait = 3000 * HZ/100; + init_waitqueue_head(&port->open_wait); + init_waitqueue_head(&port->close_wait); + + port->board = bp; + port->driverport = NumSiolxPorts; + port->boardport = (count + (port->board->chipnumber*SIOLX_NPORT)); /* 0-16 */ + + if(count == 0) + { + bp->portlist = port; + } + else if(lastport) /* if count != 0 lastport should be non-null */ + { + lastport->next_by_board = port; + } + if(siolx_port_root == NULL) + { + siolx_port_root = port; + siolx_port_last = port; + } + else + { + siolx_port_last->next_by_global_list = port; + siolx_port_last = port; + } + lastport = port; + ++NumSiolxPorts; + } + bp = bp->next_by_global_list; + } + + siolx_driver.num = NumSiolxPorts; + + siolx_table = (struct tty_struct **) kmalloc(NumSiolxPorts*sizeof(struct tty_struct *), GFP_KERNEL); + if(siolx_table == NULL) + { + printk(KERN_ALERT "siolx: Could not allocate memory for siolx_table.\n"); + return 1; + } + memset(siolx_table, 0, NumSiolxPorts*sizeof(struct tty_struct *)); + + siolx_termios = (struct termios **) kmalloc(NumSiolxPorts*sizeof(struct termios *), GFP_KERNEL); + if(siolx_termios == NULL) + { + printk(KERN_ALERT "siolx: Could not allocate memory for siolx_termios.\n"); + return 1; + } + memset(siolx_termios, 0, NumSiolxPorts*sizeof(struct termios *)); + + siolx_termios_locked = (struct termios **) kmalloc(NumSiolxPorts*sizeof(struct termios *), GFP_KERNEL); + if(siolx_termios_locked == NULL) + { + printk(KERN_ALERT "siolx: Could not allocate memory for siolx_termios_locked.\n"); + return 1; + } + memset(siolx_termios_locked, 0, NumSiolxPorts*sizeof(struct termios *)); + + siolx_driver.table = siolx_table; /* will be changed */ + siolx_driver.termios = siolx_termios; /* will be changed */ + siolx_driver.termios_locked = siolx_termios_locked; /* will be changed */ + + if ((error = tty_register_driver(&siolx_driver))) + { + if(tmp_buf) + { + free_page((unsigned long)tmp_buf); + tmp_buf = 0; + } + printk(KERN_ERR "siolx: Couldn't register Aurora Asynchronous Adapter driver, error = %d\n", + error); + return 1; + } + if ((error = tty_register_driver(&siolx_callout_driver))) + { + if(tmp_buf) + { + free_page((unsigned long)tmp_buf); + tmp_buf = NULL; + } + tty_unregister_driver(&siolx_driver); + printk(KERN_ERR "siolx: Couldn't register Aurora Asynchronous Adapter callout driver, error = %d\n", + error); + return 1; + } + siolx_driver_registered = 1; + siolx_callout_driver_registered = 1; + return 0; /* success */ +} + +static int siolx_init_drivers(void) +{ + if (!(tmp_buf = (unsigned char *) 
get_free_page(GFP_KERNEL))) + { + printk(KERN_ERR "siolx: Couldn't get free page.\n"); + return 1; + } + init_bh(siolx_bhindex, do_siolx_bh); + memset(&siolx_driver, 0, sizeof(siolx_driver)); + siolx_driver.magic = TTY_DRIVER_MAGIC; + + siolx_driver.driver_name = "aurasiolx"; + siolx_driver.name = "ttyS"; + siolx_driver.major = siolx_major; +#ifdef MODULE + siolx_driver.minor_start = siolx_minorstart; /* changed from command line */ +#else + siolx_driver.minor_start = GetMinorStart(); +#endif + siolx_driver.num = 0; /* will be changed */ + + siolx_driver.type = TTY_DRIVER_TYPE_SERIAL; + siolx_driver.subtype = SIOLX_TYPE_NORMAL; + siolx_driver.init_termios = tty_std_termios; + siolx_driver.init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + siolx_driver.flags = TTY_DRIVER_REAL_RAW; + siolx_driver.refcount = &siolx_refcount; + + siolx_driver.table = siolx_table; /* will be changed */ + siolx_driver.termios = siolx_termios; /* will be changed */ + siolx_driver.termios_locked = siolx_termios_locked; /* will be changed */ + + siolx_driver.open = siolx_open; + siolx_driver.close = siolx_close; + siolx_driver.write = siolx_write; + siolx_driver.put_char = siolx_put_char; + siolx_driver.flush_chars = siolx_flush_chars; + siolx_driver.write_room = siolx_write_room; + siolx_driver.chars_in_buffer = siolx_chars_in_buffer; + siolx_driver.flush_buffer = siolx_flush_buffer; + siolx_driver.ioctl = siolx_ioctl; + siolx_driver.throttle = siolx_throttle; + siolx_driver.unthrottle = siolx_unthrottle; + siolx_driver.set_termios = siolx_set_termios; + siolx_driver.stop = siolx_stop; + siolx_driver.start = siolx_start; + siolx_driver.hangup = siolx_hangup; + + siolx_callout_driver = siolx_driver; + siolx_callout_driver.name = "cuw"; + siolx_callout_driver.major = (siolx_major+1); + siolx_callout_driver.subtype = SIOLX_TYPE_CALLOUT; + + siolx_driver.read_proc = siolx_read_proc; + return 0; +} + + +static void siolx_release_drivers(void) +{ + unsigned int intr_val; + struct siolx_board *bp; + + if(tmp_buf) + { + free_page((unsigned long)tmp_buf); + tmp_buf = NULL; + } + if(siolx_driver_registered) + { + tty_unregister_driver(&siolx_driver); + siolx_driver_registered = 0; + } + if(siolx_callout_driver_registered) + { + tty_unregister_driver(&siolx_callout_driver); + siolx_callout_driver_registered = 0; + } + /* unallocate and turn off ints */ + for(intr_val = 0; intr_val < SIOLX_NUMINTS; ++intr_val) + { + if(SiolxIrqRoot[intr_val] != NULL) + { + for(bp = SiolxIrqRoot[intr_val]; bp != NULL; + bp = bp->next_by_interrupt) + { + SiolxShutdownBoard(bp); /* turn off int; release the plx vaddr space */ + } + free_irq(intr_val, &SiolxIrqRoot[intr_val]); + } + } + +} + +static void siolx_release_memory(void) +{ + register struct siolx_board *bp; + register struct siolx_port *port; + + while(siolx_board_root) + { + bp = siolx_board_root; + siolx_board_root = bp->next_by_global_list; + siolx_release_io_range(bp); /* releases the chip vaddr */ + kfree(bp); + } + while(siolx_port_root) + { + port = siolx_port_root; + if(port->xmit_buf) + { /* should have been done when port shutdown */ + free_page((unsigned long) port->xmit_buf); + port->xmit_buf = NULL; + } + siolx_port_root = port->next_by_global_list; + kfree(port); + } + if(siolx_table) + { + kfree(siolx_table); + siolx_table = NULL; + } + if(siolx_termios) + { + kfree(siolx_termios); + siolx_termios = NULL; + } + if(siolx_termios_locked) + { + kfree(siolx_termios_locked); + siolx_termios_locked = NULL; + } + +#ifdef SIOLX_TIMER + del_timer 
(&missed_irq_timer); +#endif +} + + +static void siolx_cleanup(void) +{ + siolx_release_drivers(); + siolx_release_memory(); +} + +/* + * This routine must be called by kernel at boot time + */ + +static int __init siolx_init(void) +{ + unsigned char bus; + unsigned char devfn; + struct siolx_board *bp; + struct siolx_board *bp2; + unsigned int boardcount; + struct pci_dev *pdev = NULL; + unsigned int ecntl; + unsigned int intr_val; + + printk(KERN_ALERT "aurora interea miseris mortalibus almam extulerat lucem\n"); + printk(KERN_ALERT " referens opera atque labores\n"); + printk(KERN_INFO "siolx: Siolx Aurora Asynchronous Adapter driver v" VERSION ", (c) Telford Tools, Inc.\n"); +#ifdef CONFIG_SIOLX_RTSCTS + printk (KERN_INFO "siolx: DTR/RTS pin is always RTS.\n"); +#else + printk (KERN_INFO "siolx: DTR/RTS pin is RTS when CRTSCTS is on.\n"); +#endif + memset(SiolxIrqRoot, 0, sizeof(SiolxIrqRoot)); + tmp_buf = NULL; + siolx_board_root = NULL; /* clear out the global pointers */ + siolx_board_last = NULL; + siolx_port_root = NULL; + siolx_port_last = NULL; + NumSiolxPorts = 0; + siolx_table = NULL; /* make dynamic */ + siolx_termios = NULL; + siolx_termios_locked = NULL; + siolx_driver_registered = 0; + siolx_callout_driver_registered = 0; + + boardcount = 0; + + if (siolx_init_drivers()) + { + printk(KERN_INFO "siolx: Could not initialize drivers.\n"); + return -EIO; + } + + if (!pci_present()) + { + printk(KERN_INFO "siolx: Could not find PCI bus.\n"); + return -EIO; /* no PCI bus no Aurora cards */ + } + + while(1) + { + pdev = pci_find_device (siolx_vendor_id, siolx_device_id, pdev); + if (!pdev) + { + break; /* found no devices */ + } + + DEBUGPRINT((KERN_ALERT "%s\n", pdev->name)); + DEBUGPRINT((KERN_ALERT "subsystem vendor is %x.\n", + pdev->subsystem_vendor)); + DEBUGPRINT((KERN_ALERT "subsystem device is %x.\n", + pdev->subsystem_device)); + DEBUGPRINT((KERN_ALERT + "BAR0 = %lx\nBAR1 = %lx\nBAR2 = %lx\nBAR3 = %lx\nBAR4 = %lx\nBAR5 = %lx\n", + pci_resource_start(pdev, 0), + pci_resource_start(pdev, 1), + pci_resource_start(pdev, 2), + pci_resource_start(pdev, 3), + pci_resource_start(pdev, 4), + pci_resource_start(pdev, 5))); + DEBUGPRINT((KERN_ALERT + "LAS0 = %lx\nLAS1 = %lx\nLAS2 = %lx\nLAS3 = %lx\nLAS4 = %lx\nLAS5 = %lx\n", + pci_resource_len(pdev, 0), + pci_resource_len(pdev, 1), + pci_resource_len(pdev, 2), + pci_resource_len(pdev, 3), + pci_resource_len(pdev, 4), + pci_resource_len(pdev, 5))); + + if(pdev->subsystem_vendor == siolx_subsystem_vendor) + { + if(pdev->subsystem_device == siolx_subsystem_pci_device) + { + bp = (struct siolx_board*)kmalloc(sizeof(struct siolx_board), GFP_KERNEL); + if(bp == NULL) + { + printk(KERN_ALERT "siolx: Failed to create board structure on board %d.\n", boardcount); + break; /* no memory available */ + } + memset(bp, 0, sizeof(struct siolx_board)); + bp->boardtype = BD_8000P; + } + else if(pdev->subsystem_device == siolx_subsystem_cpci_device) + { + bp = (struct siolx_board*)kmalloc(sizeof(struct siolx_board), GFP_KERNEL); + if(bp == NULL) + { + printk(KERN_ALERT + "siolx: Failed to create board structure on board%p.\n", bp); + break; /* no memory available */ + } + memset(bp, 0, sizeof(struct siolx_board)); + bp->boardtype = BD_8000C; + } + else + { + continue; + } + } + else + { + continue; + } + + DEBUGPRINT((KERN_ALERT "siolx: interrupt is %i.\n", pdev->irq)); + bus = pdev->bus->number; + devfn = pdev->devfn; + DEBUGPRINT((KERN_ALERT "siolx: bus is %x, slot is %x.\n", bus, PCI_SLOT(devfn))); + + if (pci_enable_device(pdev)) + { + 
kfree(bp); + continue; /* enable failed */ + } + pci_set_master(pdev); + + bp->irq = pdev->irq; + SiolxResetBoard(bp, pdev); /* make sure the board is in a known state */ + if(bp->plx_vaddr == 0) + { + printk(KERN_ALERT "siolx: failed to remap plx address space.\n"); + kfree(bp); + continue; + } + bp->vaddr = (unsigned long) ioremap_nocache(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if(bp->vaddr) + { + bp->base = (bp->vaddr + MPASYNC_CHIP1_OFFSET); + bp->boardnumber = boardcount; + if (siolx_probe(bp)) /* failure is nonzero */ + { + iounmap((void*)bp->plx_vaddr); + bp->plx_vaddr = 0; + iounmap((void*)bp->vaddr); + bp->vaddr = 0; + kfree(bp); /* something wrong with board */ + continue; + } + intr_val = bp->irq; + if((intr_val < 0) || (intr_val >= SIOLX_NUMINTS)) + { + printk(KERN_ALERT "siolx: bad interrupt %i board %p.\n", intr_val, bp); + iounmap((void*)bp->plx_vaddr); /* but plx space was remapped */ + bp->plx_vaddr = 0; + iounmap((void*)bp->vaddr); /* release chip space */ + bp->vaddr = 0; + kfree(bp); /* release the board structure */ + continue; + } + bp->next_by_interrupt = SiolxIrqRoot[intr_val]; + SiolxIrqRoot[intr_val] = bp; + if(siolx_board_last == NULL) + { + siolx_board_root = bp; + siolx_board_last = bp; + } + else + { + siolx_board_last->next_by_global_list = bp; + siolx_board_last = bp; + } + bp->chipnumber = 0; + bp->intstatus = bp->plx_vaddr + PLX_ICSR; + bp->next_by_chain = bp; /* one item chain */ + ecntl = readl(bp->plx_vaddr + PLX_ECNTL); + boardcount++; /* added a board */ + if(pci_resource_len(pdev, 2) > MPASYNC_CHIP2_OFFSET) + { + ++(bp->boardtype); /* works because how types are defined 8000X --> 16000X*/ + if(bp->boardtype == BD_16000C) + { + if((ecntl & PLX_ECNTLUSERI) == 0) + { + bp->reario = 1; + } + } + bp2 = (struct siolx_board*)kmalloc(sizeof(struct siolx_board), GFP_KERNEL); + if(bp2 == NULL) + { + printk(KERN_ALERT + "siolx: Failed to create second board structure on board %p.\n", bp); + /* fall through because must turn on ints for other chip */ + } + else + { + memset(bp2, 0, sizeof(struct siolx_board)); /* unnecessary */ + *bp2 = *bp; /* note that all guys in chain point to same next_by interrupt */ + bp->next_by_chain = bp2; /* circular list */ + bp2->next_by_chain = bp;/* now chain two elements*/ + ++(bp2->chipnumber); /* chipnumber 1 */ + bp2->base = (bp2->vaddr + MPASYNC_CHIP2_OFFSET); + if(siolx_probe(bp2)) + { + printk(KERN_ALERT "siolx: Failed to probe second board structure on board %p.\n", bp); + kfree(bp2); + /* fall through because must turn on ints for other chip */ + /* don't release pci memory remap -- still works for other chip */ + } + else if(siolx_board_last == NULL) + { + siolx_board_root = bp2; /* this case should not occur */ + siolx_board_last = bp2; + } + else + { + siolx_board_last->next_by_global_list = bp2; + siolx_board_last = bp2; + } + /* don't increment boardnumber */ + } + } + } + else /* could not remap the cd18xx space */ + { + iounmap((void*)bp->plx_vaddr); /* but plx space was remapped */ + bp->plx_vaddr = 0; + kfree(bp); + } + } + if (boardcount == 0) + { + printk(KERN_INFO "siolx: No Aurora Asynchronous Adapter boards detected.\n"); + siolx_cleanup(); /* don't need any allocated memory */ + return -EIO; + } + if (siolx_finish_init_drivers()) + { + printk(KERN_INFO "siolx: Could not finish driver initialization.\n"); + siolx_cleanup(); + return -EIO; + } + + for(intr_val = 0; intr_val < SIOLX_NUMINTS; ++intr_val) /* trying to install as few int handlers as possible */ + { /* one for each group of 
boards (actually chips) on a given irq */ + if(SiolxIrqRoot[intr_val] != NULL) + { + if (request_irq(intr_val, siolx_interrupt, SA_SHIRQ, "siolx Aurora Asynchronous Adapter", + &SiolxIrqRoot[intr_val]) == 0) + /* interrupts on perboard basis + * cycle through chips and then + * ports */ + /* NOTE PLX INTS ARE OFF -- so turn them on */ + { + for(bp = SiolxIrqRoot[intr_val]; bp != NULL; bp = bp->next_by_interrupt) + { + writel(PLX_ICSRLCLINTPCI | PLX_ICSRPCIINTS, bp->plx_vaddr + PLX_ICSR); /* enable interrupts */ + } + } + else + { + printk(KERN_ALERT "siolx: Unable to get interrupt, board set up not complete %i.\n", intr_val); + /* no interrupts but on all lists */ + } + } + } + return 0; +} + +module_init(siolx_init); +module_exit(siolx_cleanup); +MODULE_DESCRIPTION("multiport Aurora asynchronous driver"); +MODULE_AUTHOR("Joachim Martillo "); +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/cd1865/cdsiolx.h linux.22-ac2/drivers/char/cd1865/cdsiolx.h --- linux.vanilla/drivers/char/cd1865/cdsiolx.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/cd1865/cdsiolx.h 2003-09-01 13:54:30.000000000 +0100 @@ -0,0 +1,136 @@ +/* -*- linux-c -*- */ +/* + * This file was modified from + * linux/drivers/char/siolx_io8.h -- + * Siolx IO8+ multiport serial driver. + * + * Copyright (C) 1997 Roger Wolff (R.E.Wolff@BitWizard.nl) + * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com) + * Modifications (C) 2002 Telford Tools, Inc. (martillo@telfordtools.com) + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + * */ + +#ifndef __LINUX_SIOLX_H +#define __LINUX_SIOLX_H + +#include + +#ifdef __KERNEL__ + +#define SIOLX_NBOARD 8 + +/* eight ports per chip. */ +#define SIOLX_NPORT 8 +#define SIOLX_PORT(line) ((line) & (SIOLX_NPORT - 1)) + +#define MHz *1000000 /* I'm ashamed of myself. */ + +/* On-board oscillator frequency */ +#define SIOLX_OSCFREQ (33 MHz) +/* oregano is in /1 which mace 66Mhz is in /2 mode */ + +/* Ticks per sec. Used for setting receiver timeout and break length */ +#define SIOLX_TPS 4000 + +/* Yeah, after heavy testing I decided it must be 6. + * Sure, You can change it if needed. + */ +#define SIOLX_RXFIFO 6 /* Max. receiver FIFO size (1-8) */ + +#define SIOLX_MAGIC 0x0907 + +#define SIOLX_CCR_TIMEOUT 10000 /* CCR timeout. 
You may need to wait upto + 10 milliseconds before the internal + processor is available again after + you give it a command */ +#define SIOLX_NUMINTS 32 + +struct siolx_board +{ + unsigned long flags; + unsigned long base; + unsigned char irq; + unsigned char DTR; + unsigned long vaddr; + unsigned long plx_vaddr; + unsigned long intstatus; + struct siolx_board *next_by_chain; /* chains are circular */ + struct siolx_board *next_by_interrupt; /* only chip 0 */ + struct siolx_board *next_by_global_list; /* all boards not circular */ + struct siolx_port *portlist; + struct pci_dev pdev; + unsigned int chipnumber; /* for 8000X this structure really defines the board + * for 16000X the chain corresponds to a board and each + * structure corresponds to a dhip on a single board */ + unsigned int boardnumber; /* same for all boards/chips in a board chain */ + unsigned int boardtype; + unsigned int chiptype; + unsigned int chiprev; + unsigned int reario; + unsigned int rj45; +}; + +#define DRIVER_DEBUG() (siolx_debug) +#define DEBUGPRINT(arg) if(DRIVER_DEBUG()) printk arg + +struct siolx_port +{ + int magic; + int baud_base; + int flags; + struct tty_struct * tty; + int count; + int blocked_open; + int event; + int timeout; + int close_delay; + long session; + long pgrp; + unsigned char * xmit_buf; + int custom_divisor; + int xmit_head; + int xmit_tail; + int xmit_cnt; + struct termios normal_termios; + struct termios callout_termios; + wait_queue_head_t open_wait; + wait_queue_head_t close_wait; + struct tq_struct tqueue; + struct tq_struct tqueue_hangup; + short wakeup_chars; + short break_length; + unsigned short closing_wait; + unsigned char mark_mask; + unsigned char IER; + unsigned char MSVR; + unsigned char COR2; +#ifdef SIOLX_REPORT_OVERRUN + unsigned long overrun; +#endif +#ifdef SIOLX_REPORT_FIFO + unsigned long hits[10]; +#endif + struct siolx_port *next_by_global_list; + struct siolx_port *next_by_board; + struct siolx_board *board; + unsigned int boardport; /* relative to chain 0-15 for 16000X */ + unsigned int driverport; /* maps to minor device number */ +}; + +#endif /* __KERNEL__ */ +#endif /* __LINUX_SIOLX_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/cd1865/Makefile linux.22-ac2/drivers/char/cd1865/Makefile --- linux.vanilla/drivers/char/cd1865/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/cd1865/Makefile 2003-06-29 16:10:04.000000000 +0100 @@ -0,0 +1,27 @@ +# Copyright (C) 2001 By Joachim Martillo, Telford Tools, Inc. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version +# 2 of the License, or (at your option) any later version. + +# File: drivers/net/WAN/atiXX50/Makefile +# +# Makefile for the Aurora ESSC based cards +# Specifically the 2520, 4020, 4520, 8520 +# + +all: SILX.o + +O_TARGET := SILX.o + +obj-y := cd1865.o +obj-m := $(O_TARGET) + +EXTRA_CFLAGS += -I. 
+ +include $(TOPDIR)/Rules.make + +clean: + rm -f core *.o *.a *.s *~ + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/cd1865/plx9060.h linux.22-ac2/drivers/char/cd1865/plx9060.h --- linux.vanilla/drivers/char/cd1865/plx9060.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/cd1865/plx9060.h 2003-06-29 16:10:04.000000000 +0100 @@ -0,0 +1,97 @@ +#ifndef _PLX9060_H_ +#define _PLX9060_H_ +/* + * Aurora Cirrus CL-CD180/1865 Async Driver (sio16) + * + * This module contains the definitions for the PLX + * 9060SD PCI controller chip. + * + * COPYRIGHT (c) 1996-1998 BY AURORA TECHNOLOGIES, INC., WALTHAM, MA. + * Modifications Copyright (C) 2002 By Telford Tools, Inc., Boston, MA. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + * file: plx9060.h + * author: cmw + * created: 11/21/1996 + * info: $Id: plx9060.h,v 1.2 2002/06/11 02:50:02 martillo Exp $ + */ + +/* + * $Log: plx9060.h,v $ + * Revision 1.2 2002/06/11 02:50:02 martillo + * using silx_ and SILX_ instead of sx_ and SX_ + * + * Revision 1.1 2002/05/21 17:30:16 martillo + * first pass for the sio16 driver. + * + * Revision 1.4 1999/02/12 15:38:13 bkd + * Changed PLX_ECNTUSER0 to PLX_ECNTLUSERO and added PLX_ECNTLUSERI. + * + * Revision 1.3 1998/03/23 19:35:42 bkd + * Added definitions for all of the missing PLX9060SD registers. + * + * Revision 1.2 1998/03/13 21:02:16 bkd + * Updated copyright date to include 1998. + * + * Revision 1.1 1996/11/23 01:07:46 bkd + * cmw/bkd (PCI port): + * Initial check-in. 
+ * + */ + +/* + * Register definitions + */ + +#define PLX_LAS0RR 0x00 +#define PLX_LAS0BAR 0x04 +#define PLX_LAR 0x08 +#define PLX_ENDR 0x0c +#define PLX_EROMRR 0x10 +#define PLX_EROMBAR 0x14 +#define PLX_LAS0BRD 0x18 +#define PLX_LAS1RR 0x30 +#define PLX_LAS1BAR 0x34 +#define PLX_LAS1BRD 0x38 + +#define PLX_MBR0 0x40 +#define PLX_MBR1 0x44 +#define PLX_MBR2 0x48 +#define PLX_MBR3 0x4c +#define PLX_PCI2LCLDBR 0x60 +#define PLX_LCL2PCIDBR 0x64 +#define PLX_ICSR 0x68 +#define PLX_ECNTL 0x6c + +/* + * Bit definitions + */ + +#define PLX_ECNTLUSERO 0x00010000 /* turn on user output */ +#define PLX_ECNTLUSERI 0x00020000 /* user input */ +#define PLX_ECNTLLDREG 0x20000000 /* reload configuration registers */ +#define PLX_ECNTLLCLRST 0x40000000 /* local bus reset */ +#define PLX_ECNTLINITSTAT 0x80000000 /* mark board init'ed */ + + +#define PLX_ICSRLSERR_ENA 0x00000001 /* enable local bus LSERR# */ +#define PLX_ICSRLSERRP_ENA 0x00000002 /* enable local bus LSERR# PCI */ +#define PLX_ICSRPCIINTS 0x00000100 /* enable PCI interrupts */ +#define PLX_ICSRLCLINTPCI 0x00000800 +#define PLX_ICSRINTACTIVE 0x00008000 /* RO: local interrupt active */ + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/cd1865/siolx.h linux.22-ac2/drivers/char/cd1865/siolx.h --- linux.vanilla/drivers/char/cd1865/siolx.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/cd1865/siolx.h 2003-06-29 16:10:04.000000000 +0100 @@ -0,0 +1,94 @@ +/* -*- linux-c -*- */ +#ifndef _SIOLX_H_ +#define _SIOLX_H_ + +/* + * Modifications Copyright (C) 2002 By Telford Tools, Inc., Boston, MA. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + */ + +#define AURASUBSYSTEM_VENDOR_ID 0x125c +#define AURASUBSYSTEM_MPASYNCPCI 0x0640 +#define AURASUBSYSTEM_MPASYNCcPCI 0x0641 + +/* + * Aurora Cirrus CL-CD180/1865 Async Driver (sio16) + * + */ + +/* + * Register sets. These must match the order of the registers specified + * in the prom on the board! 
+ */ + +#define MPASYNC_REG_CSR 1 +#define MPASYNC_REG_CD 2 + +#define MPASYNC_CHIP1_OFFSET 0x080 +#define MPASYNC_CHIP2_OFFSET 0x100 + +#define MPASYNC_REG_NO_OBP_CSR 1 +#define MPASYNC_REG_NO_OBP_CD 3 + +#define TX_FIFO 0x8 /* how deep is the chip fifo */ + +/* + * state flags + */ + +/* + * the following defines the model types + */ + +#define OREGANO_MODEL(mod) ((mod) == BD_16000P || (mod) == BD_8000P) +#define MACE_MODEL(mod) ((mod) == BD_16000C || (mod) == BD_8000C) + +/* + * I/O options: + */ + +#define MACE8_STD 0x0 /* 8000CP -- standard I/O */ +#define MACE8_RJ45 0x1 /* 8000CP -- rear RJ45 I/O */ + +#define MACE16_STD 0x0 /* 16000CP -- standard I/O */ +#define MACE16_RJ45 0x1 /* 16000CP -- rear RJ45 I/O */ + +#define SE2_CLK ((unsigned int) 11059200) /* 11.0592 MHz */ +#define SE_CLK ((unsigned int) 14745600) /* 14.7456 MHz */ +#define SE3_CLK ((unsigned int) 33000000) /* 33.3333 MHz */ + +/* divide x by y, rounded */ +#define ROUND_DIV(x, y) (((x) + ((y) >> 1)) / (y)) + +/* Calculate a 16 bit baud rate divisor for the given "encoded" + * (multiplied by two) baud rate. + */ + +/* chip types: */ +#define CT_UNKNOWN 0x0 /* unknown */ +#define CT_CL_CD180 0x1 /* Cirrus Logic CD-180 */ +#define CT_CL_CD1864 0x2 /* Cirrus Logic CD-1864 */ +#define CT_CL_CD1865 0x3 /* Cirrus Logic CD-1864 */ + +/* chip revisions: */ +#define CR_UNKNOWN 0x0 /* unknown */ +#define CR_REVA 0x1 /* revision A */ +#define CR_REVB 0x2 /* revision B */ +#define CR_REVC 0x3 /* revision C */ +/* ...and so on ... */ + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/Config.in linux.22-ac2/drivers/char/Config.in --- linux.vanilla/drivers/char/Config.in 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/Config.in 2003-08-28 22:46:45.000000000 +0100 @@ -35,6 +35,7 @@ fi bool 'Non-standard serial port support' CONFIG_SERIAL_NONSTANDARD if [ "$CONFIG_SERIAL_NONSTANDARD" = "y" ]; then + tristate ' Aurora Technology, Inc. 
asynchronous PCI cards V2' CONFIG_ATI_CD1865 tristate ' Computone IntelliPort Plus serial support' CONFIG_COMPUTONE tristate ' Comtrol Rocketport support' CONFIG_ROCKETPORT tristate ' Cyclades async mux support' CONFIG_CYCLADES @@ -158,7 +159,7 @@ dep_tristate 'Texas Instruments parallel link cable support' CONFIG_TIPAR $CONFIG_PARPORT fi -if [ "$CONFIG_PPC64" ] ; then +if [ "$CONFIG_PPC64" = "y" ] ; then bool 'pSeries Hypervisor Virtual Console support' CONFIG_HVC_CONSOLE fi if [ "$CONFIG_ALL_PPC" = "y" ]; then @@ -216,6 +217,7 @@ tristate ' Acquire SBC Watchdog Timer' CONFIG_ACQUIRE_WDT tristate ' Advantech SBC Watchdog Timer' CONFIG_ADVANTECH_WDT tristate ' ALi M7101 PMU on ALi 1535D+ Watchdog Timer' CONFIG_ALIM1535_WDT + tristate ' ALi M6117 Watchdog Timer' CONFIG_ALIM6117_WDT tristate ' ALi M7101 PMU Watchdog Timer' CONFIG_ALIM7101_WDT tristate ' AMD "Elan" SC520 Watchdog Timer' CONFIG_SC520_WDT tristate ' Berkshire Products PC Watchdog' CONFIG_PCWATCHDOG @@ -283,6 +285,7 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" -a "$CONFIG_X86" = "y" -a "$CONFIG_X86_64" != "y" ]; then dep_tristate 'Sony Vaio Programmable I/O Control Device support (EXPERIMENTAL)' CONFIG_SONYPI $CONFIG_PCI fi +tristate 'Vertical blank driver' CONFIG_VBLANK $CONFIG_PCI mainmenu_option next_comment comment 'Ftape, the floppy tape device driver' @@ -315,6 +318,7 @@ if [ "$CONFIG_IA64" = "y" ]; then bool ' HP ZX1 AGP support' CONFIG_AGP_HP_ZX1 fi + bool ' ATI IGP chipset support' CONFIG_AGP_ATI fi bool 'Direct Rendering Manager (XFree86 DRI support)' CONFIG_DRM diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/console.c linux.22-ac2/drivers/char/console.c --- linux.vanilla/drivers/char/console.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/console.c 2003-06-29 16:10:00.000000000 +0100 @@ -2393,7 +2393,7 @@ tty->winsize.ws_row = video_num_lines; tty->winsize.ws_col = video_num_columns; } - if (tty->count == 1) + if (atomic_read(&tty->count) == 1) vcs_make_devfs (currcons, 0); return 0; } @@ -2402,7 +2402,7 @@ { if (!tty) return; - if (tty->count != 1) return; + if (atomic_read(&tty->count) != 1) return; vcs_make_devfs (MINOR (tty->device) - tty->driver.minor_start, 1); tty->driver_data = 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/cyclades.c linux.22-ac2/drivers/char/cyclades.c --- linux.vanilla/drivers/char/cyclades.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/cyclades.c 2003-06-29 16:10:00.000000000 +0100 @@ -2822,7 +2822,7 @@ #ifdef CY_DEBUG_OPEN printk("cyc:cy_close ttyC%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/Config.in linux.22-ac2/drivers/char/drm/Config.in --- linux.vanilla/drivers/char/drm/Config.in 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/Config.in 2003-07-28 21:09:43.000000000 +0100 @@ -6,11 +6,13 @@ # tristate ' 3dfx Banshee/Voodoo3+' CONFIG_DRM_TDFX -#tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA +tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA tristate ' ATI Rage 128' CONFIG_DRM_R128 -dep_tristate ' ATI Radeon' CONFIG_DRM_RADEON $CONFIG_AGP +tristate ' ATI Radeon' CONFIG_DRM_RADEON dep_tristate ' Intel I810' CONFIG_DRM_I810 $CONFIG_AGP dep_mbool ' Enabled XFree 4.1 ioctl interface by default' CONFIG_DRM_I810_XFREE_41 $CONFIG_DRM_I810 dep_tristate ' Intel 830M' CONFIG_DRM_I830 $CONFIG_AGP dep_tristate ' Matrox g200/g400' CONFIG_DRM_MGA $CONFIG_AGP +dep_tristate ' S3 Savage' CONFIG_DRM_S3 $CONFIG_AGP dep_tristate ' SiS' CONFIG_DRM_SIS $CONFIG_AGP +dep_tristate ' VIA CLE266' CONFIG_DRM_VIA $CONFIG_AGP diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_agpsupport.h linux.22-ac2/drivers/char/drm/drm_agpsupport.h --- linux.vanilla/drivers/char/drm/drm_agpsupport.h 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/drm_agpsupport.h 2003-09-09 22:27:29.000000000 +0100 @@ -73,9 +73,14 @@ drm_device_t *dev = priv->dev; int retcode; - if (!dev->agp || dev->agp->acquired || !drm_agp->acquire) + if (!dev->agp) + return -ENODEV; + if (dev->agp->acquired) + return -EBUSY; + if(!drm_agp->acquire) return -EINVAL; - if ((retcode = drm_agp->acquire())) return retcode; + if ((retcode = drm_agp->acquire()) != 0) + return retcode; dev->agp->acquired = 1; return 0; } @@ -259,67 +264,13 @@ return NULL; } head->memory = NULL; - switch (head->agp_info.chipset) { - case INTEL_GENERIC: head->chipset = "Intel"; break; - case INTEL_LX: head->chipset = "Intel 440LX"; break; - case INTEL_BX: head->chipset = "Intel 440BX"; break; - case INTEL_GX: head->chipset = "Intel 440GX"; break; - case INTEL_I810: head->chipset = "Intel i810"; break; - case INTEL_I815: head->chipset = "Intel i815"; break; - case INTEL_I820: head->chipset = "Intel i820"; break; - case INTEL_I840: head->chipset = "Intel i840"; break; - case INTEL_I845: head->chipset = "Intel i845"; break; - case INTEL_I850: head->chipset = "Intel i850"; break; - - case VIA_GENERIC: head->chipset = "VIA"; break; - case VIA_VP3: head->chipset = "VIA VP3"; break; - case VIA_MVP3: head->chipset = "VIA MVP3"; break; - case VIA_MVP4: head->chipset = "VIA MVP4"; break; - case VIA_APOLLO_KX133: head->chipset = "VIA Apollo KX133"; - break; - case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133"; - break; - case VIA_APOLLO_KT400: head->chipset = "VIA Apollo KT400"; - break; - case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; - break; - case VIA_APOLLO_P4X400: head->chipset = "VIA Apollo P4X400"; - break; - - case SIS_GENERIC: head->chipset = "SiS"; break; - case AMD_GENERIC: head->chipset = "AMD"; break; - case AMD_IRONGATE: head->chipset = "AMD Irongate"; break; - case AMD_8151: head->chipset = "AMD 8151"; break; - case ALI_GENERIC: head->chipset = "ALi"; break; - case ALI_M1541: head->chipset = "ALi M1541"; break; - - case ALI_M1621: head->chipset = "ALi M1621"; break; - case ALI_M1631: head->chipset = "ALi M1631"; break; - case ALI_M1632: head->chipset = "ALi M1632"; break; - case ALI_M1641: head->chipset = "ALi M1641"; break; - case ALI_M1644: 
head->chipset = "ALi M1644"; break; - case ALI_M1647: head->chipset = "ALi M1647"; break; - case ALI_M1651: head->chipset = "ALi M1651"; break; - - case SVWRKS_HE: head->chipset = "Serverworks HE"; - break; - case SVWRKS_LE: head->chipset = "Serverworks LE"; - break; - case SVWRKS_GENERIC: head->chipset = "Serverworks Generic"; - break; - - case HP_ZX1: head->chipset = "HP ZX1"; break; - - default: head->chipset = "Unknown"; break; - } head->cant_use_aperture = head->agp_info.cant_use_aperture; head->page_mask = head->agp_info.page_mask; - DRM_INFO("AGP %d.%d on %s @ 0x%08lx %ZuMB\n", + DRM_INFO("AGP %d.%d Aperture @ 0x%08lx %ZuMB\n", head->agp_info.version.major, head->agp_info.version.minor, - head->chipset, head->agp_info.aper_base, head->agp_info.aper_size); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_bufs.h linux.22-ac2/drivers/char/drm/drm_bufs.h --- linux.vanilla/drivers/char/drm/drm_bufs.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm_bufs.h 2003-09-09 22:27:29.000000000 +0100 @@ -136,6 +136,7 @@ } map->offset = (unsigned long)map->handle; if ( map->flags & _DRM_CONTAINS_LOCK ) { + dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */ } break; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_context.h linux.22-ac2/drivers/char/drm/drm_context.h --- linux.vanilla/drivers/char/drm/drm_context.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm_context.h 2003-09-09 22:27:29.000000000 +0100 @@ -554,7 +554,7 @@ /* Allocate a new queue */ down(&dev->struct_sem); - queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES); + queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES); memset(queue, 0, sizeof(*queue)); atomic_set(&queue->use_count, 1); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_dma.h linux.22-ac2/drivers/char/drm/drm_dma.h --- linux.vanilla/drivers/char/drm/drm_dma.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm_dma.h 2003-09-09 22:27:29.000000000 +0100 @@ -30,7 +30,7 @@ */ #include "drmP.h" - +#include "drm_os_linux.h" #include /* For task queue support */ #ifndef __HAVE_DMA_WAITQUEUE @@ -537,8 +537,18 @@ dev->tq.data = dev; #endif +#if __HAVE_VBL_IRQ + init_waitqueue_head(&dev->vbl_queue); + + spin_lock_init( &dev->vbl_lock ); + + INIT_LIST_HEAD( &dev->vbl_sigs.head ); + + dev->vbl_pending = 0; +#endif + /* Before installing handler */ - DRIVER_PREINSTALL(); + DRM(driver_irq_preinstall)(dev); /* Install handler */ ret = request_irq( dev->irq, DRM(dma_service), @@ -551,7 +561,7 @@ } /* After installing handler */ - DRIVER_POSTINSTALL(); + DRM(driver_irq_postinstall)(dev); return 0; } @@ -570,7 +580,7 @@ DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq ); - DRIVER_UNINSTALL(); + DRM(driver_irq_uninstall)( dev ); free_irq( irq, dev ); @@ -597,6 +607,142 @@ } } +#if __HAVE_VBL_IRQ + +int DRM(wait_vblank)(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_wait_vblank_t vblwait; + struct timeval now; + int ret = 0; + unsigned int flags; + + if (!dev->irq) + return -EINVAL; + + DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data, + sizeof(vblwait) ); + + switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) { + case _DRM_VBLANK_RELATIVE: + vblwait.request.sequence += atomic_read( &dev->vbl_received ); + 
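+		/* a relative wait has now been rewritten as an absolute
+		 * sequence number; clear the flag and fall through to the
+		 * absolute case */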
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; + case _DRM_VBLANK_ABSOLUTE: + break; + default: + return -EINVAL; + } + + flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + + if ( flags & _DRM_VBLANK_SIGNAL ) { + unsigned long irqflags; + drm_vbl_sig_t *vbl_sig; + + vblwait.reply.sequence = atomic_read( &dev->vbl_received ); + + spin_lock_irqsave( &dev->vbl_lock, irqflags ); + + /* Check if this task has already scheduled the same signal + * for the same vblank sequence number; nothing to be done in + * that case + */ + list_for_each( ( (struct list_head *) vbl_sig ), &dev->vbl_sigs.head ) { + if (vbl_sig->sequence == vblwait.request.sequence + && vbl_sig->info.si_signo == vblwait.request.signal + && vbl_sig->task == current) + { + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + goto done; + } + } + + if ( dev->vbl_pending >= 100 ) { + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + return -EBUSY; + } + + dev->vbl_pending++; + + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + + if ( !( vbl_sig = kmalloc(sizeof(drm_vbl_sig_t), GFP_KERNEL) ) ) + return -ENOMEM; + + + memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) ); + + vbl_sig->sequence = vblwait.request.sequence; + vbl_sig->info.si_signo = vblwait.request.signal; + vbl_sig->task = current; + + spin_lock_irqsave( &dev->vbl_lock, irqflags ); + + list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head ); + + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + } else { + ret = DRM(vblank_wait)( dev, &vblwait.request.sequence ); + + do_gettimeofday( &now ); + vblwait.reply.tval_sec = now.tv_sec; + vblwait.reply.tval_usec = now.tv_usec; + } + +done: + DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait, + sizeof(vblwait) ); + + return ret; +} + +void DRM(vbl_send_signals)( drm_device_t *dev ) +{ + struct list_head *tmp; + drm_vbl_sig_t *vbl_sig; + unsigned int vbl_seq = atomic_read( &dev->vbl_received ); + unsigned long flags; + + spin_lock_irqsave( &dev->vbl_lock, flags ); + + list_for_each_safe( ( (struct list_head *) vbl_sig ), tmp, &dev->vbl_sigs.head ) { + if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) { + vbl_sig->info.si_code = vbl_seq; + send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task ); + + list_del( (struct list_head *) vbl_sig ); + + + kfree( vbl_sig ); + dev->vbl_pending--; + } + } + + spin_unlock_irqrestore( &dev->vbl_lock, flags ); +} + +#endif /* __HAVE_VBL_IRQ */ + +#else + +int DRM(control)( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_control_t ctl; + + if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) ) + return -EFAULT; + + switch ( ctl.func ) { + case DRM_INST_HANDLER: + case DRM_UNINST_HANDLER: + return 0; + default: + return -EINVAL; + } +} + #endif /* __HAVE_DMA_IRQ */ #endif /* __HAVE_DMA */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_drv.h linux.22-ac2/drivers/char/drm/drm_drv.h --- linux.vanilla/drivers/char/drm/drm_drv.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm_drv.h 2003-07-28 21:09:43.000000000 +0100 @@ -115,18 +115,34 @@ #ifndef DRIVER_FOPS #define DRIVER_FOPS \ static struct file_operations DRM(fops) = { \ - owner: THIS_MODULE, \ - open: DRM(open), \ - flush: DRM(flush), \ - release: DRM(release), \ - ioctl: DRM(ioctl), \ - mmap: DRM(mmap), \ - read: DRM(read), \ - fasync: DRM(fasync), \ - poll: DRM(poll), \ + .owner = THIS_MODULE, \ + .open = DRM(open), \ + .flush = DRM(flush), \ + .release = DRM(release), \ 
+ .ioctl = DRM(ioctl), \ + .mmap = DRM(mmap), \ + .read = DRM(read), \ + .fasync = DRM(fasync), \ + .poll = DRM(poll), \ } #endif +#ifndef MODULE +/* DRM(options) is called by the kernel to parse command-line options + * passed via the boot-loader (e.g., LILO). It calls the insmod option + * routine, drm_parse_drm. + */ +/* Use an additional macro to avoid preprocessor troubles */ +#define DRM_OPTIONS_FUNC DRM(options) +static int __init DRM(options)( char *str ) +{ + DRM(parse_options)( str ); + return 1; +} + +__setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC ); +#undef DRM_OPTIONS_FUNC +#endif /* * The default number of instances (minor numbers) to initialize. @@ -187,10 +203,8 @@ /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ -#if __HAVE_DMA_IRQ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 }, #endif -#endif #if __REALLY_HAVE_AGP [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 }, @@ -208,6 +222,10 @@ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 }, #endif +#if __HAVE_VBL_IRQ + [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 }, +#endif + DRIVER_IOCTLS }; @@ -292,7 +310,7 @@ dev->map_count = 0; dev->vmalist = NULL; - dev->lock.hw_lock = NULL; + dev->sigdata.lock = dev->lock.hw_lock = NULL; init_waitqueue_head( &dev->lock.lock_queue ); dev->queue_count = 0; dev->queue_reserved = 0; @@ -477,7 +495,7 @@ DRM(dma_takedown)( dev ); #endif if ( dev->lock.hw_lock ) { - dev->lock.hw_lock = NULL; /* SHM removed */ + dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ dev->lock.pid = 0; wake_up_interruptible( &dev->lock.lock_queue ); } @@ -705,7 +723,7 @@ int i; for (i = 0; i < DRM(numdevs); i++) { - if (MINOR(inode->i_rdev) == DRM(minor)[i]) { + if (minor(inode->i_rdev) == DRM(minor)[i]) { dev = &(DRM(device)[i]); break; } @@ -747,8 +765,8 @@ * Begin inline drm_release */ - DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n", - current->pid, dev->device, dev->open_count ); + DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n", + current->pid, (long)dev->device, dev->open_count ); if ( dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && @@ -873,8 +891,9 @@ atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] ); ++priv->ioctl_count; - DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n", - current->pid, cmd, nr, dev->device, priv->authenticated ); + DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", + current->pid, cmd, nr, (long)dev->device, + priv->authenticated ); if ( nr >= DRIVER_IOCTL_COUNT ) { retcode = -EINVAL; @@ -1027,6 +1046,25 @@ atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] ); +#if __HAVE_KERNEL_CTX_SWITCH + /* We no longer really hold it, but if we are the next + * agent to request it then we should just be able to + * take it immediately and not eat the ioctl. 
+ */ + dev->lock.pid = 0; + { + __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock; + unsigned int old, new, prev, ctx; + + ctx = lock.context; + do { + old = *plock; + new = ctx; + prev = cmpxchg(plock, old, new); + } while (prev != old); + } + wake_up_interruptible(&dev->lock.lock_queue); +#else DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT ); #if __HAVE_DMA_SCHEDULE @@ -1041,6 +1079,7 @@ DRM_ERROR( "\n" ); } } +#endif /* !__HAVE_KERNEL_CTX_SWITCH */ unblock_all_signals(); return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_fops.h linux.22-ac2/drivers/char/drm/drm_fops.h --- linux.vanilla/drivers/char/drm/drm_fops.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm_fops.h 2003-09-09 22:27:29.000000000 +0100 @@ -37,7 +37,7 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev) { - kdev_t minor = MINOR(inode->i_rdev); + int minor = minor(inode->i_rdev); drm_file_t *priv; if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */ @@ -94,25 +94,8 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n", - current->pid, dev->device, dev->open_count); - if ( dev->lock.hw_lock && - _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && - dev->lock.pid == current->pid ) { - DRM_DEBUG( "Process %d closed fd, freeing lock for context %d\n", - current->pid, - _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) ); -#if __HAVE_RELEASE - DRIVER_RELEASE(); -#endif - DRM(lock_free)( dev, &dev->lock.hw_lock->lock, - _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) ); - - /* FIXME: may require heavy-handed reset of - hardware at this point, possibly - processed via a callback to the X - server. 
*/ - } + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", + current->pid, (long)dev->device, dev->open_count); return 0; } @@ -122,7 +105,7 @@ drm_device_t *dev = priv->dev; int retcode; - DRM_DEBUG("fd = %d, device = 0x%x\n", fd, dev->device); + DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, (long)dev->device); retcode = fasync_helper(fd, filp, on, &dev->buf_async); if (retcode < 0) return retcode; return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm.h linux.22-ac2/drivers/char/drm/drm.h --- linux.vanilla/drivers/char/drm/drm.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -38,10 +38,27 @@ #if defined(__linux__) #include #include /* For _IO* macros */ -#define DRM_IOCTL_NR(n) _IOC_NR(n) -#elif defined(__FreeBSD__) +#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_IOC_VOID _IOC_NONE +#define DRM_IOC_READ _IOC_READ +#define DRM_IOC_WRITE _IOC_WRITE +#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE +#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) +#elif defined(__FreeBSD__) || defined(__NetBSD__) +#if defined(__FreeBSD__) && defined(XFree86Server) +/* Prevent name collision when including sys/ioccom.h */ +#undef ioctl #include -#define DRM_IOCTL_NR(n) ((n) & 0xff) +#define ioctl(a,b,c) xf86ioctl(a,b,c) +#else +#include +#endif /* __FreeBSD__ && xf86ioctl */ +#define DRM_IOCTL_NR(n) ((n) & 0xff) +#define DRM_IOC_VOID IOC_VOID +#define DRM_IOC_READ IOC_OUT +#define DRM_IOC_WRITE IOC_IN +#define DRM_IOC_READWRITE IOC_INOUT +#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) #endif #define XFREE86_VERSION(major,minor,patch,snap) \ @@ -84,6 +101,10 @@ /* Warning: If you change this structure, make sure you change * XF86DRIClipRectRec in the server as well */ +/* KW: Actually it's illegal to change either for + * backwards-compatibility reasons. 
+ */ + typedef struct drm_clip_rect { unsigned short x1; unsigned short y1; @@ -106,6 +127,8 @@ #include "radeon_drm.h" #include "sis_drm.h" #include "i830_drm.h" +#include "savage_drm.h" +#include "via_drm.h" typedef struct drm_version { int version_major; /* Major version */ @@ -332,6 +355,32 @@ int funcnum; } drm_irq_busid_t; +typedef enum { + _DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */ + _DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */ + _DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */ +} drm_vblank_seq_type_t; + +#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL + +struct drm_wait_vblank_request { + drm_vblank_seq_type_t type; + unsigned int sequence; + unsigned long signal; +}; + +struct drm_wait_vblank_reply { + drm_vblank_seq_type_t type; + unsigned int sequence; + long tval_sec; + long tval_usec; +}; + +typedef union drm_wait_vblank { + struct drm_wait_vblank_request request; + struct drm_wait_vblank_reply reply; +} drm_wait_vblank_t; + typedef struct drm_agp_mode { unsigned long mode; } drm_agp_mode_t; @@ -371,10 +420,9 @@ #define DRM_IOCTL_BASE 'd' #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) -#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size) -#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size) -#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size) - +#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) +#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) +#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) #define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) @@ -427,86 +475,10 @@ #define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t) #define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t) -/* MGA specific ioctls */ -#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) -#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t) -#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42) -#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43) -#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t) -#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t) -#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) -#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t) -#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t) - -/* i810 specific ioctls */ -#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) -#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) -#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) -#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43) -#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44) -#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) -#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46) -#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t) -#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48) -#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t) -#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a) -#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b) -#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t) -#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d ) - - -/* Rage 128 specific ioctls */ -#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) -#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41) -#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t) -#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43) -#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44) -#define DRM_IOCTL_R128_RESET DRM_IO( 0x46) -#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47) -#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, 
drm_r128_clear_t) -#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t) -#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t) -#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t) -#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t) -#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t) -#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t) -#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t) - -/* Radeon specific ioctls */ -#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t) -#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41) -#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t) -#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43) -#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44) -#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45) -#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t) -#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47) -#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t) -#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t) -#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t) -#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) -#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) -#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) - -/* SiS specific ioctls */ -#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t) -#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t) -#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t) -#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t) -#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t) -#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t) -#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49) -#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50) - -/* I830 specific ioctls */ -#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t) -#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t) -#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t) -#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43) -#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44) -#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t) -#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46) -#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t) -#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48) +#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t) + +/* Device specfic ioctls should only be in their respective headers + * The device specific ioctl range is 0x40 to 0x79. */ +#define DRM_COMMAND_BASE 0x40 #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_ioctl.h linux.22-ac2/drivers/char/drm/drm_ioctl.h --- linux.vanilla/drivers/char/drm/drm_ioctl.h 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/drm_ioctl.h 2003-09-09 22:27:29.000000000 +0100 @@ -111,7 +111,7 @@ do { struct pci_dev *pci_dev; - int b, d, f; + int domain, b, d, f; char *p; for(p = dev->unique; p && *p && *p != ':'; p++); @@ -123,6 +123,27 @@ f = (int)simple_strtoul(p+1, &p, 10); if (*p) break; + domain = b >> 8; + b &= 0xff; + +#ifdef __alpha__ + /* + * Find the hose the device is on (the domain number is the + * hose index) and offset the bus by the root bus of that + * hose. 
+ */ + for(pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,NULL); + pci_dev; + pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,pci_dev)) { + struct pci_controller *hose = pci_dev->sysdata; + + if (hose->index == domain) { + b += hose->bus->number; + break; + } + } +#endif + pci_dev = pci_find_slot(b, PCI_DEVFN(d,f)); if (pci_dev) { dev->pdev = pci_dev; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_lock.h linux.22-ac2/drivers/char/drm/drm_lock.h --- linux.vanilla/drivers/char/drm/drm_lock.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm_lock.h 2003-09-09 22:27:29.000000000 +0100 @@ -236,7 +236,7 @@ /* Allow signal delivery if lock isn't held */ - if (!_DRM_LOCK_IS_HELD(s->lock->lock) + if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock) || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1; /* Otherwise, set flag to force call to diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_memory.h linux.22-ac2/drivers/char/drm/drm_memory.h --- linux.vanilla/drivers/char/drm/drm_memory.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm_memory.h 2003-09-09 22:27:29.000000000 +0100 @@ -313,6 +313,29 @@ return pt; } +void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size) +{ + void *pt; + + if (!size) { + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Mapping 0 bytes at 0x%08lx\n", offset); + return NULL; + } + + if (!(pt = ioremap_nocache(offset, size))) { + spin_lock(&DRM(mem_lock)); + ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; + spin_unlock(&DRM(mem_lock)); + return NULL; + } + spin_lock(&DRM(mem_lock)); + ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; + DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size; + spin_unlock(&DRM(mem_lock)); + return pt; +} + void DRM(ioremapfree)(void *pt, unsigned long size) { int alloc_count; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_os_linux.h linux.22-ac2/drivers/char/drm/drm_os_linux.h --- linux.vanilla/drivers/char/drm/drm_os_linux.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/drm_os_linux.h 2003-09-01 13:54:30.000000000 +0100 @@ -0,0 +1,56 @@ +#define __NO_VERSION__ + +#include /* For task queue support */ +#include + + +/* For data going from/to the kernel through the ioctl argument */ +#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \ + if ( copy_from_user(&arg1, arg2, arg3) ) \ + return -EFAULT +#define DRM_COPY_TO_USER_IOCTL(arg1, arg2, arg3) \ + if ( copy_to_user(arg1, &arg2, arg3) ) \ + return -EFAULT + + +#warning the author of this code needs to read up on list_entry +#define DRM_GETSAREA() \ +do { \ + struct list_head *list; \ + list_for_each( list, &dev->maplist->head ) { \ + drm_map_list_t *entry = (drm_map_list_t *)list; \ + if ( entry->map && \ + entry->map->type == _DRM_SHM && \ + (entry->map->flags & _DRM_CONTAINS_LOCK) ) { \ + dev_priv->sarea = entry->map; \ + break; \ + } \ + } \ +} while (0) + +#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ +do { \ + DECLARE_WAITQUEUE(entry, current); \ + unsigned long end = jiffies + (timeout); \ + add_wait_queue(&(queue), &entry); \ + \ + for (;;) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if((signed)(end - jiffies) <= 0) { \ + ret = -EBUSY; \ + break; \ + } \ + schedule_timeout((HZ/100 > 1) ? 
HZ/100 : 1); \ + if (signal_pending(current)) { \ + ret = -EINTR; \ + break; \ + } \ + } \ + set_current_state(TASK_RUNNING); \ + remove_wait_queue(&(queue), &entry); \ +} while (0) + + + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drmP.h linux.22-ac2/drivers/char/drm/drmP.h --- linux.vanilla/drivers/char/drm/drmP.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drmP.h 2003-09-09 22:27:29.000000000 +0100 @@ -53,6 +53,7 @@ #include #include /* For (un)lock_kernel */ #include +#include #if defined(__alpha__) || defined(__powerpc__) #include /* For pte_wrprotect */ #endif @@ -71,10 +72,7 @@ #include #include "drm.h" -/* page_to_bus for earlier kernels, not optimal in all cases */ -#ifndef page_to_bus -#define page_to_bus(page) ((unsigned int)(virt_to_bus(page_address(page)))) -#endif +#include "drm_os_linux.h" /* DRM template customization defaults */ @@ -209,6 +207,7 @@ (unsigned long)(n),sizeof(*(ptr)))) #endif /* i386 & alpha */ #endif +#define __REALLY_HAVE_SG (__HAVE_SG) /* Begin the DRM... */ @@ -251,41 +250,58 @@ #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) -#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) + /* Backward compatibility section */ +#ifndef minor +#define minor(x) MINOR((x)) +#endif -/* Macros to make printk easier */ +#ifndef MODULE_LICENSE +#define MODULE_LICENSE(x) +#endif -#if ( __GNUC__ > 2 ) -#define DRM_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__, ##arg) -#define DRM_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ - DRM(mem_stats)[area].name , ##arg) -#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) +#ifndef pte_offset_map +#define pte_offset_map pte_offset +#define pte_unmap(pte) +#endif -#if DRM_DEBUG_CODE -#define DRM_DEBUG(fmt, arg...) \ - do { \ - if ( DRM(flags) & DRM_FLAG_DEBUG ) \ - printk(KERN_DEBUG \ - "[" DRM_NAME ":%s] " fmt , \ - __FUNCTION__, \ - ##arg); \ - } while (0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) +static inline struct page * vmalloc_to_page(void * vmalloc_addr) +{ + unsigned long addr = (unsigned long) vmalloc_addr; + struct page *page = NULL; + pgd_t *pgd = pgd_offset_k(addr); + pmd_t *pmd; + pte_t *ptep, pte; + + if (!pgd_none(*pgd)) { + pmd = pmd_offset(pgd, addr); + if (!pmd_none(*pmd)) { + ptep = pte_offset_map(pmd, addr); + pte = *ptep; + if (pte_present(pte)) + page = pte_page(pte); + pte_unmap(ptep); + } + } + return page; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) +#define DRM_RPR_ARG(vma) #else -#define DRM_DEBUG(fmt, arg...) do { } while (0) +#define DRM_RPR_ARG(vma) vma, #endif -#else /* Gcc 2.x */ -/* Work around a C preprocessor bug */ +#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) /* Macros to make printk easier */ #define DRM_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg) + printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg) #define DRM_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \ + printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ DRM(mem_stats)[area].name , ##arg) #define DRM_INFO(fmt, arg...) 
printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) @@ -294,16 +310,13 @@ do { \ if ( DRM(flags) & DRM_FLAG_DEBUG ) \ printk(KERN_DEBUG \ - "[" DRM_NAME ":" __FUNCTION__ "] " fmt , \ - ##arg); \ + "[" DRM_NAME ":%s] " fmt , \ + __FUNCTION__ , ##arg); \ } while (0) #else #define DRM_DEBUG(fmt, arg...) do { } while (0) #endif -#endif /* Gcc 2.x */ - - #define DRM_PROC_LIMIT (PAGE_SIZE-80) #define DRM_PROC_PRINT(fmt, arg...) \ @@ -318,6 +331,9 @@ #define DRM_IOREMAP(map) \ (map)->handle = DRM(ioremap)( (map)->offset, (map)->size ) +#define DRM_IOREMAP_NOCACHE(map) \ + (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size) + #define DRM_IOREMAPFREE(map) \ do { \ if ( (map)->handle && (map)->size ) \ @@ -599,6 +615,17 @@ drm_map_t *map; } drm_map_list_t; +#if __HAVE_VBL_IRQ + +typedef struct drm_vbl_sig { + struct list_head head; + unsigned int sequence; + struct siginfo info; + struct task_struct *task; +} drm_vbl_sig_t; + +#endif + typedef struct drm_device { const char *name; /* Simple driver name */ char *unique; /* Unique identifier: e.g., busid */ @@ -658,6 +685,13 @@ int last_context; /* Last current context */ unsigned long last_switch; /* jiffies at last context switch */ struct tq_struct tq; +#if __HAVE_VBL_IRQ + wait_queue_head_t vbl_queue; + atomic_t vbl_received; + spinlock_t vbl_lock; + drm_vbl_sig_t vbl_sigs; + unsigned int vbl_pending; +#endif cycles_t ctx_start; cycles_t lck_start; #if __HAVE_DMA_HISTOGRAM @@ -725,16 +759,16 @@ /* Mapping support (drm_vm.h) */ extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern void DRM(vm_open)(struct vm_area_struct *vma); extern void DRM(vm_close)(struct vm_area_struct *vma); extern void DRM(vm_shm_close)(struct vm_area_struct *vma); @@ -756,6 +790,7 @@ extern void DRM(free_pages)(unsigned long address, int order, int area); extern void *DRM(ioremap)(unsigned long offset, unsigned long size); +extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size); extern void DRM(ioremapfree)(void *pt, unsigned long size); #if __REALLY_HAVE_AGP @@ -885,6 +920,15 @@ extern int DRM(irq_uninstall)( drm_device_t *dev ); extern void DRM(dma_service)( int irq, void *device, struct pt_regs *regs ); +extern void DRM(driver_irq_preinstall)( drm_device_t *dev ); +extern void DRM(driver_irq_postinstall)( drm_device_t *dev ); +extern void DRM(driver_irq_uninstall)( drm_device_t *dev ); +#if __HAVE_VBL_IRQ +extern int DRM(wait_vblank)(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq); +extern void DRM(vbl_send_signals)( drm_device_t *dev ); +#endif #if __HAVE_DMA_IRQ_BH extern void DRM(dma_immediate_bh)( void *dev ); #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_proc.h linux.22-ac2/drivers/char/drm/drm_proc.h --- linux.vanilla/drivers/char/drm/drm_proc.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm_proc.h 2003-09-09 22:27:29.000000000 +0100 @@ -147,10 +147,10 @@ *eof = 0; if (dev->unique) { - DRM_PROC_PRINT("%s 
0x%x %s\n", - dev->name, dev->device, dev->unique); + DRM_PROC_PRINT("%s 0x%lx %s\n", + dev->name, (long)dev->device, dev->unique); } else { - DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device); + DRM_PROC_PRINT("%s 0x%lx\n", dev->name, (long)dev->device); } if (len > request + offset) return request; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_sarea.h linux.22-ac2/drivers/char/drm/drm_sarea.h --- linux.vanilla/drivers/char/drm/drm_sarea.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/drm_sarea.h 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,57 @@ +/* sarea.h -- SAREA definitions -*- linux-c -*- + * + * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Michel Dänzer + */ + +#ifndef _DRM_SAREA_H_ +#define _DRM_SAREA_H_ + +#define SAREA_MAX_DRAWABLES 256 + +typedef struct _drm_sarea_drawable_t { + unsigned int stamp; + unsigned int flags; +} drm_sarea_drawable_t; + +typedef struct _dri_sarea_frame_t { + unsigned int x; + unsigned int y; + unsigned int width; + unsigned int height; + unsigned int fullscreen; +} drm_sarea_frame_t; + +typedef struct _drm_sarea_t { + /* first thing is always the drm locking structure */ + drm_hw_lock_t lock; + /* NOT_DONE: Use readers/writer lock for drawable_lock */ + drm_hw_lock_t drawable_lock; + drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; + drm_sarea_frame_t frame; + drm_context_t dummy_context; +} drm_sarea_t; + +#endif /* _DRM_SAREA_H_ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_stub.h linux.22-ac2/drivers/char/drm/drm_stub.h --- linux.vanilla/drivers/char/drm/drm_stub.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/drm_stub.h 2003-09-09 22:27:29.000000000 +0100 @@ -48,7 +48,7 @@ static int DRM(stub_open)(struct inode *inode, struct file *filp) { - int minor = MINOR(inode->i_rdev); + int minor = minor(inode->i_rdev); int err = -ENODEV; struct file_operations *old_fops; @@ -65,8 +65,8 @@ } static struct file_operations DRM(stub_fops) = { - owner: THIS_MODULE, - open: DRM(stub_open) + .owner = THIS_MODULE, + .open = DRM(stub_open) }; static int DRM(stub_getminor)(const char *name, struct file_operations *fops, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/drm_vm.h linux.22-ac2/drivers/char/drm/drm_vm.h --- linux.vanilla/drivers/char/drm/drm_vm.h 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/drm_vm.h 2003-09-09 22:27:29.000000000 +0100 @@ -57,7 +57,7 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused) + int write_access) { #if __REALLY_HAVE_AGP drm_file_t *priv = vma->vm_file->private_data; @@ -70,7 +70,7 @@ * Find the right map */ - if(!dev->agp->cant_use_aperture) goto vm_nopage_error; + if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error; list_for_each(list, &dev->maplist->head) { r_list = (drm_map_list_t *)list; @@ -141,9 +141,7 @@ return NOPAGE_OOM; get_page(page); -#if 0 /* XXX page_to_bus is not a portable interface available on all platforms. */ - DRM_DEBUG("0x%08lx => 0x%08llx\n", address, (u64)page_to_bus(page)); -#endif + DRM_DEBUG("shm_nopage 0x%lx\n", address); return page; } @@ -245,10 +243,7 @@ get_page(page); -#if 0 /* XXX page_to_bus is not a portable interface available on all platforms. 
*/ - DRM_DEBUG("0x%08lx (page %lu) => 0x%08llx\n", address, page_nr, - (u64)page_to_bus(page)); -#endif + DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr); return page; } @@ -449,12 +444,12 @@ } offset = DRIVER_GET_REG_OFS(); #ifdef __sparc__ - if (io_remap_page_range(vma->vm_start, + if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma) + offset, vma->vm_end - vma->vm_start, vma->vm_page_prot, 0)) #else - if (remap_page_range(vma->vm_start, + if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma) + offset, vma->vm_end - vma->vm_start, vma->vm_page_prot)) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/gamma_dma.c linux.22-ac2/drivers/char/drm/gamma_dma.c --- linux.vanilla/drivers/char/drm/gamma_dma.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/gamma_dma.c 2003-07-28 21:09:43.000000000 +0100 @@ -31,33 +31,32 @@ #include "gamma.h" #include "drmP.h" +#include "drm.h" +#include "gamma_drm.h" #include "gamma_drv.h" #include /* For task queue support */ #include - static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address, unsigned long length) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - GAMMA_WRITE(GAMMA_DMAADDRESS, virt_to_phys((void *)address)); - while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) - ; + (drm_gamma_private_t *)dev->dev_private; + mb(); + while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); + GAMMA_WRITE(GAMMA_DMAADDRESS, address); + while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) cpu_relax(); GAMMA_WRITE(GAMMA_DMACOUNT, length / 4); } void gamma_dma_quiescent_single(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax(); - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; - while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) - ; + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); GAMMA_WRITE(GAMMA_SYNC, 0); @@ -71,56 +70,50 @@ void gamma_dma_quiescent_dual(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax(); - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; - while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) - ; + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); GAMMA_WRITE(GAMMA_BROADCASTMASK, 3); - GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); GAMMA_WRITE(GAMMA_SYNC, 0); - /* Read from first MX */ + /* Read from first MX */ do { - while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) - ; + while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) cpu_relax(); } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); - /* Read from second MX */ + /* Read from second MX */ do { - while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) - ; + while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) cpu_relax(); } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG); } void gamma_dma_ready(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax(); } static inline int gamma_dma_is_ready(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - return !GAMMA_READ(GAMMA_DMACOUNT); + (drm_gamma_private_t *)dev->dev_private; + return(!GAMMA_READ(GAMMA_DMACOUNT)); } void 
gamma_dma_service(int irq, void *device, struct pt_regs *regs) { - drm_device_t *dev = (drm_device_t *)device; - drm_device_dma_t *dma = dev->dma; + drm_device_t *dev = (drm_device_t *)device; + drm_device_dma_t *dma = dev->dma; drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */ + + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */ GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8); GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001); @@ -164,7 +157,9 @@ } buf = dma->next_buffer; - address = (unsigned long)buf->address; + /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */ + /* So we pass the buffer index value into the physical page offset */ + address = buf->idx << 12; length = buf->used; DRM_DEBUG("context %d, buffer %d (%ld bytes)\n", @@ -231,6 +226,9 @@ buf->time_dispatched = get_cycles(); #endif + /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */ + address = buf->idx << 12; + gamma_dma_dispatch(dev, address, length); gamma_free_buffer(dev, dma->this_buffer); dma->this_buffer = buf; @@ -523,11 +521,11 @@ } } if (retcode) { - DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n", + DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d %d/%d\n", d->context, last_buf->waiting, last_buf->pending, - DRM_WAITCOUNT(dev, d->context), + (long)DRM_WAITCOUNT(dev, d->context), last_buf->idx, last_buf->list, last_buf->pid, @@ -581,3 +579,267 @@ return retcode; } + +/* ============================================================= + * DMA initialization, cleanup + */ + +static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init ) +{ + drm_gamma_private_t *dev_priv; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + int i; + struct list_head *list; + unsigned long *pgt; + + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t), + DRM_MEM_DRIVER ); + if ( !dev_priv ) + return -ENOMEM; + + dev->dev_private = (void *)dev_priv; + + memset( dev_priv, 0, sizeof(drm_gamma_private_t) ); + + list_for_each(list, &dev->maplist->head) { + #warning list_entry() is needed here + drm_map_list_t *r_list = (drm_map_list_t *)list; + if( r_list->map && + r_list->map->type == _DRM_SHM && + r_list->map->flags & _DRM_CONTAINS_LOCK ) { + dev_priv->sarea = r_list->map; + break; + } + } + + DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 ); + DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 ); + DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 ); + DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 ); + + dev_priv->sarea_priv = (drm_gamma_sarea_t *) + ((u8 *)dev_priv->sarea->handle + + init->sarea_priv_offset); + + if (init->pcimode) { + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + pgt = buf->address; + + for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { + buf = dma->buflist[i]; + *pgt = virt_to_phys((void*)buf->address) | 0x07; + pgt++; + } + + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + } else { + DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset ); + + DRM_IOREMAP( dev_priv->buffers ); + + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + pgt = buf->address; + + for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { + buf = dma->buflist[i]; + *pgt = (unsigned long)buf->address + 0x07; + pgt++; + } + + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1) cpu_relax(); + GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe) ; + } + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2); cpu_relax(); + GAMMA_WRITE( GAMMA_PAGETABLEADDR, 
virt_to_phys((void*)buf->address) ); + GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 ); + + return 0; +} + +int gamma_do_cleanup_dma( drm_device_t *dev ) +{ + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( dev->dev_private ) { + drm_gamma_private_t *dev_priv = dev->dev_private; + + DRM_IOREMAPFREE( dev_priv->buffers ); + + DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t), + DRM_MEM_DRIVER ); + dev->dev_private = NULL; + } + + return 0; +} + +int gamma_dma_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_gamma_init_t init; + + if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) ) + return -EFAULT; + + switch ( init.func ) { + case GAMMA_INIT_DMA: + return gamma_do_init_dma( dev, &init ); + case GAMMA_CLEANUP_DMA: + return gamma_do_cleanup_dma( dev ); + } + + return -EINVAL; +} + +static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy ) +{ + drm_device_dma_t *dma = dev->dma; + unsigned int *screenbuf; + + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + /* We've DRM_RESTRICTED this DMA buffer */ + + screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address; + +#if 0 + *buffer++ = 0x180; /* Tag (FilterMode) */ + *buffer++ = 0x200; /* Allow FBColor through */ + *buffer++ = 0x53B; /* Tag */ + *buffer++ = copy->Pitch; + *buffer++ = 0x53A; /* Tag */ + *buffer++ = copy->SrcAddress; + *buffer++ = 0x539; /* Tag */ + *buffer++ = copy->WidthHeight; /* Initiates transfer */ + *buffer++ = 0x53C; /* Tag - DMAOutputAddress */ + *buffer++ = virt_to_phys((void*)screenbuf); + *buffer++ = 0x53D; /* Tag - DMAOutputCount */ + *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/ + + /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */ + /* Now put it back to the screen */ + + *buffer++ = 0x180; /* Tag (FilterMode) */ + *buffer++ = 0x400; /* Allow Sync through */ + *buffer++ = 0x538; /* Tag - DMARectangleReadTarget */ + *buffer++ = 0x155; /* FBSourceData | count */ + *buffer++ = 0x537; /* Tag */ + *buffer++ = copy->Pitch; + *buffer++ = 0x536; /* Tag */ + *buffer++ = copy->DstAddress; + *buffer++ = 0x535; /* Tag */ + *buffer++ = copy->WidthHeight; /* Initiates transfer */ + *buffer++ = 0x530; /* Tag - DMAAddr */ + *buffer++ = virt_to_phys((void*)screenbuf); + *buffer++ = 0x531; + *buffer++ = copy->Count; /* initiates DMA transfer of color data */ +#endif + + /* need to dispatch it now */ + + return 0; +} + +int gamma_dma_copy( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_gamma_copy_t copy; + + if ( copy_from_user( &copy, (drm_gamma_copy_t *)arg, sizeof(copy) ) ) + return -EFAULT; + + return gamma_do_copy_dma( dev, &copy ); +} + +/* ============================================================= + * Per Context SAREA Support + */ + +int gamma_getsareactx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_priv_map_t request; + drm_map_t *map; + + if (copy_from_user(&request, + (drm_ctx_priv_map_t *)arg, + sizeof(request))) + return -EFAULT; + + down(&dev->struct_sem); + if ((int)request.ctx_id >= dev->max_context) { + up(&dev->struct_sem); + return -EINVAL; + } + + map = dev->context_sareas[request.ctx_id]; + up(&dev->struct_sem); + + request.handle = map->handle; + if (copy_to_user((drm_ctx_priv_map_t *)arg, &request,
sizeof(request))) + return -EFAULT; + return 0; +} + +int gamma_setsareactx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_priv_map_t request; + drm_map_t *map = NULL; + drm_map_list_t *r_list; + struct list_head *list; + + if (copy_from_user(&request, + (drm_ctx_priv_map_t *)arg, + sizeof(request))) + return -EFAULT; + + down(&dev->struct_sem); + r_list = NULL; + list_for_each(list, &dev->maplist->head) { + r_list = (drm_map_list_t *)list; + if(r_list->map && + r_list->map->handle == request.handle) break; + } + if (list == &(dev->maplist->head)) { + up(&dev->struct_sem); + return -EINVAL; + } + map = r_list->map; + up(&dev->struct_sem); + + if (!map) return -EINVAL; + + down(&dev->struct_sem); + if ((int)request.ctx_id >= dev->max_context) { + up(&dev->struct_sem); + return -EINVAL; + } + dev->context_sareas[request.ctx_id] = map; + up(&dev->struct_sem); + return 0; +} + +/* drm_dma.h hooks +*/ +void DRM(driver_irq_preinstall)( drm_device_t *dev ) { +} + +void DRM(driver_irq_postinstall)( drm_device_t *dev ) { +} + +void DRM(driver_irq_uninstall)( drm_device_t *dev ) { +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/gamma_drm.h linux.22-ac2/drivers/char/drm/gamma_drm.h --- linux.vanilla/drivers/char/drm/gamma_drm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/gamma_drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,89 @@ +#ifndef _GAMMA_DRM_H_ +#define _GAMMA_DRM_H_ + +typedef struct _drm_gamma_tex_region { + unsigned char next, prev; /* indices to form a circular LRU */ + unsigned char in_use; /* owned by a client, or free? */ + int age; /* tracked by clients to update local LRU's */ +} drm_gamma_tex_region_t; + +typedef struct { + unsigned int GDeltaMode; + unsigned int GDepthMode; + unsigned int GGeometryMode; + unsigned int GTransformMode; +} drm_gamma_context_regs_t; + +typedef struct _drm_gamma_sarea { + drm_gamma_context_regs_t context_state; + + unsigned int dirty; + + + /* Maintain an LRU of contiguous regions of texture space. If + * you think you own a region of texture memory, and it has an + * age different to the one you set, then you are mistaken and + * it has been stolen by another client. If global texAge + * hasn't changed, there is no need to walk the list. + * + * These regions can be used as a proxy for the fine-grained + * texture information of other clients - by maintaining them + * in the same lru which is used to age their own textures, + * clients have an approximate lru for the whole of global + * texture space, and can make informed decisions as to which + * areas to kick out. There is no need to choose whether to + * kick out your own texture or someone else's - simply eject + * them all in LRU order. + */ + +#define GAMMA_NR_TEX_REGIONS 64 + drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1]; + /* Last elt is sentinal */ + int texAge; /* last time texture was uploaded */ + int last_enqueue; /* last time a buffer was enqueued */ + int last_dispatch; /* age of the most recently dispatched buffer */ + int last_quiescent; /* */ + int ctxOwner; /* last context to upload state */ + + int vertex_prim; +} drm_gamma_sarea_t; + +/* WARNING: If you change any of these defines, make sure to wear a bullet + * proof vest because these are part of the stable kernel<->userspace ABI + */ + +/* Gamma specific ioctls + * The device specific ioctl range is 0x40 to 0x79. 
+ */ +#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t) +#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t) + +typedef struct drm_gamma_copy { + unsigned int DMAOutputAddress; + unsigned int DMAOutputCount; + unsigned int DMAReadGLINTSource; + unsigned int DMARectangleWriteAddress; + unsigned int DMARectangleWriteLinePitch; + unsigned int DMARectangleWrite; + unsigned int DMARectangleReadAddress; + unsigned int DMARectangleReadLinePitch; + unsigned int DMARectangleRead; + unsigned int DMARectangleReadTarget; +} drm_gamma_copy_t; + +typedef struct drm_gamma_init { + enum { + GAMMA_INIT_DMA = 0x01, + GAMMA_CLEANUP_DMA = 0x02 + } func; + + int sarea_priv_offset; + int pcimode; + unsigned int mmio0; + unsigned int mmio1; + unsigned int mmio2; + unsigned int mmio3; + unsigned int buffers_offset; +} drm_gamma_init_t; + +#endif /* _GAMMA_DRM_H_ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/gamma_drv.c linux.22-ac2/drivers/char/drm/gamma_drv.c --- linux.vanilla/drivers/char/drm/gamma_drv.c 2001-08-08 17:42:14.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/gamma_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -32,57 +32,18 @@ #include #include "gamma.h" #include "drmP.h" +#include "drm.h" +#include "gamma_drm.h" #include "gamma_drv.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." - -#define DRIVER_NAME "gamma" -#define DRIVER_DESC "3DLabs gamma" -#define DRIVER_DATE "20010216" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 } - - -#define __HAVE_COUNTERS 5 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_DMA -#define __HAVE_COUNTER8 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER9 _DRM_STAT_SPECIAL -#define __HAVE_COUNTER10 _DRM_STAT_MISSED - - #include "drm_auth.h" +#include "drm_agpsupport.h" #include "drm_bufs.h" #include "drm_context.h" #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init gamma_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", gamma_options ); -#endif - - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/gamma_drv.h linux.22-ac2/drivers/char/drm/gamma_drv.h --- linux.vanilla/drivers/char/drm/gamma_drv.h 2001-08-08 17:42:14.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/gamma_drv.h 2003-07-28 21:09:43.000000000 +0100 @@ -32,8 +32,9 @@ #ifndef _GAMMA_DRV_H_ #define _GAMMA_DRV_H_ - typedef struct drm_gamma_private { + drm_gamma_sarea_t *sarea_priv; + drm_map_t *sarea; drm_map_t *buffers; drm_map_t *mmio0; drm_map_t *mmio1; @@ -51,6 +52,11 @@ } \ } while (0) + /* gamma_dma.c */ +extern int gamma_dma_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int gamma_dma_copy( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); extern void gamma_dma_ready(drm_device_t *dev); extern void gamma_dma_quiescent_single(drm_device_t *dev); @@ -63,6 +69,7 @@ extern int gamma_find_devices(void); extern int gamma_found(void); +#define GLINT_DRI_BUF_COUNT 256 #define GAMMA_OFF(reg) \ ((reg < 0x1000) \ @@ -78,7 +85,6 @@ ((reg < 0x10000) ? dev_priv->mmio1->handle : \ ((reg < 0x11000) ? dev_priv->mmio2->handle : \ dev_priv->mmio3->handle)))) - #define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg)) #define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg) #define GAMMA_READ(reg) GAMMA_DEREF(reg) @@ -91,9 +97,11 @@ #define GAMMA_FILTERMODE 0x8c00 #define GAMMA_GCOMMANDINTFLAGS 0x0c50 #define GAMMA_GCOMMANDMODE 0x0c40 +#define GAMMA_QUEUED_DMA_MODE 1<<1 #define GAMMA_GCOMMANDSTATUS 0x0c60 #define GAMMA_GDELAYTIMER 0x0c38 #define GAMMA_GDMACONTROL 0x0060 +#define GAMMA_USE_AGP 1<<1 #define GAMMA_GINTENABLE 0x0808 #define GAMMA_GINTFLAGS 0x0810 #define GAMMA_INFIFOSPACE 0x0018 @@ -101,5 +109,12 @@ #define GAMMA_OUTPUTFIFO 0x2000 #define GAMMA_SYNC 0x8c40 #define GAMMA_SYNC_TAG 0x0188 +#define GAMMA_PAGETABLEADDR 0x0C00 +#define GAMMA_PAGETABLELENGTH 0x0C08 + +#define GAMMA_PASSTHROUGH 0x1FE +#define GAMMA_DMAADDRTAG 0x530 +#define GAMMA_DMACOUNTTAG 0x531 +#define GAMMA_COMMANDINTTAG 0x532 #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/gamma.h linux.22-ac2/drivers/char/drm/gamma.h --- linux.vanilla/drivers/char/drm/gamma.h 2001-08-08 17:42:14.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/gamma.h 2003-07-28 21:09:43.000000000 +0100 @@ -38,9 +38,36 @@ */ #define __HAVE_MTRR 1 +#define DRIVER_AUTHOR "VA Linux Systems Inc." 
+ +#define DRIVER_NAME "gamma" +#define DRIVER_DESC "3DLabs gamma" +#define DRIVER_DATE "20010624" + +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_GAMMA_INIT)] = { gamma_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_GAMMA_COPY)] = { gamma_dma_copy, 1, 1 } + +#define IOCTL_TABLE_NAME DRM(ioctls) +#define IOCTL_FUNC_NAME DRM(ioctl) + +#define __HAVE_COUNTERS 5 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_DMA +#define __HAVE_COUNTER8 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER9 _DRM_STAT_SPECIAL +#define __HAVE_COUNTER10 _DRM_STAT_MISSED + /* DMA customization: */ #define __HAVE_DMA 1 +#define __HAVE_AGP 1 +#define __MUST_HAVE_AGP 0 #define __HAVE_OLD_DMA 1 #define __HAVE_PCI_DMA 1 @@ -61,33 +88,61 @@ #define __HAVE_DMA_QUIESCENT 1 #define DRIVER_DMA_QUIESCENT() do { \ /* FIXME ! */ \ - gamma_dma_quiescent_dual(dev); \ + gamma_dma_quiescent_single(dev); \ return 0; \ } while (0) #define __HAVE_DMA_IRQ 1 #define __HAVE_DMA_IRQ_BH 1 + +#if 1 #define DRIVER_PREINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ - GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000000 ); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 ); \ GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 ); \ } while (0) - #define DRIVER_POSTINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); \ GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 ); \ GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 ); \ GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 ); \ } while (0) +#else +#define DRIVER_POSTINSTALL() do { \ + drm_gamma_private_t *dev_priv = \ + (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002000 ); \ + GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000004 ); \ +} while (0) + +#define DRIVER_PREINSTALL() do { \ + drm_gamma_private_t *dev_priv = \ + (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + GAMMA_WRITE( GAMMA_GCOMMANDMODE, GAMMA_QUEUED_DMA_MODE );\ + GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );\ +} while (0) +#endif #define DRIVER_UNINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); \ GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 ); \ GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 ); \ GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 ); \ } while (0) +#define DRIVER_AGP_BUFFERS_MAP( dev ) \ + ((drm_gamma_private_t *)((dev)->dev_private))->buffers + #endif /* __GAMMA_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i810_dma.c linux.22-ac2/drivers/char/drm/i810_dma.c --- linux.vanilla/drivers/char/drm/i810_dma.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/i810_dma.c 2003-07-28 21:09:43.000000000 +0100 @@ -26,21 +26,20 @@ * * Authors: Rickard E. 
(Rik) Faith * Jeff Hartmann - * Keith Whitwell + * Keith Whitwell + * */ #include #include "i810.h" #include "drmP.h" +#include "drm.h" +#include "i810_drm.h" #include "i810_drv.h" #include /* For task queue support */ -#include +#include -/* in case we don't have a 2.3.99-pre6 kernel or later: */ -#ifndef VM_DONTCOPY -#define VM_DONTCOPY 0 -#endif +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) #define I810_BUF_FREE 2 #define I810_BUF_CLIENT 1 @@ -51,29 +50,27 @@ #define RING_LOCALS unsigned int outring, ringmask; volatile char *virt; -#define BEGIN_LP_RING(n) do { \ - if (I810_VERBOSE) \ - DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ - n, __FUNCTION__); \ - if (dev_priv->ring.space < n*4) \ - i810_wait_ring(dev, n*4); \ - dev_priv->ring.space -= n*4; \ - outring = dev_priv->ring.tail; \ - ringmask = dev_priv->ring.tail_mask; \ - virt = dev_priv->ring.virtual_start; \ +#define BEGIN_LP_RING(n) do { \ + if (0) DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i810_wait_ring(dev, n*4); \ + dev_priv->ring.space -= n*4; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ } while (0) -#define ADVANCE_LP_RING() do { \ - if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ - dev_priv->ring.tail = outring; \ - I810_WRITE(LP_RING + RING_TAIL, outring); \ +#define ADVANCE_LP_RING() do { \ + if (0) DRM_DEBUG("ADVANCE_LP_RING\n"); \ + dev_priv->ring.tail = outring; \ + I810_WRITE(LP_RING + RING_TAIL, outring); \ } while(0) -#define OUT_RING(n) do { \ - if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = n; \ - outring += 4; \ - outring &= ringmask; \ +#define OUT_RING(n) do { \ + if (0) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outring += 4; \ + outring &= ringmask; \ } while (0) static inline void i810_print_status_page(drm_device_t *dev) @@ -135,14 +132,14 @@ } static struct file_operations i810_buffer_fops = { - open: DRM(open), - flush: DRM(flush), - release: DRM(release), - ioctl: DRM(ioctl), - mmap: i810_mmap_buffers, - read: DRM(read), - fasync: DRM(fasync), - poll: DRM(poll), + .open = DRM(open), + .flush = DRM(flush), + .release = DRM(release), + .ioctl = DRM(ioctl), + .mmap = i810_mmap_buffers, + .read = DRM(read), + .fasync = DRM(fasync), + .poll = DRM(poll), }; int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) @@ -165,7 +162,7 @@ buf_priv->currently_mapped = I810_BUF_MAPPED; unlock_kernel(); - if (remap_page_range(vma->vm_start, + if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma), vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; @@ -183,28 +180,31 @@ if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL; - if(VM_DONTCOPY != 0) { - down_write( &current->mm->mmap_sem ); - old_fops = filp->f_op; - filp->f_op = &i810_buffer_fops; - dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, - PROT_READ|PROT_WRITE, - MAP_SHARED, - buf->bus_address); - dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; - if ((unsigned long)buf_priv->virtual > -1024UL) { - /* Real error */ - DRM_DEBUG("mmap error\n"); - retcode = (signed int)buf_priv->virtual; - buf_priv->virtual = 0; - } - up_write( &current->mm->mmap_sem ); - } else { - buf_priv->virtual = buf_priv->kernel_virtual; - buf_priv->currently_mapped = I810_BUF_MAPPED; + + + + down_write( &current->mm->mmap_sem ); + + old_fops = filp->f_op; + filp->f_op =
&i810_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ|PROT_WRITE, + MAP_SHARED, + buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_DEBUG("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = 0; } + + + + up_write( &current->mm->mmap_sem ); + return retcode; } @@ -213,15 +213,21 @@ drm_i810_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; - if(VM_DONTCOPY != 0) { - if(buf_priv->currently_mapped != I810_BUF_MAPPED) - return -EINVAL; - down_write( &current->mm->mmap_sem ); - retcode = do_munmap(current->mm, - (unsigned long)buf_priv->virtual, - (size_t) buf->total); - up_write( &current->mm->mmap_sem ); - } + if(buf_priv->currently_mapped != I810_BUF_MAPPED) + return -EINVAL; + + + + down_write( &current->mm->mmap_sem ); + + retcode = DO_MUNMAP(current->mm, + (unsigned long)buf_priv->virtual, + (size_t) buf->total); + + + + up_write( &current->mm->mmap_sem ); + buf_priv->currently_mapped = I810_BUF_UNMAPPED; buf_priv->virtual = 0; @@ -273,8 +279,9 @@ dev_priv->ring.Size); } if(dev_priv->hw_status_page != 0UL) { - pci_free_consistent(dev->pdev, PAGE_SIZE, (void *)dev_priv->hw_status_page, - dev_priv->dma_status_page); + pci_free_consistent(dev->pdev, PAGE_SIZE, + (void *)dev_priv->hw_status_page, + dev_priv->dma_status_page); /* Need to rewrite hardware status page */ I810_WRITE(0x02080, 0x1ffff000); } @@ -301,8 +308,6 @@ end = jiffies + (HZ*3); while (ring->space < n) { - int i; - ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ring->space = ring->head - (ring->tail+8); if (ring->space < 0) ring->space += ring->Size; @@ -311,13 +316,12 @@ end = jiffies + (HZ*3); iters++; - if((signed)(end - jiffies) <= 0) { + if(time_before(end, jiffies)) { DRM_ERROR("space: %d wanted %d\n", ring->space, n); DRM_ERROR("lockup\n"); goto out_wait_ring; } - - for (i = 0 ; i < 2000 ; i++) ; + udelay(1); } out_wait_ring: @@ -405,9 +409,6 @@ ((u8 *)dev_priv->sarea_map->handle + init->sarea_priv_offset); - atomic_set(&dev_priv->flush_done, 0); - init_waitqueue_head(&dev_priv->flush_queue); - dev_priv->ring.Start = init->ring_start; dev_priv->ring.End = init->ring_end; dev_priv->ring.Size = init->ring_size; @@ -440,8 +441,9 @@ dev_priv->zi1 = init->depth_offset | init->pitch_bits; /* Program Hardware Status Page */ - dev_priv->hw_status_page = (unsigned long)pci_alloc_consistent(dev->pdev, PAGE_SIZE, - &dev_priv->dma_status_page); + dev_priv->hw_status_page = + (unsigned long) pci_alloc_consistent(dev->pdev, PAGE_SIZE, + &dev_priv->dma_status_page); if(dev_priv->hw_status_page == 0UL) { dev->dev_private = (void *)dev_priv; i810_dma_cleanup(dev); @@ -451,7 +453,7 @@ memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE); DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page); - I810_WRITE(0x02080, dev_priv->dma_status_page); + I810_WRITE(0x02080, dev_priv->dma_status_page); DRM_DEBUG("Enabled hardware status page\n"); /* Now we need to init our freelist */ @@ -532,16 +534,12 @@ /* Most efficient way to verify state for the i810 is as it is * emitted. Non-conformant state is silently dropped. - * - * Use 'volatile' & local var tmp to force the emitted values to be - * identical to the verified ones.
*/ static void i810EmitContextVerified( drm_device_t *dev, - volatile unsigned int *code ) + unsigned int *code ) { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; - unsigned int tmp; RING_LOCALS; BEGIN_LP_RING( I810_CTX_SETUP_SIZE ); @@ -553,14 +551,13 @@ OUT_RING( code[I810_CTXREG_ST1] ); for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - - if ((tmp & (7<<29)) == (3<<29) && - (tmp & (0x1f<<24)) < (0x1d<<24)) + if ((code[i] & (7<<29)) == (3<<29) && + (code[i] & (0x1f<<24)) < (0x1d<<24)) { - OUT_RING( tmp ); + OUT_RING( code[i] ); j++; } + else printk("constext state dropped!!!\n"); } if (j & 1) @@ -574,7 +571,6 @@ { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; - unsigned int tmp; RING_LOCALS; BEGIN_LP_RING( I810_TEX_SETUP_SIZE ); @@ -585,14 +581,14 @@ OUT_RING( code[I810_TEXREG_MI3] ); for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - if ((tmp & (7<<29)) == (3<<29) && - (tmp & (0x1f<<24)) < (0x1d<<24)) + if ((code[i] & (7<<29)) == (3<<29) && + (code[i] & (0x1f<<24)) < (0x1d<<24)) { - OUT_RING( tmp ); + OUT_RING( code[i] ); j++; } + else printk("texture state dropped!!!\n"); } if (j & 1) @@ -617,9 +613,9 @@ if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( tmp ); - } else - DRM_DEBUG("bad di1 %x (allow %x or %x)\n", - tmp, dev_priv->front_di1, dev_priv->back_di1); + } + else + printk("buffer state dropped\n"); /* invarient: */ @@ -704,7 +700,6 @@ continue; if ( flags & I810_FRONT ) { - DRM_DEBUG("clear front\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -717,7 +712,6 @@ } if ( flags & I810_BACK ) { - DRM_DEBUG("clear back\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -730,7 +724,6 @@ } if ( flags & I810_DEPTH ) { - DRM_DEBUG("clear depth\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -756,8 +749,6 @@ int i; RING_LOCALS; - DRM_DEBUG("swapbuffers\n"); - i810_kernel_lost_context(dev); if (nbox > I810_NR_SAREA_CLIPRECTS) @@ -776,10 +767,6 @@ pbox->y2 > dev_priv->h) continue; - DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n", - pbox[i].x1, pbox[i].y1, - pbox[i].x2, pbox[i].y2); - BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 ); OUT_RING( pitch | (0xCC << 16)); @@ -804,7 +791,7 @@ int nbox = sarea_priv->nbox; unsigned long address = (unsigned long)buf->bus_address; unsigned long start = address - dev->agp->base; - int i = 0, u; + int i = 0; RING_LOCALS; i810_kernel_lost_context(dev); @@ -812,33 +799,16 @@ if (nbox > I810_NR_SAREA_CLIPRECTS) nbox = I810_NR_SAREA_CLIPRECTS; - if (discard) { - u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, - I810_BUF_HARDWARE); - if(u != I810_BUF_CLIENT) { - DRM_DEBUG("xxxx 2\n"); - } - } - if (used > 4*1024) used = 0; if (sarea_priv->dirty) i810EmitState( dev ); - DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n", - address, used, nbox); - - dev_priv->counter++; - DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter); - DRM_DEBUG( "i810_dma_dispatch\n"); - DRM_DEBUG( "start : %lx\n", start); - DRM_DEBUG( "used : %d\n", used); - DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4); - if (buf_priv->currently_mapped == I810_BUF_MAPPED) { - *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | - sarea_priv->vertex_prim | + unsigned int prim = (sarea_priv->vertex_prim & PR_MASK); + + *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | prim | ((used/4)-2)); if (used & 4) { @@ -871,154 +841,62 @@ } 
while (++i < nbox); } - BEGIN_LP_RING(10); - OUT_RING( CMD_STORE_DWORD_IDX ); - OUT_RING( 20 ); - OUT_RING( dev_priv->counter ); - OUT_RING( 0 ); - if (discard) { + dev_priv->counter++; + + (void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, + I810_BUF_HARDWARE); + + BEGIN_LP_RING(8); + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( 20 ); + OUT_RING( dev_priv->counter ); OUT_RING( CMD_STORE_DWORD_IDX ); OUT_RING( buf_priv->my_use_idx ); OUT_RING( I810_BUF_FREE ); + OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); + ADVANCE_LP_RING(); } - - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); -} - - -/* Interrupts are only for flushing */ -void i810_dma_service(int irq, void *device, struct pt_regs *regs) -{ - drm_device_t *dev = (drm_device_t *)device; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - u16 temp; - - atomic_inc(&dev->counts[_DRM_STAT_IRQ]); - temp = I810_READ16(I810REG_INT_IDENTITY_R); - temp = temp & ~(0x6000); - if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R, - temp); /* Clear all interrupts */ - else - return; - - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); } -void i810_dma_immediate_bh(void *device) -{ - drm_device_t *dev = (drm_device_t *) device; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -static inline void i810_dma_emit_flush(drm_device_t *dev) -{ - drm_i810_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - i810_kernel_lost_context(dev); - - BEGIN_LP_RING(2); - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( GFX_OP_USER_INTERRUPT ); - ADVANCE_LP_RING(); - -/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ -/* atomic_set(&dev_priv->flush_done, 1); */ -/* wake_up_interruptible(&dev_priv->flush_queue); */ -} - -static inline void i810_dma_quiescent_emit(drm_device_t *dev) +void i810_dma_quiescent(drm_device_t *dev) { drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; +/* printk("%s\n", __FUNCTION__); */ + i810_kernel_lost_context(dev); BEGIN_LP_RING(4); OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); - OUT_RING( GFX_OP_USER_INTERRUPT ); + OUT_RING( 0 ); ADVANCE_LP_RING(); -/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ -/* atomic_set(&dev_priv->flush_done, 1); */ -/* wake_up_interruptible(&dev_priv->flush_queue); */ -} - -void i810_dma_quiescent(drm_device_t *dev) -{ - DECLARE_WAITQUEUE(entry, current); - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - unsigned long end; - - if(dev_priv == NULL) { - return; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i810_dma_quiescent_emit(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if((signed)(end - jiffies) <= 0) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); - - return; + i810_wait_ring( dev, dev_priv->ring.Size - 8 ); } static int i810_flush_queue(drm_device_t *dev) { - DECLARE_WAITQUEUE(entry, current); - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + drm_i810_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; - unsigned long end; int i, ret = 0; + RING_LOCALS; + +/* printk("%s\n", __FUNCTION__); 
*/ - if(dev_priv == NULL) { - return 0; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i810_dma_emit_flush(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if((signed)(end - jiffies) <= 0) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - ret = -EINTR; /* Can't restart */ - break; - } - } + i810_kernel_lost_context(dev); - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); + BEGIN_LP_RING(2); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + i810_wait_ring( dev, dev_priv->ring.Size - 8 ); for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; @@ -1030,7 +908,7 @@ if (used == I810_BUF_HARDWARE) DRM_DEBUG("reclaimed from HARDWARE\n"); if (used == I810_BUF_CLIENT) - DRM_DEBUG("still on client HARDWARE\n"); + DRM_DEBUG("still on client\n"); } return ret; @@ -1070,7 +948,6 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("i810_flush_ioctl\n"); if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_flush_ioctl called without lock held\n"); return -EINVAL; @@ -1101,9 +978,6 @@ return -EINVAL; } - DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", - vertex.idx, vertex.used, vertex.discard); - if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL; i810_dma_dispatch_vertex( dev, @@ -1152,8 +1026,6 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("i810_swap_bufs\n"); - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_swap_buf called without lock held\n"); return -EINVAL; @@ -1189,7 +1061,6 @@ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - DRM_DEBUG("getbuf\n"); if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d))) return -EFAULT; @@ -1202,9 +1073,6 @@ retcode = i810_dma_get_buffer(dev, &d, filp); - DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", - current->pid, retcode, d.granted); - if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d))) return -EFAULT; sarea_priv->last_dispatch = (int) hw_status[5]; @@ -1212,47 +1080,19 @@ return retcode; } -int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int i810_copybuf(struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_i810_copy_t d; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - u32 *hw_status = (u32 *)dev_priv->hw_status_page; - drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) - dev_priv->sarea_priv; - drm_buf_t *buf; - drm_i810_buf_priv_t *buf_priv; - drm_device_dma_t *dma = dev->dma; - - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { - DRM_ERROR("i810_dma called without lock held\n"); - return -EINVAL; - } - - if (copy_from_user(&d, (drm_i810_copy_t *)arg, sizeof(d))) - return -EFAULT; - - if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[ d.idx ]; - buf_priv = buf->dev_private; - if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM; - - if(d.used < 0 || d.used > buf->total) return -EINVAL; - - if (copy_from_user(buf_priv->virtual, d.address, d.used)) - return -EFAULT; - - sarea_priv->last_dispatch = (int) hw_status[5]; - + /* Never copy - 2.4.x doesn't need it */ return 0; } int i810_docopy(struct inode *inode, 
struct file *filp, unsigned int cmd, unsigned long arg) { - if(VM_DONTCOPY == 0) return 1; + /* Never copy - 2.4.x doesn't need it */ return 0; } @@ -1371,7 +1211,8 @@ data.offset = dev_priv->overlay_offset; data.physical = dev_priv->overlay_physical; - copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)); + if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data))) + return -EFAULT; return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i810_drm.h linux.22-ac2/drivers/char/drm/i810_drm.h --- linux.vanilla/drivers/char/drm/i810_drm.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/i810_drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -88,6 +88,8 @@ #define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */ #define I810_TEX_SETUP_SIZE 8 +/* Flags for clear ioctl + */ #define I810_FRONT 0x1 #define I810_BACK 0x2 #define I810_DEPTH 0x4 @@ -166,14 +168,34 @@ } drm_i810_sarea_t; +/* WARNING: If you change any of these defines, make sure to wear a bullet + * proof vest since these are part of the stable kernel<->userspace ABI + */ + +/* i810 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) +#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) +#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) +#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43) +#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44) +#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) +#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46) +#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t) +#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48) +#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t) +#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a) +#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b) +#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t) +#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d ) + typedef struct _drm_i810_clear { int clear_color; int clear_depth; int flags; } drm_i810_clear_t; - - /* These may be placeholders if we have more cliprects than * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to * false, indicating that the buffer will be dispatched again with a @@ -191,6 +213,17 @@ void *address; /* Address to copy from */ } drm_i810_copy_t; +#define PR_TRIANGLES (0x0<<18) +#define PR_TRISTRIP_0 (0x1<<18) +#define PR_TRISTRIP_1 (0x2<<18) +#define PR_TRIFAN (0x3<<18) +#define PR_POLYGON (0x4<<18) +#define PR_LINES (0x5<<18) +#define PR_LINESTRIP (0x6<<18) +#define PR_RECTS (0x7<<18) +#define PR_MASK (0x7<<18) + + typedef struct drm_i810_dma { void *virtual; int request_idx; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i810_drv.c linux.22-ac2/drivers/char/drm/i810_drv.c --- linux.vanilla/drivers/char/drm/i810_drv.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/i810_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -33,42 +33,10 @@ #include #include "i810.h" #include "drmP.h" +#include "drm.h" +#include "i810_drm.h" #include "i810_drv.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." 
- -#define DRIVER_NAME "i810" -#define DRIVER_DESC "Intel i810" -#define DRIVER_DATE "20010920" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 } - - -#define __HAVE_COUNTERS 4 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#define __HAVE_COUNTER9 _DRM_STAT_DMA - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -77,25 +45,6 @@ #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init i810_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", i810_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i810_drv.h linux.22-ac2/drivers/char/drm/i810_drv.h --- linux.vanilla/drivers/char/drm/i810_drv.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/i810_drv.h 2003-07-28 21:09:43.000000000 +0100 @@ -63,10 +63,9 @@ unsigned long hw_status_page; unsigned long counter; - dma_addr_t dma_status_page; - atomic_t flush_done; - wait_queue_head_t flush_queue; /* Processes waiting until flush */ + dma_addr_t dma_status_page; + drm_buf_t *mmap_buffer; @@ -78,6 +77,7 @@ int overlay_physical; int w, h; int pitch; + } drm_i810_private_t; /* i810_dma.c */ @@ -92,8 +92,13 @@ extern int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma); + +/* Obsolete: + */ extern int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +/* Obsolete: + */ extern int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -111,9 +116,6 @@ extern void i810_dma_quiescent(drm_device_t *dev); -#define I810_VERBOSE 0 - - int i810_dma_vertex(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -196,6 +198,7 @@ #define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23)) #define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23)) +#define CMD_OP_FRONTBUFFER_INFO ((0x0<<29)|(0x14<<23)) #define BR00_BITBLT_CLIENT 0x40000000 #define BR00_OP_COLOR_BLT 0x10000000 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i810.h linux.22-ac2/drivers/char/drm/i810.h --- linux.vanilla/drivers/char/drm/i810.h 2001-08-08 17:42:14.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/i810.h 2003-07-28 21:09:43.000000000 +0100 @@ -41,6 +41,47 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "VA Linux Systems Inc." 
+ +#define DRIVER_NAME "i810" +#define DRIVER_DESC "Intel i810" +#define DRIVER_DATE "20020211" + +/* Interface history + * + * 1.1 - XFree86 4.1 + * 1.2 - XvMC interfaces + * - XFree86 4.2 + * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility) + * - Remove requirement for interrupt (leave stubs again) + */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 1 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 } + + +#define __HAVE_COUNTERS 4 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY +#define __HAVE_COUNTER9 _DRM_STAT_DMA + /* Driver customization: */ #define __HAVE_RELEASE 1 @@ -60,50 +101,10 @@ i810_dma_quiescent( dev ); \ } while (0) -#define __HAVE_DMA_IRQ 1 -#define __HAVE_DMA_IRQ_BH 1 -#define __HAVE_SHARED_IRQ 1 -#define DRIVER_PREINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I810_READ16( I810REG_HWSTAM ); \ - tmp = tmp & 0x6000; \ - I810_WRITE16( I810REG_HWSTAM, tmp ); \ - \ - tmp = I810_READ16( I810REG_INT_MASK_R ); \ - tmp = tmp & 0x6000; /* Unmask interrupts */ \ - I810_WRITE16( I810REG_INT_MASK_R, tmp ); \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ -} while (0) - -#define DRIVER_POSTINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; \ - tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ -} while (0) - -#define DRIVER_UNINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - if ( dev_priv ) { \ - tmp = I810_READ16( I810REG_INT_IDENTITY_R ); \ - tmp = tmp & ~(0x6000); /* Clear all interrupts */ \ - if ( tmp != 0 ) \ - I810_WRITE16( I810REG_INT_IDENTITY_R, tmp ); \ - \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ - } \ -} while (0) +/* Don't need an irq any more. The template code will make sure that + * a noop stub is generated for compatibility. 
+ */ +#define __HAVE_DMA_IRQ 0 /* Buffer customization: */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i830_dma.c linux.22-ac2/drivers/char/drm/i830_dma.c --- linux.vanilla/drivers/char/drm/i830_dma.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/i830_dma.c 2003-07-28 21:09:43.000000000 +0100 @@ -26,20 +26,22 @@ * * Authors: Rickard E. (Rik) Faith * Jeff Hartmann - * Keith Whitwell - * Abraham vd Merwe + * Keith Whitwell + * Abraham vd Merwe * */ + #include "i830.h" #include "drmP.h" +#include "drm.h" +#include "i830_drm.h" #include "i830_drv.h" #include /* For task queue support */ +#include /* For FASTCALL on unlock_page() */ #include -/* in case we don't have a 2.3.99-pre6 kernel or later: */ -#ifndef VM_DONTCOPY -#define VM_DONTCOPY 0 -#endif + +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) #define I830_BUF_FREE 2 #define I830_BUF_CLIENT 1 @@ -48,54 +50,24 @@ #define I830_BUF_UNMAPPED 0 #define I830_BUF_MAPPED 1 -#define RING_LOCALS unsigned int outring, ringmask; volatile char *virt; -#define DO_IDLE_WORKAROUND() \ -do { \ - int _head; \ - int _tail; \ - do { \ - _head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; \ - _tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; \ - udelay(1); \ - } while(_head != _tail); \ -} while(0) - -#define I830_SYNC_WORKAROUND 0 - -#define BEGIN_LP_RING(n) do { \ - if (I830_VERBOSE) \ - DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ - n, __FUNCTION__); \ - if (I830_SYNC_WORKAROUND) \ - DO_IDLE_WORKAROUND(); \ - if (dev_priv->ring.space < n*4) \ - i830_wait_ring(dev, n*4); \ - dev_priv->ring.space -= n*4; \ - outring = dev_priv->ring.tail; \ - ringmask = dev_priv->ring.tail_mask; \ - virt = dev_priv->ring.virtual_start; \ -} while (0) - -#define ADVANCE_LP_RING() do { \ - if (I830_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ - dev_priv->ring.tail = outring; \ - I830_WRITE(LP_RING + RING_TAIL, outring); \ -} while(0) - -#define OUT_RING(n) do { \ - if (I830_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = n; \ - outring += 4; \ - outring &= ringmask; \ -} while (0); + + + + + + + + + + static inline void i830_print_status_page(drm_device_t *dev) { drm_device_dma_t *dma = dev->dma; drm_i830_private_t *dev_priv = dev->dev_private; - u8 *temp = dev_priv->hw_status_page; + u32 *temp = (u32 *)dev_priv->hw_status_page; int i; DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]); @@ -149,14 +121,14 @@ } static struct file_operations i830_buffer_fops = { - open: DRM(open), - flush: DRM(flush), - release: DRM(release), - ioctl: DRM(ioctl), - mmap: i830_mmap_buffers, - read: DRM(read), - fasync: DRM(fasync), - poll: DRM(poll), + .open = DRM(open), + .flush = DRM(flush), + .release = DRM(release), + .ioctl = DRM(ioctl), + .mmap = i830_mmap_buffers, + .read = DRM(read), + .fasync = DRM(fasync), + .poll = DRM(poll), }; int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) @@ -179,7 +151,7 @@ buf_priv->currently_mapped = I830_BUF_MAPPED; unlock_kernel(); - if (remap_page_range(vma->vm_start, + if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma), vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; @@ -197,28 +169,24 @@ if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL; - if(VM_DONTCOPY != 0) { - down_write( ¤t->mm->mmap_sem ); - old_fops = filp->f_op; - filp->f_op = &i830_buffer_fops; - dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, - 
PROT_READ|PROT_WRITE, - MAP_SHARED, - buf->bus_address); - dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; - if ((unsigned long)buf_priv->virtual > -1024UL) { - /* Real error */ - DRM_DEBUG("mmap error\n"); - retcode = (signed int)buf_priv->virtual; - buf_priv->virtual = 0; - } - up_write( ¤t->mm->mmap_sem ); - } else { - buf_priv->virtual = buf_priv->kernel_virtual; - buf_priv->currently_mapped = I830_BUF_MAPPED; + down_write( ¤t->mm->mmap_sem ); + old_fops = filp->f_op; + filp->f_op = &i830_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ|PROT_WRITE, + MAP_SHARED, + buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_ERROR("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = 0; } + up_write( ¤t->mm->mmap_sem ); + return retcode; } @@ -227,15 +195,15 @@ drm_i830_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; - if(VM_DONTCOPY != 0) { - if(buf_priv->currently_mapped != I830_BUF_MAPPED) - return -EINVAL; - down_write( ¤t->mm->mmap_sem ); - retcode = do_munmap(current->mm, - (unsigned long)buf_priv->virtual, - (size_t) buf->total); - up_write( ¤t->mm->mmap_sem ); - } + if(buf_priv->currently_mapped != I830_BUF_MAPPED) + return -EINVAL; + + down_write(¤t->mm->mmap_sem); + retcode = DO_MUNMAP(current->mm, + (unsigned long)buf_priv->virtual, + (size_t) buf->total); + up_write(¤t->mm->mmap_sem); + buf_priv->currently_mapped = I830_BUF_UNMAPPED; buf_priv->virtual = 0; @@ -260,7 +228,7 @@ retcode = i830_map_buffer(buf, filp); if(retcode) { i830_freelist_put(dev, buf); - DRM_DEBUG("mapbuf failed, retcode %d\n", retcode); + DRM_ERROR("mapbuf failed, retcode %d\n", retcode); return retcode; } buf->pid = priv->pid; @@ -286,12 +254,22 @@ DRM(ioremapfree)((void *) dev_priv->ring.virtual_start, dev_priv->ring.Size); } - if(dev_priv->hw_status_page != NULL) { - pci_free_consistent(dev->pdev, PAGE_SIZE, - dev_priv->hw_status_page, dev_priv->dma_status_page); + if(dev_priv->hw_status_page != 0UL) { + pci_free_consistent(dev->pdev, PAGE_SIZE, + (void *)dev_priv->hw_status_page, + dev_priv->dma_status_page); /* Need to rewrite hardware status page */ I830_WRITE(0x02080, 0x1ffff000); } + + /* Disable interrupts here because after dev_private + * is freed, it's too late. 
+ */ + if (dev->irq) { + I830_WRITE16( I830REG_INT_MASK_R, 0xffff ); + I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 ); + } + DRM(free)(dev->dev_private, sizeof(drm_i830_private_t), DRM_MEM_DRIVER); dev->dev_private = NULL; @@ -305,7 +283,7 @@ return 0; } -static int i830_wait_ring(drm_device_t *dev, int n) +int i830_wait_ring(drm_device_t *dev, int n, const char *caller) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_ring_buffer_t *ring = &(dev_priv->ring); @@ -314,7 +292,7 @@ unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; end = jiffies + (HZ*3); - while (ring->space < n) { + while (ring->space < n) { ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ring->space = ring->head - (ring->tail+8); if (ring->space < 0) ring->space += ring->Size; @@ -325,13 +303,13 @@ } iters++; - if(time_before(end,jiffies)) { + if(time_before(end, jiffies)) { DRM_ERROR("space: %d wanted %d\n", ring->space, n); DRM_ERROR("lockup\n"); goto out_wait_ring; } - - udelay(1); + udelay(1); + dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; } out_wait_ring: @@ -344,9 +322,12 @@ drm_i830_ring_buffer_t *ring = &(dev_priv->ring); ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; - ring->tail = I830_READ(LP_RING + RING_TAIL); + ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; ring->space = ring->head - (ring->tail+8); if (ring->space < 0) ring->space += ring->Size; + + if (ring->head == ring->tail) + dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY; } static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv) @@ -420,9 +401,6 @@ ((u8 *)dev_priv->sarea_map->handle + init->sarea_priv_offset); - atomic_set(&dev_priv->flush_done, 0); - init_waitqueue_head(&dev_priv->flush_queue); - dev_priv->ring.Start = init->ring_start; dev_priv->ring.End = init->ring_end; dev_priv->ring.Size = init->ring_size; @@ -446,11 +424,17 @@ dev_priv->pitch = init->pitch; dev_priv->back_offset = init->back_offset; dev_priv->depth_offset = init->depth_offset; + dev_priv->front_offset = init->front_offset; dev_priv->front_di1 = init->front_offset | init->pitch_bits; dev_priv->back_di1 = init->back_offset | init->pitch_bits; dev_priv->zi1 = init->depth_offset | init->pitch_bits; + DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1); + DRM_DEBUG("back_offset %x\n", dev_priv->back_offset); + DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1); + DRM_DEBUG("pitch_bits %x\n", init->pitch_bits); + dev_priv->cpp = init->cpp; /* We are using seperate values as placeholders for mechanisms for * private backbuffer/depthbuffer usage. 
@@ -458,20 +442,23 @@ dev_priv->back_pitch = init->back_pitch; dev_priv->depth_pitch = init->depth_pitch; + dev_priv->do_boxes = 0; + dev_priv->use_mi_batchbuffer_start = 0; /* Program Hardware Status Page */ - dev_priv->hw_status_page = pci_alloc_consistent(dev->pdev, PAGE_SIZE, + dev_priv->hw_status_page = + (unsigned long) pci_alloc_consistent(dev->pdev, PAGE_SIZE, &dev_priv->dma_status_page); - if(dev_priv->hw_status_page == NULL) { + if(dev_priv->hw_status_page == 0UL) { dev->dev_private = (void *)dev_priv; i830_dma_cleanup(dev); DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); + memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE); + DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page); - I830_WRITE(0x02080, dev_priv->dma_status_page); + I830_WRITE(0x02080, dev_priv->dma_status_page); DRM_DEBUG("Enabled hardware status page\n"); /* Now we need to init our freelist */ @@ -517,83 +504,107 @@ return retcode; } +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define ST1_ENABLE (1<<16) +#define ST1_MASK (0xffff) + /* Most efficient way to verify state for the i830 is as it is * emitted. Non-conformant state is silently dropped. - * - * Use 'volatile' & local var tmp to force the emitted values to be - * identical to the verified ones. */ -static void i830EmitContextVerified( drm_device_t *dev, - volatile unsigned int *code ) +static void i830EmitContextVerified( drm_device_t *dev, + unsigned int *code ) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( I830_CTX_SETUP_SIZE ); - for ( i = 0 ; i < I830_CTX_SETUP_SIZE ; i++ ) { + BEGIN_LP_RING( I830_CTX_SETUP_SIZE + 4 ); + + for ( i = 0 ; i < I830_CTXREG_BLENDCOLR0 ; i++ ) { tmp = code[i]; + if ((tmp & (7<<29)) == CMD_3D && + (tmp & (0x1f<<24)) < (0x1d<<24)) { + OUT_RING( tmp ); + j++; + } else { + DRM_ERROR("Skipping %d\n", i); + } + } -#if 0 - if ((tmp & (7<<29)) == (3<<29) && + OUT_RING( STATE3D_CONST_BLEND_COLOR_CMD ); + OUT_RING( code[I830_CTXREG_BLENDCOLR] ); + j += 2; + + for ( i = I830_CTXREG_VF ; i < I830_CTXREG_MCSB0 ; i++ ) { + tmp = code[i]; + if ((tmp & (7<<29)) == CMD_3D && (tmp & (0x1f<<24)) < (0x1d<<24)) { OUT_RING( tmp ); j++; } else { - printk("Skipping %d\n", i); + DRM_ERROR("Skipping %d\n", i); } -#else - OUT_RING( tmp ); - j++; -#endif } + OUT_RING( STATE3D_MAP_COORD_SETBIND_CMD ); + OUT_RING( code[I830_CTXREG_MCSB1] ); + j += 2; + if (j & 1) OUT_RING( 0 ); ADVANCE_LP_RING(); } -static void i830EmitTexVerified( drm_device_t *dev, - volatile unsigned int *code ) +static void i830EmitTexVerified( drm_device_t *dev, unsigned int *code ) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( I830_TEX_SETUP_SIZE ); - - OUT_RING( GFX_OP_MAP_INFO ); - OUT_RING( code[I830_TEXREG_MI1] ); - OUT_RING( code[I830_TEXREG_MI2] ); - OUT_RING( code[I830_TEXREG_MI3] ); - OUT_RING( code[I830_TEXREG_MI4] ); - OUT_RING( code[I830_TEXREG_MI5] ); - - for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - OUT_RING( tmp ); - j++; - } + if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO || + (code[I830_TEXREG_MI0] & ~(0xf*LOAD_TEXTURE_MAP0)) == + (STATE3D_LOAD_STATE_IMMEDIATE_2|4)) { + + BEGIN_LP_RING( I830_TEX_SETUP_SIZE ); + + OUT_RING( code[I830_TEXREG_MI0] ); /* TM0LI */ + OUT_RING( code[I830_TEXREG_MI1] ); /* TM0S0 */ + OUT_RING( code[I830_TEXREG_MI2] ); /* 
TM0S1 */ + OUT_RING( code[I830_TEXREG_MI3] ); /* TM0S2 */ + OUT_RING( code[I830_TEXREG_MI4] ); /* TM0S3 */ + OUT_RING( code[I830_TEXREG_MI5] ); /* TM0S4 */ + + for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) { + tmp = code[i]; + OUT_RING( tmp ); + j++; + } - if (j & 1) - OUT_RING( 0 ); + if (j & 1) + OUT_RING( 0 ); - ADVANCE_LP_RING(); + ADVANCE_LP_RING(); + } + else + printk("rejected packet %x\n", code[0]); } static void i830EmitTexBlendVerified( drm_device_t *dev, - volatile unsigned int *code, - volatile unsigned int num) + unsigned int *code, + unsigned int num) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( num ); + if (!num) + return; + + BEGIN_LP_RING( num + 1 ); for ( i = 0 ; i < num ; i++ ) { tmp = code[i]; @@ -616,6 +627,8 @@ int i; RING_LOCALS; + return; /* Is this right ? -- Arjan */ + BEGIN_LP_RING( 258 ); if(is_shared == 1) { @@ -629,44 +642,43 @@ OUT_RING(palette[i]); } OUT_RING(0); + /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop! + */ } /* Need to do some additional checking when setting the dest buffer. */ static void i830EmitDestVerified( drm_device_t *dev, - volatile unsigned int *code ) + unsigned int *code ) { drm_i830_private_t *dev_priv = dev->dev_private; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 6 ); + BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 10 ); + tmp = code[I830_DESTREG_CBUFADDR]; - if (tmp == dev_priv->front_di1) { - /* Don't use fence when front buffer rendering */ - OUT_RING( CMD_OP_DESTBUFFER_INFO ); - OUT_RING( BUF_3D_ID_COLOR_BACK | - BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) ); - OUT_RING( tmp ); + if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { + if (((int)outring) & 8) { + OUT_RING(0); + OUT_RING(0); + } OUT_RING( CMD_OP_DESTBUFFER_INFO ); - OUT_RING( BUF_3D_ID_DEPTH | - BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); - OUT_RING( dev_priv->zi1 ); - } else if(tmp == dev_priv->back_di1) { - OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) | BUF_3D_USE_FENCE); OUT_RING( tmp ); + OUT_RING( 0 ); OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE | BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); OUT_RING( dev_priv->zi1 ); + OUT_RING( 0 ); } else { - DRM_DEBUG("bad di1 %x (allow %x or %x)\n", + DRM_ERROR("bad di1 %x (allow %x or %x)\n", tmp, dev_priv->front_di1, dev_priv->back_di1); } @@ -688,25 +700,39 @@ if((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) { OUT_RING( tmp ); } else { - DRM_DEBUG("bad scissor enable\n"); + DRM_ERROR("bad scissor enable\n"); OUT_RING( 0 ); } - OUT_RING( code[I830_DESTREG_SENABLE] ); - OUT_RING( GFX_OP_SCISSOR_RECT ); OUT_RING( code[I830_DESTREG_SR1] ); OUT_RING( code[I830_DESTREG_SR2] ); + OUT_RING( 0 ); ADVANCE_LP_RING(); } +static void i830EmitStippleVerified( drm_device_t *dev, + unsigned int *code ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + BEGIN_LP_RING( 2 ); + OUT_RING( GFX_OP_STIPPLE ); + OUT_RING( code[1] ); + ADVANCE_LP_RING(); +} + + static void i830EmitState( drm_device_t *dev ) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; + DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); + if (dirty & I830_UPLOAD_BUFFERS) { i830EmitDestVerified( dev, sarea_priv->BufferState ); sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS; @@ -740,17 +766,154 @@ } if (dirty & 
I830_UPLOAD_TEX_PALETTE_SHARED) { - i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); + i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); + } else { + if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { + i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); + } + if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { + i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); + } + + /* 1.3: + */ +#if 0 + if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) { + i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); + } + if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) { + i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); + } +#endif + } + + /* 1.3: + */ + if (dirty & I830_UPLOAD_STIPPLE) { + i830EmitStippleVerified( dev, + sarea_priv->StippleState); + sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE; + } + + if (dirty & I830_UPLOAD_TEX2) { + i830EmitTexVerified( dev, sarea_priv->TexState2 ); + sarea_priv->dirty &= ~I830_UPLOAD_TEX2; + } + + if (dirty & I830_UPLOAD_TEX3) { + i830EmitTexVerified( dev, sarea_priv->TexState3 ); + sarea_priv->dirty &= ~I830_UPLOAD_TEX3; + } + + + if (dirty & I830_UPLOAD_TEXBLEND2) { + i830EmitTexBlendVerified( + dev, + sarea_priv->TexBlendState2, + sarea_priv->TexBlendStateWordsUsed2); + + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2; + } + + if (dirty & I830_UPLOAD_TEXBLEND3) { + i830EmitTexBlendVerified( + dev, + sarea_priv->TexBlendState3, + sarea_priv->TexBlendStateWordsUsed3); + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3; + } +} + +/* ================================================================ + * Performance monitoring functions + */ + +static void i830_fill_box( drm_device_t *dev, + int x, int y, int w, int h, + int r, int g, int b ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + u32 color; + unsigned int BR13, CMD; + RING_LOCALS; + + BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1<<24); + CMD = XY_COLOR_BLT_CMD; + x += dev_priv->sarea_priv->boxes[0].x1; + y += dev_priv->sarea_priv->boxes[0].y1; + + if (dev_priv->cpp == 4) { + BR13 |= (1<<25); + CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB); + color = (((0xff) << 24) | (r << 16) | (g << 8) | b); } else { - if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { - i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); - sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); - } - if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { - i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); - sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); - } + color = (((r & 0xf8) << 8) | + ((g & 0xfc) << 3) | + ((b & 0xf8) >> 3)); + } + + BEGIN_LP_RING( 6 ); + OUT_RING( CMD ); + OUT_RING( BR13 ); + OUT_RING( (y << 16) | x ); + OUT_RING( ((y+h) << 16) | (x+w) ); + + if ( dev_priv->current_page == 1 ) { + OUT_RING( dev_priv->front_offset ); + } else { + OUT_RING( dev_priv->back_offset ); + } + + OUT_RING( color ); + ADVANCE_LP_RING(); +} + +static void i830_cp_performance_boxes( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + /* Purple box for page flipping + */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP ) + i830_fill_box( dev, 4, 4, 8, 8, 255, 0, 255 ); + + /* Red box if we have to wait for idle at any point + */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT ) + i830_fill_box( dev, 16, 4, 8, 8, 255, 0, 0 ); + + /* Blue box: lost context? 
+ */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT ) + i830_fill_box( dev, 28, 4, 8, 8, 0, 0, 255 ); + + /* Yellow box for texture swaps + */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD ) + i830_fill_box( dev, 40, 4, 8, 8, 255, 255, 0 ); + + /* Green box if hardware never idles (as far as we can tell) + */ + if ( !(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY) ) + i830_fill_box( dev, 64, 4, 8, 8, 0, 255, 0 ); + + + /* Draw bars indicating number of buffers allocated + * (not a great measure, easily confused) + */ + if (dev_priv->dma_used) { + int bar = dev_priv->dma_used / 10240; + if (bar > 100) bar = 100; + if (bar < 1) bar = 1; + i830_fill_box( dev, 4, 16, bar, 4, 196, 128, 128 ); + dev_priv->dma_used = 0; } + + dev_priv->sarea_priv->perf_boxes = 0; } static void i830_dma_dispatch_clear( drm_device_t *dev, int flags, @@ -768,6 +931,15 @@ unsigned int BR13, CMD, D_CMD; RING_LOCALS; + + if ( dev_priv->current_page == 1 ) { + unsigned int tmp = flags; + + flags &= ~(I830_FRONT | I830_BACK); + if ( tmp & I830_FRONT ) flags |= I830_BACK; + if ( tmp & I830_BACK ) flags |= I830_FRONT; + } + i830_kernel_lost_context(dev); switch(cpp) { @@ -808,7 +980,7 @@ OUT_RING( BR13 ); OUT_RING( (pbox->y1 << 16) | pbox->x1 ); OUT_RING( (pbox->y2 << 16) | pbox->x2 ); - OUT_RING( 0 ); + OUT_RING( dev_priv->front_offset ); OUT_RING( clear_color ); ADVANCE_LP_RING(); } @@ -847,13 +1019,17 @@ drm_clip_rect_t *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = dev_priv->cpp; - int ofs = dev_priv->back_offset; int i; unsigned int CMD, BR13; RING_LOCALS; DRM_DEBUG("swapbuffers\n"); + i830_kernel_lost_context(dev); + + if (dev_priv->do_boxes) + i830_cp_performance_boxes( dev ); + switch(cpp) { case 2: BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24); @@ -870,7 +1046,6 @@ break; } - i830_kernel_lost_context(dev); if (nbox > I830_NR_SAREA_CLIPRECTS) nbox = I830_NR_SAREA_CLIPRECTS; @@ -890,23 +1065,72 @@ BEGIN_LP_RING( 8 ); OUT_RING( CMD ); OUT_RING( BR13 ); + OUT_RING( (pbox->y1 << 16) | pbox->x1 ); + OUT_RING( (pbox->y2 << 16) | pbox->x2 ); - OUT_RING( (pbox->y1 << 16) | - pbox->x1 ); - OUT_RING( (pbox->y2 << 16) | - pbox->x2 ); - - OUT_RING( 0 /* front ofs always zero */ ); - OUT_RING( (pbox->y1 << 16) | - pbox->x1 ); + if (dev_priv->current_page == 0) + OUT_RING( dev_priv->front_offset ); + else + OUT_RING( dev_priv->back_offset ); + OUT_RING( (pbox->y1 << 16) | pbox->x1 ); OUT_RING( BR13 & 0xffff ); - OUT_RING( ofs ); + + if (dev_priv->current_page == 0) + OUT_RING( dev_priv->back_offset ); + else + OUT_RING( dev_priv->front_offset ); ADVANCE_LP_RING(); } } +static void i830_dma_dispatch_flip( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n", + __FUNCTION__, + dev_priv->current_page, + dev_priv->sarea_priv->pf_current_page); + + i830_kernel_lost_context(dev); + + if (dev_priv->do_boxes) { + dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP; + i830_cp_performance_boxes( dev ); + } + + + BEGIN_LP_RING( 2 ); + OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + + BEGIN_LP_RING( 6 ); + OUT_RING( CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP ); + OUT_RING( 0 ); + if ( dev_priv->current_page == 0 ) { + OUT_RING( dev_priv->back_offset ); + dev_priv->current_page = 1; + } else { + OUT_RING( dev_priv->front_offset ); + dev_priv->current_page = 0; + } + OUT_RING(0); + ADVANCE_LP_RING(); + + + BEGIN_LP_RING( 2 ); + OUT_RING( 
MI_WAIT_FOR_EVENT | + MI_WAIT_FOR_PLANE_A_FLIP ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + + + dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; +} static void i830_dma_dispatch_vertex(drm_device_t *dev, drm_buf_t *buf, @@ -936,7 +1160,7 @@ } } - if (used > 4*1024) + if (used > 4*1023) used = 0; if (sarea_priv->dirty) @@ -953,12 +1177,19 @@ DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4); if (buf_priv->currently_mapped == I830_BUF_MAPPED) { - *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | - sarea_priv->vertex_prim | - ((used/4)-2)); + u32 *vp = buf_priv->virtual; + + vp[0] = (GFX_OP_PRIMITIVE | + sarea_priv->vertex_prim | + ((used/4)-2)); + + if (dev_priv->use_mi_batchbuffer_start) { + vp[used/4] = MI_BATCH_BUFFER_END; + used += 4; + } if (used & 4) { - *(u32 *)((u32)buf_priv->virtual + used) = 0; + vp[used/4] = 0; used += 4; } @@ -978,80 +1209,45 @@ ADVANCE_LP_RING(); } - BEGIN_LP_RING(4); + if (dev_priv->use_mi_batchbuffer_start) { + BEGIN_LP_RING(2); + OUT_RING( MI_BATCH_BUFFER_START | (2<<6) ); + OUT_RING( start | MI_BATCH_NON_SECURE ); + ADVANCE_LP_RING(); + } + else { + BEGIN_LP_RING(4); + OUT_RING( MI_BATCH_BUFFER ); + OUT_RING( start | MI_BATCH_NON_SECURE ); + OUT_RING( start + used - 4 ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + } - OUT_RING( MI_BATCH_BUFFER ); - OUT_RING( start | MI_BATCH_NON_SECURE ); - OUT_RING( start + used - 4 ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); - } while (++i < nbox); } - BEGIN_LP_RING(10); - OUT_RING( CMD_STORE_DWORD_IDX ); - OUT_RING( 20 ); - OUT_RING( dev_priv->counter ); - OUT_RING( 0 ); - if (discard) { + dev_priv->counter++; + + (void) cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, + I830_BUF_HARDWARE); + + BEGIN_LP_RING(8); + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( 20 ); + OUT_RING( dev_priv->counter ); OUT_RING( CMD_STORE_DWORD_IDX ); OUT_RING( buf_priv->my_use_idx ); OUT_RING( I830_BUF_FREE ); + OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); + ADVANCE_LP_RING(); } - - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); -} - -/* Interrupts are only for flushing */ -void i830_dma_service(int irq, void *device, struct pt_regs *regs) -{ - drm_device_t *dev = (drm_device_t *)device; - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - u16 temp; - - temp = I830_READ16(I830REG_INT_IDENTITY_R); - temp = temp & ~(0x6000); - if(temp != 0) I830_WRITE16(I830REG_INT_IDENTITY_R, - temp); /* Clear all interrupts */ - else - return; - - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); -} - -void DRM(dma_immediate_bh)(void *device) -{ - drm_device_t *dev = (drm_device_t *) device; - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); } -static inline void i830_dma_emit_flush(drm_device_t *dev) -{ - drm_i830_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - - i830_kernel_lost_context(dev); - BEGIN_LP_RING(2); - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( GFX_OP_USER_INTERRUPT ); - ADVANCE_LP_RING(); - - i830_wait_ring( dev, dev_priv->ring.Size - 8 ); - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -static inline void i830_dma_quiescent_emit(drm_device_t *dev) +void i830_dma_quiescent(drm_device_t *dev) { drm_i830_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -1062,79 +1258,27 @@ OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); - OUT_RING( GFX_OP_USER_INTERRUPT 
); + OUT_RING( 0 ); ADVANCE_LP_RING(); - i830_wait_ring( dev, dev_priv->ring.Size - 8 ); - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -void i830_dma_quiescent(drm_device_t *dev) -{ - DECLARE_WAITQUEUE(entry, current); - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - unsigned long end; - - if(dev_priv == NULL) { - return; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i830_dma_quiescent_emit(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if(time_before(end, jiffies)) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); - - return; + i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ ); } static int i830_flush_queue(drm_device_t *dev) { - DECLARE_WAITQUEUE(entry, current); - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; + drm_i830_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; - unsigned long end; - int i, ret = 0; + int i, ret = 0; + RING_LOCALS; + + i830_kernel_lost_context(dev); - if(dev_priv == NULL) { - return 0; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i830_dma_emit_flush(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if(time_before(end, jiffies)) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - ret = -EINTR; /* Can't restart */ - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); + BEGIN_LP_RING(2); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ ); for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; @@ -1146,7 +1290,7 @@ if (used == I830_BUF_HARDWARE) DRM_DEBUG("reclaimed from HARDWARE\n"); if (used == I830_BUF_CLIENT) - DRM_DEBUG("still on client HARDWARE\n"); + DRM_DEBUG("still on client\n"); } return ret; @@ -1185,8 +1329,7 @@ { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - - DRM_DEBUG("i830_flush_ioctl\n"); + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i830_flush_ioctl called without lock held\n"); return -EINVAL; @@ -1275,6 +1418,53 @@ return 0; } + + +/* Not sure why this isn't set all the time: + */ +static void i830_do_init_pageflip( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; +} + +int i830_do_cleanup_pageflip( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + if (dev_priv->current_page != 0) + i830_dma_dispatch_flip( dev ); + + dev_priv->page_flipping = 0; + return 0; +} + +int i830_flip_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + 
DRM_ERROR("i830_flip_buf called without lock held\n"); + return -EINVAL; + } + + if (!dev_priv->page_flipping) + i830_do_init_pageflip( dev ); + + i830_dma_dispatch_flip( dev ); + return 0; +} + int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { @@ -1324,46 +1514,80 @@ return retcode; } -int i830_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, +int i830_copybuf(struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + /* Never copy - 2.4.x doesn't need it */ + return 0; +} + +int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { + return 0; +} + + + +int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) +{ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - drm_i830_copy_t d; - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - u32 *hw_status = (u32 *)dev_priv->hw_status_page; - drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) - dev_priv->sarea_priv; - drm_buf_t *buf; - drm_i830_buf_priv_t *buf_priv; - drm_device_dma_t *dma = dev->dma; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_getparam_t param; + int value; - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { - DRM_ERROR("i830_dma called without lock held\n"); + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); return -EINVAL; } - - if (copy_from_user(&d, (drm_i830_copy_t *)arg, sizeof(d))) - return -EFAULT; - - if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[ d.idx ]; - buf_priv = buf->dev_private; - if (buf_priv->currently_mapped != I830_BUF_MAPPED) return -EPERM; - - if(d.used < 0 || d.used > buf->total) return -EINVAL; - if (copy_from_user(buf_priv->virtual, d.address, d.used)) + if (copy_from_user(¶m, (drm_i830_getparam_t *)arg, sizeof(param) )) return -EFAULT; - sarea_priv->last_dispatch = (int) hw_status[5]; + switch( param.param ) { + case I830_PARAM_IRQ_ACTIVE: + value = dev->irq ? 1 : 0; + break; + default: + return -EINVAL; + } + if ( copy_to_user( param.value, &value, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + return 0; } -int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) + +int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) { - if(VM_DONTCOPY == 0) return 1; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_setparam_t param; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + if (copy_from_user(¶m, (drm_i830_setparam_t *)arg, sizeof(param) )) + return -EFAULT; + + switch( param.param ) { + case I830_SETPARAM_USE_MI_BATCHBUFFER_START: + dev_priv->use_mi_batchbuffer_start = param.value; + break; + default: + return -EINVAL; + } + return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i830_drm.h linux.22-ac2/drivers/char/drm/i830_drm.h --- linux.vanilla/drivers/char/drm/i830_drm.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/i830_drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -3,6 +3,9 @@ /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver. 
+ * + * KW: Actually, you can't ever change them because doing so would + * break backwards compatibility. */ #ifndef _I830_DEFINES_ @@ -18,14 +21,12 @@ #define I830_NR_TEX_REGIONS 64 #define I830_LOG_MIN_TEX_REGION_SIZE 16 -/* if defining I830_ENABLE_4_TEXTURES, do it in i830_3d_reg.h, too */ -#if !defined(I830_ENABLE_4_TEXTURES) +/* KW: These aren't correct but someone set them to two and then + * released the module. Now we can't change them as doing so would + * break backwards compatibility. + */ #define I830_TEXTURE_COUNT 2 -#define I830_TEXBLEND_COUNT 2 /* always same as TEXTURE_COUNT? */ -#else /* defined(I830_ENABLE_4_TEXTURES) */ -#define I830_TEXTURE_COUNT 4 -#define I830_TEXBLEND_COUNT 4 /* always same as TEXTURE_COUNT? */ -#endif /* I830_ENABLE_4_TEXTURES */ +#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT #define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */ @@ -57,6 +58,7 @@ #define I830_UPLOAD_TEXBLEND_MASK 0xf00000 #define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n)) #define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000 +#define I830_UPLOAD_STIPPLE 0x8000000 /* Indices into buf.Setup where various bits of state are mirrored per * context and per buffer. These can be fired at the card as a unit, @@ -73,7 +75,6 @@ */ #define I830_DESTREG_CBUFADDR 0 -/* Invarient */ #define I830_DESTREG_DBUFADDR 1 #define I830_DESTREG_DV0 2 #define I830_DESTREG_DV1 3 @@ -109,6 +110,13 @@ #define I830_CTXREG_MCSB1 16 #define I830_CTX_SETUP_SIZE 17 +/* 1.3: Stipple state + */ +#define I830_STPREG_ST0 0 +#define I830_STPREG_ST1 1 +#define I830_STP_SETUP_SIZE 2 + + /* Texture state (per tex unit) */ @@ -124,6 +132,18 @@ #define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */ #define I830_TEX_SETUP_SIZE 10 +#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */ +#define I830_TEXREG_TM0S0 1 +#define I830_TEXREG_TM0S1 2 +#define I830_TEXREG_TM0S2 3 +#define I830_TEXREG_TM0S3 4 +#define I830_TEXREG_TM0S4 5 +#define I830_TEXREG_NOP0 6 /* noop */ +#define I830_TEXREG_NOP1 7 /* noop */ +#define I830_TEXREG_NOP2 8 /* noop */ +#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */ +#define __I830_TEX_SETUP_SIZE 10 + #define I830_FRONT 0x1 #define I830_BACK 0x2 #define I830_DEPTH 0x4 @@ -199,8 +219,53 @@ int ctxOwner; /* last context to upload state */ int vertex_prim; + + int pf_enabled; /* is pageflipping allowed? */ + int pf_active; + int pf_current_page; /* which buffer is being displayed? */ + + int perf_boxes; /* performance boxes to be displayed */ + + /* Here's the state for texunits 2,3: + */ + unsigned int TexState2[I830_TEX_SETUP_SIZE]; + unsigned int TexBlendState2[I830_TEXBLEND_SIZE]; + unsigned int TexBlendStateWordsUsed2; + + unsigned int TexState3[I830_TEX_SETUP_SIZE]; + unsigned int TexBlendState3[I830_TEXBLEND_SIZE]; + unsigned int TexBlendStateWordsUsed3; + + unsigned int StippleState[I830_STP_SETUP_SIZE]; } drm_i830_sarea_t; +/* Flags for perf_boxes + */ +#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */ +#define I830_BOX_FLIP 0x2 /* populated by kernel */ +#define I830_BOX_WAIT 0x4 /* populated by kernel & client */ +#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */ +#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */ + + +/* I830 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. 
+ */ +#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t) +#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t) +#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t) +#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43) +#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44) +#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t) +#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46) +#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t) +#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48) +#define DRM_IOCTL_I830_FLIP DRM_IO ( 0x49) +#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(0x4a, drm_i830_irq_emit_t) +#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( 0x4b, drm_i830_irq_wait_t) +#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(0x4c, drm_i830_getparam_t) +#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(0x4d, drm_i830_setparam_t) + typedef struct _drm_i830_clear { int clear_color; int clear_depth; @@ -235,4 +300,36 @@ int granted; } drm_i830_dma_t; + +/* 1.3: Userspace can request & wait on irq's: + */ +typedef struct drm_i830_irq_emit { + int *irq_seq; +} drm_i830_irq_emit_t; + +typedef struct drm_i830_irq_wait { + int irq_seq; +} drm_i830_irq_wait_t; + + +/* 1.3: New ioctl to query kernel params: + */ +#define I830_PARAM_IRQ_ACTIVE 1 + +typedef struct drm_i830_getparam { + int param; + int *value; +} drm_i830_getparam_t; + + +/* 1.3: New ioctl to set kernel params: + */ +#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1 + +typedef struct drm_i830_setparam { + int param; + int value; +} drm_i830_setparam_t; + + #endif /* _I830_DRM_H_ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i830_drv.c linux.22-ac2/drivers/char/drm/i830_drv.c --- linux.vanilla/drivers/char/drm/i830_drv.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/i830_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -29,41 +29,16 @@ * Jeff Hartmann * Gareth Hughes * Abraham vd Merwe + * Keith Whitwell */ #include #include "i830.h" #include "drmP.h" +#include "drm.h" +#include "i830_drm.h" #include "i830_drv.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." - -#define DRIVER_NAME "i830" -#define DRIVER_DESC "Intel 830M" -#define DRIVER_DATE "20011004" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)] = { i830_dma_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)] = { i830_dma_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)] = { i830_clear_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)] = { i830_getage, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)] = { i830_getbuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)] = { i830_swap_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)] = { i830_copybuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)] = { i830_docopy, 1, 0 }, - -#define __HAVE_COUNTERS 4 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#define __HAVE_COUNTER9 _DRM_STAT_DMA - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -72,25 +47,6 @@ #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. 
If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init i830_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", i830_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i830_drv.h linux.22-ac2/drivers/char/drm/i830_drv.h --- linux.vanilla/drivers/char/drm/i830_drv.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/i830_drv.h 2003-07-28 21:09:43.000000000 +0100 @@ -61,24 +61,36 @@ drm_i830_sarea_t *sarea_priv; drm_i830_ring_buffer_t ring; - u8 *hw_status_page; + unsigned long hw_status_page; unsigned long counter; - - dma_addr_t dma_status_page; - atomic_t flush_done; - wait_queue_head_t flush_queue; /* Processes waiting until flush */ + dma_addr_t dma_status_page; + drm_buf_t *mmap_buffer; u32 front_di1, back_di1, zi1; int back_offset; int depth_offset; + int front_offset; int w, h; int pitch; int back_pitch; int depth_pitch; unsigned int cpp; + + int do_boxes; + int dma_used; + + int current_page; + int page_flipping; + + wait_queue_head_t irq_queue; + atomic_t irq_received; + atomic_t irq_emitted; + + int use_mi_batchbuffer_start; + } drm_i830_private_t; /* i830_dma.c */ @@ -109,24 +121,81 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -#define I830_VERBOSE 0 +extern int i830_flip_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +extern int i830_getparam( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + +extern int i830_setparam( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + +/* i830_irq.c */ +extern int i830_irq_emit( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int i830_irq_wait( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int i830_wait_irq(drm_device_t *dev, int irq_nr); +extern int i830_emit_irq(drm_device_t *dev); + #define I830_BASE(reg) ((unsigned long) \ dev_priv->mmio_map->handle) #define I830_ADDR(reg) (I830_BASE(reg) + reg) -#define I830_DEREF(reg) *(__volatile__ int *)I830_ADDR(reg) -#define I830_READ(reg) I830_DEREF(reg) -#define I830_WRITE(reg,val) do { I830_DEREF(reg) = val; } while (0) +#define I830_DEREF(reg) *(__volatile__ unsigned int *)I830_ADDR(reg) +#define I830_READ(reg) readl((volatile u32 *)I830_ADDR(reg)) +#define I830_WRITE(reg,val) writel(val, (volatile u32 *)I830_ADDR(reg)) #define I830_DEREF16(reg) *(__volatile__ u16 *)I830_ADDR(reg) #define I830_READ16(reg) I830_DEREF16(reg) #define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0) + + +#define I830_VERBOSE 0 + +#define RING_LOCALS unsigned int outring, ringmask, outcount; \ + volatile char *virt; + +#define BEGIN_LP_RING(n) do { \ + if (I830_VERBOSE) \ + printk("BEGIN_LP_RING(%d) in %s\n", \ + n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i830_wait_ring(dev, n*4, __FUNCTION__); \ + outcount = 0; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ +} while (0) + + +#define OUT_RING(n) do { \ + if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outcount++; \ + outring += 4; \ + outring &= ringmask; \ +} while 
(0) + +#define ADVANCE_LP_RING() do { \ + if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \ + dev_priv->ring.tail = outring; \ + dev_priv->ring.space -= outcount * 4; \ + I830_WRITE(LP_RING + RING_TAIL, outring); \ +} while(0) + +extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller); + + #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) #define CMD_REPORT_HEAD (7<<23) #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) #define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) +#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16)) +#define LOAD_TEXTURE_MAP0 (1<<11) + #define INST_PARSER_CLIENT 0x00000000 #define INST_OP_FLUSH 0x02000000 #define INST_FLUSH_MAP_CACHE 0x00000001 @@ -142,18 +211,21 @@ #define I830REG_INT_MASK_R 0x020a8 #define I830REG_INT_ENABLE_R 0x020a0 +#define I830_IRQ_RESERVED ((1<<13)|(3<<2)) + + #define LP_RING 0x2030 #define HP_RING 0x2040 #define RING_TAIL 0x00 -#define TAIL_ADDR 0x000FFFF8 +#define TAIL_ADDR 0x001FFFF8 #define RING_HEAD 0x04 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC #define RING_START 0x08 -#define START_ADDR 0x00FFFFF8 +#define START_ADDR 0x0xFFFFF000 #define RING_LEN 0x0C -#define RING_NR_PAGES 0x000FF000 +#define RING_NR_PAGES 0x001FF000 #define RING_REPORT_MASK 0x00000006 #define RING_REPORT_64K 0x00000002 #define RING_REPORT_128K 0x00000004 @@ -184,6 +256,12 @@ #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) +#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) +#define ASYNC_FLIP (1<<22) + +#define CMD_3D (0x3<<29) +#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16)) +#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16)) #define BR00_BITBLT_CLIENT 0x40000000 #define BR00_OP_COLOR_BLT 0x10000000 @@ -208,8 +286,15 @@ #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) #define MI_BATCH_BUFFER ((0x30<<23)|1) +#define MI_BATCH_BUFFER_START (0x31<<23) +#define MI_BATCH_BUFFER_END (0xA<<23) #define MI_BATCH_NON_SECURE (1) +#define MI_WAIT_FOR_EVENT ((0x3<<23)) +#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) +#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) + +#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23)) #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i830.h linux.22-ac2/drivers/char/drm/i830.h --- linux.vanilla/drivers/char/drm/i830.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/i830.h 2003-07-28 21:09:43.000000000 +0100 @@ -41,6 +41,48 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "VA Linux Systems Inc." + +#define DRIVER_NAME "i830" +#define DRIVER_DESC "Intel 830M" +#define DRIVER_DATE "20021108" + +/* Interface history: + * + * 1.1: Original. + * 1.2: ? + * 1.3: New irq emit/wait ioctls. + * New pageflip ioctl. + * New getparam ioctl. + * State for texunits 3&4 in sarea. + * New (alternative) layout for texture state. 
+ */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 3 +#define DRIVER_PATCHLEVEL 2 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)] = { i830_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)] = { i830_dma_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)] = { i830_clear_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)] = { i830_getage, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)] = { i830_getbuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)] = { i830_swap_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)] = { i830_copybuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)] = { i830_docopy, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_FLIP)] = { i830_flip_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_IRQ_EMIT)] = { i830_irq_emit, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_IRQ_WAIT)] = { i830_irq_wait, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_GETPARAM)] = { i830_getparam, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_SETPARAM)] = { i830_setparam, 1, 0 } + +#define __HAVE_COUNTERS 4 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY +#define __HAVE_COUNTER9 _DRM_STAT_DMA + /* Driver customization: */ #define __HAVE_RELEASE 1 @@ -60,51 +102,50 @@ i830_dma_quiescent( dev ); \ } while (0) + +/* Driver will work either way: IRQ's save cpu time when waiting for + * the card, but are subject to subtle interactions between bios, + * hardware and the driver. + */ +#define USE_IRQS 0 + + +#if USE_IRQS #define __HAVE_DMA_IRQ 1 -#define __HAVE_DMA_IRQ_BH 1 -#define __HAVE_SHARED_IRQ 1 -#define DRIVER_PREINSTALL() do { \ - drm_i830_private_t *dev_priv = \ - (drm_i830_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I830_READ16( I830REG_HWSTAM ); \ - tmp = tmp & 0x6000; \ - I830_WRITE16( I830REG_HWSTAM, tmp ); \ - \ - tmp = I830_READ16( I830REG_INT_MASK_R ); \ - tmp = tmp & 0x6000; /* Unmask interrupts */ \ - I830_WRITE16( I830REG_INT_MASK_R, tmp ); \ - tmp = I830_READ16( I830REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \ -} while (0) +#define __HAVE_SHARED_IRQ 1 -#define DRIVER_POSTINSTALL() do { \ - drm_i830_private_t *dev_priv = \ +#define DRIVER_PREINSTALL() do { \ + drm_i830_private_t *dev_priv = \ (drm_i830_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I830_READ16( I830REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; \ - tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \ - I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \ + \ + I830_WRITE16( I830REG_HWSTAM, 0xffff ); \ + I830_WRITE16( I830REG_INT_MASK_R, 0x0 ); \ + I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 ); \ } while (0) -#define DRIVER_UNINSTALL() do { \ - drm_i830_private_t *dev_priv = \ - (drm_i830_private_t *)dev->dev_private; \ - u16 tmp; \ - if ( dev_priv ) { \ - tmp = I830_READ16( I830REG_INT_IDENTITY_R ); \ - tmp = tmp & ~(0x6000); /* Clear all interrupts */ \ - if ( tmp != 0 ) \ - I830_WRITE16( I830REG_INT_IDENTITY_R, tmp ); \ - \ - tmp = I830_READ16( I830REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \ - } \ + +#define DRIVER_POSTINSTALL() do { \ + drm_i830_private_t *dev_priv = \ + (drm_i830_private_t *)dev->dev_private; \ + I830_WRITE16( I830REG_INT_ENABLE_R, 0x2 ); \ + atomic_set(&dev_priv->irq_received, 0); \ + atomic_set(&dev_priv->irq_emitted, 0); \ + init_waitqueue_head(&dev_priv->irq_queue); \ } while 
(0) + +/* This gets called too late to be useful: dev_priv has already been + * freed. + */ +#define DRIVER_UNINSTALL() do { \ +} while (0) + +#else +#define __HAVE_DMA_IRQ 0 +#endif + + + /* Buffer customization: */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/i830_irq.c linux.22-ac2/drivers/char/drm/i830_irq.c --- linux.vanilla/drivers/char/drm/i830_irq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/i830_irq.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,178 @@ +/* i830_dma.c -- DMA support for the I830 -*- linux-c -*- + * + * Copyright 2002 Tungsten Graphics, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Keith Whitwell + * + */ + + +#include "i830.h" +#include "drmP.h" +#include "drm.h" +#include "i830_drm.h" +#include "i830_drv.h" +#include /* For task queue support */ +#include + + +void DRM(dma_service)(int irq, void *device, struct pt_regs *regs) +{ + drm_device_t *dev = (drm_device_t *)device; + drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; + u16 temp; + + temp = I830_READ16(I830REG_INT_IDENTITY_R); + printk("%s: %x\n", __FUNCTION__, temp); + + if(temp == 0) + return; + + I830_WRITE16(I830REG_INT_IDENTITY_R, temp); + + if (temp & 2) { + atomic_inc(&dev_priv->irq_received); + wake_up_interruptible(&dev_priv->irq_queue); + } +} + + +int i830_emit_irq(drm_device_t *dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG("%s\n", __FUNCTION__); + + atomic_inc(&dev_priv->irq_emitted); + + BEGIN_LP_RING(2); + OUT_RING( 0 ); + OUT_RING( GFX_OP_USER_INTERRUPT ); + ADVANCE_LP_RING(); + + return atomic_read(&dev_priv->irq_emitted); +} + + +int i830_wait_irq(drm_device_t *dev, int irq_nr) +{ + drm_i830_private_t *dev_priv = + (drm_i830_private_t *)dev->dev_private; + DECLARE_WAITQUEUE(entry, current); + unsigned long end = jiffies + HZ*3; + int ret = 0; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if (atomic_read(&dev_priv->irq_received) >= irq_nr) + return 0; + + dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; + + add_wait_queue(&dev_priv->irq_queue, &entry); + + for (;;) { + current->state = TASK_INTERRUPTIBLE; + if (atomic_read(&dev_priv->irq_received) >= irq_nr) + break; + if (time_after(jiffies, end)) { + DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n", + I830_READ16( I830REG_INT_IDENTITY_R ), + I830_READ16( I830REG_INT_MASK_R ), + I830_READ16( I830REG_INT_ENABLE_R ), + I830_READ16( I830REG_HWSTAM )); + + ret = -EBUSY; /* Lockup? Missed irq? */ + break; + } + schedule_timeout(HZ*3); + if (signal_pending(current)) { + ret = -EINTR; + break; + } + } + + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->irq_queue, &entry); + return ret; +} + + +/* Needs the lock as it touches the ring. + */ +int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_irq_emit_t emit; + int result; + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i830_irq_emit called without lock held\n"); + return -EINVAL; + } + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + if (copy_from_user( &emit, (drm_i830_irq_emit_t *)arg, sizeof(emit) )) + return -EFAULT; + + result = i830_emit_irq( dev ); + + if ( copy_to_user( emit.irq_seq, &result, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + + return 0; +} + + +/* Doesn't need the hardware lock. 
+ */ +int i830_irq_wait( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_irq_wait_t irqwait; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + if (copy_from_user( &irqwait, (drm_i830_irq_wait_t *)arg, + sizeof(irqwait) )) + return -EFAULT; + + return i830_wait_irq( dev, irqwait.irq_seq ); +} + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/Makefile linux.22-ac2/drivers/char/drm/Makefile --- linux.vanilla/drivers/char/drm/Makefile 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/Makefile 2003-07-28 21:09:43.000000000 +0100 @@ -3,18 +3,20 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. O_TARGET := drm.o -list-multi := gamma.o tdfx.o r128.o mga.o i810.o i830.o radeon.o ffb.o sis.o +list-multi := gamma.o tdfx.o r128.o mga.o i810.o i830.o radeon.o ffb.o sis.o savage.o gamma-objs := gamma_drv.o gamma_dma.o tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o i810-objs := i810_drv.o i810_dma.o -i830-objs := i830_drv.o i830_dma.o - -radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o +i830-objs := i830_drv.o i830_dma.o i830_irq.o +savage-objs := savage_dma.o savage_drv.o savage_state.o +radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o ffb-objs := ffb_drv.o ffb_context.o sis-objs := sis_drv.o sis_ds.o sis_mm.o +via-objs := via_drv.o via_ds.o via_map.o via_mm.o + obj-$(CONFIG_DRM_GAMMA) += gamma.o obj-$(CONFIG_DRM_TDFX) += tdfx.o @@ -25,6 +27,8 @@ obj-$(CONFIG_DRM_I830) += i830.o obj-$(CONFIG_DRM_FFB) += ffb.o obj-$(CONFIG_DRM_SIS) += sis.o +obj-$(CONFIG_DRM_S3) += savage.o +obj-$(CONFIG_DRM_VIA) += via.o include $(TOPDIR)/Rules.make @@ -54,3 +58,9 @@ sis.o: $(sis-objs) $(lib) $(LD) -r -o $@ $(sis-objs) $(lib) + +savage.o: $(savage-objs) $(lib) + $(LD) -r -o $@ $(savage-objs) $(lib) + +via.o: $(via-objs) $(lib) + $(LD) -r -o $@ $(via-objs) $(lib) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/mga_dma.c linux.22-ac2/drivers/char/drm/mga_dma.c --- linux.vanilla/drivers/char/drm/mga_dma.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/mga_dma.c 2003-07-28 21:09:43.000000000 +0100 @@ -35,10 +35,11 @@ #include "mga.h" #include "drmP.h" +#include "drm.h" +#include "mga_drm.h" #include "mga_drv.h" - -#include /* For task queue support */ -#include +#include +#include "drm_os_linux.h" #define MGA_DEFAULT_USEC_TIMEOUT 10000 #define MGA_FREELIST_DEBUG 0 @@ -52,7 +53,7 @@ { u32 status = 0; int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK; @@ -74,7 +75,7 @@ { u32 status = 0; int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { status = MGA_READ( MGA_STATUS ) & MGA_DMA_IDLE_MASK; @@ -93,7 +94,7 @@ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_primary_buffer_t *primary = &dev_priv->prim; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* The primary DMA stream should look like new right about now. 
*/ @@ -114,7 +115,7 @@ int mga_do_engine_reset( drm_mga_private_t *dev_priv ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* Okay, so we've completely screwed up and locked the engine. * How about we clean up after ourselves? @@ -160,8 +161,8 @@ u32 head, tail; u32 status = 0; int i; - DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DMA_LOCALS; + DRM_DEBUG( "\n" ); /* We need to wait so that we can do an safe flush */ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { @@ -207,7 +208,7 @@ mga_flush_write_combine(); MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); - DRM_DEBUG( "%s: done.\n", __FUNCTION__ ); + DRM_DEBUG( "done.\n" ); } void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ) @@ -215,7 +216,7 @@ drm_mga_primary_buffer_t *primary = &dev_priv->prim; u32 head, tail; DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_DMA_WRAP(); @@ -250,7 +251,7 @@ MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); set_bit( 0, &primary->wrapped ); - DRM_DEBUG( "%s: done.\n", __FUNCTION__ ); + DRM_DEBUG( "done.\n" ); } void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv ) @@ -258,7 +259,7 @@ drm_mga_primary_buffer_t *primary = &dev_priv->prim; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; u32 head = dev_priv->primary->offset; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); sarea_priv->last_wrap++; DRM_DEBUG( " wrap = %d\n", sarea_priv->last_wrap ); @@ -267,7 +268,7 @@ MGA_WRITE( MGA_PRIMADDRESS, head | MGA_DMA_GENERAL ); clear_bit( 0, &primary->wrapped ); - DRM_DEBUG( "%s: done.\n", __FUNCTION__ ); + DRM_DEBUG( "done.\n" ); } @@ -307,8 +308,7 @@ drm_mga_buf_priv_t *buf_priv; drm_mga_freelist_t *entry; int i; - DRM_DEBUG( "%s: count=%d\n", - __FUNCTION__, dma->buf_count ); + DRM_DEBUG( "count=%d\n", dma->buf_count ); dev_priv->head = DRM(alloc)( sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER ); @@ -354,7 +354,7 @@ drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *entry; drm_mga_freelist_t *next; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); entry = dev_priv->head; while ( entry ) { @@ -392,7 +392,7 @@ drm_mga_freelist_t *prev; drm_mga_freelist_t *tail = dev_priv->tail; u32 head, wrap; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); head = MGA_READ( MGA_PRIMADDRESS ); wrap = dev_priv->sarea_priv->last_wrap; @@ -424,8 +424,7 @@ drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_freelist_t *head, *entry, *prev; - DRM_DEBUG( "%s: age=0x%06lx wrap=%d\n", - __FUNCTION__, + DRM_DEBUG( "age=0x%06lx wrap=%d\n", buf_priv->list_entry->age.head - dev_priv->primary->offset, buf_priv->list_entry->age.wrap ); @@ -458,9 +457,8 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) { drm_mga_private_t *dev_priv; - struct list_head *list; int ret; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv = DRM(alloc)( sizeof(drm_mga_private_t), DRM_MEM_DRIVER ); if ( !dev_priv ) @@ -494,15 +492,8 @@ dev_priv->texture_offset = init->texture_offset[0]; dev_priv->texture_size = init->texture_size[0]; - list_for_each( list, &dev->maplist->head ) { - drm_map_list_t *entry = (drm_map_list_t *)list; - if ( entry->map && - entry->map->type == _DRM_SHM && - (entry->map->flags & _DRM_CONTAINS_LOCK) ) { - dev_priv->sarea = entry->map; - break; - } - } + DRM_GETSAREA(); + if(!dev_priv->sarea) { DRM_ERROR( "failed to find sarea!\n" ); /* Assign dev_private so we can do cleanup. 
*/ @@ -626,8 +617,6 @@ dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE; - spin_lock_init( &dev_priv->prim.list_lock ); - dev_priv->prim.status[0] = dev_priv->primary->offset; dev_priv->prim.status[1] = 0; @@ -650,7 +639,7 @@ int mga_do_cleanup_dma( drm_device_t *dev ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); if ( dev->dev_private ) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -725,7 +714,7 @@ #if MGA_DMA_DEBUG int ret = mga_do_wait_for_idle( dev_priv ); if ( ret < 0 ) - DRM_INFO( __FUNCTION__": -EBUSY\n" ); + DRM_INFO( "%s: -EBUSY\n", __FUNCTION__ ); return ret; #else return mga_do_wait_for_idle( dev_priv ); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/mga_drm.h linux.22-ac2/drivers/char/drm/mga_drm.h --- linux.vanilla/drivers/char/drm/mga_drm.h 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/mga_drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -225,6 +225,20 @@ /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmMga.h) */ + +/* MGA specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) +#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t) +#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42) +#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43) +#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t) +#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t) +#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) +#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t) +#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t) + typedef struct _drm_mga_warp_index { int installed; unsigned long phys_addr; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/mga_drv.c linux.22-ac2/drivers/char/drm/mga_drv.c --- linux.vanilla/drivers/char/drm/mga_drv.c 2001-08-08 17:42:15.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/mga_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -32,37 +32,9 @@ #include #include "mga.h" #include "drmP.h" +#include "drm.h" +#include "mga_drm.h" #include "mga_drv.h" - -#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
- -#define DRIVER_NAME "mga" -#define DRIVER_DESC "Matrox G200/G400" -#define DRIVER_DATE "20010321" - -#define DRIVER_MAJOR 3 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 2 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma_buffers, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_RESET)] = { mga_dma_reset, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, - - -#define __HAVE_COUNTERS 3 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -70,27 +42,6 @@ #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" - -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init mga_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", mga_options ); -#endif - - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/mga_drv.h linux.22-ac2/drivers/char/drm/mga_drv.h --- linux.vanilla/drivers/char/drm/mga_drv.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/mga_drv.h 2003-07-28 21:09:43.000000000 +0100 @@ -46,8 +46,6 @@ u32 last_wrap; u32 high_mark; - - spinlock_t list_lock; } drm_mga_primary_buffer_t; typedef struct drm_mga_freelist { @@ -257,7 +255,7 @@ #define BEGIN_DMA_WRAP() \ do { \ if ( MGA_VERBOSE ) { \ - DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \ + DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \ DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \ } \ prim = dev_priv->prim.start; \ @@ -276,7 +274,7 @@ #define FLUSH_DMA() \ do { \ if ( 0 ) { \ - DRM_INFO( "%s:\n" , __FUNCTION__); \ + DRM_INFO( "%s:\n", __FUNCTION__ ); \ DRM_INFO( " tail=0x%06x head=0x%06lx\n", \ dev_priv->prim.tail, \ MGA_READ( MGA_PRIMADDRESS ) - \ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/mga.h linux.22-ac2/drivers/char/drm/mga.h --- linux.vanilla/drivers/char/drm/mga.h 2001-08-08 17:42:15.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/mga.h 2003-07-28 21:09:43.000000000 +0100 @@ -41,6 +41,33 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
+ +#define DRIVER_NAME "mga" +#define DRIVER_DESC "Matrox G200/G400" +#define DRIVER_DATE "20010321" + +#define DRIVER_MAJOR 3 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 2 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma_buffers, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_RESET)] = { mga_dma_reset, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, + +#define __HAVE_COUNTERS 3 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY + /* Driver customization: */ #define DRIVER_PRETAKEDOWN() do { \ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/mga_state.c linux.22-ac2/drivers/char/drm/mga_state.c --- linux.vanilla/drivers/char/drm/mga_state.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/mga_state.c 2003-07-28 21:09:43.000000000 +0100 @@ -34,8 +34,9 @@ #include "mga.h" #include "drmP.h" -#include "mga_drv.h" #include "drm.h" +#include "mga_drm.h" +#include "mga_drv.h" /* ================================================================ @@ -512,7 +513,7 @@ int nbox = sarea_priv->nbox; int i; DMA_LOCALS; - DRM_DEBUG("%s:\n" , __FUNCTION__); + DRM_DEBUG( "\n" ); BEGIN_DMA( 1 ); @@ -606,7 +607,7 @@ int nbox = sarea_priv->nbox; int i; DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); sarea_priv->last_frame.head = dev_priv->prim.tail; sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap; @@ -760,8 +761,7 @@ u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM; u32 y2; DMA_LOCALS; - DRM_DEBUG( "%s: buf=%d used=%d\n", - __FUNCTION__, buf->idx, buf->used ); + DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used ); y2 = length / 64; @@ -815,7 +815,7 @@ int nbox = sarea_priv->nbox; u32 scandir = 0, i; DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_DMA( 4 + nbox ); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/mga_warp.c linux.22-ac2/drivers/char/drm/mga_warp.c --- linux.vanilla/drivers/char/drm/mga_warp.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/mga_warp.c 2003-07-28 21:09:43.000000000 +0100 @@ -27,8 +27,11 @@ * Gareth Hughes */ + #include "mga.h" #include "drmP.h" +#include "drm.h" +#include "mga_drm.h" #include "mga_drv.h" #include "mga_ucode.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/r128_cce.c linux.22-ac2/drivers/char/drm/r128_cce.c --- linux.vanilla/drivers/char/drm/r128_cce.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/r128_cce.c 2003-07-28 21:09:43.000000000 +0100 @@ -30,14 +30,14 @@ #include "r128.h" #include "drmP.h" +#include "drm.h" +#include "r128_drm.h" #include "r128_drv.h" - -#include /* For task queue support */ -#include +#include "drm_os_linux.h" +#include #define R128_FIFO_DEBUG 0 - /* CCE microcode (from ATI) */ static u32 r128_cce_microcode[] = { 0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0, @@ 
-83,6 +83,7 @@ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; +int r128_do_wait_for_idle( drm_r128_private_t *dev_priv ); int R128_READ_PLL(drm_device_t *dev, int addr) { @@ -131,7 +132,7 @@ } #if R128_FIFO_DEBUG - DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + DRM_ERROR( "failed!\n" ); #endif return -EBUSY; } @@ -147,7 +148,7 @@ } #if R128_FIFO_DEBUG - DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + DRM_ERROR( "failed!\n" ); #endif return -EBUSY; } @@ -157,7 +158,7 @@ int i, ret; ret = r128_do_wait_for_fifo( dev_priv, 64 ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { if ( !(R128_READ( R128_GUI_STAT ) & R128_GUI_ACTIVE) ) { @@ -168,7 +169,7 @@ } #if R128_FIFO_DEBUG - DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + DRM_ERROR( "failed!\n" ); #endif return -EBUSY; } @@ -183,7 +184,7 @@ { int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); r128_do_wait_for_idle( dev_priv ); @@ -319,7 +320,7 @@ u32 ring_start; u32 tmp; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* The manual (p. 2) says this address is in "VM space". This * means it's an offset from the start of AGP space. @@ -351,8 +352,8 @@ R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, entry->busaddr[page_ofs]); - DRM_DEBUG( "ring rptr: offset=0x%08llx handle=0x%08lx\n", - (u64)entry->busaddr[page_ofs], + DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", + entry->busaddr[page_ofs], entry->handle + tmp_ofs ); } @@ -374,9 +375,8 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) { drm_r128_private_t *dev_priv; - struct list_head *list; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv = DRM(alloc)( sizeof(drm_r128_private_t), DRM_MEM_DRIVER ); if ( dev_priv == NULL ) @@ -481,15 +481,8 @@ dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) | (dev_priv->span_offset >> 5)); - list_for_each(list, &dev->maplist->head) { - drm_map_list_t *r_list = (drm_map_list_t *)list; - if( r_list->map && - r_list->map->type == _DRM_SHM && - r_list->map->flags & _DRM_CONTAINS_LOCK ) { - dev_priv->sarea = r_list->map; - break; - } - } + DRM_GETSAREA(); + if(!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); dev->dev_private = (void *)dev_priv; @@ -622,16 +615,20 @@ if ( dev->dev_private ) { drm_r128_private_t *dev_priv = dev->dev_private; +#if __REALLY_HAVE_SG if ( !dev_priv->is_pci ) { +#endif DRM_IOREMAPFREE( dev_priv->cce_ring ); DRM_IOREMAPFREE( dev_priv->ring_rptr ); DRM_IOREMAPFREE( dev_priv->buffers ); +#if __REALLY_HAVE_SG } else { if (!DRM(ati_pcigart_cleanup)( dev, dev_priv->phys_pci_gart, dev_priv->bus_pci_gart )) DRM_ERROR( "failed to cleanup PCI GART!\n" ); } +#endif DRM(free)( dev->dev_private, sizeof(drm_r128_private_t), DRM_MEM_DRIVER ); @@ -713,7 +710,7 @@ */ if ( stop.idle ) { ret = r128_do_cce_idle( dev_priv ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; } /* Finally, we can turn off the CCE. 
If the engine isn't idle, @@ -790,7 +787,7 @@ static int r128_do_init_pageflip( drm_device_t *dev ) { drm_r128_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv->crtc_offset = R128_READ( R128_CRTC_OFFSET ); dev_priv->crtc_offset_cntl = R128_READ( R128_CRTC_OFFSET_CNTL ); @@ -808,7 +805,7 @@ int r128_do_cleanup_pageflip( drm_device_t *dev ) { drm_r128_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); R128_WRITE( R128_CRTC_OFFSET, dev_priv->crtc_offset ); R128_WRITE( R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl ); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/r128_drm.h linux.22-ac2/drivers/char/drm/r128_drm.h --- linux.vanilla/drivers/char/drm/r128_drm.h 2001-08-08 17:42:15.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/r128_drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -170,6 +170,27 @@ /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmR128.h) */ + +/* Rage 128 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) +#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41) +#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t) +#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43) +#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44) +#define DRM_IOCTL_R128_RESET DRM_IO( 0x46) +#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47) +#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t) +#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t) +#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t) +#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t) +#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t) +#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t) +#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t) +#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t) +#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( 0x51, drm_r128_clear2_t) + typedef struct drm_r128_init { enum { R128_INIT_CCE = 0x01, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/r128_drv.c linux.22-ac2/drivers/char/drm/r128_drv.c --- linux.vanilla/drivers/char/drm/r128_drv.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/r128_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -32,48 +32,11 @@ #include #include "r128.h" #include "drmP.h" +#include "drm.h" +#include "r128_drm.h" #include "r128_drv.h" #include "ati_pcigart.h" -#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
- -#define DRIVER_NAME "r128" -#define DRIVER_DESC "ATI Rage 128" -#define DRIVER_DATE "20010917" - -#define DRIVER_MAJOR 2 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_FULLSCREEN)] = { r128_fullscreen, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, - - -#if 0 -/* GH: Count data sent to card via ring or vertex/indirect buffers. - */ -#define __HAVE_COUNTERS 3 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#endif - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -81,26 +44,6 @@ #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" - -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init r128_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", r128_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/r128_drv.h linux.22-ac2/drivers/char/drm/r128_drv.h --- linux.vanilla/drivers/char/drm/r128_drv.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/r128_drv.h 2003-07-28 21:09:43.000000000 +0100 @@ -33,9 +33,10 @@ #ifndef __R128_DRV_H__ #define __R128_DRV_H__ +#include -#define GET_RING_HEAD( ring ) le32_to_cpu( *(ring)->head ) -#define SET_RING_HEAD( ring, val ) *(ring)->head = cpu_to_le32( val ) +#define GET_RING_HEAD(ring) readl( (volatile u32 *) (ring)->head ) +#define SET_RING_HEAD(ring,val) writel( (val), (volatile u32 *) (ring)->head ) typedef struct drm_r128_freelist { unsigned int age; @@ -384,44 +385,11 @@ #define R128_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) #define R128_ADDR(reg) (R128_BASE( reg ) + reg) -#define R128_DEREF(reg) *(volatile u32 *)R128_ADDR( reg ) -#ifdef __alpha__ -#define R128_READ(reg) (_R128_READ((u32 *)R128_ADDR(reg))) -static inline u32 _R128_READ(u32 *addr) -{ - mb(); - return *(volatile u32 *)addr; -} -#define R128_WRITE(reg,val) \ -do { \ - wmb(); \ - R128_DEREF(reg) = val; \ -} while (0) -#else -#define R128_READ(reg) le32_to_cpu( R128_DEREF( reg ) ) -#define R128_WRITE(reg,val) \ -do { \ - R128_DEREF( reg ) = cpu_to_le32( val ); \ -} while (0) -#endif +#define R128_READ(reg) readl( (volatile u32 *) R128_ADDR(reg) ) +#define R128_WRITE(reg,val) writel( (val) , (volatile u32 *) R128_ADDR(reg)) -#define R128_DEREF8(reg) *(volatile u8 *)R128_ADDR( reg ) -#ifdef __alpha__ -#define R128_READ8(reg) _R128_READ8((u8 *)R128_ADDR(reg)) -static inline u8 _R128_READ8(u8 *addr) -{ - mb(); - return *(volatile u8 *)addr; -} -#define R128_WRITE8(reg,val) \ -do { \ - wmb(); \ - R128_DEREF8(reg) = val; \ -} while (0) -#else -#define R128_READ8(reg) R128_DEREF8( reg ) -#define R128_WRITE8(reg,val) do { R128_DEREF8( reg ) = val; } while (0) -#endif +#define R128_READ8(reg) readb( (volatile u8 *) R128_ADDR(reg) ) +#define R128_WRITE8(reg,val) writeb( (val), (volatile u8 *) R128_ADDR(reg) ) #define R128_WRITE_PLL(addr,val) \ do { \ @@ -470,6 +438,7 @@ return -EBUSY; \ } \ __ring_space_done: ; \ + break; \ } while (0) #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ @@ -493,7 +462,11 @@ * Ring control */ +#if defined(__powerpc__) +#define r128_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring ) +#else #define r128_flush_write_combine() mb() +#endif #define R128_VERBOSE 0 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/r128.h linux.22-ac2/drivers/char/drm/r128.h --- linux.vanilla/drivers/char/drm/r128.h 2001-08-08 17:42:15.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/r128.h 2003-07-28 21:09:43.000000000 +0100 @@ -43,6 +43,35 @@ #define __HAVE_SG 1 #define __HAVE_PCI_DMA 1 +#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
+ +#define DRIVER_NAME "r128" +#define DRIVER_DESC "ATI Rage 128" +#define DRIVER_DATE "20010917" + +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 0 + + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_FULLSCREEN)] = { r128_fullscreen, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, + /* Driver customization: */ #define DRIVER_PRERELEASE() do { \ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/r128_state.c linux.22-ac2/drivers/char/drm/r128_state.c --- linux.vanilla/drivers/char/drm/r128_state.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/r128_state.c 2003-07-28 21:09:43.000000000 +0100 @@ -29,9 +29,9 @@ #include "r128.h" #include "drmP.h" -#include "r128_drv.h" #include "drm.h" -#include +#include "r128_drm.h" +#include "r128_drv.h" /* ================================================================ @@ -528,7 +528,7 @@ { drm_r128_private_t *dev_priv = dev->dev_private; RING_LOCALS; - DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page ); + DRM_DEBUG( "page=%d\n", dev_priv->current_page ); #if R128_PERFORMANCE_BOXES /* Do some trivial performance monitoring... @@ -577,8 +577,7 @@ int prim = buf_priv->prim; int i = 0; RING_LOCALS; - DRM_DEBUG( "%s: buf=%d nbox=%d\n", - __FUNCTION__, buf->idx, sarea_priv->nbox ); + DRM_DEBUG( "buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox ); if ( 0 ) r128_print_dirty( "dispatch_vertex", sarea_priv->dirty ); @@ -789,7 +788,7 @@ u32 *data; int dword_shift, dwords; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* The compiler won't optimize away a division by a variable, * even if the only legal values are powers of two. 
Thus, we'll diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/radeon_cp.c linux.22-ac2/drivers/char/drm/radeon_cp.c --- linux.vanilla/drivers/char/drm/radeon_cp.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/radeon_cp.c 2003-07-28 21:09:43.000000000 +0100 @@ -30,21 +30,278 @@ #include "radeon.h" #include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" #include "radeon_drv.h" +#include "drm_os_linux.h" #include /* For task queue support */ #include - #define RADEON_FIFO_DEBUG 0 -#if defined(__alpha__) -# define PCIGART_ENABLED -#else -# undef PCIGART_ENABLED -#endif /* CP microcode (from ATI) */ +static u32 R200_cp_microcode[][2] = { + { 0x21007000, 0000000000 }, + { 0x20007000, 0000000000 }, + { 0x000000ab, 0x00000004 }, + { 0x000000af, 0x00000004 }, + { 0x66544a49, 0000000000 }, + { 0x49494174, 0000000000 }, + { 0x54517d83, 0000000000 }, + { 0x498d8b64, 0000000000 }, + { 0x49494949, 0000000000 }, + { 0x49da493c, 0000000000 }, + { 0x49989898, 0000000000 }, + { 0xd34949d5, 0000000000 }, + { 0x9dc90e11, 0000000000 }, + { 0xce9b9b9b, 0000000000 }, + { 0x000f0000, 0x00000016 }, + { 0x352e232c, 0000000000 }, + { 0x00000013, 0x00000004 }, + { 0x000f0000, 0x00000016 }, + { 0x352e272c, 0000000000 }, + { 0x000f0001, 0x00000016 }, + { 0x3239362f, 0000000000 }, + { 0x000077ef, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x00000020, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00061000, 0x00000002 }, + { 0x00000020, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00061000, 0x00000002 }, + { 0x00000020, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00000016, 0x00000004 }, + { 0x0003802a, 0x00000002 }, + { 0x040067e0, 0x00000002 }, + { 0x00000016, 0x00000004 }, + { 0x000077e0, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x000037e1, 0x00000002 }, + { 0x040067e1, 0x00000006 }, + { 0x000077e0, 0x00000002 }, + { 0x000077e1, 0x00000002 }, + { 0x000077e1, 0x00000006 }, + { 0xffffffff, 0000000000 }, + { 0x10000000, 0000000000 }, + { 0x0003802a, 0x00000002 }, + { 0x040067e0, 0x00000006 }, + { 0x00007675, 0x00000002 }, + { 0x00007676, 0x00000002 }, + { 0x00007677, 0x00000002 }, + { 0x00007678, 0x00000006 }, + { 0x0003802b, 0x00000002 }, + { 0x04002676, 0x00000002 }, + { 0x00007677, 0x00000002 }, + { 0x00007678, 0x00000006 }, + { 0x0000002e, 0x00000018 }, + { 0x0000002e, 0x00000018 }, + { 0000000000, 0x00000006 }, + { 0x0000002f, 0x00000018 }, + { 0x0000002f, 0x00000018 }, + { 0000000000, 0x00000006 }, + { 0x01605000, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x00098000, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x64c0603d, 0x00000004 }, + { 0x00080000, 0x00000016 }, + { 0000000000, 0000000000 }, + { 0x0400251d, 0x00000002 }, + { 0x00007580, 0x00000002 }, + { 0x00067581, 0x00000002 }, + { 0x04002580, 0x00000002 }, + { 0x00067581, 0x00000002 }, + { 0x00000046, 0x00000004 }, + { 0x00005000, 0000000000 }, + { 0x00061000, 0x00000002 }, + { 0x0000750e, 0x00000002 }, + { 0x00019000, 0x00000002 }, + { 0x00011055, 0x00000014 }, + { 0x00000055, 0x00000012 }, + { 0x0400250f, 0x00000002 }, + { 0x0000504a, 0x00000004 }, + { 0x00007565, 0x00000002 }, + { 0x00007566, 0x00000002 }, + { 0x00000051, 0x00000004 }, + { 0x01e655b4, 0x00000002 }, + { 0x4401b0dc, 0x00000002 }, + { 0x01c110dc, 0x00000002 }, + { 0x2666705d, 0x00000018 }, + { 0x040c2565, 0x00000002 }, + { 0x0000005d, 0x00000018 }, + { 0x04002564, 0x00000002 }, + { 0x00007566, 0x00000002 }, + { 0x00000054, 0x00000004 }, + { 0x00401060, 0x00000008 }, + { 0x00101000, 
0x00000002 }, + { 0x000d80ff, 0x00000002 }, + { 0x00800063, 0x00000008 }, + { 0x000f9000, 0x00000002 }, + { 0x000e00ff, 0x00000002 }, + { 0000000000, 0x00000006 }, + { 0x00000080, 0x00000018 }, + { 0x00000054, 0x00000004 }, + { 0x00007576, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x00009000, 0x00000002 }, + { 0x00041000, 0x00000002 }, + { 0x0c00350e, 0x00000002 }, + { 0x00049000, 0x00000002 }, + { 0x00051000, 0x00000002 }, + { 0x01e785f8, 0x00000002 }, + { 0x00200000, 0x00000002 }, + { 0x00600073, 0x0000000c }, + { 0x00007563, 0x00000002 }, + { 0x006075f0, 0x00000021 }, + { 0x20007068, 0x00000004 }, + { 0x00005068, 0x00000004 }, + { 0x00007576, 0x00000002 }, + { 0x00007577, 0x00000002 }, + { 0x0000750e, 0x00000002 }, + { 0x0000750f, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00600076, 0x0000000c }, + { 0x006075f0, 0x00000021 }, + { 0x000075f8, 0x00000002 }, + { 0x00000076, 0x00000004 }, + { 0x000a750e, 0x00000002 }, + { 0x0020750f, 0x00000002 }, + { 0x00600079, 0x00000004 }, + { 0x00007570, 0x00000002 }, + { 0x00007571, 0x00000002 }, + { 0x00007572, 0x00000006 }, + { 0x00005000, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00007568, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x00000084, 0x0000000c }, + { 0x00058000, 0x00000002 }, + { 0x0c607562, 0x00000002 }, + { 0x00000086, 0x00000004 }, + { 0x00600085, 0x00000004 }, + { 0x400070dd, 0000000000 }, + { 0x000380dd, 0x00000002 }, + { 0x00000093, 0x0000001c }, + { 0x00065095, 0x00000018 }, + { 0x040025bb, 0x00000002 }, + { 0x00061096, 0x00000018 }, + { 0x040075bc, 0000000000 }, + { 0x000075bb, 0x00000002 }, + { 0x000075bc, 0000000000 }, + { 0x00090000, 0x00000006 }, + { 0x00090000, 0x00000002 }, + { 0x000d8002, 0x00000006 }, + { 0x00005000, 0x00000002 }, + { 0x00007821, 0x00000002 }, + { 0x00007800, 0000000000 }, + { 0x00007821, 0x00000002 }, + { 0x00007800, 0000000000 }, + { 0x01665000, 0x00000002 }, + { 0x000a0000, 0x00000002 }, + { 0x000671cc, 0x00000002 }, + { 0x0286f1cd, 0x00000002 }, + { 0x000000a3, 0x00000010 }, + { 0x21007000, 0000000000 }, + { 0x000000aa, 0x0000001c }, + { 0x00065000, 0x00000002 }, + { 0x000a0000, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x000b0000, 0x00000002 }, + { 0x38067000, 0x00000002 }, + { 0x000a00a6, 0x00000004 }, + { 0x20007000, 0000000000 }, + { 0x01200000, 0x00000002 }, + { 0x20077000, 0x00000002 }, + { 0x01200000, 0x00000002 }, + { 0x20007000, 0000000000 }, + { 0x00061000, 0x00000002 }, + { 0x0120751b, 0x00000002 }, + { 0x8040750a, 0x00000002 }, + { 0x8040750b, 0x00000002 }, + { 0x00110000, 0x00000002 }, + { 0x000380dd, 0x00000002 }, + { 0x000000bd, 0x0000001c }, + { 0x00061096, 0x00000018 }, + { 0x844075bd, 0x00000002 }, + { 0x00061095, 0x00000018 }, + { 0x840075bb, 0x00000002 }, + { 0x00061096, 0x00000018 }, + { 0x844075bc, 0x00000002 }, + { 0x000000c0, 0x00000004 }, + { 0x804075bd, 0x00000002 }, + { 0x800075bb, 0x00000002 }, + { 0x804075bc, 0x00000002 }, + { 0x00108000, 0x00000002 }, + { 0x01400000, 0x00000002 }, + { 0x006000c4, 0x0000000c }, + { 0x20c07000, 0x00000020 }, + { 0x000000c6, 0x00000012 }, + { 0x00800000, 0x00000006 }, + { 0x0080751d, 0x00000006 }, + { 0x000025bb, 0x00000002 }, + { 0x000040c0, 0x00000004 }, + { 0x0000775c, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00661000, 0x00000002 }, + { 0x0460275d, 0x00000020 }, + { 0x00004000, 0000000000 }, + { 0x00007999, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00661000, 0x00000002 }, + { 0x0460299b, 0x00000020 }, + { 0x00004000, 0000000000 }, + { 0x01e00830, 0x00000002 }, + { 0x21007000, 0000000000 }, 
+ { 0x00005000, 0x00000002 }, + { 0x00038042, 0x00000002 }, + { 0x040025e0, 0x00000002 }, + { 0x000075e1, 0000000000 }, + { 0x00000001, 0000000000 }, + { 0x000380d9, 0x00000002 }, + { 0x04007394, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, +}; + + static u32 radeon_cp_microcode[][2] = { { 0x21007000, 0000000000 }, { 0x20007000, 0000000000 }, @@ -346,6 +603,8 @@ u32 tmp; int i; + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + tmp = RADEON_READ( RADEON_RB2D_DSTCACHE_CTLSTAT ); tmp |= RADEON_RB2D_DC_FLUSH_ALL; RADEON_WRITE( RADEON_RB2D_DSTCACHE_CTLSTAT, tmp ); @@ -370,6 +629,8 @@ { int i; + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { int slots = ( RADEON_READ( RADEON_RBBM_STATUS ) & RADEON_RBBM_FIFOCNT_MASK ); @@ -388,8 +649,10 @@ { int i, ret; + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + ret = radeon_do_wait_for_fifo( dev_priv, 64 ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { if ( !(RADEON_READ( RADEON_RBBM_STATUS ) @@ -416,16 +679,31 @@ static void radeon_cp_load_microcode( drm_radeon_private_t *dev_priv ) { int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); radeon_do_wait_for_idle( dev_priv ); RADEON_WRITE( RADEON_CP_ME_RAM_ADDR, 0 ); - for ( i = 0 ; i < 256 ; i++ ) { - RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, - radeon_cp_microcode[i][1] ); - RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, - radeon_cp_microcode[i][0] ); + + if (dev_priv->is_r200) + { + DRM_INFO("Loading R200 Microcode\n"); + for ( i = 0 ; i < 256 ; i++ ) + { + RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, + R200_cp_microcode[i][1] ); + RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, + R200_cp_microcode[i][0] ); + } + } + else + { + for ( i = 0 ; i < 256 ; i++ ) { + RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, + radeon_cp_microcode[i][1] ); + RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, + radeon_cp_microcode[i][0] ); + } } } @@ -435,7 +713,7 @@ */ static void radeon_do_cp_flush( drm_radeon_private_t *dev_priv ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); #if 0 u32 tmp; @@ -449,7 +727,7 @@ int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ) { RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_RING( 6 ); @@ -458,6 +736,7 @@ RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); + COMMIT_RING(); return radeon_do_wait_for_idle( dev_priv ); } @@ -467,7 +746,7 @@ static void radeon_do_cp_start( drm_radeon_private_t *dev_priv ) { RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ 
); + DRM_DEBUG( "\n" ); radeon_do_wait_for_idle( dev_priv ); @@ -482,6 +761,7 @@ RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); + COMMIT_RING(); } /* Reset the Command Processor. This will not flush any pending @@ -491,7 +771,7 @@ static void radeon_do_cp_reset( drm_radeon_private_t *dev_priv ) { u32 cur_read_ptr; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR ); RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr ); @@ -505,7 +785,7 @@ */ static void radeon_do_cp_stop( drm_radeon_private_t *dev_priv ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); RADEON_WRITE( RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS ); @@ -518,7 +798,7 @@ { drm_radeon_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); radeon_do_pixcache_flush( dev_priv ); @@ -603,6 +883,7 @@ /* Set the write pointer delay */ RADEON_WRITE( RADEON_CP_RB_WPTR_DELAY, 0 ); + RADEON_READ( RADEON_CP_RB_WPTR_DELAY ); /* read back to propagate */ /* Initialize the ring buffer's read and write pointers */ cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR ); @@ -622,13 +903,63 @@ RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); - DRM_DEBUG( "ring rptr: offset=0x%08llx handle=0x%08lx\n", - (u64)entry->busaddr[page_ofs], + DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", + entry->busaddr[page_ofs], entry->handle + tmp_ofs ); } + /* Initialize the scratch register pointer. This will cause + * the scratch register values to be written out to memory + * whenever they are updated. + * + * We simply put this behind the ring read pointer, this works + * with PCI GART as well as (whatever kind of) AGP GART + */ + RADEON_WRITE( RADEON_SCRATCH_ADDR, RADEON_READ( RADEON_CP_RB_RPTR_ADDR ) + + RADEON_SCRATCH_REG_OFFSET ); + + dev_priv->scratch = ((__volatile__ u32 *) + dev_priv->ring.head + + (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); + + RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 ); + + /* Writeback doesn't seem to work everywhere, test it first */ + writel(0, &dev_priv->scratch[1]); + RADEON_WRITE( RADEON_SCRATCH_REG1, 0xdeadbeef ); + + for ( tmp = 0 ; tmp < dev_priv->usec_timeout ; tmp++ ) { + if ( readl( &dev_priv->scratch[1] ) == 0xdeadbeef ) + break; + udelay(1); + } + + if ( tmp < dev_priv->usec_timeout ) { + dev_priv->writeback_works = 1; + DRM_DEBUG( "writeback test succeeded, tmp=%d\n", tmp ); + } else { + dev_priv->writeback_works = 0; + DRM_DEBUG( "writeback test failed\n" ); + } + + dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; + RADEON_WRITE( RADEON_LAST_FRAME_REG, + dev_priv->sarea_priv->last_frame ); + + dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; + RADEON_WRITE( RADEON_LAST_DISPATCH_REG, + dev_priv->sarea_priv->last_dispatch ); + + dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; + RADEON_WRITE( RADEON_LAST_CLEAR_REG, + dev_priv->sarea_priv->last_clear ); + /* Set ring buffer size */ +#ifdef __BIG_ENDIAN + RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT ); +#else RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw ); +#endif radeon_do_wait_for_idle( dev_priv ); @@ -647,9 +978,8 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) { drm_radeon_private_t *dev_priv; - struct list_head *list; u32 tmp; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv = DRM(alloc)( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER ); if ( dev_priv == NULL ) @@ -659,17 
+989,6 @@ dev_priv->is_pci = init->is_pci; -#if !defined(PCIGART_ENABLED) - /* PCI support is not 100% working, so we disable it here. - */ - if ( dev_priv->is_pci ) { - DRM_ERROR( "PCI GART not yet supported for Radeon!\n" ); - dev->dev_private = (void *)dev_priv; - radeon_do_cleanup_cp(dev); - return -EINVAL; - } -#endif - if ( dev_priv->is_pci && !dev->sg ) { DRM_ERROR( "PCI GART memory not allocated!\n" ); dev->dev_private = (void *)dev_priv; @@ -686,12 +1005,10 @@ return -EINVAL; } + dev_priv->is_r200 = (init->func == RADEON_INIT_R200_CP); + dev_priv->do_boxes = 0; dev_priv->cp_mode = init->cp_mode; - /* Simple idle check. - */ - atomic_set( &dev_priv->idle_count, 0 ); - /* We don't support anything other than bus-mastering ring mode, * but the ring can be in either AGP or PCI space for the ring * read pointer. @@ -743,17 +1060,17 @@ * and screwing with the clear operation. */ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | - RADEON_Z_ENABLE | (dev_priv->color_fmt << 10) | - RADEON_ZBLOCK16); + (1<<15)); - dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt | - RADEON_Z_TEST_ALWAYS | - RADEON_STENCIL_TEST_ALWAYS | - RADEON_STENCIL_S_FAIL_KEEP | - RADEON_STENCIL_ZPASS_KEEP | - RADEON_STENCIL_ZFAIL_KEEP | - RADEON_Z_WRITE_ENABLE); + dev_priv->depth_clear.rb3d_zstencilcntl = + (dev_priv->depth_fmt | + RADEON_Z_TEST_ALWAYS | + RADEON_STENCIL_TEST_ALWAYS | + RADEON_STENCIL_S_FAIL_REPLACE | + RADEON_STENCIL_ZPASS_REPLACE | + RADEON_STENCIL_ZFAIL_REPLACE | + RADEON_Z_WRITE_ENABLE); dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | RADEON_BFACE_SOLID | @@ -767,15 +1084,8 @@ RADEON_ROUND_MODE_TRUNC | RADEON_ROUND_PREC_8TH_PIX); - list_for_each(list, &dev->maplist->head) { - drm_map_list_t *r_list = (drm_map_list_t *)list; - if( r_list->map && - r_list->map->type == _DRM_SHM && - r_list->map->flags & _DRM_CONTAINS_LOCK ) { - dev_priv->sarea = r_list->map; - break; - } - } + DRM_GETSAREA(); + if(!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); dev->dev_private = (void *)dev_priv; @@ -896,34 +1206,7 @@ dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; -#if 0 - /* Initialize the scratch register pointer. This will cause - * the scratch register values to be written out to memory - * whenever they are updated. - * FIXME: This doesn't quite work yet, so we're disabling it - * for the release. - */ - RADEON_WRITE( RADEON_SCRATCH_ADDR, (dev_priv->ring_rptr->offset + - RADEON_SCRATCH_REG_OFFSET) ); - RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 ); -#endif - - dev_priv->scratch = ((__volatile__ u32 *) - dev_priv->ring_rptr->handle + - (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); - - dev_priv->sarea_priv->last_frame = 0; - RADEON_WRITE( RADEON_LAST_FRAME_REG, - dev_priv->sarea_priv->last_frame ); - - dev_priv->sarea_priv->last_dispatch = 0; - RADEON_WRITE( RADEON_LAST_DISPATCH_REG, - dev_priv->sarea_priv->last_dispatch ); - - dev_priv->sarea_priv->last_clear = 0; - RADEON_WRITE( RADEON_LAST_CLEAR_REG, - dev_priv->sarea_priv->last_clear ); - +#if __REALLY_HAVE_SG if ( dev_priv->is_pci ) { if (!DRM(ati_pcigart_init)( dev, &dev_priv->phys_pci_gart, &dev_priv->bus_pci_gart)) { @@ -953,19 +1236,20 @@ RADEON_WRITE( RADEON_MC_AGP_LOCATION, 0xffffffc0 ); /* ?? 
*/ RADEON_WRITE( RADEON_AGP_COMMAND, 0 ); /* clear AGP_COMMAND */ } else { +#endif /* __REALLY_HAVE_SG */ /* Turn off PCI GART */ tmp = RADEON_READ( RADEON_AIC_CNTL ) & ~RADEON_PCIGART_TRANSLATE_EN; RADEON_WRITE( RADEON_AIC_CNTL, tmp ); +#if __REALLY_HAVE_SG } +#endif /* __REALLY_HAVE_SG */ radeon_cp_load_microcode( dev_priv ); radeon_cp_init_ring_buffer( dev, dev_priv ); -#if ROTATE_BUFS dev_priv->last_buf = 0; -#endif dev->dev_private = (void *)dev_priv; @@ -976,7 +1260,7 @@ int radeon_do_cleanup_cp( drm_device_t *dev ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); if ( dev->dev_private ) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -986,10 +1270,12 @@ DRM_IOREMAPFREE( dev_priv->ring_rptr ); DRM_IOREMAPFREE( dev_priv->buffers ); } else { +#if __REALLY_HAVE_SG if (!DRM(ati_pcigart_cleanup)( dev, dev_priv->phys_pci_gart, dev_priv->bus_pci_gart )) DRM_ERROR( "failed to cleanup PCI GART!\n" ); +#endif /* __REALLY_HAVE_SG */ } DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t), @@ -1012,6 +1298,7 @@ switch ( init.func ) { case RADEON_INIT_CP: + case RADEON_INIT_R200_CP: return radeon_do_init_cp( dev, &init ); case RADEON_CLEANUP_CP: return radeon_do_cleanup_cp( dev ); @@ -1075,7 +1362,7 @@ */ if ( stop.idle ) { ret = radeon_do_cp_idle( dev_priv ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; } /* Finally, we can turn off the CP. If the engine isn't idle, @@ -1145,117 +1432,74 @@ * Fullscreen mode */ -static int radeon_do_init_pageflip( drm_device_t *dev ) -{ - drm_radeon_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); - - dev_priv->crtc_offset = RADEON_READ( RADEON_CRTC_OFFSET ); - dev_priv->crtc_offset_cntl = RADEON_READ( RADEON_CRTC_OFFSET_CNTL ); - - RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->front_offset ); - RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, - dev_priv->crtc_offset_cntl | - RADEON_CRTC_OFFSET_FLIP_CNTL ); - - dev_priv->page_flipping = 1; - dev_priv->current_page = 0; - - return 0; -} - -int radeon_do_cleanup_pageflip( drm_device_t *dev ) +/* KW: Deprecated to say the least: + */ +int radeon_fullscreen(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) { - drm_radeon_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); - - RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->crtc_offset ); - RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl ); - - dev_priv->page_flipping = 0; - dev_priv->current_page = 0; - return 0; } -int radeon_fullscreen( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_radeon_fullscreen_t fs; - - LOCK_TEST_WITH_RETURN( dev ); - - if ( copy_from_user( &fs, (drm_radeon_fullscreen_t *)arg, - sizeof(fs) ) ) - return -EFAULT; - - switch ( fs.func ) { - case RADEON_INIT_FULLSCREEN: - return radeon_do_init_pageflip( dev ); - case RADEON_CLEANUP_FULLSCREEN: - return radeon_do_cleanup_pageflip( dev ); - } - - return -EINVAL; -} - /* ================================================================ * Freelist management */ -#define RADEON_BUFFER_USED 0xffffffff -#define RADEON_BUFFER_FREE 0 -#if 0 -static int radeon_freelist_init( drm_device_t *dev ) +/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through + * bufs until freelist code is used. 
Note this hides a problem with + * the scratch register * (used to keep track of last buffer + * completed) being written to before * the last buffer has actually + * completed rendering. + * + * KW: It's also a good way to find free buffers quickly. + * + * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't + * sleep. However, bugs in older versions of radeon_accel.c mean that + * we essentially have to do this, else old clients will break. + * + * However, it does leave open a potential deadlock where all the + * buffers are held by other clients, which can't release them because + * they can't get the lock. + */ + +drm_buf_t *radeon_freelist_get( drm_device_t *dev ) { drm_device_dma_t *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_buf_t *buf; drm_radeon_buf_priv_t *buf_priv; - drm_radeon_freelist_t *entry; - int i; - - dev_priv->head = DRM(alloc)( sizeof(drm_radeon_freelist_t), - DRM_MEM_DRIVER ); - if ( dev_priv->head == NULL ) - return -ENOMEM; - - memset( dev_priv->head, 0, sizeof(drm_radeon_freelist_t) ); - dev_priv->head->age = RADEON_BUFFER_USED; + drm_buf_t *buf; + int i, t; + int start; - for ( i = 0 ; i < dma->buf_count ; i++ ) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; + if ( ++dev_priv->last_buf >= dma->buf_count ) + dev_priv->last_buf = 0; - entry = DRM(alloc)( sizeof(drm_radeon_freelist_t), - DRM_MEM_DRIVER ); - if ( !entry ) return -ENOMEM; - - entry->age = RADEON_BUFFER_FREE; - entry->buf = buf; - entry->prev = dev_priv->head; - entry->next = dev_priv->head->next; - if ( !entry->next ) - dev_priv->tail = entry; - - buf_priv->discard = 0; - buf_priv->dispatched = 0; - buf_priv->list_entry = entry; + start = dev_priv->last_buf; - dev_priv->head->next = entry; + for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) { + u32 done_age = GET_SCRATCH( 1 ); + DRM_DEBUG("done_age = %d\n",done_age); + for ( i = start ; i < dma->buf_count ; i++ ) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + if ( buf->pid == 0 || (buf->pending && + buf_priv->age <= done_age) ) { + dev_priv->stats.requested_bufs++; + buf->pending = 0; + return buf; + } + start = 0; + } - if ( dev_priv->head->next ) - dev_priv->head->next->prev = entry; + if (t) { + udelay(1); + dev_priv->stats.freelist_loops++; + } } - return 0; - + DRM_DEBUG( "returning NULL!\n" ); + return NULL; } -#endif - +#if 0 drm_buf_t *radeon_freelist_get( drm_device_t *dev ) { drm_device_dma_t *dma = dev->dma; @@ -1263,76 +1507,40 @@ drm_radeon_buf_priv_t *buf_priv; drm_buf_t *buf; int i, t; -#if ROTATE_BUFS int start; -#endif - - /* FIXME: Optimize -- use freelist code */ + u32 done_age = readl(&dev_priv->scratch[1]); - for ( i = 0 ; i < dma->buf_count ; i++ ) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; - if ( buf->pid == 0 ) { - DRM_DEBUG( " ret buf=%d last=%d pid=0\n", - buf->idx, dev_priv->last_buf ); - return buf; - } - DRM_DEBUG( " skipping buf=%d pid=%d\n", - buf->idx, buf->pid ); - } - -#if ROTATE_BUFS if ( ++dev_priv->last_buf >= dma->buf_count ) dev_priv->last_buf = 0; + start = dev_priv->last_buf; -#endif - for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) { -#if 0 - /* FIXME: Disable this for now */ - u32 done_age = dev_priv->scratch[RADEON_LAST_DISPATCH]; -#else - u32 done_age = RADEON_READ( RADEON_LAST_DISPATCH_REG ); -#endif -#if ROTATE_BUFS + dev_priv->stats.freelist_loops++; + + for ( t = 0 ; t < 2 ; t++ ) { for ( i = start ; i < dma->buf_count ; i++ ) { -#else - for ( i = 0 ; i < dma->buf_count ; i++ ) { -#endif buf = dma->buflist[i]; buf_priv = 
buf->dev_private; - if ( buf->pending && buf_priv->age <= done_age ) { - /* The buffer has been processed, so it - * can now be used. - */ + if ( buf->pid == 0 || (buf->pending && + buf_priv->age <= done_age) ) { + dev_priv->stats.requested_bufs++; buf->pending = 0; - DRM_DEBUG( " ret buf=%d last=%d age=%d done=%d\n", buf->idx, dev_priv->last_buf, buf_priv->age, done_age ); return buf; } - DRM_DEBUG( " skipping buf=%d age=%d done=%d\n", - buf->idx, buf_priv->age, - done_age ); -#if ROTATE_BUFS - start = 0; -#endif } - udelay( 1 ); + start = 0; } - DRM_ERROR( "returning NULL!\n" ); return NULL; } +#endif void radeon_freelist_reset( drm_device_t *dev ) { drm_device_dma_t *dma = dev->dma; -#if ROTATE_BUFS drm_radeon_private_t *dev_priv = dev->dev_private; -#endif int i; -#if ROTATE_BUFS dev_priv->last_buf = 0; -#endif for ( i = 0 ; i < dma->buf_count ; i++ ) { drm_buf_t *buf = dma->buflist[i]; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; @@ -1349,11 +1557,23 @@ { drm_radeon_ring_buffer_t *ring = &dev_priv->ring; int i; + u32 last_head = GET_RING_HEAD(ring); for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { - radeon_update_ring_snapshot( ring ); + u32 head = GET_RING_HEAD(ring); + + ring->space = (head - ring->tail) * sizeof(u32); + if ( ring->space <= 0 ) + ring->space += ring->size; if ( ring->space > n ) return 0; + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + if (head != last_head) + i = 0; + last_head = head; + udelay( 1 ); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/radeon_drm.h linux.22-ac2/drivers/char/drm/radeon_drm.h --- linux.vanilla/drivers/char/drm/radeon_drm.h 2001-08-08 17:42:15.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/radeon_drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -2,6 +2,7 @@ * * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -26,6 +27,7 @@ * Authors: * Kevin E. Martin * Gareth Hughes + * Keith Whitwell */ #ifndef __RADEON_DRM_H__ @@ -37,7 +39,8 @@ #ifndef __RADEON_SAREA_DEFINES__ #define __RADEON_SAREA_DEFINES__ -/* What needs to be changed for the current vertex buffer? +/* Old style state flags, required for sarea interface (1.1 and 1.2 + * clears) and 1.2 drm_vertex2 ioctl. */ #define RADEON_UPLOAD_CONTEXT 0x00000001 #define RADEON_UPLOAD_VERTFMT 0x00000002 @@ -56,11 +59,136 @@ #define RADEON_UPLOAD_TEX2IMAGES 0x00004000 #define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ #define RADEON_REQUIRE_QUIESCENCE 0x00010000 -#define RADEON_UPLOAD_ALL 0x0001ffff +#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ +#define RADEON_UPLOAD_ALL 0x003effff +#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff + + +/* New style per-packet identifiers for use in cmd_buffer ioctl with + * the RADEON_EMIT_PACKET command. 
Comments relate new packets to old + * state bits and the packet size: + */ +#define RADEON_EMIT_PP_MISC 0 /* context/7 */ +#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ +#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ +#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ +#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ +#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ +#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ +#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ +#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ +#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ +#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ +#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ +#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ +#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ +#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ +#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ +#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ +#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ +#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ +#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ +#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ +#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ +#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ +#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ +#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ +#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ +#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ +#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ +#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ +#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ +#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ +#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ +#define R200_EMIT_VAP_CTL 32 /* vap/1 */ +#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ +#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ +#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ +#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ +#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ +#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ +#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ +#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ +#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ +#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ +#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ +#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ +#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ +#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ +#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ +#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ +#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ +#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ +#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ +#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ +#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ +#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ +#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ +#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ +#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ +#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ +#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ +#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ +#define R200_EMIT_PP_CUBIC_FACES_0 61 +#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 +#define R200_EMIT_PP_CUBIC_FACES_1 63 +#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 +#define R200_EMIT_PP_CUBIC_FACES_2 65 +#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 +#define R200_EMIT_PP_CUBIC_FACES_3 67 +#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 +#define R200_EMIT_PP_CUBIC_FACES_4 69 +#define 
R200_EMIT_PP_CUBIC_OFFSETS_4 70 +#define R200_EMIT_PP_CUBIC_FACES_5 71 +#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 +#define RADEON_MAX_STATE_PACKETS 73 + + +/* Commands understood by cmd_buffer ioctl. More can be added but + * obviously these can't be removed or changed: + */ +#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ +#define RADEON_CMD_SCALARS 2 /* emit scalar data */ +#define RADEON_CMD_VECTORS 3 /* emit vector data */ +#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ +#define RADEON_CMD_PACKET3 5 /* emit hw packet */ +#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ +#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ +#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: + * doesn't make the cpu wait, just + * the graphics hardware */ + + +typedef union { + int i; + struct { + unsigned char cmd_type, pad0, pad1, pad2; + } header; + struct { + unsigned char cmd_type, packet_id, pad0, pad1; + } packet; + struct { + unsigned char cmd_type, offset, stride, count; + } scalars; + struct { + unsigned char cmd_type, offset, stride, count; + } vectors; + struct { + unsigned char cmd_type, buf_idx, pad0, pad1; + } dma; + struct { + unsigned char cmd_type, flags, pad0, pad1; + } wait; +} drm_radeon_cmd_header_t; + +#define RADEON_WAIT_2D 0x1 +#define RADEON_WAIT_3D 0x2 + #define RADEON_FRONT 0x1 #define RADEON_BACK 0x2 #define RADEON_DEPTH 0x4 +#define RADEON_STENCIL 0x8 /* Primitive types */ @@ -78,12 +206,9 @@ /* Byte offsets for indirect buffer data */ #define RADEON_INDEX_PRIM_OFFSET 20 -#define RADEON_HOSTDATA_BLIT_OFFSET 32 #define RADEON_SCRATCH_REG_OFFSET 32 -/* Keep these small for testing - */ #define RADEON_NR_SAREA_CLIPRECTS 12 /* There are 2 heaps (local/AGP). Each region within a heap is a @@ -95,7 +220,7 @@ #define RADEON_NR_TEX_REGIONS 64 #define RADEON_LOG_TEX_GRANULARITY 16 -#define RADEON_MAX_TEXTURE_LEVELS 11 +#define RADEON_MAX_TEXTURE_LEVELS 12 #define RADEON_MAX_TEXTURE_UNITS 3 #endif /* __RADEON_SAREA_DEFINES__ */ @@ -155,28 +280,18 @@ /* Setup state */ unsigned int se_cntl_status; /* 0x2140 */ -#ifdef TCL_ENABLE - /* TCL state */ - radeon_color_regs_t se_tcl_material_emmissive; /* 0x2210 */ - radeon_color_regs_t se_tcl_material_ambient; - radeon_color_regs_t se_tcl_material_diffuse; - radeon_color_regs_t se_tcl_material_specular; - unsigned int se_tcl_shininess; - unsigned int se_tcl_output_vtx_fmt; - unsigned int se_tcl_output_vtx_sel; - unsigned int se_tcl_matrix_select_0; - unsigned int se_tcl_matrix_select_1; - unsigned int se_tcl_ucp_vert_blend_ctl; - unsigned int se_tcl_texture_proc_ctl; - unsigned int se_tcl_light_model_ctl; - unsigned int se_tcl_per_light_ctl[4]; -#endif - /* Misc state */ unsigned int re_top_left; /* 0x26c0 */ unsigned int re_misc; } drm_radeon_context_regs_t; +typedef struct { + /* Zbias state */ + unsigned int se_zbias_factor; /* 0x1dac */ + unsigned int se_zbias_constant; +} drm_radeon_context2_regs_t; + + /* Setup registers for each texture unit */ typedef struct { @@ -186,24 +301,37 @@ unsigned int pp_txcblend; unsigned int pp_txablend; unsigned int pp_tfactor; - unsigned int pp_border_color; - -#ifdef CUBIC_ENABLE - unsigned int pp_cubic_faces; - unsigned int pp_cubic_offset[5]; -#endif } drm_radeon_texture_regs_t; typedef struct { + unsigned int start; + unsigned int finish; + unsigned int prim:8; + unsigned int stateidx:8; + unsigned int numverts:16; /* overloaded as offset/64 for elt prims */ + unsigned int vc_format; /* vertex format */ +} drm_radeon_prim_t; + + 
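/* A minimal illustrative sketch (not taken from the patch) of how a client
 * might pack a command stream for the cmd_buffer ioctl using the
 * drm_radeon_cmd_header_t union and the RADEON_CMD_* / RADEON_EMIT_*
 * identifiers above.  The layout assumed here -- one header dword followed
 * by one payload dword per register, matching the "/n" size notes in the
 * packet comments -- and the emit_buf/emit_sz names are assumptions.
 */
static unsigned int emit_buf[256];	/* hypothetical client-side staging buffer */
static int emit_sz;			/* dwords used so far */

static void example_emit_line_width(unsigned int se_line_width)
{
	drm_radeon_cmd_header_t h;

	h.i = 0;
	h.packet.cmd_type  = RADEON_CMD_PACKET;
	h.packet.packet_id = RADEON_EMIT_SE_LINE_WIDTH;	/* "line/1": one dword */
	emit_buf[emit_sz++] = (unsigned int)h.i;	/* header */
	emit_buf[emit_sz++] = se_line_width;		/* payload */

	h.i = 0;
	h.wait.cmd_type = RADEON_CMD_WAIT;
	h.wait.flags    = RADEON_WAIT_3D;	/* stalls the engine, not the CPU */
	emit_buf[emit_sz++] = (unsigned int)h.i;
}
/* The finished stream would be handed to the kernel through the cmd_buffer
 * ioctl declared further down (drm_radeon_cmd_buffer_t /
 * DRM_IOCTL_RADEON_CMDBUF), with buf pointing at emit_buf and bufsz assumed
 * to be a byte count.
 */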
+typedef struct { + drm_radeon_context_regs_t context; + drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS]; + drm_radeon_context2_regs_t context2; + unsigned int dirty; +} drm_radeon_state_t; + + +typedef struct { unsigned char next, prev; unsigned char in_use; int age; } drm_radeon_tex_region_t; typedef struct { - /* The channel for communication of state information to the kernel - * on firing a vertex buffer. + /* The channel for communication of state information to the + * kernel on firing a vertex buffer with either of the + * obsoleted vertex/index ioctls. */ drm_radeon_context_regs_t context_state; drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS]; @@ -225,16 +353,50 @@ drm_radeon_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS+1]; int tex_age[RADEON_NR_TEX_HEAPS]; int ctx_owner; + int pfState; /* number of 3d windows (0,1,2ormore) */ + int pfCurrentPage; /* which buffer is being displayed? */ + int crtc2_base; /* CRTC2 frame offset */ } drm_radeon_sarea_t; /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmRadeon.h) + * + * KW: actually it's illegal to change any of this (backwards compatibility). */ + +/* Radeon specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t) +#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41) +#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t) +#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43) +#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44) +#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45) +#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t) +#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47) +#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t) +#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t) +#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t) +#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) +#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) +#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) +#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex2_t) +#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t) +#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t) +#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52) +#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR( 0x53, drm_radeon_mem_alloc_t) +#define DRM_IOCTL_RADEON_FREE DRM_IOW( 0x54, drm_radeon_mem_free_t) +#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( 0x55, drm_radeon_mem_init_heap_t) +#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR( 0x56, drm_radeon_irq_emit_t) +#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( 0x57, drm_radeon_irq_wait_t) + typedef struct drm_radeon_init { enum { RADEON_INIT_CP = 0x01, - RADEON_CLEANUP_CP = 0x02 + RADEON_CLEANUP_CP = 0x02, + RADEON_INIT_R200_CP = 0x03, } func; unsigned long sarea_priv_offset; int is_pci; @@ -285,7 +447,7 @@ unsigned int clear_color; unsigned int clear_depth; unsigned int color_mask; - unsigned int depth_mask; + unsigned int depth_mask; /* misnamed field: should be stencil */ drm_radeon_clear_rect_t *depth_boxes; } drm_radeon_clear_t; @@ -304,6 +466,36 @@ int discard; /* Client finished with buffer? 
*/ } drm_radeon_indices_t; +/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices + * - allows multiple primitives and state changes in a single ioctl + * - supports driver change to emit native primitives + */ +typedef struct drm_radeon_vertex2 { + int idx; /* Index of vertex buffer */ + int discard; /* Client finished with buffer? */ + int nr_states; + drm_radeon_state_t *state; + int nr_prims; + drm_radeon_prim_t *prim; +} drm_radeon_vertex2_t; + +/* v1.3 - obsoletes drm_radeon_vertex2 + * - allows arbitarily large cliprect list + * - allows updating of tcl packet, vector and scalar state + * - allows memory-efficient description of state updates + * - allows state to be emitted without a primitive + * (for clears, ctx switches) + * - allows more than one dma buffer to be referenced per ioctl + * - supports tcl driver + * - may be extended in future versions with new cmd types, packets + */ +typedef struct drm_radeon_cmd_buffer { + int bufsz; + char *buf; + int nbox; + drm_clip_rect_t *boxes; +} drm_radeon_cmd_buffer_t; + typedef struct drm_radeon_tex_image { unsigned int x, y; /* Blit coordinates */ unsigned int width, height; @@ -330,4 +522,55 @@ int discard; } drm_radeon_indirect_t; + +/* 1.3: An ioctl to get parameters that aren't available to the 3d + * client any other way. + */ +#define RADEON_PARAM_AGP_BUFFER_OFFSET 1 /* card offset of 1st agp buffer */ +#define RADEON_PARAM_LAST_FRAME 2 +#define RADEON_PARAM_LAST_DISPATCH 3 +#define RADEON_PARAM_LAST_CLEAR 4 +#define RADEON_PARAM_IRQ_NR 5 +#define RADEON_PARAM_AGP_BASE 6 /* card offset of agp base */ + +typedef struct drm_radeon_getparam { + int param; + int *value; +} drm_radeon_getparam_t; + +/* 1.6: Set up a memory manager for regions of shared memory: + */ +#define RADEON_MEM_REGION_AGP 1 +#define RADEON_MEM_REGION_FB 2 + +typedef struct drm_radeon_mem_alloc { + int region; + int alignment; + int size; + int *region_offset; /* offset from start of fb or agp */ +} drm_radeon_mem_alloc_t; + +typedef struct drm_radeon_mem_free { + int region; + int region_offset; +} drm_radeon_mem_free_t; + +typedef struct drm_radeon_mem_init_heap { + int region; + int size; + int start; +} drm_radeon_mem_init_heap_t; + + +/* 1.6: Userspace can request & wait on irq's: + */ +typedef struct drm_radeon_irq_emit { + int *irq_seq; +} drm_radeon_irq_emit_t; + +typedef struct drm_radeon_irq_wait { + int irq_seq; +} drm_radeon_irq_wait_t; + + #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/radeon_drv.c linux.22-ac2/drivers/char/drm/radeon_drv.c --- linux.vanilla/drivers/char/drm/radeon_drv.c 2001-08-08 17:42:15.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/radeon_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -30,47 +30,11 @@ #include #include "radeon.h" #include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" #include "radeon_drv.h" #include "ati_pcigart.h" -#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
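/* A brief illustrative sketch, from a client's point of view, of the irq
 * ioctls declared in radeon_drm.h above (frame throttling as described in
 * radeon_irq.c below): emit a software-interrupt marker, keep queuing work,
 * then wait on the marker without holding the hardware lock.  The open DRM
 * fd, the function name and the minimal error handling are assumptions;
 * only the ioctl numbers and argument structs come from the header.
 */
#include <sys/ioctl.h>
#include "drm.h"
#include "radeon_drm.h"

static int radeon_throttle_sketch(int fd)
{
	drm_radeon_irq_emit_t emit;
	drm_radeon_irq_wait_t wait;
	int seq = 0;

	emit.irq_seq = &seq;		/* kernel writes back the new sequence number */
	if (ioctl(fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit) < 0)
		return -1;

	/* ... queue further rendering here ... */

	wait.irq_seq = seq;		/* blocks (up to a timeout) until the marker passes */
	return ioctl(fd, DRM_IOCTL_RADEON_IRQ_WAIT, &wait);
}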
- -#define DRIVER_NAME "radeon" -#define DRIVER_DESC "ATI Radeon" -#define DRIVER_DATE "20010405" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 1 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 }, - - -#if 0 -/* GH: Count data sent to card via ring or vertex/indirect buffers. - */ -#define __HAVE_COUNTERS 3 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#endif - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -78,26 +42,6 @@ #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" - -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init radeon_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", radeon_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/radeon_drv.h linux.22-ac2/drivers/char/drm/radeon_drv.h --- linux.vanilla/drivers/char/drm/radeon_drv.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/radeon_drv.h 2003-07-28 21:09:43.000000000 +0100 @@ -31,6 +31,9 @@ #ifndef __RADEON_DRV_H__ #define __RADEON_DRV_H__ +#define GET_RING_HEAD(ring) readl( (volatile u32 *) (ring)->head ) +#define SET_RING_HEAD(ring,val) writel( (val), (volatile u32 *) (ring)->head ) + typedef struct drm_radeon_freelist { unsigned int age; drm_buf_t *buf; @@ -58,6 +61,15 @@ u32 se_cntl; } drm_radeon_depth_clear_t; + +struct mem_block { + struct mem_block *next; + struct mem_block *prev; + int start; + int size; + int pid; /* 0: free, -1: heap, other: real pids */ +}; + typedef struct drm_radeon_private { drm_radeon_ring_buffer_t ring; drm_radeon_sarea_t *sarea_priv; @@ -71,27 +83,32 @@ drm_radeon_freelist_t *head; drm_radeon_freelist_t *tail; -/* FIXME: ROTATE_BUFS is a hask to cycle through bufs until freelist - code is used. 
Note this hides a problem with the scratch register - (used to keep track of last buffer completed) being written to before - the last buffer has actually completed rendering. */ -#define ROTATE_BUFS 1 -#if ROTATE_BUFS int last_buf; -#endif volatile u32 *scratch; + int writeback_works; int usec_timeout; + + int is_r200; + int is_pci; unsigned long phys_pci_gart; dma_addr_t bus_pci_gart; - atomic_t idle_count; + struct { + u32 boxes; + int freelist_timeouts; + int freelist_loops; + int requested_bufs; + int last_frame_reads; + int last_clear_reads; + int clears; + int texture_uploads; + } stats; + int do_boxes; int page_flipping; int current_page; - u32 crtc_offset; - u32 crtc_offset_cntl; u32 color_fmt; unsigned int front_offset; @@ -116,14 +133,18 @@ drm_map_t *ring_rptr; drm_map_t *buffers; drm_map_t *agp_textures; + + struct mem_block *agp_heap; + struct mem_block *fb_heap; + + /* SW interrupt */ + wait_queue_head_t swi_queue; + atomic_t swi_emitted; + } drm_radeon_private_t; typedef struct drm_radeon_buf_priv { u32 age; - int prim; - int discard; - int dispatched; - drm_radeon_freelist_t *list_entry; } drm_radeon_buf_priv_t; /* radeon_cp.c */ @@ -149,14 +170,6 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n ); -static inline void -radeon_update_ring_snapshot( drm_radeon_ring_buffer_t *ring ) -{ - ring->space = (*(volatile int *)ring->head - ring->tail) * sizeof(u32); - if ( ring->space <= 0 ) - ring->space += ring->size; -} - extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); extern int radeon_do_cleanup_cp( drm_device_t *dev ); extern int radeon_do_cleanup_pageflip( drm_device_t *dev ); @@ -176,6 +189,34 @@ unsigned int cmd, unsigned long arg ); extern int radeon_cp_indirect( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ); +extern int radeon_cp_vertex2(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_cp_cmdbuf(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_cp_getparam(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_cp_flip(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); + +extern int radeon_mem_alloc(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_mem_free(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_mem_init_heap(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern void radeon_mem_takedown( struct mem_block **heap ); +extern void radeon_mem_release( struct mem_block *heap ); + + /* radeon_irq.c */ +extern int radeon_irq_emit(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_irq_wait(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); + +extern int radeon_emit_and_wait_irq(drm_device_t *dev); +extern int radeon_wait_irq(drm_device_t *dev, int swi_nr); +extern int radeon_emit_irq(drm_device_t *dev); + + +/* Flags for stats.boxes + */ +#define RADEON_BOX_DMA_IDLE 0x1 +#define RADEON_BOX_RING_FULL 0x2 +#define RADEON_BOX_FLIP 0x4 +#define RADEON_BOX_WAIT_IDLE 0x8 +#define RADEON_BOX_TEXTURE_LOAD 0x10 + /* Register definitions, register access macros and drmAddMap constants @@ -202,10 +243,10 @@ #define RADEON_CRTC_OFFSET_CNTL 0x0228 # define RADEON_CRTC_TILE_EN (1 << 15) # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) +#define RADEON_CRTC2_OFFSET 
0x0324 +#define RADEON_CRTC2_OFFSET_CNTL 0x0328 #define RADEON_RB3D_COLORPITCH 0x1c48 -#define RADEON_RB3D_DEPTHCLEARVALUE 0x1c30 -#define RADEON_RB3D_DEPTHXY_OFFSET 0x1c60 #define RADEON_DP_GUI_MASTER_CNTL 0x146c # define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) @@ -240,6 +281,24 @@ #define RADEON_SCRATCH_UMSK 0x0770 #define RADEON_SCRATCH_ADDR 0x0774 +#define GET_SCRATCH( x ) (dev_priv->writeback_works \ + ? readl( &dev_priv->scratch[(x)] ) \ + : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) + + +#define RADEON_GEN_INT_CNTL 0x0040 +# define RADEON_CRTC_VBLANK_MASK (1 << 0) +# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19) +# define RADEON_SW_INT_ENABLE (1 << 25) + +#define RADEON_GEN_INT_STATUS 0x0044 +# define RADEON_CRTC_VBLANK_STAT (1 << 0) +# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) +# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19) +# define RADEON_SW_INT_TEST (1 << 25) +# define RADEON_SW_INT_TEST_ACK (1 << 25) +# define RADEON_SW_INT_FIRE (1 << 26) + #define RADEON_HOST_PATH_CNTL 0x0130 # define RADEON_HDP_SOFT_RESET (1 << 26) # define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28) @@ -253,6 +312,12 @@ # define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4) # define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5) +#define RADEON_RBBM_GUICNTL 0x172c +# define RADEON_HOST_DATA_SWAP_NONE (0 << 0) +# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0) +# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0) +# define RADEON_HOST_DATA_SWAP_HDW (3 << 0) + #define RADEON_MC_AGP_LOCATION 0x014c #define RADEON_MC_FB_LOCATION 0x0148 #define RADEON_MCLK_CNTL 0x0012 @@ -290,10 +355,8 @@ # define RADEON_ROP_ENABLE (1 << 6) # define RADEON_STENCIL_ENABLE (1 << 7) # define RADEON_Z_ENABLE (1 << 8) -# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9) -# define RADEON_ZBLOCK8 (0 << 15) -# define RADEON_ZBLOCK16 (1 << 15) #define RADEON_RB3D_DEPTHOFFSET 0x1c24 +#define RADEON_RB3D_DEPTHPITCH 0x1c28 #define RADEON_RB3D_PLANEMASK 0x1d84 #define RADEON_RB3D_STENCILREFMASK 0x1d7c #define RADEON_RB3D_ZCACHE_MODE 0x3250 @@ -306,9 +369,9 @@ # define RADEON_Z_TEST_MASK (7 << 4) # define RADEON_Z_TEST_ALWAYS (7 << 4) # define RADEON_STENCIL_TEST_ALWAYS (7 << 12) -# define RADEON_STENCIL_S_FAIL_KEEP (0 << 16) -# define RADEON_STENCIL_ZPASS_KEEP (0 << 20) -# define RADEON_STENCIL_ZFAIL_KEEP (0 << 20) +# define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) +# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) +# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) # define RADEON_Z_WRITE_ENABLE (1 << 30) #define RADEON_RBBM_SOFT_RESET 0x00f0 # define RADEON_SOFT_RESET_CP (1 << 0) @@ -357,6 +420,16 @@ #define RADEON_SE_CNTL_STATUS 0x2140 #define RADEON_SE_LINE_WIDTH 0x1db8 #define RADEON_SE_VPORT_XSCALE 0x1d98 +#define RADEON_SE_ZBIAS_FACTOR 0x1db0 +#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210 +#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254 +#define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200 +# define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16 +# define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28 +#define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204 +#define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208 +# define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16 +#define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C #define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8 #define RADEON_SURFACE_ACCESS_CLR 0x0bfc #define RADEON_SURFACE_CNTL 0x0b00 @@ -421,6 +494,7 @@ #define RADEON_CP_RB_BASE 0x0700 #define RADEON_CP_RB_CNTL 0x0704 +# define RADEON_BUF_SWAP_32BIT (2 << 16) #define RADEON_CP_RB_RPTR_ADDR 0x070c #define RADEON_CP_RB_RPTR 0x0710 #define RADEON_CP_RB_WPTR 0x0714 @@ -457,11 +531,14 @@ #define RADEON_CP_PACKET3 0xC0000000 # define 
RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 # define RADEON_WAIT_FOR_IDLE 0x00002600 +# define RADEON_3D_DRAW_VBUF 0x00002800 # define RADEON_3D_DRAW_IMMD 0x00002900 -# define RADEON_3D_CLEAR_ZMASK 0x00003200 +# define RADEON_3D_DRAW_INDX 0x00002A00 +# define RADEON_3D_LOAD_VBPNTR 0x00002F00 # define RADEON_CNTL_HOSTDATA_BLT 0x00009400 # define RADEON_CNTL_PAINT_MULTI 0x00009A00 # define RADEON_CNTL_BITBLT_MULTI 0x00009B00 +# define RADEON_CNTL_SET_SCISSORS 0xC0001E00 #define RADEON_CP_PACKET_MASK 0xC0000000 #define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 @@ -470,6 +547,7 @@ #define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 #define RADEON_VTX_Z_PRESENT (1 << 31) +#define RADEON_VTX_PKCOLOR_PRESENT (1 << 3) #define RADEON_PRIM_TYPE_NONE (0 << 0) #define RADEON_PRIM_TYPE_POINT (1 << 0) @@ -482,6 +560,7 @@ #define RADEON_PRIM_TYPE_RECT_LIST (8 << 0) #define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) #define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) +#define RADEON_PRIM_TYPE_MASK 0xf #define RADEON_PRIM_WALK_IND (1 << 4) #define RADEON_PRIM_WALK_LIST (2 << 4) #define RADEON_PRIM_WALK_RING (3 << 4) @@ -508,6 +587,105 @@ #define RADEON_TXFORMAT_ARGB4444 5 #define RADEON_TXFORMAT_ARGB8888 6 #define RADEON_TXFORMAT_RGBA8888 7 +#define RADEON_TXFORMAT_VYUY422 10 +#define RADEON_TXFORMAT_YVYU422 11 +#define RADEON_TXFORMAT_DXT1 12 +#define RADEON_TXFORMAT_DXT23 14 +#define RADEON_TXFORMAT_DXT45 15 + +#define R200_PP_TXCBLEND_0 0x2f00 +#define R200_PP_TXCBLEND_1 0x2f10 +#define R200_PP_TXCBLEND_2 0x2f20 +#define R200_PP_TXCBLEND_3 0x2f30 +#define R200_PP_TXCBLEND_4 0x2f40 +#define R200_PP_TXCBLEND_5 0x2f50 +#define R200_PP_TXCBLEND_6 0x2f60 +#define R200_PP_TXCBLEND_7 0x2f70 +#define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268 +#define R200_PP_TFACTOR_0 0x2ee0 +#define R200_SE_VTX_FMT_0 0x2088 +#define R200_SE_VAP_CNTL 0x2080 +#define R200_SE_TCL_MATRIX_SEL_0 0x2230 +#define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8 +#define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0 +#define R200_PP_TXFILTER_5 0x2ca0 +#define R200_PP_TXFILTER_4 0x2c80 +#define R200_PP_TXFILTER_3 0x2c60 +#define R200_PP_TXFILTER_2 0x2c40 +#define R200_PP_TXFILTER_1 0x2c20 +#define R200_PP_TXFILTER_0 0x2c00 +#define R200_PP_TXOFFSET_5 0x2d78 +#define R200_PP_TXOFFSET_4 0x2d60 +#define R200_PP_TXOFFSET_3 0x2d48 +#define R200_PP_TXOFFSET_2 0x2d30 +#define R200_PP_TXOFFSET_1 0x2d18 +#define R200_PP_TXOFFSET_0 0x2d00 + +#define R200_PP_CUBIC_FACES_0 0x2c18 +#define R200_PP_CUBIC_FACES_1 0x2c38 +#define R200_PP_CUBIC_FACES_2 0x2c58 +#define R200_PP_CUBIC_FACES_3 0x2c78 +#define R200_PP_CUBIC_FACES_4 0x2c98 +#define R200_PP_CUBIC_FACES_5 0x2cb8 +#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04 +#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08 +#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c +#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10 +#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14 +#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c +#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20 +#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24 +#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28 +#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c +#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34 +#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38 +#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c +#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40 +#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44 +#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c +#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50 +#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54 +#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58 +#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c +#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64 +#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68 +#define 
R200_PP_CUBIC_OFFSET_F3_4 0x2d6c +#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70 +#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74 +#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c +#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80 +#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84 +#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88 +#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c + +#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 +#define R200_SE_VTE_CNTL 0x20b0 +#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250 +#define R200_PP_TAM_DEBUG3 0x2d9c +#define R200_PP_CNTL_X 0x2cc4 +#define R200_SE_VAP_CNTL_STATUS 0x2140 +#define R200_RE_SCISSOR_TL_0 0x1cd8 +#define R200_RE_SCISSOR_TL_1 0x1ce0 +#define R200_RE_SCISSOR_TL_2 0x1ce8 +#define R200_RB3D_DEPTHXY_OFFSET 0x1d60 +#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 +#define R200_SE_VTX_STATE_CNTL 0x2180 +#define R200_RE_POINTSIZE 0x2648 +#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254 + + +#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001 +#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000 +#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012 +#define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100 +#define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200 +#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001 +#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002 +#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b +#define R200_3D_DRAW_IMMD_2 0xC0003500 +#define R200_SE_VTX_FMT_1 0x208c +#define R200_RE_CNTL 0x1c50 + /* Constants */ #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -515,6 +693,7 @@ #define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0 #define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1 #define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2 +#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3 #define RADEON_LAST_DISPATCH 1 #define RADEON_MAX_VB_AGE 0x7fffffff @@ -526,41 +705,11 @@ #define RADEON_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) #define RADEON_ADDR(reg) (RADEON_BASE( reg ) + reg) -#define RADEON_DEREF(reg) *(volatile u32 *)RADEON_ADDR( reg ) -#ifdef __alpha__ -#define RADEON_READ(reg) (_RADEON_READ((u32 *)RADEON_ADDR( reg ))) -static inline u32 _RADEON_READ(u32 *addr) -{ - mb(); - return *(volatile u32 *)addr; -} -#define RADEON_WRITE(reg,val) \ -do { \ - wmb(); \ - RADEON_DEREF(reg) = val; \ -} while (0) -#else -#define RADEON_READ(reg) RADEON_DEREF( reg ) -#define RADEON_WRITE(reg, val) do { RADEON_DEREF( reg ) = val; } while (0) -#endif +#define RADEON_READ(reg) readl( (volatile u32 *) RADEON_ADDR(reg) ) +#define RADEON_WRITE(reg,val) writel( (val), (volatile u32 *) RADEON_ADDR(reg)) -#define RADEON_DEREF8(reg) *(volatile u8 *)RADEON_ADDR( reg ) -#ifdef __alpha__ -#define RADEON_READ8(reg) _RADEON_READ8((u8 *)RADEON_ADDR( reg )) -static inline u8 _RADEON_READ8(u8 *addr) -{ - mb(); - return *(volatile u8 *)addr; -} -#define RADEON_WRITE8(reg,val) \ -do { \ - wmb(); \ - RADEON_DEREF8( reg ) = val; \ -} while (0) -#else -#define RADEON_READ8(reg) RADEON_DEREF8( reg ) -#define RADEON_WRITE8(reg, val) do { RADEON_DEREF8( reg ) = val; } while (0) -#endif +#define RADEON_READ8(reg) readb( (volatile u8 *) RADEON_ADDR(reg) ) +#define RADEON_WRITE8(reg,val) writeb( (val), (volatile u8 *) RADEON_ADDR(reg)) #define RADEON_WRITE_PLL( addr, val ) \ do { \ @@ -647,20 +796,16 @@ } \ } while (0) + +/* Perfbox functionality only. 
+ */ #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ do { \ - drm_radeon_ring_buffer_t *ring = &dev_priv->ring; int i; \ - if ( ring->space < ring->high_mark ) { \ - for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \ - radeon_update_ring_snapshot( ring ); \ - if ( ring->space >= ring->high_mark ) \ - goto __ring_space_done; \ - udelay( 1 ); \ - } \ - DRM_ERROR( "ring space check failed!\n" ); \ - return -EBUSY; \ + if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \ + u32 head = GET_RING_HEAD(&dev_priv->ring); \ + if (head == dev_priv->ring.tail) \ + dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \ } \ - __ring_space_done: ; \ } while (0) #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ @@ -668,7 +813,7 @@ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ int __ret = radeon_do_cp_idle( dev_priv ); \ - if ( __ret < 0 ) return __ret; \ + if ( __ret ) return __ret; \ sarea_priv->last_dispatch = 0; \ radeon_freelist_reset( dev ); \ } \ @@ -694,12 +839,17 @@ * Ring control */ -#define radeon_flush_write_combine() mb() +#if defined(__powerpc__) +#define radeon_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring ) +#else +#define radeon_flush_write_combine() wmb() +#warning PCI posting bug +#endif #define RADEON_VERBOSE 0 -#define RING_LOCALS int write; unsigned int mask; volatile u32 *ring; +#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring; #define BEGIN_RING( n ) do { \ if ( RADEON_VERBOSE ) { \ @@ -707,9 +857,10 @@ n, __FUNCTION__ ); \ } \ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ + COMMIT_RING(); \ radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \ } \ - dev_priv->ring.space -= (n) * sizeof(u32); \ + _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ring = dev_priv->ring.start; \ write = dev_priv->ring.tail; \ mask = dev_priv->ring.tail_mask; \ @@ -720,9 +871,22 @@ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ write, dev_priv->ring.tail ); \ } \ - radeon_flush_write_combine(); \ - dev_priv->ring.tail = write; \ - RADEON_WRITE( RADEON_CP_RB_WPTR, write ); \ + if (((dev_priv->ring.tail + _nr) & mask) != write) { \ + DRM_ERROR( \ + "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ + ((dev_priv->ring.tail + _nr) & mask), \ + write, __LINE__); \ + } else \ + dev_priv->ring.tail = write; \ +} while (0) + +#define COMMIT_RING() do { \ + /* Flush writes to ring */ \ + rmb(); \ + GET_RING_HEAD( &dev_priv->ring ); \ + RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \ + /* read from PCI bus to ensure correct posting */ \ + RADEON_READ( RADEON_CP_RB_RPTR ); \ } while (0) #define OUT_RING( x ) do { \ @@ -734,6 +898,33 @@ write &= mask; \ } while (0) -#define RADEON_PERFORMANCE_BOXES 0 +#define OUT_RING_REG( reg, val ) do { \ + OUT_RING( CP_PACKET0( reg, 0 ) ); \ + OUT_RING( val ); \ +} while (0) + + +#define OUT_RING_USER_TABLE( tab, sz ) do { \ + int _size = (sz); \ + int *_tab = (tab); \ + \ + if (write + _size > mask) { \ + int i = (mask+1) - write; \ + if (__copy_from_user( (int *)(ring+write), \ + _tab, i*4 )) \ + return -EFAULT; \ + write = 0; \ + _size -= i; \ + _tab += i; \ + } \ + \ + if (_size && __copy_from_user( (int *)(ring+write), \ + _tab, _size*4 )) \ + return -EFAULT; \ + \ + write += _size; \ + write &= mask; \ +} while (0) + #endif /* __RADEON_DRV_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/radeon.h linux.22-ac2/drivers/char/drm/radeon.h --- linux.vanilla/drivers/char/drm/radeon.h 2003-06-14 
00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/radeon.h 2003-07-28 21:09:43.000000000 +0100 @@ -44,7 +44,78 @@ #define __HAVE_SG 1 #define __HAVE_PCI_DMA 1 -/* Driver customization: +#define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others." + +#define DRIVER_NAME "radeon" +#define DRIVER_DESC "ATI Radeon" +#define DRIVER_DATE "20020828" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 7 +#define DRIVER_PATCHLEVEL 0 + +/* Interface history: + * + * 1.1 - ?? + * 1.2 - Add vertex2 ioctl (keith) + * - Add stencil capability to clear ioctl (gareth, keith) + * - Increase MAX_TEXTURE_LEVELS (brian) + * 1.3 - Add cmdbuf ioctl (keith) + * - Add support for new radeon packets (keith) + * - Add getparam ioctl (keith) + * - Add flip-buffers ioctl, deprecate fullscreen foo (keith). + * 1.4 - Add scratch registers to get_param ioctl. + * 1.5 - Add r200 packets to cmdbuf ioctl + * - Add r200 function to init ioctl + * - Add 'scalar2' instruction to cmdbuf + * 1.6 - Add static agp memory manager + * Add irq handler (won't be turned on unless X server knows to) + * Add irq ioctls and irq_active getparam. + * Add wait command for cmdbuf ioctl + * Add agp offset query for getparam + * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5] + * and R200_PP_CUBIC_OFFSET_F1_[0..5]. + * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and + * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian) + */ +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX2)] = { radeon_cp_vertex2, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CMDBUF)] = { radeon_cp_cmdbuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_GETPARAM)] = { radeon_cp_getparam, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FLIP)] = { radeon_cp_flip, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_ALLOC)] = { radeon_mem_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FREE)] = { radeon_mem_free, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INIT_HEAP)] = { radeon_mem_init_heap, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_EMIT)] = { radeon_irq_emit, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 }, + + +#define USE_IRQS 1 +#if USE_IRQS +#define __HAVE_DMA_IRQ 1 +#define __HAVE_VBL_IRQ 1 +#define __HAVE_SHARED_IRQ 1 + +/* When a client dies: + * - Check for and clean up flipped page state + * - Free any alloced agp memory. + * + * DRM infrastructure takes care of reclaiming dma buffers. 
*/ #define DRIVER_PRERELEASE() do { \ if ( dev->dev_private ) { \ @@ -52,31 +123,36 @@ if ( dev_priv->page_flipping ) { \ radeon_do_cleanup_pageflip( dev ); \ } \ + radeon_mem_release( dev_priv->agp_heap ); \ } \ } while (0) +/* On unloading the module: + * - Free memory heap structure + * - Remove mappings made at startup and free dev_private. + */ #define DRIVER_PRETAKEDOWN() do { \ - if ( dev->dev_private ) radeon_do_cleanup_cp( dev ); \ + if ( dev->dev_private ) { \ + drm_radeon_private_t *dev_priv = dev->dev_private; \ + radeon_mem_takedown( &(dev_priv->agp_heap) ); \ + radeon_do_cleanup_cp( dev ); \ + } \ } while (0) +#else +#define __HAVE_DMA_IRQ 0 +#endif + /* DMA customization: */ #define __HAVE_DMA 1 -#if 0 -/* GH: Remove this for now... */ -#define __HAVE_DMA_QUIESCENT 1 -#define DRIVER_DMA_QUIESCENT() do { \ - drm_radeon_private_t *dev_priv = dev->dev_private; \ - return radeon_do_cp_idle( dev_priv ); \ -} while (0) -#endif /* Buffer customization: */ #define DRIVER_BUF_PRIV_T drm_radeon_buf_priv_t -#define DRIVER_AGP_BUFFERS_MAP( dev ) \ +#define DRIVER_AGP_BUFFERS_MAP( dev ) \ ((drm_radeon_private_t *)((dev)->dev_private))->buffers #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/radeon_irq.c linux.22-ac2/drivers/char/drm/radeon_irq.c --- linux.vanilla/drivers/char/drm/radeon_irq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/radeon_irq.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,258 @@ +/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- + * + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Keith Whitwell + * Michel Dänzer + */ + +#include "radeon.h" +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "drm_os_linux.h" + +/* Interrupts - Used for device synchronization and flushing in the + * following circumstances: + * + * - Exclusive FB access with hw idle: + * - Wait for GUI Idle (?) interrupt, then do normal flush. + * + * - Frame throttling, NV_fence: + * - Drop marker irq's into command stream ahead of time. 
+ * - Wait on irq's with lock *not held* + * - Check each for termination condition + * + * - Internally in cp_getbuffer, etc: + * - as above, but wait with lock held??? + * + * NOTE: These functions are misleadingly named -- the irq's aren't + * tied to dma at all, this is just a hangover from dri prehistory. + */ + +void DRM(dma_service)(int irq, void *arg, struct pt_regs *reg) +{ + drm_device_t *dev = (drm_device_t *) arg; + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + u32 stat; + + stat = RADEON_READ(RADEON_GEN_INT_STATUS) + & (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT); + if (!stat) + return; + + /* SW interrupt */ + if (stat & RADEON_SW_INT_TEST) { + wake_up_interruptible( &dev_priv->swi_queue ); + } + + /* VBLANK interrupt */ + if (stat & RADEON_CRTC_VBLANK_STAT) { + atomic_inc(&dev->vbl_received); + wake_up_interruptible(&dev->vbl_queue); + DRM(vbl_send_signals)(dev); + } + + /* Acknowledge all the bits in GEN_INT_STATUS -- seem to get + * more than we asked for... + */ + RADEON_WRITE(RADEON_GEN_INT_STATUS, stat); +} + +static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv) +{ + u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS ) + & (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT); + if (tmp) + RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp ); +} + +int radeon_emit_irq(drm_device_t *dev) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + unsigned int ret; + RING_LOCALS; + + atomic_inc(&dev_priv->swi_emitted); + ret = atomic_read(&dev_priv->swi_emitted); + + BEGIN_RING( 4 ); + OUT_RING_REG( RADEON_LAST_SWI_REG, ret ); + OUT_RING_REG( RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE ); + ADVANCE_RING(); + COMMIT_RING(); + + return ret; +} + + +int radeon_wait_irq(drm_device_t *dev, int swi_nr) +{ + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + int ret = 0; + + if (RADEON_READ( RADEON_LAST_SWI_REG ) >= swi_nr) + return 0; + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + /* This is a hack to work around mysterious freezes on certain + * systems: + */ + radeon_acknowledge_irqs( dev_priv ); + + DRM_WAIT_ON( ret, dev_priv->swi_queue, 3 * HZ, + RADEON_READ( RADEON_LAST_SWI_REG ) >= swi_nr ); + + return ret; +} + +int radeon_emit_and_wait_irq(drm_device_t *dev) +{ + return radeon_wait_irq( dev, radeon_emit_irq(dev) ); +} + + +int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence) +{ + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + unsigned int cur_vblank; + int ret = 0; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + radeon_acknowledge_irqs( dev_priv ); + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + /* Assume that the user has missed the current sequence number + * by about a day rather than she wants to wait for years + * using vertical blanks... + */ + DRM_WAIT_ON( ret, dev->vbl_queue, 3*HZ, + ( ( ( cur_vblank = atomic_read(&dev->vbl_received ) ) + - *sequence ) <= (1<<23) ) ); + + *sequence = cur_vblank; + + return ret; +} + + +/* Needs the lock as it touches the ring. 
+ */ +int radeon_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_irq_emit_t emit; + int result; + + LOCK_TEST_WITH_RETURN( dev ); + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t *)data, + sizeof(emit) ); + + result = radeon_emit_irq( dev ); + + if ( copy_to_user( emit.irq_seq, &result, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + + return 0; +} + + +/* Doesn't need the hardware lock. + */ +int radeon_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_irq_wait_t irqwait; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t *)data, + sizeof(irqwait) ); + + return radeon_wait_irq( dev, irqwait.irq_seq ); +} + + +/* drm_dma.h hooks +*/ +void DRM(driver_irq_preinstall)( drm_device_t *dev ) { + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + + /* Disable *all* interrupts */ + RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 ); + + /* Clear bits if they're already high */ + radeon_acknowledge_irqs( dev_priv ); +} + +void DRM(driver_irq_postinstall)( drm_device_t *dev ) { + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + + atomic_set(&dev_priv->swi_emitted, 0); + init_waitqueue_head( &dev_priv->swi_queue ); + + /* Turn on SW and VBL ints */ + RADEON_WRITE( RADEON_GEN_INT_CNTL, + RADEON_CRTC_VBLANK_MASK | + RADEON_SW_INT_ENABLE ); +} + +void DRM(driver_irq_uninstall)( drm_device_t *dev ) { + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + if ( dev_priv ) { + /* Disable *all* interrupts */ + RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 ); + } +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/radeon_mem.c linux.22-ac2/drivers/char/drm/radeon_mem.c --- linux.vanilla/drivers/char/drm/radeon_mem.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/radeon_mem.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,338 @@ +/* radeon_mem.c -- Simple agp/fb memory manager for radeon -*- linux-c -*- + * + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Keith Whitwell + */ + +#include "radeon.h" +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "drm_os_linux.h" + +/* Very simple allocator for agp memory, working on a static range + * already mapped into each client's address space. + */ + +static struct mem_block *split_block(struct mem_block *p, int start, int size, + int pid ) +{ + /* Maybe cut off the start of an existing block */ + if (start > p->start) { + struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL); + if (!newblock) + goto out; + newblock->start = start; + newblock->size = p->size - (start - p->start); + newblock->pid = 0; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size -= newblock->size; + p = newblock; + } + + /* Maybe cut off the end of an existing block */ + if (size < p->size) { + struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL); + if (!newblock) + goto out; + newblock->start = start + size; + newblock->size = p->size - size; + newblock->pid = 0; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size = size; + } + + out: + /* Our block is in the middle */ + p->pid = pid; + return p; +} + +static struct mem_block *alloc_block( struct mem_block *heap, int size, + int align2, int pid ) +{ + struct mem_block *p; + int mask = (1 << align2)-1; + + for (p = heap->next ; p != heap ; p = p->next) { + int start = (p->start + mask) & ~mask; + if (p->pid == 0 && start + size <= p->start + p->size) + return split_block( p, start, size, pid ); + } + + return NULL; +} + +static struct mem_block *find_block( struct mem_block *heap, int start ) +{ + struct mem_block *p; + + for (p = heap->next ; p != heap ; p = p->next) + if (p->start == start) + return p; + + return NULL; +} + + +static void free_block( struct mem_block *p ) +{ + p->pid = 0; + + /* Assumes a single contiguous range. Needs a special pid in + * 'heap' to stop it being subsumed. + */ + if (p->next->pid == 0) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + kfree(q); + } + + if (p->prev->pid == 0) { + struct mem_block *q = p->prev; + q->size += p->size; + q->next = p->next; + q->next->prev = q; + kfree(p); + } +} + +static void print_heap( struct mem_block *heap ) +{ + struct mem_block *p; + + for (p = heap->next ; p != heap ; p = p->next) + DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n", + p->start, p->start + p->size, + p->size, p->pid); +} + +/* Initialize. How to check for an uninitialized heap? 
+ */ +static int init_heap(struct mem_block **heap, int start, int size) +{ + struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); + + if (!blocks) + return -ENOMEM; + + *heap = kmalloc(sizeof(**heap), GFP_KERNEL); + if (!*heap) { + kfree( blocks ); + return -ENOMEM; + } + + blocks->start = start; + blocks->size = size; + blocks->pid = 0; + blocks->next = blocks->prev = *heap; + + memset( *heap, 0, sizeof(**heap) ); + (*heap)->pid = -1; + (*heap)->next = (*heap)->prev = blocks; + return 0; +} + + +/* Free all blocks associated with the releasing pid. + */ +void radeon_mem_release( struct mem_block *heap ) +{ + int pid = current->pid; + struct mem_block *p; + + if (!heap || !heap->next) + return; + + for (p = heap->next ; p != heap ; p = p->next) { + if (p->pid == pid) + p->pid = 0; + } + + /* Assumes a single contiguous range. Needs a special pid in + * 'heap' to stop it being subsumed. + */ + for (p = heap->next ; p != heap ; p = p->next) { + while (p->pid == 0 && p->next->pid == 0) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + kfree(q); + } + } +} + +/* Shutdown. + */ +void radeon_mem_takedown( struct mem_block **heap ) +{ + struct mem_block *p; + + if (!*heap) + return; + + for (p = (*heap)->next ; p != *heap ; ) { + struct mem_block *q = p; + p = p->next; + kfree(q); + } + + kfree( *heap ); + *heap = 0; +} + + + +/* IOCTL HANDLERS */ + +static struct mem_block **get_heap( drm_radeon_private_t *dev_priv, + int region ) +{ + switch( region ) { + case RADEON_MEM_REGION_AGP: + return &dev_priv->agp_heap; + case RADEON_MEM_REGION_FB: + return &dev_priv->fb_heap; + default: + return 0; + } +} + +int radeon_mem_alloc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_mem_alloc_t alloc; + struct mem_block *block, **heap; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t *)data, + sizeof(alloc) ); + + heap = get_heap( dev_priv, alloc.region ); + if (!heap || !*heap) + return -EFAULT; + + /* Make things easier on ourselves: all allocations at least + * 4k aligned. 
+ */ + if (alloc.alignment < 12) + alloc.alignment = 12; + + block = alloc_block( *heap, alloc.size, alloc.alignment, + current->pid ); + + if (!block) + return -ENOMEM; + + if ( copy_to_user( alloc.region_offset, &block->start, + sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + + return 0; +} + + + +int radeon_mem_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_mem_free_t memfree; + struct mem_block *block, **heap; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data, + sizeof(memfree) ); + + heap = get_heap( dev_priv, memfree.region ); + if (!heap || !*heap) + return -EFAULT; + + block = find_block( *heap, memfree.region_offset ); + if (!block) + return -EFAULT; + + if (block->pid != current->pid) + return -EPERM; + + free_block( block ); + return 0; +} + +int radeon_mem_init_heap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_mem_init_heap_t initheap; + struct mem_block **heap; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data, + sizeof(initheap) ); + + heap = get_heap( dev_priv, initheap.region ); + if (!heap) + return -EFAULT; + + if (*heap) { + DRM_ERROR("heap already initialized?"); + return -EFAULT; + } + + return init_heap( heap, initheap.start, initheap.size ); +} + + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/radeon_state.c linux.22-ac2/drivers/char/drm/radeon_state.c --- linux.vanilla/drivers/char/drm/radeon_state.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/radeon_state.c 2003-07-28 21:09:43.000000000 +0100 @@ -29,10 +29,11 @@ #include "radeon.h" #include "drmP.h" -#include "radeon_drv.h" #include "drm.h" -#include - +#include "drm_sarea.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "drm_os_linux.h" /* ================================================================ * CP hardware state programming functions @@ -47,360 +48,254 @@ box->x1, box->y1, box->x2, box->y2 ); BEGIN_RING( 4 ); - OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) ); OUT_RING( (box->y1 << 16) | box->x1 ); - OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) ); OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_context( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 14 ); - - OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) ); - OUT_RING( ctx->pp_misc ); - OUT_RING( ctx->pp_fog_color ); - OUT_RING( ctx->re_solid_color ); - OUT_RING( ctx->rb3d_blendcntl ); - OUT_RING( ctx->rb3d_depthoffset ); - OUT_RING( ctx->rb3d_depthpitch ); - OUT_RING( ctx->rb3d_zstencilcntl ); - - OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) ); - OUT_RING( ctx->pp_cntl ); - OUT_RING( ctx->rb3d_cntl ); - OUT_RING( ctx->rb3d_coloroffset ); - - OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) ); - 
OUT_RING( ctx->rb3d_colorpitch ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 2 ); - - OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) ); - OUT_RING( ctx->se_coord_fmt ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_line( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 5 ); - - OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) ); - OUT_RING( ctx->re_line_pattern ); - OUT_RING( ctx->re_line_state ); - - OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) ); - OUT_RING( ctx->se_line_width ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 5 ); - - OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) ); - OUT_RING( ctx->pp_lum_matrix ); - - OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) ); - OUT_RING( ctx->pp_rot_matrix_0 ); - OUT_RING( ctx->pp_rot_matrix_1 ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 4 ); - - OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) ); - OUT_RING( ctx->rb3d_stencilrefmask ); - OUT_RING( ctx->rb3d_ropcntl ); - OUT_RING( ctx->rb3d_planemask ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 7 ); - - OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) ); - OUT_RING( ctx->se_vport_xscale ); - OUT_RING( ctx->se_vport_xoffset ); - OUT_RING( ctx->se_vport_yscale ); - OUT_RING( ctx->se_vport_yoffset ); - OUT_RING( ctx->se_vport_zscale ); - OUT_RING( ctx->se_vport_zoffset ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_setup( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 4 ); - - OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); - OUT_RING( ctx->se_cntl ); - OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) ); - OUT_RING( ctx->se_cntl_status ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tcl( drm_radeon_private_t *dev_priv ) -{ -#ifdef TCL_ENABLE - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 29 ); - - OUT_RING( CP_PACKET0( RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 27 ) ); - OUT_RING( ctx->se_tcl_material_emmissive.red ); - OUT_RING( ctx->se_tcl_material_emmissive.green ); - OUT_RING( ctx->se_tcl_material_emmissive.blue ); - OUT_RING( ctx->se_tcl_material_emmissive.alpha ); - OUT_RING( 
ctx->se_tcl_material_ambient.red ); - OUT_RING( ctx->se_tcl_material_ambient.green ); - OUT_RING( ctx->se_tcl_material_ambient.blue ); - OUT_RING( ctx->se_tcl_material_ambient.alpha ); - OUT_RING( ctx->se_tcl_material_diffuse.red ); - OUT_RING( ctx->se_tcl_material_diffuse.green ); - OUT_RING( ctx->se_tcl_material_diffuse.blue ); - OUT_RING( ctx->se_tcl_material_diffuse.alpha ); - OUT_RING( ctx->se_tcl_material_specular.red ); - OUT_RING( ctx->se_tcl_material_specular.green ); - OUT_RING( ctx->se_tcl_material_specular.blue ); - OUT_RING( ctx->se_tcl_material_specular.alpha ); - OUT_RING( ctx->se_tcl_shininess ); - OUT_RING( ctx->se_tcl_output_vtx_fmt ); - OUT_RING( ctx->se_tcl_output_vtx_sel ); - OUT_RING( ctx->se_tcl_matrix_select_0 ); - OUT_RING( ctx->se_tcl_matrix_select_1 ); - OUT_RING( ctx->se_tcl_ucp_vert_blend_ctl ); - OUT_RING( ctx->se_tcl_texture_proc_ctl ); - OUT_RING( ctx->se_tcl_light_model_ctl ); - for ( i = 0 ; i < 4 ; i++ ) { - OUT_RING( ctx->se_tcl_per_light_ctl[i] ); - } - ADVANCE_RING(); -#else - DRM_ERROR( "TCL not enabled!\n" ); -#endif } -static inline void radeon_emit_misc( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 2 ); - - OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) ); - OUT_RING( ctx->re_misc ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tex0( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[0]; - RING_LOCALS; - DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset ); - - BEGIN_RING( 9 ); - - OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) ); - OUT_RING( tex->pp_txfilter ); - OUT_RING( tex->pp_txformat ); - OUT_RING( tex->pp_txoffset ); - OUT_RING( tex->pp_txcblend ); - OUT_RING( tex->pp_txablend ); - OUT_RING( tex->pp_tfactor ); - - OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) ); - OUT_RING( tex->pp_border_color ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tex1( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[1]; - RING_LOCALS; - DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset ); - - BEGIN_RING( 9 ); - - OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) ); - OUT_RING( tex->pp_txfilter ); - OUT_RING( tex->pp_txformat ); - OUT_RING( tex->pp_txoffset ); - OUT_RING( tex->pp_txcblend ); - OUT_RING( tex->pp_txablend ); - OUT_RING( tex->pp_tfactor ); - - OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) ); - OUT_RING( tex->pp_border_color ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tex2( drm_radeon_private_t *dev_priv ) +/* Emit 1.1 state + */ +static void radeon_emit_state( drm_radeon_private_t *dev_priv, + drm_radeon_context_regs_t *ctx, + drm_radeon_texture_regs_t *tex, + unsigned int dirty ) { - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[2]; RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 9 ); - - OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) ); - OUT_RING( tex->pp_txfilter ); - OUT_RING( tex->pp_txformat ); - OUT_RING( tex->pp_txoffset ); - OUT_RING( tex->pp_txcblend ); - OUT_RING( tex->pp_txablend ); - OUT_RING( tex->pp_tfactor ); - - OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) ); - OUT_RING( tex->pp_border_color 
); - - ADVANCE_RING(); -} - -static inline void radeon_emit_state( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - unsigned int dirty = sarea_priv->dirty; - - DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty ); + DRM_DEBUG( "dirty=0x%08x\n", dirty ); if ( dirty & RADEON_UPLOAD_CONTEXT ) { - radeon_emit_context( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_CONTEXT; + BEGIN_RING( 14 ); + OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) ); + OUT_RING( ctx->pp_misc ); + OUT_RING( ctx->pp_fog_color ); + OUT_RING( ctx->re_solid_color ); + OUT_RING( ctx->rb3d_blendcntl ); + OUT_RING( ctx->rb3d_depthoffset ); + OUT_RING( ctx->rb3d_depthpitch ); + OUT_RING( ctx->rb3d_zstencilcntl ); + OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) ); + OUT_RING( ctx->pp_cntl ); + OUT_RING( ctx->rb3d_cntl ); + OUT_RING( ctx->rb3d_coloroffset ); + OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) ); + OUT_RING( ctx->rb3d_colorpitch ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_VERTFMT ) { - radeon_emit_vertfmt( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_VERTFMT; + BEGIN_RING( 2 ); + OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) ); + OUT_RING( ctx->se_coord_fmt ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_LINE ) { - radeon_emit_line( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_LINE; + BEGIN_RING( 5 ); + OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) ); + OUT_RING( ctx->re_line_pattern ); + OUT_RING( ctx->re_line_state ); + OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) ); + OUT_RING( ctx->se_line_width ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_BUMPMAP ) { - radeon_emit_bumpmap( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_BUMPMAP; + BEGIN_RING( 5 ); + OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) ); + OUT_RING( ctx->pp_lum_matrix ); + OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) ); + OUT_RING( ctx->pp_rot_matrix_0 ); + OUT_RING( ctx->pp_rot_matrix_1 ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_MASKS ) { - radeon_emit_masks( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_MASKS; + BEGIN_RING( 4 ); + OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) ); + OUT_RING( ctx->rb3d_stencilrefmask ); + OUT_RING( ctx->rb3d_ropcntl ); + OUT_RING( ctx->rb3d_planemask ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_VIEWPORT ) { - radeon_emit_viewport( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_VIEWPORT; + BEGIN_RING( 7 ); + OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) ); + OUT_RING( ctx->se_vport_xscale ); + OUT_RING( ctx->se_vport_xoffset ); + OUT_RING( ctx->se_vport_yscale ); + OUT_RING( ctx->se_vport_yoffset ); + OUT_RING( ctx->se_vport_zscale ); + OUT_RING( ctx->se_vport_zoffset ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_SETUP ) { - radeon_emit_setup( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_SETUP; - } - - if ( dirty & RADEON_UPLOAD_TCL ) { -#ifdef TCL_ENABLE - radeon_emit_tcl( dev_priv ); -#endif - sarea_priv->dirty &= ~RADEON_UPLOAD_TCL; + BEGIN_RING( 4 ); + OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); + OUT_RING( ctx->se_cntl ); + OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) ); + OUT_RING( ctx->se_cntl_status ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_MISC ) { - radeon_emit_misc( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_MISC; + BEGIN_RING( 2 ); + OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) ); + OUT_RING( ctx->re_misc ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX0 ) { - radeon_emit_tex0( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_TEX0; + BEGIN_RING( 9 ); + 
OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) ); + OUT_RING( tex[0].pp_txfilter ); + OUT_RING( tex[0].pp_txformat ); + OUT_RING( tex[0].pp_txoffset ); + OUT_RING( tex[0].pp_txcblend ); + OUT_RING( tex[0].pp_txablend ); + OUT_RING( tex[0].pp_tfactor ); + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) ); + OUT_RING( tex[0].pp_border_color ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX1 ) { - radeon_emit_tex1( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_TEX1; + BEGIN_RING( 9 ); + OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) ); + OUT_RING( tex[1].pp_txfilter ); + OUT_RING( tex[1].pp_txformat ); + OUT_RING( tex[1].pp_txoffset ); + OUT_RING( tex[1].pp_txcblend ); + OUT_RING( tex[1].pp_txablend ); + OUT_RING( tex[1].pp_tfactor ); + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) ); + OUT_RING( tex[1].pp_border_color ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX2 ) { -#if 0 - radeon_emit_tex2( dev_priv ); -#endif - sarea_priv->dirty &= ~RADEON_UPLOAD_TEX2; + BEGIN_RING( 9 ); + OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) ); + OUT_RING( tex[2].pp_txfilter ); + OUT_RING( tex[2].pp_txformat ); + OUT_RING( tex[2].pp_txoffset ); + OUT_RING( tex[2].pp_txcblend ); + OUT_RING( tex[2].pp_txablend ); + OUT_RING( tex[2].pp_tfactor ); + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) ); + OUT_RING( tex[2].pp_border_color ); + ADVANCE_RING(); } +} - sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | - RADEON_UPLOAD_TEX1IMAGES | - RADEON_UPLOAD_TEX2IMAGES | - RADEON_REQUIRE_QUIESCENCE); +/* Emit 1.2 state + */ +static void radeon_emit_state2( drm_radeon_private_t *dev_priv, + drm_radeon_state_t *state ) +{ + RING_LOCALS; + + if (state->dirty & RADEON_UPLOAD_ZBIAS) { + BEGIN_RING( 3 ); + OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) ); + OUT_RING( state->context2.se_zbias_factor ); + OUT_RING( state->context2.se_zbias_constant ); + ADVANCE_RING(); + } + + radeon_emit_state( dev_priv, &state->context, + state->tex, state->dirty ); } +/* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in + * 1.3 cmdbuffers allow all previous state to be updated as well as + * the tcl scalar and vector areas. 
+ */ +static struct { + int start; + int len; + const char *name; +} packet[RADEON_MAX_STATE_PACKETS] = { + { RADEON_PP_MISC,7,"RADEON_PP_MISC" }, + { RADEON_PP_CNTL,3,"RADEON_PP_CNTL" }, + { RADEON_RB3D_COLORPITCH,1,"RADEON_RB3D_COLORPITCH" }, + { RADEON_RE_LINE_PATTERN,2,"RADEON_RE_LINE_PATTERN" }, + { RADEON_SE_LINE_WIDTH,1,"RADEON_SE_LINE_WIDTH" }, + { RADEON_PP_LUM_MATRIX,1,"RADEON_PP_LUM_MATRIX" }, + { RADEON_PP_ROT_MATRIX_0,2,"RADEON_PP_ROT_MATRIX_0" }, + { RADEON_RB3D_STENCILREFMASK,3,"RADEON_RB3D_STENCILREFMASK" }, + { RADEON_SE_VPORT_XSCALE,6,"RADEON_SE_VPORT_XSCALE" }, + { RADEON_SE_CNTL,2,"RADEON_SE_CNTL" }, + { RADEON_SE_CNTL_STATUS,1,"RADEON_SE_CNTL_STATUS" }, + { RADEON_RE_MISC,1,"RADEON_RE_MISC" }, + { RADEON_PP_TXFILTER_0,6,"RADEON_PP_TXFILTER_0" }, + { RADEON_PP_BORDER_COLOR_0,1,"RADEON_PP_BORDER_COLOR_0" }, + { RADEON_PP_TXFILTER_1,6,"RADEON_PP_TXFILTER_1" }, + { RADEON_PP_BORDER_COLOR_1,1,"RADEON_PP_BORDER_COLOR_1" }, + { RADEON_PP_TXFILTER_2,6,"RADEON_PP_TXFILTER_2" }, + { RADEON_PP_BORDER_COLOR_2,1,"RADEON_PP_BORDER_COLOR_2" }, + { RADEON_SE_ZBIAS_FACTOR,2,"RADEON_SE_ZBIAS_FACTOR" }, + { RADEON_SE_TCL_OUTPUT_VTX_FMT,11,"RADEON_SE_TCL_OUTPUT_VTX_FMT" }, + { RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED,17,"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" }, + { R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" }, + { R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" }, + { R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" }, + { R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" }, + { R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" }, + { R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" }, + { R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" }, + { R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" }, + { R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" }, + { R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" }, + { R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" }, + { R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" }, + { R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" }, + { R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" }, + { R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" }, + { R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" }, + { R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" }, + { R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" }, + { R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" }, + { R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" }, + { R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" }, + { R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" }, + { R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" }, + { R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" }, + { R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" }, + { R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" }, + { R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" }, + { R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" }, + { R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" }, + { R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" }, + { R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" }, + { R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" }, + { R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" }, + { R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" }, + { R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" }, + { R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" }, + { R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" }, + { R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" }, + { R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" }, + { R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" }, + { R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */ + { 
R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */ + { R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" }, + { R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" }, + { R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" }, + { R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" }, + { R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" }, + { R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" }, + { R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" }, + { R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" }, + { R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" }, + { R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" }, +}; + + -#if RADEON_PERFORMANCE_BOXES /* ================================================================ * Performance monitoring functions */ @@ -409,10 +304,12 @@ int x, int y, int w, int h, int r, int g, int b ) { - u32 pitch, offset; u32 color; RING_LOCALS; + x += dev_priv->sarea_priv->boxes[0].x1; + y += dev_priv->sarea_priv->boxes[0].y1; + switch ( dev_priv->color_fmt ) { case RADEON_COLOR_FORMAT_RGB565: color = (((r & 0xf8) << 8) | @@ -425,8 +322,11 @@ break; } - offset = dev_priv->back_offset; - pitch = dev_priv->back_pitch >> 3; + BEGIN_RING( 4 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); + OUT_RING( 0xffffffff ); + ADVANCE_RING(); BEGIN_RING( 6 ); @@ -438,7 +338,12 @@ RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS ); - OUT_RING( (pitch << 22) | (offset >> 5) ); + if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) { + OUT_RING( dev_priv->front_pitch_offset ); + } else { + OUT_RING( dev_priv->back_pitch_offset ); + } + OUT_RING( color ); OUT_RING( (x << 16) | y ); @@ -449,53 +354,77 @@ static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv ) { - if ( atomic_read( &dev_priv->idle_count ) == 0 ) { - radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 ); - } else { - atomic_set( &dev_priv->idle_count, 0 ); + /* Collapse various things into a wait flag -- trying to + * guess if userspase slept -- better just to have them tell us. + */ + if (dev_priv->stats.last_frame_reads > 1 || + dev_priv->stats.last_clear_reads > dev_priv->stats.clears) { + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; } -} -#endif + if (dev_priv->stats.freelist_loops) { + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + } + + /* Purple box for page flipping + */ + if ( dev_priv->stats.boxes & RADEON_BOX_FLIP ) + radeon_clear_box( dev_priv, 4, 4, 8, 8, 255, 0, 255 ); + /* Red box if we have to wait for idle at any point + */ + if ( dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE ) + radeon_clear_box( dev_priv, 16, 4, 8, 8, 255, 0, 0 ); + /* Blue box: lost context? 
+ */ + + /* Yellow box for texture swaps + */ + if ( dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD ) + radeon_clear_box( dev_priv, 40, 4, 8, 8, 255, 255, 0 ); + + /* Green box if hardware never idles (as far as we can tell) + */ + if ( !(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE) ) + radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 ); + + + /* Draw bars indicating number of buffers allocated + * (not a great measure, easily confused) + */ + if (dev_priv->stats.requested_bufs) { + if (dev_priv->stats.requested_bufs > 100) + dev_priv->stats.requested_bufs = 100; + + radeon_clear_box( dev_priv, 4, 16, + dev_priv->stats.requested_bufs, 4, + 196, 128, 128 ); + } + + memset( &dev_priv->stats, 0, sizeof(dev_priv->stats) ); + +} /* ================================================================ * CP command dispatch functions */ -static void radeon_print_dirty( const char *msg, unsigned int flags ) -{ - DRM_DEBUG( "%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", - msg, - flags, - (flags & RADEON_UPLOAD_CONTEXT) ? "context, " : "", - (flags & RADEON_UPLOAD_VERTFMT) ? "vertfmt, " : "", - (flags & RADEON_UPLOAD_LINE) ? "line, " : "", - (flags & RADEON_UPLOAD_BUMPMAP) ? "bumpmap, " : "", - (flags & RADEON_UPLOAD_MASKS) ? "masks, " : "", - (flags & RADEON_UPLOAD_VIEWPORT) ? "viewport, " : "", - (flags & RADEON_UPLOAD_SETUP) ? "setup, " : "", - (flags & RADEON_UPLOAD_TCL) ? "tcl, " : "", - (flags & RADEON_UPLOAD_MISC) ? "misc, " : "", - (flags & RADEON_UPLOAD_TEX0) ? "tex0, " : "", - (flags & RADEON_UPLOAD_TEX1) ? "tex1, " : "", - (flags & RADEON_UPLOAD_TEX2) ? "tex2, " : "", - (flags & RADEON_UPLOAD_CLIPRECTS) ? "cliprects, " : "", - (flags & RADEON_REQUIRE_QUIESCENCE) ? "quiescence, " : "" ); -} - static void radeon_cp_dispatch_clear( drm_device_t *dev, drm_radeon_clear_t *clear, drm_radeon_clear_rect_t *depth_boxes ) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; int nbox = sarea_priv->nbox; drm_clip_rect_t *pbox = sarea_priv->boxes; unsigned int flags = clear->flags; + u32 rb3d_cntl = 0, rb3d_stencilrefmask= 0; int i; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "flags = 0x%x\n", flags ); + + dev_priv->stats.clears++; if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) { unsigned int tmp = flags; @@ -505,127 +434,277 @@ if ( tmp & RADEON_BACK ) flags |= RADEON_FRONT; } - for ( i = 0 ; i < nbox ; i++ ) { - int x = pbox[i].x1; - int y = pbox[i].y1; - int w = pbox[i].x2 - x; - int h = pbox[i].y2 - y; + if ( flags & (RADEON_FRONT | RADEON_BACK) ) { - DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n", - x, y, w, h, flags ); + BEGIN_RING( 4 ); - if ( flags & (RADEON_FRONT | RADEON_BACK) ) { - BEGIN_RING( 4 ); + /* Ensure the 3D stream is idle before doing a + * 2D fill to clear the front or back buffer. + */ + RADEON_WAIT_UNTIL_3D_IDLE(); + + OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); + OUT_RING( clear->color_mask ); - /* Ensure the 3D stream is idle before doing a - * 2D fill to clear the front or back buffer. - */ - RADEON_WAIT_UNTIL_3D_IDLE(); + ADVANCE_RING(); - OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); - OUT_RING( clear->color_mask ); + /* Make sure we restore the 3D state next time. 
+ */ + dev_priv->sarea_priv->ctx_owner = 0; - ADVANCE_RING(); + for ( i = 0 ; i < nbox ; i++ ) { + int x = pbox[i].x1; + int y = pbox[i].y1; + int w = pbox[i].x2 - x; + int h = pbox[i].y2 - y; + + DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n", + x, y, w, h, flags ); + + if ( flags & RADEON_FRONT ) { + BEGIN_RING( 6 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_SOLID_COLOR | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | + RADEON_GMC_CLR_CMP_CNTL_DIS ); + + OUT_RING( dev_priv->front_pitch_offset ); + OUT_RING( clear->clear_color ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } + + if ( flags & RADEON_BACK ) { + BEGIN_RING( 6 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_SOLID_COLOR | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | + RADEON_GMC_CLR_CMP_CNTL_DIS ); + + OUT_RING( dev_priv->back_pitch_offset ); + OUT_RING( clear->clear_color ); - /* Make sure we restore the 3D state next time. - */ - dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | - RADEON_UPLOAD_MASKS); + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } } + } - if ( flags & RADEON_FRONT ) { - BEGIN_RING( 6 ); + /* We have to clear the depth and/or stencil buffers by + * rendering a quad into just those buffers. Thus, we have to + * make sure the 3D engine is configured correctly. + */ + if ( dev_priv->is_r200 && + (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) { - OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); - OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | - RADEON_GMC_BRUSH_SOLID_COLOR | - (dev_priv->color_fmt << 8) | - RADEON_GMC_SRC_DATATYPE_COLOR | - RADEON_ROP3_P | - RADEON_GMC_CLR_CMP_CNTL_DIS ); + int tempPP_CNTL; + int tempRE_CNTL; + int tempRB3D_CNTL; + int tempRB3D_ZSTENCILCNTL; + int tempRB3D_STENCILREFMASK; + int tempRB3D_PLANEMASK; + int tempSE_CNTL; + int tempSE_VTE_CNTL; + int tempSE_VTX_FMT_0; + int tempSE_VTX_FMT_1; + int tempSE_VAP_CNTL; + int tempRE_AUX_SCISSOR_CNTL; - OUT_RING( dev_priv->front_pitch_offset ); - OUT_RING( clear->clear_color ); + tempPP_CNTL = 0; + tempRE_CNTL = 0; - OUT_RING( (x << 16) | y ); - OUT_RING( (w << 16) | h ); + tempRB3D_CNTL = depth_clear->rb3d_cntl; + tempRB3D_CNTL &= ~(1<<15); /* unset radeon magic flag */ - ADVANCE_RING(); + tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl; + tempRB3D_STENCILREFMASK = 0x0; + + tempSE_CNTL = depth_clear->se_cntl; + + + + /* Disable TCL */ + + tempSE_VAP_CNTL = (/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */ + (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT)); + + tempRB3D_PLANEMASK = 0x0; + + tempRE_AUX_SCISSOR_CNTL = 0x0; + + tempSE_VTE_CNTL = + SE_VTE_CNTL__VTX_XY_FMT_MASK | + SE_VTE_CNTL__VTX_Z_FMT_MASK; + + /* Vertex format (X, Y, Z, W)*/ + tempSE_VTX_FMT_0 = + SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK | + SE_VTX_FMT_0__VTX_W0_PRESENT_MASK; + tempSE_VTX_FMT_1 = 0x0; + + + /* + * Depth buffer specific enables + */ + if (flags & RADEON_DEPTH) { + /* Enable depth buffer */ + tempRB3D_CNTL |= RADEON_Z_ENABLE; + } else { + /* Disable depth buffer */ + tempRB3D_CNTL &= ~RADEON_Z_ENABLE; } - if ( flags & RADEON_BACK ) { - BEGIN_RING( 6 ); + /* + * Stencil buffer specific enables + */ + if ( flags & RADEON_STENCIL ) { + tempRB3D_CNTL |= RADEON_STENCIL_ENABLE; + tempRB3D_STENCILREFMASK = clear->depth_mask; + } else { + tempRB3D_CNTL &= 
~RADEON_STENCIL_ENABLE; + tempRB3D_STENCILREFMASK = 0x00000000; + } - OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); - OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | - RADEON_GMC_BRUSH_SOLID_COLOR | - (dev_priv->color_fmt << 8) | - RADEON_GMC_SRC_DATATYPE_COLOR | - RADEON_ROP3_P | - RADEON_GMC_CLR_CMP_CNTL_DIS ); + BEGIN_RING( 26 ); + RADEON_WAIT_UNTIL_2D_IDLE(); - OUT_RING( dev_priv->back_pitch_offset ); - OUT_RING( clear->clear_color ); + OUT_RING_REG( RADEON_PP_CNTL, tempPP_CNTL ); + OUT_RING_REG( R200_RE_CNTL, tempRE_CNTL ); + OUT_RING_REG( RADEON_RB3D_CNTL, tempRB3D_CNTL ); + OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL, + tempRB3D_ZSTENCILCNTL ); + OUT_RING_REG( RADEON_RB3D_STENCILREFMASK, + tempRB3D_STENCILREFMASK ); + OUT_RING_REG( RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK ); + OUT_RING_REG( RADEON_SE_CNTL, tempSE_CNTL ); + OUT_RING_REG( R200_SE_VTE_CNTL, tempSE_VTE_CNTL ); + OUT_RING_REG( R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0 ); + OUT_RING_REG( R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1 ); + OUT_RING_REG( R200_SE_VAP_CNTL, tempSE_VAP_CNTL ); + OUT_RING_REG( R200_RE_AUX_SCISSOR_CNTL, + tempRE_AUX_SCISSOR_CNTL ); + ADVANCE_RING(); - OUT_RING( (x << 16) | y ); - OUT_RING( (w << 16) | h ); + /* Make sure we restore the 3D state next time. + */ + dev_priv->sarea_priv->ctx_owner = 0; - ADVANCE_RING(); + for ( i = 0 ; i < nbox ; i++ ) { + + /* Funny that this should be required -- + * sets top-left? + */ + radeon_emit_clip_rect( dev_priv, + &sarea_priv->boxes[i] ); + BEGIN_RING( 14 ); + OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 12 ) ); + OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST | + RADEON_PRIM_WALK_RING | + (3 << RADEON_NUM_VERTICES_SHIFT)) ); + OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); + OUT_RING( depth_boxes[i].ui[CLEAR_Y1] ); + OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x3f800000 ); + OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); + OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); + OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x3f800000 ); + OUT_RING( depth_boxes[i].ui[CLEAR_X2] ); + OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); + OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x3f800000 ); + ADVANCE_RING(); } + } + else if ( (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) { + + rb3d_cntl = depth_clear->rb3d_cntl; if ( flags & RADEON_DEPTH ) { - drm_radeon_depth_clear_t *depth_clear = - &dev_priv->depth_clear; + rb3d_cntl |= RADEON_Z_ENABLE; + } else { + rb3d_cntl &= ~RADEON_Z_ENABLE; + } - if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { - radeon_emit_state( dev_priv ); - } + if ( flags & RADEON_STENCIL ) { + rb3d_cntl |= RADEON_STENCIL_ENABLE; + rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */ + } else { + rb3d_cntl &= ~RADEON_STENCIL_ENABLE; + rb3d_stencilrefmask = 0x00000000; + } - /* FIXME: Render a rectangle to clear the depth - * buffer. So much for those "fast Z clears"... - */ - BEGIN_RING( 23 ); + BEGIN_RING( 13 ); + RADEON_WAIT_UNTIL_2D_IDLE(); + + OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) ); + OUT_RING( 0x00000000 ); + OUT_RING( rb3d_cntl ); + + OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL, + depth_clear->rb3d_zstencilcntl ); + OUT_RING_REG( RADEON_RB3D_STENCILREFMASK, + rb3d_stencilrefmask ); + OUT_RING_REG( RADEON_RB3D_PLANEMASK, + 0x00000000 ); + OUT_RING_REG( RADEON_SE_CNTL, + depth_clear->se_cntl ); + ADVANCE_RING(); + + /* Make sure we restore the 3D state next time. + */ + dev_priv->sarea_priv->ctx_owner = 0; - RADEON_WAIT_UNTIL_2D_IDLE(); + for ( i = 0 ; i < nbox ; i++ ) { + + /* Funny that this should be required -- + * sets top-left? 
+ */ + radeon_emit_clip_rect( dev_priv, + &sarea_priv->boxes[i] ); - OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) ); - OUT_RING( 0x00000000 ); - OUT_RING( depth_clear->rb3d_cntl ); - OUT_RING( CP_PACKET0( RADEON_RB3D_ZSTENCILCNTL, 0 ) ); - OUT_RING( depth_clear->rb3d_zstencilcntl ); - OUT_RING( CP_PACKET0( RADEON_RB3D_PLANEMASK, 0 ) ); - OUT_RING( 0x00000000 ); - OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); - OUT_RING( depth_clear->se_cntl ); + BEGIN_RING( 15 ); - OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 10 ) ); - OUT_RING( RADEON_VTX_Z_PRESENT ); + OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 13 ) ); + OUT_RING( RADEON_VTX_Z_PRESENT | + RADEON_VTX_PKCOLOR_PRESENT); OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST | RADEON_PRIM_WALK_RING | RADEON_MAOS_ENABLE | RADEON_VTX_FMT_RADEON_MODE | (3 << RADEON_NUM_VERTICES_SHIFT)) ); + OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); OUT_RING( depth_boxes[i].ui[CLEAR_Y1] ); OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x0 ); OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x0 ); OUT_RING( depth_boxes[i].ui[CLEAR_X2] ); OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x0 ); ADVANCE_RING(); - - /* Make sure we restore the 3D state next time. - */ - dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | - RADEON_UPLOAD_SETUP | - RADEON_UPLOAD_MASKS); } } @@ -651,13 +730,13 @@ drm_clip_rect_t *pbox = sarea_priv->boxes; int i; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); -#if RADEON_PERFORMANCE_BOXES /* Do some trivial performance monitoring... */ - radeon_cp_performance_boxes( dev_priv ); -#endif + if (dev_priv->do_boxes) + radeon_cp_performance_boxes( dev_priv ); + /* Wait for the 3D stream to idle before dispatching the bitblt. * This will prevent data corruption between the two streams. @@ -689,9 +768,17 @@ RADEON_DP_SRC_SOURCE_MEMORY | RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS ); - - OUT_RING( dev_priv->back_pitch_offset ); - OUT_RING( dev_priv->front_pitch_offset ); + + /* Make this work even if front & back are flipped: + */ + if (dev_priv->current_page == 0) { + OUT_RING( dev_priv->back_pitch_offset ); + OUT_RING( dev_priv->front_pitch_offset ); + } + else { + OUT_RING( dev_priv->front_pitch_offset ); + OUT_RING( dev_priv->back_pitch_offset ); + } OUT_RING( (x << 16) | y ); OUT_RING( (x << 16) | y ); @@ -717,29 +804,33 @@ static void radeon_cp_dispatch_flip( drm_device_t *dev ) { drm_radeon_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page ); + drm_sarea_t *sarea = (drm_sarea_t *)dev_priv->sarea->handle; + int offset = (dev_priv->current_page == 1) + ? dev_priv->front_offset : dev_priv->back_offset; + RING_LOCALS; + DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n", + __FUNCTION__, + dev_priv->current_page, + dev_priv->sarea_priv->pfCurrentPage); -#if RADEON_PERFORMANCE_BOXES /* Do some trivial performance monitoring... 
*/ - radeon_cp_performance_boxes( dev_priv ); -#endif + if (dev_priv->do_boxes) { + dev_priv->stats.boxes |= RADEON_BOX_FLIP; + radeon_cp_performance_boxes( dev_priv ); + } + /* Update the frame offsets for both CRTCs + */ BEGIN_RING( 6 ); RADEON_WAIT_UNTIL_3D_IDLE(); - RADEON_WAIT_UNTIL_PAGE_FLIPPED(); - - OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET, 0 ) ); - - if ( dev_priv->current_page == 0 ) { - OUT_RING( dev_priv->back_offset ); - dev_priv->current_page = 1; - } else { - OUT_RING( dev_priv->front_offset ); - dev_priv->current_page = 0; - } + OUT_RING_REG( RADEON_CRTC_OFFSET, ( ( sarea->frame.y * dev_priv->front_pitch + + sarea->frame.x + * ( dev_priv->color_fmt - 2 ) ) & ~7 ) + + offset ); + OUT_RING_REG( RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base + + offset ); ADVANCE_RING(); @@ -748,6 +839,8 @@ * performing the swapbuffer ioctl. */ dev_priv->sarea_priv->last_frame++; + dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page = + 1 - dev_priv->current_page; BEGIN_RING( 2 ); @@ -756,82 +849,118 @@ ADVANCE_RING(); } +static int bad_prim_vertex_nr( int primitive, int nr ) +{ + switch (primitive & RADEON_PRIM_TYPE_MASK) { + case RADEON_PRIM_TYPE_NONE: + case RADEON_PRIM_TYPE_POINT: + return nr < 1; + case RADEON_PRIM_TYPE_LINE: + return (nr & 1) || nr == 0; + case RADEON_PRIM_TYPE_LINE_STRIP: + return nr < 2; + case RADEON_PRIM_TYPE_TRI_LIST: + case RADEON_PRIM_TYPE_3VRT_POINT_LIST: + case RADEON_PRIM_TYPE_3VRT_LINE_LIST: + case RADEON_PRIM_TYPE_RECT_LIST: + return nr % 3 || nr == 0; + case RADEON_PRIM_TYPE_TRI_FAN: + case RADEON_PRIM_TYPE_TRI_STRIP: + return nr < 3; + default: + return 1; + } +} + + + +typedef struct { + unsigned int start; + unsigned int finish; + unsigned int prim; + unsigned int numverts; + unsigned int offset; + unsigned int vc_format; +} drm_radeon_tcl_prim_t; + static void radeon_cp_dispatch_vertex( drm_device_t *dev, - drm_buf_t *buf ) + drm_buf_t *buf, + drm_radeon_tcl_prim_t *prim, + drm_clip_rect_t *boxes, + int nbox ) + { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv = buf->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - int format = sarea_priv->vc_format; - int offset = dev_priv->agp_buffers_offset + buf->offset; - int size = buf->used; - int prim = buf_priv->prim; + drm_clip_rect_t box; + int offset = dev_priv->agp_buffers_offset + buf->offset + prim->start; + int numverts = (int)prim->numverts; int i = 0; RING_LOCALS; - DRM_DEBUG( "%s: nbox=%d\n", __FUNCTION__, sarea_priv->nbox ); - if ( 0 ) - radeon_print_dirty( "dispatch_vertex", sarea_priv->dirty ); + DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n", + prim->prim, + prim->vc_format, + prim->start, + prim->finish, + prim->numverts); + + if (bad_prim_vertex_nr( prim->prim, prim->numverts )) { + DRM_ERROR( "bad prim %x numverts %d\n", + prim->prim, prim->numverts ); + return; + } + + do { + /* Emit the next cliprect */ + if ( i < nbox ) { + if (__copy_from_user( &box, &boxes[i], sizeof(box) )) + return; - if ( buf->used ) { - buf_priv->dispatched = 1; - - if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { - radeon_emit_state( dev_priv ); + radeon_emit_clip_rect( dev_priv, &box ); } - do { - /* Emit the next set of up to three cliprects */ - if ( i < sarea_priv->nbox ) { - radeon_emit_clip_rect( dev_priv, - &sarea_priv->boxes[i] ); - } + /* Emit the vertex buffer rendering commands */ + BEGIN_RING( 5 ); - /* Emit the vertex buffer rendering commands */ - BEGIN_RING( 5 ); + OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 
) ); + OUT_RING( offset ); + OUT_RING( numverts ); + OUT_RING( prim->vc_format ); + OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST | + RADEON_COLOR_ORDER_RGBA | + RADEON_VTX_FMT_RADEON_MODE | + (numverts << RADEON_NUM_VERTICES_SHIFT) ); - OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) ); - OUT_RING( offset ); - OUT_RING( size ); - OUT_RING( format ); - OUT_RING( prim | RADEON_PRIM_WALK_LIST | - RADEON_COLOR_ORDER_RGBA | - RADEON_VTX_FMT_RADEON_MODE | - (size << RADEON_NUM_VERTICES_SHIFT) ); + ADVANCE_RING(); - ADVANCE_RING(); + i++; + } while ( i < nbox ); +} - i++; - } while ( i < sarea_priv->nbox ); - } - if ( buf_priv->discard ) { - buf_priv->age = dev_priv->sarea_priv->last_dispatch; - /* Emit the vertex buffer age */ - BEGIN_RING( 2 ); - RADEON_DISPATCH_AGE( buf_priv->age ); - ADVANCE_RING(); +static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_buf_priv_t *buf_priv = buf->dev_private; + RING_LOCALS; - buf->pending = 1; - buf->used = 0; - /* FIXME: Check dispatched field */ - buf_priv->dispatched = 0; - } + buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; - dev_priv->sarea_priv->last_dispatch++; + /* Emit the vertex buffer age */ + BEGIN_RING( 2 ); + RADEON_DISPATCH_AGE( buf_priv->age ); + ADVANCE_RING(); - sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS; - sarea_priv->nbox = 0; + buf->pending = 1; + buf->used = 0; } - static void radeon_cp_dispatch_indirect( drm_device_t *dev, drm_buf_t *buf, int start, int end ) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv = buf->dev_private; RING_LOCALS; DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end ); @@ -852,8 +981,6 @@ data[dwords++] = RADEON_CP_PACKET2; } - buf_priv->dispatched = 1; - /* Fire off the indirect buffer */ BEGIN_RING( 3 ); @@ -863,100 +990,75 @@ ADVANCE_RING(); } - - if ( buf_priv->discard ) { - buf_priv->age = dev_priv->sarea_priv->last_dispatch; - - /* Emit the indirect buffer age */ - BEGIN_RING( 2 ); - RADEON_DISPATCH_AGE( buf_priv->age ); - ADVANCE_RING(); - - buf->pending = 1; - buf->used = 0; - /* FIXME: Check dispatched field */ - buf_priv->dispatched = 0; - } - - dev_priv->sarea_priv->last_dispatch++; } + static void radeon_cp_dispatch_indices( drm_device_t *dev, - drm_buf_t *buf, - int start, int end, - int count ) + drm_buf_t *elt_buf, + drm_radeon_tcl_prim_t *prim, + drm_clip_rect_t *boxes, + int nbox ) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv = buf->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - int format = sarea_priv->vc_format; - int offset = dev_priv->agp_buffers_offset; - int prim = buf_priv->prim; + drm_clip_rect_t box; + int offset = dev_priv->agp_buffers_offset + prim->offset; u32 *data; int dwords; int i = 0; - RING_LOCALS; - DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count ); - - if ( 0 ) - radeon_print_dirty( "dispatch_indices", sarea_priv->dirty ); - - if ( start != end ) { - buf_priv->dispatched = 1; - - if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { - radeon_emit_state( dev_priv ); - } - - dwords = (end - start + 3) / sizeof(u32); - - data = (u32 *)((char *)dev_priv->buffers->handle - + buf->offset + start); + int start = prim->start + RADEON_INDEX_PRIM_OFFSET; + int count = (prim->finish - start) / sizeof(u16); - data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 ); - - data[1] = offset; - data[2] = RADEON_MAX_VB_VERTS; - data[3] = format; - 
data[4] = (prim | RADEON_PRIM_WALK_IND | - RADEON_COLOR_ORDER_RGBA | - RADEON_VTX_FMT_RADEON_MODE | - (count << RADEON_NUM_VERTICES_SHIFT) ); - - if ( count & 0x1 ) { - data[dwords-1] &= 0x0000ffff; - } - - do { - /* Emit the next set of up to three cliprects */ - if ( i < sarea_priv->nbox ) { - radeon_emit_clip_rect( dev_priv, - &sarea_priv->boxes[i] ); - } - - radeon_cp_dispatch_indirect( dev, buf, start, end ); - - i++; - } while ( i < sarea_priv->nbox ); + DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n", + prim->prim, + prim->vc_format, + prim->start, + prim->finish, + prim->offset, + prim->numverts); + + if (bad_prim_vertex_nr( prim->prim, count )) { + DRM_ERROR( "bad prim %x count %d\n", + prim->prim, count ); + return; } - if ( buf_priv->discard ) { - buf_priv->age = dev_priv->sarea_priv->last_dispatch; - /* Emit the vertex buffer age */ - BEGIN_RING( 2 ); - RADEON_DISPATCH_AGE( buf_priv->age ); - ADVANCE_RING(); + if ( start >= prim->finish || + (prim->start & 0x7) ) { + DRM_ERROR( "buffer prim %d\n", prim->prim ); + return; + } + + dwords = (prim->finish - prim->start + 3) / sizeof(u32); + + data = (u32 *)((char *)dev_priv->buffers->handle + + elt_buf->offset + prim->start); + + data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 ); + data[1] = offset; + data[2] = prim->numverts; + data[3] = prim->vc_format; + data[4] = (prim->prim | + RADEON_PRIM_WALK_IND | + RADEON_COLOR_ORDER_RGBA | + RADEON_VTX_FMT_RADEON_MODE | + (count << RADEON_NUM_VERTICES_SHIFT) ); + + do { + if ( i < nbox ) { + if (__copy_from_user( &box, &boxes[i], sizeof(box) )) + return; + + radeon_emit_clip_rect( dev_priv, &box ); + } - buf->pending = 1; - /* FIXME: Check dispatched field */ - buf_priv->dispatched = 0; - } + radeon_cp_dispatch_indirect( dev, elt_buf, + prim->start, + prim->finish ); - dev_priv->sarea_priv->last_dispatch++; + i++; + } while ( i < nbox ); - sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS; - sarea_priv->nbox = 0; } #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32)) @@ -967,25 +1069,35 @@ { drm_radeon_private_t *dev_priv = dev->dev_private; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; u32 format; u32 *buffer; - u8 *data; + const u8 *data; int size, dwords, tex_width, blit_width; - u32 y, height; - int ret = 0, i; + u32 height; + int i; RING_LOCALS; - /* FIXME: Be smarter about this... + dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; + + /* Flush the pixel cache. This ensures no pixel data gets mixed + * up with the texture data from the host data blit, otherwise + * part of the texture image may be corrupted. */ - buf = radeon_freelist_get( dev ); - if ( !buf ) return -EAGAIN; + BEGIN_RING( 4 ); + RADEON_FLUSH_CACHE(); + RADEON_WAIT_UNTIL_IDLE(); + ADVANCE_RING(); - DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n", - tex->offset >> 10, tex->pitch, tex->format, - image->x, image->y, image->width, image->height ); +#ifdef __BIG_ENDIAN + /* The Mesa texture functions provide the data in little endian as the + * chip wants it, but we need to compensate for the fact that the CP + * ring gets byte-swapped + */ + BEGIN_RING( 2 ); + OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT ); + ADVANCE_RING(); +#endif - buf_priv = buf->dev_private; /* The compiler won't optimize away a division by a variable, * even if the only legal values are powers of two. 
Thus, we'll @@ -1002,6 +1114,8 @@ case RADEON_TXFORMAT_ARGB1555: case RADEON_TXFORMAT_RGB565: case RADEON_TXFORMAT_ARGB4444: + case RADEON_TXFORMAT_VYUY422: + case RADEON_TXFORMAT_YVYU422: format = RADEON_COLOR_FORMAT_RGB565; tex_width = tex->width * 2; blit_width = image->width * 2; @@ -1017,56 +1131,46 @@ return -EINVAL; } - DRM_DEBUG( " tex=%dx%d blit=%d\n", - tex_width, tex->height, blit_width ); - - /* Flush the pixel cache. This ensures no pixel data gets mixed - * up with the texture data from the host data blit, otherwise - * part of the texture image may be corrupted. - */ - BEGIN_RING( 4 ); - - RADEON_FLUSH_CACHE(); - RADEON_WAIT_UNTIL_IDLE(); + DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width ); - ADVANCE_RING(); + do { + DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n", + tex->offset >> 10, tex->pitch, tex->format, + image->x, image->y, image->width, image->height ); - /* Make a copy of the parameters in case we have to update them - * for a multi-pass texture blit. + /* Make a copy of some parameters in case we have to + * update them for a multi-pass texture blit. */ - y = image->y; height = image->height; - data = (u8 *)image->data; + data = (const u8 *)image->data; size = height * blit_width; if ( size > RADEON_MAX_TEXTURE_SIZE ) { - /* Texture image is too large, do a multipass upload */ - ret = -EAGAIN; - - /* Adjust the blit size to fit the indirect buffer */ height = RADEON_MAX_TEXTURE_SIZE / blit_width; size = height * blit_width; - - /* Update the input parameters for next time */ - image->y += height; - image->height -= height; - image->data = (char *)image->data + size; - - if ( copy_to_user( tex->image, image, sizeof(*image) ) ) { - DRM_ERROR( "EFAULT on tex->image\n" ); - return -EFAULT; - } } else if ( size < 4 && size > 0 ) { size = 4; + } else if ( size == 0 ) { + return 0; + } + + buf = radeon_freelist_get( dev ); + if ( 0 && !buf ) { + radeon_do_cp_idle( dev_priv ); + buf = radeon_freelist_get( dev ); + } + if ( !buf ) { + DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n"); + copy_to_user( tex->image, image, sizeof(*image) ); + return -EAGAIN; } - dwords = size / 4; /* Dispatch the indirect buffer. */ - buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset); - + buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset); + dwords = size / 4; buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 ); buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL | RADEON_GMC_BRUSH_NONE | @@ -1080,7 +1184,7 @@ buffer[2] = (tex->pitch << 22) | (tex->offset >> 10); buffer[3] = 0xffffffff; buffer[4] = 0xffffffff; - buffer[5] = (y << 16) | image->x; + buffer[5] = (image->y << 16) | image->x; buffer[6] = (height << 16) | image->width; buffer[7] = dwords; @@ -1112,30 +1216,34 @@ buf->pid = current->pid; buf->used = (dwords + 8) * sizeof(u32); - buf_priv->discard = 1; radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); + radeon_cp_discard_buffer( dev, buf ); + + /* Update the input parameters for next time */ + image->y += height; + image->height -= height; + (const u8 *)image->data += size; + } while (image->height > 0); /* Flush the pixel cache after the blit completes. This ensures * the texture data is written out to memory before rendering * continues. 
*/ BEGIN_RING( 4 ); - RADEON_FLUSH_CACHE(); RADEON_WAIT_UNTIL_2D_IDLE(); - ADVANCE_RING(); - - return ret; + return 0; } + static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple ) { drm_radeon_private_t *dev_priv = dev->dev_private; int i; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_RING( 35 ); @@ -1158,31 +1266,95 @@ int radeon_cp_clear( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_clear_t clear; + drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; + DRM_DEBUG( "\n" ); + + LOCK_TEST_WITH_RETURN( dev ); + + if ( copy_from_user( &clear, (drm_radeon_clear_t *)arg, + sizeof(clear) ) ) + return -EFAULT; + + RING_SPACE_TEST_WITH_RETURN( dev_priv ); + + if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + + if ( copy_from_user( &depth_boxes, clear.depth_boxes, + sarea_priv->nbox * sizeof(depth_boxes[0]) ) ) + return -EFAULT; + + radeon_cp_dispatch_clear( dev, &clear, depth_boxes ); + + COMMIT_RING(); + return 0; +} + + +/* Not sure why this isn't set all the time: + */ +static int radeon_do_init_pageflip( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG( "\n" ); + + BEGIN_RING( 6 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) ); + OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL ); + OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) ); + OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL ); + ADVANCE_RING(); + + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page; + + return 0; +} + +/* Called whenever a client dies, from DRM(release). + * NOTE: Lock isn't necessarily held when this is called! + */ +int radeon_do_cleanup_pageflip( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "\n" ); + + if (dev_priv->current_page != 0) + radeon_cp_dispatch_flip( dev ); + + dev_priv->page_flipping = 0; + return 0; +} + +/* Swapping and flipping are different operations, need different ioctls. + * They can & should be intermixed to support multiple 3d windows. 
+ */ +int radeon_cp_flip(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_clear_t clear; - drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); LOCK_TEST_WITH_RETURN( dev ); - if ( copy_from_user( &clear, (drm_radeon_clear_t *)arg, - sizeof(clear) ) ) - return -EFAULT; - RING_SPACE_TEST_WITH_RETURN( dev_priv ); - if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) - sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - - if ( copy_from_user( &depth_boxes, clear.depth_boxes, - sarea_priv->nbox * sizeof(depth_boxes[0]) ) ) - return -EFAULT; - - radeon_cp_dispatch_clear( dev, &clear, depth_boxes ); + if (!dev_priv->page_flipping) + radeon_do_init_pageflip( dev ); + + radeon_cp_dispatch_flip( dev ); + COMMIT_RING(); return 0; } @@ -1193,7 +1365,7 @@ drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); LOCK_TEST_WITH_RETURN( dev ); @@ -1202,14 +1374,10 @@ if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - if ( !dev_priv->page_flipping ) { - radeon_cp_dispatch_swap( dev ); - dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | - RADEON_UPLOAD_MASKS); - } else { - radeon_cp_dispatch_flip( dev ); - } + radeon_cp_dispatch_swap( dev ); + dev_priv->sarea_priv->ctx_owner = 0; + COMMIT_RING(); return 0; } @@ -1219,10 +1387,11 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; drm_radeon_vertex_t vertex; + drm_radeon_tcl_prim_t prim; LOCK_TEST_WITH_RETURN( dev ); @@ -1235,8 +1404,8 @@ sizeof(vertex) ) ) return -EFAULT; - DRM_DEBUG( "%s: pid=%d index=%d count=%d discard=%d\n", - __FUNCTION__, current->pid, + DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n", + current->pid, vertex.idx, vertex.count, vertex.discard ); if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) { @@ -1254,7 +1423,6 @@ VB_AGE_TEST_WITH_RETURN( dev_priv ); buf = dma->buflist[vertex.idx]; - buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", @@ -1266,12 +1434,39 @@ return -EINVAL; } - buf->used = vertex.count; - buf_priv->prim = vertex.prim; - buf_priv->discard = vertex.discard; + /* Build up a prim_t record: + */ + if (vertex.count) { + buf->used = vertex.count; /* not used? 
*/ + + if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { + radeon_emit_state( dev_priv, + &sarea_priv->context_state, + sarea_priv->tex_state, + sarea_priv->dirty ); + + sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | + RADEON_UPLOAD_TEX1IMAGES | + RADEON_UPLOAD_TEX2IMAGES | + RADEON_REQUIRE_QUIESCENCE); + } + + prim.start = 0; + prim.finish = vertex.count; /* unused */ + prim.prim = vertex.prim; + prim.numverts = vertex.count; + prim.vc_format = dev_priv->sarea_priv->vc_format; + + radeon_cp_dispatch_vertex( dev, buf, &prim, + dev_priv->sarea_priv->boxes, + dev_priv->sarea_priv->nbox ); + } - radeon_cp_dispatch_vertex( dev, buf ); + if (vertex.discard) { + radeon_cp_discard_buffer( dev, buf ); + } + COMMIT_RING(); return 0; } @@ -1281,10 +1476,11 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; drm_radeon_indices_t elts; + drm_radeon_tcl_prim_t prim; int count; LOCK_TEST_WITH_RETURN( dev ); @@ -1317,7 +1513,6 @@ VB_AGE_TEST_WITH_RETURN( dev_priv ); buf = dma->buflist[elts.idx]; - buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", @@ -1342,11 +1537,37 @@ } buf->used = elts.end; - buf_priv->prim = elts.prim; - buf_priv->discard = elts.discard; - radeon_cp_dispatch_indices( dev, buf, elts.start, elts.end, count ); + if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { + radeon_emit_state( dev_priv, + &sarea_priv->context_state, + sarea_priv->tex_state, + sarea_priv->dirty ); + + sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | + RADEON_UPLOAD_TEX1IMAGES | + RADEON_UPLOAD_TEX2IMAGES | + RADEON_REQUIRE_QUIESCENCE); + } + + + /* Build up a prim_t record: + */ + prim.start = elts.start; + prim.finish = elts.end; + prim.prim = elts.prim; + prim.offset = 0; /* offset from start of dma buffers */ + prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ + prim.vc_format = dev_priv->sarea_priv->vc_format; + + radeon_cp_dispatch_indices( dev, buf, &prim, + dev_priv->sarea_priv->boxes, + dev_priv->sarea_priv->nbox ); + if (elts.discard) { + radeon_cp_discard_buffer( dev, buf ); + } + COMMIT_RING(); return 0; } @@ -1358,6 +1579,7 @@ drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_texture_t tex; drm_radeon_tex_image_t image; + int ret; LOCK_TEST_WITH_RETURN( dev ); @@ -1377,7 +1599,10 @@ RING_SPACE_TEST_WITH_RETURN( dev_priv ); VB_AGE_TEST_WITH_RETURN( dev_priv ); - return radeon_cp_dispatch_texture( dev, &tex, &image ); + ret = radeon_cp_dispatch_texture( dev, &tex, &image ); + + COMMIT_RING(); + return ret; } int radeon_cp_stipple( struct inode *inode, struct file *filp, @@ -1402,6 +1627,7 @@ radeon_cp_dispatch_stipple( dev, mask ); + COMMIT_RING(); return 0; } @@ -1413,7 +1639,6 @@ drm_radeon_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; drm_radeon_indirect_t indirect; RING_LOCALS; @@ -1439,7 +1664,6 @@ } buf = dma->buflist[indirect.idx]; - buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", @@ -1461,7 +1685,6 @@ VB_AGE_TEST_WITH_RETURN( dev_priv ); buf->used = indirect.end; - buf_priv->discard = indirect.discard; /* Wait for the 3D stream to idle before the indirect buffer * containing 2D acceleration commands is processed. @@ -1477,6 +1700,526 @@ * privileged clients. 
*/ radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end ); + if (indirect.discard) { + radeon_cp_discard_buffer( dev, buf ); + } + + + COMMIT_RING(); + return 0; +} + +int radeon_cp_vertex2(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_radeon_vertex2_t vertex; + int i; + unsigned char laststate; + + LOCK_TEST_WITH_RETURN( dev ); + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t *)data, + sizeof(vertex) ); + + DRM_DEBUG( "pid=%d index=%d discard=%d\n", + current->pid, + vertex.idx, vertex.discard ); + + if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + vertex.idx, dma->buf_count - 1 ); + return -EINVAL; + } + + RING_SPACE_TEST_WITH_RETURN( dev_priv ); + VB_AGE_TEST_WITH_RETURN( dev_priv ); + + buf = dma->buflist[vertex.idx]; + + if ( buf->pid != current->pid ) { + DRM_ERROR( "process %d using buffer owned by %d\n", + current->pid, buf->pid ); + return -EINVAL; + } + + if ( buf->pending ) { + DRM_ERROR( "sending pending buffer %d\n", vertex.idx ); + return -EINVAL; + } + + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) + return -EINVAL; + + for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) { + drm_radeon_prim_t prim; + drm_radeon_tcl_prim_t tclprim; + + if ( copy_from_user( &prim, &vertex.prim[i], sizeof(prim) ) ) + return -EFAULT; + + if ( prim.stateidx != laststate ) { + drm_radeon_state_t state; + + if ( copy_from_user( &state, + &vertex.state[prim.stateidx], + sizeof(state) ) ) + return -EFAULT; + + radeon_emit_state2( dev_priv, &state ); + + laststate = prim.stateidx; + } + + tclprim.start = prim.start; + tclprim.finish = prim.finish; + tclprim.prim = prim.prim; + tclprim.vc_format = prim.vc_format; + + if ( prim.prim & RADEON_PRIM_WALK_IND ) { + tclprim.offset = prim.numverts * 64; + tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ + + radeon_cp_dispatch_indices( dev, buf, &tclprim, + sarea_priv->boxes, + sarea_priv->nbox); + } else { + tclprim.numverts = prim.numverts; + tclprim.offset = 0; /* not used */ + + radeon_cp_dispatch_vertex( dev, buf, &tclprim, + sarea_priv->boxes, + sarea_priv->nbox); + } + + if (sarea_priv->nbox == 1) + sarea_priv->nbox = 0; + } + + if ( vertex.discard ) { + radeon_cp_discard_buffer( dev, buf ); + } + + COMMIT_RING(); + return 0; +} + + +static int radeon_emit_packets( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int id = (int)header.packet.packet_id; + int sz, reg; + int *data = (int *)cmdbuf->buf; + RING_LOCALS; + + if (id >= RADEON_MAX_STATE_PACKETS) + return -EINVAL; + + sz = packet[id].len; + reg = packet[id].start; + + if (sz * sizeof(int) > cmdbuf->bufsz) + return -EINVAL; + + BEGIN_RING(sz+1); + OUT_RING( CP_PACKET0( reg, (sz-1) ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + +static __inline__ int radeon_emit_scalars( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int sz = header.scalars.count; + int *data = (int *)cmdbuf->buf; + int start = 
header.scalars.offset; + int stride = header.scalars.stride; + RING_LOCALS; + + BEGIN_RING( 3+sz ); + OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) ); + OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); + OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + +/* God this is ugly + */ +static __inline__ int radeon_emit_scalars2( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int sz = header.scalars.count; + int *data = (int *)cmdbuf->buf; + int start = ((unsigned int)header.scalars.offset) + 0x100; + int stride = header.scalars.stride; + RING_LOCALS; + + BEGIN_RING( 3+sz ); + OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) ); + OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); + OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + +static __inline__ int radeon_emit_vectors( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int sz = header.vectors.count; + int *data = (int *)cmdbuf->buf; + int start = header.vectors.offset; + int stride = header.vectors.stride; + RING_LOCALS; + + BEGIN_RING( 3+sz ); + OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) ); + OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); + OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + + +static int radeon_emit_packet3( drm_device_t *dev, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + int cmdsz, tmp; + int *cmd = (int *)cmdbuf->buf; + RING_LOCALS; + + DRM_DEBUG("\n"); + + if (__get_user( tmp, &cmd[0])) + return -EFAULT; + + cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16); + + if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 || + cmdsz * 4 > cmdbuf->bufsz) + return -EINVAL; + + BEGIN_RING( cmdsz ); + OUT_RING_USER_TABLE( cmd, cmdsz ); + ADVANCE_RING(); + + cmdbuf->buf += cmdsz * 4; + cmdbuf->bufsz -= cmdsz * 4; + return 0; +} + + +static int radeon_emit_packet3_cliprect( drm_device_t *dev, + drm_radeon_cmd_buffer_t *cmdbuf, + int orig_nbox ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_clip_rect_t box; + int cmdsz, tmp; + int *cmd = (int *)cmdbuf->buf; + drm_clip_rect_t *boxes = cmdbuf->boxes; + int i = 0; + RING_LOCALS; + + DRM_DEBUG("\n"); + + if (__get_user( tmp, &cmd[0])) + return -EFAULT; + + cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16); + + if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 || + cmdsz * 4 > cmdbuf->bufsz) + return -EINVAL; + + if (!orig_nbox) + goto out; + + do { + if ( i < cmdbuf->nbox ) { + if (__copy_from_user( &box, &boxes[i], sizeof(box) )) + return -EFAULT; + /* FIXME The second and subsequent times round + * this loop, send a WAIT_UNTIL_3D_IDLE before + * calling emit_clip_rect(). This fixes a + * lockup on fast machines when sending + * several cliprects with a cmdbuf, as when + * waving a 2D window over a 3D + * window. Something in the commands from user + * space seems to hang the card when they're + * sent several times in a row. 
That would be + * the correct place to fix it but this works + * around it until I can figure that out - Tim + * Smith */ + if ( i ) { + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + ADVANCE_RING(); + } + radeon_emit_clip_rect( dev_priv, &box ); + } + + BEGIN_RING( cmdsz ); + OUT_RING_USER_TABLE( cmd, cmdsz ); + ADVANCE_RING(); + + } while ( ++i < cmdbuf->nbox ); + if (cmdbuf->nbox == 1) + cmdbuf->nbox = 0; + + out: + cmdbuf->buf += cmdsz * 4; + cmdbuf->bufsz -= cmdsz * 4; + return 0; +} + + +static int radeon_emit_wait( drm_device_t *dev, int flags ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG("%s: %x\n", __FUNCTION__, flags); + switch (flags) { + case RADEON_WAIT_2D: + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_2D_IDLE(); + ADVANCE_RING(); + break; + case RADEON_WAIT_3D: + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + ADVANCE_RING(); + break; + case RADEON_WAIT_2D|RADEON_WAIT_3D: + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_IDLE(); + ADVANCE_RING(); + break; + default: + return -EINVAL; + } + + return 0; +} + +int radeon_cp_cmdbuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf = 0; + int idx; + drm_radeon_cmd_buffer_t cmdbuf; + drm_radeon_cmd_header_t header; + int orig_nbox; + + LOCK_TEST_WITH_RETURN( dev ); + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data, + sizeof(cmdbuf) ); + + RING_SPACE_TEST_WITH_RETURN( dev_priv ); + VB_AGE_TEST_WITH_RETURN( dev_priv ); + + + if (verify_area( VERIFY_READ, cmdbuf.buf, cmdbuf.bufsz )) + return -EFAULT; + + if (cmdbuf.nbox && + verify_area( VERIFY_READ, cmdbuf.boxes, + cmdbuf.nbox * sizeof(drm_clip_rect_t))) + return -EFAULT; + + orig_nbox = cmdbuf.nbox; + + while ( cmdbuf.bufsz >= sizeof(header) ) { + + if (__get_user( header.i, (int *)cmdbuf.buf )) { + DRM_ERROR("__get_user %p\n", cmdbuf.buf); + return -EFAULT; + } + + cmdbuf.buf += sizeof(header); + cmdbuf.bufsz -= sizeof(header); + + switch (header.header.cmd_type) { + case RADEON_CMD_PACKET: + DRM_DEBUG("RADEON_CMD_PACKET\n"); + if (radeon_emit_packets( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_packets failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_SCALARS: + DRM_DEBUG("RADEON_CMD_SCALARS\n"); + if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_scalars failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_VECTORS: + DRM_DEBUG("RADEON_CMD_VECTORS\n"); + if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_vectors failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_DMA_DISCARD: + DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); + idx = header.dma.buf_idx; + if ( idx < 0 || idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + idx, dma->buf_count - 1 ); + return -EINVAL; + } + + buf = dma->buflist[idx]; + if ( buf->pid != current->pid || buf->pending ) { + DRM_ERROR( "bad buffer\n" ); + return -EINVAL; + } + + radeon_cp_discard_buffer( dev, buf ); + break; + + case RADEON_CMD_PACKET3: + DRM_DEBUG("RADEON_CMD_PACKET3\n"); + if (radeon_emit_packet3( dev, &cmdbuf )) { + DRM_ERROR("radeon_emit_packet3 failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_PACKET3_CLIP: + 
DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); + if (radeon_emit_packet3_cliprect( dev, &cmdbuf, orig_nbox )) { + DRM_ERROR("radeon_emit_packet3_clip failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_SCALARS2: + DRM_DEBUG("RADEON_CMD_SCALARS2\n"); + if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_scalars2 failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_WAIT: + DRM_DEBUG("RADEON_CMD_WAIT\n"); + if (radeon_emit_wait( dev, header.wait.flags )) { + DRM_ERROR("radeon_emit_wait failed\n"); + return -EINVAL; + } + break; + default: + DRM_ERROR("bad cmd_type %d at %p\n", + header.header.cmd_type, + cmdbuf.buf - sizeof(header)); + return -EINVAL; + } + } + + + DRM_DEBUG("DONE\n"); + COMMIT_RING(); + return 0; +} + + + +int radeon_cp_getparam(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_getparam_t param; + int value; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t *)data, + sizeof(param) ); + + DRM_DEBUG( "pid=%d\n", current->pid ); + + switch( param.param ) { + case RADEON_PARAM_AGP_BUFFER_OFFSET: + value = dev_priv->agp_buffers_offset; + break; + case RADEON_PARAM_LAST_FRAME: + dev_priv->stats.last_frame_reads++; + value = GET_SCRATCH( 0 ); + break; + case RADEON_PARAM_LAST_DISPATCH: + value = GET_SCRATCH( 1 ); + break; + case RADEON_PARAM_LAST_CLEAR: + dev_priv->stats.last_clear_reads++; + value = GET_SCRATCH( 2 ); + break; + case RADEON_PARAM_IRQ_NR: + value = dev->irq; + break; + case RADEON_PARAM_AGP_BASE: + value = dev_priv->agp_vm_start; + break; + default: + return -EINVAL; + } + + if ( copy_to_user( param.value, &value, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/savage_dma.c linux.22-ac2/drivers/char/drm/savage_dma.c --- linux.vanilla/drivers/char/drm/savage_dma.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/savage_dma.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/*=========================================================*/ +#define __NO_VERSION__ +#include "savage.h" +#include "drmP.h" +#include "savage_drv.h" + +#include /* For task queue support */ +#include + +#define SAVAGE_DEFAULT_USEC_TIMEOUT 10000 +#define SAVAGE_FREELIST_DEBUG 0 + + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/savage_drm.h linux.22-ac2/drivers/char/drm/savage_drm.h --- linux.vanilla/drivers/char/drm/savage_drm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/savage_drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,238 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __SAVAGE_DRM_H__ +#define __SAVAGE_DRM_H__ + +#ifndef __SAVAGE_SAREA_DEFINES__ +#define __SAVAGE_SAREA_DEFINES__ + +#define DRM_SAVAGE_MEM_PAGE (1UL<<12) +#define DRM_SAVAGE_MEM_WORK 32 +#define DRM_SAVAGE_MEM_LOCATION_PCI 1 +#define DRM_SAVAGE_MEM_LOCATION_AGP 2 +#define DRM_SAVAGE_DMA_AGP_SIZE (16*1024*1024) + +typedef struct drm_savage_alloc_cont_mem +{ + size_t size; /*size of buffer*/ + unsigned long type; /*4k page or word*/ + unsigned long alignment; + unsigned long location; /*agp or pci*/ + + unsigned long phyaddress; + unsigned long linear; +} drm_savage_alloc_cont_mem_t; + +typedef struct drm_savage_get_physcis_address +{ + unsigned long v_address; + unsigned long p_address; +} drm_savage_get_physcis_address_t; + +/*ioctl number*/ +#define DRM_IOCTL_SAVAGE_ALLOC_CONTINUOUS_MEM \ + DRM_IOWR(0x40,drm_savage_alloc_cont_mem_t) +#define DRM_IOCTL_SAVAGE_GET_PHYSICS_ADDRESS \ + DRM_IOWR(0x41, drm_savage_get_physcis_address_t) +#define DRM_IOCTL_SAVAGE_FREE_CONTINUOUS_MEM \ + DRM_IOWR(0x42, drm_savage_alloc_cont_mem_t) + +#define SAVAGE_FRONT 0x1 +#define SAVAGE_BACK 0x2 +#define SAVAGE_DEPTH 0x4 +#define SAVAGE_STENCIL 0x8 + +/* What needs to be changed for the current vertex dma buffer? 
+ */ +#define SAVAGE_UPLOAD_CTX 0x1 +#define SAVAGE_UPLOAD_TEX0 0x2 +#define SAVAGE_UPLOAD_TEX1 0x4 +#define SAVAGE_UPLOAD_PIPE 0x8 /* <- seems should be removed, Jiayo Hsu */ +#define SAVAGE_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */ +#define SAVAGE_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */ +#define SAVAGE_UPLOAD_2D 0x40 +#define SAVAGE_WAIT_AGE 0x80 /* handled client-side */ +#define SAVAGE_UPLOAD_CLIPRECTS 0x100 /* handled client-side */ +/*frank:add Buffer state 2001/11/15*/ +#define SAVAGE_UPLOAD_BUFFERS 0x200 +/* original marked off in MGA drivers , Jiayo Hsu Oct.23,2001 */ + +/* Keep these small for testing. + */ +#define SAVAGE_NR_SAREA_CLIPRECTS 8 + +/* 2 heaps (1 for card, 1 for agp), each divided into upto 128 + * regions, subject to a minimum region size of (1<<16) == 64k. + * + * Clients may subdivide regions internally, but when sharing between + * clients, the region size is the minimum granularity. + */ + +#define SAVAGE_CARD_HEAP 0 +#define SAVAGE_AGP_HEAP 1 +#define SAVAGE_NR_TEX_HEAPS 2 +#define SAVAGE_NR_TEX_REGIONS 16 /* num. of global texture manage list element*/ +#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16 /* each region 64K, Jiayo Hsu */ + +#endif /* __SAVAGE_SAREA_DEFINES__ */ + +/* drm_tex_region_t define in drm.h */ + +typedef drm_tex_region_t drm_savage_tex_region_t; + +/* Setup registers for 2D, X server + */ +typedef struct { + unsigned int pitch; +} drm_savage_server_regs_t; + + +typedef struct _drm_savage_sarea { + /* The channel for communication of state information to the kernel + * on firing a vertex dma buffer. + */ + unsigned int setup[28]; /* 3D context registers */ + drm_savage_server_regs_t server_state; + + unsigned int dirty; + + unsigned int vertsize; /* vertext size */ + + /* The current cliprects, or a subset thereof. + */ + drm_clip_rect_t boxes[SAVAGE_NR_SAREA_CLIPRECTS]; + unsigned int nbox; + + /* Information about the most recently used 3d drawable. The + * client fills in the req_* fields, the server fills in the + * exported_ fields and puts the cliprects into boxes, above. + * + * The client clears the exported_drawable field before + * clobbering the boxes data. + */ + unsigned int req_drawable; /* the X drawable id */ + unsigned int req_draw_buffer; /* SAVAGE_FRONT or SAVAGE_BACK */ + + unsigned int exported_drawable; + unsigned int exported_index; + unsigned int exported_stamp; + unsigned int exported_buffers; + unsigned int exported_nfront; + unsigned int exported_nback; + int exported_back_x, exported_front_x, exported_w; + int exported_back_y, exported_front_y, exported_h; + drm_clip_rect_t exported_boxes[SAVAGE_NR_SAREA_CLIPRECTS]; + + /* Counters for aging textures and for client-side throttling. + */ + unsigned int status[4]; + + + /* LRU lists for texture memory in agp space and on the card. + */ + drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1]; + unsigned int texAge[SAVAGE_NR_TEX_HEAPS]; + + /* Mechanism to validate card state. 
+ */ + int ctxOwner; + unsigned long shadow_status[64];/*too big?*/ + + /*agp offset*/ + unsigned long agp_offset; +} drm_savage_sarea_t,*drm_savage_sarea_ptr; + + + +typedef struct drm_savage_init { + + unsigned long sarea_priv_offset; + + int chipset; + int sgram; + + unsigned int maccess; + + unsigned int fb_cpp; + unsigned int front_offset, front_pitch; + unsigned int back_offset, back_pitch; + + unsigned int depth_cpp; + unsigned int depth_offset, depth_pitch; + + unsigned int texture_offset[SAVAGE_NR_TEX_HEAPS]; + unsigned int texture_size[SAVAGE_NR_TEX_HEAPS]; + + unsigned long fb_offset; + unsigned long mmio_offset; + unsigned long status_offset; +} drm_savage_init_t; + +typedef struct drm_savage_fullscreen { + enum { + SAVAGE_INIT_FULLSCREEN = 0x01, + SAVAGE_CLEANUP_FULLSCREEN = 0x02 + } func; +} drm_savage_fullscreen_t; + +typedef struct drm_savage_clear { + unsigned int flags; + unsigned int clear_color; + unsigned int clear_depth; + unsigned int color_mask; + unsigned int depth_mask; +} drm_savage_clear_t; + +typedef struct drm_savage_vertex { + int idx; /* buffer to queue */ + int used; /* bytes in use */ + int discard; /* client finished with buffer? */ +} drm_savage_vertex_t; + +typedef struct drm_savage_indices { + int idx; /* buffer to queue */ + unsigned int start; + unsigned int end; + int discard; /* client finished with buffer? */ +} drm_savage_indices_t; + +typedef struct drm_savage_iload { + int idx; + unsigned int dstorg; + unsigned int length; +} drm_savage_iload_t; + +typedef struct _drm_savage_blit { + unsigned int planemask; + unsigned int srcorg; + unsigned int dstorg; + int src_pitch, dst_pitch; + int delta_sx, delta_sy; + int delta_dx, delta_dy; + int height, ydir; /* flip image vertically */ + int source_pitch, dest_pitch; +} drm_savage_blit_t; + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/savage_drv.c linux.22-ac2/drivers/char/drm/savage_drv.c --- linux.vanilla/drivers/char/drm/savage_drv.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/savage_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,248 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include "savage.h" +#include "drmP.h" +#include "savage_drv.h" + +#define DRIVER_AUTHOR "John Zhao, S3 Graphics Inc." + +#define DRIVER_NAME "savage" +#define DRIVER_DESC "Savage4 Family" +#define DRIVER_DATE "20011023" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +/* Currently Savage4 does not implement DMA */ +/* mark off by Jiayo Hsu, Oct. 23, 2001*/ + + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_SAVAGE_ALLOC_CONTINUOUS_MEM)] \ + = {savage_alloc_continuous_mem,1,0},\ + [DRM_IOCTL_NR( DRM_IOCTL_SAVAGE_GET_PHYSICS_ADDRESS)] \ + = {savage_get_physics_address,1,0},\ + [DRM_IOCTL_NR(DRM_IOCTL_SAVAGE_FREE_CONTINUOUS_MEM)] \ + = {savage_free_cont_mem,1,0} + +int savage_alloc_continuous_mem(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_savage_alloc_cont_mem_t cont_mem; + unsigned long size, addr; + void *ret; + int i; + mem_map_t *p; + pgprot_t flags; + + /* add to list */ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_map_t *map; + drm_map_list_t *list; + + dma_addr_t pa; + + if (copy_from_user(&cont_mem, (drm_savage_alloc_cont_mem_t *) arg, sizeof(cont_mem))) + return -EFAULT; + + /*check the parameters */ + if (cont_mem.size <= 0) + return -EINVAL; + if( 0xFFFFFFFFUL / cont_mem.size < cont_mem.type ) + return -EINVAL; + + map = DRM(alloc) (sizeof(*map), DRM_MEM_MAPS); + if (!map) + return -ENOMEM; + + size = cont_mem.type * cont_mem.size; + + ret = pci_alloc_consistent(/*FIXME*/NULL, size, &pa); + if (ret == NULL) + return -ENOMEM; + + /* Set the reserved flag so that remap_page_range can map these pages */ + for (i = 0, p = virt_to_page(ret); i < size / PAGE_SIZE; i++, p++) + SetPageReserved(p); + + cont_mem.phyaddress = pa; + cont_mem.location = DRM_SAVAGE_MEM_LOCATION_PCI; /* pci only at present */ + + /*Map the memory to user space */ + down_write(&current->mm->mmap_sem); + addr = do_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, cont_mem.phyaddress); + if ((unsigned long)addr > -1024UL) + { + up_write(&current->mm->mmap_sem); + return -EINVAL; + } + pgprot_val(flags) = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER; + if (remap_page_range(addr, cont_mem.phyaddress, size, flags)) + { + up_write(&current->mm->mmap_sem); + return -EINVAL; + } + up_write(&current->mm->mmap_sem); + + for (i = 0, p = virt_to_page(ret); i < size / PAGE_SIZE; i++, p++) + ClearPageReserved(p); + + cont_mem.linear = addr; + + /*map list */ + map->handle = ret; /* to distinguish with other */ + map->offset = cont_mem.phyaddress; + map->size = size; + map->mtrr = -1; + /*map-flags,type?? 
*/ + + list = DRM(alloc) (sizeof(*list), DRM_MEM_MAPS); + if (!list) { + DRM(free) (map, sizeof(*map), DRM_MEM_MAPS); + return -EINVAL; + } + memset(list, 0, sizeof(*list)); + list->map = map; + + down(&dev->struct_sem); + list_add(&list->head, &dev->maplist->head); + up(&dev->struct_sem); + + if (copy_to_user((drm_savage_alloc_cont_mem_t *) arg, &cont_mem, sizeof(cont_mem))) + return -EFAULT; + +#warning "Race at the very least" + for (i = 0, p = virt_to_page(ret); i < size / PAGE_SIZE; i++, p++) + atomic_set(&p->count, 1); + + return 1; /*success */ +} + +int savage_get_physics_address(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + + drm_savage_get_physcis_address_t req; + unsigned long buf; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + struct mm_struct *mm; + + if (copy_from_user(&req, (drm_savage_get_physcis_address_t *) arg, sizeof(req))) + return -EFAULT; + buf = req.v_address; + +#warning "FIXME: need to redo logic for this" + /*What kind of virtual address ? */ + if (buf >= (unsigned long) high_memory) + mm = &init_mm; + else + mm = current->mm; + + spin_lock(&mm->page_table_lock); + + pgd = pgd_offset(mm, buf); + pmd = pmd_offset(pgd, buf); + pte = pte_offset(pmd, buf); + + if (!pte_present(*pte)) + { + spin_unlock(&mm->page_table_lock); + return -EINVAL; + } + req.p_address = ((pte_val(*pte) & PAGE_MASK) | (buf & (PAGE_SIZE - 1))); + spin_unlock(&mm->page_table_lock); + + if (copy_to_user((drm_savage_get_physcis_address_t *) arg, &req, sizeof(req))) + return -EFAULT; + return 1; +} + +/*free the continuous memory*/ +int savage_free_cont_mem(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_savage_alloc_cont_mem_t cont_mem; + unsigned long size; + + /*map list */ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_map_t *map; + struct list_head *list; + drm_map_list_t *r_list = NULL; + + if (copy_from_user(&cont_mem, (drm_savage_alloc_cont_mem_t *) arg, sizeof(cont_mem))) + return -EFAULT; +#warning "fix size overflow check" + size = cont_mem.type * cont_mem.size; + if (size <= 0) + return -EINVAL; + + /* find the map in the list */ + list_for_each(list, &dev->maplist->head) { + r_list = (drm_map_list_t *) list; + + if (r_list->map && r_list->map->offset == cont_mem.phyaddress) + break; + } + /*find none */ + if (list == (&dev->maplist->head)) { + up(&dev->struct_sem); + return -EINVAL; + } + map = r_list->map; + list_del(list); + DRM(free) (list, sizeof(*list), DRM_MEM_MAPS); + + /*unmap the user space */ + if (do_munmap(current->mm, cont_mem.linear, size, 0) != 0) + return -EFAULT; + /*free the page */ + pci_free_consistent(NULL, size, map->handle, cont_mem.phyaddress); + + return 1; +} + + +#include "drm_agpsupport.h" +#include "drm_auth.h" +#include "drm_bufs.h" +#include "drm_context.h" +#include "drm_dma.h" +#include "drm_drawable.h" +#include "drm_drv.h" +#include "drm_fops.h" +#include "drm_init.h" +#include "drm_ioctl.h" +#include "drm_lock.h" +#include "drm_memory.h" +#include "drm_proc.h" +#include "drm_vm.h" +#include "drm_stub.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/savage_drv.h linux.22-ac2/drivers/char/drm/savage_drv.h --- linux.vanilla/drivers/char/drm/savage_drv.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/savage_drv.h 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,27 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. 
All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __SAVAGE_DRV_H__ +#define __SAVAGE_DRV_H__ + +#endif /* end #ifndef __SAVAGE_DRV_ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/savage.h linux.22-ac2/drivers/char/drm/savage.h --- linux.vanilla/drivers/char/drm/savage.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/savage.h 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __SAVAGE_H__ +#define __SAVAGE_H__ + +/* This remains constant for all DRM template files. + */ +#define DRM(x) savage_##x + +/* General customization: + */ +#define __HAVE_AGP 1 +#define __MUST_HAVE_AGP 1 +#define __HAVE_MTRR 1 +#define __HAVE_CTX_BITMAP 1 + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/savage_state.c linux.22-ac2/drivers/char/drm/savage_state.c --- linux.vanilla/drivers/char/drm/savage_state.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/savage_state.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,23 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. 
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/sis_drm.h linux.22-ac2/drivers/char/drm/sis_drm.h --- linux.vanilla/drivers/char/drm/sis_drm.h 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/sis_drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -2,6 +2,16 @@ #ifndef _sis_drm_public_h_ #define _sis_drm_public_h_ +/* SiS specific ioctls */ +#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t) +#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t) +#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t) +#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t) +#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t) +#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t) +#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49) +#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50) + typedef struct { int context; unsigned int offset; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/sis_drv.c linux.22-ac2/drivers/char/drm/sis_drv.c --- linux.vanilla/drivers/char/drm/sis_drv.c 2002-02-25 19:37:57.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/sis_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -31,31 +31,6 @@ #include "sis_drm.h" #include "sis_drv.h" -#define DRIVER_AUTHOR "SIS" -#define DRIVER_NAME "sis" -#define DRIVER_DESC "SIS 300/630/540" -#define DRIVER_DATE "20010503" -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \ - [DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \ - /* AGP Memory Management */ \ - [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \ - [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \ - [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 } -#if 0 /* these don't appear to be defined */ - /* SIS Stereo */ - [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 }, - [DRM_IOCTL_NR(SIS_IOCTL_FLIP)] = { sis_flip, 1, 1 }, - [DRM_IOCTL_NR(SIS_IOCTL_FLIP_INIT)] = { sis_flip_init, 1, 1 }, - [DRM_IOCTL_NR(SIS_IOCTL_FLIP_FINAL)] = { sis_flip_final, 1, 1 } -#endif - -#define __HAVE_COUNTERS 5 - #include "drm_auth.h" #include "drm_agpsupport.h" #include "drm_bufs.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude 
linux.vanilla/drivers/char/drm/sis_ds.c linux.22-ac2/drivers/char/drm/sis_ds.c --- linux.vanilla/drivers/char/drm/sis_ds.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/sis_ds.c 2003-07-28 21:09:43.000000000 +0100 @@ -49,16 +49,19 @@ set_t *set; set = (set_t *)MALLOC(sizeof(set_t)); - if (set) { + if(set) + { for(i = 0; i < SET_SIZE; i++){ set->list[i].free_next = i+1; set->list[i].alloc_next = -1; - } + } + set->list[SET_SIZE-1].free_next = -1; set->free = 0; set->alloc = -1; set->trace = -1; } + return set; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/sis.h linux.22-ac2/drivers/char/drm/sis.h --- linux.vanilla/drivers/char/drm/sis.h 2001-12-21 17:41:53.000000000 +0000 +++ linux.22-ac2/drivers/char/drm/sis.h 2003-07-28 21:09:43.000000000 +0100 @@ -24,7 +24,7 @@ * DEALINGS IN THE SOFTWARE. * */ -/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.1 2001/05/19 18:29:22 dawes Exp $ */ +/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.2 2001/12/19 21:25:59 dawes Exp $ */ #ifndef __SIS_H__ #define __SIS_H__ @@ -42,6 +42,31 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "SIS" +#define DRIVER_NAME "sis" +#define DRIVER_DESC "SIS 300/630/540" +#define DRIVER_DATE "20010503" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \ + /* AGP Memory Management */ \ + [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \ + [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 } +#if 0 /* these don't appear to be defined */ + /* SIS Stereo */ + [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 }, + [DRM_IOCTL_NR(SIS_IOCTL_FLIP)] = { sis_flip, 1, 1 }, + [DRM_IOCTL_NR(SIS_IOCTL_FLIP_INIT)] = { sis_flip_init, 1, 1 }, + [DRM_IOCTL_NR(SIS_IOCTL_FLIP_FINAL)] = { sis_flip_final, 1, 1 } +#endif + +#define __HAVE_COUNTERS 5 + /* Buffer customization: */ #define DRIVER_AGP_BUFFERS_MAP( dev ) \ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/tdfx_drv.c linux.22-ac2/drivers/char/drm/tdfx_drv.c --- linux.vanilla/drivers/char/drm/tdfx_drv.c 2001-08-27 15:40:33.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/tdfx_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -82,25 +82,6 @@ #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init tdfx_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", tdfx_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/via_drm.h linux.22-ac2/drivers/char/drm/via_drm.h --- linux.vanilla/drivers/char/drm/via_drm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/via_drm.h 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,156 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _VIA_DRM_H_ +#define _VIA_DRM_H_ + +/* WARNING: These defines must be the same as what the Xserver uses. + * if you change them, you must change the defines in the Xserver. + */ + +#ifndef _VIA_DEFINES_ +#define _VIA_DEFINES_ + +#define VIA_DMA_BUF_ORDER 12 +#define VIA_DMA_BUF_SZ (1 << VIA_DMA_BUF_ORDER) +#define VIA_DMA_BUF_NR 256 +#define VIA_NR_SAREA_CLIPRECTS 8 + +/* Each region is a minimum of 64k, and there are at most 64 of them. + */ +#define VIA_NR_TEX_REGIONS 64 +#define VIA_LOG_MIN_TEX_REGION_SIZE 16 +#endif + +#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */ +#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */ +#define VIA_UPLOAD_CTX 0x4 +#define VIA_UPLOAD_BUFFERS 0x8 +#define VIA_UPLOAD_TEX0 0x10 +#define VIA_UPLOAD_TEX1 0x20 +#define VIA_UPLOAD_CLIPRECTS 0x40 +#define VIA_UPLOAD_ALL 0xff + +/* VIA specific ioctls */ +#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(0x40, drm_via_mem_t) +#define DRM_IOCTL_VIA_FREEMEM DRM_IOW(0x41, drm_via_mem_t) +#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(0x42, drm_via_agp_t) +#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(0x43, drm_via_fb_t) +#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(0x44, drm_via_init_t) + +/* Indices into buf.Setup where various bits of state are mirrored per + * context and per buffer. These can be fired at the card as a unit, + * or in a piecewise fashion as required. 
+ */ + +#define VIA_TEX_SETUP_SIZE 8 + +/* Flags for clear ioctl + */ +#define VIA_FRONT 0x1 +#define VIA_BACK 0x2 +#define VIA_DEPTH 0x4 +#define VIA_STENCIL 0x8 +#define VIDEO 0 +#define AGP 1 + +typedef struct { + unsigned int offset; + unsigned int size; +} drm_via_agp_t; + +typedef struct { + unsigned int offset; + unsigned int size; +} drm_via_fb_t; + +typedef struct { + unsigned int context; + unsigned int type; + unsigned int size; + unsigned long index; + unsigned long offset; +} drm_via_mem_t; + +typedef struct _drm_via_init { + enum { + VIA_INIT_MAP = 0x01, + VIA_CLEANUP_MAP = 0x02 + } func; + + unsigned long sarea_priv_offset; + unsigned long fb_offset; + unsigned long mmio_offset; + unsigned long agpAddr; +} drm_via_init_t; + +/* Warning: If you change the SAREA structure you must change the Xserver + * structure as well */ + +typedef struct _drm_via_tex_region { + unsigned char next, prev; /* indices to form a circular LRU */ + unsigned char inUse; /* owned by a client, or free? */ + int age; /* tracked by clients to update local LRU's */ +} drm_via_tex_region_t; + +typedef struct _drm_via_sarea { + unsigned int dirty; + unsigned int nbox; + drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS]; + drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1]; + int texAge; /* last time texture was uploaded */ + int ctxOwner; /* last context to upload state */ + int vertexPrim; +} drm_via_sarea_t; + +typedef struct _drm_via_flush_agp { + unsigned int offset; + unsigned int size; + unsigned int index; + int discard; /* client is finished with the buffer? */ +} drm_via_flush_agp_t; + +typedef struct _drm_via_flush_sys { + unsigned int offset; + unsigned int size; + unsigned long index; + int discard; /* client is finished with the buffer? */ +} drm_via_flush_sys_t; + +#ifdef __KERNEL__ +int via_fb_init(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +int via_mem_alloc(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +int via_mem_free(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +int via_agp_init(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +int via_dma_alloc(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +int via_dma_free(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +int via_map_init(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +#endif +#endif /* _VIA_DRM_H_ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/via_drv.c linux.22-ac2/drivers/char/drm/via_drv.c --- linux.vanilla/drivers/char/drm/via_drv.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/via_drv.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include +#include "via.h" +#include "drmP.h" +#include "via_drm.h" +#include "via_drv.h" + +#define DRIVER_AUTHOR "VIA" + +#define DRIVER_NAME "via" +#define DRIVER_DESC "VIA CLE 266" +#define DRIVER_DATE "20020814" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 0 + + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_VIA_ALLOCMEM)] = { via_mem_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_VIA_FREEMEM)] = { via_mem_free, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_VIA_AGP_INIT)] = { via_agp_init, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_VIA_FB_INIT)] = { via_fb_init, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_VIA_MAP_INIT)] = { via_map_init, 1, 0 } + + +#define __HAVE_COUNTERS 0 + +#include "drm_auth.h" +#include "drm_agpsupport.h" +#include "drm_bufs.h" +#include "drm_context.h" +#include "drm_dma.h" +#include "drm_drawable.h" +#include "drm_drv.h" +#include "drm_fops.h" +#include "drm_init.h" +#include "drm_ioctl.h" +#include "drm_lists.h" +#include "drm_lock.h" +#include "drm_memory.h" +#include "drm_proc.h" +#include "drm_vm.h" +#include "drm_stub.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/via_drv.h linux.22-ac2/drivers/char/drm/via_drv.h --- linux.vanilla/drivers/char/drm/via_drv.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/via_drv.h 2003-09-01 13:54:30.000000000 +0100 @@ -0,0 +1,149 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _VIA_DRV_H_ +#define _VIA_DRV_H_ + +typedef struct drm_via_private { + drm_via_sarea_t *sarea_priv; + drm_map_t *sarea; + drm_map_t *fb; + drm_map_t *mmio; + unsigned long agpAddr; +} drm_via_private_t; + +extern int via_do_init_map(drm_device_t *dev, drm_via_init_t *init); +extern int via_do_cleanup_map(drm_device_t *dev); +extern int via_map_init(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +/*=* [DBG] For RedHat7.3 insert kernel module has unresolved symbol + cmpxchg() *=*/ + +/* Include this here so that driver can be used with older kernels. */ +#ifndef __HAVE_ARCH_CMPXCHG + +#ifdef CONFIG_SMP +#define LOCK_PREFIX "lock ; " +#else +#define LOCK_PREFIX "" +#endif + +#if defined(__alpha__) +static __inline__ unsigned long +__cmpxchg_u32(volatile int *m, int old, int new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldl_l %0,%2\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stl_c %1,%2\n" + " beq %1,3f\n" + "2: mb\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m)); + + return prev; +} + +static __inline__ unsigned long +__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldq_l %0,%2\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stq_c %1,%2\n" + " beq %1,3f\n" + "2: mb\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m)); + + return prev; +} + +static __inline__ unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32(ptr, old, new); + case 8: + return __cmpxchg_u64(ptr, old, new); + } + return old; +} +#define cmpxchg(ptr,o,n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) + +#elif __i386__ +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long prev; + switch (size) { + case 1: + __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); + return prev; + case 2: + __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); + return prev; + case 4: + __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); + return prev; + } + return old; +} + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ + (unsigned long)(n),sizeof(*(ptr)))) +#endif /* i386 & alpha */ +#endif +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/via_ds.c linux.22-ac2/drivers/char/drm/via_ds.c --- linux.vanilla/drivers/char/drm/via_ds.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/via_ds.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,395 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. 
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Fixes: + * Changed to use via_ prefixes on globals + * Fixed malloc failure paths + * Reformatted to Linux style + * Removed ITEM_TYPE typedef, FREE/MALLOC and other macro bits + */ + +#define __NO_VERSION__ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "via_ds.h" + +#warning "Fix variable/global names to use via_" + +extern unsigned int VIA_DEBUG; + +set_t *via_set_init(void) +{ + int i; + set_t *set; + set = (set_t *)kmalloc(sizeof(set_t), GFP_KERNEL); + if(set == NULL) + return NULL; + for (i = 0; i < SET_SIZE; i++) { + set->list[i].free_next = i + 1; + set->list[i].alloc_next = -1; + } + set->list[SET_SIZE - 1].free_next = -1; + set->free = 0; + set->alloc = -1; + set->trace = -1; + return set; +} + +int via_set_add(set_t * set, unsigned int item) +{ + int free = set->free; + if (free != -1) { + set->list[free].val = item; + set->free = set->list[free].free_next; + } else { + return 0; + } + set->list[free].alloc_next = set->alloc; + set->alloc = free; + set->list[free].free_next = -1; + return 1; +} + +int via_set_del(set_t * set, unsigned int item) +{ + int alloc = set->alloc; + int prev = -1; + + while (alloc != -1) { + if (set->list[alloc].val == item) { + if (prev != -1) + set->list[prev].alloc_next = set->list[alloc].alloc_next; + else + set->alloc = set->list[alloc].alloc_next; + break; + } + prev = alloc; + alloc = set->list[alloc].alloc_next; + } + + if (alloc == -1) + return 0; + + set->list[alloc].free_next = set->free; + set->free = alloc; + set->list[alloc].alloc_next = -1; + + return 1; +} + +/* setFirst -> setAdd -> setNext is wrong */ + +int via_set_first(set_t * set, unsigned int * item) +{ + if (set->alloc == -1) + return 0; + + *item = set->list[set->alloc].val; + set->trace = set->list[set->alloc].alloc_next; + + + return 1; +} + +int via_set_next(set_t * set, unsigned int * item) +{ + if (set->trace == -1) + return 0; + + *item = set->list[set->trace].val; + set->trace = set->list[set->trace].alloc_next; + + return 1; +} + +int via_set_destroy(set_t * set) +{ + kfree(set); + return 1; +} + +#define ISFREE(bptr) ((bptr)->free) + +#define PRINTF(fmt, arg...) 
do{}while(0) + +void via_mmDumpMemInfo(memHeap_t * heap) +{ + TMemBlock *p; + + PRINTF("Memory heap %p:\n", heap); + + if (heap == 0) + PRINTF(" heap == 0\n"); + else { + p = (TMemBlock *) heap; + + while (p) { + PRINTF(" Offset:%08x, Size:%08x, %c%c\n", p->ofs, p->size, p->free ? '.' : 'U', p->reserved ? 'R' : '.'); + p = p->next; + } + } + + PRINTF("End of memory blocks\n"); +} + +memHeap_t *via_mmInit(int ofs, int size) +{ + PMemBlock blocks; + + if (size <= 0) + return 0; + + + blocks = (TMemBlock *) kmalloc(sizeof(TMemBlock), GFP_KERNEL); + + if (blocks) { + memset(blocks, 0, sizeof(TMemBlock)); + blocks->ofs = ofs; + blocks->size = size; + blocks->free = 1; + return (memHeap_t *) blocks; + } else + return NULL; +} + +memHeap_t *via_mmAddRange(memHeap_t * heap, int ofs, int size) +{ + PMemBlock blocks; + blocks = (TMemBlock *) kmalloc(2 * sizeof(TMemBlock), GFP_KERNEL); + + if (blocks) { + memset(blocks, 0, 2 * sizeof(TMemBlock)); + blocks[0].size = size; + blocks[0].free = 1; + blocks[0].ofs = ofs; + blocks[0].next = &blocks[1]; + + /* Discontinuity - stops JoinBlock from trying to join non-adjacent + * ranges. + */ + blocks[1].size = 0; + blocks[1].free = 0; + blocks[1].ofs = ofs + size; + blocks[1].next = (PMemBlock) heap; + return (memHeap_t *) blocks; + } else + return heap; +} + +static TMemBlock *SliceBlock(TMemBlock * p, int startofs, int size, int reserved, int alignment) +{ + TMemBlock *newblock; + + /* break left */ + if (startofs > p->ofs) { + newblock = (TMemBlock *) kmalloc(sizeof(TMemBlock), GFP_KERNEL); + if(newblock == NULL) + return NULL; + memset(newblock, 0, sizeof(TMemBlock)); + newblock->ofs = startofs; + newblock->size = p->size - (startofs - p->ofs); + newblock->free = 1; + newblock->next = p->next; + p->size -= newblock->size; + p->next = newblock; + p = newblock; + } + + /* break right */ + if (size < p->size) { + newblock = (TMemBlock *) kmalloc(sizeof(TMemBlock), GFP_KERNEL); + if(newblock == NULL) + return NULL; + memset(newblock, 0, sizeof(TMemBlock)); + newblock->ofs = startofs + size; + newblock->size = p->size - size; + newblock->free = 1; + newblock->next = p->next; + p->size = size; + p->next = newblock; + } + + /* p = middle block */ + p->align = alignment; + p->free = 0; + p->reserved = reserved; + return p; +} + +PMemBlock via_mmAllocMem(memHeap_t * heap, int size, int align2, int startSearch) +{ + int mask, startofs, endofs; + TMemBlock *p; + + if (!heap || align2 < 0 || size <= 0) + return NULL; + + mask = (1 << align2) - 1; + startofs = 0; + p = (TMemBlock *) heap; + + while (p) { + if (ISFREE(p)) { + startofs = (p->ofs + mask) & ~mask; + + if (startofs < startSearch) + startofs = startSearch; + + endofs = startofs + size; + + if (endofs <= (p->ofs + p->size)) + break; + } + + p = p->next; + } + + if (!p) + return NULL; + + p = SliceBlock(p, startofs, size, 0, mask + 1); + p->heap = heap; + + return p; +} + +static __inline__ int Join2Blocks(TMemBlock * p) +{ + if (p->free && p->next && p->next->free) { + TMemBlock *q = p->next; + p->size += q->size; + p->next = q->next; + kfree(q); + + return 1; + } + + return 0; +} + +int via_mmFreeMem(PMemBlock b) +{ + TMemBlock *p, *prev; + + if (!b) + return 0; + + if (!b->heap) { + return -1; + } + + p = b->heap; + prev = NULL; + + while (p && p != b) { + prev = p; + p = p->next; + } + + if (!p || p->free || p->reserved) { + if (!p) + BUG(); + else if (p->free) + BUG(); + else + BUG(); + return -1; + } + + p->free = 1; + Join2Blocks(p); + + if (prev) + Join2Blocks(prev); + + return 0; +} + +int 
via_mm_ReserveMem(memHeap_t * heap, int offset, int size) +{ + int endofs; + TMemBlock *p; + + if (!heap || size <= 0) + return -1; + endofs = offset + size; + p = (TMemBlock *) heap; + + while (p && p->ofs <= offset) { + if (ISFREE(p) && endofs <= (p->ofs + p->size)) { + SliceBlock(p, offset, size, 1, 1); + return 0; + } + p = p->next; + } + return -1; +} + +int via_mm_FreeReserved(memHeap_t * heap, int offset) +{ + TMemBlock *p, *prev; + + if (!heap) + return -1; + + p = (TMemBlock *) heap; + prev = NULL; + + while (p && p->ofs != offset) { + prev = p; + p = p->next; + } + + if (!p || !p->reserved) + return -1; + p->free = 1; + p->reserved = 0; + Join2Blocks(p); + + if (prev) + Join2Blocks(prev); + + return 0; +} + +void via_mm_Destroy(memHeap_t * heap) +{ + TMemBlock *p, *q; + + if (!heap) + return; + p = (TMemBlock *) heap; + + while (p) { + q = p->next; + kfree(p); + p = q; + } +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/via_ds.h linux.22-ac2/drivers/char/drm/via_ds.h --- linux.vanilla/drivers/char/drm/via_ds.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/via_ds.h 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,134 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _via_ds_h_ +#define _via_ds_h_ + +/* Set Data Structure */ + +#define SET_SIZE 5000 + +typedef struct { + unsigned int val; + int alloc_next, free_next; +} list_item_t; + +typedef struct { + int alloc; + int free; + int trace; + list_item_t list[SET_SIZE]; +} set_t; + +set_t *via_set_init(void); +int via_set_add(set_t *set, unsigned int item); +int via_set_del(set_t *set, unsigned int item); +int via_set_first(set_t *set, unsigned int *item); +int via_set_next(set_t *set, unsigned int *item); +int via_set_destroy(set_t *set); + +#endif + + +#ifndef MM_INC +#define MM_INC + +struct mem_block_t { + struct mem_block_t *next; + struct mem_block_t *heap; + int ofs,size; + int align; + int free:1; + int reserved:1; +}; +typedef struct mem_block_t TMemBlock; +typedef struct mem_block_t *PMemBlock; + +/* a heap is just the first block in a chain */ +typedef struct mem_block_t memHeap_t; + +static __inline__ int mmBlockSize(PMemBlock b) +{ + return b->size; +} + +static __inline__ int mmOffset(PMemBlock b) +{ + return b->ofs; +} + +static __inline__ void mmMarkReserved(PMemBlock b) +{ + b->reserved = 1; +} + +/* + * input: total size in bytes + * return: a heap pointer if OK, NULL if error + */ + +memHeap_t *via_mmInit(int ofs, int size); + +memHeap_t *via_mmAddRange(memHeap_t *heap, int ofs, int size); + + +/* + * Allocate 'size' bytes with 2^align2 bytes alignment, + * restrict the search to free memory after 'startSearch' + * depth and back buffers should be in different 4mb banks + * to get better page hits if possible + * input: size = size of block + * align2 = 2^align2 bytes alignment + * startSearch = linear offset from start of heap to begin search + * return: pointer to the allocated block, 0 if error + */ + +PMemBlock via_mmAllocMem(memHeap_t *heap, int size, int align2, int startSearch); + +/* + * Free block starts at offset + * input: pointer to a block + * return: 0 if OK, -1 if error + */ +int via_mmFreeMem(PMemBlock b); + +/* + * Reserve 'size' bytes block start at offset + * This is used to prevent allocation of memory already used + * by the X server for the front buffer, pixmaps, and cursor + * input: size, offset + * output: 0 if OK, -1 if error + */ +int via_mmReserveMem(memHeap_t *heap, int offset,int size); +int via_mmFreeReserved(memHeap_t *heap, int offset); + +/* + * destroy MM + */ +void via_mmDestroy(memHeap_t *mmInit); + +/* For debuging purpose. */ +void via_mmDumpMemInfo(memHeap_t *mmInit); + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/via.h linux.22-ac2/drivers/char/drm/via.h --- linux.vanilla/drivers/char/drm/via.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/via.h 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __VIA_H__ +#define __VIA_H__ + + +#define DRM(x) viadrv_##x + + +#define __HAVE_AGP 1 +#define __MUST_HAVE_AGP 0 +#define __HAVE_MTRR 1 +#define __HAVE_CTX_BITMAP 1 + + +#define DRIVER_AGP_BUFFERS_MAP( dev ) \ + ((drm_via_private_t *)((dev)->dev_private))->buffers + +extern int via_init_context(int context); +extern int via_final_context(int context); + +#define DRIVER_CTX_CTOR via_init_context +#define DRIVER_CTX_DTOR via_final_context + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/via_map.c linux.22-ac2/drivers/char/drm/via_map.c --- linux.vanilla/drivers/char/drm/via_map.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/via_map.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,118 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#define __NO_VERSION__ +#include "via.h" +#include "drmP.h" +#include "via_drv.h" + +int via_do_init_map(drm_device_t *dev, drm_via_init_t *init) +{ + drm_via_private_t *dev_priv; + struct list_head *list; + + DRM_DEBUG("%s\n", __FUNCTION__); + + dev_priv = DRM(alloc)(sizeof(drm_via_private_t), DRM_MEM_DRIVER); + if (dev_priv == NULL) + return -ENOMEM; + + memset(dev_priv, 0, sizeof(drm_via_private_t)); + + list_for_each(list, &dev->maplist->head) { + drm_map_list_t *r_list = (drm_map_list_t *)list; + if ( r_list->map && + r_list->map->type == _DRM_SHM && + r_list->map->flags & _DRM_CONTAINS_LOCK) { + dev_priv->sarea = r_list->map; + break; + } + } + if (!dev_priv->sarea) { + DRM_ERROR("could not find sarea!\n"); + dev->dev_private = (void *)dev_priv; + via_do_cleanup_map(dev); + return -EINVAL; + } + + DRM_FIND_MAP(dev_priv->fb, init->fb_offset); + if (!dev_priv->fb) { + DRM_ERROR("could not find framebuffer!\n"); + dev->dev_private = (void *)dev_priv; + via_do_cleanup_map(dev); + return -EINVAL; + } + DRM_FIND_MAP(dev_priv->mmio, init->mmio_offset); + if (!dev_priv->mmio) { + DRM_ERROR("could not find mmio region!\n"); + dev->dev_private = (void *)dev_priv; + via_do_cleanup_map(dev); + return -EINVAL; + } + + dev_priv->sarea_priv = (drm_via_sarea_t *)((u8 *)dev_priv->sarea->handle + + init->sarea_priv_offset); + + dev_priv->agpAddr = init->agpAddr; + + dev->dev_private = (void *)dev_priv; + + return 0; +} + +int via_do_cleanup_map(drm_device_t *dev) +{ + if (dev->dev_private) { + + drm_via_private_t *dev_priv = dev->dev_private; + + DRM(free)(dev_priv, sizeof(drm_via_private_t), + DRM_MEM_DRIVER); + dev->dev_private = NULL; + } + + return 0; +} + +int via_map_init(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_via_init_t init; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if (copy_from_user(&init, (drm_via_init_t *)arg, sizeof(init))) + return -EFAULT; + + switch (init.func) { + case VIA_INIT_MAP: + return via_do_init_map(dev, &init); + case VIA_CLEANUP_MAP: + return via_do_cleanup_map(dev); + } + + return -EINVAL; +} + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/via_mm.c linux.22-ac2/drivers/char/drm/via_mm.c --- linux.vanilla/drivers/char/drm/via_mm.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/via_mm.c 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,338 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ +#include "via.h" +#include "drmP.h" +#include "via_drm.h" +#include "via_drv.h" +#include "via_ds.h" +#include "via_mm.h" + +#define MAX_CONTEXT 100 + +unsigned int VIA_DEBUG = 1; + +typedef struct { + int used; + int context; + set_t *sets[2]; /* 0 for frame buffer, 1 for AGP , 2 for System*/ +} via_context_t; + +static via_context_t global_ppriv[MAX_CONTEXT]; + +static int add_alloc_set(int context, int type, unsigned int val) +{ + int i, retval = 0; + + for (i = 0; i < MAX_CONTEXT; i++) + { + if (global_ppriv[i].used && global_ppriv[i].context == context) + { + retval = via_set_add(global_ppriv[i].sets[type], val); + break; + } + } + return retval; +} + +static int del_alloc_set(int context, int type, unsigned int val) +{ + int i, retval = 0; + + for (i = 0; i < MAX_CONTEXT; i++) + if (global_ppriv[i].used && global_ppriv[i].context == context) + { + retval = via_set_del(global_ppriv[i].sets[type], val); + break; + } + return retval; +} + +/* agp memory management */ + +static memHeap_t *AgpHeap = NULL; + +#warning "FIXME: heap re-init cases ?" +int via_agp_init(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_via_agp_t agp; + + if (copy_from_user(&agp, (drm_via_agp_t *)arg, sizeof(agp))) + return -EFAULT; + + AgpHeap = via_mmInit(agp.offset, agp.size); + + DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); + return 0; +} + +/* fb memory management */ +static memHeap_t *FBHeap = NULL; + +int via_fb_init(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_via_fb_t fb; + + if (copy_from_user(&fb, (drm_via_fb_t *)arg, sizeof(fb))) + return -EFAULT; + + FBHeap = via_mmInit(fb.offset, fb.size); + DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); + + return 0; +} + +int via_init_context(int context) +{ + int i; + + for (i = 0; i < MAX_CONTEXT ; i++) + if (global_ppriv[i].used && (global_ppriv[i].context == context)) + break; + + if (i >= MAX_CONTEXT) { + for (i = 0; i < MAX_CONTEXT ; i++) { + if (!global_ppriv[i].used) { + global_ppriv[i].context = context; + global_ppriv[i].used = 1; + global_ppriv[i].sets[0] = via_set_init(); + global_ppriv[i].sets[1] = via_set_init(); + DRM_DEBUG("init allocation set, socket=%d, context = %d\n", + i, context); + break; + } + } + + if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) || + (global_ppriv[i].sets[1] == NULL)) { + return 0; + } + } + + return 1; +} + +int via_final_context(int context) +{ + int i; + for (i=0; i<MAX_CONTEXT; i++) + if (global_ppriv[i].used && (global_ppriv[i].context == context)) + break; + + if (i < MAX_CONTEXT) { + set_t *set; + unsigned int item; + int retval; + + /* free any frame buffer memory still owned by this context */ + set = global_ppriv[i].sets[0]; + retval = via_set_first(set, &item); + while (retval) { + via_mmFreeMem((PMemBlock)item); + retval = via_set_next(set, &item); + } + via_set_destroy(set); + + /* free any AGP memory still owned by this context */ + set = global_ppriv[i].sets[1]; + retval = via_set_first(set, &item); + while (retval) { + via_mmFreeMem((PMemBlock)item); + retval = via_set_next(set, &item); + } + via_set_destroy(set); + + global_ppriv[i].used = 0; + } + return 1; +} + +int via_mem_alloc(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_via_mem_t mem; + + if (copy_from_user(&mem, (drm_via_mem_t *)arg, sizeof(mem))) + return -EFAULT; + + switch (mem.type) + { + case VIDEO : + if (via_fb_alloc(&mem) == 0) { + if (copy_to_user((drm_via_mem_t *)arg, &mem, sizeof(mem))) + return -EFAULT; + return 0; + } + break; + case AGP : + if (via_agp_alloc(&mem) == 0) { + if (copy_to_user((drm_via_mem_t *)arg, &mem, sizeof(mem))) + return -EFAULT; + return 0; + } + break; + } + return -EINVAL; +} + +int via_fb_alloc(drm_via_mem_t* mem) +{ + drm_via_mm_t fb; + PMemBlock block; + int retval = 0; + + if (!FBHeap) + return -1; + + fb.size = mem->size; + fb.context = mem->context; + + block = via_mmAllocMem(FBHeap, fb.size, 5, 0); + if (block) { + fb.offset = block->ofs; + fb.free = (unsigned int)block; + if (!add_alloc_set(fb.context, VIDEO, fb.free)) { + DRM_DEBUG("adding to allocation set fails\n"); + via_mmFreeMem((PMemBlock)fb.free); + retval = -1; + } + } else { + fb.offset = 0; + fb.size = 0; + fb.free = 0; + } + + mem->offset = fb.offset; + mem->index = fb.free; + + DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, (int)fb.offset); + + return retval; +} + +int via_agp_alloc(drm_via_mem_t* mem) +{ + drm_via_mm_t agp; + PMemBlock block; + int retval = 0; + + if (!AgpHeap) + return -1; + + agp.size = mem->size; + agp.context = mem->context; + + block =
via_mmAllocMem(AgpHeap, agp.size, 5, 0); + if (block) { + agp.offset = block->ofs; + agp.free = (unsigned int)block; + if (!add_alloc_set(agp.context, AGP, agp.free)) { + DRM_DEBUG("adding to allocation set fails\n"); + via_mmFreeMem((PMemBlock)agp.free); + retval = -1; + } + } else { + agp.offset = 0; + agp.size = 0; + agp.free = 0; + } + + mem->offset = agp.offset; + mem->index = agp.free; + + DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, (unsigned int)agp.offset); + return retval; +} + +int via_mem_free(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_via_mem_t mem; + + if (copy_from_user(&mem, (drm_via_mem_t *)arg, sizeof(mem))) + return -EFAULT; + + switch (mem.type) + { + case VIDEO : + if (via_fb_free(&mem) == 0) + return 0; + break; + case AGP : + if (via_agp_free(&mem) == 0) + return 0; + break; + } + return -EINVAL; +} + +int via_fb_free(drm_via_mem_t* mem) +{ + drm_via_mm_t fb; + int retval = 0; + + + if (!FBHeap) + return -1; + + fb.free = mem->index; + fb.context = mem->context; + + if (!fb.free) + return -1; + + via_mmFreeMem((PMemBlock)fb.free); + + if (!del_alloc_set(fb.context, VIDEO, fb.free)) + retval = -1; + + DRM_DEBUG("free fb, free = %d\n", fb.free); + + return retval; +} + +int via_agp_free(drm_via_mem_t* mem) +{ + drm_via_mm_t agp; + + int retval = 0; + + agp.free = mem->index; + agp.context = mem->context; + + if (!agp.free) + return -1; + + via_mmFreeMem((PMemBlock)agp.free); + + if (!del_alloc_set(agp.context, AGP, agp.free)) + retval = -1; + + DRM_DEBUG("free agp, free = %d\n", agp.free); + return retval; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm/via_mm.h linux.22-ac2/drivers/char/drm/via_mm.h --- linux.vanilla/drivers/char/drm/via_mm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/drm/via_mm.h 2003-07-28 21:09:43.000000000 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _via_drm_mm_h_ +#define _via_drm_mm_h_ + +typedef struct { + unsigned int context; + unsigned int size; + unsigned long offset; + unsigned int free; +} drm_via_mm_t; + +typedef struct { + unsigned int size; + unsigned long handle; + void *virtual; +} drm_via_dma_t; + +int via_fb_alloc(drm_via_mem_t *mem); +int via_fb_free(drm_via_mem_t *mem); +int via_agp_alloc(drm_via_mem_t *mem); +int via_agp_free(drm_via_mem_t *mem); + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm-4.0/agpsupport.c linux.22-ac2/drivers/char/drm-4.0/agpsupport.c --- linux.vanilla/drivers/char/drm-4.0/agpsupport.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/drm-4.0/agpsupport.c 2003-07-17 13:54:16.000000000 +0100 @@ -275,6 +275,8 @@ break; case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133"; break; + case VIA_APOLLO_KM266: head->chipset = "VIA Apollo KM266 / KL266"; + break; case VIA_APOLLO_KT400: head->chipset = "VIA Apollo KT400"; break; case VIA_APOLLO_P4X400: head->chipset = "VIA Apollo P4X400"; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm-4.0/drmP.h linux.22-ac2/drivers/char/drm-4.0/drmP.h --- linux.vanilla/drivers/char/drm-4.0/drmP.h 2002-02-25 19:37:57.000000000 +0000 +++ linux.22-ac2/drivers/char/drm-4.0/drmP.h 2003-09-09 22:27:29.000000000 +0100 @@ -257,9 +257,9 @@ /* Macros to make printk easier */ #define DRM_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg) + printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__, ##arg) #define DRM_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \ + printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__,\ drm_mem_stats[area].name , ##arg) #define DRM_INFO(fmt, arg...) 
printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) @@ -268,7 +268,7 @@ do { \ if (drm_flags&DRM_FLAG_DEBUG) \ printk(KERN_DEBUG \ - "[" DRM_NAME ":" __FUNCTION__ "] " fmt , \ + "[" DRM_NAME ":%s] " fmt , __FUNCTION__, \ ##arg); \ } while (0) #else diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm-4.0/i810_dma.c linux.22-ac2/drivers/char/drm-4.0/i810_dma.c --- linux.vanilla/drivers/char/drm-4.0/i810_dma.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/drm-4.0/i810_dma.c 2003-06-29 16:10:02.000000000 +0100 @@ -228,7 +228,7 @@ down_write(&current->mm->mmap_sem); retcode = do_munmap(current->mm, (unsigned long)buf_priv->virtual, - (size_t) buf->total); + (size_t) buf->total, 0); up_write(&current->mm->mmap_sem); } buf_priv->currently_mapped = I810_BUF_UNMAPPED; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/drm-4.0/tdfx_drv.c linux.22-ac2/drivers/char/drm-4.0/tdfx_drv.c --- linux.vanilla/drivers/char/drm-4.0/tdfx_drv.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/drm-4.0/tdfx_drv.c 2003-06-29 16:10:02.000000000 +0100 @@ -554,7 +554,6 @@ lock.context, current->pid, j, dev->lock.lock_time, jiffies); current->state = TASK_INTERRUPTIBLE; - current->policy |= SCHED_YIELD; schedule_timeout(DRM_LOCK_SLICE-j); DRM_DEBUG("jiffies=%d\n", jiffies); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/dz.c linux.22-ac2/drivers/char/dz.c --- linux.vanilla/drivers/char/dz.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/dz.c 2003-08-28 17:00:08.000000000 +0100 @@ -1052,7 +1052,7 @@ restore_flags(flags); return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/esp.c linux.22-ac2/drivers/char/esp.c --- linux.vanilla/drivers/char/esp.c 2002-08-03 16:08:23.000000000 +0100 +++ linux.22-ac2/drivers/char/esp.c 2003-06-29 16:10:00.000000000 +0100 @@ -136,7 +136,7 @@ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ - kdevname(tty->device), (info->flags), serial_refcount,info->count,tty->count,s) + kdevname(tty->device), (info->flags), serial_refcount,info->count,atomic_read(&tty->count),s) #else #define DBG_CNT(s) #endif @@ -2051,7 +2051,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed.
Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/generic_serial.c linux.22-ac2/drivers/char/generic_serial.c --- linux.vanilla/drivers/char/generic_serial.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/generic_serial.c 2003-06-29 16:10:01.000000000 +0100 @@ -753,7 +753,7 @@ return; } - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_ERR "gs: gs_close: bad port count;" " tty->count is 1, port count is %d\n", port->count); port->count = 1; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/i810-tco.c linux.22-ac2/drivers/char/i810-tco.c --- linux.vanilla/drivers/char/i810-tco.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/i810-tco.c 2003-07-31 14:40:23.000000000 +0100 @@ -25,7 +25,8 @@ * 82801AA & 82801AB chip : document number 290655-003, 290677-004, * 82801BA & 82801BAM chip : document number 290687-002, 298242-005, * 82801CA & 82801CAM chip : document number 290716-001, 290718-001, - * 82801DB & 82801E chip : document number 290744-001, 273599-001 + * 82801DB & 82801E chip : document number 290744-001, 273599-001, + * 82801EB & 82801ER chip : document number 252516-001 * * 20000710 Nils Faerber * Initial Version 0.01 @@ -42,9 +43,11 @@ * clean up ioctls (WDIOC_GETSTATUS, WDIOC_GETBOOTSTATUS and * WDIOC_SETOPTIONS), made i810tco_getdevice __init, * removed boot_status, removed tco_timer_read, - * added support for 82801DB and 82801E chipset, general cleanup. + * added support for 82801DB and 82801E chipset, + * added support for 82801EB and 8280ER chipset, + * general cleanup. */ - + #include #include #include @@ -167,7 +170,7 @@ * Reload (trigger) the timer. Lock is needed so we dont reload it during * a reprogramming event */ - + static void tco_timer_reload (void) { spin_lock(&tco_lock); @@ -310,6 +313,7 @@ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, }, { 0, }, }; MODULE_DEVICE_TABLE (pci, i810tco_pci_tbl); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/i810-tco.h linux.22-ac2/drivers/char/i810-tco.h --- linux.vanilla/drivers/char/i810-tco.h 2002-08-03 16:08:23.000000000 +0100 +++ linux.22-ac2/drivers/char/i810-tco.h 2003-07-31 14:40:23.000000000 +0100 @@ -1,5 +1,5 @@ /* - * i810-tco 0.05: TCO timer driver for i8xx chipsets + * i810-tco: TCO timer driver for i8xx chipsets * * (c) Copyright 2000 kernel concepts , All Rights Reserved. * http://www.kernelconcepts.de @@ -8,7 +8,7 @@ * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. - * + * * Neither kernel concepts nor Nils Faerber admit liability nor provide * warranty for any of this software. This material is provided * "AS-IS" and at no charge. 
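The close-path conversion repeated in dz.c, esp.c and generic_serial.c above (and in most of the serial drivers that follow) is always the same change: tty_struct.count is an atomic_t in this tree, so the "last close" sanity check has to go through atomic_read() rather than a plain read of tty->count. A minimal sketch of the converted check, with my_close() and struct my_port as hypothetical stand-ins for a driver's own close routine and per-port state:

	static void my_close(struct tty_struct *tty, struct file *filp)
	{
		struct my_port *port = tty->driver_data;	/* hypothetical per-port data */

		/* tty->count is an atomic_t in this tree, so read it atomically */
		if ((atomic_read(&tty->count) == 1) && (port->count != 1)) {
			/* last tty reference, but the driver still counts extra opens */
			printk(KERN_ERR "my_close: bad port count %d, forcing to 1\n",
			       port->count);
			port->count = 1;
		}
		if (--port->count < 0)
			port->count = 0;
	}
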
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/ip2main.c linux.22-ac2/drivers/char/ip2main.c --- linux.vanilla/drivers/char/ip2main.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/ip2main.c 2003-08-08 15:10:19.000000000 +0100 @@ -372,7 +372,7 @@ #if defined(MODULE) && defined(IP2DEBUG_OPEN) #define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] refc=%d, ttyc=%d, modc=%x -> %s\n", \ kdevname(tty->device),(pCh->flags),ref_count, \ - tty->count,/*GET_USE_COUNT(module)*/0,s) + atomic_read(&tty->count),/*GET_USE_COUNT(module)*/0,s) #else #define DBG_CNT(s) #endif @@ -988,7 +988,7 @@ static void __init ip2_init_board( int boardnum ) { - int i,rc; + int i; int nports = 0, nboxes = 0; i2ChanStrPtr pCh; i2eBordStrPtr pB = i2BoardPtrTable[boardnum]; @@ -1740,7 +1740,7 @@ noblock: /* first open - Assign termios structure to port */ - if ( tty->count == 1 ) { + if ( atomic_read(&tty->count) == 1 ) { i2QueueCommands(PTYPE_INLINE, pCh, 0, 2, CMD_CTSFL_DSAB, CMD_RTSFL_DSAB); if ( pCh->flags & ASYNC_SPLIT_TERMIOS ) { if ( tty->driver.subtype == SERIAL_TYPE_NORMAL ) { @@ -1805,7 +1805,7 @@ return; } - if ( tty->count > 1 ) { /* not the last close */ + if ( atomic_read(&tty->count) > 1 ) { /* not the last close */ MOD_DEC_USE_COUNT; ip2trace (CHANN, ITRC_CLOSE, 2, 1, 3 ); @@ -3364,7 +3364,7 @@ pCh = DevTable[i]; if (pCh) { tty = pCh->pTTY; - if (tty && tty->count) { + if (tty && atomic_read(&tty->count)) { len += sprintf(buf+len,FMTLINE,i,(int)tty->flags,pCh->flags, tty->termios->c_cflag,tty->termios->c_iflag); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/isicom.c linux.22-ac2/drivers/char/isicom.c --- linux.vanilla/drivers/char/isicom.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/isicom.c 2003-06-29 16:10:01.000000000 +0100 @@ -1160,7 +1160,7 @@ return; } - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_WARNING "ISICOM:(0x%x) isicom_close: bad port count" "tty->count = 1 port count = %d.\n", card->base, port->count); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/istallion.c linux.22-ac2/drivers/char/istallion.c --- linux.vanilla/drivers/char/istallion.c 2002-08-03 16:08:23.000000000 +0100 +++ linux.22-ac2/drivers/char/istallion.c 2003-06-29 16:10:00.000000000 +0100 @@ -1173,7 +1173,7 @@ restore_flags(flags); return; } - if ((tty->count == 1) && (portp->refcount != 1)) + if ((atomic_read(&tty->count) == 1) && (portp->refcount != 1)) portp->refcount = 1; if (portp->refcount-- > 1) { MOD_DEC_USE_COUNT; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/keyboard.c linux.22-ac2/drivers/char/keyboard.c --- linux.vanilla/drivers/char/keyboard.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/keyboard.c 2003-08-28 17:00:08.000000000 +0100 @@ -198,6 +198,7 @@ unsigned char keycode; char up_flag = down ? 
0 : 0200; char raw_mode; + char have_keycode; pm_access(pm_kbd); add_keyboard_randomness(scancode | up_flag); @@ -214,16 +215,30 @@ tty = NULL; } kbd = kbd_table + fg_console; - if ((raw_mode = (kbd->kbdmode == VC_RAW))) { + /* + * Convert scancode to keycode + */ + raw_mode = (kbd->kbdmode == VC_RAW); + have_keycode = kbd_translate(scancode, &keycode, raw_mode); + if (raw_mode) { /* * The following is a workaround for hardware * which sometimes send the key release event twice */ unsigned char next_scancode = scancode|up_flag; - if (up_flag && next_scancode==prev_scancode) { + if (have_keycode && up_flag && next_scancode==prev_scancode) { /* unexpected 2nd release event */ } else { - prev_scancode=next_scancode; + /* + * Only save previous scancode if it was a key-up + * and had a single-byte scancode. + */ + if (!have_keycode) + prev_scancode = 1; + else if (!up_flag || prev_scancode == 1) + prev_scancode = 0; + else + prev_scancode = next_scancode; put_queue(next_scancode); } /* we do not return yet, because we want to maintain @@ -231,10 +246,7 @@ values when finishing RAW mode or when changing VT's */ } - /* - * Convert scancode to keycode - */ - if (!kbd_translate(scancode, &keycode, raw_mode)) + if (!have_keycode) goto out; /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/Makefile linux.22-ac2/drivers/char/Makefile --- linux.vanilla/drivers/char/Makefile 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/Makefile 2003-08-28 22:46:24.000000000 +0100 @@ -199,6 +199,12 @@ obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o obj-$(CONFIG_N_HDLC) += n_hdlc.o obj-$(CONFIG_SPECIALIX) += specialix.o + +subdir-$(CONFIG_ATI_CD1865) += cd1865 +ifeq ($(CONFIG_ATI_CD1865),y) + obj-y += cd1865/SILX.o +endif + obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o obj-$(CONFIG_A2232) += ser_a2232.o generic_serial.o obj-$(CONFIG_SX) += sx.o generic_serial.o @@ -273,6 +279,8 @@ obj-$(CONFIG_NWFLASH) += nwflash.o obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o scx200.o +obj-$(CONFIG_VBLANK) += vblank.o + # Only one watchdog can succeed. We probe the hardware watchdog # drivers first, then the softdog driver. 
This means if your hardware # watchdog dies or is 'borrowed' for some reason the software watchdog @@ -295,6 +303,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o +obj-$(CONFIG_ALIM6117_WDT) += alim6117_wdt.o obj-$(CONFIG_ALIM1535_WDT) += alim1535d_wdt.o obj-$(CONFIG_INDYDOG) += indydog.o obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/moxa.c linux.22-ac2/drivers/char/moxa.c --- linux.vanilla/drivers/char/moxa.c 2001-10-25 21:53:47.000000000 +0100 +++ linux.22-ac2/drivers/char/moxa.c 2003-06-29 16:10:01.000000000 +0100 @@ -642,7 +642,7 @@ } ch = (struct moxa_str *) tty->driver_data; - if ((tty->count == 1) && (ch->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (ch->count != 1)) { printk("moxa_close: bad serial port count; tty->count is 1, " "ch->count is %d\n", ch->count); ch->count = 1; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/mwave/mwavedd.c linux.22-ac2/drivers/char/mwave/mwavedd.c --- linux.vanilla/drivers/char/mwave/mwavedd.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/mwave/mwavedd.c 2003-06-29 16:10:01.000000000 +0100 @@ -279,7 +279,6 @@ pDrvData->IPCs[ipcnum].bIsHere = FALSE; pDrvData->IPCs[ipcnum].bIsEnabled = TRUE; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) - current->nice = -20; /* boost to provide priority timing */ #else current->priority = 0x28; /* boost to provide priority timing */ #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/mxser.c linux.22-ac2/drivers/char/mxser.c --- linux.vanilla/drivers/char/mxser.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/mxser.c 2003-06-29 16:10:01.000000000 +0100 @@ -824,7 +824,7 @@ MOD_DEC_USE_COUNT; return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/n_hdlc.c linux.22-ac2/drivers/char/n_hdlc.c --- linux.vanilla/drivers/char/n_hdlc.c 2002-02-25 19:37:57.000000000 +0000 +++ linux.22-ac2/drivers/char/n_hdlc.c 2003-06-29 16:10:00.000000000 +0100 @@ -9,7 +9,7 @@ * Al Longyear , Paul Mackerras * * Original release 01/11/99 - * $Id: n_hdlc.c,v 3.3 2001/11/08 16:16:03 paulkf Exp $ + * $Id: n_hdlc.c,v 3.6 2002/12/19 18:58:56 paulkf Exp $ * * This code is released under the GNU General Public License (GPL) * @@ -78,7 +78,7 @@ */ #define HDLC_MAGIC 0x239e -#define HDLC_VERSION "$Revision: 3.3 $" +#define HDLC_VERSION "$Revision: 3.6 $" #include #include @@ -172,9 +172,9 @@ /* * HDLC buffer list manipulation functions */ -void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list); -void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf); -N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list); +static void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list); +static void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf); +static N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list); /* Local functions */ @@ -186,10 +186,10 @@ /* debug level can be set by insmod for debugging purposes */ #define DEBUG_LEVEL_INFO 1 -int debuglevel=0; +static int debuglevel=0; /* max frame size for memory allocations */ -ssize_t maxframe=4096; +static ssize_t maxframe=4096; /* TTY callbacks */ @@ -265,7 +265,8 @@ } else break; } - + if (n_hdlc->tbuf) + kfree(n_hdlc->tbuf); kfree(n_hdlc); } /* end of n_hdlc_release() */ @@ -905,7 +906,7 @@ * Arguments: list pointer to buffer list * Return Value: None */ -void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list) +static void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list) { memset(list,0,sizeof(N_HDLC_BUF_LIST)); spin_lock_init(&list->spinlock); @@ -922,7 +923,7 @@ * * Return Value: None */ -void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf) +static void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf) { unsigned long flags; spin_lock_irqsave(&list->spinlock,flags); @@ -952,7 +953,7 @@ * * pointer to HDLC buffer if available, otherwise NULL */ -N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list) +static N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list) { unsigned long flags; N_HDLC_BUF *buf; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/n_tty.c linux.22-ac2/drivers/char/n_tty.c --- linux.vanilla/drivers/char/n_tty.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/n_tty.c 2003-08-08 13:42:38.000000000 +0100 @@ -119,7 +119,7 @@ */ static void check_unthrottle(struct tty_struct * tty) { - if (tty->count && + if (atomic_read(&tty->count) && test_and_clear_bit(TTY_THROTTLED, &tty->flags) && tty->driver.unthrottle) tty->driver.unthrottle(tty); @@ -1170,7 +1170,8 @@ retval = -ERESTARTSYS; break; } - if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) { + if (tty_hung_up_p(file) || + (tty->link && !atomic_read(&tty->link->count))) { retval = -EIO; break; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/nwflash.c linux.22-ac2/drivers/char/nwflash.c --- linux.vanilla/drivers/char/nwflash.c 2001-10-12 21:48:42.000000000 +0100 +++ linux.22-ac2/drivers/char/nwflash.c 2003-06-29 16:10:00.000000000 +0100 @@ -154,12 +154,11 @@ if (down_interruptible(&nwflash_sem)) return -ERESTARTSYS; - ret = copy_to_user(buf, (void *)(FLASH_BASE + p), count); - if (ret == 0) { - ret = count; - *ppos += count; - } + ret = count - copy_to_user(buf, 
(void *)(FLASH_BASE + p), count); + *ppos += ret; up(&nwflash_sem); + if (ret == 0) + ret = -EFAULT; } return ret; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/pc_keyb.c linux.22-ac2/drivers/char/pc_keyb.c --- linux.vanilla/drivers/char/pc_keyb.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/pc_keyb.c 2003-06-29 16:10:00.000000000 +0100 @@ -1225,41 +1225,13 @@ #endif /* CONFIG_PSMOUSE */ -static int blink_frequency = HZ/2; +void pckbd_blink (char led) { + led = led ? (0x01 | 0x04) : 0x00; -/* Tell the user who may be running in X and not see the console that we have - panic'ed. This is to distingush panics from "real" lockups. - Could in theory send the panic message as morse, but that is left as an - exercise for the reader. */ -void panic_blink(void) -{ - static unsigned long last_jiffie; - static char led; - /* Roughly 1/2s frequency. KDB uses about 1s. Make sure it is - different. */ - if (!blink_frequency) - return; - if (jiffies - last_jiffie > blink_frequency) { - led ^= 0x01 | 0x04; while (kbd_read_status() & KBD_STAT_IBF) mdelay(1); kbd_write_output(KBD_CMD_SET_LEDS); mdelay(1); while (kbd_read_status() & KBD_STAT_IBF) mdelay(1); mdelay(1); kbd_write_output(led); - last_jiffie = jiffies; - } -} - -static int __init panicblink_setup(char *str) -{ - int par; - if (get_option(&str,&par)) - blink_frequency = par*(1000/HZ); - return 1; } - -/* panicblink=0 disables the blinking as it caused problems with some console - switches. otherwise argument is ms of a blink period. */ -__setup("panicblink=", panicblink_setup); - diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/pcmcia/synclink_cs.c linux.22-ac2/drivers/char/pcmcia/synclink_cs.c --- linux.vanilla/drivers/char/pcmcia/synclink_cs.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/pcmcia/synclink_cs.c 2003-06-29 16:10:02.000000000 +0100 @@ -1,7 +1,7 @@ /* * linux/drivers/char/synclink_cs.c * - * $Id: synclink_cs.c,v 3.4 2002/04/22 14:36:41 paulkf Exp $ + * $Id: synclink_cs.c,v 3.9 2003/05/23 20:16:41 paulkf Exp $ * * Device driver for Microgate SyncLink PC Card * multiprotocol serial adapter. 
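The nwflash.c read fix above leans on the copy_to_user() contract: it returns the number of bytes it could not copy, so "count - copy_to_user(...)" is the number of bytes that actually reached user space, the file position advances by that amount, and -EFAULT is returned only when nothing was copied at all. A small sketch of the same partial-copy idiom (flash_read_bytes() and src are hypothetical names, not from the patch):

	static ssize_t flash_read_bytes(char *buf, const char *src, size_t count,
					loff_t *ppos)
	{
		/* copy_to_user() returns the number of bytes left uncopied */
		size_t copied = count - copy_to_user(buf, src, count);

		*ppos += copied;			/* advance by what was really copied */
		return copied ? copied : -EFAULT;	/* nothing copied at all: fault */
	}
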
@@ -483,6 +483,7 @@ static int debug_level = 0; static int maxframe[MAX_DEVICE_COUNT] = {0,}; +static int dosyncppp[MAX_DEVICE_COUNT] = {1,1,1,1}; /* The old way: bit map of interrupts to choose from */ /* This means pick from 15, 14, 12, 11, 10, 9, 7, 5, 4, and 3 */ @@ -499,9 +500,14 @@ MODULE_PARM(cuamajor,"i"); MODULE_PARM(debug_level,"i"); MODULE_PARM(maxframe,"1-" __MODULE_STRING(MAX_DEVICE_COUNT) "i"); +MODULE_PARM(dosyncppp,"1-" __MODULE_STRING(MAX_DEVICE_COUNT) "i"); + +#ifdef MODULE_LICENSE +MODULE_LICENSE("GPL"); +#endif static char *driver_name = "SyncLink PC Card driver"; -static char *driver_version = "$Revision: 3.4 $"; +static char *driver_version = "$Revision: 3.9 $"; static struct tty_driver serial_driver, callout_driver; static int serial_refcount; @@ -2616,17 +2622,20 @@ { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; - if (!info || mgslpc_paranoia_check(info, tty->device, "mgslpc_close")) + if (mgslpc_paranoia_check(info, tty->device, "mgslpc_close")) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", __FILE__,__LINE__, info->device_name, info->count); - if (!info->count || tty_hung_up_p(filp)) + if (!info->count) + return; + + if (tty_hung_up_p(filp)) goto cleanup; - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * tty->count is 1 and the tty structure will be freed. * info->count should be one in this case. @@ -2747,7 +2756,7 @@ schedule_timeout(char_time); if (signal_pending(current)) break; - if (timeout && ((orig_jiffies + timeout) < jiffies)) + if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } else { @@ -2757,7 +2766,7 @@ schedule_timeout(char_time); if (signal_pending(current)) break; - if (timeout && ((orig_jiffies + timeout) < jiffies)) + if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } @@ -2937,16 +2946,11 @@ info = mgslpc_device_list; while(info && info->line != line) info = info->next_device; - if ( !info ){ - printk("%s(%d):Can't find specified device on open (line=%d)\n", - __FILE__,__LINE__,line); + if (mgslpc_paranoia_check(info, tty->device, "mgslpc_open")) return -ENODEV; - } tty->driver_data = info; info->tty = tty; - if (mgslpc_paranoia_check(info, tty->device, "mgslpc_open")) - return -ENODEV; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", @@ -3008,6 +3012,8 @@ cleanup: if (retval) { + if (atomic_read(&tty->count) == 1) + info->tty = 0; /* tty layer will release tty struct */ if(MOD_IN_USE) MOD_DEC_USE_COUNT; if(info->count) @@ -3181,8 +3187,7 @@ if (info->line < MAX_DEVICE_COUNT) { if (maxframe[info->line]) info->max_frame_size = maxframe[info->line]; -// info->dosyncppp = dosyncppp[info->line]; - info->dosyncppp = 1; + info->dosyncppp = dosyncppp[info->line]; } mgslpc_device_count++; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/pcwd.c linux.22-ac2/drivers/char/pcwd.c --- linux.vanilla/drivers/char/pcwd.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/pcwd.c 2003-08-12 12:48:30.000000000 +0100 @@ -932,9 +932,7 @@ misc_deregister (&temp_miscdev); release_region (pcwd_info.io_addr, pcwd_info.card_info->io_size); - - if (pcwd_info.flags & PCWD_PCI_REG) - pci_unregister_driver (&pcwd_driver); + pci_unregister_driver (&pcwd_driver); return; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/pcxx.c linux.22-ac2/drivers/char/pcxx.c --- 
linux.vanilla/drivers/char/pcxx.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/pcxx.c 2003-08-28 22:15:11.000000000 +0100 @@ -123,7 +123,7 @@ MODULE_PARM(numports, "1-4i"); # endif -#endif MODULE +#endif /* MODULE */ static int numcards = 1; static int nbdevs = 0; @@ -581,7 +581,7 @@ return; } /* this check is in serial.c, it won't hurt to do it here too */ - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/pty.c linux.22-ac2/drivers/char/pty.c --- linux.vanilla/drivers/char/pty.c 2002-08-03 16:08:23.000000000 +0100 +++ linux.22-ac2/drivers/char/pty.c 2003-06-29 16:10:00.000000000 +0100 @@ -70,13 +70,18 @@ static void pty_close(struct tty_struct * tty, struct file * filp) { + int count; + if (!tty) return; + + count = atomic_read(&tty->count); if (tty->driver.subtype == PTY_TYPE_MASTER) { - if (tty->count > 1) - printk("master pty_close: count = %d!!\n", tty->count); + if (count > 1) + printk("master pty_close: count = %d!!\n", + atomic_read(&tty->count)); } else { - if (tty->count > 2) + if (count > 2) return; } wake_up_interruptible(&tty->read_wait); @@ -329,7 +334,7 @@ goto out; if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) goto out; - if (tty->link->count != 1) + if (atomic_read(&tty->link->count) != 1) goto out; clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/raw.c linux.22-ac2/drivers/char/raw.c --- linux.vanilla/drivers/char/raw.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/raw.c 2003-06-29 16:10:01.000000000 +0100 @@ -86,12 +86,6 @@ filp->f_op = &raw_ctl_fops; return 0; } - - if (!filp->f_iobuf) { - err = alloc_kiovec(1, &filp->f_iobuf); - if (err) - return err; - } down(&raw_devices[minor].mutex); /* @@ -292,7 +286,6 @@ size_t size, loff_t *offp) { struct kiobuf * iobuf; - int new_iobuf; int err = 0; unsigned long blocknr, blocks; size_t transferred; @@ -311,18 +304,10 @@ minor = MINOR(filp->f_dentry->d_inode->i_rdev); - new_iobuf = 0; - iobuf = filp->f_iobuf; - if (test_and_set_bit(0, &filp->f_iobuf_lock)) { - /* - * A parallel read/write is using the preallocated iobuf - * so just run slow and allocate a new one. 
- */ - err = alloc_kiovec(1, &iobuf); - if (err) - goto out; - new_iobuf = 1; - } + err = alloc_kiovec(1, &iobuf); + if (err) + return err; + dev = to_kdev_t(raw_devices[minor].binding->bd_dev); sector_size = raw_devices[minor].sector_size; @@ -395,10 +380,6 @@ } out_free: - if (!new_iobuf) - clear_bit(0, &filp->f_iobuf_lock); - else - free_kiovec(1, &iobuf); - out: + free_kiovec(1, &iobuf); return err; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/riscom8.c linux.22-ac2/drivers/char/riscom8.c --- linux.vanilla/drivers/char/riscom8.c 2001-09-13 23:21:32.000000000 +0100 +++ linux.22-ac2/drivers/char/riscom8.c 2003-06-29 16:10:00.000000000 +0100 @@ -1142,7 +1142,7 @@ goto out; bp = port_Board(port); - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_INFO "rc%d: rc_close: bad port count;" " tty->count is 1, port count is %d\n", board_No(bp), port->count); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/rocket.c linux.22-ac2/drivers/char/rocket.c --- linux.vanilla/drivers/char/rocket.c 2001-09-21 18:55:22.000000000 +0100 +++ linux.22-ac2/drivers/char/rocket.c 2003-08-13 14:22:04.000000000 +0100 @@ -1052,7 +1052,7 @@ restore_flags(flags); return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always @@ -1944,6 +1944,10 @@ str = "8J"; max_num_aiops = 1; break; + case PCI_DEVICE_ID_RP4J: + str = "4J"; + max_num_aiops = 1; + break; case PCI_DEVICE_ID_RP16INTF: str = "16"; max_num_aiops = 2; @@ -2006,6 +2010,10 @@ PCI_DEVICE_ID_RP8J, i, &bus, &device_fn)) if (register_PCI(count+boards_found, bus, device_fn)) count++; + if (!pcibios_find_device(PCI_VENDOR_ID_RP, + PCI_DEVICE_ID_RP4J, i, &bus, &device_fn)) + if (register_PCI(count+boards_found, bus, device_fn)) + count++; if(!pcibios_find_device(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8OCTA, i, &bus, &device_fn)) if(register_PCI(count+boards_found, bus, device_fn)) @@ -2031,6 +2039,10 @@ if(register_PCI(count+boards_found, bus, device_fn)) count++; if(!pcibios_find_device(PCI_VENDOR_ID_RP, + PCI_DEVICE_ID_RP4J, i, &bus, &device_fn)) + if(register_PCI(count+boards_found, bus, device_fn)) + count++; + if(!pcibios_find_device(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RPP4, i, &bus, &device_fn)) if(register_PCI(count+boards_found, bus, device_fn)) count++; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/rocket_int.h linux.22-ac2/drivers/char/rocket_int.h --- linux.vanilla/drivers/char/rocket_int.h 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/rocket_int.h 2003-09-01 13:54:30.000000000 +0100 @@ -1200,6 +1200,9 @@ #ifndef PCI_DEVICE_ID_RP8J #define PCI_DEVICE_ID_RP8J 0x0006 #endif +#ifndef PCI_DEVICE_ID_RP4J +#define PCI_DEVICE_ID_RP4J 0x0007 +#endif #ifndef PCI_DEVICE_ID_RPP4 #define PCI_DEVICE_ID_RPP4 0x000A #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/sbc60xxwdt.c linux.22-ac2/drivers/char/sbc60xxwdt.c --- linux.vanilla/drivers/char/sbc60xxwdt.c 2002-11-29 21:27:14.000000000 +0000 +++ linux.22-ac2/drivers/char/sbc60xxwdt.c 2003-08-08 15:11:35.000000000 +0100 @@ -335,7 +335,7 @@ release_region(WDT_START,1); err_out_region1: release_region(WDT_STOP,1); -err_out: +/* err_out: */ return rc; } diff -u --new-file --recursive --exclude-from 
/usr/src/exclude linux.vanilla/drivers/char/serial167.c linux.22-ac2/drivers/char/serial167.c --- linux.vanilla/drivers/char/serial167.c 2002-11-29 21:27:16.000000000 +0000 +++ linux.22-ac2/drivers/char/serial167.c 2003-06-29 16:10:01.000000000 +0100 @@ -1877,7 +1877,7 @@ printk("cy_close ttyS%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/serial_amba.c linux.22-ac2/drivers/char/serial_amba.c --- linux.vanilla/drivers/char/serial_amba.c 2002-08-03 16:08:23.000000000 +0100 +++ linux.22-ac2/drivers/char/serial_amba.c 2003-06-29 16:10:01.000000000 +0100 @@ -1404,7 +1404,7 @@ return; } - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/serial.c linux.22-ac2/drivers/char/serial.c --- linux.vanilla/drivers/char/serial.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/serial.c 2003-07-14 12:42:59.000000000 +0100 @@ -62,6 +62,10 @@ * Robert Schwebel , * Juergen Beisert , * Theodore Ts'o + * 4/02: added TTY_DO_WRITE_WAKEUP to enable n_tty to send POLL_OUTS + * to waiting processes + * Sapan Bhatia + * */ static char *serial_version = "5.05c"; @@ -203,6 +207,7 @@ #include #include #include +#include #if (LINUX_VERSION_CODE >= 131343) #include #endif @@ -306,8 +311,8 @@ { "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO}, { "Startech", 1, 0}, /* usurped by cyclades.c */ { "16C950/954", 128, UART_CLEAR_FIFO | UART_USE_FIFO}, - { "ST16654", 64, UART_CLEAR_FIFO | UART_USE_FIFO | - UART_STARTECH }, + { "ST16654", 64-8, UART_CLEAR_FIFO | UART_USE_FIFO | + UART_STARTECH }, /* ST16654 xmit trigger lvl = 8 */ { "XR16850", 128, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH }, { "RSA", 2048, UART_CLEAR_FIFO | UART_USE_FIFO }, @@ -370,7 +375,7 @@ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ - kdevname(tty->device), (info->flags), serial_refcount,info->count,tty->count,s) + kdevname(tty->device), (info->flags), serial_refcount,info->count,atomic_read(&tty->count),s) #else #define DBG_CNT(s) #endif @@ -1218,7 +1223,7 @@ if (!page) return -ENOMEM; - save_flags(flags); cli(); + spin_lock_irqsave( &info->irq_spinlock, flags); if (info->flags & ASYNC_INITIALIZED) { free_page(page); @@ -1456,11 +1461,11 @@ change_speed(info, 0); info->flags |= ASYNC_INITIALIZED; - restore_flags(flags); + spin_unlock_irqrestore( &info->irq_spinlock, flags); return 0; errout: - restore_flags(flags); + spin_unlock_irqrestore( &info->irq_spinlock, flags); return retval; } @@ -1484,7 +1489,7 @@ state->irq); #endif - save_flags(flags); cli(); /* Disable interrupts */ + spin_lock_irqsave( &info->irq_spinlock, flags); /* * clear delta_msr_wait queue to avoid mem leaks: we may free the irq @@ -1492,41 +1497,6 @@ */ wake_up_interruptible(&info->delta_msr_wait); - /* - * First unlink the serial port from the IRQ chain... 
- */ - if (info->next_port) - info->next_port->prev_port = info->prev_port; - if (info->prev_port) - info->prev_port->next_port = info->next_port; - else - IRQ_ports[state->irq] = info->next_port; - figure_IRQ_timeout(state->irq); - - /* - * Free the IRQ, if necessary - */ - if (state->irq && (!IRQ_ports[state->irq] || - !IRQ_ports[state->irq]->next_port)) { - if (IRQ_ports[state->irq]) { - free_irq(state->irq, &IRQ_ports[state->irq]); - retval = request_irq(state->irq, rs_interrupt_single, - SA_SHIRQ, "serial", - &IRQ_ports[state->irq]); - - if (retval) - printk("serial shutdown: request_irq: error %d" - " Couldn't reacquire IRQ.\n", retval); - } else - free_irq(state->irq, &IRQ_ports[state->irq]); - } - - if (info->xmit.buf) { - unsigned long pg = (unsigned long) info->xmit.buf; - info->xmit.buf = 0; - free_page(pg); - } - info->IER = 0; serial_outp(info, UART_IER, 0x00); /* disable all intrs */ #ifdef CONFIG_SERIAL_MANY_PORTS @@ -1583,7 +1553,43 @@ serial_outp(info, UART_IER, UART_IERX_SLEEP); } info->flags &= ~ASYNC_INITIALIZED; - restore_flags(flags); + + /* + * First unlink the serial port from the IRQ chain... + */ + if (info->next_port) + info->next_port->prev_port = info->prev_port; + if (info->prev_port) + info->prev_port->next_port = info->next_port; + else + IRQ_ports[state->irq] = info->next_port; + figure_IRQ_timeout(state->irq); + + /* + * Free the IRQ, if necessary + */ + if (state->irq && (!IRQ_ports[state->irq] || + !IRQ_ports[state->irq]->next_port)) { + if (IRQ_ports[state->irq]) { + free_irq(state->irq, &IRQ_ports[state->irq]); + retval = request_irq(state->irq, rs_interrupt_single, + SA_SHIRQ, "serial", + &IRQ_ports[state->irq]); + + if (retval) + printk("serial shutdown: request_irq: error %d" + " Couldn't reacquire IRQ.\n", retval); + } else + free_irq(state->irq, &IRQ_ports[state->irq]); + } + + if (info->xmit.buf) { + unsigned long pg = (unsigned long) info->xmit.buf; + info->xmit.buf = 0; + free_page(pg); + } + + spin_unlock_irqrestore( &info->irq_spinlock, flags); } #if (LINUX_VERSION_CODE < 131394) /* Linux 2.1.66 */ @@ -2791,7 +2797,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
state->count should always @@ -3128,6 +3134,7 @@ info->tqueue.routine = do_softint; info->tqueue.data = info; info->state = sstate; + spin_lock_init(&info->irq_spinlock); if (sstate->info) { kfree(info); *ret_info = sstate->info; @@ -3242,6 +3249,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_open ttys%d successful...", info->line); #endif + set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); return 0; } @@ -3655,6 +3663,7 @@ info->io_type = state->io_type; info->iomem_base = state->iomem_base; info->iomem_reg_shift = state->iomem_reg_shift; + info->irq_spinlock= (spinlock_t) SPIN_LOCK_UNLOCKED; save_flags(flags); cli(); @@ -3907,7 +3916,14 @@ case 6: /* BAR 4*/ case 7: base_idx=idx-2; /* BAR 5*/ } - + + /* AFAVLAB uses a different mixture of BARs and offsets */ + /* Not that ugly ;) -- HW */ + if (dev->vendor == PCI_VENDOR_ID_AFAVLAB && idx >= 4) { + base_idx = 4; + offset = (idx - 4) * 8; + } + /* Some Titan cards are also a little weird */ if (dev->vendor == PCI_VENDOR_ID_TITAN && (dev->device == PCI_DEVICE_ID_TITAN_400L || @@ -4311,9 +4327,11 @@ pbn_b0_bt_1_115200, pbn_b0_bt_2_115200, + pbn_b0_bt_8_115200, pbn_b0_bt_1_460800, pbn_b0_bt_2_460800, pbn_b0_bt_2_921600, + pbn_b0_bt_4_460800, pbn_b1_1_115200, pbn_b1_2_115200, @@ -4393,9 +4411,11 @@ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 }, /* pbn_b0_bt_1_115200 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 115200 }, /* pbn_b0_bt_2_115200 */ + { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 8, 115200 }, /* pbn_b0_bt_8_115200 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 460800 }, /* pbn_b0_bt_1_460800 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 460800 }, /* pbn_b0_bt_2_460800 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 921600 }, /* pbn_b0_bt_2_921600 */ + { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 4, 460800 }, /* pbn_b0_bt_4_460800 */ { SPCI_FL_BASE1, 1, 115200 }, /* pbn_b1_1_115200 */ { SPCI_FL_BASE1, 2, 115200 }, /* pbn_b1_2_115200 */ @@ -4861,6 +4881,12 @@ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUAD_B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_460800 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_SSERIAL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_1_115200 }, @@ -4873,6 +4899,11 @@ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_115200 }, + /* AFAVLAB serial card, from Harald Welte */ + { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P028, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_8_115200 }, + /* EKF addition for i960 Boards form EKF with serial port */ { PCI_VENDOR_ID_INTEL, 0x1960, 0xE4BF, PCI_ANY_ID, 0, 0, @@ -4892,6 +4923,7 @@ 0x1048, 0x1500, 0, 0, pbn_b1_1_115200 }, + /* SGI IOC3 board */ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, 0xFF00, 0, 0, 0, pbn_sgi_ioc3 }, @@ -5545,12 +5577,22 @@ tty_register_devfs(&callout_driver, 0, callout_driver.minor_start + state->line); } +#ifdef CONFIG_SERIAL_GSC + probe_serial_gsc(); +#endif +#ifdef CONFIG_SUPERIO + superio_serial_init(); +#endif #ifdef ENABLE_SERIAL_PCI probe_serial_pci(); #endif #ifdef ENABLE_SERIAL_PNP probe_serial_pnp(); #endif + // FIXME: Clean this one up +#if defined(CONFIG_SERIAL_CONSOLE) && defined(CONFIG_PARISC) + serial_console_init(); +#endif return 0; } @@ -5660,6 +5702,7 @@ info->io_type = req->io_type; info->iomem_base = req->iomem_base; info->iomem_reg_shift = req->iomem_reg_shift; + info->irq_spinlock= (spinlock_t) SPIN_LOCK_UNLOCKED; } autoconfig(state); if (state->type == PORT_UNKNOWN) { 
@@ -5967,6 +6010,7 @@ info->io_type = state->io_type; info->iomem_base = state->iomem_base; info->iomem_reg_shift = state->iomem_reg_shift; + info->irq_spinlock= (spinlock_t) SPIN_LOCK_UNLOCKED; quot = state->baud_base / baud; cval = cflag & (CSIZE | CSTOPB); #if defined(__powerpc__) || defined(__alpha__) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/serial_txx927.c linux.22-ac2/drivers/char/serial_txx927.c --- linux.vanilla/drivers/char/serial_txx927.c 2002-08-03 16:08:23.000000000 +0100 +++ linux.22-ac2/drivers/char/serial_txx927.c 2003-06-29 16:10:02.000000000 +0100 @@ -155,7 +155,7 @@ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ - kdevname(tty->device), (info->flags), serial_refcount,info->count,tty->count,s) + kdevname(tty->device), (info->flags), serial_refcount,info->count,atomic_read(&tty->count),s) #else #define DBG_CNT(s) #endif @@ -1407,7 +1407,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/sgiserial.c linux.22-ac2/drivers/char/sgiserial.c --- linux.vanilla/drivers/char/sgiserial.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/sgiserial.c 2003-08-28 20:50:14.000000000 +0100 @@ -1443,7 +1443,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/softdog.c linux.22-ac2/drivers/char/softdog.c --- linux.vanilla/drivers/char/softdog.c 2002-11-29 21:27:17.000000000 +0000 +++ linux.22-ac2/drivers/char/softdog.c 2003-07-31 14:18:58.000000000 +0100 @@ -124,7 +124,7 @@ * Shut off the timer. * Lock it in if it's a module and we set nowayout */ - if (expect_close) { + if (expect_close || nowayout == 0) { del_timer(&watchdog_ticktock); } else { printk(KERN_CRIT "SOFTDOG: WDT device closed unexpectedly. 
WDT will not stop!\n"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/sonypi.c linux.22-ac2/drivers/char/sonypi.c --- linux.vanilla/drivers/char/sonypi.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/sonypi.c 2003-08-28 22:12:00.000000000 +0100 @@ -308,7 +308,7 @@ int i, j; v1 = inb_p(sonypi_device.ioport1); - v2 = inb_p(sonypi_device.ioport2); + v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset); for (i = 0; sonypi_eventtypes[i].model; i++) { if (sonypi_device.model != sonypi_eventtypes[i].model) @@ -665,11 +665,13 @@ if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) { ioport_list = sonypi_type2_ioport_list; sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE; + sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET; irq_list = sonypi_type2_irq_list; } else { ioport_list = sonypi_type1_ioport_list; sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE; + sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET; irq_list = sonypi_type1_irq_list; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/sonypi.h linux.22-ac2/drivers/char/sonypi.h --- linux.vanilla/drivers/char/sonypi.h 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/sonypi.h 2003-09-09 22:27:29.000000000 +0100 @@ -56,12 +56,14 @@ #define SONYPI_BASE 0x50 #define SONYPI_G10A (SONYPI_BASE+0x14) #define SONYPI_TYPE1_REGION_SIZE 0x08 +#define SONYPI_TYPE1_EVTYPE_OFFSET 0x04 /* type2 series specifics */ #define SONYPI_SIRQ 0x9b #define SONYPI_SLOB 0x9c #define SONYPI_SHIB 0x9d #define SONYPI_TYPE2_REGION_SIZE 0x20 +#define SONYPI_TYPE2_EVTYPE_OFFSET 0x12 /* battery / brightness addresses */ #define SONYPI_BAT_FLAGS 0x81 @@ -167,6 +169,7 @@ #define SONYPI_THUMBPHRASE_MASK 0x00000200 #define SONYPI_MEYE_MASK 0x00000400 #define SONYPI_MEMORYSTICK_MASK 0x00000800 +#define SONYPI_BATTERY_MASK 0x00001000 struct sonypi_event { u8 data; @@ -293,6 +296,13 @@ { 0, 0 } }; +/* The set of possible battery events */ +static struct sonypi_event sonypi_batteryev[] = { + { 0x20, SONYPI_EVENT_BATTERY_INSERT }, + { 0x30, SONYPI_EVENT_BATTERY_REMOVE }, + { 0, 0 } +}; + struct sonypi_eventtypes { int model; u8 data; @@ -307,19 +317,22 @@ { SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev }, { SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev }, - { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_JOGGER_MASK, sonypi_joggerev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_CAPTURE_MASK, sonypi_captureev }, - { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, - { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev }, - { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_BACK_MASK, sonypi_backev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x08, 
SONYPI_HELP_MASK, sonypi_helpev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_ZOOM_MASK, sonypi_zoomev }, { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev }, - { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, + { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, { 0, 0, 0, 0 } }; @@ -354,6 +367,7 @@ u16 ioport1; u16 ioport2; u16 region_size; + u16 evtype_offset; int camera_power; int bluetooth_power; struct semaphore lock; @@ -380,30 +394,17 @@ } #ifdef CONFIG_ACPI -#include -#if (ACPI_CA_VERSION > 0x20021121) -#ifdef CONFIG_ACPI_EC -#define SONYPI_USE_ACPI -#endif -#endif -#endif /* CONFIG_ACPI */ - -#ifdef CONFIG_ACPI -#ifdef SONYPI_USE_ACPI extern int acpi_disabled; #define SONYPI_ACPI_ACTIVE (!acpi_disabled) #else -#define SONYPI_ACPI_ACTIVE 1 -#endif -#else /* CONFIG_ACPI */ #define SONYPI_ACPI_ACTIVE 0 #endif /* CONFIG_ACPI */ extern int verbose; static inline int sonypi_ec_write(u8 addr, u8 value) { -#ifdef SONYPI_USE_ACPI - if (!acpi_disabled) +#ifdef CONFIG_ACPI_EC + if (SONYPI_ACPI_ACTIVE) return ec_write(addr, value); #endif wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); @@ -417,8 +418,8 @@ } static inline int sonypi_ec_read(u8 addr, u8 *value) { -#ifdef SONYPI_USE_ACPI - if (!acpi_disabled) +#ifdef CONFIG_ACPI_EC + if (SONYPI_ACPI_ACTIVE) return ec_read(addr, value); #endif wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/specialix.c linux.22-ac2/drivers/char/specialix.c --- linux.vanilla/drivers/char/specialix.c 2002-08-03 16:08:23.000000000 +0100 +++ linux.22-ac2/drivers/char/specialix.c 2003-06-29 16:10:00.000000000 +0100 @@ -1517,7 +1517,7 @@ } bp = port_Board(port); - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_ERR "sx%d: sx_close: bad port count;" " tty->count is 1, port count is %d\n", board_No(bp), port->count); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/stallion.c linux.22-ac2/drivers/char/stallion.c --- linux.vanilla/drivers/char/stallion.c 2002-11-29 21:27:17.000000000 +0000 +++ linux.22-ac2/drivers/char/stallion.c 2003-06-29 16:10:01.000000000 +0100 @@ -1197,7 +1197,7 @@ restore_flags(flags); return; } - if ((tty->count == 1) && (portp->refcount != 1)) + if ((atomic_read(&tty->count) == 1) && (portp->refcount != 1)) portp->refcount = 1; if (portp->refcount-- > 1) { MOD_DEC_USE_COUNT; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/synclink.c linux.22-ac2/drivers/char/synclink.c --- linux.vanilla/drivers/char/synclink.c 2002-11-29 21:27:17.000000000 +0000 +++ linux.22-ac2/drivers/char/synclink.c 2003-06-29 16:10:01.000000000 +0100 @@ -1,7 +1,7 @@ /* * linux/drivers/char/synclink.c * - * $Id: synclink.c,v 3.12 2001/07/18 19:14:21 paulkf Exp $ + * $Id: synclink.c,v 3.18 2003/06/18 21:02:25 paulkf Exp $ * * Device driver for Microgate SyncLink ISA and PCI * high speed multiprotocol serial adapters. 
@@ -198,6 +198,7 @@ int flags; int count; /* count of opens */ int line; + int hw_version; unsigned short close_delay; unsigned short closing_wait; /* time to wait before closing */ @@ -922,7 +923,7 @@ MODULE_PARM(txholdbufs,"1-" __MODULE_STRING(MAX_TOTAL_DEVICES) "i"); static char *driver_name = "SyncLink serial driver"; -static char *driver_version = "$Revision: 3.12 $"; +static char *driver_version = "$Revision: 3.18 $"; static int __init synclink_init_one (struct pci_dev *dev, const struct pci_device_id *ent); @@ -930,6 +931,7 @@ static struct pci_device_id synclink_pci_tbl[] __devinitdata = { { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, }, { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE(pci, synclink_pci_tbl); @@ -3251,17 +3253,20 @@ { struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; - if (!info || mgsl_paranoia_check(info, tty->device, "mgsl_close")) + if (mgsl_paranoia_check(info, tty->device, "mgsl_close")) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_close(%s) entry, count=%d\n", __FILE__,__LINE__, info->device_name, info->count); - if (!info->count || tty_hung_up_p(filp)) + if (!info->count) + return; + + if (tty_hung_up_p(filp)) goto cleanup; - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * tty->count is 1 and the tty structure will be freed. * info->count should be one in this case. @@ -3393,7 +3398,7 @@ schedule_timeout(char_time); if (signal_pending(current)) break; - if (timeout && ((orig_jiffies + timeout) < jiffies)) + if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } else { @@ -3403,7 +3408,7 @@ schedule_timeout(char_time); if (signal_pending(current)) break; - if (timeout && ((orig_jiffies + timeout) < jiffies)) + if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } @@ -3612,16 +3617,11 @@ info = mgsl_device_list; while(info && info->line != line) info = info->next_device; - if ( !info ){ - printk("%s(%d):Can't find specified device on open (line=%d)\n", - __FILE__,__LINE__,line); + if (mgsl_paranoia_check(info, tty->device, "mgsl_open")) return -ENODEV; - } tty->driver_data = info; info->tty = tty; - if (mgsl_paranoia_check(info, tty->device, "mgsl_open")) - return -ENODEV; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_open(%s), old ref count = %d\n", @@ -3695,6 +3695,8 @@ cleanup: if (retval) { + if (atomic_read(&tty->count) == 1) + info->tty = 0; /* tty layer will release tty struct */ if(MOD_IN_USE) MOD_DEC_USE_COUNT; if(info->count) @@ -4291,9 +4293,7 @@ info->get_tx_holding_index=0; /* restart transmit timer */ - del_timer(&info->tx_timer); - info->tx_timer.expires = jiffies + jiffies_from_ms(5000); - add_timer(&info->tx_timer); + mod_timer(&info->tx_timer, jiffies + jiffies_from_ms(5000)); ret = 1; } @@ -4511,12 +4511,12 @@ info->max_frame_size = 65535; if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { - printk( "SyncLink device %s added:PCI bus IO=%04X IRQ=%d Mem=%08X LCR=%08X MaxFrameSize=%u\n", - info->device_name, info->io_base, info->irq_level, + printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n", + info->hw_version + 1, info->device_name, info->io_base, info->irq_level, info->phys_memory_base, info->phys_lcr_base, info->max_frame_size ); } else { - printk( "SyncLink device %s added:ISA bus IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n", + printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d 
MaxFrameSize=%u\n", info->device_name, info->io_base, info->irq_level, info->dma_level, info->max_frame_size ); } @@ -5375,10 +5375,11 @@ info->mbre_bit = BIT8; outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */ - /* Enable DMAEN (Port 7, Bit 14) */ - /* This connects the DMA request signal to the ISA bus */ - /* on the ISA adapter. This has no effect for the PCI adapter */ - usc_OutReg( info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14) ); + if (info->bus_type == MGSL_BUS_TYPE_ISA) { + /* Enable DMAEN (Port 7, Bit 14) */ + /* This connects the DMA request signal to the ISA bus */ + usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14)); + } /* DMA Control Register (DCR) * @@ -6355,10 +6356,11 @@ usc_EnableMasterIrqBit( info ); - /* Enable INTEN (Port 6, Bit12) */ - /* This connects the IRQ request signal to the ISA bus */ - /* on the ISA adapter. This has no effect for the PCI adapter */ - usc_OutReg( info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12) ); + if (info->bus_type == MGSL_BUS_TYPE_ISA) { + /* Enable INTEN (Port 6, Bit12) */ + /* This connects the IRQ request signal to the ISA bus */ + usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12)); + } } /* end of usc_set_async_mode() */ @@ -6449,10 +6451,11 @@ usc_loopback_frame( info ); usc_set_sdlc_mode( info ); - /* Enable INTEN (Port 6, Bit12) */ - /* This connects the IRQ request signal to the ISA bus */ - /* on the ISA adapter. This has no effect for the PCI adapter */ - usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12)); + if (info->bus_type == MGSL_BUS_TYPE_ISA) { + /* Enable INTEN (Port 6, Bit12) */ + /* This connects the IRQ request signal to the ISA bus */ + usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12)); + } usc_enable_aux_clock(info, info->params.clock_speed); @@ -7496,7 +7499,7 @@ EndTime = jiffies + jiffies_from_ms(100); for(;;) { - if ( jiffies > EndTime ) { + if (time_after(jiffies, EndTime)) { rc = FALSE; break; } @@ -7552,7 +7555,7 @@ EndTime = jiffies + jiffies_from_ms(100); for(;;) { - if ( jiffies > EndTime ) { + if (time_after(jiffies, EndTime)) { rc = FALSE; break; } @@ -7600,7 +7603,7 @@ spin_unlock_irqrestore(&info->irq_spinlock,flags); while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) { - if ( jiffies > EndTime ) { + if (time_after(jiffies, EndTime)) { rc = FALSE; break; } @@ -7627,8 +7630,7 @@ /* Wait for 16C32 to write receive status to buffer entry. */ status=info->rx_buffer_list[0].status; while ( status == 0 ) { - if ( jiffies > EndTime ) { - printk(KERN_ERR"mark 4\n"); + if (time_after(jiffies, EndTime)) { rc = FALSE; break; } @@ -8202,17 +8204,20 @@ info->bus_type = MGSL_BUS_TYPE_PCI; info->io_addr_size = 8; info->irq_flags = SA_SHIRQ; - - /* Store the PCI9050 misc control register value because a flaw - * in the PCI9050 prevents LCR registers from being read if - * BIOS assigns an LCR base address with bit 7 set. - * - * Only the misc control register is accessed for which only - * write access is needed, so set an initial value and change - * bits to the device instance data as we write the value - * to the actual misc control register. - */ - info->misc_ctrl_value = 0x087e4546; + + if (dev->device == 0x0210) { + /* Version 1 PCI9030 based universal PCI adapter */ + info->misc_ctrl_value = 0x007c4080; + info->hw_version = 1; + } else { + /* Version 0 PCI9050 based 5V PCI adapter + * A PCI9050 bug prevents reading LCR registers if + * LCR base address bit 7 is set. 
Maintain shadow + * value so we can write to LCR misc control reg. + */ + info->misc_ctrl_value = 0x087e4546; + info->hw_version = 0; + } mgsl_add_device(info); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/synclinkmp.c linux.22-ac2/drivers/char/synclinkmp.c --- linux.vanilla/drivers/char/synclinkmp.c 2002-11-29 21:27:17.000000000 +0000 +++ linux.22-ac2/drivers/char/synclinkmp.c 2003-06-29 16:10:04.000000000 +0100 @@ -1,5 +1,5 @@ /* - * $Id: synclinkmp.c,v 3.17 2002/04/22 16:05:39 paulkf Exp $ + * $Id: synclinkmp.c,v 3.21 2003/06/18 21:02:26 paulkf Exp $ * * Device driver for Microgate SyncLink Multiport * high speed multiprotocol serial adapter. @@ -504,7 +504,7 @@ MODULE_PARM(dosyncppp,"1-" __MODULE_STRING(MAX_DEVICES) "i"); static char *driver_name = "SyncLink MultiPort driver"; -static char *driver_version = "$Revision: 3.17 $"; +static char *driver_version = "$Revision: 3.21 $"; static int __devinit synclinkmp_init_one(struct pci_dev *dev,const struct pci_device_id *ent); static void __devexit synclinkmp_remove_one(struct pci_dev *dev); @@ -515,6 +515,10 @@ }; MODULE_DEVICE_TABLE(pci, synclinkmp_pci_tbl); +#ifdef MODULE_LICENSE +MODULE_LICENSE("GPL"); +#endif + static struct pci_driver synclinkmp_pci_driver = { name: "synclinkmp", id_table: synclinkmp_pci_tbl, @@ -748,12 +752,8 @@ info = synclinkmp_device_list; while(info && info->line != line) info = info->next_device; - if ( !info ){ - printk("%s(%d):%s Can't find specified device on open (line=%d)\n", - __FILE__,__LINE__,info->device_name,line); + if (sanity_check(info, tty->device, "open")) return -ENODEV; - } - if ( info->init_error ) { printk("%s(%d):%s device is not allocated, init error=%d\n", __FILE__,__LINE__,info->device_name,info->init_error); @@ -762,8 +762,6 @@ tty->driver_data = info; info->tty = tty; - if (sanity_check(info, tty->device, "open")) - return -ENODEV; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s open(), old ref count = %d\n", @@ -825,6 +823,8 @@ cleanup: if (retval) { + if (atomic_read(&tty->count) == 1) + info->tty = 0; /* tty layer will release tty struct */ if(MOD_IN_USE) MOD_DEC_USE_COUNT; if(info->count) @@ -841,17 +841,20 @@ { SLMP_INFO * info = (SLMP_INFO *)tty->driver_data; - if (!info || sanity_check(info, tty->device, "close")) + if (sanity_check(info, tty->device, "close")) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s close() entry, count=%d\n", __FILE__,__LINE__, info->device_name, info->count); - if (!info->count || tty_hung_up_p(filp)) + if (!info->count) + return; + + if (tty_hung_up_p(filp)) goto cleanup; - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * tty->count is 1 and the tty structure will be freed. * info->count should be one in this case. 
@@ -1203,7 +1206,7 @@ schedule_timeout(char_time); if (signal_pending(current)) break; - if (timeout && ((orig_jiffies + timeout) < jiffies)) + if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } else { @@ -1214,7 +1217,7 @@ schedule_timeout(char_time); if (signal_pending(current)) break; - if (timeout && ((orig_jiffies + timeout) < jiffies)) + if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/tipar.c linux.22-ac2/drivers/char/tipar.c --- linux.vanilla/drivers/char/tipar.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/char/tipar.c 2003-08-08 15:10:48.000000000 +0100 @@ -71,9 +71,11 @@ #define DRIVER_DESC "Device driver for TI/PC parallel link cables" #define DRIVER_LICENSE "GPL" -#define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq)) -#if LINUX_VERSION_CODE < VERSION(2,5,0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) # define minor(x) MINOR(x) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) # define need_resched() (current->need_resched) #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/tty_io.c linux.22-ac2/drivers/char/tty_io.c --- linux.vanilla/drivers/char/tty_io.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/tty_io.c 2003-08-28 17:00:08.000000000 +0100 @@ -145,8 +145,8 @@ extern void au1x00_serial_console_init(void); extern int rs_8xx_init(void); extern void mac_scc_console_init(void); -extern void hwc_console_init(void); -extern void hwc_tty_init(void); +extern void sclp_console_init(void); +extern void sclp_tty_init(void); extern void con3215_init(void); extern void tty3215_init(void); extern void tub3270_con_init(void); @@ -246,14 +246,15 @@ file_list_unlock(); if (tty->driver.type == TTY_DRIVER_TYPE_PTY && tty->driver.subtype == PTY_TYPE_SLAVE && - tty->link && tty->link->count) + tty->link && atomic_read(&tty->link->count)) count++; - if (tty->count != count) { + if (atomic_read(&tty->count) != count) { printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) " "!= #fd's(%d) in %s\n", - kdevname(tty->device), tty->count, count, routine); + kdevname(tty->device), atomic_read(&tty->count), + count, routine); return count; - } + } #endif return 0; } @@ -929,7 +930,7 @@ o_tty->termios_locked = *o_ltp_loc; (*driver->other->refcount)++; if (driver->subtype == PTY_TYPE_MASTER) - o_tty->count++; + atomic_inc(&o_tty->count); /* Establish the links in both directions */ tty->link = o_tty; @@ -950,7 +951,7 @@ tty->termios = *tp_loc; tty->termios_locked = *ltp_loc; (*driver->refcount)++; - tty->count++; + atomic_inc(&tty->count); /* * Structures all installed ... call the ldisc open routines. @@ -989,13 +990,13 @@ * special case for PTY masters: only one open permitted, * and the slave side open count is incremented as well. */ - if (tty->count) { + if (atomic_read(&tty->count)) { retval = -EIO; goto end_init; } - tty->link->count++; + atomic_inc(&tty->link->count); } - tty->count++; + atomic_inc(&tty->count); tty->driver = *driver; /* N.B. why do this every time?? */ success: @@ -1119,7 +1120,7 @@ #ifdef TTY_DEBUG_HANGUP printk(KERN_DEBUG "release_dev of %s (tty count=%d)...", - tty_name(tty, buf), tty->count); + tty_name(tty, buf), atomic_read(&tty->count)); #endif #ifdef TTY_PARANOIA_CHECK @@ -1171,9 +1172,9 @@ * each iteration we avoid any problems. 
*/ while (1) { - tty_closing = tty->count <= 1; + tty_closing = atomic_read(&tty->count) <= 1; o_tty_closing = o_tty && - (o_tty->count <= (pty_master ? 1 : 0)); + (atomic_read(&o_tty->count) <= (pty_master ? 1 : 0)); do_sleep = 0; if (tty_closing) { @@ -1210,17 +1211,20 @@ * block, so it's safe to proceed with closing. */ if (pty_master) { - if (--o_tty->count < 0) { + atomic_dec(&o_tty->count); + if (atomic_read(&o_tty->count) < 0) { printk(KERN_WARNING "release_dev: bad pty slave count " "(%d) for %s\n", - o_tty->count, tty_name(o_tty, buf)); - o_tty->count = 0; + atomic_read(&o_tty->count), + tty_name(o_tty, buf)); + atomic_set(&o_tty->count, 0); } } - if (--tty->count < 0) { + atomic_dec(&tty->count); + if (atomic_read(&tty->count) < 0) { printk(KERN_WARNING "release_dev: bad tty->count (%d) for %s\n", - tty->count, tty_name(tty, buf)); - tty->count = 0; + atomic_read(&tty->count), tty_name(tty, buf)); + atomic_set(&tty->count, 0); } /* @@ -1436,7 +1440,7 @@ } if ((tty->driver.type == TTY_DRIVER_TYPE_SERIAL) && (tty->driver.subtype == SERIAL_TYPE_CALLOUT) && - (tty->count == 1)) { + (atomic_read(&tty->count) == 1)) { static int nr_warns; if (nr_warns < 5) { printk(KERN_WARNING "tty_io.c: " @@ -2277,8 +2281,8 @@ #ifdef CONFIG_TN3215 con3215_init(); #endif -#ifdef CONFIG_HWC - hwc_console_init(); +#ifdef CONFIG_SCLP_CONSOLE + sclp_console_init(); #endif #ifdef CONFIG_STDIO_CONSOLE stdio_console_init(); @@ -2443,8 +2447,8 @@ #ifdef CONFIG_TN3215 tty3215_init(); #endif -#ifdef CONFIG_HWC - hwc_tty_init(); +#ifdef CONFIG_SCLP_TTY + sclp_tty_init(); #endif #ifdef CONFIG_A2232 a2232board_init(); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/vac-serial.c linux.22-ac2/drivers/char/vac-serial.c --- linux.vanilla/drivers/char/vac-serial.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/char/vac-serial.c 2003-08-28 20:50:53.000000000 +0100 @@ -29,7 +29,7 @@ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) baget_printk("(%s):[%x] refc=%d, serc=%d, ttyc=%d-> %s\n", \ - kdevname(tty->device),(info->flags),serial_refcount,info->count,tty->count,s) + kdevname(tty->device),(info->flags),serial_refcount,info->count,atomic_read(&tty->count),s) #else #define DBG_CNT(s) #endif @@ -1658,7 +1658,7 @@ baget_printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif - if ((tty->count == 1) && (state->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/vblank.c linux.22-ac2/drivers/char/vblank.c --- linux.vanilla/drivers/char/vblank.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/char/vblank.c 2003-09-09 19:07:16.000000000 +0100 @@ -0,0 +1,612 @@ +/* + * Vertical blank interrupt driver for PCI devices + * + * (C) Copyright 2003 Soeren Sandmann + * (C) Copyright 2003 Red Hat + * + * Based heavily on svgalib interrupt.c by Matan Ziv-Av + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * 0.00 SS Original Driver + * 0.01 Alan Moved to miscdev, rewrote interfaces + * 0.02 SS Fixed lots of bugs + * Andersca + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static int try_vga = 0; /* Set to try generic VGA on unknowns */ + +struct video_card; + +struct vblank_ops +{ + int (*test)(struct video_card *); + void (*enable)(struct video_card *); + void (*disable)(struct video_card *); +}; + +struct video_card +{ + struct pci_dev *pci_dev; /* PCI device */ + unsigned long iobase; /* Base for I/O */ + unsigned long mmio; /* Our MMIO mapping if any */ + unsigned long mmio_len; + struct vblank_ops *vops; + + int running:1; + + unsigned int count; /* Counter for interrupts */ + spinlock_t lock; /* Protects count */ + + wait_queue_head_t wait; /* Woken each vblank */ + struct fasync_struct *async_queue; /* Async chain */ + + u32 saved_pmc; +}; + + +/* + * Based on interrupt.c from svgalib + * Author: Matan Ziv-Av (matan@svgalib.org) + */ + +static int vga_test_vsync(struct video_card *card) +{ + return inb(card->iobase + 0x3C2) & 0x80; +} + +static void vga_clear_disable_vsync(struct video_card *card) +{ + u8 pb; + + outb(0x11, card->iobase + 0x3d4); + pb = inb(card->iobase + 0x3d5); + outb(0x11, card->iobase + 0x3d4); + outb(pb & 0xef, card->iobase + 0x3d5); +} + +static void vga_enable_vsync(struct video_card *card) +{ + u8 pb; + + /* + * Enable interrupt, clear pending + */ + outb(0x11, card->iobase + 0x3d4); + pb = inb(card->iobase + 0x3d5); + outb(0x11, card->iobase + 0x3d4); + outb((pb&0xcf) , card->iobase + 0x3d5); + + /* + * Allow interrupts + */ + outb(0x11, card->iobase + 0x3d4); + pb = inb(card->iobase + 0x3d5); + outb(0x11, card->iobase + 0x3d4); + outb(pb | 0x10 , card->iobase + 0x3d5); +} + +static struct vblank_ops vblank_io_ops = +{ + vga_test_vsync, + vga_enable_vsync, + vga_clear_disable_vsync, +}; + +static int vga_mm_test_vsync(struct video_card *card) +{ + return readb(card->iobase + 0x3C2) & 0x80; +} + +static void vga_mm_clear_disable_vsync(struct video_card *card) +{ + u8 pb; + + writeb(0x11, card->iobase + 0x3d4); + pb = readb(card->iobase + 0x3d5); + writeb(0x11, card->iobase + 0x3d4); + writeb(pb & 0xef, card->iobase + 0x3d5); +} + +static void vga_mm_enable_vsync(struct video_card *card) +{ + u8 pb; + + /* + * Enable interrupt, clear pending + */ + writeb(0x11, card->iobase + 0x3d4); + pb = readb(card->iobase + 0x3d5); + writeb(0x11, card->iobase + 0x3d4); + writeb((pb&0xcf) , card->iobase + 0x3d5); + + /* + * Allow interrupts + */ + writeb(0x11, card->iobase + 0x3d4); + pb = readb(card->iobase + 0x3d5); + writeb(0x11, card->iobase + 0x3d4); + writeb(pb | 0x10 , card->iobase + 0x3d5); +} + +static struct vblank_ops vga_mm_ops = +{ + vga_mm_test_vsync, + vga_mm_enable_vsync, + vga_mm_clear_disable_vsync, +}; + + +static int nv3_test_vsync(struct video_card *card) +{ + return readl(card->iobase+0x400100)&0x100; +} + +static void nv3_clear_and_disable_vsync(struct video_card *card) +{ + /* disable interrupt, clear pending */ + writel(0xffffffff, card->iobase + 0x000100); + writel(0x100, card->iobase + 0x400100); + writel(0, card->iobase + 0x000140); + writel(0, card->iobase + 0x400140); + writel(card->saved_pmc, card->iobase + 0x000200); +} + +static void nv3_enable_vsync(struct video_card *card) +{ + card->saved_pmc = inl(card->iobase + 0x200); + writel(card->saved_pmc|0x1000, card->iobase+0x200); + writel(0x1, 
card->iobase + 0x000140); + writel(0x100, card->iobase + 0x400140); + writel(0xffffffff, card->iobase + 0x000100); + writel(0xffffffff, card->iobase + 0x400100); +} + +static struct vblank_ops nv3_ops = { + nv3_test_vsync, + nv3_enable_vsync, + nv3_clear_and_disable_vsync, +}; + +static int nv4_test_vsync(struct video_card *card) +{ + return readl(card->iobase+0x600100)&0x1; +} + +static void nv4_clear_and_disable_vsync(struct video_card *card) +{ + /* disable interrupt, clear pending */ + writel(0xffffffff, card->iobase + 0x000100); + writel(0x1, card->iobase + 0x600100); + writel(0, card->iobase + 0x000140); + writel(0, card->iobase + 0x600140); + writel(card->saved_pmc, card->iobase + 0x000200); +} + +static void nv4_enable_vsync(struct video_card *card) +{ + card->saved_pmc = inl(card->iobase + 0x200); + writel(card->saved_pmc|(1<<24),card->iobase+0x200); + writel(0x1, card->iobase + 0x000140); + writel(0x1, card->iobase + 0x600140); + writel(0xffffffff, card->iobase + 0x000100); + writel(0xffffffff, card->iobase + 0x600100); +} + +static struct vblank_ops nv4_ops = { + nv4_test_vsync, + nv4_enable_vsync, + nv4_clear_and_disable_vsync, +}; + + +static int r128_test_vsync(struct video_card *card) +{ + return readl(card->iobase + 0x44) &1; +} + +static void r128_clear_and_disable_vsync(struct video_card *card) +{ + writel(1, card->iobase + 0x44); + writel(readl(card->iobase + 0x40) & 0xfffffffe, card->iobase + 0x40); +} + +static void r128_enable_vsync(struct video_card *card) +{ + writel(1, card->iobase + 0x44); + writel(readl(card->iobase + 0x40) | 1, card->iobase + 0x40); +} + +static struct vblank_ops r128_ops = { + r128_test_vsync, + r128_enable_vsync, + r128_clear_and_disable_vsync +}; + +static int rage_test_vsync(struct video_card *card) +{ + return inl(card->iobase + 0x18) &4; +} + +static void rage_clear_and_disable_vsync(struct video_card *card) +{ + outl((inl(card->iobase + 0x18) & 0xfffffff8) | 4, card->iobase + 0x18); +} + +static void rage_enable_vsync(struct video_card *card) +{ + outl((inl(card->iobase + 0x18) & 0xfffffff8) | 6, card->iobase + 0x18); +} + +static struct vblank_ops rage_ops = { + rage_test_vsync, + rage_enable_vsync, + rage_clear_and_disable_vsync +}; + +static int rendition_test_vsync(struct video_card *card) +{ + return inw(card->iobase + 0x44) & 1; +} + +static void rendition_clear_and_disable_vsync(struct video_card *card) +{ + outw(1, card->iobase + 0x44); + outw(0, card->iobase + 0x46); +} + +static void rendition_enable_vsync(struct video_card *card) +{ + outw(1, card->iobase + 0x44); + outw(1, card->iobase + 0x46); +} + +static struct vblank_ops rendition_ops = { + rendition_test_vsync, + rendition_enable_vsync, + rendition_clear_and_disable_vsync, +}; + +static int is_r128(struct pci_dev *pdev) +{ + switch(pdev->device) + { + case 0x4c45: + case 0x4c46: + case 0x4d46: + case 0x4d4c: + return 1; + case 0x4242: + case 0x4c57: + case 0x4c59: + case 0x4c5a: + return 1; + } + + switch(pdev->device >> 8) + { + case 0x50: + case 0x52: + case 0x53: + case 0x54: + return 1; + case 0x51: + return 1; + } + + return 0; + +} + +static int vga_init_vsync(struct video_card *card) +{ + struct pci_dev *pdev = card->pci_dev; + int res; + + switch(pdev->vendor) + { + case PCI_VENDOR_ID_MATROX: + res = 0; + if(pci_resource_len(pdev, 0) >= 1048576) + res = 1; + card->mmio = (unsigned long)ioremap(pci_resource_start(pdev, res), 0x2000); + card->mmio_len = 0x2000; + if(card->mmio == 0) + goto fail; + card->iobase = card->mmio + 0x1C00; + card->vops = &vga_mm_ops; + 
break; + case PCI_VENDOR_ID_SI: + card->iobase = pci_resource_start(pdev, 2) - 0x380; + card->vops = &vblank_io_ops; + break; + case PCI_VENDOR_ID_NVIDIA_SGS: + card->mmio = (unsigned long)ioremap(pci_resource_start(pdev, 0), 0x800000); + card->mmio_len = 0x800000; + if(card->mmio == 0) + goto fail; + card->iobase = card->mmio; + if(pdev->device < 0x20) + card->vops = &nv3_ops; + else + card->vops = &nv4_ops; + break; + case PCI_VENDOR_ID_NVIDIA: + card->mmio = (unsigned long)ioremap(pci_resource_start(pdev, 0), 0x800000); + card->mmio_len = 0x800000; + if(card->mmio == 0) + goto fail; + card->iobase = card->mmio; + card->vops= &nv4_ops; + break; + case PCI_VENDOR_ID_ATI: + /* FIXME: Add Radeon */ + if(is_r128(pdev)) + { + card->mmio = (unsigned long)ioremap(pci_resource_start(pdev, 2), 16384); + card->mmio_len = 16384; + if(card->mmio == 0) + goto fail; + card->iobase = card->mmio; + card->vops = &r128_ops; + } + else + { + card->iobase = pci_resource_start(pdev, 1); + card->vops = &rage_ops; + } + break; + case PCI_VENDOR_ID_RENDITION: + card->iobase = pci_resource_start(pdev, 1); + card->vops = &rendition_ops; + break; + default: + if(try_vga) + { + card->iobase = 0; + card->vops = &vblank_io_ops; + break; + } + printk(KERN_ERR "vblank: Unsupported video card %04X:%04X.\n", + pdev->vendor, pdev->device); + return -EOPNOTSUPP; + + } + return 0; +fail: + printk(KERN_ERR "vblank: Unable to map card.\n"); + return -ENOMEM; +} + +static void vblankdev_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct video_card *card = dev_id; + + spin_lock(&card->lock); + if(card->vops->test(card)) + { + card->vops->disable(card); + card->vops->enable(card); + card->count++; + wake_up_interruptible(&card->wait); + kill_fasync(&card->async_queue, SIGIO, POLL_IN); + } + spin_unlock(&card->lock); +} + +static int vblankdev_open(struct inode *inode, struct file *filp) +{ + struct video_card *card; + card = kmalloc(sizeof(struct video_card), GFP_KERNEL); + if(card == NULL) + return -ENOBUFS; + memset(card, 0, sizeof(struct video_card)); + init_waitqueue_head(&card->wait); + spin_lock_init(&card->lock); + filp->private_data = card; + return 0; +} + +static int vblankdev_fasync (int fd, struct file *filp, int mode) +{ + struct video_card *card = filp->private_data; + return fasync_helper(fd, filp, mode, &card->async_queue); +} + +static int vblankdev_release(struct inode *inode, struct file *filp) +{ + struct video_card *card = filp->private_data; + + if(card->running) + { + card->vops->disable(card); + free_irq(card->pci_dev->irq, card); + } + if(card->mmio) + iounmap((void *)card->mmio); + vblankdev_fasync(-1, filp, 0); + kfree(card); + return 0; +} + +static unsigned int vblankdev_poll(struct file *filp, poll_table *poll_table) +{ + struct video_card *card = filp->private_data; + + poll_wait(filp, &card->wait, poll_table); + if(card->count) + return POLLIN; + return 0; +} + +static ssize_t vblankdev_read(struct file *filp, char *buff, size_t count, loff_t *offp) +{ + struct video_card *card = filp->private_data; + unsigned long flags; + int err = -EWOULDBLOCK; + u32 num = 0; + size_t len; + DECLARE_WAITQUEUE(wait, current); + + /* Can't seek (pread) on this device */ + if (offp != &filp->f_pos) + return -ESPIPE; + + + add_wait_queue(&card->wait, &wait); + do + { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock_irqsave(&card->lock, flags); + if(card->running) + { + num = card->count; + card->count = 0; + err = 0; + } + spin_unlock_irqrestore(&card->lock, flags); + if(num) + break; + 
schedule(); + if(signal_pending(current)) + { + err = -EINTR; + break; + } + } + while(!(filp->f_flags & O_NDELAY)); + + remove_wait_queue(&card->wait, &wait); + set_current_state(TASK_RUNNING); + + if(err) + return err; + + len = min(count, (size_t)4); + if(copy_to_user(buff, &num, 4)) + return -EFAULT; + return len; +} + +static int vblankdev_do_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct vblank_bind vbind; + struct pci_dev *pdev; + struct video_card *card = filp->private_data; + + switch(cmd) + { + case VBLIOC_BIND: + if(card->running) + return -EBUSY; + + if(copy_from_user(&vbind, (void *)arg, sizeof(struct vblank_bind))) + return -EFAULT; + + /* Domain isnt used yet */ + pdev = pci_find_slot(vbind.bus, vbind.devfn); + if(pdev == NULL) + return -ENODEV; + + if(pci_enable_device(pdev) < 0) + return -EIO; + + if(pdev->irq == 0) + return -EOPNOTSUPP; + + /* Ok try and bind it */ + card->pci_dev = pdev; + if(vga_init_vsync(card) < 0) + return -EOPNOTSUPP; + card->running = 1; + if(request_irq(pdev->irq, vblankdev_interrupt, SA_SHIRQ, "vblank", card) < 0) + { + card->running = 0; + return -EBUSY; + } + card->vops->enable(card); + return 0; + default: + return -ENOTTY; + } +} + +/* + * We need a lock here to avoid parallel binding of the same + * object. + */ + +static DECLARE_MUTEX(ioctl_sem); + +static int vblankdev_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) +{ + int ret; + + down(&ioctl_sem); + ret = vblankdev_do_ioctl(inode, filp, cmd, arg); + up(&ioctl_sem); + + return ret; +} + +static struct file_operations vblankdev_fops = { + owner: THIS_MODULE, + llseek: no_llseek, + read: vblankdev_read, + ioctl: vblankdev_ioctl, + open: vblankdev_open, + release: vblankdev_release, + fasync: vblankdev_fasync, + poll: vblankdev_poll +}; + +static struct miscdevice vblankdev = { + VBLANK_MINOR, + "vblank", + &vblankdev_fops +}; + +static int vblankdev_init(void) +{ + if(!pci_present()) + return -ENODEV; + if(misc_register(&vblankdev) < 0) + return -EINVAL; + + return 0; +} + +static void vblankdev_exit(void) +{ + misc_deregister(&vblankdev); +} + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Soeren Sandmann (sandmann@daimi.au.dk), very much based on work by Matan Ziv-Av (matan@svgalib.org), Alan Cox"); +MODULE_PARM(try_vga, "i"); + +module_init(vblankdev_init); +module_exit(vblankdev_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/char/vt.c linux.22-ac2/drivers/char/vt.c --- linux.vanilla/drivers/char/vt.c 2002-11-29 21:27:17.000000000 +0000 +++ linux.22-ac2/drivers/char/vt.c 2003-06-29 16:10:00.000000000 +0100 @@ -40,7 +40,8 @@ char vt_dont_switch; extern struct tty_driver console_driver; -#define VT_IS_IN_USE(i) (console_driver.table[i] && console_driver.table[i]->count) +#define VT_IS_IN_USE(i) (console_driver.table[i] && \ + atomic_read(&console_driver.table[i]->count)) #define VT_BUSY(i) (VT_IS_IN_USE(i) || i == fg_console || i == sel_cons) /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/cpufreq/freq_table.c linux.22-ac2/drivers/cpufreq/freq_table.c --- linux.vanilla/drivers/cpufreq/freq_table.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/cpufreq/freq_table.c 2003-06-29 16:10:33.000000000 +0100 @@ -0,0 +1,153 @@ +/* + * linux/drivers/cpufreq/freq_table.c + * + * Copyright (C) 2002 - 2003 Dominik Brodowski + */ + +#include +#include +#include +#include +#include + 
+/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ + +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table) +{ + unsigned int min_freq = ~0; + unsigned int max_freq = 0; + unsigned int i = 0; + + for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + unsigned int freq = table[i].frequency; + if (freq == CPUFREQ_ENTRY_INVALID) + continue; + if (freq < min_freq) + min_freq = freq; + if (freq > max_freq) + max_freq = freq; + } + + policy->min = policy->cpuinfo.min_freq = min_freq; + policy->max = policy->cpuinfo.max_freq = max_freq; + + if (policy->min == ~0) + return -EINVAL; + else + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); + + +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table) +{ + unsigned int next_larger = ~0; + unsigned int i = 0; + unsigned int count = 0; + + if (!cpu_online(policy->cpu)) + return -EINVAL; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + unsigned int freq = table[i].frequency; + if (freq == CPUFREQ_ENTRY_INVALID) + continue; + if ((freq >= policy->min) && (freq <= policy->max)) + count++; + else if ((next_larger > freq) && (freq > policy->max)) + next_larger = freq; + } + + if (!count) + policy->max = next_larger; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); + + +int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int target_freq, + unsigned int relation, + unsigned int *index) +{ + struct cpufreq_frequency_table optimal = { .index = ~0, }; + struct cpufreq_frequency_table suboptimal = { .index = ~0, }; + unsigned int i; + + switch (relation) { + case CPUFREQ_RELATION_H: + optimal.frequency = 0; + suboptimal.frequency = ~0; + break; + case CPUFREQ_RELATION_L: + optimal.frequency = ~0; + suboptimal.frequency = 0; + break; + } + + if (!cpu_online(policy->cpu)) + return -EINVAL; + + for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + unsigned int freq = table[i].frequency; + if (freq == CPUFREQ_ENTRY_INVALID) + continue; + if ((freq < policy->min) || (freq > policy->max)) + continue; + switch(relation) { + case CPUFREQ_RELATION_H: + if (freq <= target_freq) { + if (freq >= optimal.frequency) { + optimal.frequency = freq; + optimal.index = i; + } + } else { + if (freq <= suboptimal.frequency) { + suboptimal.frequency = freq; + suboptimal.index = i; + } + } + break; + case CPUFREQ_RELATION_L: + if (freq >= target_freq) { + if (freq <= optimal.frequency) { + optimal.frequency = freq; + optimal.index = i; + } + } else { + if (freq >= suboptimal.frequency) { + suboptimal.frequency = freq; + suboptimal.index = i; + } + } + break; + } + } + if (optimal.index > i) { + if (suboptimal.index > i) + return -EINVAL; + *index = suboptimal.index; + } else + *index = optimal.index; + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); + +MODULE_AUTHOR ("Dominik Brodowski "); +MODULE_DESCRIPTION ("CPUfreq frequency table helpers"); +MODULE_LICENSE ("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/cpufreq/Makefile linux.22-ac2/drivers/cpufreq/Makefile --- 
linux.vanilla/drivers/cpufreq/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/cpufreq/Makefile 2003-06-29 16:10:33.000000000 +0100 @@ -0,0 +1,12 @@ +O_TARGET := cpufreq.o + +# CPUfreq governors +obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += userspace.o + +# CPUfreq cross-arch helpers +obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o +obj-$(CONFIG_CPU_FREQ_PROC_INTF) += proc_intf.o + +export-objs := userspace.o freq_table.o + +include $(TOPDIR)/Rules.make diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/cpufreq/proc_intf.c linux.22-ac2/drivers/cpufreq/proc_intf.c --- linux.vanilla/drivers/cpufreq/proc_intf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/cpufreq/proc_intf.c 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,246 @@ +/* + * linux/drivers/cpufreq/proc_intf.c + * + * Copyright (C) 2002 - 2003 Dominik Brodowski + */ + +#include +#include +#include +#include +#include +#include +#include + + +#define CPUFREQ_ALL_CPUS ((NR_CPUS)) + +/** + * cpufreq_parse_policy - parse a policy string + * @input_string: the string to parse. + * @policy: the policy written inside input_string + * + * This function parses a "policy string" - something the user echo'es into + * /proc/cpufreq or gives as boot parameter - into a struct cpufreq_policy. + * If there are invalid/missing entries, they are replaced with current + * cpufreq policy. + */ +static int cpufreq_parse_policy(char input_string[42], struct cpufreq_policy *policy) +{ + unsigned int min = 0; + unsigned int max = 0; + unsigned int cpu = 0; + char str_governor[16]; + struct cpufreq_policy current_policy; + unsigned int result = -EFAULT; + + if (!policy) + return -EINVAL; + + policy->min = 0; + policy->max = 0; + policy->policy = 0; + policy->cpu = CPUFREQ_ALL_CPUS; + + if (sscanf(input_string, "%d:%d:%d:%15s", &cpu, &min, &max, str_governor) == 4) + { + policy->min = min; + policy->max = max; + policy->cpu = cpu; + result = 0; + goto scan_policy; + } + if (sscanf(input_string, "%d%%%d%%%d%%%15s", &cpu, &min, &max, str_governor) == 4) + { + if (!cpufreq_get_policy(¤t_policy, cpu)) { + policy->min = (min * current_policy.cpuinfo.max_freq) / 100; + policy->max = (max * current_policy.cpuinfo.max_freq) / 100; + policy->cpu = cpu; + result = 0; + goto scan_policy; + } + } + + if (sscanf(input_string, "%d:%d:%15s", &min, &max, str_governor) == 3) + { + policy->min = min; + policy->max = max; + result = 0; + goto scan_policy; + } + + if (sscanf(input_string, "%d%%%d%%%15s", &min, &max, str_governor) == 3) + { + if (!cpufreq_get_policy(¤t_policy, cpu)) { + policy->min = (min * current_policy.cpuinfo.max_freq) / 100; + policy->max = (max * current_policy.cpuinfo.max_freq) / 100; + result = 0; + goto scan_policy; + } + } + + return -EINVAL; + +scan_policy: + result = cpufreq_parse_governor(str_governor, &policy->policy, &policy->governor); + + return result; +} + +/** + * cpufreq_proc_read - read /proc/cpufreq + * + * This function prints out the current cpufreq policy. 
+ */ +static int cpufreq_proc_read ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + char *p = page; + int len = 0; + struct cpufreq_policy policy; + unsigned int min_pctg = 0; + unsigned int max_pctg = 0; + unsigned int i = 0; + + if (off != 0) + goto end; + + p += sprintf(p, " minimum CPU frequency - maximum CPU frequency - policy\n"); + for (i=0;iname); + break; + default: + p += sprintf(p, "INVALID\n"); + break; + } + } +end: + len = (p - page); + if (len <= off+count) + *eof = 1; + *start = page + off; + len -= off; + if (len>count) + len = count; + if (len<0) + len = 0; + + return len; +} + + +/** + * cpufreq_proc_write - handles writing into /proc/cpufreq + * + * This function calls the parsing script and then sets the policy + * accordingly. + */ +static int cpufreq_proc_write ( + struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + int result = 0; + char proc_string[42] = {'\0'}; + struct cpufreq_policy policy; + unsigned int i = 0; + + + if ((count > sizeof(proc_string) - 1)) + return -EINVAL; + + if (copy_from_user(proc_string, buffer, count)) + return -EFAULT; + + proc_string[count] = '\0'; + + result = cpufreq_parse_policy(proc_string, &policy); + if (result) + return -EFAULT; + + if (policy.cpu == CPUFREQ_ALL_CPUS) + { + for (i=0; iread_proc = cpufreq_proc_read; + entry->write_proc = cpufreq_proc_write; + } + + return 0; +} + + +/** + * cpufreq_proc_exit - removes "cpufreq" from the /proc root directory. + * + * This function removes "cpufreq" from the /proc root directory. + */ +static void __exit cpufreq_proc_exit (void) +{ + remove_proc_entry("cpufreq", &proc_root); + return; +} + +MODULE_AUTHOR ("Dominik Brodowski "); +MODULE_DESCRIPTION ("CPUfreq /proc/cpufreq interface"); +MODULE_LICENSE ("GPL"); + +module_init(cpufreq_proc_init); +module_exit(cpufreq_proc_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/cpufreq/userspace.c linux.22-ac2/drivers/cpufreq/userspace.c --- linux.vanilla/drivers/cpufreq/userspace.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/cpufreq/userspace.c 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,559 @@ +/* + * drivers/cpufreq/userspace.c + * + * Copyright (C) 2001 Russell King + * (C) 2002 - 2003 Dominik Brodowski + * + * $Id:$ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define CTL_CPU_VARS_SPEED_MAX(cpunr) { \ + .ctl_name = CPU_NR_FREQ_MAX, \ + .data = &cpu_max_freq[cpunr], \ + .procname = "speed-max", \ + .maxlen = sizeof(cpu_max_freq[cpunr]),\ + .mode = 0444, \ + .proc_handler = proc_dointvec, } + +#define CTL_CPU_VARS_SPEED_MIN(cpunr) { \ + .ctl_name = CPU_NR_FREQ_MIN, \ + .data = &cpu_min_freq[cpunr], \ + .procname = "speed-min", \ + .maxlen = sizeof(cpu_min_freq[cpunr]),\ + .mode = 0444, \ + .proc_handler = proc_dointvec, } + +#define CTL_CPU_VARS_SPEED(cpunr) { \ + .ctl_name = CPU_NR_FREQ, \ + .procname = "speed", \ + .mode = 0644, \ + .proc_handler = cpufreq_procctl, \ + .strategy = cpufreq_sysctl, \ + .extra1 = (void*) (cpunr), } + +#define CTL_TABLE_CPU_VARS(cpunr) static ctl_table ctl_cpu_vars_##cpunr[] = {\ + CTL_CPU_VARS_SPEED_MAX(cpunr), \ + CTL_CPU_VARS_SPEED_MIN(cpunr), \ + CTL_CPU_VARS_SPEED(cpunr), \ + { .ctl_name = 0, }, } + +/* the ctl_table entry for each CPU */ +#define CPU_ENUM(s) { \ + .ctl_name = (CPU_NR + s), \ + .procname = #s, \ + .mode = 0555, \ + .child = ctl_cpu_vars_##s } + +/** + * A few values needed by the userspace governor + */ +static unsigned int cpu_max_freq[NR_CPUS]; +static unsigned int cpu_min_freq[NR_CPUS]; +static unsigned int cpu_cur_freq[NR_CPUS]; +static unsigned int cpu_is_managed[NR_CPUS]; +static struct cpufreq_policy current_policy[NR_CPUS]; + +static DECLARE_MUTEX (userspace_sem); + + +/* keep track of frequency transitions */ +static int +userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + + cpu_cur_freq[freq->cpu] = freq->new; + + return 0; +} + +static struct notifier_block userspace_cpufreq_notifier_block = { + .notifier_call = userspace_cpufreq_notifier +}; + + +/** + * cpufreq_set - set the CPU frequency + * @freq: target frequency in kHz + * @cpu: CPU for which the frequency is to be set + * + * Sets the CPU frequency to freq. + */ +int cpufreq_set(unsigned int freq, unsigned int cpu) +{ + int ret = -EINVAL; + + down(&userspace_sem); + if (!cpu_is_managed[cpu]) + goto err; + + if (freq < cpu_min_freq[cpu]) + freq = cpu_min_freq[cpu]; + if (freq > cpu_max_freq[cpu]) + freq = cpu_max_freq[cpu]; + + ret = cpufreq_driver_target(¤t_policy[cpu], freq, + CPUFREQ_RELATION_L); + + err: + up(&userspace_sem); + return ret; +} +EXPORT_SYMBOL_GPL(cpufreq_set); + + +/** + * cpufreq_setmax - set the CPU to the maximum frequency + * @cpu - affected cpu; + * + * Sets the CPU frequency to the maximum frequency supported by + * this CPU. 
+ */ +int cpufreq_setmax(unsigned int cpu) +{ + if (!cpu_is_managed[cpu] || !cpu_online(cpu)) + return -EINVAL; + return cpufreq_set(cpu_max_freq[cpu], cpu); +} +EXPORT_SYMBOL_GPL(cpufreq_setmax); + + +/** + * cpufreq_get - get the current CPU frequency (in kHz) + * @cpu: CPU number + * + * Get the CPU current (static) CPU frequency + */ +unsigned int cpufreq_get(unsigned int cpu) +{ + return cpu_cur_freq[cpu]; +} +EXPORT_SYMBOL(cpufreq_get); + + +#ifdef CONFIG_CPU_FREQ_24_API + + +/*********************** cpufreq_sysctl interface ********************/ +static int +cpufreq_procctl(ctl_table *ctl, int write, struct file *filp, + void *buffer, size_t *lenp) +{ + char buf[16], *p; + int cpu = (int) ctl->extra1; + int len, left = *lenp; + + if (!left || (filp->f_pos && !write) || !cpu_online(cpu)) { + *lenp = 0; + return 0; + } + + if (write) { + unsigned int freq; + + len = left; + if (left > sizeof(buf)) + left = sizeof(buf); + if (copy_from_user(buf, buffer, left)) + return -EFAULT; + buf[sizeof(buf) - 1] = '\0'; + + freq = simple_strtoul(buf, &p, 0); + cpufreq_set(freq, cpu); + } else { + len = sprintf(buf, "%d\n", cpufreq_get(cpu)); + if (len > left) + len = left; + if (copy_to_user(buffer, buf, len)) + return -EFAULT; + } + + *lenp = len; + filp->f_pos += len; + return 0; +} + +static int +cpufreq_sysctl(ctl_table *table, int *name, int nlen, + void *oldval, size_t *oldlenp, + void *newval, size_t newlen, void **context) +{ + int cpu = (int) table->extra1; + + if (!cpu_online(cpu)) + return -EINVAL; + + if (oldval && oldlenp) { + size_t oldlen; + + if (get_user(oldlen, oldlenp)) + return -EFAULT; + + if (oldlen != sizeof(unsigned int)) + return -EINVAL; + + if (put_user(cpufreq_get(cpu), (unsigned int *)oldval) || + put_user(sizeof(unsigned int), oldlenp)) + return -EFAULT; + } + if (newval && newlen) { + unsigned int freq; + + if (newlen != sizeof(unsigned int)) + return -EINVAL; + + if (get_user(freq, (unsigned int *)newval)) + return -EFAULT; + + cpufreq_set(freq, cpu); + } + return 1; +} + +/* ctl_table ctl_cpu_vars_{0,1,...,(NR_CPUS-1)} */ +/* due to NR_CPUS tweaking, a lot of if/endifs are required, sorry */ + CTL_TABLE_CPU_VARS(0); +#if NR_CPUS > 1 + CTL_TABLE_CPU_VARS(1); +#endif +#if NR_CPUS > 2 + CTL_TABLE_CPU_VARS(2); +#endif +#if NR_CPUS > 3 + CTL_TABLE_CPU_VARS(3); +#endif +#if NR_CPUS > 4 + CTL_TABLE_CPU_VARS(4); +#endif +#if NR_CPUS > 5 + CTL_TABLE_CPU_VARS(5); +#endif +#if NR_CPUS > 6 + CTL_TABLE_CPU_VARS(6); +#endif +#if NR_CPUS > 7 + CTL_TABLE_CPU_VARS(7); +#endif +#if NR_CPUS > 8 + CTL_TABLE_CPU_VARS(8); +#endif +#if NR_CPUS > 9 + CTL_TABLE_CPU_VARS(9); +#endif +#if NR_CPUS > 10 + CTL_TABLE_CPU_VARS(10); +#endif +#if NR_CPUS > 11 + CTL_TABLE_CPU_VARS(11); +#endif +#if NR_CPUS > 12 + CTL_TABLE_CPU_VARS(12); +#endif +#if NR_CPUS > 13 + CTL_TABLE_CPU_VARS(13); +#endif +#if NR_CPUS > 14 + CTL_TABLE_CPU_VARS(14); +#endif +#if NR_CPUS > 15 + CTL_TABLE_CPU_VARS(15); +#endif +#if NR_CPUS > 16 + CTL_TABLE_CPU_VARS(16); +#endif +#if NR_CPUS > 17 + CTL_TABLE_CPU_VARS(17); +#endif +#if NR_CPUS > 18 + CTL_TABLE_CPU_VARS(18); +#endif +#if NR_CPUS > 19 + CTL_TABLE_CPU_VARS(19); +#endif +#if NR_CPUS > 20 + CTL_TABLE_CPU_VARS(20); +#endif +#if NR_CPUS > 21 + CTL_TABLE_CPU_VARS(21); +#endif +#if NR_CPUS > 22 + CTL_TABLE_CPU_VARS(22); +#endif +#if NR_CPUS > 23 + CTL_TABLE_CPU_VARS(23); +#endif +#if NR_CPUS > 24 + CTL_TABLE_CPU_VARS(24); +#endif +#if NR_CPUS > 25 + CTL_TABLE_CPU_VARS(25); +#endif +#if NR_CPUS > 26 + CTL_TABLE_CPU_VARS(26); +#endif +#if NR_CPUS > 27 + 
CTL_TABLE_CPU_VARS(27); +#endif +#if NR_CPUS > 28 + CTL_TABLE_CPU_VARS(28); +#endif +#if NR_CPUS > 29 + CTL_TABLE_CPU_VARS(29); +#endif +#if NR_CPUS > 30 + CTL_TABLE_CPU_VARS(30); +#endif +#if NR_CPUS > 31 + CTL_TABLE_CPU_VARS(31); +#endif +#if NR_CPUS > 32 +#error please extend CPU enumeration +#endif + +/* due to NR_CPUS tweaking, a lot of if/endifs are required, sorry */ +static ctl_table ctl_cpu_table[NR_CPUS + 1] = { + CPU_ENUM(0), +#if NR_CPUS > 1 + CPU_ENUM(1), +#endif +#if NR_CPUS > 2 + CPU_ENUM(2), +#endif +#if NR_CPUS > 3 + CPU_ENUM(3), +#endif +#if NR_CPUS > 4 + CPU_ENUM(4), +#endif +#if NR_CPUS > 5 + CPU_ENUM(5), +#endif +#if NR_CPUS > 6 + CPU_ENUM(6), +#endif +#if NR_CPUS > 7 + CPU_ENUM(7), +#endif +#if NR_CPUS > 8 + CPU_ENUM(8), +#endif +#if NR_CPUS > 9 + CPU_ENUM(9), +#endif +#if NR_CPUS > 10 + CPU_ENUM(10), +#endif +#if NR_CPUS > 11 + CPU_ENUM(11), +#endif +#if NR_CPUS > 12 + CPU_ENUM(12), +#endif +#if NR_CPUS > 13 + CPU_ENUM(13), +#endif +#if NR_CPUS > 14 + CPU_ENUM(14), +#endif +#if NR_CPUS > 15 + CPU_ENUM(15), +#endif +#if NR_CPUS > 16 + CPU_ENUM(16), +#endif +#if NR_CPUS > 17 + CPU_ENUM(17), +#endif +#if NR_CPUS > 18 + CPU_ENUM(18), +#endif +#if NR_CPUS > 19 + CPU_ENUM(19), +#endif +#if NR_CPUS > 20 + CPU_ENUM(20), +#endif +#if NR_CPUS > 21 + CPU_ENUM(21), +#endif +#if NR_CPUS > 22 + CPU_ENUM(22), +#endif +#if NR_CPUS > 23 + CPU_ENUM(23), +#endif +#if NR_CPUS > 24 + CPU_ENUM(24), +#endif +#if NR_CPUS > 25 + CPU_ENUM(25), +#endif +#if NR_CPUS > 26 + CPU_ENUM(26), +#endif +#if NR_CPUS > 27 + CPU_ENUM(27), +#endif +#if NR_CPUS > 28 + CPU_ENUM(28), +#endif +#if NR_CPUS > 29 + CPU_ENUM(29), +#endif +#if NR_CPUS > 30 + CPU_ENUM(30), +#endif +#if NR_CPUS > 31 + CPU_ENUM(31), +#endif +#if NR_CPUS > 32 +#error please extend CPU enumeration +#endif + { + .ctl_name = 0, + } +}; + +static ctl_table ctl_cpu[2] = { + { + .ctl_name = CTL_CPU, + .procname = "cpu", + .mode = 0555, + .child = ctl_cpu_table, + }, + { + .ctl_name = 0, + } +}; + +struct ctl_table_header *cpufreq_sysctl_table; + +static inline void cpufreq_sysctl_init(void) +{ + cpufreq_sysctl_table = register_sysctl_table(ctl_cpu, 0); +} + +static inline void cpufreq_sysctl_exit(void) +{ + unregister_sysctl_table(cpufreq_sysctl_table); +} + +#else +#define cpufreq_sysctl_init() do {} while(0) +#define cpufreq_sysctl_exit() do {} while(0) +#endif /* CONFIG_CPU_FREQ_24API */ + + +static int cpufreq_governor_userspace(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || + !policy->cur) + return -EINVAL; + down(&userspace_sem); + cpu_is_managed[cpu] = 1; + cpu_min_freq[cpu] = policy->min; + cpu_max_freq[cpu] = policy->max; + cpu_cur_freq[cpu] = policy->cur; + memcpy (¤t_policy[cpu], policy, sizeof(struct cpufreq_policy)); + up(&userspace_sem); + break; + case CPUFREQ_GOV_STOP: + down(&userspace_sem); + cpu_is_managed[cpu] = 0; + cpu_min_freq[cpu] = 0; + cpu_max_freq[cpu] = 0; + up(&userspace_sem); + break; + case CPUFREQ_GOV_LIMITS: + down(&userspace_sem); + cpu_min_freq[cpu] = policy->min; + cpu_max_freq[cpu] = policy->max; + if (policy->max < cpu_cur_freq[cpu]) + __cpufreq_driver_target(¤t_policy[cpu], policy->max, + CPUFREQ_RELATION_H); + else if (policy->min > cpu_cur_freq[cpu]) + __cpufreq_driver_target(¤t_policy[cpu], policy->min, + CPUFREQ_RELATION_L); + memcpy (¤t_policy[cpu], policy, sizeof(struct cpufreq_policy)); + up(&userspace_sem); + break; + } + return 0; +} + +/* on ARM SA1100 we need to rely on the 
values of cpufreq_get() - because + * of this, cpu_cur_freq[] needs to be set early. + */ +#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_SA1100) +extern unsigned int sa11x0_getspeed(void); + +static void cpufreq_sa11x0_compat(void) +{ + cpu_cur_freq[0] = sa11x0_getspeed(); +} +#else +#define cpufreq_sa11x0_compat() do {} while(0) +#endif + + +static struct cpufreq_governor cpufreq_gov_userspace = { + .name = "userspace", + .governor = cpufreq_governor_userspace, +}; +EXPORT_SYMBOL(cpufreq_gov_userspace); + +static int already_init = 0; + +int cpufreq_gov_userspace_init(void) +{ + if (!already_init) { + down(&userspace_sem); + cpufreq_sa11x0_compat(); + cpufreq_sysctl_init(); + cpufreq_register_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); + already_init = 1; + up(&userspace_sem); + } + return cpufreq_register_governor(&cpufreq_gov_userspace); +} +EXPORT_SYMBOL(cpufreq_gov_userspace_init); + + +static void __exit cpufreq_gov_userspace_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_userspace); + cpufreq_unregister_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); + cpufreq_sysctl_exit(); +} + + +MODULE_AUTHOR ("Dominik Brodowski , Russell King "); +MODULE_DESCRIPTION ("CPUfreq policy governor 'userspace'"); +MODULE_LICENSE ("GPL"); + +module_init(cpufreq_gov_userspace_init); +module_exit(cpufreq_gov_userspace_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/Config.in linux.22-ac2/drivers/hotplug/Config.in --- linux.vanilla/drivers/hotplug/Config.in 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/hotplug/Config.in 2003-08-28 17:01:02.000000000 +0100 @@ -6,12 +6,13 @@ dep_tristate 'Support for PCI Hotplug (EXPERIMENTAL)' CONFIG_HOTPLUG_PCI $CONFIG_EXPERIMENTAL $CONFIG_PCI +if [ "$CONFIG_ACPI_INTERPRETER" ]; then + dep_tristate ' ACPI PCI Hotplug driver' CONFIG_HOTPLUG_PCI_ACPI $CONFIG_HOTPLUG_PCI +fi dep_tristate ' Compaq PCI Hotplug driver' CONFIG_HOTPLUG_PCI_COMPAQ $CONFIG_HOTPLUG_PCI $CONFIG_X86 dep_mbool ' Save configuration into NVRAM on Compaq servers' CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM $CONFIG_HOTPLUG_PCI_COMPAQ if [ "$CONFIG_X86_IO_APIC" = "y" ]; then dep_tristate ' IBM PCI Hotplug driver' CONFIG_HOTPLUG_PCI_IBM $CONFIG_HOTPLUG_PCI $CONFIG_X86_IO_APIC $CONFIG_X86 fi -if [ "$CONFIG_ACPI_INTERPRETER" ]; then - dep_tristate ' ACPI PCI Hotplug driver' CONFIG_HOTPLUG_PCI_ACPI $CONFIG_HOTPLUG_PCI -fi +dep_tristate ' IBM Thinkpad (20H2999) Docking driver (VERY EXPERIMENTAL) ' CONFIG_HOTPLUG_PCI_H2999 $CONFIG_HOTPLUG_PCI $CONFIG_X86 endmenu diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/cpqphp_core.c linux.22-ac2/drivers/hotplug/cpqphp_core.c --- linux.vanilla/drivers/hotplug/cpqphp_core.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/hotplug/cpqphp_core.c 2003-08-28 22:26:32.000000000 +0100 @@ -320,7 +320,7 @@ void *slot_entry= NULL; int result; - dbg(__FUNCTION__"\n"); + dbg("%s\n", __FUNCTION__); tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); @@ -482,7 +482,7 @@ u8 tbus, tdevice, tslot, bridgeSlot; - dbg(__FUNCTION__" %p, %d, %d, %p\n", ops, bus_num, dev_num, slot); + dbg("%s %p, %d, %d, %p\n", __FUNCTION__, ops, bus_num, dev_num, slot); bridgeSlot = 0xFF; @@ -598,7 +598,7 @@ if (slot == NULL) return -ENODEV; - dbg(__FUNCTION__" - physical_slot = %s\n", hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); ctrl = slot->ctrl; if (ctrl == NULL) @@ -633,7 +633,7 @@ if 
(slot == NULL) return -ENODEV; - dbg(__FUNCTION__" - physical_slot = %s\n", hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); ctrl = slot->ctrl; if (ctrl == NULL) @@ -673,7 +673,7 @@ if (slot == NULL) return -ENODEV; - dbg(__FUNCTION__" - physical_slot = %s\n", hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); ctrl = slot->ctrl; if (ctrl == NULL) @@ -701,7 +701,7 @@ struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); struct controller *ctrl; - dbg(__FUNCTION__"\n"); + dbg("%s\n", __FUNCTION__); if (slot == NULL) return -ENODEV; @@ -722,7 +722,7 @@ if (slot == NULL) return -ENODEV; - dbg(__FUNCTION__" - physical_slot = %s\n", hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); ctrl = slot->ctrl; if (ctrl == NULL) @@ -740,7 +740,7 @@ if (slot == NULL) return -ENODEV; - dbg(__FUNCTION__" - physical_slot = %s\n", hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); ctrl = slot->ctrl; if (ctrl == NULL) @@ -758,7 +758,7 @@ if (slot == NULL) return -ENODEV; - dbg(__FUNCTION__" - physical_slot = %s\n", hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); ctrl = slot->ctrl; if (ctrl == NULL) @@ -777,7 +777,7 @@ if (slot == NULL) return -ENODEV; - dbg(__FUNCTION__" - physical_slot = %s\n", hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); ctrl = slot->ctrl; if (ctrl == NULL) @@ -865,7 +865,7 @@ // TODO: This code can be made to support non-Compaq or Intel subsystem IDs rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid); if (rc) { - err(__FUNCTION__" : pci_read_config_word failed\n"); + err("%s : pci_read_config_word failed\n", __FUNCTION__); return rc; } dbg("Subsystem Vendor ID: %x\n", subsystem_vid); @@ -876,14 +876,14 @@ ctrl = (struct controller *) kmalloc(sizeof(struct controller), GFP_KERNEL); if (!ctrl) { - err(__FUNCTION__" : out of memory\n"); + err("%s : out of memory\n", __FUNCTION__); return -ENOMEM; } memset(ctrl, 0, sizeof(struct controller)); rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid); if (rc) { - err(__FUNCTION__" : pci_read_config_word failed\n"); + err("%s : pci_read_config_word failed\n", __FUNCTION__); goto err_free_ctrl; } @@ -1139,7 +1139,7 @@ // Store PCI Config Space for all devices on this bus rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK)); if (rc) { - err(__FUNCTION__": unable to save PCI configuration data, error %d\n", rc); + err("%s: unable to save PCI configuration data, error %d\n", __FUNCTION__, rc); goto err_iounmap; } @@ -1169,7 +1169,7 @@ rc = ctrl_slot_setup(ctrl, smbios_start, smbios_table); if (rc) { err(msg_initialization_err, 6); - err(__FUNCTION__": unable to save PCI configuration data, error %d\n", rc); + err("%s: unable to save PCI configuration data, error %d\n", __FUNCTION__, rc); goto err_iounmap; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/cpqphp_ctrl.c linux.22-ac2/drivers/hotplug/cpqphp_ctrl.c --- linux.vanilla/drivers/hotplug/cpqphp_ctrl.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/hotplug/cpqphp_ctrl.c 2003-08-28 22:26:32.000000000 +0100 @@ -771,13 +771,13 @@ return(NULL); for (node = *head; node; node = node->next) { - dbg(__FUNCTION__": req_size =%x node=%p, base=%x, length=%x\n", - size, node, node->base, node->length); + dbg("%s: req_size =%x node=%p, base=%x, 
length=%x\n", + __FUNCTION__, size, node, node->base, node->length); if (node->length < size) continue; if (node->base & (size - 1)) { - dbg(__FUNCTION__": not aligned\n"); + dbg("%s: not aligned\n", __FUNCTION__); // this one isn't base aligned properly // so we'll make a new entry and split it up temp_dword = (node->base | (size-1)) + 1; @@ -803,7 +803,7 @@ // Don't need to check if too small since we already did if (node->length > size) { - dbg(__FUNCTION__": too big\n"); + dbg("%s: too big\n", __FUNCTION__); // this one is longer than we need // so we'll make a new entry and split it up split_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL); @@ -820,7 +820,7 @@ node->next = split_node; } // End of too big on top end - dbg(__FUNCTION__": got one!!!\n"); + dbg("%s: got one!!!\n", __FUNCTION__); // If we got here, then it is the right size // Now take it out of the list if (*head == node) { @@ -855,7 +855,7 @@ struct pci_resource *node2; int out_of_order = 1; - dbg(__FUNCTION__": head = %p, *head = %p\n", head, *head); + dbg("%s: head = %p, *head = %p\n",__FUNCTION__, head, *head); if (!(*head)) return(1); @@ -942,7 +942,7 @@ // Read to clear posted writes misc = readw(ctrl->hpc_reg + MISC); - dbg (__FUNCTION__" - waking up\n"); + dbg ("%s - waking up\n", __FUNCTION__); wake_up_interruptible(&ctrl->queue); } @@ -1394,8 +1394,8 @@ struct resource_lists res_lists; hp_slot = func->device - ctrl->slot_device_offset; - dbg(__FUNCTION__": func->device, slot_offset, hp_slot = %d, %d ,%d\n", - func->device, ctrl->slot_device_offset, hp_slot); + dbg("%s: func->device, slot_offset, hp_slot = %d, %d ,%d\n", + __FUNCTION__, func->device, ctrl->slot_device_offset, hp_slot); // Wait for exclusive access to hardware down(&ctrl->crit_sect); @@ -1444,55 +1444,55 @@ // turn on board and blink green LED // Wait for exclusive access to hardware - dbg(__FUNCTION__": before down\n"); + dbg("%s: before down\n", __FUNCTION__); down(&ctrl->crit_sect); - dbg(__FUNCTION__": after down\n"); + dbg("%s: after down\n", __FUNCTION__); - dbg(__FUNCTION__": before slot_enable\n"); + dbg("%s: before slot_enable\n", __FUNCTION__); slot_enable (ctrl, hp_slot); - dbg(__FUNCTION__": before green_LED_blink\n"); + dbg("%s: before green_LED_blink\n", __FUNCTION__); green_LED_blink (ctrl, hp_slot); - dbg(__FUNCTION__": before amber_LED_blink\n"); + dbg("%s: before amber_LED_blink\n", __FUNCTION__); amber_LED_off (ctrl, hp_slot); - dbg(__FUNCTION__": before set_SOGO\n"); + dbg("%s: before set_SOGO\n", __FUNCTION__); set_SOGO(ctrl); // Wait for SOBS to be unset - dbg(__FUNCTION__": before wait_for_ctrl_irq\n"); + dbg("%s: before wait_for_ctrl_irq\n", __FUNCTION__); wait_for_ctrl_irq (ctrl); - dbg(__FUNCTION__": after wait_for_ctrl_irq\n"); + dbg("%s: after wait_for_ctrl_irq\n", __FUNCTION__); // Done with exclusive hardware access - dbg(__FUNCTION__": before up\n"); + dbg("%s: before up\n", __FUNCTION__); up(&ctrl->crit_sect); - dbg(__FUNCTION__": after up\n"); + dbg("%s: after up\n", __FUNCTION__); // Wait for ~1 second because of hot plug spec - dbg(__FUNCTION__": before long_delay\n"); + dbg("%s: before long_delay\n", __FUNCTION__); long_delay(1*HZ); - dbg(__FUNCTION__": after long_delay\n"); + dbg("%s: after long_delay\n", __FUNCTION__); - dbg(__FUNCTION__": func status = %x\n", func->status); + dbg("%s: func status = %x\n", __FUNCTION__, func->status); // Check for a power fault if (func->status == 0xFF) { // power fault occurred, but it was benign temp_register = 0xFFFFFFFF; - dbg(__FUNCTION__": 
temp register set to %x by power fault\n", temp_register); + dbg("%s: temp register set to %x by power fault\n", __FUNCTION__, temp_register); rc = POWER_FAILURE; func->status = 0; } else { // Get vendor/device ID u32 rc = pci_read_config_dword_nodev (ctrl->pci_ops, func->bus, func->device, func->function, PCI_VENDOR_ID, &temp_register); - dbg(__FUNCTION__": pci_read_config_dword returns %d\n", rc); - dbg(__FUNCTION__": temp_register is %x\n", temp_register); + dbg("%s: pci_read_config_dword returns %d\n", __FUNCTION__, rc); + dbg("%s: temp_register is %x\n", __FUNCTION__, temp_register); if (rc != 0) { // Something's wrong here temp_register = 0xFFFFFFFF; - dbg(__FUNCTION__": temp register set to %x by error\n", temp_register); + dbg("%s: temp register set to %x by error\n", __FUNCTION__, temp_register); } // Preset return code. It will be changed later if things go okay. rc = NO_ADAPTER_PRESENT; @@ -1508,7 +1508,7 @@ rc = configure_new_device(ctrl, func, 0, &res_lists); - dbg(__FUNCTION__": back from configure_new_device\n"); + dbg("%s: back from configure_new_device\n", __FUNCTION__); ctrl->io_head = res_lists.io_head; ctrl->mem_head = res_lists.mem_head; ctrl->p_mem_head = res_lists.p_mem_head; @@ -1545,7 +1545,7 @@ func->is_a_board = 0x01; //next, we will instantiate the linux pci_dev structures (with appropriate driver notification, if already present) - dbg(__FUNCTION__": configure linux pci_dev structure\n"); + dbg("%s: configure linux pci_dev structure\n", __FUNCTION__); index = 0; do { new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++); @@ -1612,7 +1612,7 @@ device = func->device; hp_slot = func->device - ctrl->slot_device_offset; - dbg("In "__FUNCTION__", hp_slot = %d\n", hp_slot); + dbg("In %s, hp_slot = %d\n", __FUNCTION__, hp_slot); // When we get here, it is safe to change base Address Registers. // We will attempt to save the base Address Register Lengths @@ -1942,7 +1942,7 @@ func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0); dbg("In power_down_board, func = %p, ctrl = %p\n", func, ctrl); if (!func) { - dbg("Error! func NULL in "__FUNCTION__"\n"); + dbg("Error! func NULL in %s\n", __FUNCTION__); return ; } @@ -1966,7 +1966,7 @@ func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0); dbg("In add_board, func = %p, ctrl = %p\n", func, ctrl); if (!func) { - dbg("Error! func NULL in "__FUNCTION__"\n"); + dbg("Error! 
func NULL in %s\n", __FUNCTION__); return ; } @@ -2081,7 +2081,7 @@ } if (rc) { - dbg(__FUNCTION__": rc = %d\n", rc); + dbg("%s: rc = %d\n", __FUNCTION__, rc); } if (p_slot) @@ -2347,11 +2347,11 @@ new_slot = func; - dbg(__FUNCTION__"\n"); + dbg("%s\n", __FUNCTION__); // Check for Multi-function device rc = pci_read_config_byte_nodev (ctrl->pci_ops, func->bus, func->device, func->function, 0x0E, &temp_byte); if (rc) { - dbg(__FUNCTION__": rc = %d\n", rc); + dbg("%s: rc = %d\n", __FUNCTION__, rc); return rc; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/cpqphp.h linux.22-ac2/drivers/hotplug/cpqphp.h --- linux.vanilla/drivers/hotplug/cpqphp.h 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/hotplug/cpqphp.h 2003-09-01 13:54:30.000000000 +0100 @@ -747,7 +747,7 @@ return 1; hp_slot = slot->device - ctrl->slot_device_offset; - dbg(__FUNCTION__": slot->device = %d, ctrl->slot_device_offset = %d \n", slot->device, ctrl->slot_device_offset); + dbg("%s: slot->device = %d, ctrl->slot_device_offset = %d \n", __FUNCTION__, slot->device, ctrl->slot_device_offset); status = (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)); @@ -785,7 +785,7 @@ DECLARE_WAITQUEUE(wait, current); int retval = 0; - dbg(__FUNCTION__" - start\n"); + dbg("%s - start\n", __FUNCTION__); add_wait_queue(&ctrl->queue, &wait); set_current_state(TASK_INTERRUPTIBLE); /* Sleep for up to 1 second to wait for the LED to change. */ @@ -795,7 +795,7 @@ if (signal_pending(current)) retval = -EINTR; - dbg(__FUNCTION__" - end\n"); + dbg("%s - end\n", __FUNCTION__); return retval; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/cpqphp_nvram.c linux.22-ac2/drivers/hotplug/cpqphp_nvram.c --- linux.vanilla/drivers/hotplug/cpqphp_nvram.c 2002-11-29 21:27:17.000000000 +0000 +++ linux.22-ac2/drivers/hotplug/cpqphp_nvram.c 2003-08-28 22:26:32.000000000 +0100 @@ -160,7 +160,7 @@ (temp6 == 'Q')) { result = 1; } - dbg (__FUNCTION__" - returned %d\n", result); + dbg ("%s - returned %d\n",__FUNCTION__, result); return result; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/cpqphp_pci.c linux.22-ac2/drivers/hotplug/cpqphp_pci.c --- linux.vanilla/drivers/hotplug/cpqphp_pci.c 2002-11-29 21:27:17.000000000 +0000 +++ linux.22-ac2/drivers/hotplug/cpqphp_pci.c 2003-08-28 22:26:32.000000000 +0100 @@ -139,7 +139,7 @@ //We did not even find a hotplug rep of the function, create it //This code might be taken out if we can guarantee the creation of functions //in parallel (hotplug and Linux at the same time). 
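Editorial note on the change being made throughout these cpqphp/pci_hotplug hunks: gcc 2.x treated __FUNCTION__ as a string literal, so the driver could paste it straight onto the format string; gcc 3.x makes it a const char[] variable (like C99 __func__), so the concatenation no longer compiles and the function name has to be passed as a "%s" argument instead. A minimal stand-alone sketch of the before/after idiom; the dbg() macro here is only a stand-in for the driver's own debug macro:

    #include <stdio.h>

    /* stand-in for the driver's dbg() macro */
    #define dbg(fmt, arg...) printf("cpqphp: " fmt , ## arg)

    int main(void)
    {
            /* old form, only builds while __FUNCTION__ is a string literal:
             *     dbg(__FUNCTION__ ": not aligned\n");
             */
            dbg("%s: not aligned\n", __FUNCTION__);   /* new form, works on both */
            return 0;
    }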
- dbg("@@@@@@@@@@@ cpqhp_slot_create in "__FUNCTION__"\n"); + dbg("@@@@@@@@@@@ cpqhp_slot_create in %s\n", __FUNCTION__); temp_func = cpqhp_slot_create(bus->number); if (temp_func == NULL) return -ENOMEM; @@ -306,7 +306,7 @@ memset(&wrapped_dev, 0, sizeof(struct pci_dev_wrapped)); memset(&wrapped_bus, 0, sizeof(struct pci_bus_wrapped)); - dbg(__FUNCTION__": bus/dev/func = %x/%x/%x\n",func->bus, func->device, func->function); + dbg("%s: bus/dev/func = %x/%x/%x\n", __FUNCTION__, func->bus, func->device, func->function); for (j=0; j<8 ; j++) { struct pci_dev* temp = pci_find_slot(func->bus, (func->device << 3) | j); @@ -354,10 +354,10 @@ fakedev.devfn = dev_num << 3; fakedev.bus = &fakebus; fakebus.number = bus_num; - dbg(__FUNCTION__": dev %d, bus %d, pin %d, num %d\n", - dev_num, bus_num, int_pin, irq_num); + dbg("%s: dev %d, bus %d, pin %d, num %d\n", + __FUNCTION__, dev_num, bus_num, int_pin, irq_num); rc = pcibios_set_irq_routing(&fakedev, int_pin - 0x0a, irq_num); - dbg(__FUNCTION__":rc %d\n", rc); + dbg("%s:rc %d\n", __FUNCTION__, rc); if (!rc) return !rc; @@ -1585,7 +1585,7 @@ int rc = 0; struct pci_resource *node; struct pci_resource *t_node; - dbg(__FUNCTION__"\n"); + dbg("%s\n", __FUNCTION__); if (!func) return(1); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/Makefile linux.22-ac2/drivers/hotplug/Makefile --- linux.vanilla/drivers/hotplug/Makefile 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/hotplug/Makefile 2003-06-29 16:17:32.000000000 +0100 @@ -12,6 +12,7 @@ obj-$(CONFIG_HOTPLUG_PCI_COMPAQ) += cpqphp.o obj-$(CONFIG_HOTPLUG_PCI_IBM) += ibmphp.o obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o +obj-$(CONFIG_HOTPLUG_PCI_H2999) += tp600.o pci_hotplug-objs := pci_hotplug_core.o \ pci_hotplug_util.o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/pci_hotplug_core.c linux.22-ac2/drivers/hotplug/pci_hotplug_core.c --- linux.vanilla/drivers/hotplug/pci_hotplug_core.c 2002-11-29 21:27:17.000000000 +0000 +++ linux.22-ac2/drivers/hotplug/pci_hotplug_core.c 2003-08-28 22:26:32.000000000 +0100 @@ -49,7 +49,7 @@ #define MY_NAME THIS_MODULE->name #endif -#define dbg(fmt, arg...) do { if (debug) printk(KERN_DEBUG "%s: "__FUNCTION__": " fmt , MY_NAME , ## arg); } while (0) +#define dbg(fmt, arg...) do { if (debug) printk(KERN_DEBUG "%s: %s: " fmt , MY_NAME , __FUNCTION__ , ## arg); } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/pci_hotplug_util.c linux.22-ac2/drivers/hotplug/pci_hotplug_util.c --- linux.vanilla/drivers/hotplug/pci_hotplug_util.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/hotplug/pci_hotplug_util.c 2003-08-28 22:26:32.000000000 +0100 @@ -41,7 +41,7 @@ #define MY_NAME THIS_MODULE->name #endif -#define dbg(fmt, arg...) do { if (debug) printk(KERN_DEBUG "%s: "__FUNCTION__": " fmt , MY_NAME , ## arg); } while (0) +#define dbg(fmt, arg...) do { if (debug) printk(KERN_DEBUG "%s: %s: " fmt , MY_NAME , __FUNCTION__ , ## arg); } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) #define warn(format, arg...) 
printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/tp600.c linux.22-ac2/drivers/hotplug/tp600.c --- linux.vanilla/drivers/hotplug/tp600.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/hotplug/tp600.c 2003-08-08 15:12:05.000000000 +0100 @@ -0,0 +1,500 @@ +/* + * Drivers for the IBM 20H2999 found in the IBM thinkpad series + * machines. + * + * This driver was done without documentation from IBM + * + * _ + * { } + * | | All reverse engineering done + * | | in accordance with 92/250/EEC + * .-.! !.-. and Copyright (Computer Programs) + * .-! ! ! !.-. Regulations 1992 (S.I. 1992 No. 3233) + * ! ! ! ; + * \ ; + * \ ; + * ! : + * ! | + * | | + * + * + * Various other IBM's tried to obtain docs but failed. For that + * reason we only support warm not hot undocking at the moment. + * + * Known bugs: + * Sometimes we hang with an IRQ storm. I don't know what + * deals with the IRQ disables yet. (Hot dock) + * Sometimes busmastering (and maybe IRQs) don't come back + * (Seems to be a buffering issue for hot dock) + * + * Yet to do: + * ISA is not yet handled (oh god help us) + * Instead of saving/restoring pci devices we should + * re-enumerate that subtree so you can change devices + * (That also deals with stale save problems) + * We need to do a proper warm save/restore interface + * Bridged cards don't yet work + * + * Usage: + * Load module + * Pray + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "pci_hotplug.h" +#include "tp600.h" + +static struct h2999_dev *testdev; + +/** + * pci_save_slot - save slot PCI data + * @slot: slot to save + * + * Save the slot data from a PCI device + */ + +static void pci_save_slot(struct h2999_slot *s) +{ + int i, n; + + for(i=0;i<8;i++) + { + struct pci_dev *p = pci_find_slot(s->dev->hotplug_bus, PCI_DEVFN(s->slotid, i)); + s->pci[i] = p; + if(p) + { + for(n = 0; n < 64; n++) + pci_read_config_dword(p, n * 4, &s->save[i][n]); +// printk("Saved %02X:%02X.%X\n", +// s->dev->hotplug_bus, s->slotid, i); + } + } +} + +static void pci_restore_slot(struct h2999_slot *s) +{ + int i,n; + + for(i = 0 ; i < 8; i++) + { + if(s->pci[i]) + { + pci_set_power_state(s->pci[i], 0); + + for(n = 0; n < 54; n++) + if(n!=1) + pci_write_config_dword(s->pci[i], n * 4, s->save[i][n]); + pci_write_config_dword(s->pci[i], 4, s->save[i][1]); +// printk("Restored %02X:%02X.%X\n", +// s->dev->hotplug_bus, s->slotid, i); + } + } +} + +/** + * slot_enable - enable H2999 slot + * @slot: slot to enable + * + * Enable a slot. Its not actually clear what this means with + * a hot dock. We can certainly 'discover' the PCI device in the + * slot when asked. + */ + +static int slot_enable(struct hotplug_slot *slot) +{ + struct h2999_slot *s = slot->private; + int i; + pci_restore_slot(s); + for(i=0; i < 8; i++) + { + if(s->pci[i] && (s->drivermap&(1<pci[i]); + } + return 0; +} + +/** + * slot_disable - disable H2999 slot + * @slot: slot to disable + * + * Disable a slot. Its not actually clear what to do here. We could + * report the device as having been removed when we are told to do + * this. 
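Editorial sketch of the idea behind pci_save_slot()/pci_restore_slot() above: each function's 256-byte configuration space is snapshotted as 64 dwords, and on re-dock everything is written back before the command/status dword at offset 0x04 is restored last, so the device only starts decoding once its BARs and latency settings are back in place. The names below are illustrative; this is not the patch's exact code:

    #include <linux/pci.h>

    static u32 cfg[64];                             /* one function's config space */

    static void sketch_save(struct pci_dev *dev)
    {
            int n;
            for (n = 0; n < 64; n++)
                    pci_read_config_dword(dev, n * 4, &cfg[n]);
    }

    static void sketch_restore(struct pci_dev *dev)
    {
            int n;
            pci_set_power_state(dev, 0);            /* make sure it is in D0 */
            for (n = 0; n < 64; n++)
                    if (n != 1)                     /* skip command/status for now */
                            pci_write_config_dword(dev, n * 4, cfg[n]);
            pci_write_config_dword(dev, 4, cfg[1]); /* enable decoding last */
    }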
+ */ + +static int slot_disable(struct hotplug_slot *slot) +{ + struct h2999_slot *s = slot->private; + struct pci_dev *pdev; + int i; + + for(i = 0; i < 8; i++) + { + pdev = s->pci[i]; + /* Hack for now */ + if (pdev && pdev->driver) { + if (!pdev->driver->remove) + return -EBUSY; + } + } + + s->drivermap = 0; + + for(i = 0; i < 8; i++) + { + pdev = s->pci[i]; + if(pdev) + { + if(pdev->driver) + { + s->drivermap|=(1<driver->remove(pdev); + pdev->driver = NULL; + } + } + } + return 0; +} + +/** + * set_attention_status - set attention callback + * @slot: slot to set + * @value: on/off + * + * Called when the hotplug layer wants to set the attention status of + * the hotplug slot. The H2999 doesn't have an attention control (at + * least not that we know of). So we ignore this. + */ + +static int set_attention_status(struct hotplug_slot *slot, u8 value) +{ + return 0; +} + +/** + * hardware_test - test hardware callback + * @slot: slot to test + * value: test to run + * + * The H2999 does not support any hardware tests that we know of. + */ + +static int hardware_test(struct hotplug_slot *slot, u32 value) +{ + return 0; +} + +/** + * get_power_status - power query callback + * @slot; slot to query + * @value: returned state + * + * Called when the hotplug layer wants to ask us if the slot is + * powered. We work on the basis that all slots are powered when + * the unit is docked. This seems to be correct but I've not actually + * rammed a voltmeter into the slots to see if they are cleverer than + * that. + */ + +static int get_power_status(struct hotplug_slot *slot, u8 *value) +{ + struct h2999_slot *s = slot->private; + + /* Slots are all powered when docked */ + if(s->dev->docked > 0) + *value = 1; + else + *value = 0; + return 0; +} + +/** + * get_adapter_status - card presence query + * @slot: slot to query + * @value: returned state + * + * If we are not docked, we know the "slot" is empty. If we are + * docked its a bit more complicated. + */ + +static int get_adapter_status(struct hotplug_slot *slot, u8 *value) +{ + struct h2999_slot *s = slot->private; + + *value = 0; + + if(s->dev->docked) + *value = 1; + return 0; +} + +static struct hotplug_slot_ops h2999_ops = { + THIS_MODULE, + slot_enable, + slot_disable, + set_attention_status, + hardware_test, + get_power_status, + NULL, + NULL, + get_adapter_status +}; + +/** + * h2999_is_docked - check if docked + * @dev: h2999 device + * + * Check if we are currently docked. The method we use at the moment + * relies on poking around behind the bridge. There is no doubt a + * correct way to do this. Maybe one day IBM will be decide to + * actually provide documentation + */ + +static int h2999_is_docked(struct h2999_dev *dev) +{ + struct pci_dev *pdev = pci_find_slot(dev->hotplug_bus, PCI_DEVFN(0,0)); + u32 status; + + if(pdev == NULL) + return 0; /* Shouldnt happen - must be undocked */ + + if(pci_read_config_dword(pdev, PCI_VENDOR_ID, &status)) + return 0; /* Config read failed - its missing */ + + if(status == 0xFFFFFFFFUL) /* Failed */ + return 0; + + /* Must be docked */ + return 1; +} +/** + * h2999_reconfigure_dock - redock event handler + * @dev: h2999 device + * + * A redocking event has occurred. There may also have been an undock + * before hand. If so then the unconfigure routine is called first. 
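A condensed restatement of the presence test in h2999_is_docked() above: the driver reads the vendor/device ID dword of device 0, function 0 on the bridge's secondary bus; a failed config read or an all-ones answer means nothing is responding, i.e. the machine is off the dock. Sketch only, not the patch's exact code:

    #include <linux/pci.h>

    /* hotplug_bus is the bridge's secondary bus number, as in the driver */
    static int sketch_is_docked(int hotplug_bus)
    {
            struct pci_dev *pdev = pci_find_slot(hotplug_bus, PCI_DEVFN(0, 0));
            u32 id;

            if (!pdev)
                    return 0;               /* no device node: undocked */
            if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &id))
                    return 0;               /* config read failed */
            return id != 0xFFFFFFFF;        /* all ones: nobody answering */
    }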
+ */ + +static void h2999_reconfigure_dock(struct h2999_dev *dev) +{ + int docked, i; + + docked = h2999_is_docked(dev); + + if(docked ^ dev->docked) + { + printk("h2999: Now %sdocked.\n", + docked?"":"un"); + if(docked) + { + /* We should do the re-enumeration of the bus here + The current save/restore is a test hack */ + for(i=0; i < H2999_SLOTS; i++) + { + if(dev->slots[i].hotplug_slot.private != &dev->slots[i]) + BUG(); + slot_enable(&dev->slots[i].hotplug_slot); + } + } + else + { + for(i=0; i < H2999_SLOTS; i++) + { + if(slot_disable(&dev->slots[i].hotplug_slot)) + printk(KERN_ERR "h2999: someone undocked while devices were in use, how rude!\n"); + } + } + dev->docked = docked; + } + /* Clear bits */ + pci_write_config_byte(dev->pdev, 0x1f, 0xf0); +} + +/* + * h2999_attach - attach an H2999 bridge + * @pdev: PCI device + * @unused: unused + * + * Called when the PCI layer discovers an H2999 docking bridge is + * present in the system. We scan the bridge to obtain its current + * status and register it with the hot plug layer + */ + +static int __devinit h2999_attach(struct pci_dev *pdev, const struct pci_device_id *unused) +{ + /* PCI core found a new H2999 */ + struct h2999_dev *dev; + u8 bus; + int i; + + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + if(dev == NULL) + goto nomem; + + memset(dev, 0, sizeof(*dev)); + + dev->pdev = pdev; + + pci_read_config_byte(pdev, PCI_SECONDARY_BUS, &bus); + dev->hotplug_bus = bus; + + /* Requires hotplug_bus and pdev are set */ + + dev->docked = h2999_is_docked(dev); + + printk(KERN_INFO "Found IBM 20H2999. Status is %sdocked, docking bus is %d.\n", + dev->docked?"":"un", dev->hotplug_bus); + + /* + * Allow for 8 devices. On the TP600 at least we have + * 0-3 as the onboard devices, and 4-7 as the slots. + * To add more fun there is an ISA bridge which we + * don't really handle yet. + */ + + for(i = 0; i < H2999_SLOTS; i++) + { + struct h2999_slot *s = &dev->slots[i]; + int ret; + + s->hotplug_slot.info = &s->hotplug_info; + s->hotplug_slot.private = s; + s->slotid = i; + s->dev = dev; + s->live = 1; + s->hotplug_slot.ops = &h2999_ops; + s->hotplug_slot.name = s->name; + s->hotplug_info.power_status = dev->docked; + /* FIXME - should probe here + In truth the hp_register ought to call thse as needed! */ + s->hotplug_info.adapter_status = 0; + s->pdev = pci_find_slot(dev->hotplug_bus, PCI_DEVFN(i, 0)); + snprintf(s->name, SLOT_NAME_SIZE, "Dock%d.%d", dev->hotplug_bus, i); + pci_save_slot(s); + ret = pci_hp_register(&s->hotplug_slot); + if(ret) + { + printk(KERN_ERR "pci_hp_register failed for slot %d with error %d\n", i, ret); + s->live = 0; + } + } + pci_set_drvdata(pdev, dev); + + testdev = dev; + return 0; +nomem: + printk(KERN_ERR "h2999_attach: out of memory.\n"); + return -ENOMEM; +} + +/** + * h2999_cleanup - free H2999 memory resources + * @dev: h2999 device + * + * Unregister and free up all of our slots + */ + +static void __devinit h2999_cleanup(struct h2999_dev *dev) +{ + struct h2999_slot *s; + int slot; + + for(slot = 0; slot < H2999_SLOTS; slot++) + { + s = &dev->slots[slot]; + if(s->live) + pci_hp_deregister(&s->hotplug_slot); + } + kfree(dev); +} + +/** + * h2999_detach - an H2999 controller vanished + * @dev: device that vanished + * + * Called when the PCI layer sees the bridge unplugged. 
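For readers unfamiliar with the 2.4 pci_hotplug interface the new driver registers against: each bay position is described by a struct hotplug_slot carrying a name, an info block, a private pointer and an ops table, and is handed to pci_hp_register(); pci_hp_deregister() undoes this on unload. A minimal registration sketch using only the fields the patch itself touches; the slot name and helper names are illustrative:

    #include <linux/module.h>
    #include "pci_hotplug.h"

    static int sketch_enable(struct hotplug_slot *slot)  { return 0; }
    static int sketch_disable(struct hotplug_slot *slot) { return 0; }

    /* positional initialiser as in the driver above: owner, enable_slot,
       disable_slot; the remaining callbacks stay NULL */
    static struct hotplug_slot_ops sketch_ops = {
            THIS_MODULE,
            sketch_enable,
            sketch_disable,
    };

    static struct hotplug_slot_info sketch_info;
    static struct hotplug_slot sketch_slot;

    static int register_one_slot(void)
    {
            sketch_slot.name    = "Dock1.4";        /* driver builds "Dock<bus>.<slot>" */
            sketch_slot.ops     = &sketch_ops;
            sketch_slot.info    = &sketch_info;
            sketch_slot.private = NULL;
            return pci_hp_register(&sketch_slot);   /* pci_hp_deregister() on unload */
    }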
At the moment + * this doesn't happen and since its currently unclear what to do + * in the hot plug layer if it does this may be a good thing 8) + */ + +static void __devinit h2999_detach(struct pci_dev *pdev) +{ + struct h2999_dev *dev = pci_get_drvdata(pdev); + h2999_cleanup(dev); +} + + +static struct pci_device_id h2999_id_tbl[] __devinitdata = { + { PCI_VENDOR_ID_IBM, 0x0095, PCI_ANY_ID, PCI_ANY_ID, }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, h2999_id_tbl); + +static struct pci_driver h2999_driver = { + name: "h2999", + id_table: h2999_id_tbl, + probe: h2999_attach, + remove: __devexit_p(h2999_detach) + /* FIXME - PM functions */ +}; + +/* + * Test harness + */ + +static struct completion thread_done; + +static int h2999_thread(void *unused) +{ + lock_kernel(); + while(testdev != NULL) + { + set_current_state(TASK_INTERRUPTIBLE); + if(signal_pending(current)) + break; + schedule_timeout(HZ); + h2999_reconfigure_dock(testdev); + } + unlock_kernel(); + complete_and_exit(&thread_done, 0); +} + +static int __init h2999_init_module(void) +{ + int rc; + printk(KERN_INFO "IBM 20H2999 PCI docking bridge driver v0.01\n"); + + init_completion(&thread_done); + + rc = pci_module_init(&h2999_driver); + if (rc == 0) + { + if( kernel_thread(h2999_thread, NULL, CLONE_SIGHAND) >= 0) + return 0; + } + complete(&thread_done); + return rc; +} + +static void __exit h2999_cleanup_module(void) +{ + pci_unregister_driver(&h2999_driver); + wait_for_completion(&thread_done); +} + +module_init(h2999_init_module); +module_exit(h2999_cleanup_module); + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("IBM 20H2999 Docking Bridge Driver"); +MODULE_LICENSE("GPL"); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/hotplug/tp600.h linux.22-ac2/drivers/hotplug/tp600.h --- linux.vanilla/drivers/hotplug/tp600.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/hotplug/tp600.h 2003-06-29 16:10:33.000000000 +0100 @@ -0,0 +1,29 @@ +#define SLOT_NAME_SIZE 12 + +struct h2999_slot +{ + int slotid; + + struct hotplug_slot hotplug_slot; + struct hotplug_slot_info hotplug_info; + char name[SLOT_NAME_SIZE]; + + struct h2999_dev *dev; + struct pci_dev *pdev; + int live; + + struct pci_dev *pci[8]; + u32 save[8][64]; + u8 drivermap; +}; + +#define H2999_SLOTS 8 + +struct h2999_dev +{ + int docked; + int hotplug_bus; + struct pci_dev *pdev; + + struct h2999_slot slots[H2999_SLOTS]; +}; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/Config.in linux.22-ac2/drivers/ide/Config.in --- linux.vanilla/drivers/ide/Config.in 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/Config.in 2003-08-28 17:00:29.000000000 +0100 @@ -71,6 +71,9 @@ dep_tristate ' RZ1000 chipset bugfix/support' CONFIG_BLK_DEV_RZ1000 $CONFIG_X86 dep_tristate ' SCx200 chipset support' CONFIG_BLK_DEV_SC1200 $CONFIG_BLK_DEV_IDEDMA_PCI dep_tristate ' ServerWorks OSB4/CSB5/CSB6 chipsets support' CONFIG_BLK_DEV_SVWKS $CONFIG_BLK_DEV_IDEDMA_PCI + if [ "$CONFIG_IA64" = "y" ]; then + dep_tristate ' SGI IOC4 chipset support' CONFIG_BLK_DEV_SGIIOC4 $CONFIG_BLK_DEV_IDEDMA_PCI + fi dep_tristate ' Silicon Image chipset support' CONFIG_BLK_DEV_SIIMAGE $CONFIG_BLK_DEV_IDEDMA_PCI dep_tristate ' SiS5513 chipset support' CONFIG_BLK_DEV_SIS5513 $CONFIG_BLK_DEV_IDEDMA_PCI $CONFIG_X86 dep_tristate ' SLC90E66 chipset support' CONFIG_BLK_DEV_SLC90E66 $CONFIG_BLK_DEV_IDEDMA_PCI diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/ide.c 
linux.22-ac2/drivers/ide/ide.c --- linux.vanilla/drivers/ide/ide.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/ide.c 2003-08-16 15:19:59.000000000 +0100 @@ -248,6 +248,7 @@ hwif->ultra_mask = 0x80; /* disable all ultra */ hwif->mwdma_mask = 0x80; /* disable all mwdma */ hwif->swdma_mask = 0x80; /* disable all swdma */ + hwif->sata = 0; /* assume PATA */ default_hwif_iops(hwif); default_hwif_transport(hwif); @@ -458,7 +459,7 @@ unsigned int p, major, minor; unsigned long flags; - if ((drive = get_info_ptr(i_rdev)) == NULL) + if ((drive = ide_info_ptr(i_rdev, 0)) == NULL) return -ENODEV; major = MAJOR(i_rdev); minor = drive->select.b.unit << PARTN_BITS; @@ -547,30 +548,48 @@ static int ide_open (struct inode * inode, struct file * filp) { ide_drive_t *drive; + int force = 1/*FIXME 0*/; + + if(capable(CAP_SYS_ADMIN) && (filp->f_flags & O_NDELAY)) + force = 1; - if ((drive = get_info_ptr(inode->i_rdev)) == NULL) + if ((drive = ide_info_ptr(inode->i_rdev, force)) == NULL) return -ENXIO; - if (drive->driver == &idedefault_driver) - ide_driver_module(1); - if (drive->driver == &idedefault_driver) { - if (drive->media == ide_disk) - (void) request_module("ide-disk"); - if (drive->scsi) - (void) request_module("ide-scsi"); - if (drive->media == ide_cdrom) - (void) request_module("ide-cd"); - if (drive->media == ide_tape) - (void) request_module("ide-tape"); - if (drive->media == ide_floppy) - (void) request_module("ide-floppy"); + + /* + * If the device is present make sure that we attach any + * needed driver + */ + + if (drive->present) + { + if (drive->driver == &idedefault_driver) + ide_driver_module(1); + if (drive->driver == &idedefault_driver) { + if (drive->media == ide_disk) + (void) request_module("ide-disk"); + if (drive->scsi) + (void) request_module("ide-scsi"); + if (drive->media == ide_cdrom) + (void) request_module("ide-cd"); + if (drive->media == ide_tape) + (void) request_module("ide-tape"); + if (drive->media == ide_floppy) + (void) request_module("ide-floppy"); + } + + /* The locking here isnt enough, but this is hard to fix + in the 2.4 cases */ + while (drive->busy) + sleep_on(&drive->wqueue); } - /* The locking here isnt enough, but this is hard to fix - in the 2.4 cases */ - while (drive->busy) - sleep_on(&drive->wqueue); + /* + * Now do the actual open + */ + drive->usage++; - if (!drive->dead) + if (!drive->dead || force) return DRIVER(drive)->open(inode, filp, drive); printk(KERN_WARNING "%s: driver not present\n", drive->name); drive->usage--; @@ -585,7 +604,7 @@ { ide_drive_t *drive; - if ((drive = get_info_ptr(inode->i_rdev)) != NULL) { + if ((drive = ide_info_ptr(inode->i_rdev, 1)) != NULL) { drive->usage--; DRIVER(drive)->release(inode, file, drive); } @@ -636,6 +655,179 @@ extern void init_hwif_data(unsigned int index); +/** + * ide_prepare_tristate - prepare interface for warm unplug + * @drive: drive on this hwif we are using + * + * Prepares a drive for shutdown after a bus tristate. The + * drives must be quiescent and the only user the calling ioctl + */ + +static int ide_prepare_tristate(ide_drive_t *our_drive) +{ + ide_drive_t *drive; + int unit; + unsigned long flags; + int minor; + int p; + int i; + ide_hwif_t *hwif = HWIF(our_drive); + + if(our_drive->busy) + printk("HUH? 
We are busy.\n"); + + if (!hwif->present) + BUG(); + spin_lock_irqsave(&io_request_lock, flags); + + /* Abort if anything is busy */ + for (unit = 0; unit < MAX_DRIVES; ++unit) { + drive = &hwif->drives[unit]; + if (!drive->present) + continue; + if (drive == our_drive && drive->usage != 1) + goto abort; + if (drive != our_drive && drive->usage) + goto abort; + if (drive->busy) + goto abort; + } + /* Commit to shutdown sequence */ + for (unit = 0; unit < MAX_DRIVES; ++unit) { + drive = &hwif->drives[unit]; + if (!drive->present) + continue; + if (drive != our_drive && DRIVER(drive)->shutdown(drive)) + goto abort; + } + /* We hold the lock here.. which is important as we need to play + with usage counts beyond the scenes */ + + our_drive->usage--; + i = DRIVER(our_drive)->shutdown(our_drive); + if(i) + goto abort_fix; + /* Drive shutdown sequence done */ + /* Prevent new opens ?? */ + spin_unlock_irqrestore(&io_request_lock, flags); + /* + * Flush kernel side caches, and dump the /proc files + */ + spin_unlock_irqrestore(&io_request_lock, flags); + for (unit = 0; unit < MAX_DRIVES; ++unit) { + drive = &hwif->drives[unit]; + if (!drive->present) + continue; + DRIVER(drive)->cleanup(drive); + minor = drive->select.b.unit << PARTN_BITS; + for (p = 0; p < (1<part[p].nr_sects > 0) { + kdev_t devp = MKDEV(hwif->major, minor+p); + invalidate_device(devp, 0); + } + } +#ifdef CONFIG_PROC_FS + destroy_proc_ide_drives(hwif); +#endif + } + spin_lock_irqsave(&io_request_lock, flags); + our_drive->usage++; + for (i = 0; i < MAX_DRIVES; ++i) { + drive = &hwif->drives[i]; + if (drive->de) { + devfs_unregister(drive->de); + drive->de = NULL; + } + if (!drive->present) + continue; + if (drive->id != NULL) { + kfree(drive->id); + drive->id = NULL; + } + drive->present = 0; + /* Safe to clear now */ + drive->dead = 0; + } + spin_unlock_irqrestore(&io_request_lock, flags); + return 0; + +abort_fix: + our_drive->usage++; +abort: + spin_unlock_irqrestore(&io_request_lock, flags); + return -EBUSY; +} + + +/** + * ide_resume_hwif - return a hwif to active mode + * @hwif: interface to resume + * + * Restore a dead interface from tristate back to normality. At this + * point the hardware driver busproc has reconnected the bus, but + * nothing else has happened + */ + +static int ide_resume_hwif(ide_drive_t *our_drive) +{ + ide_hwif_t *hwif = HWIF(our_drive); + int err = ide_wait_hwif_ready(hwif); + int irqd; + int present = 0; + int unit; + + if(err) + { + printk(KERN_ERR "%s: drives not ready.\n", our_drive->name); + return err; + } + + /* The drives are now taking commands */ + + irqd = hwif->irq; + if(irqd) + disable_irq(irqd); + + /* Identify and probe the drives */ + + for (unit = 0; unit < MAX_DRIVES; ++unit) { + ide_drive_t *drive = &hwif->drives[unit]; + drive->dn = ((hwif->channel ? 2 : 0) + unit); + drive->usage = 0; + drive->busy = 0; + hwif->drives[unit].dn = ((hwif->channel ? 
2 : 0) + unit); + (void) ide_probe_for_drive(drive); + if (drive->present) + present = 1; + } + ide_probe_reset(hwif); + if(irqd) + enable_irq(irqd); + + if(present) + printk(KERN_INFO "ide: drives found on hot-added interface.\n"); + + /* + * Set up the drive modes (Even if we didnt swap drives + * we may have lost settings when we disconnected the bus) + */ + + ide_tune_drives(hwif); + if(present) + hwif->present = 1; + + /* + * Reattach the devices to drivers + */ + for (unit = 0; unit < MAX_DRIVES; ++unit) { + ide_drive_t *drive = &hwif->drives[unit]; + if(drive->present && !drive->dead) + ide_attach_drive(drive); + } + our_drive->usage++; + return 0; +} + int ide_unregister (unsigned int index) { struct gendisk *gd; @@ -798,7 +990,7 @@ hwif->swdma_mask = old_hwif.swdma_mask; hwif->chipset = old_hwif.chipset; - hwif->hold = old_hwif.hold; + hwif->hold = old_hwif.hold; #ifdef CONFIG_BLK_DEV_IDEPCI hwif->pci_dev = old_hwif.pci_dev; @@ -1534,11 +1726,22 @@ struct request rq; kdev_t dev; ide_settings_t *setting; - + int force = 0; + if (!inode || !(dev = inode->i_rdev)) return -EINVAL; + + switch(cmd) + { + case HDIO_GET_BUSSTATE: + case HDIO_SET_BUSSTATE: + case HDIO_SCAN_HWIF: + case HDIO_UNREGISTER_HWIF: + force = 1; + } + major = MAJOR(dev); minor = MINOR(dev); - if ((drive = get_info_ptr(inode->i_rdev)) == NULL) + if ((drive = ide_info_ptr(inode->i_rdev, force)) == NULL) return -ENODEV; down(&ide_setting_sem); @@ -1737,11 +1940,42 @@ return 0; case HDIO_SET_BUSSTATE: + { + ide_hwif_t *hwif = HWIF(drive); + if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (HWIF(drive)->busproc) +#ifdef OLD_STUFF + if (hwif->busproc) return HWIF(drive)->busproc(drive, (int)arg); return -EOPNOTSUPP; +#else + if(hwif->bus_state == arg) + return 0; + + if(hwif->bus_state == BUSSTATE_ON) + { + /* "drive" may vanish beyond here */ + if((err = ide_prepare_tristate(drive)) != 0) + return err; + hwif->bus_state = arg; + } + if (hwif->busproc) + { + err = hwif->busproc(drive, (int)arg); + if(err) + return err; + } + if(arg != BUSSTATE_OFF) + { + err = ide_resume_hwif(drive); + hwif->bus_state = arg; + if(err) + return err; + } + return 0; +#endif + } default: return DRIVER(drive)->ioctl(drive, inode, file, cmd, arg); @@ -1753,7 +1987,7 @@ { ide_drive_t *drive; - if ((drive = get_info_ptr(i_rdev)) == NULL) + if ((drive = ide_info_ptr(i_rdev, 0)) == NULL) return -ENODEV; return DRIVER(drive)->media_change(drive); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/ide-cd.c linux.22-ac2/drivers/ide/ide-cd.c --- linux.vanilla/drivers/ide/ide-cd.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/ide-cd.c 2003-08-28 17:00:29.000000000 +0100 @@ -3262,7 +3262,7 @@ } if (ide_register_subdriver(drive, &ide_cdrom_driver, IDE_SUBDRIVER_VERSION)) { - printk("%s: Failed to register the driver with " + printk("ide-cd: %s: Failed to register the driver with " "ide.c\n", drive->name); kfree(info); continue; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/ide-default.c linux.22-ac2/drivers/ide/ide-default.c --- linux.vanilla/drivers/ide/ide-default.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/ide-default.c 2003-08-15 15:53:24.000000000 +0100 @@ -40,6 +40,21 @@ { } +static int idedefault_open(struct inode *inode, struct file *filp, ide_drive_t *drive) +{ + MOD_INC_USE_COUNT; + if(filp->f_flags & O_NDELAY) + return 0; + MOD_DEC_USE_COUNT; + drive->usage--; + return -ENXIO; +} + +static void 
idedefault_release(struct inode *inode, struct file *filp, ide_drive_t *drive) +{ + MOD_DEC_USE_COUNT; +} + int idedefault_init (void); int idedefault_attach(ide_drive_t *drive); @@ -56,6 +71,8 @@ supports_dsc_overlap: 0, init: idedefault_init, attach: idedefault_attach, + open: idedefault_open, + release: idedefault_release }; static ide_module_t idedefault_module = { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/ide-disk.c linux.22-ac2/drivers/ide/ide-disk.c --- linux.vanilla/drivers/ide/ide-disk.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/ide/ide-disk.c 2003-08-17 11:53:12.000000000 +0100 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/ide-disk.c Version 1.18 Mar 05, 2003 + * linux/drivers/ide/ide-disk.c Version 1.18 Aug 16, 2003 * * Copyright (C) 1994-1998 Linus Torvalds & authors (see below) * Copyright (C) 1998-2002 Linux ATA Developemt @@ -41,9 +41,10 @@ * Version 1.16 added suspend-resume-checkpower * Version 1.17 do flush on standy, do flush on ATA < ATA6 * fix wcache setup. + * Version 1.18 LBA48 clamping fixes */ -#define IDEDISK_VERSION "1.17" +#define IDEDISK_VERSION "1.18" #undef REALLY_SLOW_IO /* most systems can safely undef this */ @@ -1196,7 +1197,7 @@ drive->bios_cyl = drive->cyl; drive->capacity48 = capacity_2; drive->capacity = (unsigned long) capacity_2; - return; + goto check_capacity48; /* Determine capacity, and use LBA if the drive properly supports it */ } else if ((id->capability & 2) && lba_capacity_is_ok(id)) { capacity = id->lba_capacity; @@ -1221,12 +1222,14 @@ } drive->capacity = capacity; - - if ((id->command_set_2 & 0x0400) && (id->cfs_enable_2 & 0x0400)) { - drive->capacity48 = id->lba_capacity_2; - drive->head = 255; - drive->sect = 63; - drive->cyl = (unsigned long)(drive->capacity48) / (drive->head * drive->sect); + +check_capacity48: + /* FIXME: most controllers that dont do LBA48 DMA do it PIO so we + ought to handle PIO fallbacks */ + if (drive->addressing == 0 && drive->capacity48 > (1ULL)<<28) { + printk("%s: LBA48 large I/O not supported, capacity limited to 137Gb.\n", + drive->name); + drive->capacity48 = (1ULL)<<28; } } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/ide-geometry.c linux.22-ac2/drivers/ide/ide-geometry.c --- linux.vanilla/drivers/ide/ide-geometry.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/ide/ide-geometry.c 2003-08-15 14:33:49.000000000 +0100 @@ -83,7 +83,6 @@ } -extern ide_drive_t * get_info_ptr(kdev_t); extern unsigned long current_capacity (ide_drive_t *); /* @@ -156,7 +155,7 @@ int transl = 1; /* try translation */ int ret = 0; - drive = get_info_ptr(i_rdev); + drive = ide_info_ptr(i_rdev, 0); if (!drive) return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/ide-io.c linux.22-ac2/drivers/ide/ide-io.c --- linux.vanilla/drivers/ide/ide-io.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/ide-io.c 2003-08-15 14:42:40.000000000 +0100 @@ -56,38 +56,6 @@ #include "ide_modes.h" -#if (DISK_RECOVERY_TIME > 0) - -Error So the User Has To Fix the Compilation And Stop Hacking Port 0x43 -Does anyone ever use this anyway ?? - -/* - * For really screwy hardware (hey, at least it *can* be used with Linux) - * we can enforce a minimum delay time between successive operations. - */ -static unsigned long read_timer (ide_hwif_t *hwif) -{ - unsigned long t, flags; - int i; - - /* FIXME this is completely unsafe! 
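The ide-disk.c hunk above clamps capacity48 to 1ULL<<28 sectors whenever the interface is not using 48-bit addressing, which is where the "137Gb" in the new message comes from: 2^28 sectors of 512 bytes is 137,438,953,472 bytes, about 137 GB decimal or exactly 128 GiB. A quick stand-alone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long sectors = 1ULL << 28;        /* 28-bit LBA limit */
            unsigned long long bytes   = sectors * 512;     /* 512-byte sectors */

            printf("%llu sectors = %llu bytes (~%.1f GB, %llu GiB)\n",
                   sectors, bytes, bytes / 1e9, bytes >> 30);
            return 0;
    }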
*/ - local_irq_save(flags); - t = jiffies * 11932; - outb_p(0, 0x43); - i = inb_p(0x40); - i |= inb_p(0x40) << 8; - local_irq_restore(flags); - return (t - i); -} -#endif /* DISK_RECOVERY_TIME */ - -static inline void set_recovery_timer (ide_hwif_t *hwif) -{ -#if (DISK_RECOVERY_TIME > 0) - hwif->last_time = read_timer(hwif); -#endif /* DISK_RECOVERY_TIME */ -} - /* * ide_end_request - complete an IDE I/O * @drive: IDE device for the I/O @@ -237,6 +205,7 @@ * by read a sector's worth of data from the drive. Of course, * this may not help if the drive is *waiting* for data from *us*. */ + void try_to_flush_leftover_data (ide_drive_t *drive) { int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS; @@ -573,9 +542,9 @@ EXPORT_SYMBOL(execute_drive_cmd); /** - * start_request - start of I/O and command issuing for IDE + * ide_start_request - start of I/O and command issuing for IDE * - * start_request() initiates handling of a new I/O request. It + * ide_start_request() initiates handling of a new I/O request. It * accepts commands and I/O (read/write) requests. It also does * the final remapping for weird stuff like EZDrive. Once * device mapper can work sector level the EZDrive stuff can go away @@ -583,7 +552,7 @@ * FIXME: this function needs a rename */ -ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) +static ide_startstop_t ide_start_request (ide_drive_t *drive, struct request *rq) { ide_startstop_t startstop; unsigned long block, blockend; @@ -591,12 +560,12 @@ ide_hwif_t *hwif = HWIF(drive); #ifdef DEBUG - printk("%s: start_request: current=0x%08lx\n", + printk("%s: ide_start_request: current=0x%08lx\n", hwif->name, (unsigned long) rq); #endif /* bail early if we've exceeded max_failures */ - if (drive->max_failures && (drive->failures > drive->max_failures)) { + if (!drive->present || (drive->max_failures && (drive->failures > drive->max_failures))) { goto kill_rq; } @@ -636,10 +605,6 @@ if (block == 0 && drive->remap_0_to_1 == 1) block = 1; /* redirect MBR access to EZ-Drive partn table */ -#if (DISK_RECOVERY_TIME > 0) - while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME); -#endif - SELECT_DRIVE(drive); if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) { printk(KERN_ERR "%s: drive not ready for command\n", drive->name); @@ -663,16 +628,6 @@ return ide_stopped; } -EXPORT_SYMBOL(start_request); - -int restart_request (ide_drive_t *drive, struct request *rq) -{ - (void) start_request(drive, rq); - return 0; -} - -EXPORT_SYMBOL(restart_request); - /** * ide_stall_queue - pause an IDE device * @drive: drive to stall @@ -865,7 +820,7 @@ spin_unlock(&io_request_lock); local_irq_enable(); /* allow other IRQs while we start this request */ - startstop = start_request(drive, rq); + startstop = ide_start_request(drive, rq); spin_lock_irq(&io_request_lock); if (hwif->irq != masked_irq) enable_irq(hwif->irq); @@ -1043,7 +998,6 @@ startstop = DRIVER(drive)->error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG)); } } - set_recovery_timer(hwif); drive->service_time = jiffies - drive->service_start; spin_lock_irq(&io_request_lock); enable_irq(hwif->irq); @@ -1236,7 +1190,6 @@ * same irq as is currently being serviced here, and Linux * won't allow another of the same (on any CPU) until we return. 
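Editorial usage sketch for the warm-unplug path added in the ide.c hunks a little earlier (the CAP_SYS_ADMIN plus O_NDELAY forced open together with the new HDIO_SET_BUSSTATE handling built on ide_prepare_tristate()/ide_resume_hwif()): a privileged program opens the bay drive non-blocking, turns the bus off before undocking, and turns it back on afterwards. The device path is hypothetical; BUSSTATE_OFF/BUSSTATE_ON are assumed to come from <linux/hdreg.h> as referenced by the patch:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/hdreg.h>

    int main(void)
    {
            /* hypothetical bay device; the forced open added by this patch
               requires CAP_SYS_ADMIN and O_NDELAY */
            int fd = open("/dev/hdc", O_RDONLY | O_NDELAY);
            if (fd < 0)
                    return 1;

            if (ioctl(fd, HDIO_SET_BUSSTATE, BUSSTATE_OFF) < 0)
                    perror("HDIO_SET_BUSSTATE off");

            /* ... undock / redock the machine here ... */

            if (ioctl(fd, HDIO_SET_BUSSTATE, BUSSTATE_ON) < 0)
                    perror("HDIO_SET_BUSSTATE on");

            close(fd);
            return 0;
    }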
*/ - set_recovery_timer(HWIF(drive)); drive->service_time = jiffies - drive->service_start; if (startstop == ide_stopped) { if (hwgroup->handler == NULL) { /* paranoia */ @@ -1256,7 +1209,7 @@ * get_info_ptr() returns the (ide_drive_t *) for a given device number. * It returns NULL if the given device number does not match any present drives. */ -ide_drive_t *get_info_ptr (kdev_t i_rdev) +ide_drive_t *ide_info_ptr (kdev_t i_rdev, int force) { int major = MAJOR(i_rdev); unsigned int h; @@ -1267,7 +1220,7 @@ unsigned unit = DEVICE_NR(i_rdev); if (unit < MAX_DRIVES) { ide_drive_t *drive = &hwif->drives[unit]; - if (drive->present) + if (drive->present || force) return drive; } break; @@ -1276,7 +1229,7 @@ return NULL; } -EXPORT_SYMBOL(get_info_ptr); +EXPORT_SYMBOL(ide_info_ptr); /** * ide_init_drive_cmd - initialize a drive command request diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/ide-iops.c linux.22-ac2/drivers/ide/ide-iops.c --- linux.vanilla/drivers/ide/ide-iops.c 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/ide/ide-iops.c 2003-08-09 15:48:39.000000000 +0100 @@ -741,6 +741,10 @@ int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) { + /* SATA has no cable restrictions */ + if (HWIF(drive)->sata) + return 0; + if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) && (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) && (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/ide-probe.c linux.22-ac2/drivers/ide/ide-probe.c --- linux.vanilla/drivers/ide/ide-probe.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/ide-probe.c 2003-08-05 13:04:13.000000000 +0100 @@ -553,7 +553,7 @@ } /** - * probe_for_drives - upper level drive probe + * ide_probe_for_drives - upper level drive probe * @drive: drive to probe for * * probe_for_drive() tests for existence of a given drive using do_probe() @@ -564,7 +564,7 @@ * still be 0) */ -static inline u8 probe_for_drive (ide_drive_t *drive) +u8 ide_probe_for_drive (ide_drive_t *drive) { /* * In order to keep things simple we have an id @@ -687,9 +687,6 @@ //EXPORT_SYMBOL(hwif_register); -/* Enable code below on all archs later, for now, I want it on PPC - */ -#ifdef CONFIG_PPC /* * This function waits for the hwif to report a non-busy status * see comments in probe_hwif() @@ -716,7 +713,7 @@ return ((stat & BUSY_STAT) == 0) ? 
0 : -EBUSY; } -static int wait_hwif_ready(ide_hwif_t *hwif) +int ide_wait_hwif_ready(ide_hwif_t *hwif) { int rc; @@ -751,7 +748,67 @@ return rc; } -#endif /* CONFIG_PPC */ + +void ide_probe_reset(ide_hwif_t *hwif) +{ + if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) { + unsigned long timeout = jiffies + WAIT_WORSTCASE; + u8 stat; + + printk(KERN_WARNING "%s: reset\n", hwif->name); + hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]); + udelay(10); + hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]); + do { + ide_delay_50ms(); + stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); + } while ((stat & BUSY_STAT) && time_after(timeout, jiffies)); + } +} + +void ide_tune_drives(ide_hwif_t *hwif) +{ + int unit; + + for (unit = 0; unit < MAX_DRIVES; ++unit) { + ide_drive_t *drive = &hwif->drives[unit]; + int enable_dma = 1; + + if (drive->present) { + if (hwif->tuneproc != NULL && + drive->autotune == IDE_TUNE_AUTO) + /* auto-tune PIO mode */ + hwif->tuneproc(drive, 255); + +#ifdef CONFIG_IDEDMA_ONLYDISK + if (drive->media != ide_disk) + enable_dma = 0; +#endif + /* + * MAJOR HACK BARF :-/ + * + * FIXME: chipsets own this cruft! + */ + /* + * Move here to prevent module loading clashing. + */ + // drive->autodma = hwif->autodma; + if ((hwif->ide_dma_check) && + ((drive->autotune == IDE_TUNE_DEFAULT) || + (drive->autotune == IDE_TUNE_AUTO))) { + /* + * Force DMAing for the beginning of the check. + * Some chipsets appear to do interesting + * things, if not checked and cleared. + * PARANOIA!!! + */ + hwif->ide_dma_off_quietly(drive); + if (enable_dma) + hwif->ide_dma_check(drive); + } + } + } +} /* * This routine only knows how to look for drive units 0 and 1 @@ -834,7 +891,7 @@ ide_drive_t *drive = &hwif->drives[unit]; drive->dn = ((hwif->channel ? 2 : 0) + unit); hwif->drives[unit].dn = ((hwif->channel ? 2 : 0) + unit); - (void) probe_for_drive(drive); + (void) ide_probe_for_drive(drive); if (drive->present && !hwif->present) { hwif->present = 1; if (hwif->chipset != ide_4drives || @@ -844,20 +901,8 @@ } } } - if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) { - unsigned long timeout = jiffies + WAIT_WORSTCASE; - u8 stat; - - printk(KERN_WARNING "%s: reset\n", hwif->name); - hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]); - udelay(10); - hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]); - do { - ide_delay_50ms(); - stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); - } while ((stat & BUSY_STAT) && time_after(timeout, jiffies)); - - } + + ide_probe_reset(hwif); local_irq_restore(flags); /* * Use cached IRQ number. It might be (and is...) changed by probe @@ -865,45 +910,9 @@ */ if (irqd) enable_irq(irqd); + + ide_tune_drives(hwif); - for (unit = 0; unit < MAX_DRIVES; ++unit) { - ide_drive_t *drive = &hwif->drives[unit]; - int enable_dma = 1; - - if (drive->present) { - if (hwif->tuneproc != NULL && - drive->autotune == IDE_TUNE_AUTO) - /* auto-tune PIO mode */ - hwif->tuneproc(drive, 255); - -#ifdef CONFIG_IDEDMA_ONLYDISK - if (drive->media != ide_disk) - enable_dma = 0; -#endif - /* - * MAJOR HACK BARF :-/ - * - * FIXME: chipsets own this cruft! - */ - /* - * Move here to prevent module loading clashing. - */ - // drive->autodma = hwif->autodma; - if ((hwif->ide_dma_check) && - ((drive->autotune == IDE_TUNE_DEFAULT) || - (drive->autotune == IDE_TUNE_AUTO))) { - /* - * Force DMAing for the beginning of the check. - * Some chipsets appear to do interesting - * things, if not checked and cleared. - * PARANOIA!!! 
- */ - hwif->ide_dma_off_quietly(drive); - if (enable_dma) - hwif->ide_dma_check(drive); - } - } - } } EXPORT_SYMBOL(probe_hwif); @@ -1335,13 +1344,6 @@ EXPORT_SYMBOL(export_ide_init_queue); -u8 export_probe_for_drive (ide_drive_t *drive) -{ - return probe_for_drive(drive); -} - -EXPORT_SYMBOL(export_probe_for_drive); - #ifndef HWIF_PROBE_CLASSIC_METHOD int probe_hwif_init (ide_hwif_t *hwif) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/ide-sibyte.c linux.22-ac2/drivers/ide/ide-sibyte.c --- linux.vanilla/drivers/ide/ide-sibyte.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/ide-sibyte.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,79 +0,0 @@ -/* - * Copyright (C) 2001, 2002, 2003 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -/* Derived loosely from ide-pmac.c, so: - * - * Copyright (C) 1998 Paul Mackerras. - * Copyright (C) 1995-1998 Mark Lord - */ -#include -#include - -#include - -#define SIBYTE_IDE_BASE (IO_SPACE_BASE + IDE_PHYS - mips_io_port_base) -#define SIBYTE_IDE_REG(pcaddr) (SIBYTE_IDE_BASE + ((pcaddr) << 5)) - -extern void sibyte_set_ideops(ide_hwif_t *hwif); - -void __init sibyte_ide_probe(void) -{ - int i; - ide_hwif_t *hwif = NULL; - - /* - * Find the first untaken slot in hwifs. Also set the io ops - * to the non-swapping SiByte versions. XXXKW It would be - * nice to find a safe place to do this outside of - * ide-sibyte.c so PCI-IDE would work without the SiByte - * driver. - */ - for (i = 0; i < MAX_HWIFS; i++) { - sibyte_set_ideops(&ide_hwifs[i]); - if (!ide_hwifs[i].io_ports[IDE_DATA_OFFSET] && (hwif == NULL)) { - hwif = &ide_hwifs[i]; - } - } - if (hwif == NULL) { - printk("No space for SiByte onboard IDE driver in ide_hwifs[]. Not enabled.\n"); - return; - } - - /* - * Set up our stuff; we're a little odd because our io_ports - * aren't in the usual place, and byte-swapping isn't - * necessary. 
- */ - hwif->hw.io_ports[IDE_DATA_OFFSET] = SIBYTE_IDE_REG(0x1f0); - hwif->hw.io_ports[IDE_ERROR_OFFSET] = SIBYTE_IDE_REG(0x1f1); - hwif->hw.io_ports[IDE_NSECTOR_OFFSET] = SIBYTE_IDE_REG(0x1f2); - hwif->hw.io_ports[IDE_SECTOR_OFFSET] = SIBYTE_IDE_REG(0x1f3); - hwif->hw.io_ports[IDE_LCYL_OFFSET] = SIBYTE_IDE_REG(0x1f4); - hwif->hw.io_ports[IDE_HCYL_OFFSET] = SIBYTE_IDE_REG(0x1f5); - hwif->hw.io_ports[IDE_SELECT_OFFSET] = SIBYTE_IDE_REG(0x1f6); - hwif->hw.io_ports[IDE_STATUS_OFFSET] = SIBYTE_IDE_REG(0x1f7); - hwif->hw.io_ports[IDE_CONTROL_OFFSET] = SIBYTE_IDE_REG(0x3f6); - hwif->hw.irq = K_INT_GB_IDE; - hwif->irq = hwif->hw.irq; - hwif->noprobe = 0; - hwif->hw.ack_intr = NULL; - hwif->mmio = 2; - - memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->io_ports)); - printk("SiByte onboard IDE configured as device %i\n", hwif-ide_hwifs); -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/legacy/Makefile linux.22-ac2/drivers/ide/legacy/Makefile --- linux.vanilla/drivers/ide/legacy/Makefile 2003-06-14 00:11:30.000000000 +0100 +++ linux.22-ac2/drivers/ide/legacy/Makefile 2003-08-28 17:02:35.000000000 +0100 @@ -17,8 +17,11 @@ obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o +obj-$(CONFIG_BLK_DEV_IDE_SIBYTE) += sibyte.o + obj-$(CONFIG_BLK_DEV_IDECS) += ide-cs.o + # Last of all obj-$(CONFIG_BLK_DEV_HD) += hd.o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/legacy/sibyte.c linux.22-ac2/drivers/ide/legacy/sibyte.c --- linux.vanilla/drivers/ide/legacy/sibyte.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/ide/legacy/sibyte.c 2003-08-28 17:01:47.000000000 +0100 @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2001, 2002, 2003 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* Derived loosely from ide-pmac.c, so: + * + * Copyright (C) 1998 Paul Mackerras. + * Copyright (C) 1995-1998 Mark Lord + */ +#include +#include + +#include + +#define SIBYTE_IDE_BASE (IO_SPACE_BASE + IDE_PHYS - mips_io_port_base) +#define SIBYTE_IDE_REG(pcaddr) (SIBYTE_IDE_BASE + ((pcaddr) << 5)) + +extern void sibyte_set_ideops(ide_hwif_t *hwif); + +void __init sibyte_ide_probe(void) +{ + int i; + ide_hwif_t *hwif = NULL; + + /* + * Find the first untaken slot in hwifs. Also set the io ops + * to the non-swapping SiByte versions. XXXKW It would be + * nice to find a safe place to do this outside of + * ide-sibyte.c so PCI-IDE would work without the SiByte + * driver. + */ + for (i = 0; i < MAX_HWIFS; i++) { + sibyte_set_ideops(&ide_hwifs[i]); + if (!ide_hwifs[i].io_ports[IDE_DATA_OFFSET] && (hwif == NULL)) { + hwif = &ide_hwifs[i]; + } + } + if (hwif == NULL) { + printk("No space for SiByte onboard IDE driver in ide_hwifs[]. 
Not enabled.\n"); + return; + } + + /* + * Set up our stuff; we're a little odd because our io_ports + * aren't in the usual place, and byte-swapping isn't + * necessary. + */ + hwif->hw.io_ports[IDE_DATA_OFFSET] = SIBYTE_IDE_REG(0x1f0); + hwif->hw.io_ports[IDE_ERROR_OFFSET] = SIBYTE_IDE_REG(0x1f1); + hwif->hw.io_ports[IDE_NSECTOR_OFFSET] = SIBYTE_IDE_REG(0x1f2); + hwif->hw.io_ports[IDE_SECTOR_OFFSET] = SIBYTE_IDE_REG(0x1f3); + hwif->hw.io_ports[IDE_LCYL_OFFSET] = SIBYTE_IDE_REG(0x1f4); + hwif->hw.io_ports[IDE_HCYL_OFFSET] = SIBYTE_IDE_REG(0x1f5); + hwif->hw.io_ports[IDE_SELECT_OFFSET] = SIBYTE_IDE_REG(0x1f6); + hwif->hw.io_ports[IDE_STATUS_OFFSET] = SIBYTE_IDE_REG(0x1f7); + hwif->hw.io_ports[IDE_CONTROL_OFFSET] = SIBYTE_IDE_REG(0x3f6); + hwif->hw.irq = K_INT_GB_IDE; + hwif->irq = hwif->hw.irq; + hwif->noprobe = 0; + hwif->hw.ack_intr = NULL; + hwif->mmio = 2; + + memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->io_ports)); + printk(KERN_INFO "SiByte onboard IDE configured as device %i\n", hwif-ide_hwifs); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/Makefile linux.22-ac2/drivers/ide/Makefile --- linux.vanilla/drivers/ide/Makefile 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/Makefile 2003-08-09 16:07:32.000000000 +0100 @@ -48,8 +48,6 @@ obj-$(CONFIG_BLK_DEV_IDE) += ide-core.o -obj-$(CONFIG_BLK_DEV_IDE_SIBYTE) += ide-sibyte.o - ifeq ($(CONFIG_BLK_DEV_IDE),y) obj-y += legacy/idedriver-legacy.o obj-y += ppc/idedriver-ppc.o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/adma100.c linux.22-ac2/drivers/ide/pci/adma100.c --- linux.vanilla/drivers/ide/pci/adma100.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/adma100.c 2003-09-01 13:23:20.000000000 +0100 @@ -76,5 +76,7 @@ MODULE_DESCRIPTION("Basic PIO support for ADMA100 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, adma100_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/aec62xx.c linux.22-ac2/drivers/ide/pci/aec62xx.c --- linux.vanilla/drivers/ide/pci/aec62xx.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/aec62xx.c 2003-09-01 13:23:20.000000000 +0100 @@ -565,4 +565,6 @@ MODULE_DESCRIPTION("PCI driver module for ARTOP AEC62xx IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, aec62xx_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/alim15x3.c linux.22-ac2/drivers/ide/pci/alim15x3.c --- linux.vanilla/drivers/ide/pci/alim15x3.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/alim15x3.c 2003-09-01 13:23:20.000000000 +0100 @@ -915,4 +915,6 @@ MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, alim15x3_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/amd74xx.c linux.22-ac2/drivers/ide/pci/amd74xx.c --- linux.vanilla/drivers/ide/pci/amd74xx.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/amd74xx.c 2003-09-09 19:16:32.000000000 +0100 @@ -318,7 +318,7 @@ amd_80w = ((t & 0x3) ? 1 : 0) | ((t & 0xc) ? 2 : 0); for (i = 24; i >= 0; i -= 8) if (((u >> i) & 4) && !(amd_80w & (1 << (1 - (i >> 4))))) { - printk(KERN_WARNING "AMD_IDE: Bios didn't set cable bits corectly. Enabling workaround.\n"); + printk(KERN_WARNING "AMD_IDE: Bios didn't set cable bits correctly. 
Enabling workaround.\n"); amd_80w |= (1 << (1 - (i >> 4))); } break; @@ -480,4 +480,6 @@ MODULE_DESCRIPTION("AMD PCI IDE driver"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/cmd64x.c linux.22-ac2/drivers/ide/pci/cmd64x.c --- linux.vanilla/drivers/ide/pci/cmd64x.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/cmd64x.c 2003-09-01 13:23:20.000000000 +0100 @@ -794,5 +794,7 @@ MODULE_DESCRIPTION("PCI driver module for CMD64x IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, cmd64x_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/cs5530.c linux.22-ac2/drivers/ide/pci/cs5530.c --- linux.vanilla/drivers/ide/pci/cs5530.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/cs5530.c 2003-09-01 13:23:20.000000000 +0100 @@ -460,4 +460,6 @@ MODULE_DESCRIPTION("PCI driver module for Cyrix/NS 5530 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, cs5530_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/cy82c693.c linux.22-ac2/drivers/ide/pci/cy82c693.c --- linux.vanilla/drivers/ide/pci/cy82c693.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/cy82c693.c 2003-09-01 13:23:20.000000000 +0100 @@ -465,4 +465,6 @@ MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, cy82c693_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/generic.c linux.22-ac2/drivers/ide/pci/generic.c --- linux.vanilla/drivers/ide/pci/generic.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/generic.c 2003-09-01 13:23:20.000000000 +0100 @@ -167,4 +167,6 @@ MODULE_DESCRIPTION("PCI driver module for generic PCI IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, generic_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/hpt34x.c linux.22-ac2/drivers/ide/pci/hpt34x.c --- linux.vanilla/drivers/ide/pci/hpt34x.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/hpt34x.c 2003-09-01 13:23:20.000000000 +0100 @@ -367,4 +367,6 @@ MODULE_DESCRIPTION("PCI driver module for Highpoint 34x IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, hpt34x_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/hpt366.c linux.22-ac2/drivers/ide/pci/hpt366.c --- linux.vanilla/drivers/ide/pci/hpt366.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/hpt366.c 2003-09-01 13:23:20.000000000 +0100 @@ -465,7 +465,7 @@ static void hpt3xx_tune_drive (ide_drive_t *drive, u8 pio) { - pio = ide_get_best_pio_mode(drive, pio, 5, NULL); + pio = ide_get_best_pio_mode(drive, 255, pio, NULL); (void) hpt3xx_tune_chipset(drive, (XFER_PIO_0 + pio)); } @@ -1444,4 +1444,6 @@ MODULE_DESCRIPTION("PCI driver module for Highpoint HPT366 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, hpt366_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/it8172.c linux.22-ac2/drivers/ide/pci/it8172.c --- linux.vanilla/drivers/ide/pci/it8172.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/it8172.c 2003-09-01 
13:23:20.000000000 +0100 @@ -331,4 +331,6 @@ MODULE_DESCRIPTION("PCI driver module for ITE 8172 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, it8172_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/Makefile linux.22-ac2/drivers/ide/pci/Makefile --- linux.vanilla/drivers/ide/pci/Makefile 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/Makefile 2003-07-31 14:23:47.000000000 +0100 @@ -25,6 +25,7 @@ obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o +obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/ns87415.c linux.22-ac2/drivers/ide/pci/ns87415.c --- linux.vanilla/drivers/ide/pci/ns87415.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/ns87415.c 2003-09-01 13:23:20.000000000 +0100 @@ -264,4 +264,6 @@ MODULE_DESCRIPTION("PCI driver module for NS87415 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/opti621.c linux.22-ac2/drivers/ide/pci/opti621.c --- linux.vanilla/drivers/ide/pci/opti621.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/opti621.c 2003-09-01 13:23:20.000000000 +0100 @@ -400,4 +400,6 @@ MODULE_DESCRIPTION("PCI driver module for Opti621 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, opti621_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/pdc202xx_new.c linux.22-ac2/drivers/ide/pci/pdc202xx_new.c --- linux.vanilla/drivers/ide/pci/pdc202xx_new.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/pdc202xx_new.c 2003-09-01 13:23:20.000000000 +0100 @@ -677,4 +677,6 @@ MODULE_DESCRIPTION("PCI driver module for Promise PDC20268 and higher"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, pdc202new_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/pdc202xx_old.c linux.22-ac2/drivers/ide/pci/pdc202xx_old.c --- linux.vanilla/drivers/ide/pci/pdc202xx_old.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/pdc202xx_old.c 2003-09-09 19:14:12.000000000 +0100 @@ -697,8 +697,8 @@ hwif->tuneproc = &config_chipset_for_pio; hwif->quirkproc = &pdc202xx_quirkproc; - if (hwif->pci_dev->device == PCI_DEVICE_ID_PROMISE_20265) - hwif->addressing = (hwif->channel) ? 0 : 1; +// if (hwif->pci_dev->device == PCI_DEVICE_ID_PROMISE_20265) +// hwif->addressing = (hwif->channel) ? 
0 : 1; if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246) { hwif->busproc = &pdc202xx_tristate; @@ -880,4 +880,6 @@ MODULE_DESCRIPTION("PCI driver module for older Promise IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, pdc202xx_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/piix.c linux.22-ac2/drivers/ide/pci/piix.c --- linux.vanilla/drivers/ide/pci/piix.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/piix.c 2003-09-09 22:37:49.000000000 +0100 @@ -153,6 +153,7 @@ case PCI_DEVICE_ID_INTEL_82801DB_11: case PCI_DEVICE_ID_INTEL_82801EB_11: case PCI_DEVICE_ID_INTEL_82801E_11: + case PCI_DEVICE_ID_INTEL_ESB_2: p += sprintf(p, "PIIX4 Ultra 100 "); break; case PCI_DEVICE_ID_INTEL_82372FB_1: @@ -290,6 +291,7 @@ case PCI_DEVICE_ID_INTEL_82801DB_10: case PCI_DEVICE_ID_INTEL_82801DB_11: case PCI_DEVICE_ID_INTEL_82801EB_11: + case PCI_DEVICE_ID_INTEL_ESB_2: mode = 3; break; /* UDMA 66 capable */ @@ -599,6 +601,65 @@ } /** + * ich3_busproc - bus isolation ioctl + * @drive: drive to isolate/restore + * @state: bus state to set + * + * Used by the ICH3 to handle bus isolation. We have to do + * a little bit of fixing to keep the hardware happy. + */ + +static int ich3_busproc (ide_drive_t * drive, int state) +{ + ide_hwif_t *hwif = HWIF(drive); + u32 sig_mode; + int shift; + int bits; + + if(hwif->channel == 0) + shift = 17; + else + shift = 19; + + switch (state) { + case BUSSTATE_ON: + bits = 0x00; + hwif->drives[0].failures = 0; + hwif->drives[1].failures = 0; + break; + case BUSSTATE_OFF: + bits = 0x01; + break; + case BUSSTATE_TRISTATE: + bits = 0x10; + break; + default: + return -EINVAL; + } + + if(bits) + { + int port = hwif->channel == 0 ? 0x40 : 0x42; + u16 reg; + hwif->drives[0].failures = hwif->drives[0].max_failures + 1; + hwif->drives[1].failures = hwif->drives[1].max_failures + 1; + /* Turn off IORDY checking to avoid hangs */ + pci_read_config_word(hwif->pci_dev, port, &reg); + reg&=~(1<<5)|(1<<1); + pci_write_config_word(hwif->pci_dev, port, reg); + } + /* Todo: Check locking */ + pci_read_config_dword(hwif->pci_dev, 0x54, &sig_mode); + sig_mode&=~(3<<shift); + sig_mode|=(bits<<shift); + pci_write_config_dword(hwif->pci_dev, 0x54, sig_mode); + + hwif->bus_state = state; + return 0; +} + + +/** + * init_chipset_piix - set up the PIIX chipset + * @dev: PCI device to set up + * @name: Name of the device @@ -621,6 +682,7 @@ case PCI_DEVICE_ID_INTEL_82801DB_11: case PCI_DEVICE_ID_INTEL_82801EB_11: case PCI_DEVICE_ID_INTEL_82801E_11: + case PCI_DEVICE_ID_INTEL_ESB_2: { unsigned int extra = 0; pci_read_config_dword(dev, 0x54, &extra); @@ -693,6 +755,10 @@ case PCI_DEVICE_ID_INTEL_82801AB_1: hwif->ultra_mask = 0x07; break; + case PCI_DEVICE_ID_INTEL_82801CA_10: + case PCI_DEVICE_ID_INTEL_82801CA_11: + hwif->busproc = ich3_busproc; + /* fall through */ default: pci_read_config_byte(hwif->pci_dev, 0x54, &reg54h); pci_read_config_byte(hwif->pci_dev, 0x55, &reg55h); @@ -811,7 +877,10 @@ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_11,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15}, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16}, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_10,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17}, +#if 0 /* SATA is covered by ata_piix scsi driver */ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18}, +#endif + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 19}, { 0, }, }; @@ -839,4 +908,6 @@ MODULE_DESCRIPTION("PCI driver module for Intel PIIX 
IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, piix_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/piix.h linux.22-ac2/drivers/ide/pci/piix.h --- linux.vanilla/drivers/ide/pci/piix.h 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/piix.h 2003-09-09 22:27:29.000000000 +0100 @@ -305,6 +305,20 @@ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, .bootable = ON_BOARD, .extra = 0, + },{ /* 19 */ + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_ESB_2, + .name = "ESB", + .init_setup = init_setup_piix, + .init_chipset = init_chipset_piix, + .init_iops = NULL, + .init_hwif = init_hwif_piix, + .init_dma = init_dma_piix, + .channels = 2, + .autodma = AUTODMA, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD, + .extra = 0, },{ .vendor = 0, .device = 0, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/rz1000.c linux.22-ac2/drivers/ide/pci/rz1000.c --- linux.vanilla/drivers/ide/pci/rz1000.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/rz1000.c 2003-09-01 13:23:20.000000000 +0100 @@ -95,5 +95,7 @@ MODULE_DESCRIPTION("PCI driver module for RZ1000 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, rz1000_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/sc1200.c linux.22-ac2/drivers/ide/pci/sc1200.c --- linux.vanilla/drivers/ide/pci/sc1200.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/sc1200.c 2003-09-01 13:23:20.000000000 +0100 @@ -595,4 +595,6 @@ MODULE_DESCRIPTION("PCI driver module for NS SC1200 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, sc1200_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/serverworks.c linux.22-ac2/drivers/ide/pci/serverworks.c --- linux.vanilla/drivers/ide/pci/serverworks.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/serverworks.c 2003-09-01 13:23:20.000000000 +0100 @@ -834,4 +834,6 @@ MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, svwks_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/sgiioc4.c linux.22-ac2/drivers/ide/pci/sgiioc4.c --- linux.vanilla/drivers/ide/pci/sgiioc4.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/sgiioc4.c 2003-09-01 13:24:09.000000000 +0100 @@ -0,0 +1,924 @@ +/* + * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/NoticeExplan + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sgiioc4.h" + +extern int dma_timer_expiry(ide_drive_t * drive); + +#ifdef CONFIG_PROC_FS +static u8 sgiioc4_proc = 0; +#endif /* CONFIG_PROC_FS */ + +static int n_sgiioc4_devs = 0; + +#define SGIIOC4_HD_SUPPORT 0 + +static inline void +xide_delay(long ticks) +{ + if (!ticks) + return; + + current->state = TASK_UNINTERRUPTIBLE; + schedule_timeout(ticks); +} + +static void __init +sgiioc4_ide_setup_pci_device(struct pci_dev *dev, const char *name) +{ + unsigned long base = 0, ctl = 0, dma_base = 0, irqport = 0; + ide_hwif_t *hwif = NULL; + int h = 0; + + /* Get the CmdBlk and CtrlBlk Base Registers */ + base = pci_resource_start(dev, 0) + IOC4_CMD_OFFSET; + ctl = pci_resource_start(dev, 0) + IOC4_CTRL_OFFSET; + irqport = pci_resource_start(dev, 0) + IOC4_INTR_OFFSET; + dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET; + + for (h = 0; h < MAX_HWIFS; ++h) { + hwif = &ide_hwifs[h]; + /* Find an empty HWIF */ + if (hwif->chipset == ide_unknown) + break; + } + + if (hwif->io_ports[IDE_DATA_OFFSET] != base) { + /* Initialize the IO registers */ + sgiioc4_init_hwif_ports(&hwif->hw, base, ctl, irqport); + memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof (hwif->io_ports)); + hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET]; + } + + hwif->chipset = ide_pci; + hwif->pci_dev = dev; + hwif->channel = 0; /* Single Channel chip */ + hwif->hw.ack_intr = &sgiioc4_checkirq; /* MultiFunction Chip */ + + /* Initializing chipset IRQ Registers */ + hwif->OUTL(0x03, irqport + IOC4_INTR_SET * 4); + + (void) ide_init_sgiioc4(hwif); + + if (dma_base) + ide_dma_sgiioc4(hwif, dma_base); + else + printk(KERN_INFO "%s: %s Bus-Master DMA disabled \n", hwif->name, name); +} + +static unsigned int __init +pci_init_sgiioc4(struct pci_dev *dev, const char *name) +{ + extern pciio_endian_t snia_pciio_endian_set(struct pci_dev *pci_dev, + pciio_endian_t device_end, + pciio_endian_t desired_end); + + if (pci_enable_device(dev)) { + printk(KERN_INFO "Failed to enable device %s at slot %s \n",name,dev->slot_name); + return 1; + } + pci_set_master(dev); +#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) + /* Enable Byte Swapping in the PIC */ + snia_pciio_endian_set(dev, PCIDMA_ENDIAN_LITTLE, PCIDMA_ENDIAN_BIG); +#endif + +#ifdef CONFIG_PROC_FS + sgiioc4_devs[n_sgiioc4_devs++] = dev; + if (!sgiioc4_proc) { + sgiioc4_proc = 1; + ide_pci_register_host_proc(&sgiioc4_procs[0]); + } +#endif + sgiioc4_ide_setup_pci_device(dev, name); + return 0; +} + +static void +sgiioc4_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port, + ide_ioreg_t ctrl_port, ide_ioreg_t irq_port) +{ + ide_ioreg_t reg = data_port; + int i; + + for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) + hw->io_ports[i] = reg + i * 4; /* Registers are word (32 bit) aligned */ + + if (ctrl_port) + hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; + + if (irq_port) + hw->io_ports[IDE_IRQ_OFFSET] = irq_port; +} + +static void 
+sgiioc4_resetproc(ide_drive_t * drive) +{ + sgiioc4_ide_dma_end(drive); + sgiioc4_clearirq(drive); +} + +static void +sgiioc4_maskproc(ide_drive_t * drive, int mask) +{ + ide_hwif_t *hwif = HWIF(drive); + hwif->OUTB(mask ? (drive->ctl | 2) : (drive->ctl & ~2), IDE_CONTROL_REG); +} + +static void __init +ide_init_sgiioc4(ide_hwif_t * hwif) +{ + hwif->autodma = 1; + hwif->index = 0; /* Channel 0 */ + hwif->channel = 0; + hwif->atapi_dma = 1; + hwif->ultra_mask = 0x0; /* Disable Ultra DMA */ + hwif->mwdma_mask = 0x2; /* Multimode-2 DMA */ + hwif->swdma_mask = 0x2; + hwif->identify = NULL; + hwif->tuneproc = NULL; /* Sets timing for PIO mode */ + hwif->speedproc = NULL; /* Sets timing for DMA &/or PIO modes */ + hwif->selectproc = NULL; /* Use the default selection routine to select drive */ + hwif->reset_poll = NULL; /* No HBA specific reset_poll needed */ + hwif->pre_reset = NULL; /* No HBA specific pre_set needed */ + hwif->resetproc = &sgiioc4_resetproc; /* Reset the IOC4 DMA engine, clear interrupts etc */ + hwif->intrproc = NULL; /* Enable or Disable interrupt from drive */ + hwif->maskproc = &sgiioc4_maskproc; /* Mask on/off NIEN register */ + hwif->quirkproc = NULL; + hwif->busproc = NULL; + + hwif->ide_dma_read = &sgiioc4_ide_dma_read; + hwif->ide_dma_write = &sgiioc4_ide_dma_write; + hwif->ide_dma_begin = &sgiioc4_ide_dma_begin; + hwif->ide_dma_end = &sgiioc4_ide_dma_end; + hwif->ide_dma_check = &sgiioc4_ide_dma_check; + hwif->ide_dma_on = &sgiioc4_ide_dma_on; + hwif->ide_dma_off = &sgiioc4_ide_dma_off; + hwif->ide_dma_off_quietly = &sgiioc4_ide_dma_off_quietly; + hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq; + hwif->ide_dma_host_on = &sgiioc4_ide_dma_host_on; + hwif->ide_dma_host_off = &sgiioc4_ide_dma_host_off; + hwif->ide_dma_bad_drive = &__ide_dma_bad_drive; + hwif->ide_dma_good_drive = &__ide_dma_good_drive; + hwif->ide_dma_count = &sgiioc4_ide_dma_count; + hwif->ide_dma_verbose = &sgiioc4_ide_dma_verbose; + hwif->ide_dma_retune = &__ide_dma_retune; + hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq; + hwif->ide_dma_timeout = &sgiioc4_ide_dma_timeout; + hwif->INB = &sgiioc4_INB; +} + +static int +sgiioc4_ide_dma_read(ide_drive_t * drive) +{ + struct request *rq = HWGROUP(drive)->rq; + unsigned int count = 0; +#if SGIIOC4_HD_SUPPORT + task_ioreg_t command = WIN_NOP; + u8 lba48 = (drive->addressing == 1) ? 1 : 0; +#endif /* SGIIOC4_HD_SUPPORT */ + + if (!(count = sgiioc4_build_dma_table(drive, rq, PCI_DMA_FROMDEVICE))) { + /* try PIO instead of DMA */ + return 1; + } + /* Writes FROM the IOC4 TO Main Memory */ + sgiioc4_configure_for_dma(IOC4_DMA_WRITE, drive); + +#if SGIIOC4_HD_SUPPORT + if (drive->media != ide_disk) /* Command will be issued by the cdrom.c layer */ + return 0; + + /* Hard Disks not supported on IOC4 as yet (05/22/2003) ... this code will be useful when Disks are supported */ + command = (lba48) ? WIN_READDMA_EXT : WIN_READDMA; + if (rq->cmd == IDE_DRIVE_TASKFILE) { + ide_task_t *args = rq->special; + command = args->tfRegister[IDE_COMMAND_OFFSET]; + } + + /* issue cmd to drive */ + ide_execute_command(drive, command, &ide_dma_intr, 2 * WAIT_CMD, + dma_timer_expiry); + return HWIF(drive)->ide_dma_count(drive); +#else /* SGIIOC4_HD_SUPPORT */ + return 0; +#endif /* SGIIOC4_HD_SUPPORT */ +} + +static int +sgiioc4_ide_dma_write(ide_drive_t * drive) +{ + struct request *rq = HWGROUP(drive)->rq; + unsigned int count = 0; +#if SGIIOC4_HD_SUPPORT + task_ioreg_t command = WIN_NOP; + u8 lba48 = (drive->addressing == 1) ? 
1 : 0; +#endif /* SGIIOC4_HD_SUPPORT */ + + if (!(count = sgiioc4_build_dma_table(drive, rq, PCI_DMA_TODEVICE))) { + /* try PIO instead of DMA */ + return 1; + } + + sgiioc4_configure_for_dma(IOC4_DMA_READ, drive); + /* Writes TO the IOC4 FROM Main Memory */ + +#if SGIIOC4_HD_SUPPORT + if (drive->media != ide_disk) /* Command will be issued by the cdrom.c layer */ + return 0; + + /* Hard Disks not supported on IOC4 as yet (05/22/2003) ... this code will be useful when Disks are supported */ + command = (lba48) ? WIN_READDMA_EXT : WIN_READDMA; + if (rq->cmd == IDE_DRIVE_TASKFILE) { + ide_task_t *args = rq->special; + command = args->tfRegister[IDE_COMMAND_OFFSET]; + } + + /* issue cmd to drive */ + ide_execute_command(drive, command, &ide_dma_intr, 2 * WAIT_CMD, + dma_timer_expiry); + return HWIF(drive)->ide_dma_count(drive); +#else /* SGIIOC4_HD_SUPPORT */ + return 0; +#endif /* SGIIOC4_HD_SUPPORT */ +} + +static int +sgiioc4_ide_dma_begin(ide_drive_t * drive) +{ + ide_hwif_t *hwif = HWIF(drive); + unsigned int reg = hwif->INL(hwif->dma_base + IOC4_DMA_CTRL * 4); + unsigned int temp_reg = reg | IOC4_S_DMA_START; + + hwif->OUTL(temp_reg, hwif->dma_base + IOC4_DMA_CTRL * 4); + + return 0; +} + +/* Stops the IOC4 DMA Engine */ +static int +sgiioc4_ide_dma_end(ide_drive_t * drive) +{ + u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0; + ide_hwif_t *hwif = HWIF(drive); + uint64_t dma_base = hwif->dma_base; + int dma_stat = 0, count; + unsigned long *ending_dma = (unsigned long *) hwif->dma_base2; + + hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4); + + count = 0; + do { + xide_delay(count); + ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4); + count += 10; + } while ((ioc4_dma & IOC4_S_DMA_STOP) && (count < 100)); + + if (ioc4_dma & IOC4_S_DMA_STOP) { + printk(KERN_ERR "sgiioc4_stopdma(%s): IOC4 DMA STOP bit is still 1 : ioc4_dma_reg 0x%x\n", drive->name, ioc4_dma); + dma_stat = 1; + } + + if (ending_dma) { + do { + for (num = 0; num < 16; num++) { + if (ending_dma[num] & (~0ul)) { + valid = 1; + break; + } + } + xide_delay(cnt); + } while ((cnt++ < 100) && (!valid)); + } + + if (!valid) + printk(KERN_INFO "sgiioc4_ide_dma_end(%s) : Stale DMA Data in Memory\n", drive->name); + + bc_dev = hwif->INL(dma_base + IOC4_BC_DEV * 4); + bc_mem = hwif->INL(dma_base + IOC4_BC_MEM * 4); + + if ((bc_dev & 0x01FF) || (bc_mem & 0x1FF)) { + if (bc_dev > bc_mem + 8) { + printk(KERN_ERR "sgiioc4_ide_dma_end(%s) : WARNING!!! 
byte_count_at_dev %d != byte_count_at_mem %d\n", + drive->name, bc_dev, bc_mem); + } + } + + drive->waiting_for_dma = 0; + ide_destroy_dmatable(drive); + + return dma_stat; +} + +static int +sgiioc4_ide_dma_check(ide_drive_t * drive) +{ + if (ide_config_drive_speed(drive,XFER_MW_DMA_2)!=0) { + printk(KERN_INFO "Couldnot set %s in Multimode-2 DMA mode | Drive %s using PIO instead\n", + drive->name, drive->name); + drive->using_dma = 0; + } else + drive->using_dma = 1; + + return 0; +} + +static int +sgiioc4_ide_dma_on(ide_drive_t * drive) +{ + drive->using_dma = 1; + + return HWIF(drive)->ide_dma_host_on(drive); +} + +static int +sgiioc4_ide_dma_off(ide_drive_t * drive) +{ + printk(KERN_INFO "%s: DMA disabled\n", drive->name); + + return HWIF(drive)->ide_dma_off_quietly(drive); +} + +static int +sgiioc4_ide_dma_off_quietly(ide_drive_t * drive) +{ + drive->using_dma = 0; + + return HWIF(drive)->ide_dma_host_off(drive); +} + +/* returns 1 if dma irq issued, 0 otherwise */ +static int +sgiioc4_ide_dma_test_irq(ide_drive_t * drive) +{ + return sgiioc4_checkirq(HWIF(drive)); +} + +static int +sgiioc4_ide_dma_host_on(ide_drive_t * drive) +{ + if (drive->using_dma) + return 0; + + return 1; +} + +static int +sgiioc4_ide_dma_host_off(ide_drive_t * drive) +{ + sgiioc4_clearirq(drive); + + return 0; +} + +static int +sgiioc4_ide_dma_count(ide_drive_t * drive) +{ + return HWIF(drive)->ide_dma_begin(drive); +} + +static int +sgiioc4_ide_dma_verbose(ide_drive_t * drive) +{ + if (drive->using_dma == 1) + printk(", UDMA(16)"); + else + printk(", PIO"); + + return 1; +} + +static int +sgiioc4_ide_dma_lostirq(ide_drive_t * drive) +{ + HWIF(drive)->resetproc(drive); + + return __ide_dma_lostirq(drive); +} + +static int +sgiioc4_ide_dma_timeout(ide_drive_t * drive) +{ + printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name); + if (HWIF(drive)->ide_dma_test_irq(drive)) + return 0; + + return HWIF(drive)->ide_dma_end(drive); +} + +static u8 +sgiioc4_INB(unsigned long port) +{ + u8 reg = (u8) inb(port); + + if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */ + if (reg & 0x51) { /* Not busy...check for interrupt */ + unsigned long other_ir = port - 0x110; + unsigned int intr_reg = (u32) inl(other_ir); + + if (intr_reg & 0x03) { + /* Clear the Interrupt, Error bits on the IOC4 */ + outl(0x03, other_ir); + intr_reg = (u32) inl(other_ir); + } + } + } + + return reg; +} + +/* Creates a dma map for the scatter-gather list entries */ +static void __init +ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base) +{ + int num_ports = sizeof (ioc4_dma_regs_t); + + printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, dma_base, dma_base + num_ports - 1); + + if (!request_region(dma_base, num_ports, hwif->name)) { + printk(KERN_ERR "ide_dma_sgiioc4(%s) -- Error, Port Addresses 0x%p to 0x%p ALREADY in use\n", + hwif->name, (void *)dma_base, (void *)dma_base + num_ports - 1); + return; + } + + hwif->dma_base = dma_base; + hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev, + IOC4_PRD_ENTRIES * IOC4_PRD_BYTES, /* 1 Page */ + &hwif->dmatable_dma); + + if (!hwif->dmatable_cpu) + goto dma_alloc_failure; + + hwif->sg_table = kmalloc(sizeof (struct scatterlist) * IOC4_PRD_ENTRIES, GFP_KERNEL); + + if (!hwif->sg_table) { + pci_free_consistent(hwif->pci_dev, IOC4_PRD_ENTRIES * IOC4_PRD_BYTES, hwif->dmatable_cpu, hwif->dmatable_dma); + goto dma_alloc_failure; + } + + hwif->dma_base2 = (unsigned long) pci_alloc_consistent(hwif->pci_dev, IOC4_IDE_CACHELINE_SIZE, + (dma_addr_t*)&(hwif->dma_status)); + + if 
(!hwif->dma_base2) { + pci_free_consistent(hwif->pci_dev, IOC4_PRD_ENTRIES * IOC4_PRD_BYTES, hwif->dmatable_cpu, hwif->dmatable_dma); + kfree(hwif->sg_table); + goto dma_alloc_failure; + } + + return; + + dma_alloc_failure: + printk(KERN_INFO "ide_dma_sgiioc4() -- Error! Unable to allocate DMA Maps for drive %s\n", hwif->name); + printk(KERN_INFO "Changing from DMA to PIO mode for Drive %s \n", hwif->name); + + /* Disable DMA because we couldnot allocate any DMA maps */ + hwif->autodma = 0; + hwif->atapi_dma = 0; +} + +/* Initializes the IOC4 DMA Engine */ +static void +sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive) +{ + u32 ioc4_dma; + int count; + ide_hwif_t *hwif = HWIF(drive); + uint64_t dma_base = hwif->dma_base; + uint32_t dma_addr, ending_dma_addr; + + ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4); + + if (ioc4_dma & IOC4_S_DMA_ACTIVE) { + printk(KERN_WARNING "sgiioc4_configure_for_dma(%s):Warning!! IOC4 DMA from previous transfer was still active\n", + drive->name); + hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4); + count = 0; + do { + xide_delay(count); + ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4); + count += 10; + } while ((ioc4_dma & IOC4_S_DMA_STOP) && (count < 100)); + + if (ioc4_dma & IOC4_S_DMA_STOP) + printk(KERN_ERR "sgiioc4_configure_for__dma(%s) : IOC4 Dma STOP bit is still 1\n", drive->name); + } + + ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4); + if (ioc4_dma & IOC4_S_DMA_ERROR) { + printk(KERN_WARNING "sgiioc4_configure_for__dma(%s) : Warning!! - DMA Error during Previous transfer | status 0x%x \n", + drive->name, ioc4_dma); + hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4); + count = 0; + do { + ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4); + xide_delay(count); + count += 10; + } while ((ioc4_dma & IOC4_S_DMA_STOP) && (count < 100)); + + if (ioc4_dma & IOC4_S_DMA_STOP) + printk(KERN_ERR "sgiioc4_configure_for__dma(%s) : IOC4 DMA STOP bit is still 1\n", drive->name); + } + + /* Address of the Scatter Gather List */ + dma_addr = cpu_to_le32(hwif->dmatable_dma); + hwif->OUTL(dma_addr, dma_base + IOC4_DMA_PTR_L * 4); + + /* Address of the Ending DMA */ + memset((unsigned int *) hwif->dma_base2, 0,IOC4_IDE_CACHELINE_SIZE); + ending_dma_addr = cpu_to_le32(hwif->dma_status); + hwif->OUTL(ending_dma_addr,dma_base + IOC4_DMA_END_ADDR * 4); + + hwif->OUTL(dma_direction, dma_base + IOC4_DMA_CTRL * 4); + drive->waiting_for_dma = 1; +} + +/* IOC4 Scatter Gather list Format */ +/* 128 Bit entries to support 64 bit addresses in the future */ +/* The Scatter Gather list Entry should be in the BIG-ENDIAN Format */ +/* --------------------------------------------------------------------------- */ +/* | Upper 32 bits - Zero | Lower 32 bits- address | */ +/* --------------------------------------------------------------------------- */ +/* | Upper 32 bits - Zero |EOL| 16 Bit Data Length | */ +/* --------------------------------------------------------------------------- */ + +/* Creates the scatter gather list, DMA Table */ +static unsigned int +sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir) +{ + ide_hwif_t *hwif = HWIF(drive); + unsigned int *table = hwif->dmatable_cpu; + unsigned int count = 0, i = 1; + struct scatterlist *sg; + + if (rq->cmd == IDE_DRIVE_TASKFILE) + hwif->sg_nents = i = sgiioc4_ide_raw_build_sglist(hwif, rq); + else + hwif->sg_nents = i = sgiioc4_ide_build_sglist(hwif, rq, ddir); + + if (!i) + return 0; /* sglist of length Zero */ + + sg = hwif->sg_table; + while (i && 
sg_dma_len(sg)) { + dma_addr_t cur_addr; + int cur_len; + cur_addr = sg_dma_address(sg); + cur_len = sg_dma_len(sg); + + while (cur_len) { + if (count++ >= IOC4_PRD_ENTRIES) { + printk(KERN_WARNING "%s: DMA table too small\n", drive->name); + goto use_pio_instead; + } else { + uint32_t xcount, bcount = 0x10000 - (cur_addr & 0xffff); + + if (bcount > cur_len) + bcount = cur_len; + + /* put the addr, length in the IOC4 dma-table format */ + *table = 0x0; + table++; + *table = cpu_to_be32(cur_addr); + table++; + *table = 0x0; + table++; + + xcount = bcount & 0xffff; + *table = cpu_to_be32(xcount); + table++; + + cur_addr += bcount; + cur_len -= bcount; + } + } + + sg++; + i--; + } + + if (count) { + table--; + *table |= cpu_to_be32(0x80000000); + return count; + } + + use_pio_instead: + pci_unmap_sg(hwif->pci_dev, hwif->sg_table, hwif->sg_nents, hwif->sg_dma_direction); + hwif->sg_dma_active = 0; + + return 0; /* revert to PIO for this request */ +} + +static int +sgiioc4_checkirq(ide_hwif_t * hwif) +{ + uint8_t intr_reg = hwif->INL(hwif->io_ports[IDE_IRQ_OFFSET] + IOC4_INTR_REG * 4); + + if (intr_reg & 0x03) + return 1; + + return 0; +} + +static int +sgiioc4_clearirq(ide_drive_t * drive) +{ + u32 intr_reg; + ide_hwif_t *hwif = HWIF(drive); + ide_ioreg_t other_ir = hwif->io_ports[IDE_IRQ_OFFSET] + (IOC4_INTR_REG << 2); + + /* Code to check for PCI error conditions */ + intr_reg = hwif->INL(other_ir); + if (intr_reg & 0x03) { + /* Valid IOC4-IDE interrupt */ + u8 stat = hwif->INB(IDE_STATUS_REG); + int count = 0; + do { + xide_delay(count); + stat = hwif->INB(IDE_STATUS_REG); /* Removes Interrupt from IDE Device */ + } while ((stat & 0x80) && (count++ < 1024)); + + if (intr_reg & 0x02) { + /* Error when transferring DMA data on PCI bus */ + uint32_t pci_err_addr_low, pci_err_addr_high, pci_stat_cmd_reg; + + pci_err_addr_low = hwif->INL(hwif->io_ports[IDE_IRQ_OFFSET]); + pci_err_addr_high = hwif->INL(hwif->io_ports[IDE_IRQ_OFFSET] + 4); + pci_read_config_dword(hwif->pci_dev, PCI_COMMAND, &pci_stat_cmd_reg); + printk(KERN_ERR "sgiioc4_clearirq(%s) : PCI Bus Error when doing DMA : status-cmd reg is 0x%x \n", drive->name, pci_stat_cmd_reg); + printk(KERN_ERR "sgiioc4_clearirq(%s) : PCI Error Address is 0x%x%x \n", drive->name, pci_err_addr_high, pci_err_addr_low); + /* Clear the PCI Error indicator */ + pci_write_config_dword(hwif->pci_dev, PCI_COMMAND, 0x00000146); + } + + hwif->OUTL(0x03, other_ir); /* Clear the Interrupt, Error bits on the IOC4 */ + + intr_reg = hwif->INL(other_ir); + } + + return intr_reg; +} + +/** + * "Copied from drivers/ide/ide-dma.c" + * sgiioc4_ide_build_sglist - map IDE scatter gather for DMA I/O + * @hwif: the interface to build the DMA table for + * @rq: the request holding the sg list + * @ddir: data direction + * + * Perform the PCI mapping magic neccessary to access the source + * or target buffers of a request via PCI DMA. The lower layers + * of the kernel provide the neccessary cache management so that + * we can operate in a portable fashion. + * + * This code is identical to ide_build_sglist in ide-dma.c + * however that it not exported and even if it were would create + * dependancy problems for modular drivers. 
+ */ +static int +sgiioc4_ide_build_sglist(ide_hwif_t * hwif, struct request *rq, int ddir) +{ + struct buffer_head *bh; + struct scatterlist *sg = hwif->sg_table; + unsigned long lastdataend = ~0UL; + int nents = 0; + + if (hwif->sg_dma_active) + BUG(); + + bh = rq->bh; + do { + int contig = 0; + + if (bh->b_page) { + if (bh_phys(bh) == lastdataend) + contig = 1; + } else { + if ((unsigned long) bh->b_data == lastdataend) + contig = 1; + } + + if (contig) { + sg[nents - 1].length += bh->b_size; + lastdataend += bh->b_size; + continue; + } + + if (nents >= PRD_ENTRIES) + return 0; + + memset(&sg[nents], 0, sizeof (*sg)); + + if (bh->b_page) { + sg[nents].page = bh->b_page; + sg[nents].offset = bh_offset(bh); + lastdataend = bh_phys(bh) + bh->b_size; + } else { + if ((unsigned long) bh->b_data < PAGE_SIZE) + BUG(); + + sg[nents].address = bh->b_data; + lastdataend = (unsigned long) bh->b_data + bh->b_size; + } + + sg[nents].length = bh->b_size; + nents++; + } while ((bh = bh->b_reqnext) != NULL); + + if (nents == 0) + BUG(); + + hwif->sg_dma_direction = ddir; + return pci_map_sg(hwif->pci_dev, sg, nents, ddir); +} + +/** + * Copied from drivers/ide/ide-dma.c + * sgiioc4_ide_raw_build_sglist - map IDE scatter gather for DMA + * @hwif: the interface to build the DMA table for + * @rq: the request holding the sg list + * + * Perform the PCI mapping magic neccessary to access the source or + * target buffers of a taskfile request via PCI DMA. The lower layers + * of the kernel provide the neccessary cache management so that we can + * operate in a portable fashion + * + * This code is identical to ide_raw_build_sglist in ide-dma.c + * however that it not exported and even if it were would create + * dependancy problems for modular drivers. + */ +static int +sgiioc4_ide_raw_build_sglist(ide_hwif_t * hwif, struct request *rq) +{ + struct scatterlist *sg = hwif->sg_table; + int nents = 0; + ide_task_t *args = rq->special; + u8 *virt_addr = rq->buffer; + int sector_count = rq->nr_sectors; + + if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE) + hwif->sg_dma_direction = PCI_DMA_TODEVICE; + else + hwif->sg_dma_direction = PCI_DMA_FROMDEVICE; +#if 1 + if (sector_count > 128) { + memset(&sg[nents], 0, sizeof (*sg)); + sg[nents].address = virt_addr; + sg[nents].length = 128 * SECTOR_SIZE; + nents++; + virt_addr = virt_addr + (128 * SECTOR_SIZE); + sector_count -= 128; + } + memset(&sg[nents], 0, sizeof (*sg)); + sg[nents].address = virt_addr; + sg[nents].length = sector_count * SECTOR_SIZE; + nents++; +#else + while (sector_count > 128) { + memset(&sg[nents], 0, sizeof (*sg)); + sg[nents].address = virt_addr; + sg[nents].length = 128 * SECTOR_SIZE; + nents++; + virt_addr = virt_addr + (128 * SECTOR_SIZE); + sector_count -= 128; + }; + memset(&sg[nents], 0, sizeof (*sg)); + sg[nents].address = virt_addr; + sg[nents].length = sector_count * SECTOR_SIZE; + nents++; +#endif + return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction); +} + +#ifdef CONFIG_PROC_FS + +static int +sgiioc4_get_info(char *buffer, char **addr, off_t offset, int count) +{ + char *p = buffer; + unsigned int class_rev; + int i = 0; + + while (i < n_sgiioc4_devs) { + pci_read_config_dword(sgiioc4_devs[i], PCI_CLASS_REVISION, + &class_rev); + class_rev &= 0xff; + + if (sgiioc4_devs[i]->device == PCI_DEVICE_ID_SGI_IOC4) { + p += sprintf(p, "\n SGI IOC4 Chipset rev %d. 
", class_rev); + p += sprintf(p, "\n Chipset has 1 IDE channel and supports 2 devices on that channel."); + p += sprintf(p, "\n Chipset supports DMA in MultiMode-2 data transfer protocol.\n"); + /* Do we need more info. here? */ + } + i++; + } + + return p - buffer; +} + +#endif /* CONFIG_PROC_FS */ + +static int __devinit +sgiioc4_init_one(struct pci_dev *dev, const struct pci_device_id *id) +{ + unsigned int class_rev; + ide_pci_device_t *d = &sgiioc4_chipsets[id->driver_data]; + if (dev->device != d->device) { + printk(KERN_ERR "Error in sgiioc4_init_one(dev 0x%p | id 0x%p )\n", (void *) dev, (void *) id); + BUG(); + } + + pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); + class_rev &= 0xff; + + if (class_rev < IOC4_SUPPORTED_FIRMWARE_REV) { + printk(KERN_INFO "Disabling the IOC4 IDE Part due to unsupported Firmware Rev (%d). \n",class_rev); + printk(KERN_INFO "Please upgrade to Firmware Rev 46 or higher \n"); + return 0; + } + + printk(KERN_INFO "%s: IDE controller at PCI slot %s\n", d->name, dev->slot_name); + + if (pci_init_sgiioc4(dev, d->name)) + return 0; + + MOD_INC_USE_COUNT; + + return 0; +} + +static struct pci_device_id sgiioc4_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID, PCI_ANY_ID, 0x0b4000, 0xFFFFFF, 0 }, + { 0 } +}; + +static struct pci_driver driver = { + .name = "SGI-IOC4 IDE", + .id_table = sgiioc4_pci_tbl, + .probe = sgiioc4_init_one, +}; + +static int +sgiioc4_ide_init(void) +{ + return ide_pci_register_driver(&driver); +} + +static void +sgiioc4_ide_exit(void) +{ + ide_pci_unregister_driver(&driver); +} + +module_init(sgiioc4_ide_init); +module_exit(sgiioc4_ide_exit); + +MODULE_AUTHOR("Aniket Malatpure - Silicon Graphics Inc. (SGI)"); +MODULE_DESCRIPTION("PCI driver module for SGI IOC4 Base-IO Card"); +MODULE_LICENSE("GPL"); + +MODULE_DEVICE_TABLE(pci, sgiioc4_pci_tbl); + +EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/sgiioc4.h linux.22-ac2/drivers/ide/pci/sgiioc4.h --- linux.vanilla/drivers/ide/pci/sgiioc4.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/sgiioc4.h 2003-09-09 22:27:29.000000000 +0100 @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/NoticeExplan + */ + +#ifndef SGIIOC4_H +#define SGIIOC4_H + +#define IDE_ARCH_ACK_INTR 1 +#include + +/* IOC4 Specific Definitions */ +#define IOC4_CMD_OFFSET 0x100 +#define IOC4_CTRL_OFFSET 0x120 +#define IOC4_DMA_OFFSET 0x140 +#define IOC4_INTR_OFFSET 0x0 + +#define IOC4_TIMING 0x00 +#define IOC4_DMA_PTR_L 0x01 +#define IOC4_DMA_PTR_H 0x02 +#define IOC4_DMA_ADDR_L 0x03 +#define IOC4_DMA_ADDR_H 0x04 +#define IOC4_BC_DEV 0x05 +#define IOC4_BC_MEM 0x06 +#define IOC4_DMA_CTRL 0x07 +#define IOC4_DMA_END_ADDR 0x08 + +/* Bits in the IOC4 Control/Status Register */ +#define IOC4_S_DMA_START 0x01 +#define IOC4_S_DMA_STOP 0x02 +#define IOC4_S_DMA_DIR 0x04 +#define IOC4_S_DMA_ACTIVE 0x08 +#define IOC4_S_DMA_ERROR 0x10 +#define IOC4_ATA_MEMERR 0x02 + +/* Read/Write Directions */ +#define IOC4_DMA_WRITE 0x04 +#define IOC4_DMA_READ 0x00 + +/* Interrupt Register Offsets */ +#define IOC4_INTR_REG 0x03 +#define IOC4_INTR_SET 0x05 +#define IOC4_INTR_CLEAR 0x07 + +#define IOC4_IDE_CACHELINE_SIZE 128 +#define IOC4_SUPPORTED_FIRMWARE_REV 46 + + +/* Weeds out non-IDE interrupts to the IOC4 */ +#define ide_ack_intr(hwif) ((hwif)->hw.ack_intr ? (hwif)->hw.ack_intr(hwif) : 1) + +#define SGIIOC4_MAX_DEVS 32 + +#if defined(CONFIG_PROC_FS) +#include +#include + +static u8 sgiioc4_proc; + +static struct pci_dev *sgiioc4_devs[SGIIOC4_MAX_DEVS]; +static int sgiioc4_get_info(char *, char **, off_t, int); + +static ide_pci_host_proc_t sgiioc4_procs[] __initdata = { + { + .name = "sgiioc4", + .set = 1, + .get_info = sgiioc4_get_info, + .parent = NULL, + } +}; +#endif + +typedef volatile struct { + u32 timing_reg0; + u32 timing_reg1; + u32 low_mem_ptr; + u32 high_mem_ptr; + u32 low_mem_addr; + u32 high_mem_addr; + u32 dev_byte_count; + u32 mem_byte_count; + u32 status; +} ioc4_dma_regs_t; + +/* Each Physical Region Descriptor Entry size is 16 bytes (2 * 64 bits) */ +/* IOC4 has only 1 IDE channel */ +#define IOC4_PRD_BYTES 16 +#define IOC4_PRD_ENTRIES (PAGE_SIZE /IOC4_PRD_BYTES) + +typedef enum pciio_endian_e { + PCIDMA_ENDIAN_BIG, + PCIDMA_ENDIAN_LITTLE +} pciio_endian_t; + +static void sgiioc4_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port, + ide_ioreg_t ctrl_port, ide_ioreg_t irq_port); +static void sgiioc4_ide_setup_pci_device(struct pci_dev *dev, const char *name); +static void sgiioc4_resetproc(ide_drive_t * drive); +static void sgiioc4_maskproc(ide_drive_t * drive, int mask); +static void sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive); +static void __init ide_init_sgiioc4(ide_hwif_t * hwif); +static void __init ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base); +static int sgiioc4_checkirq(ide_hwif_t * hwif); +static int sgiioc4_clearirq(ide_drive_t * drive); +static int sgiioc4_get_info(char *buffer, char **addr, off_t offset, int count); +static int sgiioc4_ide_dma_read(ide_drive_t * drive); +static int sgiioc4_ide_dma_write(ide_drive_t * drive); +static int sgiioc4_ide_dma_begin(ide_drive_t * drive); +static int sgiioc4_ide_dma_end(ide_drive_t * drive); +static int sgiioc4_ide_dma_check(ide_drive_t * drive); +static int sgiioc4_ide_dma_on(ide_drive_t * drive); +static int sgiioc4_ide_dma_off(ide_drive_t * drive); +static int sgiioc4_ide_dma_off_quietly(ide_drive_t * drive); +static int sgiioc4_ide_dma_test_irq(ide_drive_t * drive); +static 
int sgiioc4_ide_dma_host_on(ide_drive_t * drive); +static int sgiioc4_ide_dma_host_off(ide_drive_t * drive); +static int sgiioc4_ide_dma_count(ide_drive_t * drive); +static int sgiioc4_ide_dma_verbose(ide_drive_t * drive); +static int sgiioc4_ide_dma_lostirq(ide_drive_t * drive); +static int sgiioc4_ide_dma_timeout(ide_drive_t * drive); +static int sgiioc4_ide_build_sglist(ide_hwif_t * hwif, struct request *rq, + int ddir); +static int sgiioc4_ide_raw_build_sglist(ide_hwif_t * hwif, struct request *rq); + +static u8 sgiioc4_INB(unsigned long port); +static inline void xide_delay(long ticks); +extern int (*sgiioc4_display_info) (char *, char **, off_t, int); /* ide-proc.c */ +static unsigned int sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, + int ddir); +static unsigned int __init pci_init_sgiioc4(struct pci_dev *dev, const char *name); + +static ide_pci_device_t sgiioc4_chipsets[] __devinitdata = { + { + /* Channel 0 */ + .vendor = PCI_VENDOR_ID_SGI, + .device = PCI_DEVICE_ID_SGI_IOC4, + .name = "SGIIOC4", + .init_chipset = pci_init_sgiioc4, + .init_iops = NULL, + .init_hwif = ide_init_sgiioc4, + .init_dma = ide_dma_sgiioc4, + .channels = 1, + .autodma = AUTODMA, + .enablebits = { { 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00 } }, + .bootable = ON_BOARD, + .extra = 0, + } +}; + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/siimage.c linux.22-ac2/drivers/ide/pci/siimage.c --- linux.vanilla/drivers/ide/pci/siimage.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/siimage.c 2003-09-01 13:23:20.000000000 +0100 @@ -1113,7 +1113,10 @@ hwif->pre_reset = &siimage_pre_reset; if(is_sata(hwif)) + { hwif->busproc = &siimage_busproc; + hwif->sata = 1; + } if (!hwif->dma_base) { hwif->drives[0].autotune = 1; @@ -1216,4 +1219,6 @@ MODULE_DESCRIPTION("PCI driver module for SiI IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, siimage_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/sis5513.c linux.22-ac2/drivers/ide/pci/sis5513.c --- linux.vanilla/drivers/ide/pci/sis5513.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/sis5513.c 2003-09-01 13:23:20.000000000 +0100 @@ -989,6 +989,8 @@ MODULE_DESCRIPTION("PCI driver module for SIS IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, sis5513_pci_tbl); + EXPORT_NO_SYMBOLS; /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/sl82c105.c linux.22-ac2/drivers/ide/pci/sl82c105.c --- linux.vanilla/drivers/ide/pci/sl82c105.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/sl82c105.c 2003-09-01 13:23:20.000000000 +0100 @@ -536,4 +536,6 @@ MODULE_DESCRIPTION("PCI driver module for W82C105 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, sl82c105_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/slc90e66.c linux.22-ac2/drivers/ide/pci/slc90e66.c --- linux.vanilla/drivers/ide/pci/slc90e66.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/slc90e66.c 2003-09-01 13:23:20.000000000 +0100 @@ -409,4 +409,6 @@ MODULE_DESCRIPTION("PCI driver module for SLC90E66 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, slc90e66_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/triflex.c linux.22-ac2/drivers/ide/pci/triflex.c --- 
linux.vanilla/drivers/ide/pci/triflex.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/triflex.c 2003-09-01 13:23:20.000000000 +0100 @@ -253,4 +253,6 @@ MODULE_DESCRIPTION("PCI driver module for Compaq Triflex IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, triflex_pci_tbl); +EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/trm290.c linux.22-ac2/drivers/ide/pci/trm290.c --- linux.vanilla/drivers/ide/pci/trm290.c 2003-06-14 00:11:31.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/trm290.c 2003-09-01 13:23:20.000000000 +0100 @@ -449,4 +449,6 @@ MODULE_DESCRIPTION("PCI driver module for Tekram TRM290 IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, trm290_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/pci/via82cxxx.c linux.22-ac2/drivers/ide/pci/via82cxxx.c --- linux.vanilla/drivers/ide/pci/via82cxxx.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/pci/via82cxxx.c 2003-09-01 13:23:20.000000000 +0100 @@ -667,4 +667,6 @@ MODULE_DESCRIPTION("PCI driver module for VIA IDE"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, via_pci_tbl); + EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/raid/hptraid.c linux.22-ac2/drivers/ide/raid/hptraid.c --- linux.vanilla/drivers/ide/raid/hptraid.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/ide/raid/hptraid.c 2003-09-09 19:05:46.000000000 +0100 @@ -18,7 +18,11 @@ Based on work done by Søren Schmidt for FreeBSD Changelog: - 15.06.2003 wweissmann@gmx.at + 19.08.2003 v0.03 wweissmann@gmx.at + * register the raid volume only if all disks are available + * print a warning that raid-(0+)1 failover is not supported + + 15.06.2003 v0.02 wweissmann@gmx.at * correct values of raid-1 superbock * re-add check for availability of all disks * fix offset bug in raid-1 (introduced in raid 0+1 implementation) @@ -584,7 +588,7 @@ ide_drive_t *ideinfo; dev = MKDEV(major,minor); - ideinfo = get_info_ptr (dev); + ideinfo = ide_info_ptr (dev, 0); if (ideinfo==NULL) return 0; @@ -814,10 +818,6 @@ break; } - /* Initialize the gendisk structure */ - - ataraid_register_disk(device,raid[device].sectors); - /* Verify that we have all disks */ count=count_disks(raid+device); @@ -844,7 +844,17 @@ return -ENODEV; } } + printk(KERN_WARNING "ataraid%i: raid-0+1 disk failover is not implemented!\n", + device); } + else if (type == HPT_T_RAID_1) { + printk(KERN_WARNING "ataraid%i: raid-1 disk failover is not implemented!\n", + device); + } + /* Initialize the gendisk structure */ + + ataraid_register_disk(device,raid[device].sectors); + return 0; } @@ -856,7 +866,7 @@ int retval=-ENODEV; int device,i,count=0; - printk(KERN_INFO "Highpoint HPT370 Softwareraid driver for linux version 0.02\n"); + printk(KERN_INFO "Highpoint HPT370 Softwareraid driver for linux version 0.03\n"); for(i=0; oplist[i].op; i++) { do diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/ide/raid/pdcraid.c linux.22-ac2/drivers/ide/raid/pdcraid.c --- linux.vanilla/drivers/ide/raid/pdcraid.c 2003-06-14 00:11:32.000000000 +0100 +++ linux.22-ac2/drivers/ide/raid/pdcraid.c 2003-08-15 15:02:31.000000000 +0100 @@ -350,7 +350,7 @@ ide_drive_t *ideinfo; dev = MKDEV(major,minor); - ideinfo = get_info_ptr (dev); + ideinfo = ide_info_ptr (dev, 0); if (ideinfo==NULL) return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude 
linux.vanilla/drivers/ide/raid/silraid.c linux.22-ac2/drivers/ide/raid/silraid.c --- linux.vanilla/drivers/ide/raid/silraid.c 2003-06-14 00:11:32.000000000 +0100 +++ linux.22-ac2/drivers/ide/raid/silraid.c 2003-08-15 15:02:48.000000000 +0100 @@ -253,7 +253,7 @@ ide_drive_t *ideinfo; dev = MKDEV(major,minor); - ideinfo = get_info_ptr (dev); + ideinfo = ide_info_ptr (dev, 0); if (ideinfo==NULL) return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/isdn/Config.in linux.22-ac2/drivers/isdn/Config.in --- linux.vanilla/drivers/isdn/Config.in 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/isdn/Config.in 2003-09-01 13:21:00.000000000 +0100 @@ -8,7 +8,7 @@ if [ "$CONFIG_INET" != "n" ]; then bool ' Support synchronous PPP' CONFIG_ISDN_PPP if [ "$CONFIG_ISDN_PPP" != "n" ]; then - bool ' PPP filtering for ISDN' CONFIG_IPPP_FILTER $CONFIG_FILTER + dep_bool ' PPP filtering for ISDN' CONFIG_IPPP_FILTER $CONFIG_FILTER bool ' Use VJ-compression with synchronous PPP' CONFIG_ISDN_PPP_VJ bool ' Support generic MP (RFC 1717)' CONFIG_ISDN_MPP dep_tristate ' Support BSD compression' CONFIG_ISDN_PPP_BSDCOMP $CONFIG_ISDN diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/isdn/hisax/st5481.h linux.22-ac2/drivers/isdn/hisax/st5481.h --- linux.vanilla/drivers/isdn/hisax/st5481.h 2003-06-14 00:11:32.000000000 +0100 +++ linux.22-ac2/drivers/isdn/hisax/st5481.h 2003-09-09 19:18:47.000000000 +0100 @@ -219,13 +219,13 @@ #define L1_EVENT_COUNT (EV_TIMER3 + 1) #define ERR(format, arg...) \ -printk(KERN_ERR __FILE__ ": " __FUNCTION__ ": " format "\n" , ## arg) +printk(KERN_ERR __FILE__ ": %s: " format "\n" , __FUNCTION__ , ## arg) #define WARN(format, arg...) \ -printk(KERN_WARNING __FILE__ ": " __FUNCTION__ ": " format "\n" , ## arg) +printk(KERN_WARNING __FILE__ ": %s: " format "\n" , __FUNCTION__ , ## arg) #define INFO(format, arg...) \ -printk(KERN_INFO __FILE__ ": " __FUNCTION__ ": " format "\n" , ## arg) +printk(KERN_INFO __FILE__ ": %s: " format "\n" , __FUNCTION__ , ## arg) #include "isdnhdlc.h" #include "fsm.h" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/isdn/isdn_tty.c linux.22-ac2/drivers/isdn/isdn_tty.c --- linux.vanilla/drivers/isdn/isdn_tty.c 2001-12-21 17:41:54.000000000 +0000 +++ linux.22-ac2/drivers/isdn/isdn_tty.c 2003-06-29 16:10:14.000000000 +0100 @@ -1807,7 +1807,7 @@ #endif return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/macintosh/macserial.c linux.22-ac2/drivers/macintosh/macserial.c --- linux.vanilla/drivers/macintosh/macserial.c 2002-08-03 16:08:25.000000000 +0100 +++ linux.22-ac2/drivers/macintosh/macserial.c 2003-06-29 16:10:20.000000000 +0100 @@ -1957,7 +1957,7 @@ } OPNDBG("rs_close ttyS%d, count = %d\n", info->line, info->count); - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/Makefile linux.22-ac2/drivers/Makefile --- linux.vanilla/drivers/Makefile 2003-08-28 16:45:32.000000000 +0100 +++ linux.22-ac2/drivers/Makefile 2003-08-28 17:03:10.000000000 +0100 @@ -8,12 +8,13 @@ mod-subdirs := dio hil mtd sbus video macintosh usb input telephony ide \ message/i2o message/fusion scsi md ieee1394 pnp isdn atm \ - fc4 net/hamradio i2c acpi bluetooth + fc4 net/hamradio i2c acpi bluetooth cpufreq subdir-y := parport char block net sound misc media cdrom hotplug subdir-m := $(subdir-y) +subdir-$(CONFIG_CPU_FREQ) += cpufreq subdir-$(CONFIG_DIO) += dio subdir-$(CONFIG_PCI) += pci subdir-$(CONFIG_GSC) += gsc diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/Config.in linux.22-ac2/drivers/md/Config.in --- linux.vanilla/drivers/md/Config.in 2001-09-14 22:22:18.000000000 +0100 +++ linux.22-ac2/drivers/md/Config.in 2003-06-29 16:10:32.000000000 +0100 @@ -14,5 +14,8 @@ dep_tristate ' Multipath I/O support' CONFIG_MD_MULTIPATH $CONFIG_BLK_DEV_MD dep_tristate ' Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM $CONFIG_MD +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + dep_tristate ' Device-mapper support (EXPERIMENTAL)' CONFIG_BLK_DEV_DM $CONFIG_MD +fi endmenu diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm.c linux.22-ac2/drivers/md/dm.c --- linux.vanilla/drivers/md/dm.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm.c 2003-06-29 16:10:32.000000000 +0100 @@ -0,0 +1,1158 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include + +/* we only need this for the lv_bmap struct definition, not happy */ +#include + +#define DEFAULT_READ_AHEAD 64 + +static const char *_name = DM_NAME; + +static int major = 0; +static int _major = 0; + +struct io_hook { + struct mapped_device *md; + struct target *target; + int rw; + + void (*end_io) (struct buffer_head * bh, int uptodate); + void *context; +}; + +static kmem_cache_t *_io_hook_cache; + +static struct mapped_device *_devs[MAX_DEVICES]; +static struct rw_semaphore _dev_locks[MAX_DEVICES]; + +/* + * This lock is only held by dm_create and dm_set_name to avoid + * race conditions where someone else may create a device with + * the same name. 
+ */ +static spinlock_t _create_lock = SPIN_LOCK_UNLOCKED; + +/* block device arrays */ +static int _block_size[MAX_DEVICES]; +static int _blksize_size[MAX_DEVICES]; +static int _hardsect_size[MAX_DEVICES]; + +static devfs_handle_t _dev_dir; + +static int request(request_queue_t * q, int rw, struct buffer_head *bh); +static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb); + +/* + * Protect the mapped_devices referenced from _dev[] + */ +struct mapped_device *dm_get_r(int minor) +{ + struct mapped_device *md; + + if (minor >= MAX_DEVICES) + return NULL; + + down_read(_dev_locks + minor); + md = _devs[minor]; + if (!md) + up_read(_dev_locks + minor); + return md; +} + +struct mapped_device *dm_get_w(int minor) +{ + struct mapped_device *md; + + if (minor >= MAX_DEVICES) + return NULL; + + down_write(_dev_locks + minor); + md = _devs[minor]; + if (!md) + up_write(_dev_locks + minor); + return md; +} + +static int namecmp(struct mapped_device *md, const char *name, int nametype) +{ + switch (nametype) { + case DM_LOOKUP_BY_NAME: + return strcmp(md->name, name); + break; + + case DM_LOOKUP_BY_UUID: + if (!md->uuid) + return -1; /* never equal */ + + return strcmp(md->uuid, name); + break; + + default: + DMWARN("Unknown comparison type in namecmp: %d", nametype); + BUG(); + } + + return -1; +} + +/* + * The interface (eg, ioctl) will probably access the devices + * through these slow 'by name' locks, this needs improving at + * some point if people start playing with *large* numbers of dm + * devices. + */ +struct mapped_device *dm_get_name_r(const char *name, int nametype) +{ + int i; + struct mapped_device *md; + + for (i = 0; i < MAX_DEVICES; i++) { + md = dm_get_r(i); + if (md) { + if (!namecmp(md, name, nametype)) + return md; + + dm_put_r(md); + } + } + + return NULL; +} + +struct mapped_device *dm_get_name_w(const char *name, int nametype) +{ + int i; + struct mapped_device *md; + + /* + * To avoid getting write locks on all the devices we try + * and promote a read lock to a write lock, this can + * fail, in which case we just start again. 
+ */ + + restart: + for (i = 0; i < MAX_DEVICES; i++) { + md = dm_get_r(i); + if (!md) + continue; + + if (namecmp(md, name, nametype)) { + dm_put_r(md); + continue; + } + + /* found it */ + dm_put_r(md); + + md = dm_get_w(i); + if (!md) + goto restart; + + if (namecmp(md, name, nametype)) { + dm_put_w(md); + goto restart; + } + + return md; + } + + return NULL; +} + +void dm_put_r(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + + if (minor >= MAX_DEVICES) + return; + + up_read(_dev_locks + minor); +} + +void dm_put_w(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + + if (minor >= MAX_DEVICES) + return; + + up_write(_dev_locks + minor); +} + +/* + * Setup and tear down the driver + */ +static __init void init_locks(void) +{ + int i; + + for (i = 0; i < MAX_DEVICES; i++) + init_rwsem(_dev_locks + i); +} + +static __init int local_init(void) +{ + int r; + + init_locks(); + + /* allocate a slab for the io-hooks */ + if (!_io_hook_cache && + !(_io_hook_cache = kmem_cache_create("dm io hooks", + sizeof(struct io_hook), + 0, 0, NULL, NULL))) + return -ENOMEM; + + _major = major; + r = devfs_register_blkdev(_major, _name, &dm_blk_dops); + if (r < 0) { + DMERR("register_blkdev failed"); + kmem_cache_destroy(_io_hook_cache); + return r; + } + + if (!_major) + _major = r; + + /* set up the arrays */ + read_ahead[_major] = DEFAULT_READ_AHEAD; + blk_size[_major] = _block_size; + blksize_size[_major] = _blksize_size; + hardsect_size[_major] = _hardsect_size; + + blk_queue_make_request(BLK_DEFAULT_QUEUE(_major), request); + + _dev_dir = devfs_mk_dir(0, DM_DIR, NULL); + + return 0; +} + +static void local_exit(void) +{ + if (kmem_cache_destroy(_io_hook_cache)) + DMWARN("io_hooks still allocated during unregistration"); + _io_hook_cache = NULL; + + if (devfs_unregister_blkdev(_major, _name) < 0) + DMERR("devfs_unregister_blkdev failed"); + + read_ahead[_major] = 0; + blk_size[_major] = NULL; + blksize_size[_major] = NULL; + hardsect_size[_major] = NULL; + _major = 0; + + DMINFO("cleaned up"); +} + +/* + * We have a lot of init/exit functions, so it seems easier to + * store them in an array. The disposable macro 'xx' + * expands a prefix into a pair of function names. 
+ */ +static struct { + int (*init)(void); + void (*exit)(void); + +} _inits[] = { +#define xx(n) {n ## _init, n ## _exit}, + xx(local) + xx(dm_target) + xx(dm_linear) + xx(dm_stripe) + xx(dm_snapshot) + xx(dm_interface) +#undef xx +}; + +static int __init dm_init(void) +{ + const int count = sizeof(_inits) / sizeof(*_inits); + + int r, i; + + for (i = 0; i < count; i++) { + r = _inits[i].init(); + if (r) + goto bad; + } + + return 0; + + bad: + while (i--) + _inits[i].exit(); + + return r; +} + +static void __exit dm_exit(void) +{ + int i = sizeof(_inits) / sizeof(*_inits); + + dm_destroy_all(); + while (i--) + _inits[i].exit(); +} + +/* + * Block device functions + */ +static int dm_blk_open(struct inode *inode, struct file *file) +{ + struct mapped_device *md; + + md = dm_get_w(MINOR(inode->i_rdev)); + if (!md) + return -ENXIO; + + md->use_count++; + dm_put_w(md); + + return 0; +} + +static int dm_blk_close(struct inode *inode, struct file *file) +{ + struct mapped_device *md; + + md = dm_get_w(MINOR(inode->i_rdev)); + if (!md) + return -ENXIO; + + if (md->use_count < 1) + DMWARN("incorrect reference count found in mapped_device"); + + md->use_count--; + dm_put_w(md); + + return 0; +} + +/* In 512-byte units */ +#define VOLUME_SIZE(minor) (_block_size[(minor)] << 1) + +static int dm_blk_ioctl(struct inode *inode, struct file *file, + uint command, unsigned long a) +{ + int minor = MINOR(inode->i_rdev); + long size; + + if (minor >= MAX_DEVICES) + return -ENXIO; + + switch (command) { + case BLKROSET: + case BLKROGET: + case BLKRASET: + case BLKRAGET: + case BLKFLSBUF: + case BLKSSZGET: + //case BLKRRPART: /* Re-read partition tables */ + //case BLKPG: + case BLKELVGET: + case BLKELVSET: + case BLKBSZGET: + case BLKBSZSET: + return blk_ioctl(inode->i_rdev, command, a); + break; + + case BLKGETSIZE: + size = VOLUME_SIZE(minor); + if (copy_to_user((void *) a, &size, sizeof(long))) + return -EFAULT; + break; + + case BLKGETSIZE64: + size = VOLUME_SIZE(minor); + if (put_user((u64) ((u64) size) << 9, (u64 *) a)) + return -EFAULT; + break; + + case BLKRRPART: + return -ENOTTY; + + case LV_BMAP: + return dm_user_bmap(inode, (struct lv_bmap *) a); + + default: + DMWARN("unknown block ioctl 0x%x", command); + return -ENOTTY; + } + + return 0; +} + +static inline struct io_hook *alloc_io_hook(void) +{ + return kmem_cache_alloc(_io_hook_cache, GFP_NOIO); +} + +static inline void free_io_hook(struct io_hook *ih) +{ + kmem_cache_free(_io_hook_cache, ih); +} + +/* + * FIXME: We need to decide if deferred_io's need + * their own slab, I say no for now since they are + * only used when the device is suspended. + */ +static inline struct deferred_io *alloc_deferred(void) +{ + return kmalloc(sizeof(struct deferred_io), GFP_NOIO); +} + +static inline void free_deferred(struct deferred_io *di) +{ + kfree(di); +} + +/* + * Call a target's optional error function if an I/O failed. + */ +static inline int call_err_fn(struct io_hook *ih, struct buffer_head *bh) +{ + dm_err_fn err = ih->target->type->err; + + if (err) + return err(bh, ih->rw, ih->target->private); + + return 0; +} + +/* + * bh->b_end_io routine that decrements the pending count + * and then calls the original bh->b_end_io fn. 
+ */ +static void dec_pending(struct buffer_head *bh, int uptodate) +{ + struct io_hook *ih = bh->b_private; + + if (!uptodate && call_err_fn(ih, bh)) + return; + + if (atomic_dec_and_test(&ih->md->pending)) + /* nudge anyone waiting on suspend queue */ + wake_up(&ih->md->wait); + + bh->b_end_io = ih->end_io; + bh->b_private = ih->context; + free_io_hook(ih); + + bh->b_end_io(bh, uptodate); +} + +/* + * Add the bh to the list of deferred io. + */ +static int queue_io(struct buffer_head *bh, int rw) +{ + struct deferred_io *di = alloc_deferred(); + struct mapped_device *md; + + if (!di) + return -ENOMEM; + + md = dm_get_w(MINOR(bh->b_rdev)); + if (!md) { + free_deferred(di); + return -ENXIO; + } + + if (!md->suspended) { + dm_put_w(md); + free_deferred(di); + return 1; + } + + di->bh = bh; + di->rw = rw; + di->next = md->deferred; + md->deferred = di; + + dm_put_w(md); + + return 0; /* deferred successfully */ +} + +/* + * Do the bh mapping for a given leaf + */ +static inline int __map_buffer(struct mapped_device *md, + struct buffer_head *bh, int rw, int leaf) +{ + int r; + dm_map_fn fn; + void *context; + struct io_hook *ih = NULL; + struct target *ti = md->map->targets + leaf; + + fn = ti->type->map; + context = ti->private; + + ih = alloc_io_hook(); + + if (!ih) + return -1; + + ih->md = md; + ih->rw = rw; + ih->target = ti; + ih->end_io = bh->b_end_io; + ih->context = bh->b_private; + + r = fn(bh, rw, context); + + if (r > 0) { + /* hook the end io request fn */ + atomic_inc(&md->pending); + bh->b_end_io = dec_pending; + bh->b_private = ih; + + } else if (r == 0) + /* we don't need to hook */ + free_io_hook(ih); + + else if (r < 0) { + free_io_hook(ih); + return -1; + } + + return r; +} + +/* + * Search the btree for the correct target. + */ +static inline int __find_node(struct dm_table *t, struct buffer_head *bh) +{ + int l, n = 0, k = 0; + offset_t *node; + + for (l = 0; l < t->depth; l++) { + n = get_child(n, k); + node = get_node(t, l, n); + + for (k = 0; k < KEYS_PER_NODE; k++) + if (node[k] >= bh->b_rsector) + break; + } + + return (KEYS_PER_NODE * n) + k; +} + +static int request(request_queue_t * q, int rw, struct buffer_head *bh) +{ + struct mapped_device *md; + int r, minor = MINOR(bh->b_rdev); + unsigned int block_size = _blksize_size[minor]; + + md = dm_get_r(minor); + if (!md) { + buffer_IO_error(bh); + return 0; + } + + /* + * Sanity checks. + */ + if (bh->b_size > block_size) + DMERR("request is larger than block size " + "b_size (%d), block size (%d)", + bh->b_size, block_size); + + if (bh->b_rsector & ((bh->b_size >> 9) - 1)) + DMERR("misaligned block requested logical " + "sector (%lu), b_size (%d)", + bh->b_rsector, bh->b_size); + + /* + * If we're suspended we have to queue + * this io for later. + */ + while (md->suspended) { + dm_put_r(md); + + if (rw == READA) + goto bad_no_lock; + + r = queue_io(bh, rw); + + if (r < 0) + goto bad_no_lock; + + else if (r == 0) + return 0; /* deferred successfully */ + + /* + * We're in a while loop, because someone could suspend + * before we get to the following read lock. 
+ */ + md = dm_get_r(minor); + if (!md) { + buffer_IO_error(bh); + return 0; + } + } + + if ((r = __map_buffer(md, bh, rw, __find_node(md->map, bh))) < 0) + goto bad; + + dm_put_r(md); + return r; + + bad: + dm_put_r(md); + + bad_no_lock: + buffer_IO_error(bh); + return 0; +} + +static int check_dev_size(int minor, unsigned long block) +{ + /* FIXME: check this */ + unsigned long max_sector = (_block_size[minor] << 1) + 1; + unsigned long sector = (block + 1) * (_blksize_size[minor] >> 9); + + return (sector > max_sector) ? 0 : 1; +} + +/* + * Creates a dummy buffer head and maps it (for lilo). + */ +static int do_bmap(kdev_t dev, unsigned long block, + kdev_t * r_dev, unsigned long *r_block) +{ + struct mapped_device *md; + struct buffer_head bh; + int minor = MINOR(dev), r; + struct target *t; + + md = dm_get_r(minor); + if (!md) + return -ENXIO; + + if (md->suspended) { + dm_put_r(md); + return -EPERM; + } + + if (!check_dev_size(minor, block)) { + dm_put_r(md); + return -EINVAL; + } + + /* setup dummy bh */ + memset(&bh, 0, sizeof(bh)); + bh.b_blocknr = block; + bh.b_dev = bh.b_rdev = dev; + bh.b_size = _blksize_size[minor]; + bh.b_rsector = block * (bh.b_size >> 9); + + /* find target */ + t = md->map->targets + __find_node(md->map, &bh); + + /* do the mapping */ + r = t->type->map(&bh, READ, t->private); + + *r_dev = bh.b_rdev; + *r_block = bh.b_rsector / (bh.b_size >> 9); + + dm_put_r(md); + return r; +} + +/* + * Marshals arguments and results between user and kernel space. + */ +static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb) +{ + unsigned long block, r_block; + kdev_t r_dev; + int r; + + if (get_user(block, &lvb->lv_block)) + return -EFAULT; + + if ((r = do_bmap(inode->i_rdev, block, &r_dev, &r_block))) + return r; + + if (put_user(kdev_t_to_nr(r_dev), &lvb->lv_dev) || + put_user(r_block, &lvb->lv_block)) + return -EFAULT; + + return 0; +} + +/* + * See if the device with a specific minor # is free. The write + * lock is held when it returns successfully. + */ +static inline int specific_dev(int minor, struct mapped_device *md) +{ + if (minor >= MAX_DEVICES) { + DMWARN("request for a mapped_device beyond MAX_DEVICES (%d)", + MAX_DEVICES); + return -1; + } + + down_write(_dev_locks + minor); + if (_devs[minor]) { + /* in use */ + up_write(_dev_locks + minor); + return -1; + } + + return minor; +} + +/* + * Find the first free device. Again the write lock is held on + * success. + */ +static int any_old_dev(struct mapped_device *md) +{ + int i; + + for (i = 0; i < MAX_DEVICES; i++) + if (specific_dev(i, md) != -1) + return i; + + return -1; +} + +/* + * Allocate and initialise a blank device. + * Caller must ensure uuid is null-terminated. + * Device is returned with a write lock held. + */ +static struct mapped_device *alloc_dev(const char *name, const char *uuid, + int minor) +{ + struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL); + int len; + + if (!md) { + DMWARN("unable to allocate device, out of memory."); + return NULL; + } + + memset(md, 0, sizeof(*md)); + + /* + * This grabs the write lock if it succeeds. + */ + minor = (minor < 0) ? any_old_dev(md) : specific_dev(minor, md); + if (minor < 0) { + kfree(md); + return NULL; + } + + md->dev = MKDEV(_major, minor); + md->suspended = 0; + + strncpy(md->name, name, sizeof(md->name) - 1); + md->name[sizeof(md->name) - 1] = '\0'; + + /* + * Copy in the uuid. 
+ */ + if (uuid && *uuid) { + len = strlen(uuid) + 1; + if (!(md->uuid = kmalloc(len, GFP_KERNEL))) { + DMWARN("unable to allocate uuid - out of memory."); + kfree(md); + return NULL; + } + strcpy(md->uuid, uuid); + } + + init_waitqueue_head(&md->wait); + return md; +} + +static int __register_device(struct mapped_device *md) +{ + md->devfs_entry = + devfs_register(_dev_dir, md->name, DEVFS_FL_CURRENT_OWNER, + MAJOR(md->dev), MINOR(md->dev), + S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP, + &dm_blk_dops, NULL); + + return 0; +} + +static int __unregister_device(struct mapped_device *md) +{ + devfs_unregister(md->devfs_entry); + return 0; +} + +/* + * The hardsect size for a mapped device is the largest hardsect size + * from the devices it maps onto. + */ +static int __find_hardsect_size(struct list_head *devices) +{ + int result = 512, size; + struct list_head *tmp; + + list_for_each(tmp, devices) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + size = get_hardsect_size(dd->dev); + if (size > result) + result = size; + } + + return result; +} + +/* + * Bind a table to the device. + */ +static int __bind(struct mapped_device *md, struct dm_table *t) +{ + int minor = MINOR(md->dev); + + md->map = t; + + if (!t->num_targets) { + _block_size[minor] = 0; + _blksize_size[minor] = BLOCK_SIZE; + _hardsect_size[minor] = 0; + return 0; + } + + /* in k */ + _block_size[minor] = (t->highs[t->num_targets - 1] + 1) >> 1; + + _blksize_size[minor] = BLOCK_SIZE; + _hardsect_size[minor] = __find_hardsect_size(&t->devices); + register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]); + + return 0; +} + +static void __unbind(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + + dm_table_destroy(md->map); + md->map = NULL; + + _block_size[minor] = 0; + _blksize_size[minor] = 0; + _hardsect_size[minor] = 0; +} + +static int check_name(const char *name) +{ + struct mapped_device *md; + + if (strchr(name, '/') || strlen(name) > DM_NAME_LEN) { + DMWARN("invalid device name"); + return -1; + } + + md = dm_get_name_r(name, DM_LOOKUP_BY_NAME); + if (md) { + dm_put_r(md); + DMWARN("device name already in use"); + return -1; + } + + return 0; +} + +static int check_uuid(const char *uuid) +{ + struct mapped_device *md; + + if (uuid) { + md = dm_get_name_r(uuid, DM_LOOKUP_BY_UUID); + if (md) { + dm_put_r(md); + DMWARN("device uuid already in use"); + return -1; + } + } + + return 0; +} + +/* + * Constructor for a new device. + */ +int dm_create(const char *name, const char *uuid, int minor, int ro, + struct dm_table *table) +{ + int r; + struct mapped_device *md; + + spin_lock(&_create_lock); + if (check_name(name) || check_uuid(uuid)) { + spin_unlock(&_create_lock); + return -EINVAL; + } + + md = alloc_dev(name, uuid, minor); + if (!md) { + spin_unlock(&_create_lock); + return -ENXIO; + } + minor = MINOR(md->dev); + _devs[minor] = md; + + r = __register_device(md); + if (r) + goto err; + + r = __bind(md, table); + if (r) + goto err; + + dm_set_ro(md, ro); + + spin_unlock(&_create_lock); + dm_put_w(md); + return 0; + + err: + _devs[minor] = NULL; + if (md->uuid) + kfree(md->uuid); + + dm_put_w(md); + kfree(md); + spin_unlock(&_create_lock); + return r; +} + +/* + * Renames the device. No lock held. 
+ */ +int dm_set_name(const char *name, int nametype, const char *newname) +{ + int r; + struct mapped_device *md; + + spin_lock(&_create_lock); + if (check_name(newname) < 0) { + spin_unlock(&_create_lock); + return -EINVAL; + } + + md = dm_get_name_w(name, nametype); + if (!md) { + spin_unlock(&_create_lock); + return -ENXIO; + } + + r = __unregister_device(md); + if (r) + goto out; + + strcpy(md->name, newname); + r = __register_device(md); + + out: + dm_put_w(md); + spin_unlock(&_create_lock); + return r; +} + +/* + * Destructor for the device. You cannot destroy an open + * device. Write lock must be held before calling. + * Caller must dm_put_w(md) then kfree(md) if call was successful. + */ +int dm_destroy(struct mapped_device *md) +{ + int minor, r; + + if (md->use_count) + return -EPERM; + + r = __unregister_device(md); + if (r) + return r; + + minor = MINOR(md->dev); + _devs[minor] = NULL; + __unbind(md); + + if (md->uuid) + kfree(md->uuid); + + return 0; +} + +/* + * Destroy all devices - except open ones + */ +void dm_destroy_all(void) +{ + int i, some_destroyed, r; + struct mapped_device *md; + + do { + some_destroyed = 0; + for (i = 0; i < MAX_DEVICES; i++) { + md = dm_get_w(i); + if (!md) + continue; + + r = dm_destroy(md); + dm_put_w(md); + + if (!r) { + kfree(md); + some_destroyed = 1; + } + } + } while (some_destroyed); +} + +/* + * Sets or clears the read-only flag for the device. Write lock + * must be held. + */ +void dm_set_ro(struct mapped_device *md, int ro) +{ + md->read_only = ro; + set_device_ro(md->dev, ro); +} + +/* + * Requeue the deferred buffer_heads by calling generic_make_request. + */ +static void flush_deferred_io(struct deferred_io *c) +{ + struct deferred_io *n; + + while (c) { + n = c->next; + generic_make_request(c->rw, c->bh); + free_deferred(c); + c = n; + } +} + +/* + * Swap in a new table (destroying old one). Write lock must be + * held. + */ +int dm_swap_table(struct mapped_device *md, struct dm_table *table) +{ + int r; + + /* device must be suspended */ + if (!md->suspended) + return -EPERM; + + __unbind(md); + + r = __bind(md, table); + if (r) + return r; + + return 0; +} + +/* + * We need to be able to change a mapping table under a mounted + * filesystem. for example we might want to move some data in + * the background. Before the table can be swapped with + * dm_bind_table, dm_suspend must be called to flush any in + * flight buffer_heads and ensure that any further io gets + * deferred. Write lock must be held. + */ +int dm_suspend(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + DECLARE_WAITQUEUE(wait, current); + + if (md->suspended) + return -EINVAL; + + md->suspended = 1; + dm_put_w(md); + + /* wait for all the pending io to flush */ + add_wait_queue(&md->wait, &wait); + current->state = TASK_UNINTERRUPTIBLE; + do { + md = dm_get_w(minor); + if (!md) { + /* Caller expects to free this lock. Yuck. 
*/ + down_write(_dev_locks + minor); + return -ENXIO; + } + + if (!atomic_read(&md->pending)) + break; + + dm_put_w(md); + schedule(); + + } while (1); + + current->state = TASK_RUNNING; + remove_wait_queue(&md->wait, &wait); + + return 0; +} + +int dm_resume(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + struct deferred_io *def; + + if (!md->suspended || !md->map->num_targets) + return -EINVAL; + + md->suspended = 0; + def = md->deferred; + md->deferred = NULL; + + dm_put_w(md); + flush_deferred_io(def); + run_task_queue(&tq_disk); + + if (!dm_get_w(minor)) { + /* FIXME: yuck */ + down_write(_dev_locks + minor); + return -ENXIO; + } + + return 0; +} + +struct block_device_operations dm_blk_dops = { + open: dm_blk_open, + release: dm_blk_close, + ioctl: dm_blk_ioctl, + owner: THIS_MODULE +}; + +/* + * module hooks + */ +module_init(dm_init); +module_exit(dm_exit); + +MODULE_PARM(major, "i"); +MODULE_PARM_DESC(major, "The major number of the device mapper"); +MODULE_DESCRIPTION(DM_NAME " driver"); +MODULE_AUTHOR("Joe Thornber "); +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm-exception-store.c linux.22-ac2/drivers/md/dm-exception-store.c --- linux.vanilla/drivers/md/dm-exception-store.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm-exception-store.c 2003-06-29 16:10:32.000000000 +0100 @@ -0,0 +1,724 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm-snapshot.h" +#include "kcopyd.h" +#include +#include + +#define SECTOR_SIZE 512 +#define SECTOR_SHIFT 9 + +/*----------------------------------------------------------------- + * Persistent snapshots, by persistent we mean that the snapshot + * will survive a reboot. + *---------------------------------------------------------------*/ + +/* + * We need to store a record of which parts of the origin have + * been copied to the snapshot device. The snapshot code + * requires that we copy exception chunks to chunk aligned areas + * of the COW store. It makes sense therefore, to store the + * metadata in chunk size blocks. + * + * There is no backward or forward compatibility implemented, + * snapshots with different disk versions than the kernel will + * not be usable. It is expected that "lvcreate" will blank out + * the start of a fresh COW device before calling the snapshot + * constructor. + * + * The first chunk of the COW device just contains the header. + * After this there is a chunk filled with exception metadata, + * followed by as many exception chunks as can fit in the + * metadata areas. + * + * All on disk structures are in little-endian format. The end + * of the exceptions info is indicated by an exception with a + * new_chunk of 0, which is invalid since it would point to the + * header chunk. + */ + +/* + * Magic for persistent snapshots: "SnAp" - Feeble isn't it. + */ +#define SNAP_MAGIC 0x70416e53 + +/* + * The on-disk version of the metadata. + */ +#define SNAPSHOT_DISK_VERSION 1 + +struct disk_header { + uint32_t magic; + + /* + * Is this snapshot valid. There is no way of recovering + * an invalid snapshot. + */ + int valid; + + /* + * Simple, incrementing version. no backward + * compatibility. 
+ */ + uint32_t version; + + /* In sectors */ + uint32_t chunk_size; +}; + +struct disk_exception { + uint64_t old_chunk; + uint64_t new_chunk; +}; + +struct commit_callback { + void (*callback)(void *, int success); + void *context; +}; + +/* + * The top level structure for a persistent exception store. + */ +struct pstore { + struct dm_snapshot *snap; /* up pointer to my snapshot */ + int version; + int valid; + uint32_t chunk_size; + uint32_t exceptions_per_area; + + /* + * Now that we have an asynchronous kcopyd there is no + * need for large chunk sizes, so it won't hurt to have a + * whole chunk's worth of metadata in memory at once. + */ + void *area; + struct kiobuf *iobuf; + + /* + * Used to keep track of which metadata area the data in + * 'chunk' refers to. + */ + uint32_t current_area; + + /* + * The next free chunk for an exception. + */ + uint32_t next_free; + + /* + * The index of next free exception in the current + * metadata area. + */ + uint32_t current_committed; + + atomic_t pending_count; + uint32_t callback_count; + struct commit_callback *callbacks; +}; + +/* + * For performance reasons we want to defer writing a committed + * exception's metadata to disk so that we can amortise away this + * expensive operation. + * + * For the initial version of this code we will remain with + * synchronous io. There are some deadlock issues with async + * that I haven't yet worked out. + */ +static int do_io(int rw, struct kcopyd_region *where, struct kiobuf *iobuf) +{ + int i, sectors_per_block, nr_blocks, start; + int blocksize = get_hardsect_size(where->dev); + int status; + + sectors_per_block = blocksize / SECTOR_SIZE; + + nr_blocks = where->count / sectors_per_block; + start = where->sector / sectors_per_block; + + for (i = 0; i < nr_blocks; i++) + iobuf->blocks[i] = start++; + + iobuf->length = where->count << 9; + iobuf->locked = 1; + + status = brw_kiovec(rw, 1, &iobuf, where->dev, iobuf->blocks, + blocksize); + if (status != (where->count << 9)) + return -EIO; + + return 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 4, 19) +/* + * FIXME: Remove once 2.4.19 has been released. + */ +struct page *vmalloc_to_page(void *vmalloc_addr) +{ + unsigned long addr = (unsigned long) vmalloc_addr; + struct page *page = NULL; + pmd_t *pmd; + pte_t *pte; + pgd_t *pgd; + + pgd = pgd_offset_k(addr); + if (!pgd_none(*pgd)) { + pmd = pmd_offset(pgd, addr); + if (!pmd_none(*pmd)) { + pte = pte_offset(pmd, addr); + if (pte_present(*pte)) { + page = pte_page(*pte); + } + } + } + return page; +} +#endif + +static int allocate_iobuf(struct pstore *ps) +{ + size_t i, r = -ENOMEM, len, nr_pages; + struct page *page; + + len = ps->chunk_size << SECTOR_SHIFT; + + /* + * Allocate the chunk_size block of memory that will hold + * a single metadata area. + */ + ps->area = vmalloc(len); + if (!ps->area) + return r; + + if (alloc_kiovec(1, &ps->iobuf)) + goto bad; + + nr_pages = ps->chunk_size / (PAGE_SIZE / SECTOR_SIZE); + r = expand_kiobuf(ps->iobuf, nr_pages); + if (r) + goto bad; + + /* + * We lock the pages for ps->area into memory since they'll be + * doing a lot of io.
+ */ + for (i = 0; i < nr_pages; i++) { + page = vmalloc_to_page(ps->area + (i * PAGE_SIZE)); + LockPage(page); + ps->iobuf->maplist[i] = page; + ps->iobuf->nr_pages++; + } + + ps->iobuf->nr_pages = nr_pages; + ps->iobuf->offset = 0; + + return 0; + + bad: + if (ps->iobuf) + free_kiovec(1, &ps->iobuf); + + if (ps->area) + vfree(ps->area); + ps->iobuf = NULL; + return r; +} + +static void free_iobuf(struct pstore *ps) +{ + int i; + + for (i = 0; i < ps->iobuf->nr_pages; i++) + UnlockPage(ps->iobuf->maplist[i]); + ps->iobuf->locked = 0; + + free_kiovec(1, &ps->iobuf); + vfree(ps->area); +} + +/* + * Read or write a chunk aligned and sized block of data from a device. + */ +static int chunk_io(struct pstore *ps, uint32_t chunk, int rw) +{ + int r; + struct kcopyd_region where; + + where.dev = ps->snap->cow->dev; + where.sector = ps->chunk_size * chunk; + where.count = ps->chunk_size; + + r = do_io(rw, &where, ps->iobuf); + if (r) + return r; + + return 0; +} + +/* + * Read or write a metadata area. Remembering to skip the first + * chunk which holds the header. + */ +static int area_io(struct pstore *ps, uint32_t area, int rw) +{ + int r; + uint32_t chunk; + + /* convert a metadata area index to a chunk index */ + chunk = 1 + ((ps->exceptions_per_area + 1) * area); + + r = chunk_io(ps, chunk, rw); + if (r) + return r; + + ps->current_area = area; + return 0; +} + +static int zero_area(struct pstore *ps, uint32_t area) +{ + memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); + return area_io(ps, area, WRITE); +} + +static int read_header(struct pstore *ps, int *new_snapshot) +{ + int r; + struct disk_header *dh; + + r = chunk_io(ps, 0, READ); + if (r) + return r; + + dh = (struct disk_header *) ps->area; + + if (dh->magic == 0) { + *new_snapshot = 1; + + } else if (dh->magic == SNAP_MAGIC) { + *new_snapshot = 0; + ps->valid = dh->valid; + ps->version = dh->version; + ps->chunk_size = dh->chunk_size; + + } else { + DMWARN("Invalid/corrupt snapshot"); + r = -ENXIO; + } + + return r; +} + +static int write_header(struct pstore *ps) +{ + struct disk_header *dh; + + memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); + + dh = (struct disk_header *) ps->area; + dh->magic = SNAP_MAGIC; + dh->valid = ps->valid; + dh->version = ps->version; + dh->chunk_size = ps->chunk_size; + + return chunk_io(ps, 0, WRITE); +} + +/* + * Access functions for the disk exceptions, these do the endian conversions. + */ +static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) +{ + if (index >= ps->exceptions_per_area) + return NULL; + + return ((struct disk_exception *) ps->area) + index; +} + +static int read_exception(struct pstore *ps, + uint32_t index, struct disk_exception *result) +{ + struct disk_exception *e; + + e = get_exception(ps, index); + if (!e) + return -EINVAL; + + /* copy it */ + result->old_chunk = le64_to_cpu(e->old_chunk); + result->new_chunk = le64_to_cpu(e->new_chunk); + + return 0; +} + +static int write_exception(struct pstore *ps, + uint32_t index, struct disk_exception *de) +{ + struct disk_exception *e; + + e = get_exception(ps, index); + if (!e) + return -EINVAL; + + /* copy it */ + e->old_chunk = cpu_to_le64(de->old_chunk); + e->new_chunk = cpu_to_le64(de->new_chunk); + + return 0; +} + +/* + * Registers the exceptions that are present in the current area. + * 'full' is filled in to indicate if the area has been + * filled. 
+ */ +static int insert_exceptions(struct pstore *ps, int *full) +{ + int i, r; + struct disk_exception de; + + /* presume the area is full */ + *full = 1; + + for (i = 0; i < ps->exceptions_per_area; i++) { + r = read_exception(ps, i, &de); + + if (r) + return r; + + /* + * If the new_chunk is pointing at the start of + * the COW device, where the first metadata area + * is, we know that we've hit the end of the + * exceptions. Therefore the area is not full. + */ + if (de.new_chunk == 0LL) { + ps->current_committed = i; + *full = 0; + break; + } + + /* + * Keep track of the start of the free chunks. + */ + if (ps->next_free <= de.new_chunk) + ps->next_free = de.new_chunk + 1; + + /* + * Otherwise we add the exception to the snapshot. + */ + r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk); + if (r) + return r; + } + + return 0; +} + +static int read_exceptions(struct pstore *ps) +{ + uint32_t area; + int r, full = 1; + + /* + * Keep reading chunks and inserting exceptions until + * we find a partially full area. + */ + for (area = 0; full; area++) { + r = area_io(ps, area, READ); + if (r) + return r; + + r = insert_exceptions(ps, &full); + if (r) + return r; + } + + return 0; +} + +static inline struct pstore *get_info(struct exception_store *store) +{ + return (struct pstore *) store->context; +} + +static int persistent_percentfull(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + return (ps->next_free * store->snap->chunk_size * 100) / + get_dev_size(store->snap->cow->dev); +} + +static void persistent_destroy(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + + vfree(ps->callbacks); + free_iobuf(ps); + kfree(ps); +} + +static int persistent_prepare(struct exception_store *store, + struct exception *e) +{ + struct pstore *ps = get_info(store); + uint32_t stride; + offset_t size = get_dev_size(store->snap->cow->dev); + + /* Is there enough room ? */ + if (size <= (ps->next_free * store->snap->chunk_size)) + return -ENOSPC; + + e->new_chunk = ps->next_free; + + /* + * Move onto the next free pending, making sure to take + * into account the location of the metadata chunks. + */ + stride = (ps->exceptions_per_area + 1); + if (!(++ps->next_free % stride)) + ps->next_free++; + + atomic_inc(&ps->pending_count); + return 0; +} + +static void persistent_commit(struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context) +{ + int r, i; + struct pstore *ps = get_info(store); + struct disk_exception de; + struct commit_callback *cb; + + de.old_chunk = e->old_chunk; + de.new_chunk = e->new_chunk; + write_exception(ps, ps->current_committed++, &de); + + /* + * Add the callback to the back of the array. This code + * is the only place where the callback array is + * manipulated, and we know that it will never be called + * multiple times concurrently. + */ + cb = ps->callbacks + ps->callback_count++; + cb->callback = callback; + cb->context = callback_context; + + /* + * If there are no more exceptions in flight, or we have + * filled this metadata area we commit the exceptions to + * disk. + */ + if (atomic_dec_and_test(&ps->pending_count) || + (ps->current_committed == ps->exceptions_per_area)) { + r = area_io(ps, ps->current_area, WRITE); + if (r) + ps->valid = 0; + + for (i = 0; i < ps->callback_count; i++) { + cb = ps->callbacks + i; + cb->callback(cb->context, r == 0 ?
1 : 0); + } + + ps->callback_count = 0; + } + + /* + * Have we completely filled the current area ? + */ + if (ps->current_committed == ps->exceptions_per_area) { + ps->current_committed = 0; + r = zero_area(ps, ps->current_area + 1); + if (r) + ps->valid = 0; + } +} + +static void persistent_drop(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + + ps->valid = 0; + if (write_header(ps)) + DMWARN("write header failed"); +} + +int dm_create_persistent(struct exception_store *store, uint32_t chunk_size) +{ + int r, new_snapshot; + struct pstore *ps; + + /* allocate the pstore */ + ps = kmalloc(sizeof(*ps), GFP_KERNEL); + if (!ps) + return -ENOMEM; + + ps->snap = store->snap; + ps->valid = 1; + ps->version = SNAPSHOT_DISK_VERSION; + ps->chunk_size = chunk_size; + ps->exceptions_per_area = (chunk_size << SECTOR_SHIFT) / + sizeof(struct disk_exception); + ps->next_free = 2; /* skipping the header and first area */ + ps->current_committed = 0; + + r = allocate_iobuf(ps); + if (r) + goto bad; + + /* + * Allocate space for all the callbacks. + */ + ps->callback_count = 0; + atomic_set(&ps->pending_count, 0); + ps->callbacks = vcalloc(ps->exceptions_per_area, + sizeof(*ps->callbacks)); + + if (!ps->callbacks) + goto bad; + + /* + * Read the snapshot header. + */ + r = read_header(ps, &new_snapshot); + if (r) + goto bad; + + /* + * Do we need to setup a new snapshot ? + */ + if (new_snapshot) { + r = write_header(ps); + if (r) { + DMWARN("write_header failed"); + goto bad; + } + + r = zero_area(ps, 0); + if (r) { + DMWARN("zero_area(0) failed"); + goto bad; + } + + } else { + /* + * Sanity checks. + */ + if (ps->chunk_size != chunk_size) { + DMWARN("chunk size for existing snapshot different " + "from that requested"); + r = -EINVAL; + goto bad; + } + + if (ps->version != SNAPSHOT_DISK_VERSION) { + DMWARN("unable to handle snapshot disk version %d", + ps->version); + r = -EINVAL; + goto bad; + } + + /* + * Read the metadata. + */ + r = read_exceptions(ps); + if (r) + goto bad; + } + + store->destroy = persistent_destroy; + store->prepare_exception = persistent_prepare; + store->commit_exception = persistent_commit; + store->drop_snapshot = persistent_drop; + store->percent_full = persistent_percentfull; + store->context = ps; + + return r; + + bad: + if (ps) { + if (ps->callbacks) + vfree(ps->callbacks); + + if (ps->iobuf) + free_iobuf(ps); + + kfree(ps); + } + return r; +} + +/*----------------------------------------------------------------- + * Implementation of the store for non-persistent snapshots. 
+ *---------------------------------------------------------------*/ +struct transient_c { + offset_t next_free; +}; + +void transient_destroy(struct exception_store *store) +{ + kfree(store->context); +} + +int transient_prepare(struct exception_store *store, struct exception *e) +{ + struct transient_c *tc = (struct transient_c *) store->context; + offset_t size = get_dev_size(store->snap->cow->dev); + + if (size < (tc->next_free + store->snap->chunk_size)) + return -1; + + e->new_chunk = sector_to_chunk(store->snap, tc->next_free); + tc->next_free += store->snap->chunk_size; + + return 0; +} + +void transient_commit(struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context) +{ + /* Just succeed */ + callback(callback_context, 1); +} + +static int transient_percentfull(struct exception_store *store) +{ + struct transient_c *tc = (struct transient_c *) store->context; + return (tc->next_free * 100) / get_dev_size(store->snap->cow->dev); +} + +int dm_create_transient(struct exception_store *store, + struct dm_snapshot *s, int blocksize, void **error) +{ + struct transient_c *tc; + + memset(store, 0, sizeof(*store)); + store->destroy = transient_destroy; + store->prepare_exception = transient_prepare; + store->commit_exception = transient_commit; + store->percent_full = transient_percentfull; + store->snap = s; + + tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + tc->next_free = 0; + store->context = tc; + + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm.h linux.22-ac2/drivers/md/dm.h --- linux.vanilla/drivers/md/dm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm.h 2003-09-01 13:54:30.000000000 +0100 @@ -0,0 +1,241 @@ +/* + * Internal header file for device mapper + * + * Copyright (C) 2001 Sistina Software + * + * This file is released under the LGPL. + */ + +#ifndef DM_INTERNAL_H +#define DM_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DM_NAME "device-mapper" /* Name for messaging */ +#define DM_DRIVER_EMAIL "lvm-devel@lists.sistina.com" +#define MAX_DEPTH 16 +#define NODE_SIZE L1_CACHE_BYTES +#define KEYS_PER_NODE (NODE_SIZE / sizeof(offset_t)) +#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) +#define MAX_ARGS 32 +#define MAX_DEVICES 256 + +/* + * List of devices that a metadevice uses and should open/close. + */ +struct dm_dev { + atomic_t count; + struct list_head list; + + int mode; + + kdev_t dev; + struct block_device *bd; +}; + +/* + * I/O that had to be deferred while we were suspended + */ +struct deferred_io { + int rw; + struct buffer_head *bh; + struct deferred_io *next; +}; + +/* + * Btree leaf - this does the actual mapping + */ +struct target { + struct target_type *type; + void *private; +}; + +/* + * The btree + */ +struct dm_table { + /* btree table */ + int depth; + int counts[MAX_DEPTH]; /* in nodes */ + offset_t *index[MAX_DEPTH]; + + int num_targets; + int num_allocated; + offset_t *highs; + struct target *targets; + + /* + * Indicates the rw permissions for the new logical + * device. This should be a combination of FMODE_READ + * and FMODE_WRITE. + */ + int mode; + + /* a list of devices used by this table */ + struct list_head devices; + + /* + * A waitqueue for processes waiting for something + * interesting to happen to this table. 
+ */ + wait_queue_head_t eventq; +}; + +/* + * The actual device struct + */ +struct mapped_device { + kdev_t dev; + char name[DM_NAME_LEN]; + char *uuid; + + int use_count; + int suspended; + int read_only; + + /* a list of io's that arrived while we were suspended */ + atomic_t pending; + wait_queue_head_t wait; + struct deferred_io *deferred; + + struct dm_table *map; + + /* used by dm-fs.c */ + devfs_handle_t devfs_entry; +}; + +extern struct block_device_operations dm_blk_dops; + +/* dm-target.c */ +int dm_target_init(void); +struct target_type *dm_get_target_type(const char *name); +void dm_put_target_type(struct target_type *t); +void dm_target_exit(void); + +/* + * Destructively splits argument list to pass to ctr. + */ +int split_args(int max, int *argc, char **argv, char *input); + +/* dm.c */ +struct mapped_device *dm_get_r(int minor); +struct mapped_device *dm_get_w(int minor); + +/* + * There are two ways to lookup a device. + */ +enum { + DM_LOOKUP_BY_NAME, + DM_LOOKUP_BY_UUID +}; + +struct mapped_device *dm_get_name_r(const char *name, int nametype); +struct mapped_device *dm_get_name_w(const char *name, int nametype); + +void dm_put_r(struct mapped_device *md); +void dm_put_w(struct mapped_device *md); + +/* + * Call with no lock. + */ +int dm_create(const char *name, const char *uuid, int minor, int ro, + struct dm_table *table); +int dm_set_name(const char *name, int nametype, const char *newname); +void dm_destroy_all(void); + +/* + * You must have the write lock before calling the remaining md + * methods. + */ +int dm_destroy(struct mapped_device *md); +void dm_set_ro(struct mapped_device *md, int ro); + +/* + * The device must be suspended before calling this method. + */ +int dm_swap_table(struct mapped_device *md, struct dm_table *t); + +/* + * A device can still be used while suspended, but I/O is deferred. + */ +int dm_suspend(struct mapped_device *md); +int dm_resume(struct mapped_device *md); + +/* dm-table.c */ +int dm_table_create(struct dm_table **result, int mode); +void dm_table_destroy(struct dm_table *t); + +int dm_table_add_target(struct dm_table *t, offset_t highs, + struct target_type *type, void *private); +int dm_table_complete(struct dm_table *t); + +/* + * Event handling + */ +void dm_table_event(struct dm_table *t); + +#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x) +#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x) +#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x) + +/* + * Calculate the index of the child node of the n'th node k'th key. + */ +static inline int get_child(int n, int k) +{ + return (n * CHILDREN_PER_NODE) + k; +} + +/* + * Return the n'th node of level l from table t. + */ +static inline offset_t *get_node(struct dm_table *t, int l, int n) +{ + return t->index[l] + (n * KEYS_PER_NODE); +} + +static inline int array_too_big(unsigned long fixed, unsigned long obj, + unsigned long num) +{ + return (num > (ULONG_MAX - fixed) / obj); +} + + +/* + * Targets + */ +int dm_linear_init(void); +void dm_linear_exit(void); + +int dm_stripe_init(void); +void dm_stripe_exit(void); + +int dm_snapshot_init(void); +void dm_snapshot_exit(void); + + +/* + * Init functions for the user interface to device-mapper. At + * the moment an ioctl interface on a special char device is + * used. A filesystem based interface would be a nicer way to + * go. 
+ */ +int __init dm_interface_init(void); +void dm_interface_exit(void); + + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm-ioctl.c linux.22-ac2/drivers/md/dm-ioctl.c --- linux.vanilla/drivers/md/dm-ioctl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm-ioctl.c 2003-06-29 16:10:32.000000000 +0100 @@ -0,0 +1,830 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include +#include + +/*----------------------------------------------------------------- + * Implementation of the ioctl commands + *---------------------------------------------------------------*/ + +/* + * All the ioctl commands get dispatched to functions with this + * prototype. + */ +typedef int (*ioctl_fn)(struct dm_ioctl *param, struct dm_ioctl *user); + +/* + * This is really a debug only call. + */ +static int remove_all(struct dm_ioctl *param, struct dm_ioctl *user) +{ + dm_destroy_all(); + return 0; +} + +/* + * Check a string doesn't overrun the chunk of + * memory we copied from userland. + */ +static int valid_str(char *str, void *begin, void *end) +{ + while (((void *) str >= begin) && ((void *) str < end)) + if (!*str++) + return 0; + + return -EINVAL; +} + +static int next_target(struct dm_target_spec *last, uint32_t next, + void *begin, void *end, + struct dm_target_spec **spec, char **params) +{ + *spec = (struct dm_target_spec *) + ((unsigned char *) last + next); + *params = (char *) (*spec + 1); + + if (*spec < (last + 1) || ((void *) *spec > end)) + return -EINVAL; + + return valid_str(*params, begin, end); +} + +/* + * Checks to see if there's a gap in the table. + * Returns true iff there is a gap. + */ +static int gap(struct dm_table *table, struct dm_target_spec *spec) +{ + if (!table->num_targets) + return (spec->sector_start > 0) ? 
1 : 0; + + if (spec->sector_start != table->highs[table->num_targets - 1] + 1) + return 1; + + return 0; +} + +static int populate_table(struct dm_table *table, struct dm_ioctl *args) +{ + int i = 0, r, first = 1, argc; + struct dm_target_spec *spec; + char *params, *argv[MAX_ARGS]; + struct target_type *ttype; + void *context, *begin, *end; + offset_t highs = 0; + + if (!args->target_count) { + DMWARN("populate_table: no targets specified"); + return -EINVAL; + } + + begin = (void *) args; + end = begin + args->data_size; + +#define PARSE_ERROR(msg) {DMWARN(msg); return -EINVAL;} + + for (i = 0; i < args->target_count; i++) { + + if (first) + r = next_target((struct dm_target_spec *) args, + args->data_start, + begin, end, &spec, ¶ms); + else + r = next_target(spec, spec->next, begin, end, + &spec, ¶ms); + + if (r) + PARSE_ERROR("unable to find target"); + + /* Look up the target type */ + ttype = dm_get_target_type(spec->target_type); + if (!ttype) + PARSE_ERROR("unable to find target type"); + + if (gap(table, spec)) + PARSE_ERROR("gap in target ranges"); + + /* Split up the parameter list */ + if (split_args(MAX_ARGS, &argc, argv, params) < 0) + PARSE_ERROR("Too many arguments"); + + /* Build the target */ + if (ttype->ctr(table, spec->sector_start, spec->length, + argc, argv, &context)) { + DMWARN("%s: target constructor failed", + (char *) context); + return -EINVAL; + } + + /* Add the target to the table */ + highs = spec->sector_start + (spec->length - 1); + if (dm_table_add_target(table, highs, ttype, context)) + PARSE_ERROR("internal error adding target to table"); + + first = 0; + } + +#undef PARSE_ERROR + + r = dm_table_complete(table); + return r; +} + +/* + * Round up the ptr to the next 'align' boundary. Obviously + * 'align' must be a power of 2. + */ +static inline void *align_ptr(void *ptr, unsigned int align) +{ + align--; + return (void *) (((unsigned long) (ptr + align)) & ~align); +} + +/* + * Copies a dm_ioctl and an optional additional payload to + * userland. + */ +static int results_to_user(struct dm_ioctl *user, struct dm_ioctl *param, + void *data, uint32_t len) +{ + int r; + void *ptr = NULL; + + if (data) { + ptr = align_ptr(user + 1, sizeof(unsigned long)); + param->data_start = ptr - (void *) user; + } + + /* + * The version number has already been filled in, so we + * just copy later fields. + */ + r = copy_to_user(&user->data_size, ¶m->data_size, + sizeof(*param) - sizeof(param->version)); + if (r) + return -EFAULT; + + if (data) { + if (param->data_start + len > param->data_size) + return -ENOSPC; + + if (copy_to_user(ptr, data, len)) + r = -EFAULT; + } + + return r; +} + +/* + * Fills in a dm_ioctl structure, ready for sending back to + * userland. + */ +static void __info(struct mapped_device *md, struct dm_ioctl *param) +{ + param->flags = DM_EXISTS_FLAG; + if (md->suspended) + param->flags |= DM_SUSPEND_FLAG; + if (md->read_only) + param->flags |= DM_READONLY_FLAG; + + strncpy(param->name, md->name, sizeof(param->name)); + + if (md->uuid) + strncpy(param->uuid, md->uuid, sizeof(param->uuid) - 1); + else + param->uuid[0] = '\0'; + + param->open_count = md->use_count; + param->dev = kdev_t_to_nr(md->dev); + param->target_count = md->map->num_targets; +} + +/* + * Always use UUID for lookups if it's present, otherwise use name. + */ +static inline char *lookup_name(struct dm_ioctl *param) +{ + return (*param->uuid) ? param->uuid : param->name; +} + +static inline int lookup_type(struct dm_ioctl *param) +{ + return (*param->uuid) ? 
DM_LOOKUP_BY_UUID : DM_LOOKUP_BY_NAME; +} + +#define ALIGNMENT sizeof(int) +static void *_align(void *ptr, unsigned int a) +{ + register unsigned long align = --a; + + return (void *) (((unsigned long) ptr + align) & ~align); +} + +/* + * Copies device info back to user space, used by + * the create and info ioctls. + */ +static int info(struct dm_ioctl *param, struct dm_ioctl *user) +{ + struct mapped_device *md; + + param->flags = 0; + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + /* + * Device not found - returns cleared exists flag. + */ + goto out; + + __info(md, param); + dm_put_r(md); + + out: + return results_to_user(user, param, NULL, 0); +} + +static inline int get_mode(struct dm_ioctl *param) +{ + int mode = FMODE_READ | FMODE_WRITE; + + if (param->flags & DM_READONLY_FLAG) + mode = FMODE_READ; + + return mode; +} + +static int create(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r, ro; + struct dm_table *t; + int minor; + + r = dm_table_create(&t, get_mode(param)); + if (r) + return r; + + r = populate_table(t, param); + if (r) { + dm_table_destroy(t); + return r; + } + + minor = (param->flags & DM_PERSISTENT_DEV_FLAG) ? + MINOR(to_kdev_t(param->dev)) : -1; + + ro = (param->flags & DM_READONLY_FLAG) ? 1 : 0; + + r = dm_create(param->name, param->uuid, minor, ro, t); + if (r) { + dm_table_destroy(t); + return r; + } + + r = info(param, user); + return r; +} + + + +/* + * Build up the status struct for each target + */ +static int __status(struct mapped_device *md, struct dm_ioctl *param, + char *outbuf, int *len) +{ + int i; + struct dm_target_spec *spec; + uint64_t sector = 0LL; + char *outptr; + status_type_t type; + + if (param->flags & DM_STATUS_TABLE_FLAG) + type = STATUSTYPE_TABLE; + else + type = STATUSTYPE_INFO; + + outptr = outbuf; + + /* Get all the target info */ + for (i = 0; i < md->map->num_targets; i++) { + struct target_type *tt = md->map->targets[i].type; + offset_t high = md->map->highs[i]; + + if (outptr - outbuf + + sizeof(struct dm_target_spec) > param->data_size) + return -ENOMEM; + + spec = (struct dm_target_spec *) outptr; + + spec->status = 0; + spec->sector_start = sector; + spec->length = high - sector + 1; + strncpy(spec->target_type, tt->name, sizeof(spec->target_type)); + + outptr += sizeof(struct dm_target_spec); + + /* Get the status/table string from the target driver */ + if (tt->status) + tt->status(type, outptr, + outbuf + param->data_size - outptr, + md->map->targets[i].private); + else + outptr[0] = '\0'; + + outptr += strlen(outptr) + 1; + _align(outptr, ALIGNMENT); + + sector = high + 1; + + spec->next = outptr - outbuf; + } + + param->target_count = md->map->num_targets; + *len = outptr - outbuf; + + return 0; +} + +/* + * Return the status of a device as a text string for each + * target. + */ +static int get_status(struct dm_ioctl *param, struct dm_ioctl *user) +{ + struct mapped_device *md; + int len = 0; + int ret; + char *outbuf = NULL; + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + /* + * Device not found - returns cleared exists flag. + */ + goto out; + + /* We haven't a clue how long the resultant data will be so + just allocate as much as userland has allowed us and make sure + we don't overun it */ + outbuf = kmalloc(param->data_size, GFP_KERNEL); + if (!outbuf) + goto out; + /* + * Get the status of all targets + */ + __status(md, param, outbuf, &len); + + /* + * Setup the basic dm_ioctl structure. 
+ */ + __info(md, param); + + out: + if (md) + dm_put_r(md); + + ret = results_to_user(user, param, outbuf, len); + + if (outbuf) + kfree(outbuf); + + return ret; +} + +/* + * Wait for a device to report an event + */ +static int wait_device_event(struct dm_ioctl *param, struct dm_ioctl *user) +{ + struct mapped_device *md; + DECLARE_WAITQUEUE(wq, current); + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + /* + * Device not found - returns cleared exists flag. + */ + goto out; + /* + * Setup the basic dm_ioctl structure. + */ + __info(md, param); + + /* + * Wait for a notification event + */ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&md->map->eventq, &wq); + + dm_put_r(md); + + schedule(); + set_current_state(TASK_RUNNING); + + out: + return results_to_user(user, param, NULL, 0); +} + +/* + * Retrieves a list of devices used by a particular dm device. + */ +static int dep(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int count, r; + struct mapped_device *md; + struct list_head *tmp; + size_t len = 0; + struct dm_target_deps *deps = NULL; + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + goto out; + + /* + * Setup the basic dm_ioctl structure. + */ + __info(md, param); + + /* + * Count the devices. + */ + count = 0; + list_for_each(tmp, &md->map->devices) + count++; + + /* + * Allocate a kernel space version of the dm_target_status + * struct. + */ + if (array_too_big(sizeof(*deps), sizeof(*deps->dev), count)) { + dm_put_r(md); + return -ENOMEM; + } + + len = sizeof(*deps) + (sizeof(*deps->dev) * count); + deps = kmalloc(len, GFP_KERNEL); + if (!deps) { + dm_put_r(md); + return -ENOMEM; + } + + /* + * Fill in the devices. + */ + deps->count = count; + count = 0; + list_for_each(tmp, &md->map->devices) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + deps->dev[count++] = kdev_t_to_nr(dd->dev); + } + dm_put_r(md); + + out: + r = results_to_user(user, param, deps, len); + + kfree(deps); + return r; +} + +static int remove(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r; + struct mapped_device *md; + + md = dm_get_name_w(lookup_name(param), lookup_type(param)); + if (!md) + return -ENXIO; + + r = dm_destroy(md); + dm_put_w(md); + if (!r) + kfree(md); + + return r; +} + +static int suspend(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r; + struct mapped_device *md; + + md = dm_get_name_w(lookup_name(param), lookup_type(param)); + if (!md) + return -ENXIO; + + r = (param->flags & DM_SUSPEND_FLAG) ? dm_suspend(md) : dm_resume(md); + dm_put_w(md); + + return r; +} + +static int reload(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r; + struct mapped_device *md; + struct dm_table *t; + + r = dm_table_create(&t, get_mode(param)); + if (r) + return r; + + r = populate_table(t, param); + if (r) { + dm_table_destroy(t); + return r; + } + + md = dm_get_name_w(lookup_name(param), lookup_type(param)); + if (!md) { + dm_table_destroy(t); + return -ENXIO; + } + + r = dm_swap_table(md, t); + if (r) { + dm_put_w(md); + dm_table_destroy(t); + return r; + } + + dm_set_ro(md, (param->flags & DM_READONLY_FLAG) ? 
1 : 0); + dm_put_w(md); + + r = info(param, user); + return r; +} + +static int rename(struct dm_ioctl *param, struct dm_ioctl *user) +{ + char *newname = (char *) param + param->data_start; + + if (valid_str(newname, (void *) param, + (void *) param + param->data_size) || + dm_set_name(lookup_name(param), lookup_type(param), newname)) { + DMWARN("Invalid new logical volume name supplied."); + return -EINVAL; + } + + return 0; +} + + +/*----------------------------------------------------------------- + * Implementation of open/close/ioctl on the special char + * device. + *---------------------------------------------------------------*/ +static int ctl_open(struct inode *inode, struct file *file) +{ + /* only root can open this */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + MOD_INC_USE_COUNT; + + return 0; +} + +static int ctl_close(struct inode *inode, struct file *file) +{ + MOD_DEC_USE_COUNT; + return 0; +} + +static ioctl_fn lookup_ioctl(unsigned int cmd) +{ + static struct { + int cmd; + ioctl_fn fn; + } _ioctls[] = { + {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */ + {DM_REMOVE_ALL_CMD, remove_all}, + {DM_DEV_CREATE_CMD, create}, + {DM_DEV_REMOVE_CMD, remove}, + {DM_DEV_RELOAD_CMD, reload}, + {DM_DEV_RENAME_CMD, rename}, + {DM_DEV_SUSPEND_CMD, suspend}, + {DM_DEV_DEPS_CMD, dep}, + {DM_DEV_STATUS_CMD, info}, + {DM_TARGET_STATUS_CMD, get_status}, + {DM_TARGET_WAIT_CMD, wait_device_event}, + }; + static int nelts = sizeof(_ioctls) / sizeof(*_ioctls); + + return (cmd >= nelts) ? NULL : _ioctls[cmd].fn; +} + +/* + * As well as checking the version compatibility this always + * copies the kernel interface version out. + */ +static int check_version(int cmd, struct dm_ioctl *user) +{ + uint32_t version[3]; + int r = 0; + + if (copy_from_user(version, user->version, sizeof(version))) + return -EFAULT; + + if ((DM_VERSION_MAJOR != version[0]) || + (DM_VERSION_MINOR < version[1])) { + DMWARN("ioctl interface mismatch: " + "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", + DM_VERSION_MAJOR, DM_VERSION_MINOR, + DM_VERSION_PATCHLEVEL, + version[0], version[1], version[2], cmd); + r = -EINVAL; + } + + /* + * Fill in the kernel version. 
+ */ + version[0] = DM_VERSION_MAJOR; + version[1] = DM_VERSION_MINOR; + version[2] = DM_VERSION_PATCHLEVEL; + if (copy_to_user(user->version, version, sizeof(version))) + return -EFAULT; + + return r; +} + +static void free_params(struct dm_ioctl *param) +{ + vfree(param); +} + +static int copy_params(struct dm_ioctl *user, struct dm_ioctl **param) +{ + struct dm_ioctl tmp, *dmi; + + if (copy_from_user(&tmp, user, sizeof(tmp))) + return -EFAULT; + + if (tmp.data_size < sizeof(tmp) || tmp.data_size > 65536) + return -EINVAL; + + dmi = (struct dm_ioctl *) vmalloc(tmp.data_size); + if (!dmi) + return -ENOMEM; + + if (copy_from_user(dmi, user, tmp.data_size)) { + vfree(dmi); + return -EFAULT; + } + + *param = dmi; + return 0; +} + +static int validate_params(uint cmd, struct dm_ioctl *param) +{ + /* Ignores parameters */ + if (cmd == DM_REMOVE_ALL_CMD) + return 0; + + /* Unless creating, either name or uuid but not both */ + if (cmd != DM_DEV_CREATE_CMD) { + if ((!*param->uuid && !*param->name) || + (*param->uuid && *param->name)) { + DMWARN("one of name or uuid must be supplied"); + return -EINVAL; + } + } + + /* Ensure strings are terminated */ + param->name[DM_NAME_LEN - 1] = '\0'; + param->uuid[DM_UUID_LEN - 1] = '\0'; + + return 0; +} + +static int ctl_ioctl(struct inode *inode, struct file *file, + uint command, ulong u) +{ + + int r = 0, cmd; + struct dm_ioctl *param; + struct dm_ioctl *user = (struct dm_ioctl *) u; + ioctl_fn fn = NULL; + + if (_IOC_TYPE(command) != DM_IOCTL) + return -ENOTTY; + + cmd = _IOC_NR(command); + + /* + * Check the interface version passed in. This also + * writes out the kernel's interface version. + */ + r = check_version(cmd, user); + if (r) + return r; + + /* + * Nothing more to do for the version command. + */ + if (cmd == DM_VERSION_CMD) + return 0; + + fn = lookup_ioctl(cmd); + if (!fn) { + DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); + return -ENOTTY; + } + + /* + * Copy the parameters into kernel space.
+ */ + r = copy_params(user, ¶m); + if (r) + return r; + + r = validate_params(cmd, param); + if (r) { + free_params(param); + return r; + } + + r = fn(param, user); + free_params(param); + return r; +} + +static struct file_operations _ctl_fops = { + open: ctl_open, + release: ctl_close, + ioctl: ctl_ioctl, + owner: THIS_MODULE, +}; + +static devfs_handle_t _ctl_handle; + +static struct miscdevice _dm_misc = { + minor: MISC_DYNAMIC_MINOR, + name: DM_NAME, + fops: &_ctl_fops +}; + +static int __init dm_devfs_init(void) { + int r; + char rname[64]; + + r = devfs_generate_path(_dm_misc.devfs_handle, rname + 3, + sizeof rname - 3); + if (r == -ENOSYS) + return 0; /* devfs not present */ + + if (r < 0) { + DMERR("devfs_generate_path failed for control device"); + return r; + } + + strncpy(rname + r, "../", 3); + r = devfs_mk_symlink(NULL, DM_DIR "/control", + DEVFS_FL_DEFAULT, rname + r, &_ctl_handle, NULL); + if (r) { + DMERR("devfs_mk_symlink failed for control device"); + return r; + } + devfs_auto_unregister(_dm_misc.devfs_handle, _ctl_handle); + + return 0; +} + +/* Create misc character device and link to DM_DIR/control */ +int __init dm_interface_init(void) +{ + int r; + + r = misc_register(&_dm_misc); + if (r) { + DMERR("misc_register failed for control device"); + return r; + } + + r = dm_devfs_init(); + if (r) { + misc_deregister(&_dm_misc); + return r; + } + + DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, + DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, + DM_DRIVER_EMAIL); + + return 0; +} + +void dm_interface_exit(void) +{ + if (misc_deregister(&_dm_misc) < 0) + DMERR("misc_deregister failed for control device"); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm-linear.c linux.22-ac2/drivers/md/dm-linear.c --- linux.vanilla/drivers/md/dm-linear.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm-linear.c 2003-06-29 16:10:32.000000000 +0100 @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include + +/* + * Linear: maps a linear range of a device. 
+ */ +struct linear_c { + long delta; /* FIXME: we need a signed offset type */ + long start; /* For display only */ + struct dm_dev *dev; +}; + +/* + * Construct a linear mapping: + */ +static int linear_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + struct linear_c *lc; + unsigned long start; /* FIXME: unsigned long long */ + char *end; + + if (argc != 2) { + *context = "dm-linear: Not enough arguments"; + return -EINVAL; + } + + lc = kmalloc(sizeof(*lc), GFP_KERNEL); + if (lc == NULL) { + *context = "dm-linear: Cannot allocate linear context"; + return -ENOMEM; + } + + start = simple_strtoul(argv[1], &end, 10); + if (*end) { + *context = "dm-linear: Invalid device sector"; + goto bad; + } + + if (dm_table_get_device(t, argv[0], start, l, t->mode, &lc->dev)) { + *context = "dm-linear: Device lookup failed"; + goto bad; + } + + lc->delta = (int) start - (int) b; + lc->start = start; + *context = lc; + return 0; + + bad: + kfree(lc); + return -EINVAL; +} + +static void linear_dtr(struct dm_table *t, void *c) +{ + struct linear_c *lc = (struct linear_c *) c; + + dm_table_put_device(t, lc->dev); + kfree(c); +} + +static int linear_map(struct buffer_head *bh, int rw, void *context) +{ + struct linear_c *lc = (struct linear_c *) context; + + bh->b_rdev = lc->dev->dev; + bh->b_rsector = bh->b_rsector + lc->delta; + + return 1; +} + +static int linear_status(status_type_t type, char *result, int maxlen, + void *context) +{ + struct linear_c *lc = (struct linear_c *) context; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + snprintf(result, maxlen, "%s %ld", kdevname(lc->dev->dev), + lc->start); + break; + } + return 0; +} + +static struct target_type linear_target = { + name: "linear", + module: THIS_MODULE, + ctr: linear_ctr, + dtr: linear_dtr, + map: linear_map, + status: linear_status, +}; + +int __init dm_linear_init(void) +{ + int r = dm_register_target(&linear_target); + + if (r < 0) + DMERR("linear: register failed %d", r); + + return r; +} + +void dm_linear_exit(void) +{ + int r = dm_unregister_target(&linear_target); + + if (r < 0) + DMERR("linear: unregister failed %d", r); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm-snapshot.c linux.22-ac2/drivers/md/dm-snapshot.c --- linux.vanilla/drivers/md/dm-snapshot.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm-snapshot.c 2003-06-29 16:10:32.000000000 +0100 @@ -0,0 +1,1169 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dm-snapshot.h" +#include "kcopyd.h" + +/* + * FIXME: Remove this before release. + */ +#if 0 +#define DMDEBUG(x...) DMWARN( ## x) +#else +#define DMDEBUG(x...) +#endif + +/* + * The percentage increment we will wake up users at + */ +#define WAKE_UP_PERCENT 5 + +/* + * Hard sector size used all over the kernel + */ +#define SECTOR_SIZE 512 + +/* + * kcopyd priority of snapshot operations + */ +#define SNAPSHOT_COPY_PRIORITY 2 + +struct pending_exception { + struct exception e; + + /* + * Origin buffers waiting for this to complete are held + * in a list (using b_reqnext). + */ + struct buffer_head *origin_bhs; + struct buffer_head *snapshot_bhs; + + /* + * Other pending_exceptions that are processing this + * chunk. 
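For illustration (not part of the patch): linear_ctr() above stores delta = start - b, the backing-device start minus the target's own start, and linear_map() simply adds that delta to every incoming sector. A tiny user-space sketch of the arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Made-up example geometry. */
	long b = 0;		/* where this target begins in the mapped device */
	long start = 1024;	/* first sector used on the backing device */
	long delta = start - b;	/* what linear_ctr() stores in lc->delta */

	long rsector = 300;			/* incoming request sector */
	long remapped = rsector + delta;	/* what linear_map() computes */

	printf("sector %ld -> backing-device sector %ld\n", rsector, remapped);
	return 0;
}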
When this list is empty, we know we can + * complete the origins. + */ + struct list_head siblings; + + /* Pointer back to snapshot context */ + struct dm_snapshot *snap; + + /* + * 1 indicates the exception has already been sent to + * kcopyd. + */ + int started; +}; + +/* + * Hash table mapping origin volumes to lists of snapshots and + * a lock to protect it + */ +static kmem_cache_t *exception_cache; +static kmem_cache_t *pending_cache; +static mempool_t *pending_pool; + +/* + * One of these per registered origin, held in the snapshot_origins hash + */ +struct origin { + /* The origin device */ + kdev_t dev; + + struct list_head hash_list; + + /* List of snapshots for this origin */ + struct list_head snapshots; +}; + +/* + * Size of the hash table for origin volumes. If we make this + * the size of the minors list then it should be nearly perfect + */ +#define ORIGIN_HASH_SIZE 256 +#define ORIGIN_MASK 0xFF +static struct list_head *_origins; +static struct rw_semaphore _origins_lock; + +static int init_origin_hash(void) +{ + int i; + + _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), + GFP_KERNEL); + if (!_origins) { + DMERR("Device mapper: Snapshot: unable to allocate memory"); + return -ENOMEM; + } + + for (i = 0; i < ORIGIN_HASH_SIZE; i++) + INIT_LIST_HEAD(_origins + i); + init_rwsem(&_origins_lock); + + return 0; +} + +static void exit_origin_hash(void) +{ + kfree(_origins); +} + +static inline unsigned int origin_hash(kdev_t dev) +{ + return MINOR(dev) & ORIGIN_MASK; +} + +static struct origin *__lookup_origin(kdev_t origin) +{ + struct list_head *slist; + struct list_head *ol; + struct origin *o; + + ol = &_origins[origin_hash(origin)]; + list_for_each(slist, ol) { + o = list_entry(slist, struct origin, hash_list); + + if (o->dev == origin) + return o; + } + + return NULL; +} + +static void __insert_origin(struct origin *o) +{ + struct list_head *sl = &_origins[origin_hash(o->dev)]; + list_add_tail(&o->hash_list, sl); +} + +/* + * Make a note of the snapshot and its origin so we can look it + * up when the origin has a write on it. + */ +static int register_snapshot(struct dm_snapshot *snap) +{ + struct origin *o; + kdev_t dev = snap->origin->dev; + + down_write(&_origins_lock); + o = __lookup_origin(dev); + + if (!o) { + /* New origin */ + o = kmalloc(sizeof(*o), GFP_KERNEL); + if (!o) { + up_write(&_origins_lock); + return -ENOMEM; + } + + /* Initialise the struct */ + INIT_LIST_HEAD(&o->snapshots); + o->dev = dev; + + __insert_origin(o); + } + + list_add_tail(&snap->list, &o->snapshots); + + up_write(&_origins_lock); + return 0; +} + +static void unregister_snapshot(struct dm_snapshot *s) +{ + struct origin *o; + + down_write(&_origins_lock); + o = __lookup_origin(s->origin->dev); + + list_del(&s->list); + if (list_empty(&o->snapshots)) { + list_del(&o->hash_list); + kfree(o); + } + + up_write(&_origins_lock); +} + +/* + * Implementation of the exception hash tables. 
+ */ +static int init_exception_table(struct exception_table *et, uint32_t size) +{ + int i; + + et->hash_mask = size - 1; + et->table = vcalloc(size, sizeof(struct list_head)); + if (!et->table) + return -ENOMEM; + + for (i = 0; i < size; i++) + INIT_LIST_HEAD(et->table + i); + + return 0; +} + +static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem) +{ + struct list_head *slot, *entry, *temp; + struct exception *ex; + int i, size; + + size = et->hash_mask + 1; + for (i = 0; i < size; i++) { + slot = et->table + i; + + list_for_each_safe(entry, temp, slot) { + ex = list_entry(entry, struct exception, hash_list); + kmem_cache_free(mem, ex); + } + } + + vfree(et->table); +} + +/* + * FIXME: check how this hash fn is performing. + */ +static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk) +{ + return chunk & et->hash_mask; +} + +static void insert_exception(struct exception_table *eh, struct exception *e) +{ + struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; + list_add(&e->hash_list, l); +} + +static inline void remove_exception(struct exception *e) +{ + list_del(&e->hash_list); +} + +/* + * Return the exception data for a sector, or NULL if not + * remapped. + */ +static struct exception *lookup_exception(struct exception_table *et, + chunk_t chunk) +{ + struct list_head *slot, *el; + struct exception *e; + + slot = &et->table[exception_hash(et, chunk)]; + list_for_each(el, slot) { + e = list_entry(el, struct exception, hash_list); + if (e->old_chunk == chunk) + return e; + } + + return NULL; +} + +static inline struct exception *alloc_exception(void) +{ + struct exception *e; + + e = kmem_cache_alloc(exception_cache, GFP_NOIO); + if (!e) + e = kmem_cache_alloc(exception_cache, GFP_ATOMIC); + + return e; +} + +static inline void free_exception(struct exception *e) +{ + kmem_cache_free(exception_cache, e); +} + +static inline struct pending_exception *alloc_pending_exception(void) +{ + return mempool_alloc(pending_pool, GFP_NOIO); +} + +static inline void free_pending_exception(struct pending_exception *pe) +{ + mempool_free(pe, pending_pool); +} + +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new) +{ + struct exception *e; + + e = alloc_exception(); + if (!e) + return -ENOMEM; + + e->old_chunk = old; + e->new_chunk = new; + insert_exception(&s->complete, e); + return 0; +} + +/* + * Hard coded magic. + */ +static int calc_max_buckets(void) +{ + unsigned long mem; + + mem = num_physpages << PAGE_SHIFT; + mem /= 50; + mem /= sizeof(struct list_head); + + return mem; +} + +/* + * Rounds a number down to a power of 2. + */ +static inline uint32_t round_down(uint32_t n) +{ + while (n & (n - 1)) + n &= (n - 1); + return n; +} + +/* + * Allocate room for a suitable hash table. + */ +static int init_hash_tables(struct dm_snapshot *s) +{ + offset_t hash_size, cow_dev_size, origin_dev_size, max_buckets; + + /* + * Calculate based on the size of the original volume or + * the COW volume... 
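A quick aside on round_down() above: it clears the lowest set bit until only one bit remains, i.e. it rounds down to a power of two, which is what keeps the "hash_mask = size - 1" trick in exception_hash() valid. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

/* Same loop as round_down() above: strip low bits until a power of 2 is left. */
static uint32_t round_down_pow2(uint32_t n)
{
	while (n & (n - 1))
		n &= (n - 1);
	return n;
}

int main(void)
{
	uint32_t sizes[] = { 1, 3, 100, 1000, 65535 };
	unsigned i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uint32_t size = round_down_pow2(sizes[i]);
		/* size - 1 is a valid bit mask only because size is a power of 2 */
		printf("%u -> size %u, mask 0x%x\n", sizes[i], size, size - 1);
	}
	return 0;
}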
+ */ + cow_dev_size = get_dev_size(s->cow->dev); + origin_dev_size = get_dev_size(s->origin->dev); + max_buckets = calc_max_buckets(); + + hash_size = min(origin_dev_size, cow_dev_size) / s->chunk_size; + hash_size = min(hash_size, max_buckets); + + /* Round it down to a power of 2 */ + hash_size = round_down(hash_size); + if (init_exception_table(&s->complete, hash_size)) + return -ENOMEM; + + /* + * Allocate hash table for in-flight exceptions + * Make this smaller than the real hash table + */ + hash_size >>= 3; + if (!hash_size) + hash_size = 64; + + if (init_exception_table(&s->pending, hash_size)) { + exit_exception_table(&s->complete, exception_cache); + return -ENOMEM; + } + + return 0; +} + +/* + * Round a number up to the nearest 'size' boundary. size must + * be a power of 2. + */ +static inline ulong round_up(ulong n, ulong size) +{ + size--; + return (n + size) & ~size; +} + +/* + * Construct a snapshot mapping:
+ * <origin_dev> <COW-dev> <p/n> <chunk-size>
+ */ +static int snapshot_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + struct dm_snapshot *s; + unsigned long chunk_size; + int r = -EINVAL; + char *persistent; + char *origin_path; + char *cow_path; + char *value; + int blocksize; + + if (argc < 4) { + *context = "dm-snapshot: requires exactly 4 arguments"; + r = -EINVAL; + goto bad; + } + + origin_path = argv[0]; + cow_path = argv[1]; + persistent = argv[2]; + + if ((*persistent & 0x5f) != 'P' && (*persistent & 0x5f) != 'N') { + *context = "Persistent flag is not P or N"; + r = -EINVAL; + goto bad; + } + + chunk_size = simple_strtoul(argv[3], &value, 10); + if (chunk_size == 0 || value == NULL) { + *context = "Invalid chunk size"; + r = -EINVAL; + goto bad; + } + + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) { + *context = "Cannot allocate snapshot context private structure"; + r = -ENOMEM; + goto bad; + } + + r = dm_table_get_device(t, origin_path, 0, 0, FMODE_READ, &s->origin); + if (r) { + *context = "Cannot get origin device"; + goto bad_free; + } + + r = dm_table_get_device(t, cow_path, 0, 0, + FMODE_READ | FMODE_WRITE, &s->cow); + if (r) { + dm_table_put_device(t, s->origin); + *context = "Cannot get COW device"; + goto bad_free; + } + + /* + * Chunk size must be multiple of page size. Silently + * round up if it's not. + */ + chunk_size = round_up(chunk_size, PAGE_SIZE / SECTOR_SIZE); + + /* Validate the chunk size against the device block size */ + blocksize = get_hardsect_size(s->cow->dev); + if (chunk_size % (blocksize / SECTOR_SIZE)) { + *context = "Chunk size is not a multiple of device blocksize"; + r = -EINVAL; + goto bad_putdev; + } + + /* Check the sizes are small enough to fit in one kiovec */ + if (chunk_size > KIO_MAX_SECTORS) { + *context = "Chunk size is too big"; + r = -EINVAL; + goto bad_putdev; + } + + /* Check chunk_size is a power of 2 */ + if (chunk_size & (chunk_size - 1)) { + *context = "Chunk size is not a power of 2"; + r = -EINVAL; + goto bad_putdev; + } + + s->chunk_size = chunk_size; + s->chunk_mask = chunk_size - 1; + s->type = *persistent; + for (s->chunk_shift = 0; chunk_size; + s->chunk_shift++, chunk_size >>= 1) + ; + s->chunk_shift--; + + s->valid = 1; + s->last_percent = 0; + s->table = t; + init_rwsem(&s->lock); + + /* Allocate hash table for COW data */ + if (init_hash_tables(s)) { + *context = "Unable to allocate hash table space"; + r = -ENOMEM; + goto bad_putdev; + } + + /* + * Check the persistent flag - done here because we need the iobuf + * to check the LV header + */ + s->store.snap = s; + + if ((*persistent & 0x5f) == 'P') + r = dm_create_persistent(&s->store, s->chunk_size); + else + r = dm_create_transient(&s->store, s, blocksize, context); + + if (r) { + *context = "Couldn't create exception store"; + r = -EINVAL; + goto bad_free1; + } + + /* Flush IO to the origin device */ +#if LVM_VFS_ENHANCEMENT + fsync_dev_lockfs(s->origin->dev); +#else + fsync_dev(s->origin->dev); +#endif + + /* Add snapshot to the list of snapshots for this origin */ + if (register_snapshot(s)) { + r = -EINVAL; + *context = "Cannot register snapshot origin"; + goto bad_free2; + } +#if LVM_VFS_ENHANCEMENT + unlockfs(s->origin->dev); +#endif + kcopyd_inc_client_count(); + + *context = s; + return 0; + + bad_free2: + s->store.destroy(&s->store); + + bad_free1: + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + bad_putdev: + dm_table_put_device(t, s->cow); + dm_table_put_device(t, s->origin); + + 
bad_free: + kfree(s); + + bad: + return r; +} + +static void snapshot_dtr(struct dm_table *t, void *context) +{ + struct dm_snapshot *s = (struct dm_snapshot *) context; + + dm_table_event(s->table); + + unregister_snapshot(s); + + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + /* Deallocate memory used */ + s->store.destroy(&s->store); + + dm_table_put_device(t, s->origin); + dm_table_put_device(t, s->cow); + kfree(s); + + kcopyd_dec_client_count(); +} + +/* + * We hold lists of buffer_heads, using the b_reqnext field. + */ +static void queue_buffer(struct buffer_head **queue, struct buffer_head *bh) +{ + bh->b_reqnext = *queue; + *queue = bh; +} + +/* + * Flush a list of buffers. + */ +static void flush_buffers(struct buffer_head *bh) +{ + struct buffer_head *n; + + DMDEBUG("begin flush"); + while (bh) { + n = bh->b_reqnext; + bh->b_reqnext = NULL; + DMDEBUG("flushing %p", bh); + generic_make_request(WRITE, bh); + bh = n; + } + + run_task_queue(&tq_disk); +} + +/* + * Error a list of buffers. + */ +static void error_buffers(struct buffer_head *bh) +{ + struct buffer_head *n; + + while (bh) { + n = bh->b_reqnext; + bh->b_reqnext = NULL; + buffer_IO_error(bh); + bh = n; + } +} + +static void pending_complete(struct pending_exception *pe, int success) +{ + struct exception *e; + struct dm_snapshot *s = pe->snap; + + if (success) { + e = alloc_exception(); + if (!e) { + printk("Unable to allocate exception."); + down_write(&s->lock); + s->store.drop_snapshot(&s->store); + s->valid = 0; + up_write(&s->lock); + return; + } + + /* + * Add a proper exception, and remove the + * inflight exception from the list. + */ + down_write(&s->lock); + + memcpy(e, &pe->e, sizeof(*e)); + insert_exception(&s->complete, e); + remove_exception(&pe->e); + + /* Submit any pending write BHs */ + up_write(&s->lock); + + flush_buffers(pe->snapshot_bhs); + DMDEBUG("Exception completed successfully."); + + /* Notify any interested parties */ + if (s->store.percent_full) { + int pc = s->store.percent_full(&s->store); + + if (pc >= s->last_percent + WAKE_UP_PERCENT) { + dm_table_event(s->table); + s->last_percent = pc - pc % WAKE_UP_PERCENT; + } + } + + } else { + /* Read/write error - snapshot is unusable */ + DMERR("Error reading/writing snapshot"); + + down_write(&s->lock); + s->store.drop_snapshot(&s->store); + s->valid = 0; + remove_exception(&pe->e); + up_write(&s->lock); + + error_buffers(pe->snapshot_bhs); + + dm_table_event(s->table); + DMDEBUG("Exception failed."); + } + + if (list_empty(&pe->siblings)) + flush_buffers(pe->origin_bhs); + else + list_del(&pe->siblings); + + free_pending_exception(pe); +} + +static void commit_callback(void *context, int success) +{ + struct pending_exception *pe = (struct pending_exception *) context; + pending_complete(pe, success); +} + +/* + * Called when the copy I/O has finished. kcopyd actually runs + * this code so don't block. + */ +static void copy_callback(int err, void *context) +{ + struct pending_exception *pe = (struct pending_exception *) context; + struct dm_snapshot *s = pe->snap; + + if (err) + pending_complete(pe, 0); + + else + /* Update the metadata if we are persistent */ + s->store.commit_exception(&s->store, &pe->e, commit_callback, + pe); +} + +/* + * Dispatches the copy operation to kcopyd. 
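For illustration (not part of the patch): snapshot_ctr() above rounds the chunk size to a power of two and then derives chunk_shift, its log2, with a shift-and-count loop; chunk_mask and chunk_shift drive all later sector/chunk conversions. A standalone sketch of that derivation, using a made-up chunk size:

#include <stdio.h>

int main(void)
{
	unsigned long chunk_size = 16;	/* sectors per chunk, power of 2, made up */
	unsigned long chunk_mask = chunk_size - 1;
	unsigned long n = chunk_size;
	int chunk_shift;

	/* Same loop as snapshot_ctr(): count shifts until the value empties,
	 * then subtract one, giving log2(chunk_size). */
	for (chunk_shift = 0; n; chunk_shift++, n >>= 1)
		;
	chunk_shift--;

	printf("chunk_size=%lu  chunk_mask=0x%lx  chunk_shift=%d\n",
	       chunk_size, chunk_mask, chunk_shift);
	return 0;
}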
+ */ +static inline void start_copy(struct pending_exception *pe) +{ + struct dm_snapshot *s = pe->snap; + struct kcopyd_region src, dest; + + src.dev = s->origin->dev; + src.sector = chunk_to_sector(s, pe->e.old_chunk); + src.count = s->chunk_size; + + dest.dev = s->cow->dev; + dest.sector = chunk_to_sector(s, pe->e.new_chunk); + dest.count = s->chunk_size; + + if (!pe->started) { + /* Hand over to kcopyd */ + kcopyd_copy(&src, &dest, copy_callback, pe); + pe->started = 1; + } +} + +/* + * Looks to see if this snapshot already has a pending exception + * for this chunk, otherwise it allocates a new one and inserts + * it into the pending table. + */ +static struct pending_exception *find_pending_exception(struct dm_snapshot *s, + struct buffer_head *bh) +{ + struct exception *e; + struct pending_exception *pe; + chunk_t chunk = sector_to_chunk(s, bh->b_rsector); + + /* + * Is there a pending exception for this already ? + */ + e = lookup_exception(&s->pending, chunk); + if (e) { + /* cast the exception to a pending exception */ + pe = list_entry(e, struct pending_exception, e); + + } else { + /* Create a new pending exception */ + pe = alloc_pending_exception(); + if (!pe) { + DMWARN("Couldn't allocate pending exception."); + return NULL; + } + + pe->e.old_chunk = chunk; + pe->origin_bhs = pe->snapshot_bhs = NULL; + INIT_LIST_HEAD(&pe->siblings); + pe->snap = s; + pe->started = 0; + + if (s->store.prepare_exception(&s->store, &pe->e)) { + free_pending_exception(pe); + s->valid = 0; + return NULL; + } + + insert_exception(&s->pending, &pe->e); + } + + return pe; +} + +static inline void remap_exception(struct dm_snapshot *s, struct exception *e, + struct buffer_head *bh) +{ + bh->b_rdev = s->cow->dev; + bh->b_rsector = chunk_to_sector(s, e->new_chunk) + + (bh->b_rsector & s->chunk_mask); +} + +static int snapshot_map(struct buffer_head *bh, int rw, void *context) +{ + struct exception *e; + struct dm_snapshot *s = (struct dm_snapshot *) context; + int r = 1; + chunk_t chunk; + struct pending_exception *pe; + + chunk = sector_to_chunk(s, bh->b_rsector); + + /* Full snapshots are not usable */ + if (!s->valid) + return -1; + + /* + * Write to snapshot - higher level takes care of RW/RO + * flags so we should only get this if we are + * writeable. + */ + if (rw == WRITE) { + + down_write(&s->lock); + + /* If the block is already remapped - use that, else remap it */ + e = lookup_exception(&s->complete, chunk); + if (e) + remap_exception(s, e, bh); + + else { + pe = find_pending_exception(s, bh); + + if (!pe) { + s->store.drop_snapshot(&s->store); + s->valid = 0; + } + + queue_buffer(&pe->snapshot_bhs, bh); + start_copy(pe); + r = 0; + } + + up_write(&s->lock); + + } else { + /* + * FIXME: this read path scares me because we + * always use the origin when we have a pending + * exception. However I can't think of a + * situation where this is wrong - ejt. 
+ */ + + /* Do reads */ + down_read(&s->lock); + + /* See if it it has been remapped */ + e = lookup_exception(&s->complete, chunk); + if (e) + remap_exception(s, e, bh); + else + bh->b_rdev = s->origin->dev; + + up_read(&s->lock); + } + + return r; +} + +static void list_merge(struct list_head *l1, struct list_head *l2) +{ + struct list_head *l1_n, *l2_p; + + l1_n = l1->next; + l2_p = l2->prev; + + l1->next = l2; + l2->prev = l1; + + l2_p->next = l1_n; + l1_n->prev = l2_p; +} + +static int __origin_write(struct list_head *snapshots, struct buffer_head *bh) +{ + int r = 1; + struct list_head *sl; + struct dm_snapshot *snap; + struct exception *e; + struct pending_exception *pe, *last = NULL; + chunk_t chunk; + + /* Do all the snapshots on this origin */ + list_for_each(sl, snapshots) { + snap = list_entry(sl, struct dm_snapshot, list); + + /* Only deal with valid snapshots */ + if (!snap->valid) + continue; + + down_write(&snap->lock); + + /* + * Remember, different snapshots can have + * different chunk sizes. + */ + chunk = sector_to_chunk(snap, bh->b_rsector); + + /* + * Check exception table to see if block + * is already remapped in this snapshot + * and trigger an exception if not. + */ + e = lookup_exception(&snap->complete, chunk); + if (!e) { + pe = find_pending_exception(snap, bh); + if (!pe) { + snap->store.drop_snapshot(&snap->store); + snap->valid = 0; + + } else { + if (last) + list_merge(&pe->siblings, + &last->siblings); + + last = pe; + r = 0; + } + } + + up_write(&snap->lock); + } + + /* + * Now that we have a complete pe list we can start the copying. + */ + if (last) { + pe = last; + do { + down_write(&pe->snap->lock); + queue_buffer(&pe->origin_bhs, bh); + start_copy(pe); + up_write(&pe->snap->lock); + pe = list_entry(pe->siblings.next, + struct pending_exception, siblings); + + } while (pe != last); + } + + return r; +} + +static int snapshot_status(status_type_t type, char *result, + int maxlen, void *context) +{ + struct dm_snapshot *snap = (struct dm_snapshot *) context; + char cow[16]; + char org[16]; + + switch (type) { + case STATUSTYPE_INFO: + if (!snap->valid) + snprintf(result, maxlen, "Invalid"); + else { + if (snap->store.percent_full) + snprintf(result, maxlen, "%d%%", + snap->store.percent_full(&snap-> + store)); + else + snprintf(result, maxlen, "Unknown"); + } + break; + + case STATUSTYPE_TABLE: + /* + * kdevname returns a static pointer so we need + * to make private copies if the output is to + * make sense. + */ + strncpy(cow, kdevname(snap->cow->dev), sizeof(cow)); + strncpy(org, kdevname(snap->origin->dev), sizeof(org)); + snprintf(result, maxlen, "%s %s %c %ld", org, cow, + snap->type, snap->chunk_size); + break; + } + + return 0; +} + +/* + * Called on a write from the origin driver. + */ +int do_origin(struct dm_dev *origin, struct buffer_head *bh) +{ + struct origin *o; + int r; + + down_read(&_origins_lock); + o = __lookup_origin(origin->dev); + if (!o) + BUG(); + + r = __origin_write(&o->snapshots, bh); + up_read(&_origins_lock); + + return r; +} + +/* + * Origin: maps a linear range of a device, with hooks for snapshotting. + */ + +/* + * Construct an origin mapping: + * The context for an origin is merely a 'struct dm_dev *' + * pointing to the real device. 
+ */ +static int origin_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + int r; + struct dm_dev *dev; + + if (argc != 1) { + *context = "dm-origin: incorrect number of arguments"; + return -EINVAL; + } + + r = dm_table_get_device(t, argv[0], 0, l, t->mode, &dev); + if (r) { + *context = "Cannot get target device"; + return r; + } + + *context = dev; + + return 0; +} + +static void origin_dtr(struct dm_table *t, void *c) +{ + struct dm_dev *dev = (struct dm_dev *) c; + dm_table_put_device(t, dev); +} + +static int origin_map(struct buffer_head *bh, int rw, void *context) +{ + struct dm_dev *dev = (struct dm_dev *) context; + bh->b_rdev = dev->dev; + + /* Only tell snapshots if this is a write */ + return (rw == WRITE) ? do_origin(dev, bh) : 1; +} + +static int origin_status(status_type_t type, char *result, + int maxlen, void *context) +{ + struct dm_dev *dev = (struct dm_dev *) context; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + snprintf(result, maxlen, "%s", kdevname(dev->dev)); + break; + } + + return 0; +} + +static struct target_type origin_target = { + name: "snapshot-origin", + module: THIS_MODULE, + ctr: origin_ctr, + dtr: origin_dtr, + map: origin_map, + status: origin_status, + err: NULL +}; + +static struct target_type snapshot_target = { + name: "snapshot", + module: THIS_MODULE, + ctr: snapshot_ctr, + dtr: snapshot_dtr, + map: snapshot_map, + status: snapshot_status, + err: NULL +}; + +int __init dm_snapshot_init(void) +{ + int r; + + r = dm_register_target(&snapshot_target); + if (r) { + DMERR("snapshot target register failed %d", r); + return r; + } + + r = dm_register_target(&origin_target); + if (r < 0) { + DMERR("Device mapper: Origin: register failed %d\n", r); + goto bad1; + } + + r = init_origin_hash(); + if (r) { + DMERR("init_origin_hash failed."); + goto bad2; + } + + exception_cache = kmem_cache_create("dm-snapshot-ex", + sizeof(struct exception), + __alignof__(struct exception), + 0, NULL, NULL); + if (!exception_cache) { + DMERR("Couldn't create exception cache."); + r = -ENOMEM; + goto bad3; + } + + pending_cache = + kmem_cache_create("dm-snapshot-in", + sizeof(struct pending_exception), + __alignof__(struct pending_exception), + 0, NULL, NULL); + if (!pending_cache) { + DMERR("Couldn't create pending cache."); + r = -ENOMEM; + goto bad4; + } + + pending_pool = mempool_create(128, mempool_alloc_slab, + mempool_free_slab, pending_cache); + if (!pending_pool) { + DMERR("Couldn't create pending pool."); + r = -ENOMEM; + goto bad5; + } + + return 0; + + bad5: + kmem_cache_destroy(pending_cache); + bad4: + kmem_cache_destroy(exception_cache); + bad3: + exit_origin_hash(); + bad2: + dm_unregister_target(&origin_target); + bad1: + dm_unregister_target(&snapshot_target); + return r; +} + +void dm_snapshot_exit(void) +{ + int r; + + r = dm_unregister_target(&snapshot_target); + if (r) + DMERR("snapshot unregister failed %d", r); + + r = dm_unregister_target(&origin_target); + if (r) + DMERR("origin unregister failed %d", r); + + exit_origin_hash(); + mempool_destroy(pending_pool); + kmem_cache_destroy(pending_cache); + kmem_cache_destroy(exception_cache); +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm-snapshot.h linux.22-ac2/drivers/md/dm-snapshot.h --- linux.vanilla/drivers/md/dm-snapshot.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm-snapshot.h 2003-09-01 13:54:30.000000000 +0100 @@ -0,0 +1,147 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#ifndef DM_SNAPSHOT_H +#define DM_SNAPSHOT_H + +#include "dm.h" +#include + +struct exception_table { + uint32_t hash_mask; + struct list_head *table; +}; + +/* + * The snapshot code deals with largish chunks of the disk at a + * time. Typically 64k - 256k. + */ +/* FIXME: can we get away with limiting these to a uint32_t ? */ +typedef offset_t chunk_t; + +/* + * An exception is used where an old chunk of data has been + * replaced by a new one. + */ +struct exception { + struct list_head hash_list; + + chunk_t old_chunk; + chunk_t new_chunk; +}; + +/* + * Abstraction to handle the meta/layout of exception stores (the + * COW device). + */ +struct exception_store { + + /* + * Destroys this object when you've finished with it. + */ + void (*destroy) (struct exception_store *store); + + /* + * Find somewhere to store the next exception. + */ + int (*prepare_exception) (struct exception_store *store, + struct exception *e); + + /* + * Update the metadata with this exception. + */ + void (*commit_exception) (struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context); + + /* + * The snapshot is invalid, note this in the metadata. + */ + void (*drop_snapshot) (struct exception_store *store); + + /* + * Return the %age full of the snapshot + */ + int (*percent_full) (struct exception_store *store); + + struct dm_snapshot *snap; + void *context; +}; + +struct dm_snapshot { + struct rw_semaphore lock; + struct dm_table *table; + + struct dm_dev *origin; + struct dm_dev *cow; + + /* List of snapshots per Origin */ + struct list_head list; + + /* Size of data blocks saved - must be a power of 2 */ + chunk_t chunk_size; + chunk_t chunk_mask; + chunk_t chunk_shift; + + /* You can't use a snapshot if this is 0 (e.g. if full) */ + int valid; + + /* Used for display of table */ + char type; + + /* The last percentage we notified */ + int last_percent; + + struct exception_table pending; + struct exception_table complete; + + /* The on disk metadata handler */ + struct exception_store store; +}; + +/* + * Used by the exception stores to load exceptions hen + * initialising. + */ +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new); + +/* + * Constructor and destructor for the default persistent + * store. + */ +int dm_create_persistent(struct exception_store *store, uint32_t chunk_size); + +int dm_create_transient(struct exception_store *store, + struct dm_snapshot *s, int blocksize, void **error); + +/* + * Return the number of sectors in the device. 
+ */ +static inline offset_t get_dev_size(kdev_t dev) +{ + int *sizes; + + sizes = blk_size[MAJOR(dev)]; + if (sizes) + return sizes[MINOR(dev)] << 1; + + return 0; +} + +static inline chunk_t sector_to_chunk(struct dm_snapshot *s, offset_t sector) +{ + return (sector & ~s->chunk_mask) >> s->chunk_shift; +} + +static inline offset_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk) +{ + return chunk << s->chunk_shift; +} + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm-stripe.c linux.22-ac2/drivers/md/dm-stripe.c --- linux.vanilla/drivers/md/dm-stripe.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm-stripe.c 2003-06-29 16:10:32.000000000 +0100 @@ -0,0 +1,234 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include + +struct stripe { + struct dm_dev *dev; + offset_t physical_start; +}; + +struct stripe_c { + offset_t logical_start; + uint32_t stripes; + + /* The size of this target / num. stripes */ + uint32_t stripe_width; + + /* stripe chunk size */ + uint32_t chunk_shift; + offset_t chunk_mask; + + struct stripe stripe[0]; +}; + +static inline struct stripe_c *alloc_context(int stripes) +{ + size_t len; + + if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), + stripes)) + return NULL; + + len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); + + return kmalloc(len, GFP_KERNEL); +} + +/* + * Parse a single pair + */ +static int get_stripe(struct dm_table *t, struct stripe_c *sc, + int stripe, char **argv) +{ + char *end; + unsigned long start; + + start = simple_strtoul(argv[1], &end, 10); + if (*end) + return -EINVAL; + + if (dm_table_get_device(t, argv[0], start, sc->stripe_width, + t->mode, &sc->stripe[stripe].dev)) + return -ENXIO; + + sc->stripe[stripe].physical_start = start; + return 0; +} + +/* + * Construct a striped mapping. + * [ ]+ + */ +static int stripe_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + struct stripe_c *sc; + uint32_t stripes; + uint32_t chunk_size; + char *end; + int r, i; + + if (argc < 2) { + *context = "dm-stripe: Not enough arguments"; + return -EINVAL; + } + + stripes = simple_strtoul(argv[0], &end, 10); + if (*end) { + *context = "dm-stripe: Invalid stripe count"; + return -EINVAL; + } + + chunk_size = simple_strtoul(argv[1], &end, 10); + if (*end) { + *context = "dm-stripe: Invalid chunk_size"; + return -EINVAL; + } + + if (l % stripes) { + *context = "dm-stripe: Target length not divisable by " + "number of stripes"; + return -EINVAL; + } + + sc = alloc_context(stripes); + if (!sc) { + *context = "dm-stripe: Memory allocation for striped context " + "failed"; + return -ENOMEM; + } + + sc->logical_start = b; + sc->stripes = stripes; + sc->stripe_width = l / stripes; + + /* + * chunk_size is a power of two + */ + if (!chunk_size || (chunk_size & (chunk_size - 1))) { + *context = "dm-stripe: Invalid chunk size"; + kfree(sc); + return -EINVAL; + } + + sc->chunk_mask = chunk_size - 1; + for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) + chunk_size >>= 1; + sc->chunk_shift--; + + /* + * Get the stripe destinations. 
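As an aside on the sector_to_chunk()/chunk_to_sector() helpers just above in dm-snapshot.h: a sector is assigned to a chunk by masking off the low bits and shifting, and remap_exception() keeps the masked-off remainder as the offset inside the new chunk. A small worked example with made-up geometry:

#include <stdio.h>

typedef unsigned long offset_t;	/* stands in for the kernel's offset_t here */

int main(void)
{
	/* Made-up snapshot geometry: 16-sector chunks. */
	int chunk_shift = 4;
	offset_t chunk_mask = (1UL << chunk_shift) - 1;

	offset_t sector = 1234;
	offset_t chunk = (sector & ~chunk_mask) >> chunk_shift;	/* sector_to_chunk() */
	offset_t chunk_start = chunk << chunk_shift;		/* chunk_to_sector() */
	offset_t within = sector & chunk_mask;			/* kept by remap_exception() */

	printf("sector %lu -> chunk %lu (starts at sector %lu, offset %lu)\n",
	       sector, chunk, chunk_start, within);
	return 0;
}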
+ */ + for (i = 0; i < stripes; i++) { + if (argc < 2) { + *context = "dm-stripe: Not enough destinations " + "specified"; + kfree(sc); + return -EINVAL; + } + + argv += 2; + + r = get_stripe(t, sc, i, argv); + if (r < 0) { + *context = "dm-stripe: Couldn't parse stripe " + "destination"; + while (i--) + dm_table_put_device(t, sc->stripe[i].dev); + kfree(sc); + return r; + } + } + + *context = sc; + return 0; +} + +static void stripe_dtr(struct dm_table *t, void *c) +{ + unsigned int i; + struct stripe_c *sc = (struct stripe_c *) c; + + for (i = 0; i < sc->stripes; i++) + dm_table_put_device(t, sc->stripe[i].dev); + + kfree(sc); +} + +static int stripe_map(struct buffer_head *bh, int rw, void *context) +{ + struct stripe_c *sc = (struct stripe_c *) context; + + offset_t offset = bh->b_rsector - sc->logical_start; + uint32_t chunk = (uint32_t) (offset >> sc->chunk_shift); + uint32_t stripe = chunk % sc->stripes; /* 32bit modulus */ + chunk = chunk / sc->stripes; + + bh->b_rdev = sc->stripe[stripe].dev->dev; + bh->b_rsector = sc->stripe[stripe].physical_start + + (chunk << sc->chunk_shift) + (offset & sc->chunk_mask); + return 1; +} + +static int stripe_status(status_type_t type, char *result, int maxlen, + void *context) +{ + struct stripe_c *sc = (struct stripe_c *) context; + int offset; + int i; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + offset = snprintf(result, maxlen, "%d %ld", + sc->stripes, sc->chunk_mask + 1); + for (i = 0; i < sc->stripes; i++) { + offset += + snprintf(result + offset, maxlen - offset, + " %s %ld", + kdevname(sc->stripe[i].dev->dev), + sc->stripe[i].physical_start); + } + break; + } + return 0; +} + +static struct target_type stripe_target = { + name: "striped", + module: THIS_MODULE, + ctr: stripe_ctr, + dtr: stripe_dtr, + map: stripe_map, + status: stripe_status, +}; + +int __init dm_stripe_init(void) +{ + int r; + + r = dm_register_target(&stripe_target); + if (r < 0) + DMWARN("striped target registration failed"); + + return r; +} + +void dm_stripe_exit(void) +{ + if (dm_unregister_target(&stripe_target)) + DMWARN("striped target unregistration failed"); + + return; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm-table.c linux.22-ac2/drivers/md/dm-table.c --- linux.vanilla/drivers/md/dm-table.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm-table.c 2003-06-29 16:10:32.000000000 +0100 @@ -0,0 +1,452 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include + +/* ceiling(n / size) * size */ +static inline unsigned long round_up(unsigned long n, unsigned long size) +{ + unsigned long r = n % size; + return n + (r ? (size - r) : 0); +} + +/* ceiling(n / size) */ +static inline unsigned long div_up(unsigned long n, unsigned long size) +{ + return round_up(n, size) / size; +} + +/* similar to ceiling(log_size(n)) */ +static uint int_log(unsigned long n, unsigned long base) +{ + int result = 0; + + while (n > 1) { + n = div_up(n, base); + result++; + } + + return result; +} + +/* + * return the highest key that you could lookup + * from the n'th node on level l of the btree. 
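For illustration (not part of the patch): stripe_map() above converts the request sector to a chunk number, picks the stripe with a 32-bit modulus, and rebuilds the sector on the chosen device from the per-stripe chunk index plus the offset inside the chunk. A standalone sketch with made-up stripe geometry:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up striped target: 4 stripes, 8-sector chunks, all starting at 0. */
	uint32_t stripes = 4;
	uint32_t chunk_shift = 3;
	unsigned long chunk_mask = (1UL << chunk_shift) - 1;
	unsigned long logical_start = 0;
	unsigned long physical_start[4] = { 0, 0, 0, 0 };

	unsigned long rsector = 100;

	/* Same arithmetic as stripe_map() above. */
	unsigned long offset = rsector - logical_start;
	uint32_t chunk = (uint32_t) (offset >> chunk_shift);
	uint32_t stripe = chunk % stripes;	/* 32bit modulus */
	unsigned long new_sector;

	chunk = chunk / stripes;
	new_sector = physical_start[stripe] +
	    (chunk << chunk_shift) + (offset & chunk_mask);

	printf("sector %lu -> stripe %u, sector %lu on that device\n",
	       rsector, stripe, new_sector);
	return 0;
}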
+ */ +static offset_t high(struct dm_table *t, int l, int n) +{ + for (; l < t->depth - 1; l++) + n = get_child(n, CHILDREN_PER_NODE - 1); + + if (n >= t->counts[l]) + return (offset_t) - 1; + + return get_node(t, l, n)[KEYS_PER_NODE - 1]; +} + +/* + * fills in a level of the btree based on the + * highs of the level below it. + */ +static int setup_btree_index(int l, struct dm_table *t) +{ + int n, k; + offset_t *node; + + for (n = 0; n < t->counts[l]; n++) { + node = get_node(t, l, n); + + for (k = 0; k < KEYS_PER_NODE; k++) + node[k] = high(t, l + 1, get_child(n, k)); + } + + return 0; +} + +/* + * highs, and targets are managed as dynamic + * arrays during a table load. + */ +static int alloc_targets(struct dm_table *t, int num) +{ + offset_t *n_highs; + struct target *n_targets; + int n = t->num_targets; + + /* + * Allocate both the target array and offset array at once. + */ + n_highs = (offset_t *) vcalloc(sizeof(struct target) + sizeof(offset_t), + num); + if (!n_highs) + return -ENOMEM; + + n_targets = (struct target *) (n_highs + num); + + if (n) { + memcpy(n_highs, t->highs, sizeof(*n_highs) * n); + memcpy(n_targets, t->targets, sizeof(*n_targets) * n); + } + + memset(n_highs + n, -1, sizeof(*n_highs) * (num - n)); + if (t->highs) + vfree(t->highs); + + t->num_allocated = num; + t->highs = n_highs; + t->targets = n_targets; + + return 0; +} + +int dm_table_create(struct dm_table **result, int mode) +{ + struct dm_table *t = kmalloc(sizeof(*t), GFP_NOIO); + + if (!t) + return -ENOMEM; + + memset(t, 0, sizeof(*t)); + INIT_LIST_HEAD(&t->devices); + + /* allocate a single node's worth of targets to begin with */ + if (alloc_targets(t, KEYS_PER_NODE)) { + kfree(t); + t = NULL; + return -ENOMEM; + } + + init_waitqueue_head(&t->eventq); + t->mode = mode; + *result = t; + return 0; +} + +static void free_devices(struct list_head *devices) +{ + struct list_head *tmp, *next; + + for (tmp = devices->next; tmp != devices; tmp = next) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + next = tmp->next; + kfree(dd); + } +} + +void dm_table_destroy(struct dm_table *t) +{ + int i; + + /* destroying the table counts as an event */ + dm_table_event(t); + + /* free the indexes (see dm_table_complete) */ + if (t->depth >= 2) + vfree(t->index[t->depth - 2]); + + /* free the targets */ + for (i = 0; i < t->num_targets; i++) { + struct target *tgt = &t->targets[i]; + + dm_put_target_type(t->targets[i].type); + + if (tgt->type->dtr) + tgt->type->dtr(t, tgt->private); + } + + vfree(t->highs); + + /* free the device list */ + if (t->devices.next != &t->devices) { + DMWARN("devices still present during destroy: " + "dm_table_remove_device calls missing"); + + free_devices(&t->devices); + } + + kfree(t); +} + +/* + * Checks to see if we need to extend highs or targets. + */ +static inline int check_space(struct dm_table *t) +{ + if (t->num_targets >= t->num_allocated) + return alloc_targets(t, t->num_allocated * 2); + + return 0; +} + +/* + * Convert a device path to a kdev_t. + */ +int lookup_device(const char *path, kdev_t *dev) +{ + int r; + struct nameidata nd; + struct inode *inode; + + if (!path_init(path, LOOKUP_FOLLOW, &nd)) + return 0; + + if ((r = path_walk(path, &nd))) + goto bad; + + inode = nd.dentry->d_inode; + if (!inode) { + r = -ENOENT; + goto bad; + } + + if (!S_ISBLK(inode->i_mode)) { + r = -EINVAL; + goto bad; + } + + *dev = inode->i_rdev; + + bad: + path_release(&nd); + return r; +} + +/* + * See if we've already got a device in the list. 
+ */ +static struct dm_dev *find_device(struct list_head *l, kdev_t dev) +{ + struct list_head *tmp; + + list_for_each(tmp, l) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + if (dd->dev == dev) + return dd; + } + + return NULL; +} + +/* + * Open a device so we can use it as a map destination. + */ +static int open_dev(struct dm_dev *d) +{ + int err; + + if (d->bd) + BUG(); + + if (!(d->bd = bdget(kdev_t_to_nr(d->dev)))) + return -ENOMEM; + + if ((err = blkdev_get(d->bd, d->mode, 0, BDEV_FILE))) + return err; + + return 0; +} + +/* + * Close a device that we've been using. + */ +static void close_dev(struct dm_dev *d) +{ + if (!d->bd) + return; + + blkdev_put(d->bd, BDEV_FILE); + d->bd = NULL; +} + +/* + * If possible (ie. blk_size[major] is set), this + * checks an area of a destination device is + * valid. + */ +static int check_device_area(kdev_t dev, offset_t start, offset_t len) +{ + int *sizes; + offset_t dev_size; + + if (!(sizes = blk_size[MAJOR(dev)]) || !(dev_size = sizes[MINOR(dev)])) + /* we don't know the device details, + * so give the benefit of the doubt */ + return 1; + + /* convert to 512-byte sectors */ + dev_size <<= 1; + + return ((start < dev_size) && (len <= (dev_size - start))); +} + +/* + * This upgrades the mode on an already open dm_dev. Being + * careful to leave things as they were if we fail to reopen the + * device. + */ +static int upgrade_mode(struct dm_dev *dd, int new_mode) +{ + int r; + struct dm_dev dd_copy; + + memcpy(&dd_copy, dd, sizeof(dd_copy)); + + dd->mode |= new_mode; + dd->bd = NULL; + r = open_dev(dd); + if (!r) + close_dev(&dd_copy); + else + memcpy(dd, &dd_copy, sizeof(dd_copy)); + + return r; +} + +/* + * Add a device to the list, or just increment the usage count + * if it's already present. + */ +int dm_table_get_device(struct dm_table *t, const char *path, + offset_t start, offset_t len, int mode, + struct dm_dev **result) +{ + int r; + kdev_t dev; + struct dm_dev *dd; + int major, minor; + + if (sscanf(path, "%x:%x", &major, &minor) == 2) { + /* Extract the major/minor numbers */ + dev = MKDEV(major, minor); + } else { + /* convert the path to a device */ + if ((r = lookup_device(path, &dev))) + return r; + } + + dd = find_device(&t->devices, dev); + if (!dd) { + dd = kmalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; + + dd->mode = mode; + dd->dev = dev; + dd->bd = NULL; + + if ((r = open_dev(dd))) { + kfree(dd); + return r; + } + + atomic_set(&dd->count, 0); + list_add(&dd->list, &t->devices); + + } else if (dd->mode != (mode | dd->mode)) { + r = upgrade_mode(dd, mode); + if (r) + return r; + } + atomic_inc(&dd->count); + + if (!check_device_area(dd->dev, start, len)) { + DMWARN("device %s too small for target", path); + dm_table_put_device(t, dd); + return -EINVAL; + } + + *result = dd; + + return 0; +} + +/* + * Decrement a devices use count and remove it if neccessary. 
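A short aside on check_device_area() above: a target area is accepted only if [start, start + len) fits inside the device, and the test is written as "start < dev_size && len <= dev_size - start" so that start + len never has to be formed and cannot overflow. A minimal sketch of the same check:

#include <stdio.h>

typedef unsigned long offset_t;

/* Same test as check_device_area(), written to avoid overflowing start + len. */
static int area_ok(offset_t dev_size, offset_t start, offset_t len)
{
	return (start < dev_size) && (len <= (dev_size - start));
}

int main(void)
{
	offset_t dev_size = 2048;	/* made-up device size in sectors */

	printf("%d\n", area_ok(dev_size, 0, 2048));	/* 1: exactly fits   */
	printf("%d\n", area_ok(dev_size, 1024, 1025));	/* 0: runs past end  */
	printf("%d\n", area_ok(dev_size, 4096, 1));	/* 0: starts beyond  */
	return 0;
}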
+ */ +void dm_table_put_device(struct dm_table *t, struct dm_dev *dd) +{ + if (atomic_dec_and_test(&dd->count)) { + close_dev(dd); + list_del(&dd->list); + kfree(dd); + } +} + +/* + * Adds a target to the map + */ +int dm_table_add_target(struct dm_table *t, offset_t highs, + struct target_type *type, void *private) +{ + int r, n; + + if ((r = check_space(t))) + return r; + + n = t->num_targets++; + t->highs[n] = highs; + t->targets[n].type = type; + t->targets[n].private = private; + + return 0; +} + +static int setup_indexes(struct dm_table *t) +{ + int i, total = 0; + offset_t *indexes; + + /* allocate the space for *all* the indexes */ + for (i = t->depth - 2; i >= 0; i--) { + t->counts[i] = div_up(t->counts[i + 1], CHILDREN_PER_NODE); + total += t->counts[i]; + } + + indexes = (offset_t *) vcalloc(total, (unsigned long) NODE_SIZE); + if (!indexes) + return -ENOMEM; + + /* set up internal nodes, bottom-up */ + for (i = t->depth - 2, total = 0; i >= 0; i--) { + t->index[i] = indexes; + indexes += (KEYS_PER_NODE * t->counts[i]); + setup_btree_index(i, t); + } + + return 0; +} + +/* + * Builds the btree to index the map + */ +int dm_table_complete(struct dm_table *t) +{ + int leaf_nodes, r = 0; + + /* how many indexes will the btree have ? */ + leaf_nodes = div_up(t->num_targets, KEYS_PER_NODE); + t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); + + /* leaf layer has already been set up */ + t->counts[t->depth - 1] = leaf_nodes; + t->index[t->depth - 1] = t->highs; + + if (t->depth >= 2) + r = setup_indexes(t); + + return r; +} + +void dm_table_event(struct dm_table *t) +{ + wake_up_interruptible(&t->eventq); +} + +EXPORT_SYMBOL(dm_table_get_device); +EXPORT_SYMBOL(dm_table_put_device); +EXPORT_SYMBOL(dm_table_event); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/dm-target.c linux.22-ac2/drivers/md/dm-target.c --- linux.vanilla/drivers/md/dm-target.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/dm-target.c 2003-06-29 16:10:32.000000000 +0100 @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include + +struct tt_internal { + struct target_type tt; + + struct list_head list; + long use; +}; + +static LIST_HEAD(_targets); +static rwlock_t _lock = RW_LOCK_UNLOCKED; + +#define DM_MOD_NAME_SIZE 32 + +/* + * Destructively splits up the argument list to pass to ctr. + */ +int split_args(int max, int *argc, char **argv, char *input) +{ + char *start, *end = input, *out; + *argc = 0; + + while (1) { + start = end; + + /* Skip whitespace */ + while (*start && isspace(*start)) + start++; + + if (!*start) + break; /* success, we hit the end */ + + /* 'out' is used to remove any back-quotes */ + end = out = start; + while (*end) { + /* Everything apart from '\0' can be quoted */ + if (*end == '\\' && *(end + 1)) { + *out++ = *(end + 1); + end += 2; + continue; + } + + if (isspace(*end)) + break; /* end of token */ + + *out++ = *end++; + } + + /* have we already filled the array ? 
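For illustration (not part of the patch): dm_table_complete() above sizes the index btree with the div_up() and int_log() helpers from the top of dm-table.c, so leaf_nodes is ceil(num_targets / KEYS_PER_NODE) and depth is 1 + ceil(log base CHILDREN_PER_NODE of leaf_nodes). Those two constants live in dm.h, which is not in this hunk, so the sketch below just assumes made-up values:

#include <stdio.h>

/* User-space copies of the helpers at the top of dm-table.c above. */
static unsigned long round_up(unsigned long n, unsigned long size)
{
	unsigned long r = n % size;
	return n + (r ? (size - r) : 0);
}

static unsigned long div_up(unsigned long n, unsigned long size)
{
	return round_up(n, size) / size;
}

static int int_log(unsigned long n, unsigned long base)
{
	int result = 0;

	while (n > 1) {
		n = div_up(n, base);
		result++;
	}
	return result;
}

int main(void)
{
	/* KEYS_PER_NODE / CHILDREN_PER_NODE are assumed values here. */
	unsigned long keys_per_node = 64;
	unsigned long children_per_node = 65;

	unsigned long num_targets = 100000;
	unsigned long leaf_nodes = div_up(num_targets, keys_per_node);
	int depth = 1 + int_log(leaf_nodes, children_per_node);

	printf("%lu targets -> %lu leaf nodes, btree depth %d\n",
	       num_targets, leaf_nodes, depth);
	return 0;
}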
*/ + if ((*argc + 1) > max) + return -EINVAL; + + /* we know this is whitespace */ + if (*end) + end++; + + /* terminate the string and put it in the array */ + *out = '\0'; + argv[*argc] = start; + (*argc)++; + } + + return 0; +} + +static inline struct tt_internal *__find_target_type(const char *name) +{ + struct list_head *tih; + struct tt_internal *ti; + + list_for_each(tih, &_targets) { + ti = list_entry(tih, struct tt_internal, list); + + if (!strcmp(name, ti->tt.name)) + return ti; + } + + return NULL; +} + +static struct tt_internal *get_target_type(const char *name) +{ + struct tt_internal *ti; + + read_lock(&_lock); + ti = __find_target_type(name); + + if (ti) { + if (ti->use == 0 && ti->tt.module) + __MOD_INC_USE_COUNT(ti->tt.module); + ti->use++; + } + read_unlock(&_lock); + + return ti; +} + +static void load_module(const char *name) +{ + char module_name[DM_MOD_NAME_SIZE] = "dm-"; + + /* Length check for strcat() below */ + if (strlen(name) > (DM_MOD_NAME_SIZE - 4)) + return; + + strcat(module_name, name); + request_module(module_name); + + return; +} + +struct target_type *dm_get_target_type(const char *name) +{ + struct tt_internal *ti = get_target_type(name); + + if (!ti) { + load_module(name); + ti = get_target_type(name); + } + + return ti ? &ti->tt : NULL; +} + +void dm_put_target_type(struct target_type *t) +{ + struct tt_internal *ti = (struct tt_internal *) t; + + read_lock(&_lock); + if (--ti->use == 0 && ti->tt.module) + __MOD_DEC_USE_COUNT(ti->tt.module); + + if (ti->use < 0) + BUG(); + read_unlock(&_lock); + + return; +} + +static struct tt_internal *alloc_target(struct target_type *t) +{ + struct tt_internal *ti = kmalloc(sizeof(*ti), GFP_KERNEL); + + if (ti) { + memset(ti, 0, sizeof(*ti)); + ti->tt = *t; + } + + return ti; +} + +int dm_register_target(struct target_type *t) +{ + int rv = 0; + struct tt_internal *ti = alloc_target(t); + + if (!ti) + return -ENOMEM; + + write_lock(&_lock); + if (__find_target_type(t->name)) + rv = -EEXIST; + else + list_add(&ti->list, &_targets); + + write_unlock(&_lock); + return rv; +} + +int dm_unregister_target(struct target_type *t) +{ + struct tt_internal *ti; + + write_lock(&_lock); + if (!(ti = __find_target_type(t->name))) { + write_unlock(&_lock); + return -EINVAL; + } + + if (ti->use) { + write_unlock(&_lock); + return -ETXTBSY; + } + + list_del(&ti->list); + kfree(ti); + + write_unlock(&_lock); + return 0; +} + +/* + * io-err: always fails an io, useful for bringing + * up LV's that have holes in them. 
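As an aside on split_args() at the top of dm-target.c above: it tokenises the table line in place on whitespace, with a backslash escaping the following character so that, for example, a space can appear inside a device path. A user-space copy plus a sample input (the error value is simplified to -1 here):

#include <stdio.h>
#include <ctype.h>

/* User-space copy of split_args() above: destructive, whitespace separated,
 * with '\' quoting the next character. */
static int split_args(int max, int *argc, char **argv, char *input)
{
	char *start, *end = input, *out;
	*argc = 0;

	while (1) {
		start = end;
		while (*start && isspace((unsigned char) *start))
			start++;
		if (!*start)
			break;		/* hit the end of the input */

		end = out = start;
		while (*end) {
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);	/* keep the quoted char */
				end += 2;
				continue;
			}
			if (isspace((unsigned char) *end))
				break;	/* end of token */
			*out++ = *end++;
		}

		if ((*argc + 1) > max)
			return -1;	/* array already full */
		if (*end)
			end++;		/* skip the whitespace we stopped on */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}
	return 0;
}

int main(void)
{
	char line[] = "striped 2 64 /dev/hda\\ 1 0";	/* made-up table line */
	char *argv[16];
	int argc, i;

	if (!split_args(16, &argc, argv, line))
		for (i = 0; i < argc; i++)
			printf("argv[%d] = '%s'\n", i, argv[i]);
	return 0;
}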
+ */ +static int io_err_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **args, void **context) +{ + *context = NULL; + return 0; +} + +static void io_err_dtr(struct dm_table *t, void *c) +{ + /* empty */ + return; +} + +static int io_err_map(struct buffer_head *bh, int rw, void *context) +{ + buffer_IO_error(bh); + return 0; +} + +static struct target_type error_target = { + name: "error", + ctr: io_err_ctr, + dtr: io_err_dtr, + map: io_err_map, + status: NULL, +}; + +int dm_target_init(void) +{ + return dm_register_target(&error_target); +} + +void dm_target_exit(void) +{ + if (dm_unregister_target(&error_target)) + DMWARN("error target unregistration failed"); +} + +EXPORT_SYMBOL(dm_register_target); +EXPORT_SYMBOL(dm_unregister_target); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/kcopyd.c linux.22-ac2/drivers/md/kcopyd.c --- linux.vanilla/drivers/md/kcopyd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/kcopyd.c 2003-06-29 16:10:32.000000000 +0100 @@ -0,0 +1,841 @@ +/* + * Copyright (C) 2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kcopyd.h" + +/* FIXME: this is only needed for the DMERR macros */ +#include "dm.h" + +/* + * Hard sector size used all over the kernel. + */ +#define SECTOR_SIZE 512 +#define SECTOR_SHIFT 9 + +static void wake_kcopyd(void); + +/*----------------------------------------------------------------- + * We reserve our own pool of preallocated pages that are + * only used for kcopyd io. + *---------------------------------------------------------------*/ + +/* + * FIXME: This should be configurable. + */ +#define NUM_PAGES 512 + +static DECLARE_MUTEX(_pages_lock); +static int _num_free_pages; +static struct page *_pages_array[NUM_PAGES]; +static DECLARE_MUTEX(start_lock); + +static int init_pages(void) +{ + int i; + struct page *p; + + for (i = 0; i < NUM_PAGES; i++) { + p = alloc_page(GFP_KERNEL); + if (!p) + goto bad; + + LockPage(p); + _pages_array[i] = p; + } + + _num_free_pages = NUM_PAGES; + return 0; + + bad: + while (i--) + __free_page(_pages_array[i]); + return -ENOMEM; +} + +static void exit_pages(void) +{ + int i; + struct page *p; + + for (i = 0; i < NUM_PAGES; i++) { + p = _pages_array[i]; + UnlockPage(p); + __free_page(p); + } + + _num_free_pages = 0; +} + +static int kcopyd_get_pages(int num, struct page **result) +{ + int i; + + down(&_pages_lock); + if (_num_free_pages < num) { + up(&_pages_lock); + return -ENOMEM; + } + + for (i = 0; i < num; i++) { + _num_free_pages--; + result[i] = _pages_array[_num_free_pages]; + } + up(&_pages_lock); + + return 0; +} + +static void kcopyd_free_pages(int num, struct page **result) +{ + int i; + + down(&_pages_lock); + for (i = 0; i < num; i++) + _pages_array[_num_free_pages++] = result[i]; + up(&_pages_lock); +} + +/*----------------------------------------------------------------- + * We keep our own private pool of buffer_heads. These are just + * held in a list on the b_reqnext field. + *---------------------------------------------------------------*/ + +/* + * Make sure we have enough buffers to always keep the pages + * occupied. So we assume the worst case scenario where blocks + * are the size of a single sector. 
+ */ +#define NUM_BUFFERS NUM_PAGES * (PAGE_SIZE / SECTOR_SIZE) + +static spinlock_t _buffer_lock = SPIN_LOCK_UNLOCKED; +static struct buffer_head *_all_buffers; +static struct buffer_head *_free_buffers; + +static int init_buffers(void) +{ + int i; + struct buffer_head *buffers; + + buffers = vcalloc(NUM_BUFFERS, sizeof(struct buffer_head)); + if (!buffers) { + DMWARN("Couldn't allocate buffer heads."); + return -ENOMEM; + } + + for (i = 0; i < NUM_BUFFERS; i++) { + if (i < NUM_BUFFERS - 1) + buffers[i].b_reqnext = &buffers[i + 1]; + init_waitqueue_head(&buffers[i].b_wait); + INIT_LIST_HEAD(&buffers[i].b_inode_buffers); + } + + _all_buffers = _free_buffers = buffers; + return 0; +} + +static void exit_buffers(void) +{ + vfree(_all_buffers); +} + +static struct buffer_head *alloc_buffer(void) +{ + struct buffer_head *r; + unsigned long flags; + + spin_lock_irqsave(&_buffer_lock, flags); + + if (!_free_buffers) + r = NULL; + else { + r = _free_buffers; + _free_buffers = _free_buffers->b_reqnext; + r->b_reqnext = NULL; + } + + spin_unlock_irqrestore(&_buffer_lock, flags); + + return r; +} + +/* + * Only called from interrupt context. + */ +static void free_buffer(struct buffer_head *bh) +{ + unsigned long flags, was_empty; + + spin_lock_irqsave(&_buffer_lock, flags); + was_empty = (_free_buffers == NULL) ? 1 : 0; + bh->b_reqnext = _free_buffers; + _free_buffers = bh; + spin_unlock_irqrestore(&_buffer_lock, flags); + + /* + * If the buffer list was empty then kcopyd probably went + * to sleep because it ran out of buffer heads, so let's + * wake it up. + */ + if (was_empty) + wake_kcopyd(); +} + +/*----------------------------------------------------------------- + * kcopyd_jobs need to be allocated by the *clients* of kcopyd, + * for this reason we use a mempool to prevent the client from + * ever having to do io (which could cause a + * deadlock). + *---------------------------------------------------------------*/ +#define MIN_JOBS NUM_PAGES + +static kmem_cache_t *_job_cache = NULL; +static mempool_t *_job_pool = NULL; + +/* + * We maintain three lists of jobs: + * + * i) jobs waiting for pages + * ii) jobs that have pages, and are waiting for the io to be issued. + * iii) jobs that have completed. + * + * All three of these are protected by job_lock. + */ + +static spinlock_t _job_lock = SPIN_LOCK_UNLOCKED; + +static LIST_HEAD(_complete_jobs); +static LIST_HEAD(_io_jobs); +static LIST_HEAD(_pages_jobs); + +static int init_jobs(void) +{ + INIT_LIST_HEAD(&_complete_jobs); + INIT_LIST_HEAD(&_io_jobs); + INIT_LIST_HEAD(&_pages_jobs); + + _job_cache = kmem_cache_create("kcopyd-jobs", sizeof(struct kcopyd_job), + __alignof__(struct kcopyd_job), + 0, NULL, NULL); + if (!_job_cache) + return -ENOMEM; + + _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab, + mempool_free_slab, _job_cache); + if (!_job_pool) { + kmem_cache_destroy(_job_cache); + return -ENOMEM; + } + + return 0; +} + +static void exit_jobs(void) +{ + mempool_destroy(_job_pool); + kmem_cache_destroy(_job_cache); +} + +struct kcopyd_job *kcopyd_alloc_job(void) +{ + struct kcopyd_job *job; + + job = mempool_alloc(_job_pool, GFP_KERNEL); + if (!job) + return NULL; + + memset(job, 0, sizeof(*job)); + return job; +} + +void kcopyd_free_job(struct kcopyd_job *job) +{ + mempool_free(job, _job_pool); +} + +/* + * Functions to push and pop a job onto the head of a given job + * list. 
+ */ +static inline struct kcopyd_job *pop(struct list_head *jobs) +{ + struct kcopyd_job *job = NULL; + unsigned long flags; + + spin_lock_irqsave(&_job_lock, flags); + + if (!list_empty(jobs)) { + job = list_entry(jobs->next, struct kcopyd_job, list); + list_del(&job->list); + } + spin_unlock_irqrestore(&_job_lock, flags); + + return job; +} + +static inline void push(struct list_head *jobs, struct kcopyd_job *job) +{ + unsigned long flags; + + spin_lock_irqsave(&_job_lock, flags); + list_add(&job->list, jobs); + spin_unlock_irqrestore(&_job_lock, flags); +} + +/* + * Completion function for one of our buffers. + */ +static void end_bh(struct buffer_head *bh, int uptodate) +{ + struct kcopyd_job *job = bh->b_private; + + mark_buffer_uptodate(bh, uptodate); + unlock_buffer(bh); + + if (!uptodate) + job->err = -EIO; + + /* are we the last ? */ + if (atomic_dec_and_test(&job->nr_incomplete)) { + push(&_complete_jobs, job); + wake_kcopyd(); + } + + free_buffer(bh); +} + +static void dispatch_bh(struct kcopyd_job *job, + struct buffer_head *bh, int block) +{ + int p; + + /* + * Add in the job offset + */ + bh->b_blocknr = (job->disk.sector >> job->block_shift) + block; + + p = block >> job->bpp_shift; + block &= job->bpp_mask; + + bh->b_dev = B_FREE; + bh->b_size = job->block_size; + set_bh_page(bh, job->pages[p], ((block << job->block_shift) + + job->offset) << SECTOR_SHIFT); + bh->b_this_page = bh; + + init_buffer(bh, end_bh, job); + + bh->b_dev = job->disk.dev; + bh->b_state = ((1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req)); + + set_bit(BH_Uptodate, &bh->b_state); + if (job->rw == WRITE) + clear_bit(BH_Dirty, &bh->b_state); + + submit_bh(job->rw, bh); +} + +/* + * These three functions process 1 item from the corresponding + * job list. + * + * They return: + * < 0: error + * 0: success + * > 0: can't process yet. + */ +static int run_complete_job(struct kcopyd_job *job) +{ + job->callback(job); + return 0; +} + +/* + * Request io on as many buffer heads as we can currently get for + * a particular job. + */ +static int run_io_job(struct kcopyd_job *job) +{ + unsigned int block; + struct buffer_head *bh; + + for (block = atomic_read(&job->nr_requested); + block < job->nr_blocks; block++) { + bh = alloc_buffer(); + if (!bh) + break; + + atomic_inc(&job->nr_requested); + dispatch_bh(job, bh, block); + } + + return (block == job->nr_blocks) ? 0 : 1; +} + +static int run_pages_job(struct kcopyd_job *job) +{ + int r; + + job->nr_pages = (job->disk.count + job->offset) / + (PAGE_SIZE / SECTOR_SIZE); + r = kcopyd_get_pages(job->nr_pages, job->pages); + + if (!r) { + /* this job is ready for io */ + push(&_io_jobs, job); + return 0; + } + + if (r == -ENOMEM) + /* can complete now */ + return 1; + + return r; +} + +/* + * Run through a list for as long as possible. Returns the count + * of successful jobs. + */ +static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) +{ + struct kcopyd_job *job; + int r, count = 0; + + while ((job = pop(jobs))) { + + r = fn(job); + + if (r < 0) { + /* error this rogue job */ + job->err = r; + push(&_complete_jobs, job); + break; + } + + if (r > 0) { + /* + * We couldn't service this job ATM, so + * push this job back onto the list. + */ + push(jobs, job); + break; + } + + count++; + } + + return count; +} + +/* + * kcopyd does this every time it's woken up. + */ +static void do_work(void) +{ + int count; + + /* + * We loop round until there is no more work to do. 
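To make the shift arithmetic in dispatch_bh() concrete, a worked example assuming 4 KiB pages and a 512-byte hard sector size (the values are produced by calc_block_sizes() further down):

/*
 * block_size = 512, so block_shift = 0, bpp_shift = PAGE_SHIFT -
 * block_shift - SECTOR_SHIFT = 12 - 0 - 9 = 3 and bpp_mask = 7.
 * Block 21 of a job with offset 0 is then dispatched as:
 *
 *	page index   p = 21 >> 3 = 2
 *	block in page  = 21 & 7  = 5
 *	byte offset    = ((5 << 0) + 0) << 9 = 2560
 *
 * With 4 KiB hard sectors the shifts collapse (block_shift = 3,
 * bpp_shift = 0, bpp_mask = 0) and each block occupies a whole page.
 */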
+ */ + do { + count = process_jobs(&_complete_jobs, run_complete_job); + count += process_jobs(&_io_jobs, run_io_job); + count += process_jobs(&_pages_jobs, run_pages_job); + + } while (count); + + run_task_queue(&tq_disk); +} + +/*----------------------------------------------------------------- + * The daemon + *---------------------------------------------------------------*/ +static atomic_t _kcopyd_must_die; +static DECLARE_MUTEX(_run_lock); +static DECLARE_WAIT_QUEUE_HEAD(_job_queue); + +static int kcopyd(void *arg) +{ + DECLARE_WAITQUEUE(wq, current); + + daemonize(); + strcpy(current->comm, "kcopyd"); + atomic_set(&_kcopyd_must_die, 0); + + add_wait_queue(&_job_queue, &wq); + + down(&_run_lock); + up(&start_lock); + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + + if (atomic_read(&_kcopyd_must_die)) + break; + + do_work(); + schedule(); + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&_job_queue, &wq); + + up(&_run_lock); + + return 0; +} + +static int start_daemon(void) +{ + static pid_t pid = 0; + + down(&start_lock); + + pid = kernel_thread(kcopyd, NULL, 0); + if (pid <= 0) { + DMERR("Failed to start kcopyd thread"); + return -EAGAIN; + } + + /* + * wait for the daemon to up this mutex. + */ + down(&start_lock); + up(&start_lock); + + return 0; +} + +static int stop_daemon(void) +{ + atomic_set(&_kcopyd_must_die, 1); + wake_kcopyd(); + down(&_run_lock); + up(&_run_lock); + + return 0; +} + +static void wake_kcopyd(void) +{ + wake_up_interruptible(&_job_queue); +} + +static int calc_shift(unsigned int n) +{ + int s; + + for (s = 0; n; s++, n >>= 1) + ; + + return --s; +} + +static void calc_block_sizes(struct kcopyd_job *job) +{ + job->block_size = get_hardsect_size(job->disk.dev); + job->block_shift = calc_shift(job->block_size / SECTOR_SIZE); + job->bpp_shift = PAGE_SHIFT - job->block_shift - SECTOR_SHIFT; + job->bpp_mask = (1 << job->bpp_shift) - 1; + job->nr_blocks = job->disk.count >> job->block_shift; + atomic_set(&job->nr_requested, 0); + atomic_set(&job->nr_incomplete, job->nr_blocks); +} + +int kcopyd_io(struct kcopyd_job *job) +{ + calc_block_sizes(job); + push(job->pages[0] ? &_io_jobs : &_pages_jobs, job); + wake_kcopyd(); + return 0; +} + +/*----------------------------------------------------------------- + * The copier is implemented on top of the simpler async io + * daemon above. 
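The semaphore handshake between start_daemon(), kcopyd() and stop_daemon() above is terse, so a summary of the ordering it enforces:

/*
 * start_daemon() takes start_lock, forks kcopyd() and then blocks on
 * start_lock a second time.  The daemon takes _run_lock and only then
 * releases start_lock, so by the time start_daemon() returns the daemon
 * is up and holding _run_lock.  stop_daemon() sets _kcopyd_must_die,
 * wakes the daemon and then waits for it to exit by acquiring
 * _run_lock, which the daemon only releases on its way out.
 */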
+ *---------------------------------------------------------------*/ +struct copy_info { + kcopyd_notify_fn notify; + void *notify_context; + + struct kcopyd_region to; +}; + +#define MIN_INFOS 128 +static kmem_cache_t *_copy_cache = NULL; +static mempool_t *_copy_pool = NULL; + +static int init_copier(void) +{ + _copy_cache = kmem_cache_create("kcopyd-info", + sizeof(struct copy_info), + __alignof__(struct copy_info), + 0, NULL, NULL); + if (!_copy_cache) + return -ENOMEM; + + _copy_pool = mempool_create(MIN_INFOS, mempool_alloc_slab, + mempool_free_slab, _copy_cache); + if (!_copy_pool) { + kmem_cache_destroy(_copy_cache); + return -ENOMEM; + } + + return 0; +} + +static void exit_copier(void) +{ + if (_copy_pool) + mempool_destroy(_copy_pool); + + if (_copy_cache) + kmem_cache_destroy(_copy_cache); +} + +static inline struct copy_info *alloc_copy_info(void) +{ + return mempool_alloc(_copy_pool, GFP_KERNEL); +} + +static inline void free_copy_info(struct copy_info *info) +{ + mempool_free(info, _copy_pool); +} + +void copy_complete(struct kcopyd_job *job) +{ + struct copy_info *info = (struct copy_info *) job->context; + + if (info->notify) + info->notify(job->err, info->notify_context); + + free_copy_info(info); + + kcopyd_free_pages(job->nr_pages, job->pages); + + kcopyd_free_job(job); +} + +static void page_write_complete(struct kcopyd_job *job) +{ + struct copy_info *info = (struct copy_info *) job->context; + int i; + + if (info->notify) + info->notify(job->err, info->notify_context); + + free_copy_info(info); + for (i = 0; i < job->nr_pages; i++) + put_page(job->pages[i]); + + kcopyd_free_job(job); +} + +/* + * These callback functions implement the state machine that copies regions. + */ +void copy_write(struct kcopyd_job *job) +{ + struct copy_info *info = (struct copy_info *) job->context; + + if (job->err && info->notify) { + info->notify(job->err, job->context); + kcopyd_free_job(job); + free_copy_info(info); + return; + } + + job->rw = WRITE; + memcpy(&job->disk, &info->to, sizeof(job->disk)); + job->callback = copy_complete; + job->context = info; + + /* + * Queue the write. + */ + kcopyd_io(job); +} + +int kcopyd_write_pages(struct kcopyd_region *to, int nr_pages, + struct page **pages, int offset, kcopyd_notify_fn fn, + void *context) +{ + struct copy_info *info; + struct kcopyd_job *job; + int i; + + /* + * Allocate a new copy_info. + */ + info = alloc_copy_info(); + if (!info) + return -ENOMEM; + + job = kcopyd_alloc_job(); + if (!job) { + free_copy_info(info); + return -ENOMEM; + } + + /* + * set up for the write. + */ + info->notify = fn; + info->notify_context = context; + memcpy(&info->to, to, sizeof(*to)); + + /* Get the pages */ + job->nr_pages = nr_pages; + for (i = 0; i < nr_pages; i++) { + get_page(pages[i]); + job->pages[i] = pages[i]; + } + + job->rw = WRITE; + + memcpy(&job->disk, &info->to, sizeof(job->disk)); + job->offset = offset; + calc_block_sizes(job); + job->callback = page_write_complete; + job->context = info; + + /* + * Trigger job. + */ + kcopyd_io(job); + return 0; +} + +int kcopyd_copy(struct kcopyd_region *from, struct kcopyd_region *to, + kcopyd_notify_fn fn, void *context) +{ + struct copy_info *info; + struct kcopyd_job *job; + + /* + * Allocate a new copy_info. + */ + info = alloc_copy_info(); + if (!info) + return -ENOMEM; + + job = kcopyd_alloc_job(); + if (!job) { + free_copy_info(info); + return -ENOMEM; + } + + /* + * set up for the read. 
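In outline, the callbacks above chain as follows:

/*
 * copy_write() and copy_complete() form a two-step state machine: the
 * initial READ of the source completes into copy_write(), which reuses
 * the same job and pages as a WRITE to the destination held in the
 * copy_info, and that write completes into copy_complete(), which
 * notifies the client and returns the pages and the job to their pools.
 * page_write_complete() is the write-only variant used when the client
 * supplies its own pages: it drops the page references taken by
 * kcopyd_write_pages() instead of returning pages to the kcopyd pool.
 */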
+ */ + info->notify = fn; + info->notify_context = context; + memcpy(&info->to, to, sizeof(*to)); + + job->rw = READ; + memcpy(&job->disk, from, sizeof(*from)); + + job->offset = 0; + calc_block_sizes(job); + job->callback = copy_write; + job->context = info; + + /* + * Trigger job. + */ + kcopyd_io(job); + return 0; +} + +/*----------------------------------------------------------------- + * Unit setup + *---------------------------------------------------------------*/ +static struct { + int (*init) (void); + void (*exit) (void); + +} _inits[] = { +#define xx(n) { init_ ## n, exit_ ## n} + xx(pages), + xx(buffers), + xx(jobs), + xx(copier) +#undef xx +}; + +static int _client_count = 0; +static DECLARE_MUTEX(_client_count_sem); + +static int kcopyd_init(void) +{ + const int count = sizeof(_inits) / sizeof(*_inits); + + int r, i; + + for (i = 0; i < count; i++) { + r = _inits[i].init(); + if (r) + goto bad; + } + + start_daemon(); + return 0; + + bad: + while (i--) + _inits[i].exit(); + + return r; +} + +static void kcopyd_exit(void) +{ + int i = sizeof(_inits) / sizeof(*_inits); + + if (stop_daemon()) + DMWARN("Couldn't stop kcopyd."); + + while (i--) + _inits[i].exit(); +} + +void kcopyd_inc_client_count(void) +{ + /* + * What I need here is an atomic_test_and_inc that returns + * the previous value of the atomic... In its absence I lock + * an int with a semaphore. :-( + */ + down(&_client_count_sem); + if (_client_count == 0) + kcopyd_init(); + _client_count++; + + up(&_client_count_sem); +} + +void kcopyd_dec_client_count(void) +{ + down(&_client_count_sem); + if (--_client_count == 0) + kcopyd_exit(); + + up(&_client_count_sem); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/kcopyd.h linux.22-ac2/drivers/md/kcopyd.h --- linux.vanilla/drivers/md/kcopyd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/md/kcopyd.h 2003-09-01 13:54:30.000000000 +0100 @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2001 Sistina Software + * + * This file is released under the GPL. + */ + +#ifndef DM_KCOPYD_H +#define DM_KCOPYD_H + +/* + * Needed for the definition of offset_t. + */ +#include +#include + +struct kcopyd_region { + kdev_t dev; + offset_t sector; + offset_t count; +}; + +#define MAX_KCOPYD_PAGES 128 + +struct kcopyd_job { + struct list_head list; + + /* + * Error state of the job. + */ + int err; + + /* + * Either READ or WRITE + */ + int rw; + + /* + * The source or destination for the transfer. + */ + struct kcopyd_region disk; + + int nr_pages; + struct page *pages[MAX_KCOPYD_PAGES]; + + /* + * Shifts and masks that will be useful when dispatching + * each buffer_head. + */ + offset_t offset; + offset_t block_size; + offset_t block_shift; + offset_t bpp_shift; /* blocks per page */ + offset_t bpp_mask; + + /* + * nr_blocks is how many buffer heads will have to be + * displatched to service this job, nr_requested is how + * many have been dispatched and nr_complete is how many + * have come back. + */ + unsigned int nr_blocks; + atomic_t nr_requested; + atomic_t nr_incomplete; + + /* + * Set this to ensure you are notified when the job has + * completed. 'context' is for callback to use. + */ + void (*callback)(struct kcopyd_job *job); + void *context; +}; + +/* + * Low level async io routines. + */ +struct kcopyd_job *kcopyd_alloc_job(void); +void kcopyd_free_job(struct kcopyd_job *job); + +int kcopyd_queue_job(struct kcopyd_job *job); + +/* + * Submit a copy job to kcopyd. This is built on top of the + * previous three fns. 
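A minimal sketch of how a client of kcopyd_copy() might look (hypothetical function and variable names; the callback signature is the kcopyd_notify_fn declared just below):

static void my_copy_done(int err, void *context)
{
	/* err is 0 on success; context is whatever the caller passed in.
	 * A real client would resume its own state machine here. */
	printk(KERN_INFO "kcopyd copy finished: %d\n", err);
}

static int mirror_chunk(kdev_t from_dev, kdev_t to_dev,
			offset_t sector, offset_t nr_sectors, void *ctx)
{
	struct kcopyd_region from, to;

	from.dev = from_dev;
	from.sector = sector;
	from.count = nr_sectors;

	to.dev = to_dev;
	to.sector = sector;
	to.count = nr_sectors;

	return kcopyd_copy(&from, &to, my_copy_done, ctx);
}

/*
 * A client is also expected to bracket its use of the service with
 * kcopyd_inc_client_count() / kcopyd_dec_client_count(), typically in
 * its constructor and destructor, so the page and buffer pools are only
 * reserved while somebody is actually using them.
 */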
+ */ +typedef void (*kcopyd_notify_fn)(int err, void *context); + +int kcopyd_copy(struct kcopyd_region *from, struct kcopyd_region *to, + kcopyd_notify_fn fn, void *context); + +int kcopyd_write_pages(struct kcopyd_region *to, int nr_pages, + struct page **pages, int offset, kcopyd_notify_fn fn, + void *context); + +/* + * We only want kcopyd to reserve resources if someone is + * actually using it. + */ +void kcopyd_inc_client_count(void); +void kcopyd_dec_client_count(void); + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/Makefile linux.22-ac2/drivers/md/Makefile --- linux.vanilla/drivers/md/Makefile 2001-11-11 18:09:32.000000000 +0000 +++ linux.22-ac2/drivers/md/Makefile 2003-06-29 16:10:32.000000000 +0100 @@ -4,9 +4,11 @@ O_TARGET := mddev.o -export-objs := md.o xor.o +export-objs := md.o xor.o dm-table.o dm-target.o kcopyd.o list-multi := lvm-mod.o lvm-mod-objs := lvm.o lvm-snap.o lvm-fs.o +dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \ + dm-ioctl.o dm-snapshot.o dm-exception-store.o kcopyd.o # Note: link order is important. All raid personalities # and xor.o must come before md.o, as they each initialise @@ -20,8 +22,12 @@ obj-$(CONFIG_MD_MULTIPATH) += multipath.o obj-$(CONFIG_BLK_DEV_MD) += md.o obj-$(CONFIG_BLK_DEV_LVM) += lvm-mod.o +obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o include $(TOPDIR)/Rules.make lvm-mod.o: $(lvm-mod-objs) $(LD) -r -o $@ $(lvm-mod-objs) + +dm-mod.o: $(dm-mod-objs) + $(LD) -r -o $@ $(dm-mod-objs) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/md/md.c linux.22-ac2/drivers/md/md.c --- linux.vanilla/drivers/md/md.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/md/md.c 2003-07-30 21:19:04.000000000 +0100 @@ -77,7 +77,7 @@ */ static int sysctl_speed_limit_min = 100; -static int sysctl_speed_limit_max = 100000; +static int sysctl_speed_limit_max = 10000; static struct ctl_table_header *raid_table_header; @@ -2939,8 +2939,6 @@ * bdflush, otherwise bdflush will deadlock if there are too * many dirty RAID5 blocks. */ - current->policy = SCHED_OTHER; - current->nice = -20; md_unlock_kernel(); complete(thread->event); @@ -3464,11 +3462,6 @@ "(but not more than %d KB/sec) for reconstruction.\n", sysctl_speed_limit_max); - /* - * Resync has low priority. - */ - current->nice = 19; - is_mddev_idle(mddev); /* this also initializes IO event counters */ for (m = 0; m < SYNC_MARKS; m++) { mark[m] = jiffies; @@ -3546,16 +3539,13 @@ currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1; if (currspeed > sysctl_speed_limit_min) { - current->nice = 19; - if ((currspeed > sysctl_speed_limit_max) || !is_mddev_idle(mddev)) { current->state = TASK_INTERRUPTIBLE; md_schedule_timeout(HZ/4); goto repeat; } - } else - current->nice = -20; + } } printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev)); err = 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/media/video/cpia.c linux.22-ac2/drivers/media/video/cpia.c --- linux.vanilla/drivers/media/video/cpia.c 2003-06-14 00:11:32.000000000 +0100 +++ linux.22-ac2/drivers/media/video/cpia.c 2003-07-31 14:39:39.000000000 +0100 @@ -1683,13 +1683,9 @@ * values. 
- rich@annexia.org */ if (cam->params.exposure.redComp < 220 || - cam->params.exposure.redComp > 255 || cam->params.exposure.green1Comp < 214 || - cam->params.exposure.green1Comp > 255 || cam->params.exposure.green2Comp < 214 || - cam->params.exposure.green2Comp > 255 || - cam->params.exposure.blueComp < 230 || - cam->params.exposure.blueComp > 255) + cam->params.exposure.blueComp < 230) { printk (KERN_WARNING "*_comp parameters have gone AWOL (%d/%d/%d/%d) - reseting them\n", cam->params.exposure.redComp, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/media/video/cpia.h linux.22-ac2/drivers/media/video/cpia.h --- linux.vanilla/drivers/media/video/cpia.h 2003-06-14 00:11:32.000000000 +0100 +++ linux.22-ac2/drivers/media/video/cpia.h 2003-09-01 13:54:30.000000000 +0100 @@ -393,12 +393,14 @@ /* ErrorCode */ #define ERROR_FLICKER_BELOW_MIN_EXP 0x01 /*flicker exposure got below minimum exposure */ -#define ALOG(lineno,fmt,args...) printk(fmt,lineno,##args) -#define LOG(fmt,args...) ALOG((__LINE__),KERN_INFO __FILE__":"__FUNCTION__"(%d):"fmt,##args) +#define ALOG(function,lineno,fmt,args...) printk(fmt, function, lineno, ##args) +#define LOG(fmt,args...) ALOG((__FUNCTION__), (__LINE__), \ + KERN_INFO __FILE__":%s(%d):"fmt, ##args) #ifdef _CPIA_DEBUG_ -#define ADBG(lineno,fmt,args...) printk(fmt, jiffies, lineno, ##args) -#define DBG(fmt,args...) ADBG((__LINE__),KERN_DEBUG __FILE__"(%ld):"__FUNCTION__"(%d):"fmt,##args) +#define ADBG(function,lineno,fmt,args...) printk(fmt, jiffies, function, lineno, ##args) +#define DBG(fmt,args...) ADBG((__FUNCTION__), (__LINE__), \ + KERN_DEBUG __FILE__"(%ld):%s(%d):"fmt, ##args) #else #define DBG(fmn,args...) do {} while(0) #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/media/video/meye.c linux.22-ac2/drivers/media/video/meye.c --- linux.vanilla/drivers/media/video/meye.c 2003-08-28 16:45:34.000000000 +0100 +++ linux.22-ac2/drivers/media/video/meye.c 2003-08-28 22:11:47.000000000 +0100 @@ -35,7 +35,6 @@ #include #include #include -#include #include #include @@ -139,7 +138,7 @@ memset(mem, 0, size); /* Clear the ram out, no junk to the user */ adr = (unsigned long)mem; while (size > 0) { - mem_map_reserve(vmalloc_to_page((void *)adr)); + SetPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } @@ -153,7 +152,7 @@ if (mem) { adr = (unsigned long) mem; while ((long) size > 0) { - mem_map_unreserve(vmalloc_to_page((void *)adr)); + ClearPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/mtd/maps/Config.in linux.22-ac2/drivers/mtd/maps/Config.in --- linux.vanilla/drivers/mtd/maps/Config.in 2003-06-14 00:11:32.000000000 +0100 +++ linux.22-ac2/drivers/mtd/maps/Config.in 2003-06-29 16:10:32.000000000 +0100 @@ -19,7 +19,7 @@ if [ "$CONFIG_X86" = "y" ]; then dep_tristate ' CFI Flash device mapped on Photron PNC-2000' CONFIG_MTD_PNC2000 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS - dep_tristate ' CFI Flash device mapped on AMD SC520 CDP' CONFIG_MTD_SC520CDP $CONFIG_MTD_CFI + dep_tristate ' CFI Flash device mapped on AMD SC520 CDP' CONFIG_MTD_SC520CDP $CONFIG_MTD_CFI $CONFIG_MTD_CONCAT dep_tristate ' CFI Flash device mapped on AMD NetSc520' CONFIG_MTD_NETSC520 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS dep_tristate ' CFI Flash device mapped on Arcom SBC-GXx boards' CONFIG_MTD_SBC_GXX $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS dep_tristate ' CFI Flash 
device mapped on Arcom ELAN-104NC' CONFIG_MTD_ELAN_104NC $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/aironet4500_core.c linux.22-ac2/drivers/net/aironet4500_core.c --- linux.vanilla/drivers/net/aironet4500_core.c 2001-09-30 20:26:06.000000000 +0100 +++ linux.22-ac2/drivers/net/aironet4500_core.c 2003-06-29 16:09:54.000000000 +0100 @@ -2676,10 +2676,8 @@ #endif //awc_dump_registers(dev); - if (adhoc & !max_mtu) - max_mtu= 2250; - else if (!max_mtu) - max_mtu= 1500; + if (!max_mtu) + max_mtu= adhoc ? 2250 : 1500; priv->sleeping_bap = 1; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/Config.in linux.22-ac2/drivers/net/Config.in --- linux.vanilla/drivers/net/Config.in 2003-08-28 16:45:35.000000000 +0100 +++ linux.22-ac2/drivers/net/Config.in 2003-08-28 17:03:10.000000000 +0100 @@ -268,7 +268,18 @@ dep_tristate 'Packet Engines Hamachi GNIC-II support' CONFIG_HAMACHI $CONFIG_PCI dep_tristate 'Packet Engines Yellowfin Gigabit-NIC support (EXPERIMENTAL)' CONFIG_YELLOWFIN $CONFIG_PCI $CONFIG_EXPERIMENTAL dep_tristate 'Realtek 8169 Gigabit Ethernet support' CONFIG_R8169 $CONFIG_PCI -dep_tristate 'SysKonnect SK-98xx and SK-95xx Gigabit Ethernet Adapter family support' CONFIG_SK98LIN $CONFIG_PCI +dep_tristate 'Marvell Yukon Chipset / SysKonnect SK-98xx Support' CONFIG_SK98LIN $CONFIG_PCI +if [ "$CONFIG_SK98LIN" != "n" ]; then + bool ' 3Com 3C940/3C941 Gigabit Ethernet Adapter' CONFIG_SK98LIN_T1 + bool ' Allied Telesyn AT-29xx Gigabit Ethernet Adapter' CONFIG_SK98LIN_T3 + bool ' CNet N-Way Gigabit Ethernet Adapter' CONFIG_SK98LIN_T8 + bool ' D-Link DGE-530T Gigabit Ethernet Adapter' CONFIG_SK98LIN_T6 + bool ' Linksys EG10xx Ethernet Server Adapter' CONFIG_SK98LIN_T9 + bool ' Marvell RDK-80xx Adapter' CONFIG_SK98LIN_T4 + bool ' Marvell Yukon Gigabit Ethernet Adapter' CONFIG_SK98LIN_T7 + bool ' SysKonnect SK-98xx Server Gigabit Adapter' CONFIG_SK98LIN_T2 + bool ' SysKonnect SK-98xx V2.0 Gigabit Ethernet Adapter' CONFIG_SK98LIN_T5 +fi dep_tristate 'Broadcom Tigon3 support' CONFIG_TIGON3 $CONFIG_PCI endmenu diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/irda/act200l.c linux.22-ac2/drivers/net/irda/act200l.c --- linux.vanilla/drivers/net/irda/act200l.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/net/irda/act200l.c 2003-08-28 22:33:48.000000000 +0100 @@ -106,7 +106,7 @@ static void act200l_open(dongle_t *self, struct qos_info *qos) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* Power on the dongle */ self->set_dtr_rts(self->dev, TRUE, TRUE); @@ -120,7 +120,7 @@ static void act200l_close(dongle_t *self) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* Power off the dongle */ self->set_dtr_rts(self->dev, FALSE, FALSE); @@ -141,7 +141,7 @@ __u8 control[3]; int ret = 0; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); self->speed_task = task; @@ -158,7 +158,7 @@ } break; case IRDA_TASK_CHILD_WAIT: - WARNING(__FUNCTION__ "(), resetting dongle timed out!\n"); + WARNING("%s(), resetting dongle timed out!\n", __FUNCTION__); ret = -1; break; case IRDA_TASK_CHILD_DONE: @@ -203,7 +203,7 @@ self->speed_task = NULL; break; default: - ERROR(__FUNCTION__ "(), unknown state %d\n", task->state); + ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state); irda_task_next_state(task, IRDA_TASK_DONE); self->speed_task = NULL; ret = 
-1; @@ -233,7 +233,7 @@ }; int ret = 0; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); self->reset_task = task; @@ -269,7 +269,7 @@ self->reset_task = NULL; break; default: - ERROR(__FUNCTION__ "(), unknown state %d\n", task->state); + ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state); irda_task_next_state(task, IRDA_TASK_DONE); self->reset_task = NULL; ret = -1; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/irda/irda-usb.c linux.22-ac2/drivers/net/irda/irda-usb.c --- linux.vanilla/drivers/net/irda/irda-usb.c 2003-08-28 16:45:35.000000000 +0100 +++ linux.22-ac2/drivers/net/irda/irda-usb.c 2003-08-28 22:33:48.000000000 +0100 @@ -339,7 +339,7 @@ int res, mtt; int err = 1; /* Failed */ - IRDA_DEBUG(4, __FUNCTION__ "() on %s\n", netdev->name); + IRDA_DEBUG(4, "%s() on %s\n", __FUNCTION__, netdev->name); netif_stop_queue(netdev); @@ -542,7 +542,7 @@ (self->new_xbofs != self->xbofs)) { /* We haven't changed speed yet (because of * IUC_SPEED_BUG), so do it now - Jean II */ - IRDA_DEBUG(1, __FUNCTION__ "(), Changing speed now...\n"); + IRDA_DEBUG(1, "%s(), Changing speed now...\n", __FUNCTION__); irda_usb_change_speed_xbofs(self); } else { /* New speed and xbof is now commited in hardware */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/irda/ma600.c linux.22-ac2/drivers/net/irda/ma600.c --- linux.vanilla/drivers/net/irda/ma600.c 2003-08-28 16:45:35.000000000 +0100 +++ linux.22-ac2/drivers/net/irda/ma600.c 2003-08-28 22:33:48.000000000 +0100 @@ -48,7 +48,7 @@ #undef IRDA_DEBUG #define IRDA_DEBUG(n, args...) (printk(KERN_DEBUG args)) - #undef ASSERT(expr, func) + #undef ASSERT #define ASSERT(expr, func) \ if(!(expr)) { \ printk( "Assertion failed! 
%s,%s,%s,line=%d\n",\ @@ -86,13 +86,13 @@ int __init ma600_init(void) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); return irda_device_register_dongle(&dongle); } void __exit ma600_cleanup(void) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); irda_device_unregister_dongle(&dongle); } @@ -105,7 +105,7 @@ */ static void ma600_open(dongle_t *self, struct qos_info *qos) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400 |IR_57600|IR_115200; @@ -123,7 +123,7 @@ static void ma600_close(dongle_t *self) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* Power off dongle */ self->set_dtr_rts(self->dev, FALSE, FALSE); @@ -184,12 +184,12 @@ __u8 byte_echo; int ret = 0; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(task != NULL, return -1;); if (self->speed_task && self->speed_task != task) { - IRDA_DEBUG(0, __FUNCTION__ "(), busy!\n"); + IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__); return MSECS_TO_JIFFIES(10); } else { self->speed_task = task; @@ -215,7 +215,7 @@ break; case IRDA_TASK_CHILD_WAIT: - WARNING(__FUNCTION__ "(), resetting dongle timed out!\n"); + WARNING("%s(), resetting dongle timed out!\n", __FUNCTION__); ret = -1; break; @@ -246,7 +246,7 @@ if(byte != byte_echo) { /* if control byte != echo, I don't know what to do */ - printk(KERN_WARNING __FUNCTION__ "() control byte written != read!\n"); + printk(KERN_WARNING "%s() control byte written != read!\n", __FUNCTION__); printk(KERN_WARNING "control byte = 0x%c%c\n", hexTbl[(byte>>4)&0x0f], hexTbl[byte&0x0f]); printk(KERN_WARNING "byte echo = 0x%c%c\n", @@ -254,7 +254,7 @@ hexTbl[byte_echo & 0x0f]); #ifndef NDEBUG } else { - IRDA_DEBUG(2, __FUNCTION__ "() control byte write read OK\n"); + IRDA_DEBUG(2, "%s() control byte write read OK\n", __FUNCTION__); #endif } @@ -273,7 +273,7 @@ break; default: - ERROR(__FUNCTION__ "(), unknown state %d\n", task->state); + ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state); irda_task_next_state(task, IRDA_TASK_DONE); self->speed_task = NULL; ret = -1; @@ -298,12 +298,12 @@ dongle_t *self = (dongle_t *) task->instance; int ret = 0; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(task != NULL, return -1;); if (self->reset_task && self->reset_task != task) { - IRDA_DEBUG(0, __FUNCTION__ "(), busy!\n"); + IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__); return MSECS_TO_JIFFIES(10); } else self->reset_task = task; @@ -326,7 +326,7 @@ self->reset_task = NULL; break; default: - ERROR(__FUNCTION__ "(), unknown state %d\n", task->state); + ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state); irda_task_next_state(task, IRDA_TASK_DONE); self->reset_task = NULL; ret = -1; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/irda/mcp2120.c linux.22-ac2/drivers/net/irda/mcp2120.c --- linux.vanilla/drivers/net/irda/mcp2120.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/net/irda/mcp2120.c 2003-08-28 22:33:48.000000000 +0100 @@ -110,7 +110,7 @@ } break; case IRDA_TASK_CHILD_WAIT: - WARNING(__FUNCTION__ "(), resetting dongle timed out!\n"); + WARNING("%s(), resetting dongle timed out!\n", __FUNCTION__); ret = -1; break; case IRDA_TASK_CHILD_DONE: @@ -158,7 +158,7 @@ //printk("mcp2120_change_speed irda_task_wait\n"); break; default: - ERROR(__FUNCTION__ "(), unknown state %d\n", task->state); + 
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state); irda_task_next_state(task, IRDA_TASK_DONE); self->speed_task = NULL; ret = -1; @@ -213,7 +213,7 @@ self->reset_task = NULL; break; default: - ERROR(__FUNCTION__ "(), unknown state %d\n", task->state); + ERROR("%s(), unknown state %d\n", __FUNCTION__ , task->state); irda_task_next_state(task, IRDA_TASK_DONE); self->reset_task = NULL; ret = -1; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/irda/nsc-ircc.c linux.22-ac2/drivers/net/irda/nsc-ircc.c --- linux.vanilla/drivers/net/irda/nsc-ircc.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/net/irda/nsc-ircc.c 2003-08-28 22:33:48.000000000 +0100 @@ -700,7 +700,7 @@ switch_bank(iobase, BANK3); version = inb(iobase+MID); - IRDA_DEBUG(2, __FUNCTION__ "() Driver %s Found chip version %02x\n", + IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n", __FUNCTION__, driver_name, version); /* Should be 0x2? */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/pcmcia/3c574_cs.c linux.22-ac2/drivers/net/pcmcia/3c574_cs.c --- linux.vanilla/drivers/net/pcmcia/3c574_cs.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/net/pcmcia/3c574_cs.c 2003-09-01 13:18:39.000000000 +0100 @@ -1202,6 +1202,7 @@ case ETHTOOL_GDRVINFO: { struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO}; strncpy(info.driver, "3c574_cs", sizeof(info.driver)-1); + sprintf(info.bus_info, "PCMCIA 0x%lx", dev->base_addr); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/pcmcia/axnet_cs.c linux.22-ac2/drivers/net/pcmcia/axnet_cs.c --- linux.vanilla/drivers/net/pcmcia/axnet_cs.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/pcmcia/axnet_cs.c 2003-09-01 13:18:39.000000000 +0100 @@ -833,6 +833,7 @@ case ETHTOOL_GDRVINFO: { struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO}; strncpy(info.driver, "axnet_cs", sizeof(info.driver)-1); + sprintf(info.bus_info, "PCMCIA 0x%lx", dev->base_addr); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/pcmcia/ibmtr_cs.c linux.22-ac2/drivers/net/pcmcia/ibmtr_cs.c --- linux.vanilla/drivers/net/pcmcia/ibmtr_cs.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/net/pcmcia/ibmtr_cs.c 2003-06-29 16:09:56.000000000 +0100 @@ -177,6 +177,7 @@ case ETHTOOL_GDRVINFO: { struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO}; strncpy(info.driver, "ibmtr_cs", sizeof(info.driver)-1); + sprintf(info.bus_info, "PCMCIA 0x%lx", dev->base_addr); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/pcmcia/pcnet_cs.c linux.22-ac2/drivers/net/pcmcia/pcnet_cs.c --- linux.vanilla/drivers/net/pcmcia/pcnet_cs.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/net/pcmcia/pcnet_cs.c 2003-09-01 13:18:48.000000000 +0100 @@ -1226,6 +1226,7 @@ case ETHTOOL_GDRVINFO: { struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO}; strncpy(info.driver, "pcnet_cs", sizeof(info.driver)-1); + sprintf(info.bus_info, "PCMCIA 0x%lx", dev->base_addr); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/pcmcia/ray_cs.c linux.22-ac2/drivers/net/pcmcia/ray_cs.c --- 
linux.vanilla/drivers/net/pcmcia/ray_cs.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/pcmcia/ray_cs.c 2003-06-29 16:09:55.000000000 +0100 @@ -1247,6 +1247,7 @@ case ETHTOOL_GDRVINFO: { struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO}; strncpy(info.driver, "ray_cs", sizeof(info.driver)-1); + sprintf(info.bus_info, "PCMCIA 0x%lx", dev->base_addr); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/pcmcia/smc91c92_cs.c linux.22-ac2/drivers/net/pcmcia/smc91c92_cs.c --- linux.vanilla/drivers/net/pcmcia/smc91c92_cs.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/pcmcia/smc91c92_cs.c 2003-06-29 16:09:55.000000000 +0100 @@ -2161,6 +2161,7 @@ struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO }; strcpy(info.driver, DRV_NAME); strcpy(info.version, DRV_VERSION); + sprintf(info.bus_info, "PCMCIA 0x%lx", dev->base_addr); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/pcmcia/wavelan_cs.c linux.22-ac2/drivers/net/pcmcia/wavelan_cs.c --- linux.vanilla/drivers/net/pcmcia/wavelan_cs.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/pcmcia/wavelan_cs.c 2003-06-29 16:09:55.000000000 +0100 @@ -1902,6 +1902,7 @@ case ETHTOOL_GDRVINFO: { struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO}; strncpy(info.driver, "wavelan_cs", sizeof(info.driver)-1); + sprintf(info.bus_info, "PCMCIA 0x%lx", dev->base_addr); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/pcmcia/xirc2ps_cs.c linux.22-ac2/drivers/net/pcmcia/xirc2ps_cs.c --- linux.vanilla/drivers/net/pcmcia/xirc2ps_cs.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/pcmcia/xirc2ps_cs.c 2003-09-01 13:18:58.000000000 +0100 @@ -1733,6 +1733,7 @@ case ETHTOOL_GDRVINFO: { struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO}; strncpy(info.driver, "xirc2ps_cs", sizeof(info.driver)-1); + sprintf(info.bus_info, "PCMCIA 0x%lx", dev->base_addr); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/pcnet32.c linux.22-ac2/drivers/net/pcnet32.c --- linux.vanilla/drivers/net/pcnet32.c 2003-08-28 16:45:35.000000000 +0100 +++ linux.22-ac2/drivers/net/pcnet32.c 2003-06-29 16:20:14.000000000 +0100 @@ -1,5 +1,5 @@ -/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ -/* +/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. + * * Copyright 1996-1999 Thomas Bogendoerfer * * Derived from the lance driver written 1993,1994,1995 by Donald Becker. @@ -543,7 +543,7 @@ /* initialize variables */ fdx = mii = fset = dxsuflo = ltint = 0; chip_version = (chip_version >> 12) & 0xffff; - + switch (chip_version) { case 0x2420: chipname = "PCnet/PCI 79C970"; /* PCI */ @@ -1178,19 +1178,12 @@ if (err_status & 0x04000000) lp->stats.tx_aborted_errors++; if (err_status & 0x08000000) lp->stats.tx_carrier_errors++; if (err_status & 0x10000000) lp->stats.tx_window_errors++; -#ifndef DO_DXSUFLO if (err_status & 0x40000000) { lp->stats.tx_fifo_errors++; - /* Ackk! On FIFO errors the Tx unit is turned off! */ - /* Remove this verbosity later! */ - printk(KERN_ERR "%s: Tx FIFO error! 
CSR0=%4.4x\n", - dev->name, csr0); - must_restart = 1; - } -#else - if (err_status & 0x40000000) { - lp->stats.tx_fifo_errors++; - if (! lp->dxsuflo) { /* If controller doesn't recover ... */ +#ifdef DO_DXSUFLO + if (! lp->dxsuflo) +#endif + { /* If controller doesn't recover ... */ /* Ackk! On FIFO errors the Tx unit is turned off! */ /* Remove this verbosity later! */ printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", @@ -1198,7 +1191,6 @@ must_restart = 1; } } -#endif } else { if (status & 0x1800) lp->stats.collisions++; @@ -1717,20 +1709,20 @@ next_dev = lp->next; unregister_netdev(pcnet32_dev); release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); - if (lp->pci_dev) - pci_unregister_driver(&pcnet32_driver); pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); kfree(pcnet32_dev); pcnet32_dev = next_dev; } + pci_unregister_driver(&pcnet32_driver); } + module_init(pcnet32_init_module); module_exit(pcnet32_cleanup_module); /* * Local variables: - * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c pcnet32.c" + * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include/linux -Wall -Wstrict-prototypes -O2 -m486 -c pcnet32.c" * c-indent-level: 4 * tab-width: 8 * End: diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/build_no.c linux.22-ac2/drivers/net/sk98lin/build_no.c --- linux.vanilla/drivers/net/sk98lin/build_no.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/build_no.c 2003-06-29 16:09:55.000000000 +0100 @@ -7,4 +7,3 @@ static const char SysKonnectBuildNumber[] = "@(#)SK-BUILD: 6.02 (20021219) PL: ALL.01"; -^Z \ No newline at end of file diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/lm80.h linux.22-ac2/drivers/net/sk98lin/h/lm80.h --- linux.vanilla/drivers/net/sk98lin/h/lm80.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/lm80.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,9 +1,9 @@ /****************************************************************************** * * Name: lm80.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.4 $ - * Date: $Date: 2002/04/25 11:04:10 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.6 $ + * Date: $Date: 2003/05/13 17:26:52 $ * Purpose: Contains all defines for the LM80 Chip * (National Semiconductor). * @@ -11,7 +11,8 @@ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,13 @@ * * History: * $Log: lm80.h,v $ + * Revision 1.6 2003/05/13 17:26:52 mkarl + * Editorial changes. + * + * Revision 1.5 2003/03/31 07:15:18 mkarl + * Corrected Copyright. + * Editorial changes. 
+ * * Revision 1.4 2002/04/25 11:04:10 rschmidt * Editorial changes * @@ -55,8 +63,8 @@ * * All registers are 8 bit wide */ -#define LM80_CFG 0x00 /* Configuration Register */ -#define LM80_ISRC_1 0x01 /* Interrupt Status Register 1 */ +#define LM80_CFG 0x00 /* Configuration Register */ +#define LM80_ISRC_1 0x01 /* Interrupt Status Register 1 */ #define LM80_ISRC_2 0x02 /* Interrupt Status Register 2 */ #define LM80_IMSK_1 0x03 /* Interrupt Mask Register 1 */ #define LM80_IMSK_2 0x04 /* Interrupt Mask Register 2 */ @@ -93,8 +101,8 @@ #define LM80_THOT_LIM_LO 0x39 /* hot temperature limit (low) */ #define LM80_TOS_LIM_UP 0x3a /* OS temperature limit (high) */ #define LM80_TOS_LIM_LO 0x3b /* OS temperature limit (low) */ -#define LM80_FAN1_COUNT_LIM 0x3c /* Fan 1 count limit (high) */ -#define LM80_FAN2_COUNT_LIM 0x3d /* Fan 2 count limit (low) */ +#define LM80_FAN1_COUNT_LIM 0x3c /* Fan 1 count limit (high) */ +#define LM80_FAN2_COUNT_LIM 0x3d /* Fan 2 count limit (low) */ /* 0x3e - 0x3f reserved */ /* @@ -124,7 +132,7 @@ /* LM80_ISRC_2 Interrupt Status Register 2 */ /* LM80_IMSK_2 Interrupt Mask Register 2 */ -#define LM80_IS_TEMP (1<<0) /* HOT temperature limit exceeded */ +#define LM80_IS_TEMP (1<<0) /* HOT temperature limit exceeded */ #define LM80_IS_BTI (1<<1) /* state of BTI# pin */ #define LM80_IS_FAN1 (1<<2) /* count limit exceeded for Fan 1 */ #define LM80_IS_FAN2 (1<<3) /* count limit exceeded for Fan 2 */ @@ -143,7 +151,7 @@ #define LM80_FAN_RST_ENA (1<<7) /* sets RST_OUT#/OS# pins in RST mode */ /* LM80_TEMP_CTRL OS# Config, Temp Res. Reg */ -#define LM80_TEMP_OS_STAT (1<<0) /* mirrors the state of RST_OUT#/OS# */ +#define LM80_TEMP_OS_STAT (1<<0) /* mirrors the state of RST_OUT#/OS# */ #define LM80_TEMP_OS_POL (1<<1) /* select OS# polarity */ #define LM80_TEMP_OS_MODE (1<<2) /* selects Interrupt mode */ #define LM80_TEMP_RES (1<<3) /* selects 9 or 11 bit temp resulution*/ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skaddr.h linux.22-ac2/drivers/net/sk98lin/h/skaddr.h --- linux.vanilla/drivers/net/sk98lin/h/skaddr.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skaddr.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skaddr.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.26 $ - * Date: $Date: 2002/11/15 07:24:42 $ + * Project: Gigabit Ethernet Adapters, ADDR-Modul + * Version: $Revision: 1.29 $ + * Date: $Date: 2003/05/13 16:57:24 $ * Purpose: Header file for Address Management (MC, UC, Prom). * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,16 @@ * History: * * $Log: skaddr.h,v $ + * Revision 1.29 2003/05/13 16:57:24 mkarl + * Changes for SLIM driver. + * Editorial changes. + * + * Revision 1.28 2003/04/15 09:33:22 tschilli + * Copyright messages changed. + * + * Revision 1.27 2003/04/14 15:55:11 tschilli + * "#error C++ is not yet supported." removed. + * * Revision 1.26 2002/11/15 07:24:42 tschilli * SK_ADDR_EQUAL macro fixed. 
* @@ -140,7 +151,6 @@ #define __INC_SKADDR_H #ifdef __cplusplus -#error C++ is not yet supported. extern "C" { #endif /* cplusplus */ @@ -206,7 +216,7 @@ /* Macros */ -#if 0 +#ifdef OLD_STUFF #ifndef SK_ADDR_EQUAL /* * "&" instead of "&&" allows better optimization on IA-64. @@ -231,16 +241,18 @@ #ifndef SK_ADDR_EQUAL #ifndef SK_ADDR_DWORD_COMPARE #define SK_ADDR_EQUAL(A1,A2) ( \ - (((SK_U8 *)(A1))[5] == ((SK_U8 *)(A2))[5]) & \ - (((SK_U8 *)(A1))[4] == ((SK_U8 *)(A2))[4]) & \ - (((SK_U8 *)(A1))[3] == ((SK_U8 *)(A2))[3]) & \ - (((SK_U8 *)(A1))[2] == ((SK_U8 *)(A2))[2]) & \ - (((SK_U8 *)(A1))[1] == ((SK_U8 *)(A2))[1]) & \ - (((SK_U8 *)(A1))[0] == ((SK_U8 *)(A2))[0])) + (((SK_U8 SK_FAR *)(A1))[5] == ((SK_U8 SK_FAR *)(A2))[5]) & \ + (((SK_U8 SK_FAR *)(A1))[4] == ((SK_U8 SK_FAR *)(A2))[4]) & \ + (((SK_U8 SK_FAR *)(A1))[3] == ((SK_U8 SK_FAR *)(A2))[3]) & \ + (((SK_U8 SK_FAR *)(A1))[2] == ((SK_U8 SK_FAR *)(A2))[2]) & \ + (((SK_U8 SK_FAR *)(A1))[1] == ((SK_U8 SK_FAR *)(A2))[1]) & \ + (((SK_U8 SK_FAR *)(A1))[0] == ((SK_U8 SK_FAR *)(A2))[0])) #else /* SK_ADDR_DWORD_COMPARE */ #define SK_ADDR_EQUAL(A1,A2) ( \ - (*(SK_U16 *)&(((SK_U8 *)(A1))[4]) == *(SK_U16 *)&(((SK_U8 *)(A2))[4])) && \ - (*(SK_U32 *)&(((SK_U8 *)(A1))[0]) == *(SK_U32 *)&(((SK_U8 *)(A2))[0]))) + (*(SK_U16 SK_FAR *)&(((SK_U8 SK_FAR *)(A1))[4]) == \ + *(SK_U16 SK_FAR *)&(((SK_U8 SK_FAR *)(A2))[4])) && \ + (*(SK_U32 SK_FAR *)&(((SK_U8 SK_FAR *)(A1))[0]) == \ + *(SK_U32 SK_FAR *)&(((SK_U8 SK_FAR *)(A2))[0]))) #endif /* SK_ADDR_DWORD_COMPARE */ #endif /* SK_ADDR_EQUAL */ @@ -382,7 +394,7 @@ SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, - SK_MAC_ADDR *pNewAddr, + SK_MAC_ADDR SK_FAR *pNewAddr, int Flags); extern int SkAddrPromiscuousChange( @@ -403,11 +415,13 @@ SK_U32 PortNumber, int NewPromMode); +#ifndef SK_SLIM extern int SkAddrSwap( SK_AC *pAC, SK_IOC IoC, SK_U32 FromPortNumber, SK_U32 ToPortNumber); +#endif #else /* defined(SK_KR_PROTO)) */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skdebug.h linux.22-ac2/drivers/net/sk98lin/h/skdebug.h --- linux.vanilla/drivers/net/sk98lin/h/skdebug.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skdebug.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skdebug.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.12 $ - * Date: $Date: 2002/07/15 15:37:13 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.14 $ + * Date: $Date: 2003/05/13 17:26:00 $ * Purpose: SK specific DEBUG support * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,6 +26,12 @@ * * History: * $Log: skdebug.h,v $ + * Revision 1.14 2003/05/13 17:26:00 mkarl + * Editorial changes. + * + * Revision 1.13 2003/03/31 07:16:39 mkarl + * Corrected Copyright. 
+ * * Revision 1.12 2002/07/15 15:37:13 rschmidt * Power Management support * Editorial changes diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skdrv1st.h linux.22-ac2/drivers/net/sk98lin/h/skdrv1st.h --- linux.vanilla/drivers/net/sk98lin/h/skdrv1st.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skdrv1st.h 2003-08-13 14:10:39.000000000 +0100 @@ -2,16 +2,15 @@ * * Name: skdrv1st.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.9.2.2 $ - * Date: $Date: 2001/12/07 12:06:42 $ + * Version: $Revision: 1.1 $ + * Date: $Date: 2003/07/21 07:22:43 $ * Purpose: First header file for driver and all other modules * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +26,30 @@ * History: * * $Log: skdrv1st.h,v $ + * Revision 1.1 2003/07/21 07:22:43 rroesler + * Fix: Re-Enter after CVS crash + * + * Revision 1.15 2003/07/17 14:54:09 rroesler + * Fix: Corrected SK_PNMI_READ macros to copy right amount of bytes + * + * Revision 1.14 2003/06/03 14:36:32 mlindner + * Add: Additions for SK_SLIM + * + * Revision 1.13 2003/05/26 14:03:06 mlindner + * Add: Support for SLIM skaddr + * + * Revision 1.12 2003/05/26 12:56:39 mlindner + * Add: Support for Kernel 2.5/2.6 + * Add: New SkOsGetTimeCurrent function + * Add: SK_PNMI_HUNDREDS_SEC definition + * Fix: SK_TICKS_PER_SEC on Intel Itanium2 + * + * Revision 1.11 2003/02/25 14:16:40 mlindner + * Fix: Copyright statement + * + * Revision 1.10 2002/10/02 12:46:02 mlindner + * Add: Support for Yukon + * * Revision 1.9.2.2 2001/12/07 12:06:42 mlindner * Fix: malloc -> slab changes * @@ -89,19 +112,20 @@ /* Check kernel version */ #include -#if (LINUX_VERSION_CODE > 0x020300) -#endif typedef struct s_AC SK_AC; +/* Set card versions */ +#define SK_FAR + /* override some default functions with optimized linux functions */ #define SK_PNMI_STORE_U16(p,v) memcpy((char*)(p),(char*)&(v),2) #define SK_PNMI_STORE_U32(p,v) memcpy((char*)(p),(char*)&(v),4) #define SK_PNMI_STORE_U64(p,v) memcpy((char*)(p),(char*)&(v),8) #define SK_PNMI_READ_U16(p,v) memcpy((char*)&(v),(char*)(p),2) -#define SK_PNMI_READ_U32(p,v) memcpy((char*)&(v),(char*)(p),2) -#define SK_PNMI_READ_U64(p,v) memcpy((char*)&(v),(char*)(p),2) +#define SK_PNMI_READ_U32(p,v) memcpy((char*)&(v),(char*)(p),4) +#define SK_PNMI_READ_U64(p,v) memcpy((char*)&(v),(char*)(p),8) #define SkCsCalculateChecksum(p,l) ((~ip_compute_csum(p, l)) & 0xffff) @@ -150,7 +174,7 @@ /* we use gethrtime(), return unit: nanoseconds */ -#define SK_TICKS_PER_SEC HZ +#define SK_TICKS_PER_SEC 100 #define SK_MEM_MAPPED_IO diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skdrv2nd.h linux.22-ac2/drivers/net/sk98lin/h/skdrv2nd.h --- linux.vanilla/drivers/net/sk98lin/h/skdrv2nd.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skdrv2nd.h 2003-08-13 14:10:39.000000000 +0100 @@ -2,16 +2,15 @@ * * Name: skdrv2nd.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.12.2.2 $ - * Date: $Date: 2001/09/05 12:14:50 $ + * Version: $Revision: 1.2 $ + * 
Date: $Date: 2003/08/07 10:50:54 $ * Purpose: Second header file for driver and all other modules * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +26,36 @@ * History: * * $Log: skdrv2nd.h,v $ + * Revision 1.2 2003/08/07 10:50:54 mlindner + * Add: Speed and HW-Csum support for Yukon Lite chipset + * + * Revision 1.1 2003/07/21 07:25:29 rroesler + * Fix: Re-Enter after CVS crash + * + * Revision 1.19 2003/07/07 09:53:10 rroesler + * Fix: Removed proprietary RxTx defines and used the ones from skgehw.h instead + * + * Revision 1.18 2003/06/12 07:54:14 mlindner + * Fix: Changed Descriptor Alignment to 64 Byte + * + * Revision 1.17 2003/05/26 12:56:39 mlindner + * Add: Support for Kernel 2.5/2.6 + * Add: New SkOsGetTimeCurrent function + * Add: SK_PNMI_HUNDREDS_SEC definition + * Fix: SK_TICKS_PER_SEC on Intel Itanium2 + * + * Revision 1.16 2003/03/21 14:56:18 rroesler + * Added code regarding interrupt moderation + * + * Revision 1.15 2003/02/25 14:16:40 mlindner + * Fix: Copyright statement + * + * Revision 1.14 2003/02/25 13:26:26 mlindner + * Add: Support for various vendors + * + * Revision 1.13 2002/10/02 12:46:02 mlindner + * Add: Support for Yukon + * * Revision 1.12.2.2 2001/09/05 12:14:50 mlindner * add: New hardware revision int * @@ -114,7 +143,54 @@ #include "h/skrlmt.h" #include "h/skgedrv.h" -/* global function prototypes ******************************************/ +#define SK_PCI_ISCOMPLIANT(result, pdev) { \ + result = SK_FALSE; /* default */ \ + /* 3Com (0x10b7) */ \ + if (pdev->vendor == 0x10b7) { \ + /* Gigabit Ethernet Adapter (0x1700) */ \ + if ((pdev->device == 0x1700)) { \ + result = SK_TRUE; \ + } \ + /* SysKonnect (0x1148) */ \ + } else if (pdev->vendor == 0x1148) { \ + /* SK-98xx Gigabit Ethernet Server Adapter (0x4300) */ \ + /* SK-98xx V2.0 Gigabit Ethernet Adapter (0x4320) */ \ + if ((pdev->device == 0x4300) || \ + (pdev->device == 0x4320)) { \ + result = SK_TRUE; \ + } \ + /* D-Link (0x1186) */ \ + } else if (pdev->vendor == 0x1186) { \ + /* Gigabit Ethernet Adapter (0x4c00) */ \ + if ((pdev->device == 0x4c00)) { \ + result = SK_TRUE; \ + } \ + /* Marvell (0x11ab) */ \ + } else if (pdev->vendor == 0x11ab) { \ + /* Gigabit Ethernet Adapter (0x4320) */ \ + if ((pdev->device == 0x4320)) { \ + result = SK_TRUE; \ + } \ + /* CNet (0x1371) */ \ + } else if (pdev->vendor == 0x1371) { \ + /* GigaCard Network Adapter (0x434e) */ \ + if ((pdev->device == 0x434e)) { \ + result = SK_TRUE; \ + } \ + /* Linksys (0x1737) */ \ + } else if (pdev->vendor == 0x1737) { \ + /* Gigabit Network Adapter (0x1032) */ \ + /* Gigabit Network Adapter (0x1064) */ \ + if ((pdev->device == 0x1032) || \ + (pdev->device == 0x1064)) { \ + result = SK_TRUE; \ + } \ + } else { \ + result = SK_FALSE; \ + } \ +} + + extern SK_MBUF *SkDrvAllocRlmtMbuf(SK_AC*, SK_IOC, unsigned); extern void SkDrvFreeRlmtMbuf(SK_AC*, SK_IOC, SK_MBUF*); extern SK_U64 SkOsGetTime(SK_AC*); @@ -139,6 +215,25 @@ }; +/* + * Time macros + */ +#if SK_TICKS_PER_SEC == 100 +#define SK_PNMI_HUNDREDS_SEC(t) (t) +#else +#define SK_PNMI_HUNDREDS_SEC(t) ((((unsigned long)t) * 100) / \ + (SK_TICKS_PER_SEC)) +#endif + +/* + * New 
SkOsGetTime + */ +#define SkOsGetTimeCurrent(pAC, pUsec) {\ + struct timeval t;\ + do_gettimeofday(&t);\ + *pUsec = ((((t.tv_sec) * 1000000L)+t.tv_usec)/10000);\ +} + /* * ioctl definitions @@ -147,6 +242,7 @@ #define SK_IOCTL_GETMIB (SK_IOCTL_BASE + 0) #define SK_IOCTL_SETMIB (SK_IOCTL_BASE + 1) #define SK_IOCTL_PRESETMIB (SK_IOCTL_BASE + 2) +#define SK_IOCTL_GEN (SK_IOCTL_BASE + 3) typedef struct s_IOCTL SK_GE_IOCTL; @@ -181,7 +277,7 @@ /* * alignment of rx/tx descriptors */ -#define DESCR_ALIGN 8 +#define DESCR_ALIGN 64 /* * definitions for pnmi. TODO @@ -194,6 +290,43 @@ #define SK_DRIVER_SET_MTU(pAc,IoC,i,v) 0 #define SK_DRIVER_PRESET_MTU(pAc,IoC,i,v) 0 +/* +** Interim definition of SK_DRV_TIMER placed in this file until +** common modules have boon finallized +*/ +#define SK_DRV_TIMER 11 +#define SK_DRV_MODERATION_TIMER 1 +#define SK_DRV_MODERATION_TIMER_LENGTH 1000000 /* 1 second */ +#define SK_DRV_RX_CLEANUP_TIMER 2 +#define SK_DRV_RX_CLEANUP_TIMER_LENGTH 1000000 /* 100 millisecs */ + +/* +** Definitions regarding transmitting frames +** any calculating any checksum. +*/ +#define C_LEN_ETHERMAC_HEADER_DEST_ADDR 6 +#define C_LEN_ETHERMAC_HEADER_SRC_ADDR 6 +#define C_LEN_ETHERMAC_HEADER_LENTYPE 2 +#define C_LEN_ETHERMAC_HEADER ( (C_LEN_ETHERMAC_HEADER_DEST_ADDR) + \ + (C_LEN_ETHERMAC_HEADER_SRC_ADDR) + \ + (C_LEN_ETHERMAC_HEADER_LENTYPE) ) + +#define C_LEN_ETHERMTU_MINSIZE 46 +#define C_LEN_ETHERMTU_MAXSIZE_STD 1500 +#define C_LEN_ETHERMTU_MAXSIZE_JUMBO 9000 + +#define C_LEN_ETHERNET_MINSIZE ( (C_LEN_ETHERMAC_HEADER) + \ + (C_LEN_ETHERMTU_MINSIZE) ) + +#define C_OFFSET_IPHEADER C_LEN_ETHERMAC_HEADER +#define C_OFFSET_IPHEADER_IPPROTO 9 +#define C_OFFSET_TCPHEADER_TCPCS 16 + +#define C_OFFSET_IPPROTO ( (C_LEN_ETHERMAC_HEADER) + \ + (C_OFFSET_IPHEADER_IPPROTO) ) + +#define C_PROTO_ID_UDP 6 /* refer to RFC 790 or Stevens' */ +#define C_PROTO_ID_TCP 17 /* TCP/IP illustrated for details */ /* TX and RX descriptors *****************************************************/ @@ -228,156 +361,37 @@ struct sk_buff *pMBuf; /* Pointer to Linux' socket buffer */ }; +/* Used interrupt bits in the interrupts source register *********************/ -/* definition of flags in descriptor control field */ -#define RX_CTRL_OWN_BMU UINT32_C(0x80000000) -#define RX_CTRL_STF UINT32_C(0x40000000) -#define RX_CTRL_EOF UINT32_C(0x20000000) -#define RX_CTRL_EOB_IRQ UINT32_C(0x10000000) -#define RX_CTRL_EOF_IRQ UINT32_C(0x08000000) -#define RX_CTRL_DEV_NULL UINT32_C(0x04000000) -#define RX_CTRL_STAT_VALID UINT32_C(0x02000000) -#define RX_CTRL_TIME_VALID UINT32_C(0x01000000) -#define RX_CTRL_CHECK_DEFAULT UINT32_C(0x00550000) -#define RX_CTRL_CHECK_CSUM UINT32_C(0x00560000) -#define RX_CTRL_LEN_MASK UINT32_C(0x0000FFFF) - -#define TX_CTRL_OWN_BMU UINT32_C(0x80000000) -#define TX_CTRL_STF UINT32_C(0x40000000) -#define TX_CTRL_EOF UINT32_C(0x20000000) -#define TX_CTRL_EOB_IRQ UINT32_C(0x10000000) -#define TX_CTRL_EOF_IRQ UINT32_C(0x08000000) -#define TX_CTRL_ST_FWD UINT32_C(0x04000000) -#define TX_CTRL_DISAB_CRC UINT32_C(0x02000000) -#define TX_CTRL_SOFTWARE UINT32_C(0x01000000) -#define TX_CTRL_CHECK_DEFAULT UINT32_C(0x00550000) -#define TX_CTRL_CHECK_CSUM UINT32_C(0x00560000) -#define TX_CTRL_LEN_MASK UINT32_C(0x0000FFFF) - - - -/* The offsets of registers in the TX and RX queue control io area ***********/ - -#define RX_Q_BUF_CTRL_CNT 0x00 -#define RX_Q_NEXT_DESCR_LOW 0x04 -#define RX_Q_BUF_ADDR_LOW 0x08 -#define RX_Q_BUF_ADDR_HIGH 0x0c -#define RX_Q_FRAME_STAT 0x10 -#define RX_Q_TIME_STAMP 0x14 -#define 
RX_Q_CSUM_1_2 0x18 -#define RX_Q_CSUM_START_1_2 0x1c -#define RX_Q_CUR_DESCR_LOW 0x20 -#define RX_Q_DESCR_HIGH 0x24 -#define RX_Q_CUR_ADDR_LOW 0x28 -#define RX_Q_CUR_ADDR_HIGH 0x2c -#define RX_Q_CUR_BYTE_CNT 0x30 -#define RX_Q_CTRL 0x34 -#define RX_Q_FLAG 0x38 -#define RX_Q_TEST1 0x3c -#define RX_Q_TEST2 0x40 -#define RX_Q_TEST3 0x44 - -#define TX_Q_BUF_CTRL_CNT 0x00 -#define TX_Q_NEXT_DESCR_LOW 0x04 -#define TX_Q_BUF_ADDR_LOW 0x08 -#define TX_Q_BUF_ADDR_HIGH 0x0c -#define TX_Q_FRAME_STAT 0x10 -#define TX_Q_CSUM_START 0x14 -#define TX_Q_CSUM_START_POS 0x18 -#define TX_Q_RESERVED 0x1c -#define TX_Q_CUR_DESCR_LOW 0x20 -#define TX_Q_DESCR_HIGH 0x24 -#define TX_Q_CUR_ADDR_LOW 0x28 -#define TX_Q_CUR_ADDR_HIGH 0x2c -#define TX_Q_CUR_BYTE_CNT 0x30 -#define TX_Q_CTRL 0x34 -#define TX_Q_FLAG 0x38 -#define TX_Q_TEST1 0x3c -#define TX_Q_TEST2 0x40 -#define TX_Q_TEST3 0x44 - -/* definition of flags in the queue control field */ -#define RX_Q_CTRL_POLL_ON 0x00000080 -#define RX_Q_CTRL_POLL_OFF 0x00000040 -#define RX_Q_CTRL_STOP 0x00000020 -#define RX_Q_CTRL_START 0x00000010 -#define RX_Q_CTRL_CLR_I_PAR 0x00000008 -#define RX_Q_CTRL_CLR_I_EOB 0x00000004 -#define RX_Q_CTRL_CLR_I_EOF 0x00000002 -#define RX_Q_CTRL_CLR_I_ERR 0x00000001 - -#define TX_Q_CTRL_POLL_ON 0x00000080 -#define TX_Q_CTRL_POLL_OFF 0x00000040 -#define TX_Q_CTRL_STOP 0x00000020 -#define TX_Q_CTRL_START 0x00000010 -#define TX_Q_CTRL_CLR_I_EOB 0x00000004 -#define TX_Q_CTRL_CLR_I_EOF 0x00000002 -#define TX_Q_CTRL_CLR_I_ERR 0x00000001 - - -/* Interrupt bits in the interrupts source register **************************/ -#define IRQ_HW_ERROR 0x80000000 -#define IRQ_RESERVED 0x40000000 -#define IRQ_PKT_TOUT_RX1 0x20000000 -#define IRQ_PKT_TOUT_RX2 0x10000000 -#define IRQ_PKT_TOUT_TX1 0x08000000 -#define IRQ_PKT_TOUT_TX2 0x04000000 -#define IRQ_I2C_READY 0x02000000 -#define IRQ_SW 0x01000000 -#define IRQ_EXTERNAL_REG 0x00800000 -#define IRQ_TIMER 0x00400000 -#define IRQ_MAC1 0x00200000 -#define IRQ_LINK_SYNC_C_M1 0x00100000 -#define IRQ_MAC2 0x00080000 -#define IRQ_LINK_SYNC_C_M2 0x00040000 -#define IRQ_EOB_RX1 0x00020000 -#define IRQ_EOF_RX1 0x00010000 -#define IRQ_CHK_RX1 0x00008000 -#define IRQ_EOB_RX2 0x00004000 -#define IRQ_EOF_RX2 0x00002000 -#define IRQ_CHK_RX2 0x00001000 -#define IRQ_EOB_SY_TX1 0x00000800 -#define IRQ_EOF_SY_TX1 0x00000400 -#define IRQ_CHK_SY_TX1 0x00000200 -#define IRQ_EOB_AS_TX1 0x00000100 -#define IRQ_EOF_AS_TX1 0x00000080 -#define IRQ_CHK_AS_TX1 0x00000040 -#define IRQ_EOB_SY_TX2 0x00000020 -#define IRQ_EOF_SY_TX2 0x00000010 -#define IRQ_CHK_SY_TX2 0x00000008 -#define IRQ_EOB_AS_TX2 0x00000004 -#define IRQ_EOF_AS_TX2 0x00000002 -#define IRQ_CHK_AS_TX2 0x00000001 - -#define DRIVER_IRQS (IRQ_SW | IRQ_EOF_RX1 | IRQ_EOF_RX2 | \ - IRQ_EOF_SY_TX1 | IRQ_EOF_AS_TX1 | \ - IRQ_EOF_SY_TX2 | IRQ_EOF_AS_TX2) - -#define SPECIAL_IRQS (IRQ_HW_ERROR | IRQ_PKT_TOUT_RX1 | IRQ_PKT_TOUT_RX2 | \ - IRQ_PKT_TOUT_TX1 | IRQ_PKT_TOUT_TX2 | \ - IRQ_I2C_READY | IRQ_EXTERNAL_REG | IRQ_TIMER | \ - IRQ_MAC1 | IRQ_LINK_SYNC_C_M1 | \ - IRQ_MAC2 | IRQ_LINK_SYNC_C_M2 | \ - IRQ_CHK_RX1 | IRQ_CHK_RX2 | \ - IRQ_CHK_SY_TX1 | IRQ_CHK_AS_TX1 | \ - IRQ_CHK_SY_TX2 | IRQ_CHK_AS_TX2) - -#define IRQ_MASK (IRQ_SW | IRQ_EOB_RX1 | IRQ_EOF_RX1 | \ - IRQ_EOB_RX2 | IRQ_EOF_RX2 | \ - IRQ_EOB_SY_TX1 | IRQ_EOF_SY_TX1 | \ - IRQ_EOB_AS_TX1 | IRQ_EOF_AS_TX1 | \ - IRQ_EOB_SY_TX2 | IRQ_EOF_SY_TX2 | \ - IRQ_EOB_AS_TX2 | IRQ_EOF_AS_TX2 | \ - IRQ_HW_ERROR | IRQ_PKT_TOUT_RX1 | IRQ_PKT_TOUT_RX2 | \ - IRQ_PKT_TOUT_TX1 | IRQ_PKT_TOUT_TX2 | \ - IRQ_I2C_READY | IRQ_EXTERNAL_REG | 
IRQ_TIMER | \ - IRQ_MAC1 | \ - IRQ_MAC2 | \ - IRQ_CHK_RX1 | IRQ_CHK_RX2 | \ - IRQ_CHK_SY_TX1 | IRQ_CHK_AS_TX1 | \ - IRQ_CHK_SY_TX2 | IRQ_CHK_AS_TX2) +#define DRIVER_IRQS ((IS_IRQ_SW) | \ + (IS_R1_F) |(IS_R2_F) | \ + (IS_XS1_F) |(IS_XA1_F) | \ + (IS_XS2_F) |(IS_XA2_F)) + +#define SPECIAL_IRQS ((IS_HW_ERR) |(IS_I2C_READY) | \ + (IS_EXT_REG) |(IS_TIMINT) | \ + (IS_PA_TO_RX1) |(IS_PA_TO_RX2) | \ + (IS_PA_TO_TX1) |(IS_PA_TO_TX2) | \ + (IS_MAC1) |(IS_LNK_SYNC_M1)| \ + (IS_MAC2) |(IS_LNK_SYNC_M2)| \ + (IS_R1_C) |(IS_R2_C) | \ + (IS_XS1_C) |(IS_XA1_C) | \ + (IS_XS2_C) |(IS_XA2_C)) + +#define IRQ_MASK ((IS_IRQ_SW) | \ + (IS_R1_B) |(IS_R1_F) |(IS_R2_B) |(IS_R2_F) | \ + (IS_XS1_B) |(IS_XS1_F) |(IS_XA1_B)|(IS_XA1_F)| \ + (IS_XS2_B) |(IS_XS2_F) |(IS_XA2_B)|(IS_XA2_F)| \ + (IS_HW_ERR) |(IS_I2C_READY)| \ + (IS_EXT_REG) |(IS_TIMINT) | \ + (IS_PA_TO_RX1) |(IS_PA_TO_RX2)| \ + (IS_PA_TO_TX1) |(IS_PA_TO_TX2)| \ + (IS_MAC1) |(IS_MAC2) | \ + (IS_R1_C) |(IS_R2_C) | \ + (IS_XS1_C) |(IS_XA1_C) | \ + (IS_XS2_C) |(IS_XA2_C)) -#define IRQ_HWE_MASK 0x00000FFF /* enable all HW irqs */ +#define IRQ_HWE_MASK (IS_ERR_MSK) /* enable all HW irqs */ typedef struct s_DevNet DEV_NET; @@ -420,6 +434,55 @@ int PortIndex; /* index number of port (0 or 1) */ }; +/* Definitions needed for interrupt moderation *******************************/ + +#define IRQ_EOF_AS_TX ((IS_XA1_F) | (IS_XA2_F)) +#define IRQ_EOF_SY_TX ((IS_XS1_F) | (IS_XS2_F)) +#define IRQ_MASK_TX_ONLY ((IRQ_EOF_AS_TX)| (IRQ_EOF_SY_TX)) +#define IRQ_MASK_RX_ONLY ((IS_R1_F) | (IS_R2_F)) +#define IRQ_MASK_SP_ONLY (SPECIAL_IRQS) +#define IRQ_MASK_TX_RX ((IRQ_MASK_TX_ONLY)| (IRQ_MASK_RX_ONLY)) +#define IRQ_MASK_SP_RX ((SPECIAL_IRQS) | (IRQ_MASK_RX_ONLY)) +#define IRQ_MASK_SP_TX ((SPECIAL_IRQS) | (IRQ_MASK_TX_ONLY)) +#define IRQ_MASK_RX_TX_SP ((SPECIAL_IRQS) | (IRQ_MASK_TX_RX)) + +#define C_INT_MOD_NONE 1 +#define C_INT_MOD_STATIC 2 +#define C_INT_MOD_DYNAMIC 4 + +#define C_CLK_FREQ_GENESIS 53215000 /* shorter: 53.125 MHz */ +#define C_CLK_FREQ_YUKON 78215000 /* shorter: 78.125 MHz */ + +#define C_INTS_PER_SEC_DEFAULT 2000 +#define C_INT_MOD_ENABLE_PERCENTAGE 50 /* if higher 50% enable */ +#define C_INT_MOD_DISABLE_PERCENTAGE 50 /* if lower 50% disable */ + +typedef struct s_DynIrqModInfo DIM_INFO; +struct s_DynIrqModInfo { + unsigned long PrevTimeVal; + unsigned int PrevSysLoad; + unsigned int PrevUsedTime; + unsigned int PrevTotalTime; + int PrevUsedDescrRatio; + int NbrProcessedDescr; + SK_U64 PrevPort0RxIntrCts; + SK_U64 PrevPort1RxIntrCts; + SK_U64 PrevPort0TxIntrCts; + SK_U64 PrevPort1TxIntrCts; + SK_BOOL ModJustEnabled; /* Moderation just enabled yes/no */ + + int MaxModIntsPerSec; /* Moderation Threshold */ + int MaxModIntsPerSecUpperLimit; /* Upper limit for DIM */ + int MaxModIntsPerSecLowerLimit; /* Lower limit for DIM */ + + long MaskIrqModeration; /* ModIrqType (eg. 'TxRx') */ + SK_BOOL DisplayStats; /* Stats yes/no */ + SK_BOOL AutoSizing; /* Resize DIM-timer on/off */ + int IntModTypeSelect; /* EnableIntMod (eg. 
'dynamic') */ + + SK_TIMER ModTimer; /* just some timer */ +}; + typedef struct s_PerStrm PER_STRM; #define SK_ALLOC_IRQ 0x00000001 @@ -487,11 +550,16 @@ SK_U32 CsOfs; /* for checksum calculation */ SK_BOOL CheckQueue; /* check event queue soon */ + SK_TIMER DrvCleanupTimer;/* to check for pending descriptors */ + DIM_INFO DynIrqModInfo; /* all data related to DIM */ /* Only for tests */ int PortUp; int PortDown; - + int ChipsetType; /* Chipset family type + * 0 == Genesis family support + * 1 == Yukon family support + */ }; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skerror.h linux.22-ac2/drivers/net/sk98lin/h/skerror.h --- linux.vanilla/drivers/net/sk98lin/h/skerror.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skerror.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skerror.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.5 $ - * Date: $Date: 2002/04/25 11:05:10 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.7 $ + * Date: $Date: 2003/05/13 17:25:13 $ * Purpose: SK specific Error log support * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,6 +26,12 @@ * * History: * $Log: skerror.h,v $ + * Revision 1.7 2003/05/13 17:25:13 mkarl + * Editorial changes. + * + * Revision 1.6 2003/03/31 07:17:48 mkarl + * Corrected Copyright. + * * Revision 1.5 2002/04/25 11:05:10 rschmidt * Editorial changes * diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skgedrv.h linux.22-ac2/drivers/net/sk98lin/h/skgedrv.h --- linux.vanilla/drivers/net/sk98lin/h/skgedrv.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skgedrv.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skgedrv.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.6 $ - * Date: $Date: 2002/07/15 15:38:01 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.10 $ + * Date: $Date: 2003/07/04 12:25:01 $ * Purpose: Interface with the driver * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,20 @@ * History: * * $Log: skgedrv.h,v $ + * Revision 1.10 2003/07/04 12:25:01 rschmidt + * Added event SK_DRV_DOWNSHIFT_DET for Downshift 4-Pair / 2-Pair + * + * Revision 1.9 2003/05/13 17:24:21 mkarl + * Added events SK_DRV_LINK_UP and SK_DRV_LINK_DOWN for drivers not using + * RLMT (SK_NO_RLMT). + * Editorial changes. + * + * Revision 1.8 2003/03/31 07:18:54 mkarl + * Corrected Copyright. 
+ * + * Revision 1.7 2003/03/18 09:43:47 rroesler + * Added new event for timer + * * Revision 1.6 2002/07/15 15:38:01 rschmidt * Power Management support * Editorial changes @@ -68,5 +83,10 @@ #define SK_DRV_PORT_FAIL 8 /* One port fails */ #define SK_DRV_SWITCH_INTERN 9 /* Port switch by the driver itself */ #define SK_DRV_POWER_DOWN 10 /* Power down mode */ - -#endif /* __INC_SKGEDRV_H_ */ +#define SK_DRV_TIMER 11 /* Timer for free use */ +#ifdef SK_NO_RLMT +#define SK_DRV_LINK_UP 12 /* Link Up event for driver */ +#define SK_DRV_LINK_DOWN 13 /* Link Down event for driver */ +#endif +#define SK_DRV_DOWNSHIFT_DET 14 /* Downshift 4-Pair / 2-Pair (YUKON only) */ +#endif /* __INC_SKGEDRV_H_ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skgehw.h linux.22-ac2/drivers/net/sk98lin/h/skgehw.h --- linux.vanilla/drivers/net/sk98lin/h/skgehw.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skgehw.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skgehw.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.48 $ - * Date: $Date: 2002/12/05 10:25:11 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.53 $ + * Date: $Date: 2003/07/04 12:39:01 $ * Purpose: Defines and Macros for the Gigabit Ethernet Adapter Product Family * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,6 +26,27 @@ * * History: * $Log: skgehw.h,v $ + * Revision 1.53 2003/07/04 12:39:01 rschmidt + * Added SK_FAR to pointers in XM_IN32() and GM_IN32() macros (for PXE) + * Editorial changes + * + * Revision 1.52 2003/05/13 17:16:36 mkarl + * Added SK_FAR for PXE. + * Editorial changes. + * + * Revision 1.51 2003/04/08 16:31:50 rschmidt + * Added defines for new Chip IDs (YUKON-Lite, YUKON-LP) + * Editorial changes + * + * Revision 1.50 2003/03/31 07:29:45 mkarl + * Corrected Copyright. + * Editorial changes. + * + * Revision 1.49 2003/01/28 09:43:49 rschmidt + * Added defines for PCI-Spec. 
2.3 IRQ + * Added defines for CLK_RUN (YUKON-Lite) + * Editorial changes + * * Revision 1.48 2002/12/05 10:25:11 rschmidt * Added defines for Half Duplex Burst Mode On/Off * Added defines for Rx GMAC FIFO Flush feature @@ -334,7 +356,7 @@ #define PCI_SUB_ID 0x2e /* 16 bit Subsystem ID */ #define PCI_BASE_ROM 0x30 /* 32 bit Expansion ROM Base Address */ #define PCI_CAP_PTR 0x34 /* 8 bit Capabilities Ptr */ - /* Byte 35..3b: reserved */ + /* Byte 0x35..0x3b: reserved */ #define PCI_IRQ_LINE 0x3c /* 8 bit Interrupt Line */ #define PCI_IRQ_PIN 0x3d /* 8 bit Interrupt Pin */ #define PCI_MIN_GNT 0x3e /* 8 bit Min_Gnt */ @@ -354,7 +376,9 @@ #define PCI_VPD_NITEM 0x51 /* 8 bit Next Item Ptr */ #define PCI_VPD_ADR_REG 0x52 /* 16 bit VPD Address Register */ #define PCI_VPD_DAT_REG 0x54 /* 32 bit VPD Data Register */ - /* Byte 0x58..0xff: reserved */ + /* Byte 0x58..0x59: reserved */ +#define PCI_SER_LD_CTRL 0x5a /* 16 bit SEEPROM Loader Ctrl (YUKON only) */ + /* Byte 0x5c..0xff: reserved */ /* * I2C Address (PCI Config) @@ -362,13 +386,14 @@ * Note: The temperature and voltage sensors are relocated on a different * I2C bus. */ -#define I2C_ADDR_VPD 0xA0 /* I2C address for the VPD EEPROM */ +#define I2C_ADDR_VPD 0xa0 /* I2C address for the VPD EEPROM */ /* * Define Bits and Values of the registers */ /* PCI_COMMAND 16 bit Command */ - /* Bit 15..10: reserved */ + /* Bit 15..11: reserved */ +#define PCI_INT_DIS BIT_10S /* Interrupt INTx# disable (PCI 2.3) */ #define PCI_FBTEN BIT_9S /* Fast Back-To-Back enable */ #define PCI_SERREN BIT_8S /* SERR enable */ #define PCI_ADSTEP BIT_7S /* Address Stepping */ @@ -398,7 +423,8 @@ #define PCI_UDF BIT_6S /* User Defined Features */ #define PCI_66MHZCAP BIT_5S /* 66 MHz PCI bus clock capable */ #define PCI_NEWCAP BIT_4S /* New cap. list implemented */ - /* Bit 3.. 0: reserved */ +#define PCI_INT_STAT BIT_3S /* Interrupt INTx# Status (PCI 2.3) */ + /* Bit 2.. 0: reserved */ #define PCI_ERRBITS (PCI_PERR | PCI_SERR | PCI_RMABORT | PCI_RTABORT |\ PCI_DATAPERR) @@ -427,7 +453,7 @@ #define PCI_MEM32BIT (0L<<1) /* Base addr anywhere in 32 Bit range */ #define PCI_MEM1M (1L<<1) /* Base addr below 1 MegaByte */ #define PCI_MEM64BIT (2L<<1) /* Base addr anywhere in 64 Bit range */ -#define PCI_MEMSPACE BIT_0 /* Memory Space Indic. */ +#define PCI_MEMSPACE BIT_0 /* Memory Space Indicator */ /* PCI_BASE_2ND 32 bit 2nd Base address */ #define PCI_IOBASE 0xffffff00L /* Bit 31.. 8: I/O Base address */ @@ -436,8 +462,8 @@ #define PCI_IOSPACE BIT_0 /* I/O Space Indicator */ /* PCI_BASE_ROM 32 bit Expansion ROM Base Address */ -#define PCI_ROMBASE 0xfffe0000L /* Bit 31..17: ROM BASE address (1st)*/ -#define PCI_ROMBASZ (0x1cL<<14) /* Bit 16..14: Treat as BASE or SIZE */ +#define PCI_ROMBASE_MSK 0xfffe0000L /* Bit 31..17: ROM Base address */ +#define PCI_ROMBASE_SIZ (0x1cL<<14) /* Bit 16..14: Treat as Base or Size */ #define PCI_ROMSIZE (0x38L<<11) /* Bit 13..11: ROM Size Requirements */ /* Bit 10.. 1: reserved */ #define PCI_ROMEN BIT_0 /* Address Decode enable */ @@ -445,15 +471,15 @@ /* Device Dependent Region */ /* PCI_OUR_REG_1 32 bit Our Register 1 */ /* Bit 31..29: reserved */ -#define PCI_PHY_COMA BIT_28 /* Set PHY to Coma Mode */ -#define PCI_EN_CAL BIT_27 /* Enable PCI buffer strength calibr. */ -#define PCI_DIS_CAL BIT_26 /* Disable PCI buffer strength calibr. */ +#define PCI_PHY_COMA BIT_28 /* Set PHY to Coma Mode (YUKON only) */ +#define PCI_TEST_CAL BIT_27 /* Test PCI buffer calib. (YUKON only) */ +#define PCI_EN_CAL BIT_26 /* Enable PCI buffer calib. 
(YUKON only) */ #define PCI_VIO BIT_25 /* PCI I/O Voltage, 0 = 3.3V, 1 = 5V */ -#define PCI_EN_BOOT BIT_24 /* Enable BOOT via ROM */ +#define PCI_DIS_BOOT BIT_24 /* Disable BOOT via ROM */ #define PCI_EN_IO BIT_23 /* Mapping to I/O space */ -#define PCI_EN_FPROM BIT_22 /* FLASH mapped to mem? */ - /* 1 = Map Flash to Mem */ - /* 0 = Disable addr. dec*/ +#define PCI_EN_FPROM BIT_22 /* Enable FLASH mapping to memory */ + /* 1 = Map Flash to memory */ + /* 0 = Disable addr. dec */ #define PCI_PAGESIZE (3L<<20) /* Bit 21..20: FLASH Page Size */ #define PCI_PAGE_16 (0L<<20) /* 16 k pages */ #define PCI_PAGE_32K (1L<<20) /* 32 k pages */ @@ -496,7 +522,7 @@ /* Power Management Region */ /* PCI_PM_CAP_REG 16 bit Power Management Capabilities */ -#define PCI_PME_SUP (0x1f<<11) /* Bit 15..11: PM Event Support */ +#define PCI_PME_SUP_MSK (0x1f<<11) /* Bit 15..11: PM Event Support Mask */ #define PCI_PME_D3C_SUP BIT_15S /* PME from D3cold Support (if Vaux) */ #define PCI_PME_D3H_SUP BIT_14S /* PME from D3hot Support */ #define PCI_PME_D2_SUP BIT_13S /* PME from D2 Support */ @@ -525,8 +551,8 @@ /* VPD Region */ /* PCI_VPD_ADR_REG 16 bit VPD Address Register */ -#define PCI_VPD_FLAG BIT_15S /* starts VPD rd/wd cycle*/ -#define PCI_VPD_ADDR 0x3fffL /* Bit 14.. 0: VPD address */ +#define PCI_VPD_FLAG BIT_15S /* starts VPD rd/wr cycle */ +#define PCI_VPD_ADR_MSK 0x7fffL /* Bit 14.. 0: VPD address mask */ /* Control Register File (Address Map) */ @@ -996,7 +1022,10 @@ #define RAP_RAP 0x3f /* Bit 6..0: 0 = block 0,..,6f = block 6f */ /* B0_CTST 16 bit Control/Status register */ - /* Bit 15..11: reserved */ + /* Bit 15..14: reserved */ +#define CS_CLK_RUN_HOT BIT_13S /* CLK_RUN hot m. (YUKON-Lite only) */ +#define CS_CLK_RUN_RST BIT_12S /* CLK_RUN reset (YUKON-Lite only) */ +#define CS_CLK_RUN_ENA BIT_11S /* CLK_RUN enable (YUKON-Lite only) */ #define CS_VAUX_AVAIL BIT_10S /* VAUX available (YUKON only) */ #define CS_BUS_CLOCK BIT_9S /* Bus Clock 0/1 = 33/66 MHz */ #define CS_BUS_SLOT_SZ BIT_8S /* Slot Size 0/1 = 32/64 bit slot */ @@ -1028,7 +1057,7 @@ /* B0_IMSK 32 bit Interrupt Mask Register */ /* B0_SP_ISRC 32 bit Special Interrupt Source Reg */ /* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ -#define IS_ALL_MSK 0xbfffffffL /* All Interrupt bits */ +#define IS_ALL_MSK 0xbfffffffUL /* All Interrupt bits */ #define IS_HW_ERR BIT_31 /* Interrupt HW Error */ /* Bit 30: reserved */ #define IS_PA_TO_RX1 BIT_29 /* Packet Arb Timeout Rx1 */ @@ -1101,8 +1130,10 @@ #define CFG_SNG_MAC BIT_0S /* MAC Config: 0=2 MACs / 1=1 MAC*/ /* B2_CHIP_ID 8 bit Chip Identification Number */ -#define CHIP_ID_GENESIS 0x0a /* Chip ID for GENESIS */ -#define CHIP_ID_YUKON 0xb0 /* Chip ID for YUKON */ +#define CHIP_ID_GENESIS 0x0a /* Chip ID for GENESIS */ +#define CHIP_ID_YUKON 0xb0 /* Chip ID for YUKON */ +#define CHIP_ID_YUKON_LITE 0xb1 /* Chip ID for YUKON-Lite (Rev. A1) */ +#define CHIP_ID_YUKON_LP 0xb2 /* Chip ID for YUKON-LP */ /* B2_FAR 32 bit Flash-Prom Addr Reg/Cnt */ #define FAR_ADDR 0x1ffffL /* Bit 16.. 
0: FPROM Address mask */ @@ -1168,16 +1199,16 @@ /* B2_GP_IO 32 bit General Purpose I/O Register */ /* Bit 31..26: reserved */ -#define GP_DIR_9 BIT_25 /* IO_9 direct, 0=I/1=O */ -#define GP_DIR_8 BIT_24 /* IO_8 direct, 0=I/1=O */ -#define GP_DIR_7 BIT_23 /* IO_7 direct, 0=I/1=O */ -#define GP_DIR_6 BIT_22 /* IO_6 direct, 0=I/1=O */ -#define GP_DIR_5 BIT_21 /* IO_5 direct, 0=I/1=O */ -#define GP_DIR_4 BIT_20 /* IO_4 direct, 0=I/1=O */ -#define GP_DIR_3 BIT_19 /* IO_3 direct, 0=I/1=O */ -#define GP_DIR_2 BIT_18 /* IO_2 direct, 0=I/1=O */ -#define GP_DIR_1 BIT_17 /* IO_1 direct, 0=I/1=O */ -#define GP_DIR_0 BIT_16 /* IO_0 direct, 0=I/1=O */ +#define GP_DIR_9 BIT_25 /* IO_9 direct, 0=In/1=Out */ +#define GP_DIR_8 BIT_24 /* IO_8 direct, 0=In/1=Out */ +#define GP_DIR_7 BIT_23 /* IO_7 direct, 0=In/1=Out */ +#define GP_DIR_6 BIT_22 /* IO_6 direct, 0=In/1=Out */ +#define GP_DIR_5 BIT_21 /* IO_5 direct, 0=In/1=Out */ +#define GP_DIR_4 BIT_20 /* IO_4 direct, 0=In/1=Out */ +#define GP_DIR_3 BIT_19 /* IO_3 direct, 0=In/1=Out */ +#define GP_DIR_2 BIT_18 /* IO_2 direct, 0=In/1=Out */ +#define GP_DIR_1 BIT_17 /* IO_1 direct, 0=In/1=Out */ +#define GP_DIR_0 BIT_16 /* IO_0 direct, 0=In/1=Out */ /* Bit 15..10: reserved */ #define GP_IO_9 BIT_9 /* IO_9 pin */ #define GP_IO_8 BIT_8 /* IO_8 pin */ @@ -1327,7 +1358,7 @@ /* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ /* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ /* Bit 31..24: reserved */ -#define TXA_MAX_VAL 0x00ffffffL /* Bit 23.. 0: Max TXA Timer/Cnt Val */ +#define TXA_MAX_VAL 0x00ffffffUL/* Bit 23.. 0: Max TXA Timer/Cnt Val */ /* TXA_CTRL 8 bit Tx Arbiter Control Register */ #define TXA_ENA_FSYNC BIT_7S /* Enable force of sync Tx queue */ @@ -1646,9 +1677,11 @@ #define GMF_CLI_TX_PE BIT_4 /* Clear IRQ Tx Parity Error */ /* Bits 3..0: same as for RX_GMF_CTRL_T */ -#define GMF_RX_CTRL_DEF GMF_OPER_ON +#define GMF_RX_CTRL_DEF (GMF_OPER_ON | GMF_RX_F_FL_ON) #define GMF_TX_CTRL_DEF GMF_OPER_ON +#define RX_GMF_FL_THR_DEF 0x0a /* Rx GMAC FIFO Flush Threshold default */ + /* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ /* Bit 7.. 3: reserved */ #define GMT_ST_START BIT_2S /* Start Time Stamp Timer */ @@ -1767,7 +1800,7 @@ WOL_CTL_DIS_LINK_CHG_UNIT | \ WOL_CTL_DIS_PATTERN_UNIT | \ WOL_CTL_DIS_MAGIC_PKT_UNIT) - + /* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ #define WOL_CTL_PATT_ENA(x) (BIT_0 << (x)) @@ -1811,7 +1844,7 @@ SK_U32 RxAdrHi; /* Physical Rx Buffer Address upper dword */ SK_U32 RxStat; /* Receive Frame Status Word */ SK_U32 RxTiSt; /* Receive Time Stamp (from XMAC on GENESIS) */ -#ifndef SK_USE_REV_DESC +#ifndef SK_USE_REV_DESC SK_U16 RxTcpSum1; /* TCP Checksum 1 */ SK_U16 RxTcpSum2; /* TCP Checksum 2 */ SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */ @@ -1855,7 +1888,7 @@ #define BMU_CHECK (0x55L<<16) /* Default BMU check */ #define BMU_TCP_CHECK (0x56L<<16) /* Descr with TCP ext */ #define BMU_UDP_CHECK (0x57L<<16) /* Descr with UDP ext (YUKON only) */ -#define BMU_BBC 0xFFFFL /* Bit 15.. 0: Buffer Byte Counter */ +#define BMU_BBC 0xffffL /* Bit 15.. 
0: Buffer Byte Counter */ /* TxStat Transmit Frame Status Word */ /* RxStat Receive Frame Status Word */ @@ -1866,20 +1899,9 @@ * (see XMR_FS bits) */ -/* other defines *************************************************************/ - -/* - * FlashProm specification - */ -#define MAX_PAGES 0x20000L /* Every byte has a single page */ -#define MAX_FADDR 1 /* 1 byte per page */ -#define SKFDDI_PSZ 8 /* address PROM size */ - /* macros ********************************************************************/ -/* - * Receive and Transmit Queues - */ +/* Receive and Transmit Queues */ #define Q_R1 0x0000 /* Receive Queue 1 */ #define Q_R2 0x0080 /* Receive Queue 2 */ #define Q_XS1 0x0200 /* Synchronous Transmit Queue 1 */ @@ -1892,7 +1914,7 @@ * * Use this macro to access the Receive and Transmit Queue Registers. * - * para: + * para: * Queue Queue to access. * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 * Offs Queue register offset. @@ -1907,7 +1929,7 @@ * * Use this macro to access the RAM Buffer Registers. * - * para: + * para: * Queue Queue to access. * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 * Offs Queue register offset. @@ -1918,9 +1940,7 @@ #define RB_ADDR(Queue, Offs) (B16_RAM_REGS + (Queue) + (Offs)) -/* - * MAC Related Registers - */ +/* MAC Related Registers */ #define MAC_1 0 /* belongs to the port near the slot */ #define MAC_2 1 /* belongs to the port far away from the slot */ @@ -1929,7 +1949,7 @@ * * Use this macro to access a MAC Related Registers inside the ASIC. * - * para: + * para: * Mac MAC to access. * Values: MAC_1, MAC_2 * Offs MAC register offset. @@ -1981,9 +2001,9 @@ #define XM_IN32(IoC, Mac, Reg, pVal) { \ SK_IN16((IoC), XMA((Mac), (Reg)), \ - (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_LO]); \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \ SK_IN16((IoC), XMA((Mac), (Reg+2)), \ - (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_HI]); \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \ } #define XM_OUT32(IoC, Mac, Reg, Val) { \ @@ -2009,8 +2029,8 @@ } #define XM_OUTADDR(IoC, Mac, Reg, pVal) { \ - SK_U8 *pByte; \ - pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_U8 SK_FAR *pByte; \ + pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \ SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16) \ (((SK_U16)(pByte[0]) & 0x00ff) | \ (((SK_U16)(pByte[1]) << 8) & 0xff00))); \ @@ -2024,8 +2044,8 @@ #define XM_INHASH(IoC, Mac, Reg, pVal) { \ SK_U16 Word; \ - SK_U8 *pByte; \ - pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_U8 SK_FAR *pByte; \ + pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \ SK_IN16((IoC), XMA((Mac), (Reg)), &Word); \ pByte[0] = (SK_U8)(Word & 0x00ff); \ pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \ @@ -2041,8 +2061,8 @@ } #define XM_OUTHASH(IoC, Mac, Reg, pVal) { \ - SK_U8 *pByte; \ - pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_U8 SK_FAR *pByte; \ + pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \ SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16) \ (((SK_U16)(pByte[0]) & 0x00ff)| \ (((SK_U16)(pByte[1]) << 8) & 0xff00))); \ @@ -2089,9 +2109,9 @@ #define GM_IN32(IoC, Mac, Reg, pVal) { \ SK_IN16((IoC), GMA((Mac), (Reg)), \ - (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_LO]); \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \ SK_IN16((IoC), GMA((Mac), (Reg+4)), \ - (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_HI]); \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \ } #define GM_OUT32(IoC, Mac, Reg, Val) { \ @@ -2115,8 +2135,8 @@ } #define GM_OUTADDR(IoC, Mac, Reg, pVal) { \ - SK_U8 *pByte; \ - pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + 
SK_U8 SK_FAR *pByte; \ + pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \ SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16) \ (((SK_U16)(pByte[0]) & 0x00ff) | \ (((SK_U16)(pByte[1]) << 8) & 0xff00))); \ @@ -2186,7 +2206,7 @@ #define PHY_ADDR_BCOM (1<<8) #define PHY_ADDR_LONE (3<<8) #define PHY_ADDR_NAT (0<<8) - + /* GPHY address (bits 15..11 of SMI control reg) */ #define PHY_ADDR_MARV 0 @@ -2196,7 +2216,7 @@ * PHY_READ() read a 16 bit value from the PHY * PHY_WRITE() write a 16 bit value to the PHY * - * para: + * para: * IoC I/O context needed for SK I/O macros * pPort Pointer to port struct for PhyAddr * Mac XMAC to access values: MAC_1 or MAC_2 @@ -2268,7 +2288,7 @@ * * para: * Addr PCI configuration register to access. - * Values: PCI_VENDOR_ID ... PCI_VPD_ADDR, + * Values: PCI_VENDOR_ID ... PCI_VPD_ADR_REG, * * usage SK_IN16(pAC, PCI_C(PCI_VENDOR_ID), pVal); */ @@ -2287,12 +2307,12 @@ * #define SK_IN8(pAC, Addr, pVal) ...\ * *pVal = (SK_U8)inp(SK_HW_ADDR(pAC->Hw.Iop, Addr))) */ -#ifdef SK_MEM_MAPPED_IO +#ifdef SK_MEM_MAPPED_IO #define SK_HW_ADDR(Base, Addr) ((Base) + (Addr)) -#else /* SK_MEM_MAPPED_IO */ +#else /* SK_MEM_MAPPED_IO */ #define SK_HW_ADDR(Base, Addr) \ ((Base) + (((Addr) & 0x7f) | (((Addr) >> 7 > 0) ? 0x80 : 0))) -#endif /* SK_MEM_MAPPED_IO */ +#endif /* SK_MEM_MAPPED_IO */ #define SZ_LONG (sizeof(SK_U32)) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skgehwt.h linux.22-ac2/drivers/net/sk98lin/h/skgehwt.h --- linux.vanilla/drivers/net/sk98lin/h/skgehwt.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skgehwt.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,17 +1,17 @@ /****************************************************************************** * * Name: skhwt.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.5 $ - * Date: $Date: 1999/11/22 13:54:24 $ + * Project: Gigabit Ethernet Adapters, Schedule-Modul + * Version: $Revision: 1.6 $ + * Date: $Date: 2003/05/13 17:57:48 $ * Purpose: Defines for the hardware timer functions * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +27,9 @@ * History: * * $Log: skgehwt.h,v $ + * Revision 1.6 2003/05/13 17:57:48 mkarl + * Editorial changes. + * * Revision 1.5 1999/11/22 13:54:24 cgoos * Changed license header to GPL. 
* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skgeinit.h linux.22-ac2/drivers/net/sk98lin/h/skgeinit.h --- linux.vanilla/drivers/net/sk98lin/h/skgeinit.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skgeinit.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skgeinit.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.73 $ - * Date: $Date: 2002/11/15 12:47:25 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.81 $ + * Date: $Date: 2003/07/04 12:30:38 $ * Purpose: Structures and prototypes for the GE Init Module * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,66 +27,98 @@ * History: * * $Log: skgeinit.h,v $ + * Revision 1.81 2003/07/04 12:30:38 rschmidt + * Added SK_FAR to pointers in MAC statistic functions (for PXE) + * Editorial changes + * + * Revision 1.80 2003/05/28 15:25:30 rschmidt + * Added SK_FAR to pointers in MAC/PHY read functions (for PXE) + * Minor changes to avoid LINT warnings + * Editorial changes + * + * Revision 1.79 2003/05/06 12:02:33 rschmidt + * Added entry GIYukon in s_GeInit structure + * Editorial changes + * + * Revision 1.78 2003/04/28 08:59:57 rschmidt + * Added entries GIValIrqMask and GITimeStampCnt in s_GeInit structure + * + * Revision 1.77 2003/04/08 16:27:02 rschmidt + * Added entry GILedBlinkCtrl in s_GeInit structure + * Added defines for LED Blink Control + * + * Revision 1.76 2003/03/31 07:21:01 mkarl + * Added PGmANegAdv to SK_GEPORT. + * Corrected Copyright. + * + * Revision 1.75 2003/02/05 13:36:39 rschmidt + * Added define SK_FACT_78 for YUKON's Host Clock of 78.12 MHz + * Editorial changes + * + * Revision 1.74 2003/01/28 09:39:16 rschmidt + * Added entry GIYukonLite in s_GeInit structure + * Editorial changes + * * Revision 1.73 2002/11/15 12:47:25 rschmidt * Replaced error message SKERR_HWI_E024 for Cable Diagnostic with * Rx queue error in SkGeStopPort(). - * + * * Revision 1.72 2002/11/12 17:08:35 rschmidt * Added entries for Cable Diagnostic to Port structure * Added entries GIPciSlot64 and GIPciClock66 in s_GeInit structure * Added error message for Cable Diagnostic * Added prototypes for SkGmCableDiagStatus() * Editorial changes - * + * * Revision 1.71 2002/10/21 11:26:10 mkarl * Changed interface of SkGeInitAssignRamToQueues(). - * + * * Revision 1.70 2002/10/14 08:21:32 rschmidt * Changed type of GICopperType, GIVauxAvail to SK_BOOL * Added entry PRxOverCnt to Port structure * Added entry GIYukon32Bit in s_GeInit structure * Editorial changes - * + * * Revision 1.69 2002/10/09 16:57:15 mkarl * Added some constants and macros for SkGeInitAssignRamToQueues(). - * + * * Revision 1.68 2002/09/12 08:58:51 rwahl * Retrieve counters needed for XMAC errata workarounds directly because * PNMI returns corrected counter values (e.g. #10620). 
- * + * * Revision 1.67 2002/08/16 14:40:30 rschmidt * Added entries GIGenesis and GICopperType in s_GeInit structure * Added prototypes for SkMacHashing() * Editorial changes - * + * * Revision 1.66 2002/08/12 13:27:21 rschmidt * Added defines for Link speed capabilities * Added entry PLinkSpeedCap to Port structure * Added entry GIVauxAvail in s_GeInit structure * Added prototypes for SkMacPromiscMode() * Editorial changes - * + * * Revision 1.65 2002/08/08 15:46:18 rschmidt * Added define SK_PHY_ACC_TO for PHY access timeout * Added define SK_XM_RX_HI_WM for XMAC Rx High Watermark * Added define SK_MIN_TXQ_SIZE for Min RAM Buffer Tx Queue Size * Added entry PhyId1 to Port structure - * + * * Revision 1.64 2002/07/23 16:02:56 rschmidt * Added entry GIWolOffs in s_GeInit struct (HW-Bug in YUKON 1st rev.) * Added prototypes for: SkGePhyRead(), SkGePhyWrite() - * + * * Revision 1.63 2002/07/18 08:17:38 rwahl * Corrected definitions for SK_LSPEED_xxx & SK_LSPEED_STAT_xxx. - * + * * Revision 1.62 2002/07/17 18:21:55 rwahl * Added SK_LSPEED_INDETERMINATED define. - * + * * Revision 1.61 2002/07/17 17:16:03 rwahl * - MacType now member of GIni struct. * - Struct alignment to 32bit. * - Editorial change. - * + * * Revision 1.60 2002/07/15 18:23:39 rwahl * Added GeMacFunc to GE Init structure. * Added prototypes for SkXmUpdateStats(), SkGmUpdateStats(), @@ -93,19 +126,19 @@ * SkGmResetCounter(), SkXmOverflowStatus(), SkGmOverflowStatus(). * Added defines for current link speed state. * Added ERRMSG defintions for MacUpdateStat() & MacStatistics(). - * + * * Revision 1.59 2002/07/15 15:40:22 rschmidt * Added entry PLinkSpeedUsed to Port structure * Editorial changes - * + * * Revision 1.58 2002/06/10 09:36:30 rschmidt * Editorial changes. - * + * * Revision 1.57 2002/06/05 08:18:00 rschmidt * Corrected alignment in Port Structure * Added new prototypes for GMAC * Editorial changes - * + * * Revision 1.56 2002/04/25 11:38:12 rschmidt * Added defines for Link speed values * Added defines for Loopback parameters for MAC and PHY @@ -120,150 +153,150 @@ * SkXmPhyRead(), SkXmPhyRead(), SkGmPhyWrite(), SkGmPhyWrite(); * Removed prototypes for static functions in SkXmac2.c * Editorial changes - * + * * Revision 1.55 2002/02/26 15:24:53 rwahl * Fix: no link with manual configuration (#10673). The previous fix for * #10639 was removed. So for RLMT mode = CLS the RLMT may switch to * misconfigured port. It should not occur for the other RLMT modes. - * + * * Revision 1.54 2002/01/18 16:52:52 rwahl * Editorial corrections. - * + * * Revision 1.53 2001/11/20 09:19:58 rwahl * Reworked bugfix #10639 (no dependency to RLMT mode). - * + * * Revision 1.52 2001/10/26 07:52:23 afischer * Port switching bug in `check local link` mode - * + * * Revision 1.51 2001/02/09 12:26:38 cgoos * Inserted #ifdef DIAG for half duplex workaround timer. - * + * * Revision 1.50 2001/02/07 07:56:40 rassmann * Corrected copyright. - * + * * Revision 1.49 2001/01/31 15:32:18 gklug * fix: problem with autosensing an SR8800 switch * add: counter for autoneg timeouts - * + * * Revision 1.48 2000/11/09 11:30:10 rassmann * WA: Waiting after releasing reset until BCom chip is accessible. - * + * * Revision 1.47 2000/10/18 12:22:40 cgoos * Added workaround for half duplex hangup. - * + * * Revision 1.46 2000/08/10 11:28:00 rassmann * Editorial changes. * Preserving 32-bit alignment in structs for the adapter context. - * + * * Revision 1.45 1999/11/22 13:56:19 cgoos * Changed license header to GPL. 
- * + * * Revision 1.44 1999/10/26 07:34:15 malthoff * The define SK_LNK_ON has been lost in v1.41. - * + * * Revision 1.43 1999/10/06 09:30:16 cgoos * Changed SK_XM_THR_JUMBO. - * + * * Revision 1.42 1999/09/16 12:58:26 cgoos * Changed SK_LED_STANDY macro to be independent of HW link sync. - * + * * Revision 1.41 1999/07/30 06:56:14 malthoff * Correct comment for SK_MS_STAT_UNSET. - * + * * Revision 1.40 1999/05/27 13:38:46 cgoos * Added SK_BMU_TX_WM. * Made SK_BMU_TX_WM and SK_BMU_RX_WM user-definable. * Changed XMAC Tx treshold to max. values. - * + * * Revision 1.39 1999/05/20 14:35:26 malthoff * Remove prototypes for SkGeLinkLED(). - * + * * Revision 1.38 1999/05/19 11:59:12 cgoos * Added SK_MS_CAP_INDETERMINATED define. - * + * * Revision 1.37 1999/05/19 07:32:33 cgoos * Changes for 1000Base-T. * LED-defines for HWAC_LINK_LED macro. - * + * * Revision 1.36 1999/04/08 14:00:24 gklug * add:Port struct field PLinkResCt - * + * * Revision 1.35 1999/03/25 07:43:07 malthoff * Add error string for SKERR_HWI_E018MSG. - * + * * Revision 1.34 1999/03/12 16:25:57 malthoff * Remove PPollRxD and PPollTxD. * Add SKERR_HWI_E017MSG. and SK_DPOLL_MAX. - * + * * Revision 1.33 1999/03/12 13:34:41 malthoff * Add Autonegotiation error codes. * Change defines for parameter Mode in SkXmSetRxCmd(). * Replace __STDC__ by SK_KR_PROTO. - * + * * Revision 1.32 1999/01/25 14:40:20 mhaveman * Added new return states for the virtual management port if multiple * ports are active but differently configured. - * + * * Revision 1.31 1998/12/11 15:17:02 gklug - * add: Link partnet autoneg states : Unknown Manual and Autonegotiation - * + * add: Link partnet autoneg states : Unknown Manual and Auto-negotiation + * * Revision 1.30 1998/12/07 12:17:04 gklug - * add: Link Partner autonegotiation flag - * + * add: Link Partner auto-negotiation flag + * * Revision 1.29 1998/12/01 10:54:42 gklug * add: variables for XMAC Errata - * + * * Revision 1.28 1998/12/01 10:14:15 gklug * add: PIsave saves the Interrupt status word - * + * * Revision 1.27 1998/11/26 15:24:52 mhaveman * Added link status states SK_LMODE_STAT_AUTOHALF and * SK_LMODE_STAT_AUTOFULL which are used by PNMI. - * + * * Revision 1.26 1998/11/26 14:53:01 gklug * add:autoNeg Timeout variable - * + * * Revision 1.25 1998/11/26 08:58:50 gklug * add: Link Mode configuration (AUTO Sense mode) - * + * * Revision 1.24 1998/11/24 13:30:27 gklug * add: PCheckPar to port struct - * + * * Revision 1.23 1998/11/18 13:23:26 malthoff * Add SK_PKT_TO_MAX. - * + * * Revision 1.22 1998/11/18 13:19:54 gklug * add: PPrevShorts and PLinkBroken to port struct for WA XMAC Errata #C1 * * Revision 1.21 1998/10/26 08:02:57 malthoff * Add GIRamOffs. - * + * * Revision 1.20 1998/10/19 07:28:37 malthoff * Add prototype for SkGeInitRamIface(). - * + * * Revision 1.19 1998/10/14 14:47:48 malthoff * SK_TIMER should not be defined for Diagnostics. * Add SKERR_HWI_E015MSG and SKERR_HWI_E016MSG. - * + * * Revision 1.18 1998/10/14 14:00:03 gklug * add: timer to port struct for workaround of Errata #2 - * + * * Revision 1.17 1998/10/14 11:23:09 malthoff * Add prototype for SkXmAutoNegDone(). * Fix SkXmSetRxCmd() prototype statement. * * Revision 1.16 1998/10/14 05:42:29 gklug * add: HWLinkUp flag to Port struct - * + * * Revision 1.15 1998/10/09 08:26:33 malthoff * Rename SK_RB_ULPP_B to SK_RB_LLPP_B. - * + * * Revision 1.14 1998/10/09 07:11:13 malthoff * bug fix: SK_FACT_53 is 85 not 117. * Rework time out init values. * Add GIPortUsage and corresponding defines. * Add some error log messages. 
- * + * * Revision 1.13 1998/10/06 14:13:14 malthoff * Add prototype for SkGeLoadLnkSyncCnt(). * @@ -328,9 +361,11 @@ /* defines ********************************************************************/ +#define SK_TEST_VAL 0x11335577UL + /* modifying Link LED behaviour (used with SkGeLinkLED()) */ #define SK_LNK_OFF LED_OFF -#define SK_LNK_ON (LED_ON | LED_BLK_OFF | LED_SYNC_OFF) +#define SK_LNK_ON (LED_ON | LED_BLK_OFF | LED_SYNC_OFF) #define SK_LNK_BLINK (LED_ON | LED_BLK_ON | LED_SYNC_ON) #define SK_LNK_PERM (LED_ON | LED_BLK_OFF | LED_SYNC_ON) #define SK_LNK_TST (LED_ON | LED_BLK_ON | LED_SYNC_OFF) @@ -352,14 +387,17 @@ #define SK_LED_TST 2 /* Counter and Timer constants, for a host clock of 62.5 MHz */ -#define SK_XMIT_DUR 0x002faf08L /* 50 ms */ -#define SK_BLK_DUR 0x01dcd650L /* 500 ms */ +#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */ +#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */ + +#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */ -#define SK_DPOLL_DEF 0x00EE6B28L /* 250 ms */ -#define SK_DPOLL_MAX 0x00FFFFFFL /* ca. 268ms */ +#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */ + /* 215 ms at 78.12 MHz */ #define SK_FACT_62 100 /* is given in percent */ -#define SK_FACT_53 85 +#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */ +#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */ /* Timeout values */ #define SK_MAC_TO_53 72 /* MAC arbiter timeout */ @@ -447,9 +485,7 @@ #define SK_LMODE_AUTOFULL 4 /* AutoFull Duplex Mode */ #define SK_LMODE_AUTOBOTH 5 /* AutoBoth Duplex Mode */ #define SK_LMODE_AUTOSENSE 6 /* configured mode auto sensing */ -#define SK_LMODE_INDETERMINATED 7 /* Return value for virtual port if - * multiple ports are differently configured. - */ +#define SK_LMODE_INDETERMINATED 7 /* indeterminated */ /* Auto-negotiation timeout in 100ms granularity */ #define SK_AND_MAX_TO 6 /* Wait 600 msec before link comes up */ @@ -465,27 +501,21 @@ #define SK_LSPEED_CAP_10MBPS (1<<1) /* 10 Mbps */ #define SK_LSPEED_CAP_100MBPS (1<<2) /* 100 Mbps */ #define SK_LSPEED_CAP_1000MBPS (1<<3) /* 1000 Mbps */ -#define SK_LSPEED_CAP_INDETERMINATED (1<<4) /* Return value for virtual port if - * multiple ports are differently configured. - */ +#define SK_LSPEED_CAP_INDETERMINATED (1<<4) /* indeterminated */ /* Link Speed Parameter */ #define SK_LSPEED_AUTO 1 /* Automatic resolution */ #define SK_LSPEED_10MBPS 2 /* 10 Mbps */ #define SK_LSPEED_100MBPS 3 /* 100 Mbps */ #define SK_LSPEED_1000MBPS 4 /* 1000 Mbps */ -#define SK_LSPEED_INDETERMINATED 5 /* Return value for virtual port if - * multiple ports are differently configured. - */ +#define SK_LSPEED_INDETERMINATED 5 /* indeterminated */ /* Link Speed Current State */ #define SK_LSPEED_STAT_UNKNOWN 1 #define SK_LSPEED_STAT_10MBPS 2 #define SK_LSPEED_STAT_100MBPS 3 #define SK_LSPEED_STAT_1000MBPS 4 -#define SK_LSPEED_STAT_INDETERMINATED 5 /* Return value for virtual port if - * multiple ports are differently configured. - */ +#define SK_LSPEED_STAT_INDETERMINATED 5 /* Link Capability Parameter */ @@ -493,62 +523,50 @@ #define SK_LMODE_CAP_FULL (1<<1) /* Full Duplex Mode */ #define SK_LMODE_CAP_AUTOHALF (1<<2) /* AutoHalf Duplex Mode */ #define SK_LMODE_CAP_AUTOFULL (1<<3) /* AutoFull Duplex Mode */ -#define SK_LMODE_CAP_INDETERMINATED (1<<4) /* Return value for virtual port if - * multiple ports are differently configured. 
- */ +#define SK_LMODE_CAP_INDETERMINATED (1<<4) /* indeterminated */ /* Link Mode Current State */ #define SK_LMODE_STAT_UNKNOWN 1 /* Unknown Duplex Mode */ #define SK_LMODE_STAT_HALF 2 /* Half Duplex Mode */ #define SK_LMODE_STAT_FULL 3 /* Full Duplex Mode */ -#define SK_LMODE_STAT_AUTOHALF 4 /* Half Duplex Mode obtained by AutoNeg */ -#define SK_LMODE_STAT_AUTOFULL 5 /* Full Duplex Mode obtained by AutoNeg */ -#define SK_LMODE_STAT_INDETERMINATED 6 /* Return value for virtual port if - * multiple ports are differently configured. - */ +#define SK_LMODE_STAT_AUTOHALF 4 /* Half Duplex Mode obtained by Auto-Neg */ +#define SK_LMODE_STAT_AUTOFULL 5 /* Full Duplex Mode obtained by Auto-Neg */ +#define SK_LMODE_STAT_INDETERMINATED 6 /* indeterminated */ + /* Flow Control Mode Parameter (and capabilities) */ -#define SK_FLOW_MODE_NONE 1 /* No Flow Control */ +#define SK_FLOW_MODE_NONE 1 /* No Flow-Control */ #define SK_FLOW_MODE_LOC_SEND 2 /* Local station sends PAUSE */ -#define SK_FLOW_MODE_SYMMETRIC 3 /* Both station may send PAUSE */ -#define SK_FLOW_MODE_SYM_OR_REM 4 /* Both station may send PAUSE or +#define SK_FLOW_MODE_SYMMETRIC 3 /* Both stations may send PAUSE */ +#define SK_FLOW_MODE_SYM_OR_REM 4 /* Both stations may send PAUSE or * just the remote station may send PAUSE */ -#define SK_FLOW_MODE_INDETERMINATED 5 /* Return value for virtual port if - * multiple ports are differently configured. - */ +#define SK_FLOW_MODE_INDETERMINATED 5 /* indeterminated */ /* Flow Control Status Parameter */ #define SK_FLOW_STAT_NONE 1 /* No Flow Control */ #define SK_FLOW_STAT_REM_SEND 2 /* Remote Station sends PAUSE */ #define SK_FLOW_STAT_LOC_SEND 3 /* Local station sends PAUSE */ #define SK_FLOW_STAT_SYMMETRIC 4 /* Both station may send PAUSE */ -#define SK_FLOW_STAT_INDETERMINATED 5 /* Return value for virtual port if - * multiple ports are differently configured. - */ +#define SK_FLOW_STAT_INDETERMINATED 5 /* indeterminated */ + /* Master/Slave Mode Capabilities */ #define SK_MS_CAP_AUTO (1<<0) /* Automatic resolution */ #define SK_MS_CAP_MASTER (1<<1) /* This station is master */ #define SK_MS_CAP_SLAVE (1<<2) /* This station is slave */ -#define SK_MS_CAP_INDETERMINATED (1<<3) /* Return value for virtual port if - * multiple ports are differently configured. 
- */ +#define SK_MS_CAP_INDETERMINATED (1<<3) /* indeterminated */ /* Set Master/Slave Mode Parameter (and capabilities) */ #define SK_MS_MODE_AUTO 1 /* Automatic resolution */ #define SK_MS_MODE_MASTER 2 /* This station is master */ #define SK_MS_MODE_SLAVE 3 /* This station is slave */ -#define SK_MS_MODE_INDETERMINATED 4 /* Return value for virtual port if - * multiple ports are differently - */ +#define SK_MS_MODE_INDETERMINATED 4 /* indeterminated */ /* Master/Slave Status Parameter */ -#define SK_MS_STAT_UNSET 1 /* The MS status is never been determ*/ +#define SK_MS_STAT_UNSET 1 /* The M/S status is not set */ #define SK_MS_STAT_MASTER 2 /* This station is master */ #define SK_MS_STAT_SLAVE 3 /* This station is slave */ -#define SK_MS_STAT_FAULT 4 /* MS resolution failed */ -#define SK_MS_STAT_INDETERMINATED 5 /* Return value for virtual port if - * multiple ports are differently - */ +#define SK_MS_STAT_FAULT 4 /* M/S resolution failed */ +#define SK_MS_STAT_INDETERMINATED 5 /* indeterminated */ /* parameter 'Mode' when calling SkXmSetRxCmd() */ #define SK_STRIP_FCS_ON (1<<0) /* Enable FCS stripping of Rx frames */ @@ -557,8 +575,8 @@ #define SK_STRIP_PAD_OFF (1<<3) /* Disable pad byte stripping of Rx fr */ #define SK_LENERR_OK_ON (1<<4) /* Don't chk fr for in range len error */ #define SK_LENERR_OK_OFF (1<<5) /* Check frames for in range len error */ -#define SK_BIG_PK_OK_ON (1<<6) /* Don't set rcvError bit for big fr */ -#define SK_BIG_PK_OK_OFF (1<<7) /* Set rcvError bit for big frames */ +#define SK_BIG_PK_OK_ON (1<<6) /* Don't set Rx Error bit for big frames */ +#define SK_BIG_PK_OK_OFF (1<<7) /* Set Rx Error bit for big frames */ #define SK_SELF_RX_ON (1<<8) /* Enable Rx of own packets */ #define SK_SELF_RX_OFF (1<<9) /* Disable Rx of own packets */ @@ -579,6 +597,11 @@ /* Default receive frame limit for Workaround of XMAC Errata */ #define SK_DEF_RX_WA_LIM SK_CONSTU64(100) +/* values for GILedBlinkCtrl (LED Blink Control) */ +#define SK_ACT_LED_BLINK (1<<0) /* Active LED blinking */ +#define SK_DUP_LED_NORMAL (1<<1) /* Duplex LED normal */ +#define SK_LED_LINK100_ON (1<<2) /* Link 100M LED on */ + /* Link Partner Status */ #define SK_LIPA_UNKNOWN 0 /* Link partner is in unknown state */ #define SK_LIPA_MANUAL 1 /* Link partner is in detected manual state */ @@ -598,10 +621,10 @@ typedef struct s_GeMacFunc { int (*pFnMacUpdateStats)(SK_AC *pAC, SK_IOC IoC, unsigned int Port); int (*pFnMacStatistic)(SK_AC *pAC, SK_IOC IoC, unsigned int Port, - SK_U16 StatAddr, SK_U32 *pVal); + SK_U16 StatAddr, SK_U32 SK_FAR *pVal); int (*pFnMacResetCounter)(SK_AC *pAC, SK_IOC IoC, unsigned int Port); int (*pFnMacOverflow)(SK_AC *pAC, SK_IOC IoC, unsigned int Port, - SK_U16 IStatus, SK_U64 *pVal); + SK_U16 IStatus, SK_U64 SK_FAR *pVal); } SK_GEMACFUNC; /* @@ -612,7 +635,7 @@ SK_TIMER PWaTimer; /* Workaround Timer */ SK_TIMER HalfDupChkTimer; #endif /* SK_DIAG */ - SK_U32 PPrevShorts; /* Previous short Counter checking */ + SK_U32 PPrevShorts; /* Previous Short Counter checking */ SK_U32 PPrevFcs; /* Previous FCS Error Counter checking */ SK_U64 PPrevRx; /* Previous RxOk Counter checking */ SK_U64 PRxLim; /* Previous RxOk Counter checking */ @@ -634,12 +657,13 @@ int PXsQOff; /* Synchronous Tx Queue Address Offset */ int PXaQOff; /* Asynchronous Tx Queue Address Offset */ int PhyType; /* PHY used on this port */ + int PState; /* Port status (reset, stop, init, run) */ SK_U16 PhyId1; /* PHY Id1 on this port */ SK_U16 PhyAddr; /* MDIO/MDC PHY address */ SK_U16 PIsave; /* Saved Interrupt status word 
*/ SK_U16 PSsave; /* Saved PHY status word */ + SK_U16 PGmANegAdv; /* Saved GPhy AutoNegAdvertisment register */ SK_BOOL PHWLinkUp; /* The hardware Link is up (wiring) */ - SK_BOOL PState; /* Is port initialized ? */ SK_BOOL PLinkBroken; /* Is Link broken ? */ SK_BOOL PCheckPar; /* Do we check for parity errors ? */ SK_BOOL HalfDupTimerActive; @@ -656,7 +680,7 @@ SK_U8 PMSCap; /* Master/Slave Capabilities */ SK_U8 PMSMode; /* Master/Slave Mode */ SK_U8 PMSStatus; /* Master/Slave Status */ - SK_U8 PAutoNegFail; /* Auto-negotiation fail flag */ + SK_BOOL PAutoNegFail; /* Auto-negotiation fail flag */ SK_U8 PLipaAutoNeg; /* Auto-negotiation possible with Link Partner */ SK_U8 PCableLen; /* Cable Length */ SK_U8 PMdiPairLen[4]; /* MDI[0..3] Pair Length */ @@ -668,24 +692,29 @@ * (has to be included in the adapter context) */ typedef struct s_GeInit { + int GIChipId; /* Chip Identification Number */ + int GIChipRev; /* Chip Revision Number */ SK_U8 GIPciHwRev; /* PCI HW Revision Number */ - SK_U8 GIChipId; /* Chip Identification Number */ - SK_U8 GIChipRev; /* Chip Revision Number */ SK_BOOL GIGenesis; /* Genesis adapter ? */ + SK_BOOL GIYukon; /* YUKON-A1/Bx chip */ + SK_BOOL GIYukonLite; /* YUKON-Lite chip */ SK_BOOL GICopperType; /* Copper Type adapter ? */ SK_BOOL GIPciSlot64; /* 64-bit PCI Slot */ SK_BOOL GIPciClock66; /* 66 MHz PCI Clock */ SK_BOOL GIVauxAvail; /* VAUX available (YUKON) */ SK_BOOL GIYukon32Bit; /* 32-Bit YUKON adapter */ + SK_U16 GILedBlinkCtrl; /* LED Blink Control */ int GIMacsFound; /* Number of MACs found on this adapter */ int GIMacType; /* MAC Type used on this adapter */ int GIHstClkFact; /* Host Clock Factor (62.5 / HstClk * 100) */ - int GIPortUsage; /* Driver Port Usage: SK_RED_LINK/SK_MUL_LINK */ + int GIPortUsage; /* Driver Port Usage */ int GILevel; /* Initialization Level completed */ int GIRamSize; /* The RAM size of the adapter in kB */ - int GIWolOffs; /* WOL Register Offset (HW-Bug in 1st revision) */ + int GIWolOffs; /* WOL Register Offset (HW-Bug in Rev. A) */ SK_U32 GIRamOffs; /* RAM Address Offset for addr calculation */ - SK_U32 GIPollTimerVal; /* Descriptor Poll Timer Init Val in clk ticks*/ + SK_U32 GIPollTimerVal; /* Descr. 
Poll Timer Init Val (HstClk ticks) */ + SK_U32 GIValIrqMask; /* Value for Interrupt Mask */ + SK_U32 GITimeStampCnt; /* Time Stamp High Counter (YUKON only) */ SK_GEPORT GP[SK_MAX_MACS];/* Port Dependent Information */ SK_GEMACFUNC GIFunc; /* MAC depedent functions */ } SK_GEINIT; @@ -714,7 +743,7 @@ #define SKERR_HWI_E010 (SKERR_HWI_E009+1) #define SKERR_HWI_E010MSG "SkGeCfgSync() called with invalid parameters" #define SKERR_HWI_E011 (SKERR_HWI_E010+1) -#define SKERR_HWI_E011MSG "SkGeInitPort(): Receive Queue Size to small" +#define SKERR_HWI_E011MSG "SkGeInitPort(): Receive Queue Size too small" #define SKERR_HWI_E012 (SKERR_HWI_E011+1) #define SKERR_HWI_E012MSG "SkGeInitPort(): invalid Queue Size specified" #define SKERR_HWI_E013 (SKERR_HWI_E012+1) @@ -738,7 +767,7 @@ #define SKERR_HWI_E022 (SKERR_HWI_E021+1) #define SKERR_HWI_E022MSG "MacStatistic(): illegal statistic base address" #define SKERR_HWI_E023 (SKERR_HWI_E022+1) -#define SKERR_HWI_E023MSG "SkGeInitPort(): Transmit Queue Size to small" +#define SKERR_HWI_E023MSG "SkGeInitPort(): Transmit Queue Size too small" #define SKERR_HWI_E024 (SKERR_HWI_E023+1) #define SKERR_HWI_E024MSG "FATAL: SkGeStopPort() does not terminate (Rx)" #define SKERR_HWI_E025 (SKERR_HWI_E024+1) @@ -911,7 +940,7 @@ SK_IOC IoC, int Port, int Addr, - SK_U16 *pVal); + SK_U16 SK_FAR *pVal); extern void SkXmPhyWrite( SK_AC *pAC, @@ -925,7 +954,7 @@ SK_IOC IoC, int Port, int Addr, - SK_U16 *pVal); + SK_U16 SK_FAR *pVal); extern void SkGmPhyWrite( SK_AC *pAC, @@ -934,20 +963,6 @@ int Addr, SK_U16 Val); -extern void SkGePhyRead( - SK_AC *pAC, - SK_IOC IoC, - int Port, - int Addr, - SK_U16 *pVal); - -extern void SkGePhyWrite( - SK_AC *pAC, - SK_IOC IoC, - int Port, - int Addr, - SK_U16 Val); - extern void SkXmClrExactAddr( SK_AC *pAC, SK_IOC IoC, @@ -986,14 +1001,14 @@ SK_IOC IoC, unsigned int Port, SK_U16 StatAddr, - SK_U32 *pVal); + SK_U32 SK_FAR *pVal); extern int SkGmMacStatistic( SK_AC *pAC, SK_IOC IoC, unsigned int Port, SK_U16 StatAddr, - SK_U32 *pVal); + SK_U32 SK_FAR *pVal); extern int SkXmResetCounter( SK_AC *pAC, @@ -1010,14 +1025,14 @@ SK_IOC IoC, unsigned int Port, SK_U16 IStatus, - SK_U64 *pStatus); + SK_U64 SK_FAR *pStatus); extern int SkGmOverflowStatus( SK_AC *pAC, SK_IOC IoC, unsigned int Port, SK_U16 MacStatus, - SK_U64 *pStatus); + SK_U64 SK_FAR *pStatus); extern int SkGmCableDiagStatus( SK_AC *pAC, @@ -1026,6 +1041,20 @@ SK_BOOL StartTest); #ifdef SK_DIAG +extern void SkGePhyRead( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 *pVal); + +extern void SkGePhyWrite( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 Val); + extern void SkMacSetRxCmd( SK_AC *pAC, SK_IOC IoC, @@ -1083,8 +1112,6 @@ extern int SkMacAutoNegDone(); extern void SkMacAutoNegLipaPhy(); extern void SkMacSetRxTxEn(); -extern void SkGePhyRead(); -extern void SkGePhyWrite(); extern void SkXmInitMac(); extern void SkXmPhyRead(); extern void SkXmPhyWrite(); @@ -1106,6 +1133,8 @@ extern int SkGmCableDiagStatus(); #ifdef SK_DIAG +extern void SkGePhyRead(); +extern void SkGePhyWrite(); extern void SkMacSetRxCmd(); extern void SkMacCrcGener(); extern void SkMacTimeStamp(); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skgepnm2.h linux.22-ac2/drivers/net/sk98lin/h/skgepnm2.h --- linux.vanilla/drivers/net/sk98lin/h/skgepnm2.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skgepnm2.h 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,16 @@ * * Name: skgepnm2.h * Project: GEnesis, PCI 
Gigabit Ethernet Adapter - * Version: $Revision: 1.34 $ - * Date: $Date: 2002/12/16 09:05:18 $ + * Version: $Revision: 1.36 $ + * Date: $Date: 2003/05/23 12:45:13 $ * Purpose: Defines for Private Network Management Interface * ****************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,13 @@ * History: * * $Log: skgepnm2.h,v $ + * Revision 1.36 2003/05/23 12:45:13 tschilli + * #ifndef SK_PNMI_HUNDREDS_SEC added to SK_PNMI_HUNDREDS_SEC definition + * to allow own time macro defines. + * + * Revision 1.35 2003/03/27 11:27:48 tschilli + * Copyright messages changed. + * * Revision 1.34 2002/12/16 09:05:18 tschilli * Code for VCT handling added. * @@ -359,11 +367,13 @@ /* * Time macros */ +#ifndef SK_PNMI_HUNDREDS_SEC #if SK_TICKS_PER_SEC == 100 #define SK_PNMI_HUNDREDS_SEC(t) (t) #else #define SK_PNMI_HUNDREDS_SEC(t) (((t) * 100) / (SK_TICKS_PER_SEC)) -#endif +#endif /* !SK_TICKS_PER_SEC */ +#endif /* !SK_PNMI_HUNDREDS_SEC */ /* * Macros to work around alignment problems diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skgepnmi.h linux.22-ac2/drivers/net/sk98lin/h/skgepnmi.h --- linux.vanilla/drivers/net/sk98lin/h/skgepnmi.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skgepnmi.h 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,16 @@ * * Name: skgepnmi.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.59 $ - * Date: $Date: 2002/12/16 14:03:50 $ + * Version: $Revision: 1.61 $ + * Date: $Date: 2003/05/23 12:53:52 $ * Purpose: Defines for Private Network Management Interface * ****************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,16 @@ * History: * * $Log: skgepnmi.h,v $ + * Revision 1.61 2003/05/23 12:53:52 tschilli + * Generic PNMI IOCTL subcommands added. + * Function prototype SkPnmiGenIoctl() added. + * OID_SKGE_BOARDLEVEL added. + * Return value SK_PNMI_ERR_NOT_SUPPORTED added. + * Editorial changes. + * + * Revision 1.60 2003/03/27 11:27:26 tschilli + * Copyright messages changed. + * * Revision 1.59 2002/12/16 14:03:50 tschilli * New defines for VCT added. * @@ -284,6 +295,7 @@ #define SK_PNMI_ERR_UNKNOWN_OID 5 #define SK_PNMI_ERR_UNKNOWN_INST 6 #define SK_PNMI_ERR_UNKNOWN_NET 7 +#define SK_PNMI_ERR_NOT_SUPPORTED 10 /* @@ -436,6 +448,8 @@ #define OID_SKGE_SPEED_MODE 0xFF010171 #define OID_SKGE_SPEED_STATUS 0xFF010172 +#define OID_SKGE_BOARDLEVEL 0xFF010180 + #define OID_SKGE_SENSOR_NUMBER 0xFF020100 #define OID_SKGE_SENSOR_INDEX 0xFF020101 #define OID_SKGE_SENSOR_DESCR 0xFF020102 @@ -558,6 +572,11 @@ #define OID_SKGE_VCT_SET 0xFF020201 #define OID_SKGE_VCT_STATUS 0xFF020202 +#ifdef SK_DIAG_SUPPORT +/* Defines for driver DIAG mode. 
*/ +#define OID_SKGE_DIAG_MODE 0xFF020204 +#endif /* SK_DIAG_SUPPORT */ + /* VCT struct to store a backup copy of VCT data after a port reset. */ typedef struct s_PnmiVct { @@ -596,6 +615,17 @@ /* + * Generic PNMI IOCTL subcommand definitions. + */ +#define SK_GET_SINGLE_VAR 1 +#define SK_SET_SINGLE_VAR 2 +#define SK_PRESET_SINGLE_VAR 3 +#define SK_GET_FULL_MIB 4 +#define SK_SET_FULL_MIB 5 +#define SK_PRESET_FULL_MIB 6 + + +/* * Define error numbers and messages for syslog */ #define SK_PNMI_ERR001 (SK_ERRBASE_PNMI + 1) @@ -1095,20 +1125,22 @@ /* * Function prototypes */ -extern int SkPnmiInit(SK_AC *pAc, SK_IOC IoC, int level); -extern int SkPnmiGetVar(SK_AC *pAc, SK_IOC IoC, SK_U32 Id, void* pBuf, +extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level); +extern int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, unsigned int* pLen, SK_U32 Instance, SK_U32 NetIndex); -extern int SkPnmiPreSetVar(SK_AC *pAc, SK_IOC IoC, SK_U32 Id, +extern int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); -extern int SkPnmiSetVar(SK_AC *pAc, SK_IOC IoC, SK_U32 Id, void* pBuf, +extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); -extern int SkPnmiGetStruct(SK_AC *pAc, SK_IOC IoC, void* pBuf, +extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, unsigned int *pLen, SK_U32 NetIndex); -extern int SkPnmiPreSetStruct(SK_AC *pAc, SK_IOC IoC, void* pBuf, +extern int SkPnmiPreSetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, unsigned int *pLen, SK_U32 NetIndex); -extern int SkPnmiSetStruct(SK_AC *pAc, SK_IOC IoC, void* pBuf, +extern int SkPnmiSetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, unsigned int *pLen, SK_U32 NetIndex); -extern int SkPnmiEvent(SK_AC *pAc, SK_IOC IoC, SK_U32 Event, +extern int SkPnmiEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Param); +extern int SkPnmiGenIoctl(SK_AC *pAC, SK_IOC IoC, void * pBuf, + unsigned int * pLen, SK_U32 NetIndex); #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skgesirq.h linux.22-ac2/drivers/net/sk98lin/h/skgesirq.h --- linux.vanilla/drivers/net/sk98lin/h/skgesirq.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skgesirq.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skgesirq.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.26 $ - * Date: $Date: 2002/10/14 09:52:36 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.30 $ + * Date: $Date: 2003/07/04 12:34:13 $ * Purpose: SK specific Gigabit Ethernet special IRQ functions * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,8 +26,22 @@ * * History: * $Log: skgesirq.h,v $ + * Revision 1.30 2003/07/04 12:34:13 rschmidt + * Added SKERR_SIRQ_E025 for Downshift detected (Yukon-Copper) + * + * Revision 1.29 2003/05/28 15:14:49 rschmidt + * Moved defines for return codes of SkGePortCheckUp() to header file. 
+ * Minor changes to avoid LINT warnings. + * + * Revision 1.28 2003/05/13 17:22:43 mkarl + * Editorial changes. + * + * Revision 1.27 2003/03/31 07:32:34 mkarl + * Corrected Copyright. + * Editorial changes. + * * Revision 1.26 2002/10/14 09:52:36 rschmidt - * Added SKERR_SIRQ_E023 and SKERR_SIRQ_E023 for GPHY (Yukon) + * Added SKERR_SIRQ_E023 and SKERR_SIRQ_E024 for GPHY (Yukon) * Editorial changes * * Revision 1.25 2002/07/15 18:15:52 rwahl @@ -115,10 +130,15 @@ #ifndef _INC_SKGESIRQ_H_ #define _INC_SKGESIRQ_H_ +/* Define return codes of SkGePortCheckUp and CheckShort */ +#define SK_HW_PS_NONE 0 /* No action needed */ +#define SK_HW_PS_RESTART 1 /* Restart needed */ +#define SK_HW_PS_LINK 2 /* Link Up actions needed */ + /* * Define the Event the special IRQ/INI module can handle */ -#define SK_HWEV_WATIM 1 /* Timeout for WA errata #2 XMAC */ +#define SK_HWEV_WATIM 1 /* Timeout for WA Errata #2 XMAC */ #define SK_HWEV_PORT_START 2 /* Port Start Event by RLMT */ #define SK_HWEV_PORT_STOP 3 /* Port Stop Event by RLMT */ #define SK_HWEV_CLEAR_STAT 4 /* Clear Statistics by PNMI */ @@ -129,10 +149,10 @@ #define SK_HWEV_SET_SPEED 9 /* Set Link Speed by PNMI */ #define SK_HWEV_HALFDUP_CHK 10 /* Half Duplex Hangup Workaround */ -#define SK_WA_ACT_TIME (5000000L) /* 5 sec */ -#define SK_WA_INA_TIME (100000L) /* 100 msec */ +#define SK_WA_ACT_TIME (5000000UL) /* 5 sec */ +#define SK_WA_INA_TIME (100000UL) /* 100 msec */ -#define SK_HALFDUP_CHK_TIME (10000L) /* 10 msec */ +#define SK_HALFDUP_CHK_TIME (10000UL) /* 10 msec */ /* * Define the error numbers and messages @@ -185,6 +205,8 @@ #define SKERR_SIRQ_E023MSG "Auto-negotiation error" #define SKERR_SIRQ_E024 (SKERR_SIRQ_E023+1) #define SKERR_SIRQ_E024MSG "FIFO overflow error" +#define SKERR_SIRQ_E025 (SKERR_SIRQ_E024+1) +#define SKERR_SIRQ_E025MSG "2 Pair Downshift detected" extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus); extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/ski2c.h linux.22-ac2/drivers/net/sk98lin/h/ski2c.h --- linux.vanilla/drivers/net/sk98lin/h/ski2c.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/ski2c.h 2003-08-13 14:10:39.000000000 +0100 @@ -2,17 +2,15 @@ * * Name: ski2c.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.33 $ - * Date: $Date: 2002/10/14 16:40:50 $ + * Version: $Revision: 1.34 $ + * Date: $Date: 2003/01/28 09:11:21 $ * Purpose: Defines to access Voltage and Temperature Sensor - * (taken from Monalisa (taken from Concentrator)) * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -28,6 +26,9 @@ * History: * * $Log: ski2c.h,v $ + * Revision 1.34 2003/01/28 09:11:21 rschmidt + * Editorial changes + * * Revision 1.33 2002/10/14 16:40:50 rschmidt * Editorial changes (TWSI) * @@ -163,7 +164,7 @@ #define I2C_READ 0 #define I2C_WRITE 1 #define I2C_BURST 1 -#define I2C_SIGLE 0 +#define I2C_SINGLE 0 #define SKERR_I2C_E001 (SK_ERRBASE_I2C+0) #define SKERR_I2C_E001MSG "Sensor index unknown" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skqueue.h linux.22-ac2/drivers/net/sk98lin/h/skqueue.h --- linux.vanilla/drivers/net/sk98lin/h/skqueue.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skqueue.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,17 +1,17 @@ /****************************************************************************** * * Name: skqueue.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.14 $ - * Date: $Date: 2002/03/15 10:52:13 $ + * Project: Gigabit Ethernet Adapters, Schedule-Modul + * Version: $Revision: 1.15 $ + * Date: $Date: 2003/05/13 17:54:57 $ * Purpose: Defines for the Event queue * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +27,9 @@ * History: * * $Log: skqueue.h,v $ + * Revision 1.15 2003/05/13 17:54:57 mkarl + * Editorial changes. + * * Revision 1.14 2002/03/15 10:52:13 mkunz * Added event classes for link aggregation * diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skrlmt.h linux.22-ac2/drivers/net/sk98lin/h/skrlmt.h --- linux.vanilla/drivers/net/sk98lin/h/skrlmt.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skrlmt.h 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,16 @@ * * Name: skrlmt.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.33 $ - * Date: $Date: 2001/07/03 12:16:48 $ + * Version: $Revision: 1.37 $ + * Date: $Date: 2003/04/15 09:43:43 $ * Purpose: Header file for Redundant Link ManagemenT. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,18 @@ * History: * * $Log: skrlmt.h,v $ + * Revision 1.37 2003/04/15 09:43:43 tschilli + * Copyright messages changed. + * + * Revision 1.36 2003/04/14 15:56:22 tschilli + * "#error C++ is not yet supported." removed. + * + * Revision 1.35 2003/01/31 14:12:41 mkunz + * single port adapter runs now with two identical MAC addresses + * + * Revision 1.34 2002/09/23 15:13:41 rwahl + * Editorial changes. 
+ * * Revision 1.33 2001/07/03 12:16:48 mkunz * New Flag ChgBcPrio (Change priority of last broadcast received) * @@ -163,7 +176,6 @@ #define __INC_SKRLMT_H #ifdef __cplusplus -#error C++ is not yet supported. extern "C" { #endif /* cplusplus */ @@ -286,28 +298,33 @@ _PortNum = (SK_U32)(PortNum); \ /* _pAC->Rlmt.Port[_PortNum].PacketsRx++; */ \ _pAC->Rlmt.Port[_PortNum].PacketsPerTimeSlot++; \ - if ((_pAC->Rlmt.Port[_PortNum].Net->RlmtMode & SK_RLMT_TRANSPARENT) != 0) { \ + if (_pAC->Rlmt.RlmtOff) { \ *(pNumBytes) = 0; \ - } \ - else if (IsBc) { \ - if (_pAC->Rlmt.Port[_PortNum].Net->RlmtMode != SK_RLMT_MODE_CLS) { \ - *(pNumBytes) = 6; \ - *(pOffset) = 6; \ - } \ - else { \ - *(pNumBytes) = 0; \ - } \ - } \ - else { \ - if ((PktLen) > SK_RLMT_MAX_TX_BUF_SIZE) { \ - /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \ - *(pNumBytes) = 0; \ - } \ - else { \ - *(pNumBytes) = 6; \ - *(pOffset) = 0; \ - } \ - } \ + } \ + else {\ + if ((_pAC->Rlmt.Port[_PortNum].Net->RlmtMode & SK_RLMT_TRANSPARENT) != 0) { \ + *(pNumBytes) = 0; \ + } \ + else if (IsBc) { \ + if (_pAC->Rlmt.Port[_PortNum].Net->RlmtMode != SK_RLMT_MODE_CLS) { \ + *(pNumBytes) = 6; \ + *(pOffset) = 6; \ + } \ + else { \ + *(pNumBytes) = 0; \ + } \ + } \ + else { \ + if ((PktLen) > SK_RLMT_MAX_TX_BUF_SIZE) { \ + /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \ + *(pNumBytes) = 0; \ + } \ + else { \ + *(pNumBytes) = 6; \ + *(pOffset) = 0; \ + } \ + } \ + } \ } #if 0 @@ -505,8 +522,10 @@ /* ----- Private part ----- */ SK_BOOL CheckSwitch; - SK_U8 Align01; - SK_U16 Align02; + SK_BOOL RlmtOff; /* set to zero if the Mac addresses + are equal or the second one + is zero */ + SK_U16 Align01; } SK_RLMT; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/sktimer.h linux.22-ac2/drivers/net/sk98lin/h/sktimer.h --- linux.vanilla/drivers/net/sk98lin/h/sktimer.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/sktimer.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,17 +1,17 @@ /****************************************************************************** * * Name: sktimer.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.9 $ - * Date: $Date: 1999/11/22 14:00:29 $ + * Project: Gigabit Ethernet Adapters, Schedule-Modul + * Version: $Revision: 1.10 $ + * Date: $Date: 2003/05/13 17:56:44 $ * Purpose: Defines for the timer functions * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +27,9 @@ * History: * * $Log: sktimer.h,v $ + * Revision 1.10 2003/05/13 17:56:44 mkarl + * Editorial changes. + * * Revision 1.9 1999/11/22 14:00:29 cgoos * Changed license header to GPL. 
* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/sktypes.h linux.22-ac2/drivers/net/sk98lin/h/sktypes.h --- linux.vanilla/drivers/net/sk98lin/h/sktypes.h 2000-09-15 22:34:19.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/sktypes.h 2003-08-13 14:10:39.000000000 +0100 @@ -2,16 +2,15 @@ * * Name: sktypes.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.2 $ - * Date: $Date: 1999/11/22 14:01:58 $ + * Version: $Revision: 1.1 $ + * Date: $Date: 2003/07/21 07:26:01 $ * Purpose: Define data types for Linux * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +26,12 @@ * History: * * $Log: sktypes.h,v $ + * Revision 1.1 2003/07/21 07:26:01 rroesler + * Fix: Re-Enter after CVS crash + * + * Revision 1.3 2003/02/25 14:16:40 mlindner + * Fix: Copyright statement + * * Revision 1.2 1999/11/22 14:01:58 cgoos * Changed license header to GPL. * Now using Linux' fixed size types instead of standard types. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skversion.h linux.22-ac2/drivers/net/sk98lin/h/skversion.h --- linux.vanilla/drivers/net/sk98lin/h/skversion.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skversion.h 2003-08-13 14:10:39.000000000 +0100 @@ -2,16 +2,15 @@ * * Name: version.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.1.2.1 $ - * Date: $Date: 2001/09/05 13:38:30 $ + * Version: $Revision: 1.1 $ + * Date: $Date: 2003/07/24 09:29:56 $ * Purpose: SK specific Error log support * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +25,15 @@ * * History: * $Log: skversion.h,v $ + * Revision 1.1 2003/07/24 09:29:56 rroesler + * Fix: Re-Enter after CVS crash + * + * Revision 1.4 2003/02/25 14:16:40 mlindner + * Fix: Copyright statement + * + * Revision 1.3 2003/02/25 13:30:18 mlindner + * Add: Support for various vendors + * * Revision 1.1.2.1 2001/09/05 13:38:30 mlindner * Removed FILE description * @@ -39,11 +47,11 @@ static const char SysKonnectFileId[] = "@(#) (C) SysKonnect GmbH."; static const char SysKonnectBuildNumber[] = - "@(#)SK-BUILD: 6.02 PL: 01"; + "@(#)SK-BUILD: 6.15 PL: 01"; -#define BOOT_STRING "sk98lin: Network Device Driver v6.02\n" \ - "Copyright (C) 2000-2002 SysKonnect GmbH." +#define BOOT_STRING "sk98lin: Network Device Driver v6.15\n" \ + "(C)Copyright 1999-2003 Marvell(R)." 
-#define VER_STRING "6.02" +#define VER_STRING "6.15" diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/skvpd.h linux.22-ac2/drivers/net/sk98lin/h/skvpd.h --- linux.vanilla/drivers/net/sk98lin/h/skvpd.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/skvpd.h 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,15 @@ * * Name: skvpd.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.13 $ - * Date: $Date: 2002/10/14 15:58:18 $ + * Version: $Revision: 1.15 $ + * Date: $Date: 2003/01/13 10:39:38 $ * Purpose: Defines and Macros for VPD handling * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,14 @@ * History: * * $Log: skvpd.h,v $ + * Revision 1.15 2003/01/13 10:39:38 rschmidt + * Replaced define for PCI device Id for YUKON with GENESIS + * Editorial changes + * + * Revision 1.14 2002/11/14 15:18:10 gheinig + * Added const specifier to key and buf parameters for VpdPara,VpdRead + * and VpdWrite. This is necessary for the Diag 7 GUI API + * * Revision 1.13 2002/10/14 15:58:18 rschmidt * Added entry in rom_size struct s_vpd * Editorial changes @@ -63,7 +71,7 @@ * Changed constants in SK_SWAP_32 to UL. * * Revision 1.4 1998/08/19 08:14:09 gklug - * fix: remove struct keyword as much as possible from the c-code (see CCC) + * fix: remove struct keyword as much as possible from the C-code (see CCC) * * Revision 1.3 1998/08/18 08:18:56 malthoff * Modify VPD in and out macros for SK_DIAG @@ -118,7 +126,7 @@ * Define READ and WRITE Constants. */ -#define VPD_PCI_ID_YUKON 0x4320 +#define VPD_DEV_ID_GENESIS 0x4300 #define VPD_SIZE_YUKON 256 #define VPD_SIZE_GENESIS 512 @@ -249,8 +257,8 @@ extern int VpdSetupPara( SK_AC *pAC, - char *key, - char *buf, + const char *key, + const char *buf, int len, int type, int op); @@ -269,7 +277,7 @@ extern int VpdRead( SK_AC *pAC, SK_IOC IoC, - char *key, + const char *key, char *buf, int *len); @@ -279,8 +287,8 @@ extern int VpdWrite( SK_AC *pAC, SK_IOC IoC, - char *key, - char *buf); + const char *key, + const char *buf); extern int VpdDelete( SK_AC *pAC, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/h/xmac_ii.h linux.22-ac2/drivers/net/sk98lin/h/xmac_ii.h --- linux.vanilla/drivers/net/sk98lin/h/xmac_ii.h 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/h/xmac_ii.h 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: xmac_ii.h - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.45 $ - * Date: $Date: 2002/12/10 14:35:13 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.48 $ + * Date: $Date: 2003/05/13 17:17:55 $ * Purpose: Defines and Macros for Gigabit Ethernet Controller * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,18 @@ * History: * * $Log: xmac_ii.h,v $ + * Revision 1.48 2003/05/13 17:17:55 mkarl + * Editorial changes. + * + * Revision 1.47 2003/03/31 07:37:25 mkarl + * Corrected Copyright. + * Editorial changes. + * + * Revision 1.46 2003/01/28 09:47:45 rschmidt + * Added defines for copper MDI/MDIX configuration + * Added defines for LED Control Register + * Editorial changes + * * Revision 1.45 2002/12/10 14:35:13 rschmidt * Corrected defines for Extended PHY Specific Control * Added defines for Ext. PHY Specific Ctrl 2 Reg. (Fiber specific) @@ -925,9 +938,9 @@ /***** PHY_BCOM_NEPG_LP 16 bit r/o Next Page Link Partner *****/ /***** PHY_LONE_NEPG_LP 16 bit r/o Next Page Link Partner *****/ #define PHY_NP_MORE (1<<15) /* Bit 15: More, Next Pages to follow */ -#define PHY_NP_ACK1 (1<<14) /* Bit 14: (ro) Ack 1, for receiving a message*/ +#define PHY_NP_ACK1 (1<<14) /* Bit 14: (ro) Ack1, for receiving a message */ #define PHY_NP_MSG_VAL (1<<13) /* Bit 13: Message Page valid */ -#define PHY_NP_ACK2 (1<<12) /* Bit 12: Ack 2, comply with msg content*/ +#define PHY_NP_ACK2 (1<<12) /* Bit 12: Ack2, comply with msg content */ #define PHY_NP_TOG (1<<11) /* Bit 11: Toggle Bit, ensure sync */ #define PHY_NP_MSG 0x07ff /* Bit 10..0: Message from/to Link Partner */ @@ -1118,7 +1131,7 @@ #define PHY_L_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ #define PHY_L_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ #define PHY_L_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ -#define PHY_L_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status*/ +#define PHY_L_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */ #define PHY_L_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ #define PHY_L_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ /* Bit 9..8: reserved */ @@ -1170,8 +1183,8 @@ #define PHY_L_IS_AN_F (1<<13) /* Bit 13: Auto-Negotiation fault */ /* Bit 12: not described */ #define PHY_L_IS_CROSS (1<<11) /* Bit 11: Crossover used */ -#define PHY_L_IS_POL (1<<10) /* Bit 10: Polarity correct. used*/ -#define PHY_L_IS_SS (1<<9) /* Bit 9: Smart Speed Downgrade*/ +#define PHY_L_IS_POL (1<<10) /* Bit 10: Polarity correct. 
used */ +#define PHY_L_IS_SS (1<<9) /* Bit 9: Smart Speed Downgrade */ #define PHY_L_IS_CFULL (1<<8) /* Bit 8: Counter Full */ #define PHY_L_IS_AN_C (1<<7) /* Bit 7: AutoNeg Complete */ #define PHY_L_IS_SPEED (1<<6) /* Bit 6: Speed Changed */ @@ -1262,8 +1275,9 @@ * Marvell-Specific */ /***** PHY_MARV_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_MARV_AUNE_LP 16 bit r/w Link Part Ability Reg *****/ #define PHY_M_AN_NXT_PG BIT_15 /* Request Next Page */ - /* Bit 14: reserved */ +#define PHY_M_AN_ACK BIT_14 /* (ro) Acknowledge Received */ #define PHY_M_AN_RF BIT_13 /* Remote Fault */ /* Bit 12: reserved */ #define PHY_M_AN_ASP BIT_11 /* Asymmetric Pause */ @@ -1309,6 +1323,10 @@ #define PHY_M_PC_POL_R_DIS (1<<1) /* Bit 1: Polarity Reversal Disabled */ #define PHY_M_PC_DIS_JABBER (1<<0) /* Bit 0: Disable Jabber */ +#define PHY_M_PC_MDI_XMODE(x) SHIFT5(x) +#define PHY_M_PC_MAN_MDI 0 /* 00 = Manual MDI configuration */ +#define PHY_M_PC_MAN_MDIX 1 /* 01 = Manual MDIX configuration */ +#define PHY_M_PC_ENA_AUTO 3 /* 11 = Enable Automatic Crossover */ /***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ #define PHY_M_PS_SPEED_MSK (3<<14) /* Bit 15..14: Speed Mask */ @@ -1366,26 +1384,35 @@ /***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ #define PHY_M_LEDC_DIS_LED (1<<15) /* Bit 15: Disable LED */ - -#define PHY_M_LED_BL_RATE(x) SHIFT12(x) /* Bit 12..14: Blink Rate */ - -/* values for PHY_M_LED_BL_RATE() */ -#define BL_DEFAULT 0 /* no pulse stretching */ -#define BL_21MS 1 /* 21 ms to 42ms */ -#define BL_42MS 2 /* 42 ms to 84ms */ -#define BL_84MS 3 /* 84 ms to 170ms */ -#define BL_170MS 4 /* 170 ms to340ms */ -#define BL_340MS 5 /* 340 ms to670ms */ -#define BL_670MS 6 /* 670 ms to 1.3s */ -#define BL_1300MS 7 /* 1.3s to 2.7s */ - +#define PHY_M_LEDC_PULS_MSK (7<<12) /* Bit 14..12: Pulse Stretch Mask */ #define PHY_M_LEDC_F_INT (1<<11) /* Bit 11: Force Interrupt */ - -#define PHY_M_LEDC_LINK_MSK (3<<3) /* Bit 4..3: Link Control */ +#define PHY_M_LEDC_BL_R_MSK (7<<8) /* Bit 10.. 8: Blink Rate Mask */ + /* Bit 7.. 5: reserved */ +#define PHY_M_LEDC_LINK_MSK (3<<3) /* Bit 4.. 3: Link Control Mask */ #define PHY_M_LEDC_DP_CTRL (1<<2) /* Bit 2: Duplex Control */ #define PHY_M_LEDC_RX_CTRL (1<<1) /* Bit 1: Rx activity / Link */ #define PHY_M_LEDC_TX_CTRL (1<<0) /* Bit 0: Tx activity / Link */ +#define PHY_M_LED_PULS_DUR(x) SHIFT12(x) /* Pulse Stretch Duration */ + +#define PULS_NO_STR 0 /* no pulse stretching */ +#define PULS_21MS 1 /* 21 ms to 42 ms */ +#define PULS_42MS 2 /* 42 ms to 84 ms */ +#define PULS_84MS 3 /* 84 ms to 170 ms */ +#define PULS_170MS 4 /* 170 ms to 340 ms */ +#define PULS_340MS 5 /* 340 ms to 670 ms */ +#define PULS_670MS 6 /* 670 ms to 1.3 s */ +#define PULS_1300MS 7 /* 1.3 s to 2.7 s */ + +#define PHY_M_LED_BLINK_RT(x) SHIFT8(x) /* Blink Rate */ + +#define BLINK_42MS 0 /* 42 ms */ +#define BLINK_84MS 1 /* 84 ms */ +#define BLINK_170MS 2 /* 170 ms */ +#define BLINK_340MS 3 /* 340 ms */ +#define BLINK_670MS 4 /* 670 ms */ + /* values 5 - 7: reserved */ + /***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ #define PHY_M_LED_MO_DUP(x) SHIFT10(x) /* Bit 11..10: Duplex */ #define PHY_M_LED_MO_10(x) SHIFT8(x) /* Bit 9.. 8: Link 10 */ @@ -1441,7 +1468,7 @@ #define GM_GP_CTRL 0x0004 /* 16 bit r/w General Purpose Control */ #define GM_TX_CTRL 0x0008 /* 16 bit r/w Transmit Control Reg. */ #define GM_RX_CTRL 0x000c /* 16 bit r/w Receive Control Reg. 
*/ -#define GM_TX_FLOW_CTRL 0x0010 /* 16 bit r/w Transmit Flow Control */ +#define GM_TX_FLOW_CTRL 0x0010 /* 16 bit r/w Transmit Flow-Control */ #define GM_TX_PARAM 0x0014 /* 16 bit r/w Transmit Parameter Reg. */ #define GM_SERIAL_MODE 0x0018 /* 16 bit r/w Serial Mode Register */ @@ -1582,7 +1609,7 @@ #define GM_GPSR_SPEED (1<<15) /* Bit 15: Port Speed (1 = 100 Mbps) */ #define GM_GPSR_DUPLEX (1<<14) /* Bit 14: Duplex Mode (1 = Full) */ -#define GM_GPSR_FC_TX_DIS (1<<13) /* Bit 13: Tx Flow Control Mode Disabled */ +#define GM_GPSR_FC_TX_DIS (1<<13) /* Bit 13: Tx Flow-Control Mode Disabled */ #define GM_GPSR_LINK_UP (1<<12) /* Bit 12: Link Up Status */ #define GM_GPSR_PAUSE (1<<11) /* Bit 11: Pause State */ #define GM_GPSR_TX_ACTIVE (1<<10) /* Bit 10: Tx in Progress */ @@ -1592,14 +1619,14 @@ #define GM_GPSR_PHY_ST_CH (1<<5) /* Bit 5: PHY Status Change */ #define GM_GPSR_GIG_SPEED (1<<4) /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ #define GM_GPSR_PART_MODE (1<<3) /* Bit 3: Partition mode */ -#define GM_GPSR_FC_RX_DIS (1<<2) /* Bit 2: Rx Flow Control Mode Disabled */ +#define GM_GPSR_FC_RX_DIS (1<<2) /* Bit 2: Rx Flow-Control Mode Disabled */ #define GM_GPSR_PROM_EN (1<<1) /* Bit 1: Promiscuous Mode Enabled */ /* Bit 0: reserved */ /* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ /* Bit 15: reserved */ #define GM_GPCR_PROM_ENA (1<<14) /* Bit 14: Enable Promiscuous Mode */ -#define GM_GPCR_FC_TX_DIS (1<<13) /* Bit 13: Disable Tx Flow Control Mode */ +#define GM_GPCR_FC_TX_DIS (1<<13) /* Bit 13: Disable Tx Flow-Control Mode */ #define GM_GPCR_TX_ENA (1<<12) /* Bit 12: Enable Transmit */ #define GM_GPCR_RX_ENA (1<<11) /* Bit 11: Enable Receive */ #define GM_GPCR_BURST_ENA (1<<10) /* Bit 10: Enable Burst Mode */ @@ -1608,11 +1635,11 @@ #define GM_GPCR_GIGS_ENA (1<<7) /* Bit 7: Gigabit Speed (1000 Mbps) */ #define GM_GPCR_FL_PASS (1<<6) /* Bit 6: Force Link Pass */ #define GM_GPCR_DUP_FULL (1<<5) /* Bit 5: Full Duplex Mode */ -#define GM_GPCR_FC_RX_DIS (1<<4) /* Bit 4: Disable Rx Flow Control Mode */ +#define GM_GPCR_FC_RX_DIS (1<<4) /* Bit 4: Disable Rx Flow-Control Mode */ #define GM_GPCR_SPEED_100 (1<<3) /* Bit 3: Port Speed 100 Mbps */ -#define GM_GPCR_AU_DUP_DIS (1<<2) /* Bit 2: Disable Auto-Update for Duplex */ -#define GM_GPCR_AU_FCT_DIS (1<<1) /* Bit 1: Disable Auto-Update for Flow-c. */ -#define GM_GPCR_AU_SPD_DIS (1<<0) /* Bit 0: Disable Auto-Update for Speed */ +#define GM_GPCR_AU_DUP_DIS (1<<2) /* Bit 2: Disable Auto-Update Duplex */ +#define GM_GPCR_AU_FCT_DIS (1<<1) /* Bit 1: Disable Auto-Update Flow-C. */ +#define GM_GPCR_AU_SPD_DIS (1<<0) /* Bit 0: Disable Auto-Update Speed */ #define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) #define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |\ @@ -1642,9 +1669,9 @@ /* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ #define GM_SMOD_DATABL_MSK (0x1f<<11) /* Bit 15..11: Data Blinder */ -#define GM_SMOD_LIMIT_4 (1<<10) /* Bit 10: 4 consecutive transmit trials */ -#define GM_SMOD_VLAN_ENA (1<<9) /* Bit 9: Enable VLAN (Max. Frame Length) */ -#define GM_SMOD_JUMBO_ENA (1<<8) /* Bit 8: Enable Jumbo (Max. Frame Length) */ +#define GM_SMOD_LIMIT_4 (1<<10) /* Bit 10: 4 consecutive Tx trials */ +#define GM_SMOD_VLAN_ENA (1<<9) /* Bit 9: Enable VLAN (Max. Frame Len) */ +#define GM_SMOD_JUMBO_ENA (1<<8) /* Bit 8: Enable Jumbo (Max. 
Frame Len) */ /* Bit 7..5: reserved */ #define GM_SMOD_IPG_MSK 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/Makefile linux.22-ac2/drivers/net/sk98lin/Makefile --- linux.vanilla/drivers/net/sk98lin/Makefile 2001-07-04 19:50:39.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/Makefile 2003-08-13 14:10:39.000000000 +0100 @@ -3,11 +3,32 @@ # Makefile for the SysKonnect SK-98xx device driver. # +# +# Standalone driver params +# SKPARAM += -DSK_KERNEL_24 +# SKPARAM += -DSK_KERNEL_24_26 +# SKPARAM += -DSK_KERNEL_26 +# SKPARAM += -DSK_KERNEL_22_24 + O_TARGET := sk98lin.o -obj-y := skge.o skaddr.o skgehwt.o skgeinit.o skgepnmi.o skgesirq.o \ - ski2c.o sklm80.o skqueue.o skrlmt.o sktimer.o skvpd.o \ - skxmac2.o skproc.o skcsum.o +obj-y := \ + skge.o \ + skdim.o \ + skaddr.o \ + skgehwt.o \ + skgeinit.o \ + skgepnmi.o \ + skgesirq.o \ + ski2c.o \ + sklm80.o \ + skqueue.o \ + skrlmt.o \ + sktimer.o \ + skvpd.o \ + skxmac2.o \ + skproc.o \ + skcsum.o obj-m := $(O_TARGET) # DBGDEF = \ @@ -57,7 +78,7 @@ # SK_DBGCAT_DRV_INT_SRC 0x04000000 interrupts sources # SK_DBGCAT_DRV_EVENT 0x08000000 driver events -EXTRA_CFLAGS += -I. -DSK_USE_CSUM $(DBGDEF) +EXTRA_CFLAGS += -I. -DSK_USE_CSUM -DGENESIS -DYUKON $(DBGDEF) $(SKPARAM) include $(TOPDIR)/Rules.make diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skaddr.c linux.22-ac2/drivers/net/sk98lin/skaddr.c --- linux.vanilla/drivers/net/sk98lin/skaddr.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skaddr.c 2003-08-13 14:10:39.000000000 +0100 @@ -1,9 +1,9 @@ /****************************************************************************** * * Name: skaddr.c - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.47 $ - * Date: $Date: 2002/09/17 06:31:10 $ + * Project: Gigabit Ethernet Adapters, ADDR-Module + * Version: $Revision: 1.52 $ + * Date: $Date: 2003/06/02 13:46:15 $ * Purpose: Manage Addresses (Multicast and Unicast) and Promiscuous Mode. * ******************************************************************************/ @@ -11,6 +11,7 @@ /****************************************************************************** * * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,30 @@ * History: * * $Log: skaddr.c,v $ + * Revision 1.52 2003/06/02 13:46:15 tschilli + * Editorial changes. + * + * Revision 1.51 2003/05/13 17:12:43 mkarl + * Changes for SLIM Driver via SK_SLIM. + * Changes for driver not using RLMT via SK_NO_RLMT. + * Changes for driver not supporting MAC address override via SK_NO_MAO. + * Separeted GENESIS and YUKON only code to reduce code size. + * Editorial changes. + * + * Revision 1.50 2003/05/08 12:29:31 rschmidt + * Replaced all if(GIChipId == CHIP_ID_GENESIS) with new entry GIGenesis. + * Changed initialisation for Next0[SK_MAX_MACS] to avoid + * compiler errors when SK_MAX_MACS=1. + * Editorial changes. + * + * Revision 1.49 2003/04/15 09:30:51 tschilli + * Copyright messages changed. + * "#error C++ is not yet supported." removed. + * + * Revision 1.48 2003/02/12 17:09:37 tschilli + * Fix in SkAddrOverride() to set both (physical and logical) MAC addresses + * in case that both addresses are identical. 
+ * * Revision 1.47 2002/09/17 06:31:10 tschilli * Handling of SK_PROM_MODE_ALL_MC flag in SkAddrGmacMcUpdate() * and SkAddrGmacPromiscuousChange() fixed. @@ -214,15 +239,14 @@ * ******************************************************************************/ -#ifndef lint +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) static const char SysKonnectFileId[] = - "@(#) $Id: skaddr.c,v 1.47 2002/09/17 06:31:10 tschilli Exp $ (C) SysKonnect."; -#endif /* !defined(lint) */ + "@(#) $Id: skaddr.c,v 1.52 2003/06/02 13:46:15 tschilli Exp $ (C) Marvell."; +#endif /* DEBUG ||!LINT || !SK_SLIM */ #define __SKADDR_C #ifdef __cplusplus -#error C++ is not yet supported. extern "C" { #endif /* cplusplus */ @@ -257,7 +281,7 @@ /* local variables ************************************************************/ #ifdef DEBUG -static int Next0[SK_MAX_MACS] = {0, 0}; +static int Next0[SK_MAX_MACS] = {0}; #endif /* DEBUG */ /* functions ******************************************************************/ @@ -309,7 +333,8 @@ switch (Level) { case SK_INIT_DATA: - SK_MEMSET((char *) &pAC->Addr, 0, sizeof(SK_ADDR)); + SK_MEMSET((char *) &pAC->Addr, (SK_U8) 0, + (SK_U16) sizeof(SK_ADDR)); for (i = 0; i < SK_MAX_MACS; i++) { pAPort = &pAC->Addr.Port[i]; @@ -331,10 +356,12 @@ /* pAC->Addr.InitDone = SK_INIT_DATA; */ break; - case SK_INIT_IO: + case SK_INIT_IO: +#ifndef SK_NO_RLMT for (i = 0; i < SK_MAX_NETS; i++) { pAC->Addr.Net[i].ActivePort = pAC->Rlmt.Net[i].ActivePort; } +#endif /* !SK_NO_RLMT */ #ifdef xDEBUG for (i = 0; i < SK_MAX_MACS; i++) { if (pAC->Addr.Port[i].NextExactMatchRlmt < @@ -418,13 +445,16 @@ /* Set port's current physical MAC address. */ OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0]; - - if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { XM_OUTADDR(IoC, i, XM_SA, OutAddr); } - else { +#endif /* GENESIS */ +#ifdef YUKON + if (!pAC->GIni.GIGenesis) { GM_OUTADDR(IoC, i, GM_SRC_ADDR_1L, OutAddr); } +#endif /* YUKON */ #ifdef DEBUG SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT, ("SkAddrInit: Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", @@ -443,7 +473,7 @@ pAPort->CurrentMacAddress.a[3], pAPort->CurrentMacAddress.a[4], pAPort->CurrentMacAddress.a[5])) -#endif /* DEBUG */ +#endif /* DEBUG */ } /* pAC->Addr.InitDone = SK_INIT_IO; */ break; @@ -469,6 +499,7 @@ } /* SkAddrInit */ +#ifndef SK_SLIM /****************************************************************************** * @@ -504,7 +535,7 @@ return (SK_ADDR_ILLEGAL_PORT); } - if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + if (pAC->GIni.GIGenesis) { ReturnCode = SkAddrXmacMcClear(pAC, IoC, PortNumber, Flags); } else { @@ -515,6 +546,9 @@ } /* SkAddrMcClear */ +#endif /* !SK_SLIM */ + +#ifndef SK_SLIM /****************************************************************************** * @@ -568,6 +602,9 @@ } /* SkAddrXmacMcClear */ +#endif /* !SK_SLIM */ + +#ifndef SK_SLIM /****************************************************************************** * @@ -758,7 +795,7 @@ } /* SkGmacMcHash */ -#endif /* not SK_ADDR_CHEAT */ +#endif /* !SK_ADDR_CHEAT */ /****************************************************************************** * @@ -797,7 +834,7 @@ return (SK_ADDR_ILLEGAL_PORT); } - if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + if (pAC->GIni.GIGenesis) { ReturnCode = SkAddrXmacMcAdd(pAC, IoC, PortNumber, pMc, Flags); } else { @@ -888,7 +925,7 @@ } else { if (!(pMc->a[0] & SK_MC_BIT)) { - /* Hashing only possible with multicast addresses. 
*/ + /* Hashing only possible with multicast addresses */ return (SK_MC_ILLEGAL_ADDRESS); } #ifndef SK_ADDR_CHEAT @@ -951,7 +988,7 @@ #endif /* !defined(SK_ADDR_CHEAT) */ if (!(pMc->a[0] & SK_MC_BIT)) { - /* Hashing only possible with multicast addresses. */ + /* Hashing only possible with multicast addresses */ return (SK_MC_ILLEGAL_ADDRESS); } @@ -1021,6 +1058,7 @@ } /* SkAddrGmacMcAdd */ +#endif /* !SK_SLIM */ /****************************************************************************** * @@ -1052,23 +1090,29 @@ SK_U32 PortNumber) /* Port Number */ { int ReturnCode; - +#if (!defined(SK_SLIM) || defined(DEBUG)) if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { return (SK_ADDR_ILLEGAL_PORT); } - - if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { +#endif /* !SK_SLIM || DEBUG */ + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { ReturnCode = SkAddrXmacMcUpdate(pAC, IoC, PortNumber); } - else { +#endif /* GENESIS */ +#ifdef YUKON + if (!pAC->GIni.GIGenesis) { ReturnCode = SkAddrGmacMcUpdate(pAC, IoC, PortNumber); } - +#endif /* YUKON */ return (ReturnCode); } /* SkAddrMcUpdate */ +#ifdef GENESIS + /****************************************************************************** * * SkAddrXmacMcUpdate - update the HW MC address table and set the MAC address @@ -1108,7 +1152,7 @@ #ifdef DEBUG SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber])) -#endif /* DEBUG */ +#endif /* DEBUG */ /* Start with 0 to also program the logical MAC address. */ for (i = 0; i < pAPort->NextExactMatchRlmt; i++) { @@ -1146,7 +1190,7 @@ XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash); /* Enable Hashing */ - SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); } else if (Inexact != 0) { @@ -1154,11 +1198,11 @@ XM_OUTHASH(IoC, PortNumber, XM_HSM, &pAPort->InexactFilter.Bytes[0]); /* Enable Hashing */ - SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); } else { /* Disable Hashing */ - SkMacHashing(pAC, IoC, PortNumber, SK_FALSE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_FALSE); } if (pAPort->PromMode != SK_PROM_MODE_NONE) { @@ -1198,7 +1242,7 @@ pAPort->Exact[i].a[4], pAPort->Exact[i].a[5])) } -#endif /* DEBUG */ +#endif /* DEBUG */ /* Determine return value. */ if (Inexact == 0 && pAPort->PromMode == 0) { @@ -1210,6 +1254,9 @@ } /* SkAddrXmacMcUpdate */ +#endif /* GENESIS */ + +#ifdef YUKON /****************************************************************************** * @@ -1237,8 +1284,10 @@ SK_IOC IoC, /* I/O context */ SK_U32 PortNumber) /* Port Number */ { +#ifndef SK_SLIM SK_U32 i; SK_U8 Inexact; +#endif /* not SK_SLIM */ SK_U16 *OutAddr; SK_ADDR_PORT *pAPort; @@ -1250,8 +1299,9 @@ #ifdef DEBUG SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber])) -#endif /* DEBUG */ +#endif /* DEBUG */ +#ifndef SK_SLIM for (Inexact = 0, i = 0; i < 8; i++) { Inexact |= pAPort->InexactFilter.Bytes[i]; } @@ -1266,16 +1316,27 @@ GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash); /* Enable Hashing */ - SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); } else { /* Enable Hashing. */ - SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); } if (pAPort->PromMode != SK_PROM_MODE_NONE) { (void) SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode); } +#else /* SK_SLIM */ + + /* Set all bits in 64-bit hash register. 
*/ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + + (void) SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode); + +#endif /* SK_SLIM */ /* Set port's current physical MAC address. */ OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0]; @@ -1303,8 +1364,9 @@ pAPort->CurrentMacAddress.a[3], pAPort->CurrentMacAddress.a[4], pAPort->CurrentMacAddress.a[5])) -#endif /* DEBUG */ +#endif /* DEBUG */ +#ifndef SK_SLIM /* Determine return value. */ if (Inexact == 0 && pAPort->PromMode == 0) { return (SK_MC_FILTERING_EXACT); @@ -1312,9 +1374,15 @@ else { return (SK_MC_FILTERING_INEXACT); } +#else /* SK_SLIM */ + return (SK_MC_FILTERING_INEXACT); +#endif /* SK_SLIM */ } /* SkAddrGmacMcUpdate */ +#endif /* YUKON */ + +#ifndef SK_NO_MAO /****************************************************************************** * @@ -1334,23 +1402,29 @@ * SK_ADDR_TOO_EARLY if SK_INIT_IO was not executed before. */ int SkAddrOverride( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* I/O context */ -SK_U32 PortNumber, /* Port Number */ -SK_MAC_ADDR *pNewAddr, /* new MAC address */ -int Flags) /* logical/physical MAC address */ +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Port Number */ +SK_MAC_ADDR SK_FAR *pNewAddr, /* new MAC address */ +int Flags) /* logical/physical MAC address */ { +#ifndef SK_NO_RLMT SK_EVPARA Para; +#endif /* !SK_NO_RLMT */ SK_U32 NetNumber; SK_U32 i; - SK_U16 *OutAddr; + SK_U16 SK_FAR *OutAddr; +#ifndef SK_NO_RLMT NetNumber = pAC->Rlmt.Port[PortNumber].Net->NetNumber; - +#else + NetNumber = 0; +#endif /* SK_NO_RLMT */ +#if (!defined(SK_SLIM) || defined(DEBUG)) if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { return (SK_ADDR_ILLEGAL_PORT); } - +#endif /* !SK_SLIM || DEBUG */ if (pNewAddr != NULL && (pNewAddr->a[0] & SK_MC_BIT) != 0) { return (SK_ADDR_MULTICAST_ADDRESS); } @@ -1366,11 +1440,11 @@ return (SK_ADDR_TOO_EARLY); } } - +#ifndef SK_NO_RLMT /* Set PortNumber to number of net's active port. */ PortNumber = pAC->Rlmt.Net[NetNumber]. Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber; - +#endif /* !SK_NO_RLMT */ pAC->Addr.Port[PortNumber].Exact[0] = pAC->Addr.Net[NetNumber].CurrentMacAddress; @@ -1385,11 +1459,11 @@ return (SK_ADDR_TOO_EARLY); } } - +#ifndef SK_NO_RLMT /* Set PortNumber to number of net's active port. */ PortNumber = pAC->Rlmt.Net[NetNumber]. Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber; - +#endif /* !SK_NO_RLMT */ for (i = 0; i < SK_MAC_ADDR_LEN; i++ ) { pAC->Addr.Port[PortNumber].Exact[0].a[i] = 0; } @@ -1424,19 +1498,24 @@ pAC->Addr.Port[PortNumber].CurrentMacAddress = *pNewAddr; /* Change port's physical MAC address. */ - OutAddr = (SK_U16 *) pNewAddr; - - if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + OutAddr = (SK_U16 SK_FAR *) pNewAddr; +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr); } - else { +#endif /* GENESIS */ +#ifdef YUKON + if (!pAC->GIni.GIGenesis) { GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_1L, OutAddr); } +#endif /* YUKON */ +#ifndef SK_NO_RLMT /* Report address change to RLMT. */ Para.Para32[0] = PortNumber; Para.Para32[0] = -1; SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_PORT_ADDR, Para); +#endif /* !SK_NO_RLMT */ } else { /* Logical MAC address. 
*/ if (SK_ADDR_EQUAL(pNewAddr->a, @@ -1454,11 +1533,33 @@ return (SK_ADDR_DUPLICATE_ADDRESS); } } - + + /* + * In case that the physical and the logical MAC addresses are equal + * we must also change the physical MAC address here. + * In this case we have an adapter which initially was programmed with + * two identical MAC addresses. + */ + if (SK_ADDR_EQUAL(pAC->Addr.Port[PortNumber].CurrentMacAddress.a, + pAC->Addr.Port[PortNumber].Exact[0].a)) { + + pAC->Addr.Port[PortNumber].PreviousMacAddress = + pAC->Addr.Port[PortNumber].CurrentMacAddress; + pAC->Addr.Port[PortNumber].CurrentMacAddress = *pNewAddr; + +#ifndef SK_NO_RLMT + /* Report address change to RLMT. */ + Para.Para32[0] = PortNumber; + Para.Para32[0] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_PORT_ADDR, Para); +#endif /* !SK_NO_RLMT */ + } + +#ifndef SK_NO_RLMT /* Set PortNumber to number of net's active port. */ PortNumber = pAC->Rlmt.Net[NetNumber]. Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber; - +#endif /* !SK_NO_RLMT */ pAC->Addr.Net[NetNumber].CurrentMacAddress = *pNewAddr; pAC->Addr.Port[PortNumber].Exact[0] = *pNewAddr; #ifdef DEBUG @@ -1479,9 +1580,9 @@ pAC->Addr.Net[NetNumber].CurrentMacAddress.a[3], pAC->Addr.Net[NetNumber].CurrentMacAddress.a[4], pAC->Addr.Net[NetNumber].CurrentMacAddress.a[5])) -#endif /* DEBUG */ +#endif /* DEBUG */ - /* Write address to first exact match entry of active port. */ + /* Write address to first exact match entry of active port. */ (void) SkAddrMcUpdate(pAC, IoC, PortNumber); } @@ -1490,6 +1591,8 @@ } /* SkAddrOverride */ +#endif /* SK_NO_MAO */ + /****************************************************************************** * * SkAddrPromiscuousChange - set promiscuous mode for given port @@ -1519,22 +1622,30 @@ int NewPromMode) /* new promiscuous mode */ { int ReturnCode; - +#if (!defined(SK_SLIM) || defined(DEBUG)) if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { return (SK_ADDR_ILLEGAL_PORT); } - - if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { - ReturnCode = SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode); - } - else { - ReturnCode = SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode); +#endif /* !SK_SLIM || DEBUG */ + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + ReturnCode = + SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode); + } +#endif /* GENESIS */ +#ifdef YUKON + if (!pAC->GIni.GIGenesis) { + ReturnCode = + SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode); } +#endif /* YUKON */ return (ReturnCode); } /* SkAddrPromiscuousChange */ +#ifdef GENESIS /****************************************************************************** * @@ -1613,7 +1724,7 @@ XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash); /* Enable Hashing */ - SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); } else if ((CurPromMode & SK_PROM_MODE_ALL_MC) && !(NewPromMode & SK_PROM_MODE_ALL_MC)) { /* Norm MC. */ @@ -1622,7 +1733,7 @@ } if (Inexact == 0) { /* Disable Hashing */ - SkMacHashing(pAC, IoC, PortNumber, SK_FALSE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_FALSE); } else { /* Set 64-bit hash register to InexactFilter. */ @@ -1630,25 +1741,28 @@ &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]); /* Enable Hashing */ - SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); } } if ((NewPromMode & SK_PROM_MODE_LLC) && !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. 
LLC */ /* Set the MAC in Promiscuous Mode */ - SkMacPromiscMode(pAC, IoC, PortNumber, SK_TRUE); + SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_TRUE); } else if ((CurPromMode & SK_PROM_MODE_LLC) && !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC. */ /* Clear Promiscuous Mode */ - SkMacPromiscMode(pAC, IoC, PortNumber, SK_FALSE); + SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_FALSE); } return (SK_ADDR_SUCCESS); } /* SkAddrXmacPromiscuousChange */ +#endif /* GENESIS */ + +#ifdef YUKON /****************************************************************************** * @@ -1703,7 +1817,7 @@ GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash); /* Enable Hashing */ - SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); } if ((CurPromMode & SK_PROM_MODE_ALL_MC) && @@ -1714,26 +1828,29 @@ &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]); /* Enable Hashing. */ - SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); } if ((NewPromMode & SK_PROM_MODE_LLC) && !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. LLC */ /* Set the MAC to Promiscuous Mode. */ - SkMacPromiscMode(pAC, IoC, PortNumber, SK_TRUE); + SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_TRUE); } else if ((CurPromMode & SK_PROM_MODE_LLC) && !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC */ /* Clear Promiscuous Mode. */ - SkMacPromiscMode(pAC, IoC, PortNumber, SK_FALSE); + SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_FALSE); } return (SK_ADDR_SUCCESS); } /* SkAddrGmacPromiscuousChange */ +#endif /* YUKON */ + +#ifndef SK_SLIM /****************************************************************************** * @@ -1805,7 +1922,7 @@ pAC->Addr.Port[FromPortNumber].PromMode = pAC->Addr.Port[ToPortNumber].PromMode; pAC->Addr.Port[ToPortNumber].PromMode = i; - if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + if (pAC->GIni.GIGenesis) { DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt; pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt = pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt; @@ -1845,6 +1962,8 @@ } /* SkAddrSwap */ +#endif /* !SK_SLIM */ + #ifdef __cplusplus } #endif /* __cplusplus */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skcsum.c linux.22-ac2/drivers/net/sk98lin/skcsum.c --- linux.vanilla/drivers/net/sk98lin/skcsum.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skcsum.c 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,15 @@ * * Name: skcsum.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.10 $ - * Date: $Date: 2002/04/11 10:02:04 $ + * Version: $Revision: 1.11 $ + * Date: $Date: 2003/03/11 14:05:55 $ * Purpose: Store/verify Internet checksum in send/receive packets. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,10 @@ * History: * * $Log: skcsum.c,v $ + * Revision 1.11 2003/03/11 14:05:55 rschmidt + * Replaced memset() by macro SK_MEMSET() + * Editorial changes + * * Revision 1.10 2002/04/11 10:02:04 rwahl * Fix in SkCsGetSendInfo(): * - function did not return ProtocolFlags in every case. @@ -73,9 +77,8 @@ #ifdef SK_USE_CSUM /* Check if CSUM is to be used. 
*/ #ifndef lint -static const char SysKonnectFileId[] = "@(#)" - "$Id: skcsum.c,v 1.10 2002/04/11 10:02:04 rwahl Exp $" - " (C) SysKonnect."; +static const char SysKonnectFileId[] = + "@(#) $Id: skcsum.c,v 1.11 2003/03/11 14:05:55 rschmidt Exp $ (C) SysKonnect."; #endif /* !lint */ /****************************************************************************** @@ -107,8 +110,8 @@ * * "h/skdrv1st.h" * "h/skcsum.h" - * "h/sktypes.h" - * "h/skqueue.h" + * "h/sktypes.h" + * "h/skqueue.h" * "h/skdrv2nd.h" * ******************************************************************************/ @@ -173,7 +176,7 @@ * little/big endian conversion on little endian machines only. */ #ifdef SK_LITTLE_ENDIAN -#define SKCS_HTON16(Val16) (((unsigned) (Val16) >> 8) | (((Val16) & 0xFF) << 8)) +#define SKCS_HTON16(Val16) (((unsigned) (Val16) >> 8) | (((Val16) & 0xff) << 8)) #endif /* SK_LITTLE_ENDIAN */ #ifdef SK_BIG_ENDIAN #define SKCS_HTON16(Val16) (Val16) @@ -204,7 +207,7 @@ * zero.) * * Note: - * There is a bug in the ASIC which may lead to wrong checksums. + * There is a bug in the GENESIS ASIC which may lead to wrong checksums. * * Arguments: * pAc - A pointer to the adapter context struct. @@ -603,7 +606,7 @@ NextLevelProtocol = *(SK_U8 *) SKCS_IDX(pIpHeader, SKCS_OFS_IP_NEXT_LEVEL_PROTOCOL); - if (IpHeaderChecksum != 0xFFFF) { + if (IpHeaderChecksum != 0xffff) { pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].RxErrCts++; /* the NDIS tester wants to know the upper level protocol too */ if (NextLevelProtocol == SKCS_PROTO_ID_TCP) { @@ -721,7 +724,7 @@ /* Check if the TCP/UDP checksum is ok. */ - if ((unsigned) NextLevelProtocolChecksum == 0xFFFF) { + if ((unsigned) NextLevelProtocolChecksum == 0xffff) { /* TCP/UDP checksum ok. */ @@ -903,12 +906,12 @@ NetNumber = (int)Param.Para32[0]; if (ProtoIndex < 0) { /* Clear for all protocols. */ if (NetNumber >= 0) { - memset(&pAc->Csum.ProtoStats[NetNumber][0], 0, + SK_MEMSET(&pAc->Csum.ProtoStats[NetNumber][0], 0, sizeof(pAc->Csum.ProtoStats[NetNumber])); } } else { /* Clear for individual protocol. */ - memset(&pAc->Csum.ProtoStats[NetNumber][ProtoIndex], 0, + SK_MEMSET(&pAc->Csum.ProtoStats[NetNumber][ProtoIndex], 0, sizeof(pAc->Csum.ProtoStats[NetNumber][ProtoIndex])); } break; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skdim.c linux.22-ac2/drivers/net/sk98lin/skdim.c --- linux.vanilla/drivers/net/sk98lin/skdim.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skdim.c 2003-08-13 14:10:39.000000000 +0100 @@ -0,0 +1,728 @@ +/****************************************************************************** + * + * Name: skdim.c + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.1 $ + * Date: $Date: 2003/07/18 13:39:55 $ + * Purpose: All functions to maintain interrupt moderation + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + +/****************************************************************************** + * + * History: + * + * $Log: skdim.c,v $ + * Revision 1.1 2003/07/18 13:39:55 rroesler + * Fix: Re-enter after CVS crash + * + * Revision 1.4 2003/07/07 09:45:47 rroesler + * Fix: Compiler warnings corrected + * + * Revision 1.3 2003/06/10 09:16:40 rroesler + * Adapt GetCurrentSystemLoad() to NOT access the kernels + * kstat-structure in kernel 2.5/2.6. This must be done + * due to a not exported symbol. Instead of evaluating the + * SystemLoad directly, the nbr of interrupts is used as + * a rough basis for the load. + * + * + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * This module is intended to manage the dynamic interrupt moderation on both + * GEnesis and Yukon adapters. + * + * Include File Hierarchy: + * + * "skdrv1st.h" + * "skdrv2nd.h" + * + ******************************************************************************/ + +#ifndef lint +static const char SysKonnectFileId[] = + "@(#) $Id: skdim.c,v 1.1 2003/07/18 13:39:55 rroesler Exp $ (C) SysKonnect."; +#endif + +#define __SKADDR_C + +#ifdef __cplusplus +#error C++ is not yet supported. +extern "C" { +#endif + +/******************************************************************************* +** +** Includes +** +*******************************************************************************/ + +#ifndef __INC_SKDRV1ST_H +#include "h/skdrv1st.h" +#endif + +#ifndef __INC_SKDRV2ND_H +#include "h/skdrv2nd.h" +#endif + +#include + +/******************************************************************************* +** +** Defines +** +*******************************************************************************/ + +/******************************************************************************* +** +** Typedefs +** +*******************************************************************************/ + +/******************************************************************************* +** +** Local function prototypes +** +*******************************************************************************/ + +static unsigned int GetCurrentSystemLoad(SK_AC *pAC); +static SK_U64 GetIsrCalls(SK_AC *pAC); +static SK_BOOL IsIntModEnabled(SK_AC *pAC); +static void SetCurrIntCtr(SK_AC *pAC); +static void EnableIntMod(SK_AC *pAC); +static void DisableIntMod(SK_AC *pAC); +static void ResizeDimTimerDuration(SK_AC *pAC); +static void DisplaySelectedModerationType(SK_AC *pAC); +static void DisplaySelectedModerationMask(SK_AC *pAC); +static void DisplayDescrRatio(SK_AC *pAC); + +/******************************************************************************* +** +** Global variables +** +*******************************************************************************/ + +/******************************************************************************* +** +** Local variables +** +*******************************************************************************/ + +/******************************************************************************* +** +** Global functions +** +*******************************************************************************/ + +/******************************************************************************* +** Function : SkDimModerate +** Description : Called in every ISR to check if moderation is to be applied +** or 
not for the current number of interrupts +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : void (!) +** Notes : - +*******************************************************************************/ + +void +SkDimModerate(SK_AC *pAC) { + unsigned int CurrSysLoad = 0; /* expressed in percent */ + unsigned int LoadIncrease = 0; /* expressed in percent */ + SK_U64 ThresholdInts = 0; + SK_U64 IsrCallsPerSec = 0; + +#define M_DIMINFO pAC->DynIrqModInfo + + if (!IsIntModEnabled(pAC)) { + if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) { + CurrSysLoad = GetCurrentSystemLoad(pAC); + if (CurrSysLoad > 75) { + /* + ** More than 75% total system load! Enable the moderation + ** to shield the system against too many interrupts. + */ + EnableIntMod(pAC); + } else if (CurrSysLoad > M_DIMINFO.PrevSysLoad) { + LoadIncrease = (CurrSysLoad - M_DIMINFO.PrevSysLoad); + if (LoadIncrease > ((M_DIMINFO.PrevSysLoad * + C_INT_MOD_ENABLE_PERCENTAGE) / 100)) { + if (CurrSysLoad > 10) { + /* + ** More than 50% increase with respect to the + ** previous load of the system. Most likely this + ** is due to our ISR-proc... + */ + EnableIntMod(pAC); + } + } + } else { + /* + ** Neither too much system load at all nor too much increase + ** with respect to the previous system load. Hence, we can leave + ** the ISR-handling like it is without enabling moderation. + */ + } + M_DIMINFO.PrevSysLoad = CurrSysLoad; + } + } else { + if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) { + ThresholdInts = ((M_DIMINFO.MaxModIntsPerSec * + C_INT_MOD_DISABLE_PERCENTAGE) / 100); + IsrCallsPerSec = GetIsrCalls(pAC); + if (IsrCallsPerSec <= ThresholdInts) { + /* + ** The number of interrupts within the last second is + ** lower than the disable_percentage of the desried + ** maxrate. Therefore we can disable the moderation. + */ + DisableIntMod(pAC); + M_DIMINFO.MaxModIntsPerSec = + (M_DIMINFO.MaxModIntsPerSecUpperLimit + + M_DIMINFO.MaxModIntsPerSecLowerLimit) / 2; + } else { + /* + ** The number of interrupts per sec is the same as expected. + ** Evalulate the descriptor-ratio. If it has changed, a resize + ** in the moderation timer might be usefull + */ + if (M_DIMINFO.AutoSizing) { + ResizeDimTimerDuration(pAC); + } + } + } + } + + /* + ** Some information to the log... + */ + if (M_DIMINFO.DisplayStats) { + DisplaySelectedModerationType(pAC); + DisplaySelectedModerationMask(pAC); + DisplayDescrRatio(pAC); + } + + M_DIMINFO.NbrProcessedDescr = 0; + SetCurrIntCtr(pAC); +} + +/******************************************************************************* +** Function : SkDimStartModerationTimer +** Description : Starts the audit-timer for the dynamic interrupt moderation +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : void (!) +** Notes : - +*******************************************************************************/ + +void +SkDimStartModerationTimer(SK_AC *pAC) { + SK_EVPARA EventParam; /* Event struct for timer event */ + + SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam)); + EventParam.Para32[0] = SK_DRV_MODERATION_TIMER; + SkTimerStart(pAC, pAC->IoBase, &pAC->DynIrqModInfo.ModTimer, + SK_DRV_MODERATION_TIMER_LENGTH, + SKGE_DRV, SK_DRV_TIMER, EventParam); +} + +/******************************************************************************* +** Function : SkDimEnableModerationIfNeeded +** Description : Either enables or disables moderation +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : void (!) 
+** Notes : This function is called when a particular adapter is opened +** There is no Disable function, because when all interrupts +** might be disable, the moderation timer has no meaning at all +******************************************************************************/ + +void +SkDimEnableModerationIfNeeded(SK_AC *pAC) { + + if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_STATIC) { + EnableIntMod(pAC); /* notification print in this function */ + } else if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) { + SkDimStartModerationTimer(pAC); + if (M_DIMINFO.DisplayStats) { + printk("Dynamic moderation has been enabled\n"); + } + } else { + if (M_DIMINFO.DisplayStats) { + printk("No moderation has been enabled\n"); + } + } +} + +/******************************************************************************* +** Function : SkDimDisplayModerationSettings +** Description : Displays the current settings regaring interrupt moderation +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : void (!) +** Notes : - +*******************************************************************************/ + +void +SkDimDisplayModerationSettings(SK_AC *pAC) { + DisplaySelectedModerationType(pAC); + DisplaySelectedModerationMask(pAC); +} + +/******************************************************************************* +** +** Local functions +** +*******************************************************************************/ + +/******************************************************************************* +** Function : GetCurrentSystemLoad +** Description : Retrieves the current system load of the system. This load +** is evaluated for all processors within the system. +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : unsigned int: load expressed in percentage +** Notes : The possible range being returned is from 0 up to 100. +** Whereas 0 means 'no load at all' and 100 'system fully loaded' +** It is impossible to determine what actually causes the system +** to be in 100%, but maybe that is due to too much interrupts. +*******************************************************************************/ + +static unsigned int +GetCurrentSystemLoad(SK_AC *pAC) { + unsigned long jif = jiffies; + unsigned int UserTime = 0; + unsigned int SystemTime = 0; + unsigned int NiceTime = 0; + unsigned int IdleTime = 0; + unsigned int TotalTime = 0; + unsigned int UsedTime = 0; + unsigned int SystemLoad = 0; + unsigned int NbrCpu = 0; + + for (NbrCpu = 0; NbrCpu < smp_num_cpus; NbrCpu++) { + UserTime = UserTime + kstat.per_cpu_user[NbrCpu]; + NiceTime = NiceTime + kstat.per_cpu_nice[NbrCpu]; + SystemTime = SystemTime + kstat.per_cpu_system[NbrCpu]; + } + + UsedTime = UserTime + NiceTime + SystemTime; + + IdleTime = jif * smp_num_cpus - UsedTime; + TotalTime = UsedTime + IdleTime; + + SystemLoad = ( 100 * (UsedTime - M_DIMINFO.PrevUsedTime) ) / + (TotalTime - M_DIMINFO.PrevTotalTime); + + if (M_DIMINFO.DisplayStats) { + printk("Current system load is: %u\n", SystemLoad); + } + + M_DIMINFO.PrevTotalTime = TotalTime; + M_DIMINFO.PrevUsedTime = UsedTime; + + return (SystemLoad); +} + +/******************************************************************************* +** Function : GetIsrCalls +** Description : Depending on the selected moderation mask, this function will +** return the number of interrupts handled in the previous time- +** frame. This evaluated number is based on the current number +** of interrupts stored in PNMI-context and the previous stored +** interrupts. 
+** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : int: the number of interrupts being executed in the last +** timeframe +** Notes : It makes only sense to call this function, when dynamic +** interrupt moderation is applied +*******************************************************************************/ + +static SK_U64 +GetIsrCalls(SK_AC *pAC) { + SK_U64 RxPort0IntDiff = 0; + SK_U64 RxPort1IntDiff = 0; + SK_U64 TxPort0IntDiff = 0; + SK_U64 TxPort1IntDiff = 0; + + if (pAC->DynIrqModInfo.MaskIrqModeration == IRQ_MASK_TX_ONLY) { + if (pAC->GIni.GIMacsFound == 2) { + TxPort1IntDiff = pAC->Pnmi.Port[1].TxIntrCts - + pAC->DynIrqModInfo.PrevPort1TxIntrCts; + } + TxPort0IntDiff = pAC->Pnmi.Port[0].TxIntrCts - + pAC->DynIrqModInfo.PrevPort0TxIntrCts; + } else if (pAC->DynIrqModInfo.MaskIrqModeration == IRQ_MASK_RX_ONLY) { + if (pAC->GIni.GIMacsFound == 2) { + RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts - + pAC->DynIrqModInfo.PrevPort1RxIntrCts; + } + RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts - + pAC->DynIrqModInfo.PrevPort0RxIntrCts; + } else { + if (pAC->GIni.GIMacsFound == 2) { + RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts - + pAC->DynIrqModInfo.PrevPort1RxIntrCts; + TxPort1IntDiff = pAC->Pnmi.Port[1].TxIntrCts - + pAC->DynIrqModInfo.PrevPort1TxIntrCts; + } + RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts - + pAC->DynIrqModInfo.PrevPort0RxIntrCts; + TxPort0IntDiff = pAC->Pnmi.Port[0].TxIntrCts - + pAC->DynIrqModInfo.PrevPort0TxIntrCts; + } + + return (RxPort0IntDiff + RxPort1IntDiff + TxPort0IntDiff + TxPort1IntDiff); +} + +/******************************************************************************* +** Function : GetRxCalls +** Description : This function will return the number of times a receive inter- +** rupt was processed. This is needed to evaluate any resizing +** factor. +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : SK_U64: the number of RX-ints being processed +** Notes : It makes only sense to call this function, when dynamic +** interrupt moderation is applied +*******************************************************************************/ + +static SK_U64 +GetRxCalls(SK_AC *pAC) { + SK_U64 RxPort0IntDiff = 0; + SK_U64 RxPort1IntDiff = 0; + + if (pAC->GIni.GIMacsFound == 2) { + RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts - + pAC->DynIrqModInfo.PrevPort1RxIntrCts; + } + RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts - + pAC->DynIrqModInfo.PrevPort0RxIntrCts; + + return (RxPort0IntDiff + RxPort1IntDiff); +} + +/******************************************************************************* +** Function : SetCurrIntCtr +** Description : Will store the current number orf occured interrupts in the +** adapter context. This is needed to evaluated the number of +** interrupts within a current timeframe. +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : void (!) 
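For reference, the decision rule implemented by SkDimModerate() and GetCurrentSystemLoad() above can be reduced to the standalone sketch below. The threshold constants C_INT_MOD_ENABLE_PERCENTAGE and C_INT_MOD_DISABLE_PERCENTAGE are defined elsewhere in the driver; the values used here (30% and 50%), the sample numbers, and all names outside the patch are illustrative assumptions only.

/* Standalone illustration of the dynamic moderation heuristic above. */
#include <stdio.h>

struct dim_sample {
    unsigned long used_ticks;   /* user + nice + system, summed over all CPUs */
    unsigned long total_ticks;  /* used + idle */
    unsigned long isr_calls;    /* interrupts counted in the last timeframe */
};

/* load in percent between two samples, as in GetCurrentSystemLoad() */
static unsigned int load_percent(const struct dim_sample *prev,
                                 const struct dim_sample *curr)
{
    return (unsigned int)((100 * (curr->used_ticks - prev->used_ticks)) /
                          (curr->total_ticks - prev->total_ticks));
}

/* mirrors the two branches of SkDimModerate(): returns 1 if moderation
 * should be on after this sample, 0 if it should be off */
static int want_moderation(int currently_on,
                           unsigned int prev_load, unsigned int curr_load,
                           unsigned long isr_calls, unsigned long max_ints_per_sec)
{
    const unsigned int enable_pct  = 30;  /* assumed C_INT_MOD_ENABLE_PERCENTAGE */
    const unsigned int disable_pct = 50;  /* assumed C_INT_MOD_DISABLE_PERCENTAGE */

    if (!currently_on) {
        if (curr_load > 75)
            return 1;        /* shield a heavily loaded system */
        if (curr_load > prev_load &&
            (curr_load - prev_load) > (prev_load * enable_pct) / 100 &&
            curr_load > 10)
            return 1;        /* sharp increase, most likely caused by the ISR */
        return 0;
    }
    /* already on: switch off again once the interrupt rate has calmed down */
    return isr_calls > (max_ints_per_sec * disable_pct) / 100;
}

int main(void)
{
    struct dim_sample prev = { 1000, 4000, 0 }, curr = { 1900, 5000, 9000 };
    unsigned int load = load_percent(&prev, &curr);

    printf("load %u%% -> moderation %s\n", load,
           want_moderation(0, 25, load, curr.isr_calls, 10000) ? "on" : "off");
    return 0;
}

With the sample numbers above this prints "load 90% -> moderation on", i.e. the "more than 75% total system load" branch of SkDimModerate().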
+** Notes : - +*******************************************************************************/ + +static void +SetCurrIntCtr(SK_AC *pAC) { + if (pAC->GIni.GIMacsFound == 2) { + pAC->DynIrqModInfo.PrevPort1RxIntrCts = pAC->Pnmi.Port[1].RxIntrCts; + pAC->DynIrqModInfo.PrevPort1TxIntrCts = pAC->Pnmi.Port[1].TxIntrCts; + } + pAC->DynIrqModInfo.PrevPort0RxIntrCts = pAC->Pnmi.Port[0].RxIntrCts; + pAC->DynIrqModInfo.PrevPort0TxIntrCts = pAC->Pnmi.Port[0].TxIntrCts; +} + +/******************************************************************************* +** Function : IsIntModEnabled() +** Description : Retrieves the current value of the interrupts moderation +** command register. Its content determines whether any +** moderation is running or not. +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : SK_TRUE : if mod timer running +** SK_FALSE : if no moderation is being performed +** Notes : - +*******************************************************************************/ + +static SK_BOOL +IsIntModEnabled(SK_AC *pAC) { + unsigned long CtrCmd; + + SK_IN32(pAC->IoBase, B2_IRQM_CTRL, &CtrCmd); + if ((CtrCmd & TIM_START) == TIM_START) { + return SK_TRUE; + } else { + return SK_FALSE; + } +} + +/******************************************************************************* +** Function : EnableIntMod() +** Description : Enables the interrupt moderation using the values stored in +** in the pAC->DynIntMod data structure +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : - +** Notes : - +*******************************************************************************/ + +static void +EnableIntMod(SK_AC *pAC) { + unsigned long ModBase; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + ModBase = C_CLK_FREQ_GENESIS / pAC->DynIrqModInfo.MaxModIntsPerSec; + } else { + ModBase = C_CLK_FREQ_YUKON / pAC->DynIrqModInfo.MaxModIntsPerSec; + } + + SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase); + SK_OUT32(pAC->IoBase, B2_IRQM_MSK, pAC->DynIrqModInfo.MaskIrqModeration); + SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_START); + if (M_DIMINFO.DisplayStats) { + printk("Enabled interrupt moderation (%i ints/sec)\n", + M_DIMINFO.MaxModIntsPerSec); + } +} + +/******************************************************************************* +** Function : DisableIntMod() +** Description : Disbles the interrupt moderation independent of what inter- +** rupts are running or not +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : - +** Notes : - +*******************************************************************************/ + +static void +DisableIntMod(SK_AC *pAC) { + + SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_STOP); + if (M_DIMINFO.DisplayStats) { + printk("Disabled interrupt moderation\n"); + } +} + +/******************************************************************************* +** Function : ResizeDimTimerDuration(); +** Description : Checks the current used descriptor ratio and resizes the +** duration timer (longer/smaller) if possible. +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : - +** Notes : There are both maximum and minimum timer duration value. +** This function assumes that interrupt moderation is already +** enabled! 
+*******************************************************************************/ + +static void +ResizeDimTimerDuration(SK_AC *pAC) { + SK_BOOL IncreaseTimerDuration; + int TotalMaxNbrDescr; + int UsedDescrRatio; + int RatioDiffAbs; + int RatioDiffRel; + int NewMaxModIntsPerSec; + int ModAdjValue; + long ModBase; + + /* + ** Check first if we are allowed to perform any modification + */ + if (IsIntModEnabled(pAC)) { + if (M_DIMINFO.IntModTypeSelect != C_INT_MOD_DYNAMIC) { + return; + } else { + if (M_DIMINFO.ModJustEnabled) { + M_DIMINFO.ModJustEnabled = SK_FALSE; + return; + } + } + } + + /* + ** If we got until here, we have to evaluate the amount of the + ** descriptor ratio change... + */ + TotalMaxNbrDescr = pAC->RxDescrPerRing * GetRxCalls(pAC); + UsedDescrRatio = (M_DIMINFO.NbrProcessedDescr * 100) / TotalMaxNbrDescr; + + if (UsedDescrRatio > M_DIMINFO.PrevUsedDescrRatio) { + RatioDiffAbs = (UsedDescrRatio - M_DIMINFO.PrevUsedDescrRatio); + RatioDiffRel = (RatioDiffAbs * 100) / UsedDescrRatio; + M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio; + IncreaseTimerDuration = SK_FALSE; /* in other words: DECREASE */ + } else if (UsedDescrRatio < M_DIMINFO.PrevUsedDescrRatio) { + RatioDiffAbs = (M_DIMINFO.PrevUsedDescrRatio - UsedDescrRatio); + RatioDiffRel = (RatioDiffAbs * 100) / M_DIMINFO.PrevUsedDescrRatio; + M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio; + IncreaseTimerDuration = SK_TRUE; /* in other words: INCREASE */ + } else { + RatioDiffAbs = (M_DIMINFO.PrevUsedDescrRatio - UsedDescrRatio); + RatioDiffRel = (RatioDiffAbs * 100) / M_DIMINFO.PrevUsedDescrRatio; + M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio; + IncreaseTimerDuration = SK_TRUE; /* in other words: INCREASE */ + } + + /* + ** Now we can determine the change in percent + */ + if ((RatioDiffRel >= 0) && (RatioDiffRel <= 5) ) { + ModAdjValue = 1; /* 1% change - maybe some other value in future */ + } else if ((RatioDiffRel > 5) && (RatioDiffRel <= 10) ) { + ModAdjValue = 1; /* 1% change - maybe some other value in future */ + } else if ((RatioDiffRel > 10) && (RatioDiffRel <= 15) ) { + ModAdjValue = 1; /* 1% change - maybe some other value in future */ + } else { + ModAdjValue = 1; /* 1% change - maybe some other value in future */ + } + + if (IncreaseTimerDuration) { + NewMaxModIntsPerSec = M_DIMINFO.MaxModIntsPerSec + + (M_DIMINFO.MaxModIntsPerSec * ModAdjValue) / 100; + } else { + NewMaxModIntsPerSec = M_DIMINFO.MaxModIntsPerSec - + (M_DIMINFO.MaxModIntsPerSec * ModAdjValue) / 100; + } + + /* + ** Check if we exceed boundaries... 
+ */ + if ( (NewMaxModIntsPerSec > M_DIMINFO.MaxModIntsPerSecUpperLimit) || + (NewMaxModIntsPerSec < M_DIMINFO.MaxModIntsPerSecLowerLimit)) { + if (M_DIMINFO.DisplayStats) { + printk("Cannot change ModTim from %i to %i ints/sec\n", + M_DIMINFO.MaxModIntsPerSec, NewMaxModIntsPerSec); + } + return; + } else { + if (M_DIMINFO.DisplayStats) { + printk("Resized ModTim from %i to %i ints/sec\n", + M_DIMINFO.MaxModIntsPerSec, NewMaxModIntsPerSec); + } + } + + M_DIMINFO.MaxModIntsPerSec = NewMaxModIntsPerSec; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + ModBase = C_CLK_FREQ_GENESIS / pAC->DynIrqModInfo.MaxModIntsPerSec; + } else { + ModBase = C_CLK_FREQ_YUKON / pAC->DynIrqModInfo.MaxModIntsPerSec; + } + + /* + ** We do not need to touch any other registers + */ + SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase); +} + +/******************************************************************************* +** Function : DisplaySelectedModerationType() +** Description : Displays what type of moderation we have +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : void! +** Notes : - +*******************************************************************************/ + +static void +DisplaySelectedModerationType(SK_AC *pAC) { + + if (pAC->DynIrqModInfo.DisplayStats) { + if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_STATIC) { + printk("Static int moderation runs with %i INTS/sec\n", + pAC->DynIrqModInfo.MaxModIntsPerSec); + } else if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_DYNAMIC) { + if (IsIntModEnabled(pAC)) { + printk("Dynamic int moderation runs with %i INTS/sec\n", + pAC->DynIrqModInfo.MaxModIntsPerSec); + } else { + printk("Dynamic int moderation currently not applied\n"); + } + } else { + printk("No interrupt moderation selected!\n"); + } + } +} + +/******************************************************************************* +** Function : DisplaySelectedModerationMask() +** Description : Displays what interrupts are moderated +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : void! +** Notes : - +*******************************************************************************/ + +static void +DisplaySelectedModerationMask(SK_AC *pAC) { + + if (pAC->DynIrqModInfo.DisplayStats) { + if (pAC->DynIrqModInfo.IntModTypeSelect != C_INT_MOD_NONE) { + switch (pAC->DynIrqModInfo.MaskIrqModeration) { + case IRQ_MASK_TX_ONLY: + printk("Only Tx-interrupts are moderated\n"); + break; + case IRQ_MASK_RX_ONLY: + printk("Only Rx-interrupts are moderated\n"); + break; + case IRQ_MASK_SP_ONLY: + printk("Only special-interrupts are moderated\n"); + break; + case IRQ_MASK_TX_RX: + printk("Tx- and Rx-interrupts are moderated\n"); + break; + case IRQ_MASK_SP_RX: + printk("Special- and Rx-interrupts are moderated\n"); + break; + case IRQ_MASK_SP_TX: + printk("Special- and Tx-interrupts are moderated\n"); + break; + case IRQ_MASK_RX_TX_SP: + printk("All Rx-, Tx and special-interrupts are moderated\n"); + break; + default: + printk("Don't know what is moderated\n"); + break; + } + } else { + printk("No specific interrupts masked for moderation\n"); + } + } +} + +/******************************************************************************* +** Function : DisplayDescrRatio +** Description : Like the name states... +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : void! 
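ResizeDimTimerDuration() above nudges MaxModIntsPerSec by a small percentage, refuses the change when it would leave the [lower, upper] window, and reprograms B2_IRQM_INI with the chip clock divided by the new rate. A minimal sketch of that step; the clock frequency and the limits are parameters here (in the driver they come from C_CLK_FREQ_GENESIS/C_CLK_FREQ_YUKON and the DIM_INFO limits), and the helper name is illustrative:

/* Minimal sketch of the resize step in ResizeDimTimerDuration(). */
long resize_moderation_rate(long curr_rate, int increase,
                            long lower_limit, long upper_limit,
                            long clock_hz, long *mod_base_out)
{
    const long adj_percent = 1;   /* the patch currently uses 1% for every bucket */
    long adj = (curr_rate * adj_percent) / 100;
    long new_rate = increase ? curr_rate + adj : curr_rate - adj;

    if (new_rate > upper_limit || new_rate < lower_limit)
        return curr_rate;         /* out of bounds: keep the old rate */

    /* value written to B2_IRQM_INI: clock ticks per moderation interval */
    *mod_base_out = clock_hz / new_rate;
    return new_rate;
}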
+** Notes : - +*******************************************************************************/ + +static void +DisplayDescrRatio(SK_AC *pAC) { + int TotalMaxNbrDescr = 0; + + if (pAC->DynIrqModInfo.DisplayStats) { + TotalMaxNbrDescr = pAC->RxDescrPerRing * GetRxCalls(pAC); + printk("Ratio descriptors: %i/%i\n", + M_DIMINFO.NbrProcessedDescr, TotalMaxNbrDescr); + } +} + +/******************************************************************************* +** +** End of file +** +*******************************************************************************/ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skge.c linux.22-ac2/drivers/net/sk98lin/skge.c --- linux.vanilla/drivers/net/sk98lin/skge.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skge.c 2003-08-13 14:13:06.000000000 +0100 @@ -2,15 +2,15 @@ * * Name: skge.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.43 $ - * Date: $Date: 2002/11/29 08:42:41 $ + * Version: $Revision: 1.5 $ + * Date: $Date: 2003/08/07 12:25:07 $ * Purpose: The main driver source module * ******************************************************************************/ - + /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. * * Driver for SysKonnect Gigabit Ethernet Server Adapters: * @@ -31,7 +31,7 @@ * SK-9843 (single link 1000Base-SX V2) * SK-9821 (single link 1000Base-T V2) * - * Created 10-Feb-1999, based on Linux' acenic.c, 3c59x.c and + * Created 10-Feb-1999, based on Linux' acenic.c, 3c59x.c and * SysKonnects GEnesis Solaris driver * Author: Christoph Goos (cgoos@syskonnect.de) * Mirko Lindner (mlindner@syskonnect.de) @@ -56,6 +56,93 @@ * History: * * $Log: skge.c,v $ + * Revision 1.5 2003/08/07 12:25:07 mlindner + * Fix: ConType parameter check and error detection + * Fix: Insert various fixes applied to the kernel tree + * + * Revision 1.4 2003/08/07 10:50:21 mlindner + * Add: Speed and HW-Csum support for Yukon Lite chipset + * + * Revision 1.3 2003/08/06 11:24:08 mlindner + * Add: Kernel updates + * + * Revision 1.2 2003/07/21 08:28:47 rroesler + * Fix: Handle padded bytes using skb_put() + * + * Revision 1.63 2003/07/15 09:26:23 rroesler + * Fix: Removed memory leak when sending short padded frames + * + * Revision 1.62 2003/07/09 11:11:16 rroesler + * Fix: Call of ReceiveIrq() performed with parameter SK_FALSE in + * order not to hang the system with multiple spinlocks + * + * Revision 1.61 2003/07/08 07:32:41 rroesler + * Fix: Correct Kernel-version + * + * Revision 1.60 2003/07/07 15:42:30 rroesler + * Fix: Removed function pci_present() for 2.5/2.6 kernels (deprecated) + * Fix: Corrected warning in GetConfiguration() + * + * Revision 1.59 2003/07/07 09:44:32 rroesler + * Add: HW checksumming on kernel 2.5/2.6 + * Add: padding of short frames (<60 bytes) with 0x00 instead of 0xaa + * Add: ConType parameter combining multiple other parameters into one + * Fix: Corrected bugreport #10721 (warning when changing MTU size) + * Fix: Removed obsolete function SetQueueSize() + * Fix: Function ChangeMtuSize() returns new MTU size in kernel 2.5/2.6 + * + * Revision 1.58 2003/06/17 07:14:29 mlindner + * Add: Disable checksum functionality + * Fix: Unload module (Kernel 2.5) + * + * Revision 1.57 2003/06/05 14:55:27 mlindner + * Fix: ProcFS creation (Kernel 2.2.x) + * Fix: ProcFS OWNER (Kernel 2.2.x) + * + * Revision 1.56 2003/06/03 14:34:29 
mlindner + * Add: Additions for SK_SLIM + * Fix: SkGeIoctl SK_IOCTL_GEN + * + * Revision 1.55 2003/05/26 13:00:52 mlindner + * Add: Support for Kernel 2.5/2.6 + * Add: Support for new IO-control MIB data structure + * Add: New SkOsGetTime function + * Fix: Race condition with broken LM80 chip + * Fix: Race condition with padded frames + * + * Revision 1.54 2003/04/28 13:07:27 mlindner + * Fix: Delay race condition with some server machines + * + * Revision 1.53 2003/04/28 12:49:49 mlindner + * Fix: Code optimization + * + * Revision 1.52 2003/04/28 12:24:32 mlindner + * Fix: Disabled HW Error IRQ on 32-bit Yukon if sensor IRQ occurs + * + * Revision 1.51 2003/04/16 08:31:14 mlindner + * Fix: Kernel 2.2 compilation + * + * Revision 1.49 2003/04/10 09:08:51 mlindner + * Add: Blink mode verification + * Fix: Checksum calculation + * + * Revision 1.48 2003/03/21 14:48:38 rroesler + * Added code for interrupt moderation + * + * Revision 1.47 2003/03/12 13:56:15 mlindner + * Fix: Mac update during SK_DRV_NET_UP + * + * Revision 1.46 2003/02/25 14:16:36 mlindner + * Fix: Copyright statement + * + * Revision 1.45 2003/02/25 13:25:55 mlindner + * Add: Performance improvements + * Add: Support for various vendors + * Fix: Init function + * + * Revision 1.44 2003/01/09 09:25:26 mlindner + * Fix: Remove useless init_module/cleanup_module forward declarations + * * Revision 1.43 2002/11/29 08:42:41 mlindner * Fix: Boot message * @@ -253,7 +340,7 @@ * Printing "ethX:" before adapter type at adapter init. * * - * 10-Feb-1999 cg Created, based on Linux' acenic.c, 3c59x.c and + * 10-Feb-1999 cg Created, based on Linux' acenic.c, 3c59x.c and * SysKonnects GEnesis Solaris driver * ******************************************************************************/ @@ -262,11 +349,11 @@ * * Possible compiler options (#define xxx / -Dxxx): * - * debugging can be enable by changing SK_DEBUG_CHKMOD and + * debugging can be enable by changing SK_DEBUG_CHKMOD and * SK_DEBUG_CHKCAT in makefile (described there). 
* ******************************************************************************/ - + /****************************************************************************** * * Description: @@ -330,39 +417,48 @@ ******************************************************************************/ #include "h/skversion.h" + #include #include #include + #include "h/skdrv1st.h" #include "h/skdrv2nd.h" +/******************************************************************************* + * + * Defines + * + ******************************************************************************/ -/* defines ******************************************************************/ /* for debuging on x86 only */ /* #define BREAKPOINT() asm(" int $3"); */ +/* use the transmit hw checksum driver functionality */ +#define USE_SK_TX_CHECKSUM + +/* use the receive hw checksum driver functionality */ +#define USE_SK_RX_CHECKSUM + /* use the scatter-gather functionality with sendfile() */ #define SK_ZEROCOPY /* use of a transmit complete interrupt */ #define USE_TX_COMPLETE -/* use interrupt moderation (for tx complete only) */ -#define USE_INT_MOD -#define INTS_PER_SEC 1800 - /* * threshold for copying small receive frames * set to 0 to avoid copying, set to 9001 to copy all frames */ -#define SK_COPY_THRESHOLD 200 +#define SK_COPY_THRESHOLD 50 /* number of adapters that can be configured via command line params */ #define SK_MAX_CARD_PARAM 16 + /* - * use those defines for a compile-in version of the driver instead + * use those defines for a compile-in version of the driver instead * of command line parameters */ // #define LINK_SPEED_A {"Auto", } @@ -376,23 +472,40 @@ // #define ROLE_A {"Auto", } // #define ROLE_B {"Auto", } // #define PREF_PORT {"A", } +// #define CON_TYPE {"Auto", } // #define RLMT_MODE {"CheckLinkState", } #define DEV_KFREE_SKB(skb) dev_kfree_skb(skb) #define DEV_KFREE_SKB_IRQ(skb) dev_kfree_skb_irq(skb) #define DEV_KFREE_SKB_ANY(skb) dev_kfree_skb_any(skb) -/* function prototypes ******************************************************/ + +/* Set blink mode*/ +#define OEM_CONFIG_VALUE ( SK_ACT_LED_BLINK | \ + SK_DUP_LED_NORMAL | \ + SK_LED_LINK100_ON) + + +/* Isr return value */ +#define SkIsrRetVar void +#define SkIsrRetNone NULL +#define SkIsrRetHandled NULL + + +/******************************************************************************* + * + * Local Function Prototypes + * + ******************************************************************************/ + static void FreeResources(struct SK_NET_DEVICE *dev); static int SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC); static SK_BOOL BoardAllocMem(SK_AC *pAC); static void BoardFreeMem(SK_AC *pAC); static void BoardInitMem(SK_AC *pAC); -static void SetupRing(SK_AC*, void*, uintptr_t, RXD**, RXD**, RXD**, - int*, SK_BOOL); - -static void SkGeIsr(int irq, void *dev_id, struct pt_regs *ptregs); -static void SkGeIsrOnePort(int irq, void *dev_id, struct pt_regs *ptregs); +static void SetupRing(SK_AC*, void*, uintptr_t, RXD**, RXD**, RXD**, int*, SK_BOOL); +static SkIsrRetVar SkGeIsr(int irq, void *dev_id, struct pt_regs *ptregs); +static SkIsrRetVar SkGeIsrOnePort(int irq, void *dev_id, struct pt_regs *ptregs); static int SkGeOpen(struct SK_NET_DEVICE *dev); static int SkGeClose(struct SK_NET_DEVICE *dev); static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev); @@ -407,33 +520,42 @@ static void FillRxRing(SK_AC*, RX_PORT*); static SK_BOOL FillRxDescriptor(SK_AC*, RX_PORT*); static void ReceiveIrq(SK_AC*, RX_PORT*, SK_BOOL); -static void 
ClearAndStartRx(SK_AC*, int); +static void ClearAndStartRx(SK_AC*, int); static void ClearTxIrq(SK_AC*, int, int); static void ClearRxRing(SK_AC*, RX_PORT*); static void ClearTxRing(SK_AC*, TX_PORT*); -static void SetQueueSizes(SK_AC *pAC); static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int new_mtu); static void PortReInitBmu(SK_AC*, int); static int SkGeIocMib(DEV_NET*, unsigned int, int); +static void StartDrvCleanupTimer(SK_AC *pAC); +static void StopDrvCleanupTimer(SK_AC *pAC); static int XmitFrameSG(SK_AC*, TX_PORT*, struct sk_buff*); -/*Extern */ +/******************************************************************************* + * + * Extern Function Prototypes + * + ******************************************************************************/ -/* external Proc function */ -extern int proc_read( - char *buffer, - char **buffer_location, - off_t offset, - int buffer_length, - int *eof, - void *data); +static const char SK_Root_Dir_entry[] = "sk98lin"; +static struct proc_dir_entry *pSkRootDir; +extern int sk_proc_read( char *buffer, + char **buffer_location, + off_t offset, + int buffer_length, + int *eof, + void *data); + +extern void SkDimEnableModerationIfNeeded(SK_AC *pAC); +extern void SkDimDisplayModerationSettings(SK_AC *pAC); +extern void SkDimStartModerationTimer(SK_AC *pAC); +extern void SkDimModerate(SK_AC *pAC); #ifdef DEBUG static void DumpMsg(struct sk_buff*, char*); static void DumpData(char*, int); static void DumpLong(char*, int); #endif -void dump_frag( SK_U8 *data, int length); /* global variables *********************************************************/ static const char *BootString = BOOT_STRING; @@ -445,13 +567,9 @@ static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480}; -/* local variables **********************************************************/ -const char SK_Root_Dir_entry[8]; - static struct proc_dir_entry *pSkRootDir; - /***************************************************************************** * * skge_probe - find all SK-98xx adapters @@ -469,6 +587,7 @@ { int proc_root_initialized = 0; int boards_found = 0; + int vendor_flag = SK_FALSE; SK_AC *pAC; DEV_NET *pNet = NULL; struct proc_dir_entry *pProcFile; @@ -485,17 +604,14 @@ if (!pci_present()) /* is PCI support present? */ return -ENODEV; - while((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) - { + while((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) { dev = NULL; pNet = NULL; - if ((pdev->vendor != PCI_VENDOR_ID_SYSKONNECT) && - ((pdev->device != PCI_DEVICE_ID_SYSKONNECT_GE) || - (pdev->device != PCI_DEVICE_ID_SYSKONNECT_YU))){ + SK_PCI_ISCOMPLIANT(vendor_flag, pdev); + if (!vendor_flag) continue; - } /* Configure DMA attributes. */ if (pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL) && @@ -556,12 +672,15 @@ dev->flags &= ~IFF_RUNNING; #ifdef SK_ZEROCOPY - if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { +#ifdef USE_SK_TX_CHECKSUM + + if (pAC->ChipsetType) { /* Use only if yukon hardware */ /* SK and ZEROCOPY - fly baby... */ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; } #endif +#endif /* * Dummy value. 
@@ -618,10 +737,10 @@ if(!proc_root_initialized) { pSkRootDir = create_proc_entry(SK_Root_Dir_entry, S_IFDIR | S_IWUSR | S_IRUGO | S_IXUGO, proc_net); + pSkRootDir->owner = THIS_MODULE; proc_root_initialized = 1; } - pSkRootDir->owner = THIS_MODULE; } @@ -632,21 +751,25 @@ pSkRootDir); - pProcFile->read_proc = proc_read; + pProcFile->read_proc = sk_proc_read; pProcFile->write_proc = NULL; pProcFile->nlink = 1; pProcFile->size = sizeof(dev->name + 1); pProcFile->data = (void *)pProcFile; + pProcFile->owner = THIS_MODULE; pNet->PortNr = 0; pNet->NetNr = 0; + #ifdef SK_ZEROCOPY - if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { +#ifdef USE_SK_TX_CHECKSUM + if (pAC->ChipsetType) { /* SG and ZEROCOPY - fly baby... */ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; } #endif +#endif boards_found++; @@ -677,22 +800,25 @@ dev->flags &= ~IFF_RUNNING; #ifdef SK_ZEROCOPY - if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { +#ifdef USE_SK_TX_CHECKSUM + if (pAC->ChipsetType) { /* SG and ZEROCOPY - fly baby... */ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; } #endif +#endif pProcFile = create_proc_entry(dev->name, S_IFREG | S_IXUSR | S_IWGRP | S_IROTH, pSkRootDir); - pProcFile->read_proc = proc_read; + pProcFile->read_proc = sk_proc_read; pProcFile->write_proc = NULL; pProcFile->nlink = 1; pProcFile->size = sizeof(dev->name + 1); pProcFile->data = (void *)pProcFile; + pProcFile->owner = THIS_MODULE; memcpy((caddr_t) &dev->dev_addr, (caddr_t) &pAC->Addr.Net[1].CurrentMacAddress, 6); @@ -771,13 +897,20 @@ MODULE_PARM(DupCap_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(FlowCtrl_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(FlowCtrl_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); -MODULE_PARM(Role_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); -MODULE_PARM(Role_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(Role_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(Role_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(ConType, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(PrefPort, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(RlmtMode, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); /* not used, just there because every driver should have them: */ MODULE_PARM(options, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "i"); MODULE_PARM(debug, "i"); +/* used for interrupt moderation */ +MODULE_PARM(IntsPerSec, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "i"); +MODULE_PARM(Moderation, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(Stats, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(ModerationMask, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(AutoSizing, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); #ifdef LINK_SPEED_A @@ -840,6 +973,12 @@ static char *Role_B[SK_MAX_CARD_PARAM] = {"", }; #endif +#ifdef CON_TYPE +static char *ConType[SK_MAX_CARD_PARAM] = CON_TYPE; +#else +static char *ConType[SK_MAX_CARD_PARAM] = {"", }; +#endif + #ifdef PREF_PORT static char *PrefPort[SK_MAX_CARD_PARAM] = PREF_PORT; #else @@ -855,6 +994,12 @@ static int debug = 0; /* not used */ static int options[SK_MAX_CARD_PARAM] = {0, }; /* not used */ +static int IntsPerSec[SK_MAX_CARD_PARAM]; +static char *Moderation[SK_MAX_CARD_PARAM]; +static char *ModerationMask[SK_MAX_CARD_PARAM]; +static char *AutoSizing[SK_MAX_CARD_PARAM]; +static char *Stats[SK_MAX_CARD_PARAM]; + /***************************************************************************** * @@ -878,7 +1023,7 @@ cards = skge_probe(); if (cards == 0) { - printk("No adapter 
found.\n"); + printk("sk98lin: No adapter found.\n"); } return cards ? 0 : -ENODEV; } /* skge_init_module */ @@ -910,7 +1055,7 @@ netif_stop_queue(SkGeRootDev); SkGeYellowLED(pAC, pAC->IoBase, 0); - if(pAC->BoardLevel == 2) { + if(pAC->BoardLevel == SK_INIT_RUN) { /* board is still alive */ spin_lock_irqsave(&pAC->SlowPathLock, Flags); EvPara.Para32[0] = 0; @@ -922,16 +1067,16 @@ SkEventDispatcher(pAC, pAC->IoBase); /* disable interrupts */ SK_OUT32(pAC->IoBase, B0_IMSK, 0); - SkGeDeInit(pAC, pAC->IoBase); + SkGeDeInit(pAC, pAC->IoBase); spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); - pAC->BoardLevel = 0; + pAC->BoardLevel = SK_INIT_DATA; /* We do NOT check here, if IRQ was pending, of course*/ } - if(pAC->BoardLevel == 1) { + if(pAC->BoardLevel == SK_INIT_IO) { /* board is still alive */ - SkGeDeInit(pAC, pAC->IoBase); - pAC->BoardLevel = 0; + SkGeDeInit(pAC, pAC->IoBase); + pAC->BoardLevel = SK_INIT_DATA; } if ((pAC->GIni.GIMacsFound == 2) && pAC->RlmtNets == 2){ @@ -942,7 +1087,7 @@ FreeResources(SkGeRootDev); SkGeRootDev->get_stats = NULL; - /* + /* * otherwise unregister_netdev calls get_stats with * invalid IO ... :-( */ @@ -960,6 +1105,7 @@ module_init(skge_init_module); module_exit(skge_cleanup_module); + /***************************************************************************** * * SkGeBoardInit - do level 0 and 1 initialization @@ -1002,20 +1148,20 @@ spin_lock_irqsave(&pAC->SlowPathLock, Flags); /* Does a RESET on board ...*/ - if (SkGeInit(pAC, pAC->IoBase, 0) != 0) { + if (SkGeInit(pAC, pAC->IoBase, SK_INIT_DATA) != 0) { printk("HWInit (0) failed.\n"); spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); return(-EAGAIN); } - SkI2cInit( pAC, pAC->IoBase, 0); - SkEventInit(pAC, pAC->IoBase, 0); - SkPnmiInit( pAC, pAC->IoBase, 0); - SkAddrInit( pAC, pAC->IoBase, 0); - SkRlmtInit( pAC, pAC->IoBase, 0); - SkTimerInit(pAC, pAC->IoBase, 0); + SkI2cInit( pAC, pAC->IoBase, SK_INIT_DATA); + SkEventInit(pAC, pAC->IoBase, SK_INIT_DATA); + SkPnmiInit( pAC, pAC->IoBase, SK_INIT_DATA); + SkAddrInit( pAC, pAC->IoBase, SK_INIT_DATA); + SkRlmtInit( pAC, pAC->IoBase, SK_INIT_DATA); + SkTimerInit(pAC, pAC->IoBase, SK_INIT_DATA); - pAC->BoardLevel = 0; - pAC->RxBufSize = ETH_BUF_SIZE; + pAC->BoardLevel = SK_INIT_DATA; + pAC->RxBufSize = ETH_BUF_SIZE; SK_PNMI_SET_DRIVER_DESCR(pAC, DescrString); SK_PNMI_SET_DRIVER_VER(pAC, VerStr); @@ -1024,24 +1170,31 @@ /* level 1 init common modules here (HW init) */ spin_lock_irqsave(&pAC->SlowPathLock, Flags); - if (SkGeInit(pAC, pAC->IoBase, 1) != 0) { + if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) { printk("HWInit (1) failed.\n"); spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); return(-EAGAIN); } - SkI2cInit( pAC, pAC->IoBase, 1); - SkEventInit(pAC, pAC->IoBase, 1); - SkPnmiInit( pAC, pAC->IoBase, 1); - SkAddrInit( pAC, pAC->IoBase, 1); - SkRlmtInit( pAC, pAC->IoBase, 1); - SkTimerInit(pAC, pAC->IoBase, 1); + SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO); + SkEventInit(pAC, pAC->IoBase, SK_INIT_IO); + SkPnmiInit( pAC, pAC->IoBase, SK_INIT_IO); + SkAddrInit( pAC, pAC->IoBase, SK_INIT_IO); + SkRlmtInit( pAC, pAC->IoBase, SK_INIT_IO); + SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO); + + /* Set chipset type support */ + pAC->ChipsetType = 0; + if ((pAC->GIni.GIChipId == CHIP_ID_YUKON) || + (pAC->GIni.GIChipId == CHIP_ID_YUKON_LITE)) { + pAC->ChipsetType = 1; + } GetConfiguration(pAC); if (pAC->RlmtNets == 2) { pAC->GIni.GIPortUsage = SK_MUL_LINK; } - pAC->BoardLevel = 1; + pAC->BoardLevel = SK_INIT_IO; spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); if 
(pAC->GIni.GIMacsFound == 2) { @@ -1074,9 +1227,6 @@ pAC->CsOfs = (pAC->CsOfs2 << 16) | pAC->CsOfs1; BoardInitMem(pAC); -#if 0 - SetQueueSizes(pAC); -#else /* tschilling: New common function with minimum size check. */ DualNet = SK_FALSE; if (pAC->RlmtNets == 2) { @@ -1091,7 +1241,6 @@ printk("SkGeInitAssignRamToQueues failed.\n"); return(-EAGAIN); } -#endif /* Print adapter specific string from vpd */ ProductStr(pAC); @@ -1101,9 +1250,9 @@ printk(" PrefPort:%c RlmtMode:%s\n", 'A' + pAC->Rlmt.Net[0].Port[pAC->Rlmt.Net[0].PrefPort]->PortNumber, (pAC->RlmtMode==0) ? "Check Link State" : - ((pAC->RlmtMode==1) ? "Check Link State" : - ((pAC->RlmtMode==3) ? "Check Local Port" : - ((pAC->RlmtMode==7) ? "Check Segmentation" : + ((pAC->RlmtMode==1) ? "Check Link State" : + ((pAC->RlmtMode==3) ? "Check Local Port" : + ((pAC->RlmtMode==7) ? "Check Segmentation" : ((pAC->RlmtMode==17) ? "Dual Check Link State" :"Error"))))); SkGeYellowLED(pAC, pAC->IoBase, 1); @@ -1306,7 +1455,7 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("Descriptor size: %d Descriptor Number: %d\n", DescrSize,DescrNum)); - + pDescr = (RXD*) pMemArea; pPrevDescr = NULL; pNextDescr = (RXD*) (((char*)pDescr) + DescrSize); @@ -1353,24 +1502,22 @@ ("PortReInitBmu ")); /* set address of first descriptor of ring in BMU */ - SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ - TX_Q_CUR_DESCR_LOW, + SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_L, (uint32_t)(((caddr_t) (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) - pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing + pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) & 0xFFFFFFFF)); - SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ - TX_Q_DESCR_HIGH, + SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_H, (uint32_t)(((caddr_t) (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) - pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing + pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) >> 32)); - SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+RX_Q_CUR_DESCR_LOW, + SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_L, (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) - pAC->RxPort[PortIndex].pRxDescrRing + pAC->RxPort[PortIndex].VRxDescrRing) & 0xFFFFFFFF)); - SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+RX_Q_DESCR_HIGH, + SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_H, (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) - pAC->RxPort[PortIndex].pRxDescrRing + pAC->RxPort[PortIndex].VRxDescrRing) >> 32)); @@ -1389,7 +1536,7 @@ * Returns: N/A * */ -static void SkGeIsr(int irq, void *dev_id, struct pt_regs *ptregs) +static SkIsrRetVar SkGeIsr(int irq, void *dev_id, struct pt_regs *ptregs) { struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id; DEV_NET *pNet; @@ -1409,20 +1556,20 @@ while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) { #if 0 /* software irq currently not used */ - if (IntSrc & IRQ_SW) { + if (IntSrc & IS_IRQ_SW) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("Software IRQ\n")); } #endif - if (IntSrc & IRQ_EOF_RX1) { + if (IntSrc & IS_R1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF RX1 IRQ\n")); ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); SK_PNMI_CNT_RX_INTR(pAC, 0); } - if (IntSrc & IRQ_EOF_RX2) { + if (IntSrc & IS_R2_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF RX2 IRQ\n")); @@ -1430,7 +1577,7 @@ SK_PNMI_CNT_RX_INTR(pAC, 1); } #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - if (IntSrc & IRQ_EOF_AS_TX1) { + if (IntSrc & 
IS_XA1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF AS TX1 IRQ\n")); @@ -1439,7 +1586,7 @@ FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]); spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); } - if (IntSrc & IRQ_EOF_AS_TX2) { + if (IntSrc & IS_XA2_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF AS TX2 IRQ\n")); @@ -1449,7 +1596,7 @@ spin_unlock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock); } #if 0 /* only if sync. queues used */ - if (IntSrc & IRQ_EOF_SY_TX1) { + if (IntSrc & IS_XS1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF SY TX1 IRQ\n")); @@ -1459,7 +1606,7 @@ spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); ClearTxIrq(pAC, 0, TX_PRIO_HIGH); } - if (IntSrc & IRQ_EOF_SY_TX2) { + if (IntSrc & IS_XS2_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF SY TX2 IRQ\n")); @@ -1473,19 +1620,20 @@ #endif /* do all IO at once */ - if (IntSrc & IRQ_EOF_RX1) + if (IntSrc & IS_R1_F) ClearAndStartRx(pAC, 0); - if (IntSrc & IRQ_EOF_RX2) + if (IntSrc & IS_R2_F) ClearAndStartRx(pAC, 1); #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - if (IntSrc & IRQ_EOF_AS_TX1) + if (IntSrc & IS_XA1_F) ClearTxIrq(pAC, 0, TX_PRIO_LOW); - if (IntSrc & IRQ_EOF_AS_TX2) + if (IntSrc & IS_XA2_F) ClearTxIrq(pAC, 1, TX_PRIO_LOW); #endif SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc); } /* while (IntSrc & IRQ_MASK != 0) */ + IntSrc &= pAC->GIni.GIValIrqMask; if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("SPECIAL IRQ DP-Cards => %x\n", IntSrc)); @@ -1498,13 +1646,17 @@ spin_unlock(&pAC->SlowPathLock); } /* - * do it all again is case we cleared an interrupt that + * do it all again is case we cleared an interrupt that * came in after handling the ring (OUTs may be delayed * in hardware buffers, but are through after IN) - */ - + * + * rroesler: has been commented out and shifted to + * SkGeDrvEvent(), because it is timer + * guarded now + * ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE); + */ if (pAC->CheckQueue) { pAC->CheckQueue = SK_FALSE; @@ -1513,11 +1665,10 @@ spin_unlock(&pAC->SlowPathLock); } - /* IRQ is processed - Enable IRQs again*/ - SK_OUT32(pAC->IoBase, B0_IMSK, IRQ_MASK); + SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask); - return; + return; } /* SkGeIsr */ @@ -1534,7 +1685,7 @@ * Returns: N/A * */ -static void SkGeIsrOnePort(int irq, void *dev_id, struct pt_regs *ptregs) +static SkIsrRetVar SkGeIsrOnePort(int irq, void *dev_id, struct pt_regs *ptregs) { struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id; DEV_NET *pNet; @@ -1554,13 +1705,13 @@ while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) { #if 0 /* software irq currently not used */ - if (IntSrc & IRQ_SW) { + if (IntSrc & IS_IRQ_SW) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("Software IRQ\n")); } #endif - if (IntSrc & IRQ_EOF_RX1) { + if (IntSrc & IS_R1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF RX1 IRQ\n")); @@ -1568,7 +1719,7 @@ SK_PNMI_CNT_RX_INTR(pAC, 0); } #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - if (IntSrc & IRQ_EOF_AS_TX1) { + if (IntSrc & IS_XA1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF AS TX1 IRQ\n")); @@ -1578,7 +1729,7 @@ spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); } #if 0 /* only if sync. 
queues used */ - if (IntSrc & IRQ_EOF_SY_TX1) { + if (IntSrc & IS_XS1_F) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF SY TX1 IRQ\n")); @@ -1592,15 +1743,16 @@ #endif /* do all IO at once */ - if (IntSrc & IRQ_EOF_RX1) + if (IntSrc & IS_R1_F) ClearAndStartRx(pAC, 0); #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - if (IntSrc & IRQ_EOF_AS_TX1) + if (IntSrc & IS_XA1_F) ClearTxIrq(pAC, 0, TX_PRIO_LOW); #endif SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc); } /* while (IntSrc & IRQ_MASK != 0) */ + IntSrc &= pAC->GIni.GIValIrqMask; if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("SPECIAL IRQ SP-Cards => %x\n", IntSrc)); @@ -1613,16 +1765,21 @@ spin_unlock(&pAC->SlowPathLock); } /* - * do it all again is case we cleared an interrupt that + * do it all again is case we cleared an interrupt that * came in after handling the ring (OUTs may be delayed * in hardware buffers, but are through after IN) - */ + * + * rroesler: has been commented out and shifted to + * SkGeDrvEvent(), because it is timer + * guarded now + * ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); + */ /* IRQ is processed - Enable IRQs again*/ - SK_OUT32(pAC->IoBase, B0_IMSK, IRQ_MASK); + SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask); - return; + return; } /* SkGeIsrOnePort */ @@ -1657,34 +1814,39 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, ("SkGeOpen: pAC=0x%lX:\n", (unsigned long)pAC)); - if (pAC->BoardLevel == 0) { + + /* Set blink mode */ + if (pAC->PciDev->vendor == 0x1186) + pAC->GIni.GILedBlinkCtrl = OEM_CONFIG_VALUE; + + if (pAC->BoardLevel == SK_INIT_DATA) { /* level 1 init common modules here */ - if (SkGeInit(pAC, pAC->IoBase, 1) != 0) { + if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) { printk("%s: HWInit (1) failed.\n", pAC->dev[pNet->PortNr]->name); return (-1); } - SkI2cInit (pAC, pAC->IoBase, 1); - SkEventInit (pAC, pAC->IoBase, 1); - SkPnmiInit (pAC, pAC->IoBase, 1); - SkAddrInit (pAC, pAC->IoBase, 1); - SkRlmtInit (pAC, pAC->IoBase, 1); - SkTimerInit (pAC, pAC->IoBase, 1); - pAC->BoardLevel = 1; + SkI2cInit (pAC, pAC->IoBase, SK_INIT_IO); + SkEventInit (pAC, pAC->IoBase, SK_INIT_IO); + SkPnmiInit (pAC, pAC->IoBase, SK_INIT_IO); + SkAddrInit (pAC, pAC->IoBase, SK_INIT_IO); + SkRlmtInit (pAC, pAC->IoBase, SK_INIT_IO); + SkTimerInit (pAC, pAC->IoBase, SK_INIT_IO); + pAC->BoardLevel = SK_INIT_IO; } - if (pAC->BoardLevel != 2) { + if (pAC->BoardLevel != SK_INIT_RUN) { /* tschilling: Level 2 init modules here, check return value. 
*/ - if (SkGeInit(pAC, pAC->IoBase, 2) != 0) { + if (SkGeInit(pAC, pAC->IoBase, SK_INIT_RUN) != 0) { printk("%s: HWInit (2) failed.\n", pAC->dev[pNet->PortNr]->name); return (-1); } - SkI2cInit (pAC, pAC->IoBase, 2); - SkEventInit (pAC, pAC->IoBase, 2); - SkPnmiInit (pAC, pAC->IoBase, 2); - SkAddrInit (pAC, pAC->IoBase, 2); - SkRlmtInit (pAC, pAC->IoBase, 2); - SkTimerInit (pAC, pAC->IoBase, 2); - pAC->BoardLevel = 2; + SkI2cInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkEventInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkPnmiInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkAddrInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkRlmtInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkTimerInit (pAC, pAC->IoBase, SK_INIT_RUN); + pAC->BoardLevel = SK_INIT_RUN; } for (i=0; iGIni.GIMacsFound; i++) { @@ -1694,20 +1856,14 @@ } SkGeYellowLED(pAC, pAC->IoBase, 1); -#ifdef USE_INT_MOD -/* moderate only TX complete interrupts (these are not time critical) */ -#define IRQ_MOD_MASK (IRQ_EOF_AS_TX1 | IRQ_EOF_AS_TX2) - { - unsigned long ModBase; - ModBase = 53125000 / INTS_PER_SEC; - SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase); - SK_OUT32(pAC->IoBase, B2_IRQM_MSK, IRQ_MOD_MASK); - SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_START); - } -#endif + StartDrvCleanupTimer(pAC); + SkDimEnableModerationIfNeeded(pAC); + SkDimDisplayModerationSettings(pAC); + + pAC->GIni.GIValIrqMask &= IRQ_MASK; /* enable Interrupts */ - SK_OUT32(pAC->IoBase, B0_IMSK, IRQ_MASK); + SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask); SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK); spin_lock_irqsave(&pAC->SlowPathLock, Flags); @@ -1775,7 +1931,9 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, ("SkGeClose: pAC=0x%lX ", (unsigned long)pAC)); - /* + StopDrvCleanupTimer(pAC); + + /* * Clear multicast table, promiscuous mode .... 
*/ SkAddrMcClear(pAC, pAC->IoBase, PortIdx, 0); @@ -1793,7 +1951,7 @@ SK_OUT32(pAC->IoBase, B0_IMSK, 0); /* stop the hardware */ SkGeDeInit(pAC, pAC->IoBase); - pAC->BoardLevel = 0; + pAC->BoardLevel = SK_INIT_DATA; spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); } else { @@ -1807,7 +1965,7 @@ /* Stop port */ spin_lock_irqsave(&pAC->TxPort[pNet->PortNr] [TX_PRIO_LOW].TxDesRingLock, Flags); - SkGeStopPort(pAC, pAC->IoBase, pNet->PortNr, + SkGeStopPort(pAC, pAC->IoBase, pNet->PortNr, SK_STOP_ALL, SK_HARD_RST); spin_unlock_irqrestore(&pAC->TxPort[pNet->PortNr] [TX_PRIO_LOW].TxDesRingLock, Flags); @@ -1832,8 +1990,8 @@ pAC->MaxPorts--; pNet->Up = 0; + MOD_DEC_USE_COUNT; - return (0); } /* SkGeClose */ @@ -1862,7 +2020,7 @@ pNet = (DEV_NET*) dev->priv; pAC = pNet->pAC; - if ((!skb_shinfo(skb)->nr_frags) || + if ((!skb_shinfo(skb)->nr_frags) || (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) { /* Don't activate scatter-gather and hardware checksum */ @@ -1929,24 +2087,29 @@ * < 0 - on failure: other problems ( -> return failure to upper layers) */ static int XmitFrame( -SK_AC *pAC, /* pointer to adapter context */ +SK_AC *pAC, /* pointer to adapter context */ TX_PORT *pTxPort, /* pointer to struct of port to send to */ -struct sk_buff *pMessage) /* pointer to send-message */ +struct sk_buff *pMessage) /* pointer to send-message */ { -TXD *pTxd; /* the rxd to fill */ -unsigned long Flags; -SK_U64 PhysAddr; -int BytesSend; + TXD *pTxd; /* the rxd to fill */ + TXD *pOldTxd; + unsigned long Flags; + SK_U64 PhysAddr; + int Protocol; + int IpHeaderLength; + int BytesSend = pMessage->len; - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, - ("X")); + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("X")); spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); #ifndef USE_TX_COMPLETE FreeTxDescriptors(pAC, pTxPort); #endif if (pTxPort->TxdRingFree == 0) { - /* no enough free descriptors in ring at the moment */ + /* + ** no enough free descriptors in ring at the moment. + ** Maybe free'ing some old one help? + */ FreeTxDescriptors(pAC, pTxPort); if (pTxPort->TxdRingFree == 0) { spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); @@ -1954,59 +2117,102 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("XmitFrame failed\n")); - /* this message can not be sent now */ - /* Because tbusy seems to be set, the message should not be freed here */ - /* It will be used by the scheduler of the ethernet handler */ + /* + ** the desired message can not be sent + ** Because tbusy seems to be set, the message + ** should not be freed here. It will be used + ** by the scheduler of the ethernet handler + */ return (-1); } } - /* advance head counter behind descriptor needed for this frame */ + + /* + ** If the passed socket buffer is of smaller MTU-size than 60, + ** copy everything into new buffer and fill all bytes between + ** the original packet end and the new packet end of 60 with 0x00. + ** This is to resolve faulty padding by the HW with 0xaa bytes. + */ + if (BytesSend < C_LEN_ETHERNET_MINSIZE) { + skb_put(pMessage, (C_LEN_ETHERNET_MINSIZE-BytesSend)); + memset( ((void *)(pMessage->data))+BytesSend, + 0, C_LEN_ETHERNET_MINSIZE-BytesSend); + } + + /* + ** advance head counter behind descriptor needed for this frame, + ** so that needed descriptor is reserved from that on. 
The next + ** action will be to add the passed buffer to the TX-descriptor + */ pTxd = pTxPort->pTxdRingHead; pTxPort->pTxdRingHead = pTxd->pNextTxd; pTxPort->TxdRingFree--; - /* the needed descriptor is reserved now */ - - /* - * everything allocated ok, so add buffer to descriptor - */ #ifdef SK_DUMP_TX DumpMsg(pMessage, "XmitFrame"); #endif - /* set up descriptor and CONTROL dword */ + /* + ** First step is to map the data to be sent via the adapter onto + ** the DMA memory. Kernel 2.2 uses virt_to_bus(), but kernels 2.4 + ** and 2.6 need to use pci_map_page() for that mapping. + */ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, - virt_to_page(pMessage->data), - ((unsigned long) pMessage->data & - ~PAGE_MASK), - pMessage->len, - PCI_DMA_TODEVICE); - pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + virt_to_page(pMessage->data), + ((unsigned long) pMessage->data & ~PAGE_MASK), + pMessage->len, + PCI_DMA_TODEVICE); + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); - pTxd->pMBuf = pMessage; - pTxd->TBControl = TX_CTRL_OWN_BMU | TX_CTRL_STF | - TX_CTRL_CHECK_DEFAULT | TX_CTRL_SOFTWARE | + pTxd->pMBuf = pMessage; + + if (pMessage->ip_summed == CHECKSUM_HW) { + Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xf); + if ((Protocol == C_PROTO_ID_TCP) && (pAC->GIni.GIChipRev != 0)) { + pTxd->TBControl = BMU_UDP_CHECK; + } else { + pTxd->TBControl = BMU_TCP_CHECK ; + } + + IpHeaderLength = (SK_U8)pMessage->data[C_OFFSET_IPHEADER]; + IpHeaderLength = (IpHeaderLength & 0xf) * 4; + pTxd->TcpSumOfs = 0; /* PH-Checksum already calculated */ + pTxd->TcpSumSt = C_LEN_ETHERMAC_HEADER + IpHeaderLength + + C_OFFSET_TCPHEADER_TCPCS; + pTxd->TcpSumWr = C_LEN_ETHERMAC_HEADER + IpHeaderLength; + + pTxd->TBControl |= BMU_OWN | BMU_STF | + BMU_SW | BMU_EOF | #ifdef USE_TX_COMPLETE - TX_CTRL_EOF | TX_CTRL_EOF_IRQ | pMessage->len; -#else - TX_CTRL_EOF | pMessage->len; + BMU_IRQ_EOF | #endif - - if ((pTxPort->pTxdRingPrev->TBControl & TX_CTRL_OWN_BMU) == 0) { - /* previous descriptor already done, so give tx start cmd */ - /* StartTx(pAC, pTxPort->HwAddr); */ - SK_OUT8(pTxPort->HwAddr, TX_Q_CTRL, TX_Q_CTRL_START); + pMessage->len; + } else { + pTxd->TBControl = BMU_OWN | BMU_STF | BMU_CHECK | + BMU_SW | BMU_EOF | +#ifdef USE_TX_COMPLETE + BMU_IRQ_EOF | +#endif + pMessage->len; } - pTxPort->pTxdRingPrev = pTxd; - - - BytesSend = pMessage->len; + + /* + ** If previous descriptor already done, give TX start cmd + */ + pOldTxd = xchg(&pTxPort->pTxdRingPrev, pTxd); + if ((pOldTxd->TBControl & BMU_OWN) == 0) { + SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START); + } + + /* + ** after releasing the lock, the skb may immediately be free'd + */ spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); - /* after releasing the lock, the skb may be immidiately freed */ - if (pTxPort->TxdRingFree != 0) + if (pTxPort->TxdRingFree != 0) { return (BytesSend); - else + } else { return (0); + } } /* XmitFrame */ @@ -2026,21 +2232,21 @@ * < 0 - on failure: other problems ( -> return failure to upper layers) */ static int XmitFrameSG( -SK_AC *pAC, /* pointer to adapter context */ -TX_PORT *pTxPort, /* pointer to struct of port to send to */ -struct sk_buff *pMessage) /* pointer to send-message */ +SK_AC *pAC, /* pointer to adapter context */ +TX_PORT *pTxPort, /* pointer to struct of port to send to */ +struct sk_buff *pMessage) /* pointer to send-message */ { - int i; - int BytesSend; - int hlength; - int protocol; - skb_frag_t *sk_frag; - TXD *pTxd; - TXD *pTxdFst; - TXD *pTxdLst; - 
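The short-frame handling in XmitFrame() above pads anything below C_LEN_ETHERNET_MINSIZE (60 bytes) with zeroes via skb_put()/memset(), so the hardware does not pad with 0xaa. A userspace sketch of the same padding on a plain buffer; the function name and the hard-coded 60 are illustrative of the patch, not driver API:

/* Zero-pad a frame shorter than the 60-byte minimum, as XmitFrame() does. */
#include <string.h>

#define MIN_FRAME_LEN 60

size_t pad_short_frame(unsigned char *frame, size_t len, size_t bufsize)
{
    if (len >= MIN_FRAME_LEN || bufsize < MIN_FRAME_LEN)
        return len;                                  /* long enough, or no room */
    memset(frame + len, 0, MIN_FRAME_LEN - len);     /* 0x00, not the HW's 0xaa */
    return MIN_FRAME_LEN;
}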
SK_U64 PhysAddr; - unsigned long Flags; + TXD *pTxd; + TXD *pTxdFst; + TXD *pTxdLst; + int CurrFrag; + int BytesSend; + int IpHeaderLength; + int Protocol; + skb_frag_t *sk_frag; + SK_U64 PhysAddr; + unsigned long Flags; spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); #ifndef USE_TX_COMPLETE @@ -2059,114 +2265,118 @@ } } - - pTxd = pTxPort->pTxdRingHead; - pTxdFst = pTxd; - pTxdLst = pTxd; + pTxd = pTxPort->pTxdRingHead; + pTxdFst = pTxd; + pTxdLst = pTxd; BytesSend = 0; - protocol = 0; + Protocol = 0; - /* map first fragment (header) */ + /* + ** Map the first fragment (header) into the DMA-space + */ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, virt_to_page(pMessage->data), ((unsigned long) pMessage->data & ~PAGE_MASK), skb_headlen(pMessage), PCI_DMA_TODEVICE); - pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); - /* HW checksum? */ + /* + ** Does the HW need to evaluate checksum for TCP or UDP packets? + */ if (pMessage->ip_summed == CHECKSUM_HW) { - pTxd->TBControl = TX_CTRL_STF | - TX_CTRL_ST_FWD | - skb_headlen(pMessage); - - /* We have to use the opcode for tcp here because the opcode for - udp is not working in the hardware yet (revision 2.0)*/ - protocol = ((SK_U8)pMessage->data[23] & 0xf); - if ((protocol == 17) && (pAC->GIni.GIChipRev != 0)) - pTxd->TBControl |= BMU_UDP_CHECK; - else + pTxd->TBControl = BMU_STF | BMU_STFWD | skb_headlen(pMessage); + /* + ** We have to use the opcode for tcp here, because the + ** opcode for udp is not working in the hardware yet + ** (Revision 2.0) + */ + Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xf); + if ((Protocol == C_PROTO_ID_TCP) && (pAC->GIni.GIChipRev != 0)) { + pTxd->TBControl |= BMU_UDP_CHECK; + } else { pTxd->TBControl |= BMU_TCP_CHECK ; + } - hlength = ((SK_U8)pMessage->data[14] & 0xf) * 4; + IpHeaderLength = ((SK_U8)pMessage->data[C_OFFSET_IPHEADER] & 0xf)*4; pTxd->TcpSumOfs = 0; /* PH-Checksum already claculated */ - pTxd->TcpSumSt = 14+hlength+16; - pTxd->TcpSumWr = 14+hlength; - + pTxd->TcpSumSt = C_LEN_ETHERMAC_HEADER + IpHeaderLength + + C_OFFSET_TCPHEADER_TCPCS; + pTxd->TcpSumWr = C_LEN_ETHERMAC_HEADER + IpHeaderLength; } else { - pTxd->TBControl = TX_CTRL_CHECK_DEFAULT | - TX_CTRL_SOFTWARE | - TX_CTRL_STF | - skb_headlen(pMessage); + pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_STF | + skb_headlen(pMessage); } pTxd = pTxd->pNextTxd; pTxPort->TxdRingFree--; BytesSend += skb_headlen(pMessage); - - /* Map SG fragments */ - for (i = 0; i < skb_shinfo(pMessage)->nr_frags; i++) { - sk_frag = &skb_shinfo(pMessage)->frags[i]; - - /* we already have the proper value in entry */ + /* + ** Browse over all SG fragments and map each of them into the DMA space + */ + for (CurrFrag = 0; CurrFrag < skb_shinfo(pMessage)->nr_frags; CurrFrag++) { + sk_frag = &skb_shinfo(pMessage)->frags[CurrFrag]; + /* + ** we already have the proper value in entry + */ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, sk_frag->page, sk_frag->page_offset, sk_frag->size, PCI_DMA_TODEVICE); - pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); - pTxd->pMBuf = pMessage; + pTxd->pMBuf = pMessage; - /* HW checksum */ + /* + ** Does the HW need to evaluate checksum for TCP or UDP packets? 
+ */ if (pMessage->ip_summed == CHECKSUM_HW) { - pTxd->TBControl = TX_CTRL_OWN_BMU | - TX_CTRL_SOFTWARE | - TX_CTRL_ST_FWD; - - /* We have to use the opcode for tcp here because the opcode for - udp is not working in the hardware yet (revision 2.0)*/ - if ((protocol == 17) && (pAC->GIni.GIChipRev != 0)) + pTxd->TBControl = BMU_OWN | BMU_SW | BMU_STFWD; + /* + ** We have to use the opcode for tcp here because the + ** opcode for udp is not working in the hardware yet + ** (revision 2.0) + */ + if ( (Protocol == C_PROTO_ID_TCP) && + (pAC->GIni.GIChipRev != 0) ) { pTxd->TBControl |= BMU_UDP_CHECK ; - else + } else { pTxd->TBControl |= BMU_TCP_CHECK ; - + } } else { - pTxd->TBControl = TX_CTRL_CHECK_DEFAULT | - TX_CTRL_SOFTWARE | - TX_CTRL_OWN_BMU; + pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_OWN; } - /* Last fragment */ - if( (i+1) == skb_shinfo(pMessage)->nr_frags ) { + /* + ** Do we have the last fragment? + */ + if( (CurrFrag+1) == skb_shinfo(pMessage)->nr_frags ) { #ifdef USE_TX_COMPLETE - pTxd->TBControl |= TX_CTRL_EOF | - TX_CTRL_EOF_IRQ | - sk_frag->size; + pTxd->TBControl |= BMU_EOF | BMU_IRQ_EOF | sk_frag->size; #else - pTxd->TBControl |= TX_CTRL_EOF | - sk_frag->size; + pTxd->TBControl |= BMU_EOF | sk_frag->size; #endif - pTxdFst->TBControl |= TX_CTRL_OWN_BMU | - TX_CTRL_SOFTWARE; + pTxdFst->TBControl |= BMU_OWN | BMU_SW; } else { pTxd->TBControl |= sk_frag->size; } pTxdLst = pTxd; - pTxd = pTxd->pNextTxd; + pTxd = pTxd->pNextTxd; pTxPort->TxdRingFree--; BytesSend += sk_frag->size; } - if ((pTxPort->pTxdRingPrev->TBControl & TX_CTRL_OWN_BMU) == 0) { - /* previous descriptor already done, so give tx start cmd */ - /* StartTx(pAC, pTxPort->HwAddr); */ - SK_OUT8(pTxPort->HwAddr, TX_Q_CTRL, TX_Q_CTRL_START); + /* + ** If previous descriptor already done, give TX start cmd + */ + if ((pTxPort->pTxdRingPrev->TBControl & BMU_OWN) == 0) { + SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START); } pTxPort->pTxdRingPrev = pTxdLst; @@ -2174,28 +2384,13 @@ spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); - if (pTxPort->TxdRingFree > 0) + if (pTxPort->TxdRingFree > 0) { return (BytesSend); - else + } else { return (0); + } } - -void dump_frag( SK_U8 *data, int length) -{ - int i; - - printk("Length: %d\n", length); - for( i=0; i < length; i++ ) { - printk(" %02x", (SK_U8)*(data + i) ); - if( !((i+1) % 20) ) - printk("\n"); - } - printk("\n\n"); - -} - - /***************************************************************************** * * FreeTxDescriptors - release descriptors from the descriptor ring @@ -2224,44 +2419,48 @@ SK_U64 PhysAddr; /* address of DMA mapping */ pNewTail = pTxPort->pTxdRingTail; - pTxd = pNewTail; - /* - * loop forever; exits if TX_CTRL_SOFTWARE bit not set in start frame - * or TX_CTRL_OWN_BMU bit set in any frame - */ + pTxd = pNewTail; + /* + ** loop forever; exits if BMU_SW bit not set in start frame + ** or BMU_OWN bit set in any frame + */ while (1) { Control = pTxd->TBControl; - if ((Control & TX_CTRL_SOFTWARE) == 0) { - /* - * software controllable bit is set in first - * fragment when given to BMU. Not set means that - * this fragment was never sent or is already - * freed ( -> ring completely free now). - */ + if ((Control & BMU_SW) == 0) { + /* + ** software controllable bit is set in first + ** fragment when given to BMU. Not set means that + ** this fragment was never sent or is already + ** freed ( -> ring completely free now). 
+ */ pTxPort->pTxdRingTail = pTxd; - netif_start_queue(pAC->dev[pTxPort->PortIndex]); + netif_wake_queue(pAC->dev[pTxPort->PortIndex]); return; } - if (Control & TX_CTRL_OWN_BMU) { + if (Control & BMU_OWN) { pTxPort->pTxdRingTail = pTxd; if (pTxPort->TxdRingFree > 0) { - netif_start_queue(pAC->dev[pTxPort->PortIndex]); + netif_wake_queue(pAC->dev[pTxPort->PortIndex]); } return; } - /* release the DMA mapping */ + /* + ** release the DMA mapping, because until not unmapped + ** this buffer is considered being under control of the + ** adapter card! + */ PhysAddr = ((SK_U64) pTxd->VDataHigh) << (SK_U64) 32; PhysAddr |= (SK_U64) pTxd->VDataLow; pci_unmap_page(pAC->PciDev, PhysAddr, pTxd->pMBuf->len, PCI_DMA_TODEVICE); - if (Control & TX_CTRL_EOF) + if (Control & BMU_EOF) DEV_KFREE_SKB_ANY(pTxd->pMBuf); /* free message */ pTxPort->TxdRingFree++; - pTxd->TBControl &= ~TX_CTRL_SOFTWARE; + pTxd->TBControl &= ~BMU_SW; pTxd = pTxd->pNextTxd; /* point behind fragment with EOF */ } /* while(forever) */ } /* FreeTxDescriptors */ @@ -2340,11 +2539,15 @@ ~PAGE_MASK), pAC->RxBufSize - 2, PCI_DMA_FROMDEVICE); - pRxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + + pRxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pRxd->VDataHigh = (SK_U32) (PhysAddr >> 32); - pRxd->pMBuf = pMsgBlock; - pRxd->RBControl = RX_CTRL_OWN_BMU | RX_CTRL_STF | - RX_CTRL_EOF_IRQ | RX_CTRL_CHECK_CSUM | Length; + pRxd->pMBuf = pMsgBlock; + pRxd->RBControl = BMU_OWN | + BMU_STF | + BMU_IRQ_EOF | + BMU_TCP_CHECK | + Length; return (SK_TRUE); } /* FillRxDescriptor */ @@ -2375,15 +2578,18 @@ pRxPort->pRxdRingTail = pRxd->pNextRxd; pRxPort->RxdRingFree--; Length = pAC->RxBufSize; - pRxd->VDataLow = PhysLow; + + pRxd->VDataLow = PhysLow; pRxd->VDataHigh = PhysHigh; - pRxd->pMBuf = pMsg; - pRxd->RBControl = RX_CTRL_OWN_BMU | RX_CTRL_STF | - RX_CTRL_EOF_IRQ | RX_CTRL_CHECK_CSUM | Length; + pRxd->pMBuf = pMsg; + pRxd->RBControl = BMU_OWN | + BMU_STF | + BMU_IRQ_EOF | + BMU_TCP_CHECK | + Length; return; } /* ReQueueRxBuffer */ - /***************************************************************************** * * ReceiveIrq - handle a receive IRQ @@ -2405,6 +2611,7 @@ struct sk_buff *pMsg; /* pointer to message holding frame */ struct sk_buff *pNewMsg; /* pointer to a new message for copying frame */ int FrameLength; /* total length of received frame */ +int IpFrameLength; SK_MBUF *pRlmtMbuf; /* ptr to a buffer for giving a frame to rlmt */ SK_EVPARA EvPara; /* an event parameter union */ unsigned long Flags; /* for spin lock */ @@ -2424,7 +2631,7 @@ SK_U64 PhysAddr; rx_start: - /* do forever; exit if RX_CTRL_OWN_BMU found */ + /* do forever; exit if BMU_OWN found */ for ( pRxd = pRxPort->pRxdRingHead ; pRxPort->RxdRingFree < pAC->RxDescrPerRing ; pRxd = pRxd->pNextRxd, @@ -2432,8 +2639,8 @@ pRxPort->RxdRingFree ++) { /* - * For a better understanding of this loop - * Go through every descriptor beginning at the head + * For a better understanding of this loop + * Go through every descriptor beginning at the head * Please note: the ring might be completely received so the OWN bit * set is not a good crirteria to leave that loop. * Therefore the RingFree counter is used. 
@@ -2444,23 +2651,23 @@ Control = pRxd->RBControl; /* check if this descriptor is ready */ - if ((Control & RX_CTRL_OWN_BMU) != 0) { + if ((Control & BMU_OWN) != 0) { /* this descriptor is not yet ready */ /* This is the usual end of the loop */ /* We don't need to start the ring again */ FillRxRing(pAC, pRxPort); return; } + pAC->DynIrqModInfo.NbrProcessedDescr++; /* get length of frame and check it */ - FrameLength = Control & RX_CTRL_LEN_MASK; + FrameLength = Control & BMU_BBC; if (FrameLength > pAC->RxBufSize) { goto rx_failed; } /* check for STF and EOF */ - if ((Control & (RX_CTRL_STF | RX_CTRL_EOF)) != - (RX_CTRL_STF | RX_CTRL_EOF)) { + if ((Control & (BMU_STF | BMU_EOF)) != (BMU_STF | BMU_EOF)) { goto rx_failed; } @@ -2497,7 +2704,7 @@ if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { IsBc = (FrameStat & XMR_FS_BC) != 0; IsMc = (FrameStat & XMR_FS_MC) != 0; - IsBadFrame = (FrameStat & + IsBadFrame = (FrameStat & (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0; } else { IsBc = (FrameStat & GMR_FS_BC) != 0; @@ -2514,8 +2721,7 @@ pRxPort->RxdRingFree)); /* DumpMsg(pMsg, "Rx"); */ - if ((Control & RX_CTRL_STAT_VALID) != RX_CTRL_STAT_VALID || - (IsBadFrame)) { + if ((Control & BMU_STAT_VAL) != BMU_STAT_VAL || (IsBadFrame)) { #if 0 (FrameStat & (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0) { #endif @@ -2560,6 +2766,7 @@ FrameLength, 0); ReQueueRxBuffer(pAC, pRxPort, pMsg, pRxd->VDataHigh, pRxd->VDataLow); + pMsg = pNewMsg; } @@ -2582,33 +2789,65 @@ skb_put(pMsg, FrameLength); /* hardware checksum */ Type = ntohs(*((short*)&pMsg->data[12])); + +#ifdef USE_SK_RX_CHECKSUM if (Type == 0x800) { Csum1=le16_to_cpu(pRxd->TcpSums & 0xffff); Csum2=le16_to_cpu((pRxd->TcpSums >> 16) & 0xffff); - if ((((Csum1 & 0xfffe) && (Csum2 & 0xfffe)) && - (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) || - (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { - Result = SkCsGetReceiveInfo(pAC, - &pMsg->data[14], - Csum1, Csum2, pRxPort->PortIndex); - if (Result == - SKCS_STATUS_IP_FRAGMENT || - Result == - SKCS_STATUS_IP_CSUM_OK || - Result == - SKCS_STATUS_TCP_CSUM_OK || - Result == - SKCS_STATUS_UDP_CSUM_OK) { - pMsg->ip_summed = - CHECKSUM_UNNECESSARY; - } else { - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, - SK_DBGCAT_DRV_RX_PROGRESS, - ("skge: CRC error. Frame dropped!\n")); - goto rx_failed; - } - }/* checksumControl calculation valid */ + IpFrameLength = (int) ntohs((unsigned short) + ((unsigned short *) pMsg->data)[8]); + + /* + * Test: If frame is padded, a check is not possible! + * Frame not padded? Length difference must be 14 (0xe)! + */ + if ((FrameLength - IpFrameLength) != 0xe) { + /* Frame padded => TCP offload not possible! */ + pMsg->ip_summed = CHECKSUM_NONE; + } else { + /* Frame not padded => TCP offload! */ + if ((((Csum1 & 0xfffe) && (Csum2 & 0xfffe)) && + (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) || + (pAC->ChipsetType)) { + Result = SkCsGetReceiveInfo(pAC, + &pMsg->data[14], + Csum1, Csum2, pRxPort->PortIndex); + if (Result == + SKCS_STATUS_IP_FRAGMENT || + Result == + SKCS_STATUS_IP_CSUM_OK || + Result == + SKCS_STATUS_TCP_CSUM_OK || + Result == + SKCS_STATUS_UDP_CSUM_OK) { + pMsg->ip_summed = + CHECKSUM_UNNECESSARY; + } + else if (Result == + SKCS_STATUS_TCP_CSUM_ERROR || + Result == + SKCS_STATUS_UDP_CSUM_ERROR || + Result == + SKCS_STATUS_IP_CSUM_ERROR_UDP || + Result == + SKCS_STATUS_IP_CSUM_ERROR_TCP || + Result == + SKCS_STATUS_IP_CSUM_ERROR ) { + /* HW Checksum error */ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, + ("skge: CRC error. 
Frame dropped!\n")); + goto rx_failed; + } else { + pMsg->ip_summed = + CHECKSUM_NONE; + } + }/* checksumControl calculation valid */ + } /* Frame length check */ } /* IP frame */ +#else + pMsg->ip_summed = CHECKSUM_NONE; +#endif } /* frame > SK_COPY_TRESHOLD */ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("V")); @@ -2622,7 +2861,7 @@ #if 0 IsMc = (FrameStat & XMR_FS_MC)==XMR_FS_MC; #endif - SK_RLMT_LOOKAHEAD(pAC, PortIndex, + SK_RLMT_LOOKAHEAD(pAC, PortIndex, &pMsg->data[Offset], IsBc, IsMc, &ForRlmt); } @@ -2647,7 +2886,7 @@ } else { /* drop frame */ - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_RX_PROGRESS, ("D")); DEV_KFREE_SKB(pMsg); @@ -2656,7 +2895,7 @@ } /* if not for rlmt */ else { /* packet for rlmt */ - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_RX_PROGRESS, ("R")); pRlmtMbuf = SkDrvAllocRlmtMbuf(pAC, pAC->IoBase, FrameLength); @@ -2684,14 +2923,14 @@ pAC->CheckQueue = SK_TRUE; } - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_RX_PROGRESS, ("Q")); } - if ((pAC->dev[pRxPort->PortIndex]->flags & + if ((pAC->dev[pRxPort->PortIndex]->flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0 || - (ForRlmt & SK_RLMT_RX_PROTOCOL) == - SK_RLMT_RX_PROTOCOL) { + (ForRlmt & SK_RLMT_RX_PROTOCOL) == + SK_RLMT_RX_PROTOCOL) { pMsg->dev = pAC->dev[pRxPort->PortIndex]; pMsg->protocol = eth_type_trans(pMsg, pAC->dev[pRxPort->PortIndex]); @@ -2708,7 +2947,7 @@ /* RXD ring is empty -> fill and restart */ FillRxRing(pAC, pRxPort); /* do not start if called from Close */ - if (pAC->BoardLevel > 0) { + if (pAC->BoardLevel > SK_INIT_DATA) { ClearAndStartRx(pAC, PortIndex); } return; @@ -2750,8 +2989,9 @@ SK_AC *pAC, /* pointer to the adapter context */ int PortIndex) /* index of the receive port (XMAC) */ { - SK_OUT8(pAC->IoBase, RxQueueAddr[PortIndex]+RX_Q_CTRL, - RX_Q_CTRL_START | RX_Q_CTRL_CLR_I_EOF); + SK_OUT8(pAC->IoBase, + RxQueueAddr[PortIndex]+Q_CSR, + CSR_START | CSR_IRQ_CL_F); } /* ClearAndStartRx */ @@ -2770,8 +3010,9 @@ int PortIndex, /* index of the transmit port (XMAC) */ int Prio) /* priority or normal queue */ { - SK_OUT8(pAC->IoBase, TxQueueAddr[PortIndex][Prio]+TX_Q_CTRL, - TX_Q_CTRL_CLR_I_EOF); + SK_OUT8(pAC->IoBase, + TxQueueAddr[PortIndex][Prio]+Q_CSR, + CSR_IRQ_CL_F); } /* ClearTxIrq */ @@ -2810,7 +3051,7 @@ DEV_KFREE_SKB(pRxd->pMBuf); pRxd->pMBuf = NULL; } - pRxd->RBControl &= RX_CTRL_OWN_BMU; + pRxd->RBControl &= BMU_OWN; pRxd = pRxd->pNextRxd; pRxPort->RxdRingFree++; } while (pRxd != pRxPort->pRxdRingTail); @@ -2818,7 +3059,6 @@ spin_unlock_irqrestore(&pRxPort->RxDesRingLock, Flags); } /* ClearRxRing */ - /***************************************************************************** * * ClearTxRing - remove all buffers from the transmit ring @@ -2843,107 +3083,13 @@ spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); pTxd = pTxPort->pTxdRingHead; for (i=0; iTxDescrPerRing; i++) { - pTxd->TBControl &= ~TX_CTRL_OWN_BMU; + pTxd->TBControl &= ~BMU_OWN; pTxd = pTxd->pNextTxd; } FreeTxDescriptors(pAC, pTxPort); spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); } /* ClearTxRing */ - -/***************************************************************************** - * - * SetQueueSizes - configure the sizes of rx and tx queues - * - * Description: - * This function assigns the sizes for active and passive port - * to the appropriate HWinit structure variables. - * The passive port(s) get standard values, all remaining RAM - * is given to the active port. 
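
A note on the receive-checksum changes a little further up: the new test (FrameLength - IpFrameLength) != 0xe works because an Ethernet II frame carries exactly 14 bytes of MAC header in front of the IP packet, so for an unpadded frame the total length exceeds the IP total-length field by exactly 14; short IP datagrams padded up to the 60-byte minimum show a larger difference, and the offloaded checksum result is then ignored (CHECKSUM_NONE). A standalone illustration, with a hypothetical helper that is not part of the driver:

/* Unpadded Ethernet II frame: total length = 14-byte MAC header + IP length. */
#include <stdio.h>

#define ETH_HLEN 14

static int frame_is_padded(int frame_len, int ip_total_len)
{
    return (frame_len - ip_total_len) != ETH_HLEN;
}

int main(void)
{
    /* 40-byte IP datagram padded up to the 60-byte minimum frame size */
    printf("padded: %d\n", frame_is_padded(60, 40));    /* prints 1 */
    /* 100-byte IP datagram in a 114-byte frame, no padding */
    printf("padded: %d\n", frame_is_padded(114, 100));  /* prints 0 */
    return 0;
}
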
- * The queue sizes are in kbyte and must be multiple of 8. - * The limits for the number of buffers filled into the rx rings - * is also set in this routine. - * - * Returns: - * none - */ -static void SetQueueSizes( -SK_AC *pAC) /* pointer to the adapter context */ -{ -int StandbyRam; /* adapter RAM used for a standby port */ -int RemainingRam; /* adapter RAM available for the active port */ -int RxRam; /* RAM used for the active port receive queue */ -int i; /* loop counter */ - -if (pAC->RlmtNets == 1) { - StandbyRam = SK_RLMT_STANDBY_QRXSIZE + SK_RLMT_STANDBY_QXASIZE + - SK_RLMT_STANDBY_QXSSIZE; - RemainingRam = pAC->GIni.GIRamSize - - (pAC->GIni.GIMacsFound-1) * StandbyRam; - for (i=0; iGIni.GIMacsFound; i++) { - pAC->GIni.GP[i].PRxQSize = SK_RLMT_STANDBY_QRXSIZE; - pAC->GIni.GP[i].PXSQSize = SK_RLMT_STANDBY_QXSSIZE; - pAC->GIni.GP[i].PXAQSize = SK_RLMT_STANDBY_QXASIZE; - } - RxRam = (RemainingRam * 8 / 10) & ~7; - pAC->GIni.GP[pAC->ActivePort].PRxQSize = RxRam; - pAC->GIni.GP[pAC->ActivePort].PXSQSize = 0; - pAC->GIni.GP[pAC->ActivePort].PXAQSize = - (RemainingRam - RxRam) & ~7; - pAC->RxQueueSize = RxRam; - pAC->TxSQueueSize = 0; - pAC->TxAQueueSize = (RemainingRam - RxRam) & ~7; - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, - ("queue sizes settings - rx:%d txA:%d txS:%d\n", - pAC->RxQueueSize,pAC->TxAQueueSize, pAC->TxSQueueSize)); -} else { - RemainingRam = pAC->GIni.GIRamSize/pAC->GIni.GIMacsFound; - RxRam = (RemainingRam * 8 / 10) & ~7; - for (i=0; iGIni.GIMacsFound; i++) { - pAC->GIni.GP[i].PRxQSize = RxRam; - pAC->GIni.GP[i].PXSQSize = 0; - pAC->GIni.GP[i].PXAQSize = (RemainingRam - RxRam) & ~7; - } - - pAC->RxQueueSize = RxRam; - pAC->TxSQueueSize = 0; - pAC->TxAQueueSize = (RemainingRam - RxRam) & ~7; -} - for (i=0; iRxPort[i].RxFillLimit = pAC->RxDescrPerRing; - } - - if (pAC->RlmtNets == 2) { - for (i=0; iGIni.GIMacsFound; i++) { - pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - 100; - } - } else { - for (i=0; iGIni.GIMacsFound; i++) { - pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - 100; - } - /* - * Do not set the Limit to 0, because this could cause - * wrap around with ReQueue'ed buffers (a buffer could - * be requeued in the same position, made accessable to - * the hardware, and the hardware could change its - * contents! 
- */ - pAC->RxPort[pAC->ActivePort].RxFillLimit = 1; - } - -#ifdef DEBUG - for (i=0; iGIni.GIMacsFound; i++) { - SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, - ("i: %d, RxQSize: %d, PXSQsize: %d, PXAQSize: %d\n", - i, - pAC->GIni.GP[i].PRxQSize, - pAC->GIni.GP[i].PXSQSize, - pAC->GIni.GP[i].PXAQSize)); - } -#endif -} /* SetQueueSizes */ - - /***************************************************************************** * * SkGeSetMacAddr - Set the hardware MAC address @@ -3088,49 +3234,52 @@ ("SkGeChangeMtu starts now...\n")); pNet = (DEV_NET*) dev->priv; - pAC = pNet->pAC; + pAC = pNet->pAC; if ((NewMtu < 68) || (NewMtu > SK_JUMBO_MTU)) { return -EINVAL; } - if(pAC->BoardLevel != 2) { + if(pAC->BoardLevel != SK_INIT_RUN) { return -EINVAL; } pNet->Mtu = NewMtu; pOtherNet = (DEV_NET*)pAC->dev[1 - pNet->NetNr]->priv; - if ((pOtherNet->Mtu > 1500) && (NewMtu <= 1500) && (pOtherNet->Up==1)) { + if ((pOtherNet->Mtu>1500) && (NewMtu<=1500) && (pOtherNet->Up==1)) { return(0); } - EvPara.Para32[0] = pNet->NetNr; - EvPara.Para32[1] = -1; - pAC->RxBufSize = NewMtu + 32; dev->mtu = NewMtu; SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, ("New MTU: %d\n", NewMtu)); - /* prevent reconfiguration while changing the MTU */ - - /* disable interrupts */ + /* + ** Prevent any reconfiguration while changing the MTU + ** by disabling any interrupts + */ SK_OUT32(pAC->IoBase, B0_IMSK, 0); spin_lock_irqsave(&pAC->SlowPathLock, Flags); - /* Found more than one port */ - if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - /* Stop both ports */ - EvPara.Para32[0] = 0; - SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); - EvPara.Para32[0] = 1; - SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + /* + ** Notify RLMT that any ports are to be stopped + */ + EvPara.Para32[0] = 0; + EvPara.Para32[1] = -1; + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + EvPara.Para32[0] = 1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); } else { SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); } + /* + ** After calling the SkEventDispatcher(), RLMT is aware about + ** the stopped ports -> configuration can take place! 
+ */ SkEventDispatcher(pAC, pAC->IoBase); for (i=0; iGIni.GIMacsFound; i++) { @@ -3140,140 +3289,132 @@ } - /* - * adjust number of rx buffers allocated - */ + /* + ** Depending on the desired MTU size change, a different number of + ** RX buffers need to be allocated + */ if (NewMtu > 1500) { - /* use less rx buffers */ - for (i=0; iGIni.GIMacsFound; i++) { - /* Found more than one port */ - if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - pAC->RxPort[i].RxFillLimit = - pAC->RxDescrPerRing - 100; - } else { - if (i == pAC->ActivePort) - pAC->RxPort[i].RxFillLimit = - pAC->RxDescrPerRing - 100; - else - pAC->RxPort[i].RxFillLimit = - pAC->RxDescrPerRing - 10; - } + /* + ** Use less rx buffers + */ + for (i=0; iGIni.GIMacsFound; i++) { + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 4); + } else { + if (i == pAC->ActivePort) { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 4); + } else { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 10); + } } - } - else { - /* use normal amount of rx buffers */ - for (i=0; iGIni.GIMacsFound; i++) { - /* Found more than one port */ - if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - pAC->RxPort[i].RxFillLimit = 1; - } else { - if (i == pAC->ActivePort) - pAC->RxPort[i].RxFillLimit = 1; - else - pAC->RxPort[i].RxFillLimit = - pAC->RxDescrPerRing - 100; - } + } + } else { + /* + ** Use the normal amount of rx buffers + */ + for (i=0; iGIni.GIMacsFound; i++) { + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + pAC->RxPort[i].RxFillLimit = 1; + } else { + if (i == pAC->ActivePort) { + pAC->RxPort[i].RxFillLimit = 1; + } else { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 4); + } } + } } - - SkGeDeInit(pAC, pAC->IoBase); + + SkGeDeInit(pAC, pAC->IoBase); - /* - * enable/disable hardware support for long frames - */ + /* + ** enable/disable hardware support for long frames + */ if (NewMtu > 1500) { -// pAC->JumboActivated = SK_TRUE; /* is never set back !!! */ +// pAC->JumboActivated = SK_TRUE; /* is never set back !!! */ pAC->GIni.GIPortUsage = SK_JUMBO_LINK; - } - else { - if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - pAC->GIni.GIPortUsage = SK_MUL_LINK; - } else { - pAC->GIni.GIPortUsage = SK_RED_LINK; - } + } else { + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + pAC->GIni.GIPortUsage = SK_MUL_LINK; + } else { + pAC->GIni.GIPortUsage = SK_RED_LINK; + } } - SkGeInit( pAC, pAC->IoBase, 1); - SkI2cInit( pAC, pAC->IoBase, 1); - SkEventInit(pAC, pAC->IoBase, 1); - SkPnmiInit( pAC, pAC->IoBase, 1); - SkAddrInit( pAC, pAC->IoBase, 1); - SkRlmtInit( pAC, pAC->IoBase, 1); - SkTimerInit(pAC, pAC->IoBase, 1); + SkGeInit( pAC, pAC->IoBase, SK_INIT_IO); + SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO); + SkEventInit(pAC, pAC->IoBase, SK_INIT_IO); + SkPnmiInit( pAC, pAC->IoBase, SK_INIT_IO); + SkAddrInit( pAC, pAC->IoBase, SK_INIT_IO); + SkRlmtInit( pAC, pAC->IoBase, SK_INIT_IO); + SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO); /* - * tschilling: - * Speed and others are set back to default in level 1 init! - */ + ** tschilling: + ** Speed and others are set back to default in level 1 init! 
+ */ GetConfiguration(pAC); - SkGeInit( pAC, pAC->IoBase, 2); - SkI2cInit( pAC, pAC->IoBase, 2); - SkEventInit(pAC, pAC->IoBase, 2); - SkPnmiInit( pAC, pAC->IoBase, 2); - SkAddrInit( pAC, pAC->IoBase, 2); - SkRlmtInit( pAC, pAC->IoBase, 2); - SkTimerInit(pAC, pAC->IoBase, 2); + SkGeInit( pAC, pAC->IoBase, SK_INIT_RUN); + SkI2cInit( pAC, pAC->IoBase, SK_INIT_RUN); + SkEventInit(pAC, pAC->IoBase, SK_INIT_RUN); + SkPnmiInit( pAC, pAC->IoBase, SK_INIT_RUN); + SkAddrInit( pAC, pAC->IoBase, SK_INIT_RUN); + SkRlmtInit( pAC, pAC->IoBase, SK_INIT_RUN); + SkTimerInit(pAC, pAC->IoBase, SK_INIT_RUN); - /* - * clear and reinit the rx rings here - */ + /* + ** clear and reinit the rx rings here + */ for (i=0; iGIni.GIMacsFound; i++) { ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE); ClearRxRing(pAC, &pAC->RxPort[i]); FillRxRing(pAC, &pAC->RxPort[i]); - /* Enable transmit descriptor polling. */ + /* + ** Enable transmit descriptor polling + */ SkGePollTxD(pAC, pAC->IoBase, i, SK_TRUE); FillRxRing(pAC, &pAC->RxPort[i]); }; SkGeYellowLED(pAC, pAC->IoBase, 1); - -#ifdef USE_INT_MOD - { - unsigned long ModBase; - ModBase = 53125000 / INTS_PER_SEC; - SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase); - SK_OUT32(pAC->IoBase, B2_IRQM_MSK, IRQ_MOD_MASK); - SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_START); - } -#endif + SkDimEnableModerationIfNeeded(pAC); + SkDimDisplayModerationSettings(pAC); netif_start_queue(pAC->dev[pNet->PortNr]); for (i=pAC->GIni.GIMacsFound-1; i>=0; i--) { spin_unlock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock); } - /* enable Interrupts */ - SK_OUT32(pAC->IoBase, B0_IMSK, IRQ_MASK); + /* + ** Enable Interrupts again + */ + SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask); SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK); SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); SkEventDispatcher(pAC, pAC->IoBase); - /* Found more than one port */ - if ((pAC->GIni.GIMacsFound == 2 ) && - (pAC->RlmtNets == 2)) { - /* Start both ports */ - EvPara.Para32[0] = pAC->RlmtNets; - EvPara.Para32[1] = -1; - SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS, - EvPara); - + /* + ** Notify RLMT about the changing and restarting one (or more) ports + */ + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + EvPara.Para32[0] = pAC->RlmtNets; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS, EvPara); + EvPara.Para32[0] = pNet->PortNr; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); - EvPara.Para32[1] = -1; - EvPara.Para32[0] = pNet->PortNr; + if (pOtherNet->Up) { + EvPara.Para32[0] = pOtherNet->PortNr; SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); - - if (pOtherNet->Up) { - EvPara.Para32[0] = pOtherNet->PortNr; - SkEventQueue(pAC, SKGE_RLMT, - SK_RLMT_START, EvPara); - } + } } else { SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); } @@ -3281,7 +3422,20 @@ SkEventDispatcher(pAC, pAC->IoBase); spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + /* + ** While testing this driver with latest kernel 2.5 (2.5.70), it + ** seems as if upper layers have a problem to handle a successful + ** return value of '0'. If such a zero is returned, the complete + ** system hangs for several minutes (!), which is in acceptable. + ** + ** Currently it is not clear, what the exact reason for this problem + ** is. The implemented workaround for 2.5 is to return the desired + ** new MTU size if all needed changes for the new MTU size where + ** performed. In kernels 2.2 and 2.4, a zero value is returned, + ** which indicates the successful change of the mtu-size. 
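
The return-value workaround described in the comment above can also be captured as a compile-time version switch. The sketch below is purely illustrative (the helper name is invented); the patch itself keeps the plain return 0 used by 2.2/2.4.

/*
 * Illustration only: the 2.5 workaround described above expressed as a
 * version switch.  The helper name is invented; the real code below
 * simply returns 0.
 */
#include <linux/version.h>

static inline int SkMtuChangeResult(int NewMtu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
    return NewMtu;  /* 2.5.x: returning 0 was seen to stall upper layers */
#else
    return 0;       /* 2.2/2.4: zero indicates a successful MTU change */
#endif
}
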
+ */ return 0; + } /* SkGeChangeMtu */ @@ -3375,10 +3529,14 @@ { DEV_NET *pNet; SK_AC *pAC; +void *pMemBuf; SK_GE_IOCTL Ioctl; unsigned int Err = 0; -int Size; +int Size = 0; +int Ret = 0; +unsigned int Length = 0; +int HeaderLength = sizeof(SK_U32) + sizeof(SK_U32); SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, ("SkGeIoctl starts now...\n")); @@ -3395,7 +3553,7 @@ case SK_IOCTL_PRESETMIB: if (!capable(CAP_NET_ADMIN)) return -EPERM; case SK_IOCTL_GETMIB: - if(copy_from_user(&pAC->PnmiStruct, Ioctl.pData, + if(copy_from_user(&pAC->PnmiStruct, Ioctl.pData, Ioctl.LenPnmiStruct)? Ioctl.Len : sizeof(pAC->PnmiStruct))) { return -EFAULT; @@ -3410,10 +3568,36 @@ return -EFAULT; } break; + case SK_IOCTL_GEN: + if (Ioctl.Len < (sizeof(pAC->PnmiStruct) + HeaderLength)) { + Length = Ioctl.Len; + } else { + Length = sizeof(pAC->PnmiStruct) + HeaderLength; + } + if (NULL == (pMemBuf = kmalloc(Length, GFP_KERNEL))) { + return -EFAULT; + } + if(copy_from_user(pMemBuf, Ioctl.pData, Length)) { + return -EFAULT; + } + if ((Ret = SkPnmiGenIoctl(pAC, pAC->IoBase, pMemBuf, &Length, 0)) < 0) { + return -EFAULT; + } + if(copy_to_user(Ioctl.pData, pMemBuf, Length) ) { + return -EFAULT; + } + Ioctl.Len = Length; + if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) { + return -EFAULT; + } + kfree(pMemBuf); /* cleanup everything */ + break; default: Err = -EOPNOTSUPP; } + return(Err); + } /* SkGeIoctl */ @@ -3485,12 +3669,19 @@ SK_AC *pAC) /* pointer to the adapter context structure */ { SK_I32 Port; /* preferred port */ -int LinkSpeed; /* Link speed */ -int AutoNeg; /* auto negotiation off (0) or on (1) */ -int DuplexCap; /* duplex capabilities (0=both, 1=full, 2=half */ -int MSMode; /* master / slave mode selection */ SK_BOOL AutoSet; SK_BOOL DupSet; +int LinkSpeed = SK_LSPEED_AUTO; /* Link speed */ +int AutoNeg = 1; /* autoneg off (0) or on (1) */ +int DuplexCap = 0; /* 0=both,1=full,2=half */ +int FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; /* FlowControl */ +int MSMode = SK_MS_MODE_AUTO; /* master/slave mode */ + +SK_BOOL IsConTypeDefined = SK_TRUE; +SK_BOOL IsLinkSpeedDefined = SK_TRUE; +SK_BOOL IsFlowCtrlDefined = SK_TRUE; +SK_BOOL IsRoleDefined = SK_TRUE; +SK_BOOL IsModeDefined = SK_TRUE; /* * The two parameters AutoNeg. and DuplexCap. map to one configuration * parameter. The mapping is described by this table: @@ -3503,109 +3694,216 @@ * ----------------------------------------------------------------- * Sense | AutoSense | AutoSense | AutoSense | */ -int Capabilities[3][3] = - { { -1, SK_LMODE_FULL, SK_LMODE_HALF}, - {SK_LMODE_AUTOBOTH, SK_LMODE_AUTOFULL, SK_LMODE_AUTOHALF}, +int Capabilities[3][3] = + { { -1, SK_LMODE_FULL , SK_LMODE_HALF }, + {SK_LMODE_AUTOBOTH , SK_LMODE_AUTOFULL , SK_LMODE_AUTOHALF }, {SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE} }; + #define DC_BOTH 0 #define DC_FULL 1 #define DC_HALF 2 #define AN_OFF 0 #define AN_ON 1 #define AN_SENS 2 +#define M_CurrPort pAC->GIni.GP[Port] - /* settings for port A */ - /* settings link speed */ - LinkSpeed = SK_LSPEED_AUTO; /* default: do auto select */ + + /* + ** Set the default values first for both ports! + */ + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO; + } + + /* + ** Check merged parameter ConType. If it has not been used, + ** verify any other parameter (e.g. AutoNeg) and use default values. 
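
An aside on the SK_IOCTL_GEN branch added above: it allocates a temporary buffer, copies the user data in, hands the buffer to SkPnmiGenIoctl() and copies the result back. The sketch below shows one common way to arrange such a round trip so that the buffer is released on every exit path (hypothetical, simplified signature; the real handler returns -EFAULT directly from each failure branch and frees the buffer at the end of the successful path):

/*
 * Sketch of a kmalloc/copy_from_user/copy_to_user round trip with a single
 * cleanup path.  Function name and signature are hypothetical.
 */
#include <linux/slab.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

static int generic_buffer_ioctl(void *user_ptr, unsigned int user_len,
                                unsigned int max_len)
{
    unsigned int len = (user_len < max_len) ? user_len : max_len;
    void *buf;
    int ret = 0;

    buf = kmalloc(len, GFP_KERNEL);
    if (buf == NULL)
        return -ENOMEM;

    if (copy_from_user(buf, user_ptr, len)) {
        ret = -EFAULT;
        goto out;
    }

    /* ... hand 'buf' to the PNMI layer here ... */

    if (copy_to_user(user_ptr, buf, len))
        ret = -EFAULT;
out:
    kfree(buf);
    return ret;
}
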
+ ** + ** Stating both ConType and other lowlevel link parameters is also + ** possible. If this is the case, the passed ConType-parameter is + ** overwritten by the lowlevel link parameter. + ** + ** The following settings are used for a merged ConType-parameter: + ** + ** ConType DupCap AutoNeg FlowCtrl Role Speed + ** ------- ------ ------- -------- ---------- ----- + ** Auto Both On SymOrRem Auto Auto + ** 100FD Full Off None 100 + ** 100HD Half Off None 100 + ** 10FD Full Off None 10 + ** 10HD Half Off None 10 + ** + ** This ConType parameter is used for all ports of the adapter! + */ + if ( (ConType != NULL) && + (pAC->Index < SK_MAX_CARD_PARAM) && + (ConType[pAC->Index] != NULL) ) { + + /* Check chipset family */ + if ((!pAC->ChipsetType) && + (strcmp(ConType[pAC->Index],"Auto")!=0)) { + /* Set the speed parameter back */ + printk("%s: Illegal value \"%s\" " + "for ConType." + " Using Auto.\n", + pAC->dev[0]->name, + ConType[pAC->Index]); + + sprintf(ConType[pAC->Index], "Auto"); + } + + if (strcmp(ConType[pAC->Index],"")==0) { + IsConTypeDefined = SK_FALSE; /* No ConType defined */ + } else if (strcmp(ConType[pAC->Index],"Auto")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO; + } + } else if (strcmp(ConType[pAC->Index],"100FD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS; + } + } else if (strcmp(ConType[pAC->Index],"100HD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS; + } + } else if (strcmp(ConType[pAC->Index],"10FD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS; + } + } else if (strcmp(ConType[pAC->Index],"10HD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS; + } + } else { + printk("%s: Illegal value \"%s\" for ConType\n", + pAC->dev[0]->name, ConType[pAC->Index]); + IsConTypeDefined = SK_FALSE; /* Wrong ConType defined */ + } + } else { + IsConTypeDefined = SK_FALSE; /* No ConType defined */ + } + + /* + ** Parse any parameter settings for port A: + ** a) any LinkSpeed stated? 
+ */ if (Speed_A != NULL && pAC->IndexIndex] != NULL) { if (strcmp(Speed_A[pAC->Index],"")==0) { - LinkSpeed = SK_LSPEED_AUTO; - } - else if (strcmp(Speed_A[pAC->Index],"Auto")==0) { - LinkSpeed = SK_LSPEED_AUTO; - } - else if (strcmp(Speed_A[pAC->Index],"10")==0) { - LinkSpeed = SK_LSPEED_10MBPS; - } - else if (strcmp(Speed_A[pAC->Index],"100")==0) { - LinkSpeed = SK_LSPEED_100MBPS; - } - else if (strcmp(Speed_A[pAC->Index],"1000")==0) { - LinkSpeed = SK_LSPEED_1000MBPS; + IsLinkSpeedDefined = SK_FALSE; + } else if (strcmp(Speed_A[pAC->Index],"Auto")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } else if (strcmp(Speed_A[pAC->Index],"10")==0) { + LinkSpeed = SK_LSPEED_10MBPS; + } else if (strcmp(Speed_A[pAC->Index],"100")==0) { + LinkSpeed = SK_LSPEED_100MBPS; + } else if (strcmp(Speed_A[pAC->Index],"1000")==0) { + LinkSpeed = SK_LSPEED_1000MBPS; + } else { + printk("%s: Illegal value \"%s\" for Speed_A\n", + pAC->dev[0]->name, Speed_A[pAC->Index]); + IsLinkSpeedDefined = SK_FALSE; } - else printk("%s: Illegal value for Speed_A\n", - pAC->dev[0]->name); + } else { + IsLinkSpeedDefined = SK_FALSE; } - /* Check speed parameter */ - /* Only copper type adapter and GE V2 cards */ - if (((pAC->GIni.GIChipId != CHIP_ID_YUKON) || - (pAC->GIni.GICopperType != SK_TRUE)) && - ((LinkSpeed != SK_LSPEED_AUTO) && + /* + ** Check speed parameter: + ** Only copper type adapter and GE V2 cards + */ + if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) && + ((LinkSpeed != SK_LSPEED_AUTO) && (LinkSpeed != SK_LSPEED_1000MBPS))) { printk("%s: Illegal value for Speed_A. " "Not a copper card or GE V2 card\n Using " "speed 1000\n", pAC->dev[0]->name); LinkSpeed = SK_LSPEED_1000MBPS; } - pAC->GIni.GP[0].PLinkSpeed = LinkSpeed; + + /* + ** Decide whether to set new config value if somethig valid has + ** been received. + */ + if (IsLinkSpeedDefined) { + pAC->GIni.GP[0].PLinkSpeed = LinkSpeed; + } - /* Autonegotiation */ + /* + ** b) Any Autonegotiation and DuplexCapabilities set? + ** Please note that both belong together... + */ AutoNeg = AN_ON; /* tschilling: Default: Autonegotiation on! 
*/ AutoSet = SK_FALSE; if (AutoNeg_A != NULL && pAC->IndexIndex] != NULL) { AutoSet = SK_TRUE; if (strcmp(AutoNeg_A[pAC->Index],"")==0) { - AutoSet = SK_FALSE; - } - else if (strcmp(AutoNeg_A[pAC->Index],"On")==0) { - AutoNeg = AN_ON; - } - else if (strcmp(AutoNeg_A[pAC->Index],"Off")==0) { - AutoNeg = AN_OFF; - } - else if (strcmp(AutoNeg_A[pAC->Index],"Sense")==0) { - AutoNeg = AN_SENS; + AutoSet = SK_FALSE; + } else if (strcmp(AutoNeg_A[pAC->Index],"On")==0) { + AutoNeg = AN_ON; + } else if (strcmp(AutoNeg_A[pAC->Index],"Off")==0) { + AutoNeg = AN_OFF; + } else if (strcmp(AutoNeg_A[pAC->Index],"Sense")==0) { + AutoNeg = AN_SENS; + } else { + printk("%s: Illegal value \"%s\" for AutoNeg_A\n", + pAC->dev[0]->name, AutoNeg_A[pAC->Index]); } - else printk("%s: Illegal value for AutoNeg_A\n", - pAC->dev[0]->name); } DuplexCap = DC_BOTH; - DupSet = SK_FALSE; + DupSet = SK_FALSE; if (DupCap_A != NULL && pAC->IndexIndex] != NULL) { DupSet = SK_TRUE; if (strcmp(DupCap_A[pAC->Index],"")==0) { - DupSet = SK_FALSE; - } - else if (strcmp(DupCap_A[pAC->Index],"Both")==0) { - DuplexCap = DC_BOTH; - } - else if (strcmp(DupCap_A[pAC->Index],"Full")==0) { - DuplexCap = DC_FULL; - } - else if (strcmp(DupCap_A[pAC->Index],"Half")==0) { - DuplexCap = DC_HALF; + DupSet = SK_FALSE; + } else if (strcmp(DupCap_A[pAC->Index],"Both")==0) { + DuplexCap = DC_BOTH; + } else if (strcmp(DupCap_A[pAC->Index],"Full")==0) { + DuplexCap = DC_FULL; + } else if (strcmp(DupCap_A[pAC->Index],"Half")==0) { + DuplexCap = DC_HALF; + } else { + printk("%s: Illegal value \"%s\" for DupCap_A\n", + pAC->dev[0]->name, DupCap_A[pAC->Index]); } - else printk("%s: Illegal value for DupCap_A\n", - pAC->dev[0]->name); } - /* check for illegal combinations */ - if (AutoSet && AutoNeg==AN_SENS && DupSet) { + /* + ** Check for illegal combinations + */ + if ( AutoSet && AutoNeg==AN_SENS && DupSet) { printk("%s, Port A: DuplexCapabilities" " ignored using Sense mode\n", pAC->dev[0]->name); } + if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){ printk("%s, Port A: Illegal combination" " of values AutoNeg. and DuplexCap.\n Using " "Full Duplex\n", pAC->dev[0]->name); - DuplexCap = DC_FULL; } + if (AutoSet && AutoNeg==AN_OFF && !DupSet) { DuplexCap = DC_FULL; } @@ -3618,151 +3916,183 @@ AutoNeg = AN_ON; } - /* set the desired mode */ - pAC->GIni.GP[0].PLinkModeConf = - Capabilities[AutoNeg][DuplexCap]; + /* + ** set the desired mode + */ + if (AutoSet || DupSet) { + pAC->GIni.GP[0].PLinkModeConf = Capabilities[AutoNeg][DuplexCap]; + } - pAC->GIni.GP[0].PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + /* + ** c) Any Flowcontrol-parameter set? 
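
Before the flow-control parameter is evaluated below, the Capabilities lookup may be easier to see in isolation: the AutoNeg and DupCap module parameters are folded into a single link-mode value by indexing the 3x3 table shown earlier in GetConfiguration(). A standalone illustration, using string placeholders instead of the SK_LMODE_* constants:

#include <stdio.h>

#define DC_BOTH 0
#define DC_FULL 1
#define DC_HALF 2
#define AN_OFF  0
#define AN_ON   1
#define AN_SENS 2

int main(void)
{
    /* rows: AutoNeg Off/On/Sense, columns: DuplexCap Both/Full/Half */
    static const char *Capabilities[3][3] = {
        { "-",         "Full",      "Half"      },
        { "AutoBoth",  "AutoFull",  "AutoHalf"  },
        { "AutoSense", "AutoSense", "AutoSense" },
    };

    /* AutoNeg_A=Off, DupCap_A=Full selects the fixed full-duplex mode */
    printf("%s\n", Capabilities[AN_OFF][DC_FULL]);
    /* AutoNeg_A=On, DupCap_A=Both (the defaults) selects autonegotiation */
    printf("%s\n", Capabilities[AN_ON][DC_BOTH]);
    return 0;
}
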
+ */ if (FlowCtrl_A != NULL && pAC->IndexIndex] != NULL) { if (strcmp(FlowCtrl_A[pAC->Index],"") == 0) { + IsFlowCtrlDefined = SK_FALSE; + } else if (strcmp(FlowCtrl_A[pAC->Index],"SymOrRem") == 0) { + FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; + } else if (strcmp(FlowCtrl_A[pAC->Index],"Sym")==0) { + FlowCtrl = SK_FLOW_MODE_SYMMETRIC; + } else if (strcmp(FlowCtrl_A[pAC->Index],"LocSend")==0) { + FlowCtrl = SK_FLOW_MODE_LOC_SEND; + } else if (strcmp(FlowCtrl_A[pAC->Index],"None")==0) { + FlowCtrl = SK_FLOW_MODE_NONE; + } else { + printk("%s: Illegal value \"%s\" for FlowCtrl_A\n", + pAC->dev[0]->name, FlowCtrl_A[pAC->Index]); + IsFlowCtrlDefined = SK_FALSE; } - else if (strcmp(FlowCtrl_A[pAC->Index],"SymOrRem") == 0) { - pAC->GIni.GP[0].PFlowCtrlMode = - SK_FLOW_MODE_SYM_OR_REM; - } - else if (strcmp(FlowCtrl_A[pAC->Index],"Sym")==0) { - pAC->GIni.GP[0].PFlowCtrlMode = - SK_FLOW_MODE_SYMMETRIC; - } - else if (strcmp(FlowCtrl_A[pAC->Index],"LocSend")==0) { - pAC->GIni.GP[0].PFlowCtrlMode = - SK_FLOW_MODE_LOC_SEND; - } - else if (strcmp(FlowCtrl_A[pAC->Index],"None")==0) { - pAC->GIni.GP[0].PFlowCtrlMode = - SK_FLOW_MODE_NONE; - } - else printk("Illegal value for FlowCtrl_A\n"); + } else { + IsFlowCtrlDefined = SK_FALSE; } - if (AutoNeg==AN_OFF && pAC->GIni.GP[0].PFlowCtrlMode!= - SK_FLOW_MODE_NONE) { + + if (IsFlowCtrlDefined) { + if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) { printk("%s, Port A: FlowControl" " impossible without AutoNegotiation," " disabled\n", pAC->dev[0]->name); - pAC->GIni.GP[0].PFlowCtrlMode = SK_FLOW_MODE_NONE; + FlowCtrl = SK_FLOW_MODE_NONE; + } + pAC->GIni.GP[0].PFlowCtrlMode = FlowCtrl; } - MSMode = SK_MS_MODE_AUTO; /* default: do auto select */ + /* + ** d) What is with the RoleParameter? + */ if (Role_A != NULL && pAC->IndexIndex] != NULL) { if (strcmp(Role_A[pAC->Index],"")==0) { + IsRoleDefined = SK_FALSE; + } else if (strcmp(Role_A[pAC->Index],"Auto")==0) { + MSMode = SK_MS_MODE_AUTO; + } else if (strcmp(Role_A[pAC->Index],"Master")==0) { + MSMode = SK_MS_MODE_MASTER; + } else if (strcmp(Role_A[pAC->Index],"Slave")==0) { + MSMode = SK_MS_MODE_SLAVE; + } else { + printk("%s: Illegal value \"%s\" for Role_A\n", + pAC->dev[0]->name, Role_A[pAC->Index]); + IsRoleDefined = SK_FALSE; } - else if (strcmp(Role_A[pAC->Index],"Auto")==0) { - MSMode = SK_MS_MODE_AUTO; - } - else if (strcmp(Role_A[pAC->Index],"Master")==0) { - MSMode = SK_MS_MODE_MASTER; - } - else if (strcmp(Role_A[pAC->Index],"Slave")==0) { - MSMode = SK_MS_MODE_SLAVE; - } - else printk("%s: Illegal value for Role_A\n", - pAC->dev[0]->name); + } else { + IsRoleDefined = SK_FALSE; + } + + if (IsRoleDefined == SK_TRUE) { + pAC->GIni.GP[0].PMSMode = MSMode; } - pAC->GIni.GP[0].PMSMode = MSMode; + - /* settings for port B */ - /* settings link speed */ - LinkSpeed = SK_LSPEED_AUTO; /* default: do auto select */ + /* + ** Parse any parameter settings for port B: + ** a) any LinkSpeed stated? 
+ */ + IsConTypeDefined = SK_TRUE; + IsLinkSpeedDefined = SK_TRUE; + IsFlowCtrlDefined = SK_TRUE; + IsModeDefined = SK_TRUE; + if (Speed_B != NULL && pAC->IndexIndex] != NULL) { if (strcmp(Speed_B[pAC->Index],"")==0) { - LinkSpeed = SK_LSPEED_AUTO; - } - else if (strcmp(Speed_B[pAC->Index],"Auto")==0) { - LinkSpeed = SK_LSPEED_AUTO; - } - else if (strcmp(Speed_B[pAC->Index],"10")==0) { - LinkSpeed = SK_LSPEED_10MBPS; - } - else if (strcmp(Speed_B[pAC->Index],"100")==0) { - LinkSpeed = SK_LSPEED_100MBPS; - } - else if (strcmp(Speed_B[pAC->Index],"1000")==0) { - LinkSpeed = SK_LSPEED_1000MBPS; + IsLinkSpeedDefined = SK_FALSE; + } else if (strcmp(Speed_B[pAC->Index],"Auto")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } else if (strcmp(Speed_B[pAC->Index],"10")==0) { + LinkSpeed = SK_LSPEED_10MBPS; + } else if (strcmp(Speed_B[pAC->Index],"100")==0) { + LinkSpeed = SK_LSPEED_100MBPS; + } else if (strcmp(Speed_B[pAC->Index],"1000")==0) { + LinkSpeed = SK_LSPEED_1000MBPS; + } else { + printk("%s: Illegal value \"%s\" for Speed_B\n", + pAC->dev[1]->name, Speed_B[pAC->Index]); + IsLinkSpeedDefined = SK_FALSE; } - else printk("%s: Illegal value for Speed_B\n", - pAC->dev[1]->name); + } else { + IsLinkSpeedDefined = SK_FALSE; } - /* Check speed parameter */ - /* Only copper type adapter and GE V2 cards */ - if (((pAC->GIni.GIChipId != CHIP_ID_YUKON) || - (pAC->GIni.GICopperType != SK_TRUE)) && - ((LinkSpeed != SK_LSPEED_AUTO) && + /* + ** Check speed parameter: + ** Only copper type adapter and GE V2 cards + */ + if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) && + ((LinkSpeed != SK_LSPEED_AUTO) && (LinkSpeed != SK_LSPEED_1000MBPS))) { printk("%s: Illegal value for Speed_B. " "Not a copper card or GE V2 card\n Using " "speed 1000\n", pAC->dev[1]->name); LinkSpeed = SK_LSPEED_1000MBPS; } - pAC->GIni.GP[1].PLinkSpeed = LinkSpeed; - /* Auto negotiation */ + /* + ** Decide whether to set new config value if somethig valid has + ** been received. + */ + if (IsLinkSpeedDefined) { + pAC->GIni.GP[1].PLinkSpeed = LinkSpeed; + } + + /* + ** b) Any Autonegotiation and DuplexCapabilities set? + ** Please note that both belong together... 
+ */ AutoNeg = AN_SENS; /* default: do auto Sense */ AutoSet = SK_FALSE; if (AutoNeg_B != NULL && pAC->IndexIndex] != NULL) { AutoSet = SK_TRUE; if (strcmp(AutoNeg_B[pAC->Index],"")==0) { - AutoSet = SK_FALSE; - } - else if (strcmp(AutoNeg_B[pAC->Index],"On")==0) { - AutoNeg = AN_ON; - } - else if (strcmp(AutoNeg_B[pAC->Index],"Off")==0) { - AutoNeg = AN_OFF; - } - else if (strcmp(AutoNeg_B[pAC->Index],"Sense")==0) { - AutoNeg = AN_SENS; + AutoSet = SK_FALSE; + } else if (strcmp(AutoNeg_B[pAC->Index],"On")==0) { + AutoNeg = AN_ON; + } else if (strcmp(AutoNeg_B[pAC->Index],"Off")==0) { + AutoNeg = AN_OFF; + } else if (strcmp(AutoNeg_B[pAC->Index],"Sense")==0) { + AutoNeg = AN_SENS; + } else { + printk("%s: Illegal value \"%s\" for AutoNeg_B\n", + pAC->dev[0]->name, AutoNeg_B[pAC->Index]); } - else printk("Illegal value for AutoNeg_B\n"); } DuplexCap = DC_BOTH; - DupSet = SK_FALSE; + DupSet = SK_FALSE; if (DupCap_B != NULL && pAC->IndexIndex] != NULL) { DupSet = SK_TRUE; if (strcmp(DupCap_B[pAC->Index],"")==0) { - DupSet = SK_FALSE; - } - else if (strcmp(DupCap_B[pAC->Index],"Both")==0) { - DuplexCap = DC_BOTH; - } - else if (strcmp(DupCap_B[pAC->Index],"Full")==0) { - DuplexCap = DC_FULL; - } - else if (strcmp(DupCap_B[pAC->Index],"Half")==0) { - DuplexCap = DC_HALF; + DupSet = SK_FALSE; + } else if (strcmp(DupCap_B[pAC->Index],"Both")==0) { + DuplexCap = DC_BOTH; + } else if (strcmp(DupCap_B[pAC->Index],"Full")==0) { + DuplexCap = DC_FULL; + } else if (strcmp(DupCap_B[pAC->Index],"Half")==0) { + DuplexCap = DC_HALF; + } else { + printk("%s: Illegal value \"%s\" for DupCap_B\n", + pAC->dev[0]->name, DupCap_B[pAC->Index]); } - else printk("Illegal value for DupCap_B\n"); } - /* check for illegal combinations */ + /* + ** Check for illegal combinations + */ if (AutoSet && AutoNeg==AN_SENS && DupSet) { printk("%s, Port B: DuplexCapabilities" " ignored using Sense mode\n", pAC->dev[1]->name); } + if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){ printk("%s, Port B: Illegal combination" " of values AutoNeg. and DuplexCap.\n Using " "Full Duplex\n", pAC->dev[1]->name); - DuplexCap = DC_FULL; } + if (AutoSet && AutoNeg==AN_OFF && !DupSet) { DuplexCap = DC_FULL; } @@ -3775,90 +4105,103 @@ AutoNeg = AN_ON; } - /* set the desired mode */ - pAC->GIni.GP[1].PLinkModeConf = - Capabilities[AutoNeg][DuplexCap]; + /* + ** set the desired mode + */ + if (AutoSet || DupSet) { + pAC->GIni.GP[1].PLinkModeConf = Capabilities[AutoNeg][DuplexCap]; + } - pAC->GIni.GP[1].PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + /* + ** c) Any FlowCtrl parameter set? 
+ */ if (FlowCtrl_B != NULL && pAC->IndexIndex] != NULL) { if (strcmp(FlowCtrl_B[pAC->Index],"") == 0) { + IsFlowCtrlDefined = SK_FALSE; + } else if (strcmp(FlowCtrl_B[pAC->Index],"SymOrRem") == 0) { + FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; + } else if (strcmp(FlowCtrl_B[pAC->Index],"Sym")==0) { + FlowCtrl = SK_FLOW_MODE_SYMMETRIC; + } else if (strcmp(FlowCtrl_B[pAC->Index],"LocSend")==0) { + FlowCtrl = SK_FLOW_MODE_LOC_SEND; + } else if (strcmp(FlowCtrl_B[pAC->Index],"None")==0) { + FlowCtrl = SK_FLOW_MODE_NONE; + } else { + printk("%s: Illegal value \"%s\" for FlowCtrl_B\n", + pAC->dev[0]->name, FlowCtrl_B[pAC->Index]); + IsFlowCtrlDefined = SK_FALSE; } - else if (strcmp(FlowCtrl_B[pAC->Index],"SymOrRem") == 0) { - pAC->GIni.GP[1].PFlowCtrlMode = - SK_FLOW_MODE_SYM_OR_REM; - } - else if (strcmp(FlowCtrl_B[pAC->Index],"Sym")==0) { - pAC->GIni.GP[1].PFlowCtrlMode = - SK_FLOW_MODE_SYMMETRIC; - } - else if (strcmp(FlowCtrl_B[pAC->Index],"LocSend")==0) { - pAC->GIni.GP[1].PFlowCtrlMode = - SK_FLOW_MODE_LOC_SEND; - } - else if (strcmp(FlowCtrl_B[pAC->Index],"None")==0) { - pAC->GIni.GP[1].PFlowCtrlMode = - SK_FLOW_MODE_NONE; - } - else printk("Illegal value for FlowCtrl_B\n"); + } else { + IsFlowCtrlDefined = SK_FALSE; } - if (AutoNeg==AN_OFF && pAC->GIni.GP[1].PFlowCtrlMode!= - SK_FLOW_MODE_NONE) { + + if (IsFlowCtrlDefined) { + if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) { printk("%s, Port B: FlowControl" " impossible without AutoNegotiation," " disabled\n", pAC->dev[1]->name); - pAC->GIni.GP[1].PFlowCtrlMode = SK_FLOW_MODE_NONE; + FlowCtrl = SK_FLOW_MODE_NONE; + } + pAC->GIni.GP[1].PFlowCtrlMode = FlowCtrl; } - MSMode = SK_MS_MODE_AUTO; /* default: do auto select */ + /* + ** d) What is the RoleParameter? + */ if (Role_B != NULL && pAC->IndexIndex] != NULL) { if (strcmp(Role_B[pAC->Index],"")==0) { + IsRoleDefined = SK_FALSE; + } else if (strcmp(Role_B[pAC->Index],"Auto")==0) { + MSMode = SK_MS_MODE_AUTO; + } else if (strcmp(Role_B[pAC->Index],"Master")==0) { + MSMode = SK_MS_MODE_MASTER; + } else if (strcmp(Role_B[pAC->Index],"Slave")==0) { + MSMode = SK_MS_MODE_SLAVE; + } else { + printk("%s: Illegal value \"%s\" for Role_B\n", + pAC->dev[1]->name, Role_B[pAC->Index]); + IsRoleDefined = SK_FALSE; } - else if (strcmp(Role_B[pAC->Index],"Auto")==0) { - MSMode = SK_MS_MODE_AUTO; - } - else if (strcmp(Role_B[pAC->Index],"Master")==0) { - MSMode = SK_MS_MODE_MASTER; - } - else if (strcmp(Role_B[pAC->Index],"Slave")==0) { - MSMode = SK_MS_MODE_SLAVE; - } - else printk("%s: Illegal value for Role_B\n", - pAC->dev[1]->name); + } else { + IsRoleDefined = SK_FALSE; + } + + if (IsRoleDefined) { + pAC->GIni.GP[1].PMSMode = MSMode; } - pAC->GIni.GP[1].PMSMode = MSMode; - - /* settings for both ports */ + /* + ** Evaluate settings for both ports + */ pAC->ActivePort = 0; if (PrefPort != NULL && pAC->IndexIndex] != NULL) { if (strcmp(PrefPort[pAC->Index],"") == 0) { /* Auto */ - pAC->ActivePort = 0; - pAC->Rlmt.Net[0].Preference = -1; /* auto */ - pAC->Rlmt.Net[0].PrefPort = 0; - } - else if (strcmp(PrefPort[pAC->Index],"A") == 0) { - /* - * do not set ActivePort here, thus a port - * switch is issued after net up. - */ - Port = 0; - pAC->Rlmt.Net[0].Preference = Port; - pAC->Rlmt.Net[0].PrefPort = Port; - } - else if (strcmp(PrefPort[pAC->Index],"B") == 0) { - /* - * do not set ActivePort here, thus a port - * switch is issued after net up. 
- */ - Port = 1; - pAC->Rlmt.Net[0].Preference = Port; - pAC->Rlmt.Net[0].PrefPort = Port; + pAC->ActivePort = 0; + pAC->Rlmt.Net[0].Preference = -1; /* auto */ + pAC->Rlmt.Net[0].PrefPort = 0; + } else if (strcmp(PrefPort[pAC->Index],"A") == 0) { + /* + ** do not set ActivePort here, thus a port + ** switch is issued after net up. + */ + Port = 0; + pAC->Rlmt.Net[0].Preference = Port; + pAC->Rlmt.Net[0].PrefPort = Port; + } else if (strcmp(PrefPort[pAC->Index],"B") == 0) { + /* + ** do not set ActivePort here, thus a port + ** switch is issued after net up. + */ + Port = 1; + pAC->Rlmt.Net[0].Preference = Port; + pAC->Rlmt.Net[0].PrefPort = Port; + } else { + printk("%s: Illegal value \"%s\" for PrefPort\n", + pAC->dev[0]->name, PrefPort[pAC->Index]); } - else printk("%s: Illegal value for PrefPort\n", - pAC->dev[0]->name); } pAC->RlmtNets = 1; @@ -3867,33 +4210,126 @@ RlmtMode[pAC->Index] != NULL) { if (strcmp(RlmtMode[pAC->Index], "") == 0) { pAC->RlmtMode = 0; - } - else if (strcmp(RlmtMode[pAC->Index], "CheckLinkState") == 0) { + } else if (strcmp(RlmtMode[pAC->Index], "CheckLinkState") == 0) { pAC->RlmtMode = SK_RLMT_CHECK_LINK; - } - else if (strcmp(RlmtMode[pAC->Index], "CheckLocalPort") == 0) { - pAC->RlmtMode = SK_RLMT_CHECK_LINK | - SK_RLMT_CHECK_LOC_LINK; - } - else if (strcmp(RlmtMode[pAC->Index], "CheckSeg") == 0) { + } else if (strcmp(RlmtMode[pAC->Index], "CheckLocalPort") == 0) { pAC->RlmtMode = SK_RLMT_CHECK_LINK | - SK_RLMT_CHECK_LOC_LINK | - SK_RLMT_CHECK_SEG; - } - else if ((strcmp(RlmtMode[pAC->Index], "DualNet") == 0) && + SK_RLMT_CHECK_LOC_LINK; + } else if (strcmp(RlmtMode[pAC->Index], "CheckSeg") == 0) { + pAC->RlmtMode = SK_RLMT_CHECK_LINK | + SK_RLMT_CHECK_LOC_LINK | + SK_RLMT_CHECK_SEG; + } else if ((strcmp(RlmtMode[pAC->Index], "DualNet") == 0) && (pAC->GIni.GIMacsFound == 2)) { - pAC->RlmtMode = SK_RLMT_CHECK_LINK; - pAC->RlmtNets = 2; - } - else { - printk("%s: Illegal value for" - " RlmtMode, using default\n", pAC->dev[0]->name); + pAC->RlmtMode = SK_RLMT_CHECK_LINK; + pAC->RlmtNets = 2; + } else { + printk("%s: Illegal value \"%s\" for" + " RlmtMode, using default\n", + pAC->dev[0]->name, RlmtMode[pAC->Index]); pAC->RlmtMode = 0; } - } - else { + } else { pAC->RlmtMode = 0; } + + /* + ** Check the interrupt moderation parameters + */ + if (Moderation[pAC->Index] != NULL) { + if (strcmp(Moderation[pAC->Index], "Static") == 0) { + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_STATIC; + } else if (strcmp(Moderation[pAC->Index], "Dynamic") == 0) { + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_DYNAMIC; + } else { + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE; + } + } else { + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE; + } + + if (Stats[pAC->Index] != NULL) { + if (strcmp(Stats[pAC->Index], "Yes") == 0) { + pAC->DynIrqModInfo.DisplayStats = SK_TRUE; + } else { + pAC->DynIrqModInfo.DisplayStats = SK_FALSE; + } + } else { + pAC->DynIrqModInfo.DisplayStats = SK_FALSE; + } + + if (ModerationMask[pAC->Index] != NULL) { + if (strcmp(ModerationMask[pAC->Index], "Rx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_ONLY; + } else if (strcmp(ModerationMask[pAC->Index], "Tx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_ONLY; + } else if (strcmp(ModerationMask[pAC->Index], "Sp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_ONLY; + } else if (strcmp(ModerationMask[pAC->Index], "RxSp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_RX; + } else if (strcmp(ModerationMask[pAC->Index], "SpRx") 
== 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_RX; + } else if (strcmp(ModerationMask[pAC->Index], "RxTx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX; + } else if (strcmp(ModerationMask[pAC->Index], "TxRx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX; + } else if (strcmp(ModerationMask[pAC->Index], "TxSp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_TX; + } else if (strcmp(ModerationMask[pAC->Index], "SpTx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_TX; + } else if (strcmp(ModerationMask[pAC->Index], "RxTxSp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "RxSpTx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "TxRxSp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "TxSpRx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "SpTxRx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "SpRxTx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else { /* some rubbish */ + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_ONLY; + } + } else { /* operator has stated nothing */ + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX; + } + + if (AutoSizing[pAC->Index] != NULL) { + if (strcmp(AutoSizing[pAC->Index], "On") == 0) { + pAC->DynIrqModInfo.AutoSizing = SK_FALSE; + } else { + pAC->DynIrqModInfo.AutoSizing = SK_FALSE; + } + } else { /* operator has stated nothing */ + pAC->DynIrqModInfo.AutoSizing = SK_FALSE; + } + + if (IntsPerSec[pAC->Index] != 0) { + if ((IntsPerSec[pAC->Index]< 30)&&(IntsPerSec[pAC->Index]> 40000)) { + pAC->DynIrqModInfo.MaxModIntsPerSec = C_INTS_PER_SEC_DEFAULT; + } else { + pAC->DynIrqModInfo.MaxModIntsPerSec = IntsPerSec[pAC->Index]; + } + } else { + pAC->DynIrqModInfo.MaxModIntsPerSec = C_INTS_PER_SEC_DEFAULT; + } + + /* + ** Evaluate upper and lower moderation threshold + */ + pAC->DynIrqModInfo.MaxModIntsPerSecUpperLimit = + pAC->DynIrqModInfo.MaxModIntsPerSec + + (pAC->DynIrqModInfo.MaxModIntsPerSec / 2); + + pAC->DynIrqModInfo.MaxModIntsPerSecLowerLimit = + pAC->DynIrqModInfo.MaxModIntsPerSec - + (pAC->DynIrqModInfo.MaxModIntsPerSec / 2); + + pAC->DynIrqModInfo.PrevTimeVal = jiffies; /* initial value */ + + } /* GetConfiguration */ @@ -3928,8 +4364,44 @@ } } /* ProductStr */ +/***************************************************************************** + * + * StartDrvCleanupTimer - Start timer to check for descriptors which + * might be placed in descriptor ring, but + * havent been handled up to now + * + * Description: + * This function requests a HW-timer fo the Yukon card. The actions to + * perform when this timer expires, are located in the SkDrvEvent(). 
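
StartDrvCleanupTimer() and StopDrvCleanupTimer() above use the driver's own SkTimer module (SkTimerStart/SkTimerStop with an SK_DRV_RX_CLEANUP_TIMER event) to re-check the RX rings every few milliseconds. For comparison only, the same periodic-cleanup idea expressed with the plain kernel timer API would look roughly like the generic sketch below; the names and the interval are illustrative, this is not the driver's mechanism.

#include <linux/timer.h>
#include <linux/sched.h>        /* jiffies, HZ */

static struct timer_list cleanup_timer;

static void cleanup_timeout(unsigned long data)
{
    /* ... re-poll the RX descriptor rings here ... */
    mod_timer(&cleanup_timer, jiffies + HZ / 100);  /* re-arm, roughly 10 ms */
}

static void start_cleanup_timer(void)
{
    init_timer(&cleanup_timer);
    cleanup_timer.function = cleanup_timeout;
    cleanup_timer.data = 0;
    cleanup_timer.expires = jiffies + HZ / 100;
    add_timer(&cleanup_timer);
}

static void stop_cleanup_timer(void)
{
    del_timer(&cleanup_timer);
}
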
+ * + * Returns: N/A + */ +static void +StartDrvCleanupTimer(SK_AC *pAC) { + SK_EVPARA EventParam; /* Event struct for timer event */ + + SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam)); + EventParam.Para32[0] = SK_DRV_RX_CLEANUP_TIMER; + SkTimerStart(pAC, pAC->IoBase, &pAC->DrvCleanupTimer, + SK_DRV_RX_CLEANUP_TIMER_LENGTH, + SKGE_DRV, SK_DRV_TIMER, EventParam); +} - +/***************************************************************************** + * + * StopDrvCleanupTimer - Stop timer to check for descriptors + * + * Description: + * This function requests a HW-timer fo the Yukon card. The actions to + * perform when this timer expires, are located in the SkDrvEvent(). + * + * Returns: N/A + */ +static void +StopDrvCleanupTimer(SK_AC *pAC) { + SkTimerStop(pAC, pAC->IoBase, &pAC->DrvCleanupTimer); + SK_MEMSET((char *) &pAC->DrvCleanupTimer, 0, sizeof(SK_TIMER)); +} /****************************************************************************/ /* functions for common modules *********************************************/ @@ -3988,8 +4460,8 @@ * Nothing */ void SkDrvFreeRlmtMbuf( -SK_AC *pAC, /* pointer to adapter context */ -SK_IOC IoC, /* the IO-context */ +SK_AC *pAC, /* pointer to adapter context */ +SK_IOC IoC, /* the IO-context */ SK_MBUF *pMbuf) /* size of the requested buffer */ { SK_MBUF *pFreeMbuf; @@ -4018,7 +4490,9 @@ */ SK_U64 SkOsGetTime(SK_AC *pAC) { - return jiffies; + SK_U64 PrivateJiffies; + SkOsGetTimeCurrent(pAC, &PrivateJiffies); + return PrivateJiffies; } /* SkOsGetTime */ @@ -4266,6 +4740,9 @@ printk(" speed: unknown\n"); } + /* Mac update */ + SkAddrMcUpdate(pAC,IoC, FromPort); + Stat = pAC->GIni.GP[FromPort].PLinkModeStatus; if (Stat == SK_LMODE_STAT_AUTOHALF || Stat == SK_LMODE_STAT_AUTOFULL) { @@ -4310,18 +4787,26 @@ printk(" role: ???\n"); } } - + #ifdef SK_ZEROCOPY - if (pAC->GIni.GIChipId == CHIP_ID_YUKON) + if (pAC->ChipsetType) +#ifdef USE_SK_TX_CHECKSUM printk(" scatter-gather: enabled\n"); +#else + printk(" tx-checksum: disabled\n"); +#endif else printk(" scatter-gather: disabled\n"); - #else printk(" scatter-gather: disabled\n"); #endif - - if ((Param.Para32[0] != pAC->ActivePort) && + +#ifndef USE_SK_RX_CHECKSUM + printk(" rx-checksum: disabled\n"); +#endif + + + if ((Param.Para32[0] != pAC->ActivePort) && (pAC->RlmtNets == 1)) { NewPara.Para32[0] = pAC->ActivePort; NewPara.Para32[1] = Param.Para32[0]; @@ -4376,7 +4861,7 @@ ClearTxRing(pAC, &pAC->TxPort[FromPort][TX_PRIO_LOW]); ClearTxRing(pAC, &pAC->TxPort[ToPort][TX_PRIO_LOW]); spin_lock_irqsave( - &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); spin_lock_irqsave( &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags); @@ -4438,6 +4923,31 @@ DEV_KFREE_SKB_ANY(pMsg); break; + case SK_DRV_TIMER: + if (Param.Para32[0] == SK_DRV_MODERATION_TIMER) { + /* + ** expiration of the moderation timer implies that + ** dynamic moderation is to be applied + */ + SkDimStartModerationTimer(pAC); + SkDimModerate(pAC); + if (pAC->DynIrqModInfo.DisplayStats) { + SkDimDisplayModerationSettings(pAC); + } + } else if (Param.Para32[0] == SK_DRV_RX_CLEANUP_TIMER) { + /* + ** check if we need to check for descriptors which + ** haven't been handled the last millisecs + */ + StartDrvCleanupTimer(pAC); + if (pAC->GIni.GIMacsFound == 2) { + ReceiveIrq(pAC, &pAC->RxPort[1], SK_FALSE); + } + ReceiveIrq(pAC, &pAC->RxPort[0], SK_FALSE); + } else { + printk("Expiration of unknown timer\n"); + } + break; default: break; } @@ -4545,7 +5055,7 @@ * DumpData - print a 
data area * * Description: - * This function prints a area of data to the system logfile/to the + * This function prints a area of data to the system logfile/to the * console. * * Returns: N/A @@ -4593,7 +5103,7 @@ * DumpLong - print a data area as long values * * Description: - * This function prints a area of data to the system logfile/to the + * This function prints a area of data to the system logfile/to the * console. * * Returns: N/A @@ -4647,9 +5157,8 @@ #endif -/* - * Local variables: - * compile-command: "make" - * End: - */ - +/******************************************************************************* + * + * End of file + * + ******************************************************************************/ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skgehwt.c linux.22-ac2/drivers/net/sk98lin/skgehwt.c --- linux.vanilla/drivers/net/sk98lin/skgehwt.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skgehwt.c 2003-08-13 14:10:39.000000000 +0100 @@ -1,17 +1,17 @@ /****************************************************************************** * * Name: skgehwt.c - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.13 $ - * Date: $Date: 1999/11/22 13:31:12 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.14 $ + * Date: $Date: 2003/05/13 18:01:58 $ * Purpose: Hardware Timer. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +27,9 @@ * History: * * $Log: skgehwt.c,v $ + * Revision 1.14 2003/05/13 18:01:58 mkarl + * Editorial changes. + * * Revision 1.13 1999/11/22 13:31:12 cgoos * Changed license header to GPL. * @@ -76,8 +79,10 @@ /* Event queue and dispatcher */ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) static const char SysKonnectFileId[] = - "$Header: /usr56/projects/ge/schedule/skgehwt.c,v 1.13 1999/11/22 13:31:12 cgoos Exp $" ; + "$Header: /usr56/projects/ge/schedule/skgehwt.c,v 1.14 2003/05/13 18:01:58 mkarl Exp $" ; +#endif #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. 
*/ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skgeinit.c linux.22-ac2/drivers/net/sk98lin/skgeinit.c --- linux.vanilla/drivers/net/sk98lin/skgeinit.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skgeinit.c 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skgeinit.c - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.82 $ - * Date: $Date: 2002/12/05 13:40:21 $ - * Purpose: Contains functions to initialize the GE HW + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.93 $ + * Date: $Date: 2003/05/28 15:44:43 $ + * Purpose: Contains functions to initialize the adapter * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,66 @@ * History: * * $Log: skgeinit.c,v $ + * Revision 1.93 2003/05/28 15:44:43 rschmidt + * Added check for chip Id on WOL WA for chip Rev. A. + * Added setting of GILevel in SkGeDeInit(). + * Minor changes to avoid LINT warnings. + * Editorial changes. + * + * Revision 1.92 2003/05/13 17:42:26 mkarl + * Added SK_FAR for PXE. + * Separated code pathes not used for SLIM driver to reduce code size. + * Removed calls to I2C for SLIM driver. + * Removed currently unused function SkGeLoadLnkSyncCnt. + * Editorial changes. + * + * Revision 1.91 2003/05/06 12:21:48 rschmidt + * Added use of pAC->GIni.GIYukon for selection of YUKON branches. + * Added defines around GENESIS resp. YUKON branches to reduce + * code size for PXE. + * Editorial changes. + * + * Revision 1.90 2003/04/28 09:12:20 rschmidt + * Added init for GIValIrqMask (common IRQ mask). + * Disabled HW Error IRQ on Yukon if sensor IRQ is set in SkGeInit1() + * by changing the common mask stored in GIValIrqMask. + * Editorial changes. + * + * Revision 1.89 2003/04/10 14:33:10 rschmidt + * Fixed alignement error of patchable configuration parameter + * in struct OemConfig caused by length of recognition string. + * + * Revision 1.88 2003/04/09 12:59:45 rschmidt + * Added define around initialization of patchable OEM specific + * configuration parameter. + * + * Revision 1.87 2003/04/08 16:46:13 rschmidt + * Added configuration variable for OEMs and initialization for + * GILedBlinkCtrl (LED Blink Control). + * Improved detection for YUKON-Lite Rev. A1. + * Editorial changes. + * + * Revision 1.86 2003/03/31 06:53:13 mkarl + * Corrected Copyright. + * Editorial changes. + * + * Revision 1.85 2003/02/05 15:30:33 rschmidt + * Corrected setting of GIHstClkFact (Host Clock Factor) and + * GIPollTimerVal (Descr. Poll Timer Init Value) for YUKON. + * Editorial changes. + * + * Revision 1.84 2003/01/28 09:57:25 rschmidt + * Added detection of YUKON-Lite Rev. A0 (stored in GIYukonLite). + * Disabled Rx GMAC FIFO Flush for YUKON-Lite Rev. A0. + * Added support for CLK_RUN (YUKON-Lite). + * Added additional check of PME from D3cold for setting GIVauxAvail. + * Editorial changes. + * + * Revision 1.83 2002/12/17 16:15:41 rschmidt + * Added default setting of PhyType (Copper) for YUKON. 
+ * Added define around check for HW self test results. + * Editorial changes. + * * Revision 1.82 2002/12/05 13:40:21 rschmidt * Added setting of Rx GMAC FIFO Flush Mask register. * Corrected PhyType with new define SK_PHY_MARV_FIBER when @@ -383,8 +444,10 @@ /* local variables ************************************************************/ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) static const char SysKonnectFileId[] = - "@(#)$Id: skgeinit.c,v 1.82 2002/12/05 13:40:21 rschmidt Exp $ (C) SK "; + "@(#) $Id: skgeinit.c,v 1.93 2003/05/28 15:44:43 rschmidt Exp $ (C) Marvell."; +#endif struct s_QOffTab { int RxQOff; /* Receive Queue Address Offset */ @@ -395,14 +458,27 @@ {Q_R1, Q_XS1, Q_XA1}, {Q_R2, Q_XS2, Q_XA2} }; +struct s_Config { + char ScanString[8]; + SK_U32 Value; +}; + +static struct s_Config OemConfig = { + {'O','E','M','_','C','o','n','f'}, +#ifdef SK_OEM_CONFIG + OEM_CONFIG_VALUE, +#else + 0, +#endif +}; /****************************************************************************** * * SkGePollRxD() - Enable / Disable Descriptor Polling of RxD Ring * * Description: - * Enable or disable the descriptor polling the receive descriptor - * ring (RxD) of port 'Port'. + * Enable or disable the descriptor polling of the receive descriptor + * ring (RxD) for port 'Port'. * The new configuration is *not* saved over any SkGeStopPort() and * SkGeInitPort() calls. * @@ -429,8 +505,8 @@ * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings * * Description: - * Enable or disable the descriptor polling the transmit descriptor - * ring(s) (TxD) of port 'Port'. + * Enable or disable the descriptor polling of the transmit descriptor + * ring(s) (TxD) for port 'Port'. * The new configuration is *not* saved over any SkGeStopPort() and * SkGeInitPort() calls. * @@ -448,7 +524,7 @@ pPrt = &pAC->GIni.GP[Port]; - DWord = (PollTxD) ? CSR_ENA_POL : CSR_DIS_POL; + DWord = (SK_U32)(PollTxD ? CSR_ENA_POL : CSR_DIS_POL); if (pPrt->PXSQSize != 0) { SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), DWord); @@ -489,6 +565,7 @@ } /* SkGeYellowLED */ +#if (!defined(SK_SLIM) || defined(GENESIS)) /****************************************************************************** * * SkGeXmitLED() - Modify the Operational Mode of a transmission LED. @@ -544,6 +621,7 @@ * (In this case it has to be added here. But we will see. XXX) */ } /* SkGeXmitLED */ +#endif /* !SK_SLIM || GENESIS */ /****************************************************************************** @@ -564,12 +642,12 @@ * 1: configuration error */ static int DoCalcAddr( -SK_AC *pAC, /* adapter context */ -SK_GEPORT *pPrt, /* port index */ -int QuSize, /* size of the queue to configure in kB */ -SK_U32 *StartVal, /* start value for address calculation */ -SK_U32 *QuStartAddr, /* start addr to calculate */ -SK_U32 *QuEndAddr) /* end address to calculate */ +SK_AC *pAC, /* adapter context */ +SK_GEPORT SK_FAR *pPrt, /* port index */ +int QuSize, /* size of the queue to configure in kB */ +SK_U32 SK_FAR *StartVal, /* start value for address calculation */ +SK_U32 SK_FAR *QuStartAddr,/* start addr to calculate */ +SK_U32 SK_FAR *QuEndAddr) /* end address to calculate */ { SK_U32 EndVal; SK_U32 NextStart; @@ -644,7 +722,6 @@ return(2); } - if (DualNet) { /* every port gets the same amount of memory */ ActivePortKilobytes = pAC->GIni.GIRamSize / pAC->GIni.GIMacsFound; @@ -746,14 +823,19 @@ int Port) /* port index */ { SK_GEPORT *pPrt; - int UsedMem; /* total memory used (max. 
found ports) */ int i; int Rtv; int Rtv2; SK_U32 StartAddr; +#ifndef SK_SLIM + int UsedMem; /* total memory used (max. found ports) */ +#endif - UsedMem = 0; Rtv = 0; + +#ifndef SK_SLIM + + UsedMem = 0; for (i = 0; i < pAC->GIni.GIMacsFound; i++) { pPrt = &pAC->GIni.GP[i]; @@ -789,6 +871,7 @@ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E012, SKERR_HWI_E012MSG); return(1); } +#endif /* !SK_SLIM */ /* Now start address calculation */ StartAddr = pAC->GIni.GIRamOffs; @@ -820,6 +903,7 @@ } /* SkGeCheckQSize */ +#ifdef GENESIS /****************************************************************************** * * SkGeInitMacArb() - Initialize the MAC Arbiter @@ -854,7 +938,7 @@ /* Fast Output Enable Mode was intended to use with Rev. B2, but now? */ /* - * There is not start or enable button to push, therefore + * There is no start or enable button to push, therefore * the MAC arbiter is configured and enabled now. */ } /* SkGeInitMacArb */ @@ -899,6 +983,7 @@ } } } /* SkGeInitPktArb */ +#endif /* GENESIS */ /****************************************************************************** @@ -916,6 +1001,7 @@ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ { + SK_U16 Word; #ifdef VCPU SK_U32 DWord; #endif /* VCPU */ @@ -927,6 +1013,9 @@ * - enable the FIFO */ + Word = (SK_U16)GMF_RX_CTRL_DEF; + +#ifdef GENESIS if (pAC->GIni.GIGenesis) { /* Configure Rx MAC FIFO */ SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_CLR); @@ -943,20 +1032,29 @@ SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_FLUSH); } } - else { - /* Configure Rx MAC FIFO */ - SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR); - SK_OUT32(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), GMF_RX_CTRL_DEF | - GMF_RX_F_FL_ON); /* enable Rx GMAC FIFO Flush */ +#endif /* GENESIS */ +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* set Rx GMAC FIFO Flush Mask */ SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_FL_MSK), (SK_U16)RX_FF_FL_DEF_MSK); - /* use Rx GMAC FIFO Flush Threshold default value (0x0a == 56 bytes) */ + /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ + if (pAC->GIni.GIYukonLite && pAC->GIni.GIChipId == CHIP_ID_YUKON) { + + Word &= ~GMF_RX_F_FL_ON; + } + + /* Configure Rx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR); + SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), Word); + + /* set Rx GMAC FIFO Flush Threshold (default: 0x0a -> 56 bytes) */ + SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); /* Configure Tx MAC FIFO */ SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR); - SK_OUT32(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), GMF_TX_CTRL_DEF); + SK_OUT16(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U16)GMF_TX_CTRL_DEF); #ifdef VCPU SK_IN32(IoC, MR_ADDR(Port, RX_GMF_AF_THR), &DWord); @@ -966,9 +1064,11 @@ /* set Tx GMAC FIFO Almost Empty Threshold */ /* SK_OUT32(IoC, MR_ADDR(Port, TX_GMF_AE_THR), 0); */ } -} /* SkGeInitMacFifo */ +#endif /* YUKON */ +} /* SkGeInitMacFifo */ +#ifdef SK_LNK_SYNC_CNT /****************************************************************************** * * SkGeLoadLnkSyncCnt() - Load the Link Sync Counter and starts counting @@ -1041,8 +1141,9 @@ SK_OUT32(IoC, B0_IMSK, OrgIMsk); } } /* SkGeLoadLnkSyncCnt*/ +#endif /* SK_LNK_SYNC_CNT */ - +#if defined(SK_DIAG) || defined(SK_CFG_SYNC) /****************************************************************************** * * SkGeCfgSync() - Configure synchronous bandwidth for this port. 
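In the SkGeInitMacFifo hunk above, the Yukon path builds the Rx control word from a default value and clears the FIFO-flush bit before writing it when the chip is a YUKON-Lite Rev. A0. Below is a compact sketch of that "default word minus per-revision feature bit" workaround; the register constants, the simplified revision test and write_rx_ctrl are assumptions of the sketch, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only, not the driver's register layout. */
#define RX_CTRL_DEFAULT   0x000Au   /* assumed default control word */
#define RX_FLUSH_ENABLE   0x0008u   /* assumed "FIFO flush on" bit  */
#define CHIP_YUKON        0xB0u     /* assumed chip id              */

struct chip_info {
    unsigned id;
    unsigned rev;       /* 0 == Rev. A0 in this sketch */
    int      is_lite;
};

static void write_rx_ctrl(uint16_t word)
{
    /* stands in for the 16-bit register write in the driver */
    printf("RX_CTRL <- 0x%04x\n", (unsigned)word);
}

static void init_rx_fifo(const struct chip_info *chip)
{
    uint16_t word = RX_CTRL_DEFAULT | RX_FLUSH_ENABLE;

    /* workaround: no Rx FIFO flush on YUKON-Lite Rev. A0 */
    if (chip->is_lite && chip->id == CHIP_YUKON && chip->rev == 0)
        word &= (uint16_t)~RX_FLUSH_ENABLE;

    write_rx_ctrl(word);
}

int main(void)
{
    struct chip_info rev_a0 = { CHIP_YUKON, 0, 1 };
    struct chip_info rev_a1 = { CHIP_YUKON, 1, 1 };

    init_rx_fifo(&rev_a0);  /* flush bit cleared  */
    init_rx_fifo(&rev_a1);  /* flush bit kept set */
    return 0;
}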
@@ -1131,6 +1232,7 @@ return(0); } /* SkGeCfgSync */ +#endif /* SK_DIAG || SK_CFG_SYNC*/ /****************************************************************************** @@ -1196,7 +1298,7 @@ * we NEED Store & Forward of the RAM buffer. */ if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK || - !pAC->GIni.GIGenesis) { + pAC->GIni.GIYukon) { /* enable Store & Forward Mode for the Tx Side */ SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_ENA_STFWD); } @@ -1401,7 +1503,7 @@ * Example: * 1) A Link Down event was signaled for a port. Therefore the activity * of this port should be stopped and a hardware reset should be issued - * to enable the workaround of XMAC errata #2. But the received frames + * to enable the workaround of XMAC Errata #2. But the received frames * should not be discarded. * ... * SkGeStopPort(pAC, IoC, Port, SK_STOP_TX, SK_HARD_RST); @@ -1468,8 +1570,8 @@ SK_U32 DWord; SK_U32 XsCsr; SK_U32 XaCsr; - int i; SK_U64 ToutStart; + int i; int ToutCnt; pPrt = &pAC->GIni.GP[Port]; @@ -1493,8 +1595,8 @@ * Clear packet arbiter timeout to make sure * this loop will terminate. */ - SK_OUT16(IoC, B3_PA_CTRL, (Port == MAC_1) ? PA_CLR_TO_TX1 : - PA_CLR_TO_TX2); + SK_OUT16(IoC, B3_PA_CTRL, (SK_U16)((Port == MAC_1) ? + PA_CLR_TO_TX1 : PA_CLR_TO_TX2)); /* * If the transfer stucks at the MAC the STOP command will not @@ -1574,6 +1676,7 @@ SK_OUT8(IoC, RB_ADDR(pPrt->PXsQOff, RB_CTRL), RB_RST_SET); /* Reset Tx MAC FIFO */ +#ifdef GENESIS if (pAC->GIni.GIGenesis) { /* Note: MFF_RST_SET does NOT reset the XMAC ! */ SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_SET); @@ -1582,10 +1685,14 @@ /* Link LED is switched off by the RLMT and the Diag itself */ SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_DIS); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* Reset TX MAC FIFO */ SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_SET); } +#endif /* YUKON */ } if ((Dir & SK_STOP_RX) != 0) { @@ -1604,9 +1711,9 @@ * Clear packet arbiter timeout to make sure * this loop will terminate */ - SK_OUT16(IoC, B3_PA_CTRL, (Port == MAC_1) ? PA_CLR_TO_RX1 : - PA_CLR_TO_RX2); - + SK_OUT16(IoC, B3_PA_CTRL, (SK_U16)((Port == MAC_1) ? 
+ PA_CLR_TO_RX1 : PA_CLR_TO_RX2)); + DWord = TestStopBit(pAC, IoC, pPrt->PRxQOff); /* timeout if i==0 (bug fix for #10748) */ @@ -1632,6 +1739,7 @@ SK_OUT8(IoC, RB_ADDR(pPrt->PRxQOff, RB_CTRL), RB_RST_SET); /* Reset Rx MAC FIFO */ +#ifdef GENESIS if (pAC->GIni.GIGenesis) { SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_SET); @@ -1639,10 +1747,14 @@ /* switch Rx LED off, stop the LED counter */ SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_DIS); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* Reset Rx MAC FIFO */ SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_SET); } +#endif /* YUKON */ } } /* SkGeStopPort */ @@ -1679,27 +1791,29 @@ pPrt->PPrevRx = 0; pPrt->PPrevFcs = 0; pPrt->PRxLim = SK_DEF_RX_WA_LIM; - pPrt->PLinkMode = SK_LMODE_AUTOFULL; - pPrt->PLinkSpeedCap = SK_LSPEED_CAP_1000MBPS; - pPrt->PLinkSpeed = SK_LSPEED_1000MBPS; - pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_UNKNOWN; - pPrt->PLinkModeConf = SK_LMODE_AUTOSENSE; - pPrt->PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; - pPrt->PLinkBroken = SK_TRUE; /* See WA code */ - pPrt->PLinkCap = (SK_LMODE_CAP_HALF | SK_LMODE_CAP_FULL | - SK_LMODE_CAP_AUTOHALF | SK_LMODE_CAP_AUTOFULL); - pPrt->PLinkModeStatus = SK_LMODE_STAT_UNKNOWN; - pPrt->PFlowCtrlCap = SK_FLOW_MODE_SYM_OR_REM; - pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; + pPrt->PLinkMode = (SK_U8)SK_LMODE_AUTOFULL; + pPrt->PLinkSpeedCap = (SK_U8)SK_LSPEED_CAP_1000MBPS; + pPrt->PLinkSpeed = (SK_U8)SK_LSPEED_1000MBPS; + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_UNKNOWN; + pPrt->PLinkModeConf = (SK_U8)SK_LMODE_AUTOSENSE; + pPrt->PFlowCtrlMode = (SK_U8)SK_FLOW_MODE_SYM_OR_REM; + pPrt->PLinkCap = (SK_U8)(SK_LMODE_CAP_HALF | SK_LMODE_CAP_FULL | + SK_LMODE_CAP_AUTOHALF | SK_LMODE_CAP_AUTOFULL); + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN; + pPrt->PFlowCtrlCap = (SK_U8)SK_FLOW_MODE_SYM_OR_REM; + pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE; pPrt->PMSCap = 0; - pPrt->PMSMode = SK_MS_MODE_AUTO; - pPrt->PMSStatus = SK_MS_STAT_UNSET; + pPrt->PMSMode = (SK_U8)SK_MS_MODE_AUTO; + pPrt->PMSStatus = (SK_U8)SK_MS_STAT_UNSET; + pPrt->PLipaAutoNeg = (SK_U8)SK_LIPA_UNKNOWN; pPrt->PAutoNegFail = SK_FALSE; - pPrt->PLipaAutoNeg = SK_LIPA_UNKNOWN; pPrt->PHWLinkUp = SK_FALSE; + pPrt->PLinkBroken = SK_TRUE; /* See WA code */ } pAC->GIni.GIPortUsage = SK_RED_LINK; + pAC->GIni.GILedBlinkCtrl = (SK_U16)OemConfig.Value; + pAC->GIni.GIValIrqMask = IS_ALL_MSK; } /* SkGeInit0*/ @@ -1766,8 +1880,8 @@ SkPciReadCfgDWord(pAC, PCI_BASE_2ND, &Bp2); SkPciReadCfgByte(pAC, PCI_LAT_TIM, &Lat); - if (PciCmd != 0 || Cls != 0 || (Bp1 & 0xfffffff0L) != 0 || Bp2 != 1 || - Lat != 0) { + if (PciCmd != 0 || Cls != (SK_U8)0 || Lat != (SK_U8)0 || + (Bp1 & 0xfffffff0L) != 0 || Bp2 != 1) { return(1); } @@ -1779,7 +1893,7 @@ return(0); } /* SkGePciReset */ -#endif /* SK_PCI_RESET */ +#endif /* SK_PCI_RESET */ /****************************************************************************** * @@ -1808,22 +1922,27 @@ { SK_U8 Byte; SK_U16 Word; + SK_U16 CtrlStat; + SK_U32 DWord; int RetVal; int i; RetVal = 0; + /* save CLK_RUN bits (YUKON-Lite) */ + SK_IN16(IoC, B0_CTST, &CtrlStat); + #ifdef SK_PCI_RESET (void)SkGePciReset(pAC, IoC); -#endif /* SK_PCI_RESET */ +#endif /* SK_PCI_RESET */ - /* Do the SW-reset */ + /* do the SW-reset */ SK_OUT8(IoC, B0_CTST, CS_RST_SET); - /* Release the SW-reset */ + /* release the SW-reset */ SK_OUT8(IoC, B0_CTST, CS_RST_CLR); - /* Reset all error bits in the PCI STATUS register */ + /* reset all error bits in the PCI STATUS register */ /* * Note: PCI 
Cfg cycles cannot be used, because they are not * available on some platforms after 'boot time'. @@ -1831,31 +1950,47 @@ SK_IN16(IoC, PCI_C(PCI_STATUS), &Word); SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); - SK_OUT16(IoC, PCI_C(PCI_STATUS), Word | PCI_ERRBITS); + SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS)); SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); - /* Release Master Reset */ + /* release Master Reset */ SK_OUT8(IoC, B0_CTST, CS_MRST_CLR); - /* Read Chip Identification Number */ +#ifdef CLK_RUN + CtrlStat |= CS_CLK_RUN_ENA; +#endif /* CLK_RUN */ + + /* restore CLK_RUN bits */ + SK_OUT16(IoC, B0_CTST, (SK_U16)(CtrlStat & + (CS_CLK_RUN_HOT | CS_CLK_RUN_RST | CS_CLK_RUN_ENA))); + + /* read Chip Identification Number */ SK_IN8(IoC, B2_CHIP_ID, &Byte); pAC->GIni.GIChipId = Byte; - /* Read number of MACs */ + /* read number of MACs */ SK_IN8(IoC, B2_MAC_CFG, &Byte); pAC->GIni.GIMacsFound = (Byte & CFG_SNG_MAC) ? 1 : 2; - /* Get Chip Revision Number */ + /* get Chip Revision Number */ pAC->GIni.GIChipRev = (SK_U8)((Byte & CFG_CHIP_R_MSK) >> 4); - /* Read the adapters external SRAM size */ + /* get diff. PCI parameters */ + SK_IN16(IoC, B0_CTST, &CtrlStat); + + /* read the adapters RAM size */ SK_IN8(IoC, B2_E_0, &Byte); + pAC->GIni.GIGenesis = SK_FALSE; + pAC->GIni.GIYukon = SK_FALSE; + pAC->GIni.GIYukonLite = SK_FALSE; + +#ifdef GENESIS if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { pAC->GIni.GIGenesis = SK_TRUE; - if (Byte == 3) { + if (Byte == (SK_U8)3) { /* special case: 4 x 64k x 36, offset = 0x80000 */ pAC->GIni.GIRamSize = 1024; pAC->GIni.GIRamOffs = (SK_U32)512 * 1024; @@ -1864,19 +1999,72 @@ pAC->GIni.GIRamSize = (int)Byte * 512; pAC->GIni.GIRamOffs = 0; } + /* all GE adapters work with 53.125 MHz host clock */ + pAC->GIni.GIHstClkFact = SK_FACT_53; + + /* set Descr. Poll Timer Init Value to 250 ms */ + pAC->GIni.GIPollTimerVal = + SK_DPOLL_DEF * (SK_U32)pAC->GIni.GIHstClkFact / 100; } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIChipId != CHIP_ID_GENESIS) { + + pAC->GIni.GIYukon = SK_TRUE; + + pAC->GIni.GIRamSize = (Byte == (SK_U8)0) ? 128 : (int)Byte * 4; - pAC->GIni.GIGenesis = SK_FALSE; - -#ifndef VCPU - pAC->GIni.GIRamSize = (Byte == 0) ? 128 : (int)Byte * 4; -#else - pAC->GIni.GIRamSize = 128; -#endif pAC->GIni.GIRamOffs = 0; - pAC->GIni.GIWolOffs = (pAC->GIni.GIChipRev == 0) ? WOL_REG_OFFS : 0; + /* WA for chip Rev. A */ + pAC->GIni.GIWolOffs = (pAC->GIni.GIChipId == CHIP_ID_YUKON && + pAC->GIni.GIChipRev == 0) ? WOL_REG_OFFS : 0; + + /* get PM Capabilities of PCI config space */ + SK_IN16(IoC, PCI_C(PCI_PM_CAP_REG), &Word); + + /* check if VAUX is available */ + if (((CtrlStat & CS_VAUX_AVAIL) != 0) && + /* check also if PME from D3cold is set */ + ((Word & PCI_PME_D3C_SUP) != 0)) { + /* set entry in GE init struct */ + pAC->GIni.GIVauxAvail = SK_TRUE; + } + + if (pAC->GIni.GIChipId == CHIP_ID_YUKON_LITE) { + /* this is Rev. A1 */ + pAC->GIni.GIYukonLite = SK_TRUE; + } + else { + /* save Flash-Address Register */ + SK_IN32(IoC, B2_FAR, &DWord); + + /* test Flash-Address Register */ + SK_OUT8(IoC, B2_FAR + 3, 0xff); + SK_IN8(IoC, B2_FAR + 3, &Byte); + + if (Byte != 0) { + /* this is Rev. 
A0 */ + pAC->GIni.GIYukonLite = SK_TRUE; + + /* restore Flash-Address Register */ + SK_OUT32(IoC, B2_FAR, DWord); + } + } + + /* read the Interrupt source */ + SK_IN32(IoC, B0_ISRC, &DWord); + + if ((DWord & IS_HW_ERR) != 0) { + /* read the HW Error Interrupt source */ + SK_IN32(IoC, B0_HWE_ISRC, &DWord); + + if ((DWord & IS_IRQ_SENSOR) != 0) { + /* disable HW Error IRQ */ + pAC->GIni.GIValIrqMask &= ~IS_HW_ERR; + } + } for (i = 0; i < pAC->GIni.GIMacsFound; i++) { /* set GMAC Link Control reset */ @@ -1885,90 +2073,100 @@ /* clear GMAC Link Control reset */ SK_OUT16(IoC, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_CLR); } + /* all YU chips work with 78.125 MHz host clock */ + pAC->GIni.GIHstClkFact = SK_FACT_78; + + pAC->GIni.GIPollTimerVal = SK_DPOLL_MAX; /* 215 ms */ } +#endif /* YUKON */ - /* get diff. PCI parameters */ - SK_IN16(IoC, B0_CTST, &Word); - - /* Check if 64-bit PCI Slot is present */ - pAC->GIni.GIPciSlot64 = (SK_BOOL)((Word & CS_BUS_SLOT_SZ) != 0); + /* check if 64-bit PCI Slot is present */ + pAC->GIni.GIPciSlot64 = (SK_BOOL)((CtrlStat & CS_BUS_SLOT_SZ) != 0); - /* Check if 66 MHz PCI Clock is active */ - pAC->GIni.GIPciClock66 = (SK_BOOL)((Word & CS_BUS_CLOCK) != 0); - - /* Check if VAUX is available */ - pAC->GIni.GIVauxAvail = (SK_BOOL)((Word & CS_VAUX_AVAIL) != 0); + /* check if 66 MHz PCI Clock is active */ + pAC->GIni.GIPciClock66 = (SK_BOOL)((CtrlStat & CS_BUS_CLOCK) != 0); - /* Read PCI HW Revision Id. */ + /* read PCI HW Revision Id. */ SK_IN8(IoC, PCI_C(PCI_REV_ID), &Byte); pAC->GIni.GIPciHwRev = Byte; - /* All known GE Adapters work with 53.125 MHz host clock */ - pAC->GIni.GIHstClkFact = SK_FACT_53; - pAC->GIni.GIPollTimerVal = - SK_DPOLL_DEF * (SK_U32)pAC->GIni.GIHstClkFact / 100; - - /* Read the PMD type */ + /* read the PMD type */ SK_IN8(IoC, B2_PMD_TYP, &Byte); pAC->GIni.GICopperType = (SK_U8)(Byte == 'T'); - /* Read the PHY type */ + /* read the PHY type */ SK_IN8(IoC, B2_E_1, &Byte); -#ifdef VCPU - if (!pAC->GIni.GIGenesis) { - pAC->GIni.GICopperType = SK_TRUE; - Byte = SK_PHY_MARV_COPPER; /* this field is not initialized */ - } -#endif - Byte &= 0x0f; /* the PHY type is stored in the lower nibble */ for (i = 0; i < pAC->GIni.GIMacsFound; i++) { - pAC->GIni.GP[i].PhyType = Byte; - switch (Byte) { - case SK_PHY_XMAC: - pAC->GIni.GP[i].PhyAddr = PHY_ADDR_XMAC; - break; - case SK_PHY_BCOM: - pAC->GIni.GP[i].PhyAddr = PHY_ADDR_BCOM; - pAC->GIni.GP[i].PMSCap = - SK_MS_CAP_AUTO | SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE; - break; - case SK_PHY_MARV_COPPER: + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + switch (Byte) { + case SK_PHY_XMAC: + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_XMAC; + break; + case SK_PHY_BCOM: + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_BCOM; + pAC->GIni.GP[i].PMSCap = (SK_U8)(SK_MS_CAP_AUTO | + SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_LONE; + break; + case SK_PHY_NAT: + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_NAT; + break; +#endif /* OTHER_PHY */ + default: + /* ERROR: unexpected PHY type detected */ + RetVal = 5; + break; + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + if (Byte < (SK_U8)SK_PHY_MARV_COPPER) { + /* if this field is not initialized */ + Byte = (SK_U8)SK_PHY_MARV_COPPER; + + pAC->GIni.GICopperType = SK_TRUE; + } + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_MARV; + if (pAC->GIni.GICopperType) { - pAC->GIni.GP[i].PLinkSpeedCap = SK_LSPEED_CAP_AUTO | + + pAC->GIni.GP[i].PLinkSpeedCap = (SK_U8)(SK_LSPEED_CAP_AUTO | SK_LSPEED_CAP_10MBPS | 
SK_LSPEED_CAP_100MBPS | - SK_LSPEED_CAP_1000MBPS; - pAC->GIni.GP[i].PLinkSpeed = SK_LSPEED_AUTO; - pAC->GIni.GP[i].PMSCap = - SK_MS_CAP_AUTO | SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE; + SK_LSPEED_CAP_1000MBPS); + + pAC->GIni.GP[i].PLinkSpeed = (SK_U8)SK_LSPEED_AUTO; + + pAC->GIni.GP[i].PMSCap = (SK_U8)(SK_MS_CAP_AUTO | + SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE); } else { - pAC->GIni.GP[i].PhyType = SK_PHY_MARV_FIBER; + Byte = (SK_U8)SK_PHY_MARV_FIBER; } - break; -#ifdef OTHER_PHY - case SK_PHY_LONE: - pAC->GIni.GP[i].PhyAddr = PHY_ADDR_LONE; - break; - case SK_PHY_NAT: - pAC->GIni.GP[i].PhyAddr = PHY_ADDR_NAT; - break; -#endif /* OTHER_PHY */ - default: - /* ERROR: unexpected PHY type detected */ - RetVal = 5; - break; } +#endif /* YUKON */ + + pAC->GIni.GP[i].PhyType = (int)Byte; SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT, - ("PHY type: %d PHY addr: %04x\n", pAC->GIni.GP[i].PhyType, + ("PHY type: %d PHY addr: %04x\n", Byte, pAC->GIni.GP[i].PhyAddr)); } - /* Get Mac Type & set function pointers dependent on */ + /* get MAC Type & set function pointers dependent on */ +#ifdef GENESIS if (pAC->GIni.GIGenesis) { + pAC->GIni.GIMacType = SK_MAC_XMAC; pAC->GIni.GIFunc.pFnMacUpdateStats = SkXmUpdateStats; @@ -1976,24 +2174,30 @@ pAC->GIni.GIFunc.pFnMacResetCounter = SkXmResetCounter; pAC->GIni.GIFunc.pFnMacOverflow = SkXmOverflowStatus; } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + pAC->GIni.GIMacType = SK_MAC_GMAC; pAC->GIni.GIFunc.pFnMacUpdateStats = SkGmUpdateStats; pAC->GIni.GIFunc.pFnMacStatistic = SkGmMacStatistic; pAC->GIni.GIFunc.pFnMacResetCounter = SkGmResetCounter; pAC->GIni.GIFunc.pFnMacOverflow = SkGmOverflowStatus; - -#ifndef VCPU + +#ifdef SPECIAL_HANDLING if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { /* check HW self test result */ SK_IN8(IoC, B2_E_3, &Byte); - if ((Byte & B2_E3_RES_MASK) != 0) { + if (Byte & B2_E3_RES_MASK) { RetVal = 6; } } #endif } +#endif /* YUKON */ + return(RetVal); } /* SkGeInit1 */ @@ -2017,7 +2221,9 @@ SK_AC *pAC, /* adapter context */ SK_IOC IoC) /* IO context */ { +#ifdef GENESIS SK_U32 DWord; +#endif /* GENESIS */ int i; /* start the Descriptor Poll Timer */ @@ -2031,6 +2237,7 @@ SK_OUT8(IoC, B28_DPT_CTRL, DPT_START); } +#ifdef GENESIS if (pAC->GIni.GIGenesis) { /* start the Blink Source Counter */ DWord = SK_BLK_DUR * (SK_U32)pAC->GIni.GIHstClkFact / 100; @@ -2046,10 +2253,14 @@ SkGeInitPktArb(pAC, IoC); } - else { - /* Start Time Stamp Timer */ +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* start Time Stamp Timer */ SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_START); } +#endif /* YUKON */ /* enable the Tx Arbiters */ for (i = 0; i < pAC->GIni.GIMacsFound; i++) { @@ -2121,17 +2332,17 @@ break; } - /* Check if the adapter seems to be accessible */ - SK_OUT32(IoC, B2_IRQM_INI, 0x11335577L); + /* check if the adapter seems to be accessible */ + SK_OUT32(IoC, B2_IRQM_INI, SK_TEST_VAL); SK_IN32(IoC, B2_IRQM_INI, &DWord); SK_OUT32(IoC, B2_IRQM_INI, 0L); - if (DWord != 0x11335577L) { + if (DWord != SK_TEST_VAL) { RetVal = 2; break; } - /* Check if the number of GIMacsFound matches SK_MAX_MACS */ + /* check if the number of GIMacsFound matches SK_MAX_MACS */ if (pAC->GIni.GIMacsFound > SK_MAX_MACS) { RetVal = 1; break; @@ -2168,7 +2379,7 @@ /****************************************************************************** * - * SkGeDeInit() - Deinitialize the adapter. + * SkGeDeInit() - Deinitialize the adapter * * Description: * All ports of the adapter will be stopped if not already done. 
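A few hunks back, SkGeInit1 tells YUKON-Lite Rev. A0 apart from a plain YUKON by saving the Flash-Address Register, writing 0xff into one of its bytes, reading it back, and restoring the saved value when the byte sticks (Rev. A1 already reports its own chip id, so the probe only runs in the else branch). The following is a standalone sketch of that save / test-write / read-back / restore probe against a simulated register; sim_dev and its helpers are inventions of the sketch.

#include <stdint.h>
#include <stdio.h>

/* Simulated register space: the "lite" device exposes a writable test
 * register, the plain device ignores writes there and reads back 0. */
struct sim_dev {
    int      has_reg;
    uint32_t reg;
};

static uint8_t sim_read8(struct sim_dev *d, int byte)
{
    return d->has_reg ? (uint8_t)(d->reg >> (8 * byte)) : 0;
}

static void sim_write8(struct sim_dev *d, int byte, uint8_t val)
{
    if (d->has_reg) {
        d->reg &= ~(0xFFu << (8 * byte));
        d->reg |= (uint32_t)val << (8 * byte);
    }
}

static uint32_t sim_read32(struct sim_dev *d) { return d->has_reg ? d->reg : 0; }
static void sim_write32(struct sim_dev *d, uint32_t v) { if (d->has_reg) d->reg = v; }

/* Does the optional register exist? (mirrors the FAR test) */
static int probe_is_lite_a0(struct sim_dev *d)
{
    uint32_t saved = sim_read32(d);   /* save register  */
    int found;

    sim_write8(d, 3, 0xFF);           /* test write     */
    found = sim_read8(d, 3) != 0;     /* read back      */

    if (found)
        sim_write32(d, saved);        /* restore on hit */
    return found;
}

int main(void)
{
    struct sim_dev lite  = { 1, 0x12345678u };
    struct sim_dev plain = { 0, 0 };

    printf("lite  -> %s\n", probe_is_lite_a0(&lite)  ? "Rev. A0" : "plain");
    printf("plain -> %s\n", probe_is_lite_a0(&plain) ? "Rev. A0" : "plain");
    return 0;
}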
@@ -2184,12 +2395,12 @@ int i; SK_U16 Word; -#ifndef VCPU - /* Ensure I2C is ready */ +#if (!defined(SK_SLIM) && !defined(VCPU)) + /* ensure I2C is ready */ SkI2cWaitIrq(pAC, IoC); -#endif +#endif - /* Stop all current transfer activity */ + /* stop all current transfer activity */ for (i = 0; i < pAC->GIni.GIMacsFound; i++) { if (pAC->GIni.GP[i].PState != SK_PRT_STOP && pAC->GIni.GP[i].PState != SK_PRT_RESET) { @@ -2206,11 +2417,13 @@ SK_IN16(IoC, PCI_C(PCI_STATUS), &Word); SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); - SK_OUT16(IoC, PCI_C(PCI_STATUS), Word | PCI_ERRBITS); + SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS)); SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); - /* Do the reset, all LEDs are switched off now */ + /* do the reset, all LEDs are switched off now */ SK_OUT8(IoC, B0_CTST, CS_RST_SET); + + pAC->GIni.GILevel = SK_INIT_DATA; } /* SkGeDeInit */ @@ -2261,10 +2474,11 @@ return(2); } - /* Configuration ok, initialize the Port now */ + /* configuration ok, initialize the Port now */ +#ifdef GENESIS if (pAC->GIni.GIGenesis) { - /* Initialize Rx, Tx and Link LED */ + /* initialize Rx, Tx and Link LED */ /* * If 1000BT Phy needs LED initialization than swap * LED and XMAC initialization order @@ -2275,12 +2489,16 @@ SkXmInitMac(pAC, IoC, Port); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { SkGmInitMac(pAC, IoC, Port); } +#endif /* YUKON */ - /* Do NOT initialize the Link Sync Counter */ + /* do NOT initialize the Link Sync Counter */ SkGeInitMacFifo(pAC, IoC, Port); @@ -2293,7 +2511,7 @@ SkGeInitBmu(pAC, IoC, Port); - /* Mark port as initialized */ + /* mark port as initialized */ pPrt->PState = SK_PRT_INIT; return(0); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skgemib.c linux.22-ac2/drivers/net/sk98lin/skgemib.c --- linux.vanilla/drivers/net/sk98lin/skgemib.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skgemib.c 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,16 @@ * * Name: skgemib.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.7 $ - * Date: $Date: 2002/12/16 09:04:34 $ + * Version: $Revision: 1.9 $ + * Date: $Date: 2003/05/23 12:55:20 $ * Purpose: Private Network Management Interface Management Database * ****************************************************************************/ /****************************************************************************** * - * (C)Copyright 2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,12 @@ * History: * * $Log: skgemib.c,v $ + * Revision 1.9 2003/05/23 12:55:20 tschilli + * OID_SKGE_BOARDLEVEL added. + * + * Revision 1.8 2003/03/27 11:19:15 tschilli + * Copyright messages changed. + * * Revision 1.7 2002/12/16 09:04:34 tschilli * Code for VCT handling added. 
* @@ -100,8 +107,13 @@ PNMI_STATIC int PowerManagement(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance, unsigned int TableIndex, SK_U32 NetIndex); -#endif +#endif /* SK_POWER_MGMT */ +#ifdef SK_DIAG_SUPPORT +PNMI_STATIC int DiagActions(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +#endif /* SK_DIAG_SUPPORT */ /* defines *******************************************************************/ @@ -267,6 +279,13 @@ 0, SK_PNMI_RW, PowerManagement, 0}, #endif /* SK_POWER_MGMT */ +#ifdef SK_DIAG_SUPPORT + {OID_SKGE_DIAG_MODE, + 0, + 0, + 0, + SK_PNMI_RW, DiagActions, 0}, +#endif /* SK_DIAG_SUPPORT */ {OID_SKGE_MDB_VERSION, 1, 0, @@ -1052,5 +1071,10 @@ 0, 0, SK_PNMI_RO, Vct, 0}, + {OID_SKGE_BOARDLEVEL, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, }; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skgepnmi.c linux.22-ac2/drivers/net/sk98lin/skgepnmi.c --- linux.vanilla/drivers/net/sk98lin/skgepnmi.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skgepnmi.c 2003-08-13 14:10:39.000000000 +0100 @@ -2,8 +2,8 @@ * * Name: skgepnmi.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.102 $ - * Date: $Date: 2002/12/16 14:03:24 $ + * Version: $Revision: 1.109 $ + * Date: $Date: 2003/07/17 14:15:24 $ * Purpose: Private Network Management Interface * ****************************************************************************/ @@ -11,6 +11,7 @@ /****************************************************************************** * * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,42 @@ * History: * * $Log: skgepnmi.c,v $ + * Revision 1.109 2003/07/17 14:15:24 tschilli + * Bug in SkPnmiGenIoctl() fixed. + * + * Revision 1.108 2003/05/27 07:10:11 tschilli + * Bug in SkPnmiGenIoctl() fixed. + * + * Revision 1.107 2003/05/23 13:01:10 tschilli + * Code for DIAG support added (#define SK_DIAG_SUPPORT). + * Code for generic PNMI IOCTL support added. The new function + * SkPnmiGenIoctl() is used for this purpose. + * Handling of OID_SKGE_BOARDLEVEL added. + * Incorrect buffer size handling of OID_SKGE_MTU during GET action fixed. + * Return code handling in PowerManagement() fixed. + * Editorial changes. + * + * Revision 1.106 2003/04/10 14:47:31 rschmidt + * Fixed handling for OID_GEN_RCV_OK and OID_GEN_XMIT_OK for YUKON's GMAC + * in GetPhysStatVal(). + * Replaced macro PHY_READ() with function call SkXmPhyRead(). + * Made optimisations for readability and code size. + * Editorial changes. + * + * Revision 1.105 2003/04/09 12:51:32 rschmidt + * Fixed XMAC only handling for some events in SkPnmiEvent(). + * Fixed return value for OID_GEN_RCV_OK (SK_PNMI_HRX) in GetPhysStatVal(). + * Editorial changes. + * + * Revision 1.104 2003/03/27 11:18:21 tschilli + * BRK statements from DEBUG code removed. + * OID_GEN_XMIT_OK and OID_GEN_RCV_OK work with Yukon now. + * Copyright messages changed. + * + * Revision 1.103 2002/12/20 09:57:13 tschilli + * SK_PNMI_EVT_VCT_RESET event code changed. + * Unused variable from Vct() removed. + * * Revision 1.102 2002/12/16 14:03:24 tschilli * VCT code in Vct() changed. * @@ -84,7 +121,7 @@ * - Extended SIRQ event handler for both mac types. 
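The skgemib.c hunks above add rows such as OID_SKGE_DIAG_MODE and OID_SKGE_BOARDLEVEL to the MIB table; each row pairs an OID with an access mode and the handler (DiagActions, General, Vct, ...) that is later called through the table. A reduced sketch of such table-driven OID dispatch follows; the OID numbers, handler bodies and error code are made up for the example.

#include <stddef.h>
#include <stdio.h>

#define ACC_RO 1
#define ACC_RW 2

/* handler signature, loosely modelled on the PNMI handlers */
typedef int (*oid_handler)(unsigned oid, int action, void *buf, size_t *len);

static int handle_general(unsigned oid, int action, void *buf, size_t *len)
{ (void)oid; (void)action; (void)buf; *len = 4; return 0; }

static int handle_diag(unsigned oid, int action, void *buf, size_t *len)
{ (void)oid; (void)action; (void)buf; *len = 1; return 0; }

struct oid_entry {
    unsigned    oid;      /* object identifier          */
    int         access;   /* ACC_RO / ACC_RW            */
    oid_handler func;     /* handler called by dispatch */
};

/* invented OID values; only the table layout matters here */
static const struct oid_entry id_table[] = {
    { 0xFF010160u, ACC_RW, handle_diag    },  /* "DIAG_MODE"  */
    { 0xFF010161u, ACC_RO, handle_general },  /* "BOARDLEVEL" */
};

static int dispatch(unsigned oid, int action, void *buf, size_t *len)
{
    size_t i;

    for (i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++) {
        if (id_table[i].oid == oid)
            return id_table[i].func(oid, action, buf, len);
    }
    return -1;  /* unknown OID */
}

int main(void)
{
    char buf[16];
    size_t len = sizeof(buf);

    printf("rc=%d len=%zu\n", dispatch(0xFF010161u, 0, buf, &len), len);
    return 0;
}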
* - Fixed rx short counter bug (#10620) * - Added handler for oids SKGE_SPEED_MODE & SKGE_SPEED_STATUS. - * - Extendet GetPhysStatVal() for GMAC. + * - Extended GetPhysStatVal() for GMAC. * - Editorial changes. * * Revision 1.90 2002/05/22 08:56:25 rwahl @@ -172,7 +209,7 @@ * Added state check to PHY_READ call (hanged if called during startup). * * Revision 1.67 1999/09/22 09:53:20 rwahl - * - Read Broadcom register for updating fcs error counter (1000Base-T). + * - Read Broadcom register for updating FCS error counter (1000Base-T). * * Revision 1.66 1999/08/26 13:47:56 rwahl * Added SK_DRIVER_SENDEVENT when queueing RLMT_CHANGE_THRES trap. @@ -432,9 +469,10 @@ ****************************************************************************/ +#ifndef _lint static const char SysKonnectFileId[] = - "@(#) $Id: skgepnmi.c,v 1.102 2002/12/16 14:03:24 tschilli Exp $" - " (C) SysKonnect."; + "@(#) $Id: skgepnmi.c,v 1.109 2003/07/17 14:15:24 tschilli Exp $ (C) Marvell."; +#endif /* !_lint */ #include "h/skdrv1st.h" #include "h/sktypes.h" @@ -472,11 +510,13 @@ unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, unsigned int *pLen, SK_U32 NetIndex); -int SkPnmiPreSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, +int SkPnmiPreSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, unsigned int *pLen, SK_U32 NetIndex); -int SkPnmiSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, +int SkPnmiSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, unsigned int *pLen, SK_U32 NetIndex); int SkPnmiEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Param); +int SkPnmiGenIoctl(SK_AC *pAC, SK_IOC IoC, void * pBuf, + unsigned int * pLen, SK_U32 NetIndex); /* @@ -530,7 +570,7 @@ /* * Overflow status register bit table and corresponding counter * dependent on MAC type - the number relates to the size of overflow - * mask returned by the pFnMacOverflow function + * mask returned by the pFnMacOverflow function */ PNMI_STATIC const SK_U16 StatOvrflwBit[][SK_PNMI_MAC_TYPES] = { /* Bit0 */ { SK_PNMI_HTX, SK_PNMI_HTX_UNICAST}, @@ -694,7 +734,7 @@ /* SK_PNMI_HRX_FRAMING */ {{XM_RXF_FRA_ERR, SK_TRUE}, {0, SK_FALSE}}, /* SK_PNMI_HRX_UNDERSIZE */ - {{0, SK_FALSE},{GM_RXF_SHT, SK_TRUE}}, + {{0, SK_FALSE}, {GM_RXF_SHT, SK_TRUE}}, /* SK_PNMI_HRX_OVERFLOW */ {{XM_RXE_FIFO_OV, SK_TRUE}, {GM_RXE_FIFO_OV, SK_TRUE}}, /* SK_PNMI_HRX_JABBER */ @@ -768,7 +808,6 @@ SK_U16 Val16; /* Multiple purpose 16 bit variable */ SK_U8 Val8; /* Mulitple purpose 8 bit variable */ SK_EVPARA EventParam; /* Event struct for timer event */ - SK_GEPORT *pPrt; SK_PNMI_VCT *pVctBackupData; @@ -797,7 +836,6 @@ ("CounterOffset struct size (%d) differs from" "SK_PNMI_MAX_IDX (%d)\n", SK_PNMI_CNT_NO, SK_PNMI_MAX_IDX)); - BRK; } if (SK_PNMI_MAX_IDX != @@ -808,10 +846,9 @@ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_INIT | SK_DBGCAT_FATAL, ("StatAddr table size (%d) differs from " "SK_PNMI_MAX_IDX (%d)\n", - (sizeof(StatAddr) / + (sizeof(StatAddr) / (sizeof(SK_PNMI_STATADDR) * SK_PNMI_MAC_TYPES)), SK_PNMI_MAX_IDX)); - BRK; } #endif /* SK_PNMI_CHECK */ break; @@ -829,8 +866,7 @@ /* Initialize DSP variables for Vct() to 0xff => Never written! 
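The StatOvrflwBit table described above maps every bit of the mask returned by pFnMacOverflow to the counter that wrapped, with one column per MAC type. The sketch below walks such a mask and, loosely, advances the software upper 32 bits of the matching counter; the counter ids and table contents are invented, and the real overflow handler additionally special-cases a few bits, as the later hunks show.

#include <stdint.h>
#include <stdio.h>

enum { MAC_XMAC = 0, MAC_GMAC = 1, MAC_TYPES = 2 };

/* counter ids (illustrative) */
enum { CNT_TX = 0, CNT_TX_UNICAST, CNT_RX, CNT_RX_UNICAST, CNT_MAX, CNT_NONE = 0xFFFF };

/* one row per overflow bit, one column per MAC type */
static const uint16_t ovfl_bit_to_counter[][MAC_TYPES] = {
    /* bit 0 */ { CNT_TX,   CNT_TX_UNICAST },
    /* bit 1 */ { CNT_RX,   CNT_RX_UNICAST },
    /* bit 2 */ { CNT_NONE, CNT_RX         },
};

static void handle_overflow(uint32_t mask, int mac_type, uint32_t counter_high[CNT_MAX])
{
    const unsigned rows = sizeof(ovfl_bit_to_counter) / sizeof(ovfl_bit_to_counter[0]);
    unsigned bit;

    for (bit = 0; bit < rows; bit++) {
        uint16_t cnt;

        if ((mask & (1u << bit)) == 0)
            continue;

        cnt = ovfl_bit_to_counter[bit][mac_type];
        if (cnt != CNT_NONE)
            counter_high[cnt]++;   /* upper 32 bits of the 64-bit counter */
    }
}

int main(void)
{
    uint32_t high[CNT_MAX] = { 0 };

    handle_overflow(0x3, MAC_GMAC, high);   /* bits 0 and 1 set */
    printf("tx_unicast high=%u rx_unicast high=%u\n",
           (unsigned)high[CNT_TX_UNICAST], (unsigned)high[CNT_RX_UNICAST]);
    return 0;
}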
*/ for (PortIndex = 0; PortIndex < PortMax; PortIndex ++) { - pPrt = &pAC->GIni.GP[PortIndex]; - pPrt->PCableLen =0xff; + pAC->GIni.GP[PortIndex].PCableLen = 0xff; pVctBackupData = &pAC->Pnmi.VctBackup[PortIndex]; pVctBackupData->PCableLen = 0xff; } @@ -958,14 +994,14 @@ default: pAC->Pnmi.Connector = 1; break; - } + } break; - + case SK_INIT_RUN: /* * Start timer for RLMT change counter */ - SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam)); + SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer, 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER, EventParam); @@ -994,17 +1030,17 @@ * the data. * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ int SkPnmiGetVar( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ SK_U32 Id, /* Object ID that is to be processed */ -void *pBuf, /* Buffer to which to mgmt data will be retrieved */ +void *pBuf, /* Buffer to which the management data will be copied */ unsigned int *pLen, /* On call: buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, ("PNMI: SkPnmiGetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n", @@ -1022,8 +1058,8 @@ * Calls a general sub-function for all this stuff. The preset does * the same as a set, but returns just before finally setting the * new value. This is usefull to check if a set might be successfull. - * If as instance a -1 is passed, an array of values is supposed and - * all instance of the OID will be set. + * If the instance -1 is passed, an array of values is supposed and + * all instances of the OID will be set. * * Returns: * SK_PNMI_ERR_OK The request was successfully performed. @@ -1036,17 +1072,17 @@ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown. * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ int SkPnmiPreSetVar( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ SK_U32 Id, /* Object ID that is to be processed */ -void *pBuf, /* Buffer which stores the mgmt data to be set */ -unsigned int *pLen, /* Total length of mgmt data */ +void *pBuf, /* Buffer to which the management data will be copied */ +unsigned int *pLen, /* Total length of management data */ SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, ("PNMI: SkPnmiPreSetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n", @@ -1065,8 +1101,8 @@ * Calls a general sub-function for all this stuff. The preset does * the same as a set, but returns just before finally setting the * new value. This is usefull to check if a set might be successfull. - * If as instance a -1 is passed, an array of values is supposed and - * all instance of the OID will be set. 
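The comment block above spells out the PRESET semantics: a preset runs exactly like a set but returns just before the new value is written, so a caller can check whether a set would succeed. A compact sketch of that two-phase pattern follows; the MTU example and its bounds are assumptions chosen for the sketch.

#include <stdio.h>

#define ERR_OK        0
#define ERR_BAD_VALUE 1

enum action { ACT_PRESET, ACT_SET };

static int device_mtu = 1500;

/* validate always; commit only on SET */
static int set_mtu(enum action action, int new_mtu)
{
    if (new_mtu < 68 || new_mtu > 9000)
        return ERR_BAD_VALUE;        /* PRESET and SET fail identically */

    if (action == ACT_PRESET)
        return ERR_OK;               /* stop just before writing        */

    device_mtu = new_mtu;            /* SET: actually apply the value   */
    return ERR_OK;
}

int main(void)
{
    if (set_mtu(ACT_PRESET, 9500) != ERR_OK)
        puts("preset rejected, value never touched");

    if (set_mtu(ACT_PRESET, 1700) == ERR_OK && set_mtu(ACT_SET, 1700) == ERR_OK)
        printf("mtu is now %d\n", device_mtu);

    return 0;
}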
+ * If the instance -1 is passed, an array of values is supposed and + * all instances of the OID will be set. * * Returns: * SK_PNMI_ERR_OK The request was successfully performed. @@ -1079,17 +1115,17 @@ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown. * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ int SkPnmiSetVar( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ SK_U32 Id, /* Object ID that is to be processed */ -void *pBuf, /* Buffer which stores the mgmt data to be set */ -unsigned int *pLen, /* Total length of mgmt data */ +void *pBuf, /* Buffer to which the management data will be copied */ +unsigned int *pLen, /* Total length of management data */ SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, ("PNMI: SkPnmiSetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n", @@ -1116,14 +1152,14 @@ * SK_PNMI_ERR_GENERAL A general severe internal error occured * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take * the data. - * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist + * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist */ int SkPnmiGetStruct( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -void *pBuf, /* Buffer which will store the retrieved data */ +void *pBuf, /* Buffer to which the management data will be copied. */ unsigned int *pLen, /* Length of buffer */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { int Ret; unsigned int TableIndex; @@ -1301,13 +1337,13 @@ SK_IOC IoC, /* IO context handle */ void *pBuf, /* Buffer which contains the data to be set */ unsigned int *pLen, /* Length of buffer */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, ("PNMI: SkPnmiPreSetStruct: Called, BufLen=%d, NetIndex=%d\n", *pLen, NetIndex)); - return (PnmiStruct(pAC, IoC, SK_PNMI_PRESET, (char *)pBuf, + return (PnmiStruct(pAC, IoC, SK_PNMI_PRESET, (char *)pBuf, pLen, NetIndex)); } @@ -1339,13 +1375,13 @@ SK_IOC IoC, /* IO context handle */ void *pBuf, /* Buffer which contains the data to be set */ unsigned int *pLen, /* Length of buffer */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, ("PNMI: SkPnmiSetStruct: Called, BufLen=%d, NetIndex=%d\n", *pLen, NetIndex)); - return (PnmiStruct(pAC, IoC, SK_PNMI_SET, (char *)pBuf, + return (PnmiStruct(pAC, IoC, SK_PNMI_SET, (char *)pBuf, pLen, NetIndex)); } @@ -1392,9 +1428,9 @@ * is now an active port. PNMI will now * add the statistic data of this port to * the virtual port. - * SK_PNMI_EVT_RLMT_SET_NETS Notifies PNMI about the net mode. The first Parameter + * SK_PNMI_EVT_RLMT_SET_NETS Notifies PNMI about the net mode. The first parameter * contains the number of nets. 1 means single net, 2 means - * dual net. The second Parameter is -1 + * dual net. 
The second parameter is -1 * * Returns: * Always 0 @@ -1406,7 +1442,7 @@ SK_EVPARA Param) /* Event dependent parameter */ { unsigned int PhysPortIndex; - unsigned int MaxNetNumber; + unsigned int MaxNetNumber; int CounterIndex; int Ret; SK_U16 MacStatus; @@ -1436,7 +1472,7 @@ ("PNMI: SkPnmiEvent: Called, Event=0x%x, Param=0x%x\n", (unsigned int)Event, (unsigned int)Param.Para64)); } -#endif +#endif /* DEBUG */ SK_PNMI_CHECKFLAGS("SkPnmiEvent: On call"); MacType = pAC->GIni.GIMacType; @@ -1455,18 +1491,18 @@ PhysPortIndex)); return (0); } -#endif +#endif /* DEBUG */ OverflowStatus = 0; /* * Check which source caused an overflow interrupt. */ - if ((pAC->GIni.GIFunc.pFnMacOverflow( - pAC, IoC, PhysPortIndex, MacStatus, &OverflowStatus) != 0) || + if ((pAC->GIni.GIFunc.pFnMacOverflow(pAC, IoC, PhysPortIndex, + MacStatus, &OverflowStatus) != 0) || (OverflowStatus == 0)) { SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return"); - return (0); + return (0); } /* @@ -1486,20 +1522,20 @@ case SK_PNMI_HTX_UTILUNDER: case SK_PNMI_HTX_UTILOVER: - XM_IN16(IoC, PhysPortIndex, XM_TX_CMD, - &Register); - Register |= XM_TX_SAM_LINE; - XM_OUT16(IoC, PhysPortIndex, XM_TX_CMD, - Register); + if (MacType == SK_MAC_XMAC) { + XM_IN16(IoC, PhysPortIndex, XM_TX_CMD, &Register); + Register |= XM_TX_SAM_LINE; + XM_OUT16(IoC, PhysPortIndex, XM_TX_CMD, Register); + } break; case SK_PNMI_HRX_UTILUNDER: case SK_PNMI_HRX_UTILOVER: - XM_IN16(IoC, PhysPortIndex, XM_RX_CMD, - &Register); - Register |= XM_RX_SAM_LINE; - XM_OUT16(IoC, PhysPortIndex, XM_RX_CMD, - Register); + if (MacType == SK_MAC_XMAC) { + XM_IN16(IoC, PhysPortIndex, XM_RX_CMD, &Register); + Register |= XM_RX_SAM_LINE; + XM_OUT16(IoC, PhysPortIndex, XM_RX_CMD, Register); + } break; case SK_PNMI_HTX_OCTETHIGH: @@ -1540,7 +1576,8 @@ (unsigned int)Param.Para64)); return (0); } -#endif +#endif /* DEBUG */ + /* * Store a trap message in the trap buffer and generate * an event for user space applications with the @@ -1556,11 +1593,12 @@ if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) { SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, - ("PNMI: ERR:SkPnmiEvent: SK_PNMI_EVT_SEN_WAR_UPP parameter wrong, SensorIndex=%d\n", + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_WAR_UPP parameter wrong, SensorIndex=%d\n", (unsigned int)Param.Para64)); return (0); } -#endif +#endif /* DEBUG */ + /* * Store a trap message in the trap buffer and generate * an event for user space applications with the @@ -1580,7 +1618,8 @@ (unsigned int)Param.Para64)); return (0); } -#endif +#endif /* DEBUG */ + /* * Store a trap message in the trap buffer and generate * an event for user space applications with the @@ -1600,7 +1639,8 @@ (unsigned int)Param.Para64)); return (0); } -#endif +#endif /* DEBUG */ + /* * Store a trap message in the trap buffer and generate * an event for user space applications with the @@ -1620,7 +1660,7 @@ * Be careful in changing these values, on change check * - typedef of SK_PNMI_ESTIMATE (Size of EstValue * array one less than value number) - * - Timer initilization SkTimerStart() in SkPnmiInit + * - Timer initialization SkTimerStart() in SkPnmiInit * - Delta value below must be multiplicated with * power of 2 * @@ -1671,7 +1711,7 @@ (void)SK_DRIVER_SENDEVENT(pAC, IoC); } - SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam)); + SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer, 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER, EventParam); @@ -1693,20 +1733,21 @@ return (0); } -#endif +#endif 
/* DEBUG */ /* - * Set all counters and timestamps to zero + * Set all counters and timestamps to zero. + * The according NetIndex is required as a + * parameter of the event. */ - ResetCounter(pAC, IoC, NetIndex); /* the according NetIndex is required - as a Parameter of the Event */ + ResetCounter(pAC, IoC, NetIndex); break; case SK_PNMI_EVT_XMAC_RESET: /* * To grant continuous counter values store the current * XMAC statistic values to the entries 1..n of the - * CounterOffset array. XMAC Errata #2 + * CounterOffset array. XMAC Errata #2 */ #ifdef DEBUG if ((unsigned int)Param.Para64 >= SK_MAX_MACS) { @@ -1742,11 +1783,10 @@ continue; } - pAC->Pnmi.Port[PhysPortIndex]. - CounterOffset[CounterIndex] = GetPhysStatVal( - pAC, IoC, PhysPortIndex, CounterIndex); - pAC->Pnmi.Port[PhysPortIndex]. - CounterHigh[CounterIndex] = 0; + pAC->Pnmi.Port[PhysPortIndex].CounterOffset[CounterIndex] = + GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex); + + pAC->Pnmi.Port[PhysPortIndex].CounterHigh[CounterIndex] = 0; } pAC->Pnmi.MacUpdatedFlag --; @@ -1763,7 +1803,8 @@ return (0); } -#endif +#endif /* DEBUG */ + /* * Store a trap message in the trap buffer and generate an event for * user space applications with the SK_DRIVER_SENDEVENT macro. @@ -1772,8 +1813,7 @@ (void)SK_DRIVER_SENDEVENT(pAC, IoC); /* Bugfix for XMAC errata (#10620)*/ - if (pAC->GIni.GIMacType == SK_MAC_XMAC){ - + if (MacType == SK_MAC_XMAC) { /* Add incremental difference to offset (#10620)*/ (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex, XM_RXE_SHT_ERR, &Val32); @@ -1800,20 +1840,22 @@ return (0); } -#endif +#endif /* DEBUG */ + /* * Store a trap message in the trap buffer and generate an event for * user space applications with the SK_DRIVER_SENDEVENT macro. */ QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_DOWN, PhysPortIndex); (void)SK_DRIVER_SENDEVENT(pAC, IoC); - + /* Bugfix #10620 - get zero level for incremental difference */ - if ((pAC->GIni.GIMacType == SK_MAC_XMAC)) { + if (MacType == SK_MAC_XMAC) { (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex, XM_RXE_SHT_ERR, &Val32); - pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark = + + pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark = (((SK_U64)pAC->Pnmi.Port[PhysPortIndex]. CounterHigh[SK_PNMI_HRX_SHORTS] << 32) | (SK_U64)Val32); } @@ -1837,7 +1879,8 @@ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_DOWN parameter too high, NetIndex=%d\n", NetIndex)); } -#endif +#endif /* DEBUG */ + /* * For now, ignore event if NetIndex != 0. */ @@ -1914,7 +1957,8 @@ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_UP parameter too high, NetIndex=%d\n", NetIndex)); } -#endif +#endif /* DEBUG */ + /* * For now, ignore event if NetIndex != 0. */ @@ -1976,9 +2020,7 @@ pAC->Pnmi.VirtualCounterOffset[CounterIndex] -= Value; } - /* - * Set port to active - */ + /* Set port to active */ pAC->Pnmi.Port[PhysPortIndex].ActiveFlag = SK_TRUE; pAC->Pnmi.MacUpdatedFlag --; @@ -2020,47 +2062,56 @@ break; case SK_PNMI_EVT_VCT_RESET: - PhysPortIndex = Param.Para32[0]; - pPrt = &pAC->GIni.GP[PhysPortIndex]; - pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex]; - - if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) { - RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE); - if (RetCode == 2) { - /* - * VCT test is still running. - * Start VCT timer counter again. 
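The SK_PNMI_EVT_XMAC_RESET handling above snapshots the hardware counters into CounterOffset[] before the MAC is reset (XMAC Errata #2), and the port-down path composes a 64-bit value as CounterHigh shifted left by 32 ored with the 32-bit register. The sketch below shows how such an offset keeps a reported counter monotonic across a reset that clears the hardware value; the way the offset is folded back in is simplified to plain accumulation and does not claim to match the driver exactly.

#include <stdint.h>
#include <stdio.h>

struct stat_counter {
    uint64_t offset;  /* accumulated value from before resets      */
    uint32_t high;    /* software upper 32 bits (overflow handler) */
};

/* value reported to management: offset + what the MAC counts now */
static uint64_t read_counter(const struct stat_counter *c, uint32_t hw_low)
{
    return c->offset + (((uint64_t)c->high << 32) | hw_low);
}

/* called just before the MAC (and its counters) are reset */
static void on_mac_reset(struct stat_counter *c, uint32_t hw_low)
{
    c->offset += ((uint64_t)c->high << 32) | hw_low;
    c->high = 0;                      /* hardware restarts at zero */
}

int main(void)
{
    struct stat_counter rx = { 0, 0 };
    uint32_t hw = 41234;              /* pretend hardware count    */

    printf("before reset: %llu\n", (unsigned long long)read_counter(&rx, hw));
    on_mac_reset(&rx, hw);
    hw = 10;                          /* counter restarted at zero */
    printf("after reset:  %llu\n", (unsigned long long)read_counter(&rx, hw));
    return 0;
}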
- */ - SK_MEMSET((char *) &Param, 0, sizeof(Param)); - Param.Para32[0] = PhysPortIndex; - Param.Para32[1] = -1; - SkTimerStart(pAC, IoC, &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer, - 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Param); - break; - } - pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING; - pAC->Pnmi.VctStatus[PhysPortIndex] |= - (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE); + PhysPortIndex = Param.Para32[0]; + pPrt = &pAC->GIni.GP[PhysPortIndex]; + pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex]; - /* Copy results for later use to PNMI struct. */ - for (i = 0; i < 4; i++) { - if (pPrt->PMdiPairLen[i] > 35) { - CableLength = 1000 * (((175 * pPrt->PMdiPairLen[i]) / 210) - 28); + if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) { + RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE); + if (RetCode == 2) { + /* + * VCT test is still running. + * Start VCT timer counter again. + */ + SK_MEMSET((char *) &Param, 0, sizeof(Param)); + Param.Para32[0] = PhysPortIndex; + Param.Para32[1] = -1; + SkTimerStart(pAC, IoC, + &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer, + 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Param); + break; } - else { - CableLength = 0; + pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING; + pAC->Pnmi.VctStatus[PhysPortIndex] |= + (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE); + + /* Copy results for later use to PNMI struct. */ + for (i = 0; i < 4; i++) { + if (pPrt->PMdiPairSts[i] == SK_PNMI_VCT_NORMAL_CABLE) { + if ((pPrt->PMdiPairLen[i] > 35) && + (pPrt->PMdiPairLen[i] < 0xff)) { + pPrt->PMdiPairSts[i] = SK_PNMI_VCT_IMPEDANCE_MISMATCH; + } + } + if ((pPrt->PMdiPairLen[i] > 35) && + (pPrt->PMdiPairLen[i] != 0xff)) { + CableLength = 1000 * + (((175 * pPrt->PMdiPairLen[i]) / 210) - 28); + } + else { + CableLength = 0; + } + pVctBackupData->PMdiPairLen[i] = CableLength; + pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i]; } - pVctBackupData->PMdiPairLen[i] = CableLength; - pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i]; + + Param.Para32[0] = PhysPortIndex; + Param.Para32[1] = -1; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Param); + SkEventDispatcher(pAC, IoC); } - Param.Para32[0] = PhysPortIndex; - Param.Para32[1] = -1; - SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Param); - SkEventDispatcher(pAC, IoC); - } - - break; + break; default: break; @@ -2088,19 +2139,19 @@ * SkGePnmiPreSetVar, or SkGePnmiSetVar. * * Returns: - * SK_PNMI_ERR_XXX. For details have a look to the description of the + * SK_PNMI_ERR_XXX. For details have a look at the description of the * calling functions. 
- * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist + * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist */ PNMI_STATIC int PnmiVar( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer which stores the mgmt data to be set */ -unsigned int *pLen, /* Total length of mgmt data */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* Total length of pBuf management data */ SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { unsigned int TableIndex; int Ret; @@ -2112,9 +2163,7 @@ return (SK_PNMI_ERR_UNKNOWN_OID); } - /* - * Check NetIndex - */ + /* Check NetIndex */ if (NetIndex >= pAC->Rlmt.NumNets) { return (SK_PNMI_ERR_UNKNOWN_NET); } @@ -2145,15 +2194,15 @@ * * Returns: * SK_PNMI_ERR_XXX. The codes are described in the calling functions. - * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist + * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist */ PNMI_STATIC int PnmiStruct( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Set action to be performed */ -char *pBuf, /* Buffer which contains the data to be set */ -unsigned int *pLen, /* Length of buffer */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +int Action, /* PRESET/SET action to be performed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* Length of pBuf management data buffer */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { int Ret; unsigned int TableIndex; @@ -2179,9 +2228,7 @@ return (SK_PNMI_ERR_TOO_SHORT); } - /* - * Check NetIndex - */ + /* Check NetIndex */ if (NetIndex >= pAC->Rlmt.NumNets) { return (SK_PNMI_ERR_UNKNOWN_NET); } @@ -2348,19 +2395,19 @@ * value range. * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int OidStruct( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { if (Id != OID_SKGE_ALL_DATA) { @@ -2415,19 +2462,19 @@ * value range. * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. 
*/ PNMI_STATIC int Perform( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { int Ret; SK_U32 ActionOp; @@ -2546,19 +2593,19 @@ * the correct data (e.g. a 32bit value is * needed, but a 16 bit value was passed). * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int Mac8023Stat( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { int Ret; SK_U64 StatVal; @@ -2583,9 +2630,7 @@ return (SK_PNMI_ERR_READ_ONLY); } - /* - * Check length - */ + /* Check length */ switch (Id) { case OID_802_3_PERMANENT_ADDRESS: @@ -2606,9 +2651,7 @@ #else /* SK_NDIS_64BIT_CTR */ - /* - * for compatibility, at least 32bit are required for oid - */ + /* for compatibility, at least 32bit are required for OID */ if (*pLen < sizeof(SK_U32)) { /* * but indicate handling for 64bit values, @@ -2654,9 +2697,7 @@ default: StatVal = GetStatVal(pAC, IoC, 0, IdTable[TableIndex].Param, NetIndex); - /* - * by default 32bit values are evaluated - */ + /* by default 32bit values are evaluated */ if (!Is64BitReq) { StatVal32 = (SK_U32)StatVal; SK_PNMI_STORE_U32(pBuf, StatVal32); @@ -2679,7 +2720,7 @@ * MacPrivateStat - OID handler function of OID_SKGE_STAT_XXX * * Description: - * Retrieves the XMAC statistic data. + * Retrieves the MAC statistic data. * * Returns: * SK_PNMI_ERR_OK The request was successfully performed. @@ -2688,35 +2729,36 @@ * the correct data (e.g. a 32bit value is * needed, but a 16 bit value was passed). * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int MacPrivateStat( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. 
On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { unsigned int LogPortMax; unsigned int LogPortIndex; unsigned int PhysPortMax; unsigned int Limit; unsigned int Offset; + int MacType; int Ret; SK_U64 StatVal; + + - - /* - * Calculate instance if wished. MAC index 0 is the virtual - * MAC. - */ + /* Calculate instance if wished. MAC index 0 is the virtual MAC */ PhysPortMax = pAC->GIni.GIMacsFound; LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); + + MacType = pAC->GIni.GIMacType; if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ LogPortMax--; @@ -2739,19 +2781,14 @@ Limit = LogPortMax; } - - /* - * Check action - */ + /* Check action */ if (Action != SK_PNMI_GET) { *pLen = 0; return (SK_PNMI_ERR_READ_ONLY); } - /* - * Check length - */ + /* Check length */ if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U64)) { *pLen = (Limit - LogPortIndex) * sizeof(SK_U64); @@ -2759,7 +2796,7 @@ } /* - * Update XMAC statistic and increment semaphore to indicate that + * Update MAC statistic and increment semaphore to indicate that * an update was already done. */ Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1); @@ -2770,9 +2807,7 @@ } pAC->Pnmi.MacUpdatedFlag ++; - /* - * Get value - */ + /* Get value */ Offset = 0; for (; LogPortIndex < Limit; LogPortIndex ++) { @@ -2787,51 +2822,44 @@ return (SK_PNMI_ERR_GENERAL); */ case OID_SKGE_STAT_RX: - case OID_SKGE_STAT_TX: - switch (pAC->GIni.GIMacType) { - case SK_MAC_XMAC: + if (MacType == SK_MAC_GMAC) { + StatVal = + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_BROADCAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_MULTICAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_UNICAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_UNDERSIZE, NetIndex); + } + else { StatVal = GetStatVal(pAC, IoC, LogPortIndex, IdTable[TableIndex].Param, NetIndex); - break; - - case SK_MAC_GMAC: - if (Id == OID_SKGE_STAT_TX) { - - StatVal = - GetStatVal(pAC, IoC, LogPortIndex, - SK_PNMI_HTX_BROADCAST, NetIndex) + - GetStatVal(pAC, IoC, LogPortIndex, - SK_PNMI_HTX_MULTICAST, NetIndex) + - GetStatVal(pAC, IoC, LogPortIndex, - SK_PNMI_HTX_UNICAST, NetIndex); - } - else { - StatVal = - GetStatVal(pAC, IoC, LogPortIndex, - SK_PNMI_HRX_BROADCAST, NetIndex) + - GetStatVal(pAC, IoC, LogPortIndex, - SK_PNMI_HRX_MULTICAST, NetIndex) + - GetStatVal(pAC, IoC, LogPortIndex, - SK_PNMI_HRX_UNICAST, NetIndex) + - GetStatVal(pAC, IoC, LogPortIndex, - SK_PNMI_HRX_UNDERSIZE, NetIndex); - } - break; - - default: - StatVal = 0; - break; } + break; - SK_PNMI_STORE_U64(pBuf + Offset, StatVal); + case OID_SKGE_STAT_TX: + if (MacType == SK_MAC_GMAC) { + StatVal = + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HTX_BROADCAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HTX_MULTICAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HTX_UNICAST, NetIndex); + } + else { + StatVal = GetStatVal(pAC, IoC, LogPortIndex, + IdTable[TableIndex].Param, NetIndex); + } break; default: StatVal = GetStatVal(pAC, IoC, LogPortIndex, IdTable[TableIndex].Param, NetIndex); - SK_PNMI_STORE_U64(pBuf + Offset, StatVal); - break; } + 
SK_PNMI_STORE_U64(pBuf + Offset, StatVal); Offset += sizeof(SK_U64); } @@ -2863,19 +2891,19 @@ * value range. * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int Addr( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { int Ret; unsigned int LogPortMax; @@ -2906,7 +2934,6 @@ LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance); Limit = LogPortIndex + 1; } - else { /* Instance == (SK_U32)(-1), get all Instances of that OID */ LogPortIndex = 0; @@ -2918,9 +2945,7 @@ */ if (Action == SK_PNMI_GET) { - /* - * Check length - */ + /* Check length */ if (*pLen < (Limit - LogPortIndex) * 6) { *pLen = (Limit - LogPortIndex) * 6; @@ -2996,9 +3021,7 @@ return (SK_PNMI_ERR_GENERAL); } - /* - * Check length - */ + /* Check length */ if (*pLen < (Limit - LogPortIndex) * 6) { *pLen = (Limit - LogPortIndex) * 6; @@ -3069,19 +3092,19 @@ * the correct data (e.g. a 32bit value is * needed, but a 16 bit value was passed). * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int CsumStat( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { unsigned int Index; unsigned int Limit; @@ -3116,9 +3139,7 @@ return (SK_PNMI_ERR_READ_ONLY); } - /* - * Check length - */ + /* Check length */ if (*pLen < (Limit - Index) * sizeof(SK_U64)) { *pLen = (Limit - Index) * sizeof(SK_U64); @@ -3187,19 +3208,19 @@ * the correct data (e.g. a 32bit value is * needed, but a 16 bit value was passed). * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. 
*/ PNMI_STATIC int SensorStat( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { unsigned int i; unsigned int Index; @@ -3238,9 +3259,7 @@ return (SK_PNMI_ERR_READ_ONLY); } - /* - * Check length - */ + /* Check length */ switch (Id) { case OID_SKGE_SENSOR_VALUE: @@ -3434,19 +3453,19 @@ * value range. * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int Vpd( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { SK_VPD_STATUS *pVpdStatus; unsigned int BufLen; @@ -3465,8 +3484,7 @@ /* * Get array of all currently stored VPD keys */ - Ret = GetVpdKeyArr(pAC, IoC, &KeyArr[0][0], sizeof(KeyArr), - &KeyNo); + Ret = GetVpdKeyArr(pAC, IoC, &KeyArr[0][0], sizeof(KeyArr), &KeyNo); if (Ret != SK_PNMI_ERR_OK) { *pLen = 0; return (Ret); @@ -3712,7 +3730,7 @@ *pLen = 0; return (SK_PNMI_ERR_GENERAL); } - } + } else { /* The only OID which can be set is VPD_ACTION */ if (Id != OID_SKGE_VPD_ACTION) { @@ -3909,19 +3927,19 @@ * the correct data (e.g. a 32bit value is * needed, but a 16 bit value was passed). * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int General( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ +char *pBuf, /* Buffer used for the management data transfer */ unsigned int *pLen, /* On call: buffer length. 
On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { int Ret; unsigned int Index; @@ -3939,7 +3957,7 @@ int MacType; /* - * Check instance. We only handle single instance variables + * Check instance. We only handle single instance variables. */ if (Instance != (SK_U32)(-1) && Instance != 1) { @@ -3990,6 +4008,14 @@ #endif /* SK_NDIS_64BIT_CTR */ break; + case OID_SKGE_BOARDLEVEL: + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + case OID_SKGE_PORT_NUMBER: case OID_SKGE_DEVICE_TYPE: case OID_SKGE_RESULT: @@ -4085,10 +4111,10 @@ Val64RxHwErrs = GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_MISSED, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_FRAMING, NetIndex) + - GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_OVERFLOW, NetIndex)+ + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_OVERFLOW, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_JABBER, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_CARRIER, NetIndex) + - GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_IRLENGTH, NetIndex)+ + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_IRLENGTH, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SYMBOL, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SHORTS, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_RUNT, NetIndex) + @@ -4102,8 +4128,8 @@ case OID_GEN_XMIT_ERROR: Val64TxHwErrs = GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_EXCESS_COL, NetIndex) + - GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_LATE_COL, NetIndex)+ - GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_UNDERRUN, NetIndex)+ + GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_LATE_COL, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_UNDERRUN, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_CARRIER, NetIndex); break; } @@ -4130,6 +4156,12 @@ *pLen = Len; break; + case OID_SKGE_BOARDLEVEL: + Val32 = (SK_U32)pAC->GIni.GILevel; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + case OID_SKGE_PORT_NUMBER: Val32 = (SK_U32)pAC->GIni.GIMacsFound; SK_PNMI_STORE_U32(pBuf, Val32); @@ -4316,7 +4348,7 @@ break; case OID_SKGE_TX_SW_QUEUE_LEN: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4345,7 +4377,7 @@ case OID_SKGE_TX_SW_QUEUE_MAX: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4373,7 +4405,7 @@ break; case OID_SKGE_TX_RETRY: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4401,7 +4433,7 @@ break; case OID_SKGE_RX_INTR_CTS: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4429,7 +4461,7 @@ break; case OID_SKGE_TX_INTR_CTS: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the 
frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4457,7 +4489,7 @@ break; case OID_SKGE_RX_NO_BUF_CTS: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4485,7 +4517,7 @@ break; case OID_SKGE_TX_NO_BUF_CTS: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4513,7 +4545,7 @@ break; case OID_SKGE_TX_USED_DESCR_NO: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4541,7 +4573,7 @@ break; case OID_SKGE_RX_DELIVERED_CTS: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4569,7 +4601,7 @@ break; case OID_SKGE_RX_OCTETS_DELIV_CTS: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4607,7 +4639,7 @@ break; case OID_SKGE_IN_ERRORS_CTS: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4615,7 +4647,7 @@ } /* Single net mode */ else { - Val64 = Val64RxHwErrs + + Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[0].RxNoBufCts + pAC->Pnmi.BufPort[1].RxNoBufCts; } @@ -4627,7 +4659,7 @@ } /* Single net mode */ else { - Val64 = Val64RxHwErrs + + Val64 = Val64RxHwErrs + pAC->Pnmi.Port[0].RxNoBufCts + pAC->Pnmi.Port[1].RxNoBufCts; } @@ -4637,7 +4669,7 @@ break; case OID_SKGE_OUT_ERROR_CTS: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4645,7 +4677,7 @@ } /* Single net mode */ else { - Val64 = Val64TxHwErrs + + Val64 = Val64TxHwErrs + pAC->Pnmi.BufPort[0].TxNoBufCts + pAC->Pnmi.BufPort[1].TxNoBufCts; } @@ -4657,7 +4689,7 @@ } /* Single net mode */ else { - Val64 = Val64TxHwErrs + + Val64 = Val64TxHwErrs + pAC->Pnmi.Port[0].TxNoBufCts + pAC->Pnmi.Port[1].TxNoBufCts; } @@ -4667,7 +4699,7 @@ break; case OID_SKGE_ERR_RECOVERY_CTS: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { /* Dual net mode */ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { @@ -4708,7 +4740,7 @@ break; case OID_GEN_RCV_ERROR: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; } @@ -4731,7 
+4763,7 @@ break; case OID_GEN_XMIT_ERROR: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { Val64 = Val64TxHwErrs + pAC->Pnmi.BufPort[NetIndex].TxNoBufCts; } @@ -4754,7 +4786,7 @@ break; case OID_GEN_RCV_NO_BUFFER: - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ if (MacType == SK_MAC_XMAC) { Val64 = pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; } @@ -4820,19 +4852,19 @@ * value range. * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int Rlmt( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { int Ret; unsigned int PhysPortIndex; @@ -4852,7 +4884,7 @@ } /* - * Perform the requested action + * Perform the requested action. */ if (Action == SK_PNMI_GET) { @@ -5125,19 +5157,19 @@ * the correct data (e.g. a 32bit value is * needed, but a 16 bit value was passed). * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int RlmtStat( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { unsigned int PhysPortMax; unsigned int PhysPortIndex; @@ -5148,7 +5180,7 @@ SK_U64 Val64; /* - * Calculate the port indexes from the instance + * Calculate the port indexes from the instance. */ PhysPortMax = pAC->GIni.GIMacsFound; @@ -5327,19 +5359,19 @@ * value range. * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. 
*/ PNMI_STATIC int MacPrivateConf( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { unsigned int PhysPortMax; unsigned int PhysPortIndex; @@ -5348,14 +5380,13 @@ unsigned int Limit; unsigned int Offset; char Val8; - int Ret; + char *pBufPtr; + int Ret; SK_EVPARA EventParam; SK_U32 Val32; - /* - * Calculate instance if wished. MAC index 0 is the virtual - * MAC. + * Calculate instance if wished. MAC index 0 is the virtual MAC. */ PhysPortMax = pAC->GIni.GIMacsFound; LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); @@ -5386,9 +5417,7 @@ */ if (Action == SK_PNMI_GET) { - /* - * Check length - */ + /* Check length */ switch (Id) { case OID_SKGE_PMD: @@ -5408,16 +5437,15 @@ case OID_SKGE_SPEED_STATUS: if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) { - *pLen = (Limit - LogPortIndex) * - sizeof(SK_U8); + *pLen = (Limit - LogPortIndex) * sizeof(SK_U8); return (SK_PNMI_ERR_TOO_SHORT); } break; case OID_SKGE_MTU: - if (*pLen < sizeof(SK_U32)) { + if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U32)) { - *pLen = sizeof(SK_U32); + *pLen = (Limit - LogPortIndex) * sizeof(SK_U32); return (SK_PNMI_ERR_TOO_SHORT); } break; @@ -5446,343 +5474,297 @@ Offset = 0; for (; LogPortIndex < Limit; LogPortIndex ++) { + pBufPtr = pBuf + Offset; + switch (Id) { case OID_SKGE_PMD: - *(pBuf + Offset) = pAC->Pnmi.PMD; + *pBufPtr = pAC->Pnmi.PMD; Offset += sizeof(char); break; case OID_SKGE_CONNECTOR: - *(pBuf + Offset) = pAC->Pnmi.Connector; + *pBufPtr = pAC->Pnmi.Connector; Offset += sizeof(char); break; case OID_SKGE_LINK_CAP: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical ports */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PLinkCap; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkCap; } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkCap; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkCap; } + Offset += sizeof(char); break; case OID_SKGE_LINK_MODE: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical ports */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PLinkModeConf; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkModeConf; } - Offset += sizeof(char); } - else { /* DualNetMode */ + else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkModeConf; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkModeConf; } + 
Offset += sizeof(char); break; case OID_SKGE_LINK_MODE_STATUS: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical port */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = - CalculateLinkModeStatus(pAC, - IoC, PhysPortIndex); + *pBufPtr = + CalculateLinkModeStatus(pAC, IoC, PhysPortIndex); } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = CalculateLinkModeStatus(pAC, IoC, NetIndex); - Offset += sizeof(char); + + *pBufPtr = CalculateLinkModeStatus(pAC, IoC, NetIndex); } + Offset += sizeof(char); break; case OID_SKGE_LINK_STATUS: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical ports */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = - CalculateLinkStatus(pAC, - IoC, PhysPortIndex); + *pBufPtr = CalculateLinkStatus(pAC, IoC, PhysPortIndex); } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = CalculateLinkStatus(pAC, IoC, NetIndex); - Offset += sizeof(char); + *pBufPtr = CalculateLinkStatus(pAC, IoC, NetIndex); } + Offset += sizeof(char); break; case OID_SKGE_FLOWCTRL_CAP: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical ports */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PFlowCtrlCap; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlCap; } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PFlowCtrlCap; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlCap; } + Offset += sizeof(char); break; case OID_SKGE_FLOWCTRL_MODE: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical port */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PFlowCtrlMode; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlMode; } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PFlowCtrlMode; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlMode; } + Offset += sizeof(char); break; case OID_SKGE_FLOWCTRL_STATUS: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical port */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PFlowCtrlStatus; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlStatus; } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PFlowCtrlStatus; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlStatus; } + Offset += sizeof(char); break; case OID_SKGE_PHY_OPERATION_CAP: if (!pAC->Pnmi.DualNetActiveFlag) { 
/* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical ports */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PMSCap; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSCap; } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PMSCap; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PMSCap; } + Offset += sizeof(char); break; case OID_SKGE_PHY_OPERATION_MODE: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical port */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PMSMode; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSMode; } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PMSMode; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PMSMode; } + Offset += sizeof(char); break; case OID_SKGE_PHY_OPERATION_STATUS: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical port */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PMSStatus; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSStatus; } - Offset += sizeof(char); } else { - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PMSStatus; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PMSStatus; } + Offset += sizeof(char); break; case OID_SKGE_SPEED_CAP: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical ports */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PLinkSpeedCap; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeedCap; } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkSpeedCap; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeedCap; } + Offset += sizeof(char); break; case OID_SKGE_SPEED_MODE: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical port */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PLinkSpeed; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeed; } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkSpeed; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeed; } + Offset += sizeof(char); break; case OID_SKGE_SPEED_STATUS: if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + Offset); + VirtualConf(pAC, IoC, Id, pBufPtr); } else { /* Get value for physical port */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( pAC, 
LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PLinkSpeedUsed; + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed; } - Offset += sizeof(char); } else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkSpeedUsed; - Offset += sizeof(char); + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeedUsed; } + Offset += sizeof(char); break; case OID_SKGE_MTU: Val32 = SK_DRIVER_GET_MTU(pAC, IoC, NetIndex); - SK_PNMI_STORE_U32(pBuf + Offset, Val32); + SK_PNMI_STORE_U32(pBufPtr, Val32); Offset += sizeof(SK_U32); break; @@ -6209,19 +6191,19 @@ * value range. * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter. */ PNMI_STATIC int Monitor( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which to mgmt data will be retrieved */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ unsigned int TableIndex, /* Index to the Id table */ -SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ { unsigned int Index; unsigned int Limit; @@ -6232,7 +6214,7 @@ /* * Calculate instance if wished. */ -/* XXX Not yet implemented. Return always an empty table. */ + /* XXX Not yet implemented. Return always an empty table. */ Entries = 0; if ((Instance != (SK_U32)(-1))) { @@ -6331,21 +6313,23 @@ SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf) /* Buffer to which to mgmt data will be retrieved */ +char *pBuf) /* Buffer used for the management data transfer */ { unsigned int PhysPortMax; unsigned int PhysPortIndex; SK_U8 Val8; SK_BOOL PortActiveFlag; - + SK_GEPORT *pPrt; *pBuf = 0; PortActiveFlag = SK_FALSE; PhysPortMax = pAC->GIni.GIMacsFound; - + for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax; PhysPortIndex ++) { + pPrt = &pAC->GIni.GP[PhysPortIndex]; + /* Check if the physical port is active */ if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { @@ -6364,14 +6348,14 @@ * From a curious point of view the virtual port * is capable of all found capabilities. */ - *pBuf |= pAC->GIni.GP[PhysPortIndex].PLinkCap; + *pBuf |= pPrt->PLinkCap; break; case OID_SKGE_LINK_MODE: /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex].PLinkModeConf; + *pBuf = pPrt->PLinkModeConf; continue; } @@ -6380,8 +6364,7 @@ * mode than the first one we return a value that * indicates that the link mode is indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex].PLinkModeConf - ) { + if (*pBuf != pPrt->PLinkModeConf) { *pBuf = SK_LMODE_INDETERMINATED; } @@ -6437,7 +6420,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex].PFlowCtrlCap; + *pBuf = pPrt->PFlowCtrlCap; continue; } @@ -6445,14 +6428,14 @@ * From a curious point of view the virtual port * is capable of all found capabilities. 
*/ - *pBuf |= pAC->GIni.GP[PhysPortIndex].PFlowCtrlCap; + *pBuf |= pPrt->PFlowCtrlCap; break; case OID_SKGE_FLOWCTRL_MODE: /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex].PFlowCtrlMode; + *pBuf = pPrt->PFlowCtrlMode; continue; } @@ -6461,7 +6444,7 @@ * control mode than the first one, we return a value * that indicates that the mode is indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex].PFlowCtrlMode) { + if (*pBuf != pPrt->PFlowCtrlMode) { *pBuf = SK_FLOW_MODE_INDETERMINATED; } @@ -6471,7 +6454,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex].PFlowCtrlStatus; + *pBuf = pPrt->PFlowCtrlStatus; continue; } @@ -6481,7 +6464,7 @@ * value that indicates that the status is * indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex].PFlowCtrlStatus) { + if (*pBuf != pPrt->PFlowCtrlStatus) { *pBuf = SK_FLOW_STAT_INDETERMINATED; } @@ -6491,7 +6474,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex].PMSCap; + *pBuf = pPrt->PMSCap; continue; } @@ -6499,14 +6482,14 @@ * From a curious point of view the virtual port * is capable of all found capabilities. */ - *pBuf |= pAC->GIni.GP[PhysPortIndex].PMSCap; + *pBuf |= pPrt->PMSCap; break; case OID_SKGE_PHY_OPERATION_MODE: /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex].PMSMode; + *pBuf = pPrt->PMSMode; continue; } @@ -6515,7 +6498,7 @@ * slave mode than the first one, we return a value * that indicates that the mode is indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex].PMSMode) { + if (*pBuf != pPrt->PMSMode) { *pBuf = SK_MS_MODE_INDETERMINATED; } @@ -6525,7 +6508,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex].PMSStatus; + *pBuf = pPrt->PMSStatus; continue; } @@ -6535,7 +6518,7 @@ * value that indicates that the status is * indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex].PMSStatus) { + if (*pBuf != pPrt->PMSStatus) { *pBuf = SK_MS_STAT_INDETERMINATED; } @@ -6545,7 +6528,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex].PLinkSpeed; + *pBuf = pPrt->PLinkSpeed; continue; } @@ -6554,7 +6537,7 @@ * control mode than the first one, we return a value * that indicates that the mode is indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex].PLinkSpeed) { + if (*pBuf != pPrt->PLinkSpeed) { *pBuf = SK_LSPEED_INDETERMINATED; } @@ -6564,7 +6547,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed; + *pBuf = pPrt->PLinkSpeedUsed; continue; } @@ -6574,7 +6557,7 @@ * value that indicates that the status is * indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed) { + if (*pBuf != pPrt->PLinkSpeedUsed) { *pBuf = SK_LSPEED_STAT_INDETERMINATED; } @@ -6662,7 +6645,6 @@ { SK_U8 Result; - if (!pAC->GIni.GP[PhysPortIndex].PHWLinkUp) { Result = SK_PNMI_RLMT_LSTAT_PHY_DOWN; @@ -6702,7 +6684,6 @@ { SK_U8 Result; - /* Get the current mode, which can be full or half duplex */ Result = pAC->GIni.GP[PhysPortIndex].PLinkModeStatus; @@ -6710,7 +6691,7 @@ if (Result < SK_LMODE_STAT_HALF) { Result = SK_LMODE_STAT_UNKNOWN; - } + } else if (pAC->GIni.GP[PhysPortIndex].PLinkMode >= SK_LMODE_AUTOHALF) { /* @@ -6913,8 +6894,8 @@ * * Description: * The XMAC holds its statistic internally. 
To obtain the current - * values we send a command so that the statistic data will - * be written to apredefined memory area on the adapter. + * values we must send a command so that the statistic data will + * be written to a predefined memory area on the adapter. * * Returns: * SK_PNMI_ERR_OK Task successfully performed. @@ -6941,16 +6922,16 @@ for (MacIndex = FirstMac; MacIndex <= LastMac; MacIndex ++) { /* - * 2002-09-13 pweber: Freeze the current sw counters. - * (That should be done as close as - * possible to the update of the - * hw counters) + * 2002-09-13 pweber: Freeze the current SW counters. + * (That should be done as close as + * possible to the update of the + * HW counters) */ if (pAC->GIni.GIMacType == SK_MAC_XMAC) { pAC->Pnmi.BufPort[MacIndex] = pAC->Pnmi.Port[MacIndex]; } - /* 2002-09-13 pweber: Update the hw counter */ + /* 2002-09-13 pweber: Update the HW counter */ if (pAC->GIni.GIFunc.pFnMacUpdateStats(pAC, IoC, MacIndex) != 0) { return (SK_PNMI_ERR_GENERAL); @@ -6991,6 +6972,7 @@ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ PhysPortIndex = NetIndex; + Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex); } else { /* Single Net mode */ @@ -7005,8 +6987,7 @@ if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { - Val += GetPhysStatVal(pAC, IoC, PhysPortIndex, - StatIndex); + Val += GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex); } } @@ -7016,6 +6997,7 @@ else { /* Get counter value of physical port */ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex); + Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex); } } @@ -7049,14 +7031,18 @@ SK_U32 HighVal = 0; SK_U16 Word; int MacType; + unsigned int HelpIndex; + SK_GEPORT *pPrt; SK_PNMI_PORT *pPnmiPrt; SK_GEMACFUNC *pFnMac; + pPrt = &pAC->GIni.GP[PhysPortIndex]; + MacType = pAC->GIni.GIMacType; - /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ - if (pAC->GIni.GIMacType == SK_MAC_XMAC) { + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { pPnmiPrt = &pAC->Pnmi.BufPort[PhysPortIndex]; } else { @@ -7067,15 +7053,46 @@ switch (StatIndex) { case SK_PNMI_HTX: + if (MacType == SK_MAC_GMAC) { + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HTX_BROADCAST][MacType].Reg, + &LowVal); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HTX_MULTICAST][MacType].Reg, + &HighVal); + LowVal += HighVal; + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HTX_UNICAST][MacType].Reg, + &HighVal); + LowVal += HighVal; + } + else { + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + } + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + case SK_PNMI_HRX: - /* Not supported by GMAC */ if (MacType == SK_MAC_GMAC) { - return (Val); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HRX_BROADCAST][MacType].Reg, + &LowVal); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HRX_MULTICAST][MacType].Reg, + &HighVal); + LowVal += HighVal; + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HRX_UNICAST][MacType].Reg, + &HighVal); + LowVal += HighVal; + } + else { + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); } - - (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, - StatAddr[StatIndex][MacType].Reg, - &LowVal); HighVal = pPnmiPrt->CounterHigh[StatIndex]; break; @@ -7106,14 
+7123,16 @@ case SK_PNMI_HTX_MACC: /* GMAC only supports PAUSE MAC control frames */ if (MacType == SK_MAC_GMAC) { - Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, SK_PNMI_HTX_PMACC); - - return (Val); + HelpIndex = SK_PNMI_HTX_PMACC; } - + else { + HelpIndex = StatIndex; + } + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, - StatAddr[StatIndex][MacType].Reg, - &LowVal); + StatAddr[HelpIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; break; @@ -7130,12 +7149,10 @@ HighVal = pPnmiPrt->CounterHigh[StatIndex]; break; - - case SK_PNMI_HTX_DEFFERAL: /* Not supported by GMAC */ if (MacType == SK_MAC_GMAC) { - return (Val); + return (Val); } /* @@ -7144,16 +7161,16 @@ * * In full-duplex mode the counter remains constant! */ - if ((pAC->GIni.GP[PhysPortIndex].PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) || - (pAC->GIni.GP[PhysPortIndex].PLinkModeStatus == SK_LMODE_STAT_FULL)) { + if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) || + (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL)) { LowVal = 0; HighVal = 0; } else { - /* Otherwise get contents of hardware register. */ + /* Otherwise get contents of hardware register */ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, - StatAddr[SK_PNMI_HTX_DEFFERAL][MacType].Reg, + StatAddr[StatIndex][MacType].Reg, &LowVal); HighVal = pPnmiPrt->CounterHigh[StatIndex]; } @@ -7181,7 +7198,7 @@ case SK_PNMI_HRX_LONGFRAMES: /* For XMAC the SW counter is managed by PNMI */ if (MacType == SK_MAC_XMAC) { - return (pPnmiPrt->StatRxLongFrameCts); + return (pPnmiPrt->StatRxLongFrameCts); } (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, @@ -7192,19 +7209,17 @@ case SK_PNMI_HRX_TOO_LONG: (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, - StatAddr[StatIndex][MacType].Reg, + StatAddr[StatIndex][MacType].Reg, &LowVal); HighVal = pPnmiPrt->CounterHigh[StatIndex]; Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal); - switch (MacType) { - case SK_MAC_GMAC: + if (MacType == SK_MAC_GMAC) { /* For GMAC the SW counter is additionally managed by PNMI */ Val += pPnmiPrt->StatRxFrameTooLongCts; - break; - - case SK_MAC_XMAC: + } + else { /* * Frames longer than IEEE 802.3 frame max size are counted * by XMAC in frame_too_long counter even reception of long @@ -7212,10 +7227,6 @@ * So correct the value by subtracting RxLongFrame counter. */ Val -= pPnmiPrt->StatRxLongFrameCts; - break; - - default: - break; } LowVal = (SK_U32)Val; @@ -7226,7 +7237,7 @@ /* Not supported by GMAC */ if (MacType == SK_MAC_GMAC) { /* GM_RXE_FRAG?? */ - return (Val); + return (Val); } /* @@ -7234,7 +7245,7 @@ * * If link-down the counter remains constant */ - if (pAC->GIni.GP[PhysPortIndex].PLinkModeStatus != SK_LMODE_STAT_UNKNOWN) { + if (pPrt->PLinkModeStatus != SK_LMODE_STAT_UNKNOWN) { /* Otherwise get incremental difference */ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, @@ -7261,8 +7272,7 @@ case SK_PNMI_HRX_CEXT: /* Not supported by GMAC */ if (MacType == SK_MAC_GMAC) { - /* GM_RXE_FRAG?? 
*/ - return (Val); + return (Val); } (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, @@ -7274,7 +7284,7 @@ case SK_PNMI_HRX_PMACC_ERR: /* For GMAC the SW counter is managed by PNMI */ if (MacType == SK_MAC_GMAC) { - return (pPnmiPrt->StatRxPMaccErr); + return (pPnmiPrt->StatRxPMaccErr); } (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, @@ -7296,16 +7306,14 @@ break; case SK_PNMI_HRX_FCS: - /* - * Broadcom filters fcs errors and counts it in + /* + * Broadcom filters FCS errors and counts it in * Receive Error Counter register */ - if (pAC->GIni.GP[PhysPortIndex].PhyType == SK_PHY_BCOM) { + if (pPrt->PhyType == SK_PHY_BCOM) { /* do not read while not initialized (PHY_READ hangs!)*/ - if (pAC->GIni.GP[PhysPortIndex].PState) { - PHY_READ(IoC, &pAC->GIni.GP[PhysPortIndex], - PhysPortIndex, PHY_BCOM_RE_CTR, - &Word); + if (pPrt->PState != SK_PRT_RESET) { + SkXmPhyRead(pAC, IoC, PhysPortIndex, PHY_BCOM_RE_CTR, &Word); LowVal = Word; } @@ -7375,8 +7383,8 @@ EventParam.Para32[1] = (SK_U32)-1; SkEventQueue(pAC, SKGE_CSUM, SK_CSUM_EVENT_CLEAR_PROTO_STATS, EventParam); -#endif - +#endif /* SK_USE_CSUM */ + /* Clear XMAC statistic */ for (PhysPortIndex = 0; PhysPortIndex < (unsigned int)pAC->GIni.GIMacsFound; PhysPortIndex ++) { @@ -7492,18 +7500,18 @@ End -= EntrySize; #ifdef DEBUG SK_MEMSET(pBuf + End, (char)(-1), EntrySize); -#endif +#endif /* DEBUG */ if (End == BufPad) { #ifdef DEBUG SK_MEMSET(pBuf, (char)(-1), End); -#endif +#endif /* DEBUG */ BufFree += End; End = 0; BufPad = 0; } } - /* + /* * Insert new entry as first entry. Newest entries are * stored at the beginning of the queue. */ @@ -7772,7 +7780,6 @@ } } - #ifdef SK_POWER_MGMT /***************************************************************************** * @@ -7814,45 +7821,50 @@ *pLen = 0; return (SK_PNMI_ERR_UNKNOWN_INST); } + + + /* Check length */ + switch (Id) { - /* - * Perform action - */ - if (Action == SK_PNMI_GET) { + case OID_PNP_CAPABILITIES: + if (*pLen < sizeof(SK_PNP_CAPABILITIES)) { - /* - * Check length - */ - switch (Id) { + *pLen = sizeof(SK_PNP_CAPABILITIES); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; - case OID_PNP_CAPABILITIES: - if (*pLen < sizeof(SK_PNP_CAPABILITIES)) { + case OID_PNP_SET_POWER: + case OID_PNP_QUERY_POWER: + if (*pLen < sizeof(SK_DEVICE_POWER_STATE)) + { + *pLen = sizeof(SK_DEVICE_POWER_STATE); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; - *pLen = sizeof(SK_PNP_CAPABILITIES); - return (SK_PNMI_ERR_TOO_SHORT); - } - break; + case OID_PNP_ADD_WAKE_UP_PATTERN: + case OID_PNP_REMOVE_WAKE_UP_PATTERN: + if (*pLen < sizeof(SK_PM_PACKET_PATTERN)) { - case OID_PNP_QUERY_POWER: - case OID_PNP_ENABLE_WAKE_UP: - if (*pLen < sizeof(SK_U32)) { + *pLen = sizeof(SK_PM_PACKET_PATTERN); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; - *pLen = sizeof(SK_U32); - return (SK_PNMI_ERR_TOO_SHORT); - } - break; + case OID_PNP_ENABLE_WAKE_UP: + if (*pLen < sizeof(SK_U32)) { - case OID_PNP_SET_POWER: - case OID_PNP_ADD_WAKE_UP_PATTERN: - case OID_PNP_REMOVE_WAKE_UP_PATTERN: - break; - - default: - SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR040, - SK_PNMI_ERR040MSG); - *pLen = 0; - return (SK_PNMI_ERR_GENERAL); - } + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + } + + /* + * Perform action + */ + if (Action == SK_PNMI_GET) { /* * Get value @@ -7865,11 +7877,12 @@ case OID_PNP_QUERY_POWER: /* The Windows DDK describes: An OID_PNP_QUERY_POWER requests - the miniport to indicate whether it can transition its NIC - to the low-power state. 
+ the miniport to indicate whether it can transition its NIC + to the low-power state. A miniport driver must always return NDIS_STATUS_SUCCESS to a query of OID_PNP_QUERY_POWER. */ - RetCode = SK_PNMI_ERR_OK; + *pLen = sizeof(SK_DEVICE_POWER_STATE);; + RetCode = SK_PNMI_ERR_OK; break; /* NDIS handles these OIDs as write-only. @@ -7880,7 +7893,7 @@ case OID_PNP_ADD_WAKE_UP_PATTERN: case OID_PNP_REMOVE_WAKE_UP_PATTERN: *pLen = 0; - RetCode = SK_PNMI_ERR_OK; + RetCode = SK_PNMI_ERR_NOT_SUPPORTED; break; case OID_PNP_ENABLE_WAKE_UP: @@ -7892,41 +7905,9 @@ break; } - return (RetCode); + return (RetCode); } - /* - * From here SET or PRESET action. Check if the passed - * buffer length is plausible. - */ - switch (Id) { - case OID_PNP_SET_POWER: - case OID_PNP_ENABLE_WAKE_UP: - if (*pLen < sizeof(SK_U32)) { - - *pLen = sizeof(SK_U32); - return (SK_PNMI_ERR_TOO_SHORT); - } - if (*pLen != sizeof(SK_U32)) { - - *pLen = 0; - return (SK_PNMI_ERR_BAD_VALUE); - } - break; - - case OID_PNP_ADD_WAKE_UP_PATTERN: - case OID_PNP_REMOVE_WAKE_UP_PATTERN: - if (*pLen < sizeof(SK_PM_PACKET_PATTERN)) { - - *pLen = 0; - return (SK_PNMI_ERR_BAD_VALUE); - } - break; - - default: - *pLen = 0; - return (SK_PNMI_ERR_READ_ONLY); - } /* * Perform preset or set @@ -7934,7 +7915,7 @@ /* POWER module does not support PRESET action */ if (Action == SK_PNMI_PRESET) { - return (SK_PNMI_ERR_OK); + return (SK_PNMI_ERR_OK); } switch (Id) { @@ -7955,13 +7936,148 @@ break; default: - RetCode = SK_PNMI_ERR_GENERAL; + RetCode = SK_PNMI_ERR_READ_ONLY; } return (RetCode); } #endif /* SK_POWER_MGMT */ +#ifdef SK_DIAG_SUPPORT +/***************************************************************************** + * + * DiagActions - OID handler function of Diagnostic driver + * + * Description: + * The code is simple. No description necessary. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ + +PNMI_STATIC int DiagActions( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + + SK_U32 RetCode = SK_PNMI_ERR_GENERAL; + + /* + * Check instance. We only handle single instance variables. + */ + if (Instance != (SK_U32)(-1) && Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + /* + * Check length. + */ + switch (Id) { + + case OID_SKGE_DIAG_MODE: + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR040, SK_PNMI_ERR040MSG); + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* Perform action. */ + + /* GET value. 
*/ + if (Action == SK_PNMI_GET) { + + switch (Id) { + + case OID_SKGE_DIAG_MODE: + SK_PNMI_STORE_U32(pBuf, pAC->DiagModeActive); + *pLen = sizeof(SK_U32); + RetCode = SK_PNMI_ERR_OK; + break; + + default: + *pLen = 0; + RetCode = SK_PNMI_ERR_GENERAL; + break; + } + + return (RetCode); + } + + /* From here SET or PRESET value. */ + + /* PRESET value is not supported. */ + if (Action == SK_PNMI_PRESET) { + return (SK_PNMI_ERR_OK); + } + + /* SET value. */ + switch (Id) { + case OID_SKGE_DIAG_MODE: + + /* Handle the SET. */ + switch (*pBuf) { + + /* Enter the DIAG mode in the driver. */ + case 1: + /* If DiagMode is not active, we can enter it. */ + if (!pAC->DiagModeActive) { + + RetCode = SkDrvEnterDiagMode(pAC); + } + else { + + RetCode = SK_PNMI_ERR_GENERAL; + } + break; + + /* Leave the DIAG mode in the driver. */ + case 0: + RetCode = SkDrvLeaveDiagMode(pAC); + break; + + default: + RetCode = SK_PNMI_ERR_BAD_VALUE; + break; + } + break; + + default: + RetCode = SK_PNMI_ERR_GENERAL; + } + + if (RetCode == SK_PNMI_ERR_OK) { + *pLen = sizeof(SK_U32); + } + else { + + *pLen = 0; + } + return (RetCode); +} +#endif /* SK_DIAG_SUPPORT */ /***************************************************************************** * @@ -7977,7 +8093,7 @@ * the correct data (e.g. a 32bit value is * needed, but a 16 bit value was passed). * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't - * exist (e.g. port instance 3 on a two port + * exist (e.g. port instance 3 on a two port * adapter). * SK_PNMI_ERR_READ_ONLY Only the Get action is allowed. * @@ -7986,10 +8102,10 @@ PNMI_STATIC int Vct( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ -int Action, /* Get/PreSet/Set action */ +int Action, /* GET/PRESET/SET action */ SK_U32 Id, /* Object ID that is to be processed */ -char *pBuf, /* Buffer to which the mgmt data will be copied */ -unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ SK_U32 Instance, /* Instance (-1,2..n) that is to be queried */ unsigned int TableIndex, /* Index to the Id table */ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ @@ -8001,7 +8117,6 @@ SK_U32 PhysPortIndex; SK_U32 Limit; SK_U32 Offset; - SK_BOOL Link; SK_U32 RetCode = SK_PNMI_ERR_GENERAL; int i; @@ -8034,7 +8149,8 @@ } Limit = PhysPortIndex + 1; } - else { /* + else { + /* * Instance == (SK_U32) (-1), get all Instances of that OID. * * Not implemented yet. May be used in future releases. @@ -8051,9 +8167,7 @@ Link = SK_FALSE; } - /* - * Check MAC type. - */ + /* Check MAC type */ if (pPrt->PhyType != SK_PHY_MARV_COPPER) { *pLen = 0; return (SK_PNMI_ERR_GENERAL); @@ -8062,13 +8176,9 @@ /* Initialize backup data pointer. */ pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex]; - /* - * Check action type. - */ + /* Check action type */ if (Action == SK_PNMI_GET) { - /* - * Check length. - */ + /* Check length */ switch (Id) { case OID_SKGE_VCT_GET: @@ -8090,14 +8200,12 @@ return (SK_PNMI_ERR_GENERAL); } - /* - * Get value. - */ + /* Get value */ Offset = 0; for (; PhysPortIndex < Limit; PhysPortIndex++) { switch (Id) { - case OID_SKGE_VCT_GET: + case OID_SKGE_VCT_GET: if ((Link == SK_FALSE) && (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING)) { RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE); @@ -8171,9 +8279,7 @@ * buffer length is plausible. */ - /* - * Check length. 
- */ + /* Check length */ switch (Id) { case OID_SKGE_VCT_SET: if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U32)) { @@ -8254,7 +8360,6 @@ SK_GEPORT *pPrt; SK_PNMI_VCT *pVctData; SK_U32 RetCode; - SK_U8 LinkSpeedUsed; pPrt = &pAC->GIni.GP[PhysPortIndex]; @@ -8297,10 +8402,93 @@ } /* DSP only valid in 100/1000 modes. */ - LinkSpeedUsed = pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed; - if (LinkSpeedUsed != SK_LSPEED_STAT_10MBPS) { + if (pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed != + SK_LSPEED_STAT_10MBPS) { pVctData->VctStatus |= SK_PNMI_VCT_NEW_DSP_DATA; } } - } /* CheckVctStatus */ + + +/***************************************************************************** + * + * SkPnmiGenIoctl - Handles new generic PNMI IOCTL, calls the needed + * PNMI function depending on the subcommand and + * returns all data belonging to the complete database + * or OID request. + * + * Description: + * Looks up the requested subcommand, calls the corresponding handler + * function and passes all required parameters to it. + * The function is called by the driver. It is needed to handle the new + * generic PNMI IOCTL. This IOCTL is given to the driver and contains both + * the OID and a subcommand to decide what kind of request has to be done. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed + * SK_PNMI_ERR_GENERAL A general severe internal error occured + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take + * the data. + * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +int SkPnmiGenIoctl( +SK_AC *pAC, /* Pointer to adapter context struct */ +SK_IOC IoC, /* I/O context */ +void *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* Length of buffer */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ +SK_I32 Mode; /* Store value of subcommand. */ +SK_U32 Oid; /* Store value of OID. */ +int ReturnCode; /* Store return value to show status of PNMI action. */ +int HeaderLength; /* Length of desired action plus OID. 
*/ + + ReturnCode = SK_PNMI_ERR_GENERAL; + + SK_MEMCPY(&Mode, pBuf, sizeof(SK_I32)); + SK_MEMCPY(&Oid, (char *) pBuf + sizeof(SK_I32), sizeof(SK_U32)); + HeaderLength = sizeof(SK_I32) + sizeof(SK_U32); + *pLen = *pLen - HeaderLength; + SK_MEMCPY((char *) pBuf + sizeof(SK_I32), (char *) pBuf + HeaderLength, *pLen); + + switch(Mode) { + case SK_GET_SINGLE_VAR: + ReturnCode = SkPnmiGetVar(pAC, IoC, Oid, + (char *) pBuf + sizeof(SK_I32), pLen, + ((SK_U32) (-1)), NetIndex); + SK_PNMI_STORE_U32(pBuf, ReturnCode); + *pLen = *pLen + sizeof(SK_I32); + break; + case SK_PRESET_SINGLE_VAR: + ReturnCode = SkPnmiPreSetVar(pAC, IoC, Oid, + (char *) pBuf + sizeof(SK_I32), pLen, + ((SK_U32) (-1)), NetIndex); + SK_PNMI_STORE_U32(pBuf, ReturnCode); + *pLen = *pLen + sizeof(SK_I32); + break; + case SK_SET_SINGLE_VAR: + ReturnCode = SkPnmiSetVar(pAC, IoC, Oid, + (char *) pBuf + sizeof(SK_I32), pLen, + ((SK_U32) (-1)), NetIndex); + SK_PNMI_STORE_U32(pBuf, ReturnCode); + *pLen = *pLen + sizeof(SK_I32); + break; + case SK_GET_FULL_MIB: + ReturnCode = SkPnmiGetStruct(pAC, IoC, pBuf, pLen, NetIndex); + break; + case SK_PRESET_FULL_MIB: + ReturnCode = SkPnmiPreSetStruct(pAC, IoC, pBuf, pLen, NetIndex); + break; + case SK_SET_FULL_MIB: + ReturnCode = SkPnmiSetStruct(pAC, IoC, pBuf, pLen, NetIndex); + break; + default: + break; + } + + return (ReturnCode); + +} /* SkGeIocGen */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skgesirq.c linux.22-ac2/drivers/net/sk98lin/skgesirq.c --- linux.vanilla/drivers/net/sk98lin/skgesirq.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skgesirq.c 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skgesirq.c - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.81 $ - * Date: $Date: 2002/12/05 10:49:51 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.91 $ + * Date: $Date: 2003/07/04 12:46:22 $ * Purpose: Special IRQ module * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,60 @@ * History: * * $Log: skgesirq.c,v $ + * Revision 1.91 2003/07/04 12:46:22 rschmidt + * Added debug messages in SkGePortCheckUpGmac(). + * Added error log message and new driver event SK_DRV_DOWNSHIFT_DET + * for Downshift detection (Yukon-Copper). + * Editorial changes. + * + * Revision 1.90 2003/05/28 15:35:45 rschmidt + * Added parameter AutoNeg in all SkGePortCheckUp...() to save code. + * Added setting for AutoNeg only once in SkGePortCheckUp(). + * Moved defines for return codes of SkGePortCheckUp() to header file. + * Editorial changes. + * + * Revision 1.89 2003/05/13 17:32:20 mkarl + * Removed links to RLMT and PNMI for SLIM driver (SK_SLIM). + * Separated GENESIS and YUKON only code to reduce code size. + * + * Revision 1.88 2003/05/06 13:20:34 rschmidt + * Changed workaround for Tx hang in half duplex only for Genesis. + * Replaced SkPnmiGetVar() calls for Tx Octets Counter + * with SkXmMacStatistic() in SkGeSirqIsr(). + * Added defines around GENESIS resp. 
YUKON branches to reduce + * code size for PXE. + * Editorial changes. + * + * Revision 1.87 2003/04/28 09:18:31 rschmidt + * Added increment for GITimeStampCnt (high dword for + * Time Stamp Timer counter), when overflow IRQ occurs. + * Disabled HW Error IRQ on 32-bit Yukon if sensor IRQ occurs + * by changing the common mask stored in GIValIrqMask. + * Changed handling for HW Error IRQ in SkGeSirqIsr(). + * Added clearing of the software forced IRQ in SkGeSirqIsr(). + * Editorial changes. + * + * Revision 1.86 2003/04/09 13:03:24 rschmidt + * Added workaround for configuration of GPHY's Auto-negotiation + * advertisement register after link down event in SkPhyIsrGmac(). + * + * Revision 1.85 2003/04/08 16:39:02 rschmidt + * Changed handling for different PhyTypes for source code + * portability to PXE, UNDI. + * Editorial changes. + * + * Revision 1.84 2003/03/31 07:01:43 mkarl + * Corrected Copyright. + * Editorial changes. + * + * Revision 1.83 2003/02/05 15:10:59 rschmidt + * Fixed setting of PLinkSpeedUsed in SkHWLinkUp() when + * auto-negotiation is disabled. + * Editorial changes. + * + * Revision 1.82 2003/01/29 13:34:33 rschmidt + * Added some typecasts to avoid compiler warnings. + * * Revision 1.81 2002/12/05 10:49:51 rschmidt * Fixed missing Link Down Event for fiber (Bug Id #10768) * Added reading of cable length when link is up @@ -132,7 +187,7 @@ * Added workaround for half duplex hangup. * * Revision 1.58 2000/09/28 13:06:04 gklug - * fix: BCOM may NOT be touched if XMAC is in RESET state + * fix: BCom may NOT be touched if XMAC is in RESET state * * Revision 1.57 2000/09/08 12:38:39 cgoos * Added forgotten variable declaration. @@ -353,26 +408,35 @@ * */ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) static const char SysKonnectFileId[] = - "$Id: skgesirq.c,v 1.81 2002/12/05 10:49:51 rschmidt Exp $" ; + "@(#) $Id: skgesirq.c,v 1.91 2003/07/04 12:46:22 rschmidt Exp $ (C) Marvell."; +#endif #include "h/skdrv1st.h" /* Driver Specific Definitions */ +#ifndef SK_SLIM #include "h/skgepnmi.h" /* PNMI Definitions */ #include "h/skrlmt.h" /* RLMT Definitions */ +#endif #include "h/skdrv2nd.h" /* Adapter Control and Driver specific Def. */ /* local function prototypes */ -static int SkGePortCheckUpXmac(SK_AC*, SK_IOC, int); -static int SkGePortCheckUpBcom(SK_AC*, SK_IOC, int); -static int SkGePortCheckUpGmac(SK_AC*, SK_IOC, int); +#ifdef GENESIS +static int SkGePortCheckUpXmac(SK_AC*, SK_IOC, int, SK_BOOL); +static int SkGePortCheckUpBcom(SK_AC*, SK_IOC, int, SK_BOOL); static void SkPhyIsrBcom(SK_AC*, SK_IOC, int, SK_U16); +#endif /* GENESIS */ +#ifdef YUKON +static int SkGePortCheckUpGmac(SK_AC*, SK_IOC, int, SK_BOOL); static void SkPhyIsrGmac(SK_AC*, SK_IOC, int, SK_U16); +#endif /* YUKON */ #ifdef OTHER_PHY -static int SkGePortCheckUpLone(SK_AC*, SK_IOC, int); -static int SkGePortCheckUpNat(SK_AC*, SK_IOC, int); +static int SkGePortCheckUpLone(SK_AC*, SK_IOC, int, SK_BOOL); +static int SkGePortCheckUpNat(SK_AC*, SK_IOC, int, SK_BOOL); static void SkPhyIsrLone(SK_AC*, SK_IOC, int, SK_U16); #endif /* OTHER_PHY */ +#ifdef GENESIS /* * array of Rx counter from XMAC which are checked * in AutoSense mode to check whether a link is not able to auto-negotiate. 
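[Editor's note -- not part of the patch] The SkPnmiGenIoctl() handler added in the skgepnmi.c hunk further above expects the caller to prepend a 4-byte subcommand and the 4-byte OID to the payload; for the SK_*_SINGLE_VAR subcommands it strips that header, dispatches to SkPnmiGetVar()/SkPnmiPreSetVar()/SkPnmiSetVar(), and writes the PNMI return code back over the first 4 bytes of the buffer. A minimal caller sketch follows, assuming the usual sk98lin headers (h/skdrv1st.h, h/skdrv2nd.h); the helper name and the 256-byte scratch buffer are illustrative assumptions only:

/* Illustrative sketch only -- not part of the patch.
 * Buffer layout consumed by SkPnmiGenIoctl():
 *   [SK_I32 subcommand][SK_U32 OID][request/response data ...]
 * Covers the SK_*_SINGLE_VAR subcommands; for SK_*_FULL_MIB the whole
 * buffer behind the header is the structure itself.
 */
static int SkPnmiGenRequest(	/* hypothetical helper */
SK_AC *pAC,		/* adapter context */
SK_IOC IoC,		/* I/O context */
SK_I32 SubCmd,		/* e.g. SK_GET_SINGLE_VAR */
SK_U32 Oid,		/* OID to operate on */
void *pData,		/* payload in / result out */
unsigned int DataLen,	/* payload buffer length */
SK_U32 NetIndex)	/* NetIndex, always 0 in single net mode */
{
	char		Buf[256];	/* illustrative scratch buffer */
	unsigned int	Len;
	int		Ret;

	if (sizeof(SK_I32) + sizeof(SK_U32) + DataLen > sizeof(Buf)) {
		return (SK_PNMI_ERR_TOO_SHORT);
	}

	/* header: subcommand first, then the OID */
	SK_MEMCPY(Buf, &SubCmd, sizeof(SK_I32));
	SK_MEMCPY(Buf + sizeof(SK_I32), &Oid, sizeof(SK_U32));

	/* payload (or just buffer space for a GET) follows the header */
	SK_MEMCPY(Buf + sizeof(SK_I32) + sizeof(SK_U32), pData, DataLen);

	Len = sizeof(SK_I32) + sizeof(SK_U32) + DataLen;
	Ret = SkPnmiGenIoctl(pAC, IoC, Buf, &Len, NetIndex);

	/* on return the PNMI return code occupies the first 4 bytes,
	 * the data (if any) sits directly behind it */
	if (Ret == SK_PNMI_ERR_OK && Len > sizeof(SK_I32) &&
		Len - sizeof(SK_I32) <= DataLen) {
		SK_MEMCPY(pData, Buf + sizeof(SK_I32), Len - sizeof(SK_I32));
	}

	return (Ret);
}

A read of a single OID would pass SK_GET_SINGLE_VAR with DataLen sized for the expected result; the preset and set subcommands use the same layout with the new value as payload.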
@@ -385,6 +449,7 @@ XM_RXF_1023B, XM_RXF_MAX_SZ } ; +#endif /* GENESIS */ #ifdef __C2MAN__ /* @@ -397,20 +462,13 @@ {} #endif -/* Define return codes of SkGePortCheckUp and CheckShort */ -#define SK_HW_PS_NONE 0 /* No action needed */ -#define SK_HW_PS_RESTART 1 /* Restart needed */ -#define SK_HW_PS_LINK 2 /* Link Up actions needed */ - /****************************************************************************** * * SkHWInitDefSense() - Default Autosensing mode initialization * - * Description: - * This function sets the PLinkMode for HWInit - * - * Note: + * Description: sets the PLinkMode for HWInit * + * Returns: N/A */ static void SkHWInitDefSense( SK_AC *pAC, /* adapter context */ @@ -438,17 +496,17 @@ } /* SkHWInitDefSense */ +#ifdef GENESIS /****************************************************************************** * - * SkHWSenseGetNext() - GetNextAutosensing Mode + * SkHWSenseGetNext() - Get Next Autosensing Mode * - * Description: - * This function handles the AutoSensing + * Description: gets the appropriate next mode * * Note: * */ -SK_U8 SkHWSenseGetNext( +static SK_U8 SkHWSenseGetNext( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ @@ -459,18 +517,18 @@ pPrt->PAutoNegTimeOut = 0; - if (pPrt->PLinkModeConf != SK_LMODE_AUTOSENSE) { + if (pPrt->PLinkModeConf != (SK_U8)SK_LMODE_AUTOSENSE) { /* Leave all as configured */ return(pPrt->PLinkModeConf); } - if (pPrt->PLinkMode == SK_LMODE_AUTOFULL) { + if (pPrt->PLinkMode == (SK_U8)SK_LMODE_AUTOFULL) { /* Return next mode AUTOBOTH */ - return(SK_LMODE_AUTOBOTH); + return ((SK_U8)SK_LMODE_AUTOBOTH); } /* Return default autofull */ - return(SK_LMODE_AUTOFULL); + return ((SK_U8)SK_LMODE_AUTOFULL); } /* SkHWSenseGetNext */ @@ -478,13 +536,11 @@ * * SkHWSenseSetNext() - Autosensing Set next mode * - * Description: - * This function sets the appropriate next mode. 
- * - * Note: + * Description: sets the appropriate next mode * + * Returns: N/A */ -void SkHWSenseSetNext( +static void SkHWSenseSetNext( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port, /* Port Index (MAC_1 + n) */ @@ -496,7 +552,7 @@ pPrt->PAutoNegTimeOut = 0; - if (pPrt->PLinkModeConf != SK_LMODE_AUTOSENSE) { + if (pPrt->PLinkModeConf != (SK_U8)SK_LMODE_AUTOSENSE) { return; } @@ -508,17 +564,16 @@ return; } /* SkHWSenseSetNext */ +#endif /* GENESIS */ /****************************************************************************** * * SkHWLinkDown() - Link Down handling * - * Description: - * This function handles the Hardware link down signal - * - * Note: + * Description: handles the hardware link down signal * + * Returns: N/A */ void SkHWLinkDown( SK_AC *pAC, /* adapter context */ @@ -538,7 +593,7 @@ /* Init default sense mode */ SkHWInitDefSense(pAC, IoC, Port); - if (!pPrt->PHWLinkUp) { + if (pPrt->PHWLinkUp == SK_FALSE) { return; } @@ -549,8 +604,8 @@ pPrt->PHWLinkUp = SK_FALSE; /* Reset Port stati */ - pPrt->PLinkModeStatus = SK_LMODE_STAT_UNKNOWN; - pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN; + pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE; pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_INDETERMINATED; /* Re-init Phy especially when the AutoSense default is set now */ @@ -568,11 +623,9 @@ * * SkHWLinkUp() - Link Up handling * - * Description: - * This function handles the Hardware link up signal - * - * Note: + * Description: handles the hardware link up signal * + * Returns: N/A */ void SkHWLinkUp( SK_AC *pAC, /* adapter context */ @@ -590,39 +643,52 @@ pPrt->PHWLinkUp = SK_TRUE; pPrt->PAutoNegFail = SK_FALSE; - pPrt->PLinkModeStatus = SK_LMODE_STAT_UNKNOWN; + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN; - if (pPrt->PLinkMode != SK_LMODE_AUTOHALF && - pPrt->PLinkMode != SK_LMODE_AUTOFULL && - pPrt->PLinkMode != SK_LMODE_AUTOBOTH) { + if (pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOHALF && + pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOFULL && + pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOBOTH) { /* Link is up and no Auto-negotiation should be done */ - /* Configure Port */ - - /* link speed should be the configured one */ - pPrt->PLinkSpeedUsed = pPrt->PLinkSpeed; + /* Link speed should be the configured one */ + switch (pPrt->PLinkSpeed) { + case SK_LSPEED_AUTO: + /* default is 1000 Mbps */ + case SK_LSPEED_1000MBPS: + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_1000MBPS; + break; + case SK_LSPEED_100MBPS: + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_100MBPS; + break; + case SK_LSPEED_10MBPS: + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_10MBPS; + break; + } /* Set Link Mode Status */ if (pPrt->PLinkMode == SK_LMODE_FULL) { pPrt->PLinkModeStatus = SK_LMODE_STAT_FULL; } else { - pPrt->PLinkModeStatus = SK_LMODE_STAT_HALF; + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_HALF; } /* No flow control without auto-negotiation */ - pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; + pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE; /* enable Rx/Tx */ - SkMacRxTxEnable(pAC, IoC, Port); + (void)SkMacRxTxEnable(pAC, IoC, Port); } } /* SkHWLinkUp */ /****************************************************************************** * - * SkMacParity - does everything to handle MAC parity errors correctly + * SkMacParity() - MAC parity workaround * + * Description: handles MAC parity errors correctly + * + * Returns: N/A */ static void SkMacParity( SK_AC *pAC, /* adapter context */ @@ -631,21 +697,29 @@ { SK_EVPARA Para; SK_GEPORT *pPrt; /* GIni Port 
struct pointer */ - SK_U32 TxMax; /* TxMax Counter */ + SK_U32 TxMax; /* Tx Max Size Counter */ pPrt = &pAC->GIni.GP[Port]; /* Clear IRQ Tx Parity Error */ +#ifdef GENESIS if (pAC->GIni.GIGenesis) { + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_PERR); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), - (SK_U8)((pAC->GIni.GIChipRev == 0) ? GMF_CLI_TX_FC : GMF_CLI_TX_PE)); + (SK_U8)((pAC->GIni.GIChipId == CHIP_ID_YUKON && + pAC->GIni.GIChipRev == 0) ? GMF_CLI_TX_FC : GMF_CLI_TX_PE)); } - +#endif /* YUKON */ + if (pPrt->PCheckPar) { + if (Port == MAC_1) { SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E016, SKERR_SIRQ_E016MSG); } @@ -654,6 +728,7 @@ } Para.Para64 = Port; SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + Para.Para32[0] = Port; SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); @@ -661,15 +736,21 @@ } /* Check whether frames with a size of 1k were sent */ +#ifdef GENESIS if (pAC->GIni.GIGenesis) { /* Snap statistic counters */ (void)SkXmUpdateStats(pAC, IoC, Port); (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXF_MAX_SZ, &TxMax); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + (void)SkGmMacStatistic(pAC, IoC, Port, GM_TXF_1518B, &TxMax); } +#endif /* YUKON */ if (TxMax > 0) { /* From now on check the parity */ @@ -680,11 +761,11 @@ /****************************************************************************** * - * Hardware Error service routine + * SkGeHwErr() - Hardware Error service routine * - * Description: + * Description: handles all HW Error interrupts * - * Notes: + * Returns: N/A */ static void SkGeHwErr( SK_AC *pAC, /* adapter context */ @@ -707,50 +788,62 @@ SK_IN16(IoC, PCI_C(PCI_STATUS), &Word); SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); - SK_OUT16(IoC, PCI_C(PCI_STATUS), Word | PCI_ERRBITS); + SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS)); SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); Para.Para64 = 0; SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para); } +#ifdef GENESIS if (pAC->GIni.GIGenesis) { + if ((HwStatus & IS_NO_STAT_M1) != 0) { /* Ignore it */ /* This situation is also indicated in the descriptor */ SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INSTAT); } - + if ((HwStatus & IS_NO_STAT_M2) != 0) { /* Ignore it */ /* This situation is also indicated in the descriptor */ SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INSTAT); } - + if ((HwStatus & IS_NO_TIST_M1) != 0) { /* Ignore it */ /* This situation is also indicated in the descriptor */ SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INTIST); } - + if ((HwStatus & IS_NO_TIST_M2) != 0) { /* Ignore it */ /* This situation is also indicated in the descriptor */ SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INTIST); } } - else { /* YUKON */ +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* This is necessary only for Rx timing measurements */ if ((HwStatus & IS_IRQ_TIST_OV) != 0) { + /* increment Time Stamp Timer counter (high) */ + pAC->GIni.GITimeStampCnt++; + /* Clear Time Stamp Timer IRQ */ SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_CLR_IRQ); } if ((HwStatus & IS_IRQ_SENSOR) != 0) { - /* Clear I2C IRQ */ - SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ); + /* no sensors on 32-bit Yukon */ + if (pAC->GIni.GIYukon32Bit) { + /* disable HW Error IRQ */ + pAC->GIni.GIValIrqMask &= ~IS_HW_ERR; + } } } +#endif /* YUKON */ if ((HwStatus & IS_RAM_RD_PAR) != 0) { SK_OUT16(IoC, B3_RI_CTRL, 
RI_CLR_RD_PERR); @@ -781,6 +874,7 @@ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E018, SKERR_SIRQ_E018MSG); Para.Para64 = MAC_1; SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + Para.Para32[0] = MAC_1; SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } @@ -792,6 +886,7 @@ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E019, SKERR_SIRQ_E019MSG); Para.Para64 = MAC_2; SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + Para.Para32[0] = MAC_2; SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } @@ -800,11 +895,11 @@ /****************************************************************************** * - * Interrupt service routine + * SkGeSirqIsr() - Special Interrupt Service Routine * - * Description: + * Description: handles all non data transfer specific interrupts (slow path) * - * Notes: + * Returns: N/A */ void SkGeSirqIsr( SK_AC *pAC, /* adapter context */ @@ -814,13 +909,10 @@ SK_EVPARA Para; SK_U32 RegVal32; /* Read register value */ SK_GEPORT *pPrt; /* GIni Port struct pointer */ - unsigned Len; - SK_U64 Octets; SK_U16 PhyInt; - SK_U16 PhyIMsk; int i; - if ((Istatus & IS_HW_ERR) != 0) { + if (((Istatus & IS_HW_ERR) & pAC->GIni.GIValIrqMask) != 0) { /* read the HW Error Interrupt source */ SK_IN32(IoC, B0_HWE_ISRC, &RegVal32); @@ -866,32 +958,47 @@ /* May be a normal situation in a server with a slow network */ SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX1); - /* - * workaround: if in half duplex mode, check for Tx hangup. - * Read number of TX'ed bytes, wait for 10 ms, then compare - * the number with current value. If nothing changed, we assume - * that Tx is hanging and do a FIFO flush (see event routine). - */ - if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || - pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && - !pPrt->HalfDupTimerActive) { +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { /* - * many more pack. arb. timeouts may come in between, - * we ignore those + * workaround: if in half duplex mode, check for Tx hangup. + * Read number of TX'ed bytes, wait for 10 ms, then compare + * the number with current value. If nothing changed, we assume + * that Tx is hanging and do a FIFO flush (see event routine). */ - pPrt->HalfDupTimerActive = SK_TRUE; + if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && + !pPrt->HalfDupTimerActive) { + /* + * many more pack. arb. 
timeouts may come in between, + * we ignore those + */ + pPrt->HalfDupTimerActive = SK_TRUE; +#ifdef XXX + Len = sizeof(SK_U64); + SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, + &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 0), + pAC->Rlmt.Port[0].Net->NetNumber); + + pPrt->LastOctets = Octets; +#endif /* XXX */ + /* Snap statistic counters */ + (void)SkXmUpdateStats(pAC, IoC, 0); - Len = sizeof(SK_U64); - SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, - &Len, (SK_U32) SK_PNMI_PORT_PHYS2INST(pAC, 0), - pAC->Rlmt.Port[0].Net->NetNumber); - - pPrt->LastOctets = Octets; - - Para.Para32[0] = 0; - SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME, - SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para); + (void)SkXmMacStatistic(pAC, IoC, 0, XM_TXO_OK_HI, &RegVal32); + + pPrt->LastOctets = (SK_U64)RegVal32 << 32; + + (void)SkXmMacStatistic(pAC, IoC, 0, XM_TXO_OK_LO, &RegVal32); + + pPrt->LastOctets += RegVal32; + + Para.Para32[0] = 0; + SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME, + SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para); + } } +#endif /* GENESIS */ } if ((Istatus & IS_PA_TO_TX2) != 0) { @@ -901,23 +1008,38 @@ /* May be a normal situation in a server with a slow network */ SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX2); - /* workaround: see above */ - if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || - pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && - !pPrt->HalfDupTimerActive) { - pPrt->HalfDupTimerActive = SK_TRUE; - - Len = sizeof(SK_U64); - SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, - &Len, (SK_U32) SK_PNMI_PORT_PHYS2INST(pAC, 1), - pAC->Rlmt.Port[1].Net->NetNumber); - - pPrt->LastOctets = Octets; - - Para.Para32[0] = 1; - SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME, - SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para); +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* workaround: see above */ + if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && + !pPrt->HalfDupTimerActive) { + pPrt->HalfDupTimerActive = SK_TRUE; +#ifdef XXX + Len = sizeof(SK_U64); + SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, + &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 1), + pAC->Rlmt.Port[1].Net->NetNumber); + + pPrt->LastOctets = Octets; +#endif /* XXX */ + /* Snap statistic counters */ + (void)SkXmUpdateStats(pAC, IoC, 1); + + (void)SkXmMacStatistic(pAC, IoC, 1, XM_TXO_OK_HI, &RegVal32); + + pPrt->LastOctets = (SK_U64)RegVal32 << 32; + + (void)SkXmMacStatistic(pAC, IoC, 1, XM_TXO_OK_LO, &RegVal32); + + pPrt->LastOctets += RegVal32; + + Para.Para32[0] = 1; + SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME, + SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para); + } } +#endif /* GENESIS */ } /* Check interrupts of the particular queues */ @@ -998,59 +1120,69 @@ continue; } - switch (pPrt->PhyType) { - - case SK_PHY_XMAC: - break; - - case SK_PHY_BCOM: - SkXmPhyRead(pAC, IoC, i, PHY_BCOM_INT_STAT, &PhyInt); - SkXmPhyRead(pAC, IoC, i, PHY_BCOM_INT_MASK, &PhyIMsk); - - if ((PhyInt & ~PhyIMsk) != 0) { - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("Port %d Bcom Int: 0x%04X Mask: 0x%04X\n", - i, PhyInt, PhyIMsk)); - SkPhyIsrBcom(pAC, IoC, i, PhyInt); +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + switch (pPrt->PhyType) { + + case SK_PHY_XMAC: + break; + + case SK_PHY_BCOM: + SkXmPhyRead(pAC, IoC, i, PHY_BCOM_INT_STAT, &PhyInt); + + if ((PhyInt & ~PHY_B_DEF_MSK) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Port %d Bcom Int: 0x%04X\n", + i, 
PhyInt)); + SkPhyIsrBcom(pAC, IoC, i, PhyInt); + } + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + SkXmPhyRead(pAC, IoC, i, PHY_LONE_INT_STAT, &PhyInt); + + if ((PhyInt & PHY_L_DEF_MSK) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Port %d Lone Int: %x\n", + i, PhyInt)); + SkPhyIsrLone(pAC, IoC, i, PhyInt); + } + break; +#endif /* OTHER_PHY */ } - break; - - case SK_PHY_MARV_COPPER: - case SK_PHY_MARV_FIBER: + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* Read PHY Interrupt Status */ SkGmPhyRead(pAC, IoC, i, PHY_MARV_INT_STAT, &PhyInt); - SkGmPhyRead(pAC, IoC, i, PHY_MARV_INT_MASK, &PhyIMsk); - if ((PhyInt & PhyIMsk) != 0) { + if ((PhyInt & PHY_M_DEF_MSK) != 0) { SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("Port %d Marv Int: 0x%04X Mask: 0x%04X\n", - i, PhyInt, PhyIMsk)); + ("Port %d Marv Int: 0x%04X\n", + i, PhyInt)); SkPhyIsrGmac(pAC, IoC, i, PhyInt); } - break; - -#ifdef OTHER_PHY - case SK_PHY_LONE: - SkXmPhyRead(pAC, IoC, i, PHY_LONE_INT_STAT, &PhyInt); - SkXmPhyRead(pAC, IoC, i, PHY_LONE_INT_ENAB, &PhyIMsk); - - if ((PhyInt & PhyIMsk) != 0) { - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("Port %d Lone Int: %x Mask: %x\n", - i, PhyInt, PhyIMsk)); - SkPhyIsrLone(pAC, IoC, i, PhyInt); - } - break; - case SK_PHY_NAT: - /* todo: National */ - break; -#endif /* OTHER_PHY */ } +#endif /* YUKON */ } } /* I2C Ready interrupt */ if ((Istatus & IS_I2C_READY) != 0) { +#ifdef SK_SLIM + SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ); +#else SkI2cIsr(pAC, IoC); +#endif + } + + /* SW forced interrupt */ + if ((Istatus & IS_IRQ_SW) != 0) { + /* clear the software IRQ */ + SK_OUT8(IoC, B0_CTST, CS_CL_SW_IRQ); } if ((Istatus & IS_LNK_SYNC_M1) != 0) { @@ -1085,20 +1217,30 @@ /* Timer interrupt (served last) */ if ((Istatus & IS_TIMINT) != 0) { + /* check for HW Errors */ + if (((Istatus & IS_HW_ERR) & ~pAC->GIni.GIValIrqMask) != 0) { + /* read the HW Error Interrupt source */ + SK_IN32(IoC, B0_HWE_ISRC, &RegVal32); + + SkGeHwErr(pAC, IoC, RegVal32); + } + SkHwtIsr(pAC, IoC); } + } /* SkGeSirqIsr */ +#ifdef GENESIS /****************************************************************************** * - * SkGePortCheckShorts - Implementing XMAC Workaround Errata # 2 + * SkGePortCheckShorts() - Implementing XMAC Workaround Errata # 2 * * return: * 0 o.k. nothing needed * 1 Restart needed on this port */ -static int SkGePortCheckShorts( +static int SkGePortCheckShorts( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ int Port) /* Which port should be checked */ @@ -1123,13 +1265,15 @@ (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXE_SHT_ERR, &Shorts); /* - * Read Rx counter (packets seen on the network and not necessarily + * Read Rx counters (packets seen on the network and not necessarily * really received. */ RxCts = 0; for (i = 0; i < sizeof(SkGeRxRegs)/sizeof(SkGeRxRegs[0]); i++) { + (void)SkXmMacStatistic(pAC, IoC, Port, SkGeRxRegs[i], &RxTmp); + RxCts += (SK_U64)RxTmp; } @@ -1199,44 +1343,76 @@ return(Rtv); } /* SkGePortCheckShorts */ +#endif /* GENESIS */ /****************************************************************************** * - * SkGePortCheckUp - Implementation of the Workaround for Errata #2 + * SkGePortCheckUp() - Check if the link is up * * return: * 0 o.k. 
nothing needed * 1 Restart needed on this port * 2 Link came up */ -static int SkGePortCheckUp( +static int SkGePortCheckUp( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ int Port) /* Which port should be checked */ { - switch (pAC->GIni.GP[Port].PhyType) { - case SK_PHY_XMAC: - return(SkGePortCheckUpXmac(pAC, IoC, Port)); - case SK_PHY_BCOM: - return(SkGePortCheckUpBcom(pAC, IoC, Port)); - case SK_PHY_MARV_COPPER: - case SK_PHY_MARV_FIBER: - return(SkGePortCheckUpGmac(pAC, IoC, Port)); + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + SK_BOOL AutoNeg; /* Is Auto-negotiation used ? */ + int Rtv; /* Return value */ + + Rtv = SK_HW_PS_NONE; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + AutoNeg = SK_FALSE; + } + else { + AutoNeg = SK_TRUE; + } + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + switch (pPrt->PhyType) { + + case SK_PHY_XMAC: + Rtv = SkGePortCheckUpXmac(pAC, IoC, Port, AutoNeg); + break; + case SK_PHY_BCOM: + Rtv = SkGePortCheckUpBcom(pAC, IoC, Port, AutoNeg); + break; #ifdef OTHER_PHY - case SK_PHY_LONE: - return(SkGePortCheckUpLone(pAC, IoC, Port)); - case SK_PHY_NAT: - return(SkGePortCheckUpNat(pAC, IoC, Port)); + case SK_PHY_LONE: + Rtv = SkGePortCheckUpLone(pAC, IoC, Port, AutoNeg); + break; + case SK_PHY_NAT: + Rtv = SkGePortCheckUpNat(pAC, IoC, Port, AutoNeg); + break; #endif /* OTHER_PHY */ + } } - return(SK_HW_PS_NONE); +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + Rtv = SkGePortCheckUpGmac(pAC, IoC, Port, AutoNeg); + } +#endif /* YUKON */ + + return(Rtv); } /* SkGePortCheckUp */ +#ifdef GENESIS /****************************************************************************** * - * SkGePortCheckUpXmac - Implementing of the Workaround Errata # 2 + * SkGePortCheckUpXmac() - Implementing of the Workaround Errata # 2 * * return: * 0 o.k. nothing needed @@ -1246,7 +1422,8 @@ static int SkGePortCheckUpXmac( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ -int Port) /* Which port should be checked */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */ { SK_U32 Shorts; /* Short Event Counter */ SK_GEPORT *pPrt; /* GIni Port struct pointer */ @@ -1257,7 +1434,6 @@ SK_U16 LpAb; /* Link Partner Ability */ SK_U16 ResAb; /* Resolved Ability */ SK_U16 ExtStat; /* Extended Status Register */ - SK_BOOL AutoNeg; /* Is Auto-negotiation used ? 
*/ SK_U8 NextMode; /* Next AutoSensing Mode */ pPrt = &pAC->GIni.GP[Port]; @@ -1275,13 +1451,6 @@ pPrt->PIsave = 0; /* Now wait for each port's link */ - if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { - AutoNeg = SK_FALSE; - } - else { - AutoNeg = SK_TRUE; - } - if (pPrt->PLinkBroken) { /* Link was broken */ XM_IN32(IoC, Port, XM_GP_PORT, &GpReg); @@ -1291,6 +1460,7 @@ XM_IN16(IoC, Port, XM_ISRC, &Isrc); IsrcSum |= Isrc; SkXmAutoNegLipaXmac(pAC, IoC, Port, IsrcSum); + if ((Isrc & XM_IS_INP_ASS) == 0) { /* It has been in sync since last time */ /* Restart the PORT */ @@ -1329,7 +1499,7 @@ ("Do NOT restart on Port %d %x %x\n", Port, Isrc, IsrcSum)); } else { - pPrt->PIsave = IsrcSum & XM_IS_AND; + pPrt->PIsave = (SK_U16)(IsrcSum & XM_IS_AND); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("Save Sync/nosync Port %d %x %x\n", Port, Isrc, IsrcSum)); @@ -1370,6 +1540,7 @@ } else { SkXmAutoNegLipaXmac(pAC, IoC, Port, Isrc); + if (SkGePortCheckShorts(pAC, IoC, Port) == SK_HW_PS_RESTART) { return(SK_HW_PS_RESTART); } @@ -1389,7 +1560,7 @@ if ((GpReg & XM_GP_INP_ASS) != 0 || (IsrcSum & XM_IS_INP_ASS) != 0) { if ((GpReg & XM_GP_INP_ASS) == 0) { /* Save Auto-negotiation Done interrupt only if link is in sync */ - pPrt->PIsave = IsrcSum & XM_IS_AND; + pPrt->PIsave = (SK_U16)(IsrcSum & XM_IS_AND); } #ifdef DEBUG if ((pPrt->PIsave & XM_IS_AND) != 0) { @@ -1497,7 +1668,7 @@ /****************************************************************************** * - * SkGePortCheckUpBcom - Check, if the link is up + * SkGePortCheckUpBcom() - Check if the link is up on Bcom PHY * * return: * 0 o.k. nothing needed @@ -1505,9 +1676,10 @@ * 2 Link came up */ static int SkGePortCheckUpBcom( -SK_AC *pAC, /* Adapter Context */ -SK_IOC IoC, /* IO Context */ -int Port) /* Which port should be checked */ +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */ { SK_GEPORT *pPrt; /* GIni Port struct pointer */ int Done; @@ -1519,7 +1691,6 @@ SK_U16 LpAb; SK_U16 ExtStat; #endif /* DEBUG */ - SK_BOOL AutoNeg; /* Is Auto-negotiation used ? */ pPrt = &pAC->GIni.GP[Port]; @@ -1596,7 +1767,7 @@ if ((Isrc & (PHY_B_IS_NO_HDCL /* | PHY_B_IS_NO_HDC */)) != 0) { /* - * Workaround BCOM Errata: + * Workaround BCom Errata: * enable and disable loopback mode if "NO HCD" occurs. */ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Ctrl); @@ -1689,14 +1860,6 @@ } #endif /* DEBUG */ - /* Now wait for each port's link */ - if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { - AutoNeg = SK_FALSE; - } - else { - AutoNeg = SK_TRUE; - } - /* * Here we usually can check whether the link is in sync and * auto-negotiation is done. 
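[Editor's note -- not part of the patch] The Genesis half-duplex Tx-hang workaround in the SkGeSirqIsr() hunks above no longer fetches the Tx octet counter through SkPnmiGetVar() (the old call is kept only under #ifdef XXX); it snapshots the XMAC statistics directly and assembles a 64-bit value from the high and low 32-bit counter registers. A minimal sketch of that pattern, assuming the sk98lin headers; the helper name is an illustrative assumption:

/* Illustrative sketch only -- not part of the patch. Mirrors how the
 * workaround above snapshots the Tx octet counter: latch the XMAC
 * statistics, then combine XM_TXO_OK_HI and XM_TXO_OK_LO into one
 * 64-bit value.
 */
static SK_U64 SkXmTxOctetsSnapshot(	/* hypothetical helper */
SK_AC *pAC,	/* adapter context */
SK_IOC IoC,	/* IO context */
int Port)	/* Port Index (MAC_1 + n) */
{
	SK_U32	Hi;
	SK_U32	Lo;

	/* snap the statistic counters before reading them */
	(void)SkXmUpdateStats(pAC, IoC, Port);

	(void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_HI, &Hi);
	(void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_LO, &Lo);

	return (((SK_U64)Hi << 32) + (SK_U64)Lo);
}

If a second snapshot taken SK_HALFDUP_CHK_TIME later still matches pPrt->LastOctets, the transmitter is assumed to be hung and SkMacFlushTxFifo() restarts it (see the SK_HWEV_HALFDUP_CHK case further down).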
@@ -1707,7 +1870,7 @@ SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNeg: %d, PhyStat: 0x%04x\n", AutoNeg, PhyStat)); + ("AutoNeg: %d, PhyStat: 0x%04X\n", AutoNeg, PhyStat)); SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb); @@ -1715,6 +1878,7 @@ /* Error */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("Master/Slave Fault port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; pPrt->PMSStatus = SK_MS_STAT_FAULT; @@ -1729,7 +1893,7 @@ SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNeg: %d, PhyStat: 0x%04x\n", AutoNeg, PhyStat)); + ("Port %d, ResAb: 0x%04X\n", Port, ResAb)); if (AutoNeg) { if ((PhyStat & PHY_ST_AN_OVER) != 0) { @@ -1793,16 +1957,19 @@ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("Link sync(GP), Port %d\n", Port)); SkHWLinkUp(pAC, IoC, Port); + return(SK_HW_PS_LINK); } return(SK_HW_PS_NONE); } /* SkGePortCheckUpBcom */ +#endif /* GENESIS */ +#ifdef YUKON /****************************************************************************** * - * SkGePortCheckUpGmac - Check, if the link is up + * SkGePortCheckUpGmac() - Check if the link is up on Marvell PHY * * return: * 0 o.k. nothing needed @@ -1810,44 +1977,43 @@ * 2 Link came up */ static int SkGePortCheckUpGmac( -SK_AC *pAC, /* Adapter Context */ -SK_IOC IoC, /* IO Context */ -int Port) /* Which port should be checked */ +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */ { SK_GEPORT *pPrt; /* GIni Port struct pointer */ int Done; - SK_U16 Isrc; /* Interrupt source */ - SK_U16 PhyStat; /* Phy Status */ - SK_U16 PhySpecStat;/* Phy Specific Status */ + SK_U16 PhyIsrc; /* PHY Interrupt source */ + SK_U16 PhyStat; /* PPY Status */ + SK_U16 PhySpecStat;/* PHY Specific Status */ SK_U16 ResAb; /* Master/Slave resolution */ - SK_BOOL AutoNeg; /* Is Auto-negotiation used ? 
*/ + SK_EVPARA Para; pPrt = &pAC->GIni.GP[Port]; /* Read PHY Interrupt Status */ - SkGmPhyRead(pAC, IoC, Port, PHY_MARV_INT_STAT, &Isrc); + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_INT_STAT, &PhyIsrc); - if ((Isrc & PHY_M_IS_AN_COMPL) != 0) { - /* TBD */ + if ((PhyIsrc & PHY_M_IS_AN_COMPL) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Auto-Negotiation Completed, PhyIsrc: 0x%04X\n", PhyIsrc)); } - if (pPrt->PHWLinkUp) { - return(SK_HW_PS_NONE); + if ((PhyIsrc & PHY_M_IS_LSP_CHANGE) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Link Speed Changed, PhyIsrc: 0x%04X\n", PhyIsrc)); } - /* Now wait for each port's link */ - if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { - AutoNeg = SK_FALSE; - } - else { - AutoNeg = SK_TRUE; + if (pPrt->PHWLinkUp) { + return(SK_HW_PS_NONE); } /* Read PHY Status */ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNeg: %d, PhyStat: 0x%04x\n", AutoNeg, PhyStat)); + ("AutoNeg: %d, PhyStat: 0x%04X\n", AutoNeg, PhyStat)); SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat); @@ -1857,6 +2023,7 @@ /* Error */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("Master/Slave Fault port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; pPrt->PMSStatus = SK_MS_STAT_FAULT; @@ -1867,12 +2034,24 @@ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpecStat); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNeg: %d, PhySpecStat: 0x%04x\n", AutoNeg, PhySpecStat)); + ("AutoNeg: %d, PhySpecStat: 0x%04X\n", AutoNeg, PhySpecStat)); if ((PhySpecStat & PHY_M_PS_LINK_UP) == 0) { return(SK_HW_PS_NONE); } + if ((PhySpecStat & PHY_M_PS_DOWNS_STAT) != 0 || + (PhyIsrc & PHY_M_IS_DOWNSH_DET) != 0) { + /* Downshift detected */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E025, SKERR_SIRQ_E025MSG); + + Para.Para64 = Port; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_DOWNSHIFT_DET, Para); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Downshift detected, PhyIsrc: 0x%04X\n", PhyIsrc)); + } + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; @@ -1913,12 +2092,13 @@ return(SK_HW_PS_NONE); } /* SkGePortCheckUpGmac */ +#endif /* YUKON */ #ifdef OTHER_PHY /****************************************************************************** * - * SkGePortCheckUpLone - Check if the link is up + * SkGePortCheckUpLone() - Check if the link is up on Level One PHY * * return: * 0 o.k. nothing needed @@ -1928,7 +2108,8 @@ static int SkGePortCheckUpLone( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ -int Port) /* Which port should be checked */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */ { SK_GEPORT *pPrt; /* GIni Port struct pointer */ int Done; @@ -1937,7 +2118,6 @@ SK_U16 ExtStat; /* Extended Status Register */ SK_U16 PhyStat; /* Phy Status Register */ SK_U16 StatSum; - SK_BOOL AutoNeg; /* Is Auto-negotiation used ? */ SK_U8 NextMode; /* Next AutoSensing Mode */ pPrt = &pAC->GIni.GP[Port]; @@ -1949,14 +2129,6 @@ StatSum = pPrt->PIsave; pPrt->PIsave = 0; - /* Now wait for each ports link */ - if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { - AutoNeg = SK_FALSE; - } - else { - AutoNeg = SK_TRUE; - } - /* * here we usually can check whether the link is in sync and * auto-negotiation is done. 
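[Editor's note -- not part of the patch] SkGePortCheckUpGmac() above now reports a PHY downshift (PHY_M_PS_DOWNS_STAT set in the PHY specific status, or PHY_M_IS_DOWNSH_DET in the interrupt source) by logging SKERR_SIRQ_E025 and queueing the new SK_DRV_DOWNSHIFT_DET driver event with the port number in Para.Para64. The driver-side reaction is outside this hunk; a minimal sketch of a handler reached through SkDrvEvent(), where the helper name and warning text are purely assumptions:

/* Illustrative sketch only -- not part of the patch. One way the
 * driver event handler (dispatched via SkDrvEvent() from the event
 * queue in skqueue.c) could react to SK_DRV_DOWNSHIFT_DET.
 */
static void SkDrvHandleDownshift(	/* hypothetical helper */
SK_AC *pAC,		/* adapter context (unused here) */
SK_EVPARA Para)		/* Para.Para64 = physical port index */
{
	unsigned int Port = (unsigned int)Para.Para64;

	printk(KERN_WARNING
		"sk98lin: port %u: PHY downshift detected, link came up "
		"below the advertised speed (check the cabling)\n", Port);
}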
@@ -2049,6 +2221,7 @@ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("Link sync(GP), Port %d\n", Port)); SkHWLinkUp(pAC, IoC, Port); + return(SK_HW_PS_LINK); } @@ -2058,7 +2231,7 @@ /****************************************************************************** * - * SkGePortCheckUpNat - Check if the link is up + * SkGePortCheckUpNat() - Check if the link is up on National PHY * * return: * 0 o.k. nothing needed @@ -2068,7 +2241,8 @@ static int SkGePortCheckUpNat( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ -int Port) /* Which port should be checked */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */ { /* todo: National */ return(SK_HW_PS_NONE); @@ -2078,7 +2252,7 @@ /****************************************************************************** * - * Event service routine + * SkGeSirqEvent() - Event Service Routine * * Description: * @@ -2090,13 +2264,14 @@ SK_U32 Event, /* Module specific Event */ SK_EVPARA Para) /* Event specific Parameter */ { - SK_U64 Octets; SK_GEPORT *pPrt; /* GIni Port struct pointer */ SK_U32 Port; - SK_U32 Time; - unsigned Len; + SK_U32 Val32; int PortStat; SK_U8 Val8; +#ifdef GENESIS + SK_U64 Octets; +#endif /* GENESIS */ Port = Para.Para32[0]; pPrt = &pAC->GIni.GP[Port]; @@ -2104,15 +2279,13 @@ switch (Event) { case SK_HWEV_WATIM: /* Check whether port came up */ - PortStat = SkGePortCheckUp(pAC, IoC, Port); + PortStat = SkGePortCheckUp(pAC, IoC, (int)Port); switch (PortStat) { case SK_HW_PS_RESTART: if (pPrt->PHWLinkUp) { - /* - * Set Link to down. - */ - SkHWLinkDown(pAC, IoC, Port); + /* Set Link to down */ + SkHWLinkDown(pAC, IoC, (int)Port); /* * Signal directly to RLMT to ensure correct @@ -2129,20 +2302,19 @@ /* Signal to RLMT */ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_UP, Para); break; - } /* Start again the check Timer */ if (pPrt->PHWLinkUp) { - Time = SK_WA_ACT_TIME; + Val32 = SK_WA_ACT_TIME; } else { - Time = SK_WA_INA_TIME; + Val32 = SK_WA_INA_TIME; } /* Todo: still needed for non-XMAC PHYs??? */ /* Start workaround Errata #2 timer */ - SkTimerStart(pAC, IoC, &pPrt->PWaTimer, Time, + SkTimerStart(pAC, IoC, &pPrt->PWaTimer, Val32, SKGE_HWAC, SK_HWEV_WATIM, Para); break; @@ -2155,7 +2327,7 @@ SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para); } - SkHWLinkDown(pAC, IoC, Port); + SkHWLinkDown(pAC, IoC, (int)Port); /* Schedule Port RESET */ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para); @@ -2177,7 +2349,7 @@ /* Stop Workaround Timer */ SkTimerStop(pAC, IoC, &pPrt->PWaTimer); - SkHWLinkDown(pAC, IoC, Port); + SkHWLinkDown(pAC, IoC, (int)Port); break; case SK_HWEV_UPDATE_STAT: @@ -2224,7 +2396,7 @@ } Val8 = (SK_U8)Para.Para32[1]; if (pPrt->PMSMode != Val8) { - /* Set New link mode */ + /* Set New Role (Master/Slave) mode */ pPrt->PMSMode = Val8; /* Restart Port */ @@ -2248,26 +2420,41 @@ } break; +#ifdef GENESIS case SK_HWEV_HALFDUP_CHK: - /* - * half duplex hangup workaround. - * See packet arbiter timeout interrupt for description - */ - pPrt->HalfDupTimerActive = SK_FALSE; - if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || - pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) { - - Len = sizeof(SK_U64); - SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, - &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), - pAC->Rlmt.Port[Port].Net->NetNumber); - - if (pPrt->LastOctets == Octets) { - /* Tx hanging, a FIFO flush restarts it */ - SkMacFlushTxFifo(pAC, IoC, Port); + if (pAC->GIni.GIGenesis) { + /* + * half duplex hangup workaround. 
+ * See packet arbiter timeout interrupt for description + */ + pPrt->HalfDupTimerActive = SK_FALSE; + if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) { +#ifdef XXX + Len = sizeof(SK_U64); + SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, + &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), + pAC->Rlmt.Port[Port].Net->NetNumber); +#endif /* XXX */ + /* Snap statistic counters */ + (void)SkXmUpdateStats(pAC, IoC, Port); + + (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_HI, &Val32); + + Octets = (SK_U64)Val32 << 32; + + (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_LO, &Val32); + + Octets += Val32; + + if (pPrt->LastOctets == Octets) { + /* Tx hanging, a FIFO flush restarts it */ + SkMacFlushTxFifo(pAC, IoC, Port); + } } } break; +#endif /* GENESIS */ default: SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_SIRQ_E001, SKERR_SIRQ_E001MSG); @@ -2278,11 +2465,12 @@ } /* SkGeSirqEvent */ +#ifdef GENESIS /****************************************************************************** * - * SkPhyIsrBcom - PHY interrupt service routine + * SkPhyIsrBcom() - PHY interrupt service routine * - * Description: handle all interrupts from BCOM PHY + * Description: handles all interrupts from BCom PHY * * Returns: N/A */ @@ -2299,15 +2487,15 @@ if ((IStatus & PHY_B_IS_PSE) != 0) { /* Incorrectable pair swap error */ - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E022, + SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E022, SKERR_SIRQ_E022MSG); } if ((IStatus & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) != 0) { - Para.Para32[0] = (SK_U32)Port; SkHWLinkDown(pAC, IoC, Port); + Para.Para32[0] = (SK_U32)Port; /* Signal to RLMT */ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); @@ -2317,13 +2505,15 @@ } } /* SkPhyIsrBcom */ +#endif /* GENESIS */ +#ifdef YUKON /****************************************************************************** * - * SkPhyIsrGmac - PHY interrupt service routine + * SkPhyIsrGmac() - PHY interrupt service routine * - * Description: handle all interrupts from Marvell PHY + * Description: handles all interrupts from Marvell PHY * * Returns: N/A */ @@ -2335,14 +2525,27 @@ { SK_GEPORT *pPrt; /* GIni Port struct pointer */ SK_EVPARA Para; + SK_U16 Word; pPrt = &pAC->GIni.GP[Port]; if ((IStatus & (PHY_M_IS_AN_PR | PHY_M_IS_LST_CHANGE)) != 0) { - Para.Para32[0] = (SK_U32)Port; SkHWLinkDown(pAC, IoC, Port); + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_ADV, &Word); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg.Adv: 0x%04X\n", Word)); + + /* Set Auto-negotiation advertisement */ + if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM) { + /* restore Asymmetric Pause bit */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_AUNE_ADV, + (SK_U16)(Word | PHY_M_AN_ASP)); + } + + Para.Para32[0] = (SK_U32)Port; /* Signal to RLMT */ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } @@ -2352,23 +2555,21 @@ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E023, SKERR_SIRQ_E023MSG); } - if ((IStatus & PHY_M_IS_LSP_CHANGE) != 0) { - /* TBD */ - } - if ((IStatus & PHY_M_IS_FIFO_ERROR) != 0) { /* FIFO Overflow/Underrun Error */ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E024, SKERR_SIRQ_E024MSG); } + } /* SkPhyIsrGmac */ +#endif /* YUKON */ #ifdef OTHER_PHY /****************************************************************************** * - * SkPhyIsrLone - PHY interrupt service routine + * SkPhyIsrLone() - PHY interrupt service routine * - * Description: handle all interrupts from LONE PHY + * Description: handles all interrupts 
from LONE PHY * * Returns: N/A */ @@ -2381,10 +2582,11 @@ SK_EVPARA Para; if (IStatus & (PHY_L_IS_DUP | PHY_L_IS_ISOL)) { + SkHWLinkDown(pAC, IoC, Port); - /* Signal to RLMT */ Para.Para32[0] = (SK_U32)Port; + /* Signal to RLMT */ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/ski2c.c linux.22-ac2/drivers/net/sk98lin/ski2c.c --- linux.vanilla/drivers/net/sk98lin/ski2c.c 2003-06-14 00:11:33.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/ski2c.c 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,15 @@ * * Name: ski2c.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.56 $ - * Date: $Date: 2002/12/19 14:20:41 $ + * Version: $Revision: 1.57 $ + * Date: $Date: 2003/01/28 09:17:38 $ * Purpose: Functions to access Voltage and Temperature Sensor * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,10 @@ * History: * * $Log: ski2c.c,v $ + * Revision 1.57 2003/01/28 09:17:38 rschmidt + * Fixed handling for sensors on YUKON Fiber. + * Editorial changes. + * * Revision 1.56 2002/12/19 14:20:41 rschmidt * Added debugging code in SkI2cWait(). * Replaced all I2C-write operations with function SkI2cWrite(). @@ -228,7 +232,7 @@ * I2C Protocol */ static const char SysKonnectFileId[] = - "$Id: ski2c.c,v 1.56 2002/12/19 14:20:41 rschmidt Exp $"; + "$Id: ski2c.c,v 1.57 2003/01/28 09:17:38 rschmidt Exp $"; #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/lm80.h" @@ -249,7 +253,7 @@ The Genesis has 2 I2C buses. One for the EEPROM which holds the VPD Data and one for temperature and voltage sensor. The following picture shows the I2C buses, I2C devices and - there control registers. + their control registers. Note: The VPD functions are in skvpd.c . @@ -314,23 +318,23 @@ * If new devices are added to the I2C bus the timing values have to be checked. */ #ifndef I2C_SLOW_TIMING -#define T_CLK_LOW 1300L /* clock low time in ns */ -#define T_CLK_HIGH 600L /* clock high time in ns */ +#define T_CLK_LOW 1300L /* clock low time in ns */ +#define T_CLK_HIGH 600L /* clock high time in ns */ #define T_DATA_IN_SETUP 100L /* data in Set-up Time */ #define T_START_HOLD 600L /* start condition hold time */ #define T_START_SETUP 600L /* start condition Set-up time */ #define T_STOP_SETUP 600L /* stop condition Set-up time */ -#define T_BUS_IDLE 1300L /* time the bus must free after Tx */ +#define T_BUS_IDLE 1300L /* time the bus must free after Tx */ #define T_CLK_2_DATA_OUT 900L /* max. 
clock low to data output valid */ #else /* I2C_SLOW_TIMING */ /* I2C Standard Mode Timing */ -#define T_CLK_LOW 4700L /* clock low time in ns */ -#define T_CLK_HIGH 4000L /* clock high time in ns */ +#define T_CLK_LOW 4700L /* clock low time in ns */ +#define T_CLK_HIGH 4000L /* clock high time in ns */ #define T_DATA_IN_SETUP 250L /* data in Set-up Time */ #define T_START_HOLD 4000L /* start condition hold time */ #define T_START_SETUP 4700L /* start condition Set-up time */ #define T_STOP_SETUP 4000L /* stop condition Set-up time */ -#define T_BUS_IDLE 4700L /* time the bus must free after Tx */ +#define T_BUS_IDLE 4700L /* time the bus must free after Tx */ #endif /* !I2C_SLOW_TIMING */ #define NS2BCLK(x) (((x)*125)/10000) @@ -858,18 +862,18 @@ pPrt = &pAC->GIni.GP[0]; - switch (pPrt->PhyType) { - case SK_PHY_BCOM: - if (pAC->GIni.GIMacsFound == 1) { - pAC->I2c.MaxSens += 1; - } - else { - pAC->I2c.MaxSens += 3; + if (pAC->GIni.GIGenesis) { + if (pPrt->PhyType == SK_PHY_BCOM) { + if (pAC->GIni.GIMacsFound == 1) { + pAC->I2c.MaxSens += 1; + } + else { + pAC->I2c.MaxSens += 3; + } } - break; - case SK_PHY_MARV_COPPER: + } + else { pAC->I2c.MaxSens += 3; - break; } for (i = 0; i < pAC->I2c.MaxSens; i++) { @@ -912,14 +916,23 @@ pAC->I2c.SenTable[i].SenReg = LM80_VT2_IN; break; case 4: - if (pPrt->PhyType == SK_PHY_BCOM) { - pAC->I2c.SenTable[i].SenDesc = "Voltage PHY A PLL"; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; + if (pAC->GIni.GIGenesis) { + if (pPrt->PhyType == SK_PHY_BCOM) { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY A PLL"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; + } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage PMA"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; + } } - else if (pPrt->PhyType == SK_PHY_MARV_COPPER) { + else { pAC->I2c.SenTable[i].SenDesc = "Voltage VAUX"; pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_VAUX_3V3_HIGH_ERR; pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_VAUX_3V3_HIGH_WARN; @@ -932,40 +945,33 @@ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VAUX_0V_WARN_ERR; } } - else { - pAC->I2c.SenTable[i].SenDesc = "Voltage PMA"; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; - } pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; pAC->I2c.SenTable[i].SenReg = LM80_VT3_IN; break; case 5: - if (pPrt->PhyType == SK_PHY_MARV_COPPER) { - pAC->I2c.SenTable[i].SenDesc = "Voltage ASIC-Co 1V5"; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_CORE_1V5_HIGH_ERR; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_CORE_1V5_HIGH_WARN; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_CORE_1V5_LOW_WARN; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_CORE_1V5_LOW_ERR; - } - else { + if (pAC->GIni.GIGenesis) { pAC->I2c.SenTable[i].SenDesc = 
"Voltage PHY 2V5"; pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR; pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN; pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN; pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR; } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage ASIC-Co 1V5"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_CORE_1V5_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_CORE_1V5_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_CORE_1V5_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_CORE_1V5_LOW_ERR; + } pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; pAC->I2c.SenTable[i].SenReg = LM80_VT4_IN; break; case 6: - if (pPrt->PhyType == SK_PHY_MARV_COPPER) { - pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 3V3"; + if (pAC->GIni.GIGenesis) { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY B PLL"; } else { - pAC->I2c.SenTable[i].SenDesc = "Voltage PHY B PLL"; + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 3V3"; } pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; @@ -975,16 +981,7 @@ pAC->I2c.SenTable[i].SenReg = LM80_VT5_IN; break; case 7: - if (pPrt->PhyType == SK_PHY_MARV_COPPER) { - pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5"; - pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR; - pAC->I2c.SenTable[i].SenReg = LM80_VT6_IN; - } - else { + if (pAC->GIni.GIGenesis) { pAC->I2c.SenTable[i].SenDesc = "Speed Fan"; pAC->I2c.SenTable[i].SenType = SK_SEN_FAN; pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_FAN_HIGH_ERR; @@ -993,6 +990,15 @@ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_FAN_LOW_ERR; pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN; } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5"; + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT6_IN; + } break; default: SK_ERR_LOG(pAC, SK_ERRCL_INIT | SK_ERRCL_SW, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skproc.c linux.22-ac2/drivers/net/sk98lin/skproc.c --- linux.vanilla/drivers/net/sk98lin/skproc.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skproc.c 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,15 @@ * * Name: skproc.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.3 $ - * Date: $Date: 2002/10/02 12:59:51 $ + * Version: $Revision: 1.1 $ + * Date: $Date: 2003/07/18 13:39:57 $ * Purpose: Funktions to display statictic data * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -28,6 +28,24 @@ * History: * * $Log: skproc.c,v $ + * Revision 1.1 2003/07/18 13:39:57 rroesler + * Fix: Re-enter after CVS crash + * + * Revision 1.8 2003/06/27 14:41:42 rroesler + * Corrected compiler-warning kernel 2.2 + * + * Revision 1.7 2003/06/27 12:09:51 rroesler + * corrected minor edits + * + * Revision 1.6 2003/05/26 12:58:53 mlindner + * Add: Support for Kernel 2.5/2.6 + * + * Revision 1.5 2003/03/19 14:40:47 mlindner + * Fix: Editorial changes + * + * Revision 1.4 2003/02/25 14:16:37 mlindner + * Fix: Copyright statement + * * Revision 1.3 2002/10/02 12:59:51 mlindner * Add: Support for Yukon * Add: Speed check and setup @@ -79,21 +97,28 @@ #define SPECIALX 32 /* 0x */ #define LARGE 64 -extern SK_AC *pACList; -extern struct net_device *SkGeRootDev; + extern struct net_device *SkGeRootDev; extern char * SkNumber( - char * str, - long long num, - int base, - int size, - int precision, - int type); + char * str, + long long num, + int base, + int size, + int precision, + int type); + +int sk_proc_read(char *buffer, + char **buffer_location, + off_t offset, + int buffer_length, + int *eof, + void *data); + /***************************************************************************** * - * proc_read - print "summaries" entry + * sk_proc_read - print "summaries" entry * * Description: * This function fills the proc entry with statistic data about @@ -103,7 +128,7 @@ * Returns: buffer with statistic data * */ -int proc_read(char *buffer, +int sk_proc_read(char *buffer, char **buffer_location, off_t offset, int buffer_length, @@ -117,7 +142,7 @@ SK_AC *pAC; char test_buf[100]; char sens_msg[50]; - unsigned long Flags; + unsigned long Flags; unsigned int Size; struct SK_NET_DEVICE *next; struct SK_NET_DEVICE *SkgeProcDev = SkGeRootDev; @@ -249,7 +274,7 @@ SkNumber(test_buf, pPnmiStruct->InErrorsCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Receive drops %s\n", + "Receive dropped %s\n", SkNumber(test_buf, pPnmiStruct->RxNoBufCts, 10,0,-1,0)); len += sprintf(buffer + len, @@ -337,7 +362,7 @@ SkNumber(test_buf, pPnmiStat->StatTxSingleCollisionCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Transmit errors types\n"); + "Transmit error types\n"); len += sprintf(buffer + len, " excessive collision %ld\n", pAC->stats.tx_aborted_errors); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skqueue.c linux.22-ac2/drivers/net/sk98lin/skqueue.c --- linux.vanilla/drivers/net/sk98lin/skqueue.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skqueue.c 2003-08-13 14:10:39.000000000 +0100 @@ -1,17 +1,17 @@ /****************************************************************************** * * Name: skqueue.c - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.18 $ - * Date: $Date: 2002/05/07 14:11:11 $ + * Project: Gigabit Ethernet Adapters, Schedule-Modul + * Version: $Revision: 1.19 $ + * Date: $Date: 2003/05/13 18:00:07 $ * Purpose: Management of an event queue. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +27,10 @@ * History: * * $Log: skqueue.c,v $ + * Revision 1.19 2003/05/13 18:00:07 mkarl + * Removed calls to RLMT, TWSI, and PNMI for SLIM driver (SK_SLIM). + * Editorial changes. + * * Revision 1.18 2002/05/07 14:11:11 rwahl * Fixed Watcom Precompiler error. * @@ -90,8 +94,10 @@ /* Event queue and dispatcher */ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) static const char SysKonnectFileId[] = - "$Header: /usr56/projects/ge/schedule/skqueue.c,v 1.18 2002/05/07 14:11:11 rwahl Exp $" ; + "$Header: /usr56/projects/ge/schedule/skqueue.c,v 1.19 2003/05/13 18:00:07 mkarl Exp $" ; +#endif #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/skqueue.h" /* Queue Definitions */ @@ -171,7 +177,8 @@ while (pEv != pAC->Event.EvPut) { PRINTF("dispatch Class %d Event %d\n",pEv->Class,pEv->Event) ; switch(Class = pEv->Class) { -#ifndef SK_USE_LAC_EV +#ifndef SK_USE_LAC_EV +#ifndef SK_SLIM case SKGE_RLMT : /* RLMT Event */ Rtv = SkRlmtEvent(pAC,Ioc,pEv->Event,pEv->Para); break ; @@ -181,7 +188,8 @@ case SKGE_PNMI : Rtv = SkPnmiEvent(pAC,Ioc,pEv->Event,pEv->Para); break ; -#endif /* SK_USE_LAC_EV */ +#endif /* not SK_SLIM */ +#endif /* not SK_USE_LAC_EV */ case SKGE_DRV : /* Driver Event */ Rtv = SkDrvEvent(pAC,Ioc,pEv->Event,pEv->Para); break ; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skrlmt.c linux.22-ac2/drivers/net/sk98lin/skrlmt.c --- linux.vanilla/drivers/net/sk98lin/skrlmt.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skrlmt.c 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,16 @@ * * Name: skrlmt.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.65 $ - * Date: $Date: 2002/07/22 14:29:48 $ + * Version: $Revision: 1.69 $ + * Date: $Date: 2003/04/15 09:39:22 $ * Purpose: Manage links on SK-NET Adapters, esp. redundant ones. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,20 @@ * History: * * $Log: skrlmt.c,v $ + * Revision 1.69 2003/04/15 09:39:22 tschilli + * Copyright messages changed. + * "#error C++ is not yet supported." removed. + * + * Revision 1.68 2003/01/31 15:26:56 rschmidt + * Added init for local variables in RlmtInit(). + * + * Revision 1.67 2003/01/31 14:12:41 mkunz + * single port adapter runs now with two identical MAC addresses + * + * Revision 1.66 2002/09/23 15:14:19 rwahl + * - Reset broadcast timestamp on link down. + * - Editorial corrections. + * * Revision 1.65 2002/07/22 14:29:48 rwahl * - Removed BRK statement from debug check. * @@ -272,13 +287,12 @@ #ifndef lint static const char SysKonnectFileId[] = - "@(#) $Id: skrlmt.c,v 1.65 2002/07/22 14:29:48 rwahl Exp $ (C) SysKonnect."; + "@(#) $Id: skrlmt.c,v 1.69 2003/04/15 09:39:22 tschilli Exp $ (C) Marvell."; #endif /* !defined(lint) */ #define __SKRLMT_C #ifdef __cplusplus -#error C++ is not yet supported. 
extern "C" { #endif /* cplusplus */ @@ -578,6 +592,10 @@ SK_U32 i, j; SK_U64 Random; SK_EVPARA Para; + SK_MAC_ADDR VirtualMacAddress; + SK_MAC_ADDR PhysicalAMacAddress; + SK_BOOL VirtualMacAddressSet; + SK_BOOL PhysicalAMacAddressSet; SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_INIT, ("RLMT Init level %d.\n", Level)) @@ -625,7 +643,7 @@ pAC->Rlmt.Net[0].NumPorts = pAC->GIni.GIMacsFound; /* Initialize HW registers? */ - if (pAC->GIni.GIMacsFound < 2) { + if (pAC->GIni.GIMacsFound == 1) { Para.Para32[0] = SK_RLMT_MODE_CLS; Para.Para32[1] = 0; (void)SkRlmtEvent(pAC, IoC, SK_RLMT_MODE_CHANGE, Para); @@ -661,6 +679,38 @@ (void)SkAddrMcUpdate(pAC, IoC, i); } + + VirtualMacAddressSet = SK_FALSE; + /* Read virtual MAC address from Control Register File. */ + for (j = 0; j < SK_MAC_ADDR_LEN; j++) { + + SK_IN8(IoC, B2_MAC_1 + j, &VirtualMacAddress.a[j]); + VirtualMacAddressSet |= VirtualMacAddress.a[j]; + } + + PhysicalAMacAddressSet = SK_FALSE; + /* Read physical MAC address for MAC A from Control Register File. */ + for (j = 0; j < SK_MAC_ADDR_LEN; j++) { + + SK_IN8(IoC, B2_MAC_2 + j, &PhysicalAMacAddress.a[j]); + PhysicalAMacAddressSet |= PhysicalAMacAddress.a[j]; + } + + /* check if the two mac addresses contain reasonable values */ + if (!VirtualMacAddressSet || !PhysicalAMacAddressSet) { + + pAC->Rlmt.RlmtOff = SK_TRUE; + } + + /* if the two mac addresses are equal switch off the RLMT_PRE_LOOKAHEAD + and the RLMT_LOOKAHEAD macros */ + else if (SK_ADDR_EQUAL(PhysicalAMacAddress.a, VirtualMacAddress.a)) { + + pAC->Rlmt.RlmtOff = SK_TRUE; + } + else { + pAC->Rlmt.RlmtOff = SK_FALSE; + } break; default: /* error */ @@ -760,7 +810,7 @@ } #endif /* DEBUG */ - return; + return; } /* SkRlmtBuildCheckChain */ @@ -847,7 +897,7 @@ } } - return (pMb); + return (pMb); } /* SkRlmtBuildPacket */ @@ -929,7 +979,7 @@ pAC->Rlmt.Port[PortNumber].TxSpHelloReqCts++; } - return (pMb); + return (pMb); } /* SkRlmtBuildSpanningTreePacket */ @@ -996,8 +1046,8 @@ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_TX, ("SkRlmtSend: BPDU Packet on Port %u.\n", PortNumber)) } - } - return; + } + return; } /* SkRlmtSend */ @@ -1337,7 +1387,7 @@ pRPort->Root.Id[0], pRPort->Root.Id[1], pRPort->Root.Id[2], pRPort->Root.Id[3], pRPort->Root.Id[4], pRPort->Root.Id[5], - pRPort->Root.Id[6], pRPort->Root.Id[7])) + pRPort->Root.Id[6], pRPort->Root.Id[7])) } SkDrvFreeRlmtMbuf(pAC, IoC, pMb); @@ -1500,10 +1550,10 @@ SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber); } - NewTimeout = SK_RLMT_DEF_TO_VAL; + NewTimeout = SK_RLMT_DEF_TO_VAL; } - return (NewTimeout); + return (NewTimeout); } /* SkRlmtCheckPort */ @@ -1537,13 +1587,14 @@ /* Select port with the latest TimeStamp. 
*/ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { -#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, - ("TimeStamp Port %d: %08x %08x.\n", + ("TimeStamp Port %d (Down: %d, NoRx: %d): %08x %08x.\n", i, + pAC->Rlmt.Port[i].PortDown, pAC->Rlmt.Port[i].PortNoRx, *((SK_U32*)(&pAC->Rlmt.Port[i].BcTimeStamp) + OFFS_HI32), *((SK_U32*)(&pAC->Rlmt.Port[i].BcTimeStamp) + OFFS_LO32))) -#endif /* DEBUG */ + if (!pAC->Rlmt.Port[i].PortDown && !pAC->Rlmt.Port[i].PortNoRx) { if (!PortFound || pAC->Rlmt.Port[i].BcTimeStamp > BcTimeStamp) { BcTimeStamp = pAC->Rlmt.Port[i].BcTimeStamp; @@ -1568,10 +1619,9 @@ pAC->Rlmt.Port[i].BcTimeStamp + SK_RLMT_BC_DELTA > BcTimeStamp)) { PortFound = SK_FALSE; -#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("Port %d received a broadcast at a similar time.\n", i)) -#endif /* DEBUG */ break; } } @@ -1580,8 +1630,8 @@ #ifdef DEBUG if (PortFound) { SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, - ("SK_RLMT_CHECK_SWITCH found Port %d receiving the substantially " - "latest broadcast (%d).\n", + ("SK_RLMT_SELECT_BCRX found Port %d receiving the substantially " + "latest broadcast (%u).\n", *pSelect, BcTimeStamp - pAC->Rlmt.Port[1 - *pSelect].BcTimeStamp)) } @@ -1631,7 +1681,7 @@ } PortFound = SK_TRUE; SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, - ("SK_RLMT_CHECK_SWITCH found Port %d up and not check RX.\n", + ("SK_RLMT_SELECT_NOTSUSPECT found Port %d up and not check RX.\n", *pSelect)) break; } @@ -1681,7 +1731,7 @@ } PortFound = SK_TRUE; SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, - ("SK_RLMT_CHECK_SWITCH found Port %d up.\n", *pSelect)) + ("SK_RLMT_SELECT_UP found Port %d up.\n", *pSelect)) break; } } @@ -1742,7 +1792,7 @@ } SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, - ("SK_RLMT_CHECK_SWITCH found Port %d going up.\n", *pSelect)) + ("SK_RLMT_SELECT_GOINGUP found Port %d going up.\n", *pSelect)) return (SK_TRUE); } /* SkRlmtSelectGoingUp */ @@ -1788,7 +1838,7 @@ } PortFound = SK_TRUE; SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, - ("SK_RLMT_CHECK_SWITCH found Port %d down.\n", *pSelect)) + ("SK_RLMT_SELECT_DOWN found Port %d down.\n", *pSelect)) break; } } @@ -2426,6 +2476,7 @@ pRPort->PacketsPerTimeSlot = 0; /* pRPort->DataPacketsPerTimeSlot = 0; */ pRPort->BpduPacketsPerTimeSlot = 0; + pRPort->BcTimeStamp = 0; /* * RA;:;: To be checked: @@ -2696,7 +2747,7 @@ } /* Stop RLMT timers. */ - SkTimerStop(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].LocTimer); + SkTimerStop(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].LocTimer); SkTimerStop(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].SegTimer); /* Stop net. 
*/ @@ -2896,7 +2947,7 @@ } } #endif /* xDEBUG */ - + SkRlmtCheckSeg(pAC, IoC, Para.Para32[0]); SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, @@ -3284,7 +3335,7 @@ Para.Para32[0] |= SK_RLMT_CHECK_LINK; - if (pAC->Rlmt.Net[Para.Para32[1]].NumPorts < 2 && + if ((pAC->Rlmt.Net[Para.Para32[1]].NumPorts == 1) && Para.Para32[0] != SK_RLMT_MODE_CLS) { pAC->Rlmt.Net[Para.Para32[1]].RlmtMode = SK_RLMT_MODE_CLS; SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, @@ -3447,7 +3498,7 @@ break; } /* switch() */ - return (0); + return (0); } /* SkRlmtEvent */ #ifdef __cplusplus diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/sktimer.c linux.22-ac2/drivers/net/sk98lin/sktimer.c --- linux.vanilla/drivers/net/sk98lin/sktimer.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/sktimer.c 2003-08-13 14:10:39.000000000 +0100 @@ -1,17 +1,17 @@ /****************************************************************************** * * Name: sktimer.c - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.12 $ - * Date: $Date: 1999/11/22 13:38:51 $ + * Project: Gigabit Ethernet Adapters, Schedule-Modul + * Version: $Revision: 1.13 $ + * Date: $Date: 2003/05/13 18:01:01 $ * Purpose: High level timer functions. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +27,9 @@ * History: * * $Log: sktimer.c,v $ + * Revision 1.13 2003/05/13 18:01:01 mkarl + * Editorial changes. + * * Revision 1.12 1999/11/22 13:38:51 cgoos * Changed license header to GPL. * @@ -75,8 +78,10 @@ /* Event queue and dispatcher */ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) static const char SysKonnectFileId[] = - "$Header: /usr56/projects/ge/schedule/sktimer.c,v 1.12 1999/11/22 13:38:51 cgoos Exp $" ; + "$Header: /usr56/projects/ge/schedule/sktimer.c,v 1.13 2003/05/13 18:01:01 mkarl Exp $" ; +#endif #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skvpd.c linux.22-ac2/drivers/net/sk98lin/skvpd.c --- linux.vanilla/drivers/net/sk98lin/skvpd.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skvpd.c 2003-08-13 14:10:39.000000000 +0100 @@ -2,15 +2,15 @@ * * Name: skvpd.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.32 $ - * Date: $Date: 2002/10/14 16:04:29 $ + * Version: $Revision: 1.37 $ + * Date: $Date: 2003/01/13 10:42:45 $ * Purpose: Shared software to read and write VPD data * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2003 SysKonnect GmbH. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,21 @@ * History: * * $Log: skvpd.c,v $ + * Revision 1.37 2003/01/13 10:42:45 rschmidt + * Replaced check for PCI device Id from YUKON with GENESIS + * to set the VPD size in VpdInit() + * Editorial changes + * + * Revision 1.36 2002/11/14 15:16:56 gheinig + * Added const specifier to key and buf parameters for VpdPara, VpdRead + * and VpdWrite for Diag 7 GUI + * + * Revision 1.35 2002/10/21 14:31:59 gheinig + * Took out CVS web garbage at head of file + * + * Revision 1.34 2002/10/21 11:47:24 gheinig + * Reverted to version 1.32 due to unwanted commit + * * Revision 1.32 2002/10/14 16:04:29 rschmidt * Added saving of VPD ROM Size from PCI_OUR_REG_2 * Avoid reading of PCI_OUR_REG_2 in VpdTransferBlock() @@ -95,7 +110,7 @@ * chg: VPD_IN/OUT names conform to SK_IN/OUT * add: usage of VPD_IN/OUT8 macros * add: VpdRead/Write Stream functions to r/w a stream of data - * fix: VpdTransferBlock swapped illeagal + * fix: VpdTransferBlock swapped illegal * add: VpdMayWrite * * Revision 1.13 1998/10/22 10:02:37 gklug @@ -111,7 +126,7 @@ * Remove CvsId by SysKonnectFileId. * * Revision 1.9 1998/09/16 07:33:52 malthoff - * remove memcmp() by SK_MEMCMP and + * replace memcmp() by SK_MEMCMP and * memcpy() by SK_MEMCPY() to be * independent from the 'C' Standard Library. * @@ -119,7 +134,7 @@ * compiler fix: use SK_VPD_KEY instead of S_VPD. * * Revision 1.7 1998/08/19 08:14:01 gklug - * fix: remove struct keyword as much as possible from the c-code (see CCC) + * fix: remove struct keyword as much as possible from the C-code (see CCC) * * Revision 1.6 1998/08/18 13:03:58 gklug * SkOsGetTime now returns SK_U64 @@ -149,7 +164,7 @@ Please refer skvpd.txt for infomation how to include this module */ static const char SysKonnectFileId[] = - "@(#)$Id: skvpd.c,v 1.32 2002/10/14 16:04:29 rschmidt Exp $ (C) SK"; + "@(#)$Id: skvpd.c,v 1.37 2003/01/13 10:42:45 rschmidt Exp $ (C) SK"; #include "h/skdrv1st.h" #include "h/sktypes.h" @@ -162,7 +177,7 @@ #ifndef SK_KR_PROTO static SK_VPD_PARA *vpd_find_para( SK_AC *pAC, - char *key, + const char *key, SK_VPD_PARA *p); #else /* SK_KR_PROTO */ static SK_VPD_PARA *vpd_find_para(); @@ -175,7 +190,7 @@ * returns 0: success, transfer completes * error exit(9) with a error message */ -static int VpdWait( +static int VpdWait( SK_AC *pAC, /* Adapters context */ SK_IOC IoC, /* IO Context */ int event) /* event to wait for (VPD_READ / VPD_write) completion*/ @@ -187,7 +202,7 @@ ("VPD wait for %s\n", event?"Write":"Read")); start_time = SkOsGetTime(pAC); do { - if (SkOsGetTime(pAC) - start_time > SK_TICKS_PER_SEC/16) { + if (SkOsGetTime(pAC) - start_time > SK_TICKS_PER_SEC) { /* Bug fix AF: Thu Mar 28 2002 * Do not call: VPD_STOP(pAC, IoC); @@ -205,7 +220,9 @@ ("ERROR:VPD wait timeout\n")); return(1); } + VPD_IN16(pAC, IoC, PCI_VPD_ADR_REG, &state); + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, ("state = %x, event %x\n",state,event)); } while((int)(state & PCI_VPD_FLAG) == event); @@ -319,7 +336,7 @@ * * Returns number of bytes read / written. 
*/ -static int VpdWriteStream( +static int VpdWriteStream( SK_AC *pAC, /* Adapters context */ SK_IOC IoC, /* IO Context */ char *buf, /* data buffer */ @@ -391,7 +408,9 @@ } for (j = 0; j <= (int)(i%sizeof(SK_U32)); j++, pComp++) { + VPD_IN8(pAC, IoC, PCI_VPD_DAT_REG + j, &Data); + if (Data != *pComp) { /* Verify Error */ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, @@ -412,7 +431,7 @@ * * Returns number of bytes read / written. */ -static int VpdReadStream( +static int VpdReadStream( SK_AC *pAC, /* Adapters context */ SK_IOC IoC, /* IO Context */ char *buf, /* data buffer */ @@ -451,7 +470,7 @@ * * Returns number of bytes read / written. */ -static int VpdTransferBlock( +static int VpdTransferBlock( SK_AC *pAC, /* Adapters context */ SK_IOC IoC, /* IO Context */ char *buf, /* data buffer */ @@ -501,7 +520,7 @@ * * Returns number of bytes read. */ -int VpdReadBlock( +int VpdReadBlock( SK_AC *pAC, /* pAC pointer */ SK_IOC IoC, /* IO Context */ char *buf, /* buffer were the data should be stored */ @@ -516,7 +535,7 @@ * * Returns number of bytes writes. */ -int VpdWriteBlock( +int VpdWriteBlock( SK_AC *pAC, /* pAC pointer */ SK_IOC IoC, /* IO Context */ char *buf, /* buffer, holds the data to write */ @@ -536,7 +555,7 @@ * return 0: success * 1: fatal VPD error */ -static int VpdInit( +static int VpdInit( SK_AC *pAC, /* Adapters context */ SK_IOC IoC) /* IO Context */ { @@ -544,12 +563,12 @@ int i; unsigned char x; int vpd_size; - SK_U16 word; + SK_U16 dev_id; SK_U32 our_reg2; SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_INIT, ("VpdInit .. ")); - VPD_IN16(pAC, IoC, PCI_DEVICE_ID, &word); + VPD_IN16(pAC, IoC, PCI_DEVICE_ID, &dev_id); VPD_IN32(pAC, IoC, PCI_OUR_REG_2, &our_reg2); @@ -560,9 +579,9 @@ * therefore we cannot always trust in GIChipId */ if (((pAC->vpd.v.vpd_status & VPD_VALID) == 0 && - word == VPD_PCI_ID_YUKON) || + dev_id != VPD_DEV_ID_GENESIS) || ((pAC->vpd.v.vpd_status & VPD_VALID) != 0 && - !(pAC->GIni.GIGenesis))) { + !pAC->GIni.GIGenesis)) { /* for Yukon the VPD size is always 256 */ vpd_size = VPD_SIZE_YUKON; @@ -603,7 +622,7 @@ pAC->vpd.v.vpd_free_ro = r->p_len - 1; /* test the checksum */ - for (i = 0, x = 0; (unsigned)i<=(unsigned)vpd_size/2 - r->p_len; i++) { + for (i = 0, x = 0; (unsigned)i <= (unsigned)vpd_size/2 - r->p_len; i++) { x += pAC->vpd.vpd_buf[i]; } @@ -648,9 +667,9 @@ * 0: parameter was not found or VPD encoding error */ static SK_VPD_PARA *vpd_find_para( -SK_AC *pAC, /* common data base */ -char *key, /* keyword to find (e.g. "MN") */ -SK_VPD_PARA *p) /* parameter description struct */ +SK_AC *pAC, /* common data base */ +const char *key, /* keyword to find (e.g. 
"MN") */ +SK_VPD_PARA *p) /* parameter description struct */ { char *v ; /* points to VPD buffer */ int max; /* Maximum Number of Iterations */ @@ -719,9 +738,9 @@ * returns nothing */ static void vpd_move_para( -char *start, /* start of memory block */ -char *end, /* end of memory block to move */ -int n) /* number of bytes the memory block has to be moved */ +char *start, /* start of memory block */ +char *end, /* end of memory block to move */ +int n) /* number of bytes the memory block has to be moved */ { char *p; int i; /* number of byte copied */ @@ -752,10 +771,10 @@ * returns nothing */ static void vpd_insert_key( -char *key, /* keyword to insert */ -char *buf, /* buffer with the keyword value */ -int len, /* length of the value string */ -char *ip) /* inseration point */ +const char *key, /* keyword to insert */ +const char *buf, /* buffer with the keyword value */ +int len, /* length of the value string */ +char *ip) /* inseration point */ { SK_VPD_KEY *p; @@ -774,8 +793,8 @@ * 1: encoding error */ static int vpd_mod_endtag( -SK_AC *pAC, /* common data base */ -char *etp) /* end pointer input position */ +SK_AC *pAC, /* common data base */ +char *etp) /* end pointer input position */ { SK_VPD_KEY *p; unsigned char x; @@ -839,11 +858,11 @@ */ int VpdSetupPara( SK_AC *pAC, /* common data base */ -char *key, /* keyword to insert */ -char *buf, /* buffer with the keyword value */ -int len, /* length of the keyword value */ -int type, /* VPD_RO_KEY or VPD_RW_KEY */ -int op) /* operation to do: ADD_KEY or OWR_KEY */ +const char *key, /* keyword to insert */ +const char *buf, /* buffer with the keyword value */ +int len, /* length of the keyword value */ +int type, /* VPD_RO_KEY or VPD_RW_KEY */ +int op) /* operation to do: ADD_KEY or OWR_KEY */ { SK_VPD_PARA vp; char *etp; /* end tag position */ @@ -930,12 +949,12 @@ * return: A pointer to the vpd_status structure. The structure contains * this fields. */ -SK_VPD_STATUS *VpdStat( -SK_AC *pAC, /* Adapters context */ -SK_IOC IoC) /* IO Context */ +SK_VPD_STATUS *VpdStat( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC) /* IO Context */ { - if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { - (void)VpdInit(pAC,IoC); + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + (void)VpdInit(pAC, IoC); } return(&pAC->vpd.v); } @@ -963,10 +982,10 @@ * *len = 30 * *elements = 9 */ -int VpdKeys( -SK_AC *pAC, /* common data base */ -SK_IOC IoC, /* IO Context */ -char *buf, /* buffer where to copy the keywords */ +int VpdKeys( +SK_AC *pAC, /* common data base */ +SK_IOC IoC, /* IO Context */ +char *buf, /* buffer where to copy the keywords */ int *len, /* buffer length */ int *elements) /* number of keywords returned */ { @@ -975,8 +994,8 @@ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("list VPD keys .. ")); *elements = 0; - if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { - if (VpdInit(pAC,IoC) != 0 ) { + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + if (VpdInit(pAC, IoC) != 0) { *len = 0; SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("VPD Init Error, terminated\n")); @@ -1049,18 +1068,18 @@ * 3: VPD transfer timeout * 6: fatal VPD error */ -int VpdRead( +int VpdRead( SK_AC *pAC, /* common data base */ SK_IOC IoC, /* IO Context */ -char *key, /* keyword to read (e.g. "MN") */ +const char *key, /* keyword to read (e.g. "MN") */ char *buf, /* buffer where to copy the keyword value */ -int *len) /* buffer length */ +int *len) /* buffer length */ { SK_VPD_PARA *p, vp; SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("VPD read %s .. 
", key)); - if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { - if (VpdInit(pAC,IoC) != 0 ) { + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + if (VpdInit(pAC, IoC) != 0) { *len = 0; SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("VPD init error\n")); @@ -1095,8 +1114,8 @@ * SK_TRUE Yes it may be written * SK_FALSE No it may be written */ -SK_BOOL VpdMayWrite( -char *key) /* keyword to write (allowed values "Yx", "Vx") */ +SK_BOOL VpdMayWrite( +char *key) /* keyword to write (allowed values "Yx", "Vx") */ { if ((*key != 'Y' && *key != 'V') || key[1] < '0' || key[1] > 'Z' || @@ -1120,14 +1139,14 @@ * 5: keyword cannot be written * 6: fatal VPD error */ -int VpdWrite( +int VpdWrite( SK_AC *pAC, /* common data base */ SK_IOC IoC, /* IO Context */ -char *key, /* keyword to write (allowed values "Yx", "Vx") */ -char *buf) /* buffer where the keyword value can be read from */ +const char *key, /* keyword to write (allowed values "Yx", "Vx") */ +const char *buf) /* buffer where the keyword value can be read from */ { - int len; /* length of the keyword to write */ - int rtv; /* return code */ + int len; /* length of the keyword to write */ + int rtv; /* return code */ int rtv2; SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, @@ -1142,8 +1161,8 @@ return(5); } - if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { - if (VpdInit(pAC,IoC) != 0 ) { + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + if (VpdInit(pAC, IoC) != 0) { SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("VPD init error\n")); return(6); @@ -1157,9 +1176,9 @@ len = VPD_MAX_LEN; rtv = 2; SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, - ("keyword to long, cut after %d bytes\n",VPD_MAX_LEN)); + ("keyword too long, cut after %d bytes\n",VPD_MAX_LEN)); } - if ((rtv2 = VpdSetupPara(pAC, key,buf, len, VPD_RW_KEY, OWR_KEY)) != 0) { + if ((rtv2 = VpdSetupPara(pAC, key, buf, len, VPD_RW_KEY, OWR_KEY)) != 0) { SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("VPD write error\n")); return(rtv2); @@ -1180,10 +1199,10 @@ * 5: keyword cannot be deleted * 6: fatal VPD error */ -int VpdDelete( -SK_AC *pAC, /* common data base */ -SK_IOC IoC, /* IO Context */ -char *key) /* keyword to read (e.g. "MN") */ +int VpdDelete( +SK_AC *pAC, /* common data base */ +SK_IOC IoC, /* IO Context */ +char *key) /* keyword to read (e.g. "MN") */ { SK_VPD_PARA *p, vp; char *etp; @@ -1192,8 +1211,8 @@ vpd_size = pAC->vpd.vpd_size; SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("VPD delete key %s\n",key)); - if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { - if (VpdInit(pAC,IoC) != 0 ) { + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + if (VpdInit(pAC, IoC) != 0) { SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("VPD init error\n")); return(6); @@ -1235,16 +1254,16 @@ * returns 0: success * 3: VPD transfer timeout */ -int VpdUpdate( -SK_AC *pAC, /* Adapters context */ -SK_IOC IoC) /* IO Context */ +int VpdUpdate( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC) /* IO Context */ { int vpd_size; vpd_size = pAC->vpd.vpd_size; SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("VPD update .. ")); - if (pAC->vpd.v.vpd_status & VPD_VALID) { + if ((pAC->vpd.v.vpd_status & VPD_VALID) != 0) { if (VpdTransferBlock(pAC, IoC, pAC->vpd.vpd_buf + vpd_size/2, vpd_size/2, vpd_size/2, VPD_WRITE) != vpd_size/2) { @@ -1269,18 +1288,18 @@ * * returns nothing, errors will be ignored. 
*/ -void VpdErrLog( -SK_AC *pAC, /* common data base */ -SK_IOC IoC, /* IO Context */ -char *msg) /* error log message */ +void VpdErrLog( +SK_AC *pAC, /* common data base */ +SK_IOC IoC, /* IO Context */ +char *msg) /* error log message */ { SK_VPD_PARA *v, vf; /* VF */ int len; SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, - ("VPD error log msg %s\n",msg)); - if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { - if (VpdInit(pAC,IoC) != 0 ) { + ("VPD error log msg %s\n", msg)); + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + if (VpdInit(pAC, IoC) != 0) { SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("VPD init error\n")); return; @@ -1298,7 +1317,7 @@ } else { SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("write VF\n")); - (void)VpdSetupPara(pAC, VPD_VF, msg,len, VPD_RW_KEY, ADD_KEY); + (void)VpdSetupPara(pAC, VPD_VF, msg, len, VPD_RW_KEY, ADD_KEY); } (void)VpdUpdate(pAC, IoC); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/sk98lin/skxmac2.c linux.22-ac2/drivers/net/sk98lin/skxmac2.c --- linux.vanilla/drivers/net/sk98lin/skxmac2.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/sk98lin/skxmac2.c 2003-08-13 14:10:39.000000000 +0100 @@ -1,16 +1,17 @@ /****************************************************************************** * * Name: skxmac2.c - * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.87 $ - * Date: $Date: 2002/12/10 14:39:05 $ + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.99 $ + * Date: $Date: 2003/07/11 12:19:33 $ * Purpose: Contains functions to initialize the MACs and PHYs * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +27,75 @@ * History: * * $Log: skxmac2.c,v $ + * Revision 1.99 2003/07/11 12:19:33 rschmidt + * Reduced init values for Master & Slave downshift counters to + * minimum values. + * Editorial changes. + * + * Revision 1.98 2003/07/04 12:53:56 rschmidt + * Changed setting of downshift feature in SkGmInitPhyMarv(). + * Enabled downshift feature only for para 'Speed' set to 'Auto'. + * Changed init values for Master & Slave downshift counters. + * Editorial changes. + * + * Revision 1.97 2003/05/28 15:53:47 rschmidt + * Removed setting of Yukon PHY's 'force link good' in loopback mode. + * Replaced call pFnMacOverflow() with SkXmOverflowStatus() resp. + * SkGmOverflowStatus(). + * Editorial changes. + * + * Revision 1.96 2003/05/13 17:37:11 mkarl + * Removed calls to PNMI for SLIM driver. + * Added SK_FAR for PXE. + * Separated code pathes not used for SLIM driver. + * Some further separations for YUKON and GENESIS. + * Editorial changes. + * + * Revision 1.95 2003/05/06 13:09:53 rschmidt + * Changed init sequence for auto-negotiation disabled in SkGmInitMac(). + * Added defines around GENESIS resp. YUKON branches to reduce + * code size for PXE. + * Editorial changes. + * + * Revision 1.94 2003/04/10 14:36:40 rschmidt + * Fixed define for debug code in SkGmInitPhyMarv(). + * + * Revision 1.93 2003/04/08 16:58:16 rschmidt + * Changed initialisation of GMAC and GPHY for disabling + * Flow-Control with parameter 'none' (Bug Id #10769). 
+ * Changed init for blinking active LED and normal duplex LED + * depending on value from GILedBlinkCtrl (LED Blink Control). + * Added control for Link100 LED. + * Changed handling for different PhyTypes for source code + * portability to PXE, UNDI. + * Editorial changes. + * + * Revision 1.92 2003/03/31 07:12:33 mkarl + * Restore PHY_MARV_AUNE_ADV after writing to GM_GP_CTRL in order to make + * auto-negotiation of limited flow-control possible. + * Corrected Copyright. + * Editorial changes. + * + * Revision 1.91 2003/02/05 15:09:34 rschmidt + * Removed setting of 'Collision Test'-bit in SkGmInitPhyMarv(). + * Disabled auto-update for speed, duplex and flow-control when + * auto-negotiation is not enabled (Bug Id #10766). + * Editorial changes. + * + * Revision 1.90 2003/01/29 13:35:19 rschmidt + * Increment Rx FIFO Overflow counter only in DEBUG-mode. + * Corrected define for blinking active LED. + * + * Revision 1.89 2003/01/28 16:37:45 rschmidt + * Changed init for blinking active LED + * + * Revision 1.88 2003/01/28 10:09:38 rschmidt + * Added debug outputs in SkGmInitMac(). + * Added customized init of LED registers in SkGmInitPhyMarv(), + * for blinking active LED (#ifdef ACT_LED_BLINK) and + * for normal duplex LED (#ifdef DUP_LED_NORMAL). + * Editorial changes. + * * Revision 1.87 2002/12/10 14:39:05 rschmidt * Improved initialization of GPHY in SkGmInitPhyMarv(). * Editorial changes. @@ -34,7 +104,7 @@ * Added setup of Ext. PHY Specific Ctrl Reg (downshift feature). * * Revision 1.85 2002/12/05 14:09:16 rschmidt - * Improved avoiding endless loop in SkGmPhyWrite(), SkGmPhyWrite(). + * Improved avoiding endless loop in SkGmPhyRead(), SkGmPhyWrite(). * Added additional advertising for 10Base-T when 100Base-T is selected. * Added case SK_PHY_MARV_FIBER for YUKON Fiber adapter. * Editorial changes. 
@@ -402,9 +472,13 @@ } BCOM_HACK; /* local variables ************************************************************/ + +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) static const char SysKonnectFileId[] = - "@(#)$Id: skxmac2.c,v 1.87 2002/12/10 14:39:05 rschmidt Exp $ (C) SK "; + "@(#) $Id: skxmac2.c,v 1.99 2003/07/11 12:19:33 rschmidt Exp $ (C) Marvell."; +#endif +#ifdef GENESIS BCOM_HACK BcomRegA1Hack[] = { { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, @@ -416,14 +490,19 @@ { 0x15, 0x0A04 }, { 0x18, 0x0420 }, { 0, 0 } }; +#endif /* function prototypes ********************************************************/ +#ifdef GENESIS static void SkXmInitPhyXmac(SK_AC*, SK_IOC, int, SK_BOOL); static void SkXmInitPhyBcom(SK_AC*, SK_IOC, int, SK_BOOL); -static void SkGmInitPhyMarv(SK_AC*, SK_IOC, int, SK_BOOL); static int SkXmAutoNegDoneXmac(SK_AC*, SK_IOC, int); static int SkXmAutoNegDoneBcom(SK_AC*, SK_IOC, int); +#endif /* GENESIS */ +#ifdef YUKON +static void SkGmInitPhyMarv(SK_AC*, SK_IOC, int, SK_BOOL); static int SkGmAutoNegDoneMarv(SK_AC*, SK_IOC, int); +#endif /* YUKON */ #ifdef OTHER_PHY static void SkXmInitPhyLone(SK_AC*, SK_IOC, int, SK_BOOL); static void SkXmInitPhyNat (SK_AC*, SK_IOC, int, SK_BOOL); @@ -432,7 +511,7 @@ #endif /* OTHER_PHY */ - +#ifdef GENESIS /****************************************************************************** * * SkXmPhyRead() - Read from XMAC PHY register @@ -443,11 +522,11 @@ * nothing */ void SkXmPhyRead( -SK_AC *pAC, /* Adapter Context */ -SK_IOC IoC, /* I/O Context */ -int Port, /* Port Index (MAC_1 + n) */ -int PhyReg, /* Register Address (Offset) */ -SK_U16 *pVal) /* Pointer to Value */ +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 SK_FAR *pVal) /* Pointer to Value */ { SK_U16 Mmu; SK_GEPORT *pPrt; @@ -513,8 +592,10 @@ } while ((Mmu & XM_MMU_PHY_BUSY) != 0); } } /* SkXmPhyWrite */ +#endif /* GENESIS */ +#ifdef YUKON /****************************************************************************** * * SkGmPhyRead() - Read from GPHY register @@ -525,11 +606,11 @@ * nothing */ void SkGmPhyRead( -SK_AC *pAC, /* Adapter Context */ -SK_IOC IoC, /* I/O Context */ -int Port, /* Port Index (MAC_1 + n) */ -int PhyReg, /* Register Address (Offset) */ -SK_U16 *pVal) /* Pointer to Value */ +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 SK_FAR *pVal) /* Pointer to Value */ { SK_U16 Ctrl; SK_GEPORT *pPrt; @@ -545,8 +626,8 @@ pPrt = &pAC->GIni.GP[Port]; /* set PHY-Register offset and 'Read' OpCode (= 1) */ - *pVal = GM_SMI_CT_PHY_AD(pPrt->PhyAddr) | GM_SMI_CT_REG_AD(PhyReg) | - GM_SMI_CT_OP_RD; + *pVal = (SK_U16)(GM_SMI_CT_PHY_AD(pPrt->PhyAddr) | + GM_SMI_CT_REG_AD(PhyReg) | GM_SMI_CT_OP_RD); GM_OUT16(IoC, Port, GM_SMI_CTRL, *pVal); @@ -578,6 +659,7 @@ VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n", SimCyle, SimLowTime); #endif /* VCPU */ + } /* SkGmPhyRead */ @@ -646,9 +728,12 @@ VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n", SimCyle, SimLowTime); #endif /* VCPU */ + } /* SkGmPhyWrite */ +#endif /* YUKON */ +#ifdef SK_DIAG /****************************************************************************** * * SkGePhyRead() - Read from PHY register @@ -705,6 +790,7 @@ w_func(pAC, IoC, Port, PhyReg, Val); } /* 
SkGePhyWrite */ +#endif /* SK_DIAG */ /****************************************************************************** @@ -724,9 +810,14 @@ int Port, /* Port Index (MAC_1 + n) */ SK_BOOL Enable) /* Enable / Disable */ { +#ifdef YUKON SK_U16 RcReg; +#endif +#ifdef GENESIS SK_U32 MdReg; +#endif +#ifdef GENESIS if (pAC->GIni.GIGenesis) { XM_IN32(IoC, Port, XM_MODE, &MdReg); @@ -740,7 +831,10 @@ /* setup Mode Register */ XM_OUT32(IoC, Port, XM_MODE, MdReg); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg); @@ -754,6 +848,8 @@ /* setup Receive Control Register */ GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg); } +#endif /* YUKON */ + } /* SkMacPromiscMode*/ @@ -774,9 +870,14 @@ int Port, /* Port Index (MAC_1 + n) */ SK_BOOL Enable) /* Enable / Disable */ { +#ifdef YUKON SK_U16 RcReg; +#endif +#ifdef GENESIS SK_U32 MdReg; +#endif +#ifdef GENESIS if (pAC->GIni.GIGenesis) { XM_IN32(IoC, Port, XM_MODE, &MdReg); @@ -790,7 +891,10 @@ /* setup Mode Register */ XM_OUT32(IoC, Port, XM_MODE, MdReg); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg); @@ -804,6 +908,8 @@ /* setup Receive Control Register */ GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg); } +#endif /* YUKON */ + } /* SkMacHashing*/ @@ -903,7 +1009,7 @@ * Description: * The features * - FCS (CRC) stripping, SK_STRIP_FCS_ON/OFF - * - don't set XMR_FS_ERR in status SK_BIG_PK_OK_ON/OFF + * - don't set GMR_FS_LONG_ERR SK_BIG_PK_OK_ON/OFF * for frames > 1514 bytes * - enable Rx of own packets SK_SELF_RX_ON/OFF * @@ -986,6 +1092,7 @@ SkGmSetRxCmd(pAC, IoC, Port, Mode); } + } /* SkMacSetRxCmd */ @@ -1017,7 +1124,7 @@ Word |= XM_TX_NO_CRC; } /* setup Tx Command Register */ - XM_OUT16(pAC, Port, XM_TX_CMD, Word); + XM_OUT16(IoC, Port, XM_TX_CMD, Word); } else { @@ -1032,11 +1139,13 @@ /* setup Tx Control Register */ GM_OUT16(IoC, Port, GM_TX_CTRL, Word); } + } /* SkMacCrcGener*/ #endif /* SK_DIAG */ +#ifdef GENESIS /****************************************************************************** * * SkXmClrExactAddr() - Clear Exact Match Address Registers @@ -1070,6 +1179,7 @@ XM_OUTADDR(IoC, Port, XM_EXM(i), &ZeroAddr[0]); } } /* SkXmClrExactAddr */ +#endif /* GENESIS */ /****************************************************************************** @@ -1087,6 +1197,7 @@ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ { +#ifdef GENESIS SK_U32 MdReg; if (pAC->GIni.GIGenesis) { @@ -1095,10 +1206,15 @@ XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FTF); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* no way to flush the FIFO we have to issue a reset */ /* TBD */ } +#endif /* YUKON */ + } /* SkMacFlushTxFifo */ @@ -1117,6 +1233,7 @@ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ { +#ifdef GENESIS SK_U32 MdReg; if (pAC->GIni.GIGenesis) { @@ -1125,13 +1242,19 @@ XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FRF); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* no way to flush the FIFO we have to issue a reset */ /* TBD */ } +#endif /* YUKON */ + } /* SkMacFlushRxFifo */ +#ifdef GENESIS /****************************************************************************** * * SkXmSoftRst() - Do a XMAC software reset @@ -1280,8 +1403,10 @@ } } /* SkXmHardRst */ +#endif /* GENESIS */ +#ifdef YUKON /****************************************************************************** * * SkGmSoftRst() - Do a GMAC software reset @@ -1310,13 +1435,13 @@ 
SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0); /* clear the Hash Register */ - GM_OUTHASH(IoC, Port, GM_MC_ADDR_H1, &EmptyHash); + GM_OUTHASH(IoC, Port, GM_MC_ADDR_H1, EmptyHash); /* Enable Unicast and Multicast filtering */ GM_IN16(IoC, Port, GM_RX_CTRL, &RxCtrl); GM_OUT16(IoC, Port, GM_RX_CTRL, - RxCtrl | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + (SK_U16)(RxCtrl | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA)); } /* SkGmSoftRst */ @@ -1346,6 +1471,7 @@ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET); } /* SkGmHardRst */ +#endif /* YUKON */ /****************************************************************************** @@ -1369,14 +1495,19 @@ /* disable receiver and transmitter */ SkMacRxTxDisable(pAC, IoC, Port); +#ifdef GENESIS if (pAC->GIni.GIGenesis) { SkXmSoftRst(pAC, IoC, Port); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { SkGmSoftRst(pAC, IoC, Port); } +#endif /* YUKON */ /* flush the MAC's Rx and Tx FIFOs */ SkMacFlushTxFifo(pAC, IoC, Port); @@ -1403,21 +1534,26 @@ int Port) /* Port Index (MAC_1 + n) */ { +#ifdef GENESIS if (pAC->GIni.GIGenesis) { SkXmHardRst(pAC, IoC, Port); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { SkGmHardRst(pAC, IoC, Port); } +#endif /* YUKON */ pAC->GIni.GP[Port].PState = SK_PRT_RESET; } /* SkMacHardRst */ - +#ifdef GENESIS /****************************************************************************** * * SkXmInitMac() - Initialize the XMAC II @@ -1542,10 +1678,10 @@ */ SkMacInitPhy(pAC, IoC, Port, SK_FALSE); -#if 0 +#ifdef TEST_ONLY /* temp. code: enable signal detect */ /* WARNING: do not override GMII setting above */ - XM_OUT16(pAC, Port, XM_HW_CFG, XM_HW_COM4SIG); + XM_OUT16(IoC, Port, XM_HW_CFG, XM_HW_COM4SIG); #endif } @@ -1605,7 +1741,7 @@ SWord |= XM_RX_BIG_PK_OK; } - if (pPrt->PLinkModeConf == SK_LMODE_HALF) { + if (pPrt->PLinkMode == SK_LMODE_HALF) { /* * If in manual half duplex mode the other side might be in * full duplex mode, so ignore if a carrier extension is not seen @@ -1651,7 +1787,10 @@ * has been completed successfully. */ } /* SkXmInitMac */ +#endif /* GENESIS */ + +#ifdef YUKON /****************************************************************************** * * SkGmInitMac() - Initialize the GMAC @@ -1698,11 +1837,13 @@ /* set GMAC Control reset */ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET); +#ifdef XXX /* clear GMAC Control reset */ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_CLR); /* set GMAC Control reset */ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET); +#endif /* XXX */ /* set HWCFG_MODE */ DWord = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | @@ -1716,10 +1857,76 @@ /* release GPHY Control reset */ SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), DWord | GPC_RST_CLR); +#ifdef VCPU + VCpuWait(9000); +#endif /* VCPU */ + /* clear GMAC Control reset */ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); - /* Dummy read the Interrupt source register */ +#ifdef VCPU + VCpuWait(2000); +#endif /* VCPU */ + + /* Auto-negotiation ? 
*/ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + /* Auto-negotiation disabled */ + + /* get General Purpose Control */ + GM_IN16(IoC, Port, GM_GP_CTRL, &SWord); + + /* disable auto-update for speed, duplex and flow-control */ + SWord |= GM_GPCR_AU_ALL_DIS; + + /* setup General Purpose Control Register */ + GM_OUT16(IoC, Port, GM_GP_CTRL, SWord); + + SWord = GM_GPCR_AU_ALL_DIS; + } + else { + SWord = 0; + } + + /* speed settings */ + switch (pPrt->PLinkSpeed) { + case SK_LSPEED_AUTO: + case SK_LSPEED_1000MBPS: + SWord |= GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100; + break; + case SK_LSPEED_100MBPS: + SWord |= GM_GPCR_SPEED_100; + break; + case SK_LSPEED_10MBPS: + break; + } + + /* duplex settings */ + if (pPrt->PLinkMode != SK_LMODE_HALF) { + /* set full duplex */ + SWord |= GM_GPCR_DUP_FULL; + } + + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + /* set Pause Off */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_PAUSE_OFF); + /* disable Tx & Rx flow-control */ + SWord |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case SK_FLOW_MODE_LOC_SEND: + /* disable Rx flow-control */ + SWord |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case SK_FLOW_MODE_SYMMETRIC: + case SK_FLOW_MODE_SYM_OR_REM: + /* enable Tx & Rx flow-control */ + break; + } + + /* setup General Purpose Control Register */ + GM_OUT16(IoC, Port, GM_GP_CTRL, SWord); + + /* dummy read the Interrupt Source Register */ SK_IN16(IoC, GMAC_IRQ_SRC, &SWord); #ifndef VCPU @@ -1732,50 +1939,6 @@ (void)SkGmResetCounter(pAC, IoC, Port); - SWord = 0; - - /* speed settings */ - switch (pPrt->PLinkSpeed) { - case SK_LSPEED_AUTO: - /* auto update for speed is already set */ - break; - case SK_LSPEED_1000MBPS: - SWord |= GM_GPCR_SPEED_1000; - break; - case SK_LSPEED_100MBPS: - SWord |= GM_GPCR_SPEED_100; - break; - case SK_LSPEED_10MBPS: - break; - } - - /* duplex settings */ - if (pPrt->PLinkModeConf == SK_LMODE_FULL || - pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE) { - - SWord |= GM_GPCR_DUP_FULL; - } - - /* flow control settings */ - switch (pPrt->PFlowCtrlMode) { - case SK_FLOW_MODE_NONE: - /* disable auto-neg of flow control */ - SWord |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS; - break; - case SK_FLOW_MODE_LOC_SEND: - SWord |= GM_GPCR_FC_RX_DIS; - break; - case SK_FLOW_MODE_SYMMETRIC: - /* TBD */ - case SK_FLOW_MODE_SYM_OR_REM: - /* do nothing means to enable autoneg for flowcontrol and */ - /* enable rx and tx of pause frames */ - break; - } - - /* setup General Purpose Control Register */ - GM_OUT16(IoC, Port, GM_GP_CTRL, SWord); - /* setup Transmit Control Register */ GM_OUT16(IoC, Port, GM_TX_CTRL, GM_TXCR_COL_THR); @@ -1791,7 +1954,7 @@ GM_IN16(IoC, Port, GM_TX_PARAM, &SWord); #endif /* VCPU */ - SWord = JAM_LEN_VAL(3) | JAM_IPG_VAL(11) | IPG_JAM_DATA(26); + SWord = (SK_U16)(JAM_LEN_VAL(3) | JAM_IPG_VAL(11) | IPG_JAM_DATA(26)); GM_OUT16(IoC, Port, GM_TX_PARAM, SWord); @@ -1827,7 +1990,7 @@ #ifdef WA_DEV_16 /* WA for deviation #16 */ - if (pAC->GIni.GIChipRev == 0) { + if (pAC->GIni.GIChipId == CHIP_ID_YUKON && pAC->GIni.GIChipRev == 0) { /* swap the address bytes */ SWord = ((SWord & 0xff00) >> 8) | ((SWord & 0x00ff) << 8); @@ -1845,12 +2008,12 @@ SK_IN16(IoC, (B2_MAC_1 + Port * 8 + i * 2), &SWord); GM_OUT16(IoC, Port, (GM_SRC_ADDR_2L + i * 4), SWord); + + /* reset Multicast filtering Hash registers 1-3 */ + GM_OUT16(IoC, Port, GM_MC_ADDR_H1 + 4*i, 0); } - /* reset all Multicast filtering Hash registers */ - GM_OUT16(IoC, Port, GM_MC_ADDR_H1, 0); - GM_OUT16(IoC, Port, 
GM_MC_ADDR_H2, 0); - GM_OUT16(IoC, Port, GM_MC_ADDR_H3, 0); + /* reset Multicast filtering Hash register 4 */ GM_OUT16(IoC, Port, GM_MC_ADDR_H4, 0); /* enable interrupt mask for counter overflows */ @@ -1858,13 +2021,21 @@ GM_OUT16(IoC, Port, GM_RX_IRQ_MSK, 0); GM_OUT16(IoC, Port, GM_TR_IRQ_MSK, 0); -#ifdef VCPU /* read General Purpose Status */ GM_IN16(IoC, Port, GM_GP_STAT, &SWord); -#endif /* VCPU */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("MAC Stat Reg=0x%04X\n", SWord)); + +#ifdef SK_DIAG + c_print("MAC Stat Reg=0x%04X\n", SWord); +#endif /* SK_DIAG */ + } /* SkGmInitMac */ +#endif /* YUKON */ +#ifdef GENESIS /****************************************************************************** * * SkXmInitDupMd() - Initialize the XMACs Duplex Mode @@ -2209,7 +2380,7 @@ /* Write AutoNeg Advertisement Register */ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, Ctrl3); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("Auto-Neg. Adv. Reg=0x%04X\n", Ctrl3)); + ("Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3)); if (DoLoop) { /* Set the Phy Loopback bit, too */ @@ -2234,8 +2405,10 @@ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("PHY Control Reg=0x%04X\n", Ctrl1)); } /* SkXmInitPhyBcom */ +#endif /* GENESIS */ +#ifdef YUKON /****************************************************************************** * * SkGmInitPhyMarv() - Initialize the Marvell Phy registers @@ -2255,43 +2428,66 @@ { SK_GEPORT *pPrt; SK_U16 PhyCtrl; - SK_U16 PhyStat; - SK_U16 PhyStat1; - SK_U16 PhySpec; SK_U16 C1000BaseT; SK_U16 AutoNegAdv; SK_U16 ExtPhyCtrl; + SK_U16 LedCtrl; + SK_BOOL AutoNeg; +#if defined(SK_DIAG) || defined(DEBUG) + SK_U16 PhyStat; + SK_U16 PhyStat1; + SK_U16 PhySpecStat; +#endif /* SK_DIAG || DEBUG */ + + pPrt = &pAC->GIni.GP[Port]; + + /* Auto-negotiation ? */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + AutoNeg = SK_FALSE; + } + else { + AutoNeg = SK_TRUE; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyMarv: Port %d, auto-negotiation %s\n", + Port, AutoNeg ? "ON" : "OFF")); #ifdef VCPU VCPUprintf(0, "SkGmInitPhyMarv(), Port=%u, DoLoop=%u\n", Port, DoLoop); #else /* VCPU */ - if (!DoLoop) { + if (DoLoop) { + /* Set 'MAC Power up'-bit, set Manual MDI configuration */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, + PHY_M_PC_MAC_POW_UP); + } + else if (AutoNeg && pPrt->PLinkSpeed == SK_LSPEED_AUTO) { /* Read Ext. PHY Specific Control */ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl); ExtPhyCtrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | PHY_M_EC_MAC_S_MSK); - ExtPhyCtrl |= PHY_M_EC_M_DSC(1) | PHY_M_EC_S_DSC(1) | - PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + ExtPhyCtrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ) | + PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, ExtPhyCtrl); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("Ext.PHYCtrl=0x%04X\n", ExtPhyCtrl)); - - /* Read PHY Control */ - SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl); - - /* Assert software reset */ - SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl | PHY_CT_RESET); + ("Ext. PHY Ctrl=0x%04X\n", ExtPhyCtrl)); } -#endif /* VCPU */ - pPrt = &pAC->GIni.GP[Port]; + /* Read PHY Control */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl); + + PhyCtrl |= PHY_CT_RESET; + /* Assert software reset */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl); + +#endif /* VCPU */ - PhyCtrl = PHY_CT_COL_TST; + PhyCtrl = 0 /* PHY_CT_COL_TST */; C1000BaseT = 0; AutoNegAdv = PHY_SEL_TYPE; @@ -2306,9 +2502,7 @@ } /* Auto-negotiation ? 
*/ - if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("InitPhyMarv: no auto-negotiation Port %d\n", Port)); + if (!AutoNeg) { if (pPrt->PLinkMode == SK_LMODE_FULL) { /* Set Full Duplex Mode */ @@ -2345,9 +2539,6 @@ */ } else { - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("InitPhyMarv: with auto-negotiation Port %d\n", Port)); - PhyCtrl |= PHY_CT_ANE; if (pAC->GIni.GICopperType) { @@ -2459,12 +2650,7 @@ */ /* Program PHY register 30 as 16'h0708 for simulation speed up */ - SkGmPhyWrite(pAC, IoC, Port, 30, 0x0708); - -#if 0 - /* Program PHY register 20 as 16'h2070 */ - SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, 0x2070); -#endif /* 0 */ + SkGmPhyWrite(pAC, IoC, Port, 30, 0x0700 /* 0x0708 */); VCpuWait(2000); @@ -2485,38 +2671,63 @@ /* Set the PHY Loopback bit */ PhyCtrl |= PHY_CT_LOOP; +#ifdef XXX /* Program PHY register 16 as 16'h0400 to force link good */ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, PHY_M_PC_FL_GOOD); +#endif /* XXX */ -#if 0 +#ifndef VCPU if (pPrt->PLinkSpeed != SK_LSPEED_AUTO) { /* Write Ext. PHY Specific Control */ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, (SK_U16)((pPrt->PLinkSpeed + 2) << 4)); } +#endif /* VCPU */ } +#ifdef TEST_ONLY else if (pPrt->PLinkSpeed == SK_LSPEED_10MBPS) { /* Write PHY Specific Control */ - SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, PHY_M_PC_EN_DET_MSK); - } -#endif /* 0 */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, + PHY_M_PC_EN_DET_MSK); } +#endif /* Write to the PHY Control register */ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl); #ifdef VCPU VCpuWait(2000); -#endif /* VCPU */ +#else + + LedCtrl = PHY_M_LED_PULS_DUR(PULS_170MS) | PHY_M_LED_BLINK_RT(BLINK_84MS); + + if ((pAC->GIni.GILedBlinkCtrl & SK_ACT_LED_BLINK) != 0) { + LedCtrl |= PHY_M_LEDC_RX_CTRL | PHY_M_LEDC_TX_CTRL; + } + + if ((pAC->GIni.GILedBlinkCtrl & SK_DUP_LED_NORMAL) != 0) { + LedCtrl |= PHY_M_LEDC_DP_CTRL; + } + + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_LED_CTRL, LedCtrl); + + if ((pAC->GIni.GILedBlinkCtrl & SK_LED_LINK100_ON) != 0) { + /* only in forced 100Mbps mode */ + if (!AutoNeg && pPrt->PLinkSpeed == SK_LSPEED_100MBPS) { + + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_100(MO_LED_ON)); + } + } #ifdef SK_DIAG - c_print("PHY Ctrl Val=0x%04X\n", PhyCtrl); - c_print("1000 B-T Val=0x%04X\n", C1000BaseT); - c_print("Auto-Neg Val=0x%04X\n", AutoNegAdv); - c_print("Ext Ctrl Val=0x%04X\n", ExtPhyCtrl); + c_print("Set PHY Ctrl=0x%04X\n", PhyCtrl); + c_print("Set 1000 B-T=0x%04X\n", C1000BaseT); + c_print("Set Auto-Neg=0x%04X\n", AutoNegAdv); + c_print("Set Ext Ctrl=0x%04X\n", ExtPhyCtrl); #endif /* SK_DIAG */ -#ifndef VCPU +#if defined(SK_DIAG) || defined(DEBUG) /* Read PHY Control */ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, @@ -2535,7 +2746,7 @@ /* Read Ext. PHY Specific Control */ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("Ext PHY Ctrl=0x%04X\n", ExtPhyCtrl)); + ("Ext. 
PHY Ctrl=0x%04X\n", ExtPhyCtrl)); /* Read PHY Status */ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat); @@ -2546,10 +2757,10 @@ ("PHY Stat Reg.=0x%04X\n", PhyStat1)); /* Read PHY Specific Status */ - SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpec); + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpecStat); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("PHY Spec Stat=0x%04X\n", PhySpec)); -#endif /* VCPU */ + ("PHY Spec Stat=0x%04X\n", PhySpecStat)); +#endif /* SK_DIAG || DEBUG */ #ifdef SK_DIAG c_print("PHY Ctrl Reg=0x%04X\n", PhyCtrl); @@ -2558,10 +2769,13 @@ c_print("Ext Ctrl Reg=0x%04X\n", ExtPhyCtrl); c_print("PHY Stat Reg=0x%04X\n", PhyStat); c_print("PHY Stat Reg=0x%04X\n", PhyStat1); - c_print("PHY Spec Reg=0x%04X\n", PhySpec); + c_print("PHY Spec Reg=0x%04X\n", PhySpecStat); #endif /* SK_DIAG */ +#endif /* VCPU */ + } /* SkGmInitPhyMarv */ +#endif /* YUKON */ #ifdef OTHER_PHY @@ -2666,10 +2880,6 @@ } - /* Initialize LED register here ? */ - /* No. Please do it in SkDgXmitLed() (if required) and swap - init order of LEDs and XMAC. (MAl) */ - /* Write 1000Base-T Control Register */ SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_1000T_CTRL, Ctrl2); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, @@ -2678,8 +2888,7 @@ /* Write AutoNeg Advertisement Register */ SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_AUNE_ADV, Ctrl3); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("Auto-Neg. Adv. Reg=0x%04X\n", Ctrl3)); - + ("Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3)); if (DoLoop) { /* Set the Phy Loopback bit, too */ @@ -2736,126 +2945,76 @@ pPrt = &pAC->GIni.GP[Port]; - switch (pPrt->PhyType) { - case SK_PHY_XMAC: - SkXmInitPhyXmac(pAC, IoC, Port, DoLoop); - break; - case SK_PHY_BCOM: - SkXmInitPhyBcom(pAC, IoC, Port, DoLoop); - break; - case SK_PHY_MARV_COPPER: - case SK_PHY_MARV_FIBER: - SkGmInitPhyMarv(pAC, IoC, Port, DoLoop); - break; +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + switch (pPrt->PhyType) { + case SK_PHY_XMAC: + SkXmInitPhyXmac(pAC, IoC, Port, DoLoop); + break; + case SK_PHY_BCOM: + SkXmInitPhyBcom(pAC, IoC, Port, DoLoop); + break; #ifdef OTHER_PHY - case SK_PHY_LONE: - SkXmInitPhyLone(pAC, IoC, Port, DoLoop); - break; - case SK_PHY_NAT: - SkXmInitPhyNat(pAC, IoC, Port, DoLoop); - break; + case SK_PHY_LONE: + SkXmInitPhyLone(pAC, IoC, Port, DoLoop); + break; + case SK_PHY_NAT: + SkXmInitPhyNat(pAC, IoC, Port, DoLoop); + break; #endif /* OTHER_PHY */ + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + SkGmInitPhyMarv(pAC, IoC, Port, DoLoop); } +#endif /* YUKON */ + } /* SkMacInitPhy */ -#ifndef SK_DIAG +#ifdef GENESIS /****************************************************************************** * - * SkXmAutoNegLipaXmac() - Decides whether Link Partner could do auto-neg + * SkXmAutoNegDoneXmac() - Auto-negotiation handling * - * This function analyses the Interrupt status word. If any of the - * Auto-negotiating interrupt bits are set, the PLipaAutoNeg variable - * is set true. + * Description: + * This function handles the auto-negotiation if the Done bit is set. + * + * Returns: + * SK_AND_OK o.k. 
+ * SK_AND_DUP_CAP Duplex capability error happened + * SK_AND_OTHER Other error happened */ -void SkXmAutoNegLipaXmac( +static int SkXmAutoNegDoneXmac( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port, /* Port Index (MAC_1 + n) */ -SK_U16 IStatus) /* Interrupt Status word to analyse */ +int Port) /* Port Index (MAC_1 + n) */ { SK_GEPORT *pPrt; + SK_U16 ResAb; /* Resolved Ability */ + SK_U16 LPAb; /* Link Partner Ability */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegDoneXmac, Port %d\n", Port)); pPrt = &pAC->GIni.GP[Port]; - if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && - (IStatus & (XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND)) != 0) { + /* Get PHY parameters */ + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LPAb); + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb); + if ((LPAb & PHY_X_AN_RFB) != 0) { + /* At least one of the remote fault bit is set */ + /* Error */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNegLipa: AutoNeg detected on Port %d, IStatus=0x%04x\n", - Port, IStatus)); - pPrt->PLipaAutoNeg = SK_LIPA_AUTO; - } -} /* SkXmAutoNegLipaXmac */ - - -/****************************************************************************** - * - * SkMacAutoNegLipaPhy() - Decides whether Link Partner could do auto-neg - * - * This function analyses the PHY status word. - * If any of the Auto-negotiating bits are set, the PLipaAutoNeg variable - * is set true. - */ -void SkMacAutoNegLipaPhy( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port, /* Port Index (MAC_1 + n) */ -SK_U16 PhyStat) /* PHY Status word to analyse */ -{ - SK_GEPORT *pPrt; - - pPrt = &pAC->GIni.GP[Port]; - - if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && - (PhyStat & PHY_ST_AN_OVER) != 0) { - - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNegLipa: AutoNeg detected on Port %d, PhyStat=0x%04x\n", - Port, PhyStat)); - pPrt->PLipaAutoNeg = SK_LIPA_AUTO; - } -} /* SkMacAutoNegLipaPhy */ -#endif /* SK_DIAG */ - - -/****************************************************************************** - * - * SkXmAutoNegDoneXmac() - Auto-negotiation handling - * - * Description: - * This function handles the auto-negotiation if the Done bit is set. - * - * Returns: - * SK_AND_OK o.k. 
- * SK_AND_DUP_CAP Duplex capability error happened - * SK_AND_OTHER Other error happened - */ -static int SkXmAutoNegDoneXmac( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port) /* Port Index (MAC_1 + n) */ -{ - SK_GEPORT *pPrt; - SK_U16 ResAb; /* Resolved Ability */ - SK_U16 LPAb; /* Link Partner Ability */ - - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNegDoneXmac, Port %d\n",Port)); - - pPrt = &pAC->GIni.GP[Port]; - - /* Get PHY parameters */ - SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LPAb); - SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb); - - if ((LPAb & PHY_X_AN_RFB) != 0) { - /* At least one of the remote fault bit is set */ - /* Error */ - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNegFail: Remote fault bit set Port %d\n", Port)); - pPrt->PAutoNegFail = SK_TRUE; - return(SK_AND_OTHER); + ("AutoNegFail: Remote fault bit set Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + return(SK_AND_OTHER); } /* Check Duplex mismatch */ @@ -2923,7 +3082,7 @@ SK_U16 LPAb; /* Link Partner Ability */ SK_U16 AuxStat; /* Auxiliary Status */ -#if 0 +#ifdef TEST_ONLY 01-Sep-2000 RA;:;: SK_U16 ResAb; /* Resolved Ability */ #endif /* 0 */ @@ -2934,7 +3093,7 @@ /* Get PHY parameters */ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &LPAb); -#if 0 +#ifdef TEST_ONLY 01-Sep-2000 RA;:;: SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb); #endif /* 0 */ @@ -2964,7 +3123,7 @@ return(SK_AND_DUP_CAP); } -#if 0 +#ifdef TEST_ONLY 01-Sep-2000 RA;:;: /* Check Master/Slave resolution */ if ((ResAb & PHY_B_1000S_MSF) != 0) { @@ -2979,7 +3138,7 @@ SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; #endif /* 0 */ - /* Check PAUSE mismatch */ + /* Check PAUSE mismatch ??? */ /* We are using IEEE 802.3z/D5.0 Table 37-4 */ if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PAUSE_MSK) { /* Symmetric PAUSE */ @@ -3001,8 +3160,10 @@ return(SK_AND_OK); } /* SkXmAutoNegDoneBcom */ +#endif /* GENESIS */ +#ifdef YUKON /****************************************************************************** * * SkGmAutoNegDoneMarv() - Auto-negotiation handling @@ -3032,7 +3193,7 @@ /* Get PHY parameters */ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_LP, &LPAb); - if ((LPAb & PHY_B_AN_RF) != 0) { + if ((LPAb & PHY_M_AN_RF) != 0) { SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNegFail: Remote fault bit set Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; @@ -3059,7 +3220,7 @@ /* Check Speed & Duplex resolved */ if ((AuxStat & PHY_M_PS_SPDUP_RES) == 0) { SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNegFail: Speed & Duplex not resolved Port %d\n", Port)); + ("AutoNegFail: Speed & Duplex not resolved, Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; pPrt->PLinkModeStatus = SK_LMODE_STAT_UNKNOWN; return(SK_AND_DUP_CAP); @@ -3072,7 +3233,7 @@ pPrt->PLinkModeStatus = SK_LMODE_STAT_AUTOHALF; } - /* Check PAUSE mismatch */ + /* Check PAUSE mismatch ??? 
*/ /* We are using IEEE 802.3z/D5.0 Table 37-4 */ if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_PAUSE_MSK) { /* Symmetric PAUSE */ @@ -3105,6 +3266,7 @@ return(SK_AND_OK); } /* SkGmAutoNegDoneMarv */ +#endif /* YUKON */ #ifdef OTHER_PHY @@ -3131,7 +3293,7 @@ SK_U16 QuickStat; /* Auxiliary Status */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNegDoneLone, Port %d\n",Port)); + ("AutoNegDoneLone, Port %d\n", Port)); pPrt = &pAC->GIni.GP[Port]; /* Get PHY parameters */ @@ -3255,30 +3417,41 @@ SK_GEPORT *pPrt; int Rtv; + Rtv = SK_AND_OK; + pPrt = &pAC->GIni.GP[Port]; - switch (pPrt->PhyType) { - case SK_PHY_XMAC: - Rtv = SkXmAutoNegDoneXmac(pAC, IoC, Port); - break; - case SK_PHY_BCOM: - Rtv = SkXmAutoNegDoneBcom(pAC, IoC, Port); - break; - case SK_PHY_MARV_COPPER: - case SK_PHY_MARV_FIBER: - Rtv = SkGmAutoNegDoneMarv(pAC, IoC, Port); - break; +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + switch (pPrt->PhyType) { + + case SK_PHY_XMAC: + Rtv = SkXmAutoNegDoneXmac(pAC, IoC, Port); + break; + case SK_PHY_BCOM: + Rtv = SkXmAutoNegDoneBcom(pAC, IoC, Port); + break; #ifdef OTHER_PHY - case SK_PHY_LONE: - Rtv = SkXmAutoNegDoneLone(pAC, IoC, Port); - break; - case SK_PHY_NAT: - Rtv = SkXmAutoNegDoneNat(pAC, IoC, Port); - break; + case SK_PHY_LONE: + Rtv = SkXmAutoNegDoneLone(pAC, IoC, Port); + break; + case SK_PHY_NAT: + Rtv = SkXmAutoNegDoneNat(pAC, IoC, Port); + break; #endif /* OTHER_PHY */ - default: - return(SK_AND_OTHER); + default: + return(SK_AND_OTHER); + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + Rtv = SkGmAutoNegDoneMarv(pAC, IoC, Port); } +#endif /* YUKON */ if (Rtv != SK_AND_OK) { return(Rtv); @@ -3293,6 +3466,7 @@ } /* SkMacAutoNegDone */ +#ifdef GENESIS /****************************************************************************** * * SkXmSetRxTxEn() - Special Set Rx/Tx Enable and some features in XMAC @@ -3346,8 +3520,10 @@ XM_IN16(IoC, Port, XM_MMU_CMD, &Word); } /* SkXmSetRxTxEn */ +#endif /* GENESIS */ +#ifdef YUKON /****************************************************************************** * * SkGmSetRxTxEn() - Special Set Rx/Tx Enable and some features in GMAC @@ -3386,14 +3562,17 @@ break; } - GM_OUT16(IoC, Port, GM_GP_CTRL, Ctrl | GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Ctrl | GM_GPCR_RX_ENA | + GM_GPCR_TX_ENA)); /* dummy read to ensure writing */ GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl); } /* SkGmSetRxTxEn */ +#endif /* YUKON */ +#ifndef SK_SLIM /****************************************************************************** * * SkMacSetRxTxEn() - Special Set Rx/Tx Enable and parameters @@ -3408,16 +3587,22 @@ int Port, /* Port Index (MAC_1 + n) */ int Para) { +#ifdef GENESIS if (pAC->GIni.GIGenesis) { SkXmSetRxTxEn(pAC, IoC, Port, Para); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { SkGmSetRxTxEn(pAC, IoC, Port, Para); } +#endif /* YUKON */ } /* SkMacSetRxTxEn */ +#endif /* !SK_SLIM */ /****************************************************************************** @@ -3438,7 +3623,9 @@ SK_GEPORT *pPrt; SK_U16 Reg; /* 16-bit register value */ SK_U16 IntMask; /* MAC interrupt mask */ +#ifdef GENESIS SK_U16 SWord; +#endif pPrt = &pAC->GIni.GP[Port]; @@ -3455,6 +3642,7 @@ return(0); } +#ifdef GENESIS if (pAC->GIni.GIGenesis) { /* set Duplex Mode and Pause Mode */ SkXmInitDupMd(pAC, IoC, Port); @@ -3502,7 +3690,8 @@ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &SWord); SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, (SK_U16)(SWord & ~PHY_B_AC_DIS_PM)); - 
SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, + (SK_U16)PHY_B_DEF_MSK); break; #ifdef OTHER_PHY case SK_PHY_LONE: @@ -3519,7 +3708,10 @@ /* enable Rx/Tx */ XM_OUT16(IoC, Port, XM_MMU_CMD, Reg | XM_MMU_ENA_RX | XM_MMU_ENA_TX); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* * Initialize the Interrupt Mask Register. Default IRQs are... * - Rx Counter Event Overflow @@ -3545,13 +3737,16 @@ } /* enable Rx/Tx */ - GM_OUT16(IoC, Port, GM_GP_CTRL, Reg | GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Reg | GM_GPCR_RX_ENA | + GM_GPCR_TX_ENA)); #ifndef VCPU /* Enable all PHY interrupts */ - SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, + (SK_U16)PHY_M_DEF_MSK); #endif /* VCPU */ } +#endif /* YUKON */ return(0); @@ -3573,6 +3768,7 @@ { SK_U16 Word; +#ifdef GENESIS if (pAC->GIni.GIGenesis) { XM_IN16(IoC, Port, XM_MMU_CMD, &Word); @@ -3582,15 +3778,21 @@ /* dummy read to ensure writing */ XM_IN16(IoC, Port, XM_MMU_CMD, &Word); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { GM_IN16(IoC, Port, GM_GP_CTRL, &Word); - GM_OUT16(IoC, Port, GM_GP_CTRL, Word & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)); + GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Word & ~(GM_GPCR_RX_ENA | + GM_GPCR_TX_ENA))); /* dummy read to ensure writing */ GM_IN16(IoC, Port, GM_GP_CTRL, &Word); } +#endif /* YUKON */ + } /* SkMacRxTxDisable */ @@ -3608,10 +3810,13 @@ int Port) /* Port Index (MAC_1 + n) */ { SK_GEPORT *pPrt; +#ifdef GENESIS SK_U16 Word; +#endif pPrt = &pAC->GIni.GP[Port]; +#ifdef GENESIS if (pAC->GIni.GIGenesis) { /* disable all XMAC IRQs */ @@ -3642,7 +3847,10 @@ #endif /* OTHER_PHY */ } } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* disable all GMAC IRQs */ SK_OUT8(IoC, GMAC_IRQ_MSK, 0); @@ -3651,6 +3859,8 @@ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0); #endif /* VCPU */ } +#endif /* YUKON */ + } /* SkMacIrqDisable */ @@ -3683,7 +3893,8 @@ /* setup Mode Register */ XM_OUT32(IoC, Port, XM_MODE, MdReg); -} /* SkXmSendCont*/ +} /* SkXmSendCont */ + /****************************************************************************** * @@ -3724,12 +3935,74 @@ TimeCtrl = GMT_ST_STOP | GMT_ST_CLR_IRQ; } /* Start/Stop Time Stamp Timer */ - SK_OUT8(pAC, GMAC_TI_ST_CTRL, TimeCtrl); + SK_OUT8(IoC, GMAC_TI_ST_CTRL, TimeCtrl); } + } /* SkMacTimeStamp*/ -#else /* SK_DIAG */ +#else /* !SK_DIAG */ + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmAutoNegLipaXmac() - Decides whether Link Partner could do auto-neg + * + * This function analyses the Interrupt status word. If any of the + * Auto-negotiating interrupt bits are set, the PLipaAutoNeg variable + * is set true. 
+ */ +void SkXmAutoNegLipaXmac( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_U16 IStatus) /* Interrupt Status word to analyse */ +{ + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && + (IStatus & (XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND)) != 0) { + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegLipa: AutoNeg detected on Port %d, IStatus=0x%04X\n", + Port, IStatus)); + pPrt->PLipaAutoNeg = SK_LIPA_AUTO; + } +} /* SkXmAutoNegLipaXmac */ +#endif /* GENESIS */ + + +/****************************************************************************** + * + * SkMacAutoNegLipaPhy() - Decides whether Link Partner could do auto-neg + * + * This function analyses the PHY status word. + * If any of the Auto-negotiating bits are set, the PLipaAutoNeg variable + * is set true. + */ +void SkMacAutoNegLipaPhy( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_U16 PhyStat) /* PHY Status word to analyse */ +{ + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && + (PhyStat & PHY_ST_AN_OVER) != 0) { + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegLipa: AutoNeg detected on Port %d, PhyStat=0x%04X\n", + Port, PhyStat)); + pPrt->PLipaAutoNeg = SK_LIPA_AUTO; + } +} /* SkMacAutoNegLipaPhy */ + + +#ifdef GENESIS /****************************************************************************** * * SkXmIrq() - Interrupt Service Routine @@ -3758,6 +4031,9 @@ SK_EVPARA Para; SK_U16 IStatus; /* Interrupt status read from the XMAC */ SK_U16 IStatus2; +#ifdef SK_SLIM + SK_U64 OverflowStatus; +#endif pPrt = &pAC->GIni.GP[Port]; @@ -3775,7 +4051,7 @@ } SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("XmacIrq Port %d Isr 0x%04x\n", Port, IStatus)); + ("XmacIrq Port %d Isr 0x%04X\n", Port, IStatus)); if (!pPrt->PHWLinkUp) { /* Spurious XMAC interrupt */ @@ -3789,7 +4065,7 @@ XM_IN16(IoC, Port, XM_ISRC, &IStatus2); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("SkXmIrq: Link async. Double check Port %d 0x%04x 0x%04x\n", + ("SkXmIrq: Link async. Double check Port %d 0x%04X 0x%04X\n", Port, IStatus, IStatus2)); IStatus &= ~XM_IS_INP_ASS; IStatus |= IStatus2; @@ -3838,14 +4114,20 @@ /* Combined Tx & Rx Counter Overflow SIRQ Event */ if ((IStatus & (XM_IS_RXC_OV | XM_IS_TXC_OV)) != 0) { +#ifdef SK_SLIM + SkXmOverflowStatus(pAC, IoC, Port, IStatus, &OverflowStatus); +#else Para.Para32[0] = (SK_U32)Port; Para.Para32[1] = (SK_U32)IStatus; SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para); +#endif /* SK_SLIM */ } if ((IStatus & XM_IS_RXF_OV) != 0) { /* normal situation -> no effect */ +#ifdef DEBUG pPrt->PRxOverCnt++; +#endif /* DEBUG */ } if ((IStatus & XM_IS_TXF_UR) != 0) { @@ -3861,8 +4143,10 @@ /* not served here */ } } /* SkXmIrq */ +#endif /* GENESIS */ +#ifdef YUKON /****************************************************************************** * * SkGmIrq() - Interrupt Service Routine @@ -3880,32 +4164,43 @@ int Port) /* Port Index (MAC_1 + n) */ { SK_GEPORT *pPrt; - SK_EVPARA Para; SK_U8 IStatus; /* Interrupt status */ +#ifdef SK_SLIM + SK_U64 OverflowStatus; +#else + SK_EVPARA Para; +#endif pPrt = &pAC->GIni.GP[Port]; SK_IN8(IoC, GMAC_IRQ_SRC, &IStatus); +#ifdef XXX /* LinkPartner Auto-negable? 
*/ SkMacAutoNegLipaPhy(pAC, IoC, Port, IStatus); +#endif /* XXX */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("GmacIrq Port %d Isr 0x%04x\n", Port, IStatus)); + ("GmacIrq Port %d Isr 0x%04X\n", Port, IStatus)); /* Combined Tx & Rx Counter Overflow SIRQ Event */ if (IStatus & (GM_IS_RX_CO_OV | GM_IS_TX_CO_OV)) { /* these IRQs will be cleared by reading GMACs register */ +#ifdef SK_SLIM + SkGmOverflowStatus(pAC, IoC, Port, IStatus, &OverflowStatus); +#else Para.Para32[0] = (SK_U32)Port; Para.Para32[1] = (SK_U32)IStatus; SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para); +#endif } if (IStatus & GM_IS_RX_FF_OR) { /* clear GMAC Rx FIFO Overrun IRQ */ SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_CLI_RX_FO); - +#ifdef DEBUG pPrt->PRxOverCnt++; +#endif /* DEBUG */ } if (IStatus & GM_IS_TX_FF_UR) { @@ -3923,6 +4218,8 @@ /* not served here */ } } /* SkGmIrq */ +#endif /* YUKON */ + /****************************************************************************** * @@ -3938,19 +4235,25 @@ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ { - +#ifdef GENESIS if (pAC->GIni.GIGenesis) { /* IRQ from XMAC */ SkXmIrq(pAC, IoC, Port); } - else { +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { /* IRQ from GMAC */ SkGmIrq(pAC, IoC, Port); } +#endif /* YUKON */ + } /* SkMacIrq */ #endif /* !SK_DIAG */ +#ifdef GENESIS /****************************************************************************** * * SkXmUpdateStats() - Force the XMAC to output the current statistic @@ -4000,24 +4303,6 @@ return(0); } /* SkXmUpdateStats */ -/****************************************************************************** - * - * SkGmUpdateStats() - Force the GMAC to output the current statistic - * - * Description: - * Empty function for GMAC. Statistic data is accessible in direct way. - * - * Returns: - * 0: success - * 1: something went wrong - */ -int SkGmUpdateStats( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -unsigned int Port) /* Port Index (MAC_1 + n) */ -{ - return(0); -} /****************************************************************************** * @@ -4033,11 +4318,11 @@ * 1: something went wrong */ int SkXmMacStatistic( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -unsigned int Port, /* Port Index (MAC_1 + n) */ -SK_U16 StatAddr, /* MIB counter base address */ -SK_U32 *pVal) /* ptr to return statistic value */ +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 StatAddr, /* MIB counter base address */ +SK_U32 SK_FAR *pVal) /* ptr to return statistic value */ { if ((StatAddr < XM_TXF_OK) || (StatAddr > XM_RXF_MAX_SZ)) { @@ -4051,63 +4336,137 @@ return(0); } /* SkXmMacStatistic */ + /****************************************************************************** * - * SkGmMacStatistic() - Get GMAC counter value + * SkXmResetCounter() - Clear MAC statistic counter * * Description: - * Gets the 32bit counter value. Except for the octet counters - * the lower 32bit are counted in hardware and the upper 32bit - * must be counted in software by monitoring counter overflow interrupts. + * Force the XMAC to clear its statistic counter. 
* * Returns: * 0: success * 1: something went wrong */ -int SkGmMacStatistic( +int SkXmResetCounter( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -unsigned int Port, /* Port Index (MAC_1 + n) */ -SK_U16 StatAddr, /* MIB counter base address */ -SK_U32 *pVal) /* ptr to return statistic value */ +unsigned int Port) /* Port Index (MAC_1 + n) */ { + XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); - if ((StatAddr < GM_RXF_UC_OK) || (StatAddr > GM_TXE_FIFO_UR)) { - - SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG); - - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("SkGmMacStat: wrong MIB counter 0x%04X\n", StatAddr)); - return(1); + return(0); +} /* SkXmResetCounter */ + + +/****************************************************************************** + * + * SkXmOverflowStatus() - Gets the status of counter overflow interrupt + * + * Description: + * Checks the source causing an counter overflow interrupt. On success the + * resulting counter overflow status is written to , whereas the + * upper dword stores the XMAC ReceiveCounterEvent register and the lower + * dword the XMAC TransmitCounterEvent register. + * + * Note: + * For XMAC the interrupt source is a self-clearing register, so the source + * must be checked only once. SIRQ module does another check to be sure + * that no interrupt get lost during process time. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkXmOverflowStatus( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 IStatus, /* Interupt Status from MAC */ +SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */ +{ + SK_U64 Status; /* Overflow status */ + SK_U32 RegVal; + + Status = 0; + + if ((IStatus & XM_IS_RXC_OV) != 0) { + + XM_IN32(IoC, Port, XM_RX_CNT_EV, &RegVal); + Status |= (SK_U64)RegVal << 32; + } + + if ((IStatus & XM_IS_TXC_OV) != 0) { + + XM_IN32(IoC, Port, XM_TX_CNT_EV, &RegVal); + Status |= (SK_U64)RegVal; } - - GM_IN32(IoC, Port, StatAddr, pVal); + + *pStatus = Status; return(0); -} /* SkGmMacStatistic */ +} /* SkXmOverflowStatus */ +#endif /* GENESIS */ + +#ifdef YUKON /****************************************************************************** * - * SkXmResetCounter() - Clear MAC statistic counter + * SkGmUpdateStats() - Force the GMAC to output the current statistic * * Description: - * Force the XMAC to clear its statistic counter. + * Empty function for GMAC. Statistic data is accessible in direct way. * * Returns: * 0: success * 1: something went wrong */ -int SkXmResetCounter( +int SkGmUpdateStats( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ unsigned int Port) /* Port Index (MAC_1 + n) */ { - XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); - /* Clear two times according to Errata #3 */ - XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); + return(0); +} + + +/****************************************************************************** + * + * SkGmMacStatistic() - Get GMAC counter value + * + * Description: + * Gets the 32bit counter value. Except for the octet counters + * the lower 32bit are counted in hardware and the upper 32bit + * must be counted in software by monitoring counter overflow interrupts. 
+ * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkGmMacStatistic( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 StatAddr, /* MIB counter base address */ +SK_U32 SK_FAR *pVal) /* ptr to return statistic value */ +{ + + if ((StatAddr < GM_RXF_UC_OK) || (StatAddr > GM_TXE_FIFO_UR)) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("SkGmMacStat: wrong MIB counter 0x%04X\n", StatAddr)); + return(1); + } + + GM_IN32(IoC, Port, StatAddr, pVal); return(0); -} /* SkXmResetCounter */ +} /* SkGmMacStatistic */ + /****************************************************************************** * @@ -4131,7 +4490,6 @@ GM_IN16(IoC, Port, GM_PHY_ADDR, &Reg); -#ifndef VCPU /* set MIB Clear Counter Mode */ GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg | GM_PAR_MIB_CLR); @@ -4143,59 +4501,10 @@ /* clear MIB Clear Counter Mode */ GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg); -#endif /* !VCPU */ return(0); } /* SkGmResetCounter */ -/****************************************************************************** - * - * SkXmOverflowStatus() - Gets the status of counter overflow interrupt - * - * Description: - * Checks the source causing an counter overflow interrupt. On success the - * resulting counter overflow status is written to , whereas the - * upper dword stores the XMAC ReceiveCounterEvent register and the lower - * dword the XMAC TransmitCounterEvent register. - * - * Note: - * For XMAC the interrupt source is a self-clearing register, so the source - * must be checked only once. SIRQ module does another check to be sure - * that no interrupt get lost during process time. - * - * Returns: - * 0: success - * 1: something went wrong - */ -int SkXmOverflowStatus( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -unsigned int Port, /* Port Index (MAC_1 + n) */ -SK_U16 IStatus, /* Interupt Status from MAC */ -SK_U64 *pStatus) /* ptr for return overflow status value */ -{ - SK_U64 Status; /* Overflow status */ - SK_U32 RegVal; - - Status = 0; - - if ((IStatus & XM_IS_RXC_OV) != 0) { - - XM_IN32(IoC, Port, XM_RX_CNT_EV, &RegVal); - Status |= (SK_U64)RegVal << 32; - } - - if ((IStatus & XM_IS_TXC_OV) != 0) { - - XM_IN32(IoC, Port, XM_TX_CNT_EV, &RegVal); - Status |= (SK_U64)RegVal; - } - - *pStatus = Status; - - return(0); -} /* SkXmOverflowStatus */ - /****************************************************************************** * @@ -4217,11 +4526,11 @@ * 1: something went wrong */ int SkGmOverflowStatus( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -unsigned int Port, /* Port Index (MAC_1 + n) */ -SK_U16 IStatus, /* Interupt Status from MAC */ -SK_U64 *pStatus) /* ptr for return overflow status value */ +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 IStatus, /* Interupt Status from MAC */ +SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */ { SK_U64 Status; /* Overflow status */ SK_U16 RegVal; @@ -4252,6 +4561,8 @@ return(0); } /* SkGmOverflowStatus */ + +#ifndef SK_SLIM /****************************************************************************** * * SkGmCableDiagStatus() - Starts / Gets status of cable diagnostic test @@ -4305,7 +4616,7 @@ /* start Cable Diagnostic Test */ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, - RegVal | PHY_M_CABD_ENA_TEST); + (SK_U16)(RegVal | PHY_M_CABD_ENA_TEST)); return(0); } @@ -4336,5 
+4647,7 @@ return(0); } /* SkGmCableDiagStatus */ +#endif /* !SK_SLIM */ +#endif /* YUKON */ /* End of file */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/tulip/tulip_core.c linux.22-ac2/drivers/net/tulip/tulip_core.c --- linux.vanilla/drivers/net/tulip/tulip_core.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/net/tulip/tulip_core.c 2003-08-16 20:27:02.000000000 +0100 @@ -232,6 +232,7 @@ { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT }, { 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, /* ALi 1563 integrated ethernet */ + { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ { } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, tulip_pci_tbl); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/tun.c linux.22-ac2/drivers/net/tun.c --- linux.vanilla/drivers/net/tun.c 2002-08-03 16:08:26.000000000 +0100 +++ linux.22-ac2/drivers/net/tun.c 2003-06-29 16:09:56.000000000 +0100 @@ -188,7 +188,7 @@ size_t len = count; if (!(tun->flags & TUN_NO_PI)) { - if ((len -= sizeof(pi)) < 0) + if ((len -= sizeof(pi)) > len) return -EINVAL; memcpy_fromiovec((void *)&pi, iv, sizeof(pi)); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/8253x/8253xsyn.c linux.22-ac2/drivers/net/wan/8253x/8253xsyn.c --- linux.vanilla/drivers/net/wan/8253x/8253xsyn.c 2002-08-03 16:08:26.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/8253x/8253xsyn.c 2003-06-29 16:09:51.000000000 +0100 @@ -1030,7 +1030,7 @@ } #if 0 - if ((tty->count == 1) && (port->count != 0)) + if ((atomic_read(&tty->count) == 1) && (port->count != 0)) { /* * Uh, oh. tty->count is 1, which means that the tty diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/c101.c linux.22-ac2/drivers/net/wan/c101.c --- linux.vanilla/drivers/net/wan/c101.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/c101.c 2003-06-29 16:09:51.000000000 +0100 @@ -1,12 +1,11 @@ /* * Moxa C101 synchronous serial card driver for Linux * - * Copyright (C) 2000-2002 Krzysztof Halasa + * Copyright (C) 2000-2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
* * For information see http://hq.pm.waw.pl/hdlc/ * @@ -31,7 +30,7 @@ #include "hd64570.h" -static const char* version = "Moxa C101 driver version: 1.10"; +static const char* version = "Moxa C101 driver version: 1.12"; static const char* devname = "C101"; #define C101_PAGE 0x1D00 @@ -78,7 +77,12 @@ #define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg)) #define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg)) #define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg)) -#define sca_outw(value, reg, card) writew(value, (card)->win0base + C101_SCA + (reg)) + +/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */ +#define sca_outw(value, reg, card) do { \ + writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \ + writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg+1));\ +} while(0) #define port_to_card(port) (port) #define log_node(port) (0) @@ -352,7 +356,7 @@ c101_run(irq, ram); if (*hw == '\x0') - return 0; + return first_card ? 0 : -ENOSYS; }while(*hw++ == ':'); printk(KERN_ERR "c101: invalid hardware parameters\n"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/Config.in linux.22-ac2/drivers/net/wan/Config.in --- linux.vanilla/drivers/net/wan/Config.in 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/Config.in 2003-08-16 21:14:17.000000000 +0100 @@ -38,12 +38,12 @@ fi dep_tristate ' Support for Frame Relay on MultiGate boards' CONFIG_COMX_PROTO_FR $CONFIG_COMX fi -# -# The Etinc driver has not been tested as non-modular yet. -# - - dep_tristate ' Etinc PCISYNC serial board support (EXPERIMENTAL)' CONFIG_DSCC4 m + dep_tristate ' DSCC4 support' CONFIG_DSCC4 m + if [ "$CONFIG_DSCC4" = "m" ]; then + dep_bool ' Etinc PCISYNC features' CONFIG_DSCC4_PCISYNC $CONFIG_DSCC4 + dep_bool ' GPIO and PCI #RST pins wired together' CONFIG_DSCC4_PCI_RST $CONFIG_DSCC4 + fi # # Lan Media's board. Currently 1000, 1200, 5200, 5245 @@ -68,6 +68,7 @@ tristate ' Generic HDLC layer' CONFIG_HDLC if [ "$CONFIG_HDLC" != "n" ]; then bool ' Raw HDLC support' CONFIG_HDLC_RAW + bool ' Raw HDLC Ethernet device support' CONFIG_HDLC_RAW_ETH bool ' Cisco HDLC support' CONFIG_HDLC_CISCO bool ' Frame Relay support' CONFIG_HDLC_FR bool ' Synchronous Point-to-Point Protocol (PPP) support' CONFIG_HDLC_PPP @@ -76,6 +77,18 @@ else comment ' X.25/LAPB support is disabled' fi + if [ "$CONFIG_PCI" != "n" ]; then + dep_tristate ' Cyclades-PC300 support (RS-232/V.35, X.21, T1/E1 boards)' CONFIG_PC300 $CONFIG_HDLC + if [ "$CONFIG_PC300" != "n" ]; then + if [ "$CONFIG_PPP" != "n" -a "$CONFIG_PPP_MULTLINK" != "n" -a "$CONFIG_PPP_SYNCTTY" != "n" -a "$CONFIG_HDLC_PPP" = "y" ]; + then + bool ' Cyclades-PC300 MLPPP support' CONFIG_PC300_MLPPP + else + comment ' Cyclades-PC300 MLPPP support is disabled. You have to enable PPP, PPP_MULTILINK' + comment ' PPP_SYNCTTY and HDLC_PPP to use this package.' 
+ fi + fi + fi dep_tristate ' SDL RISCom/N2 support' CONFIG_N2 $CONFIG_HDLC dep_tristate ' Moxa C101 support' CONFIG_C101 $CONFIG_HDLC dep_tristate ' FarSync T-Series support' CONFIG_FARSYNC $CONFIG_HDLC diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/dscc4.c linux.22-ac2/drivers/net/wan/dscc4.c --- linux.vanilla/drivers/net/wan/dscc4.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/dscc4.c 2003-06-29 16:09:51.000000000 +0100 @@ -112,6 +112,11 @@ static int debug; static int quartz; +#ifdef CONFIG_DSCC4_PCI_RST +static DECLARE_MUTEX(dscc4_sem); +static u32 dscc4_pci_config_store[16]; +#endif + #define DRV_NAME "dscc4" #undef DSCC4_POLLING @@ -165,7 +170,7 @@ #define SOURCE_ID(flags) (((flags) >> 28) & 0x03) #define TO_SIZE(state) (((state) >> 16) & 0x1fff) #define TO_STATE(len) cpu_to_le32(((len) & TxSizeMax) << 16) -#define RX_MAX(len) ((((len) >> 5) + 1) << 5) +#define RX_MAX(len) ((((len) >> 5) + 1) << 5) /* Cf RLCR */ #define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET) struct dscc4_pci_priv { @@ -256,6 +261,10 @@ #define IMR 0x54 #define ISR 0x58 +#define GPDIR 0x0400 +#define GPDATA 0x0404 +#define GPIM 0x0408 + /* Bit masks */ #define EncodingMask 0x00700000 #define CrcMask 0x00000003 @@ -283,6 +292,7 @@ #define Hold 0x40000000 #define SccBusy 0x10000000 #define PowerUp 0x80000000 +#define Vis 0x00001000 #define FrameOk (FrameVfr | FrameCrc) #define FrameVfr 0x80 #define FrameRdo 0x40 @@ -319,10 +329,19 @@ #define Arf 0x00000002 #define ArAck 0x00000001 -/* Misc */ +/* State flags */ +#define Ready 0x00000000 #define NeedIDR 0x00000001 #define NeedIDT 0x00000002 #define RdoSet 0x00000004 +#define FakeReset 0x00000008 + +/* Don't mask RDO. Ever. */ +#ifdef DSCC4_POLLING +#define EventsMask 0xfffeef7f +#else +#define EventsMask 0xfffa8f7a +#endif /* Functions prototypes */ static inline void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *); @@ -487,9 +506,9 @@ skb = dev_alloc_skb(len); dpriv->rx_skbuff[dirty] = skb; if (skb) { - skb->dev = dev; - skb->protocol = htons(ETH_P_HDLC); - skb->mac.raw = skb->data; + skb->dev = dev; + skb->protocol = hdlc_type_trans(skb, dev); + skb->mac.raw = skb->data; rx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data, len, PCI_DMA_FROMDEVICE); } else { @@ -566,15 +585,18 @@ return (i >= 0 ) ? i : -EAGAIN; } -/* Requires protection against interrupt */ static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev) { + unsigned long flags; + + spin_lock_irqsave(&dpriv->pci_priv->lock, flags); /* Cf errata DS5 p.6 */ writel(0x00000000, dev->base_addr + CH0LRDA + dpriv->dev_id*4); - scc_writel(~PowerUp & scc_readl(dpriv, CCR0), dpriv, dev, CCR0); + scc_patchl(PowerUp, 0, dpriv, dev, CCR0); readl(dev->base_addr + CH0LRDA + dpriv->dev_id*4); writel(MTFi|Rdr, dev->base_addr + dpriv->dev_id*0x0c + CH0CFG); writel(Action, dev->base_addr + GCMDR); + spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags); } static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev) @@ -582,7 +604,7 @@ u16 i = 0; /* Cf errata DS5 p.7 */ - scc_writel(~PowerUp & scc_readl(dpriv, CCR0), dpriv, dev, CCR0); + scc_patchl(PowerUp, 0, dpriv, dev, CCR0); scc_writel(0x00050000, dpriv, dev, CCR2); /* * Must be longer than the time required to fill the fifo. @@ -805,7 +827,8 @@ static void dscc4_init_registers(struct dscc4_dev_priv *dpriv, struct net_device *dev) { - scc_writel(0x80001000, dpriv, dev, CCR0); + /* No interrupts, SCC core disabled. 
Let's relax */ + scc_writel(0x00000000, dpriv, dev, CCR0); scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR); @@ -814,21 +837,12 @@ * Shared flags transmission disabled - cf errata DS5 p.11 * Carrier detect disabled - cf errata p.14 */ - scc_writel(0x021c8000, dpriv, dev, CCR1); + scc_writel(0x02408000, dpriv, dev, CCR1); /* crc not forwarded - Cf errata DS5 p.11 */ scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2); // crc forwarded //scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2); - - /* Don't mask RDO. Ever. */ -#ifdef DSCC4_POLLING - scc_writel(0xfffeef7f, dpriv, dev, IMR); /* Interrupt mask */ -#else - //scc_writel(0xfffaef7f, dpriv, dev, IMR); /* Interrupt mask */ - //scc_writel(0xfffaef7e, dpriv, dev, IMR); /* Interrupt mask */ - scc_writel(0xfffa8f7a, dpriv, dev, IMR); /* Interrupt mask */ -#endif } static int dscc4_found1(struct pci_dev *pdev, unsigned long ioaddr) @@ -883,6 +897,10 @@ dscc4_init_registers(dpriv, d); dpriv->parity = PARITY_CRC16_PR0_CCITT; dpriv->encoding = ENCODING_NRZ; + if (dscc4_init_ring(d)) { + unregister_hdlc_device(hdlc); + goto err_unregister; + } } if (dscc4_set_quartz(root, quartz) < 0) goto err_unregister; @@ -892,8 +910,10 @@ return 0; err_unregister: - while (--i >= 0) + while (--i >= 0) { + dscc4_release_ring(root + i); unregister_hdlc_device(&root[i].hdlc); + } kfree(ppriv); err_free_dev: kfree(root); @@ -932,6 +952,46 @@ return 0; } +#ifdef CONFIG_DSCC4_PCI_RST +/* + * Some DSCC4-based cards wires the GPIO port and the PCI #RST pin together + * so as to provide a safe way to reset the asic while not the whole machine + * rebooting. + * + * This code doesn't need to be efficient. Keep It Simple + */ +static void dscc4_pci_reset(struct pci_dev *pdev, u32 ioaddr) +{ + int i; + + down(&dscc4_sem); + for (i = 0; i < 16; i++) + pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i); + + /* Maximal LBI clock divider (who cares ?) and whole GPIO range. */ + writel(0x001c0000, ioaddr + GMODE); + /* Configure GPIO port as output */ + writel(0x0000ffff, ioaddr + GPDIR); + /* Disable interruption */ + writel(0x0000ffff, ioaddr + GPIM); + + writel(0x0000ffff, ioaddr + GPDATA); + writel(0x00000000, ioaddr + GPDATA); + + /* Flush posted writes */ + readl(ioaddr + GSTAR); + + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(10); + + for (i = 0; i < 16; i++) + pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); + up(&dscc4_sem); +} +#else +#define dscc4_pci_reset(pdev,ioaddr) do {} while (0) +#endif /* CONFIG_DSCC4_PCI_RST */ + static int dscc4_open(struct net_device *dev) { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); @@ -949,12 +1009,29 @@ ppriv = dpriv->pci_priv; - if ((ret = dscc4_init_ring(dev))) - goto err_out; + /* + * Due to various bugs, there is no way to reliably reset a + * specific port (manufacturer's dependant special PCI #RST wiring + * apart: it affects all ports). Thus the device goes in the best + * silent mode possible at dscc4_close() time and simply claims to + * be up if it's opened again. It still isn't possible to change + * the HDLC configuration without rebooting but at least the ports + * can be up/down ifconfig'ed without killing the host. 
+ */ + if (dpriv->flags & FakeReset) { + dpriv->flags &= ~FakeReset; + scc_patchl(0, PowerUp, dpriv, dev, CCR0); + scc_patchl(0, 0x00050000, dpriv, dev, CCR2); + scc_writel(EventsMask | 0x00000004, dpriv, dev, IMR); + printk(KERN_INFO "%s: up again.\n", dev->name); + goto done; + } /* IDT+IDR during XPR */ dpriv->flags = NeedIDR | NeedIDT; + scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0); + /* * The following is a bit paranoid... * @@ -965,15 +1042,17 @@ if (scc_readl_star(dpriv, dev) & SccBusy) { printk(KERN_ERR "%s busy. Try later\n", dev->name); ret = -EAGAIN; - goto err_free_ring; + goto err_out; } else printk(KERN_INFO "%s: available. Good\n", dev->name); + scc_writel(EventsMask | 0x00000004, dpriv, dev, IMR); + /* Posted write is flushed in the wait_ack loop */ scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR); if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0) - goto err_free_ring; + goto err_disable_scc_events; /* * I would expect XPR near CE completion (before ? after ?). @@ -984,12 +1063,13 @@ */ if ((ret = dscc4_xpr_ack(dpriv)) < 0) { printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR"); - goto err_free_ring; + goto err_disable_scc_events; } if (debug > 2) dscc4_tx_print(dev, dpriv, "Open"); +done: netif_start_queue(dev); init_timer(&dpriv->timer); @@ -1001,7 +1081,10 @@ return 0; +err_disable_scc_events: + scc_writel(0xffffffff, dpriv, dev, IMR); err_free_ring: + scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); dscc4_release_ring(dpriv); err_out: hdlc_close(hdlc); @@ -1058,16 +1141,15 @@ { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); hdlc_device *hdlc = dev_to_hdlc(dev); - unsigned long flags; del_timer_sync(&dpriv->timer); netif_stop_queue(dev); - spin_lock_irqsave(&dpriv->pci_priv->lock, flags); - dscc4_rx_reset(dpriv, dev); - spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags); + scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); + scc_patchl(0x00050000, 0, dpriv, dev, CCR2); + scc_writel(0xffffffff, dpriv, dev, IMR); - dscc4_tx_reset(dpriv, dev); + dpriv->flags |= FakeReset; hdlc_close(hdlc); dscc4_release_ring(dpriv); @@ -1076,6 +1158,10 @@ return 0; } +/* + * Et*nc's PCISYNC only allows clock generation on first two ports. + * DSCC4 can do more. It depends on the manufacturer of the card. + */ static inline int dscc4_check_clock_ability(int port) { int ret = 0; @@ -1087,6 +1173,43 @@ return ret; } +/* + * DS1 p.137: "There are a total of 13 different clocking modes..." + * ^^ + * Design choices: + * - by default, assume a clock is provided on pin RxClk/TxClk (clock mode 0a). + * Clock mode 3b _should_ work but the testing seems to make this point + * dubious (0x00000033 should be considered for CCR0 for testing). + * - if line rate is specified, clocks are assumed to be generated. A quartz + * must be available (on pin XTAL1). Modes 6b/7b are used. The choice between + * these depends on the required frequency scaling. + * - no high speed mode (40Mb/s). May be trivial to do but I don't have an + * appropriate external clocking device for testing. + * - no time-slot/clock mode 5: shameless lazyness. + * + * The clock signals wiring can be (is ?) manufacturer dependant. + * + * BIG FAT WARNING: if the device isn't provided enough clocking signal, it + * won't pass the init sequence. For example, straight back-to-back DTE without + * external clock will fail when dscc4_open() (<- 'ifconfig hdlcx xxx') is + * called. 
+ * + * Clock mode related bits of CCR0: + * +------------ TOE: output TxClk (0b/2b/3a/3b/6b/7a/7b only) + * | +---------- SSEL: sub-mode select 0 -> a, 1 -> b + * | | +-------- High Speed: say 0 + * | | | +-+-+-- Clock Mode: 0..7 + * | | | | | | + * -+-+-+-+-+-+-+-+ + * x|x|5|4|3|2|1|0| lower bits + * + * Division factor of BRR: k = (N+1)x2^M (total divider = 16xk in mode 6b) + * +-+-+-+------------------ M (0..15) + * | | | | +-+-+-+-+-+-- N (0..63) + * 0 0 0 0 | | | | 0 0 | | | | | | + * ...-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * f|e|d|c|b|a|9|8|7|6|5|4|3|2|1|0| lower bits + */ static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state) { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); @@ -1123,13 +1246,13 @@ } brr = (m << 8) | n; divider = n << m; - if (!(*state & 0x00000001)) /* Clock mode 6b */ + if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */ divider <<= 4; *bps = xtal / divider; } else { /* * External clock - DTE - * "state" already reflects Clock mode 0a. + * "state" already reflects Clock mode 0a (0xzzzzzz00 for CCR0). * Nothing more to be done */ brr = 0; @@ -1168,6 +1291,11 @@ if (!capable(CAP_NET_ADMIN)) return -EPERM; + if (dpriv->flags & FakeReset) { + printk(KERN_INFO "%s: please reset the device" + "before this command\n", dev->name); + return -EPERM; + } if (copy_from_user(&dpriv->settings, line, size)) return -EFAULT; ret = dscc4_set_iface(dpriv, dev); @@ -1226,7 +1354,7 @@ dev->name, dpriv->settings.clock_rate, bps); } } else { /* DTE */ - state = 0x80001000; + state |= PowerUp | Vis; printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name); } scc_writel(state, dpriv, dev, CCR0); @@ -1332,6 +1460,8 @@ state = readl(ioaddr + GSTAR); if (!state) goto out; + if (debug > 3) + printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state); writel(state, ioaddr + GSTAR); if (state & Arf) { @@ -1377,6 +1507,9 @@ cur = dpriv->iqtx_current%IRQ_RING_SIZE; state = dpriv->iqtx[cur]; if (!state) { + if (debug > 4) + printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name, + state); if ((debug > 1) && (loop > 1)) printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop); if (loop && netif_queue_stopped(dev)) @@ -1463,9 +1596,19 @@ } if (state & Xpr) { u32 scc_addr, ring; + int i; + + /* + * - the busy condition happens (sometimes); + * - it doesn't seem to make the handler unreliable. + */ + for (i = 1; i; i <<= 1) { + if (!(scc_readl_star(dpriv, dev) & SccBusy)) + break; + } + if (!i) + printk(KERN_INFO "%s busy in irq\n", dev->name); - if (scc_readl_star(dpriv, dev) & SccBusy) - printk(KERN_ERR "%s busy. 
Fatal\n", dev->name); scc_addr = dev->base_addr + 0x0c*dpriv->dev_id; /* Keep this order: IDT before IDR */ if (dpriv->flags & NeedIDT) { @@ -1542,6 +1685,9 @@ if (!(state & SccEvt)){ struct RxFD *rx_fd; + if (debug > 4) + printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name, + state); state &= 0x00ffffff; if (state & Err) { /* Hold or reset */ printk(KERN_DEBUG "%s: Rx ERR\n", dev->name); @@ -1757,7 +1903,7 @@ (++i%TX_RING_SIZE)*sizeof(*tx_fd)); } while (i < TX_RING_SIZE); - if (dscc4_init_dummy_skb(dpriv) < 0) + if (dscc4_init_dummy_skb(dpriv) == NULL) goto err_free_dma_tx; memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE); @@ -1796,12 +1942,16 @@ root = ppriv->root; ioaddr = hdlc_to_dev(&root->hdlc)->base_addr; + + dscc4_pci_reset(pdev, ioaddr); + free_irq(pdev->irq, root); pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg, ppriv->iqcfg_dma); for (i = 0; i < dev_per_card; i++) { struct dscc4_dev_priv *dpriv = root + i; + dscc4_release_ring(dpriv); pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), dpriv->iqrx, dpriv->iqrx_dma); pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/farsync.c linux.22-ac2/drivers/net/wan/farsync.c --- linux.vanilla/drivers/net/wan/farsync.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/farsync.c 2003-06-29 16:09:51.000000000 +0100 @@ -764,7 +764,7 @@ /* Push upstream */ skb->mac.raw = skb->data; skb->dev = hdlc_to_dev ( &port->hdlc ); - skb->protocol = htons ( ETH_P_HDLC ); + skb->protocol = hdlc_type_trans(skb, skb->dev); netif_rx ( skb ); port_to_dev ( port )->last_rx = jiffies; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/hd64572.h linux.22-ac2/drivers/net/wan/hd64572.h --- linux.vanilla/drivers/net/wan/hd64572.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/hd64572.h 2003-06-29 16:09:51.000000000 +0100 @@ -0,0 +1,433 @@ +/* + * hd64572.h Description of the Hitachi HD64572 (SCA-II), valid for + * CPU modes 0 & 2. + * + * Author: Ivan Passos + * + * Copyright: (c) 2000-2001 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#ifndef _HD64572_H +#define _HD64572_H + +/* Illegal Access Register */ +#define ILAR 0x00 + +/* Wait Controller Registers */ +#define PABR0L 0x20 /* Physical Addr Boundary Register 0 L */ +#define PABR0H 0x21 /* Physical Addr Boundary Register 0 H */ +#define PABR1L 0x22 /* Physical Addr Boundary Register 1 L */ +#define PABR1H 0x23 /* Physical Addr Boundary Register 1 H */ +#define WCRL 0x24 /* Wait Control Register L */ +#define WCRM 0x25 /* Wait Control Register M */ +#define WCRH 0x26 /* Wait Control Register H */ + +/* Interrupt Registers */ +#define IVR 0x60 /* Interrupt Vector Register */ +#define IMVR 0x64 /* Interrupt Modified Vector Register */ +#define ITCR 0x68 /* Interrupt Control Register */ +#define ISR0 0x6c /* Interrupt Status Register 0 */ +#define ISR1 0x70 /* Interrupt Status Register 1 */ +#define IER0 0x74 /* Interrupt Enable Register 0 */ +#define IER1 0x78 /* Interrupt Enable Register 1 */ + +/* Register Access Macros (chan is 0 or 1 in _any_ case) */ +#define M_REG(reg, chan) (reg + 0x80*chan) /* MSCI */ +#define DRX_REG(reg, chan) (reg + 0x40*chan) /* DMA Rx */ +#define DTX_REG(reg, chan) (reg + 0x20*(2*chan + 1)) /* DMA Tx */ +#define TRX_REG(reg, chan) (reg + 0x20*chan) /* Timer Rx */ +#define TTX_REG(reg, chan) (reg + 0x10*(2*chan + 1)) /* Timer Tx */ +#define ST_REG(reg, chan) (reg + 0x80*chan) /* Status Cnt */ +#define IR0_DRX(val, chan) ((val)<<(8*(chan))) /* Int DMA Rx */ +#define IR0_DTX(val, chan) ((val)<<(4*(2*chan + 1))) /* Int DMA Tx */ +#define IR0_M(val, chan) ((val)<<(8*(chan))) /* Int MSCI */ + +/* MSCI Channel Registers */ +#define MD0 0x138 /* Mode reg 0 */ +#define MD1 0x139 /* Mode reg 1 */ +#define MD2 0x13a /* Mode reg 2 */ +#define MD3 0x13b /* Mode reg 3 */ +#define CTL 0x130 /* Control reg */ +#define RXS 0x13c /* RX clock source */ +#define TXS 0x13d /* TX clock source */ +#define EXS 0x13e /* External clock input selection */ +#define TMCT 0x144 /* Time constant (Tx) */ +#define TMCR 0x145 /* Time constant (Rx) */ +#define CMD 0x128 /* Command reg */ +#define ST0 0x118 /* Status reg 0 */ +#define ST1 0x119 /* Status reg 1 */ +#define ST2 0x11a /* Status reg 2 */ +#define ST3 0x11b /* Status reg 3 */ +#define ST4 0x11c /* Status reg 4 */ +#define FST 0x11d /* frame Status reg */ +#define IE0 0x120 /* Interrupt enable reg 0 */ +#define IE1 0x121 /* Interrupt enable reg 1 */ +#define IE2 0x122 /* Interrupt enable reg 2 */ +#define IE4 0x124 /* Interrupt enable reg 4 */ +#define FIE 0x125 /* Frame Interrupt enable reg */ +#define SA0 0x140 /* Syn Address reg 0 */ +#define SA1 0x141 /* Syn Address reg 1 */ +#define IDL 0x142 /* Idle register */ +#define TRBL 0x100 /* TX/RX buffer reg L */ +#define TRBK 0x101 /* TX/RX buffer reg K */ +#define TRBJ 0x102 /* TX/RX buffer reg J */ +#define TRBH 0x103 /* TX/RX buffer reg H */ +#define TRC0 0x148 /* TX Ready control reg 0 */ +#define TRC1 0x149 /* TX Ready control reg 1 */ +#define RRC 0x14a /* RX Ready control reg */ +#define CST0 0x108 /* Current Status Register 0 */ +#define CST1 0x109 /* Current Status Register 1 */ +#define CST2 0x10a /* Current Status Register 2 */ +#define CST3 0x10b /* Current Status Register 3 */ +#define GPO 0x131 /* General Purpose Output Pin Ctl Reg */ +#define TFS 0x14b /* Tx Start Threshold Ctl Reg */ +#define TFN 0x143 /* Inter-transmit-frame Time Fill Ctl Reg */ +#define TBN 0x110 /* Tx Buffer Number Reg */ +#define RBN 0x111 /* Rx Buffer Number Reg */ +#define TNR0 0x150 /* Tx DMA Request Ctl Reg 0 */ +#define TNR1 0x151 /* Tx DMA Request Ctl Reg 1 */ 
+#define TCR 0x152 /* Tx DMA Critical Request Reg */ +#define RNR 0x154 /* Rx DMA Request Ctl Reg */ +#define RCR 0x156 /* Rx DMA Critical Request Reg */ + +/* Timer Registers */ +#define TCNTL 0x200 /* Timer Upcounter L */ +#define TCNTH 0x201 /* Timer Upcounter H */ +#define TCONRL 0x204 /* Timer Constant Register L */ +#define TCONRH 0x205 /* Timer Constant Register H */ +#define TCSR 0x206 /* Timer Control/Status Register */ +#define TEPR 0x207 /* Timer Expand Prescale Register */ + +/* DMA registers */ +#define PCR 0x40 /* DMA priority control reg */ +#define DRR 0x44 /* DMA reset reg */ +#define DMER 0x07 /* DMA Master Enable reg */ +#define BTCR 0x08 /* Burst Tx Ctl Reg */ +#define BOLR 0x0c /* Back-off Length Reg */ +#define DSR_RX(chan) (0x48 + 2*chan) /* DMA Status Reg (Rx) */ +#define DSR_TX(chan) (0x49 + 2*chan) /* DMA Status Reg (Tx) */ +#define DIR_RX(chan) (0x4c + 2*chan) /* DMA Interrupt Enable Reg (Rx) */ +#define DIR_TX(chan) (0x4d + 2*chan) /* DMA Interrupt Enable Reg (Tx) */ +#define FCT_RX(chan) (0x50 + 2*chan) /* Frame End Interrupt Counter (Rx) */ +#define FCT_TX(chan) (0x51 + 2*chan) /* Frame End Interrupt Counter (Tx) */ +#define DMR_RX(chan) (0x54 + 2*chan) /* DMA Mode Reg (Rx) */ +#define DMR_TX(chan) (0x55 + 2*chan) /* DMA Mode Reg (Tx) */ +#define DCR_RX(chan) (0x58 + 2*chan) /* DMA Command Reg (Rx) */ +#define DCR_TX(chan) (0x59 + 2*chan) /* DMA Command Reg (Tx) */ + +/* DMA Channel Registers */ +#define DARL 0x80 /* Dest Addr Register L (single-block, RX only) */ +#define DARH 0x81 /* Dest Addr Register H (single-block, RX only) */ +#define DARB 0x82 /* Dest Addr Register B (single-block, RX only) */ +#define DARBH 0x83 /* Dest Addr Register BH (single-block, RX only) */ +#define SARL 0x80 /* Source Addr Register L (single-block, TX only) */ +#define SARH 0x81 /* Source Addr Register H (single-block, TX only) */ +#define SARB 0x82 /* Source Addr Register B (single-block, TX only) */ +#define DARBH 0x83 /* Source Addr Register BH (single-block, TX only) */ +#define BARL 0x80 /* Buffer Addr Register L (chained-block) */ +#define BARH 0x81 /* Buffer Addr Register H (chained-block) */ +#define BARB 0x82 /* Buffer Addr Register B (chained-block) */ +#define BARBH 0x83 /* Buffer Addr Register BH (chained-block) */ +#define CDAL 0x84 /* Current Descriptor Addr Register L */ +#define CDAH 0x85 /* Current Descriptor Addr Register H */ +#define CDAB 0x86 /* Current Descriptor Addr Register B */ +#define CDABH 0x87 /* Current Descriptor Addr Register BH */ +#define EDAL 0x88 /* Error Descriptor Addr Register L */ +#define EDAH 0x89 /* Error Descriptor Addr Register H */ +#define EDAB 0x8a /* Error Descriptor Addr Register B */ +#define EDABH 0x8b /* Error Descriptor Addr Register BH */ +#define BFLL 0x90 /* RX Buffer Length L (only RX) */ +#define BFLH 0x91 /* RX Buffer Length H (only RX) */ +#define BCRL 0x8c /* Byte Count Register L */ +#define BCRH 0x8d /* Byte Count Register H */ + +/* Block Descriptor Structure */ +typedef struct { + unsigned long next; /* pointer to next block descriptor */ + unsigned long ptbuf; /* buffer pointer */ + unsigned short len; /* data length */ + unsigned char status; /* status */ + unsigned char filler[5]; /* alignment filler (16 bytes) */ +} pcsca_bd_t; + +/* + Descriptor Status definitions: + + Bit Transmission Reception + + 7 EOM EOM + 6 - Short Frame + 5 - Abort + 4 - Residual bit + 3 Underrun Overrun + 2 - CRC + 1 Ownership Ownership + 0 EOT - +*/ +#define DST_EOT 0x01 /* End of transmit command */ +#define DST_OSB 0x02 /* 
Ownership bit */ +#define DST_CRC 0x04 /* CRC Error */ +#define DST_OVR 0x08 /* Overrun */ +#define DST_UDR 0x08 /* Underrun */ +#define DST_RBIT 0x10 /* Residual bit */ +#define DST_ABT 0x20 /* Abort */ +#define DST_SHRT 0x40 /* Short Frame */ +#define DST_EOM 0x80 /* End of Message */ + +/* Status Counter Registers */ +#define CMCR 0x158 /* Counter Master Ctl Reg */ +#define TECNTL 0x160 /* Tx EOM Counter L */ +#define TECNTM 0x161 /* Tx EOM Counter M */ +#define TECNTH 0x162 /* Tx EOM Counter H */ +#define TECCR 0x163 /* Tx EOM Counter Ctl Reg */ +#define URCNTL 0x164 /* Underrun Counter L */ +#define URCNTH 0x165 /* Underrun Counter H */ +#define URCCR 0x167 /* Underrun Counter Ctl Reg */ +#define RECNTL 0x168 /* Rx EOM Counter L */ +#define RECNTM 0x169 /* Rx EOM Counter M */ +#define RECNTH 0x16a /* Rx EOM Counter H */ +#define RECCR 0x16b /* Rx EOM Counter Ctl Reg */ +#define ORCNTL 0x16c /* Overrun Counter L */ +#define ORCNTH 0x16d /* Overrun Counter H */ +#define ORCCR 0x16f /* Overrun Counter Ctl Reg */ +#define CECNTL 0x170 /* CRC Counter L */ +#define CECNTH 0x171 /* CRC Counter H */ +#define CECCR 0x173 /* CRC Counter Ctl Reg */ +#define ABCNTL 0x174 /* Abort frame Counter L */ +#define ABCNTH 0x175 /* Abort frame Counter H */ +#define ABCCR 0x177 /* Abort frame Counter Ctl Reg */ +#define SHCNTL 0x178 /* Short frame Counter L */ +#define SHCNTH 0x179 /* Short frame Counter H */ +#define SHCCR 0x17b /* Short frame Counter Ctl Reg */ +#define RSCNTL 0x17c /* Residual bit Counter L */ +#define RSCNTH 0x17d /* Residual bit Counter H */ +#define RSCCR 0x17f /* Residual bit Counter Ctl Reg */ + +/* Register Programming Constants */ + +#define IR0_DMIC 0x00000001 +#define IR0_DMIB 0x00000002 +#define IR0_DMIA 0x00000004 +#define IR0_EFT 0x00000008 +#define IR0_DMAREQ 0x00010000 +#define IR0_TXINT 0x00020000 +#define IR0_RXINTB 0x00040000 +#define IR0_RXINTA 0x00080000 +#define IR0_TXRDY 0x00100000 +#define IR0_RXRDY 0x00200000 + +#define MD0_CRC16_0 0x00 +#define MD0_CRC16_1 0x01 +#define MD0_CRC32 0x02 +#define MD0_CRC_CCITT 0x03 +#define MD0_CRCC0 0x04 +#define MD0_CRCC1 0x08 +#define MD0_AUTO_ENA 0x10 +#define MD0_ASYNC 0x00 +#define MD0_BY_MSYNC 0x20 +#define MD0_BY_BISYNC 0x40 +#define MD0_BY_EXT 0x60 +#define MD0_BIT_SYNC 0x80 +#define MD0_TRANSP 0xc0 + +#define MD1_NOADDR 0x00 +#define MD1_SADDR1 0x40 +#define MD1_SADDR2 0x80 +#define MD1_DADDR 0xc0 + +#define MD2_F_DUPLEX 0x00 +#define MD2_AUTO_ECHO 0x01 +#define MD2_LOOP_HI_Z 0x02 +#define MD2_LOOP_MIR 0x03 +#define MD2_ADPLL_X8 0x00 +#define MD2_ADPLL_X16 0x08 +#define MD2_ADPLL_X32 0x10 +#define MD2_NRZ 0x00 +#define MD2_NRZI 0x20 +#define MD2_NRZ_IEEE 0x40 +#define MD2_MANCH 0x00 +#define MD2_FM1 0x20 +#define MD2_FM0 0x40 +#define MD2_FM 0x80 + +#define CTL_RTS 0x01 +#define CTL_DTR 0x02 +#define CTL_SYN 0x04 +#define CTL_IDLC 0x10 +#define CTL_UDRNC 0x20 +#define CTL_URSKP 0x40 +#define CTL_URCT 0x80 + +#define RXS_BR0 0x01 +#define RXS_BR1 0x02 +#define RXS_BR2 0x04 +#define RXS_BR3 0x08 +#define RXS_ECLK 0x00 +#define RXS_ECLK_NS 0x20 +#define RXS_IBRG 0x40 +#define RXS_PLL1 0x50 +#define RXS_PLL2 0x60 +#define RXS_PLL3 0x70 +#define RXS_DRTXC 0x80 + +#define TXS_BR0 0x01 +#define TXS_BR1 0x02 +#define TXS_BR2 0x04 +#define TXS_BR3 0x08 +#define TXS_ECLK 0x00 +#define TXS_IBRG 0x40 +#define TXS_RCLK 0x60 +#define TXS_DTRXC 0x80 + +#define EXS_RES0 0x01 +#define EXS_RES1 0x02 +#define EXS_RES2 0x04 +#define EXS_TES0 0x10 +#define EXS_TES1 0x20 +#define EXS_TES2 0x40 + +#define CMD_RX_RST 0x11 +#define CMD_RX_ENA 0x12 
+#define CMD_RX_DIS 0x13 +#define CMD_RX_CRC_INIT 0x14 +#define CMD_RX_MSG_REJ 0x15 +#define CMD_RX_MP_SRCH 0x16 +#define CMD_RX_CRC_EXC 0x17 +#define CMD_RX_CRC_FRC 0x18 +#define CMD_TX_RST 0x01 +#define CMD_TX_ENA 0x02 +#define CMD_TX_DISA 0x03 +#define CMD_TX_CRC_INIT 0x04 +#define CMD_TX_CRC_EXC 0x05 +#define CMD_TX_EOM 0x06 +#define CMD_TX_ABORT 0x07 +#define CMD_TX_MP_ON 0x08 +#define CMD_TX_BUF_CLR 0x09 +#define CMD_TX_DISB 0x0b +#define CMD_CH_RST 0x21 +#define CMD_SRCH_MODE 0x31 +#define CMD_NOP 0x00 + +#define ST0_RXRDY 0x01 +#define ST0_TXRDY 0x02 +#define ST0_RXINTB 0x20 +#define ST0_RXINTA 0x40 +#define ST0_TXINT 0x80 + +#define ST1_IDLE 0x01 +#define ST1_ABORT 0x02 +#define ST1_CDCD 0x04 +#define ST1_CCTS 0x08 +#define ST1_SYN_FLAG 0x10 +#define ST1_CLMD 0x20 +#define ST1_TXIDLE 0x40 +#define ST1_UDRN 0x80 + +#define ST2_CRCE 0x04 +#define ST2_ONRN 0x08 +#define ST2_RBIT 0x10 +#define ST2_ABORT 0x20 +#define ST2_SHORT 0x40 +#define ST2_EOM 0x80 + +#define ST3_RX_ENA 0x01 +#define ST3_TX_ENA 0x02 +#define ST3_DCD 0x04 +#define ST3_CTS 0x08 +#define ST3_SRCH_MODE 0x10 +#define ST3_SLOOP 0x20 +#define ST3_GPI 0x80 + +#define ST4_RDNR 0x01 +#define ST4_RDCR 0x02 +#define ST4_TDNR 0x04 +#define ST4_TDCR 0x08 +#define ST4_OCLM 0x20 +#define ST4_CFT 0x40 +#define ST4_CGPI 0x80 + +#define FST_CRCEF 0x04 +#define FST_OVRNF 0x08 +#define FST_RBIF 0x10 +#define FST_ABTF 0x20 +#define FST_SHRTF 0x40 +#define FST_EOMF 0x80 + +#define IE0_RXRDY 0x01 +#define IE0_TXRDY 0x02 +#define IE0_RXINTB 0x20 +#define IE0_RXINTA 0x40 +#define IE0_TXINT 0x80 + +#define IE1_IDLD 0x01 +#define IE1_ABTD 0x02 +#define IE1_CDCD 0x04 +#define IE1_CCTS 0x08 +#define IE1_SYNCD 0x10 +#define IE1_CLMD 0x20 +#define IE1_IDL 0x40 +#define IE1_UDRN 0x80 + +#define IE2_CRCE 0x04 +#define IE2_OVRN 0x08 +#define IE2_RBIT 0x10 +#define IE2_ABT 0x20 +#define IE2_SHRT 0x40 +#define IE2_EOM 0x80 + +#define IE4_RDNR 0x01 +#define IE4_RDCR 0x02 +#define IE4_TDNR 0x04 +#define IE4_TDCR 0x08 +#define IE4_OCLM 0x20 +#define IE4_CFT 0x40 +#define IE4_CGPI 0x80 + +#define FIE_CRCEF 0x04 +#define FIE_OVRNF 0x08 +#define FIE_RBIF 0x10 +#define FIE_ABTF 0x20 +#define FIE_SHRTF 0x40 +#define FIE_EOMF 0x80 + +#define DSR_DWE 0x01 +#define DSR_DE 0x02 +#define DSR_REF 0x04 +#define DSR_UDRF 0x04 +#define DSR_COA 0x08 +#define DSR_COF 0x10 +#define DSR_BOF 0x20 +#define DSR_EOM 0x40 +#define DSR_EOT 0x80 + +#define DIR_REF 0x04 +#define DIR_UDRF 0x04 +#define DIR_COA 0x08 +#define DIR_COF 0x10 +#define DIR_BOF 0x20 +#define DIR_EOM 0x40 +#define DIR_EOT 0x80 + +#define DMR_CNTE 0x02 +#define DMR_NF 0x04 +#define DMR_SEOME 0x08 +#define DMR_TMOD 0x10 + +#define DCR_SW_ABT 0x01 +#define DCR_FCT_CLR 0x02 + +#define PCR_PR0 0x01 +#define PCR_PR1 0x02 +#define PCR_PR2 0x04 +#define PCR_CCC 0x08 +#define PCR_BRC 0x10 +#define PCR_OSB 0x40 +#define PCR_BURST 0x80 + +#endif /* (_HD64572_H) */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/hd6457x.c linux.22-ac2/drivers/net/wan/hd6457x.c --- linux.vanilla/drivers/net/wan/hd6457x.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/hd6457x.c 2003-06-29 16:09:51.000000000 +0100 @@ -1,12 +1,11 @@ /* * Hitachi SCA HD64570 and HD64572 common driver for Linux * - * Copyright (C) 1998-2000 Krzysztof Halasa + * Copyright (C) 1998-2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either 
version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. * * Sources of information: * Hitachi HD64570 SCA User's Manual @@ -42,7 +41,7 @@ #error Either hd64570.h or hd64572.h must be included #endif -static char sca_version[]="1.09"; +static char sca_version[]="1.12"; #define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) #define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET) @@ -294,7 +293,7 @@ skb->mac.raw = skb->data; skb->dev = hdlc_to_dev(&port->hdlc); skb->dev->last_rx = jiffies; - skb->protocol = htons(ETH_P_HDLC); + skb->protocol = hdlc_type_trans(skb, hdlc_to_dev(&port->hdlc)); netif_rx(skb); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/hdlc_cisco.c linux.22-ac2/drivers/net/wan/hdlc_cisco.c --- linux.vanilla/drivers/net/wan/hdlc_cisco.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/hdlc_cisco.c 2003-06-29 16:09:51.000000000 +0100 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * Cisco HDLC support * - * Copyright (C) 2000 - 2001 Krzysztof Halasa + * Copyright (C) 2000 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. */ #include @@ -85,12 +84,37 @@ skb_put(skb, sizeof(cisco_packet)); skb->priority = TC_PRIO_CONTROL; skb->dev = hdlc_to_dev(hdlc); + skb->nh.raw = skb->data; dev_queue_xmit(skb); } +static unsigned short cisco_type_trans(struct sk_buff *skb, + struct net_device *dev) +{ + hdlc_header *data = (hdlc_header*)skb->data; + + if (skb->len < sizeof(hdlc_header)) + return __constant_htons(ETH_P_HDLC); + + if (data->address != CISCO_MULTICAST && + data->address != CISCO_UNICAST) + return __constant_htons(ETH_P_HDLC); + + switch(data->protocol) { + case __constant_htons(ETH_P_IP): + case __constant_htons(ETH_P_IPX): + case __constant_htons(ETH_P_IPV6): + skb_pull(skb, sizeof(hdlc_header)); + return data->protocol; + default: + return __constant_htons(ETH_P_HDLC); + } +} + + static void cisco_rx(struct sk_buff *skb) { hdlc_device *hdlc = dev_to_hdlc(skb->dev); @@ -109,14 +133,6 @@ skb_pull(skb, sizeof(hdlc_header)); switch(ntohs(data->protocol)) { - case ETH_P_IP: - case ETH_P_IPX: - case ETH_P_IPV6: - skb->protocol = data->protocol; - skb->dev = hdlc_to_dev(hdlc); - netif_rx(skb); - return; - case CISCO_SYS_INFO: /* Packet is not needed, drop it. 
*/ dev_kfree_skb_any(skb); @@ -288,6 +304,7 @@ hdlc->open = cisco_open; hdlc->stop = cisco_close; hdlc->netif_rx = cisco_rx; + hdlc->type_trans = cisco_type_trans; hdlc->proto = IF_PROTO_CISCO; dev->hard_start_xmit = hdlc->xmit; dev->hard_header = cisco_hard_header; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/hdlc_fr.c linux.22-ac2/drivers/net/wan/hdlc_fr.c --- linux.vanilla/drivers/net/wan/hdlc_fr.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/hdlc_fr.c 2003-06-29 16:09:51.000000000 +0100 @@ -2,13 +2,22 @@ * Generic HDLC support routines for Linux * Frame Relay support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + + Theory of PVC state in DCE mode: + + (exist,new) -> 0,0 when "PVC create" or if "link unreliable" + 0,x -> 1,1 if "link reliable" when sending FULL STATUS + 1,1 -> 1,0 if received FULL STATUS ACK + + (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create" + -> 1 when "PVC up" and (exist,new) = 1,0 +*/ #include #include @@ -20,19 +29,23 @@ #include #include #include +#include #include #include #include +#include #include __inline__ pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci) { - pvc_device *pvc=hdlc->state.fr.first_pvc; + pvc_device *pvc = hdlc->state.fr.first_pvc; - while (pvc) { - if (netdev_dlci(&pvc->netdev) == dlci) + while(pvc) { + if (pvc->dlci == dlci) return pvc; + if (pvc->dlci > dlci) + return NULL; /* the listed is sorted */ pvc = pvc->next; } @@ -40,18 +53,72 @@ } +__inline__ pvc_device* add_pvc(hdlc_device *hdlc, u16 dlci) +{ + pvc_device *pvc, **pvc_p = &hdlc->state.fr.first_pvc; + + while(*pvc_p) { + if ((*pvc_p)->dlci == dlci) + return *pvc_p; + if ((*pvc_p)->dlci > dlci) + break; /* the listed is sorted */ + pvc_p = &(*pvc_p)->next; + } + + pvc = kmalloc(sizeof(pvc_device), GFP_KERNEL); + if (!pvc) + return NULL; + + memset(pvc, 0, sizeof(pvc_device)); + pvc->dlci = dlci; + pvc->master = hdlc; + pvc->next = *pvc_p; /* Put it in the chain */ + *pvc_p = pvc; + return pvc; +} + + +__inline__ int pvc_is_used(pvc_device *pvc) +{ + return pvc->main != NULL || pvc->ether != NULL; +} + + +__inline__ void delete_unused_pvcs(hdlc_device *hdlc) +{ + pvc_device **pvc_p = &hdlc->state.fr.first_pvc; + + while(*pvc_p) { + if (!pvc_is_used(*pvc_p)) { + pvc_device *pvc = *pvc_p; + *pvc_p = pvc->next; + kfree(pvc); + continue; + } + pvc_p = &(*pvc_p)->next; + } +} + -__inline__ u16 status_to_dlci(hdlc_device *hdlc, u8 *status, - int *active, int *new) +__inline__ struct net_device** get_dev_p(pvc_device *pvc, int type) { - *new = (status[2] & 0x08); - *active = (!*new && (status[2] & 0x02)); + if (type == ARPHRD_ETHER) + return &pvc->ether; + else + return &pvc->main; +} + + +__inline__ u16 status_to_dlci(u8 *status, int *active, int *new) +{ + *new = (status[2] & 0x08) ? 1 : 0; + *active = (status[2] & 0x02) ? 
1 : 0; return ((status[0] & 0x3F)<<4) | ((status[1] & 0x78)>>3); } -__inline__ void dlci_to_status(hdlc_device *hdlc, u16 dlci, u8 *status, +__inline__ void dlci_to_status(u16 dlci, u8 *status, int active, int new) { status[0] = (dlci>>4) & 0x3F; @@ -66,37 +133,50 @@ -static int fr_hard_header(struct sk_buff *skb, struct net_device *dev, - u16 type, void *daddr, void *saddr, unsigned int len) +static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) { u16 head_len; + struct sk_buff *skb = *skb_p; - if (!daddr) - daddr = dev->broadcast; - -#ifdef CONFIG_HDLC_DEBUG_HARD_HEADER - printk(KERN_DEBUG "%s: fr_hard_header called\n", dev->name); -#endif - - switch(type) { - case ETH_P_IP: + switch(skb->protocol) { + case __constant_ntohs(ETH_P_IP): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IP; break; - case ETH_P_IPV6: + case __constant_ntohs(ETH_P_IPV6): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IPV6; break; - case LMI_PROTO: + case __constant_ntohs(LMI_PROTO): head_len = 4; skb_push(skb, head_len); skb->data[3] = LMI_PROTO; break; + case __constant_ntohs(ETH_P_802_3): + head_len = 10; + if (skb_headroom(skb) < head_len) { + struct sk_buff *skb2 = skb_realloc_headroom(skb, + head_len); + if (!skb2) + return -ENOBUFS; + dev_kfree_skb(skb); + skb = *skb_p = skb2; + } + skb_push(skb, head_len); + skb->data[3] = FR_PAD; + skb->data[4] = NLPID_SNAP; + skb->data[5] = FR_PAD; + skb->data[6] = 0x80; + skb->data[7] = 0xC2; + skb->data[8] = 0x00; + skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ + break; + default: head_len = 10; skb_push(skb, head_len); @@ -105,14 +185,12 @@ skb->data[5] = FR_PAD; skb->data[6] = FR_PAD; skb->data[7] = FR_PAD; - skb->data[8] = type>>8; - skb->data[9] = (u8)type; + *(u16*)(skb->data + 8) = skb->protocol; } - memcpy(skb->data, daddr, 2); + dlci_to_q922(skb->data, dlci); skb->data[2] = FR_UI; - - return head_len; + return 0; } @@ -124,13 +202,12 @@ if ((hdlc_to_dev(pvc->master)->flags & IFF_UP) == 0) return -EIO; /* Master must be UP in order to activate PVC */ - if (pvc->master->state.fr.settings.lmi != LMI_NONE) - pvc->state.active = 0; - else - pvc->state.active = 1; + if (pvc->open_count++ == 0) { + if (pvc->master->state.fr.settings.lmi == LMI_NONE) + pvc->state.active = 1; - pvc->state.new = 0; - pvc->master->state.fr.changed = 1; + pvc->master->state.fr.dce_changed = 1; + } return 0; } @@ -139,38 +216,94 @@ static int pvc_close(struct net_device *dev) { pvc_device *pvc = dev_to_pvc(dev); - pvc->state.active = pvc->state.new = 0; - pvc->master->state.fr.changed = 1; + + if (--pvc->open_count == 0) { + if (pvc->master->state.fr.settings.lmi == LMI_NONE) + pvc->state.active = 0; + + if (pvc->master->state.fr.settings.dce) { + pvc->master->state.fr.dce_changed = 1; + pvc->state.active = 0; + } + } return 0; } -static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) +int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { pvc_device *pvc = dev_to_pvc(dev); + fr_proto_pvc_info info; - if (pvc->state.active) { - skb->dev = hdlc_to_dev(pvc->master); - pvc->stats.tx_bytes += skb->len; - pvc->stats.tx_packets++; - if (pvc->state.fecn) - pvc->stats.tx_compressed++; /* TX Congestion counter */ - dev_queue_xmit(skb); - } else { - pvc->stats.tx_dropped++; - dev_kfree_skb(skb); + if (ifr->ifr_settings.type == IF_GET_PROTO) { + if (dev->type == ARPHRD_ETHER) + ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC; + else + ifr->ifr_settings.type = IF_PROTO_FR_PVC; + + if (ifr->ifr_settings.size < sizeof(info)) { + /* data 
size wanted */ + ifr->ifr_settings.size = sizeof(info); + return -ENOBUFS; + } + + info.dlci = pvc->dlci; + memcpy(info.master, hdlc_to_name(pvc->master), IFNAMSIZ); + if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info, + &info, sizeof(info))) + return -EFAULT; + return 0; } - return 0; + return -EINVAL; +} + + +__inline__ struct net_device_stats *pvc_get_stats(struct net_device *dev) +{ + return (struct net_device_stats *) + ((char *)dev + sizeof(struct net_device)); } -static struct net_device_stats *pvc_get_stats(struct net_device *dev) +static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) { pvc_device *pvc = dev_to_pvc(dev); - return &pvc->stats; + struct net_device_stats *stats = pvc_get_stats(dev); + + if (pvc->state.active) { + if (dev->type == ARPHRD_ETHER) { + int pad = ETH_ZLEN - skb->len; + if (pad > 0) { /* Pad the frame with zeros */ + int len = skb->len; + if (skb_tailroom(skb) < pad) + if (pskb_expand_head(skb, 0, pad, + GFP_ATOMIC)) { + stats->tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + skb_put(skb, pad); + memset(skb->data + len, 0, pad); + } + skb->protocol = __constant_htons(ETH_P_802_3); + } + if (!fr_hard_header(&skb, pvc->dlci)) { + stats->tx_bytes += skb->len; + stats->tx_packets++; + if (pvc->state.fecn) /* TX Congestion counter */ + stats->tx_compressed++; + skb->dev = hdlc_to_dev(pvc->master); + dev_queue_xmit(skb); + return 0; + } + } + + stats->tx_dropped++; + dev_kfree_skb(skb); + return 0; } @@ -187,9 +320,15 @@ static inline void fr_log_dlci_active(pvc_device *pvc) { - printk(KERN_INFO "%s: %sactive%s\n", pvc_to_name(pvc), - pvc->state.active ? "" : "in", - pvc->state.new ? " new" : ""); + printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", + hdlc_to_name(pvc->master), + pvc->dlci, + pvc->main ? pvc->main->name : "", + pvc->main && pvc->ether ? " " : "", + pvc->ether ? pvc->ether->name : "", + pvc->state.new ? " new" : "", + !pvc->state.exist ? "deleted" : + pvc->state.active ? "active" : "inactive"); } @@ -213,8 +352,8 @@ int i = 0; if (hdlc->state.fr.settings.dce && fullrep) { - len += hdlc->state.fr.pvc_count * (2 + stat_len); - if (len > HDLC_MAX_MTU) { + len += hdlc->state.fr.dce_pvc_count * (2 + stat_len); + if (len > HDLC_MAX_MRU) { printk(KERN_WARNING "%s: Too many PVCs while sending " "LMI full report\n", hdlc_to_name(hdlc)); return; @@ -224,12 +363,13 @@ skb = dev_alloc_skb(len); if (!skb) { printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n", - hdlc_to_name(hdlc)); + hdlc_to_name(hdlc)); return; } memset(skb->data, 0, len); skb_reserve(skb, 4); - fr_hard_header(skb, hdlc_to_dev(hdlc), LMI_PROTO, NULL, NULL, 0); + skb->protocol = __constant_htons(LMI_PROTO); + fr_hard_header(&skb, LMI_DLCI); data = skb->tail; data[i++] = LMI_CALLREF; data[i++] = hdlc->state.fr.settings.dce @@ -253,16 +393,20 @@ ? 
LMI_CCITT_PVCSTAT : LMI_PVCSTAT; data[i++] = stat_len; - if (hdlc->state.fr.reliable && - (pvc->netdev.flags & IFF_UP) && - !pvc->state.active && - !pvc->state.new) { - pvc->state.new = 1; + /* LMI start/restart */ + if (hdlc->state.fr.reliable && !pvc->state.exist) { + pvc->state.exist = pvc->state.new = 1; + fr_log_dlci_active(pvc); + } + + /* ifconfig PVC up */ + if (pvc->open_count && !pvc->state.active && + pvc->state.exist && !pvc->state.new) { + pvc->state.active = 1; fr_log_dlci_active(pvc); } - dlci_to_status(hdlc, netdev_dlci(&pvc->netdev), - data + i, + dlci_to_status(pvc->dlci, data + i, pvc->state.active, pvc->state.new); i += stat_len; pvc = pvc->next; @@ -272,6 +416,7 @@ skb_put(skb, i); skb->priority = TC_PRIO_CONTROL; skb->dev = hdlc_to_dev(hdlc); + skb->nh.raw = skb->data; dev_queue_xmit(skb); } @@ -312,10 +457,11 @@ if (reliable) { hdlc->state.fr.n391cnt = 0; /* Request full status */ - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_changed = 1; } else { while (pvc) { /* Deactivate all PVCs */ - pvc->state.new = pvc->state.active = 0; + pvc->state.exist = 0; + pvc->state.active = pvc->state.new = 0; pvc = pvc->next; } } @@ -346,7 +492,7 @@ { int stat_len; pvc_device *pvc; - int reptype = -1, error; + int reptype = -1, error, no_ram; u8 rxseq, txseq; int i; @@ -420,20 +566,18 @@ while (pvc) { if (pvc->state.new) { pvc->state.new = 0; - pvc->state.active = 1; - fr_log_dlci_active(pvc); /* Tell DTE that new PVC is now active */ - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_changed = 1; } pvc = pvc->next; } } - if (hdlc->state.fr.changed) { + if (hdlc->state.fr.dce_changed) { reptype = LMI_FULLREP; hdlc->state.fr.fullrep_sent = 1; - hdlc->state.fr.changed = 0; + hdlc->state.fr.dce_changed = 0; } fr_lmi_send(hdlc, reptype == LMI_FULLREP ? 1 : 0); @@ -449,13 +593,14 @@ pvc = hdlc->state.fr.first_pvc; while (pvc) { - pvc->state.deleted = pvc->state.active; /* mark active PVCs */ + pvc->state.deleted = 1; pvc = pvc->next; } + no_ram = 0; while (skb->len >= i + 2 + stat_len) { u16 dlci; - int active, new; + unsigned int active, new; if (skb->data[i] != ((hdlc->state.fr.settings.lmi == LMI_CCITT) ? 
LMI_CCITT_PVCSTAT : LMI_PVCSTAT)) { @@ -472,21 +617,28 @@ } i++; - dlci = status_to_dlci(hdlc, skb->data + i, &active, &new); - pvc = find_pvc(hdlc, dlci); + dlci = status_to_dlci(skb->data + i, &active, &new); + + pvc = add_pvc(hdlc, dlci); + + if (!pvc && !no_ram) { + printk(KERN_WARNING + "%s: Memory squeeze on fr_lmi_recv()\n", + hdlc_to_name(hdlc)); + no_ram = 1; + } - active |= new; if (pvc) { - if (active && !pvc->state.active && - (pvc->netdev.flags & IFF_UP)) { + pvc->state.exist = 1; + pvc->state.deleted = 0; + if (active != pvc->state.active || + new != pvc->state.new || + !pvc->state.exist) { + pvc->state.new = new; pvc->state.active = active; fr_log_dlci_active(pvc); } - pvc->state.deleted = 0; } - else if (new) - printk(KERN_INFO "%s: new PVC available, DLCI=%u\n", - hdlc_to_name(hdlc), dlci); i += stat_len; } @@ -494,10 +646,10 @@ pvc = hdlc->state.fr.first_pvc; while (pvc) { - if (pvc->state.deleted) { + if (pvc->state.deleted && pvc->state.exist) { pvc->state.active = pvc->state.new = 0; + pvc->state.exist = 0; fr_log_dlci_active(pvc); - pvc->state.deleted = 0; } pvc = pvc->next; } @@ -517,8 +669,9 @@ u8 *data = skb->data; u16 dlci; pvc_device *pvc; + struct net_device *dev = NULL; - if (skb->len<4 || fh->ea1 || data[2] != FR_UI) + if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI) goto rx_error; dlci = q922_to_dlci(skb->data); @@ -550,57 +703,39 @@ printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n", hdlc_to_name(hdlc), dlci); #endif - goto rx_error; - } - - if ((pvc->netdev.flags & IFF_UP) == 0) { -#ifdef CONFIG_HDLC_DEBUG_PKT - printk(KERN_INFO "%s: PVC for received frame's DLCI %d is down\n", - hdlc_to_name(hdlc), dlci); -#endif - goto rx_error; + dev_kfree_skb_any(skb); + return; } - pvc->stats.rx_packets++; /* PVC traffic */ - pvc->stats.rx_bytes += skb->len; - - if (pvc->state.fecn != (fh->fecn ? PVC_STATE_FECN : 0)) { + if (pvc->state.fecn != fh->fecn) { #ifdef CONFIG_HDLC_DEBUG_ECN - printk(KERN_DEBUG "%s: FECN O%s\n", pvc_to_name(pvc), - fh->fecn ? "N" : "FF"); + printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", hdlc_to_name(pvc), + dlci, fh->fecn ? "N" : "FF"); #endif pvc->state.fecn ^= 1; } - if (pvc->state.becn != (fh->becn ? PVC_STATE_BECN : 0)) { + if (pvc->state.becn != fh->becn) { #ifdef CONFIG_HDLC_DEBUG_ECN - printk(KERN_DEBUG "%s: BECN O%s\n", pvc_to_name(pvc), - fh->becn ? "N" : "FF"); + printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", hdlc_to_name(pvc), + dlci, fh->becn ? 
"N" : "FF"); #endif pvc->state.becn ^= 1; } - if (pvc->state.becn) - pvc->stats.rx_compressed++; - - skb->dev = &pvc->netdev; if (data[3] == NLPID_IP) { skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ + dev = pvc->main; skb->protocol = htons(ETH_P_IP); - netif_rx(skb); - return; - } - - if (data[3] == NLPID_IPV6) { + } else if (data[3] == NLPID_IPV6) { skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ + dev = pvc->main; skb->protocol = htons(ETH_P_IPV6); - netif_rx(skb); - return; - } - if (data[3] == FR_PAD && data[4] == NLPID_SNAP && data[5] == FR_PAD) { + } else if (skb->len > 10 && data[3] == FR_PAD && + data[4] == NLPID_SNAP && data[5] == FR_PAD) { u16 oui = ntohs(*(u16*)(data + 6)); u16 pid = ntohs(*(u16*)(data + 8)); skb_pull(skb, 10); @@ -610,23 +745,39 @@ case ETH_P_IPX: case ETH_P_IP: /* a long variant */ case ETH_P_IPV6: + dev = pvc->main; skb->protocol = htons(pid); break; + case 0x80C20007: /* bridged Ethernet frame */ + if ((dev = pvc->ether) != NULL) + skb->protocol = eth_type_trans(skb, dev); + break; + default: printk(KERN_INFO "%s: Unsupported protocol, OUI=%x " "PID=%x\n", hdlc_to_name(hdlc), oui, pid); dev_kfree_skb_any(skb); return; } - - netif_rx(skb); + } else { + printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x " + "length = %i\n", hdlc_to_name(hdlc), data[3], skb->len); + dev_kfree_skb_any(skb); return; } - printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x\n", - hdlc_to_name(hdlc), data[3]); - dev_kfree_skb_any(skb); + if (dev) { + struct net_device_stats *stats = pvc_get_stats(dev); + stats->rx_packets++; /* PVC traffic */ + stats->rx_bytes += skb->len; + if (pvc->state.becn) + stats->rx_compressed++; + skb->dev = dev; + netif_rx(skb); + } else + dev_kfree_skb_any(skb); + return; rx_error: @@ -641,7 +792,7 @@ if (hdlc->state.fr.settings.lmi != LMI_NONE) { hdlc->state.fr.last_poll = 0; hdlc->state.fr.reliable = 0; - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_changed = 1; hdlc->state.fr.request = 0; hdlc->state.fr.fullrep_sent = 0; hdlc->state.fr.last_errors = 0xFFFFFFFF; @@ -669,90 +820,119 @@ if (hdlc->state.fr.settings.lmi != LMI_NONE) del_timer_sync(&hdlc->state.fr.timer); - while(pvc) { - dev_close(&pvc->netdev); /* Shutdown all PVCs for this FRAD */ + while(pvc) { /* Shutdown all PVCs for this FRAD */ + if (pvc->main) + dev_close(pvc->main); + if (pvc->ether) + dev_close(pvc->ether); + pvc->state.active = pvc->state.new = pvc->state.fecn = + pvc->state.becn = 0; + pvc->state.exist = 0; pvc = pvc->next; } } - -static int fr_pvc(hdlc_device *hdlc, unsigned int dlci, int create) +static int fr_add_pvc(hdlc_device *hdlc, unsigned int dlci, int type) { - pvc_device **pvc_p = &hdlc->state.fr.first_pvc; - pvc_device *pvc; - int result; + pvc_device *pvc = NULL; + struct net_device *dev; + int result, used; + char * prefix = "pvc%d"; - if(dlci <= 0 || dlci >= 1024) - return -EINVAL; /* Only 10 bits for DLCI, DLCI 0 reserved */ + if (type == ARPHRD_ETHER) + prefix = "pvceth%d"; - while(*pvc_p) { - if (netdev_dlci(&(*pvc_p)->netdev) == dlci) - break; - pvc_p = &(*pvc_p)->next; + if ((pvc = add_pvc(hdlc, dlci)) == NULL) { + printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n", + hdlc_to_name(hdlc)); + return -ENOBUFS; } - if (create) { /* Create PVC */ - if (*pvc_p != NULL) - return -EEXIST; - - pvc = *pvc_p = kmalloc(sizeof(pvc_device), GFP_KERNEL); - if (!pvc) { - printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", - hdlc_to_name(hdlc)); - return -ENOBUFS; - } - memset(pvc, 0, sizeof(pvc_device)); + if (*get_dev_p(pvc, 
type)) + return -EEXIST; - pvc->netdev.hard_start_xmit = pvc_xmit; - pvc->netdev.get_stats = pvc_get_stats; - pvc->netdev.open = pvc_open; - pvc->netdev.stop = pvc_close; - pvc->netdev.change_mtu = pvc_change_mtu; - pvc->netdev.mtu = HDLC_MAX_MTU; - - pvc->netdev.type = ARPHRD_DLCI; - pvc->netdev.hard_header_len = 16; - pvc->netdev.hard_header = fr_hard_header; - pvc->netdev.tx_queue_len = 0; - pvc->netdev.flags = IFF_POINTOPOINT; - - pvc->master = hdlc; - *(u16*)pvc->netdev.dev_addr = htons(dlci); - dlci_to_q922(pvc->netdev.broadcast, dlci); - pvc->netdev.addr_len = 2; + used = pvc_is_used(pvc); - result = dev_alloc_name(&pvc->netdev, "pvc%d"); - if (result < 0) { - kfree(pvc); - *pvc_p = NULL; - return result; - } - - if (register_netdevice(&pvc->netdev) != 0) { - kfree(pvc); - *pvc_p = NULL; - return -EIO; - } + dev = kmalloc(sizeof(struct net_device) + + sizeof(struct net_device_stats), GFP_KERNEL); + if (!dev) { + printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", + hdlc_to_name(hdlc)); + delete_unused_pvcs(hdlc); + return -ENOBUFS; + } + memset(dev, 0, sizeof(struct net_device) + + sizeof(struct net_device_stats)); - hdlc->state.fr.changed = 1; - hdlc->state.fr.pvc_count++; - return 0; + if (type == ARPHRD_ETHER) { + ether_setup(dev); + memcpy(dev->dev_addr, "\x00\x01", 2); + get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); + } else { + dev->type = ARPHRD_DLCI; + dev->flags = IFF_POINTOPOINT; + dev->hard_header_len = 10; + dev->addr_len = 2; + *(u16*)dev->dev_addr = htons(dlci); + dlci_to_q922(dev->broadcast, dlci); + } + dev->hard_start_xmit = pvc_xmit; + dev->get_stats = pvc_get_stats; + dev->open = pvc_open; + dev->stop = pvc_close; + dev->do_ioctl = pvc_ioctl; + dev->change_mtu = pvc_change_mtu; + dev->mtu = HDLC_MAX_MTU; + dev->tx_queue_len = 0; + dev->priv = pvc; + + result = dev_alloc_name(dev, prefix); + if (result < 0) { + kfree(dev); + delete_unused_pvcs(hdlc); + return result; + } + + if (register_netdevice(dev) != 0) { + kfree(dev); + delete_unused_pvcs(hdlc); + return -EIO; + } + + *get_dev_p(pvc, type) = dev; + if (!used) { + hdlc->state.fr.dce_changed = 1; + hdlc->state.fr.dce_pvc_count++; } + return 0; +} + + - if (*pvc_p == NULL) /* Delete PVC */ +static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) +{ + pvc_device *pvc; + struct net_device *dev; + + if ((pvc = find_pvc(hdlc, dlci)) == NULL) return -ENOENT; - pvc = *pvc_p; + if ((dev = *get_dev_p(pvc, type)) == NULL) + return -ENOENT; - if (pvc->netdev.flags & IFF_UP) + if (dev->flags & IFF_UP) return -EBUSY; /* PVC in use */ - hdlc->state.fr.changed = 1; - hdlc->state.fr.pvc_count--; - *pvc_p = pvc->next; - unregister_netdevice(&pvc->netdev); - kfree(pvc); + unregister_netdevice(dev); + kfree(dev); + *get_dev_p(pvc, type) = NULL; + + if (!pvc_is_used(pvc)) { + hdlc->state.fr.dce_pvc_count--; + hdlc->state.fr.dce_changed = 1; + } + delete_unused_pvcs(hdlc); return 0; } @@ -763,14 +943,21 @@ pvc_device *pvc = hdlc->state.fr.first_pvc; while(pvc) { pvc_device *next = pvc->next; - unregister_netdev(&pvc->netdev); + if (pvc->main) { + unregister_netdevice(pvc->main); + kfree(pvc->main); + } + if (pvc->ether) { + unregister_netdevice(pvc->ether); + kfree(pvc->ether); + } kfree(pvc); pvc = next; } hdlc->state.fr.first_pvc = NULL; /* All PVCs destroyed */ - hdlc->state.fr.pvc_count = 0; - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_pvc_count = 0; + hdlc->state.fr.dce_changed = 1; } @@ -828,25 +1015,27 @@ if (hdlc->proto != IF_PROTO_FR) { hdlc_proto_detach(hdlc); hdlc->state.fr.first_pvc = NULL; 
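A side note on the fr_add_pvc()/pvc_get_stats() hunks above (this commentary and the sketch are not part of the patch): the PVC's struct net_device and its struct net_device_stats are allocated in a single kmalloc(), and pvc_get_stats() recovers the statistics block by stepping past the net_device. A minimal user-space sketch of the same combined-allocation pattern, using simplified stand-in types (device_t and stats_t are illustrative, not the kernel structures):

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        /* Simplified stand-ins for struct net_device / net_device_stats. */
        typedef struct { char name[16]; void *priv; } device_t;
        typedef struct { unsigned long rx_packets, tx_packets; } stats_t;

        /* Same trick as pvc_get_stats(): the stats block lives directly
         * behind the device structure, in the same allocation. */
        static stats_t *dev_stats(device_t *dev)
        {
                return (stats_t *)((char *)dev + sizeof(device_t));
        }

        int main(void)
        {
                /* One allocation covers both structures, as in fr_add_pvc(). */
                device_t *dev = malloc(sizeof(device_t) + sizeof(stats_t));
                if (!dev)
                        return 1;
                memset(dev, 0, sizeof(device_t) + sizeof(stats_t));
                strcpy(dev->name, "pvc0");

                dev_stats(dev)->rx_packets++;
                printf("%s: rx_packets=%lu\n", dev->name,
                       dev_stats(dev)->rx_packets);
                free(dev);
                return 0;
        }

A single kmalloc() instead of two keeps the failure handling in fr_add_pvc() down to one error path and lets kfree(dev) release both structures at once.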
- hdlc->state.fr.pvc_count = 0; + hdlc->state.fr.dce_pvc_count = 0; } memcpy(&hdlc->state.fr.settings, &new_settings, size); hdlc->open = fr_open; hdlc->stop = fr_close; hdlc->netif_rx = fr_rx; + hdlc->type_trans = NULL; hdlc->proto_detach = fr_destroy; hdlc->proto = IF_PROTO_FR; dev->hard_start_xmit = hdlc->xmit; - dev->hard_header = fr_hard_header; + dev->hard_header = NULL; dev->type = ARPHRD_FRAD; - dev->addr_len = 2; - *(u16*)dev->dev_addr = htons(LMI_DLCI); - dlci_to_q922(dev->broadcast, LMI_DLCI); + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->addr_len = 0; return 0; case IF_PROTO_FR_ADD_PVC: case IF_PROTO_FR_DEL_PVC: + case IF_PROTO_FR_ADD_ETH_PVC: + case IF_PROTO_FR_DEL_ETH_PVC: if(!capable(CAP_NET_ADMIN)) return -EPERM; @@ -854,8 +1043,20 @@ sizeof(fr_proto_pvc))) return -EFAULT; - return fr_pvc(hdlc, pvc.dlci, - ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC); + if (pvc.dlci <= 0 || pvc.dlci >= 1024) + return -EINVAL; /* Only 10 bits, DLCI 0 reserved */ + + if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC || + ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC) + result = ARPHRD_ETHER; /* bridged Ethernet device */ + else + result = ARPHRD_DLCI; + + if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC || + ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC) + return fr_add_pvc(hdlc, pvc.dlci, result); + else + return fr_del_pvc(hdlc, pvc.dlci, result); } return -EINVAL; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/hdlc_generic.c linux.22-ac2/drivers/net/wan/hdlc_generic.c --- linux.vanilla/drivers/net/wan/hdlc_generic.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/hdlc_generic.c 2003-06-29 16:09:51.000000000 +0100 @@ -1,17 +1,13 @@ /* * Generic HDLC support routines for Linux * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
* - * Current status: - * - this is work in progress - * - not heavily tested on SMP - * - currently supported: + * Currently supported: * * raw IP-in-HDLC * * Cisco HDLC * * Frame Relay with ANSI or CCITT LMI (both user and network side) @@ -37,7 +33,7 @@ #include -static const char* version = "HDLC support module revision 1.11"; +static const char* version = "HDLC support module revision 1.12"; static int hdlc_change_mtu(struct net_device *dev, int new_mtu) @@ -60,7 +56,13 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p) { - dev_to_hdlc(dev)->netif_rx(skb); + hdlc_device *hdlc = dev_to_hdlc(dev); + if (hdlc->netif_rx) + hdlc->netif_rx(skb); + else { + hdlc->stats.rx_dropped++; /* Shouldn't happen */ + dev_kfree_skb(skb); + } return 0; } @@ -69,6 +71,10 @@ #define hdlc_raw_ioctl(hdlc, ifr) -ENOSYS #endif +#ifndef CONFIG_HDLC_RAW_ETH +#define hdlc_raw_eth_ioctl(hdlc, ifr) -ENOSYS +#endif + #ifndef CONFIG_HDLC_PPP #define hdlc_ppp_ioctl(hdlc, ifr) -ENOSYS #endif @@ -96,6 +102,7 @@ switch(ifr->ifr_settings.type) { case IF_PROTO_HDLC: + case IF_PROTO_HDLC_ETH: case IF_PROTO_PPP: case IF_PROTO_CISCO: case IF_PROTO_FR: @@ -109,6 +116,7 @@ switch(proto) { case IF_PROTO_HDLC: return hdlc_raw_ioctl(hdlc, ifr); + case IF_PROTO_HDLC_ETH: return hdlc_raw_eth_ioctl(hdlc, ifr); case IF_PROTO_PPP: return hdlc_ppp_ioctl(hdlc, ifr); case IF_PROTO_CISCO: return hdlc_cisco_ioctl(hdlc, ifr); case IF_PROTO_FR: return hdlc_fr_ioctl(hdlc, ifr); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/hdlc_ppp.c linux.22-ac2/drivers/net/wan/hdlc_ppp.c --- linux.vanilla/drivers/net/wan/hdlc_ppp.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/hdlc_ppp.c 2003-06-29 16:09:51.000000000 +0100 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * Point-to-point protocol support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
*/ #include @@ -68,10 +67,10 @@ -static void ppp_rx(struct sk_buff *skb) +static unsigned short ppp_type_trans(struct sk_buff *skb, + struct net_device *dev) { - skb->protocol = htons(ETH_P_WAN_PPP); - netif_rx(skb); + return __constant_htons(ETH_P_WAN_PPP); } @@ -103,7 +102,8 @@ hdlc->open = ppp_open; hdlc->stop = ppp_close; - hdlc->netif_rx = ppp_rx; + hdlc->netif_rx = NULL; + hdlc->type_trans = ppp_type_trans; hdlc->proto = IF_PROTO_PPP; dev->hard_start_xmit = hdlc->xmit; dev->hard_header = NULL; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/hdlc_raw.c linux.22-ac2/drivers/net/wan/hdlc_raw.c --- linux.vanilla/drivers/net/wan/hdlc_raw.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/hdlc_raw.c 2003-06-29 16:09:51.000000000 +0100 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * HDLC support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. */ #include @@ -26,10 +25,10 @@ #include -static void raw_rx(struct sk_buff *skb) +static unsigned short raw_type_trans(struct sk_buff *skb, + struct net_device *dev) { - skb->protocol = htons(ETH_P_IP); - netif_rx(skb); + return __constant_htons(ETH_P_IP); } @@ -67,7 +66,7 @@ new_settings.encoding = ENCODING_NRZ; if (new_settings.parity == PARITY_DEFAULT) - new_settings.parity = PARITY_NONE; + new_settings.parity = PARITY_CRC16_PR1_CCITT; result = hdlc->attach(hdlc, new_settings.encoding, new_settings.parity); @@ -79,11 +78,13 @@ hdlc->open = NULL; hdlc->stop = NULL; - hdlc->netif_rx = raw_rx; + hdlc->netif_rx = NULL; + hdlc->type_trans = raw_type_trans; hdlc->proto = IF_PROTO_HDLC; dev->hard_start_xmit = hdlc->xmit; dev->hard_header = NULL; dev->type = ARPHRD_RAWHDLC; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->addr_len = 0; return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/hdlc_raw_eth.c linux.22-ac2/drivers/net/wan/hdlc_raw_eth.c --- linux.vanilla/drivers/net/wan/hdlc_raw_eth.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/hdlc_raw_eth.c 2003-06-29 16:09:51.000000000 +0100 @@ -0,0 +1,110 @@ +/* + * Generic HDLC support routines for Linux + * HDLC Ethernet emulation support + * + * Copyright (C) 2002-2003 Krzysztof Halasa + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
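A note on the ppp_rx()/raw_rx() removals above (commentary and sketch, not part of the patch): protocols that only had to stamp skb->protocol no longer provide a netif_rx() handler at all; they export a type_trans() helper instead, and hdlc_rcv() in hdlc_generic.c now drops the frame (counting rx_dropped) if no netif_rx handler is wired up. A rough user-space sketch of the shape of that split, with stand-in types (sk_buff_stub, proto_ops and the 0x0021 value are illustrative, not the kernel's definitions):

        #include <stdio.h>

        typedef struct { unsigned short protocol; } sk_buff_stub;

        typedef struct {
                void (*netif_rx)(sk_buff_stub *);           /* FR, X.25, Cisco */
                unsigned short (*type_trans)(sk_buff_stub *); /* raw, PPP, Ethernet */
        } proto_ops;

        static unsigned short ppp_type_trans_stub(sk_buff_stub *skb)
        {
                return 0x0021;  /* stand-in value, not the real ETH_P_WAN_PPP */
        }

        static void deliver(proto_ops *ops, sk_buff_stub *skb)
        {
                if (ops->netif_rx)
                        ops->netif_rx(skb);     /* protocol handles rx itself */
                else if (ops->type_trans) {
                        skb->protocol = ops->type_trans(skb);
                        printf("deliver: protocol 0x%04x\n", skb->protocol);
                } /* else: count as rx_dropped, as hdlc_rcv() now does */
        }

        int main(void)
        {
                proto_ops ppp = { NULL, ppp_type_trans_stub };
                sk_buff_stub skb = { 0 };
                deliver(&ppp, &skb);
                return 0;
        }

Where exactly type_trans() is invoked on the receive path is not visible in these hunks; the sketch only illustrates the division of labour between the two hooks.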
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int eth_tx(struct sk_buff *skb, struct net_device *dev) +{ + int pad = ETH_ZLEN - skb->len; + if (pad > 0) { /* Pad the frame with zeros */ + int len = skb->len; + if (skb_tailroom(skb) < pad) + if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) { + dev_to_hdlc(dev)->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + skb_put(skb, pad); + memset(skb->data + len, 0, pad); + } + return dev_to_hdlc(dev)->xmit(skb, dev); +} + + +int hdlc_raw_eth_ioctl(hdlc_device *hdlc, struct ifreq *ifr) +{ + raw_hdlc_proto *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc; + const size_t size = sizeof(raw_hdlc_proto); + raw_hdlc_proto new_settings; + struct net_device *dev = hdlc_to_dev(hdlc); + int result; + void *old_ch_mtu; + int old_qlen; + + switch (ifr->ifr_settings.type) { + case IF_GET_PROTO: + ifr->ifr_settings.type = IF_PROTO_HDLC_ETH; + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size)) + return -EFAULT; + return 0; + + case IF_PROTO_HDLC_ETH: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->flags & IFF_UP) + return -EBUSY; + + if (copy_from_user(&new_settings, raw_s, size)) + return -EFAULT; + + if (new_settings.encoding == ENCODING_DEFAULT) + new_settings.encoding = ENCODING_NRZ; + + if (new_settings.parity == PARITY_DEFAULT) + new_settings.parity = PARITY_CRC16_PR1_CCITT; + + result = hdlc->attach(hdlc, new_settings.encoding, + new_settings.parity); + if (result) + return result; + + hdlc_proto_detach(hdlc); + memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size); + + hdlc->open = NULL; + hdlc->stop = NULL; + hdlc->netif_rx = NULL; + hdlc->type_trans = eth_type_trans; + hdlc->proto = IF_PROTO_HDLC_ETH; + dev->hard_start_xmit = eth_tx; + old_ch_mtu = dev->change_mtu; + old_qlen = dev->tx_queue_len; + ether_setup(dev); + dev->change_mtu = old_ch_mtu; + dev->tx_queue_len = old_qlen; + memcpy(dev->dev_addr, "\x00\x01", 2); + get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); + return 0; + } + + return -EINVAL; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/hdlc_x25.c linux.22-ac2/drivers/net/wan/hdlc_x25.c --- linux.vanilla/drivers/net/wan/hdlc_x25.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/hdlc_x25.c 2003-06-29 16:09:51.000000000 +0100 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * X.25 support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
*/ #include @@ -204,6 +203,7 @@ hdlc->open = x25_open; hdlc->stop = x25_close; hdlc->netif_rx = x25_rx; + hdlc->type_trans = NULL; hdlc->proto = IF_PROTO_X25; dev->hard_start_xmit = x25_xmit; dev->hard_header = NULL; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/lmc/lmc_proto.c linux.22-ac2/drivers/net/wan/lmc/lmc_proto.c --- linux.vanilla/drivers/net/wan/lmc/lmc_proto.c 2001-03-07 03:44:36.000000000 +0000 +++ linux.22-ac2/drivers/net/wan/lmc/lmc_proto.c 2003-06-29 16:09:51.000000000 +0100 @@ -30,8 +30,8 @@ #include #include #include +#include #include -#include #include #include diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/Makefile linux.22-ac2/drivers/net/wan/Makefile --- linux.vanilla/drivers/net/wan/Makefile 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/Makefile 2003-06-29 16:09:51.000000000 +0100 @@ -9,7 +9,7 @@ O_TARGET := wan.o -export-objs = z85230.o syncppp.o comx.o sdladrv.o cycx_drv.o hdlc_generic.o +export-objs = z85230.o syncppp.o comx.o sdladrv.o cycx_drv.o hdlc_generic.o pc300_drv.o list-multi = wanpipe.o cyclomx.o wanpipe-objs = sdlamain.o sdla_ft1.o $(wanpipe-y) @@ -24,12 +24,15 @@ hdlc-y := hdlc_generic.o hdlc-$(CONFIG_HDLC_RAW) += hdlc_raw.o +hdlc-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o hdlc-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o hdlc-$(CONFIG_HDLC_FR) += hdlc_fr.o hdlc-$(CONFIG_HDLC_PPP) += hdlc_ppp.o hdlc-$(CONFIG_HDLC_X25) += hdlc_x25.o hdlc-objs := $(hdlc-y) +pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o + obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o obj-$(CONFIG_COMX) += comx.o @@ -70,6 +73,7 @@ obj-$(CONFIG_CYCLADES_SYNC) += cycx_drv.o cyclomx.o obj-$(CONFIG_LAPBETHER) += lapbether.o obj-$(CONFIG_SBNI) += sbni.o +obj-$(CONFIG_PC300) += pc300.o obj-$(CONFIG_HDLC) += hdlc.o ifeq ($(CONFIG_HDLC_PPP),y) obj-$(CONFIG_HDLC) += syncppp.o @@ -79,6 +83,9 @@ include $(TOPDIR)/Rules.make +pc300.o: pc300_drv.o $(pc300-y) + $(LD) -r -o $@ pc300_drv.o $(pc300-y) + hdlc.o: $(hdlc-objs) $(LD) -r -o $@ $(hdlc-objs) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/pc300_drv.c linux.22-ac2/drivers/net/wan/pc300_drv.c --- linux.vanilla/drivers/net/wan/pc300_drv.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/pc300_drv.c 2003-06-29 16:09:51.000000000 +0100 @@ -0,0 +1,3490 @@ +#define USE_PCI_CLOCK +static char rcsid[] = +/* Important note: + * This string must have this exact format, including the space on its end + */ +"Revision: 3.4.8 Date: 2003/03/27 "; + +/* + * pc300.c Cyclades-PC300(tm) Driver. + * + * Author: Ivan Passos + * Maintainer: PC300 Maintainer + * + * Copyright: (c) 1999-2002 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pc300.h" + +#define CPC_LOCK(card,flags) \ + do { \ + spin_lock_irqsave(&card->card_lock, flags); \ + } while (0) + +#define CPC_UNLOCK(card,flags) \ + do { \ + spin_unlock_irqrestore(&card->card_lock, flags); \ + } while (0) + +#undef PC300_DEBUG_PCI +#undef PC300_DEBUG_INTR +#undef PC300_DEBUG_TX +#undef PC300_DEBUG_RX +#undef PC300_DEBUG_OTHER + +static struct pci_device_id cpc_pci_dev_id[] __devinitdata = { + /* PC300/RSV or PC300/X21, 2 chan */ + {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300}, + /* PC300/RSV or PC300/X21, 1 chan */ + {0x120e, 0x301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x301}, + /* PC300/TE, 2 chan */ + {0x120e, 0x310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x310}, + /* PC300/TE, 1 chan */ + {0x120e, 0x311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x311}, + /* PC300/TE-M, 2 chan */ + {0x120e, 0x320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x320}, + /* PC300/TE-M, 1 chan */ + {0x120e, 0x321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x321}, + /* End of table */ + {0,}, +}; +MODULE_DEVICE_TABLE(pci, cpc_pci_dev_id); + +#ifndef cpc_min +#define cpc_min(a,b) (((a)<(b))?(a):(b)) +#endif +#ifndef cpc_max +#define cpc_max(a,b) (((a)>(b))?(a):(b)) +#endif + +/* prototypes */ +static void tx_dma_buf_pt_init(pc300_t *, int); +static void tx_dma_buf_init(pc300_t *, int); +static void rx_dma_buf_pt_init(pc300_t *, int); +static void rx_dma_buf_init(pc300_t *, int); +static void tx_dma_buf_check(pc300_t *, int); +static void rx_dma_buf_check(pc300_t *, int); +static void cpc_intr(int, void *, struct pt_regs *); +static struct net_device_stats *cpc_get_stats(struct net_device *); +static int clock_rate_calc(uclong, uclong, int *); +static uclong detect_ram(pc300_t *); +static void plx_init(pc300_t *); +static void cpc_trace(struct net_device *, struct sk_buff *, char); +static int cpc_attach(hdlc_device *, unsigned short, unsigned short); + +#ifdef CONFIG_PC300_MLPPP +void cpc_tty_init(pc300dev_t * dev); +void cpc_tty_unregister_service(pc300dev_t * pc300dev); +void cpc_tty_receive(pc300dev_t * pc300dev); +void cpc_tty_trigger_poll(pc300dev_t * pc300dev); +void cpc_tty_reset_var(void); +#endif + +/************************/ +/*** DMA Routines ***/ +/************************/ +static void tx_dma_buf_pt_init(pc300_t * card, int ch) +{ + int i; + int ch_factor = ch * N_DMA_TX_BUF; + volatile pcsca_bd_t *ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); + + for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { + cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE + + (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); + cpc_writel(&ptdescr->ptbuf, + (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); + } +} + +static void tx_dma_buf_init(pc300_t * card, int ch) +{ + int i; + int ch_factor = ch * N_DMA_TX_BUF; + volatile pcsca_bd_t *ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); + + for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { + memset_io(ptdescr, 0, sizeof(pcsca_bd_t)); + cpc_writew(&ptdescr->len, 0); + cpc_writeb(&ptdescr->status, DST_OSB); + } + tx_dma_buf_pt_init(card, ch); +} + +static void rx_dma_buf_pt_init(pc300_t * card, int ch) +{ + int i; + int ch_factor = ch * N_DMA_RX_BUF; + volatile pcsca_bd_t *ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_RX_BD_BASE + ch_factor * 
sizeof(pcsca_bd_t)); + + for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { + cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE + + (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); + cpc_writel(&ptdescr->ptbuf, + (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); + } +} + +static void rx_dma_buf_init(pc300_t * card, int ch) +{ + int i; + int ch_factor = ch * N_DMA_RX_BUF; + volatile pcsca_bd_t *ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); + + for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { + memset_io(ptdescr, 0, sizeof(pcsca_bd_t)); + cpc_writew(&ptdescr->len, 0); + cpc_writeb(&ptdescr->status, 0); + } + rx_dma_buf_pt_init(card, ch); +} + +static void tx_dma_buf_check(pc300_t * card, int ch) +{ + volatile pcsca_bd_t *ptdescr; + int i; + ucshort first_bd = card->chan[ch].tx_first_bd; + ucshort next_bd = card->chan[ch].tx_next_bd; + + printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, + first_bd, TX_BD_ADDR(ch, first_bd), + next_bd, TX_BD_ADDR(ch, next_bd)); + for (i = first_bd, + ptdescr = (pcsca_bd_t *) (card->hw.rambase + TX_BD_ADDR(ch, first_bd)); + i != ((next_bd + 1) & (N_DMA_TX_BUF - 1)); + i = (i + 1) & (N_DMA_TX_BUF - 1), + ptdescr = (pcsca_bd_t *) (card->hw.rambase + TX_BD_ADDR(ch, i))) { + printk("\n CH%d TX%d: next=0x%lx, ptbuf=0x%lx, ST=0x%x, len=%d", + ch, i, (uclong) cpc_readl(&ptdescr->next), + (uclong) cpc_readl(&ptdescr->ptbuf), + cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len)); + } + printk("\n"); +} + +#ifdef PC300_DEBUG_OTHER +/* Show all TX buffer descriptors */ +static void tx1_dma_buf_check(pc300_t * card, int ch) +{ + volatile pcsca_bd_t *ptdescr; + int i; + ucshort first_bd = card->chan[ch].tx_first_bd; + ucshort next_bd = card->chan[ch].tx_next_bd; + uclong scabase = card->hw.scabase; + + printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); + printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, + first_bd, TX_BD_ADDR(ch, first_bd), + next_bd, TX_BD_ADDR(ch, next_bd)); + printk("TX_CDA=0x%08lx, TX_EDA=0x%08lx\n", + (uclong) cpc_readl(scabase + DTX_REG(CDAL, ch)), + (uclong) cpc_readl(scabase + DTX_REG(EDAL, ch))); + for (i = 0; i < N_DMA_TX_BUF; i++) { + ptdescr = (pcsca_bd_t *) (card->hw.rambase + TX_BD_ADDR(ch, i)); + printk("\n CH%d TX%d: next=0x%lx, ptbuf=0x%lx, ST=0x%x, len=%d", + ch, i, (uclong) cpc_readl(&ptdescr->next), + (uclong) cpc_readl(&ptdescr->ptbuf), + cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len)); + } + printk("\n"); +} +#endif + +static void rx_dma_buf_check(pc300_t * card, int ch) +{ + volatile pcsca_bd_t *ptdescr; + int i; + ucshort first_bd = card->chan[ch].rx_first_bd; + ucshort last_bd = card->chan[ch].rx_last_bd; + int ch_factor; + + ch_factor = ch * N_DMA_RX_BUF; + printk("#CH%d: f_bd = %d, l_bd = %d\n", ch, first_bd, last_bd); + for (i = 0, ptdescr = (pcsca_bd_t *) (card->hw.rambase + + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); + i < N_DMA_RX_BUF; i++, ptdescr++) { + if (cpc_readb(&ptdescr->status) & DST_OSB) + printk ("\n CH%d RX%d: next=0x%lx, ptbuf=0x%lx, ST=0x%x, len=%d", + ch, i, (uclong) cpc_readl(&ptdescr->next), + (uclong) cpc_readl(&ptdescr->ptbuf), + cpc_readb(&ptdescr->status), + cpc_readw(&ptdescr->len)); + } + printk("\n"); +} + +int dma_get_rx_frame_size(pc300_t * card, int ch) +{ + volatile pcsca_bd_t *ptdescr; + ucshort first_bd = card->chan[ch].rx_first_bd; + int rcvd = 0; + volatile ucchar status; + + ptdescr = (pcsca_bd_t *)(card->hw.rambase + RX_BD_ADDR(ch, first_bd)); + while ((status = 
cpc_readb(&ptdescr->status)) & DST_OSB) { + rcvd += cpc_readw(&ptdescr->len); + first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1); + if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) { + /* Return the size of a good frame or incomplete bad frame + * (dma_buf_read will clean the buffer descriptors in this case). */ + return (rcvd); + } + ptdescr = (pcsca_bd_t *)(card->hw.rambase + cpc_readl(&ptdescr->next)); + } + return (-1); +} + +/* + * dma_buf_write: writes a frame to the Tx DMA buffers + * NOTE: this function writes one frame at a time. + */ +int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) +{ + int i, nchar; + volatile pcsca_bd_t *ptdescr; + int tosend = len; + ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1; + + if (nbuf >= card->chan[ch].nfree_tx_bd) { + return -ENOMEM; + } + + for (i = 0; i < nbuf; i++) { + ptdescr = (pcsca_bd_t *) (card->hw.rambase + + TX_BD_ADDR(ch, card->chan[ch].tx_next_bd)); + nchar = cpc_min(BD_DEF_LEN, tosend); + if (cpc_readb(&ptdescr->status) & DST_OSB) { + memcpy_toio((void *)(card->hw.rambase + cpc_readl(&ptdescr->ptbuf)), + &ptdata[len - tosend], nchar); + cpc_writew(&ptdescr->len, nchar); + card->chan[ch].nfree_tx_bd--; + if ((i + 1) == nbuf) { + /* This must be the last BD to be used */ + cpc_writeb(&ptdescr->status, DST_EOM); + } else { + cpc_writeb(&ptdescr->status, 0); + } + } else { + return -ENOMEM; + } + tosend -= nchar; + card->chan[ch].tx_next_bd = + (card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1); + } + /* If it gets to here, it means we have sent the whole frame */ + return 0; +} + +/* + * dma_buf_read: reads a frame from the Rx DMA buffers + * NOTE: this function reads one frame at a time. + */ +int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) +{ + int nchar; + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + volatile pcsca_bd_t *ptdescr; + int rcvd = 0; + volatile ucchar status; + + ptdescr = (pcsca_bd_t *) (card->hw.rambase + + RX_BD_ADDR(ch, chan->rx_first_bd)); + while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { + nchar = cpc_readw(&ptdescr->len); + if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT)) + || (nchar > BD_DEF_LEN)) { + + if (nchar > BD_DEF_LEN) + status |= DST_RBIT; + rcvd = -status; + /* Discard remaining descriptors used by the bad frame */ + while (chan->rx_first_bd != chan->rx_last_bd) { + cpc_writeb(&ptdescr->status, 0); + chan->rx_first_bd = (chan->rx_first_bd+1) & (N_DMA_RX_BUF-1); + if (status & DST_EOM) + break; + ptdescr = (pcsca_bd_t *) (card->hw.rambase + + cpc_readl(&ptdescr->next)); + status = cpc_readb(&ptdescr->status); + } + break; + } + if (nchar != 0) { + if (skb) { + memcpy_fromio(skb_put(skb, nchar), + (void *)(card->hw.rambase+cpc_readl(&ptdescr->ptbuf)),nchar); + } + rcvd += nchar; + } + cpc_writeb(&ptdescr->status, 0); + cpc_writeb(&ptdescr->len, 0); + chan->rx_first_bd = (chan->rx_first_bd + 1) & (N_DMA_RX_BUF - 1); + + if (status & DST_EOM) + break; + + ptdescr = (pcsca_bd_t *) (card->hw.rambase + cpc_readl(&ptdescr->next)); + } + + if (rcvd != 0) { + /* Update pointer */ + chan->rx_last_bd = (chan->rx_first_bd - 1) & (N_DMA_RX_BUF - 1); + /* Update EDA */ + cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), + RX_BD_ADDR(ch, chan->rx_last_bd)); + } + return (rcvd); +} + +void tx_dma_stop(pc300_t * card, int ch) +{ + uclong scabase = card->hw.scabase; + ucchar drr_ena_bit = 1 << (5 + 2 * ch); + ucchar drr_rst_bit = 1 << (1 + 2 * ch); + + /* Disable DMA */ + cpc_writeb(scabase + DRR, drr_ena_bit); + cpc_writeb(scabase + DRR, 
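On the descriptor handling in dma_buf_write()/dma_buf_read() above (commentary and sketch, not part of the patch): the buffer-descriptor indices wrap with expressions like (i + 1) & (N_DMA_TX_BUF - 1), a masked increment that is only equivalent to a modulo when the ring size is a power of two. A tiny standalone illustration (RING_SIZE is an arbitrary example value):

        #include <stdio.h>

        #define RING_SIZE 8                     /* must be a power of two */

        static unsigned ring_next(unsigned i)
        {
                return (i + 1) & (RING_SIZE - 1);   /* same as (i + 1) % RING_SIZE */
        }

        int main(void)
        {
                unsigned i, idx = RING_SIZE - 2;
                for (i = 0; i < 4; i++) {
                        printf("%u -> %u\n", idx, ring_next(idx));
                        idx = ring_next(idx);
                }
                return 0;
        }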
drr_rst_bit & ~drr_ena_bit); +} + +void rx_dma_stop(pc300_t * card, int ch) +{ + uclong scabase = card->hw.scabase; + ucchar drr_ena_bit = 1 << (4 + 2 * ch); + ucchar drr_rst_bit = 1 << (2 * ch); + + /* Disable DMA */ + cpc_writeb(scabase + DRR, drr_ena_bit); + cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit); +} + +void rx_dma_start(pc300_t * card, int ch) +{ + uclong scabase = card->hw.scabase; + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + + /* Start DMA */ + cpc_writel(scabase + DRX_REG(CDAL, ch), + RX_BD_ADDR(ch, chan->rx_first_bd)); + if (cpc_readl(scabase + DRX_REG(CDAL,ch)) != + RX_BD_ADDR(ch, chan->rx_first_bd)) { + cpc_writel(scabase + DRX_REG(CDAL, ch), + RX_BD_ADDR(ch, chan->rx_first_bd)); + } + cpc_writel(scabase + DRX_REG(EDAL, ch), + RX_BD_ADDR(ch, chan->rx_last_bd)); + cpc_writew(scabase + DRX_REG(BFLL, ch), BD_DEF_LEN); + cpc_writeb(scabase + DSR_RX(ch), DSR_DE); + if (!(cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) { + cpc_writeb(scabase + DSR_RX(ch), DSR_DE); + } +} + +/*************************/ +/*** FALC Routines ***/ +/*************************/ +void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) +{ + uclong falcbase = card->hw.falcbase; + unsigned long i = 0; + + while (cpc_readb(falcbase + F_REG(SIS, ch)) & SIS_CEC) { + if (i++ >= PC300_FALC_MAXLOOP) { + printk("%s: FALC command locked(cmd=0x%x).\n", + card->chan[ch].d.name, cmd); + break; + } + } + cpc_writeb(falcbase + F_REG(CMDR, ch), cmd); +} + +void falc_intr_enable(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + /* Interrupt pins are open-drain */ + cpc_writeb(falcbase + F_REG(IPC, ch), + cpc_readb(falcbase + F_REG(IPC, ch)) & ~IPC_IC0); + /* Conters updated each second */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_ECM); + /* Enable SEC and ES interrupts */ + cpc_writeb(falcbase + F_REG(IMR3, ch), + cpc_readb(falcbase + F_REG(IMR3, ch)) & ~(IMR3_SEC | IMR3_ES)); + if (conf->fr_mode == PC300_FR_UNFRAMED) { + cpc_writeb(falcbase + F_REG(IMR4, ch), + cpc_readb(falcbase + F_REG(IMR4, ch)) & ~(IMR4_LOS)); + } else { + cpc_writeb(falcbase + F_REG(IMR4, ch), + cpc_readb(falcbase + F_REG(IMR4, ch)) & + ~(IMR4_LFA | IMR4_AIS | IMR4_LOS | IMR4_SLIP)); + } + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(IMR3, ch), + cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC); + } else { + cpc_writeb(falcbase + F_REG(IPC, ch), + cpc_readb(falcbase + F_REG(IPC, ch)) | IPC_SCI); + if (conf->fr_mode == PC300_FR_UNFRAMED) { + cpc_writeb(falcbase + F_REG(IMR2, ch), + cpc_readb(falcbase + F_REG(IMR2, ch)) & ~(IMR2_LOS)); + } else { + cpc_writeb(falcbase + F_REG(IMR2, ch), + cpc_readb(falcbase + F_REG(IMR2, ch)) & + ~(IMR2_FAR | IMR2_LFA | IMR2_AIS | IMR2_LOS)); + if (pfalc->multiframe_mode) { + cpc_writeb(falcbase + F_REG(IMR2, ch), + cpc_readb(falcbase + F_REG(IMR2, ch)) & + ~(IMR2_T400MS | IMR2_MFAR)); + } else { + cpc_writeb(falcbase + F_REG(IMR2, ch), + cpc_readb(falcbase + F_REG(IMR2, ch)) | + IMR2_T400MS | IMR2_MFAR); + } + } + } +} + +void falc_open_timeslot(pc300_t * card, int ch, int timeslot) +{ + uclong falcbase = card->hw.falcbase; + ucchar tshf = card->chan[ch].falc.offset; + + cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), + cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & + ~(0x80 >> ((timeslot - tshf) & 0x07))); + cpc_writeb(falcbase + 
F_REG((TTR1 + timeslot / 8), ch), + cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) | + (0x80 >> (timeslot & 0x07))); + cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch), + cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) | + (0x80 >> (timeslot & 0x07))); +} + +void falc_close_timeslot(pc300_t * card, int ch, int timeslot) +{ + uclong falcbase = card->hw.falcbase; + ucchar tshf = card->chan[ch].falc.offset; + + cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), + cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | + (0x80 >> ((timeslot - tshf) & 0x07))); + cpc_writeb(falcbase + F_REG((TTR1 + timeslot / 8), ch), + cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) & + ~(0x80 >> (timeslot & 0x07))); + cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch), + cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) & + ~(0x80 >> (timeslot & 0x07))); +} + +void falc_close_all_timeslots(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + uclong falcbase = card->hw.falcbase; + + cpc_writeb(falcbase + F_REG(ICB1, ch), 0xff); + cpc_writeb(falcbase + F_REG(TTR1, ch), 0); + cpc_writeb(falcbase + F_REG(RTR1, ch), 0); + cpc_writeb(falcbase + F_REG(ICB2, ch), 0xff); + cpc_writeb(falcbase + F_REG(TTR2, ch), 0); + cpc_writeb(falcbase + F_REG(RTR2, ch), 0); + cpc_writeb(falcbase + F_REG(ICB3, ch), 0xff); + cpc_writeb(falcbase + F_REG(TTR3, ch), 0); + cpc_writeb(falcbase + F_REG(RTR3, ch), 0); + if (conf->media == IF_IFACE_E1) { + cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff); + cpc_writeb(falcbase + F_REG(TTR4, ch), 0); + cpc_writeb(falcbase + F_REG(RTR4, ch), 0); + } +} + +void falc_open_all_timeslots(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + uclong falcbase = card->hw.falcbase; + + cpc_writeb(falcbase + F_REG(ICB1, ch), 0); + if (conf->fr_mode == PC300_FR_UNFRAMED) { + cpc_writeb(falcbase + F_REG(TTR1, ch), 0xff); + cpc_writeb(falcbase + F_REG(RTR1, ch), 0xff); + } else { + /* Timeslot 0 is never enabled */ + cpc_writeb(falcbase + F_REG(TTR1, ch), 0x7f); + cpc_writeb(falcbase + F_REG(RTR1, ch), 0x7f); + } + cpc_writeb(falcbase + F_REG(ICB2, ch), 0); + cpc_writeb(falcbase + F_REG(TTR2, ch), 0xff); + cpc_writeb(falcbase + F_REG(RTR2, ch), 0xff); + cpc_writeb(falcbase + F_REG(ICB3, ch), 0); + cpc_writeb(falcbase + F_REG(TTR3, ch), 0xff); + cpc_writeb(falcbase + F_REG(RTR3, ch), 0xff); + if (conf->media == IF_IFACE_E1) { + cpc_writeb(falcbase + F_REG(ICB4, ch), 0); + cpc_writeb(falcbase + F_REG(TTR4, ch), 0xff); + cpc_writeb(falcbase + F_REG(RTR4, ch), 0xff); + } else { + cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff); + cpc_writeb(falcbase + F_REG(TTR4, ch), 0x80); + cpc_writeb(falcbase + F_REG(RTR4, ch), 0x80); + } +} + +void falc_init_timeslot(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + int tslot; + + for (tslot = 0; tslot < pfalc->num_channels; tslot++) { + if (conf->tslot_bitmap & (1 << tslot)) { + // Channel enabled + falc_open_timeslot(card, ch, tslot + 1); + } else { + // Channel disabled + falc_close_timeslot(card, ch, tslot + 1); + } + } +} + +void falc_enable_comm(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + + if (pfalc->full_bandwidth) { + 
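On falc_open_timeslot()/falc_close_timeslot() above (commentary and sketch, not part of the patch): for the TTR/RTR register banks a timeslot selects its register with timeslot / 8 and its bit with 0x80 >> (timeslot & 0x07); the ICB bank additionally subtracts the per-mode offset (falc.offset). A standalone illustration of that register/bit mapping:

        #include <stdio.h>

        int main(void)
        {
                int timeslot;
                for (timeslot = 0; timeslot < 32; timeslot++) {
                        int reg = timeslot / 8;                 /* TTR1 + reg */
                        unsigned char bit = 0x80 >> (timeslot & 0x07);
                        if (timeslot < 4 || timeslot == 31)
                                printf("timeslot %2d -> register +%d, mask 0x%02x\n",
                                       timeslot, reg, bit);
                }
                return 0;
        }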
falc_open_all_timeslots(card, ch); + } else { + falc_init_timeslot(card, ch); + } + // CTS/DCD ON + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) & + ~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch))); +} + +void falc_disable_comm(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + + if (pfalc->loop_active != 2) { + falc_close_all_timeslots(card, ch); + } + // CTS/DCD OFF + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) | + ((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch))); +} + +void falc_init_t1(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); + + /* Switch to T1 mode (PCM 24) */ + cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); + + /* Wait 20 us for setup */ + udelay(20); + + /* Transmit Buffer Size (1 frame) */ + cpc_writeb(falcbase + F_REG(SIC1, ch), SIC1_XBS0); + + /* Clock mode */ + if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */ + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS); + } else { /* Slave mode */ + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS); + cpc_writeb(falcbase + F_REG(LOOP, ch), + cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_RTM); + } + + cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI); + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) & + ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1)); + + switch (conf->lcode) { + case PC300_LC_AMI: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | + FMR0_XC1 | FMR0_RC1); + /* Clear Channel register to ON for all channels */ + cpc_writeb(falcbase + F_REG(CCB1, ch), 0xff); + cpc_writeb(falcbase + F_REG(CCB2, ch), 0xff); + cpc_writeb(falcbase + F_REG(CCB3, ch), 0xff); + break; + + case PC300_LC_B8ZS: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | + FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1); + break; + + case PC300_LC_NRZ: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | 0x00); + break; + } + + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_ELOS); + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0)); + /* Set interface mode to 2 MBPS */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD); + + switch (conf->fr_mode) { + case PC300_FR_ESF: + pfalc->multiframe_mode = 0; + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_FM1); + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | + FMR1_CRC | FMR1_EDL); + cpc_writeb(falcbase + F_REG(XDL1, ch), 0); + cpc_writeb(falcbase + F_REG(XDL2, ch), 0); + cpc_writeb(falcbase + F_REG(XDL3, ch), 0); + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) & ~FMR0_SRAF); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2,ch)) | FMR2_MCSP | FMR2_SSP); + break; + + case PC300_FR_D4: + pfalc->multiframe_mode = 1; + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) & + ~(FMR4_FM1 
| FMR4_FM0)); + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | FMR0_SRAF); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_SSP); + break; + } + + /* Enable Automatic Resynchronization */ + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_AUTO); + + /* Transmit Automatic Remote Alarm */ + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA); + + /* Channel translation mode 1 : one to one */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_CTM); + + /* No signaling */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_SIGM); + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) & + ~(FMR5_EIBR | FMR5_SRS)); + cpc_writeb(falcbase + F_REG(CCR1, ch), 0); + + cpc_writeb(falcbase + F_REG(LIM1, ch), + cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1); + + switch (conf->lbo) { + /* Provides proper Line Build Out */ + case PC300_LBO_0_DB: + cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja)); + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x5a); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x8f); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20); + break; + case PC300_LBO_7_5_DB: + cpc_writeb(falcbase + F_REG(LIM2, ch), (0x40 | LIM2_LOS1 | dja)); + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x11); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x02); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20); + break; + case PC300_LBO_15_DB: + cpc_writeb(falcbase + F_REG(LIM2, ch), (0x80 | LIM2_LOS1 | dja)); + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x8e); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20); + break; + case PC300_LBO_22_5_DB: + cpc_writeb(falcbase + F_REG(LIM2, ch), (0xc0 | LIM2_LOS1 | dja)); + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x09); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20); + break; + } + + /* Transmit Clock-Slot Offset */ + cpc_writeb(falcbase + F_REG(XC0, ch), + cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01); + /* Transmit Time-slot Offset */ + cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e); + /* Receive Clock-Slot offset */ + cpc_writeb(falcbase + F_REG(RC0, ch), 0x05); + /* Receive Time-slot offset */ + cpc_writeb(falcbase + F_REG(RC1, ch), 0x00); + + /* LOS Detection after 176 consecutive 0s */ + cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a); + /* LOS Recovery after 22 ones in the time window of PCD */ + cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15); + + cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f); + + if (conf->fr_mode == PC300_FR_ESF_JAPAN) { + cpc_writeb(falcbase + F_REG(RC1, ch), + cpc_readb(falcbase + F_REG(RC1, ch)) | 0x80); + } + + falc_close_all_timeslots(card, ch); +} + +void falc_init_e1(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar dja = (ch ? 
(LIM2_DJA2 | LIM2_DJA1) : 0); + + /* Switch to E1 mode (PCM 30) */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_PMOD); + + /* Clock mode */ + if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */ + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS); + } else { /* Slave mode */ + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS); + } + cpc_writeb(falcbase + F_REG(LOOP, ch), + cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_SFM); + + cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI); + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) & + ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1)); + + switch (conf->lcode) { + case PC300_LC_AMI: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | + FMR0_XC1 | FMR0_RC1); + break; + + case PC300_LC_HDB3: + cpc_writeb(falcbase + F_REG(FMR0, ch), + cpc_readb(falcbase + F_REG(FMR0, ch)) | + FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1); + break; + + case PC300_LC_NRZ: + break; + } + + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0)); + /* Set interface mode to 2 MBPS */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD); + + cpc_writeb(falcbase + F_REG(XPM0, ch), 0x18); + cpc_writeb(falcbase + F_REG(XPM1, ch), 0x03); + cpc_writeb(falcbase + F_REG(XPM2, ch), 0x00); + + switch (conf->fr_mode) { + case PC300_FR_MF_CRC4: + pfalc->multiframe_mode = 1; + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_XFS); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_RFS1); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_RFS0); + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_EXTIW); + + /* MultiFrame Resynchronization */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_MFCS); + + /* Automatic Loss of Multiframe > 914 CRC errors */ + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_ALMF); + + /* S1 and SI1/SI2 spare Bits set to 1 */ + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_AXS); + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_EBP); + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XS13 | XSP_XS15); + + /* Automatic Force Resynchronization */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR); + + /* Transmit Automatic Remote Alarm */ + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA); + + /* Transmit Spare Bits for National Use (Y, Sn, Sa) */ + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) | + XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4); + break; + + case PC300_FR_MF_NON_CRC4: + case PC300_FR_D4: + pfalc->multiframe_mode = 0; + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & + ~(FMR2_RFS1 | FMR2_RFS0)); + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XSIS); + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XSIF); + + /* Automatic 
Force Resynchronization */ + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR); + + /* Transmit Automatic Remote Alarm */ + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA); + + /* Transmit Spare Bits for National Use (Y, Sn, Sa) */ + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) | + XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4); + break; + + case PC300_FR_UNFRAMED: + pfalc->multiframe_mode = 0; + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & + ~(FMR2_RFS1 | FMR2_RFS0)); + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_TT0); + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) & + ~(XSW_XTM|XSW_XY0|XSW_XY1|XSW_XY2|XSW_XY3|XSW_XY4)); + cpc_writeb(falcbase + F_REG(TSWM, ch), 0xff); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | + (FMR2_RTM | FMR2_DAIS)); + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_AXRA); + cpc_writeb(falcbase + F_REG(FMR1, ch), + cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_AFR); + pfalc->sync = 1; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED2 << (2 * ch))); + break; + } + + /* No signaling */ + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_CASEN); + cpc_writeb(falcbase + F_REG(CCR1, ch), 0); + + cpc_writeb(falcbase + F_REG(LIM1, ch), + cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1); + cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja)); + + /* Transmit Clock-Slot Offset */ + cpc_writeb(falcbase + F_REG(XC0, ch), + cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01); + /* Transmit Time-slot Offset */ + cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e); + /* Receive Clock-Slot offset */ + cpc_writeb(falcbase + F_REG(RC0, ch), 0x05); + /* Receive Time-slot offset */ + cpc_writeb(falcbase + F_REG(RC1, ch), 0x00); + + /* LOS Detection after 176 consecutive 0s */ + cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a); + /* LOS Recovery after 22 ones in the time window of PCD */ + cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15); + + cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f); + + falc_close_all_timeslots(card, ch); +} + +void falc_init_hdlc(pc300_t * card, int ch) +{ + uclong falcbase = card->hw.falcbase; + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + + /* Enable transparent data transfer */ + if (conf->fr_mode == PC300_FR_UNFRAMED) { + cpc_writeb(falcbase + F_REG(MODE, ch), 0); + } else { + cpc_writeb(falcbase + F_REG(MODE, ch), + cpc_readb(falcbase + F_REG(MODE, ch)) | + (MODE_HRAC | MODE_MDS2)); + cpc_writeb(falcbase + F_REG(RAH2, ch), 0xff); + cpc_writeb(falcbase + F_REG(RAH1, ch), 0xff); + cpc_writeb(falcbase + F_REG(RAL2, ch), 0xff); + cpc_writeb(falcbase + F_REG(RAL1, ch), 0xff); + } + + /* Tx/Rx reset */ + falc_issue_cmd(card, ch, CMDR_RRES | CMDR_XRES | CMDR_SRES); + + /* Enable interrupt sources */ + falc_intr_enable(card, ch); +} + +void te_config(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar dummy; + unsigned long flags; + + memset(pfalc, 0, 
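A general note on the FALC setup code above (commentary and sketch, not part of the patch): almost every register update is a read-modify-write, cpc_writeb(reg, cpc_readb(reg) | flag) to set bits and ... & ~flag to clear them. The same idiom reduced to a plain byte in user space (readb/writeb here are trivial stand-ins, not the driver's accessors):

        #include <stdio.h>

        static unsigned char reg;               /* stand-in for a device register */

        static unsigned char readb(void)            { return reg; }
        static void writeb(unsigned char v)         { reg = v; }

        static void set_bits(unsigned char mask)    { writeb(readb() | mask); }
        static void clear_bits(unsigned char mask)  { writeb(readb() & ~mask); }

        int main(void)
        {
                set_bits(0x40 | 0x01);          /* enable two control bits */
                clear_bits(0x01);               /* then drop one of them again */
                printf("register = 0x%02x\n", reg);     /* prints 0x40 */
                return 0;
        }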
sizeof(falc_t)); + switch (conf->media) { + case IF_IFACE_T1: + pfalc->num_channels = NUM_OF_T1_CHANNELS; + pfalc->offset = 1; + break; + case IF_IFACE_E1: + pfalc->num_channels = NUM_OF_E1_CHANNELS; + pfalc->offset = 0; + break; + } + if (conf->tslot_bitmap == 0xffffffffUL) + pfalc->full_bandwidth = 1; + else + pfalc->full_bandwidth = 0; + + CPC_LOCK(card, flags); + /* Reset the FALC chip */ + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) | + (CPLD_REG1_FALC_RESET << (2 * ch))); + udelay(10000); + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) & + ~(CPLD_REG1_FALC_RESET << (2 * ch))); + + if (conf->media == IF_IFACE_T1) { + falc_init_t1(card, ch); + } else { + falc_init_e1(card, ch); + } + falc_init_hdlc(card, ch); + if (conf->rx_sens == PC300_RX_SENS_SH) { + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_EQON); + } else { + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_EQON); + } + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) | + ((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK) << (2 * ch))); + + /* Clear all interrupt registers */ + dummy = cpc_readb(falcbase + F_REG(FISR0, ch)) + + cpc_readb(falcbase + F_REG(FISR1, ch)) + + cpc_readb(falcbase + F_REG(FISR2, ch)) + + cpc_readb(falcbase + F_REG(FISR3, ch)); + CPC_UNLOCK(card, flags); +} + +void falc_check_status(pc300_t * card, int ch, unsigned char frs0) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + /* Verify LOS */ + if (frs0 & FRS0_LOS) { + if (!pfalc->red_alarm) { + pfalc->red_alarm = 1; + pfalc->los++; + if (!pfalc->blue_alarm) { + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise interfere + * with other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) + | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + } + } + } else { + if (pfalc->red_alarm) { + pfalc->red_alarm = 0; + pfalc->losr++; + } + } + + if (conf->fr_mode != PC300_FR_UNFRAMED) { + /* Verify AIS alarm */ + if (frs0 & FRS0_AIS) { + if (!pfalc->blue_alarm) { + pfalc->blue_alarm = 1; + pfalc->ais++; + // EVENT_AIS + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise interfere with other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_AIS + } + } else { + pfalc->blue_alarm = 0; + } + + /* Verify LFA */ + if (frs0 & FRS0_LFA) { + if (!pfalc->loss_fa) { + pfalc->loss_fa = 1; + pfalc->lfa++; + if (!pfalc->blue_alarm && !pfalc->red_alarm) { + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise + * interfere with other working boards. 
*/ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) + | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + } + } + } else { + if (pfalc->loss_fa) { + pfalc->loss_fa = 0; + pfalc->farec++; + } + } + + /* Verify LMFA */ + if (pfalc->multiframe_mode && (frs0 & FRS0_LMFA)) { + /* D4 or CRC4 frame mode */ + if (!pfalc->loss_mfa) { + pfalc->loss_mfa = 1; + pfalc->lmfa++; + if (!pfalc->blue_alarm && !pfalc->red_alarm && + !pfalc->loss_fa) { + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise + * interfere with other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) + | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + } + } + } else { + pfalc->loss_mfa = 0; + } + + /* Verify Remote Alarm */ + if (frs0 & FRS0_RRA) { + if (!pfalc->yellow_alarm) { + pfalc->yellow_alarm = 1; + pfalc->rai++; + if (pfalc->sync) { + // EVENT_RAI + falc_disable_comm(card, ch); + // EVENT_RAI + } + } + } else { + pfalc->yellow_alarm = 0; + } + } /* if !PC300_UNFRAMED */ + + if (pfalc->red_alarm || pfalc->loss_fa || + pfalc->loss_mfa || pfalc->blue_alarm) { + if (pfalc->sync) { + pfalc->sync = 0; + chan->d.line_off++; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED2 << (2 * ch))); + } + } else { + if (!pfalc->sync) { + pfalc->sync = 1; + chan->d.line_on++; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED2 << (2 * ch))); + } + } + + if (pfalc->sync && !pfalc->yellow_alarm) { + if (!pfalc->active) { + // EVENT_FALC_NORMAL + if (pfalc->loop_active) { + return; + } + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) & ~IMR0_PDEN); + } + falc_enable_comm(card, ch); + // EVENT_FALC_NORMAL + pfalc->active = 1; + } + } else { + if (pfalc->active) { + pfalc->active = 0; + } + } +} + +void falc_update_stats(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucshort counter; + + counter = cpc_readb(falcbase + F_REG(FECL, ch)); + counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; + pfalc->fec += counter; + + counter = cpc_readb(falcbase + F_REG(CVCL, ch)); + counter |= cpc_readb(falcbase + F_REG(CVCH, ch)) << 8; + pfalc->cvc += counter; + + counter = cpc_readb(falcbase + F_REG(CECL, ch)); + counter |= cpc_readb(falcbase + F_REG(CECH, ch)) << 8; + pfalc->cec += counter; + + counter = cpc_readb(falcbase + F_REG(EBCL, ch)); + counter |= cpc_readb(falcbase + F_REG(EBCH, ch)) << 8; + pfalc->ebc += counter; + + if (cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) { + mdelay(10); + counter = cpc_readb(falcbase + F_REG(BECL, ch)); + counter |= cpc_readb(falcbase + F_REG(BECH, ch)) << 8; + pfalc->bec += counter; + + if (((conf->media == IF_IFACE_T1) && + (cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_LLBAD) && + (!(cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_PDEN))) + || + ((conf->media == IF_IFACE_E1) && + (cpc_readb(falcbase + F_REG(RSP, ch)) & RSP_LLBAD))) { + pfalc->prbs = 2; + } else { + pfalc->prbs = 1; + } + } +} + +/*---------------------------------------------------------------------------- + * falc_remote_loop + *---------------------------------------------------------------------------- + * 
Description: In the remote loopback mode, the clock and data recovered + * from the line inputs RL1/2 or RDIP/RDIN are routed back + * to the line outputs XL1/2 or XDOP/XDON via the analog + * transmitter. As in normal mode, they are processed by + * the synchronizer and then sent to the system interface. + *---------------------------------------------------------------------------- + */ +void falc_remote_loop(pc300_t * card, int ch, int loop_on) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (loop_on) { + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise interfere with + * other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + cpc_writeb(falcbase + F_REG(LIM1, ch), + cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RL); + pfalc->loop_active = 1; + } else { + cpc_writeb(falcbase + F_REG(LIM1, ch), + cpc_readb(falcbase + F_REG(LIM1, ch)) & ~LIM1_RL); + pfalc->sync = 0; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED2 << (2 * ch))); + pfalc->active = 0; + falc_issue_cmd(card, ch, CMDR_XRES); + pfalc->loop_active = 0; + } +} + +/*---------------------------------------------------------------------------- + * falc_local_loop + *---------------------------------------------------------------------------- + * Description: The local loopback mode disconnects the receive lines + * RL1/RL2 or RDIP/RDIN from the receiver. Instead of the + * signals coming from the line, the data provided by the system + * interface are routed through the analog receiver back to + * the system interface. The unipolar bit stream is + * transmitted undisturbed on the line. Receiver and transmitter + * coding must be identical. + *---------------------------------------------------------------------------- + */ +void falc_local_loop(pc300_t * card, int ch, int loop_on) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (loop_on) { + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_LL); + pfalc->loop_active = 1; + } else { + cpc_writeb(falcbase + F_REG(LIM0, ch), + cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_LL); + pfalc->loop_active = 0; + } +} + +/*---------------------------------------------------------------------------- + * falc_payload_loop + *---------------------------------------------------------------------------- + * Description: This routine enables/disables payload loopback. + * When the payload loop is activated, the received 192 bits + * of payload data will be looped back to the transmit + * direction. The framing bits, CRC6 and DL bits are not + * looped. They are originated by the FALC-LH transmitter.
+ *---------------------------------------------------------------------------- + */ +void falc_payload_loop(pc300_t * card, int ch, int loop_on) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (loop_on) { + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise interfere with + * other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_PLB); + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_TM); + } else { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) | XSP_TT0); + } + falc_open_all_timeslots(card, ch); + pfalc->loop_active = 2; + } else { + cpc_writeb(falcbase + F_REG(FMR2, ch), + cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_PLB); + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR4, ch), + cpc_readb(falcbase + F_REG(FMR4, ch)) & ~FMR4_TM); + } else { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) & ~XSP_TT0); + } + pfalc->sync = 0; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED2 << (2 * ch))); + pfalc->active = 0; + falc_issue_cmd(card, ch, CMDR_XRES); + pfalc->loop_active = 0; + } +} + +/*---------------------------------------------------------------------------- + * turn_off_xlu + *---------------------------------------------------------------------------- + * Description: Turns XLU bit off in the proper register + *---------------------------------------------------------------------------- + */ +void turn_off_xlu(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + uclong falcbase = card->hw.falcbase; + + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLU); + } else { + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLU); + } +} + +/*---------------------------------------------------------------------------- + * turn_off_xld + *---------------------------------------------------------------------------- + * Description: Turns XLD bit off in the proper register + *---------------------------------------------------------------------------- + */ +void turn_off_xld(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + uclong falcbase = card->hw.falcbase; + + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLD); + } else { + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLD); + } +} + +/*---------------------------------------------------------------------------- + * falc_generate_loop_up_code + *---------------------------------------------------------------------------- + * Description: This routine writes the proper FALC chip register in order + * to generate a LOOP activation code over a T1/E1 line. 
+ *---------------------------------------------------------------------------- + */ +void falc_generate_loop_up_code(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLU); + } else { + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLU); + } + // EVENT_FALC_ABNORMAL + if (conf->media == IF_IFACE_T1) { + /* Disable this interrupt as it may otherwise interfere with + * other working boards. */ + cpc_writeb(falcbase + F_REG(IMR0, ch), + cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN); + } + falc_disable_comm(card, ch); + // EVENT_FALC_ABNORMAL + pfalc->loop_gen = 1; +} + +/*---------------------------------------------------------------------------- + * falc_generate_loop_down_code + *---------------------------------------------------------------------------- + * Description: This routine writes the proper FALC chip register in order + * to generate a LOOP deactivation code over a T1/E1 line. + *---------------------------------------------------------------------------- + */ +void falc_generate_loop_down_code(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (conf->media == IF_IFACE_T1) { + cpc_writeb(falcbase + F_REG(FMR5, ch), + cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLD); + } else { + cpc_writeb(falcbase + F_REG(FMR3, ch), + cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLD); + } + pfalc->sync = 0; + cpc_writeb(falcbase + card->hw.cpld_reg2, + cpc_readb(falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED2 << (2 * ch))); + pfalc->active = 0; +//? falc_issue_cmd(card, ch, CMDR_XRES); + pfalc->loop_gen = 0; +} + +/*---------------------------------------------------------------------------- + * falc_pattern_test + *---------------------------------------------------------------------------- + * Description: This routine generates a pattern code and checks + * it on the reception side. 
+ *---------------------------------------------------------------------------- + */ +void falc_pattern_test(pc300_t * card, int ch, unsigned int activate) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (activate) { + pfalc->prbs = 1; + pfalc->bec = 0; + if (conf->media == IF_IFACE_T1) { + /* Disable local loop activation/deactivation detect */ + cpc_writeb(falcbase + F_REG(IMR3, ch), + cpc_readb(falcbase + F_REG(IMR3, ch)) | IMR3_LLBSC); + } else { + /* Disable local loop activation/deactivation detect */ + cpc_writeb(falcbase + F_REG(IMR1, ch), + cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_LLBSC); + } + /* Activates generation and monitoring of PRBS + * (Pseudo Random Bit Sequence) */ + cpc_writeb(falcbase + F_REG(LCR1, ch), + cpc_readb(falcbase + F_REG(LCR1, ch)) | LCR1_EPRM | LCR1_XPRBS); + } else { + pfalc->prbs = 0; + /* Deactivates generation and monitoring of PRBS + * (Pseudo Random Bit Sequence) */ + cpc_writeb(falcbase + F_REG(LCR1, ch), + cpc_readb(falcbase+F_REG(LCR1,ch)) & ~(LCR1_EPRM | LCR1_XPRBS)); + if (conf->media == IF_IFACE_T1) { + /* Enable local loop activation/deactivation detect */ + cpc_writeb(falcbase + F_REG(IMR3, ch), + cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC); + } else { + /* Enable local loop activation/deactivation detect */ + cpc_writeb(falcbase + F_REG(IMR1, ch), + cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_LLBSC); + } + } +} + +/*---------------------------------------------------------------------------- + * falc_pattern_test_error + *---------------------------------------------------------------------------- + * Description: This routine returns the bit error counter value + *---------------------------------------------------------------------------- + */ +ucshort falc_pattern_test_error(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + + return (pfalc->bec); +} + +/**********************************/ +/*** Net Interface Routines ***/ +/**********************************/ + +static void +cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx) +{ + struct sk_buff *skb; + + if ((skb = dev_alloc_skb(10 + skb_main->len)) == NULL) { + printk("%s: out of memory\n", dev->name); + return; + } + skb_put(skb, 10 + skb_main->len); + + skb->dev = dev; + skb->protocol = htons(ETH_P_CUST); + skb->mac.raw = skb->data; + skb->pkt_type = PACKET_HOST; + skb->len = 10 + skb_main->len; + + memcpy(skb->data, dev->name, 5); + skb->data[5] = '['; + skb->data[6] = rx_tx; + skb->data[7] = ']'; + skb->data[8] = ':'; + skb->data[9] = ' '; + memcpy(&skb->data[10], skb_main->data, skb_main->len); + + netif_rx(skb); +} + +void cpc_tx_timeout(struct net_device *dev) +{ + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + struct net_device_stats *stats = &d->hdlc->stats; + int ch = chan->channel; + uclong flags; + ucchar ilar; + + stats->tx_errors++; + stats->tx_aborted_errors++; + CPC_LOCK(card, flags); + if ((ilar = cpc_readb(card->hw.scabase + ILAR)) != 0) { + printk("%s: ILAR=0x%x\n", dev->name, ilar); + cpc_writeb(card->hw.scabase + ILAR, ilar); + cpc_writeb(card->hw.scabase + DMER, 0x80); + } + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & + 
~(CPLD_REG2_FALC_LED1 << (2 * ch))); + } + dev->trans_start = jiffies; + CPC_UNLOCK(card, flags); + netif_wake_queue(dev); +} + +int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) +{ + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + struct net_device_stats *stats = &d->hdlc->stats; + int ch = chan->channel; + uclong flags; +#ifdef PC300_DEBUG_TX + int i; +#endif + + if (chan->conf.monitor) { + /* In monitor mode no Tx is done: ignore packet */ + dev_kfree_skb(skb); + return 0; + } else if (!netif_carrier_ok(dev)) { + /* DCD must be OFF: drop packet */ + dev_kfree_skb(skb); + stats->tx_errors++; + stats->tx_carrier_errors++; + return 0; + } else if (cpc_readb(card->hw.scabase + M_REG(ST3, ch)) & ST3_DCD) { + printk("%s: DCD is OFF. Going administrative down.\n", dev->name); + stats->tx_errors++; + stats->tx_carrier_errors++; + dev_kfree_skb(skb); + netif_carrier_off(dev); + CPC_LOCK(card, flags); + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR); + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED1 << (2 * ch))); + } + CPC_UNLOCK(card, flags); + netif_wake_queue(dev); + return 0; + } + + /* Write buffer to DMA buffers */ + if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) { +// printk("%s: write error. Dropping TX packet.\n", dev->name); + netif_stop_queue(dev); + dev_kfree_skb(skb); + stats->tx_errors++; + stats->tx_dropped++; + return 0; + } +#ifdef PC300_DEBUG_TX + printk("%s T:", dev->name); + for (i = 0; i < skb->len; i++) + printk(" %02x", *(skb->data + i)); + printk("\n"); +#endif + + if (d->trace_on) { + cpc_trace(dev, skb, 'T'); + } + dev->trans_start = jiffies; + + /* Start transmission */ + CPC_LOCK(card, flags); + /* verify if it has more than one free descriptor */ + if (card->chan[ch].nfree_tx_bd <= 1) { + /* don't have so stop the queue */ + netif_stop_queue(dev); + } + cpc_writel(card->hw.scabase + DTX_REG(EDAL, ch), + TX_BD_ADDR(ch, chan->tx_next_bd)); + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA); + cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE); + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + CPC_UNLOCK(card, flags); + dev_kfree_skb(skb); + + return 0; +} + +void cpc_net_rx(hdlc_device * hdlc) +{ + struct net_device *dev = hdlc_to_dev(hdlc); + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + struct net_device_stats *stats = &d->hdlc->stats; + int ch = chan->channel; +#ifdef PC300_DEBUG_RX + int i; +#endif + int rxb; + struct sk_buff *skb; + + while (1) { + if ((rxb = dma_get_rx_frame_size(card, ch)) == -1) + return; + + if (!netif_carrier_ok(dev)) { + /* DCD must be OFF: drop packet */ + printk("%s : DCD is OFF - drop %d rx bytes\n", dev->name, rxb); + skb = NULL; + } else { + if (rxb > (dev->mtu + 40)) { /* add headers */ + printk("%s : MTU exceeded %d\n", dev->name, rxb); + skb = NULL; + } else { + skb = dev_alloc_skb(rxb); + if (skb == NULL) { + printk("%s: Memory squeeze!!\n", dev->name); + return; + } + skb->dev = dev; + } + } + + if (((rxb = dma_buf_read(card, ch, skb)) <= 0) || (skb == NULL)) { +#ifdef PC300_DEBUG_RX + printk("%s: rxb = %x\n", dev->name, rxb); +#endif + if ((skb == NULL) && (rxb > 0)) 
{ + /* rxb > dev->mtu */ + stats->rx_errors++; + stats->rx_length_errors++; + continue; + } + + if (rxb < 0) { /* Invalid frame */ + rxb = -rxb; + if (rxb & DST_OVR) { + stats->rx_errors++; + stats->rx_fifo_errors++; + } + if (rxb & DST_CRC) { + stats->rx_errors++; + stats->rx_crc_errors++; + } + if (rxb & (DST_RBIT | DST_SHRT | DST_ABT)) { + stats->rx_errors++; + stats->rx_frame_errors++; + } + } + if (skb) { + dev_kfree_skb_irq(skb); + } + continue; + } + + stats->rx_bytes += rxb; + +#ifdef PC300_DEBUG_RX + printk("%s R:", dev->name); + for (i = 0; i < skb->len; i++) + printk(" %02x", *(skb->data + i)); + printk("\n"); +#endif + if (d->trace_on) { + cpc_trace(dev, skb, 'R'); + } + stats->rx_packets++; + skb->mac.raw = skb->data; + skb->protocol = hdlc_type_trans(skb, dev); + netif_rx(skb); + } +} + +/************************************/ +/*** PC300 Interrupt Routines ***/ +/************************************/ +static void sca_tx_intr(pc300dev_t *dev) +{ + pc300ch_t *chan = (pc300ch_t *)dev->chan; + pc300_t *card = (pc300_t *)chan->card; + int ch = chan->channel; + volatile pcsca_bd_t * ptdescr; + struct net_device_stats *stats = &dev->hdlc->stats; + + /* Clean up descriptors from previous transmission */ + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + TX_BD_ADDR(ch,chan->tx_first_bd)); + while ((cpc_readl(card->hw.scabase + DTX_REG(CDAL,ch)) != + TX_BD_ADDR(ch,chan->tx_first_bd)) && + (cpc_readb(&ptdescr->status) & DST_OSB)) { + stats->tx_packets++; + stats->tx_bytes += cpc_readw(&ptdescr->len); + cpc_writeb(&ptdescr->status, DST_OSB); + cpc_writew(&ptdescr->len, 0); + chan->nfree_tx_bd++; + chan->tx_first_bd = (chan->tx_first_bd + 1) & (N_DMA_TX_BUF - 1); + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + TX_BD_ADDR(ch,chan->tx_first_bd)); + } + +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto == PC300_PROTO_MLPPP) { + cpc_tty_trigger_poll(dev); + } else { +#endif + /* Tell the upper layer we are ready to transmit more packets */ + netif_wake_queue((struct net_device*)dev->hdlc); +#ifdef CONFIG_PC300_MLPPP + } +#endif +} + +static void sca_intr(pc300_t * card) +{ + uclong scabase = card->hw.scabase; + volatile uclong status; + int ch; + int intr_count = 0; + unsigned char dsr_rx; + + while ((status = cpc_readl(scabase + ISR0)) != 0) { + for (ch = 0; ch < card->hw.nchan; ch++) { + pc300ch_t *chan = &card->chan[ch]; + pc300dev_t *d = &chan->d; + hdlc_device *hdlc = d->hdlc; + struct net_device *dev = hdlc_to_dev(hdlc); + + spin_lock(&card->card_lock); + + /**** Reception ****/ + if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { + ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch)); + + /* Clear RX interrupts */ + cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); + +#ifdef PC300_DEBUG_INTR + printk ("sca_intr: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n", + ch, status, drx_stat); +#endif + if (status & IR0_DRX(IR0_DMIA, ch)) { + if (drx_stat & DSR_BOF) { +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto == PC300_PROTO_MLPPP) { + /* verify if driver is TTY */ + if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) { + rx_dma_stop(card, ch); + } + cpc_tty_receive(d); + rx_dma_start(card, ch); + } else +#endif + { + if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) { + rx_dma_stop(card, ch); + } + cpc_net_rx(hdlc); + /* Discard invalid frames */ + hdlc->stats.rx_errors++; + hdlc->stats.rx_over_errors++; + chan->rx_first_bd = 0; + chan->rx_last_bd = N_DMA_RX_BUF - 1; + rx_dma_start(card, ch); + } + } + } + if (status & IR0_DRX(IR0_DMIB, ch)) { + if (drx_stat & DSR_EOM) { + if (card->hw.type == 
PC300_TE) { + cpc_writeb(card->hw.falcbase + + card->hw.cpld_reg2, + cpc_readb (card->hw.falcbase + + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED1 << (2 * ch))); + } +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto == PC300_PROTO_MLPPP) { + /* verify if driver is TTY */ + cpc_tty_receive(d); + } else { + cpc_net_rx(hdlc); + } +#else + cpc_net_rx(hdlc); +#endif + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + + card->hw.cpld_reg2, + cpc_readb (card->hw.falcbase + + card->hw.cpld_reg2) & + ~ (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + } + } + if (!(dsr_rx = cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) { + +printk("%s: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x, dsr2=0x%02x)\n", + dev->name, ch, status, drx_stat, dsr_rx); + cpc_writeb(scabase + DSR_RX(ch), (dsr_rx | DSR_DE) & 0xfe); + } + } + + /**** Transmission ****/ + if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { + ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch)); + + /* Clear TX interrupts */ + cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); + +#ifdef PC300_DEBUG_INTR + printk ("sca_intr: TX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n", + ch, status, dtx_stat); +#endif + if (status & IR0_DTX(IR0_EFT, ch)) { + if (dtx_stat & DSR_UDRF) { + if (cpc_readb (scabase + M_REG(TBN, ch)) != 0) { + cpc_writeb(scabase + M_REG(CMD,ch), CMD_TX_BUF_CLR); + } + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb (card->hw.falcbase + + card->hw.cpld_reg2) & + ~ (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + hdlc->stats.tx_errors++; + hdlc->stats.tx_fifo_errors++; + sca_tx_intr(d); + } + } + if (status & IR0_DTX(IR0_DMIA, ch)) { + if (dtx_stat & DSR_BOF) { + } + } + if (status & IR0_DTX(IR0_DMIB, ch)) { + if (dtx_stat & DSR_EOM) { + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb (card->hw.falcbase + + card->hw.cpld_reg2) & + ~ (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + sca_tx_intr(d); + } + } + } + + /**** MSCI ****/ + if (status & IR0_M(IR0_RXINTA, ch)) { + ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch)); + + /* Clear MSCI interrupts */ + cpc_writeb(scabase + M_REG(ST1, ch), st1); + +#ifdef PC300_DEBUG_INTR + printk("sca_intr: MSCI intr chan[%d] (st=0x%08lx, st1=0x%02x)\n", + ch, status, st1); +#endif + if (st1 & ST1_CDCD) { /* DCD changed */ + if (cpc_readb(scabase + M_REG(ST3, ch)) & ST3_DCD) { + printk ("%s: DCD is OFF. Going administrative down.\n", + dev->name); +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto != PC300_PROTO_MLPPP) { + netif_carrier_off(dev); + } +#else + netif_carrier_off(dev); + +#endif + card->chan[ch].d.line_off++; + } else { /* DCD = 1 */ + printk ("%s: DCD is ON. Going administrative up.\n", + dev->name); +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto != PC300_PROTO_MLPPP) + /* verify if driver is not TTY */ +#endif + netif_carrier_on(dev); + card->chan[ch].d.line_on++; + } + } + } + spin_unlock(&card->card_lock); + } + if (++intr_count == 10) + /* Too much work at this board. 
Force exit */ + break; + } +} + +static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) && + !pfalc->loop_gen) { + if (frs1 & FRS1_LLBDD) { + // A Line Loop Back Deactivation signal detected + if (pfalc->loop_active) { + falc_remote_loop(card, ch, 0); + } + } else { + if ((frs1 & FRS1_LLBAD) && + ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) { + // A Line Loop Back Activation signal detected + if (!pfalc->loop_active) { + falc_remote_loop(card, ch, 1); + } + } + } + } +} + +static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + + if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) && + !pfalc->loop_gen) { + if (rsp & RSP_LLBDD) { + // A Line Loop Back Deactivation signal detected + if (pfalc->loop_active) { + falc_remote_loop(card, ch, 0); + } + } else { + if ((rsp & RSP_LLBAD) && + ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) { + // A Line Loop Back Activation signal detected + if (!pfalc->loop_active) { + falc_remote_loop(card, ch, 1); + } + } + } + } +} + +static void falc_t1_intr(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar isr0, isr3, gis; + ucchar dummy; + + while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { + if (gis & GIS_ISR0) { + isr0 = cpc_readb(falcbase + F_REG(FISR0, ch)); + if (isr0 & FISR0_PDEN) { + /* Read the bit to clear the situation */ + if (cpc_readb(falcbase + F_REG(FRS1, ch)) & + FRS1_PDEN) { + pfalc->pden++; + } + } + } + + if (gis & GIS_ISR1) { + dummy = cpc_readb(falcbase + F_REG(FISR1, ch)); + } + + if (gis & GIS_ISR2) { + dummy = cpc_readb(falcbase + F_REG(FISR2, ch)); + } + + if (gis & GIS_ISR3) { + isr3 = cpc_readb(falcbase + F_REG(FISR3, ch)); + if (isr3 & FISR3_SEC) { + pfalc->sec++; + falc_update_stats(card, ch); + falc_check_status(card, ch, + cpc_readb(falcbase + F_REG(FRS0, ch))); + } + if (isr3 & FISR3_ES) { + pfalc->es++; + } + if (isr3 & FISR3_LLBSC) { + falc_t1_loop_detection(card, ch, + cpc_readb(falcbase + F_REG(FRS1, ch))); + } + } + } +} + +static void falc_e1_intr(pc300_t * card, int ch) +{ + pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong falcbase = card->hw.falcbase; + ucchar isr1, isr2, isr3, gis, rsp; + ucchar dummy; + + while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { + rsp = cpc_readb(falcbase + F_REG(RSP, ch)); + + if (gis & GIS_ISR0) { + dummy = cpc_readb(falcbase + F_REG(FISR0, ch)); + } + if (gis & GIS_ISR1) { + isr1 = cpc_readb(falcbase + F_REG(FISR1, ch)); + if (isr1 & FISR1_XMB) { + if ((pfalc->xmb_cause & 2) + && pfalc->multiframe_mode) { + if (cpc_readb (falcbase + F_REG(FRS0, ch)) & + (FRS0_LOS | FRS0_AIS | FRS0_LFA)) { + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) + & ~XSP_AXS); + } else { + cpc_writeb(falcbase + F_REG(XSP, ch), + cpc_readb(falcbase + F_REG(XSP, ch)) + | XSP_AXS); + } + } + pfalc->xmb_cause = 0; + cpc_writeb(falcbase + F_REG(IMR1, ch), + cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_XMB); + } + if (isr1 & FISR1_LLBSC) { + falc_e1_loop_detection(card, ch, rsp); + } + } + if 
(gis & GIS_ISR2) { + isr2 = cpc_readb(falcbase + F_REG(FISR2, ch)); + if (isr2 & FISR2_T400MS) { + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XRA); + } + if (isr2 & FISR2_MFAR) { + cpc_writeb(falcbase + F_REG(XSW, ch), + cpc_readb(falcbase + F_REG(XSW, ch)) & ~XSW_XRA); + } + if (isr2 & (FISR2_FAR | FISR2_LFA | FISR2_AIS | FISR2_LOS)) { + pfalc->xmb_cause |= 2; + cpc_writeb(falcbase + F_REG(IMR1, ch), + cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_XMB); + } + } + if (gis & GIS_ISR3) { + isr3 = cpc_readb(falcbase + F_REG(FISR3, ch)); + if (isr3 & FISR3_SEC) { + pfalc->sec++; + falc_update_stats(card, ch); + falc_check_status(card, ch, + cpc_readb(falcbase + F_REG(FRS0, ch))); + } + if (isr3 & FISR3_ES) { + pfalc->es++; + } + } + } +} + +static void falc_intr(pc300_t * card) +{ + int ch; + + for (ch = 0; ch < card->hw.nchan; ch++) { + pc300ch_t *chan = &card->chan[ch]; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + + if (conf->media == IF_IFACE_T1) { + falc_t1_intr(card, ch); + } else { + falc_e1_intr(card, ch); + } + } +} + +static void cpc_intr(int irq, void *dev_id, struct pt_regs *regs) +{ + pc300_t *card; + volatile ucchar plx_status; + + if ((card = (pc300_t *) dev_id) == 0) { +#ifdef PC300_DEBUG_INTR + printk("cpc_intr: spurious intr %d\n", irq); +#endif + return; /* spurious intr */ + } + + if (card->hw.rambase == 0) { +#ifdef PC300_DEBUG_INTR + printk("cpc_intr: spurious intr2 %d\n", irq); +#endif + return; /* spurious intr */ + } + + switch (card->hw.type) { + case PC300_RSV: + case PC300_X21: + sca_intr(card); + break; + + case PC300_TE: + while ( (plx_status = (cpc_readb(card->hw.plxbase + card->hw.intctl_reg) & + (PLX_9050_LINT1_STATUS | PLX_9050_LINT2_STATUS))) != 0) { + if (plx_status & PLX_9050_LINT1_STATUS) { /* SCA Interrupt */ + sca_intr(card); + } + if (plx_status & PLX_9050_LINT2_STATUS) { /* FALC Interrupt */ + falc_intr(card); + } + } + break; + } +} + +void cpc_sca_status(pc300_t * card, int ch) +{ + ucchar ilar; + uclong scabase = card->hw.scabase; + uclong flags; + + tx_dma_buf_check(card, ch); + rx_dma_buf_check(card, ch); + ilar = cpc_readb(scabase + ILAR); + printk ("ILAR=0x%02x, WCRL=0x%02x, PCR=0x%02x, BTCR=0x%02x, BOLR=0x%02x\n", + ilar, cpc_readb(scabase + WCRL), cpc_readb(scabase + PCR), + cpc_readb(scabase + BTCR), cpc_readb(scabase + BOLR)); + printk("TX_CDA=0x%08lx, TX_EDA=0x%08lx\n", + (uclong) cpc_readl(scabase + DTX_REG(CDAL, ch)), + (uclong) cpc_readl(scabase + DTX_REG(EDAL, ch))); + printk("RX_CDA=0x%08lx, RX_EDA=0x%08lx, BFL=0x%04x\n", + (uclong) cpc_readl(scabase + DRX_REG(CDAL, ch)), + (uclong) cpc_readl(scabase + DRX_REG(EDAL, ch)), + cpc_readw(scabase + DRX_REG(BFLL, ch))); + printk("DMER=0x%02x, DSR_TX=0x%02x, DSR_RX=0x%02x\n", + cpc_readb(scabase + DMER), cpc_readb(scabase + DSR_TX(ch)), + cpc_readb(scabase + DSR_RX(ch))); + printk("DMR_TX=0x%02x, DMR_RX=0x%02x, DIR_TX=0x%02x, DIR_RX=0x%02x\n", + cpc_readb(scabase + DMR_TX(ch)), cpc_readb(scabase + DMR_RX(ch)), + cpc_readb(scabase + DIR_TX(ch)), + cpc_readb(scabase + DIR_RX(ch))); + printk("DCR_TX=0x%02x, DCR_RX=0x%02x, FCT_TX=0x%02x, FCT_RX=0x%02x\n", + cpc_readb(scabase + DCR_TX(ch)), cpc_readb(scabase + DCR_RX(ch)), + cpc_readb(scabase + FCT_TX(ch)), + cpc_readb(scabase + FCT_RX(ch))); + printk("MD0=0x%02x, MD1=0x%02x, MD2=0x%02x, MD3=0x%02x, IDL=0x%02x\n", + cpc_readb(scabase + M_REG(MD0, ch)), + cpc_readb(scabase + M_REG(MD1, ch)), + cpc_readb(scabase + M_REG(MD2, ch)), + cpc_readb(scabase + M_REG(MD3, ch)), + cpc_readb(scabase + 
M_REG(IDL, ch))); + printk("CMD=0x%02x, SA0=0x%02x, SA1=0x%02x, TFN=0x%02x, CTL=0x%02x\n", + cpc_readb(scabase + M_REG(CMD, ch)), + cpc_readb(scabase + M_REG(SA0, ch)), + cpc_readb(scabase + M_REG(SA1, ch)), + cpc_readb(scabase + M_REG(TFN, ch)), + cpc_readb(scabase + M_REG(CTL, ch))); + printk("ST0=0x%02x, ST1=0x%02x, ST2=0x%02x, ST3=0x%02x, ST4=0x%02x\n", + cpc_readb(scabase + M_REG(ST0, ch)), + cpc_readb(scabase + M_REG(ST1, ch)), + cpc_readb(scabase + M_REG(ST2, ch)), + cpc_readb(scabase + M_REG(ST3, ch)), + cpc_readb(scabase + M_REG(ST4, ch))); + printk ("CST0=0x%02x, CST1=0x%02x, CST2=0x%02x, CST3=0x%02x, FST=0x%02x\n", + cpc_readb(scabase + M_REG(CST0, ch)), + cpc_readb(scabase + M_REG(CST1, ch)), + cpc_readb(scabase + M_REG(CST2, ch)), + cpc_readb(scabase + M_REG(CST3, ch)), + cpc_readb(scabase + M_REG(FST, ch))); + printk("TRC0=0x%02x, TRC1=0x%02x, RRC=0x%02x, TBN=0x%02x, RBN=0x%02x\n", + cpc_readb(scabase + M_REG(TRC0, ch)), + cpc_readb(scabase + M_REG(TRC1, ch)), + cpc_readb(scabase + M_REG(RRC, ch)), + cpc_readb(scabase + M_REG(TBN, ch)), + cpc_readb(scabase + M_REG(RBN, ch))); + printk("TFS=0x%02x, TNR0=0x%02x, TNR1=0x%02x, RNR=0x%02x\n", + cpc_readb(scabase + M_REG(TFS, ch)), + cpc_readb(scabase + M_REG(TNR0, ch)), + cpc_readb(scabase + M_REG(TNR1, ch)), + cpc_readb(scabase + M_REG(RNR, ch))); + printk("TCR=0x%02x, RCR=0x%02x, TNR1=0x%02x, RNR=0x%02x\n", + cpc_readb(scabase + M_REG(TCR, ch)), + cpc_readb(scabase + M_REG(RCR, ch)), + cpc_readb(scabase + M_REG(TNR1, ch)), + cpc_readb(scabase + M_REG(RNR, ch))); + printk("TXS=0x%02x, RXS=0x%02x, EXS=0x%02x, TMCT=0x%02x, TMCR=0x%02x\n", + cpc_readb(scabase + M_REG(TXS, ch)), + cpc_readb(scabase + M_REG(RXS, ch)), + cpc_readb(scabase + M_REG(EXS, ch)), + cpc_readb(scabase + M_REG(TMCT, ch)), + cpc_readb(scabase + M_REG(TMCR, ch))); + printk("IE0=0x%02x, IE1=0x%02x, IE2=0x%02x, IE4=0x%02x, FIE=0x%02x\n", + cpc_readb(scabase + M_REG(IE0, ch)), + cpc_readb(scabase + M_REG(IE1, ch)), + cpc_readb(scabase + M_REG(IE2, ch)), + cpc_readb(scabase + M_REG(IE4, ch)), + cpc_readb(scabase + M_REG(FIE, ch))); + printk("IER0=0x%08lx\n", (uclong) cpc_readl(scabase + IER0)); + + if (ilar != 0) { + CPC_LOCK(card, flags); + cpc_writeb(scabase + ILAR, ilar); + cpc_writeb(scabase + DMER, 0x80); + CPC_UNLOCK(card, flags); + } +} + +void cpc_falc_status(pc300_t * card, int ch) +{ + pc300ch_t *chan = &card->chan[ch]; + falc_t *pfalc = (falc_t *) & chan->falc; + uclong flags; + + CPC_LOCK(card, flags); + printk("CH%d: %s %s %d channels\n", + ch, (pfalc->sync ? "SYNC" : ""), (pfalc->active ? "ACTIVE" : ""), + pfalc->num_channels); + + printk(" pden=%d, los=%d, losr=%d, lfa=%d, farec=%d\n", + pfalc->pden, pfalc->los, pfalc->losr, pfalc->lfa, pfalc->farec); + printk(" lmfa=%d, ais=%d, sec=%d, es=%d, rai=%d\n", + pfalc->lmfa, pfalc->ais, pfalc->sec, pfalc->es, pfalc->rai); + printk(" bec=%d, fec=%d, cvc=%d, cec=%d, ebc=%d\n", + pfalc->bec, pfalc->fec, pfalc->cvc, pfalc->cec, pfalc->ebc); + + printk("\n"); + printk(" STATUS: %s %s %s %s %s %s\n", + (pfalc->red_alarm ? "RED" : ""), + (pfalc->blue_alarm ? "BLU" : ""), + (pfalc->yellow_alarm ? "YEL" : ""), + (pfalc->loss_fa ? "LFA" : ""), + (pfalc->loss_mfa ? "LMF" : ""), (pfalc->prbs ? 
"PRB" : "")); + CPC_UNLOCK(card, flags); +} + +int cpc_change_mtu(struct net_device *dev, int new_mtu) +{ + if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU)) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + pc300conf_t conf_aux; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + int ch = chan->channel; + void *arg = (void *) ifr->ifr_data; + uclong scabase = card->hw.scabase; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + switch (cmd) { + case SIOCGPC300CONF: +#ifdef CONFIG_PC300_MLPPP + if (conf->proto != PC300_PROTO_MLPPP) { + conf->proto = hdlc->proto; + } +#else + conf->proto = hdlc->proto; +#endif + memcpy(&conf_aux.conf, conf, sizeof(pc300chconf_t)); + memcpy(&conf_aux.hw, &card->hw, sizeof(pc300hw_t)); + if (!arg || + copy_to_user(arg, &conf_aux, sizeof(pc300conf_t))) + return -EINVAL; + return 0; + case SIOCSPC300CONF: + if (!suser()) + return -EPERM; + if (!arg || + copy_from_user(&conf_aux.conf, arg, sizeof(pc300chconf_t))) + return -EINVAL; + if (card->hw.cpld_id < 0x02 && + conf_aux.conf.fr_mode == PC300_FR_UNFRAMED) { + /* CPLD_ID < 0x02 doesn't support Unframed E1 */ + return -EINVAL; + } +#ifdef CONFIG_PC300_MLPPP + if (conf_aux.conf.proto == PC300_PROTO_MLPPP) { + if (conf->proto != PC300_PROTO_MLPPP) { + memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t)); + cpc_tty_init(d); /* init TTY driver */ + } + } else { + if (conf_aux.conf.proto == 0xffff) { + if (conf->proto == PC300_PROTO_MLPPP){ + /* ifdown interface */ + cpc_close(dev); + } + } else { + memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t)); + hdlc->proto = conf->proto; + } + } +#else + memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t)); + hdlc->proto = conf->proto; +#endif + return 0; + case SIOCGPC300STATUS: + cpc_sca_status(card, ch); + return 0; + case SIOCGPC300FALCSTATUS: + cpc_falc_status(card, ch); + return 0; + + case SIOCGPC300UTILSTATS: + { + if (!arg) { /* clear statistics */ + memset(&hdlc->stats, 0, sizeof(struct net_device_stats)); + if (card->hw.type == PC300_TE) { + memset(&chan->falc, 0, sizeof(falc_t)); + } + } else { + pc300stats_t pc300stats; + + memset(&pc300stats, 0, sizeof(pc300stats_t)); + pc300stats.hw_type = card->hw.type; + pc300stats.line_on = card->chan[ch].d.line_on; + pc300stats.line_off = card->chan[ch].d.line_off; + memcpy(&pc300stats.gen_stats, &hdlc->stats, + sizeof(struct net_device_stats)); + if (card->hw.type == PC300_TE) + memcpy(&pc300stats.te_stats,&chan->falc,sizeof(falc_t)); + copy_to_user(arg, &pc300stats, sizeof(pc300stats_t)); + } + return 0; + } + + case SIOCGPC300UTILSTATUS: + { + struct pc300status pc300status; + + pc300status.hw_type = card->hw.type; + if (card->hw.type == PC300_TE) { + pc300status.te_status.sync = chan->falc.sync; + pc300status.te_status.red_alarm = chan->falc.red_alarm; + pc300status.te_status.blue_alarm = chan->falc.blue_alarm; + pc300status.te_status.loss_fa = chan->falc.loss_fa; + pc300status.te_status.yellow_alarm =chan->falc.yellow_alarm; + pc300status.te_status.loss_mfa = chan->falc.loss_mfa; + pc300status.te_status.prbs = chan->falc.prbs; + } else { + pc300status.gen_status.dcd = + !(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_DCD); + pc300status.gen_status.cts = + !(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_CTS); + pc300status.gen_status.rts = + !(cpc_readb (scabase + M_REG(CTL, 
ch)) & CTL_RTS); + pc300status.gen_status.dtr = + !(cpc_readb (scabase + M_REG(CTL, ch)) & CTL_DTR); + /* There is no DSR in HD64572 */ + } + if (!arg + || copy_to_user(arg, &pc300status, sizeof(pc300status_t))) + return -EINVAL; + return 0; + } + + case SIOCSPC300TRACE: + /* Sets/resets a trace_flag for the respective device */ + if (!arg || copy_from_user(&d->trace_on, arg,sizeof(unsigned char))) + return -EINVAL; + return 0; + + case SIOCSPC300LOOPBACK: + { + struct pc300loopback pc300loop; + + /* TE boards only */ + if (card->hw.type != PC300_TE) + return -EINVAL; + + if (!arg || + copy_from_user(&pc300loop, arg, sizeof(pc300loopback_t))) + return -EINVAL; + switch (pc300loop.loop_type) { + case PC300LOCLOOP: /* Turn the local loop on/off */ + falc_local_loop(card, ch, pc300loop.loop_on); + return 0; + + case PC300REMLOOP: /* Turn the remote loop on/off */ + falc_remote_loop(card, ch, pc300loop.loop_on); + return 0; + + case PC300PAYLOADLOOP: /* Turn the payload loop on/off */ + falc_payload_loop(card, ch, pc300loop.loop_on); + return 0; + + case PC300GENLOOPUP: /* Generate loop UP */ + if (pc300loop.loop_on) { + falc_generate_loop_up_code (card, ch); + } else { + turn_off_xlu(card, ch); + } + return 0; + + case PC300GENLOOPDOWN: /* Generate loop DOWN */ + if (pc300loop.loop_on) { + falc_generate_loop_down_code (card, ch); + } else { + turn_off_xld(card, ch); + } + return 0; + + default: + return -EINVAL; + } + } + + case SIOCSPC300PATTERNTEST: + /* Turn the pattern test on/off and show the errors counter */ + { + struct pc300patterntst pc300patrntst; + + /* TE boards only */ + if (card->hw.type != PC300_TE) + return -EINVAL; + + if (card->hw.cpld_id < 0x02) { + /* CPLD_ID < 0x02 doesn't support pattern test */ + return -EINVAL; + } + + if (!arg || + copy_from_user(&pc300patrntst,arg,sizeof(pc300patterntst_t))) + return -EINVAL; + if (pc300patrntst.patrntst_on == 2) { + if (chan->falc.prbs == 0) { + falc_pattern_test(card, ch, 1); + } + pc300patrntst.num_errors = + falc_pattern_test_error(card, ch); + if (!arg + || copy_to_user(arg, &pc300patrntst, + sizeof (pc300patterntst_t))) + return -EINVAL; + } else { + falc_pattern_test(card, ch, pc300patrntst.patrntst_on); + } + return 0; + } + + case SIOCWANDEV: + switch (ifr->ifr_settings.type) { + case IF_GET_IFACE: + { + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings *line = ifr->ifr_settings.ifs_ifsu.sync; + + ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; + if (ifr->ifr_settings.size == 0) { + return 0; //return interface type only + } + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(line, &conf->phys_settings, size)) + return -EFAULT; + return 0; + } + + case IF_IFACE_V35: + case IF_IFACE_V24: + case IF_IFACE_X21: + { + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings *line = ifr->ifr_settings.ifs_ifsu.sync; + + if (!capable(CAP_NET_ADMIN)) { + return -EPERM; + } + if (ifr->ifr_settings.size != size) { + return -ENOMEM; //incorrect data len + } + if (copy_from_user(&conf->phys_settings, line, size)) { + return -EFAULT; + } + if (conf->phys_settings.loopback) { + cpc_writeb(card->hw.scabase + M_REG(MD2, ch), + cpc_readb(card->hw.scabase + M_REG(MD2, ch)) | + MD2_LOOP_MIR); + } + conf->media = ifr->ifr_settings.type; + return 0; + } + + case IF_IFACE_T1: + case IF_IFACE_E1: + { + const size_t te_size = sizeof(te1_settings); + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings *line 
= ifr->ifr_settings.ifs_ifsu.sync; + + if (!capable(CAP_NET_ADMIN)) { + return -EPERM; + } + if (ifr->ifr_settings.size != te_size) { + return -ENOMEM; //incorrect data len + } + if (copy_from_user(&conf->phys_settings, line, size)) { + return -EFAULT; + }/* Ignoring HDLC slot_map for a while */ + if (conf->phys_settings.loopback) { + cpc_writeb(card->hw.scabase + M_REG(MD2, ch), + cpc_readb(card->hw.scabase + M_REG(MD2, ch)) | + MD2_LOOP_MIR); + } + conf->media = ifr->ifr_settings.type; + return 0; + } + + default: + return hdlc_ioctl(dev, ifr, cmd); + } + + default: + return hdlc_ioctl(dev, ifr, cmd); + } +} + +static struct net_device_stats *cpc_get_stats(struct net_device *dev) +{ + pc300dev_t *d = (pc300dev_t *) dev->priv; + + if (d) + return &d->hdlc->stats; + else + return NULL; +} + +static int clock_rate_calc(uclong rate, uclong clock, int *br_io) +{ + int br, tc; + int br_pwr, error; + + if (rate == 0) + return (0); + + for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) { + if ((tc = clock / br_pwr / rate) <= 0xff) { + *br_io = br; + break; + } + } + + if (tc <= 0xff) { + error = ((rate - (clock / br_pwr / rate)) / rate) * 1000; + /* Errors bigger than +/- 1% won't be tolerated */ + if (error < -10 || error > 10) + return (-1); + else + return (tc); + } else { + return (-1); + } +} + +int ch_config(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; + pc300_t *card = (pc300_t *) chan->card; + uclong scabase = card->hw.scabase; + uclong plxbase = card->hw.plxbase; + int ch = chan->channel; + uclong clkrate = chan->conf.phys_settings.clock_rate; + uclong clktype = chan->conf.phys_settings.clock_type; + ucshort encoding = chan->conf.proto_settings.encoding; + ucshort parity = chan->conf.proto_settings.parity; + int tmc, br; + ucchar md0, md2; + + /* Reset the channel */ + cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); + + /* Configure the SCA registers */ + switch (parity) { + case PARITY_NONE: + md0 = MD0_BIT_SYNC; + break; + case PARITY_CRC16_PR0: + md0 = MD0_CRC16_0|MD0_CRCC0|MD0_BIT_SYNC; + break; + case PARITY_CRC16_PR1: + md0 = MD0_CRC16_1|MD0_CRCC0|MD0_BIT_SYNC; + break; + case PARITY_CRC32_PR1_CCITT: + md0 = MD0_CRC32|MD0_CRCC0|MD0_BIT_SYNC; + break; + case PARITY_CRC16_PR1_CCITT: + default: + md0 = MD0_CRC_CCITT|MD0_CRCC0|MD0_BIT_SYNC; + break; + } + switch (encoding) { + case ENCODING_NRZI: + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZI; + break; + case ENCODING_FM_MARK: /* FM1 */ + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM1; + break; + case ENCODING_FM_SPACE: /* FM0 */ + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM0; + break; + case ENCODING_MANCHESTER: /* It's not working... 
*/ + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_MANCH; + break; + case ENCODING_NRZ: + default: + md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZ; + break; + } + cpc_writeb(scabase + M_REG(MD0, ch), md0); + cpc_writeb(scabase + M_REG(MD1, ch), 0); + cpc_writeb(scabase + M_REG(MD2, ch), md2); + cpc_writeb(scabase + M_REG(IDL, ch), 0x7e); + cpc_writeb(scabase + M_REG(CTL, ch), CTL_URSKP | CTL_IDLC); + + /* Configure HW media */ + switch (card->hw.type) { + case PC300_RSV: + if (conf->media == IF_IFACE_V35) { + cpc_writel((plxbase + card->hw.gpioc_reg), + cpc_readl(plxbase + card->hw.gpioc_reg) | PC300_CHMEDIA_MASK(ch)); + } else { + cpc_writel((plxbase + card->hw.gpioc_reg), + cpc_readl(plxbase + card->hw.gpioc_reg) & ~PC300_CHMEDIA_MASK(ch)); + } + break; + + case PC300_X21: + break; + + case PC300_TE: + te_config(card, ch); + break; + } + + switch (card->hw.type) { + case PC300_RSV: + case PC300_X21: + if (clktype == CLOCK_INT || clktype == CLOCK_TXINT) { + /* Calculate the clkrate parameters */ + tmc = clock_rate_calc(clkrate, card->hw.clock, &br); + cpc_writeb(scabase + M_REG(TMCT, ch), tmc); + cpc_writeb(scabase + M_REG(TXS, ch), + (TXS_DTRXC | TXS_IBRG | br)); + if (clktype == CLOCK_INT) { + cpc_writeb(scabase + M_REG(TMCR, ch), tmc); + cpc_writeb(scabase + M_REG(RXS, ch), + (RXS_IBRG | br)); + } else { + cpc_writeb(scabase + M_REG(TMCR, ch), 1); + cpc_writeb(scabase + M_REG(RXS, ch), 0); + } + if (card->hw.type == PC300_X21) { + cpc_writeb(scabase + M_REG(GPO, ch), 1); + cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1); + } else { + cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1); + } + } else { + cpc_writeb(scabase + M_REG(TMCT, ch), 1); + if (clktype == CLOCK_EXT) { + cpc_writeb(scabase + M_REG(TXS, ch), + TXS_DTRXC); + } else { + cpc_writeb(scabase + M_REG(TXS, ch), + TXS_DTRXC|TXS_RCLK); + } + cpc_writeb(scabase + M_REG(TMCR, ch), 1); + cpc_writeb(scabase + M_REG(RXS, ch), 0); + if (card->hw.type == PC300_X21) { + cpc_writeb(scabase + M_REG(GPO, ch), 0); + cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1); + } else { + cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1); + } + } + break; + + case PC300_TE: + /* SCA always receives clock from the FALC chip */ + cpc_writeb(scabase + M_REG(TMCT, ch), 1); + cpc_writeb(scabase + M_REG(TXS, ch), 0); + cpc_writeb(scabase + M_REG(TMCR, ch), 1); + cpc_writeb(scabase + M_REG(RXS, ch), 0); + cpc_writeb(scabase + M_REG(EXS, ch), 0); + break; + } + + /* Enable Interrupts */ + cpc_writel(scabase + IER0, + cpc_readl(scabase + IER0) | + IR0_M(IR0_RXINTA, ch) | + IR0_DRX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch) | + IR0_DTX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch)); + cpc_writeb(scabase + M_REG(IE0, ch), + cpc_readl(scabase + M_REG(IE0, ch)) | IE0_RXINTA); + cpc_writeb(scabase + M_REG(IE1, ch), + cpc_readl(scabase + M_REG(IE1, ch)) | IE1_CDCD); + + return 0; +} + +int rx_config(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + uclong scabase = card->hw.scabase; + int ch = chan->channel; + + cpc_writeb(scabase + DSR_RX(ch), 0); + + /* General RX settings */ + cpc_writeb(scabase + M_REG(RRC, ch), 0); + cpc_writeb(scabase + M_REG(RNR, ch), 16); + + /* Enable reception */ + cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_CRC_INIT); + cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_ENA); + + /* Initialize DMA stuff */ + chan->rx_first_bd = 0; + chan->rx_last_bd = N_DMA_RX_BUF - 1; + rx_dma_buf_init(card, ch); + cpc_writeb(scabase + DCR_RX(ch), DCR_FCT_CLR); + cpc_writeb(scabase + DMR_RX(ch), (DMR_TMOD | DMR_NF)); + 
cpc_writeb(scabase + DIR_RX(ch), (DIR_EOM | DIR_BOF)); + + /* Start DMA */ + rx_dma_start(card, ch); + + return 0; +} + +int tx_config(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + uclong scabase = card->hw.scabase; + int ch = chan->channel; + + cpc_writeb(scabase + DSR_TX(ch), 0); + + /* General TX settings */ + cpc_writeb(scabase + M_REG(TRC0, ch), 0); + cpc_writeb(scabase + M_REG(TFS, ch), 32); + cpc_writeb(scabase + M_REG(TNR0, ch), 20); + cpc_writeb(scabase + M_REG(TNR1, ch), 48); + cpc_writeb(scabase + M_REG(TCR, ch), 8); + + /* Enable transmission */ + cpc_writeb(scabase + M_REG(CMD, ch), CMD_TX_CRC_INIT); + + /* Initialize DMA stuff */ + chan->tx_first_bd = 0; + chan->tx_next_bd = 0; + tx_dma_buf_init(card, ch); + cpc_writeb(scabase + DCR_TX(ch), DCR_FCT_CLR); + cpc_writeb(scabase + DMR_TX(ch), (DMR_TMOD | DMR_NF)); + cpc_writeb(scabase + DIR_TX(ch), (DIR_EOM | DIR_BOF | DIR_UDRF)); + cpc_writel(scabase + DTX_REG(CDAL, ch), TX_BD_ADDR(ch, chan->tx_first_bd)); + cpc_writel(scabase + DTX_REG(EDAL, ch), TX_BD_ADDR(ch, chan->tx_next_bd)); + + return 0; +} + +static int cpc_attach(hdlc_device * hdlc, unsigned short encoding, + unsigned short parity) +{ + struct net_device * dev = hdlc_to_dev(hdlc); + pc300dev_t *d = (pc300dev_t *)dev->priv; + pc300ch_t *chan = (pc300ch_t *)d->chan; + pc300_t *card = (pc300_t *)chan->card; + pc300chconf_t *conf = (pc300chconf_t *)&chan->conf; + + if (card->hw.type == PC300_TE) { + if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI) { + return -EINVAL; + } + } else { + if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI && + encoding != ENCODING_FM_MARK && encoding != ENCODING_FM_SPACE) { + /* Driver doesn't support ENCODING_MANCHESTER yet */ + return -EINVAL; + } + } + + if (parity != PARITY_NONE && parity != PARITY_CRC16_PR0 && + parity != PARITY_CRC16_PR1 && parity != PARITY_CRC32_PR1_CCITT && + parity != PARITY_CRC16_PR1_CCITT) { + return -EINVAL; + } + + conf->proto_settings.encoding = encoding; + conf->proto_settings.parity = parity; + return 0; +} + +void cpc_opench(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + int ch = chan->channel; + uclong scabase = card->hw.scabase; + + ch_config(d); + + rx_config(d); + + tx_config(d); + + /* Assert RTS and DTR */ + cpc_writeb(scabase + M_REG(CTL, ch), + cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR)); +} + +void cpc_closech(pc300dev_t * d) +{ + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + falc_t *pfalc = (falc_t *) & chan->falc; + int ch = chan->channel; + + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_CH_RST); + rx_dma_stop(card, ch); + tx_dma_stop(card, ch); + + if (card->hw.type == PC300_TE) { + memset(pfalc, 0, sizeof(falc_t)); + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & + ~((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK | + CPLD_REG2_FALC_LED2) << (2 * ch))); + /* Reset the FALC chip */ + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) | + (CPLD_REG1_FALC_RESET << (2 * ch))); + udelay(10000); + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) & + ~(CPLD_REG1_FALC_RESET << (2 * ch))); + } +} + +int cpc_open(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + pc300dev_t *d = (pc300dev_t *) dev->priv; + struct ifreq ifr; + int result; 
+ +#ifdef PC300_DEBUG_OTHER + printk("pc300: cpc_open"); +#endif + + if (hdlc->proto == IF_PROTO_PPP) { + d->if_ptr = &hdlc->state.ppp.pppdev; + } + + result = hdlc_open(hdlc); + if (hdlc->proto == IF_PROTO_PPP) { + dev->priv = d; + } + if (result) { + return result; + } + + MOD_INC_USE_COUNT; + sprintf(ifr.ifr_name, "%s", dev->name); + cpc_opench(d); + netif_start_queue(dev); + return 0; +} + +int cpc_close(struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + pc300dev_t *d = (pc300dev_t *) dev->priv; + pc300ch_t *chan = (pc300ch_t *) d->chan; + pc300_t *card = (pc300_t *) chan->card; + uclong flags; + +#ifdef PC300_DEBUG_OTHER + printk("pc300: cpc_close"); +#endif + + netif_stop_queue(dev); + + CPC_LOCK(card, flags); + cpc_closech(d); + CPC_UNLOCK(card, flags); + + hdlc_close(hdlc); + if (hdlc->proto == IF_PROTO_PPP) { + d->if_ptr = NULL; + } +#ifdef CONFIG_PC300_MLPPP + if (chan->conf.proto == PC300_PROTO_MLPPP) { + cpc_tty_unregister_service(d); + chan->conf.proto = 0xffff; + } +#endif + + MOD_DEC_USE_COUNT; + return 0; +} + +static uclong detect_ram(pc300_t * card) +{ + uclong i; + ucchar data; + uclong rambase = card->hw.rambase; + + card->hw.ramsize = PC300_RAMSIZE; + /* Let's find out how much RAM is present on this board */ + for (i = 0; i < card->hw.ramsize; i++) { + data = (ucchar) (i & 0xff); + cpc_writeb(rambase + i, data); + if (cpc_readb(rambase + i) != data) { + break; + } + } + return (i); +} + +static void plx_init(pc300_t * card) +{ + struct RUNTIME_9050 *plx_ctl = (struct RUNTIME_9050 *) card->hw.plxbase; + + /* Reset PLX */ + cpc_writel(&plx_ctl->init_ctrl, + cpc_readl(&plx_ctl->init_ctrl) | 0x40000000); + udelay(10000L); + cpc_writel(&plx_ctl->init_ctrl, + cpc_readl(&plx_ctl->init_ctrl) & ~0x40000000); + + /* Reload Config. 
Registers from EEPROM */ + cpc_writel(&plx_ctl->init_ctrl, + cpc_readl(&plx_ctl->init_ctrl) | 0x20000000); + udelay(10000L); + cpc_writel(&plx_ctl->init_ctrl, + cpc_readl(&plx_ctl->init_ctrl) & ~0x20000000); + +} + +static inline void show_version(void) +{ + char *rcsvers, *rcsdate, *tmp; + + rcsvers = strchr(rcsid, ' '); + rcsvers++; + tmp = strchr(rcsvers, ' '); + *tmp++ = '\0'; + rcsdate = strchr(tmp, ' '); + rcsdate++; + tmp = strrchr(rcsdate, ' '); + *tmp = '\0'; + printk(KERN_INFO "Cyclades-PC300 driver %s %s (built %s %s)\n", + rcsvers, rcsdate, __DATE__, __TIME__); +} /* show_version */ + +static void cpc_init_card(pc300_t * card) +{ + int i, devcount = 0; + static int board_nbr = 1; + + /* Enable interrupts on the PCI bridge */ + plx_init(card); + cpc_writew(card->hw.plxbase + card->hw.intctl_reg, + cpc_readw(card->hw.plxbase + card->hw.intctl_reg) | 0x0040); + +#ifdef USE_PCI_CLOCK + /* Set board clock to PCI clock */ + cpc_writel(card->hw.plxbase + card->hw.gpioc_reg, + cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) | 0x00000004UL); + card->hw.clock = PC300_PCI_CLOCK; +#else + /* Set board clock to internal oscillator clock */ + cpc_writel(card->hw.plxbase + card->hw.gpioc_reg, + cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & ~0x00000004UL); + card->hw.clock = PC300_OSC_CLOCK; +#endif + + /* Detect actual on-board RAM size */ + card->hw.ramsize = detect_ram(card); + + /* Set Global SCA-II registers */ + cpc_writeb(card->hw.scabase + PCR, PCR_PR2); + cpc_writeb(card->hw.scabase + BTCR, 0x10); + cpc_writeb(card->hw.scabase + WCRL, 0); + cpc_writeb(card->hw.scabase + DMER, 0x80); + + if (card->hw.type == PC300_TE) { + ucchar reg1; + + /* Check CPLD version */ + reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); + cpc_writeb(card->hw.falcbase + CPLD_REG1, (reg1 + 0x5a)); + if (cpc_readb(card->hw.falcbase + CPLD_REG1) == reg1) { + /* New CPLD */ + card->hw.cpld_id = cpc_readb(card->hw.falcbase + CPLD_ID_REG); + card->hw.cpld_reg1 = CPLD_V2_REG1; + card->hw.cpld_reg2 = CPLD_V2_REG2; + } else { + /* old CPLD */ + card->hw.cpld_id = 0; + card->hw.cpld_reg1 = CPLD_REG1; + card->hw.cpld_reg2 = CPLD_REG2; + cpc_writeb(card->hw.falcbase + CPLD_REG1, reg1); + } + + /* Enable the board's global clock */ + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) | + CPLD_REG1_GLOBAL_CLK); + + } + + for (i = 0; i < card->hw.nchan; i++) { + pc300ch_t *chan = &card->chan[i]; + pc300dev_t *d = &chan->d; + hdlc_device *hdlc; + struct net_device *dev; + + chan->card = card; + chan->channel = i; + chan->conf.phys_settings.clock_rate = 0; + chan->conf.phys_settings.clock_type = CLOCK_EXT; + chan->conf.proto_settings.encoding = ENCODING_NRZ; + chan->conf.proto_settings.parity = PARITY_CRC16_PR1_CCITT; + switch (card->hw.type) { + case PC300_TE: + chan->conf.media = IF_IFACE_T1; + chan->conf.lcode = PC300_LC_B8ZS; + chan->conf.fr_mode = PC300_FR_ESF; + chan->conf.lbo = PC300_LBO_0_DB; + chan->conf.rx_sens = PC300_RX_SENS_SH; + chan->conf.tslot_bitmap = 0xffffffffUL; + break; + + case PC300_X21: + chan->conf.media = IF_IFACE_X21; + break; + + case PC300_RSV: + default: + chan->conf.media = IF_IFACE_V35; + break; + } + chan->conf.proto = IF_PROTO_PPP; + chan->tx_first_bd = 0; + chan->tx_next_bd = 0; + chan->rx_first_bd = 0; + chan->rx_last_bd = N_DMA_RX_BUF - 1; + chan->nfree_tx_bd = N_DMA_TX_BUF; + + d->chan = chan; + d->tx_skb = NULL; + d->trace_on = 0; + d->line_on = 0; + d->line_off = 0; + + d->hdlc = (hdlc_device *) kmalloc(sizeof(hdlc_device), 
GFP_KERNEL); + if (d->hdlc == NULL) + continue; + memset(d->hdlc, 0, sizeof(hdlc_device)); + + hdlc = d->hdlc; + hdlc->xmit = cpc_queue_xmit; + hdlc->attach = cpc_attach; + + dev = hdlc_to_dev(hdlc); + + dev->mem_start = card->hw.ramphys; + dev->mem_end = card->hw.ramphys + card->hw.ramsize - 1; + dev->irq = card->hw.irq; + dev->init = NULL; + dev->tx_queue_len = PC300_TX_QUEUE_LEN; + dev->mtu = PC300_DEF_MTU; + + dev->open = cpc_open; + dev->stop = cpc_close; + dev->tx_timeout = cpc_tx_timeout; + dev->watchdog_timeo = PC300_TX_TIMEOUT; + dev->get_stats = cpc_get_stats; + dev->set_multicast_list = NULL; + dev->set_mac_address = NULL; + dev->change_mtu = cpc_change_mtu; + dev->do_ioctl = cpc_ioctl; + + if (register_hdlc_device(hdlc) == 0) { + dev->priv = d; /* We need 'priv', hdlc doesn't */ + printk("%s: Cyclades-PC300/", dev->name); + switch (card->hw.type) { + case PC300_TE: + if (card->hw.bus == PC300_PMC) { + printk("TE-M"); + } else { + printk("TE "); + } + break; + + case PC300_X21: + printk("X21 "); + break; + + case PC300_RSV: + default: + printk("RSV "); + break; + } + printk (" #%d, %ldKB of RAM at 0x%08lx, IRQ%d, channel %d.\n", + board_nbr, card->hw.ramsize / 1024, + card->hw.ramphys, card->hw.irq, i + 1); + devcount++; + } else { + printk ("Dev%d on card(0x%08lx): unable to allocate i/f name.\n", + i + 1, card->hw.ramphys); + *(dev->name) = 0; + kfree(d->hdlc); + continue; + } + } + spin_lock_init(&card->card_lock); + + board_nbr++; +} + +static int __devinit +cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int first_time = 1; + ucchar cpc_rev_id; + int err = 0, eeprom_outdated = 0; + ucshort device_id; + pc300_t *card; + + if (first_time) { + first_time = 0; + show_version(); +#ifdef CONFIG_PC300_MLPPP + cpc_tty_reset_var(); +#endif + } + + card = (pc300_t *) kmalloc(sizeof(pc300_t), GFP_KERNEL); + if (card == NULL) { + printk("PC300 found at RAM 0x%08lx, " + "but could not allocate card structure.\n", + pci_resource_start(pdev, 3)); + return -ENOMEM; + } + memset(card, 0, sizeof(pc300_t)); + + /* read PCI configuration area */ + device_id = ent->device; + card->hw.irq = pdev->irq; + card->hw.iophys = pci_resource_start(pdev, 1); + card->hw.iosize = pci_resource_len(pdev, 1); + card->hw.scaphys = pci_resource_start(pdev, 2); + card->hw.scasize = pci_resource_len(pdev, 2); + card->hw.ramphys = pci_resource_start(pdev, 3); + card->hw.alloc_ramsize = pci_resource_len(pdev, 3); + card->hw.falcphys = pci_resource_start(pdev, 4); + card->hw.falcsize = pci_resource_len(pdev, 4); + card->hw.plxphys = pci_resource_start(pdev, 5); + card->hw.plxsize = pci_resource_len(pdev, 5); + pci_read_config_byte(pdev, PCI_REVISION_ID, &cpc_rev_id); + + switch (device_id) { + case PCI_DEVICE_ID_PC300_RX_1: + case PCI_DEVICE_ID_PC300_TE_1: + card->hw.nchan = 1; + break; + + case PCI_DEVICE_ID_PC300_RX_2: + case PCI_DEVICE_ID_PC300_TE_2: + default: + card->hw.nchan = PC300_MAXCHAN; + break; + } +#ifdef PC300_DEBUG_PCI + printk("cpc (bus=0x0%x,pci_id=0x%x,", pdev->bus->number, pdev->devfn); + printk("rev_id=%d) IRQ%d\n", cpc_rev_id, card->hw.irq); + printk("cpc:found ramaddr=0x%08lx plxaddr=0x%08lx " + "ctladdr=0x%08lx falcaddr=0x%08lx\n", + card->hw.ramphys, card->hw.plxphys, card->hw.scaphys, + card->hw.falcphys); +#endif + /* Although we don't use this I/O region, we should + * request it from the kernel anyway, to avoid problems + * with other drivers accessing it. 
*/ + if (!request_region(card->hw.iophys, card->hw.iosize, "PLX Registers")) { + /* In case we can't allocate it, warn user */ + printk("WARNING: couldn't allocate I/O region for PC300 board " + "at 0x%08lx!\n", card->hw.ramphys); + } + + if (card->hw.plxphys) { + pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, card->hw.plxphys); + } else { + eeprom_outdated = 1; + card->hw.plxphys = pci_resource_start(pdev, 0); + card->hw.plxsize = pci_resource_len(pdev, 0); + } + + if (!request_mem_region(card->hw.plxphys, card->hw.plxsize, + "PLX Registers")) { + printk("PC300 found at RAM 0x%08lx, " + "but could not allocate PLX mem region.\n", + card->hw.ramphys); + err = -ENODEV; + goto err_release_io; + } + if (!request_mem_region(card->hw.ramphys, card->hw.alloc_ramsize, + "On-board RAM")) { + printk("PC300 found at RAM 0x%08lx, " + "but could not allocate RAM mem region.\n", + card->hw.ramphys); + err = -ENODEV; + goto err_release_plx; + } + if (!request_mem_region(card->hw.scaphys, card->hw.scasize, + "SCA-II Registers")) { + printk("PC300 found at RAM 0x%08lx, " + "but could not allocate SCA mem region.\n", + card->hw.ramphys); + err = -ENODEV; + goto err_release_ram; + } + + if ((err = pci_enable_device(pdev)) != 0) + goto err_release_sca; + + card->hw.plxbase = (uclong) ioremap(card->hw.plxphys, card->hw.plxsize); + card->hw.rambase = (uclong) ioremap(card->hw.ramphys, + card->hw.alloc_ramsize); + card->hw.scabase = (uclong) ioremap(card->hw.scaphys, card->hw.scasize); + switch (device_id) { + case PCI_DEVICE_ID_PC300_TE_1: + case PCI_DEVICE_ID_PC300_TE_2: + request_mem_region(card->hw.falcphys, card->hw.falcsize, + "FALC Registers"); + card->hw.falcbase = (uclong) ioremap(card->hw.falcphys, + card->hw.falcsize); + break; + + case PCI_DEVICE_ID_PC300_RX_1: + case PCI_DEVICE_ID_PC300_RX_2: + default: + card->hw.falcbase = 0; + break; + } + +#ifdef PC300_DEBUG_PCI + printk("cpc: relocate ramaddr=0x%08lx plxaddr=0x%08lx " + "ctladdr=0x%08lx falcaddr=0x%08lx\n", + card->hw.rambase, card->hw.plxbase, card->hw.scabase, + card->hw.falcbase); +#endif + + /* Set PCI drv pointer to the card structure */ + pdev->driver_data = card; + + /* Set board type */ + switch (device_id) { + case PCI_DEVICE_ID_PC300_TE_1: + case PCI_DEVICE_ID_PC300_TE_2: + card->hw.type = PC300_TE; + card->hw.bus = PC300_PCI; + /* Set PLX register offsets */ + card->hw.gpioc_reg = 0x50; + card->hw.intctl_reg = 0x4c; + break; + case PCI_DEVICE_ID_PC300_RX_1: + case PCI_DEVICE_ID_PC300_RX_2: + default: + card->hw.bus = PC300_PCI; + /* Set PLX register offsets */ + card->hw.gpioc_reg = 0x50; + card->hw.intctl_reg = 0x4c; + + if ((cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & PC300_CTYPE_MASK)) { + card->hw.type = PC300_X21; + } else { + card->hw.type = PC300_RSV; + } + break; + } + + /* Allocate IRQ */ + if (request_irq(card->hw.irq, cpc_intr, SA_SHIRQ, "Cyclades-PC300", card)) { + printk ("PC300 found at RAM 0x%08lx, but could not allocate IRQ%d.\n", + card->hw.ramphys, card->hw.irq); + goto err_io_unmap; + } + + cpc_init_card(card); + + if (eeprom_outdated) + printk("WARNING: PC300 with outdated EEPROM.\n"); + return 0; + +err_io_unmap: + iounmap((void *) card->hw.plxbase); + iounmap((void *) card->hw.scabase); + iounmap((void *) card->hw.rambase); + if (card->hw.type == PC300_TE) { + iounmap((void *) card->hw.falcbase); + release_mem_region(card->hw.falcphys, card->hw.falcsize); + } +err_release_sca: + release_mem_region(card->hw.scaphys, card->hw.scasize); +err_release_ram: + release_mem_region(card->hw.ramphys, 
card->hw.alloc_ramsize); +err_release_plx: + release_mem_region(card->hw.plxphys, card->hw.plxsize); +err_release_io: + release_region(card->hw.iophys, card->hw.iosize); + kfree(card); + return -ENODEV; +} + +static void __devexit cpc_remove_one(struct pci_dev *pdev) +{ + pc300_t *card = (pc300_t *) pdev->driver_data; + + if (card->hw.rambase != 0) { + int i; + + /* Disable interrupts on the PCI bridge */ + cpc_writew(card->hw.plxbase + card->hw.intctl_reg, + cpc_readw(card->hw.plxbase + card->hw.intctl_reg) & ~(0x0040)); + + for (i = 0; i < card->hw.nchan; i++) { + unregister_hdlc_device(card->chan[i].d.hdlc); + } + iounmap((void *) card->hw.plxbase); + iounmap((void *) card->hw.scabase); + iounmap((void *) card->hw.rambase); + release_mem_region(card->hw.plxphys, card->hw.plxsize); + release_mem_region(card->hw.ramphys, card->hw.alloc_ramsize); + release_mem_region(card->hw.scaphys, card->hw.scasize); + release_region(card->hw.iophys, card->hw.iosize); + if (card->hw.type == PC300_TE) { + iounmap((void *) card->hw.falcbase); + release_mem_region(card->hw.falcphys, card->hw.falcsize); + } + if (card->hw.irq) + free_irq(card->hw.irq, card); + kfree(card); + } +} + +static struct pci_driver cpc_driver = { + .name = "pc300", + .id_table = cpc_pci_dev_id, + .probe = cpc_init_one, + .remove = __devexit_p(cpc_remove_one), +}; + +static int __init cpc_init(void) +{ + return pci_module_init(&cpc_driver); +} + +static void __exit cpc_cleanup_module(void) +{ + pci_unregister_driver(&cpc_driver); +} + +module_init(cpc_init); +module_exit(cpc_cleanup_module); + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,10) ) + MODULE_DESCRIPTION("Cyclades-PC300 cards driver"); + MODULE_AUTHOR( "Author: Ivan Passos \r\n" + "Maintainer: Henrique Gobbi + * + * Copyright: (c) 2000-2001 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#ifndef _FALC_LH_H +#define _FALC_LH_H + +#define NUM_OF_T1_CHANNELS 24 +#define NUM_OF_E1_CHANNELS 32 + +/*>>>>>>>>>>>>>>>>> FALC Register Bits (Transmit Mode) <<<<<<<<<<<<<<<<<<< */ + +/* CMDR (Command Register) + ---------------- E1 & T1 ------------------------------ */ +#define CMDR_RMC 0x80 +#define CMDR_RRES 0x40 +#define CMDR_XREP 0x20 +#define CMDR_XRES 0x10 +#define CMDR_XHF 0x08 +#define CMDR_XTF 0x04 +#define CMDR_XME 0x02 +#define CMDR_SRES 0x01 + +/* MODE (Mode Register) + ----------------- E1 & T1 ----------------------------- */ +#define MODE_MDS2 0x80 +#define MODE_MDS1 0x40 +#define MODE_MDS0 0x20 +#define MODE_BRAC 0x10 +#define MODE_HRAC 0x08 + +/* IPC (Interrupt Port Configuration) + ----------------- E1 & T1 ----------------------------- */ +#define IPC_VIS 0x80 +#define IPC_SCI 0x04 +#define IPC_IC1 0x02 +#define IPC_IC0 0x01 + +/* CCR1 (Common Configuration Register 1) + ----------------- E1 & T1 ----------------------------- */ +#define CCR1_SFLG 0x80 +#define CCR1_XTS16RA 0x40 +#define CCR1_BRM 0x40 +#define CCR1_CASSYM 0x20 +#define CCR1_EDLX 0x20 +#define CCR1_EITS 0x10 +#define CCR1_ITF 0x08 +#define CCR1_RFT1 0x02 +#define CCR1_RFT0 0x01 + +/* CCR3 (Common Configuration Register 3) + ---------------- E1 & T1 ------------------------------ */ + +#define CCR3_PRE1 0x80 +#define CCR3_PRE0 0x40 +#define CCR3_EPT 0x20 +#define CCR3_RADD 0x10 +#define CCR3_RCRC 0x04 +#define CCR3_XCRC 0x02 + + +/* RTR1-4 (Receive Timeslot Register 1-4) + ---------------- E1 & T1 ------------------------------ */ + +#define RTR1_TS0 0x80 +#define RTR1_TS1 0x40 +#define RTR1_TS2 0x20 +#define RTR1_TS3 0x10 +#define RTR1_TS4 0x08 +#define RTR1_TS5 0x04 +#define RTR1_TS6 0x02 +#define RTR1_TS7 0x01 + +#define RTR2_TS8 0x80 +#define RTR2_TS9 0x40 +#define RTR2_TS10 0x20 +#define RTR2_TS11 0x10 +#define RTR2_TS12 0x08 +#define RTR2_TS13 0x04 +#define RTR2_TS14 0x02 +#define RTR2_TS15 0x01 + +#define RTR3_TS16 0x80 +#define RTR3_TS17 0x40 +#define RTR3_TS18 0x20 +#define RTR3_TS19 0x10 +#define RTR3_TS20 0x08 +#define RTR3_TS21 0x04 +#define RTR3_TS22 0x02 +#define RTR3_TS23 0x01 + +#define RTR4_TS24 0x80 +#define RTR4_TS25 0x40 +#define RTR4_TS26 0x20 +#define RTR4_TS27 0x10 +#define RTR4_TS28 0x08 +#define RTR4_TS29 0x04 +#define RTR4_TS30 0x02 +#define RTR4_TS31 0x01 + + +/* TTR1-4 (Transmit Timeslot Register 1-4) + ---------------- E1 & T1 ------------------------------ */ + +#define TTR1_TS0 0x80 +#define TTR1_TS1 0x40 +#define TTR1_TS2 0x20 +#define TTR1_TS3 0x10 +#define TTR1_TS4 0x08 +#define TTR1_TS5 0x04 +#define TTR1_TS6 0x02 +#define TTR1_TS7 0x01 + +#define TTR2_TS8 0x80 +#define TTR2_TS9 0x40 +#define TTR2_TS10 0x20 +#define TTR2_TS11 0x10 +#define TTR2_TS12 0x08 +#define TTR2_TS13 0x04 +#define TTR2_TS14 0x02 +#define TTR2_TS15 0x01 + +#define TTR3_TS16 0x80 +#define TTR3_TS17 0x40 +#define TTR3_TS18 0x20 +#define TTR3_TS19 0x10 +#define TTR3_TS20 0x08 +#define TTR3_TS21 0x04 +#define TTR3_TS22 0x02 +#define TTR3_TS23 0x01 + +#define TTR4_TS24 0x80 +#define TTR4_TS25 0x40 +#define TTR4_TS26 0x20 +#define TTR4_TS27 0x10 +#define TTR4_TS28 0x08 +#define TTR4_TS29 0x04 +#define TTR4_TS30 0x02 +#define TTR4_TS31 0x01 + + + +/* IMR0-4 (Interrupt Mask Register 0-4) + + ----------------- E1 & T1 ----------------------------- */ + +#define IMR0_RME 0x80 +#define IMR0_RFS 0x40 +#define IMR0_T8MS 0x20 +#define IMR0_ISF 0x20 +#define IMR0_RMB 0x10 +#define IMR0_CASC 0x08 +#define IMR0_RSC 0x08 +#define IMR0_CRC6 0x04 +#define IMR0_CRC4 0x04 +#define IMR0_PDEN 0x02 +#define 
IMR0_RPF 0x01 + +#define IMR1_CASE 0x80 +#define IMR1_RDO 0x40 +#define IMR1_ALLS 0x20 +#define IMR1_XDU 0x10 +#define IMR1_XMB 0x08 +#define IMR1_XLSC 0x02 +#define IMR1_XPR 0x01 +#define IMR1_LLBSC 0x80 + +#define IMR2_FAR 0x80 +#define IMR2_LFA 0x40 +#define IMR2_MFAR 0x20 +#define IMR2_T400MS 0x10 +#define IMR2_LMFA 0x10 +#define IMR2_AIS 0x08 +#define IMR2_LOS 0x04 +#define IMR2_RAR 0x02 +#define IMR2_RA 0x01 + +#define IMR3_ES 0x80 +#define IMR3_SEC 0x40 +#define IMR3_LMFA16 0x20 +#define IMR3_AIS16 0x10 +#define IMR3_RA16 0x08 +#define IMR3_API 0x04 +#define IMR3_XSLP 0x20 +#define IMR3_XSLN 0x10 +#define IMR3_LLBSC 0x08 +#define IMR3_XRS 0x04 +#define IMR3_SLN 0x02 +#define IMR3_SLP 0x01 + +#define IMR4_LFA 0x80 +#define IMR4_FER 0x40 +#define IMR4_CER 0x20 +#define IMR4_AIS 0x10 +#define IMR4_LOS 0x08 +#define IMR4_CVE 0x04 +#define IMR4_SLIP 0x02 +#define IMR4_EBE 0x01 + +/* FMR0-5 for E1 and T1 (Framer Mode Register ) */ + +#define FMR0_XC1 0x80 +#define FMR0_XC0 0x40 +#define FMR0_RC1 0x20 +#define FMR0_RC0 0x10 +#define FMR0_EXTD 0x08 +#define FMR0_ALM 0x04 +#define E1_FMR0_FRS 0x02 +#define T1_FMR0_FRS 0x08 +#define FMR0_SRAF 0x04 +#define FMR0_EXLS 0x02 +#define FMR0_SIM 0x01 + +#define FMR1_MFCS 0x80 +#define FMR1_AFR 0x40 +#define FMR1_ENSA 0x20 +#define FMR1_CTM 0x80 +#define FMR1_SIGM 0x40 +#define FMR1_EDL 0x20 +#define FMR1_PMOD 0x10 +#define FMR1_XFS 0x08 +#define FMR1_CRC 0x08 +#define FMR1_ECM 0x04 +#define FMR1_IMOD 0x02 +#define FMR1_XAIS 0x01 + +#define FMR2_RFS1 0x80 +#define FMR2_RFS0 0x40 +#define FMR2_MCSP 0x40 +#define FMR2_RTM 0x20 +#define FMR2_SSP 0x20 +#define FMR2_DAIS 0x10 +#define FMR2_SAIS 0x08 +#define FMR2_PLB 0x04 +#define FMR2_AXRA 0x02 +#define FMR2_ALMF 0x01 +#define FMR2_EXZE 0x01 + +#define LOOP_RTM 0x40 +#define LOOP_SFM 0x40 +#define LOOP_ECLB 0x20 +#define LOOP_CLA 0x1f + +/*--------------------- E1 ----------------------------*/ +#define FMR3_XLD 0x20 +#define FMR3_XLU 0x10 + +/*--------------------- T1 ----------------------------*/ +#define FMR4_AIS3 0x80 +#define FMR4_TM 0x40 +#define FMR4_XRA 0x20 +#define FMR4_SSC1 0x10 +#define FMR4_SSC0 0x08 +#define FMR4_AUTO 0x04 +#define FMR4_FM1 0x02 +#define FMR4_FM0 0x01 + +#define FMR5_SRS 0x80 +#define FMR5_EIBR 0x40 +#define FMR5_XLD 0x20 +#define FMR5_XLU 0x10 + + +/* LOOP (Channel Loop Back) + + ------------------ E1 & T1 ---------------------------- */ + +#define LOOP_SFM 0x40 +#define LOOP_ECLB 0x20 +#define LOOP_CLA4 0x10 +#define LOOP_CLA3 0x08 +#define LOOP_CLA2 0x04 +#define LOOP_CLA1 0x02 +#define LOOP_CLA0 0x01 + + + +/* XSW (Transmit Service Word Pulseframe) + + ------------------- E1 --------------------------- */ + +#define XSW_XSIS 0x80 +#define XSW_XTM 0x40 +#define XSW_XRA 0x20 +#define XSW_XY0 0x10 +#define XSW_XY1 0x08 +#define XSW_XY2 0x04 +#define XSW_XY3 0x02 +#define XSW_XY4 0x01 + + +/* XSP (Transmit Spare Bits) + + ------------------- E1 --------------------------- */ + +#define XSP_XAP 0x80 +#define XSP_CASEN 0x40 +#define XSP_TT0 0x20 +#define XSP_EBP 0x10 +#define XSP_AXS 0x08 +#define XSP_XSIF 0x04 +#define XSP_XS13 0x02 +#define XSP_XS15 0x01 + + +/* XC0/1 (Transmit Control 0/1) + ------------------ E1 & T1 ---------------------------- */ + +#define XC0_SA8E 0x80 +#define XC0_SA7E 0x40 +#define XC0_SA6E 0x20 +#define XC0_SA5E 0x10 +#define XC0_SA4E 0x08 +#define XC0_BRM 0x80 +#define XC0_MFBS 0x40 +#define XC0_SFRZ 0x10 +#define XC0_XCO2 0x04 +#define XC0_XCO1 0x02 +#define XC0_XCO0 0x01 + +#define XC1_XTO5 0x20 +#define XC1_XTO4 0x10 +#define XC1_XTO3 0x08 
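
These single-bit register definitions are consumed in the usual way: OR them together to build a value to write, AND them against a value read back to test individual bits. A minimal user-space illustration using three of the IMR0 bits defined above follows; it is not part of the patch and makes no claim about what the FALC hardware does with these particular bits.

#include <stdio.h>

/* Values copied from the IMR0 definitions above. */
#define IMR0_RME  0x80
#define IMR0_RFS  0x40
#define IMR0_RPF  0x01

int main(void)
{
        unsigned char wanted = IMR0_RME | IMR0_RFS | IMR0_RPF;  /* bits of interest, combined */
        unsigned char status = 0x81;                            /* pretend read-back value */

        printf("combined mask: 0x%02x\n", wanted);
        if (status & IMR0_RME)
                printf("RME bit set in 0x%02x\n", status);
        if (status & IMR0_RPF)
                printf("RPF bit set in 0x%02x\n", status);
        return 0;
}
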
+#define XC1_XTO2 0x04 +#define XC1_XTO1 0x02 +#define XC1_XTO0 0x01 + + +/* RC0/1 (Receive Control 0/1) + ------------------ E1 & T1 ---------------------------- */ + +#define RC0_SICS 0x40 +#define RC0_CRCI 0x20 +#define RC0_XCRCI 0x10 +#define RC0_RDIS 0x08 +#define RC0_RCO2 0x04 +#define RC0_RCO1 0x02 +#define RC0_RCO0 0x01 + +#define RC1_SWD 0x80 +#define RC1_ASY4 0x40 +#define RC1_RRAM 0x40 +#define RC1_RTO5 0x20 +#define RC1_RTO4 0x10 +#define RC1_RTO3 0x08 +#define RC1_RTO2 0x04 +#define RC1_RTO1 0x02 +#define RC1_RTO0 0x01 + + + +/* XPM0-2 (Transmit Pulse Mask 0-2) + --------------------- E1 & T1 ------------------------- */ + +#define XPM0_XP12 0x80 +#define XPM0_XP11 0x40 +#define XPM0_XP10 0x20 +#define XPM0_XP04 0x10 +#define XPM0_XP03 0x08 +#define XPM0_XP02 0x04 +#define XPM0_XP01 0x02 +#define XPM0_XP00 0x01 + +#define XPM1_XP30 0x80 +#define XPM1_XP24 0x40 +#define XPM1_XP23 0x20 +#define XPM1_XP22 0x10 +#define XPM1_XP21 0x08 +#define XPM1_XP20 0x04 +#define XPM1_XP14 0x02 +#define XPM1_XP13 0x01 + +#define XPM2_XLHP 0x80 +#define XPM2_XLT 0x40 +#define XPM2_DAXLT 0x20 +#define XPM2_XP34 0x08 +#define XPM2_XP33 0x04 +#define XPM2_XP32 0x02 +#define XPM2_XP31 0x01 + + +/* TSWM (Transparent Service Word Mask) + ------------------ E1 ---------------------------- */ + +#define TSWM_TSIS 0x80 +#define TSWM_TSIF 0x40 +#define TSWM_TRA 0x20 +#define TSWM_TSA4 0x10 +#define TSWM_TSA5 0x08 +#define TSWM_TSA6 0x04 +#define TSWM_TSA7 0x02 +#define TSWM_TSA8 0x01 + +/* IDLE + + ------------------ E1 & T1 ----------------------- */ + +#define IDLE_IDL7 0x80 +#define IDLE_IDL6 0x40 +#define IDLE_IDL5 0x20 +#define IDLE_IDL4 0x10 +#define IDLE_IDL3 0x08 +#define IDLE_IDL2 0x04 +#define IDLE_IDL1 0x02 +#define IDLE_IDL0 0x01 + + +/* XSA4-8 + -------------------E1 ----------------------------- */ + +#define XSA4_XS47 0x80 +#define XSA4_XS46 0x40 +#define XSA4_XS45 0x20 +#define XSA4_XS44 0x10 +#define XSA4_XS43 0x08 +#define XSA4_XS42 0x04 +#define XSA4_XS41 0x02 +#define XSA4_XS40 0x01 + +#define XSA5_XS57 0x80 +#define XSA5_XS56 0x40 +#define XSA5_XS55 0x20 +#define XSA5_XS54 0x10 +#define XSA5_XS53 0x08 +#define XSA5_XS52 0x04 +#define XSA5_XS51 0x02 +#define XSA5_XS50 0x01 + +#define XSA6_XS67 0x80 +#define XSA6_XS66 0x40 +#define XSA6_XS65 0x20 +#define XSA6_XS64 0x10 +#define XSA6_XS63 0x08 +#define XSA6_XS62 0x04 +#define XSA6_XS61 0x02 +#define XSA6_XS60 0x01 + +#define XSA7_XS77 0x80 +#define XSA7_XS76 0x40 +#define XSA7_XS75 0x20 +#define XSA7_XS74 0x10 +#define XSA7_XS73 0x08 +#define XSA7_XS72 0x04 +#define XSA7_XS71 0x02 +#define XSA7_XS70 0x01 + +#define XSA8_XS87 0x80 +#define XSA8_XS86 0x40 +#define XSA8_XS85 0x20 +#define XSA8_XS84 0x10 +#define XSA8_XS83 0x08 +#define XSA8_XS82 0x04 +#define XSA8_XS81 0x02 +#define XSA8_XS80 0x01 + + +/* XDL1-3 (Transmit DL-Bit Register1-3 (read/write)) + ----------------------- T1 --------------------- */ + +#define XDL1_XDL17 0x80 +#define XDL1_XDL16 0x40 +#define XDL1_XDL15 0x20 +#define XDL1_XDL14 0x10 +#define XDL1_XDL13 0x08 +#define XDL1_XDL12 0x04 +#define XDL1_XDL11 0x02 +#define XDL1_XDL10 0x01 + +#define XDL2_XDL27 0x80 +#define XDL2_XDL26 0x40 +#define XDL2_XDL25 0x20 +#define XDL2_XDL24 0x10 +#define XDL2_XDL23 0x08 +#define XDL2_XDL22 0x04 +#define XDL2_XDL21 0x02 +#define XDL2_XDL20 0x01 + +#define XDL3_XDL37 0x80 +#define XDL3_XDL36 0x40 +#define XDL3_XDL35 0x20 +#define XDL3_XDL34 0x10 +#define XDL3_XDL33 0x08 +#define XDL3_XDL32 0x04 +#define XDL3_XDL31 0x02 +#define XDL3_XDL30 0x01 + + +/* ICB1-4 (Idle Channel Register 
1-4) + ------------------ E1 ---------------------------- */ + +#define E1_ICB1_IC0 0x80 +#define E1_ICB1_IC1 0x40 +#define E1_ICB1_IC2 0x20 +#define E1_ICB1_IC3 0x10 +#define E1_ICB1_IC4 0x08 +#define E1_ICB1_IC5 0x04 +#define E1_ICB1_IC6 0x02 +#define E1_ICB1_IC7 0x01 + +#define E1_ICB2_IC8 0x80 +#define E1_ICB2_IC9 0x40 +#define E1_ICB2_IC10 0x20 +#define E1_ICB2_IC11 0x10 +#define E1_ICB2_IC12 0x08 +#define E1_ICB2_IC13 0x04 +#define E1_ICB2_IC14 0x02 +#define E1_ICB2_IC15 0x01 + +#define E1_ICB3_IC16 0x80 +#define E1_ICB3_IC17 0x40 +#define E1_ICB3_IC18 0x20 +#define E1_ICB3_IC19 0x10 +#define E1_ICB3_IC20 0x08 +#define E1_ICB3_IC21 0x04 +#define E1_ICB3_IC22 0x02 +#define E1_ICB3_IC23 0x01 + +#define E1_ICB4_IC24 0x80 +#define E1_ICB4_IC25 0x40 +#define E1_ICB4_IC26 0x20 +#define E1_ICB4_IC27 0x10 +#define E1_ICB4_IC28 0x08 +#define E1_ICB4_IC29 0x04 +#define E1_ICB4_IC30 0x02 +#define E1_ICB4_IC31 0x01 + +/* ICB1-4 (Idle Channel Register 1-4) + ------------------ T1 ---------------------------- */ + +#define T1_ICB1_IC1 0x80 +#define T1_ICB1_IC2 0x40 +#define T1_ICB1_IC3 0x20 +#define T1_ICB1_IC4 0x10 +#define T1_ICB1_IC5 0x08 +#define T1_ICB1_IC6 0x04 +#define T1_ICB1_IC7 0x02 +#define T1_ICB1_IC8 0x01 + +#define T1_ICB2_IC9 0x80 +#define T1_ICB2_IC10 0x40 +#define T1_ICB2_IC11 0x20 +#define T1_ICB2_IC12 0x10 +#define T1_ICB2_IC13 0x08 +#define T1_ICB2_IC14 0x04 +#define T1_ICB2_IC15 0x02 +#define T1_ICB2_IC16 0x01 + +#define T1_ICB3_IC17 0x80 +#define T1_ICB3_IC18 0x40 +#define T1_ICB3_IC19 0x20 +#define T1_ICB3_IC20 0x10 +#define T1_ICB3_IC21 0x08 +#define T1_ICB3_IC22 0x04 +#define T1_ICB3_IC23 0x02 +#define T1_ICB3_IC24 0x01 + +/* FMR3 (Framer Mode Register 3) + --------------------E1------------------------ */ + +#define FMR3_CMI 0x08 +#define FMR3_SYNSA 0x04 +#define FMR3_CFRZ 0x02 +#define FMR3_EXTIW 0x01 + + + +/* CCB1-3 (Clear Channel Register) + ------------------- T1 ----------------------- */ + +#define CCB1_CH1 0x80 +#define CCB1_CH2 0x40 +#define CCB1_CH3 0x20 +#define CCB1_CH4 0x10 +#define CCB1_CH5 0x08 +#define CCB1_CH6 0x04 +#define CCB1_CH7 0x02 +#define CCB1_CH8 0x01 + +#define CCB2_CH9 0x80 +#define CCB2_CH10 0x40 +#define CCB2_CH11 0x20 +#define CCB2_CH12 0x10 +#define CCB2_CH13 0x08 +#define CCB2_CH14 0x04 +#define CCB2_CH15 0x02 +#define CCB2_CH16 0x01 + +#define CCB3_CH17 0x80 +#define CCB3_CH18 0x40 +#define CCB3_CH19 0x20 +#define CCB3_CH20 0x10 +#define CCB3_CH21 0x08 +#define CCB3_CH22 0x04 +#define CCB3_CH23 0x02 +#define CCB3_CH24 0x01 + + +/* LIM0/1 (Line Interface Mode 0/1) + ------------------- E1 & T1 --------------------------- */ + +#define LIM0_XFB 0x80 +#define LIM0_XDOS 0x40 +#define LIM0_SCL1 0x20 +#define LIM0_SCL0 0x10 +#define LIM0_EQON 0x08 +#define LIM0_ELOS 0x04 +#define LIM0_LL 0x02 +#define LIM0_MAS 0x01 + +#define LIM1_EFSC 0x80 +#define LIM1_RIL2 0x40 +#define LIM1_RIL1 0x20 +#define LIM1_RIL0 0x10 +#define LIM1_DCOC 0x08 +#define LIM1_JATT 0x04 +#define LIM1_RL 0x02 +#define LIM1_DRS 0x01 + + +/* PCDR (Pulse Count Detection Register(Read/Write)) + ------------------ E1 & T1 ------------------------- */ + +#define PCDR_PCD7 0x80 +#define PCDR_PCD6 0x40 +#define PCDR_PCD5 0x20 +#define PCDR_PCD4 0x10 +#define PCDR_PCD3 0x08 +#define PCDR_PCD2 0x04 +#define PCDR_PCD1 0x02 +#define PCDR_PCD0 0x01 + +#define PCRR_PCR7 0x80 +#define PCRR_PCR6 0x40 +#define PCRR_PCR5 0x20 +#define PCRR_PCR4 0x10 +#define PCRR_PCR3 0x08 +#define PCRR_PCR2 0x04 +#define PCRR_PCR1 0x02 +#define PCRR_PCR0 0x01 + + +/* LIM2 (Line Interface Mode 2) + + 
------------------ E1 & T1 ---------------------------- */ + +#define LIM2_DJA2 0x20 +#define LIM2_DJA1 0x10 +#define LIM2_LOS2 0x02 +#define LIM2_LOS1 0x01 + +/* LCR1 (Loop Code Register 1) */ + +#define LCR1_EPRM 0x80 +#define LCR1_XPRBS 0x40 + +/* SIC1 (System Interface Control 1) */ +#define SIC1_SRSC 0x80 +#define SIC1_RBS1 0x20 +#define SIC1_RBS0 0x10 +#define SIC1_SXSC 0x08 +#define SIC1_XBS1 0x02 +#define SIC1_XBS0 0x01 + +/* DEC (Disable Error Counter) + ------------------ E1 & T1 ---------------------------- */ + +#define DEC_DCEC3 0x20 +#define DEC_DBEC 0x10 +#define DEC_DCEC1 0x08 +#define DEC_DCEC 0x08 +#define DEC_DEBC 0x04 +#define DEC_DCVC 0x02 +#define DEC_DFEC 0x01 + + +/* FALC Register Bits (Receive Mode) + ---------------------------------------------------------------------------- */ + + +/* FRS0/1 (Framer Receive Status Register 0/1) + ----------------- E1 & T1 ---------------------------------- */ + +#define FRS0_LOS 0x80 +#define FRS0_AIS 0x40 +#define FRS0_LFA 0x20 +#define FRS0_RRA 0x10 +#define FRS0_API 0x08 +#define FRS0_NMF 0x04 +#define FRS0_LMFA 0x02 +#define FRS0_FSRF 0x01 + +#define FRS1_TS16RA 0x40 +#define FRS1_TS16LOS 0x20 +#define FRS1_TS16AIS 0x10 +#define FRS1_TS16LFA 0x08 +#define FRS1_EXZD 0x80 +#define FRS1_LLBDD 0x10 +#define FRS1_LLBAD 0x08 +#define FRS1_XLS 0x02 +#define FRS1_XLO 0x01 +#define FRS1_PDEN 0x40 + +/* FRS2/3 (Framer Receive Status Register 2/3) + ----------------- T1 ---------------------------------- */ + +#define FRS2_ESC2 0x80 +#define FRS2_ESC1 0x40 +#define FRS2_ESC0 0x20 + +#define FRS3_FEH5 0x20 +#define FRS3_FEH4 0x10 +#define FRS3_FEH3 0x08 +#define FRS3_FEH2 0x04 +#define FRS3_FEH1 0x02 +#define FRS3_FEH0 0x01 + + +/* RSW (Receive Service Word Pulseframe) + ----------------- E1 ------------------------------ */ + +#define RSW_RSI 0x80 +#define RSW_RRA 0x20 +#define RSW_RYO 0x10 +#define RSW_RY1 0x08 +#define RSW_RY2 0x04 +#define RSW_RY3 0x02 +#define RSW_RY4 0x01 + + +/* RSP (Receive Spare Bits / Additional Status) + ---------------- E1 ------------------------------- */ + +#define RSP_SI1 0x80 +#define RSP_SI2 0x40 +#define RSP_LLBDD 0x10 +#define RSP_LLBAD 0x08 +#define RSP_RSIF 0x04 +#define RSP_RS13 0x02 +#define RSP_RS15 0x01 + + +/* FECL (Framing Error Counter) + ---------------- E1 & T1 -------------------------- */ + +#define FECL_FE7 0x80 +#define FECL_FE6 0x40 +#define FECL_FE5 0x20 +#define FECL_FE4 0x10 +#define FECL_FE3 0x08 +#define FECL_FE2 0x04 +#define FECL_FE1 0x02 +#define FECL_FE0 0x01 + +#define FECH_FE15 0x80 +#define FECH_FE14 0x40 +#define FECH_FE13 0x20 +#define FECH_FE12 0x10 +#define FECH_FE11 0x08 +#define FECH_FE10 0x04 +#define FECH_FE9 0x02 +#define FECH_FE8 0x01 + + +/* CVCl (Code Violation Counter) + ----------------- E1 ------------------------- */ + +#define CVCL_CV7 0x80 +#define CVCL_CV6 0x40 +#define CVCL_CV5 0x20 +#define CVCL_CV4 0x10 +#define CVCL_CV3 0x08 +#define CVCL_CV2 0x04 +#define CVCL_CV1 0x02 +#define CVCL_CV0 0x01 + +#define CVCH_CV15 0x80 +#define CVCH_CV14 0x40 +#define CVCH_CV13 0x20 +#define CVCH_CV12 0x10 +#define CVCH_CV11 0x08 +#define CVCH_CV10 0x04 +#define CVCH_CV9 0x02 +#define CVCH_CV8 0x01 + + +/* CEC1-3L (CRC Error Counter) + ------------------ E1 ----------------------------- */ + +#define CEC1L_CR7 0x80 +#define CEC1L_CR6 0x40 +#define CEC1L_CR5 0x20 +#define CEC1L_CR4 0x10 +#define CEC1L_CR3 0x08 +#define CEC1L_CR2 0x04 +#define CEC1L_CR1 0x02 +#define CEC1L_CR0 0x01 + +#define CEC1H_CR15 0x80 +#define CEC1H_CR14 0x40 +#define CEC1H_CR13 0x20 +#define 
CEC1H_CR12 0x10 +#define CEC1H_CR11 0x08 +#define CEC1H_CR10 0x04 +#define CEC1H_CR9 0x02 +#define CEC1H_CR8 0x01 + +#define CEC2L_CR7 0x80 +#define CEC2L_CR6 0x40 +#define CEC2L_CR5 0x20 +#define CEC2L_CR4 0x10 +#define CEC2L_CR3 0x08 +#define CEC2L_CR2 0x04 +#define CEC2L_CR1 0x02 +#define CEC2L_CR0 0x01 + +#define CEC2H_CR15 0x80 +#define CEC2H_CR14 0x40 +#define CEC2H_CR13 0x20 +#define CEC2H_CR12 0x10 +#define CEC2H_CR11 0x08 +#define CEC2H_CR10 0x04 +#define CEC2H_CR9 0x02 +#define CEC2H_CR8 0x01 + +#define CEC3L_CR7 0x80 +#define CEC3L_CR6 0x40 +#define CEC3L_CR5 0x20 +#define CEC3L_CR4 0x10 +#define CEC3L_CR3 0x08 +#define CEC3L_CR2 0x04 +#define CEC3L_CR1 0x02 +#define CEC3L_CR0 0x01 + +#define CEC3H_CR15 0x80 +#define CEC3H_CR14 0x40 +#define CEC3H_CR13 0x20 +#define CEC3H_CR12 0x10 +#define CEC3H_CR11 0x08 +#define CEC3H_CR10 0x04 +#define CEC3H_CR9 0x02 +#define CEC3H_CR8 0x01 + + +/* CECL (CRC Error Counter) + + ------------------ T1 ----------------------------- */ + +#define CECL_CR7 0x80 +#define CECL_CR6 0x40 +#define CECL_CR5 0x20 +#define CECL_CR4 0x10 +#define CECL_CR3 0x08 +#define CECL_CR2 0x04 +#define CECL_CR1 0x02 +#define CECL_CR0 0x01 + +#define CECH_CR15 0x80 +#define CECH_CR14 0x40 +#define CECH_CR13 0x20 +#define CECH_CR12 0x10 +#define CECH_CR11 0x08 +#define CECH_CR10 0x04 +#define CECH_CR9 0x02 +#define CECH_CR8 0x01 + +/* EBCL (E Bit Error Counter) + ------------------- E1 & T1 ------------------------- */ + +#define EBCL_EB7 0x80 +#define EBCL_EB6 0x40 +#define EBCL_EB5 0x20 +#define EBCL_EB4 0x10 +#define EBCL_EB3 0x08 +#define EBCL_EB2 0x04 +#define EBCL_EB1 0x02 +#define EBCL_EB0 0x01 + +#define EBCH_EB15 0x80 +#define EBCH_EB14 0x40 +#define EBCH_EB13 0x20 +#define EBCH_EB12 0x10 +#define EBCH_EB11 0x08 +#define EBCH_EB10 0x04 +#define EBCH_EB9 0x02 +#define EBCH_EB8 0x01 + + +/* RSA4-8 (Receive Sa4-8-Bit Register) + -------------------- E1 --------------------------- */ + +#define RSA4_RS47 0x80 +#define RSA4_RS46 0x40 +#define RSA4_RS45 0x20 +#define RSA4_RS44 0x10 +#define RSA4_RS43 0x08 +#define RSA4_RS42 0x04 +#define RSA4_RS41 0x02 +#define RSA4_RS40 0x01 + +#define RSA5_RS57 0x80 +#define RSA5_RS56 0x40 +#define RSA5_RS55 0x20 +#define RSA5_RS54 0x10 +#define RSA5_RS53 0x08 +#define RSA5_RS52 0x04 +#define RSA5_RS51 0x02 +#define RSA5_RS50 0x01 + +#define RSA6_RS67 0x80 +#define RSA6_RS66 0x40 +#define RSA6_RS65 0x20 +#define RSA6_RS64 0x10 +#define RSA6_RS63 0x08 +#define RSA6_RS62 0x04 +#define RSA6_RS61 0x02 +#define RSA6_RS60 0x01 + +#define RSA7_RS77 0x80 +#define RSA7_RS76 0x40 +#define RSA7_RS75 0x20 +#define RSA7_RS74 0x10 +#define RSA7_RS73 0x08 +#define RSA7_RS72 0x04 +#define RSA7_RS71 0x02 +#define RSA7_RS70 0x01 + +#define RSA8_RS87 0x80 +#define RSA8_RS86 0x40 +#define RSA8_RS85 0x20 +#define RSA8_RS84 0x10 +#define RSA8_RS83 0x08 +#define RSA8_RS82 0x04 +#define RSA8_RS81 0x02 +#define RSA8_RS80 0x01 + +/* RSA6S (Receive Sa6 Bit Status Register) + ------------------------ T1 ------------------------- */ + +#define RSA6S_SX 0x20 +#define RSA6S_SF 0x10 +#define RSA6S_SE 0x08 +#define RSA6S_SC 0x04 +#define RSA6S_SA 0x02 +#define RSA6S_S8 0x01 + + +/* RDL1-3 Receive DL-Bit Register1-3) + ------------------------ T1 ------------------------- */ + +#define RDL1_RDL17 0x80 +#define RDL1_RDL16 0x40 +#define RDL1_RDL15 0x20 +#define RDL1_RDL14 0x10 +#define RDL1_RDL13 0x08 +#define RDL1_RDL12 0x04 +#define RDL1_RDL11 0x02 +#define RDL1_RDL10 0x01 + +#define RDL2_RDL27 0x80 +#define RDL2_RDL26 0x40 +#define RDL2_RDL25 0x20 +#define 
RDL2_RDL24 0x10 +#define RDL2_RDL23 0x08 +#define RDL2_RDL22 0x04 +#define RDL2_RDL21 0x02 +#define RDL2_RDL20 0x01 + +#define RDL3_RDL37 0x80 +#define RDL3_RDL36 0x40 +#define RDL3_RDL35 0x20 +#define RDL3_RDL34 0x10 +#define RDL3_RDL33 0x08 +#define RDL3_RDL32 0x04 +#define RDL3_RDL31 0x02 +#define RDL3_RDL30 0x01 + + +/* SIS (Signaling Status Register) + + -------------------- E1 & T1 -------------------------- */ + +#define SIS_XDOV 0x80 +#define SIS_XFW 0x40 +#define SIS_XREP 0x20 +#define SIS_RLI 0x08 +#define SIS_CEC 0x04 +#define SIS_BOM 0x01 + + +/* RSIS (Receive Signaling Status Register) + + -------------------- E1 & T1 --------------------------- */ + +#define RSIS_VFR 0x80 +#define RSIS_RDO 0x40 +#define RSIS_CRC16 0x20 +#define RSIS_RAB 0x10 +#define RSIS_HA1 0x08 +#define RSIS_HA0 0x04 +#define RSIS_HFR 0x02 +#define RSIS_LA 0x01 + + +/* RBCL/H (Receive Byte Count Low/High) + + ------------------- E1 & T1 ----------------------- */ + +#define RBCL_RBC7 0x80 +#define RBCL_RBC6 0x40 +#define RBCL_RBC5 0x20 +#define RBCL_RBC4 0x10 +#define RBCL_RBC3 0x08 +#define RBCL_RBC2 0x04 +#define RBCL_RBC1 0x02 +#define RBCL_RBC0 0x01 + +#define RBCH_OV 0x10 +#define RBCH_RBC11 0x08 +#define RBCH_RBC10 0x04 +#define RBCH_RBC9 0x02 +#define RBCH_RBC8 0x01 + + +/* ISR1-3 (Interrupt Status Register 1-3) + + ------------------ E1 & T1 ------------------------------ */ + +#define FISR0_RME 0x80 +#define FISR0_RFS 0x40 +#define FISR0_T8MS 0x20 +#define FISR0_ISF 0x20 +#define FISR0_RMB 0x10 +#define FISR0_CASC 0x08 +#define FISR0_RSC 0x08 +#define FISR0_CRC6 0x04 +#define FISR0_CRC4 0x04 +#define FISR0_PDEN 0x02 +#define FISR0_RPF 0x01 + +#define FISR1_CASE 0x80 +#define FISR1_LLBSC 0x80 +#define FISR1_RDO 0x40 +#define FISR1_ALLS 0x20 +#define FISR1_XDU 0x10 +#define FISR1_XMB 0x08 +#define FISR1_XLSC 0x02 +#define FISR1_XPR 0x01 + +#define FISR2_FAR 0x80 +#define FISR2_LFA 0x40 +#define FISR2_MFAR 0x20 +#define FISR2_T400MS 0x10 +#define FISR2_LMFA 0x10 +#define FISR2_AIS 0x08 +#define FISR2_LOS 0x04 +#define FISR2_RAR 0x02 +#define FISR2_RA 0x01 + +#define FISR3_ES 0x80 +#define FISR3_SEC 0x40 +#define FISR3_LMFA16 0x20 +#define FISR3_AIS16 0x10 +#define FISR3_RA16 0x08 +#define FISR3_API 0x04 +#define FISR3_XSLP 0x20 +#define FISR3_XSLN 0x10 +#define FISR3_LLBSC 0x08 +#define FISR3_XRS 0x04 +#define FISR3_SLN 0x02 +#define FISR3_SLP 0x01 + + +/* GIS (Global Interrupt Status Register) + + --------------------- E1 & T1 --------------------- */ + +#define GIS_ISR3 0x08 +#define GIS_ISR2 0x04 +#define GIS_ISR1 0x02 +#define GIS_ISR0 0x01 + + +/* VSTR (Version Status Register) + + --------------------- E1 & T1 --------------------- */ + +#define VSTR_VN3 0x08 +#define VSTR_VN2 0x04 +#define VSTR_VN1 0x02 +#define VSTR_VN0 0x01 + + +/*>>>>>>>>>>>>>>>>>>>>> Local Control Structures <<<<<<<<<<<<<<<<<<<<<<<<< */ + +/* Write-only Registers (E1/T1 control mode write registers) */ +#define XFIFOH 0x00 /* Tx FIFO High Byte */ +#define XFIFOL 0x01 /* Tx FIFO Low Byte */ +#define CMDR 0x02 /* Command Reg */ +#define DEC 0x60 /* Disable Error Counter */ +#define TEST2 0x62 /* Manuf. 
Test Reg 2 */ +#define XS(nbr) (0x70 + (nbr)) /* Tx CAS Reg (0 to 15) */ + +/* Read-write Registers (E1/T1 status mode read registers) */ +#define MODE 0x03 /* Mode Reg */ +#define RAH1 0x04 /* Receive Address High 1 */ +#define RAH2 0x05 /* Receive Address High 2 */ +#define RAL1 0x06 /* Receive Address Low 1 */ +#define RAL2 0x07 /* Receive Address Low 2 */ +#define IPC 0x08 /* Interrupt Port Configuration */ +#define CCR1 0x09 /* Common Configuration Reg 1 */ +#define CCR3 0x0A /* Common Configuration Reg 3 */ +#define PRE 0x0B /* Preamble Reg */ +#define RTR1 0x0C /* Receive Timeslot Reg 1 */ +#define RTR2 0x0D /* Receive Timeslot Reg 2 */ +#define RTR3 0x0E /* Receive Timeslot Reg 3 */ +#define RTR4 0x0F /* Receive Timeslot Reg 4 */ +#define TTR1 0x10 /* Transmit Timeslot Reg 1 */ +#define TTR2 0x11 /* Transmit Timeslot Reg 2 */ +#define TTR3 0x12 /* Transmit Timeslot Reg 3 */ +#define TTR4 0x13 /* Transmit Timeslot Reg 4 */ +#define IMR0 0x14 /* Interrupt Mask Reg 0 */ +#define IMR1 0x15 /* Interrupt Mask Reg 1 */ +#define IMR2 0x16 /* Interrupt Mask Reg 2 */ +#define IMR3 0x17 /* Interrupt Mask Reg 3 */ +#define IMR4 0x18 /* Interrupt Mask Reg 4 */ +#define IMR5 0x19 /* Interrupt Mask Reg 5 */ +#define FMR0 0x1A /* Framer Mode Reigster 0 */ +#define FMR1 0x1B /* Framer Mode Reigster 1 */ +#define FMR2 0x1C /* Framer Mode Reigster 2 */ +#define LOOP 0x1D /* Channel Loop Back */ +#define XSW 0x1E /* Transmit Service Word */ +#define FMR4 0x1E /* Framer Mode Reg 4 */ +#define XSP 0x1F /* Transmit Spare Bits */ +#define FMR5 0x1F /* Framer Mode Reg 5 */ +#define XC0 0x20 /* Transmit Control 0 */ +#define XC1 0x21 /* Transmit Control 1 */ +#define RC0 0x22 /* Receive Control 0 */ +#define RC1 0x23 /* Receive Control 1 */ +#define XPM0 0x24 /* Transmit Pulse Mask 0 */ +#define XPM1 0x25 /* Transmit Pulse Mask 1 */ +#define XPM2 0x26 /* Transmit Pulse Mask 2 */ +#define TSWM 0x27 /* Transparent Service Word Mask */ +#define TEST1 0x28 /* Manuf. Test Reg 1 */ +#define IDLE 0x29 /* Idle Channel Code */ +#define XSA4 0x2A /* Transmit SA4 Bit Reg */ +#define XDL1 0x2A /* Transmit DL-Bit Reg 2 */ +#define XSA5 0x2B /* Transmit SA4 Bit Reg */ +#define XDL2 0x2B /* Transmit DL-Bit Reg 2 */ +#define XSA6 0x2C /* Transmit SA4 Bit Reg */ +#define XDL3 0x2C /* Transmit DL-Bit Reg 2 */ +#define XSA7 0x2D /* Transmit SA4 Bit Reg */ +#define CCB1 0x2D /* Clear Channel Reg 1 */ +#define XSA8 0x2E /* Transmit SA4 Bit Reg */ +#define CCB2 0x2E /* Clear Channel Reg 2 */ +#define FMR3 0x2F /* Framer Mode Reg. 
3 */ +#define CCB3 0x2F /* Clear Channel Reg 3 */ +#define ICB1 0x30 /* Idle Channel Reg 1 */ +#define ICB2 0x31 /* Idle Channel Reg 2 */ +#define ICB3 0x32 /* Idle Channel Reg 3 */ +#define ICB4 0x33 /* Idle Channel Reg 4 */ +#define LIM0 0x34 /* Line Interface Mode 0 */ +#define LIM1 0x35 /* Line Interface Mode 1 */ +#define PCDR 0x36 /* Pulse Count Detection */ +#define PCRR 0x37 /* Pulse Count Recovery */ +#define LIM2 0x38 /* Line Interface Mode Reg 2 */ +#define LCR1 0x39 /* Loop Code Reg 1 */ +#define LCR2 0x3A /* Loop Code Reg 2 */ +#define LCR3 0x3B /* Loop Code Reg 3 */ +#define SIC1 0x3C /* System Interface Control 1 */ + +/* Read-only Registers (E1/T1 control mode read registers) */ +#define RFIFOH 0x00 /* Receive FIFO */ +#define RFIFOL 0x01 /* Receive FIFO */ +#define FRS0 0x4C /* Framer Receive Status 0 */ +#define FRS1 0x4D /* Framer Receive Status 1 */ +#define RSW 0x4E /* Receive Service Word */ +#define FRS2 0x4E /* Framer Receive Status 2 */ +#define RSP 0x4F /* Receive Spare Bits */ +#define FRS3 0x4F /* Framer Receive Status 3 */ +#define FECL 0x50 /* Framing Error Counter */ +#define FECH 0x51 /* Framing Error Counter */ +#define CVCL 0x52 /* Code Violation Counter */ +#define CVCH 0x53 /* Code Violation Counter */ +#define CECL 0x54 /* CRC Error Counter 1 */ +#define CECH 0x55 /* CRC Error Counter 1 */ +#define EBCL 0x56 /* E-Bit Error Counter */ +#define EBCH 0x57 /* E-Bit Error Counter */ +#define BECL 0x58 /* Bit Error Counter Low */ +#define BECH 0x59 /* Bit Error Counter Low */ +#define CEC3 0x5A /* CRC Error Counter 3 (16-bit) */ +#define RSA4 0x5C /* Receive SA4 Bit Reg */ +#define RDL1 0x5C /* Receive DL-Bit Reg 1 */ +#define RSA5 0x5D /* Receive SA5 Bit Reg */ +#define RDL2 0x5D /* Receive DL-Bit Reg 2 */ +#define RSA6 0x5E /* Receive SA6 Bit Reg */ +#define RDL3 0x5E /* Receive DL-Bit Reg 3 */ +#define RSA7 0x5F /* Receive SA7 Bit Reg */ +#define RSA8 0x60 /* Receive SA8 Bit Reg */ +#define RSA6S 0x61 /* Receive SA6 Bit Status Reg */ +#define TSR0 0x62 /* Manuf. Test Reg 0 */ +#define TSR1 0x63 /* Manuf. Test Reg 1 */ +#define SIS 0x64 /* Signaling Status Reg */ +#define RSIS 0x65 /* Receive Signaling Status Reg */ +#define RBCL 0x66 /* Receive Byte Control */ +#define RBCH 0x67 /* Receive Byte Control */ +#define FISR0 0x68 /* Interrupt Status Reg 0 */ +#define FISR1 0x69 /* Interrupt Status Reg 1 */ +#define FISR2 0x6A /* Interrupt Status Reg 2 */ +#define FISR3 0x6B /* Interrupt Status Reg 3 */ +#define GIS 0x6E /* Global Interrupt Status */ +#define VSTR 0x6F /* Version Status */ +#define RS(nbr) (0x70 + (nbr)) /* Rx CAS Reg (0 to 15) */ + +#endif /* _FALC_LH_H */ + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/pc300.h linux.22-ac2/drivers/net/wan/pc300.h --- linux.vanilla/drivers/net/wan/pc300.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/pc300.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,428 @@ +/* + * pc300.h Cyclades-PC300(tm) Kernel API Definitions. + * + * Author: Ivan Passos + * + * Copyright: (c) 1999-2002 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#ifndef _PC300_H +#define _PC300_H + +#ifndef __HDLC_IOCTL_H__ +#include +#endif + +#ifndef __HDLC_H +#include +#endif + +#ifndef _HD64572_H +#include "hd64572.h" +#endif +#ifndef _FALC_LH_H +#include "pc300-falc-lh.h" +#endif + +#ifndef CY_TYPES +#define CY_TYPES +#if defined(__alpha__) +typedef unsigned long ucdouble; /* 64 bits, unsigned */ +typedef unsigned int uclong; /* 32 bits, unsigned */ +#else +typedef unsigned long uclong; /* 32 bits, unsigned */ +#endif +typedef unsigned short ucshort; /* 16 bits, unsigned */ +typedef unsigned char ucchar; /* 8 bits, unsigned */ +#endif /* CY_TYPES */ + +#define PC300_PROTO_MLPPP 1 + +#define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */ + +#define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */ +#define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */ + +#define PC300_MAXCARDS 4 /* Max number of cards per system */ +#define PC300_MAXCHAN 2 /* Number of channels per card */ + +#define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */ +#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ +#define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */ +#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ + +#define PC300_OSC_CLOCK 24576000 +#define PC300_PCI_CLOCK 33000000 + +#define BD_DEF_LEN 0x0800 /* DMA buffer length (2KB) */ +#define DMA_TX_MEMSZ 0x8000 /* Total DMA Tx memory size (32KB/ch) */ +#define DMA_RX_MEMSZ 0x10000 /* Total DMA Rx memory size (64KB/ch) */ + +#define N_DMA_TX_BUF (DMA_TX_MEMSZ / BD_DEF_LEN) /* DMA Tx buffers */ +#define N_DMA_RX_BUF (DMA_RX_MEMSZ / BD_DEF_LEN) /* DMA Rx buffers */ + +/* DMA Buffer Offsets */ +#define DMA_TX_BASE ((N_DMA_TX_BUF + N_DMA_RX_BUF) * \ + PC300_MAXCHAN * sizeof(pcsca_bd_t)) +#define DMA_RX_BASE (DMA_TX_BASE + PC300_MAXCHAN*DMA_TX_MEMSZ) + +/* DMA Descriptor Offsets */ +#define DMA_TX_BD_BASE 0x0000 +#define DMA_RX_BD_BASE (DMA_TX_BD_BASE + ((PC300_MAXCHAN*DMA_TX_MEMSZ / \ + BD_DEF_LEN) * sizeof(pcsca_bd_t))) + +/* DMA Descriptor Macros */ +#define TX_BD_ADDR(chan, n) (DMA_TX_BD_BASE + \ + ((N_DMA_TX_BUF*chan) + n) * sizeof(pcsca_bd_t)) +#define RX_BD_ADDR(chan, n) (DMA_RX_BD_BASE + \ + ((N_DMA_RX_BUF*chan) + n) * sizeof(pcsca_bd_t)) + +/* Macro to access the FALC registers (TE only) */ +#define F_REG(reg, chan) (0x200*(chan) + ((reg)<<2)) + +/*************************************** + * Memory access functions/macros * + * (required to support Alpha systems) * + ***************************************/ +#ifdef __KERNEL__ +#define cpc_writeb(port,val) {writeb((ucchar)(val),(ulong)(port)); mb();} +#define cpc_writew(port,val) {writew((ushort)(val),(ulong)(port)); mb();} +#define cpc_writel(port,val) {writel((uclong)(val),(ulong)(port)); mb();} + +#define cpc_readb(port) readb(port) +#define cpc_readw(port) readw(port) +#define cpc_readl(port) readl(port) + +#else /* __KERNEL__ */ +#define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val)) +#define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val)) +#define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val)) + +#define cpc_readb(port) (*(volatile ucchar *)(port)) +#define cpc_readw(port) (*(volatile ucshort *)(port)) +#define cpc_readl(port) (*(volatile uclong *)(port)) + +#endif /* __KERNEL__ */ + +/****** Data Structures *****************************************************/ + +/* + * RUNTIME_9050 - PLX PCI9050-1 local configuration and shared runtime + * registers. 
This structure can be used to access the 9050 registers + * (memory mapped). + */ +struct RUNTIME_9050 { + uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ + uclong loc_rom_range; /* 10h : Local ROM Range */ + uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ + uclong loc_rom_base; /* 24h : Local ROM Base */ + uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ + uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */ + uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ + uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ + uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ +}; + +#define PLX_9050_LINT1_ENABLE 0x01 +#define PLX_9050_LINT1_POL 0x02 +#define PLX_9050_LINT1_STATUS 0x04 +#define PLX_9050_LINT2_ENABLE 0x08 +#define PLX_9050_LINT2_POL 0x10 +#define PLX_9050_LINT2_STATUS 0x20 +#define PLX_9050_INTR_ENABLE 0x40 +#define PLX_9050_SW_INTR 0x80 + +/* Masks to access the init_ctrl PLX register */ +#define PC300_CLKSEL_MASK (0x00000004UL) +#define PC300_CHMEDIA_MASK(chan) (0x00000020UL<<(chan*3)) +#define PC300_CTYPE_MASK (0x00000800UL) + +/* CPLD Registers (base addr = falcbase, TE only) */ +/* CPLD v. 0 */ +#define CPLD_REG1 0x140 /* Chip resets, DCD/CTS status */ +#define CPLD_REG2 0x144 /* Clock enable , LED control */ +/* CPLD v. 2 or higher */ +#define CPLD_V2_REG1 0x100 /* Chip resets, DCD/CTS status */ +#define CPLD_V2_REG2 0x104 /* Clock enable , LED control */ +#define CPLD_ID_REG 0x108 /* CPLD version */ + +/* CPLD Register bit description: for the FALC bits, they should always be + set based on the channel (use (bit<<(2*ch)) to access the correct bit for + that channel) */ +#define CPLD_REG1_FALC_RESET 0x01 +#define CPLD_REG1_SCA_RESET 0x02 +#define CPLD_REG1_GLOBAL_CLK 0x08 +#define CPLD_REG1_FALC_DCD 0x10 +#define CPLD_REG1_FALC_CTS 0x20 + +#define CPLD_REG2_FALC_TX_CLK 0x01 +#define CPLD_REG2_FALC_RX_CLK 0x02 +#define CPLD_REG2_FALC_LED1 0x10 +#define CPLD_REG2_FALC_LED2 0x20 + +/* Structure with FALC-related fields (TE only) */ +#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ + +typedef struct falc { + ucchar sync; /* If true FALC is synchronized */ + ucchar active; /* if TRUE then already active */ + ucchar loop_active; /* if TRUE a line loopback UP was received */ + ucchar loop_gen; /* if TRUE a line loopback UP was issued */ + + ucchar num_channels; + ucchar offset; /* 1 for T1, 0 for E1 */ + ucchar full_bandwidth; + + ucchar xmb_cause; + ucchar multiframe_mode; + + /* Statistics */ + ucshort pden; /* Pulse Density violation count */ + ucshort los; /* Loss of Signal count */ + ucshort losr; /* Loss of Signal recovery count */ + ucshort lfa; /* Loss of frame alignment count */ + ucshort farec; /* Frame Alignment Recovery count */ + ucshort lmfa; /* Loss of multiframe alignment count */ + ucshort ais; /* Remote Alarm indication Signal count */ + ucshort sec; /* One-second timer */ + ucshort es; /* Errored second */ + ucshort rai; /* remote alarm received */ + ucshort bec; + ucshort fec; + ucshort cvc; + ucshort cec; + ucshort ebc; + + /* Status */ + ucchar red_alarm; + ucchar blue_alarm; + ucchar loss_fa; + ucchar yellow_alarm; + ucchar loss_mfa; + ucchar prbs; +} falc_t; + +typedef struct falc_status { + ucchar sync; /* If true FALC is synchronized */ + ucchar red_alarm; + ucchar blue_alarm; + ucchar loss_fa; + ucchar yellow_alarm; + ucchar loss_mfa; + ucchar prbs; +} falc_status_t; + +typedef struct rsv_x21_status { + ucchar dcd; + ucchar dsr; + ucchar cts; + ucchar rts; + ucchar dtr; +} 
rsv_x21_status_t; + +typedef struct pc300stats { + int hw_type; + uclong line_on; + uclong line_off; + struct net_device_stats gen_stats; + falc_t te_stats; +} pc300stats_t; + +typedef struct pc300status { + int hw_type; + rsv_x21_status_t gen_status; + falc_status_t te_status; +} pc300status_t; + +typedef struct pc300loopback { + char loop_type; + char loop_on; +} pc300loopback_t; + +typedef struct pc300patterntst { + char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ + ucshort num_errors; +} pc300patterntst_t; + +typedef struct pc300dev { + void *if_ptr; /* General purpose pointer */ + struct pc300ch *chan; + ucchar trace_on; + uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ + uclong line_off; +#ifdef __KERNEL__ + char name[16]; + hdlc_device *hdlc; + + void *private; + struct sk_buff *tx_skb; + union { /* This union has all the protocol-specific structures */ + struct ppp_device pppdev; + }ifu; +#ifdef CONFIG_PC300_MLPPP + void *cpc_tty; /* information to PC300 TTY driver */ +#endif +#endif /* __KERNEL__ */ +}pc300dev_t; + +typedef struct pc300hw { + int type; /* RSV, X21, etc. */ + int bus; /* Bus (PCI, PMC, etc.) */ + int nchan; /* number of channels */ + int irq; /* interrupt request level */ + uclong clock; /* Board clock */ + ucchar cpld_id; /* CPLD ID (TE only) */ + ucshort cpld_reg1; /* CPLD reg 1 (TE only) */ + ucshort cpld_reg2; /* CPLD reg 2 (TE only) */ + ucshort gpioc_reg; /* PLX GPIOC reg */ + ucshort intctl_reg; /* PLX Int Ctrl/Status reg */ + uclong iophys; /* PLX registers I/O base */ + uclong iosize; /* PLX registers I/O size */ + uclong plxphys; /* PLX registers MMIO base (physical) */ + uclong plxbase; /* PLX registers MMIO base (virtual) */ + uclong plxsize; /* PLX registers MMIO size */ + uclong scaphys; /* SCA registers MMIO base (physical) */ + uclong scabase; /* SCA registers MMIO base (virtual) */ + uclong scasize; /* SCA registers MMIO size */ + uclong ramphys; /* On-board RAM MMIO base (physical) */ + uclong rambase; /* On-board RAM MMIO base (virtual) */ + uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ + uclong ramsize; /* On-board RAM MMIO size */ + uclong falcphys; /* FALC registers MMIO base (physical) */ + uclong falcbase; /* FALC registers MMIO base (virtual) */ + uclong falcsize; /* FALC registers MMIO size */ +} pc300hw_t; + +typedef struct pc300chconf { + sync_serial_settings phys_settings; /* Clock type/rate (in bps), + loopback mode */ + raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ + uclong media; /* HW media (RS232, V.35, etc.) */ + uclong proto; /* Protocol (PPP, X.25, etc.) */ + ucchar monitor; /* Monitor mode (0 = off, !0 = on) */ + + /* TE-specific parameters */ + ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */ + ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */ + ucchar lbo; /* Line Build Out */ + ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */ + uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ +} pc300chconf_t; + +typedef struct pc300ch { + struct pc300 *card; + int channel; + pc300dev_t d; + pc300chconf_t conf; + ucchar tx_first_bd; /* First TX DMA block descr. w/ data */ + ucchar tx_next_bd; /* Next free TX DMA block descriptor */ + ucchar rx_first_bd; /* First free RX DMA block descriptor */ + ucchar rx_last_bd; /* Last free RX DMA block descriptor */ + ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */ + falc_t falc; /* FALC structure (TE only) */ +} pc300ch_t; + +typedef struct pc300 { + pc300hw_t hw; /* hardware config. 
*/ + pc300ch_t chan[PC300_MAXCHAN]; +#ifdef __KERNEL__ + spinlock_t card_lock; +#endif /* __KERNEL__ */ +} pc300_t; + +typedef struct pc300conf { + pc300hw_t hw; + pc300chconf_t conf; +} pc300conf_t; + +/* DEV ioctl() commands */ +#define N_SPPP_IOCTLS 2 + +enum pc300_ioctl_cmds { + SIOCCPCRESERVED = (SIOCDEVPRIVATE + N_SPPP_IOCTLS), + SIOCGPC300CONF, + SIOCSPC300CONF, + SIOCGPC300STATUS, + SIOCGPC300FALCSTATUS, + SIOCGPC300UTILSTATS, + SIOCGPC300UTILSTATUS, + SIOCSPC300TRACE, + SIOCSPC300LOOPBACK, + SIOCSPC300PATTERNTEST, +}; + +/* Loopback types - PC300/TE boards */ +enum pc300_loopback_cmds { + PC300LOCLOOP = 1, + PC300REMLOOP, + PC300PAYLOADLOOP, + PC300GENLOOPUP, + PC300GENLOOPDOWN, +}; + +/* Control Constant Definitions */ +#define PC300_RSV 0x01 +#define PC300_X21 0x02 +#define PC300_TE 0x03 + +#define PC300_PCI 0x00 +#define PC300_PMC 0x01 + +#define PC300_LC_AMI 0x01 +#define PC300_LC_B8ZS 0x02 +#define PC300_LC_NRZ 0x03 +#define PC300_LC_HDB3 0x04 + +/* Framing (T1) */ +#define PC300_FR_ESF 0x01 +#define PC300_FR_D4 0x02 +#define PC300_FR_ESF_JAPAN 0x03 + +/* Framing (E1) */ +#define PC300_FR_MF_CRC4 0x04 +#define PC300_FR_MF_NON_CRC4 0x05 +#define PC300_FR_UNFRAMED 0x06 + +#define PC300_LBO_0_DB 0x00 +#define PC300_LBO_7_5_DB 0x01 +#define PC300_LBO_15_DB 0x02 +#define PC300_LBO_22_5_DB 0x03 + +#define PC300_RX_SENS_SH 0x01 +#define PC300_RX_SENS_LH 0x02 + +#define PC300_TX_TIMEOUT (2*HZ) +#define PC300_TX_QUEUE_LEN 100 +#define PC300_DEF_MTU 1600 + +#ifdef __KERNEL__ +/* Function Prototypes */ +int dma_buf_write(pc300_t *, int, ucchar *, int); +int dma_buf_read(pc300_t *, int, struct sk_buff *); +void tx_dma_start(pc300_t *, int); +void rx_dma_start(pc300_t *, int); +void tx_dma_stop(pc300_t *, int); +void rx_dma_stop(pc300_t *, int); +int cpc_queue_xmit(struct sk_buff *, struct net_device *); +void cpc_net_rx(hdlc_device *); +void cpc_sca_status(pc300_t *, int); +int cpc_change_mtu(struct net_device *, int); +int cpc_ioctl(struct net_device *, struct ifreq *, int); +int ch_config(pc300dev_t *); +int rx_config(pc300dev_t *); +int tx_config(pc300dev_t *); +void cpc_opench(pc300dev_t *); +void cpc_closech(pc300dev_t *); +int cpc_open(struct net_device *dev); +int cpc_close(struct net_device *dev); +int cpc_set_media(hdlc_device *, int); +#endif /* __KERNEL__ */ + +#endif /* _PC300_H */ + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/net/wan/pc300_tty.c linux.22-ac2/drivers/net/wan/pc300_tty.c --- linux.vanilla/drivers/net/wan/pc300_tty.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/net/wan/pc300_tty.c 2003-06-29 16:09:51.000000000 +0100 @@ -0,0 +1,1123 @@ +/* + * pc300_tty.c Cyclades-PC300(tm) TTY Driver. + * + * Author: Regina Kodato + * Maintainer: PC300 Maintainer + * + * Copyright: (c) 1999-2002 Cyclades Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* TTY includes */ +#include +#include +#include + +#include "pc300.h" + +/* defines and macros */ +/* TTY Global definitions */ +#define CPC_TTY_NPORTS 8 /* maximum number of the sync tty connections */ +#define CPC_TTY_MAJOR CYCLADES_MAJOR +#define CPC_TTY_MINOR_START 240 /* minor of the first PC300 interface */ + +#define CPC_TTY_MAX_MTU 2000 + +/* tty interface state */ +#define CPC_TTY_ST_IDLE 0 +#define CPC_TTY_ST_INIT 1 /* configured with MLPPP and up */ +#define CPC_TTY_ST_OPEN 2 /* opened by application */ + +#define CPC_TTY_LOCK(card,flags)\ + do {\ + spin_lock_irqsave(&card->card_lock, flags); \ + } while (0) + +#define CPC_TTY_UNLOCK(card,flags) \ + do {\ + spin_unlock_irqrestore(&card->card_lock, flags); \ + } while (0) + +//#define CPC_TTY_DBG(format,a...) printk(format,##a) +#define CPC_TTY_DBG(format,a...) + +/* data structures */ +typedef struct _st_cpc_rx_buf { + struct _st_cpc_rx_buf *next; + int size; + unsigned char data[1]; +} st_cpc_rx_buf; + +struct st_cpc_rx_list { + st_cpc_rx_buf *first; + st_cpc_rx_buf *last; +}; + +typedef struct _st_cpc_tty_area { + int state; /* state of the TTY interface */ + int num_open; + unsigned int tty_minor; /* minor this interface */ + volatile struct st_cpc_rx_list buf_rx; /* ptr. to reception buffer */ + unsigned char* buf_tx; /* ptr. to transmission buffer */ + pc300dev_t* pc300dev; /* ptr. to info struct in PC300 driver */ + unsigned char name[20]; /* interf. name + "-tty" */ + struct tty_struct *tty; + struct tq_struct tty_tx_task_queue; /* tx task - tx interrupt */ + struct tq_struct tty_rx_task_queue; /* rx task - rx interrupt */ + } st_cpc_tty_area; + +/* TTY data structures */ +static struct tty_struct *cpc_tty_serial_table[CPC_TTY_NPORTS]; +static struct termios *cpc_tty_serial_termios[CPC_TTY_NPORTS]; +static struct termios *cpc_tty_serial_termios_locked[CPC_TTY_NPORTS]; +static struct tty_driver serial_drv, callout_drv; + +/* local variables */ +st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS]; + +int cpc_tty_cnt=0; /* number of intrfaces configured with MLPPP */ +int cpc_tty_refcount; +int cpc_tty_unreg_flag = 0; + +/* TTY functions prototype */ +static int cpc_tty_open(struct tty_struct *tty, struct file *flip); +static void cpc_tty_close(struct tty_struct *tty, struct file *flip); +static int cpc_tty_write(struct tty_struct *tty, int from_user, + const unsigned char *buf, int count); +static int cpc_tty_write_room(struct tty_struct *tty); +static int cpc_tty_chars_in_buffer(struct tty_struct *tty); +static int cpc_tty_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg); +static void cpc_tty_flush_buffer(struct tty_struct *tty); +static void cpc_tty_hangup(struct tty_struct *tty); +static void cpc_tty_rx_task(void *data); +static void cpc_tty_tx_task(void *data); +static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); +static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); +static void cpc_tty_dtr_off(pc300dev_t *pc300dev); +static void cpc_tty_dtr_on(pc300dev_t *pc300dev); + +/* functions called by PC300 driver */ +void cpc_tty_init(pc300dev_t *dev); +void cpc_tty_unregister_service(pc300dev_t *pc300dev); +void cpc_tty_receive(pc300dev_t *pc300dev); +void cpc_tty_trigger_poll(pc300dev_t *pc300dev); +void cpc_tty_reset_var(void); + +/* + * PC300 TTY clear DTR signal + */ +static void 
cpc_tty_dtr_off(pc300dev_t *pc300dev) +{ + pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; + pc300_t *card = (pc300_t *) pc300chan->card; + int ch = pc300chan->channel; + unsigned long flags; + + CPC_TTY_DBG("%s-tty: Clear signal DTR\n", + ((struct net_device*)(pc300dev->hdlc))->name); + CPC_TTY_LOCK(card, flags); + cpc_writeb(card->hw.scabase + M_REG(CTL,ch), + cpc_readb(card->hw.scabase+M_REG(CTL,ch))& CTL_DTR); + CPC_TTY_UNLOCK(card,flags); +} + +/* + * PC300 TTY set DTR signal to ON + */ +static void cpc_tty_dtr_on(pc300dev_t *pc300dev) +{ + pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; + pc300_t *card = (pc300_t *) pc300chan->card; + int ch = pc300chan->channel; + unsigned long flags; + + CPC_TTY_DBG("%s-tty: Set signal DTR\n", + ((struct net_device*)(pc300dev->hdlc))->name); + CPC_TTY_LOCK(card, flags); + cpc_writeb(card->hw.scabase + M_REG(CTL,ch), + cpc_readb(card->hw.scabase+M_REG(CTL,ch))& ~CTL_DTR); + CPC_TTY_UNLOCK(card,flags); +} + +/* + * PC300 TTY initialization routine + * + * This routine is called by the PC300 driver during board configuration + * (ioctl=SIOCSP300CONF). At this point the adapter is completely + * initialized. + * o verify kernel version (only 2.4.x) + * o register TTY driver + * o init cpc_tty_area struct + */ +void cpc_tty_init(pc300dev_t *pc300dev) +{ + int port, aux; + st_cpc_tty_area * cpc_tty; + + if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)) { + printk("%s-tty: Error: TTY driver is supported on 2.4.X kernel!\n", + ((struct net_device*)(pc300dev->hdlc))->name); + return; + } + + /* hdlcX - X=interface number */ + port = ((struct net_device*)(pc300dev->hdlc))->name[4] - '0'; + if (port >= CPC_TTY_NPORTS) { + printk("%s-tty: invalid interface selected (0-%i): %i", + ((struct net_device*)(pc300dev->hdlc))->name, + CPC_TTY_NPORTS-1,port); + return; + } + + if (cpc_tty_cnt == 0) { /* first TTY connection -> register driver */ + CPC_TTY_DBG("%s-tty: driver init, major:%i, minor range:%i=%i\n", + ((struct net_device*)(pc300dev->hdlc))->name, + CPC_TTY_MAJOR, CPC_TTY_MINOR_START, + CPC_TTY_MINOR_START+CPC_TTY_NPORTS); + /* initialize tty driver struct */ + memset(&serial_drv,0,sizeof(struct tty_driver)); + serial_drv.magic = TTY_DRIVER_MAGIC; + serial_drv.driver_name = "pc300_tty"; + serial_drv.name = "ttyCP"; + serial_drv.major = CPC_TTY_MAJOR; + serial_drv.minor_start = CPC_TTY_MINOR_START; + serial_drv.num = CPC_TTY_NPORTS; + serial_drv.type = TTY_DRIVER_TYPE_SERIAL; + serial_drv.subtype = SERIAL_TYPE_NORMAL; + + serial_drv.init_termios = tty_std_termios; + serial_drv.init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL; + serial_drv.flags = TTY_DRIVER_REAL_RAW; + serial_drv.refcount = &cpc_tty_refcount; + + /* tty data structures */ + serial_drv.table = cpc_tty_serial_table; + serial_drv.termios = cpc_tty_serial_termios; + serial_drv.termios_locked = cpc_tty_serial_termios_locked; + + /* interface routines from the upper tty layer to the tty driver */ + serial_drv.open = cpc_tty_open; + serial_drv.close = cpc_tty_close; + serial_drv.write = cpc_tty_write; + serial_drv.write_room = cpc_tty_write_room; + serial_drv.chars_in_buffer = cpc_tty_chars_in_buffer; + serial_drv.ioctl = cpc_tty_ioctl; + serial_drv.flush_buffer = cpc_tty_flush_buffer; + serial_drv.hangup = cpc_tty_hangup; + + /* the callout device is just like normal device except for major */ + /* number and the subtype code */ + callout_drv = serial_drv; + callout_drv.name = "cucp"; + callout_drv.major = CPC_TTY_MAJOR + 1; + callout_drv.subtype = SERIAL_TYPE_CALLOUT; + 
callout_drv.read_proc = 0; + callout_drv.proc_entry = 0; + + /* register the TTY driver */ + if (tty_register_driver(&serial_drv)) { + printk("%s-tty: Failed to register serial driver! ", + ((struct net_device*)(pc300dev->hdlc))->name); + return; + } + + if (tty_register_driver(&callout_drv)) { + CPC_TTY_DBG("%s-tty: Failed to register callout driver! ", + ((struct net_device*)(pc300dev->hdlc))->name); + return; + } + memset((void *)cpc_tty_area, 0, + sizeof(st_cpc_tty_area) * CPC_TTY_NPORTS); + } + + cpc_tty = &cpc_tty_area[port]; + + if (cpc_tty->state != CPC_TTY_ST_IDLE) { + CPC_TTY_DBG("%s-tty: TTY port %i, already in use.\n", + ((struct net_device*)(pc300dev->hdlc))->name,port); + return; + } + + cpc_tty_cnt++; + cpc_tty->state = CPC_TTY_ST_INIT; + cpc_tty->num_open= 0; + cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; + cpc_tty->pc300dev = pc300dev; + + cpc_tty->tty_tx_task_queue.routine = cpc_tty_tx_task; + cpc_tty->tty_tx_task_queue.data = (void *)cpc_tty; + + cpc_tty->tty_rx_task_queue.routine = cpc_tty_rx_task; + cpc_tty->tty_rx_task_queue.data = (void *) port; + + cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = 0; + + pc300dev->cpc_tty = (void *)cpc_tty; + + aux = strlen(((struct net_device*)(pc300dev->hdlc))->name); + memcpy(cpc_tty->name,((struct net_device*)(pc300dev->hdlc))->name,aux); + memcpy(&cpc_tty->name[aux], "-tty", 5); + + cpc_open((struct net_device *)pc300dev->hdlc); + cpc_tty_dtr_off(pc300dev); + + CPC_TTY_DBG("%s: Initializing TTY Sync Driver, tty major#%d minor#%i\n", + cpc_tty->name,CPC_TTY_MAJOR,cpc_tty->tty_minor); + return; +} + +/* + * PC300 TTY OPEN routine + * + * This routine is called by the tty driver to open the interface + * o verify minor + * o allocate buffer to Rx and Tx + */ +static int cpc_tty_open(struct tty_struct *tty, struct file *flip) +{ + int port ; + st_cpc_tty_area *cpc_tty; + + if (!tty) { + return -ENODEV; + } + + port = MINOR(tty->device) - tty->driver.minor_start; + + if ((port < 0) || (port >= CPC_TTY_NPORTS)){ + CPC_TTY_DBG("pc300_tty: open invalid minor %i\n",MINOR(tty->device)); + return -ENODEV; + } + + cpc_tty = &cpc_tty_area[port]; + + if (cpc_tty->state == CPC_TTY_ST_IDLE){ + CPC_TTY_DBG("%s: open - invalid interface, minor=%i\n", + cpc_tty->name, MINOR(tty->device)); + return -ENODEV; + } + + if (cpc_tty->num_open == 0) { /* first open of this tty */ + if (!cpc_tty_area[port].buf_tx){ + cpc_tty_area[port].buf_tx = kmalloc(CPC_TTY_MAX_MTU,GFP_KERNEL); + if (cpc_tty_area[port].buf_tx == 0){ + CPC_TTY_DBG("%s: error in memory allocation\n",cpc_tty->name); + return -ENOMEM; + } + } + + if (cpc_tty_area[port].buf_rx.first) { + unsigned char * aux; + while (cpc_tty_area[port].buf_rx.first) { + aux = (unsigned char *)cpc_tty_area[port].buf_rx.first; + cpc_tty_area[port].buf_rx.first = cpc_tty_area[port].buf_rx.first->next; + kfree(aux); + } + cpc_tty_area[port].buf_rx.first = NULL; + cpc_tty_area[port].buf_rx.last = NULL; + } + + cpc_tty_area[port].state = CPC_TTY_ST_OPEN; + cpc_tty_area[port].tty = tty; + tty->driver_data = &cpc_tty_area[port]; + + cpc_tty_dtr_on(cpc_tty->pc300dev); + } + + cpc_tty->num_open++; + + CPC_TTY_DBG("%s: opening TTY driver\n", cpc_tty->name); + + /* avisar driver PC300 */ + return 0; +} + +/* + * PC300 TTY CLOSE routine + * + * This routine is called by the tty driver to close the interface + * o call close channel in PC300 driver (cpc_closech) + * o free Rx and Tx buffers + */ + +static void cpc_tty_close(struct tty_struct *tty, struct file *flip) +{ + st_cpc_tty_area *cpc_tty; + unsigned long flags; 
+ int res; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlx-tty: no TTY in close \n"); + return; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty)|| (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return; + } + + if (!cpc_tty->num_open) { + CPC_TTY_DBG("%s: TTY is closed\n",cpc_tty->name); + return; + } + + if (--cpc_tty->num_open > 0) { + CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name); + return; + } + + cpc_tty_dtr_off(cpc_tty->pc300dev); + + CPC_TTY_LOCK(cpc_tty->pc300dev->chan->card, flags); /* lock irq */ + cpc_tty->tty = NULL; + cpc_tty->state = CPC_TTY_ST_INIT; + CPC_TTY_UNLOCK(cpc_tty->pc300dev->chan->card, flags); /* unlock irq */ + + if (cpc_tty->buf_rx.first) { + unsigned char * aux; + while (cpc_tty->buf_rx.first) { + aux = (unsigned char *)cpc_tty->buf_rx.first; + cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next; + kfree(aux); + } + cpc_tty->buf_rx.first = NULL; + cpc_tty->buf_rx.last = NULL; + } + + if (cpc_tty->buf_tx) { + kfree(cpc_tty->buf_tx); + cpc_tty->buf_tx = NULL; + } + + CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name); + + if (!cpc_tty_refcount && cpc_tty_unreg_flag) { + cpc_tty_unreg_flag = 0; + CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name); + if ((res=tty_unregister_driver(&serial_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + if ((res=tty_unregister_driver(&callout_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + } + return; +} + +/* + * PC300 TTY WRITE routine + * + * This routine is called by the tty driver to write a series of characters + * to the tty device. The characters may come from user or kernel space. 
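+ * (For from_user writes the data is first staged in the per-port
+ * buf_tx buffer via copy_from_user() before being handed to
+ * cpc_tty_send_to_card(); kernel-space writes go to the card
+ * directly.)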
+ * o verify the DCD signal + * o send characters to board and start the transmission + */ +static int cpc_tty_write(struct tty_struct *tty, int from_user, + const unsigned char *buf, int count) +{ + st_cpc_tty_area *cpc_tty; + pc300ch_t *pc300chan; + pc300_t *card; + int ch; + unsigned long flags; + struct net_device_stats *stats; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY in write\n"); + return -ENODEV; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n", cpc_tty->name); + return -ENODEV; + } + + if (count > CPC_TTY_MAX_MTU) { + CPC_TTY_DBG("%s: count is invalid\n",cpc_tty->name); + return -EINVAL; /* frame too big */ + } + + CPC_TTY_DBG("%s: cpc_tty_write %s data len=%i\n",cpc_tty->name, + (from_user)?"from user" : "from kernel",count); + + pc300chan = (pc300ch_t *)((pc300dev_t*)cpc_tty->pc300dev)->chan; + stats = &((pc300dev_t*)cpc_tty->pc300dev)->hdlc->stats; + card = (pc300_t *) pc300chan->card; + ch = pc300chan->channel; + + /* verify DCD signal*/ + if (cpc_readb(card->hw.scabase + M_REG(ST3,ch)) & ST3_DCD) { + /* DCD is OFF */ + CPC_TTY_DBG("%s : DCD is OFF\n", cpc_tty->name); + stats->tx_errors++; + stats->tx_carrier_errors++; + CPC_TTY_LOCK(card, flags); + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR); + + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & + ~(CPLD_REG2_FALC_LED1 << (2 *ch))); + } + + CPC_TTY_UNLOCK(card, flags); + + return -EINVAL; + } + + if (from_user) { + unsigned char *buf_tmp; + + buf_tmp = cpc_tty->buf_tx; + if (copy_from_user(buf_tmp, buf, count)) { + /* failed to copy from user */ + CPC_TTY_DBG("%s: error in copy from user\n",cpc_tty->name); + return -EINVAL; + } + + if (cpc_tty_send_to_card(cpc_tty->pc300dev, (void*) buf_tmp,count)) { + /* failed to send */ + CPC_TTY_DBG("%s: transmission error\n",cpc_tty->name); + return 0; + } + } else { + if (cpc_tty_send_to_card(cpc_tty->pc300dev, (void*)buf, count)) { + /* failed to send */ + CPC_TTY_DBG("%s: trasmition error\n", cpc_tty->name); + return 0; + } + } + return count; +} + +/* + * PC300 TTY Write Room routine + * + * This routine returns the numbers of characteres the tty driver will accept + * for queuing to be written. 
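+ * (No data is queued inside this driver: write_room() always reports
+ * room for one maximal frame, CPC_TTY_MAX_MTU bytes, and
+ * chars_in_buffer() further below reports 0.)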
+ * o return MTU + */ +static int cpc_tty_write_room(struct tty_struct *tty) +{ + st_cpc_tty_area *cpc_tty; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY to write room\n"); + return -ENODEV; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return -ENODEV; + } + + CPC_TTY_DBG("%s: write room\n",cpc_tty->name); + + return CPC_TTY_MAX_MTU; +} + +/* + * PC300 TTY chars in buffer routine + * + * This routine returns the chars number in the transmission buffer + * o returns 0 + */ +static int cpc_tty_chars_in_buffer(struct tty_struct *tty) +{ + st_cpc_tty_area *cpc_tty; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n"); + return -ENODEV; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return -ENODEV; + } + + return(0); +} + +/* + * PC300 TTY IOCTL routine + * + * This routine treats TIOCMBIS (set DTR signal) and TIOCMBIC (clear DTR + * signal)IOCTL commands. + */ +static int cpc_tty_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg) + +{ + st_cpc_tty_area *cpc_tty; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n"); + return -ENODEV; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return -ENODEV; + } + + CPC_TTY_DBG("%s: IOCTL cmd %x\n",cpc_tty->name,cmd); + + switch (cmd) { + case TIOCMBIS : /* set DTR */ + cpc_tty_dtr_on(cpc_tty->pc300dev); + break; + + case TIOCMBIC: /* clear DTR */ + cpc_tty_dtr_off(cpc_tty->pc300dev); + break; + default : + return -ENOIOCTLCMD; + } + return 0; +} + +/* + * PC300 TTY Flush Buffer routine + * + * This routine resets the transmission buffer + */ +static void cpc_tty_flush_buffer(struct tty_struct *tty) +{ + st_cpc_tty_area *cpc_tty; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY to flush buffer\n"); + return; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return; + } + + CPC_TTY_DBG("%s: call wake_up_interruptible\n",cpc_tty->name); + + wake_up_interruptible(&tty->write_wait); + + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup){ + CPC_TTY_DBG("%s: call line disc. 
wake up\n",cpc_tty->name); + tty->ldisc.write_wakeup(tty); + } + + return; +} + +/* + * PC300 TTY Hangup routine + * + * This routine is called by the tty driver to hangup the interface + * o clear DTR signal + */ + +static void cpc_tty_hangup(struct tty_struct *tty) +{ + st_cpc_tty_area *cpc_tty; + int res; + + if (!tty || !tty->driver_data ) { + CPC_TTY_DBG("hdlcX-tty: no TTY to hangup\n"); + return ; + } + + cpc_tty = (st_cpc_tty_area *) tty->driver_data; + + if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { + CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name); + return ; + } + if (!cpc_tty_refcount && cpc_tty_unreg_flag) { + cpc_tty_unreg_flag = 0; + CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name); + if ((res=tty_unregister_driver(&serial_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + if ((res=tty_unregister_driver(&callout_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + } + cpc_tty_dtr_off(cpc_tty->pc300dev); +} + +/* + * PC300 TTY RX task routine + * This routine treats RX task + * o verify read buffer + * o call the line disc. read + * o free memory + */ +static void cpc_tty_rx_task(void * data) +{ + int port, i, j; + st_cpc_tty_area *cpc_tty; + volatile st_cpc_rx_buf * buf; + char flags=0,flg_rx=1; + + if (cpc_tty_cnt == 0) return; + + for (i=0; (i < 4) && flg_rx ; i++) { + flg_rx = 0; + port = (int) data; + for (j=0; j < CPC_TTY_NPORTS; j++) { + cpc_tty = &cpc_tty_area[port]; + + if ((buf=cpc_tty->buf_rx.first) != 0) { + + if (cpc_tty->tty && (cpc_tty->tty->ldisc.receive_buf)) { + CPC_TTY_DBG("%s: call line disc. receive_buf\n",cpc_tty->name); + cpc_tty->tty->ldisc.receive_buf(cpc_tty->tty, buf->data, + &flags, buf->size); + } + cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next; + kfree((unsigned char *)buf); + buf = cpc_tty->buf_rx.first; + flg_rx = 1; + } + if (++port == CPC_TTY_NPORTS) port = 0; + } + } +} + +/* + * PC300 TTY RX task routine + * + * This routine treats RX interrupt. 
+ * o read all frames in card + * o verify the frame size + * o read the frame in rx buffer + */ +static void cpc_tty_rx_disc_frame(pc300ch_t *pc300chan) +{ + volatile pcsca_bd_t * ptdescr; + volatile unsigned char status; + pc300_t *card = (pc300_t *)pc300chan->card; + int ch = pc300chan->channel; + + /* dma buf read */ + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + RX_BD_ADDR(ch, pc300chan->rx_first_bd)); + while (pc300chan->rx_first_bd != pc300chan->rx_last_bd) { + status = cpc_readb(&ptdescr->status); + cpc_writeb(&ptdescr->status, 0); + cpc_writeb(&ptdescr->len, 0); + pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) & + (N_DMA_RX_BUF - 1); + if (status & DST_EOM) { + break; /* end of message */ + } + ptdescr = (pcsca_bd_t *)(card->hw.rambase + cpc_readl(&ptdescr->next)); + } +} + +void cpc_tty_receive(pc300dev_t *pc300dev) +{ + st_cpc_tty_area *cpc_tty; + pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; + pc300_t *card = (pc300_t *)pc300chan->card; + int ch = pc300chan->channel; + volatile pcsca_bd_t * ptdescr; + struct net_device_stats *stats = &pc300dev->hdlc->stats; + int rx_len, rx_aux; + volatile unsigned char status; + unsigned short first_bd = pc300chan->rx_first_bd; + st_cpc_rx_buf *new; + unsigned char dsr_rx; + + if (pc300dev->cpc_tty == NULL) { + return; + } + + dsr_rx = cpc_readb(card->hw.scabase + DSR_RX(ch)); + + cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty; + + while (1) { + rx_len = 0; + ptdescr = (pcsca_bd_t *)(card->hw.rambase + RX_BD_ADDR(ch, first_bd)); + while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { + rx_len += cpc_readw(&ptdescr->len); + first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1); + if (status & DST_EOM) { + break; + } + ptdescr=(pcsca_bd_t*)(card->hw.rambase+cpc_readl(&ptdescr->next)); + } + + if (!rx_len) { + if (dsr_rx & DSR_BOF) { + /* update EDA */ + cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), + RX_BD_ADDR(ch, pc300chan->rx_last_bd)); + } + return; + } + + if (rx_len > CPC_TTY_MAX_MTU) { + /* Free RX descriptors */ + CPC_TTY_DBG("%s: frame size is invalid.\n",cpc_tty->name); + stats->rx_errors++; + stats->rx_frame_errors++; + cpc_tty_rx_disc_frame(pc300chan); + continue; + } + + new = (st_cpc_rx_buf *) kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC); + if (new == 0) { + cpc_tty_rx_disc_frame(pc300chan); + continue; + } + + /* dma buf read */ + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + RX_BD_ADDR(ch, pc300chan->rx_first_bd)); + + rx_len = 0; /* counter frame size */ + + while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { + rx_aux = cpc_readw(&ptdescr->len); + if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT)) + || (rx_aux > BD_DEF_LEN)) { + CPC_TTY_DBG("%s: reception error\n", cpc_tty->name); + stats->rx_errors++; + if (status & DST_OVR) { + stats->rx_fifo_errors++; + } + if (status & DST_CRC) { + stats->rx_crc_errors++; + } + if ((status & (DST_RBIT | DST_SHRT | DST_ABT)) || + (rx_aux > BD_DEF_LEN)) { + stats->rx_frame_errors++; + } + /* discard remainig descriptors used by the bad frame */ + CPC_TTY_DBG("%s: reception error - discard descriptors", + cpc_tty->name); + cpc_tty_rx_disc_frame(pc300chan); + rx_len = 0; + kfree((unsigned char *)new); + break; /* read next frame - while(1) */ + } + + if (cpc_tty->state != CPC_TTY_ST_OPEN) { + /* Free RX descriptors */ + cpc_tty_rx_disc_frame(pc300chan); + stats->rx_dropped++; + rx_len = 0; + kfree((unsigned char *)new); + break; /* read next frame - while(1) */ + } + + /* read the segment of the frame */ + if (rx_aux != 0) { + memcpy_fromio((new->data 
+ rx_len), + (void *)(card->hw.rambase + + cpc_readl(&ptdescr->ptbuf)), rx_aux); + rx_len += rx_aux; + } + cpc_writeb(&ptdescr->status,0); + cpc_writeb(&ptdescr->len, 0); + pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) & + (N_DMA_RX_BUF -1); + if (status & DST_EOM)break; + + ptdescr = (pcsca_bd_t *) (card->hw.rambase + + cpc_readl(&ptdescr->next)); + } + /* update pointer */ + pc300chan->rx_last_bd = (pc300chan->rx_first_bd - 1) & + (N_DMA_RX_BUF - 1) ; + if (!(dsr_rx & DSR_BOF)) { + /* update EDA */ + cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), + RX_BD_ADDR(ch, pc300chan->rx_last_bd)); + } + if (rx_len != 0) { + stats->rx_bytes += rx_len; + + if (pc300dev->trace_on) { + cpc_tty_trace(pc300dev, new->data,rx_len, 'R'); + } + new->size = rx_len; + new->next = 0; + if (cpc_tty->buf_rx.first == 0) { + cpc_tty->buf_rx.first = new; + cpc_tty->buf_rx.last = new; + } else { + cpc_tty->buf_rx.last->next = new; + cpc_tty->buf_rx.last = new; + } + schedule_task(&(cpc_tty->tty_rx_task_queue)); + stats->rx_packets++; + } + } +} + +/* + * PC300 TTY TX task routine + * + * This routine treats TX interrupt. + * o if need call line discipline wakeup + * o call wake_up_interruptible + */ +static void cpc_tty_tx_task(void *data) +{ + st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; + struct tty_struct *tty; + + CPC_TTY_DBG("%s: cpc_tty_tx_task init\n",cpc_tty->name); + + if ((tty = cpc_tty->tty) == 0) { + CPC_TTY_DBG("%s: the interface is not opened\n",cpc_tty->name); + return; + } + + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup){ + CPC_TTY_DBG("%s:call line disc. wakeup\n",cpc_tty->name); + tty->ldisc.write_wakeup (tty); + } + + wake_up_interruptible(&tty->write_wait); +} + +/* + * PC300 TTY send to card routine + * + * This routine send data to card. + * o clear descriptors + * o write data to DMA buffers + * o start the transmission + */ +static int cpc_tty_send_to_card(pc300dev_t *dev,void* buf, int len) +{ + pc300ch_t *chan = (pc300ch_t *)dev->chan; + pc300_t *card = (pc300_t *)chan->card; + int ch = chan->channel; + struct net_device_stats *stats = &dev->hdlc->stats; + unsigned long flags; + volatile pcsca_bd_t * ptdescr; + int i, nchar; + int tosend = len; + int nbuf = ((len - 1)/BD_DEF_LEN) + 1; + unsigned char *pdata=buf; + + CPC_TTY_DBG("%s:cpc_tty_send_to_cars len=%i", + (st_cpc_tty_area *)dev->cpc_tty->name,len); + + if (nbuf >= card->chan[ch].nfree_tx_bd) { + return 1; + } + + /* write buffer to DMA buffers */ + CPC_TTY_DBG("%s: call dma_buf_write\n", + (st_cpc_tty_area *)dev->cpc_tty->name); + for (i = 0 ; i < nbuf ; i++) { + ptdescr = (pcsca_bd_t *)(card->hw.rambase + + TX_BD_ADDR(ch, card->chan[ch].tx_next_bd)); + nchar = (BD_DEF_LEN > tosend) ? 
tosend : BD_DEF_LEN; + if (cpc_readb(&ptdescr->status) & DST_OSB) { + memcpy_toio((void *)(card->hw.rambase + + cpc_readl(&ptdescr->ptbuf)), + &pdata[len - tosend], + nchar); + card->chan[ch].nfree_tx_bd--; + if ((i + 1) == nbuf) { + /* This must be the last BD to be used */ + cpc_writeb(&ptdescr->status, DST_EOM); + } else { + cpc_writeb(&ptdescr->status, 0); + } + cpc_writew(&ptdescr->len, nchar); + } else { + CPC_TTY_DBG("%s: error in dma_buf_write\n", + (st_cpc_tty_area *)dev->cpc_tty->name); + stats->tx_dropped++; + return 1; + } + tosend -= nchar; + card->chan[ch].tx_next_bd = + (card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1); + } + + if (dev->trace_on) { + cpc_tty_trace(dev, buf, len,'T'); + } + + /* start transmission */ + CPC_TTY_DBG("%s: start transmission\n", + (st_cpc_tty_area *)dev->cpc_tty->name); + + CPC_TTY_LOCK(card, flags); + cpc_writeb(card->hw.scabase + DTX_REG(EDAL, ch), + TX_BD_ADDR(ch, chan->tx_next_bd)); + cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA); + cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE); + + if (card->hw.type == PC300_TE) { + cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, + cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) | + (CPLD_REG2_FALC_LED1 << (2 * ch))); + } + CPC_TTY_UNLOCK(card, flags); + return 0; +} + +/* + * PC300 TTY trace routine + * + * This routine send trace of connection to application. + * o clear descriptors + * o write data to DMA buffers + * o start the transmission + */ + +static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx) +{ + struct sk_buff *skb; + + if ((skb = dev_alloc_skb(10 + len)) == NULL) { + /* out of memory */ + CPC_TTY_DBG("%s: tty_trace - out of memory\n", + ((struct net_device *)(dev->hdlc))->name); + return; + } + + skb_put (skb, 10 + len); + skb->dev = (struct net_device *) dev->hdlc; + skb->protocol = htons(ETH_P_CUST); + skb->mac.raw = skb->data; + skb->pkt_type = PACKET_HOST; + skb->len = 10 + len; + + memcpy(skb->data,((struct net_device *)(dev->hdlc))->name,5); + skb->data[5] = '['; + skb->data[6] = rxtx; + skb->data[7] = ']'; + skb->data[8] = ':'; + skb->data[9] = ' '; + memcpy(&skb->data[10], buf, len); + netif_rx(skb); +} + +/* + * PC300 TTY unregister service routine + * + * This routine unregister one interface. 
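+ * (When the last MLPPP interface goes away while some tty is still
+ * open, i.e. cpc_tty_refcount is non-zero, the TTY driver itself
+ * cannot be unregistered yet: cpc_tty_unreg_flag is set instead and
+ * the final tty_unregister_driver() is done later from
+ * cpc_tty_close() or cpc_tty_hangup().)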
+ */ +void cpc_tty_unregister_service(pc300dev_t *pc300dev) +{ + st_cpc_tty_area *cpc_tty; + ulong flags; + int res; + + if ((cpc_tty= (st_cpc_tty_area *) pc300dev->cpc_tty) == 0) { + CPC_TTY_DBG("%s: interface is not TTY\n", + ((struct net_device *)(pc300dev->hdlc))->name); + return; + } + CPC_TTY_DBG("%s: cpc_tty_unregister_service", cpc_tty->name); + + if (cpc_tty->pc300dev != pc300dev) { + CPC_TTY_DBG("%s: invalid tty ptr=%s\n", + ((struct net_device *)(pc300dev->hdlc))->name, cpc_tty->name); + return; + } + + if (--cpc_tty_cnt == 0) { + if (cpc_tty_refcount) { + CPC_TTY_DBG("%s: unregister is not possible, refcount=%d", + cpc_tty->name, cpc_tty_refcount); + cpc_tty_cnt++; + cpc_tty_unreg_flag = 1; + return; + } else { + CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name); + if ((res=tty_unregister_driver(&serial_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + if ((res=tty_unregister_driver(&callout_drv))) { + CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n", + cpc_tty->name,res); + } + } + } + CPC_TTY_LOCK(pc300dev->chan->card,flags); + cpc_tty->tty = NULL; + CPC_TTY_UNLOCK(pc300dev->chan->card, flags); + cpc_tty->tty_minor = 0; + cpc_tty->state = CPC_TTY_ST_IDLE; +} + +/* + * PC300 TTY trigger poll routine + * This routine is called by pc300driver to treats Tx interrupt. + */ +void cpc_tty_trigger_poll(pc300dev_t *pc300dev) +{ + st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty; + if (!cpc_tty) { + return; + } + schedule_task(&(cpc_tty->tty_tx_task_queue)); +} + +/* + * PC300 TTY reset var routine + * This routine is called by pc300driver to init the TTY area. + */ + +void cpc_tty_reset_var(void) +{ + int i ; + + CPC_TTY_DBG("hdlcX-tty: reset variables\n"); + /* reset the tty_driver structure - serial_drv */ + memset(&serial_drv, 0, sizeof(struct tty_driver)); + memset(&callout_drv, 0, sizeof(struct tty_driver)); + for (i=0; i < CPC_TTY_NPORTS; i++){ + memset(&cpc_tty_area[i],0, sizeof(st_cpc_tty_area)); + } +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/pci/pci.c linux.22-ac2/drivers/pci/pci.c --- linux.vanilla/drivers/pci/pci.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/pci/pci.c 2003-06-29 16:10:14.000000000 +0100 @@ -39,6 +39,8 @@ LIST_HEAD(pci_root_buses); LIST_HEAD(pci_devices); +static int pci_announce_device(struct pci_driver *drv, struct pci_dev *dev); + /** * pci_find_slot - locate PCI device from a given PCI slot * @bus: number of PCI bus on which desired PCI device resides @@ -360,6 +362,56 @@ } /** + * pci_device_restart - restart a failed PCI device + * @dev: PCI device to restart + * @name: name to refer to it by + * + * When a PCI device gets into a mess we can often recover it by + * using D3 state to effectively power cycle the interface and + * faking hot unplug/replug. This function must be called from + * process context. Within an IRQ the caller should invoke + * pci_disable_device and then schedule the recovery to occur + * in process context using schedule_task. 
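+ *
+ * A minimal sketch of the intended calling pattern (illustrative
+ * only: my_restart_tq and my_wedged_dev are hypothetical driver
+ * fields, not defined by this patch):
+ *
+ *    static struct tq_struct my_restart_tq;
+ *
+ *    static void my_restart(void *data)
+ *    {
+ *        pci_device_restart((struct pci_dev *)data, "mydev");
+ *    }
+ *
+ * and in the interrupt handler, on detecting a wedged card:
+ *
+ *    pci_disable_device(my_wedged_dev);
+ *    my_restart_tq.routine = my_restart;
+ *    my_restart_tq.data = my_wedged_dev;
+ *    schedule_task(&my_restart_tq);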
+ */ + +int pci_device_restart(struct pci_dev *dev, char *name) +{ + struct pci_driver *driver = dev->driver; + + /* Fence the card */ + pci_disable_device(dev); + printk(KERN_WARNING "%s: PCI device being restarted.\n", name); + + /* Fake a hot unplug - the real one removes the card resources + so we do this by hand */ + if (driver) + { + driver->remove(dev); + dev->driver = NULL; + } + + /* Off and wait and on and pray */ + if(pci_set_power_state(dev, 3) != -EIO) + { + /* PM supported so do the PM cycling too */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ/5); + pci_set_power_state(dev, 0); + } + + /* We use the proper announce function because there + we can. The less magic the better. Assuming the device + came back ok this will make the device driver reconfigure + the hardware without tons of error code in each driver */ + + pci_announce_device(driver, dev); + printk(KERN_WARNING "%s: PCI device has been restarted.\n", name); + return 0; +} + +EXPORT_SYMBOL_GPL(pci_device_restart); + +/** * pci_enable_device_bars - Initialize some of a device for use * @dev: PCI device to be initialized * @bars: bitmask of BAR's that must be configured diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/pci/pci.ids linux.22-ac2/drivers/pci/pci.ids --- linux.vanilla/drivers/pci/pci.ids 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/pci/pci.ids 2003-08-28 17:03:10.000000000 +0100 @@ -2280,6 +2280,9 @@ 1006 MINI PCI type 3B Data Fax Modem 1007 Mini PCI 56k Winmodem 10b7 615c Mini PCI 56K Modem + 1700 Gigabit Ethernet Adapter + 10b7 0010 3Com 3C940 Gigabit LOM Ethernet Adapter + 10b7 0020 3Com 3C941 Gigabit LOM Ethernet Adapter 3390 3c339 TokenLink Velocity 3590 3c359 TokenLink Velocity XL 10b7 3590 TokenLink Velocity XL Adapter (3C359/359B) @@ -3385,23 +3388,36 @@ 1148 5844 FDDI SK-5844 (SK-NET FDDI-LP64 DAS) 4200 Token Ring adapter 4300 Gigabit Ethernet - 1148 9821 SK-9821 (1000Base-T single link) - 1148 9822 SK-9822 (1000Base-T dual link) - 1148 9841 SK-9841 (1000Base-LX single link) - 1148 9842 SK-9842 (1000Base-LX dual link) - 1148 9843 SK-9843 (1000Base-SX single link) - 1148 9844 SK-9844 (1000Base-SX dual link) - 1148 9861 SK-9861 (1000Base-SX VF45 single link) - 1148 9862 SK-9862 (1000Base-SX VF45 dual link) -# Information got from SysKonnekt - 1148 9871 SK-9871 (1000Base-ZX single link) -# Information got from SysKonnekt - 1148 9872 SK-9872 (1000Base-ZX dual link) - 1259 2970 AT-2970SX [Allied Telesyn] - 1259 2972 AT-2970T [Allied Telesyn] - 1259 2975 AT-2970SX [Allied Telesyn] - 1259 2977 AT-2970T [Allied Telesyn] + 1148 9821 SK-9821 Gigabit Ethernet Server Adapter (SK-NET GE-T) + 1148 9822 SK-9822 Gigabit Ethernet Server Adapter (SK-NET GE-T dual link) + 1148 9841 SK-9841 Gigabit Ethernet Server Adapter (SK-NET GE-LX) + 1148 9842 SK-9842 Gigabit Ethernet Server Adapter (SK-NET GE-LX dual link) + 1148 9843 SK-9843 Gigabit Ethernet Server Adapter (SK-NET GE-SX) + 1148 9844 SK-9844 Gigabit Ethernet Server Adapter (SK-NET GE-SX dual link) + 1148 9861 SK-9861 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition) + 1148 9862 SK-9862 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition dual link) + 1148 9871 SK-9871 Gigabit Ethernet Server Adapter (SK-NET GE-ZX) + 1148 9872 SK-9872 Gigabit Ethernet Server Adapter (SK-NET GE-ZX dual link) + 1259 2970 Allied Telesyn AT-2970SX Gigabit Ethernet Adapter + 1259 2971 Allied Telesyn AT-2970LX Gigabit Ethernet Adapter + 1259 2972 Allied Telesyn AT-2970TX Gigabit Ethernet Adapter + 1259 2973 
Allied Telesyn AT-2971SX Gigabit Ethernet Adapter + 1259 2974 Allied Telesyn AT-2971T Gigabit Ethernet Adapter + 1259 2975 Allied Telesyn AT-2970SX/2SC Gigabit Ethernet Adapter + 1259 2976 Allied Telesyn AT-2970LX/2SC Gigabit Ethernet Adapter + 1259 2977 Allied Telesyn AT-2970TX/2TX Gigabit Ethernet Adapter 4320 SK-98xx Gigabit Ethernet Server Adapter + 1148 0121 Marvell RDK-8001 Adapter + 1148 0221 Marvell RDK-8002 Adapter + 1148 0321 Marvell RDK-8003 Adapter + 1148 0421 Marvell RDK-8004 Adapter + 1148 0621 Marvell RDK-8006 Adapter + 1148 0721 Marvell RDK-8007 Adapter + 1148 0821 Marvell RDK-8008 Adapter + 1148 0921 Marvell RDK-8009 Adapter + 1148 1121 Marvell RDK-8011 Adapter + 1148 1221 Marvell RDK-8012 Adapter + 1148 3221 SK-9521 V2.0 10/100/1000Base-T Adapter 1148 5021 SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter 1148 5041 SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter 1148 5043 SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter @@ -3626,6 +3642,8 @@ 1340 DFE-690TXD CardBus PC Card 1561 DRP-32TXD Cardbus PC Card 4000 DL2K Ethernet + 4c00 Gigabit Ethernet Adapter + 1186 4c00 DGE-530T Gigabit Ethernet Adapter 1187 Advanced Technology Laboratories, Inc. 1188 Shima Seiki Manufacturing Ltd. 1189 Matsushita Electronics Co Ltd @@ -3706,6 +3724,8 @@ 4611 GT-64115 System Controller 4620 GT-64120/64120A/64121A System Controller 4801 GT-48001 + 4320 Gigabit Ethernet Adapter + 11ab 9521 Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter f003 GT-64010 Primary Image Piranha Image Generator 11ac Canon Information Systems Research Aust. 11ad Lite-On Communications Inc @@ -4933,6 +4953,8 @@ 136f Applied Magic Inc 1370 ATL Products 1371 CNet Technology Inc + 434e GigaCard Network Adapter + 1371 434e N-Way PCI-Bus Giga-Card 1000/100/10Mbps(L) 1373 Silicon Vision Inc 1374 Silicom Ltd 1375 Argosystems Inc @@ -6064,6 +6086,10 @@ 170c YottaYotta Inc. 172a Accelerated Encryption 1737 Linksys + 1032 Gigabit Network Adapter + 1737 0015 EG1032 v2 Instant Gigabit Network Adapter + 1064 Gigabit Network Adapter + 1737 0016 EG1064 v2 Instant Gigabit Network Adapter 173b Altima (nee Broadcom) 03e8 AC1000 Gigabit Ethernet 03ea AC9100 Gigabit Ethernet diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/pci/quirks.c linux.22-ac2/drivers/pci/quirks.c --- linux.vanilla/drivers/pci/quirks.c 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/pci/quirks.c 2003-07-31 14:42:07.000000000 +0100 @@ -464,23 +464,24 @@ * Following the PCI ordering rules is optional on the AMD762. I'm not * sure what the designers were smoking but let's not inhale... * - * To be fair to AMD, it follows the spec by default, its BIOS people - * who turn it off! + * In fact, AMD even recommends to don't follow PCI standards + * in the section "Recommended BIOS settings" of the datasheet */ static void __init quirk_amd_ordering(struct pci_dev *dev) { u32 pcic; + + printk(KERN_WARNING "Setting AMD recommended values for PCI bus. 
It isn't fully PCI standards compliant\n"); + pci_read_config_dword(dev, 0x4C, &pcic); - if((pcic&6)!=6) - { - pcic |= 6; - printk(KERN_WARNING "BIOS failed to enable PCI standards compliance, fixing this error.\n"); + pcic &= ~((u32)6); pci_write_config_dword(dev, 0x4C, pcic); + pci_read_config_dword(dev, 0x84, &pcic); - pcic |= (1<<23); /* Required in this mode */ + pcic &= ~((u32)(1<<23)); + pcic |= (1<<3); pci_write_config_dword(dev, 0x84, pcic); - } } #ifdef CONFIG_X86_IO_APIC diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/pcmcia/ricoh.h linux.22-ac2/drivers/pcmcia/ricoh.h --- linux.vanilla/drivers/pcmcia/ricoh.h 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/pcmcia/ricoh.h 2003-09-01 13:22:09.000000000 +0100 @@ -109,7 +109,7 @@ /* 16-bit IO and memory timing registers */ #define RL5C4XX_16BIT_IO_0 0x0088 /* 16 bit */ -#define RL5C4XX_16BIT_MEM_0 0x0088 /* 16 bit */ +#define RL5C4XX_16BIT_MEM_0 0x008a /* 16 bit */ #define RL5C4XX_SETUP_MASK 0x0007 #define RL5C4XX_SETUP_SHIFT 0 #define RL5C4XX_CMD_MASK 0x01f0 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/pnp/Config.in linux.22-ac2/drivers/pnp/Config.in --- linux.vanilla/drivers/pnp/Config.in 2001-10-25 22:01:51.000000000 +0100 +++ linux.22-ac2/drivers/pnp/Config.in 2003-06-29 16:10:17.000000000 +0100 @@ -8,4 +8,8 @@ dep_tristate ' ISA Plug and Play support' CONFIG_ISAPNP $CONFIG_PNP +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + dep_bool ' PNPBIOS support (EXPERIMENTAL)' CONFIG_PNPBIOS $CONFIG_PNP +fi + endmenu diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/pnp/Makefile linux.22-ac2/drivers/pnp/Makefile --- linux.vanilla/drivers/pnp/Makefile 2001-03-27 00:36:30.000000000 +0100 +++ linux.22-ac2/drivers/pnp/Makefile 2003-06-29 16:10:17.000000000 +0100 @@ -10,15 +10,22 @@ O_TARGET := pnp.o -export-objs := isapnp.o -list-multi := isa-pnp.o +export-objs := isapnp.o pnpbios_core.o +multi-objs := isa-pnp.o pnpbios.o -proc-$(CONFIG_PROC_FS) = isapnp_proc.o -isa-pnp-objs := isapnp.o quirks.o $(proc-y) +isa-pnp-proc-$(CONFIG_PROC_FS) = isapnp_proc.o +pnpbios-proc-$(CONFIG_PROC_FS) = pnpbios_proc.o + +isa-pnp-objs := isapnp.o quirks.o $(isa-pnp-proc-y) +pnpbios-objs := pnpbios_core.o $(pnpbios-proc-y) obj-$(CONFIG_ISAPNP) += isa-pnp.o +obj-$(CONFIG_PNPBIOS) += pnpbios.o include $(TOPDIR)/Rules.make isa-pnp.o: $(isa-pnp-objs) $(LD) $(LD_RFLAG) -r -o $@ $(isa-pnp-objs) + +pnpbios.o: $(pnpbios-objs) + $(LD) $(LD_RFLAG) -r -o $@ $(pnpbios-objs) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/pnp/pnpbios_core.c linux.22-ac2/drivers/pnp/pnpbios_core.c --- linux.vanilla/drivers/pnp/pnpbios_core.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/pnp/pnpbios_core.c 2003-06-29 16:10:17.000000000 +0100 @@ -0,0 +1,1352 @@ +/* + * PnP BIOS services + * + * Originally (C) 1998 Christian Schmidt + * Modifications (c) 1998 Tom Lees + * Minor reorganizations by David Hinds + * Modifications (c) 2001,2002 by Thomas Hood + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * References: + * Compaq Computer Corporation, Phoenix Technologies Ltd., Intel Corporation + * Plug and Play BIOS Specification, Version 1.0A, May 5, 1994 + * Plug and Play BIOS Clarification Paper, October 6, 1994 + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * + * PnP BIOS INTERFACE + * + */ + +/* PnP BIOS signature: "$PnP" */ +#define PNP_SIGNATURE (('$' << 0) + ('P' << 8) + ('n' << 16) + ('P' << 24)) + +#pragma pack(1) +union pnp_bios_expansion_header { + struct { + u32 signature; /* "$PnP" */ + u8 version; /* in BCD */ + u8 length; /* length in bytes, currently 21h */ + u16 control; /* system capabilities */ + u8 checksum; /* all bytes must add up to 0 */ + + u32 eventflag; /* phys. address of the event flag */ + u16 rmoffset; /* real mode entry point */ + u16 rmcseg; + u16 pm16offset; /* 16 bit protected mode entry */ + u32 pm16cseg; + u32 deviceID; /* EISA encoded system ID or 0 */ + u16 rmdseg; /* real mode data segment */ + u32 pm16dseg; /* 16 bit pm data segment base */ + } fields; + char chars[0x21]; /* To calculate the checksum */ +}; +#pragma pack() + +static struct { + u16 offset; + u16 segment; +} pnp_bios_callpoint; + +static union pnp_bios_expansion_header * pnp_bios_hdr = NULL; + +/* The PnP BIOS entries in the GDT */ +#define PNP_GDT (0x0060) +#define PNP_CS32 (PNP_GDT+0x00) /* segment for calling fn */ +#define PNP_CS16 (PNP_GDT+0x08) /* code segment for BIOS */ +#define PNP_DS (PNP_GDT+0x10) /* data segment for BIOS */ +#define PNP_TS1 (PNP_GDT+0x18) /* transfer data segment */ +#define PNP_TS2 (PNP_GDT+0x20) /* another data segment */ + +/* + * These are some opcodes for a "static asmlinkage" + * As this code is *not* executed inside the linux kernel segment, but in a + * alias at offset 0, we need a far return that can not be compiled by + * default (please, prove me wrong! this is *really* ugly!) + * This is the only way to get the bios to return into the kernel code, + * because the bios code runs in 16 bit protected mode and therefore can only + * return to the caller if the call is within the first 64kB, and the linux + * kernel begins at offset 3GB... + */ + +asmlinkage void pnp_bios_callfunc(void); + +__asm__( + ".text \n" + __ALIGN_STR "\n" + SYMBOL_NAME_STR(pnp_bios_callfunc) ":\n" + " pushl %edx \n" + " pushl %ecx \n" + " pushl %ebx \n" + " pushl %eax \n" + " lcallw " SYMBOL_NAME_STR(pnp_bios_callpoint) "\n" + " addl $16, %esp \n" + " lret \n" + ".previous \n" +); + +#define Q_SET_SEL(selname, address, size) \ +set_base (gdt [(selname) >> 3], __va((u32)(address))); \ +set_limit (gdt [(selname) >> 3], size) + +#define Q2_SET_SEL(selname, address, size) \ +set_base (gdt [(selname) >> 3], (u32)(address)); \ +set_limit (gdt [(selname) >> 3], size) + +/* + * At some point we want to use this stack frame pointer to unwind + * after PnP BIOS oopses. 
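+ *
+ * For now call_pnp_bios() records %esp and a recovery %eip here just
+ * before the far call into the BIOS; if pnp_bios_is_utter_crap is
+ * found set after the call (the BIOS faulted), a warning is printed
+ * and every later call bails out with PNP_FUNCTION_NOT_SUPPORTED.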
+ */ + +u32 pnp_bios_fault_esp; +u32 pnp_bios_fault_eip; +u32 pnp_bios_is_utter_crap = 0; + +static spinlock_t pnp_bios_lock; + +static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, + u16 arg4, u16 arg5, u16 arg6, u16 arg7, + void *ts1_base, u32 ts1_size, + void *ts2_base, u32 ts2_size) +{ + unsigned long flags; + u16 status; + + /* + * PnP BIOSes are generally not terribly re-entrant. + * Also, don't rely on them to save everything correctly. + */ + if(pnp_bios_is_utter_crap) + return PNP_FUNCTION_NOT_SUPPORTED; + + /* On some boxes IRQ's during PnP BIOS calls are deadly. */ + spin_lock_irqsave(&pnp_bios_lock, flags); + + if (ts1_size) + Q2_SET_SEL(PNP_TS1, ts1_base, ts1_size); + if (ts2_size) + Q2_SET_SEL(PNP_TS2, ts2_base, ts2_size); + + __asm__ __volatile__( + "pushl %%ebp\n\t" + "pushl %%edi\n\t" + "pushl %%esi\n\t" + "pushl %%ds\n\t" + "pushl %%es\n\t" + "pushl %%fs\n\t" + "pushl %%gs\n\t" + "pushfl\n\t" + "movl %%esp, pnp_bios_fault_esp\n\t" + "movl $1f, pnp_bios_fault_eip\n\t" + "lcall %5,%6\n\t" + "1:popfl\n\t" + "popl %%gs\n\t" + "popl %%fs\n\t" + "popl %%es\n\t" + "popl %%ds\n\t" + "popl %%esi\n\t" + "popl %%edi\n\t" + "popl %%ebp\n\t" + : "=a" (status) + : "0" ((func) | (((u32)arg1) << 16)), + "b" ((arg2) | (((u32)arg3) << 16)), + "c" ((arg4) | (((u32)arg5) << 16)), + "d" ((arg6) | (((u32)arg7) << 16)), + "i" (PNP_CS32), + "i" (0) + : "memory" + ); + spin_unlock_irqrestore(&pnp_bios_lock, flags); + + /* If we get here and this is set then the PnP BIOS faulted on us. */ + if(pnp_bios_is_utter_crap) + { + printk(KERN_ERR "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue.\n"); + printk(KERN_ERR "PnPBIOS: You may need to reboot with the \"pnpbios=off\" option to operate stably.\n"); + printk(KERN_ERR "PnPBIOS: Check with your vendor for an updated BIOS.\n"); + } + + return status; +} + + +/* + * + * UTILITY FUNCTIONS + * + */ + +static void pnpbios_warn_unexpected_status(const char * module, u16 status) +{ + printk(KERN_ERR "PnPBIOS: %s: Unexpected status 0x%x\n", module, status); +} + +void *pnpbios_kmalloc(size_t size, int f) +{ + void *p = kmalloc( size, f ); + if ( p == NULL ) + printk(KERN_ERR "PnPBIOS: kmalloc() failed\n"); + return p; +} + +/* + * Call this only after init time + */ +static inline int pnp_bios_present(void) +{ + return (pnp_bios_hdr != NULL); +} + +/* Forward declaration */ +static void update_devlist( u8 nodenum, struct pnp_bios_node *data ); + + +/* + * + * PnP BIOS ACCESS FUNCTIONS + * + */ + +#define PNP_GET_NUM_SYS_DEV_NODES 0x00 +#define PNP_GET_SYS_DEV_NODE 0x01 +#define PNP_SET_SYS_DEV_NODE 0x02 +#define PNP_GET_EVENT 0x03 +#define PNP_SEND_MESSAGE 0x04 +#define PNP_GET_DOCKING_STATION_INFORMATION 0x05 +#define PNP_SET_STATIC_ALLOCED_RES_INFO 0x09 +#define PNP_GET_STATIC_ALLOCED_RES_INFO 0x0a +#define PNP_GET_APM_ID_TABLE 0x0b +#define PNP_GET_PNP_ISA_CONFIG_STRUC 0x40 +#define PNP_GET_ESCD_INFO 0x41 +#define PNP_READ_ESCD 0x42 +#define PNP_WRITE_ESCD 0x43 + +/* + * Call PnP BIOS with function 0x00, "get number of system device nodes" + */ +static int __pnp_bios_dev_node_info(struct pnp_dev_node_info *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2, PNP_TS1, PNP_DS, 0, 0, + data, sizeof(struct pnp_dev_node_info), 0, 0); + data->no_nodes &= 0xff; + return status; +} + +int pnp_bios_dev_node_info(struct pnp_dev_node_info *data) +{ + int status = __pnp_bios_dev_node_info( data ); + if ( status ) + 
pnpbios_warn_unexpected_status( "dev_node_info", status ); + return status; +} + +/* + * Note that some PnP BIOSes (e.g., on Sony Vaio laptops) die a horrible + * death if they are asked to access the "current" configuration. + * Therefore, if it's a matter of indifference, it's better to call + * get_dev_node() and set_dev_node() with boot=1 rather than with boot=0. + */ + +/* + * Call PnP BIOS with function 0x01, "get system device node" + * Input: *nodenum = desired node, + * boot = whether to get nonvolatile boot (!=0) + * or volatile current (0) config + * Output: *nodenum=next node or 0xff if no more nodes + */ +static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + if ( !boot & pnpbios_dont_use_current_config ) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0, + nodenum, sizeof(char), data, 65536); + return status; +} + +int pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data) +{ + int status; + status = __pnp_bios_get_dev_node( nodenum, boot, data ); + if ( status ) + pnpbios_warn_unexpected_status( "get_dev_node", status ); + return status; +} + + +/* + * Call PnP BIOS with function 0x02, "set system device node" + * Input: *nodenum = desired node, + * boot = whether to set nonvolatile boot (!=0) + * or volatile current (0) config + */ +static int __pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + if ( !boot & pnpbios_dont_use_current_config ) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1, boot ? 
2 : 1, PNP_DS, 0, 0, + data, 65536, 0, 0); + return status; +} + +int pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data) +{ + int status; + status = __pnp_bios_set_dev_node( nodenum, boot, data ); + if ( status ) { + pnpbios_warn_unexpected_status( "set_dev_node", status ); + return status; + } + if ( !boot ) { /* Update devlist */ + u8 thisnodenum = nodenum; + status = pnp_bios_get_dev_node( &nodenum, boot, data ); + if ( status ) + return status; + update_devlist( thisnodenum, data ); + } + return status; +} + +#if needed +/* + * Call PnP BIOS with function 0x03, "get event" + */ +static int pnp_bios_get_event(u16 *event) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_EVENT, 0, PNP_TS1, PNP_DS, 0, 0 ,0 ,0, + event, sizeof(u16), 0, 0); + return status; +} +#endif + +#if needed +/* + * Call PnP BIOS with function 0x04, "send message" + */ +static int pnp_bios_send_message(u16 message) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_SEND_MESSAGE, message, PNP_DS, 0, 0, 0, 0, 0, 0, 0, 0, 0); + return status; +} +#endif + +#ifdef CONFIG_HOTPLUG +/* + * Call PnP BIOS with function 0x05, "get docking station information" + */ +static int pnp_bios_dock_station_info(struct pnp_docking_station_info *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + data, sizeof(struct pnp_docking_station_info), 0, 0); + return status; +} +#endif + +#if needed +/* + * Call PnP BIOS with function 0x09, "set statically allocated resource + * information" + */ +static int pnp_bios_set_stat_res(char *info) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_SET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + info, *((u16 *) info), 0, 0); + return status; +} +#endif + +/* + * Call PnP BIOS with function 0x0a, "get statically allocated resource + * information" + */ +static int __pnp_bios_get_stat_res(char *info) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + info, 65536, 0, 0); + return status; +} + +int pnp_bios_get_stat_res(char *info) +{ + int status; + status = __pnp_bios_get_stat_res( info ); + if ( status ) + pnpbios_warn_unexpected_status( "get_stat_res", status ); + return status; +} + +#if needed +/* + * Call PnP BIOS with function 0x0b, "get APM id table" + */ +static int pnp_bios_apm_id_table(char *table, u16 *size) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_APM_ID_TABLE, 0, PNP_TS2, 0, PNP_TS1, PNP_DS, 0, 0, + table, *size, size, sizeof(u16)); + return status; +} +#endif + +/* + * Call PnP BIOS with function 0x40, "get isa pnp configuration structure" + */ +static int __pnp_bios_isapnp_config(struct pnp_isa_config_struc *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + data, sizeof(struct pnp_isa_config_struc), 0, 0); + return status; +} + +int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data) +{ + int status; + status = __pnp_bios_isapnp_config( data ); + if ( status ) + pnpbios_warn_unexpected_status( "isapnp_config", status ); + 
return status; +} + +/* + * Call PnP BIOS with function 0x41, "get ESCD info" + */ +static int __pnp_bios_escd_info(struct escd_info_struc *data) +{ + u16 status; + if (!pnp_bios_present()) + return ESCD_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4, PNP_TS1, PNP_DS, + data, sizeof(struct escd_info_struc), 0, 0); + return status; +} + +int pnp_bios_escd_info(struct escd_info_struc *data) +{ + int status; + status = __pnp_bios_escd_info( data ); + if ( status ) + pnpbios_warn_unexpected_status( "escd_info", status ); + return status; +} + +/* + * Call PnP BIOS function 0x42, "read ESCD" + * nvram_base is determined by calling escd_info + */ +static int __pnp_bios_read_escd(char *data, u32 nvram_base) +{ + u16 status; + if (!pnp_bios_present()) + return ESCD_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0, + data, 65536, (void *)nvram_base, 65536); + return status; +} + +int pnp_bios_read_escd(char *data, u32 nvram_base) +{ + int status; + status = __pnp_bios_read_escd( data, nvram_base ); + if ( status ) + pnpbios_warn_unexpected_status( "read_escd", status ); + return status; +} + +#if needed +/* + * Call PnP BIOS function 0x43, "write ESCD" + */ +static int pnp_bios_write_escd(char *data, u32 nvram_base) +{ + u16 status; + if (!pnp_bios_present()) + return ESCD_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_WRITE_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0, + data, 65536, nvram_base, 65536); + return status; +} +#endif + + +/* + * + * DOCKING FUNCTIONS + * + */ + +#ifdef CONFIG_HOTPLUG + +static int unloading = 0; +static struct completion unload_sem; + +/* + * (Much of this belongs in a shared routine somewhere) + */ + +static int pnp_dock_event(int dock, struct pnp_docking_station_info *info) +{ + char *argv [3], **envp, *buf, *scratch; + int i = 0, value; + + if (!hotplug_path [0]) + return -ENOENT; + if (!current->fs->root) { + return -EAGAIN; + } + if (!(envp = (char **) pnpbios_kmalloc (20 * sizeof (char *), GFP_KERNEL))) { + return -ENOMEM; + } + if (!(buf = pnpbios_kmalloc (256, GFP_KERNEL))) { + kfree (envp); + return -ENOMEM; + } + + /* only one standardized param to hotplug command: type */ + argv [0] = hotplug_path; + argv [1] = "dock"; + argv [2] = 0; + + /* minimal command environment */ + envp [i++] = "HOME=/"; + envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + +#ifdef DEBUG + /* hint that policy agent should enter no-stdout debug mode */ + envp [i++] = "DEBUG=kernel"; +#endif + /* extensible set of named bus-specific parameters, + * supporting multiple driver selection algorithms. 
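+ *
+ * The resulting invocation is roughly (values illustrative):
+ *
+ *    <hotplug_path> dock
+ *
+ * with ACTION=add (or ACTION=remove) and
+ * DOCK=<location_id>/<serial>/<capabilities> in the environment,
+ * hotplug_path normally being /sbin/hotplug.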
+ */ + scratch = buf; + + /* action: add, remove */ + envp [i++] = scratch; + scratch += sprintf (scratch, "ACTION=%s", dock?"add":"remove") + 1; + + /* Report the ident for the dock */ + envp [i++] = scratch; + scratch += sprintf (scratch, "DOCK=%x/%x/%x", + info->location_id, info->serial, info->capabilities); + envp[i] = 0; + + value = call_usermodehelper (argv [0], argv, envp); + kfree (buf); + kfree (envp); + return 0; +} + +/* + * Poll the PnP docking at regular intervals + */ +static int pnp_dock_thread(void * unused) +{ + static struct pnp_docking_station_info now; + int docked = -1, d = 0; + daemonize(); + reparent_to_init(); + strcpy(current->comm, "kpnpbiosd"); + while(!unloading && !signal_pending(current)) + { + int status; + + /* + * Poll every 2 seconds + */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ*2); + if(signal_pending(current)) + break; + + status = pnp_bios_dock_station_info(&now); + + switch(status) + { + /* + * No dock to manage + */ + case PNP_FUNCTION_NOT_SUPPORTED: + complete_and_exit(&unload_sem, 0); + case PNP_SYSTEM_NOT_DOCKED: + d = 0; + break; + case PNP_SUCCESS: + d = 1; + break; + default: + pnpbios_warn_unexpected_status( "pnp_dock_thread", status ); + continue; + } + if(d != docked) + { + if(pnp_dock_event(d, &now)==0) + { + docked = d; +#if 0 + printk(KERN_INFO "PnPBIOS: Docking station %stached\n", docked?"at":"de"); +#endif + } + } + } + complete_and_exit(&unload_sem, 0); +} + +#endif /* CONFIG_HOTPLUG */ + + +/* + * + * NODE DATA PARSING FUNCTIONS + * + */ + +static void add_irqresource(struct pci_dev *dev, int irq) +{ + int i = 0; + while (!(dev->irq_resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_IRQ) i++; + if (i < DEVICE_COUNT_IRQ) { + dev->irq_resource[i].start = (unsigned long) irq; + dev->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag + } +} + +static void add_dmaresource(struct pci_dev *dev, int dma) +{ + int i = 0; + while (!(dev->dma_resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_DMA) i++; + if (i < DEVICE_COUNT_DMA) { + dev->dma_resource[i].start = (unsigned long) dma; + dev->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag + } +} + +static void add_ioresource(struct pci_dev *dev, int io, int len) +{ + int i = 0; + while (!(dev->resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_RESOURCE) i++; + if (i < DEVICE_COUNT_RESOURCE) { + dev->resource[i].start = (unsigned long) io; + dev->resource[i].end = (unsigned long)(io + len - 1); + dev->resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag + } +} + +static void add_memresource(struct pci_dev *dev, int mem, int len) +{ + int i = 0; + while (!(dev->resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_RESOURCE) i++; + if (i < DEVICE_COUNT_RESOURCE) { + dev->resource[i].start = (unsigned long) mem; + dev->resource[i].end = (unsigned long)(mem + len - 1); + dev->resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag + } +} + +static void node_resource_data_to_dev(struct pnp_bios_node *node, struct pci_dev *dev) +{ + unsigned char *p = node->data, *lastp=NULL; + int i; + + /* + * First, set resource info to default values + */ + for (i=0;iresource[i].start = 0; // "disabled" + dev->resource[i].flags = IORESOURCE_UNSET; + } + for (i=0;iirq_resource[i].start = (unsigned long)-1; // "disabled" + dev->irq_resource[i].flags = IORESOURCE_UNSET; + } + for (i=0;idma_resource[i].start = (unsigned long)-1; // "disabled" + dev->dma_resource[i].flags = IORESOURCE_UNSET; + } + + /* + * Fill in dev 
resource info + */ + while ( (char *)p < ((char *)node->data + node->size )) { + if(p==lastp) break; + + if( p[0] & 0x80 ) {// large item + switch (p[0] & 0x7f) { + case 0x01: // memory + { + int io = *(short *) &p[4]; + int len = *(short *) &p[10]; + add_memresource(dev, io, len); + break; + } + case 0x02: // device name + { + int len = *(short *) &p[1]; + memcpy(dev->name, p + 3, len >= 80 ? 79 : len); + break; + } + case 0x05: // 32-bit memory + { + int io = *(int *) &p[4]; + int len = *(int *) &p[16]; + add_memresource(dev, io, len); + break; + } + case 0x06: // fixed location 32-bit memory + { + int io = *(int *) &p[4]; + int len = *(int *) &p[8]; + add_memresource(dev, io, len); + break; + } + } /* switch */ + lastp = p+3; + p = p + p[1] + p[2]*256 + 3; + continue; + } + if ((p[0]>>3) == 0x0f) // end tag + break; + switch (p[0]>>3) { + case 0x04: // irq + { + int i, mask, irq = -1; + mask= p[1] + p[2]*256; + for (i=0;i<16;i++, mask=mask>>1) + if(mask & 0x01) irq=i; + add_irqresource(dev, irq); + break; + } + case 0x05: // dma + { + int i, mask, dma = -1; + mask = p[1]; + for (i=0;i<8;i++, mask = mask>>1) + if(mask & 0x01) dma=i; + add_dmaresource(dev, dma); + break; + } + case 0x08: // io + { + int io= p[2] + p[3] *256; + int len = p[7]; + add_ioresource(dev, io, len); + break; + } + case 0x09: // fixed location io + { + int io = p[1] + p[2] * 256; + int len = p[3]; + add_ioresource(dev, io, len); + break; + } + } /* switch */ + lastp=p+1; + p = p + (p[0] & 0x07) + 1; + + } /* while */ + + return; +} + + +/* + * + * DEVICE LIST MANAGEMENT FUNCTIONS + * + * + * Some of these are exported to give public access + * + * Question: Why maintain a device list when the PnP BIOS can + * list devices for us? Answer: Some PnP BIOSes can't report + * the current configuration, only the boot configuration. + * The boot configuration can be changed, so we need to keep + * a record of what the configuration was when we booted; + * presumably it continues to describe the current config. + * For those BIOSes that can change the current config, we + * keep the information in the devlist up to date. + * + * Note that it is currently assumed that the list does not + * grow or shrink in size after init time, and slot_name + * never changes. The list is protected by a spinlock. + */ + +static LIST_HEAD(pnpbios_devices); + +static spinlock_t pnpbios_devices_lock; + +static int inline insert_device(struct pci_dev *dev) +{ + + /* + * FIXME: Check for re-add of existing node; + * return -1 if node already present + */ + + /* We don't lock because we only do this at init time */ + list_add_tail(&dev->global_list, &pnpbios_devices); + + return 0; +} + +#define HEX(id,a) hex[((id)>>a) & 15] +#define CHAR(id,a) (0x40 + (((id)>>a) & 31)) +// +static void inline pnpid32_to_pnpid(u32 id, char *str) +{ + const char *hex = "0123456789abcdef"; + + id = be32_to_cpu(id); + str[0] = CHAR(id, 26); + str[1] = CHAR(id, 21); + str[2] = CHAR(id,16); + str[3] = HEX(id, 12); + str[4] = HEX(id, 8); + str[5] = HEX(id, 4); + str[6] = HEX(id, 0); + str[7] = '\0'; + + return; +} +// +#undef CHAR +#undef HEX + +/* + * Build a linked list of pci_devs in order of ascending node number + * Called only at init time. 
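+ *
+ * Each BIOS node becomes a struct pci_dev whose devfn holds the node
+ * number and whose slot_name holds the EISA ID decoded by
+ * pnpid32_to_pnpid() above (three letters from 5-bit fields plus four
+ * hex digits); a 16550A-compatible UART node, for example, would
+ * appear as "PNP0501".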
+ */ +static void __init build_devlist(void) +{ + u8 nodenum; + unsigned int nodes_got = 0; + unsigned int devs = 0; + struct pnp_bios_node *node; + struct pnp_dev_node_info node_info; + struct pci_dev *dev; + + if (!pnp_bios_present()) + return; + + if (pnp_bios_dev_node_info(&node_info) != 0) + return; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) + return; + + for(nodenum=0; nodenum<0xff; ) { + u8 thisnodenum = nodenum; + /* We build the list from the "boot" config because + * asking for the "current" config causes some + * BIOSes to crash. + */ + if (pnp_bios_get_dev_node(&nodenum, (char )1 , node)) + break; + nodes_got++; + dev = pnpbios_kmalloc(sizeof (struct pci_dev), GFP_KERNEL); + if (!dev) + break; + memset(dev,0,sizeof(struct pci_dev)); + dev->devfn = thisnodenum; + memcpy(dev->name,"PNPBIOS",8); + pnpid32_to_pnpid(node->eisa_id,dev->slot_name); + node_resource_data_to_dev(node,dev); + if(insert_device(dev)<0) + kfree(dev); + else + devs++; + if (nodenum <= thisnodenum) { + printk(KERN_ERR "PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", (unsigned int)nodenum, (unsigned int)thisnodenum); + break; + } + } + kfree(node); + + printk(KERN_INFO "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n", + nodes_got, nodes_got != 1 ? "s" : "", devs); +} + +static struct pci_dev *find_device_by_nodenum( u8 nodenum ) +{ + struct pci_dev *dev; + + pnpbios_for_each_dev(dev) { + if(dev->devfn == nodenum) + return dev; + } + + return NULL; +} + +static void update_devlist( u8 nodenum, struct pnp_bios_node *data ) +{ + unsigned long flags; + struct pci_dev *dev; + + spin_lock_irqsave(&pnpbios_devices_lock, flags); + dev = find_device_by_nodenum( nodenum ); + if ( dev ) { + node_resource_data_to_dev(data,dev); + } + spin_unlock_irqrestore(&pnpbios_devices_lock, flags); + + return; +} + + +/* + * + * DRIVER REGISTRATION FUNCTIONS + * + * + * Exported to give public access + * + */ + +static LIST_HEAD(pnpbios_drivers); + +static const struct pnpbios_device_id * +match_device(const struct pnpbios_device_id *ids, const struct pci_dev *dev) +{ + while (*ids->id) + { + if(memcmp(ids->id, dev->slot_name, 7)==0) + return ids; + ids++; + } + return NULL; +} + +static int announce_device(struct pnpbios_driver *drv, struct pci_dev *dev) +{ + const struct pnpbios_device_id *id; + struct pci_dev tmpdev; + int ret; + + if (drv->id_table) { + id = match_device(drv->id_table, dev); + if (!id) + return 0; + } else + id = NULL; + + memcpy( &tmpdev, dev, sizeof(struct pci_dev)); + tmpdev.global_list.prev = NULL; + tmpdev.global_list.next = NULL; + + dev_probe_lock(); + /* Obviously, probe() should not call any pnpbios functions */ + ret = drv->probe(&tmpdev, id); + dev_probe_unlock(); + if (ret < 1) + return 0; + + dev->driver = (void *)drv; + + return 1; +} + +/** + * pnpbios_register_driver - register a new pci driver + * @drv: the driver structure to register + * + * Adds the driver structure to the list of registered drivers + * + * For each device in the pnpbios device list that matches one of + * the ids in drv->id_table, calls the driver's "probe" function with + * arguments (1) a pointer to a *temporary* struct pci_dev containing + * resource info for the device, and (2) a pointer to the id string + * of the device. Expects the probe function to return 1 if the + * driver claims the device (otherwise 0) in which case, marks the + * device as having this driver. 
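A hypothetical client of this interface might look like the sketch below; it is not taken from the patch. The field names follow the pnpbios_driver and pnpbios_device_id usage visible in this file, and everything else (the mb_* names, matching on PNP0c02) is purely illustrative. As described above, probe() returns 1 to claim the node and 0 to decline it.

	/* Hypothetical client driver, for illustration only. */
	static const struct pnpbios_device_id mb_ids[] = {
		{ "PNP0c02" },		/* motherboard resources */
		{ "" }			/* terminator tested by match_device() */
	};

	static int mb_probe(struct pci_dev *dev, const struct pnpbios_device_id *id)
	{
		printk(KERN_INFO "mb: claiming node %02x (%s)\n",
		       dev->devfn, dev->slot_name);
		return 1;	/* 1 claims the device, 0 declines it */
	}

	static struct pnpbios_driver mb_driver = {
		id_table:	mb_ids,
		probe:		mb_probe,
	};

	/* ... in the client's init path: */
	/* claimed = pnpbios_register_driver(&mb_driver); */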
+ *
+ * Returns the number of pci devices which were claimed by the driver
+ * during registration. The driver remains registered even if the
+ * return value is zero.
+ */
+int pnpbios_register_driver(struct pnpbios_driver *drv)
+{
+	struct pci_dev *dev;
+	unsigned long flags;
+	int count = 0;
+
+	list_add_tail(&drv->node, &pnpbios_drivers);
+	spin_lock_irqsave(&pnpbios_devices_lock, flags);
+	pnpbios_for_each_dev(dev) {
+		if (!pnpbios_dev_driver(dev))
+			count += announce_device(drv, dev);
+	}
+	spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+	return count;
+}
+
+EXPORT_SYMBOL(pnpbios_register_driver);
+
+/**
+ * pnpbios_unregister_driver - unregister a pci driver
+ * @drv: the driver structure to unregister
+ *
+ * Deletes the driver structure from the list of registered PnPBIOS
+ * drivers, gives it a chance to clean up by calling its "remove"
+ * function for each device it was responsible for, and marks those
+ * devices as driverless.
+ */
+void pnpbios_unregister_driver(struct pnpbios_driver *drv)
+{
+	unsigned long flags;
+	struct pci_dev *dev;
+
+	list_del(&drv->node);
+	spin_lock_irqsave(&pnpbios_devices_lock, flags);
+	pnpbios_for_each_dev(dev) {
+		if (dev->driver == (void *)drv) {
+			if (drv->remove)
+				drv->remove(dev);
+			dev->driver = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+}
+
+EXPORT_SYMBOL(pnpbios_unregister_driver);
+
+
+/*
+ *
+ * RESOURCE RESERVATION FUNCTIONS
+ *
+ *
+ * Used only at init time
+ *
+ */
+
+static void __init reserve_ioport_range(char *pnpid, int start, int end)
+{
+	struct resource *res;
+	char *regionid;
+
+#if 0
+	/*
+	 * TEMPORARY hack to work around the fact that the
+	 * floppy driver inappropriately reserves ioports 0x3f0 and 0x3f1
+	 * Remove this once the floppy driver is fixed.
+	 */
+	if (
+		(0x3f0 >= start && 0x3f0 <= end)
+		|| (0x3f1 >= start && 0x3f1 <= end)
+	) {
+		printk(KERN_INFO
+			"PnPBIOS: %s: ioport range 0x%x-0x%x NOT reserved\n",
+			pnpid, start, end
+		);
+		return;
+	}
+#endif
+
+	regionid = pnpbios_kmalloc(16, GFP_KERNEL);
+	if ( regionid == NULL )
+		return;
+	snprintf(regionid, 16, "PnPBIOS %s", pnpid);
+	res = request_region(start,end-start+1,regionid);
+	if ( res == NULL )
+		kfree( regionid );
+	else
+		res->flags &= ~IORESOURCE_BUSY;
+	/*
+	 * Failures at this point are usually harmless. pci quirks for
+	 * example do reserve stuff they know about too, so we may well
+	 * have double reservations.
+	 */
+	printk(KERN_INFO
+		"PnPBIOS: %s: ioport range 0x%x-0x%x %s reserved\n",
+		pnpid, start, end,
+		NULL != res ? "has been" : "could not be"
+	);
+
+	return;
+}
+
+static void __init reserve_resources_of_dev( struct pci_dev *dev )
+{
+	int i;
+
+	for (i=0;i<DEVICE_COUNT_RESOURCE;i++) {
+		if (dev->resource[i].flags & IORESOURCE_UNSET )
+			/* end of resources */
+			break;
+		if (dev->resource[i].flags & IORESOURCE_IO) {
+			/* ioport */
+			if ( dev->resource[i].start == 0 )
+				/* disabled */
+				/* Do nothing */
+				continue;
+			if ( dev->resource[i].start < 0x100 )
+				/*
+				 * Below 0x100 is only standard PC hardware
+				 * (pics, kbd, timer, dma, ...)
+				 * We should not get resource conflicts there,
+				 * and the kernel reserves these anyway
+				 * (see arch/i386/kernel/setup.c).
+ * So, do nothing + */ + continue; + if ( dev->resource[i].end < dev->resource[i].start ) + /* invalid endpoint */ + /* Do nothing */ + continue; + reserve_ioport_range( + dev->slot_name, + dev->resource[i].start, + dev->resource[i].end + ); + } else if (dev->resource[i].flags & IORESOURCE_MEM) { + /* iomem */ + /* For now do nothing */ + continue; + } else { + /* Neither ioport nor iomem */ + /* Do nothing */ + continue; + } + } + + return; +} + +static void __init reserve_resources( void ) +{ + struct pci_dev *dev; + + pnpbios_for_each_dev(dev) { + if ( + 0 != strcmp(dev->slot_name,"PNP0c01") && /* memory controller */ + 0 != strcmp(dev->slot_name,"PNP0c02") /* system peripheral: other */ + ) { + continue; + } + reserve_resources_of_dev(dev); + } + + return; +} + + +/* + * + * INIT AND EXIT + * + */ + +extern int is_sony_vaio_laptop; + +static int pnpbios_disabled; /* = 0 */ +static int dont_reserve_resources; /* = 0 */ +int pnpbios_dont_use_current_config; /* = 0 */ + +#ifndef MODULE +static int __init pnpbios_setup(char *str) +{ + int invert; + + while ((str != NULL) && (*str != '\0')) { + if (strncmp(str, "off", 3) == 0) + pnpbios_disabled=1; + if (strncmp(str, "on", 2) == 0) + pnpbios_disabled=0; + invert = (strncmp(str, "no-", 3) == 0); + if (invert) + str += 3; + if (strncmp(str, "curr", 4) == 0) + pnpbios_dont_use_current_config = invert; + if (strncmp(str, "res", 3) == 0) + dont_reserve_resources = invert; + str = strchr(str, ','); + if (str != NULL) + str += strspn(str, ", \t"); + } + + return 1; +} + +__setup("pnpbios=", pnpbios_setup); +#endif + +int __init pnpbios_init(void) +{ + union pnp_bios_expansion_header *check; + u8 sum; + int i, length, r; + + spin_lock_init(&pnp_bios_lock); + spin_lock_init(&pnpbios_devices_lock); + + if(pnpbios_disabled || (dmi_broken & BROKEN_PNP_BIOS) ) { + printk(KERN_INFO "PnPBIOS: Disabled\n"); + return -ENODEV; + } + + if ( is_sony_vaio_laptop ) + pnpbios_dont_use_current_config = 1; + + /* + * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS + * structure and, if one is found, sets up the selectors and + * entry points + */ + for (check = (union pnp_bios_expansion_header *) __va(0xf0000); + check < (union pnp_bios_expansion_header *) __va(0xffff0); + ((void *) (check)) += 16) { + if (check->fields.signature != PNP_SIGNATURE) + continue; + length = check->fields.length; + if (!length) + continue; + for (sum = 0, i = 0; i < length; i++) + sum += check->chars[i]; + if (sum) + continue; + if (check->fields.version < 0x10) { + printk(KERN_WARNING "PnPBIOS: PnP BIOS version %d.%d is not supported\n", + check->fields.version >> 4, + check->fields.version & 15); + continue; + } + printk(KERN_INFO "PnPBIOS: Found PnP BIOS installation structure at 0x%p\n", check); + printk(KERN_INFO "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x\n", + check->fields.version >> 4, check->fields.version & 15, + check->fields.pm16cseg, check->fields.pm16offset, + check->fields.pm16dseg); + Q2_SET_SEL(PNP_CS32, &pnp_bios_callfunc, 64 * 1024); + Q_SET_SEL(PNP_CS16, check->fields.pm16cseg, 64 * 1024); + Q_SET_SEL(PNP_DS, check->fields.pm16dseg, 64 * 1024); + pnp_bios_callpoint.offset = check->fields.pm16offset; + pnp_bios_callpoint.segment = PNP_CS16; + pnp_bios_hdr = check; + break; + } + if (!pnp_bios_present()) + return -ENODEV; + build_devlist(); + if ( ! 
dont_reserve_resources ) + reserve_resources(); +#ifdef CONFIG_PROC_FS + r = pnpbios_proc_init(); + if (r) + return r; +#endif + return 0; +} + +static int pnpbios_thread_init(void) +{ +#ifdef CONFIG_HOTPLUG + init_completion(&unload_sem); + if(kernel_thread(pnp_dock_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL)>0) + unloading = 0; +#endif + return 0; +} + +#ifndef MODULE + +/* init/main.c calls pnpbios_init early */ + +/* Start the kernel thread later: */ +module_init(pnpbios_thread_init); + +#else + +/* + * N.B.: Building pnpbios as a module hasn't been fully implemented + */ + +MODULE_LICENSE("GPL"); + +static int pnpbios_init_all(void) +{ + int r; + r = pnpbios_init(); + if (r) + return r; + r = pnpbios_thread_init(); + if (r) + return r; + return 0; +} + +static void __exit pnpbios_exit(void) +{ +#ifdef CONFIG_HOTPLUG + unloading = 1; + wait_for_completion(&unload_sem); +#endif + pnpbios_proc_exit(); + /* We ought to free resources here */ + return; +} + +module_init(pnpbios_init_all); +module_exit(pnpbios_exit); + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/pnp/pnpbios_proc.c linux.22-ac2/drivers/pnp/pnpbios_proc.c --- linux.vanilla/drivers/pnp/pnpbios_proc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/pnp/pnpbios_proc.c 2003-06-29 16:10:17.000000000 +0100 @@ -0,0 +1,278 @@ +/* + * /proc/bus/pnp interface for Plug and Play devices + * + * Written by David Hinds, dahinds@users.sourceforge.net + * Modified by Thomas Hood, jdthood@mail.com + * + * The .../devices and .../ and .../boot/ files are + * utilized by the lspnp and setpnp utilities, supplied with the + * pcmcia-cs package. + * http://pcmcia-cs.sourceforge.net + * + * The .../escd file is utilized by the lsescd utility written by + * Gunther Mayer. + * http://home.t-online.de/home/gunther.mayer/lsescd + * + * The .../legacy_device_resources file is not used yet. + * + * The other files are human-readable. 
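As an illustration of the .../devices format (a standalone userspace sketch, not part of the patch): each line is emitted by proc_read_devices() below as "%02x\t%08x\t%02x:%02x:%02x\t%04x\n", i.e. node handle, raw EISA id, the three type-code bytes and the flags word, so it can be walked with fscanf():

	#include <stdio.h>

	/* Dump the node handles and type codes listed in
	 * /proc/bus/pnp/devices. */
	int main(void)
	{
		FILE *f = fopen("/proc/bus/pnp/devices", "r");
		unsigned int handle, eisa_id, t0, t1, t2, flags;

		if (!f)
			return 1;
		while (fscanf(f, "%x %x %x:%x:%x %x",
			      &handle, &eisa_id, &t0, &t1, &t2, &flags) == 6)
			printf("node %02x type %02x:%02x:%02x\n",
			       handle, t0, t1, t2);
		fclose(f);
		return 0;
	}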
+ */ + +//#include +#define __NO_VERSION__ +//#include + +#include +#include +#include +#include +#include +#include + +static struct proc_dir_entry *proc_pnp = NULL; +static struct proc_dir_entry *proc_pnp_boot = NULL; +static struct pnp_dev_node_info node_info; + +static int proc_read_pnpconfig(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct pnp_isa_config_struc pnps; + + if (pnp_bios_isapnp_config(&pnps)) + return -EIO; + return snprintf(buf, count, + "structure_revision %d\n" + "number_of_CSNs %d\n" + "ISA_read_data_port 0x%x\n", + pnps.revision, + pnps.no_csns, + pnps.isa_rd_data_port + ); +} + +static int proc_read_escdinfo(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct escd_info_struc escd; + + if (pnp_bios_escd_info(&escd)) + return -EIO; + return snprintf(buf, count, + "min_ESCD_write_size %d\n" + "ESCD_size %d\n" + "NVRAM_base 0x%x\n", + escd.min_escd_write_size, + escd.escd_size, + escd.nv_storage_base + ); +} + +#define MAX_SANE_ESCD_SIZE (32*1024) +static int proc_read_escd(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct escd_info_struc escd; + char *tmpbuf; + int escd_size, escd_left_to_read, n; + + if (pnp_bios_escd_info(&escd)) + return -EIO; + + /* sanity check */ + if (escd.escd_size > MAX_SANE_ESCD_SIZE) { + printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n"); + return -EFBIG; + } + + tmpbuf = pnpbios_kmalloc(escd.escd_size, GFP_KERNEL); + if (!tmpbuf) return -ENOMEM; + + if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) + return -EIO; + + escd_size = (unsigned char)(tmpbuf[0]) + (unsigned char)(tmpbuf[1])*256; + + /* sanity check */ + if (escd_size > MAX_SANE_ESCD_SIZE) { + printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by BIOS read_escd call is too great\n"); + return -EFBIG; + } + + escd_left_to_read = escd_size - pos; + if (escd_left_to_read < 0) escd_left_to_read = 0; + if (escd_left_to_read == 0) *eof = 1; + n = min(count,escd_left_to_read); + memcpy(buf, tmpbuf + pos, n); + kfree(tmpbuf); + *start = buf; + return n; +} + +static int proc_read_legacyres(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + /* Assume that the following won't overflow the buffer */ + if (pnp_bios_get_stat_res(buf)) + return -EIO; + + return count; // FIXME: Return actual length +} + +static int proc_read_devices(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct pnp_bios_node *node; + u8 nodenum; + char *p = buf; + + if (pos >= 0xff) + return 0; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) return -ENOMEM; + + for (nodenum=pos; nodenum<0xff; ) { + u8 thisnodenum = nodenum; + /* 26 = the number of characters per line sprintf'ed */ + if ((p - buf + 26) > count) + break; + if (pnp_bios_get_dev_node(&nodenum, 1, node)) + break; + p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n", + node->handle, node->eisa_id, + node->type_code[0], node->type_code[1], + node->type_code[2], node->flags); + if (nodenum <= thisnodenum) { + printk(KERN_ERR "%s Node number 0x%x is out of sequence following node 0x%x. 
Aborting.\n", "PnPBIOS: proc_read_devices:", (unsigned int)nodenum, (unsigned int)thisnodenum); + *eof = 1; + break; + } + } + kfree(node); + if (nodenum == 0xff) + *eof = 1; + *start = (char *)((off_t)nodenum - pos); + return p - buf; +} + +static int proc_read_node(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct pnp_bios_node *node; + int boot = (long)data >> 8; + u8 nodenum = (long)data; + int len; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) return -ENOMEM; + if (pnp_bios_get_dev_node(&nodenum, boot, node)) + return -EIO; + len = node->size - sizeof(struct pnp_bios_node); + memcpy(buf, node->data, len); + kfree(node); + return len; +} + +static int proc_write_node(struct file *file, const char *buf, + unsigned long count, void *data) +{ + struct pnp_bios_node *node; + int boot = (long)data >> 8; + u8 nodenum = (long)data; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) return -ENOMEM; + if ( pnp_bios_get_dev_node(&nodenum, boot, node) ) + return -EIO; + if (count != node->size - sizeof(struct pnp_bios_node)) + return -EINVAL; + memcpy(node->data, buf, count); + if (pnp_bios_set_dev_node(node->handle, boot, node) != 0) + return -EINVAL; + kfree(node); + return count; +} + +/* + * When this is called, pnpbios functions are assumed to + * work and the pnpbios_dont_use_current_config flag + * should already have been set to the appropriate value + */ +int __init pnpbios_proc_init( void ) +{ + struct pnp_bios_node *node; + struct proc_dir_entry *ent; + char name[3]; + u8 nodenum; + + if (pnp_bios_dev_node_info(&node_info)) + return -EIO; + + proc_pnp = proc_mkdir("pnp", proc_bus); + if (!proc_pnp) + return -EIO; + proc_pnp_boot = proc_mkdir("boot", proc_pnp); + if (!proc_pnp_boot) + return -EIO; + create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL); + create_proc_read_entry("configuration_info", 0, proc_pnp, proc_read_pnpconfig, NULL); + create_proc_read_entry("escd_info", S_IRUSR, proc_pnp, proc_read_escdinfo, NULL); + create_proc_read_entry("escd", S_IRUSR, proc_pnp, proc_read_escd, NULL); + create_proc_read_entry("legacy_device_resources", 0, proc_pnp, proc_read_legacyres, NULL); + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) + return -ENOMEM; + + for (nodenum=0; nodenum<0xff; ) { + u8 thisnodenum = nodenum; + if (pnp_bios_get_dev_node(&nodenum, 1, node) != 0) + break; + sprintf(name, "%02x", node->handle); + if ( !pnpbios_dont_use_current_config ) { + ent = create_proc_entry(name, 0, proc_pnp); + if (ent) { + ent->read_proc = proc_read_node; + ent->write_proc = proc_write_node; + ent->data = (void *)(long)(node->handle); + } + } + ent = create_proc_entry(name, 0, proc_pnp_boot); + if (ent) { + ent->read_proc = proc_read_node; + ent->write_proc = proc_write_node; + ent->data = (void *)(long)(node->handle+0x100); + } + if (nodenum <= thisnodenum) { + printk(KERN_ERR "%s Node number 0x%x is out of sequence following node 0x%x. 
Aborting.\n", "PnPBIOS: proc_init:", (unsigned int)nodenum, (unsigned int)thisnodenum); + break; + } + } + kfree(node); + + return 0; +} + +void __exit pnpbios_proc_exit(void) +{ + int i; + char name[3]; + + if (!proc_pnp) return; + + for (i=0; i<0xff; i++) { + sprintf(name, "%02x", i); + if ( !pnpbios_dont_use_current_config ) + remove_proc_entry(name, proc_pnp); + remove_proc_entry(name, proc_pnp_boot); + } + remove_proc_entry("legacy_device_resources", proc_pnp); + remove_proc_entry("escd", proc_pnp); + remove_proc_entry("escd_info", proc_pnp); + remove_proc_entry("configuration_info", proc_pnp); + remove_proc_entry("devices", proc_pnp); + remove_proc_entry("boot", proc_pnp); + remove_proc_entry("pnp", proc_bus); + + return; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/con3215.c linux.22-ac2/drivers/s390/char/con3215.c --- linux.vanilla/drivers/s390/char/con3215.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/con3215.c 2003-07-06 18:38:26.000000000 +0100 @@ -903,7 +903,7 @@ raw3215_info *raw; raw = (raw3215_info *) tty->driver_data; - if (raw == NULL || tty->count > 1) + if (raw == NULL || atomic_read(&tty->count) > 1) return; tty->closing = 1; /* Shutdown the terminal */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/hwc_con.c linux.22-ac2/drivers/s390/char/hwc_con.c --- linux.vanilla/drivers/s390/char/hwc_con.c 2002-08-03 16:08:27.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/hwc_con.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,89 +0,0 @@ -/* - * drivers/s390/char/hwc_con.c - * HWC line mode console driver - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hwc_rw.h" - -#ifdef CONFIG_HWC_CONSOLE - -#define hwc_console_major 4 -#define hwc_console_minor 64 -#define hwc_console_name "console" - -void hwc_console_write (struct console *, const char *, unsigned int); -kdev_t hwc_console_device (struct console *); -void hwc_console_unblank (void); - -#define HWC_CON_PRINT_HEADER "hwc console driver: " - -struct console hwc_console = { - name: hwc_console_name, - write: hwc_console_write, - device: hwc_console_device, - unblank:hwc_console_unblank, - flags: CON_PRINTBUFFER, -}; - -void -hwc_console_write ( - struct console *console, - const char *message, - unsigned int count) -{ - - if (console->device (console) != hwc_console.device (&hwc_console)) { - - hwc_printk (KERN_WARNING HWC_CON_PRINT_HEADER - "hwc_console_write() called with wrong " - "device number"); - return; - } - hwc_write (0, message, count); -} - -kdev_t -hwc_console_device (struct console * c) -{ - return MKDEV (hwc_console_major, hwc_console_minor); -} - -void -hwc_console_unblank (void) -{ - hwc_unblank (); -} - -#endif - -void __init -hwc_console_init (void) -{ - if (!MACHINE_HAS_HWC) - return; - - if (hwc_init () == 0) { -#ifdef CONFIG_HWC_CONSOLE - - if (CONSOLE_IS_HWC) - register_console (&hwc_console); -#endif - } else - panic (HWC_CON_PRINT_HEADER "hwc initialisation failed !"); - - return; -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/hwc_cpi.c linux.22-ac2/drivers/s390/char/hwc_cpi.c --- linux.vanilla/drivers/s390/char/hwc_cpi.c 2001-10-11 17:43:29.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/hwc_cpi.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,211 
+0,0 @@ - -/* - * Author: Martin Peschke - * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "hwc_rw.h" -#include "hwc.h" - -#define CPI_RETRIES 3 -#define CPI_SLEEP_TICKS 50 - -#define CPI_LENGTH_SYSTEM_TYPE 8 -#define CPI_LENGTH_SYSTEM_NAME 8 -#define CPI_LENGTH_SYSPLEX_NAME 8 - -typedef struct { - _EBUF_HEADER - u8 id_format; - u8 reserved0; - u8 system_type[CPI_LENGTH_SYSTEM_TYPE]; - u64 reserved1; - u8 system_name[CPI_LENGTH_SYSTEM_NAME]; - u64 reserved2; - u64 system_level; - u64 reserved3; - u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME]; - u8 reserved4[16]; -} __attribute__ ((packed)) - -cpi_evbuf_t; - -typedef struct _cpi_hwcb_t { - _HWCB_HEADER - cpi_evbuf_t cpi_evbuf; -} __attribute__ ((packed)) - -cpi_hwcb_t; - -cpi_hwcb_t *cpi_hwcb; - -static int __init cpi_module_init (void); -static void __exit cpi_module_exit (void); - -module_init (cpi_module_init); -module_exit (cpi_module_exit); - -MODULE_AUTHOR ( - "Martin Peschke, IBM Deutschland Entwicklung GmbH " - ""); - -MODULE_DESCRIPTION ( - "identify this operating system instance to the S/390 or zSeries hardware"); - -static char *system_name = NULL; -MODULE_PARM (system_name, "s"); -MODULE_PARM_DESC (system_name, "e.g. hostname - max. 8 characters"); - -static char *sysplex_name = NULL; -#ifdef ALLOW_SYSPLEX_NAME -MODULE_PARM (sysplex_name, "s"); -MODULE_PARM_DESC (sysplex_name, "if applicable - max. 8 characters"); -#endif - -static char *system_type = "LINUX"; - -hwc_request_t cpi_request = -{}; - -hwc_callback_t cpi_callback; - -static DECLARE_MUTEX_LOCKED (sem); - -static int __init -cpi_module_init (void) -{ - int retval; - int system_type_length; - int system_name_length; - int sysplex_name_length = 0; - int retries; - - if (!MACHINE_HAS_HWC) { - printk ("cpi: bug: hardware console not present\n"); - retval = -EINVAL; - goto out; - } - if (!system_type) { - printk ("cpi: bug: no system type specified\n"); - retval = -EINVAL; - goto out; - } - system_type_length = strlen (system_type); - if (system_type_length > CPI_LENGTH_SYSTEM_NAME) { - printk ("cpi: bug: system type has length of %i characters - " - "only %i characters supported\n", - system_type_length, - CPI_LENGTH_SYSTEM_TYPE); - retval = -EINVAL; - goto out; - } - if (!system_name) { - printk ("cpi: no system name specified\n"); - retval = -EINVAL; - goto out; - } - system_name_length = strlen (system_name); - if (system_name_length > CPI_LENGTH_SYSTEM_NAME) { - printk ("cpi: system name has length of %i characters - " - "only %i characters supported\n", - system_name_length, - CPI_LENGTH_SYSTEM_NAME); - retval = -EINVAL; - goto out; - } - if (sysplex_name) { - sysplex_name_length = strlen (sysplex_name); - if (sysplex_name_length > CPI_LENGTH_SYSPLEX_NAME) { - printk ("cpi: sysplex name has length of %i characters - " - "only %i characters supported\n", - sysplex_name_length, - CPI_LENGTH_SYSPLEX_NAME); - retval = -EINVAL; - goto out; - } - } - cpi_hwcb = kmalloc (sizeof (cpi_hwcb_t), GFP_KERNEL); - if (!cpi_hwcb) { - printk ("cpi: no storage to fulfill request\n"); - retval = -ENOMEM; - goto out; - } - memset (cpi_hwcb, 0, sizeof (cpi_hwcb_t)); - - cpi_hwcb->length = sizeof (cpi_hwcb_t); - cpi_hwcb->cpi_evbuf.length = sizeof (cpi_evbuf_t); - cpi_hwcb->cpi_evbuf.type = 0x0B; - - memset (cpi_hwcb->cpi_evbuf.system_type, ' ', CPI_LENGTH_SYSTEM_TYPE); - memcpy (cpi_hwcb->cpi_evbuf.system_type, system_type, system_type_length); - HWC_ASCEBC_STR 
(cpi_hwcb->cpi_evbuf.system_type, CPI_LENGTH_SYSTEM_TYPE); - EBC_TOUPPER (cpi_hwcb->cpi_evbuf.system_type, CPI_LENGTH_SYSTEM_TYPE); - - memset (cpi_hwcb->cpi_evbuf.system_name, ' ', CPI_LENGTH_SYSTEM_NAME); - memcpy (cpi_hwcb->cpi_evbuf.system_name, system_name, system_name_length); - HWC_ASCEBC_STR (cpi_hwcb->cpi_evbuf.system_name, CPI_LENGTH_SYSTEM_NAME); - EBC_TOUPPER (cpi_hwcb->cpi_evbuf.system_name, CPI_LENGTH_SYSTEM_NAME); - - cpi_hwcb->cpi_evbuf.system_level = LINUX_VERSION_CODE; - - if (sysplex_name) { - memset (cpi_hwcb->cpi_evbuf.sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME); - memcpy (cpi_hwcb->cpi_evbuf.sysplex_name, sysplex_name, sysplex_name_length); - HWC_ASCEBC_STR (cpi_hwcb->cpi_evbuf.sysplex_name, CPI_LENGTH_SYSPLEX_NAME); - EBC_TOUPPER (cpi_hwcb->cpi_evbuf.sysplex_name, CPI_LENGTH_SYSPLEX_NAME); - } - cpi_request.block = cpi_hwcb; - cpi_request.word = HWC_CMDW_WRITEDATA; - cpi_request.callback = cpi_callback; - - for (retries = CPI_RETRIES; retries; retries--) { - retval = hwc_send (&cpi_request); - if (retval) { - - set_current_state (TASK_INTERRUPTIBLE); - schedule_timeout (CPI_SLEEP_TICKS); - } else { - - down (&sem); - - switch (cpi_hwcb->response_code) { - case 0x0020: - printk ("cpi: succeeded\n"); - break; - default: - printk ("cpi: failed with response code 0x%x\n", - cpi_hwcb->response_code); - } - goto free; - } - } - - printk ("cpi: failed (%i)\n", retval); - - free: - kfree (cpi_hwcb); - - out: - return retval; -} - -static void __exit -cpi_module_exit (void) -{ - printk ("cpi: exit\n"); -} - -void -cpi_callback (hwc_request_t * req) -{ - up (&sem); -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/hwc.h linux.22-ac2/drivers/s390/char/hwc.h --- linux.vanilla/drivers/s390/char/hwc.h 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/s390/char/hwc.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,275 +0,0 @@ -/* - * drivers/s390/char/hwc.h - * - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - * - * - * - */ - -#ifndef __HWC_H__ -#define __HWC_H__ - -#define HWC_EXT_INT_PARAM_ADDR 0xFFFFFFF8 -#define HWC_EXT_INT_PARAM_PEND 0x00000001 - -#define ET_OpCmd 0x01 -#define ET_Msg 0x02 -#define ET_StateChange 0x08 -#define ET_PMsgCmd 0x09 -#define ET_CntlProgOpCmd 0x20 -#define ET_CntlProgIdent 0x0B -#define ET_SigQuiesce 0x1D - -#define ET_OpCmd_Mask 0x80000000 -#define ET_Msg_Mask 0x40000000 -#define ET_StateChange_Mask 0x01000000 -#define ET_PMsgCmd_Mask 0x00800000 -#define ET_CtlProgOpCmd_Mask 0x00000001 -#define ET_CtlProgIdent_Mask 0x00200000 -#define ET_SigQuiesce_Mask 0x00000008 - -#define GMF_DOM 0x8000 -#define GMF_SndAlrm 0x4000 -#define GMF_HoldMsg 0x2000 - -#define LTF_CntlText 0x8000 -#define LTF_LabelText 0x4000 -#define LTF_DataText 0x2000 -#define LTF_EndText 0x1000 -#define LTF_PromptText 0x0800 - -#define HWC_COMMAND_INITIATED 0 -#define HWC_BUSY 2 -#define HWC_NOT_OPERATIONAL 3 - -#define hwc_cmdw_t u32; - -#define HWC_CMDW_READDATA 0x00770005 - -#define HWC_CMDW_WRITEDATA 0x00760005 - -#define HWC_CMDW_WRITEMASK 0x00780005 - -#define GDS_ID_MDSMU 0x1310 - -#define GDS_ID_MDSRouteInfo 0x1311 - -#define GDS_ID_AgUnWrkCorr 0x1549 - -#define GDS_ID_SNACondReport 0x1532 - -#define GDS_ID_CPMSU 0x1212 - -#define GDS_ID_RoutTargInstr 0x154D - -#define GDS_ID_OpReq 0x8070 - -#define GDS_ID_TextCmd 0x1320 - -#define GDS_KEY_SelfDefTextMsg 0x31 - -#define _HWCB_HEADER u16 length; \ - u8 function_code; \ - u8 control_mask[3]; \ - u16 
response_code; - -#define _EBUF_HEADER u16 length; \ - u8 type; \ - u8 flags; \ - u16 _reserved; - -typedef struct { - _EBUF_HEADER -} __attribute__ ((packed)) - -evbuf_t; - -#define _MDB_HEADER u16 length; \ - u16 type; \ - u32 tag; \ - u32 revision_code; - -#define _GO_HEADER u16 length; \ - u16 type; \ - u32 domid; \ - u8 hhmmss_time[8]; \ - u8 th_time[3]; \ - u8 _reserved_0; \ - u8 dddyyyy_date[7]; \ - u8 _reserved_1; \ - u16 general_msg_flags; \ - u8 _reserved_2[10]; \ - u8 originating_system_name[8]; \ - u8 job_guest_name[8]; - -#define _MTO_HEADER u16 length; \ - u16 type; \ - u16 line_type_flags; \ - u8 alarm_control; \ - u8 _reserved[3]; - -typedef struct { - _GO_HEADER -} __attribute__ ((packed)) - -go_t; - -typedef struct { - go_t go; -} __attribute__ ((packed)) - -mdb_body_t; - -typedef struct { - _MDB_HEADER - mdb_body_t mdb_body; -} __attribute__ ((packed)) - -mdb_t; - -typedef struct { - _EBUF_HEADER - mdb_t mdb; -} __attribute__ ((packed)) - -msgbuf_t; - -typedef struct { - _HWCB_HEADER - msgbuf_t msgbuf; -} __attribute__ ((packed)) - -write_hwcb_t; - -typedef struct { - _MTO_HEADER -} __attribute__ ((packed)) - -mto_t; - -static write_hwcb_t write_hwcb_template = -{ - sizeof (write_hwcb_t), - 0x00, - { - 0x00, - 0x00, - 0x00 - }, - 0x0000, - { - sizeof (msgbuf_t), - ET_Msg, - 0x00, - 0x0000, - { - sizeof (mdb_t), - 0x0001, - 0xD4C4C240, - 0x00000001, - { - { - sizeof (go_t), - 0x0001 - - } - } - } - } -}; - -static mto_t mto_template = -{ - sizeof (mto_t), - 0x0004, - LTF_EndText, - 0x00 -}; - -typedef u32 _hwcb_mask_t; - -typedef struct { - _HWCB_HEADER - u16 _reserved; - u16 mask_length; - _hwcb_mask_t cp_receive_mask; - _hwcb_mask_t cp_send_mask; - _hwcb_mask_t hwc_receive_mask; - _hwcb_mask_t hwc_send_mask; -} __attribute__ ((packed)) - -init_hwcb_t; - -static init_hwcb_t init_hwcb_template = -{ - sizeof (init_hwcb_t), - 0x00, - { - 0x00, - 0x00, - 0x00 - }, - 0x0000, - 0x0000, - sizeof (_hwcb_mask_t), - ET_OpCmd_Mask | ET_PMsgCmd_Mask | - ET_StateChange_Mask | ET_SigQuiesce_Mask, - ET_Msg_Mask | ET_PMsgCmd_Mask | ET_CtlProgIdent_Mask -}; - -typedef struct { - _EBUF_HEADER - u8 validity_hwc_active_facility_mask:1; - u8 validity_hwc_receive_mask:1; - u8 validity_hwc_send_mask:1; - u8 validity_read_data_function_mask:1; - u16 _zeros:12; - u16 mask_length; - u64 hwc_active_facility_mask; - _hwcb_mask_t hwc_receive_mask; - _hwcb_mask_t hwc_send_mask; - u32 read_data_function_mask; -} __attribute__ ((packed)) - -statechangebuf_t; - -#define _GDS_VECTOR_HEADER u16 length; \ - u16 gds_id; - -#define _GDS_SUBVECTOR_HEADER u8 length; \ - u8 key; - -typedef struct { - _GDS_VECTOR_HEADER -} __attribute__ ((packed)) - -gds_vector_t; - -typedef struct { - _GDS_SUBVECTOR_HEADER -} __attribute__ ((packed)) - -gds_subvector_t; - -typedef struct { - _HWCB_HEADER -} __attribute__ ((packed)) - -read_hwcb_t; - -static read_hwcb_t read_hwcb_template = -{ - PAGE_SIZE, - 0x00, - { - 0x00, - 0x00, - 0x80 - } -}; - -#endif /* __HWC_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/hwc_rw.c linux.22-ac2/drivers/s390/char/hwc_rw.c --- linux.vanilla/drivers/s390/char/hwc_rw.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/s390/char/hwc_rw.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,2458 +0,0 @@ -/* - * drivers/s390/char/hwc_rw.c - * driver: reading from and writing to system console on S/390 via HWC - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - * - * - 
* - * - * - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef MIN -#define MIN(a,b) (((amto_char_sum) - -#define _HWCB_MTO(hwcb) (_LIST(hwcb)->mto_number) - -#define _HWCB_CHAR_LOST(hwcb) (_LIST(hwcb)->mto_char_sum_lost) - -#define _HWCB_MTO_LOST(hwcb) (_LIST(hwcb)->mto_number_lost) - -#define _HWCB_TIMES_LOST(hwcb) (_LIST(hwcb)->times_lost) - -#define _HWCB_NEXT(hwcb) (_LIST(hwcb)->next) - -#define BUF_HWCB_CHAR _HWCB_CHAR(BUF_HWCB) - -#define BUF_HWCB_MTO _HWCB_MTO(BUF_HWCB) - -#define BUF_HWCB_NEXT _HWCB_NEXT(BUF_HWCB) - -#define OUT_HWCB_CHAR _HWCB_CHAR(OUT_HWCB) - -#define OUT_HWCB_MTO _HWCB_MTO(OUT_HWCB) - -#define OUT_HWCB_NEXT _HWCB_NEXT(OUT_HWCB) - -#define BUF_HWCB_CHAR_LOST _HWCB_CHAR_LOST(BUF_HWCB) - -#define BUF_HWCB_MTO_LOST _HWCB_MTO_LOST(BUF_HWCB) - -#define OUT_HWCB_CHAR_LOST _HWCB_CHAR_LOST(OUT_HWCB) - -#define OUT_HWCB_MTO_LOST _HWCB_MTO_LOST(OUT_HWCB) - -#define BUF_HWCB_TIMES_LOST _HWCB_TIMES_LOST(BUF_HWCB) - -#include "hwc.h" - -#define __HWC_RW_C__ -#include "hwc_rw.h" -#undef __HWC_RW_C__ - -static unsigned char _obuf[MAX_HWCB_ROOM]; - -static unsigned char - _page[PAGE_SIZE] __attribute__ ((aligned (PAGE_SIZE))); - -typedef unsigned long kmem_pages_t; - -#define MAX_KMEM_PAGES (sizeof(kmem_pages_t) << 3) - -#define HWC_WTIMER_RUNS 1 -#define HWC_FLUSH 2 -#define HWC_INIT 4 -#define HWC_BROKEN 8 -#define HWC_INTERRUPT 16 -#define HWC_PTIMER_RUNS 32 - -static struct { - - hwc_ioctls_t ioctls; - - hwc_ioctls_t init_ioctls; - - unsigned char *hwcb_list_head; - - unsigned char *hwcb_list_tail; - - unsigned short int mto_number; - - unsigned int mto_char_sum; - - unsigned char hwcb_count; - - unsigned long kmem_start; - - unsigned long kmem_end; - - kmem_pages_t kmem_pages; - - unsigned char *obuf; - - unsigned short int obuf_cursor; - - unsigned short int obuf_count; - - unsigned short int obuf_start; - - unsigned char *page; - - u32 current_servc; - - unsigned char *current_hwcb; - - unsigned char write_nonprio:1; - unsigned char write_prio:1; - unsigned char read_nonprio:1; - unsigned char read_prio:1; - unsigned char read_statechange:1; - unsigned char sig_quiesce:1; - - unsigned char flags; - - hwc_high_level_calls_t *calls; - - hwc_request_t *request; - - spinlock_t lock; - - struct timer_list write_timer; - - struct timer_list poll_timer; -} hwc_data = -{ - { - }, - { - 8, - 0, - 80, - 1, - MAX_KMEM_PAGES, - MAX_KMEM_PAGES, - - 0, - - 0x6c - - }, - NULL, - NULL, - 0, - 0, - 0, - 0, - 0, - 0, - _obuf, - 0, - 0, - 0, - _page, - 0, - NULL, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - NULL, - NULL - -}; - -static unsigned long cr0 __attribute__ ((aligned (8))); -static unsigned long cr0_save __attribute__ ((aligned (8))); -static unsigned char psw_mask __attribute__ ((aligned (8))); - -static ext_int_info_t ext_int_info_hwc; - -#define DELAYED_WRITE 0 -#define IMMEDIATE_WRITE 1 - -static signed int do_hwc_write (int from_user, unsigned char *, - unsigned int, - unsigned char); - -unsigned char hwc_ip_buf[512]; - -static asmlinkage int -internal_print (char write_time, char *fmt,...) -{ - va_list args; - int i; - - va_start (args, fmt); - i = vsprintf (hwc_ip_buf, fmt, args); - va_end (args); - return do_hwc_write (0, hwc_ip_buf, i, write_time); -} - -int -hwc_printk (const char *fmt,...) 
-{ - va_list args; - int i; - unsigned long flags; - int retval; - - spin_lock_irqsave (&hwc_data.lock, flags); - - i = vsprintf (hwc_ip_buf, fmt, args); - va_end (args); - retval = do_hwc_write (0, hwc_ip_buf, i, IMMEDIATE_WRITE); - - spin_unlock_irqrestore (&hwc_data.lock, flags); - - return retval; -} - -#ifdef DUMP_HWCB_INPUT - -static void -dump_storage_area (unsigned char *area, unsigned short int count) -{ - unsigned short int index; - ioctl_nl_t old_final_nl; - - if (!area || !count) - return; - - old_final_nl = hwc_data.ioctls.final_nl; - hwc_data.ioctls.final_nl = 1; - - internal_print (DELAYED_WRITE, "\n%8x ", area); - - for (index = 0; index < count; index++) { - - if (area[index] <= 0xF) - internal_print (DELAYED_WRITE, "0%x", area[index]); - else - internal_print (DELAYED_WRITE, "%x", area[index]); - - if ((index & 0xF) == 0xF) - internal_print (DELAYED_WRITE, "\n%8x ", - &area[index + 1]); - else if ((index & 3) == 3) - internal_print (DELAYED_WRITE, " "); - } - - internal_print (IMMEDIATE_WRITE, "\n"); - - hwc_data.ioctls.final_nl = old_final_nl; -} -#endif - -static inline u32 -service_call ( - u32 hwc_command_word, - unsigned char hwcb[]) -{ - unsigned int condition_code = 1; - - __asm__ __volatile__ ("L 1, 0(%0) \n\t" - "LRA 2, 0(%1) \n\t" - ".long 0xB2200012 \n\t" - : - :"a" (&hwc_command_word), "a" (hwcb) - :"1", "2", "memory"); - - __asm__ __volatile__ ("IPM %0 \n\t" - "SRL %0, 28 \n\t" - :"=r" (condition_code)); - - return condition_code; -} - -static inline unsigned long -hwc_ext_int_param (void) -{ - u32 param; - - __asm__ __volatile__ ("L %0,128\n\t" - :"=r" (param)); - - return (unsigned long) param; -} - -static int -prepare_write_hwcb (void) -{ - write_hwcb_t *hwcb; - - if (!BUF_HWCB) - return -ENOMEM; - - BUF_HWCB_MTO = 0; - BUF_HWCB_CHAR = 0; - - hwcb = (write_hwcb_t *) BUF_HWCB; - - memcpy (hwcb, &write_hwcb_template, sizeof (write_hwcb_t)); - - return 0; -} - -static int -sane_write_hwcb (void) -{ - unsigned short int lost_msg; - unsigned int lost_char; - unsigned char lost_hwcb; - unsigned char *bad_addr; - unsigned long page; - int page_nr; - - if (!OUT_HWCB) - return -ENOMEM; - - if ((unsigned long) OUT_HWCB & 0xFFF) { - - bad_addr = OUT_HWCB; - -#ifdef DUMP_HWC_WRITE_LIST_ERROR - __asm__ ("LHI 1,0xe30\n\t" - "LRA 2,0(%0) \n\t" - "J .+0 \n\t" - : - : "a" (bad_addr) - : "1", "2"); -#endif - - hwc_data.kmem_pages = 0; - if ((unsigned long) BUF_HWCB & 0xFFF) { - - lost_hwcb = hwc_data.hwcb_count; - lost_msg = ALL_HWCB_MTO; - lost_char = ALL_HWCB_CHAR; - - OUT_HWCB = NULL; - BUF_HWCB = NULL; - ALL_HWCB_MTO = 0; - ALL_HWCB_CHAR = 0; - hwc_data.hwcb_count = 0; - } else { - - lost_hwcb = hwc_data.hwcb_count - 1; - lost_msg = ALL_HWCB_MTO - BUF_HWCB_MTO; - lost_char = ALL_HWCB_CHAR - BUF_HWCB_CHAR; - OUT_HWCB = BUF_HWCB; - ALL_HWCB_MTO = BUF_HWCB_MTO; - ALL_HWCB_CHAR = BUF_HWCB_CHAR; - hwc_data.hwcb_count = 1; - page = (unsigned long) BUF_HWCB; - - if (page >= hwc_data.kmem_start && - page <= hwc_data.kmem_end) { - - page_nr = (int) - ((page - hwc_data.kmem_start) >> 12); - set_bit (page_nr, &hwc_data.kmem_pages); - } - } - - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "found invalid HWCB at address 0x%lx. List corrupted. " - "Lost %i HWCBs with %i characters within up to %i " - "messages. 
Saved %i HWCB with last %i characters i" - "within up to %i messages.\n", - (unsigned long) bad_addr, - lost_hwcb, lost_char, lost_msg, - hwc_data.hwcb_count, - ALL_HWCB_CHAR, ALL_HWCB_MTO); - } - return 0; -} - -static int -reuse_write_hwcb (void) -{ - int retval; - - if (hwc_data.hwcb_count < 2) -#ifdef DUMP_HWC_WRITE_LIST_ERROR - __asm__ ("LHI 1,0xe31\n\t" - "LRA 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "J .+0 \n\t" - : - : "a" (BUF_HWCB), "a" (OUT_HWCB) - : "1", "2", "3"); -#else - return -EPERM; -#endif - - if (hwc_data.current_hwcb == OUT_HWCB) { - - if (hwc_data.hwcb_count > 2) { - - BUF_HWCB_NEXT = OUT_HWCB_NEXT; - - BUF_HWCB = OUT_HWCB_NEXT; - - OUT_HWCB_NEXT = BUF_HWCB_NEXT; - - BUF_HWCB_NEXT = NULL; - } - } else { - - BUF_HWCB_NEXT = OUT_HWCB; - - BUF_HWCB = OUT_HWCB; - - OUT_HWCB = OUT_HWCB_NEXT; - - BUF_HWCB_NEXT = NULL; - } - - BUF_HWCB_TIMES_LOST += 1; - BUF_HWCB_CHAR_LOST += BUF_HWCB_CHAR; - BUF_HWCB_MTO_LOST += BUF_HWCB_MTO; - ALL_HWCB_MTO -= BUF_HWCB_MTO; - ALL_HWCB_CHAR -= BUF_HWCB_CHAR; - - retval = prepare_write_hwcb (); - - if (hwc_data.hwcb_count == hwc_data.ioctls.max_hwcb) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "reached my own limit of " - "allowed buffer space for output (%i HWCBs = %li " - "bytes), skipped content of oldest HWCB %i time(s) " - "(%i lines = %i characters)\n", - hwc_data.ioctls.max_hwcb, - hwc_data.ioctls.max_hwcb * PAGE_SIZE, - BUF_HWCB_TIMES_LOST, - BUF_HWCB_MTO_LOST, - BUF_HWCB_CHAR_LOST); - else - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "page allocation failed, " - "could not expand buffer for output (currently in " - "use: %i HWCBs = %li bytes), skipped content of " - "oldest HWCB %i time(s) (%i lines = %i characters)\n", - hwc_data.hwcb_count, - hwc_data.hwcb_count * PAGE_SIZE, - BUF_HWCB_TIMES_LOST, - BUF_HWCB_MTO_LOST, - BUF_HWCB_CHAR_LOST); - - return retval; -} - -static int -allocate_write_hwcb (void) -{ - unsigned char *page; - int page_nr; - - if (hwc_data.hwcb_count == hwc_data.ioctls.max_hwcb) - return -ENOMEM; - - page_nr = find_first_zero_bit (&hwc_data.kmem_pages, MAX_KMEM_PAGES); - if (page_nr < hwc_data.ioctls.kmem_hwcb) { - - page = (unsigned char *) - (hwc_data.kmem_start + (page_nr << 12)); - set_bit (page_nr, &hwc_data.kmem_pages); - } else - page = (unsigned char *) __get_free_page (GFP_ATOMIC | GFP_DMA); - - if (!page) - return -ENOMEM; - - if (!OUT_HWCB) - OUT_HWCB = page; - else - BUF_HWCB_NEXT = page; - - BUF_HWCB = page; - - BUF_HWCB_NEXT = NULL; - - hwc_data.hwcb_count++; - - prepare_write_hwcb (); - - BUF_HWCB_TIMES_LOST = 0; - BUF_HWCB_MTO_LOST = 0; - BUF_HWCB_CHAR_LOST = 0; - -#ifdef BUFFER_STRESS_TEST - - internal_print ( - DELAYED_WRITE, - "*** " HWC_RW_PRINT_HEADER - "page #%i at 0x%x for buffering allocated. 
***\n", - hwc_data.hwcb_count, page); - -#endif - - return 0; -} - -static int -release_write_hwcb (void) -{ - unsigned long page; - int page_nr; - - if (!hwc_data.hwcb_count) - return -ENODATA; - - if (hwc_data.hwcb_count == 1) { - - prepare_write_hwcb (); - - ALL_HWCB_CHAR = 0; - ALL_HWCB_MTO = 0; - BUF_HWCB_TIMES_LOST = 0; - BUF_HWCB_MTO_LOST = 0; - BUF_HWCB_CHAR_LOST = 0; - } else { - page = (unsigned long) OUT_HWCB; - - ALL_HWCB_MTO -= OUT_HWCB_MTO; - ALL_HWCB_CHAR -= OUT_HWCB_CHAR; - hwc_data.hwcb_count--; - - OUT_HWCB = OUT_HWCB_NEXT; - - if (page >= hwc_data.kmem_start && - page <= hwc_data.kmem_end) { - /*memset((void *) page, 0, PAGE_SIZE); */ - - page_nr = (int) ((page - hwc_data.kmem_start) >> 12); - clear_bit (page_nr, &hwc_data.kmem_pages); - } else - free_page (page); -#ifdef BUFFER_STRESS_TEST - - internal_print ( - DELAYED_WRITE, - "*** " HWC_RW_PRINT_HEADER - "page at 0x%x released, %i pages still in use ***\n", - page, hwc_data.hwcb_count); - -#endif - } - return 0; -} - -static int -add_mto ( - unsigned char *message, - unsigned short int count) -{ - unsigned short int mto_size; - write_hwcb_t *hwcb; - mto_t *mto; - void *dest; - - if (!BUF_HWCB) - return -ENOMEM; - - if (BUF_HWCB == hwc_data.current_hwcb) - return -ENOMEM; - - mto_size = sizeof (mto_t) + count; - - hwcb = (write_hwcb_t *) BUF_HWCB; - - if ((MAX_HWCB_ROOM - hwcb->length) < mto_size) - return -ENOMEM; - - mto = (mto_t *) (((unsigned long) hwcb) + hwcb->length); - - memcpy (mto, &mto_template, sizeof (mto_t)); - - dest = (void *) (((unsigned long) mto) + sizeof (mto_t)); - - memcpy (dest, message, count); - - mto->length += count; - - hwcb->length += mto_size; - hwcb->msgbuf.length += mto_size; - hwcb->msgbuf.mdb.length += mto_size; - - BUF_HWCB_MTO++; - ALL_HWCB_MTO++; - BUF_HWCB_CHAR += count; - ALL_HWCB_CHAR += count; - - return count; -} - -static int write_event_data_1 (void); - -static void -do_poll_hwc (unsigned long data) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_data.lock, flags); - - write_event_data_1 (); - - spin_unlock_irqrestore (&hwc_data.lock, flags); -} - -void -start_poll_hwc (void) -{ - init_timer (&hwc_data.poll_timer); - hwc_data.poll_timer.function = do_poll_hwc; - hwc_data.poll_timer.data = (unsigned long) NULL; - hwc_data.poll_timer.expires = jiffies + 2 * HZ; - add_timer (&hwc_data.poll_timer); - hwc_data.flags |= HWC_PTIMER_RUNS; -} - -static int -write_event_data_1 (void) -{ - unsigned short int condition_code; - int retval; - write_hwcb_t *hwcb = (write_hwcb_t *) OUT_HWCB; - - if ((!hwc_data.write_prio) && - (!hwc_data.write_nonprio) && - hwc_data.read_statechange) - return -EOPNOTSUPP; - - if (hwc_data.current_servc) - return -EBUSY; - - retval = sane_write_hwcb (); - if (retval < 0) - return -EIO; - - if (!OUT_HWCB_MTO) - return -ENODATA; - - if (!hwc_data.write_nonprio && hwc_data.write_prio) - hwcb->msgbuf.type = ET_PMsgCmd; - else - hwcb->msgbuf.type = ET_Msg; - - condition_code = service_call (HWC_CMDW_WRITEDATA, OUT_HWCB); - -#ifdef DUMP_HWC_WRITE_ERROR - if (condition_code != HWC_COMMAND_INITIATED) - __asm__ ("LHI 1,0xe20\n\t" - "L 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "J .+0 \n\t" - : - : "a" (&condition_code), "a" (OUT_HWCB) - : "1", "2", "3"); -#endif - - switch (condition_code) { - case HWC_COMMAND_INITIATED: - hwc_data.current_servc = HWC_CMDW_WRITEDATA; - hwc_data.current_hwcb = OUT_HWCB; - retval = condition_code; - break; - case HWC_BUSY: - retval = -EBUSY; - break; - case HWC_NOT_OPERATIONAL: - start_poll_hwc (); - default: - retval = -EIO; - } - - 
return retval; -} - -static void -flush_hwcbs (void) -{ - while (hwc_data.hwcb_count > 1) - release_write_hwcb (); - - release_write_hwcb (); - - hwc_data.flags &= ~HWC_FLUSH; -} - -static int -write_event_data_2 (u32 ext_int_param) -{ - write_hwcb_t *hwcb; - int retval = 0; - -#ifdef DUMP_HWC_WRITE_ERROR - if ((ext_int_param & HWC_EXT_INT_PARAM_ADDR) - != (unsigned long) hwc_data.current_hwcb) { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "write_event_data_2 : " - "HWCB address does not fit " - "(expected: 0x%lx, got: 0x%lx).\n", - (unsigned long) hwc_data.current_hwcb, - ext_int_param); - return -EINVAL; - } -#endif - - hwcb = (write_hwcb_t *) OUT_HWCB; - -#ifdef DUMP_HWC_WRITE_LIST_ERROR - if (((unsigned char *) hwcb) != hwc_data.current_hwcb) { - __asm__ ("LHI 1,0xe22\n\t" - "LRA 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "LRA 4,0(%2)\n\t" - "LRA 5,0(%3)\n\t" - "J .+0 \n\t" - : - : "a" (OUT_HWCB), - "a" (hwc_data.current_hwcb), - "a" (BUF_HWCB), - "a" (hwcb) - : "1", "2", "3", "4", "5"); - } -#endif - -#ifdef DUMP_HWC_WRITE_ERROR - if (hwcb->response_code != 0x0020) { - __asm__ ("LHI 1,0xe21\n\t" - "LRA 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "LRA 4,0(%2)\n\t" - "LH 5,0(%3)\n\t" - "SRL 5,8\n\t" - "J .+0 \n\t" - : - : "a" (OUT_HWCB), "a" (hwc_data.current_hwcb), - "a" (BUF_HWCB), - "a" (&(hwc_data.hwcb_count)) - : "1", "2", "3", "4", "5"); - } -#endif - - switch (hwcb->response_code) { - case 0x0020: - - retval = OUT_HWCB_CHAR; - release_write_hwcb (); - break; - case 0x0040: - case 0x0340: - case 0x40F0: - if (!hwc_data.read_statechange) { - hwcb->response_code = 0; - start_poll_hwc (); - } - retval = -EIO; - break; - default: - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "write_event_data_2 : " - "failed operation " - "(response code: 0x%x " - "HWCB address: 0x%x).\n", - hwcb->response_code, - hwcb); - retval = -EIO; - } - - if (retval == -EIO) { - - hwcb->control_mask[0] = 0; - hwcb->control_mask[1] = 0; - hwcb->control_mask[2] = 0; - hwcb->response_code = 0; - } - hwc_data.current_servc = 0; - hwc_data.current_hwcb = NULL; - - if (hwc_data.flags & HWC_FLUSH) - flush_hwcbs (); - - return retval; -} - -static void -do_put_line ( - unsigned char *message, - unsigned short count) -{ - - if (add_mto (message, count) != count) { - - if (allocate_write_hwcb () < 0) - reuse_write_hwcb (); - -#ifdef DUMP_HWC_WRITE_LIST_ERROR - if (add_mto (message, count) != count) - __asm__ ("LHI 1,0xe32\n\t" - "LRA 2,0(%0)\n\t" - "L 3,0(%1)\n\t" - "LRA 4,0(%2)\n\t" - "LRA 5,0(%3)\n\t" - "J .+0 \n\t" - : - : "a" (message), "a" (&hwc_data.kmem_pages), - "a" (BUF_HWCB), "a" (OUT_HWCB) - : "1", "2", "3", "4", "5"); -#else - add_mto (message, count); -#endif - } -} - -static void -put_line ( - unsigned char *message, - unsigned short count) -{ - - if ((!hwc_data.obuf_start) && (hwc_data.flags & HWC_WTIMER_RUNS)) { - del_timer (&hwc_data.write_timer); - hwc_data.flags &= ~HWC_WTIMER_RUNS; - } - hwc_data.obuf_start += count; - - do_put_line (message, count); - - hwc_data.obuf_start -= count; -} - -static void -set_alarm (void) -{ - write_hwcb_t *hwcb; - - if ((!BUF_HWCB) || (BUF_HWCB == hwc_data.current_hwcb)) - allocate_write_hwcb (); - - hwcb = (write_hwcb_t *) BUF_HWCB; - hwcb->msgbuf.mdb.mdb_body.go.general_msg_flags |= GMF_SndAlrm; -} - -static void -hwc_write_timeout (unsigned long data) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_data.lock, flags); - - hwc_data.obuf_start = hwc_data.obuf_count; - if (hwc_data.obuf_count) - put_line (hwc_data.obuf, hwc_data.obuf_count); - 
hwc_data.obuf_start = 0; - - hwc_data.obuf_cursor = 0; - hwc_data.obuf_count = 0; - - write_event_data_1 (); - - spin_unlock_irqrestore (&hwc_data.lock, flags); -} - -static int -do_hwc_write ( - int from_user, - unsigned char *msg, - unsigned int count, - unsigned char write_time) -{ - unsigned int i_msg = 0; - unsigned short int spaces = 0; - unsigned int processed_characters = 0; - unsigned char ch; - unsigned short int obuf_count; - unsigned short int obuf_cursor; - unsigned short int obuf_columns; - - if (hwc_data.obuf_start) { - obuf_cursor = 0; - obuf_count = 0; - obuf_columns = MIN (hwc_data.ioctls.columns, - MAX_MESSAGE_SIZE - hwc_data.obuf_start); - } else { - obuf_cursor = hwc_data.obuf_cursor; - obuf_count = hwc_data.obuf_count; - obuf_columns = hwc_data.ioctls.columns; - } - - for (i_msg = 0; i_msg < count; i_msg++) { - if (from_user) - get_user (ch, msg + i_msg); - else - ch = msg[i_msg]; - - processed_characters++; - - if ((obuf_cursor == obuf_columns) && - - (ch != '\n') && - - (ch != '\t')) { - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_columns); - obuf_cursor = 0; - obuf_count = 0; - } - switch (ch) { - - case '\n': - - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_count); - obuf_cursor = 0; - obuf_count = 0; - break; - - case '\a': - - hwc_data.obuf_start += obuf_count; - set_alarm (); - hwc_data.obuf_start -= obuf_count; - - break; - - case '\t': - - do { - if (obuf_cursor < obuf_columns) { - hwc_data.obuf[hwc_data.obuf_start + - obuf_cursor] - = HWC_ASCEBC (' '); - obuf_cursor++; - } else - break; - } while (obuf_cursor % hwc_data.ioctls.width_htab); - - break; - - case '\f': - case '\v': - - spaces = obuf_cursor; - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_count); - obuf_count = obuf_cursor; - while (spaces) { - hwc_data.obuf[hwc_data.obuf_start + - obuf_cursor - spaces] - = HWC_ASCEBC (' '); - spaces--; - } - - break; - - case '\b': - - if (obuf_cursor) - obuf_cursor--; - break; - - case '\r': - - obuf_cursor = 0; - break; - - case 0x00: - - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_count); - obuf_cursor = 0; - obuf_count = 0; - goto out; - - default: - - if (isprint (ch)) - hwc_data.obuf[hwc_data.obuf_start + - obuf_cursor++] - = HWC_ASCEBC (ch); - } - if (obuf_cursor > obuf_count) - obuf_count = obuf_cursor; - } - - if (obuf_cursor) { - - if (hwc_data.obuf_start || - (hwc_data.ioctls.final_nl == 0)) { - - put_line (&hwc_data.obuf[hwc_data.obuf_start], - obuf_count); - obuf_cursor = 0; - obuf_count = 0; - } else { - - if (hwc_data.ioctls.final_nl > 0) { - - if (hwc_data.flags & HWC_WTIMER_RUNS) { - - mod_timer (&hwc_data.write_timer, - jiffies + hwc_data.ioctls.final_nl * HZ / 10); - } else { - - init_timer (&hwc_data.write_timer); - hwc_data.write_timer.function = - hwc_write_timeout; - hwc_data.write_timer.data = - (unsigned long) NULL; - hwc_data.write_timer.expires = - jiffies + - hwc_data.ioctls.final_nl * HZ / 10; - add_timer (&hwc_data.write_timer); - hwc_data.flags |= HWC_WTIMER_RUNS; - } - } else; - - } - } else; - - out: - - if (!hwc_data.obuf_start) { - hwc_data.obuf_cursor = obuf_cursor; - hwc_data.obuf_count = obuf_count; - } - if (write_time == IMMEDIATE_WRITE) - write_event_data_1 (); - - return processed_characters; -} - -signed int -hwc_write (int from_user, const unsigned char *msg, unsigned int count) -{ - unsigned long flags; - int retval; - - spin_lock_irqsave (&hwc_data.lock, flags); - - retval = do_hwc_write (from_user, (unsigned char *) msg, - count, IMMEDIATE_WRITE); - - spin_unlock_irqrestore 
(&hwc_data.lock, flags); - - return retval; -} - -unsigned int -hwc_chars_in_buffer (unsigned char flag) -{ - unsigned short int number = 0; - unsigned long flags; - - spin_lock_irqsave (&hwc_data.lock, flags); - - if (flag & IN_HWCB) - number += ALL_HWCB_CHAR; - - if (flag & IN_WRITE_BUF) - number += hwc_data.obuf_cursor; - - spin_unlock_irqrestore (&hwc_data.lock, flags); - - return number; -} - -static inline int -nr_setbits (kmem_pages_t arg) -{ - int i; - int nr = 0; - - for (i = 0; i < (sizeof (arg) << 3); i++) { - if (arg & 1) - nr++; - arg >>= 1; - } - - return nr; -} - -unsigned int -hwc_write_room (unsigned char flag) -{ - unsigned int number = 0; - unsigned long flags; - write_hwcb_t *hwcb; - - spin_lock_irqsave (&hwc_data.lock, flags); - - if (flag & IN_HWCB) { - - if (BUF_HWCB) { - hwcb = (write_hwcb_t *) BUF_HWCB; - number += MAX_HWCB_ROOM - hwcb->length; - } - number += (hwc_data.ioctls.kmem_hwcb - - nr_setbits (hwc_data.kmem_pages)) * - (MAX_HWCB_ROOM - - (sizeof (write_hwcb_t) + sizeof (mto_t))); - } - if (flag & IN_WRITE_BUF) - number += MAX_HWCB_ROOM - hwc_data.obuf_cursor; - - spin_unlock_irqrestore (&hwc_data.lock, flags); - - return number; -} - -void -hwc_flush_buffer (unsigned char flag) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_data.lock, flags); - - if (flag & IN_HWCB) { - if (hwc_data.current_servc != HWC_CMDW_WRITEDATA) - flush_hwcbs (); - else - hwc_data.flags |= HWC_FLUSH; - } - if (flag & IN_WRITE_BUF) { - hwc_data.obuf_cursor = 0; - hwc_data.obuf_count = 0; - } - spin_unlock_irqrestore (&hwc_data.lock, flags); -} - -unsigned short int -seperate_cases (unsigned char *buf, unsigned short int count) -{ - - unsigned short int i_in; - - unsigned short int i_out = 0; - - unsigned char _case = 0; - - for (i_in = 0; i_in < count; i_in++) { - - if (buf[i_in] == hwc_data.ioctls.delim) { - - if ((i_in + 1 < count) && - (buf[i_in + 1] == hwc_data.ioctls.delim)) { - - buf[i_out] = hwc_data.ioctls.delim; - - i_out++; - - i_in++; - - } else - _case = ~_case; - - } else { - - if (_case) { - - if (hwc_data.ioctls.tolower) - buf[i_out] = _ebc_toupper[buf[i_in]]; - - else - buf[i_out] = _ebc_tolower[buf[i_in]]; - - } else - buf[i_out] = buf[i_in]; - - i_out++; - } - } - - return i_out; -} - -#ifdef DUMP_HWCB_INPUT - -static int -gds_vector_name (u16 id, unsigned char name[]) -{ - int retval = 0; - - switch (id) { - case GDS_ID_MDSMU: - name = "Multiple Domain Support Message Unit"; - break; - case GDS_ID_MDSRouteInfo: - name = "MDS Routing Information"; - break; - case GDS_ID_AgUnWrkCorr: - name = "Agent Unit of Work Correlator"; - break; - case GDS_ID_SNACondReport: - name = "SNA Condition Report"; - break; - case GDS_ID_CPMSU: - name = "CP Management Services Unit"; - break; - case GDS_ID_RoutTargInstr: - name = "Routing and Targeting Instructions"; - break; - case GDS_ID_OpReq: - name = "Operate Request"; - break; - case GDS_ID_TextCmd: - name = "Text Command"; - break; - - default: - name = "unknown GDS variable"; - retval = -EINVAL; - } - - return retval; -} -#endif - -inline static gds_vector_t * -find_gds_vector ( - gds_vector_t * start, void *end, u16 id) -{ - gds_vector_t *vec; - gds_vector_t *retval = NULL; - - vec = start; - - while (((void *) vec) < end) { - if (vec->gds_id == id) { - -#ifdef DUMP_HWCB_INPUT - int retval_name; - unsigned char name[64]; - - retval_name = gds_vector_name (id, name); - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "%s at 0x%x up to 0x%x, length: %d", - name, - (unsigned long) vec, - ((unsigned long) vec) + 
vec->length - 1, - vec->length); - if (retval_name < 0) - internal_print ( - IMMEDIATE_WRITE, - ", id: 0x%x\n", - vec->gds_id); - else - internal_print ( - IMMEDIATE_WRITE, - "\n"); -#endif - - retval = vec; - break; - } - vec = (gds_vector_t *) (((unsigned long) vec) + vec->length); - } - - return retval; -} - -inline static gds_subvector_t * -find_gds_subvector ( - gds_subvector_t * start, void *end, u8 key) -{ - gds_subvector_t *subvec; - gds_subvector_t *retval = NULL; - - subvec = start; - - while (((void *) subvec) < end) { - if (subvec->key == key) { - retval = subvec; - break; - } - subvec = (gds_subvector_t *) - (((unsigned long) subvec) + subvec->length); - } - - return retval; -} - -inline static int -get_input (void *start, void *end) -{ - int count; - - count = ((unsigned long) end) - ((unsigned long) start); - - if (hwc_data.ioctls.tolower) - EBC_TOLOWER (start, count); - - if (hwc_data.ioctls.delim) - count = seperate_cases (start, count); - - HWC_EBCASC_STR (start, count); - - if (hwc_data.ioctls.echo) - do_hwc_write (0, start, count, IMMEDIATE_WRITE); - - if (hwc_data.calls != NULL) - if (hwc_data.calls->move_input != NULL) - (hwc_data.calls->move_input) (start, count); - - return count; -} - -inline static int -eval_selfdeftextmsg (gds_subvector_t * start, void *end) -{ - gds_subvector_t *subvec; - void *subvec_data; - void *subvec_end; - int retval = 0; - - subvec = start; - - while (((void *) subvec) < end) { - subvec = find_gds_subvector (subvec, end, 0x30); - if (!subvec) - break; - subvec_data = (void *) - (((unsigned long) subvec) + - sizeof (gds_subvector_t)); - subvec_end = (void *) - (((unsigned long) subvec) + subvec->length); - retval += get_input (subvec_data, subvec_end); - subvec = (gds_subvector_t *) subvec_end; - } - - return retval; -} - -inline static int -eval_textcmd (gds_subvector_t * start, void *end) -{ - gds_subvector_t *subvec; - gds_subvector_t *subvec_data; - void *subvec_end; - int retval = 0; - - subvec = start; - - while (((void *) subvec) < end) { - subvec = find_gds_subvector ( - subvec, end, GDS_KEY_SelfDefTextMsg); - if (!subvec) - break; - subvec_data = (gds_subvector_t *) - (((unsigned long) subvec) + - sizeof (gds_subvector_t)); - subvec_end = (void *) - (((unsigned long) subvec) + subvec->length); - retval += eval_selfdeftextmsg (subvec_data, subvec_end); - subvec = (gds_subvector_t *) subvec_end; - } - - return retval; -} - -inline static int -eval_cpmsu (gds_vector_t * start, void *end) -{ - gds_vector_t *vec; - gds_subvector_t *vec_data; - void *vec_end; - int retval = 0; - - vec = start; - - while (((void *) vec) < end) { - vec = find_gds_vector (vec, end, GDS_ID_TextCmd); - if (!vec) - break; - vec_data = (gds_subvector_t *) - (((unsigned long) vec) + sizeof (gds_vector_t)); - vec_end = (void *) (((unsigned long) vec) + vec->length); - retval += eval_textcmd (vec_data, vec_end); - vec = (gds_vector_t *) vec_end; - } - - return retval; -} - -inline static int -eval_mdsmu (gds_vector_t * start, void *end) -{ - gds_vector_t *vec; - gds_vector_t *vec_data; - void *vec_end; - int retval = 0; - - vec = find_gds_vector (start, end, GDS_ID_CPMSU); - if (vec) { - vec_data = (gds_vector_t *) - (((unsigned long) vec) + sizeof (gds_vector_t)); - vec_end = (void *) (((unsigned long) vec) + vec->length); - retval = eval_cpmsu (vec_data, vec_end); - } - return retval; -} - -static int -eval_evbuf (gds_vector_t * start, void *end) -{ - gds_vector_t *vec; - gds_vector_t *vec_data; - void *vec_end; - int retval = 0; - - vec = find_gds_vector 
(start, end, GDS_ID_MDSMU); - if (vec) { - vec_data = (gds_vector_t *) - (((unsigned long) vec) + sizeof (gds_vector_t)); - vec_end = (void *) (((unsigned long) vec) + vec->length); - retval = eval_mdsmu (vec_data, vec_end); - } - return retval; -} - -static inline int -eval_hwc_receive_mask (_hwcb_mask_t mask) -{ - - hwc_data.write_nonprio - = ((mask & ET_Msg_Mask) == ET_Msg_Mask); - - hwc_data.write_prio - = ((mask & ET_PMsgCmd_Mask) == ET_PMsgCmd_Mask); - - if (hwc_data.write_prio || hwc_data.write_nonprio) { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can write messages\n"); - return 0; - } else { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can not write messages\n"); - return -1; - } -} - -static inline int -eval_hwc_send_mask (_hwcb_mask_t mask) -{ - - hwc_data.read_statechange - = ((mask & ET_StateChange_Mask) == ET_StateChange_Mask); - if (hwc_data.read_statechange) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can read state change notifications\n"); - else - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can not read state change notifications\n"); - - hwc_data.sig_quiesce - = ((mask & ET_SigQuiesce_Mask) == ET_SigQuiesce_Mask); - if (hwc_data.sig_quiesce) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can receive signal quiesce\n"); - else - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can not receive signal quiesce\n"); - - hwc_data.read_nonprio - = ((mask & ET_OpCmd_Mask) == ET_OpCmd_Mask); - if (hwc_data.read_nonprio) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can read commands\n"); - - hwc_data.read_prio - = ((mask & ET_PMsgCmd_Mask) == ET_PMsgCmd_Mask); - if (hwc_data.read_prio) - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can read priority commands\n"); - - if (hwc_data.read_prio || hwc_data.read_nonprio) { - return 0; - } else { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "can not read commands from operator\n"); - return -1; - } -} - -static int -eval_statechangebuf (statechangebuf_t * scbuf) -{ - int retval = 0; - - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "HWC state change detected\n"); - - if (scbuf->validity_hwc_active_facility_mask) { - - } - if (scbuf->validity_hwc_receive_mask) { - - if (scbuf->mask_length != 4) { -#ifdef DUMP_HWC_INIT_ERROR - __asm__ ("LHI 1,0xe50\n\t" - "LRA 2,0(%0)\n\t" - "J .+0 \n\t" - : - : "a" (scbuf) - : "1", "2"); -#endif - } else { - - retval += eval_hwc_receive_mask - (scbuf->hwc_receive_mask); - } - } - if (scbuf->validity_hwc_send_mask) { - - if (scbuf->mask_length != 4) { -#ifdef DUMP_HWC_INIT_ERROR - __asm__ ("LHI 1,0xe51\n\t" - "LRA 2,0(%0)\n\t" - "J .+0 \n\t" - : - : "a" (scbuf) - : "1", "2"); -#endif - } else { - - retval += eval_hwc_send_mask - (scbuf->hwc_send_mask); - } - } - if (scbuf->validity_read_data_function_mask) { - - } - return retval; -} - -#ifdef CONFIG_SMP -extern unsigned long cpu_online_map; -static volatile unsigned long cpu_quiesce_map; - -static void -do_load_quiesce_psw (void) -{ - psw_t quiesce_psw; - - clear_bit (smp_processor_id (), &cpu_quiesce_map); - if (smp_processor_id () == 0) { - - while (cpu_quiesce_map != 0) ; - - quiesce_psw.mask = _DW_PSW_MASK; - quiesce_psw.addr = 0xfff; - __load_psw (quiesce_psw); - } - signal_processor (smp_processor_id (), sigp_stop); -} - -static void -do_machine_quiesce (void) -{ - cpu_quiesce_map = cpu_online_map; - smp_call_function (do_load_quiesce_psw, NULL, 0, 0); - do_load_quiesce_psw (); -} - -#else -static void 
-do_machine_quiesce (void) -{ - psw_t quiesce_psw; - - quiesce_psw.mask = _DW_PSW_MASK; - queisce_psw.addr = 0xfff; - __load_psw (quiesce_psw); -} - -#endif - -static int -process_evbufs (void *start, void *end) -{ - int retval = 0; - evbuf_t *evbuf; - void *evbuf_end; - gds_vector_t *evbuf_data; - - evbuf = (evbuf_t *) start; - while (((void *) evbuf) < end) { - evbuf_data = (gds_vector_t *) - (((unsigned long) evbuf) + sizeof (evbuf_t)); - evbuf_end = (void *) (((unsigned long) evbuf) + evbuf->length); - switch (evbuf->type) { - case ET_OpCmd: - case ET_CntlProgOpCmd: - case ET_PMsgCmd: -#ifdef DUMP_HWCB_INPUT - - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "event buffer " - "at 0x%x up to 0x%x, length: %d\n", - (unsigned long) evbuf, - (unsigned long) (evbuf_end - 1), - evbuf->length); - dump_storage_area ((void *) evbuf, evbuf->length); -#endif - retval += eval_evbuf (evbuf_data, evbuf_end); - break; - case ET_StateChange: - retval += eval_statechangebuf - ((statechangebuf_t *) evbuf); - break; - case ET_SigQuiesce: - - _machine_restart = do_machine_quiesce; - _machine_halt = do_machine_quiesce; - _machine_power_off = do_machine_quiesce; - ctrl_alt_del (); - break; - default: - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: " - "unknown event buffer found, " - "type 0x%x", - evbuf->type); - retval = -ENOSYS; - } - evbuf = (evbuf_t *) evbuf_end; - } - return retval; -} - -static int -unconditional_read_1 (void) -{ - unsigned short int condition_code; - read_hwcb_t *hwcb = (read_hwcb_t *) hwc_data.page; - int retval; - -#if 0 - - if ((!hwc_data.read_prio) && (!hwc_data.read_nonprio)) - return -EOPNOTSUPP; - - if (hwc_data.current_servc) - return -EBUSY; -#endif - - memset (hwcb, 0x00, PAGE_SIZE); - memcpy (hwcb, &read_hwcb_template, sizeof (read_hwcb_t)); - - condition_code = service_call (HWC_CMDW_READDATA, hwc_data.page); - -#ifdef DUMP_HWC_READ_ERROR - if (condition_code == HWC_NOT_OPERATIONAL) - __asm__ ("LHI 1,0xe40\n\t" - "L 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "J .+0 \n\t" - : - : "a" (&condition_code), "a" (hwc_data.page) - : "1", "2", "3"); -#endif - - switch (condition_code) { - case HWC_COMMAND_INITIATED: - hwc_data.current_servc = HWC_CMDW_READDATA; - hwc_data.current_hwcb = hwc_data.page; - retval = condition_code; - break; - case HWC_BUSY: - retval = -EBUSY; - break; - default: - retval = -EIO; - } - - return retval; -} - -static int -unconditional_read_2 (u32 ext_int_param) -{ - read_hwcb_t *hwcb = (read_hwcb_t *) hwc_data.page; - -#ifdef DUMP_HWC_READ_ERROR - if ((hwcb->response_code != 0x0020) && - (hwcb->response_code != 0x0220) && - (hwcb->response_code != 0x60F0) && - (hwcb->response_code != 0x62F0)) - __asm__ ("LHI 1,0xe41\n\t" - "LRA 2,0(%0)\n\t" - "L 3,0(%1)\n\t" - "J .+0\n\t" - : - : "a" (hwc_data.page), "a" (&(hwcb->response_code)) - : "1", "2", "3"); -#endif - - hwc_data.current_servc = 0; - hwc_data.current_hwcb = NULL; - - switch (hwcb->response_code) { - - case 0x0020: - case 0x0220: - return process_evbufs ( - (void *) (((unsigned long) hwcb) + sizeof (read_hwcb_t)), - (void *) (((unsigned long) hwcb) + hwcb->length)); - - case 0x60F0: - case 0x62F0: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: " - "got interrupt and tried to read input, " - "but nothing found (response code=0x%x).\n", - hwcb->response_code); - return 0; - - case 0x0100: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: HWCB boundary violation - this " - "must not occur in a 
correct driver, please contact " - "author\n"); - return -EIO; - - case 0x0300: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: " - "insufficient HWCB length - this must not occur in a " - "correct driver, please contact author\n"); - return -EIO; - - case 0x01F0: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: " - "invalid command - this must not occur in a correct " - "driver, please contact author\n"); - return -EIO; - - case 0x40F0: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: invalid function code\n"); - return -EIO; - - case 0x70F0: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: invalid selection mask\n"); - return -EIO; - - case 0x0040: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: HWC equipment check\n"); - return -EIO; - - default: - internal_print ( - IMMEDIATE_WRITE, - HWC_RW_PRINT_HEADER - "unconditional read: invalid response code %x - this " - "must not occur in a correct driver, please contact " - "author\n", - hwcb->response_code); - return -EIO; - } -} - -static int -write_event_mask_1 (void) -{ - unsigned int condition_code; - int retval; - - condition_code = service_call (HWC_CMDW_WRITEMASK, hwc_data.page); - -#ifdef DUMP_HWC_INIT_ERROR - - if (condition_code == HWC_NOT_OPERATIONAL) - __asm__ ("LHI 1,0xe10\n\t" - "L 2,0(%0)\n\t" - "LRA 3,0(%1)\n\t" - "J .+0\n\t" - : - : "a" (&condition_code), "a" (hwc_data.page) - : "1", "2", "3"); -#endif - - switch (condition_code) { - case HWC_COMMAND_INITIATED: - hwc_data.current_servc = HWC_CMDW_WRITEMASK; - hwc_data.current_hwcb = hwc_data.page; - retval = condition_code; - break; - case HWC_BUSY: - retval = -EBUSY; - break; - default: - retval = -EIO; - } - - return retval; -} - -static int -write_event_mask_2 (u32 ext_int_param) -{ - init_hwcb_t *hwcb = (init_hwcb_t *) hwc_data.page; - int retval = 0; - - if (hwcb->response_code != 0x0020) { -#ifdef DUMP_HWC_INIT_ERROR - __asm__ ("LHI 1,0xe11\n\t" - "LRA 2,0(%0)\n\t" - "L 3,0(%1)\n\t" - "J .+0\n\t" - : - : "a" (hwcb), "a" (&(hwcb->response_code)) - : "1", "2", "3"); -#else - retval = -1; -#endif - } else { - if (hwcb->mask_length != 4) { -#ifdef DUMP_HWC_INIT_ERROR - __asm__ ("LHI 1,0xe52\n\t" - "LRA 2,0(%0)\n\t" - "J .+0 \n\t" - : - : "a" (hwcb) - : "1", "2"); -#endif - } else { - retval += eval_hwc_receive_mask - (hwcb->hwc_receive_mask); - retval += eval_hwc_send_mask (hwcb->hwc_send_mask); - } - } - - hwc_data.current_servc = 0; - hwc_data.current_hwcb = NULL; - - return retval; -} - -static int -set_hwc_ioctls (hwc_ioctls_t * ioctls, char correct) -{ - int retval = 0; - hwc_ioctls_t tmp; - - if (ioctls->width_htab > MAX_MESSAGE_SIZE) { - if (correct) - tmp.width_htab = MAX_MESSAGE_SIZE; - else - retval = -EINVAL; - } else - tmp.width_htab = ioctls->width_htab; - - tmp.echo = ioctls->echo; - - if (ioctls->columns > MAX_MESSAGE_SIZE) { - if (correct) - tmp.columns = MAX_MESSAGE_SIZE; - else - retval = -EINVAL; - } else - tmp.columns = ioctls->columns; - - tmp.final_nl = ioctls->final_nl; - - if (ioctls->max_hwcb < 2) { - if (correct) - tmp.max_hwcb = 2; - else - retval = -EINVAL; - } else - tmp.max_hwcb = ioctls->max_hwcb; - - tmp.tolower = ioctls->tolower; - - if (ioctls->kmem_hwcb > ioctls->max_hwcb) { - if (correct) - tmp.kmem_hwcb = ioctls->max_hwcb; - else - retval = -EINVAL; - } else - tmp.kmem_hwcb = ioctls->kmem_hwcb; - - if (ioctls->kmem_hwcb > MAX_KMEM_PAGES) { - if (correct) - 
ioctls->kmem_hwcb = MAX_KMEM_PAGES; - else - retval = -EINVAL; - } - if (ioctls->kmem_hwcb < 2) { - if (correct) - ioctls->kmem_hwcb = 2; - else - retval = -EINVAL; - } - tmp.delim = ioctls->delim; - - if (!(retval < 0)) - hwc_data.ioctls = tmp; - - return retval; -} - -int -do_hwc_init (void) -{ - int retval; - - memcpy (hwc_data.page, &init_hwcb_template, sizeof (init_hwcb_t)); - - do { - - retval = write_event_mask_1 (); - - if (retval == -EBUSY) { - - hwc_data.flags |= HWC_INIT; - - __ctl_store (cr0, 0, 0); - cr0_save = cr0; - cr0 |= 0x00000200; - cr0 &= 0xFFFFF3AC; - __ctl_load (cr0, 0, 0); - - asm volatile ("STOSM %0,0x01" - :"=m" (psw_mask)::"memory"); - - while (!(hwc_data.flags & HWC_INTERRUPT)) - barrier (); - - asm volatile ("STNSM %0,0xFE" - :"=m" (psw_mask)::"memory"); - - __ctl_load (cr0_save, 0, 0); - - hwc_data.flags &= ~HWC_INIT; - } - } while (retval == -EBUSY); - - if (retval == -EIO) { - hwc_data.flags |= HWC_BROKEN; - printk (HWC_RW_PRINT_HEADER "HWC not operational\n"); - } - return retval; -} - -void hwc_interrupt_handler (struct pt_regs *regs, __u16 code); - -int -hwc_init (void) -{ - int retval; - -#ifdef BUFFER_STRESS_TEST - - init_hwcb_t *hwcb; - int i; - -#endif - - if (register_early_external_interrupt (0x2401, hwc_interrupt_handler, - &ext_int_info_hwc) != 0) - panic ("Couldn't request external interrupts 0x2401"); - - spin_lock_init (&hwc_data.lock); - -#ifdef USE_VM_DETECTION - - if (MACHINE_IS_VM) { - - if (hwc_data.init_ioctls.columns > 76) - hwc_data.init_ioctls.columns = 76; - hwc_data.init_ioctls.tolower = 1; - if (!hwc_data.init_ioctls.delim) - hwc_data.init_ioctls.delim = DEFAULT_CASE_DELIMITER; - } else { - hwc_data.init_ioctls.tolower = 0; - hwc_data.init_ioctls.delim = 0; - } -#endif - retval = set_hwc_ioctls (&hwc_data.init_ioctls, 1); - - hwc_data.kmem_start = (unsigned long) - alloc_bootmem_low_pages (hwc_data.ioctls.kmem_hwcb * PAGE_SIZE); - hwc_data.kmem_end = hwc_data.kmem_start + - hwc_data.ioctls.kmem_hwcb * PAGE_SIZE - 1; - - retval = do_hwc_init (); - - ctl_set_bit (0, 9); - -#ifdef BUFFER_STRESS_TEST - - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "use %i bytes for buffering.\n", - hwc_data.ioctls.kmem_hwcb * PAGE_SIZE); - for (i = 0; i < 500; i++) { - hwcb = (init_hwcb_t *) BUF_HWCB; - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "This is stress test message #%i, free: %i bytes\n", - i, - MAX_HWCB_ROOM - (hwcb->length + sizeof (mto_t))); - } - -#endif - - return /*retval */ 0; -} - -signed int -hwc_register_calls (hwc_high_level_calls_t * calls) -{ - if (calls == NULL) - return -EINVAL; - - if (hwc_data.calls != NULL) - return -EBUSY; - - hwc_data.calls = calls; - return 0; -} - -signed int -hwc_unregister_calls (hwc_high_level_calls_t * calls) -{ - if (hwc_data.calls == NULL) - return -EINVAL; - - if (calls != hwc_data.calls) - return -EINVAL; - - hwc_data.calls = NULL; - return 0; -} - -int -hwc_send (hwc_request_t * req) -{ - unsigned long flags; - int retval; - int cc; - - spin_lock_irqsave (&hwc_data.lock, flags); - if (!req || !req->callback || !req->block) { - retval = -EINVAL; - goto unlock; - } - if (hwc_data.request) { - retval = -ENOTSUPP; - goto unlock; - } - cc = service_call (req->word, req->block); - switch (cc) { - case 0: - hwc_data.request = req; - hwc_data.current_servc = req->word; - hwc_data.current_hwcb = req->block; - retval = 0; - break; - case 2: - retval = -EBUSY; - break; - default: - retval = -ENOSYS; - - } - unlock: - spin_unlock_irqrestore (&hwc_data.lock, flags); - return retval; 
-} - -EXPORT_SYMBOL (hwc_send); - -void -do_hwc_callback (u32 ext_int_param) -{ - if (!hwc_data.request || !hwc_data.request->callback) - return; - if ((ext_int_param & HWC_EXT_INT_PARAM_ADDR) - != (unsigned long) hwc_data.request->block) - return; - hwc_data.request->callback (hwc_data.request); - hwc_data.request = NULL; - hwc_data.current_hwcb = NULL; - hwc_data.current_servc = 0; -} - -void -hwc_do_interrupt (u32 ext_int_param) -{ - u32 finished_hwcb = ext_int_param & HWC_EXT_INT_PARAM_ADDR; - u32 evbuf_pending = ext_int_param & HWC_EXT_INT_PARAM_PEND; - - if (hwc_data.flags & HWC_PTIMER_RUNS) { - del_timer (&hwc_data.poll_timer); - hwc_data.flags &= ~HWC_PTIMER_RUNS; - } - if (finished_hwcb) { - - if ((unsigned long) hwc_data.current_hwcb != finished_hwcb) { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "interrupt: mismatch: " - "ext. int param. (0x%x) vs. " - "current HWCB (0x%x)\n", - ext_int_param, - hwc_data.current_hwcb); - } else { - if (hwc_data.request) { - - do_hwc_callback (ext_int_param); - } else { - - switch (hwc_data.current_servc) { - - case HWC_CMDW_WRITEMASK: - - write_event_mask_2 (ext_int_param); - break; - - case HWC_CMDW_WRITEDATA: - - write_event_data_2 (ext_int_param); - break; - - case HWC_CMDW_READDATA: - - unconditional_read_2 (ext_int_param); - break; - default: - } - } - } - } else { - - if (hwc_data.current_hwcb) { - internal_print ( - DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "interrupt: mismatch: " - "ext. int. param. (0x%x) vs. " - "current HWCB (0x%x)\n", - ext_int_param, - hwc_data.current_hwcb); - } - } - - if (evbuf_pending) { - - unconditional_read_1 (); - } else { - - write_event_data_1 (); - } - - if (!hwc_data.calls || !hwc_data.calls->wake_up) - return; - (hwc_data.calls->wake_up) (); -} - -void -hwc_interrupt_handler (struct pt_regs *regs, __u16 code) -{ - int cpu = smp_processor_id (); - - u32 ext_int_param = hwc_ext_int_param (); - - irq_enter (cpu, 0x2401); - - if (hwc_data.flags & HWC_INIT) { - - hwc_data.flags |= HWC_INTERRUPT; - } else if (hwc_data.flags & HWC_BROKEN) { - - if (!do_hwc_init ()) { - hwc_data.flags &= ~HWC_BROKEN; - internal_print (DELAYED_WRITE, - HWC_RW_PRINT_HEADER - "delayed HWC setup after" - " temporary breakdown" - " (ext. int. 
parameter=0x%x)\n", - ext_int_param); - } - } else { - spin_lock (&hwc_data.lock); - hwc_do_interrupt (ext_int_param); - spin_unlock (&hwc_data.lock); - } - irq_exit (cpu, 0x2401); -} - -void -hwc_unblank (void) -{ - - spin_lock (&hwc_data.lock); - spin_unlock (&hwc_data.lock); - - __ctl_store (cr0, 0, 0); - cr0_save = cr0; - cr0 |= 0x00000200; - cr0 &= 0xFFFFF3AC; - __ctl_load (cr0, 0, 0); - - asm volatile ("STOSM %0,0x01":"=m" (psw_mask)::"memory"); - - while (ALL_HWCB_CHAR) - barrier (); - - asm volatile ("STNSM %0,0xFE":"=m" (psw_mask)::"memory"); - - __ctl_load (cr0_save, 0, 0); -} - -int -hwc_ioctl (unsigned int cmd, unsigned long arg) -{ - hwc_ioctls_t tmp = hwc_data.ioctls; - int retval = 0; - unsigned long flags; - unsigned int obuf; - - spin_lock_irqsave (&hwc_data.lock, flags); - - switch (cmd) { - - case TIOCHWCSHTAB: - if (get_user (tmp.width_htab, (ioctl_htab_t *) arg)) - goto fault; - break; - - case TIOCHWCSECHO: - if (get_user (tmp.echo, (ioctl_echo_t *) arg)) - goto fault; - break; - - case TIOCHWCSCOLS: - if (get_user (tmp.columns, (ioctl_cols_t *) arg)) - goto fault; - break; - - case TIOCHWCSNL: - if (get_user (tmp.final_nl, (ioctl_nl_t *) arg)) - goto fault; - break; - - case TIOCHWCSOBUF: - if (get_user (obuf, (unsigned int *) arg)) - goto fault; - if (obuf & 0xFFF) - tmp.max_hwcb = (((obuf | 0xFFF) + 1) >> 12); - else - tmp.max_hwcb = (obuf >> 12); - break; - - case TIOCHWCSCASE: - if (get_user (tmp.tolower, (ioctl_case_t *) arg)) - goto fault; - break; - - case TIOCHWCSDELIM: - if (get_user (tmp.delim, (ioctl_delim_t *) arg)) - goto fault; - break; - - case TIOCHWCSINIT: - retval = set_hwc_ioctls (&hwc_data.init_ioctls, 1); - break; - - case TIOCHWCGHTAB: - if (put_user (tmp.width_htab, (ioctl_htab_t *) arg)) - goto fault; - break; - - case TIOCHWCGECHO: - if (put_user (tmp.echo, (ioctl_echo_t *) arg)) - goto fault; - break; - - case TIOCHWCGCOLS: - if (put_user (tmp.columns, (ioctl_cols_t *) arg)) - goto fault; - break; - - case TIOCHWCGNL: - if (put_user (tmp.final_nl, (ioctl_nl_t *) arg)) - goto fault; - break; - - case TIOCHWCGOBUF: - if (put_user (tmp.max_hwcb, (ioctl_obuf_t *) arg)) - goto fault; - break; - - case TIOCHWCGKBUF: - if (put_user (tmp.kmem_hwcb, (ioctl_obuf_t *) arg)) - goto fault; - break; - - case TIOCHWCGCASE: - if (put_user (tmp.tolower, (ioctl_case_t *) arg)) - goto fault; - break; - - case TIOCHWCGDELIM: - if (put_user (tmp.delim, (ioctl_delim_t *) arg)) - goto fault; - break; -#if 0 - - case TIOCHWCGINIT: - if (put_user (&hwc_data.init_ioctls, (hwc_ioctls_t *) arg)) - goto fault; - break; - - case TIOCHWCGCURR: - if (put_user (&hwc_data.ioctls, (hwc_ioctls_t *) arg)) - goto fault; - break; -#endif - - default: - goto noioctlcmd; - } - - if (_IOC_DIR (cmd) == _IOC_WRITE) - retval = set_hwc_ioctls (&tmp, 0); - - goto out; - - fault: - retval = -EFAULT; - goto out; - noioctlcmd: - retval = -ENOIOCTLCMD; - out: - spin_unlock_irqrestore (&hwc_data.lock, flags); - return retval; -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/hwc_rw.h linux.22-ac2/drivers/s390/char/hwc_rw.h --- linux.vanilla/drivers/s390/char/hwc_rw.h 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/hwc_rw.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,132 +0,0 @@ -/* - * drivers/s390/char/hwc_rw.h - * interface to the HWC-read/write driver - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - */ - -#ifndef __HWC_RW_H__ -#define 
__HWC_RW_H__ - -#include - -typedef struct { - - void (*move_input) (unsigned char *, unsigned int); - - void (*wake_up) (void); -} hwc_high_level_calls_t; - -struct _hwc_request; - -typedef void hwc_callback_t (struct _hwc_request *); - -typedef struct _hwc_request { - void *block; - u32 word; - hwc_callback_t *callback; - void *data; -} __attribute__ ((packed)) - -hwc_request_t; - -#define HWC_ASCEBC(x) ((MACHINE_IS_VM ? _ascebc[x] : _ascebc_500[x])) - -#define HWC_EBCASC_STR(s,c) ((MACHINE_IS_VM ? EBCASC(s,c) : EBCASC_500(s,c))) - -#define HWC_ASCEBC_STR(s,c) ((MACHINE_IS_VM ? ASCEBC(s,c) : ASCEBC_500(s,c))) - -#define IN_HWCB 1 -#define IN_WRITE_BUF 2 -#define IN_BUFS_TOTAL (IN_HWCB | IN_WRITE_BUF) - -typedef unsigned short int ioctl_htab_t; -typedef unsigned char ioctl_echo_t; -typedef unsigned short int ioctl_cols_t; -typedef signed char ioctl_nl_t; -typedef unsigned short int ioctl_obuf_t; -typedef unsigned char ioctl_case_t; -typedef unsigned char ioctl_delim_t; - -typedef struct { - ioctl_htab_t width_htab; - ioctl_echo_t echo; - ioctl_cols_t columns; - ioctl_nl_t final_nl; - ioctl_obuf_t max_hwcb; - ioctl_obuf_t kmem_hwcb; - ioctl_case_t tolower; - ioctl_delim_t delim; -} hwc_ioctls_t; - -static hwc_ioctls_t _hwc_ioctls; - -#define HWC_IOCTL_LETTER 'B' - -#define TIOCHWCSHTAB _IOW(HWC_IOCTL_LETTER, 0, _hwc_ioctls.width_htab) - -#define TIOCHWCSECHO _IOW(HWC_IOCTL_LETTER, 1, _hwc_ioctls.echo) - -#define TIOCHWCSCOLS _IOW(HWC_IOCTL_LETTER, 2, _hwc_ioctls.columns) - -#define TIOCHWCSNL _IOW(HWC_IOCTL_LETTER, 4, _hwc_ioctls.final_nl) - -#define TIOCHWCSOBUF _IOW(HWC_IOCTL_LETTER, 5, _hwc_ioctls.max_hwcb) - -#define TIOCHWCSINIT _IO(HWC_IOCTL_LETTER, 6) - -#define TIOCHWCSCASE _IOW(HWC_IOCTL_LETTER, 7, _hwc_ioctls.tolower) - -#define TIOCHWCSDELIM _IOW(HWC_IOCTL_LETTER, 9, _hwc_ioctls.delim) - -#define TIOCHWCGHTAB _IOR(HWC_IOCTL_LETTER, 10, _hwc_ioctls.width_htab) - -#define TIOCHWCGECHO _IOR(HWC_IOCTL_LETTER, 11, _hwc_ioctls.echo) - -#define TIOCHWCGCOLS _IOR(HWC_IOCTL_LETTER, 12, _hwc_ioctls.columns) - -#define TIOCHWCGNL _IOR(HWC_IOCTL_LETTER, 14, _hwc_ioctls.final_nl) - -#define TIOCHWCGOBUF _IOR(HWC_IOCTL_LETTER, 15, _hwc_ioctls.max_hwcb) - -#define TIOCHWCGINIT _IOR(HWC_IOCTL_LETTER, 16, _hwc_ioctls) - -#define TIOCHWCGCASE _IOR(HWC_IOCTL_LETTER, 17, _hwc_ioctls.tolower) - -#define TIOCHWCGDELIM _IOR(HWC_IOCTL_LETTER, 19, _hwc_ioctls.delim) - -#define TIOCHWCGKBUF _IOR(HWC_IOCTL_LETTER, 20, _hwc_ioctls.max_hwcb) - -#define TIOCHWCGCURR _IOR(HWC_IOCTL_LETTER, 21, _hwc_ioctls) - -#ifndef __HWC_RW_C__ - -extern int hwc_init (void); - -extern int hwc_write (int from_user, const unsigned char *, unsigned int); - -extern unsigned int hwc_chars_in_buffer (unsigned char); - -extern unsigned int hwc_write_room (unsigned char); - -extern void hwc_flush_buffer (unsigned char); - -extern void hwc_unblank (void); - -extern signed int hwc_ioctl (unsigned int, unsigned long); - -extern void do_hwc_interrupt (void); - -extern int hwc_printk (const char *,...); - -extern signed int hwc_register_calls (hwc_high_level_calls_t *); - -extern signed int hwc_unregister_calls (hwc_high_level_calls_t *); - -extern int hwc_send (hwc_request_t *); - -#endif - -#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/hwc_tty.c linux.22-ac2/drivers/s390/char/hwc_tty.c --- linux.vanilla/drivers/s390/char/hwc_tty.c 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/hwc_tty.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,273 +0,0 @@ -/* - * 
drivers/s390/char/hwc_tty.c - * HWC line mode terminal driver. - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Peschke - * - * Thanks to Martin Schwidefsky. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "hwc_rw.h" -#include "ctrlchar.h" - -#define HWC_TTY_PRINT_HEADER "hwc tty driver: " - -#define HWC_TTY_BUF_SIZE 512 - -typedef struct { - - struct tty_struct *tty; - - unsigned char buf[HWC_TTY_BUF_SIZE]; - - unsigned short int buf_count; - - spinlock_t lock; - - hwc_high_level_calls_t calls; -} hwc_tty_data_struct; - -static hwc_tty_data_struct hwc_tty_data = -{ /* NULL/0 */ }; -static struct tty_driver hwc_tty_driver; -static struct tty_struct *hwc_tty_table[1]; -static struct termios *hwc_tty_termios[1]; -static struct termios *hwc_tty_termios_locked[1]; -static int hwc_tty_refcount = 0; - -extern struct termios tty_std_termios; - -void hwc_tty_wake_up (void); -void hwc_tty_input (unsigned char *, unsigned int); - -static int -hwc_tty_open (struct tty_struct *tty, - struct file *filp) -{ - - if (MINOR (tty->device) - tty->driver.minor_start) - return -ENODEV; - - tty->driver_data = &hwc_tty_data; - hwc_tty_data.buf_count = 0; - hwc_tty_data.tty = tty; - tty->low_latency = 0; - - hwc_tty_data.calls.wake_up = hwc_tty_wake_up; - hwc_tty_data.calls.move_input = hwc_tty_input; - hwc_register_calls (&(hwc_tty_data.calls)); - - return 0; -} - -static void -hwc_tty_close (struct tty_struct *tty, - struct file *filp) -{ - if (MINOR (tty->device) != tty->driver.minor_start) { - printk (KERN_WARNING HWC_TTY_PRINT_HEADER - "do not close hwc tty because of wrong device number"); - return; - } - if (tty->count > 1) - return; - - hwc_tty_data.tty = NULL; - - hwc_unregister_calls (&(hwc_tty_data.calls)); -} - -static int -hwc_tty_write_room (struct tty_struct *tty) -{ - int retval; - - retval = hwc_write_room (IN_BUFS_TOTAL); - return retval; -} - -static int -hwc_tty_write (struct tty_struct *tty, - int from_user, - const unsigned char *buf, - int count) -{ - int retval; - - if (hwc_tty_data.buf_count > 0) { - hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count); - hwc_tty_data.buf_count = 0; - } - retval = hwc_write (from_user, buf, count); - return retval; -} - -static void -hwc_tty_put_char (struct tty_struct *tty, - unsigned char ch) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_tty_data.lock, flags); - if (hwc_tty_data.buf_count >= HWC_TTY_BUF_SIZE) { - hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count); - hwc_tty_data.buf_count = 0; - } - hwc_tty_data.buf[hwc_tty_data.buf_count] = ch; - hwc_tty_data.buf_count++; - spin_unlock_irqrestore (&hwc_tty_data.lock, flags); -} - -static void -hwc_tty_flush_chars (struct tty_struct *tty) -{ - unsigned long flags; - - spin_lock_irqsave (&hwc_tty_data.lock, flags); - hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count); - hwc_tty_data.buf_count = 0; - spin_unlock_irqrestore (&hwc_tty_data.lock, flags); -} - -static int -hwc_tty_chars_in_buffer (struct tty_struct *tty) -{ - int retval; - - retval = hwc_chars_in_buffer (IN_BUFS_TOTAL); - return retval; -} - -static void -hwc_tty_flush_buffer (struct tty_struct *tty) -{ - hwc_tty_wake_up (); -} - -static int -hwc_tty_ioctl ( - struct tty_struct *tty, - struct file *file, - unsigned int cmd, - unsigned long arg) -{ - if (tty->flags & (1 << TTY_IO_ERROR)) - return -EIO; - - return hwc_ioctl (cmd, arg); -} - -void -hwc_tty_wake_up (void) -{ - if 
(hwc_tty_data.tty == NULL) - return; - if ((hwc_tty_data.tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && - hwc_tty_data.tty->ldisc.write_wakeup) - (hwc_tty_data.tty->ldisc.write_wakeup) (hwc_tty_data.tty); - wake_up_interruptible (&hwc_tty_data.tty->write_wait); -} - -void -hwc_tty_input (unsigned char *buf, unsigned int count) -{ - struct tty_struct *tty = hwc_tty_data.tty; - - if (tty != NULL) { - char *cchar; - if ((cchar = ctrlchar_handle (buf, count, tty))) { - if (cchar == (char *) -1) - return; - tty->flip.count++; - *tty->flip.flag_buf_ptr++ = TTY_NORMAL; - *tty->flip.char_buf_ptr++ = *cchar; - } else { - - memcpy (tty->flip.char_buf_ptr, buf, count); - if (count < 2 || ( - strncmp (buf + count - 2, "^n", 2) || - strncmp (buf + count - 2, "\0252n", 2))) { - tty->flip.char_buf_ptr[count] = '\n'; - count++; - } else - count -= 2; - memset (tty->flip.flag_buf_ptr, TTY_NORMAL, count); - tty->flip.char_buf_ptr += count; - tty->flip.flag_buf_ptr += count; - tty->flip.count += count; - } - tty_flip_buffer_push (tty); - hwc_tty_wake_up (); - } -} - -void -hwc_tty_init (void) -{ - if (!CONSOLE_IS_HWC) - return; - - ctrlchar_init (); - - memset (&hwc_tty_driver, 0, sizeof (struct tty_driver)); - memset (&hwc_tty_data, 0, sizeof (hwc_tty_data_struct)); - hwc_tty_driver.magic = TTY_DRIVER_MAGIC; - hwc_tty_driver.driver_name = "tty_hwc"; - hwc_tty_driver.name = "ttyS"; - hwc_tty_driver.name_base = 0; - hwc_tty_driver.major = TTY_MAJOR; - hwc_tty_driver.minor_start = 64; - hwc_tty_driver.num = 1; - hwc_tty_driver.type = TTY_DRIVER_TYPE_SYSTEM; - hwc_tty_driver.subtype = SYSTEM_TYPE_TTY; - hwc_tty_driver.init_termios = tty_std_termios; - hwc_tty_driver.init_termios.c_iflag = IGNBRK | IGNPAR; - hwc_tty_driver.init_termios.c_oflag = ONLCR; - hwc_tty_driver.init_termios.c_lflag = ISIG | ECHO; - hwc_tty_driver.flags = TTY_DRIVER_REAL_RAW; - hwc_tty_driver.refcount = &hwc_tty_refcount; - - hwc_tty_driver.table = hwc_tty_table; - hwc_tty_driver.termios = hwc_tty_termios; - hwc_tty_driver.termios_locked = hwc_tty_termios_locked; - - hwc_tty_driver.open = hwc_tty_open; - hwc_tty_driver.close = hwc_tty_close; - hwc_tty_driver.write = hwc_tty_write; - hwc_tty_driver.put_char = hwc_tty_put_char; - hwc_tty_driver.flush_chars = hwc_tty_flush_chars; - hwc_tty_driver.write_room = hwc_tty_write_room; - hwc_tty_driver.chars_in_buffer = hwc_tty_chars_in_buffer; - hwc_tty_driver.flush_buffer = hwc_tty_flush_buffer; - hwc_tty_driver.ioctl = hwc_tty_ioctl; - - hwc_tty_driver.throttle = NULL; - hwc_tty_driver.unthrottle = NULL; - hwc_tty_driver.send_xchar = NULL; - hwc_tty_driver.set_termios = NULL; - hwc_tty_driver.set_ldisc = NULL; - hwc_tty_driver.stop = NULL; - hwc_tty_driver.start = NULL; - hwc_tty_driver.hangup = NULL; - hwc_tty_driver.break_ctl = NULL; - hwc_tty_driver.wait_until_sent = NULL; - hwc_tty_driver.read_proc = NULL; - hwc_tty_driver.write_proc = NULL; - - if (tty_register_driver (&hwc_tty_driver)) - panic ("Couldn't register hwc_tty driver\n"); -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/Makefile linux.22-ac2/drivers/s390/char/Makefile --- linux.vanilla/drivers/s390/char/Makefile 2001-10-11 17:43:29.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/Makefile 2003-06-29 16:10:26.000000000 +0100 @@ -4,31 +4,36 @@ O_TARGET := s390-char.o -list-multi := tub3270.o tape390.o -export-objs := hwc_rw.o +list-multi := tub3270.o \ + tape390.o + +export-objs := sclp.o \ + tape_core.o \ + tape_devmap.o \ + tape_std.o tub3270-objs := tuball.o tubfs.o tubtty.o \ 
tubttyaid.o tubttybld.o tubttyscl.o \ tubttyrcl.o tubttysiz.o -tape390-$(CONFIG_S390_TAPE_CHAR) += tapechar.o -tape390-$(CONFIG_S390_TAPE_BLOCK) += tapeblock.o -tape390-$(CONFIG_S390_TAPE_3480) += tape3480.o tape34xx.o -tape390-$(CONFIG_S390_TAPE_3490) += tape3490.o tape34xx.o -tape390-objs := tape.o $(sort $(tape390-y)) +tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o +tape-objs := tape_core.o tape_devmap.o tape_proc.o tape_std.o tape_char.o \ + $(sort $(tape-y)) +obj-$(CONFIG_S390_TAPE) += tape390.o +obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o obj-y += ctrlchar.o obj-$(CONFIG_TN3215) += con3215.o -obj-$(CONFIG_HWC) += hwc_con.o hwc_rw.o hwc_tty.o -obj-$(CONFIG_HWC_CPI) += hwc_cpi.o +obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o +obj-$(CONFIG_SCLP_TTY) += sclp_tty.o +obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o +obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o obj-$(CONFIG_TN3270) += tub3270.o -obj-$(CONFIG_S390_TAPE) += tape390.o include $(TOPDIR)/Rules.make tub3270.o: $(tub3270-objs) $(LD) -r -o $@ $(tub3270-objs) -tape390.o: $(tape390-objs) - $(LD) -r -o $@ $(tape390-objs) - +tape390.o: $(tape-objs) + $(LD) -r -o $@ $(tape-objs) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/sclp.c linux.22-ac2/drivers/s390/char/sclp.c --- linux.vanilla/drivers/s390/char/sclp.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/sclp.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,786 @@ +/* + * drivers/s390/char/sclp.c + * core function to access sclp interface + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" + +#define SCLP_CORE_PRINT_HEADER "sclp low level driver: " + +/* Structure for register_early_external_interrupt. */ +static ext_int_info_t ext_int_info_hwc; + +/* spinlock to protect global variables of sclp_core */ +static spinlock_t sclp_lock; + +/* Mask of valid sclp events */ +static sccb_mask_t sclp_receive_mask; +static sccb_mask_t sclp_send_mask; + +/* List of registered event types */ +static struct list_head sclp_reg_list; + +/* sccb queue */ +static struct list_head sclp_req_queue; + +/* sccb for unconditional read */ +static struct sclp_req sclp_read_req; +static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); +/* sccb for write mask sccb */ +static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); + +/* Timer for init mask retries. 
*/ +static struct timer_list retry_timer; + +static volatile unsigned long sclp_status = 0; +/* some status flags */ +#define SCLP_INIT 0 +#define SCLP_RUNNING 1 +#define SCLP_READING 2 + +#define SCLP_INIT_POLL_INTERVAL 1 + +#define SCLP_COMMAND_INITIATED 0 +#define SCLP_BUSY 2 +#define SCLP_NOT_OPERATIONAL 3 + +/* + * assembler instruction for Service Call + */ +static int +__service_call(sclp_cmdw_t command, void *sccb) +{ + int cc; + + /* + * Mnemonic: SERVC Rx, Ry [RRE] + * + * Rx: SCLP command word + * Ry: address of SCCB + */ + __asm__ __volatile__( + " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ + " ipm %0\n" + " srl %0,28" + : "=&d" (cc) + : "d" (command), "a" (__pa(sccb)) + : "cc", "memory" ); + /* + * cc == 0: Service Call succesful initiated + * cc == 2: SCLP busy, new Service Call not initiated, + * new SCCB unchanged + * cc == 3: SCLP function not operational + */ + if (cc == SCLP_NOT_OPERATIONAL) + return -EIO; + /* + * We set the SCLP_RUNNING bit for cc 2 as well because if + * service_call returns cc 2 some old request is running + * that has to complete first + */ + set_bit(SCLP_RUNNING, &sclp_status); + if (cc == SCLP_BUSY) + return -EBUSY; + return 0; +} + +static int +sclp_start_request(void) +{ + struct sclp_req *req; + int rc; + unsigned long flags; + + /* quick exit if sclp is already in use */ + if (test_bit(SCLP_RUNNING, &sclp_status)) + return -EBUSY; + spin_lock_irqsave(&sclp_lock, flags); + /* Get first request on queue if available */ + req = NULL; + if (!list_empty(&sclp_req_queue)) + req = list_entry(sclp_req_queue.next, struct sclp_req, list); + if (req) { + rc = __service_call(req->command, req->sccb); + if (rc) { + req->status = SCLP_REQ_FAILED; + list_del(&req->list); + } else + req->status = SCLP_REQ_RUNNING; + } else + rc = -EINVAL; + spin_unlock_irqrestore(&sclp_lock, flags); + if (rc == -EIO && req->callback != NULL) + req->callback(req, req->callback_data); + return rc; +} + +static int +sclp_process_evbufs(struct sccb_header *sccb) +{ + int result; + unsigned long flags; + struct evbuf_header *evbuf; + struct list_head *l; + struct sclp_register *t; + + spin_lock_irqsave(&sclp_lock, flags); + evbuf = (struct evbuf_header *) (sccb + 1); + result = 0; + while ((addr_t) evbuf < (addr_t) sccb + sccb->length) { + /* check registered event */ + t = NULL; + list_for_each(l, &sclp_reg_list) { + t = list_entry(l, struct sclp_register, list); + if (t->receive_mask & (1 << (32 - evbuf->type))) { + if (t->receiver_fn != NULL) { + spin_unlock_irqrestore(&sclp_lock, + flags); + t->receiver_fn(evbuf); + spin_lock_irqsave(&sclp_lock, flags); + } + break; + } + else + t = NULL; + } + /* Check for unrequested event buffer */ + if (t == NULL) + result = -ENOSYS; + evbuf = (struct evbuf_header *) + ((addr_t) evbuf + evbuf->length); + } + spin_unlock_irqrestore(&sclp_lock, flags); + return result; +} + +char * +sclp_error_message(u16 rc) +{ + static struct { + u16 code; char *msg; + } sclp_errors[] = { + { 0x0000, "No response code stored (machine malfunction)" }, + { 0x0020, "Normal Completion" }, + { 0x0040, "SCLP equipment check" }, + { 0x0100, "SCCB boundary violation" }, + { 0x01f0, "Invalid command" }, + { 0x0220, "Normal Completion; suppressed buffers pending" }, + { 0x0300, "Insufficient SCCB length" }, + { 0x0340, "Contained SCLP equipment check" }, + { 0x05f0, "Target resource in improper state" }, + { 0x40f0, "Invalid function code/not installed" }, + { 0x60f0, "No buffers stored" }, + { 0x62f0, "No buffers stored; suppressed buffers pending" }, + { 
0x70f0, "Invalid selection mask" }, + { 0x71f0, "Event buffer exceeds available space" }, + { 0x72f0, "Inconsistent lengths" }, + { 0x73f0, "Event buffer syntax error" } + }; + int i; + for (i = 0; i < sizeof(sclp_errors)/sizeof(sclp_errors[0]); i++) + if (rc == sclp_errors[i].code) + return sclp_errors[i].msg; + return "Invalid response code"; +} + +/* + * postprocessing of unconditional read service call + */ +static void +sclp_unconditional_read_cb(struct sclp_req *read_req, void *data) +{ + struct sccb_header *sccb; + + sccb = read_req->sccb; + if (sccb->response_code == 0x0020 || + sccb->response_code == 0x0220) { + if (sclp_process_evbufs(sccb) != 0) + printk(KERN_WARNING SCLP_CORE_PRINT_HEADER + "unconditional read: " + "unrequested event buffer received.\n"); + } + + if (sccb->response_code != 0x0020) + printk(KERN_WARNING SCLP_CORE_PRINT_HEADER + "unconditional read: %s (response code=0x%x).\n", + sclp_error_message(sccb->response_code), + sccb->response_code); + + clear_bit(SCLP_READING, &sclp_status); +} + +/* + * Function to queue Read Event Data/Unconditional Read + */ +static void +__sclp_unconditional_read(void) +{ + struct sccb_header *sccb; + struct sclp_req *read_req; + + /* + * Don't try to initiate Unconditional Read if we are not able to + * receive anything + */ + if (sclp_receive_mask == 0) + return; + /* Don't try reading if a read is already outstanding */ + if (test_and_set_bit(SCLP_READING, &sclp_status)) + return; + /* Initialize read sccb */ + sccb = (struct sccb_header *) sclp_read_sccb; + clear_page(sccb); + sccb->length = PAGE_SIZE; + sccb->function_code = 0; /* unconditional read */ + sccb->control_mask[2] = 0x80; /* variable length response */ + /* Initialize request structure */ + read_req = &sclp_read_req; + read_req->command = SCLP_CMDW_READDATA; + read_req->status = SCLP_REQ_QUEUED; + read_req->callback = sclp_unconditional_read_cb; + read_req->sccb = sccb; + /* Add read request to the head of queue */ + list_add(&read_req->list, &sclp_req_queue); +} + +/* Bit masks to interpret external interruption parameter contents. */ +#define EXT_INT_SCCB_MASK 0xfffffff8 +#define EXT_INT_STATECHANGE_PENDING 0x00000002 +#define EXT_INT_EVBUF_PENDING 0x00000001 + +/* + * Handler for service-signal external interruptions + */ +static void +sclp_interrupt_handler(struct pt_regs *regs, __u16 code) +{ + u32 ext_int_param, finished_sccb, evbuf_pending; + struct list_head *l; + struct sclp_req *req, *tmp; + int cpu; + + spin_lock(&sclp_lock); + /* + * Only process interrupt if sclp is initialized. + * This avoids strange effects for a pending request + * from before the last re-ipl. 
+ */ + if (!test_bit(SCLP_INIT, &sclp_status)) { + /* Now clear the running bit */ + clear_bit(SCLP_RUNNING, &sclp_status); + spin_unlock(&sclp_lock); + return; + } + ext_int_param = S390_lowcore.ext_params; + finished_sccb = ext_int_param & EXT_INT_SCCB_MASK; + evbuf_pending = ext_int_param & (EXT_INT_EVBUF_PENDING | + EXT_INT_STATECHANGE_PENDING); + cpu = smp_processor_id(); + irq_enter(cpu, 0x2401); + req = NULL; + if (finished_sccb != 0U) { + list_for_each(l, &sclp_req_queue) { + tmp = list_entry(l, struct sclp_req, list); + if (finished_sccb == (u32)(addr_t) tmp->sccb) { + list_del(&tmp->list); + req = tmp; + break; + } + } + } + spin_unlock(&sclp_lock); + /* Perform callback */ + if (req != NULL) { + req->status = SCLP_REQ_DONE; + if (req->callback != NULL) + req->callback(req, req->callback_data); + } + spin_lock(&sclp_lock); + /* Head queue a read sccb if an event buffer is pending */ + if (evbuf_pending) + __sclp_unconditional_read(); + /* Now clear the running bit */ + clear_bit(SCLP_RUNNING, &sclp_status); + spin_unlock(&sclp_lock); + /* and start next request on the queue */ + sclp_start_request(); + irq_exit(cpu, 0x2401); +} + +/* + * Wait synchronously for external interrupt of sclp. We may not receive + * any other external interrupt, so we disable all other external interrupts + * in control register 0. + */ +void +sclp_sync_wait(void) +{ + unsigned long psw_mask; + unsigned long cr0, cr0_sync; + + /* + * save cr0 + * enable service signal external interruption (cr0.22) + * disable cr0.20-21, cr0.25, cr0.27, cr0.30-31 + * don't touch any other bit in cr0 + */ + __ctl_store(cr0, 0, 0); + cr0_sync = cr0; + cr0_sync |= 0x00000200; + cr0_sync &= 0xFFFFF3AC; + __ctl_load(cr0_sync, 0, 0); + + /* enable external interruptions (PSW-mask.7) */ + asm volatile ("STOSM 0(%1),0x01" + : "=m" (psw_mask) : "a" (&psw_mask) : "memory"); + + /* wait until ISR signals receipt of interrupt */ + while (test_bit(SCLP_RUNNING, &sclp_status)) { + barrier(); + cpu_relax(); + } + + /* disable external interruptions */ + asm volatile ("SSM 0(%0)" + : : "a" (&psw_mask) : "memory"); + + /* restore cr0 */ + __ctl_load(cr0, 0, 0); +} + +/* + * Queue an SCLP request. Request will immediately be processed if queue is + * empty. 
+ */ +void +sclp_add_request(struct sclp_req *req) +{ + unsigned long flags; + + if (!test_bit(SCLP_INIT, &sclp_status)) { + req->status = SCLP_REQ_FAILED; + if (req->callback != NULL) + req->callback(req, req->callback_data); + return; + } + spin_lock_irqsave(&sclp_lock, flags); + /* queue the request */ + req->status = SCLP_REQ_QUEUED; + list_add_tail(&req->list, &sclp_req_queue); + spin_unlock_irqrestore(&sclp_lock, flags); + /* try to start the first request on the queue */ + sclp_start_request(); +} + +/* state change notification */ +struct sclp_statechangebuf { + struct evbuf_header header; + u8 validity_sclp_active_facility_mask : 1; + u8 validity_sclp_receive_mask : 1; + u8 validity_sclp_send_mask : 1; + u8 validity_read_data_function_mask : 1; + u16 _zeros : 12; + u16 mask_length; + u64 sclp_active_facility_mask; + sccb_mask_t sclp_receive_mask; + sccb_mask_t sclp_send_mask; + u32 read_data_function_mask; +} __attribute__((packed)); + +static inline void +__sclp_notify_state_change(void) +{ + struct list_head *l; + struct sclp_register *t; + sccb_mask_t receive_mask, send_mask; + + list_for_each(l, &sclp_reg_list) { + t = list_entry(l, struct sclp_register, list); + receive_mask = t->receive_mask & sclp_receive_mask; + send_mask = t->send_mask & sclp_send_mask; + if (t->sclp_receive_mask != receive_mask || + t->sclp_send_mask != send_mask) { + t->sclp_receive_mask = receive_mask; + t->sclp_send_mask = send_mask; + if (t->state_change_fn != NULL) + t->state_change_fn(t); + } + } +} + +static void +sclp_state_change(struct evbuf_header *evbuf) +{ + unsigned long flags; + struct sclp_statechangebuf *scbuf; + + spin_lock_irqsave(&sclp_lock, flags); + scbuf = (struct sclp_statechangebuf *) evbuf; + + if (scbuf->validity_sclp_receive_mask) { + if (scbuf->mask_length != sizeof(sccb_mask_t)) + printk(KERN_WARNING SCLP_CORE_PRINT_HEADER + "state change event with mask length %i\n", + scbuf->mask_length); + else + /* set new receive mask */ + sclp_receive_mask = scbuf->sclp_receive_mask; + } + + if (scbuf->validity_sclp_send_mask) { + if (scbuf->mask_length != sizeof(sccb_mask_t)) + printk(KERN_WARNING SCLP_CORE_PRINT_HEADER + "state change event with mask length %i\n", + scbuf->mask_length); + else + /* set new send mask */ + sclp_send_mask = scbuf->sclp_send_mask; + } + + __sclp_notify_state_change(); + spin_unlock_irqrestore(&sclp_lock, flags); +} + +static struct sclp_register sclp_state_change_event = { + .receive_mask = EvTyp_StateChange_Mask, + .receiver_fn = sclp_state_change +}; + + +/* + * SCLP quiesce event handler + */ +#ifdef CONFIG_SMP +static volatile unsigned long cpu_quiesce_map; + +static void +do_load_quiesce_psw(void * __unused) +{ + psw_t quiesce_psw; + + clear_bit(smp_processor_id(), &cpu_quiesce_map); + if (smp_processor_id() == 0) { + /* Wait for all other cpus to enter do_load_quiesce_psw */ + while (cpu_quiesce_map != 0); + /* Quiesce the last cpu with the special psw */ + quiesce_psw.mask = _DW_PSW_MASK; + quiesce_psw.addr = 0xfff; + __load_psw(quiesce_psw); + } + signal_processor(smp_processor_id(), sigp_stop); +} + +static void +do_machine_quiesce(void) +{ + cpu_quiesce_map = cpu_online_map; + smp_call_function(do_load_quiesce_psw, NULL, 0, 0); + do_load_quiesce_psw(NULL); +} +#else +static void +do_machine_quiesce(void) +{ + psw_t quiesce_psw; + + quiesce_psw.mask = _DW_PSW_MASK; + quiesce_psw.addr = 0xfff; + __load_psw(quiesce_psw); +} +#endif + +extern void ctrl_alt_del(void); + +static void +sclp_quiesce(struct evbuf_header *evbuf) +{ + /* + * We got a 
"shutdown" request. + * Add a call to an appropriate "shutdown" routine here. This + * routine should set all PSWs to 'disabled-wait', 'stopped' + * or 'check-stopped' - except 1 PSW which needs to carry a + * special bit pattern called 'quiesce PSW'. + */ + _machine_restart = (void *) do_machine_quiesce; + _machine_halt = do_machine_quiesce; + _machine_power_off = do_machine_quiesce; + ctrl_alt_del(); +} + +static struct sclp_register sclp_quiesce_event = { + .receive_mask = EvTyp_SigQuiesce_Mask, + .receiver_fn = sclp_quiesce +}; + +/* initialisation of SCLP */ +struct init_sccb { + struct sccb_header header; + u16 _reserved; + u16 mask_length; + sccb_mask_t receive_mask; + sccb_mask_t send_mask; + sccb_mask_t sclp_send_mask; + sccb_mask_t sclp_receive_mask; +} __attribute__((packed)); + +static void sclp_init_mask_retry(unsigned long); + +static int +sclp_init_mask(void) +{ + unsigned long flags; + struct init_sccb *sccb; + struct sclp_req *req; + struct list_head *l; + struct sclp_register *t; + int rc; + + sccb = (struct init_sccb *) sclp_init_sccb; + /* stick the request structure to the end of the init sccb page */ + req = (struct sclp_req *) ((addr_t) sccb + PAGE_SIZE) - 1; + + /* SCLP setup concerning receiving and sending Event Buffers */ + req->command = SCLP_CMDW_WRITEMASK; + req->status = SCLP_REQ_QUEUED; + req->callback = NULL; + req->sccb = sccb; + /* setup sccb for writemask command */ + memset(sccb, 0, sizeof(struct init_sccb)); + sccb->header.length = sizeof(struct init_sccb); + sccb->mask_length = sizeof(sccb_mask_t); + /* copy in the sccb mask of the registered event types */ + spin_lock_irqsave(&sclp_lock, flags); + list_for_each(l, &sclp_reg_list) { + t = list_entry(l, struct sclp_register, list); + sccb->receive_mask |= t->receive_mask; + sccb->send_mask |= t->send_mask; + } + sccb->sclp_receive_mask = 0; + sccb->sclp_send_mask = 0; + if (test_bit(SCLP_INIT, &sclp_status)) { + /* add request to sclp queue */ + list_add_tail(&req->list, &sclp_req_queue); + spin_unlock_irqrestore(&sclp_lock, flags); + /* and start if SCLP is idle */ + sclp_start_request(); + /* now wait for completion */ + while (req->status != SCLP_REQ_DONE && + req->status != SCLP_REQ_FAILED) + sclp_sync_wait(); + spin_lock_irqsave(&sclp_lock, flags); + } else { + /* + * Special case for the very first write mask command. + * The interrupt handler is not removing request from + * the request queue and doesn't call callbacks yet + * because there might be an pending old interrupt + * after a Re-IPL. We have to receive and ignore it. + */ + do { + rc = __service_call(req->command, req->sccb); + spin_unlock_irqrestore(&sclp_lock, flags); + if (rc == -EIO) + return -ENOSYS; + sclp_sync_wait(); + spin_lock_irqsave(&sclp_lock, flags); + } while (rc == -EBUSY); + } + if (sccb->header.response_code != 0x0020) { + /* WRITEMASK failed - we cannot rely on receiving a state + change event, so initially, polling is the only alternative + for us to ever become operational. 
*/ + if (!timer_pending(&retry_timer) || + !mod_timer(&retry_timer, + jiffies + SCLP_INIT_POLL_INTERVAL*HZ)) { + retry_timer.function = sclp_init_mask_retry; + retry_timer.data = 0; + retry_timer.expires = jiffies + + SCLP_INIT_POLL_INTERVAL*HZ; + add_timer(&retry_timer); + } + } else { + sclp_receive_mask = sccb->sclp_receive_mask; + sclp_send_mask = sccb->sclp_send_mask; + __sclp_notify_state_change(); + } + spin_unlock_irqrestore(&sclp_lock, flags); + return 0; +} + +static void +sclp_init_mask_retry(unsigned long data) +{ + sclp_init_mask(); +} + +/* + * sclp setup function. Called early (no kmalloc!) from sclp_console_init(). + */ +static int +sclp_init(void) +{ + int rc; + + if (test_bit(SCLP_INIT, &sclp_status)) + /* Already initialized. */ + return 0; + + spin_lock_init(&sclp_lock); + INIT_LIST_HEAD(&sclp_req_queue); + + /* init event list */ + INIT_LIST_HEAD(&sclp_reg_list); + list_add(&sclp_state_change_event.list, &sclp_reg_list); + list_add(&sclp_quiesce_event.list, &sclp_reg_list); + + /* + * request the 0x2401 external interrupt + * The sclp driver is initialized early (before kmalloc works). We + * need to use register_early_external_interrupt. + */ + if (register_early_external_interrupt(0x2401, sclp_interrupt_handler, + &ext_int_info_hwc) != 0) + return -EBUSY; + + /* enable service-signal external interruptions, + * Control Register 0 bit 22 := 1 + * (besides PSW bit 7 must be set to 1 sometimes for external + * interruptions) + */ + ctl_set_bit(0, 9); + + init_timer(&retry_timer); + /* do the initial write event mask */ + rc = sclp_init_mask(); + if (rc == 0) { + /* Ok, now everything is setup right. */ + set_bit(SCLP_INIT, &sclp_status); + return 0; + } + + /* The sclp_init_mask failed. SCLP is broken, unregister and exit. */ + ctl_clear_bit(0,9); + unregister_early_external_interrupt(0x2401, sclp_interrupt_handler, + &ext_int_info_hwc); + + return rc; +} + +/* + * Register the SCLP event listener identified by REG. Return 0 on success. + * Some error codes and their meaning: + * + * -ENODEV = SCLP interface is not supported on this machine + * -EBUSY = there is already a listener registered for the requested + * event type + * -EIO = SCLP interface is currently not operational + */ +int +sclp_register(struct sclp_register *reg) +{ + unsigned long flags; + struct list_head *l; + struct sclp_register *t; + + if (!MACHINE_HAS_SCLP) + return -ENODEV; + + if (!test_bit(SCLP_INIT, &sclp_status)) + sclp_init(); + spin_lock_irqsave(&sclp_lock, flags); + /* check already registered event masks for collisions */ + list_for_each(l, &sclp_reg_list) { + t = list_entry(l, struct sclp_register, list); + if (t->receive_mask & reg->receive_mask || + t->send_mask & reg->send_mask) { + spin_unlock_irqrestore(&sclp_lock, flags); + return -EBUSY; + } + } + /* + * set present mask to 0 to trigger state change + * callback in sclp_init_mask + */ + reg->sclp_receive_mask = 0; + reg->sclp_send_mask = 0; + list_add(®->list, &sclp_reg_list); + spin_unlock_irqrestore(&sclp_lock, flags); + sclp_init_mask(); + return 0; +} + +/* + * Unregister the SCLP event listener identified by REG. + */ +void +sclp_unregister(struct sclp_register *reg) +{ + unsigned long flags; + + spin_lock_irqsave(&sclp_lock, flags); + list_del(®->list); + spin_unlock_irqrestore(&sclp_lock, flags); + sclp_init_mask(); +} + +#define SCLP_EVBUF_PROCESSED 0x80 + +/* + * Traverse array of event buffers contained in SCCB and remove all buffers + * with a set "processed" flag. Return the number of unprocessed buffers. 
+ */ +int +sclp_remove_processed(struct sccb_header *sccb) +{ + struct evbuf_header *evbuf; + int unprocessed; + u16 remaining; + + evbuf = (struct evbuf_header *) (sccb + 1); + unprocessed = 0; + remaining = sccb->length - sizeof(struct sccb_header); + while (remaining > 0) { + remaining -= evbuf->length; + if (evbuf->flags & SCLP_EVBUF_PROCESSED) { + sccb->length -= evbuf->length; + memcpy((void *) evbuf, + (void *) ((addr_t) evbuf + evbuf->length), + remaining); + } else { + unprocessed++; + evbuf = (struct evbuf_header *) + ((addr_t) evbuf + evbuf->length); + } + } + + return unprocessed; +} + +module_init(sclp_init); + +EXPORT_SYMBOL(sclp_add_request); +EXPORT_SYMBOL(sclp_sync_wait); +EXPORT_SYMBOL(sclp_register); +EXPORT_SYMBOL(sclp_unregister); +EXPORT_SYMBOL(sclp_error_message); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/sclp_con.c linux.22-ac2/drivers/s390/char/sclp_con.c --- linux.vanilla/drivers/s390/char/sclp_con.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/sclp_con.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,244 @@ +/* + * drivers/s390/char/sclp_con.c + * SCLP line mode console driver + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" +#include "sclp_rw.h" + +#define SCLP_CON_PRINT_HEADER "sclp console driver: " + +#define sclp_console_major 4 /* TTYAUX_MAJOR */ +#define sclp_console_minor 64 +#define sclp_console_name "ttyS" + +/* Lock to guard over changes to global variables */ +static spinlock_t sclp_con_lock; +/* List of free pages that can be used for console output buffering */ +static struct list_head sclp_con_pages; +/* List of full struct sclp_buffer structures ready for output */ +static struct list_head sclp_con_outqueue; +/* Counter how many buffers are emitted (max 1) and how many */ +/* are on the output queue. */ +static int sclp_con_buffer_count; +/* Pointer to current console buffer */ +static struct sclp_buffer *sclp_conbuf; +/* Timer for delayed output of console messages */ +static struct timer_list sclp_con_timer; + +/* Output format for console messages */ +static unsigned short sclp_con_columns; +static unsigned short sclp_con_width_htab; + +static void +sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) +{ + unsigned long flags; + struct sclp_buffer *next; + void *page; + + /* Ignore return code - because console-writes aren't critical, + we do without a sophisticated error recovery mechanism. */ + page = sclp_unmake_buffer(buffer); + spin_lock_irqsave(&sclp_con_lock, flags); + /* Remove buffer from outqueue */ + list_del(&buffer->list); + sclp_con_buffer_count--; + list_add_tail((struct list_head *) page, &sclp_con_pages); + /* Check if there is a pending buffer on the out queue. 
*/ + next = NULL; + if (!list_empty(&sclp_con_outqueue)) + next = list_entry(sclp_con_outqueue.next, + struct sclp_buffer, list); + spin_unlock_irqrestore(&sclp_con_lock, flags); + if (next != NULL) + sclp_emit_buffer(next, sclp_conbuf_callback); +} + +static inline void +sclp_conbuf_emit(void) +{ + struct sclp_buffer* buffer; + unsigned long flags; + int count; + + spin_lock_irqsave(&sclp_con_lock, flags); + buffer = sclp_conbuf; + sclp_conbuf = NULL; + if (buffer == NULL) { + spin_unlock_irqrestore(&sclp_con_lock, flags); + return; + } + list_add_tail(&buffer->list, &sclp_con_outqueue); + count = sclp_con_buffer_count++; + spin_unlock_irqrestore(&sclp_con_lock, flags); + if (count == 0) + sclp_emit_buffer(buffer, sclp_conbuf_callback); +} + +/* + * When this routine is called from the timer then we flush the + * temporary write buffer without further waiting on a final new line. + */ +static void +sclp_console_timeout(unsigned long data) +{ + sclp_conbuf_emit(); +} + +/* + * Writes the given message to S390 system console + */ +static void +sclp_console_write(struct console *console, const char *message, + unsigned int count) +{ + unsigned long flags; + void *page; + int written; + + if (count == 0) + return; + spin_lock_irqsave(&sclp_con_lock, flags); + /* + * process escape characters, write message into buffer, + * send buffer to SCLP + */ + do { + /* make sure we have a console output buffer */ + if (sclp_conbuf == NULL) { + while (list_empty(&sclp_con_pages)) { + spin_unlock_irqrestore(&sclp_con_lock, flags); + sclp_sync_wait(); + spin_lock_irqsave(&sclp_con_lock, flags); + } + page = sclp_con_pages.next; + list_del((struct list_head *) page); + sclp_conbuf = sclp_make_buffer(page, sclp_con_columns, + sclp_con_width_htab); + } + /* try to write the string to the current output buffer */ + written = sclp_write(sclp_conbuf, (const unsigned char *) + message, count, 0); + if (written == -EFAULT || written == count) + break; + /* + * Not all characters could be written to the current + * output buffer. Emit the buffer, create a new buffer + * and then output the rest of the string. + */ + spin_unlock_irqrestore(&sclp_con_lock, flags); + sclp_conbuf_emit(); + spin_lock_irqsave(&sclp_con_lock, flags); + message += written; + count -= written; + } while (count > 0); + /* Setup timer to output current console buffer after 1/10 second */ + if (sclp_conbuf != NULL && !timer_pending(&sclp_con_timer)) { + init_timer(&sclp_con_timer); + sclp_con_timer.function = sclp_console_timeout; + sclp_con_timer.data = 0UL; + sclp_con_timer.expires = jiffies + HZ/10; + add_timer(&sclp_con_timer); + } + spin_unlock_irqrestore(&sclp_con_lock, flags); +} + +/* returns the device number of the SCLP console */ +static kdev_t +sclp_console_device(struct console *c) +{ + return mk_kdev(sclp_console_major, sclp_console_minor); +} + +/* + * This routine is called from panic when the kernel + * is going to give up. We have to make sure that all buffers + * will be flushed to the SCLP. 
+ */ +static void +sclp_console_unblank(void) +{ + unsigned long flags; + + sclp_conbuf_emit(); + spin_lock_irqsave(&sclp_con_lock, flags); + if (timer_pending(&sclp_con_timer)) + del_timer(&sclp_con_timer); + while (sclp_con_buffer_count > 0) { + spin_unlock_irqrestore(&sclp_con_lock, flags); + sclp_sync_wait(); + spin_lock_irqsave(&sclp_con_lock, flags); + } + spin_unlock_irqrestore(&sclp_con_lock, flags); +} + +/* + * used to register the SCLP console to the kernel and to + * give printk necessary information + */ +static struct console sclp_console = +{ + .name = sclp_console_name, + .write = sclp_console_write, + .device = sclp_console_device, + .unblank = sclp_console_unblank, + .flags = CON_PRINTBUFFER, + .index = 0 /* ttyS0 */ +}; + +/* + * called by console_init() in drivers/char/tty_io.c at boot-time. + */ +void __init +sclp_console_init(void) +{ + void *page; + int i; + + if (!CONSOLE_IS_SCLP) + return; + if (sclp_rw_init() != 0) + return; + /* Allocate pages for output buffering */ + INIT_LIST_HEAD(&sclp_con_pages); + for (i = 0; i < MAX_CONSOLE_PAGES; i++) { + page = alloc_bootmem_low_pages(PAGE_SIZE); + if (page == NULL) + return; + list_add_tail((struct list_head *) page, &sclp_con_pages); + } + INIT_LIST_HEAD(&sclp_con_outqueue); + spin_lock_init(&sclp_con_lock); + sclp_con_buffer_count = 0; + sclp_conbuf = NULL; + init_timer(&sclp_con_timer); + + /* Set output format */ + if (MACHINE_IS_VM) + /* + * save 4 characters for the CPU number + * written at start of each line by VM/CP + */ + sclp_con_columns = 76; + else + sclp_con_columns = 80; + sclp_con_width_htab = 8; + + /* enable printk-access to this driver */ + register_console(&sclp_console); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/sclp_cpi.c linux.22-ac2/drivers/s390/char/sclp_cpi.c --- linux.vanilla/drivers/s390/char/sclp_cpi.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/sclp_cpi.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,244 @@ +/* + * Author: Martin Peschke + * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation + * + * SCLP Control-Program Identification. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" +#include "sclp_rw.h" + +#define CPI_LENGTH_SYSTEM_TYPE 8 +#define CPI_LENGTH_SYSTEM_NAME 8 +#define CPI_LENGTH_SYSPLEX_NAME 8 + +struct cpi_evbuf { + struct evbuf_header header; + u8 id_format; + u8 reserved0; + u8 system_type[CPI_LENGTH_SYSTEM_TYPE]; + u64 reserved1; + u8 system_name[CPI_LENGTH_SYSTEM_NAME]; + u64 reserved2; + u64 system_level; + u64 reserved3; + u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME]; + u8 reserved4[16]; +} __attribute__((packed)); + +struct cpi_sccb { + struct sccb_header header; + struct cpi_evbuf cpi_evbuf; +} __attribute__((packed)); + +/* Event type structure for write message and write priority message */ +static struct sclp_register sclp_cpi_event = +{ + .send_mask = EvTyp_CtlProgIdent_Mask +}; + +MODULE_AUTHOR( + "Martin Peschke, IBM Deutschland Entwicklung GmbH " + ""); + +MODULE_DESCRIPTION( + "identify this operating system instance to the S/390 " + "or zSeries hardware"); + +static char *system_name = NULL; +MODULE_PARM(system_name, "s"); +MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters"); + +static char *sysplex_name = NULL; +#ifdef ALLOW_SYSPLEX_NAME +MODULE_PARM(sysplex_name, "s"); +MODULE_PARM_DESC(sysplex_name, "if applicable - max. 
8 characters"); +#endif + +/* use default value for this field (as well as for system level) */ +static char *system_type = "LINUX"; + +static int +cpi_check_parms(void) +{ + /* reject if no system type specified */ + if (!system_type) { + printk("cpi: bug: no system type specified\n"); + return -EINVAL; + } + + /* reject if system type larger than 8 characters */ + if (strlen(system_type) > CPI_LENGTH_SYSTEM_NAME) { + printk("cpi: bug: system type has length of %li characters - " + "only %i characters supported\n", + strlen(system_type), CPI_LENGTH_SYSTEM_TYPE); + return -EINVAL; + } + + /* reject if no system name specified */ + if (!system_name) { + printk("cpi: no system name specified\n"); + return -EINVAL; + } + + /* reject if system name larger than 8 characters */ + if (strlen(system_name) > CPI_LENGTH_SYSTEM_NAME) { + printk("cpi: system name has length of %li characters - " + "only %i characters supported\n", + strlen(system_name), CPI_LENGTH_SYSTEM_NAME); + return -EINVAL; + } + + /* reject if specified sysplex name larger than 8 characters */ + if (sysplex_name && strlen(sysplex_name) > CPI_LENGTH_SYSPLEX_NAME) { + printk("cpi: sysplex name has length of %li characters" + " - only %i characters supported\n", + strlen(sysplex_name), CPI_LENGTH_SYSPLEX_NAME); + return -EINVAL; + } + return 0; +} + +static void +cpi_callback(struct sclp_req *req, void *data) +{ + struct semaphore *sem; + + sem = (struct semaphore *) data; + up(sem); +} + +static struct sclp_req * +cpi_prepare_req(void) +{ + struct sclp_req *req; + struct cpi_sccb *sccb; + struct cpi_evbuf *evb; + + req = (struct sclp_req *) kmalloc(sizeof(struct sclp_req), GFP_KERNEL); + if (req == NULL) + return ERR_PTR(-ENOMEM); + sccb = (struct cpi_sccb *) get_free_page(GFP_KERNEL | GFP_DMA); + if (sccb == NULL) { + kfree(req); + return ERR_PTR(-ENOMEM); + } + memset(sccb, 0, sizeof(struct cpi_sccb)); + + /* setup SCCB for Control-Program Identification */ + sccb->header.length = sizeof(struct cpi_sccb); + sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf); + sccb->cpi_evbuf.header.type = 0x0B; + evb = &sccb->cpi_evbuf; + + /* set system type */ + memset(evb->system_type, ' ', CPI_LENGTH_SYSTEM_TYPE); + memcpy(evb->system_type, system_type, strlen(system_type)); + sclp_ascebc_str(evb->system_type, CPI_LENGTH_SYSTEM_TYPE); + EBC_TOUPPER(evb->system_type, CPI_LENGTH_SYSTEM_TYPE); + + /* set system name */ + memset(evb->system_name, ' ', CPI_LENGTH_SYSTEM_NAME); + memcpy(evb->system_name, system_name, strlen(system_name)); + sclp_ascebc_str(evb->system_name, CPI_LENGTH_SYSTEM_NAME); + EBC_TOUPPER(evb->system_name, CPI_LENGTH_SYSTEM_NAME); + + /* set sytem level */ + evb->system_level = LINUX_VERSION_CODE; + + /* set sysplex name */ + if (sysplex_name) { + memset(evb->sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME); + memcpy(evb->sysplex_name, sysplex_name, strlen(sysplex_name)); + sclp_ascebc_str(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME); + EBC_TOUPPER(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME); + } + + /* prepare request data structure presented to SCLP driver */ + req->command = SCLP_CMDW_WRITEDATA; + req->sccb = sccb; + req->status = SCLP_REQ_FILLED; + req->callback = cpi_callback; + return req; +} + +static void +cpi_free_req(struct sclp_req *req) +{ + free_page((unsigned long) req->sccb); + kfree(req); +} + +static int __init +cpi_module_init(void) +{ + struct semaphore sem; + struct sclp_req *req; + int rc; + + rc = cpi_check_parms(); + if (rc) + return rc; + + rc = sclp_register(&sclp_cpi_event); + if (rc) { + /* could 
not register sclp event. Die. */ + printk("cpi: could not register to hardware console.\n"); + return -EINVAL; + } + if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) { + printk("cpi: no control program identification support\n"); + sclp_unregister(&sclp_cpi_event); + return -ENOTSUPP; + } + + req = cpi_prepare_req(); + if (IS_ERR(req)) { + printk("cpi: couldn't allocate request\n"); + sclp_unregister(&sclp_cpi_event); + return PTR_ERR(req); + } + + /* Prepare semaphore */ + sema_init(&sem, 0); + req->callback_data = &sem; + /* Add request to sclp queue */ + sclp_add_request(req); + /* make "insmod" sleep until callback arrives */ + down(&sem); + + rc = ((struct cpi_sccb *) req->sccb)->header.response_code; + if (rc != 0x0020) { + printk("cpi: failed with response code 0x%x\n", rc); + rc = -ECOMM; + } else + rc = 0; + + cpi_free_req(req); + sclp_unregister(&sclp_cpi_event); + + return rc; +} + + +static void __exit cpi_module_exit(void) +{ +} + + +/* declare driver module init/cleanup functions */ +module_init(cpi_module_init); +module_exit(cpi_module_exit); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/sclp.h linux.22-ac2/drivers/s390/char/sclp.h --- linux.vanilla/drivers/s390/char/sclp.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/sclp.h 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,157 @@ +/* + * drivers/s390/char/sclp.h + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#ifndef __SCLP_H__ +#define __SCLP_H__ + +#include +#include + +#include + +/* maximum number of pages concerning our own memory management */ +#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) +#define MAX_CONSOLE_PAGES 4 + +#define EvTyp_OpCmd 0x01 +#define EvTyp_Msg 0x02 +#define EvTyp_StateChange 0x08 +#define EvTyp_PMsgCmd 0x09 +#define EvTyp_CntlProgOpCmd 0x20 +#define EvTyp_CntlProgIdent 0x0B +#define EvTyp_SigQuiesce 0x1D +#define EvTyp_VT220Msg 0x1A + +#define EvTyp_OpCmd_Mask 0x80000000 +#define EvTyp_Msg_Mask 0x40000000 +#define EvTyp_StateChange_Mask 0x01000000 +#define EvTyp_PMsgCmd_Mask 0x00800000 +#define EvTyp_CtlProgOpCmd_Mask 0x00000001 +#define EvTyp_CtlProgIdent_Mask 0x00200000 +#define EvTyp_SigQuiesce_Mask 0x00000008 +#define EvTyp_VT220Msg_Mask 0x00000040 + +#define GnrlMsgFlgs_DOM 0x8000 +#define GnrlMsgFlgs_SndAlrm 0x4000 +#define GnrlMsgFlgs_HoldMsg 0x2000 + +#define LnTpFlgs_CntlText 0x8000 +#define LnTpFlgs_LabelText 0x4000 +#define LnTpFlgs_DataText 0x2000 +#define LnTpFlgs_EndText 0x1000 +#define LnTpFlgs_PromptText 0x0800 + +typedef unsigned int sclp_cmdw_t; + +#define SCLP_CMDW_READDATA 0x00770005 +#define SCLP_CMDW_WRITEDATA 0x00760005 +#define SCLP_CMDW_WRITEMASK 0x00780005 + +#define GDS_ID_MDSMU 0x1310 +#define GDS_ID_MDSRouteInfo 0x1311 +#define GDS_ID_AgUnWrkCorr 0x1549 +#define GDS_ID_SNACondReport 0x1532 +#define GDS_ID_CPMSU 0x1212 +#define GDS_ID_RoutTargInstr 0x154D +#define GDS_ID_OpReq 0x8070 +#define GDS_ID_TextCmd 0x1320 + +#define GDS_KEY_SelfDefTextMsg 0x31 + +typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! 
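The EvTyp_* constants above come in pairs: an event-type number and the bit it occupies in a 32-bit send/receive mask. For the values defined here the relation is simply the most-significant bit shifted right by (type - 1). The small check below verifies that correspondence; it is a sanity-check sketch, not driver code:

/* Check that each EvTyp_* number maps to its *_Mask bit as MSB >> (type - 1). */
#include <stdio.h>
#include <stdint.h>

static const struct { unsigned int type; uint32_t mask; } evtyp[] = {
	{ 0x01, 0x80000000 },	/* OpCmd         */
	{ 0x02, 0x40000000 },	/* Msg           */
	{ 0x08, 0x01000000 },	/* StateChange   */
	{ 0x09, 0x00800000 },	/* PMsgCmd       */
	{ 0x0B, 0x00200000 },	/* CntlProgIdent */
	{ 0x1A, 0x00000040 },	/* VT220Msg      */
	{ 0x1D, 0x00000008 },	/* SigQuiesce    */
	{ 0x20, 0x00000001 },	/* CntlProgOpCmd */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(evtyp) / sizeof(evtyp[0]); i++) {
		uint32_t computed = 0x80000000u >> (evtyp[i].type - 1);

		printf("type 0x%02X -> mask 0x%08X %s\n", evtyp[i].type,
		       (unsigned int) computed,
		       computed == evtyp[i].mask ? "ok" : "MISMATCH");
	}
	return 0;
}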
*/ + +struct sccb_header { + u16 length; + u8 function_code; + u8 control_mask[3]; + u16 response_code; +} __attribute__((packed)); + +struct gds_subvector { + u8 length; + u8 key; +} __attribute__((packed)); + +struct gds_vector { + u16 length; + u16 gds_id; +} __attribute__((packed)); + +struct evbuf_header { + u16 length; + u8 type; + u8 flags; + u16 _reserved; +} __attribute__((packed)); + +struct sclp_req { + struct list_head list; /* list_head for request queueing. */ + sclp_cmdw_t command; /* sclp command to execute */ + void *sccb; /* pointer to the sccb to execute */ + char status; /* status of this request */ + /* Callback that is called after reaching final status. */ + void (*callback)(struct sclp_req *, void *data); + void *callback_data; +}; + +#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */ +#define SCLP_REQ_QUEUED 0x01 /* request is queued to be processed */ +#define SCLP_REQ_RUNNING 0x02 /* request is currently running */ +#define SCLP_REQ_DONE 0x03 /* request is completed successfully */ +#define SCLP_REQ_FAILED 0x05 /* request is finally failed */ + +/* function pointers that a high level driver has to use for registration */ +/* of some routines it wants to be called from the low level driver */ +struct sclp_register { + struct list_head list; + /* event masks this user is registered for */ + sccb_mask_t receive_mask; + sccb_mask_t send_mask; + /* actually present events */ + sccb_mask_t sclp_receive_mask; + sccb_mask_t sclp_send_mask; + /* called if event type availability changes */ + void (*state_change_fn)(struct sclp_register *); + /* called for events in cp_receive_mask/sclp_receive_mask */ + void (*receiver_fn)(struct evbuf_header *); +}; + +/* externals from sclp.c */ +void sclp_add_request(struct sclp_req *req); +void sclp_sync_wait(void); +int sclp_register(struct sclp_register *reg); +void sclp_unregister(struct sclp_register *reg); +char *sclp_error_message(u16 response_code); +int sclp_remove_processed(struct sccb_header *sccb); + +/* useful inlines */ + +/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */ +/* translate single character from ASCII to EBCDIC */ +static inline unsigned char +sclp_ascebc(unsigned char ch) +{ + return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch]; +} + +/* translate string from EBCDIC to ASCII */ +static inline void +sclp_ebcasc_str(unsigned char *str, int nr) +{ + (MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr); +} + +/* translate string from ASCII to EBCDIC */ +static inline void +sclp_ascebc_str(unsigned char *str, int nr) +{ + (MACHINE_IS_VM) ? 
ASCEBC(str, nr) : ASCEBC_500(str, nr); +} + +#endif /* __SCLP_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/sclp_rw.c linux.22-ac2/drivers/s390/char/sclp_rw.c --- linux.vanilla/drivers/s390/char/sclp_rw.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/sclp_rw.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,496 @@ +/* + * drivers/s390/char/sclp_rw.c + * driver: reading from and writing to system console on S/390 via SCLP + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" +#include "sclp_rw.h" + +#define SCLP_RW_PRINT_HEADER "sclp low level driver: " + +/* + * The room for the SCCB (only for writing) is not equal to a pages size + * (as it is specified as the maximum size in the the SCLP ducumentation) + * because of the additional data structure described above. + */ +#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) + +/* Event type structure for write message and write priority message */ +static struct sclp_register sclp_rw_event = { + .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask +}; + +/* + * Setup a sclp write buffer. Gets a page as input (4K) and returns + * a pointer to a struct sclp_buffer structure that is located at the + * end of the input page. This reduces the buffer space by a few + * bytes but simplifies things. + */ +struct sclp_buffer * +sclp_make_buffer(void *page, unsigned short columns, unsigned short htab) +{ + struct sclp_buffer *buffer; + struct write_sccb *sccb; + + sccb = (struct write_sccb *) page; + /* + * We keep the struct sclp_buffer structure at the end + * of the sccb page. + */ + buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1; + buffer->sccb = sccb; + buffer->retry_count = 0; + init_timer(&buffer->retry_timer); + buffer->mto_number = 0; + buffer->mto_char_sum = 0; + buffer->current_line = NULL; + buffer->current_length = 0; + buffer->columns = columns; + buffer->htab = htab; + + /* initialize sccb */ + memset(sccb, 0, sizeof(struct write_sccb)); + sccb->header.length = sizeof(struct write_sccb); + sccb->msg_buf.header.length = sizeof(struct msg_buf); + sccb->msg_buf.header.type = EvTyp_Msg; + sccb->msg_buf.mdb.header.length = sizeof(struct mdb); + sccb->msg_buf.mdb.header.type = 1; + sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ + sccb->msg_buf.mdb.header.revision_code = 1; + sccb->msg_buf.mdb.go.length = sizeof(struct go); + sccb->msg_buf.mdb.go.type = 1; + + return buffer; +} + +/* + * Return a pointer to the orignal page that has been used to create + * the buffer. + */ +void * +sclp_unmake_buffer(struct sclp_buffer *buffer) +{ + return buffer->sccb; +} + +/* + * Initialize a new Message Text Object (MTO) at the end of the provided buffer + * with enough room for max_len characters. Return 0 on success. 
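sclp_make_buffer() above carves one 4 KB page into two parts: the SCCB grows upward from the start of the page, while the struct sclp_buffer bookkeeping sits in the last bytes of the same page, which is why MAX_SCCB_ROOM is PAGE_SIZE minus sizeof(struct sclp_buffer). The sketch below models just that layout in user space; struct trailer is an invented stand-in for struct sclp_buffer:

/* Model of the sclp_make_buffer() page layout: data area at the start of the
 * page, bookkeeping trailer in the last bytes. The trailer fields are invented. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PAGE_SIZE 4096

struct trailer {		/* stands in for struct sclp_buffer */
	void *data;		/* points back to the start of the page */
	unsigned int used;	/* bytes consumed in the data area */
};

#define MAX_DATA_ROOM (PAGE_SIZE - sizeof(struct trailer))

static struct trailer *make_buffer(void *page)
{
	/* the trailer occupies the last sizeof(struct trailer) bytes */
	struct trailer *t = (struct trailer *)((uint8_t *) page + PAGE_SIZE) - 1;

	t->data = page;
	t->used = 0;
	return t;
}

int main(void)
{
	void *page = malloc(PAGE_SIZE);
	struct trailer *t;

	if (page == NULL)
		return 1;
	t = make_buffer(page);
	printf("data room per page: %zu bytes, trailer at offset %td\n",
	       (size_t) MAX_DATA_ROOM, (uint8_t *) t - (uint8_t *) page);
	free(page);
	return 0;
}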
+ */ +static int +sclp_initialize_mto(struct sclp_buffer *buffer, int max_len) +{ + struct write_sccb *sccb; + struct mto *mto; + int mto_size; + + /* max size of new Message Text Object including message text */ + mto_size = sizeof(struct mto) + max_len; + + /* check if current buffer sccb can contain the mto */ + sccb = buffer->sccb; + if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size) + return -ENOMEM; + + /* find address of new message text object */ + mto = (struct mto *)(((addr_t) sccb) + sccb->header.length); + + /* + * fill the new Message-Text Object, + * starting behind the former last byte of the SCCB + */ + memset(mto, 0, sizeof(struct mto)); + mto->length = sizeof(struct mto); + mto->type = 4; /* message text object */ + mto->line_type_flags = LnTpFlgs_EndText; /* end text */ + + /* set pointer to first byte after struct mto. */ + buffer->current_line = (char *) (mto + 1); + buffer->current_length = 0; + + return 0; +} + +/* + * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of + * MTO, enclosing MDB, event buffer and SCCB. + */ +static void +sclp_finalize_mto(struct sclp_buffer *buffer) +{ + struct write_sccb *sccb; + struct mto *mto; + int str_len, mto_size; + + str_len = buffer->current_length; + buffer->current_line = NULL; + buffer->current_length = 0; + + /* real size of new Message Text Object including message text */ + mto_size = sizeof(struct mto) + str_len; + + /* find address of new message text object */ + sccb = buffer->sccb; + mto = (struct mto *)(((addr_t) sccb) + sccb->header.length); + + /* set size of message text object */ + mto->length = mto_size; + + /* + * update values of sizes + * (SCCB, Event(Message) Buffer, Message Data Block) + */ + sccb->header.length += mto_size; + sccb->msg_buf.header.length += mto_size; + sccb->msg_buf.mdb.header.length += mto_size; + + /* + * count number of buffered messages (= number of Message Text + * Objects) and number of buffered characters + * for the SCCB currently used for buffering and at all + */ + buffer->mto_number++; + buffer->mto_char_sum += str_len; +} + +/* + * processing of a message including escape characters, + * returns number of characters written to the output sccb + * ("processed" means that is not guaranteed that the character have already + * been sent to the SCLP but that it will be done at least next time the SCLP + * is not busy) + */ +int +sclp_write(struct sclp_buffer *buffer, + const unsigned char *msg, int count, int from_user) +{ + int spaces, i_msg; + char ch; + int rc; + + /* + * parse msg for escape sequences (\t,\v ...) and put formated + * msg into an mto (created by sclp_initialize_mto). + * + * We have to do this work ourselfs because there is no support for + * these characters on the native machine and only partial support + * under VM (Why does VM interpret \n but the native machine doesn't ?) + * + * Depending on i/o-control setting the message is always written + * immediately or we wait for a final new line maybe coming with the + * next message. Besides we avoid a buffer overrun by writing its + * content. + * + * RESTRICTIONS: + * + * \r and \b work within one line because we are not able to modify + * previous output that have already been accepted by the SCLP. + * + * \t combined with following \r is not correctly represented because + * \t is expanded to some spaces but \r does not know about a + * previous \t and decreases the current position by one column. + * This is in order to a slim and quick implementation. 
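Each line of output becomes one Message Text Object, and sclp_finalize_mto() above has to grow three nested length fields (the SCCB header, the event buffer header and the MDB header) by the size of that MTO. The toy calculation below mirrors that bookkeeping; the starting values roughly correspond to an empty packed write_sccb and MTO_HEADER_SIZE to the 10-byte packed struct mto, but they are illustrative, not load-bearing:

/* Mirror of the sclp_finalize_mto() length bookkeeping: appending one MTO of
 * text grows the three enclosing length fields by the same amount.
 * The concrete numbers are illustrative. */
#include <stdio.h>

#define MTO_HEADER_SIZE 10	/* size of the packed struct mto */

struct lengths {
	unsigned int sccb;	/* sccb->header.length             */
	unsigned int evbuf;	/* sccb->msg_buf.header.length     */
	unsigned int mdb;	/* sccb->msg_buf.mdb.header.length */
};

static void finalize_mto(struct lengths *l, int text_len)
{
	int mto_size = MTO_HEADER_SIZE + text_len;

	l->sccb += mto_size;
	l->evbuf += mto_size;
	l->mdb += mto_size;
}

int main(void)
{
	/* starting points standing in for an empty write_sccb */
	struct lengths l = { 82, 74, 68 };

	finalize_mto(&l, 12);	/* one 12-character line */
	printf("sccb=%u evbuf=%u mdb=%u\n", l.sccb, l.evbuf, l.mdb);
	return 0;
}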
+ */ + for (i_msg = 0; i_msg < count; i_msg++) { + if (from_user) { + if (get_user(ch, msg + i_msg) != 0) + return -EFAULT; + } else + ch = msg[i_msg]; + + switch (ch) { + case '\n': /* new line, line feed (ASCII) */ + /* check if new mto needs to be created */ + if (buffer->current_line == NULL) { + rc = sclp_initialize_mto(buffer, 0); + if (rc) + return i_msg; + } + sclp_finalize_mto(buffer); + break; + case '\a': /* bell, one for several times */ + /* set SCLP sound alarm bit in General Object */ + buffer->sccb->msg_buf.mdb.go.general_msg_flags |= + GnrlMsgFlgs_SndAlrm; + break; + case '\t': /* horizontal tabulator */ + /* check if new mto needs to be created */ + if (buffer->current_line == NULL) { + rc = sclp_initialize_mto(buffer, + buffer->columns); + if (rc) + return i_msg; + } + /* "go to (next htab-boundary + 1, same line)" */ + do { + if (buffer->current_length >= buffer->columns) + break; + /* ok, add a blank */ + *buffer->current_line++ = 0x40; + buffer->current_length++; + } while (buffer->current_length % buffer->htab); + break; + case '\f': /* form feed */ + case '\v': /* vertical tabulator */ + /* "go to (actual column, actual line + 1)" */ + /* = new line, leading spaces */ + if (buffer->current_line != NULL) { + spaces = buffer->current_length; + sclp_finalize_mto(buffer); + rc = sclp_initialize_mto(buffer, + buffer->columns); + if (rc) + return i_msg; + memset(buffer->current_line, 0x40, spaces); + buffer->current_line += spaces; + buffer->current_length = spaces; + } else { + /* one an empty line this is the same as \n */ + rc = sclp_initialize_mto(buffer, + buffer->columns); + if (rc) + return i_msg; + sclp_finalize_mto(buffer); + } + break; + case '\b': /* backspace */ + /* "go to (actual column - 1, actual line)" */ + /* decrement counter indicating position, */ + /* do not remove last character */ + if (buffer->current_line != NULL && + buffer->current_length > 0) { + buffer->current_length--; + buffer->current_line--; + } + break; + case 0x00: /* end of string */ + /* transfer current line to SCCB */ + if (buffer->current_line != NULL) + sclp_finalize_mto(buffer); + /* skip the rest of the message including the 0 byte */ + i_msg = count; + break; + default: /* no escape character */ + /* do not output unprintable characters */ + if (!isprint(ch)) + break; + /* check if new mto needs to be created */ + if (buffer->current_line == NULL) { + rc = sclp_initialize_mto(buffer, + buffer->columns); + if (rc) + return i_msg; + } + *buffer->current_line++ = sclp_ascebc(ch); + buffer->current_length++; + break; + } + /* check if current mto is full */ + if (buffer->current_line != NULL && + buffer->current_length >= buffer->columns) + sclp_finalize_mto(buffer); + } + + /* return number of processed characters */ + return i_msg; +} + +/* + * Return the number of free bytes in the sccb + */ +int +sclp_buffer_space(struct sclp_buffer *buffer) +{ + int count; + + count = MAX_SCCB_ROOM - buffer->sccb->header.length; + if (buffer->current_line != NULL) + count -= sizeof(struct mto) + buffer->current_length; + return count; +} + +/* + * Return number of characters in buffer + */ +int +sclp_chars_in_buffer(struct sclp_buffer *buffer) +{ + int count; + + count = buffer->mto_char_sum; + if (buffer->current_line != NULL) + count += buffer->current_length; + return count; +} + +/* + * sets or provides some values that influence the drivers behaviour + */ +void +sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns) +{ + buffer->columns = columns; + if (buffer->current_line 
!= NULL && + buffer->current_length > buffer->columns) + sclp_finalize_mto(buffer); +} + +void +sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab) +{ + buffer->htab = htab; +} + +/* + * called by sclp_console_init and/or sclp_tty_init + */ +int +sclp_rw_init(void) +{ + static int init_done = 0; + int rc; + + if (init_done) + return 0; + + rc = sclp_register(&sclp_rw_event); + if (rc == 0) + init_done = 1; + return rc; +} + +static void +sclp_buffer_retry(unsigned long data) +{ + struct sclp_buffer *buffer = (struct sclp_buffer *) data; + buffer->request.status = SCLP_REQ_FILLED; + buffer->sccb->header.response_code = 0x0000; + sclp_add_request(&buffer->request); +} + +#define SCLP_BUFFER_MAX_RETRY 5 +#define SCLP_BUFFER_RETRY_INTERVAL 2 + +/* + * second half of Write Event Data-function that has to be done after + * interruption indicating completion of Service Call. + */ +static void +sclp_writedata_callback(struct sclp_req *request, void *data) +{ + int rc; + struct sclp_buffer *buffer; + struct write_sccb *sccb; + + buffer = (struct sclp_buffer *) data; + sccb = buffer->sccb; + + if (request->status == SCLP_REQ_FAILED) { + if (buffer->callback != NULL) + buffer->callback(buffer, -EIO); + return; + } + /* check SCLP response code and choose suitable action */ + switch (sccb->header.response_code) { + case 0x0020 : + /* Normal completion, buffer processed, message(s) sent */ + rc = 0; + break; + + case 0x0340: /* Contained SCLP equipment check */ + if (buffer->retry_count++ > SCLP_BUFFER_MAX_RETRY) { + rc = -EIO; + break; + } + /* remove processed buffers and requeue rest */ + if (sclp_remove_processed((struct sccb_header *) sccb) > 0) { + /* not all buffers were processed */ + sccb->header.response_code = 0x0000; + buffer->request.status = SCLP_REQ_FILLED; + sclp_add_request(request); + return; + } + rc = 0; + break; + + case 0x0040: /* SCLP equipment check */ + case 0x05f0: /* Target resource in improper state */ + if (buffer->retry_count++ > SCLP_BUFFER_MAX_RETRY) { + rc = -EIO; + break; + } + /* wait some time, then retry request */ + buffer->retry_timer.function = sclp_buffer_retry; + buffer->retry_timer.data = (unsigned long) buffer; + buffer->retry_timer.expires = jiffies + + SCLP_BUFFER_RETRY_INTERVAL*HZ; + add_timer(&buffer->retry_timer); + return; + + default: + if (sccb->header.response_code == 0x71f0) + rc = -ENOMEM; + else + rc = -EINVAL; + break; + } + if (buffer->callback != NULL) + buffer->callback(buffer, rc); +} + +/* + * Setup the request structure in the struct sclp_buffer to do SCLP Write + * Event Data and pass the request to the core SCLP loop. + */ +void +sclp_emit_buffer(struct sclp_buffer *buffer, + void (*callback)(struct sclp_buffer *, int)) +{ + struct write_sccb *sccb; + + /* add current line if there is one */ + if (buffer->current_line != NULL) + sclp_finalize_mto(buffer); + + /* Are there messages in the output buffer ? 
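sclp_writedata_callback() above implements a small retry policy keyed off the SCLP response code: 0x0020 is normal completion, 0x0340 requeues whatever event buffers the SCLP left unprocessed, 0x0040 and 0x05f0 re-send the same buffer after a delay, and everything is bounded by SCLP_BUFFER_MAX_RETRY. The sketch below condenses that decision logic into one plain function so the cases are easy to read; the return-value convention is invented for the example:

/* Condensed model of the sclp_writedata_callback() response handling.
 * Returns 0 for done, <0 for a final error, 1 for "requeue now",
 * 2 for "retry after a delay" (a timer in the driver). */
#include <stdio.h>
#include <errno.h>

#define MAX_RETRY 5		/* SCLP_BUFFER_MAX_RETRY in the driver */

static int writedata_decision(unsigned int response, int *retries,
			      int partially_processed)
{
	switch (response) {
	case 0x0020:		/* normal completion */
		return 0;
	case 0x0340:		/* contained SCLP equipment check */
		if ((*retries)++ > MAX_RETRY)
			return -EIO;
		return partially_processed ? 1 : 0;
	case 0x0040:		/* SCLP equipment check */
	case 0x05f0:		/* target resource in improper state */
		if ((*retries)++ > MAX_RETRY)
			return -EIO;
		return 2;
	case 0x71f0:
		return -ENOMEM;
	default:
		return -EINVAL;
	}
}

int main(void)
{
	int retries = 0;

	printf("0x0020 -> %d\n", writedata_decision(0x0020, &retries, 0));
	printf("0x0040 -> %d\n", writedata_decision(0x0040, &retries, 0));
	printf("0x9999 -> %d\n", writedata_decision(0x9999, &retries, 0));
	return 0;
}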
*/ + if (buffer->mto_number == 0) { + if (callback != NULL) + callback(buffer, 0); + return; + } + + sccb = buffer->sccb; + if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask) + /* Use normal write message */ + sccb->msg_buf.header.type = EvTyp_Msg; + else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask) + /* Use write priority message */ + sccb->msg_buf.header.type = EvTyp_PMsgCmd; + else { + if (callback != NULL) + callback(buffer, -ENOSYS); + return; + } + buffer->request.command = SCLP_CMDW_WRITEDATA; + buffer->request.status = SCLP_REQ_FILLED; + buffer->request.callback = sclp_writedata_callback; + buffer->request.callback_data = buffer; + buffer->request.sccb = sccb; + buffer->callback = callback; + sclp_add_request(&buffer->request); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/sclp_rw.h linux.22-ac2/drivers/s390/char/sclp_rw.h --- linux.vanilla/drivers/s390/char/sclp_rw.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/sclp_rw.h 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,98 @@ +/* + * drivers/s390/char/sclp_rw.h + * interface to the SCLP-read/write driver + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#ifndef __SCLP_RW_H__ +#define __SCLP_RW_H__ + +#include +#include + +struct mto { + u16 length; + u16 type; + u16 line_type_flags; + u8 alarm_control; + u8 _reserved[3]; +} __attribute__((packed)); + +struct go { + u16 length; + u16 type; + u32 domid; + u8 hhmmss_time[8]; + u8 th_time[3]; + u8 reserved_0; + u8 dddyyyy_date[7]; + u8 _reserved_1; + u16 general_msg_flags; + u8 _reserved_2[10]; + u8 originating_system_name[8]; + u8 job_guest_name[8]; +} __attribute__((packed)); + +struct mdb_header { + u16 length; + u16 type; + u32 tag; + u32 revision_code; +} __attribute__((packed)); + +struct mdb { + struct mdb_header header; + struct go go; +} __attribute__((packed)); + +struct msg_buf { + struct evbuf_header header; + struct mdb mdb; +} __attribute__((packed)); + +struct write_sccb { + struct sccb_header header; + struct msg_buf msg_buf; +} __attribute__((packed)); + +/* The number of empty mto buffers that can be contained in a single sccb. */ +#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \ + sizeof(struct write_sccb)) / sizeof(struct mto)) + +/* + * data structure for information about list of SCCBs (only for writing), + * will be located at the end of a SCCBs page + */ +struct sclp_buffer { + struct list_head list; /* list_head for sccb_info chain */ + struct sclp_req request; + struct write_sccb *sccb; + char *current_line; + int current_length; + int retry_count; + struct timer_list retry_timer; + /* output format settings */ + unsigned short columns; + unsigned short htab; + /* statistics about this buffer */ + unsigned int mto_char_sum; /* # chars in sccb */ + unsigned int mto_number; /* # mtos in sccb */ + /* Callback that is called after reaching final status. 
*/ + void (*callback)(struct sclp_buffer *, int); +}; + +int sclp_rw_init(void); +struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short); +void *sclp_unmake_buffer(struct sclp_buffer *); +int sclp_buffer_space(struct sclp_buffer *); +int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int, int); +void sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int)); +void sclp_set_columns(struct sclp_buffer *, unsigned short); +void sclp_set_htab(struct sclp_buffer *, unsigned short); +int sclp_chars_in_buffer(struct sclp_buffer *); + +#endif /* __SCLP_RW_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/sclp_tty.c linux.22-ac2/drivers/s390/char/sclp_tty.c --- linux.vanilla/drivers/s390/char/sclp_tty.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/sclp_tty.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,816 @@ +/* + * drivers/s390/char/sclp_tty.c + * SCLP line mode terminal driver. + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sclp.h" +#include "sclp_rw.h" +#include "sclp_tty.h" + +#define SCLP_TTY_PRINT_HEADER "sclp tty driver: " + +/* + * size of a buffer that collects single characters coming in + * via sclp_tty_put_char() + */ +#define SCLP_TTY_BUF_SIZE 512 + +/* + * There is exactly one SCLP terminal, so we can keep things simple + * and allocate all variables statically. + */ + +/* Lock to guard over changes to global variables. */ +static spinlock_t sclp_tty_lock; +/* List of free pages that can be used for console output buffering. */ +static struct list_head sclp_tty_pages; +/* List of full struct sclp_buffer structures ready for output. */ +static struct list_head sclp_tty_outqueue; +/* Counter how many buffers are emitted. */ +static int sclp_tty_buffer_count; +/* Pointer to current console buffer. */ +static struct sclp_buffer *sclp_ttybuf; +/* Timer for delayed output of console messages. */ +static struct timer_list sclp_tty_timer; +/* Waitqueue to wait for buffers to get empty. */ +static wait_queue_head_t sclp_tty_waitq; + +static struct tty_struct *sclp_tty; +static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; +static unsigned short int sclp_tty_chars_count; + +static struct tty_driver sclp_tty_driver; +static struct tty_struct * sclp_tty_table[1]; +static struct termios * sclp_tty_termios[1]; +static struct termios * sclp_tty_termios_locked[1]; +static int sclp_tty_refcount = 0; + +extern struct termios tty_std_termios; + +static struct sclp_ioctls sclp_ioctls; +static struct sclp_ioctls sclp_ioctls_init = +{ + 8, /* 1 hor. tab. = 8 spaces */ + 0, /* no echo of input by this driver */ + 80, /* 80 characters/line */ + 1, /* write after 1/10 s without final new line */ + MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */ + MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */ + 0, /* do not convert to lower case */ + 0x6c /* to seprate upper and lower case */ + /* ('%' in EBCDIC) */ +}; + +/* This routine is called whenever we try to open a SCLP terminal. */ +static int +sclp_tty_open(struct tty_struct *tty, struct file *filp) +{ + sclp_tty = tty; + tty->driver_data = NULL; + tty->low_latency = 0; + return 0; +} + +/* This routine is called when the SCLP terminal is closed. 
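The sclp_ioctls structure above carries the run-time settings of the line-mode terminal, and the TIOCSCLP* ioctls handled by sclp_tty_ioctl() just below (and declared in sclp_tty.h further down in this patch) expose them to user space. A minimal user-space sketch follows; it assumes the in-tree sclp_tty.h header can be included from user space and that /dev/ttyS0 is the SCLP terminal, both assumptions of the example:

/* User-space sketch: read and change the SCLP tty output width via the
 * TIOCSCLP* ioctls from sclp_tty.h. Device path and header availability
 * are assumptions of the example. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "sclp_tty.h"

int main(void)
{
	unsigned short columns, htab;
	int fd = open("/dev/ttyS0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCSCLPGCOLS, &columns) == 0 &&
	    ioctl(fd, TIOCSCLPGHTAB, &htab) == 0)
		printf("columns=%u htab=%u\n",
		       (unsigned int) columns, (unsigned int) htab);
	else
		perror("ioctl(get)");

	columns = 72;		/* narrow the output a little */
	if (ioctl(fd, TIOCSCLPSCOLS, &columns) != 0)
		perror("ioctl(TIOCSCLPSCOLS)");

	close(fd);
	return 0;
}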
*/ +static void +sclp_tty_close(struct tty_struct *tty, struct file *filp) +{ + if (atomic_read(&tty->count) > 1) + return; + sclp_tty = NULL; +} + +/* execute commands to control the i/o behaviour of the SCLP tty at runtime */ +static int +sclp_tty_ioctl(struct tty_struct *tty, struct file * file, + unsigned int cmd, unsigned long arg) +{ + unsigned long flags; + unsigned int obuf; + int check; + int rc; + + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + rc = 0; + check = 0; + switch (cmd) { + case TIOCSCLPSHTAB: + /* set width of horizontal tab */ + if (get_user(sclp_ioctls.htab, (unsigned short *) arg)) + rc = -EFAULT; + else + check = 1; + break; + case TIOCSCLPGHTAB: + /* get width of horizontal tab */ + if (put_user(sclp_ioctls.htab, (unsigned short *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSECHO: + /* enable/disable echo of input */ + if (get_user(sclp_ioctls.echo, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGECHO: + /* Is echo of input enabled ? */ + if (put_user(sclp_ioctls.echo, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSCOLS: + /* set number of columns for output */ + if (get_user(sclp_ioctls.columns, (unsigned short *) arg)) + rc = -EFAULT; + else + check = 1; + break; + case TIOCSCLPGCOLS: + /* get number of columns for output */ + if (put_user(sclp_ioctls.columns, (unsigned short *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSNL: + /* enable/disable writing without final new line character */ + if (get_user(sclp_ioctls.final_nl, (signed char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGNL: + /* Is writing without final new line character enabled ? */ + if (put_user(sclp_ioctls.final_nl, (signed char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSOBUF: + /* + * set the maximum buffers size for output, will be rounded + * up to next 4kB boundary and stored as number of SCCBs + * (4kB Buffers) limitation: 256 x 4kB + */ + if (get_user(obuf, (unsigned int *) arg) == 0) { + if (obuf & 0xFFF) + sclp_ioctls.max_sccb = (obuf >> 12) + 1; + else + sclp_ioctls.max_sccb = (obuf >> 12); + } else + rc = -EFAULT; + break; + case TIOCSCLPGOBUF: + /* get the maximum buffers size for output */ + obuf = sclp_ioctls.max_sccb << 12; + if (put_user(obuf, (unsigned int *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGKBUF: + /* get the number of buffers got from kernel at startup */ + if (put_user(sclp_ioctls.kmem_sccb, (unsigned short *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSCASE: + /* enable/disable conversion from upper to lower case */ + if (get_user(sclp_ioctls.tolower, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGCASE: + /* Is conversion from upper to lower case of input enabled? 
*/ + if (put_user(sclp_ioctls.tolower, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSDELIM: + /* + * set special character used for separating upper and + * lower case, 0x00 disables this feature + */ + if (get_user(sclp_ioctls.delim, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPGDELIM: + /* + * get special character used for separating upper and + * lower case, 0x00 disables this feature + */ + if (put_user(sclp_ioctls.delim, (unsigned char *) arg)) + rc = -EFAULT; + break; + case TIOCSCLPSINIT: + /* set initial (default) sclp ioctls */ + sclp_ioctls = sclp_ioctls_init; + check = 1; + break; + default: + rc = -ENOIOCTLCMD; + break; + } + if (check) { + spin_lock_irqsave(&sclp_tty_lock, flags); + if (sclp_ttybuf != NULL) { + sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab); + sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns); + } + spin_unlock_irqrestore(&sclp_tty_lock, flags); + } + return rc; +} + +/* + * This routine returns the numbers of characters the tty driver + * will accept for queuing to be written. This number is subject + * to change as output buffers get emptied, or if the output flow + * control is acted. This is not an exact number because not every + * character needs the same space in the sccb. The worst case is + * a string of newlines. Every newlines creates a new mto which + * needs 8 bytes. + */ +static int +sclp_tty_write_room (struct tty_struct *tty) +{ + unsigned long flags; + struct list_head *l; + int count; + + spin_lock_irqsave(&sclp_tty_lock, flags); + count = 0; + if (sclp_ttybuf != NULL) + count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto); + list_for_each(l, &sclp_tty_pages) + count += NR_EMPTY_MTO_PER_SCCB; + spin_unlock_irqrestore(&sclp_tty_lock, flags); + return count; +} + +static void +sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc) +{ + unsigned long flags; + struct sclp_buffer *next; + void *page; + + /* Ignore return code - because tty-writes aren't critical, + we do without a sophisticated error recovery mechanism. */ + page = sclp_unmake_buffer(buffer); + spin_lock_irqsave(&sclp_tty_lock, flags); + /* Remove buffer from outqueue */ + list_del(&buffer->list); + sclp_tty_buffer_count--; + list_add_tail((struct list_head *) page, &sclp_tty_pages); + /* Check if there is a pending buffer on the out queue. */ + next = NULL; + if (!list_empty(&sclp_tty_outqueue)) + next = list_entry(sclp_tty_outqueue.next, + struct sclp_buffer, list); + spin_unlock_irqrestore(&sclp_tty_lock, flags); + if (next != NULL) + sclp_emit_buffer(next, sclp_ttybuf_callback); + wake_up(&sclp_tty_waitq); + /* check if the tty needs a wake up call */ + if (sclp_tty != NULL) { + if ((sclp_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + sclp_tty->ldisc.write_wakeup) + (sclp_tty->ldisc.write_wakeup)(sclp_tty); + wake_up_interruptible(&sclp_tty->write_wait); + } +} + +static inline void +__sclp_ttybuf_emit(struct sclp_buffer *buffer) +{ + unsigned long flags; + int count; + + spin_lock_irqsave(&sclp_tty_lock, flags); + list_add_tail(&buffer->list, &sclp_tty_outqueue); + count = sclp_tty_buffer_count++; + spin_unlock_irqrestore(&sclp_tty_lock, flags); + + if (count == 0) + sclp_emit_buffer(buffer, sclp_ttybuf_callback); +} + +/* + * When this routine is called from the timer then we flush the + * temporary write buffer. 
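sclp_tty_write_room() above reports free space as a number of empty MTOs: the partially filled buffer contributes sclp_buffer_space() divided by sizeof(struct mto), and every free page contributes NR_EMPTY_MTO_PER_SCCB from sclp_rw.h. The program below rebuilds only the packed wire-format structures with stdint types and prints that per-page capacity; TRAILER_SIZE is an assumed stand-in for sizeof(struct sclp_buffer), which depends on kernel types:

/* Rebuild the packed wire-format structs from sclp_rw.h and compute the
 * empty-MTO capacity of one 4 KB page. TRAILER_SIZE is an assumption standing
 * in for sizeof(struct sclp_buffer). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096
#define TRAILER_SIZE	128	/* assumed size of struct sclp_buffer */

struct mto {
	uint16_t length, type, line_type_flags;
	uint8_t alarm_control, _reserved[3];
} __attribute__((packed));

struct go {
	uint16_t length, type;
	uint32_t domid;
	uint8_t hhmmss_time[8], th_time[3], reserved_0;
	uint8_t dddyyyy_date[7], _reserved_1;
	uint16_t general_msg_flags;
	uint8_t _reserved_2[10], originating_system_name[8], job_guest_name[8];
} __attribute__((packed));

struct mdb_header {
	uint16_t length, type;
	uint32_t tag, revision_code;
} __attribute__((packed));

struct evbuf_header {
	uint16_t length;
	uint8_t type, flags;
	uint16_t _reserved;
} __attribute__((packed));

struct sccb_header {
	uint16_t length;
	uint8_t function_code, control_mask[3];
	uint16_t response_code;
} __attribute__((packed));

struct mdb { struct mdb_header header; struct go go; } __attribute__((packed));
struct msg_buf { struct evbuf_header header; struct mdb mdb; } __attribute__((packed));
struct write_sccb { struct sccb_header header; struct msg_buf msg_buf; } __attribute__((packed));

int main(void)
{
	size_t empty_mtos = (PAGE_SIZE - TRAILER_SIZE - sizeof(struct write_sccb))
			    / sizeof(struct mto);

	printf("sizeof(mto)=%zu sizeof(write_sccb)=%zu -> %zu empty MTOs/page\n",
	       sizeof(struct mto), sizeof(struct write_sccb), empty_mtos);
	return 0;
}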
+ */ +static void +sclp_tty_timeout(unsigned long data) +{ + unsigned long flags; + struct sclp_buffer *buf; + + spin_lock_irqsave(&sclp_tty_lock, flags); + buf = sclp_ttybuf; + sclp_ttybuf = NULL; + spin_unlock_irqrestore(&sclp_tty_lock, flags); + + if (buf != NULL) { + __sclp_ttybuf_emit(buf); + } +} + +/* + * Write a string to the sclp tty. + */ +static void +sclp_tty_write_string(const unsigned char *str, int count, int from_user) +{ + unsigned long flags; + void *page; + int written; + struct sclp_buffer *buf; + + if (count <= 0) + return; + spin_lock_irqsave(&sclp_tty_lock, flags); + do { + /* Create a sclp output buffer if none exists yet */ + if (sclp_ttybuf == NULL) { + while (list_empty(&sclp_tty_pages)) { + spin_unlock_irqrestore(&sclp_tty_lock, flags); + wait_event(sclp_tty_waitq, + !list_empty(&sclp_tty_pages)); + spin_lock_irqsave(&sclp_tty_lock, flags); + } + page = sclp_tty_pages.next; + list_del((struct list_head *) page); + sclp_ttybuf = sclp_make_buffer(page, + sclp_ioctls.columns, + sclp_ioctls.htab); + } + /* try to write the string to the current output buffer */ + written = sclp_write(sclp_ttybuf, str, count, from_user); + if (written == -EFAULT || written == count) + break; + /* + * Not all characters could be written to the current + * output buffer. Emit the buffer, create a new buffer + * and then output the rest of the string. + */ + buf = sclp_ttybuf; + sclp_ttybuf = NULL; + spin_unlock_irqrestore(&sclp_tty_lock, flags); + __sclp_ttybuf_emit(buf); + spin_lock_irqsave(&sclp_tty_lock, flags); + str += written; + count -= written; + } while (count > 0); + /* Setup timer to output current console buffer after 1/10 second */ + if (sclp_ioctls.final_nl) { + if (sclp_ttybuf != NULL && !timer_pending(&sclp_tty_timer)) { + init_timer(&sclp_tty_timer); + sclp_tty_timer.function = sclp_tty_timeout; + sclp_tty_timer.data = 0UL; + sclp_tty_timer.expires = jiffies + HZ/10; + add_timer(&sclp_tty_timer); + } + } else { + __sclp_ttybuf_emit(sclp_ttybuf); + sclp_ttybuf = NULL; + } + spin_unlock_irqrestore(&sclp_tty_lock, flags); +} + +/* + * This routine is called by the kernel to write a series of characters to the + * tty device. The characters may come from user space or kernel space. This + * routine will return the number of characters actually accepted for writing. + */ +static int +sclp_tty_write(struct tty_struct *tty, int from_user, + const unsigned char *buf, int count) +{ + if (sclp_tty_chars_count > 0) { + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); + sclp_tty_chars_count = 0; + } + sclp_tty_write_string(buf, count, from_user); + return count; +} + +/* + * This routine is called by the kernel to write a single character to the tty + * device. If the kernel uses this routine, it must call the flush_chars() + * routine (if defined) when it is done stuffing characters into the driver. + * + * Characters provided to sclp_tty_put_char() are buffered by the SCLP driver. + * If the given character is a '\n' the contents of the SCLP write buffer + * - including previous characters from sclp_tty_put_char() and strings from + * sclp_write() without final '\n' - will be written. 
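The comment above describes sclp_tty_put_char(), which follows: single characters are collected in a 512-byte array and handed to sclp_tty_write_string() as soon as a '\n' arrives or the array is full. The few lines below model that accumulation in isolation; flush() stands in for sclp_tty_write_string() and the buffer is shrunk so the size-triggered flush is visible:

/* Model of the sclp_tty_put_char() accumulation: buffer single characters,
 * flush on '\n' or when the buffer is full. flush() stands in for
 * sclp_tty_write_string(); the real buffer is SCLP_TTY_BUF_SIZE (512) bytes. */
#include <stdio.h>

#define TTY_BUF_SIZE 8		/* deliberately tiny for the example */

static char chars[TTY_BUF_SIZE];
static unsigned short chars_count;

static void flush(void)
{
	if (chars_count == 0)
		return;
	printf("write_string(%.*s)\n", chars_count, chars);
	chars_count = 0;
}

static void put_char(char ch)
{
	chars[chars_count++] = ch;
	if (ch == '\n' || chars_count >= TTY_BUF_SIZE)
		flush();
}

int main(void)
{
	const char *s = "hello world\n";	/* one size flush, one '\n' flush */

	while (*s)
		put_char(*s++);
	return 0;
}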
+ */ +static void +sclp_tty_put_char(struct tty_struct *tty, unsigned char ch) +{ + sclp_tty_chars[sclp_tty_chars_count++] = ch; + if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); + sclp_tty_chars_count = 0; + } +} + +/* + * This routine is called by the kernel after it has written a series of + * characters to the tty device using put_char(). + */ +static void +sclp_tty_flush_chars(struct tty_struct *tty) +{ + if (sclp_tty_chars_count > 0) { + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); + sclp_tty_chars_count = 0; + } +} + +/* + * This routine returns the number of characters in the write buffer of the + * SCLP driver. The provided number includes all characters that are stored + * in the SCCB (will be written next time the SCLP is not busy) as well as + * characters in the write buffer (will not be written as long as there is a + * final line feed missing). + */ +static int +sclp_tty_chars_in_buffer(struct tty_struct *tty) +{ + unsigned long flags; + struct list_head *l; + struct sclp_buffer *t; + int count; + + spin_lock_irqsave(&sclp_tty_lock, flags); + count = 0; + if (sclp_ttybuf != NULL) + count = sclp_chars_in_buffer(sclp_ttybuf); + list_for_each(l, &sclp_tty_outqueue) { + t = list_entry(l, struct sclp_buffer, list); + count += sclp_chars_in_buffer(sclp_ttybuf); + } + spin_unlock_irqrestore(&sclp_tty_lock, flags); + return count; +} + +/* + * removes all content from buffers of low level driver + */ +static void +sclp_tty_flush_buffer(struct tty_struct *tty) +{ + if (sclp_tty_chars_count > 0) { + sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); + sclp_tty_chars_count = 0; + } +} + +/* + * push input to tty + */ +static void +sclp_tty_input(unsigned char* buf, unsigned int count) +{ + unsigned int cchar; + + /* + * If this tty driver is currently closed + * then throw the received input away. + */ + if (sclp_tty == NULL) + return; + cchar = ctrlchar_handle(buf, count, sclp_tty, 1); + switch (cchar & CTRLCHAR_MASK) { + case CTRLCHAR_SYSRQ: + break; + case CTRLCHAR_CTRL: + sclp_tty->flip.count++; + *sclp_tty->flip.flag_buf_ptr++ = TTY_NORMAL; + *sclp_tty->flip.char_buf_ptr++ = cchar; + tty_flip_buffer_push(sclp_tty); + break; + case CTRLCHAR_NONE: + /* send (normal) input to line discipline */ + memcpy(sclp_tty->flip.char_buf_ptr, buf, count); + if (count < 2 || + (strncmp ((const char *) buf + count - 2, "^n", 2) && + strncmp ((const char *) buf + count - 2, "\252n", 2))) { + sclp_tty->flip.char_buf_ptr[count] = '\n'; + count++; + } else + count -= 2; + memset(sclp_tty->flip.flag_buf_ptr, TTY_NORMAL, count); + sclp_tty->flip.char_buf_ptr += count; + sclp_tty->flip.flag_buf_ptr += count; + sclp_tty->flip.count += count; + tty_flip_buffer_push(sclp_tty); + break; + } +} + +/* + * get a EBCDIC string in upper/lower case, + * find out characters in lower/upper case separated by a special character, + * modifiy original string, + * returns length of resulting string + */ +static int +sclp_switch_cases(unsigned char *buf, int count, + unsigned char delim, int tolower) +{ + unsigned char *ip, *op; + int toggle; + + /* initially changing case is off */ + toggle = 0; + ip = op = buf; + while (count-- > 0) { + /* compare with special character */ + if (*ip == delim) { + /* followed by another special character? */ + if (count && ip[1] == delim) { + /* + * ... 
then put a single copy of the special + * character to the output string + */ + *op++ = *ip++; + count--; + } else + /* + * ... special character follower by a normal + * character toggles the case change behaviour + */ + toggle = ~toggle; + /* skip special character */ + ip++; + } else + /* not the special character */ + if (toggle) + /* but case switching is on */ + if (tolower) + /* switch to uppercase */ + *op++ = _ebc_toupper[(int) *ip++]; + else + /* switch to lowercase */ + *op++ = _ebc_tolower[(int) *ip++]; + else + /* no case switching, copy the character */ + *op++ = *ip++; + } + /* return length of reformatted string. */ + return op - buf; +} + +static void +sclp_get_input(unsigned char *start, unsigned char *end) +{ + int count; + + count = end - start; + /* + * if set in ioctl convert EBCDIC to lower case + * (modify original input in SCCB) + */ + if (sclp_ioctls.tolower) + EBC_TOLOWER(start, count); + + /* + * if set in ioctl find out characters in lower or upper case + * (depends on current case) separated by a special character, + * works on EBCDIC + */ + if (sclp_ioctls.delim) + count = sclp_switch_cases(start, count, + sclp_ioctls.delim, + sclp_ioctls.tolower); + + /* convert EBCDIC to ASCII (modify original input in SCCB) */ + sclp_ebcasc_str(start, count); + + /* if set in ioctl write operators input to console */ + if (sclp_ioctls.echo) + sclp_tty_write(sclp_tty, 0, start, count); + + /* transfer input to high level driver */ + sclp_tty_input(start, count); +} + +static inline struct gds_vector * +find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id) +{ + struct gds_vector *vec; + + for (vec = start; vec < end; (void *) vec += vec->length) + if (vec->gds_id == id) + return vec; + return NULL; +} + +static inline struct gds_subvector * +find_gds_subvector(struct gds_subvector *start, + struct gds_subvector *end, u8 key) +{ + struct gds_subvector *subvec; + + for (subvec = start; subvec < end; (void *) subvec += subvec->length) + if (subvec->key == key) + return subvec; + return NULL; +} + +static inline void +sclp_eval_selfdeftextmsg(struct gds_subvector *start, + struct gds_subvector *end) +{ + struct gds_subvector *subvec; + + subvec = start; + while (subvec < end) { + subvec = find_gds_subvector(subvec, end, 0x30); + if (!subvec) + break; + sclp_get_input((unsigned char *)(subvec + 1), + (unsigned char *) subvec + subvec->length); + (void *) subvec += subvec->length; + } +} + +static inline void +sclp_eval_textcmd(struct gds_subvector *start, + struct gds_subvector *end) +{ + struct gds_subvector *subvec; + + subvec = start; + while (subvec < end) { + subvec = find_gds_subvector(subvec, end, + GDS_KEY_SelfDefTextMsg); + if (!subvec) + break; + sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), + (void *)subvec + subvec->length); + (void *) subvec += subvec->length; + } +} + +static inline void +sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end) +{ + struct gds_vector *vec; + + vec = start; + while (vec < end) { + vec = find_gds_vector(vec, end, GDS_ID_TextCmd); + if (!vec) + break; + sclp_eval_textcmd((struct gds_subvector *)(vec + 1), + (void *) vec + vec->length); + (void *) vec += vec->length; + } +} + + +static inline void +sclp_eval_mdsmu(struct gds_vector *start, void *end) +{ + struct gds_vector *vec; + + vec = find_gds_vector(start, end, GDS_ID_CPMSU); + if (vec) + sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length); +} + +static void +sclp_tty_receiver(struct evbuf_header *evbuf) +{ + struct gds_vector *start, 
*end, *vec; + + start = (struct gds_vector *)(evbuf + 1); + end = (void *) evbuf + evbuf->length; + vec = find_gds_vector(start, end, GDS_ID_MDSMU); + if (vec) + sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length); +} + +static void +sclp_tty_state_change(struct sclp_register *reg) +{ +} + +static struct sclp_register sclp_input_event = +{ + .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask, + .state_change_fn = sclp_tty_state_change, + .receiver_fn = sclp_tty_receiver +}; + +void +sclp_tty_init(void) +{ + void *page; + int i; + int rc; + + if (!CONSOLE_IS_SCLP) + return; + rc = sclp_rw_init(); + if (rc != 0) { + printk(KERN_ERR SCLP_TTY_PRINT_HEADER + "could not register tty - " + "sclp_rw_init returned %d\n", rc); + return; + } + /* Allocate pages for output buffering */ + INIT_LIST_HEAD(&sclp_tty_pages); + for (i = 0; i < MAX_KMEM_PAGES; i++) { + page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (page == NULL) + return; + list_add_tail((struct list_head *) page, &sclp_tty_pages); + } + INIT_LIST_HEAD(&sclp_tty_outqueue); + spin_lock_init(&sclp_tty_lock); + init_waitqueue_head(&sclp_tty_waitq); + init_timer(&sclp_tty_timer); + sclp_ttybuf = NULL; + sclp_tty_buffer_count = 0; + if (MACHINE_IS_VM) { + /* + * save 4 characters for the CPU number + * written at start of each line by VM/CP + */ + sclp_ioctls_init.columns = 76; + /* case input lines to lowercase */ + sclp_ioctls_init.tolower = 1; + } + sclp_ioctls = sclp_ioctls_init; + sclp_tty_chars_count = 0; + sclp_tty = NULL; + + ctrlchar_init(); + + if (sclp_register(&sclp_input_event) != 0) + return; + + memset (&sclp_tty_driver, 0, sizeof(struct tty_driver)); + sclp_tty_driver.magic = TTY_DRIVER_MAGIC; + sclp_tty_driver.driver_name = "sclp_line"; + sclp_tty_driver.name = "ttyS"; + sclp_tty_driver.name_base = 0; + sclp_tty_driver.major = TTY_MAJOR; + sclp_tty_driver.minor_start = 64; + sclp_tty_driver.num = 1; + sclp_tty_driver.type = TTY_DRIVER_TYPE_SYSTEM; + sclp_tty_driver.subtype = SYSTEM_TYPE_TTY; + sclp_tty_driver.init_termios = tty_std_termios; + sclp_tty_driver.flags = TTY_DRIVER_REAL_RAW; + sclp_tty_driver.refcount = &sclp_tty_refcount; + /* sclp_tty_driver.proc_entry ? */ + sclp_tty_driver.table = sclp_tty_table; + sclp_tty_driver.termios = sclp_tty_termios; + sclp_tty_driver.termios_locked = sclp_tty_termios_locked; + sclp_tty_driver.open = sclp_tty_open; + sclp_tty_driver.close = sclp_tty_close; + sclp_tty_driver.write = sclp_tty_write; + sclp_tty_driver.put_char = sclp_tty_put_char; + sclp_tty_driver.flush_chars = sclp_tty_flush_chars; + sclp_tty_driver.write_room = sclp_tty_write_room; + sclp_tty_driver.chars_in_buffer = sclp_tty_chars_in_buffer; + sclp_tty_driver.flush_buffer = sclp_tty_flush_buffer; + sclp_tty_driver.ioctl = sclp_tty_ioctl; + /* + * No need for these function because they would be only called when + * the line discipline is close to full. That means that there must be + * collected nearly 4kB of input data. I suppose it is very difficult + * for the operator to enter lines quickly enough to let overrun the + * line discipline. Besides the n_tty line discipline does not try to + * call such functions if the pointers are set to NULL. Finally I have + * no idea what to do within these function. I can not prevent the + * operator and the SCLP to deliver input. Because of the reasons + * above it seems not worth to implement a buffer mechanism. 
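sclp_switch_cases() above walks the EBCDIC input once: a single occurrence of the delimiter toggles case conversion on or off, while a doubled delimiter collapses to one literal delimiter in the output. The ASCII model below uses tolower()/toupper() in place of the _ebc_tolower/_ebc_toupper tables, purely to illustrate the toggling; '%' is used because the driver's default delimiter 0x6c is '%' in EBCDIC:

/* ASCII model of sclp_switch_cases(): one delimiter toggles case conversion,
 * a doubled delimiter emits a single literal delimiter. tolower()/toupper()
 * replace the EBCDIC translate tables used by the driver. */
#include <stdio.h>
#include <ctype.h>
#include <string.h>

static int switch_cases(char *buf, int count, char delim, int to_lower)
{
	char *ip = buf, *op = buf;
	int toggle = 0;

	while (count-- > 0) {
		if (*ip == delim) {
			if (count && ip[1] == delim) {
				*op++ = *ip++;	/* "%%" -> one literal '%' */
				count--;
			} else {
				toggle = !toggle;	/* single '%' flips conversion */
			}
			ip++;			/* skip the delimiter itself */
		} else if (toggle) {
			/* input was lowercased, so toggled spans go back to upper case */
			*op++ = to_lower ? toupper((unsigned char) *ip++)
					 : tolower((unsigned char) *ip++);
		} else {
			*op++ = *ip++;		/* copy unchanged */
		}
	}
	return op - buf;	/* length of the (possibly shorter) result */
}

int main(void)
{
	char line[] = "ls %-l% /tmp %% done";
	int len = switch_cases(line, strlen(line), '%', 1);

	printf("%.*s\n", len, line);	/* prints: ls -L /tmp % done */
	return 0;
}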
+ */ + sclp_tty_driver.throttle = NULL; + sclp_tty_driver.unthrottle = NULL; + sclp_tty_driver.send_xchar = NULL; + sclp_tty_driver.set_termios = NULL; + sclp_tty_driver.set_ldisc = NULL; + sclp_tty_driver.stop = NULL; + sclp_tty_driver.start = NULL; + sclp_tty_driver.hangup = NULL; + sclp_tty_driver.break_ctl = NULL; + sclp_tty_driver.wait_until_sent = NULL; + sclp_tty_driver.read_proc = NULL; + sclp_tty_driver.write_proc = NULL; + + rc = tty_register_driver(&sclp_tty_driver); + if (rc != 0) + printk(KERN_ERR SCLP_TTY_PRINT_HEADER + "could not register tty - " + "sclp_drv_register returned %d\n", rc); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/sclp_tty.h linux.22-ac2/drivers/s390/char/sclp_tty.h --- linux.vanilla/drivers/s390/char/sclp_tty.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/sclp_tty.h 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,67 @@ +/* + * drivers/s390/char/sclp_tty.h + * interface to the SCLP-read/write driver + * + * S390 version + * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Peschke + * Martin Schwidefsky + */ + +#ifndef __SCLP_TTY_H__ +#define __SCLP_TTY_H__ + +#include + +/* This is the type of data structures storing sclp ioctl setting. */ +struct sclp_ioctls { + unsigned short htab; + unsigned char echo; + unsigned short columns; + unsigned char final_nl; + unsigned short max_sccb; + unsigned short kmem_sccb; /* can't be modified at run time */ + unsigned char tolower; + unsigned char delim; +}; + +/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */ +#define SCLP_IOCTL_LETTER 'B' + +/* set width of horizontal tabulator */ +#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short) +/* enable/disable echo of input (independent from line discipline) */ +#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char) +/* set number of colums for output */ +#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short) +/* enable/disable writing without final new line character */ +#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char) +/* set the maximum buffers size for output, rounded up to next 4kB boundary */ +#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short) +/* set initial (default) sclp ioctls */ +#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6) +/* enable/disable conversion from upper to lower case of input */ +#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char) +/* set special character used for separating upper and lower case, */ +/* 0x00 disables this feature */ +#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char) + +/* get width of horizontal tabulator */ +#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short) +/* Is echo of input enabled ? (independent from line discipline) */ +#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char) +/* get number of colums for output */ +#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short) +/* Is writing without final new line character enabled ? */ +#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char) +/* get the maximum buffers size for output */ +#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short) +/* Is conversion from upper to lower case of input enabled ? 
*/ +#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char) +/* get special character used for separating upper and lower case, */ +/* 0x00 disables this feature */ +#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char) +/* get the number of buffers/pages got from kernel at startup */ +#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short) + +#endif /* __SCLP_TTY_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape3480.c linux.22-ac2/drivers/s390/char/tape3480.c --- linux.vanilla/drivers/s390/char/tape3480.c 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape3480.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,156 +0,0 @@ -/*************************************************************************** - * - * drivers/s390/char/tape3480.c - * tape device discipline for 3480 tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include /* CCW allocations */ -#include -#include -#include -#include "tape.h" -#include "tape34xx.h" -#include "tape3480.h" - -tape_event_handler_t tape3480_event_handler_table[TS_SIZE][TE_SIZE] = -{ - /* {START , DONE, FAILED, ERROR, OTHER } */ - {NULL, tape34xx_unused_done, NULL, NULL, NULL}, /* TS_UNUSED */ - {NULL, tape34xx_idle_done, NULL, NULL, NULL}, /* TS_IDLE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_DONE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_FAILED */ - {NULL, tape34xx_block_done, NULL, NULL, NULL}, /* TS_BLOCK_INIT */ - {NULL, tape34xx_bsb_init_done, NULL, NULL, NULL}, /* TS_BSB_INIT */ - {NULL, tape34xx_bsf_init_done, NULL, NULL, NULL}, /* TS_BSF_INIT */ - {NULL, tape34xx_dse_init_done, NULL, NULL, NULL}, /* TS_DSE_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_EGA_INIT */ - {NULL, tape34xx_fsb_init_done, NULL, NULL, NULL}, /* TS_FSB_INIT */ - {NULL, tape34xx_fsf_init_done, NULL, NULL, NULL}, /* TS_FSF_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_LDI_INIT */ - {NULL, tape34xx_lbl_init_done, NULL, NULL, NULL}, /* TS_LBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_MSE_INIT */ - {NULL, tape34xx_nop_init_done, NULL, NULL, NULL}, /* TS_NOP_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RBA_INIT */ - {NULL, tape34xx_rbi_init_done, NULL, NULL, NULL}, /* TS_RBI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBU_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RDC_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RFO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RSD_INIT */ - {NULL, tape34xx_rew_init_done, NULL, NULL, NULL}, /* TS_REW_INIT */ - {NULL, tape34xx_rew_release_init_done, NULL, NULL, NULL}, /* TS_REW_RELEASE_IMIT */ - {NULL, tape34xx_run_init_done, NULL, NULL, NULL}, /* TS_RUN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SEN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SID_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SNP_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SPG_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SWI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SMR_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SYN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_TIO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_UNA_INIT */ - {NULL, tape34xx_wri_init_done, NULL, NULL, NULL}, /* TS_WRI_INIT */ - {NULL, tape34xx_wtm_init_done, NULL, NULL, 
NULL}, /* TS_WTM_INIT */ - {NULL, NULL, NULL, NULL, NULL}}; /* TS_NOT_OPER */ - -devreg_t tape3480_devreg = { - ci: - {hc: - {ctype:0x3480}}, - flag:DEVREG_MATCH_CU_TYPE | DEVREG_TYPE_DEVCHARS, - oper_func:tape_oper_handler -}; - - -void -tape3480_setup_assist (tape_info_t * ti) -{ - tape3480_disc_data_t *data = NULL; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"3480 dsetu"); - debug_text_event (tape_debug_area,6,"dev:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif /* TAPE_DEBUG */ - while (data == NULL) - data = kmalloc (sizeof (tape3480_disc_data_t), GFP_KERNEL); - data->modeset_byte = 0x00; - ti->discdata = (void *) data; -} - - -void -tape3480_shutdown (int autoprobe) { - if (autoprobe) - s390_device_unregister(&tape3480_devreg); -} - -tape_discipline_t * -tape3480_init (int autoprobe) -{ - tape_discipline_t *disc; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"3480 init"); -#endif /* TAPE_DEBUG */ - disc = kmalloc (sizeof (tape_discipline_t), GFP_KERNEL); - if (disc == NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"disc:nomem"); -#endif /* TAPE_DEBUG */ - return disc; - } - disc->cu_type = 0x3480; - disc->setup_assist = tape3480_setup_assist; - disc->error_recovery = tape34xx_error_recovery; - disc->write_block = tape34xx_write_block; - disc->free_write_block = tape34xx_free_write_block; - disc->read_block = tape34xx_read_block; - disc->free_read_block = tape34xx_free_read_block; - disc->mtfsf = tape34xx_mtfsf; - disc->mtbsf = tape34xx_mtbsf; - disc->mtfsr = tape34xx_mtfsr; - disc->mtbsr = tape34xx_mtbsr; - disc->mtweof = tape34xx_mtweof; - disc->mtrew = tape34xx_mtrew; - disc->mtoffl = tape34xx_mtoffl; - disc->mtnop = tape34xx_mtnop; - disc->mtbsfm = tape34xx_mtbsfm; - disc->mtfsfm = tape34xx_mtfsfm; - disc->mteom = tape34xx_mteom; - disc->mterase = tape34xx_mterase; - disc->mtsetdensity = tape34xx_mtsetdensity; - disc->mtseek = tape34xx_mtseek; - disc->mttell = tape34xx_mttell; - disc->mtsetdrvbuffer = tape34xx_mtsetdrvbuffer; - disc->mtlock = tape34xx_mtlock; - disc->mtunlock = tape34xx_mtunlock; - disc->mtload = tape34xx_mtload; - disc->mtunload = tape34xx_mtunload; - disc->mtcompression = tape34xx_mtcompression; - disc->mtsetpart = tape34xx_mtsetpart; - disc->mtmkpart = tape34xx_mtmkpart; - disc->mtiocget = tape34xx_mtiocget; - disc->mtiocpos = tape34xx_mtiocpos; - disc->shutdown = tape3480_shutdown; - disc->discipline_ioctl_overload = tape34xx_ioctl_overload; - disc->event_table = &tape3480_event_handler_table; - disc->default_handler = tape34xx_default_handler; - disc->bread = tape34xx_bread; - disc->free_bread = tape34xx_free_bread; - disc->tape = NULL; /* pointer for backreference */ - disc->next = NULL; - if (autoprobe) - s390_device_register(&tape3480_devreg); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"3480 regis"); -#endif /* TAPE_DEBUG */ - return disc; -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape3480.h linux.22-ac2/drivers/s390/char/tape3480.h --- linux.vanilla/drivers/s390/char/tape3480.h 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape3480.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -/*************************************************************************** - * - * drivers/s390/char/tape3480.h - * tape device discipline for 3480 tapes. 
- * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#ifndef _TAPE3480_H - -#define _TAPE3480_H - - -typedef struct _tape3480_disc_data_t { - __u8 modeset_byte; -} tape3480_disc_data_t __attribute__ ((packed, aligned(8))); -tape_discipline_t * tape3480_init (int); -#endif // _TAPE3480_H diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape3490.c linux.22-ac2/drivers/s390/char/tape3490.c --- linux.vanilla/drivers/s390/char/tape3490.c 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape3490.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,156 +0,0 @@ -/*************************************************************************** - * - * drivers/s390/char/tape3490.c - * tape device discipline for 3490E tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include /* CCW allocations */ -#include -#include -#include -#include "tape.h" -#include "tape34xx.h" -#include "tape3490.h" - -tape_event_handler_t tape3490_event_handler_table[TS_SIZE][TE_SIZE] = -{ - /* {START , DONE, FAILED, ERROR, OTHER } */ - {NULL, tape34xx_unused_done, NULL, NULL, NULL}, /* TS_UNUSED */ - {NULL, tape34xx_idle_done, NULL, NULL, NULL}, /* TS_IDLE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_DONE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_FAILED */ - {NULL, tape34xx_block_done, NULL, NULL, NULL}, /* TS_BLOCK_INIT */ - {NULL, tape34xx_bsb_init_done, NULL, NULL, NULL}, /* TS_BSB_INIT */ - {NULL, tape34xx_bsf_init_done, NULL, NULL, NULL}, /* TS_BSF_INIT */ - {NULL, tape34xx_dse_init_done, NULL, NULL, NULL}, /* TS_DSE_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_EGA_INIT */ - {NULL, tape34xx_fsb_init_done, NULL, NULL, NULL}, /* TS_FSB_INIT */ - {NULL, tape34xx_fsf_init_done, NULL, NULL, NULL}, /* TS_FSF_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_LDI_INIT */ - {NULL, tape34xx_lbl_init_done, NULL, NULL, NULL}, /* TS_LBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_MSE_INIT */ - {NULL, tape34xx_nop_init_done, NULL, NULL, NULL}, /* TS_NOP_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RBA_INIT */ - {NULL, tape34xx_rbi_init_done, NULL, NULL, NULL}, /* TS_RBI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBU_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RDC_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RFO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RSD_INIT */ - {NULL, tape34xx_rew_init_done, NULL, NULL, NULL}, /* TS_REW_INIT */ - {NULL, tape34xx_rew_release_init_done, NULL, NULL, NULL}, /* TS_REW_RELEASE_IMIT */ - {NULL, tape34xx_run_init_done, NULL, NULL, NULL}, /* TS_RUN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SEN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SID_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SNP_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SPG_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SWI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SMR_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SYN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_TIO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_UNA_INIT */ - {NULL, tape34xx_wri_init_done, NULL, NULL, NULL}, /* TS_WRI_INIT */ - {NULL, 
tape34xx_wtm_init_done, NULL, NULL, NULL}, /* TS_WTM_INIT */ - {NULL, NULL, NULL, NULL, NULL}}; /* TS_NOT_OPER */ - -devreg_t tape3490_devreg = { - ci: - {hc: - {ctype:0x3490}}, - flag:DEVREG_MATCH_CU_TYPE | DEVREG_TYPE_DEVCHARS, - oper_func:tape_oper_handler -}; - -void -tape3490_setup_assist (tape_info_t * ti) -{ - tape3490_disc_data_t *data = NULL; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"3490 dsetu"); - debug_text_event (tape_debug_area,6,"dev:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif /* TAPE_DEBUG */ - while (data == NULL) - data = kmalloc (sizeof (tape3490_disc_data_t), GFP_KERNEL); - data->modeset_byte = 0x00; - ti->discdata = (void *) data; -} - - -void -tape3490_shutdown (int autoprobe) { - if (autoprobe) - s390_device_unregister(&tape3490_devreg); -} - - -tape_discipline_t * -tape3490_init (int autoprobe) -{ - tape_discipline_t *disc; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"3490 init"); -#endif /* TAPE_DEBUG */ - disc = kmalloc (sizeof (tape_discipline_t), GFP_KERNEL); - if (disc == NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"disc:nomem"); -#endif /* TAPE_DEBUG */ - return disc; - } - disc->cu_type = 0x3490; - disc->setup_assist = tape3490_setup_assist; - disc->error_recovery = tape34xx_error_recovery; - disc->write_block = tape34xx_write_block; - disc->free_write_block = tape34xx_free_write_block; - disc->read_block = tape34xx_read_block; - disc->free_read_block = tape34xx_free_read_block; - disc->mtfsf = tape34xx_mtfsf; - disc->mtbsf = tape34xx_mtbsf; - disc->mtfsr = tape34xx_mtfsr; - disc->mtbsr = tape34xx_mtbsr; - disc->mtweof = tape34xx_mtweof; - disc->mtrew = tape34xx_mtrew; - disc->mtoffl = tape34xx_mtoffl; - disc->mtnop = tape34xx_mtnop; - disc->mtbsfm = tape34xx_mtbsfm; - disc->mtfsfm = tape34xx_mtfsfm; - disc->mteom = tape34xx_mteom; - disc->mterase = tape34xx_mterase; - disc->mtsetdensity = tape34xx_mtsetdensity; - disc->mtseek = tape34xx_mtseek; - disc->mttell = tape34xx_mttell; - disc->mtsetdrvbuffer = tape34xx_mtsetdrvbuffer; - disc->mtlock = tape34xx_mtlock; - disc->mtunlock = tape34xx_mtunlock; - disc->mtload = tape34xx_mtload; - disc->mtunload = tape34xx_mtunload; - disc->mtcompression = tape34xx_mtcompression; - disc->mtsetpart = tape34xx_mtsetpart; - disc->mtmkpart = tape34xx_mtmkpart; - disc->mtiocget = tape34xx_mtiocget; - disc->mtiocpos = tape34xx_mtiocpos; - disc->shutdown = tape3490_shutdown; - disc->discipline_ioctl_overload = tape34xx_ioctl_overload; - disc->event_table = &tape3490_event_handler_table; - disc->default_handler = tape34xx_default_handler; - disc->bread = tape34xx_bread; - disc->free_bread = tape34xx_free_bread; - disc->tape = NULL; /* pointer for backreference */ - disc->next = NULL; - if (autoprobe) - s390_device_register(&tape3490_devreg); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"3490 regis"); -#endif /* TAPE_DEBUG */ - return disc; -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape3490.h linux.22-ac2/drivers/s390/char/tape3490.h --- linux.vanilla/drivers/s390/char/tape3490.h 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape3490.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tape3490.h - * tape device discipline for 3490E tapes. 
- * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#ifndef _TAPE3490_H - -#define _TAPE3490_H - - -typedef struct _tape3490_disc_data_t { - __u8 modeset_byte; -} tape3490_disc_data_t __attribute__ ((packed, aligned(8))); -tape_discipline_t * tape3490_init (int); -#endif // _TAPE3490_H diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape_34xx.c linux.22-ac2/drivers/s390/char/tape_34xx.c --- linux.vanilla/drivers/s390/char/tape_34xx.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape_34xx.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,1340 @@ +/* + * drivers/s390/char/tape_34xx.c + * tape device discipline for 3480/3490 tapes. + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#include +#include +#include +#include +#include + +#include "tape.h" +#include "tape_std.h" + +#define PRINTK_HEADER "T34xx:" + +/* + * The block ID is the complete marker for a specific tape position. + * It contains a physical part (wrap, segment, format) and a logical + * block number. + */ +#define TBI_FORMAT_3480 0x00 +#define TBI_FORMAT_3480_2_XF 0x01 +#define TBI_FORMAT_3480_XF 0x02 +#define TBI_FORMAT_RESERVED 0x03 + +struct tape_34xx_block_id { + unsigned int tbi_wrap : 1; + unsigned int tbi_segment : 7; + unsigned int tbi_format : 2; + unsigned int tbi_block : 22; +} __attribute__ ((packed)); + +struct sbid_entry { + struct list_head list; + struct tape_34xx_block_id bid; +}; + +struct tape_34xx_discdata { + /* A list of block id's of the tape segments (for faster seek) */ + struct list_head sbid_list; +}; + +/* Internal prototypes */ +static void tape_34xx_clear_sbid_list(struct tape_device *); + +/* 34xx specific functions */ +static void +__tape_34xx_medium_sense_callback(struct tape_request *request, void *data) +{ + unsigned char *sense = request->cpdata; + + request->callback = NULL; + + DBF_EVENT(5, "TO_MSEN[0]: %08x\n", *((unsigned int *) sense)); + DBF_EVENT(5, "TO_MSEN[1]: %08x\n", *((unsigned int *) sense+1)); + DBF_EVENT(5, "TO_MSEN[2]: %08x\n", *((unsigned int *) sense+2)); + DBF_EVENT(5, "TO_MSEN[3]: %08x\n", *((unsigned int *) sense+3)); + + if(sense[0] & SENSE_INTERVENTION_REQUIRED) { + tape_med_state_set(request->device, MS_UNLOADED); + } else { + tape_med_state_set(request->device, MS_LOADED); + } + + if(sense[1] & SENSE_WRITE_PROTECT) { + request->device->tape_generic_status |= GMT_WR_PROT(~0); + } else{ + request->device->tape_generic_status &= ~GMT_WR_PROT(~0); + } + + tape_put_request(request); +} + +static int +tape_34xx_medium_sense(struct tape_device *device) +{ + struct tape_request * request; + int rc; + + tape_34xx_clear_sbid_list(device); + + request = tape_alloc_request(1, 32); + if(IS_ERR(request)) { + DBF_EXCEPTION(6, "MSN fail\n"); + return PTR_ERR(request); + } + + request->op = TO_MSEN; + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); + request->callback = __tape_34xx_medium_sense_callback; + + rc = tape_do_io_async(device, request); + + return rc; +} + +static void +tape_34xx_work_handler(void *data) +{ + struct { + struct tape_device *device; + enum tape_op op; + struct tq_struct task; + } *p = data; + + switch(p->op) { + case TO_MSEN: + tape_34xx_medium_sense(p->device); + break; + 
default: + DBF_EVENT(3, "T34XX: internal error: unknown work\n"); + } + + tape_put_device(p->device); + kfree(p); +} + +/* + * This function is currently used to schedule a sense for later execution. + * For example whenever a unsolicited interrupt signals a new tape medium + * and we can't call tape_do_io from that interrupt handler. + */ +static int +tape_34xx_schedule_work(struct tape_device *device, enum tape_op op) +{ + struct { + struct tape_device *device; + enum tape_op op; + struct tq_struct task; + } *p; + + if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) + return -ENOMEM; + + memset(p, 0, sizeof(*p)); + INIT_LIST_HEAD(&p->task.list); + p->task.routine = tape_34xx_work_handler; + p->task.data = p; + + p->device = tape_clone_device(device); + p->op = op; + + schedule_task(&p->task); + + return 0; +} + +/* + * Done Handler is called when dev stat = DEVICE-END (successful operation) + */ +static int +tape_34xx_done(struct tape_device *device, struct tape_request *request) +{ + DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); + // FIXME: Maybe only on assign/unassign + TAPE_CLEAR_STATE(device, TAPE_STATUS_BOXED); + + return TAPE_IO_SUCCESS; +} + +static inline int +tape_34xx_erp_failed(struct tape_device *device, + struct tape_request *request, int rc) +{ + DBF_EVENT(3, "Error recovery failed for %s\n", + tape_op_verbose[request->op]); + return rc; +} + +static inline int +tape_34xx_erp_succeeded(struct tape_device *device, + struct tape_request *request) +{ + DBF_EVENT(3, "Error Recovery successful for %s\n", + tape_op_verbose[request->op]); + return tape_34xx_done(device, request); +} + +static inline int +tape_34xx_erp_retry(struct tape_device *device, struct tape_request *request) +{ + DBF_EVENT(3, "xerp retr %s\n", + tape_op_verbose[request->op]); + return TAPE_IO_RETRY; +} + +/* + * This function is called, when no request is outstanding and we get an + * interrupt + */ +static int +tape_34xx_unsolicited_irq(struct tape_device *device) +{ + if (device->devstat.dstat == 0x85 /* READY */) { + /* A medium was inserted in the drive. */ + DBF_EVENT(6, "T34xx: tape load\n"); + tape_34xx_schedule_work(device, TO_MSEN); + } else { + DBF_EVENT(3, "T34xx: unsol.irq! dev end: %x\n", + device->devinfo.irq); + PRINT_WARN("Unsolicited IRQ (Device End) caught.\n"); + tape_dump_sense(device, NULL); + } + return TAPE_IO_SUCCESS; +} + +/* + * Read Opposite Error Recovery Function: + * Used, when Read Forward does not work + */ +static int +tape_34xx_erp_read_opposite(struct tape_device *device, + struct tape_request *request) +{ + if (request->op == TO_RFO) { + /* + * We did read forward, but the data could not be read + * *correctly*. We transform the request to a read backward + * and try again. + */ + tape_std_read_backward(device, request); + return tape_34xx_erp_retry(device, request); + } + if (request->op != TO_RBA) + PRINT_ERR("read_opposite called with state:%s\n", + tape_op_verbose[request->op]); + /* + * We tried to read forward and backward, but hat no + * success -> failed. 
+ */ + return tape_34xx_erp_failed(device, request, -EIO); +} + +static int +tape_34xx_erp_bug(struct tape_device *device, + struct tape_request *request, int no) +{ + if (request->op != TO_ASSIGN) { + PRINT_WARN("An unexpected condition #%d was caught in " + "tape error recovery.\n", no); + PRINT_WARN("Please report this incident.\n"); + if (request) + PRINT_WARN("Operation of tape:%s\n", + tape_op_verbose[request->op]); + tape_dump_sense(device, request); + } + return tape_34xx_erp_failed(device, request, -EIO); +} + +/* + * Handle data overrun between cu and drive. The channel speed might + * be too slow. + */ +static int +tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request) +{ + if (device->devstat.ii.sense.data[3] == 0x40) { + PRINT_WARN ("Data overrun error between control-unit " + "and drive. Use a faster channel connection, " + "if possible! \n"); + return tape_34xx_erp_failed(device, request, -EIO); + } + return tape_34xx_erp_bug(device, request, -1); +} + +/* + * Handle record sequence error. + */ +static int +tape_34xx_erp_sequence(struct tape_device *device, + struct tape_request *request) +{ + if (device->devstat.ii.sense.data[3] == 0x41) { + /* + * cu detected incorrect block-id sequence on tape. + */ + PRINT_WARN("Illegal block-id sequence found!\n"); + return tape_34xx_erp_failed(device, request, -EIO); + } + /* + * Record sequence error bit is set, but erpa does not + * show record sequence error. + */ + return tape_34xx_erp_bug(device, request, -2); +} + +/* + * This function analyses the tape's sense-data in case of a unit-check. + * If possible, it tries to recover from the error. Else the user is + * informed about the problem. + */ +static int +tape_34xx_unit_check(struct tape_device *device, struct tape_request *request) +{ + int inhibit_cu_recovery; + __u8* sense; + + inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0; + sense = device->devstat.ii.sense.data; + +#ifdef CONFIG_S390_TAPE_BLOCK + if (request->op == TO_BLOCK) { + /* + * Recovery for block device requests. Set the block_position + * to something invalid and retry. + */ + device->blk_data.block_position = -1; + if (request->retries-- <= 0) + return tape_34xx_erp_failed(device, request, -EIO); + else + return tape_34xx_erp_retry(device, request); + } +#endif + + if ( + sense[0] & SENSE_COMMAND_REJECT && + sense[1] & SENSE_WRITE_PROTECT + ) { + if ( + request->op == TO_DSE || + request->op == TO_WRI || + request->op == TO_WTM + ) { + /* medium is write protected */ + return tape_34xx_erp_failed(device, request, -EACCES); + } else { + return tape_34xx_erp_bug(device, request, -3); + } + } + + /* + * special cases for various tape-states when reaching + * end of recorded area + */ + /* + * FIXME: Maybe a special case of the special case: + * sense[0] == SENSE_EQUIPMENT_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE && + * sense[3] == 0x47 (Volume Fenced) + * + * This was caused by continued FSF or FSR after an + * 'End Of Data'. + */ + if (( + sense[0] == SENSE_DATA_CHECK || + sense[0] == SENSE_EQUIPMENT_CHECK || + sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK + ) && ( + sense[1] == SENSE_DRIVE_ONLINE || + sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE + )) { + switch (request->op) { + /* + * sense[0] == SENSE_DATA_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE + * sense[3] == 0x36 (End Of Data) + * + * Further seeks might return a 'Volume Fenced'. 
+ */ + case TO_FSF: + case TO_FSB: + /* Trying to seek beyond end of recorded area */ + return tape_34xx_erp_failed(device, request, -ENOSPC); + case TO_BSB: + return tape_34xx_erp_retry(device, request); + /* + * sense[0] == SENSE_DATA_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE && + * sense[3] == 0x36 (End Of Data) + */ + case TO_LBL: + /* Block could not be located. */ + return tape_34xx_erp_failed(device, request, -EIO); + case TO_RFO: + /* Read beyond end of recorded area -> 0 bytes read */ + return tape_34xx_erp_failed(device, request, 0); + default: + PRINT_ERR("Invalid op %s in %s:%i\n", + tape_op_verbose[request->op], + __FUNCTION__, __LINE__); + return tape_34xx_erp_failed(device, request, 0); + } + } + + /* Sensing special bits */ + if (sense[0] & SENSE_BUS_OUT_CHECK) + return tape_34xx_erp_retry(device, request); + + if (sense[0] & SENSE_DATA_CHECK) { + /* + * hardware failure, damaged tape or improper + * operating conditions + */ + switch (sense[3]) { + case 0x23: + /* a read data check occurred */ + if ((sense[2] & SENSE_TAPE_SYNC_MODE) || + inhibit_cu_recovery) + // data check is not permanent, may be + // recovered. We always use async-mode with + // cu-recovery, so this should *never* happen. + return tape_34xx_erp_bug(device, request, -4); + + /* data check is permanent, CU recovery has failed */ + PRINT_WARN("Permanent read error\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x25: + // a write data check occurred + if ((sense[2] & SENSE_TAPE_SYNC_MODE) || + inhibit_cu_recovery) + // data check is not permanent, may be + // recovered. We always use async-mode with + // cu-recovery, so this should *never* happen. + return tape_34xx_erp_bug(device, request, -5); + + // data check is permanent, cu-recovery has failed + PRINT_WARN("Permanent write error\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x26: + /* Data Check (read opposite) occurred. */ + return tape_34xx_erp_read_opposite(device, request); + case 0x28: + /* ID-Mark at tape start couldn't be written */ + PRINT_WARN("ID-Mark could not be written.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x31: + /* Tape void. Tried to read beyond end of device. */ + PRINT_WARN("Read beyond end of recorded area.\n"); + return tape_34xx_erp_failed(device, request, -ENOSPC); + case 0x41: + /* Record sequence error. */ + PRINT_WARN("Invalid block-id sequence found.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + default: + /* all data checks for 3480 should result in one of + * the above erpa-codes. For 3490, other data-check + * conditions do exist. */ + if (device->discipline->cu_type == 0x3480) + return tape_34xx_erp_bug(device, request, -6); + } + } + + if (sense[0] & SENSE_OVERRUN) + return tape_34xx_erp_overrun(device, request); + + if (sense[1] & SENSE_RECORD_SEQUENCE_ERR) + return tape_34xx_erp_sequence(device, request); + + /* Sensing erpa codes */ + switch (sense[3]) { + case 0x00: + /* Unit check with erpa code 0. Report and ignore. */ + PRINT_WARN("Non-error sense was found. " + "Unit-check will be ignored.\n"); + return TAPE_IO_SUCCESS; + case 0x21: + /* + * Data streaming not operational. CU will switch to + * interlock mode. Reissue the command. + */ + PRINT_WARN("Data streaming not operational. " + "Switching to interlock-mode.\n"); + return tape_34xx_erp_retry(device, request); + case 0x22: + /* + * Path equipment check. 
Might be drive adapter error, buffer + * error on the lower interface, internal path not usable, + * or error during cartridge load. + */ + PRINT_WARN("A path equipment check occurred. One of the " + "following conditions occurred:\n"); + PRINT_WARN("drive adapter error, buffer error on the lower " + "interface, internal path not usable, error " + "during cartridge load.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x24: + /* + * Load display check. Load display was command was issued, + * but the drive is displaying a drive check message. Can + * be threated as "device end". + */ + return tape_34xx_erp_succeeded(device, request); + case 0x27: + /* + * Command reject. May indicate illegal channel program or + * buffer over/underrun. Since all channel programs are + * issued by this driver and ought be correct, we assume a + * over/underrun situation and retry the channel program. + */ + return tape_34xx_erp_retry(device, request); + case 0x29: + /* + * Function incompatible. Either the tape is idrc compressed + * but the hardware isn't capable to do idrc, or a perform + * subsystem func is issued and the CU is not on-line. + */ + PRINT_WARN ("Function incompatible. Try to switch off idrc\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x2a: + /* + * Unsolicited environmental data. An internal counter + * overflows, we can ignore this and reissue the cmd. + */ + return tape_34xx_erp_retry(device, request); + case 0x2b: + /* + * Environmental data present. Indicates either unload + * completed ok or read buffered log command completed ok. + */ + if (request->op == TO_RUN) { + tape_med_state_set(device, MS_UNLOADED); + /* Rewind unload completed ok. */ + return tape_34xx_erp_succeeded(device, request); + } + /* tape_34xx doesn't use read buffered log commands. */ + return tape_34xx_erp_bug(device, request, sense[3]); + case 0x2c: + /* + * Permanent equipment check. CU has tried recovery, but + * did not succeed. + */ + return tape_34xx_erp_failed(device, request, -EIO); + case 0x2d: + /* Data security erase failure. */ + if (request->op == TO_DSE) + return tape_34xx_erp_failed(device, request, -EIO); + /* Data security erase failure, but no such command issued. */ + return tape_34xx_erp_bug(device, request, sense[3]); + case 0x2e: + /* + * Not capable. This indicates either that the drive fails + * reading the format id mark or that that format specified + * is not supported by the drive. + */ + PRINT_WARN("Drive not capable processing the tape format!"); + return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE); + case 0x30: + /* The medium is write protected. */ + PRINT_WARN("Medium is write protected!\n"); + return tape_34xx_erp_failed(device, request, -EACCES); + case 0x32: + // Tension loss. We cannot recover this, it's an I/O error. + PRINT_WARN("The drive lost tape tension.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x33: + /* + * Load Failure. The cartridge was not inserted correctly or + * the tape is not threaded correctly. + */ + PRINT_WARN("Cartridge load failure. Reload the cartridge " + "and try again.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x34: + /* + * Unload failure. The drive cannot maintain tape tension + * and control tape movement during an unload operation. + */ + PRINT_WARN("Failure during cartridge unload. 
" + "Please try manually.\n"); + if (request->op == TO_RUN) + return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_bug(device, request, sense[3]); + case 0x35: + /* + * Drive equipment check. One of the following: + * - cu cannot recover from a drive detected error + * - a check code message is shown on drive display + * - the cartridge loader does not respond correctly + * - a failure occurs during an index, load, or unload cycle + */ + PRINT_WARN("Equipment check! Please check the drive and " + "the cartridge loader.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x36: + if (device->discipline->cu_type == 0x3490) + /* End of data. */ + return tape_34xx_erp_failed(device, request, -EIO); + /* This erpa is reserved for 3480 */ + return tape_34xx_erp_bug(device,request,sense[3]); + case 0x37: + /* + * Tape length error. The tape is shorter than reported in + * the beginning-of-tape data. + */ + PRINT_WARN("Tape length error.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x38: + /* + * Physical end of tape. A read/write operation reached + * the physical end of tape. + */ + if (request->op==TO_WRI || + request->op==TO_DSE || + request->op==TO_WTM) + return tape_34xx_erp_failed(device, request, -ENOSPC); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x39: + /* Backward at Beginning of tape. */ + return tape_34xx_erp_failed(device, request, -EIO); + case 0x3a: + /* Drive switched to not ready. */ + PRINT_WARN("Drive not ready. Turn the ready/not ready switch " + "to ready position and try again.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x3b: + /* Manual rewind or unload. This causes an I/O error. */ + PRINT_WARN("Medium was rewound or unloaded manually.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x42: + /* + * Degraded mode. A condition that can cause degraded + * performance is detected. + */ + PRINT_WARN("Subsystem is running in degraded mode.\n"); + return tape_34xx_erp_retry(device, request); + case 0x43: + /* Drive not ready. */ + tape_med_state_set(device, MS_UNLOADED); + /* SMB: some commands do not need a tape inserted */ + if((sense[1] & SENSE_DRIVE_ONLINE)) { + switch(request->op) { + case TO_ASSIGN: + case TO_UNASSIGN: + case TO_DIS: + return tape_34xx_done(device, request); + break; + default: + break; + } + } + PRINT_WARN("The drive is not ready.\n"); + return tape_34xx_erp_failed(device, request, -ENOMEDIUM); + case 0x44: + /* Locate Block unsuccessful. */ + if (request->op != TO_BLOCK && request->op != TO_LBL) + /* No locate block was issued. */ + return tape_34xx_erp_bug(device, request, sense[3]); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x45: + /* The drive is assigned to a different channel path. */ + PRINT_WARN("The drive is assigned elsewhere.\n"); + TAPE_SET_STATE(device, TAPE_STATUS_BOXED); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x46: + /* + * Drive not on-line. Drive may be switched offline, + * the power supply may be switched off or + * the drive address may not be set correctly. + */ + PRINT_WARN("The drive is not on-line."); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x47: + /* Volume fenced. CU reports volume integrity is lost. */ + PRINT_WARN("Volume fenced. The volume integrity is lost because\n"); + PRINT_WARN("assignment or tape position was lost.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x48: + /* Log sense data and retry request. 
*/ + return tape_34xx_erp_retry(device, request); + case 0x49: + /* Bus out check. A parity check error on the bus was found. */ + PRINT_WARN("Bus out check. A data transfer over the bus " + "has been corrupted.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x4a: + /* Control unit erp failed. */ + PRINT_WARN("The control unit I/O error recovery failed.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x4b: + /* + * CU and drive incompatible. The drive requests micro-program + * patches, which are not available on the CU. + */ + PRINT_WARN("The drive needs microprogram patches from the " + "control unit, which are not available.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x4c: + /* + * Recovered Check-One failure. Cu develops a hardware error, + * but is able to recover. + */ + return tape_34xx_erp_retry(device, request); + case 0x4d: + if (device->discipline->cu_type == 0x3490) + /* + * Resetting event received. Since the driver does + * not support resetting event recovery (which has to + * be handled by the I/O Layer), retry our command. + */ + return tape_34xx_erp_retry(device, request); + /* This erpa is reserved for 3480. */ + return tape_34xx_erp_bug(device, request, sense[3]); + case 0x4e: + if (device->discipline->cu_type == 0x3490) { + /* + * Maximum block size exceeded. This indicates, that + * the block to be written is larger than allowed for + * buffered mode. + */ + PRINT_WARN("Maximum block size for buffered " + "mode exceeded.\n"); + return tape_34xx_erp_failed(device, request, -ENOBUFS); + } + /* This erpa is reserved for 3480. */ + return tape_34xx_erp_bug(device, request, sense[3]); + case 0x50: + /* + * Read buffered log (Overflow). CU is running in extended + * buffered log mode, and a counter overflows. This should + * never happen, since we're never running in extended + * buffered log mode. + */ + return tape_34xx_erp_retry(device, request); + case 0x51: + /* + * Read buffered log (EOV). EOF processing occurs while the + * CU is in extended buffered log mode. This should never + * happen, since we're never running in extended buffered + * log mode. + */ + return tape_34xx_erp_retry(device, request); + case 0x52: + /* End of Volume complete. Rewind unload completed ok. */ + if (request->op == TO_RUN) { + /* SMB */ + tape_med_state_set(device, MS_UNLOADED); + return tape_34xx_erp_succeeded(device, request); + } + return tape_34xx_erp_bug(device, request, sense[3]); + case 0x53: + /* Global command intercept. */ + return tape_34xx_erp_retry(device, request); + case 0x54: + /* Channel interface recovery (temporary). */ + return tape_34xx_erp_retry(device, request); + case 0x55: + /* Channel interface recovery (permanent). */ + PRINT_WARN("A permanent channel interface error occurred.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x56: + /* Channel protocol error. */ + PRINT_WARN("A channel protocol error occurred.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x57: + if (device->discipline->cu_type == 0x3480) { + /* Attention intercept. */ + PRINT_WARN("An attention intercept occurred, " + "which will be recovered.\n"); + return tape_34xx_erp_retry(device, request); + } else { + /* Global status intercept. */ + PRINT_WARN("An global status intercept was received, " + "which will be recovered.\n"); + return tape_34xx_erp_retry(device, request); + } + case 0x5a: + /* + * Tape length incompatible. 
The tape inserted is too long, + * which could cause damage to the tape or the drive. + */ + PRINT_WARN("Tape length incompatible [should be IBM Cartridge " + "System Tape]. May cause damage to drive or tape.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x5b: + /* Format 3480 XF incompatible */ + if (sense[1] & SENSE_BEGINNING_OF_TAPE) + /* The tape will get overwritten. */ + return tape_34xx_erp_retry(device, request); + PRINT_WARN("Tape format is incompatible to the drive, " + "which writes 3480-2 XF.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x5c: + /* Format 3480-2 XF incompatible */ + PRINT_WARN("Tape format is incompatible to the drive. " + "The drive cannot access 3480-2 XF volumes.\n"); + return tape_34xx_erp_failed(device, request, -EIO); + case 0x5d: + /* Tape length violation. */ + PRINT_WARN("Tape length violation [should be IBM Enhanced " + "Capacity Cartridge System Tape]. May cause " + "damage to drive or tape.\n"); + return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE); + case 0x5e: + /* Compaction algorithm incompatible. */ + PRINT_WARN("The volume is recorded using an incompatible " + "compaction algorithm, which is not supported by " + "the control unit.\n"); + return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE); + + /* The following erpas should have been covered earlier. */ + case 0x23: /* Read data check. */ + case 0x25: /* Write data check. */ + case 0x26: /* Data check (read opposite). */ + case 0x28: /* Write id mark check. */ + case 0x31: /* Tape void. */ + case 0x40: /* Overrun error. */ + case 0x41: /* Record sequence error. */ + /* All other erpas are reserved for future use. */ + default: + return tape_34xx_erp_bug(device, request, sense[3]); + } +} + +/* + * 3480/3490 interrupt handler + */ +static int +tape_34xx_irq(struct tape_device *device, struct tape_request *request) +{ + if (request == NULL) + return tape_34xx_unsolicited_irq(device); + + if ((device->devstat.dstat & DEV_STAT_UNIT_EXCEP) && + (device->devstat.dstat & DEV_STAT_DEV_END) && + (request->op == TO_WRI)) { + /* Write at end of volume */ + PRINT_INFO("End of volume\n"); /* XXX */ + return tape_34xx_erp_failed(device, request, -ENOSPC); + } + + if ((device->devstat.dstat & DEV_STAT_UNIT_EXCEP) && + (request->op == TO_BSB || request->op == TO_FSB)) + DBF_EVENT(5, "Skipped over tapemark\n"); + + if (device->devstat.dstat & DEV_STAT_UNIT_CHECK) + return tape_34xx_unit_check(device, request); + + if (device->devstat.dstat & DEV_STAT_DEV_END) + return tape_34xx_done(device, request); + + DBF_EVENT(6, "xunknownirq\n"); + PRINT_ERR("Unexpected interrupt.\n"); + PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]); + tape_dump_sense(device, request); + return TAPE_IO_STOP; +} + +/* + * ioctl_overload + */ +static int +tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) +{ + if (cmd == TAPE390_DISPLAY) { + struct display_struct disp; + + if(copy_from_user(&disp, (char *) arg, sizeof(disp)) != 0) + return -EFAULT; + + return tape_std_display(device, &disp); + } else + return -EINVAL; +} + +static int +tape_34xx_setup_device(struct tape_device * device) +{ + struct tape_34xx_discdata *discdata; + + DBF_EVENT(5, "tape_34xx_setup_device(%p)\n", device); + DBF_EVENT(6, "34xx minor1: %x\n", device->first_minor); + discdata = kmalloc(sizeof(struct tape_34xx_discdata), GFP_ATOMIC); + if(discdata) { + memset(discdata, 0, sizeof(struct tape_34xx_discdata)); + INIT_LIST_HEAD(&discdata->sbid_list); + device->discdata = 
discdata; + } + tape_34xx_medium_sense(device); + return 0; +} + +static void +tape_34xx_cleanup_device(struct tape_device * device) +{ + if (device->discdata) { + tape_34xx_clear_sbid_list(device); + kfree(device->discdata); + device->discdata = NULL; + } +} + +/* + * Build up the lookup table... + */ +static void +tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid) +{ + struct tape_34xx_discdata * discdata = device->discdata; + struct sbid_entry * new; + struct sbid_entry * cur; + struct list_head * l; + + if(discdata == NULL) + return; + if((new = kmalloc(sizeof(struct sbid_entry), GFP_ATOMIC)) == NULL) + return; + + new->bid = bid; + new->bid.tbi_format = 0; + + /* + * Search the position where to insert the new entry. It is possible + * that the entry should not be added but the block number has to be + * updated to approximate the logical block, where a segment starts. + */ + list_for_each(l, &discdata->sbid_list) { + cur = list_entry(l, struct sbid_entry, list); + + /* + * If the current entry has the same segment and wrap, then + * there is no new entry needed. Only the block number of the + * current entry might be adjusted to reflect an earlier start + * of the segment. + */ + if( + (cur->bid.tbi_segment == new->bid.tbi_segment) && + (cur->bid.tbi_wrap == new->bid.tbi_wrap) + ) { + if(new->bid.tbi_block < cur->bid.tbi_block) { + cur->bid.tbi_block = new->bid.tbi_block; + } + kfree(new); + break; + } + + /* + * Otherwise the list is sorted by block number because it + * is alway ascending while the segment number decreases on + * the second wrap. + */ + if(cur->bid.tbi_block > new->bid.tbi_block) { + list_add_tail(&new->list, l); + break; + } + } + + /* + * The loop went through without finding a merge or adding an entry + * add the new entry to the end of the list. + */ + if(l == &discdata->sbid_list) { + list_add_tail(&new->list, &discdata->sbid_list); + } + + list_for_each(l, &discdata->sbid_list) { + cur = list_entry(l, struct sbid_entry, list); + + DBF_EVENT(3, "sbid_list(%03i:%1i:%08i)\n", + cur->bid.tbi_segment, cur->bid.tbi_wrap, + cur->bid.tbi_block); + } + + return; +} + +/* + * Fill hardware positioning information into the given block id. With that + * seeks don't have to go back to the beginning of the tape and are done at + * faster speed because the vicinity of a segment can be located at faster + * speed. + * + * The caller must have set tbi_block. + */ +static void +tape_34xx_merge_sbid( + struct tape_device * device, + struct tape_34xx_block_id * bid +) { + struct tape_34xx_discdata * discdata = device->discdata; + struct sbid_entry * cur; + struct list_head * l; + + bid->tbi_wrap = 0; + bid->tbi_segment = 1; + bid->tbi_format = (*device->modeset_byte & 0x08) ? + TBI_FORMAT_3480_XF : TBI_FORMAT_3480; + + if(discdata == NULL) + goto tape_34xx_merge_sbid_exit; + if(list_empty(&discdata->sbid_list)) + goto tape_34xx_merge_sbid_exit; + + list_for_each(l, &discdata->sbid_list) { + cur = list_entry(l, struct sbid_entry, list); + + if(cur->bid.tbi_block > bid->tbi_block) + break; + } + + /* If block comes before first entries block, use seek from start. 
*/ + if(l->prev == &discdata->sbid_list) + goto tape_34xx_merge_sbid_exit; + + cur = list_entry(l->prev, struct sbid_entry, list); + bid->tbi_wrap = cur->bid.tbi_wrap; + bid->tbi_segment = cur->bid.tbi_segment; + +tape_34xx_merge_sbid_exit: + DBF_EVENT(6, "merged_bid = %08x\n", *((unsigned int *) bid)); + return; +} + +static void +tape_34xx_clear_sbid_list(struct tape_device *device) +{ + struct list_head * l; + struct list_head * n; + struct tape_34xx_discdata * discdata = device->discdata; + + list_for_each_safe(l, n, &discdata->sbid_list) { + list_del(l); + kfree(list_entry(l, struct sbid_entry, list)); + } +} + +/* + * MTTELL: Tell block. Return the number of block relative to current file. + */ +int +tape_34xx_mttell(struct tape_device *device, int mt_count) +{ + struct tape_34xx_block_id bid; + int rc; + + rc = tape_std_read_block_id(device, (unsigned int *) &bid); + if (rc) + return rc; + + /* + * Build up a lookup table. The format id is ingored. + */ + tape_34xx_add_sbid(device, bid); + + return bid.tbi_block; +} + +/* + * MTSEEK: seek to the specified block. + */ +int +tape_34xx_mtseek(struct tape_device *device, int mt_count) +{ + struct tape_34xx_block_id bid; + + if (mt_count > 0x400000) { + DBF_EXCEPTION(6, "xsee parm\n"); + return -EINVAL; + } + + bid.tbi_block = mt_count; + + /* + * Set hardware seek information in the block id. + */ + tape_34xx_merge_sbid(device, &bid); + + return tape_std_seek_block_id(device, *((unsigned int *) &bid)); +} + +/* + * Tape block read for 34xx. + */ +#ifdef CONFIG_S390_TAPE_BLOCK +struct tape_request * +tape_34xx_bread(struct tape_device *device, struct request *req) +{ + struct tape_request *request; + struct buffer_head *bh; + ccw1_t *ccw; + int count; + int size; + + DBF_EVENT(6, "tape_34xx_bread(sector=%u,size=%u)\n", + req->sector, req->nr_sectors); + + /* Count the number of blocks for the request. */ + count = 0; + size = 0; + for(bh = req->bh; bh; bh = bh->b_reqnext) { + for(size = 0; size < bh->b_size; size += TAPEBLOCK_HSEC_SIZE) + count++; + } + + /* Allocate the ccw request. */ + request = tape_alloc_request(3+count+1, 8); + if (IS_ERR(request)) + return request; + + /* + * Setup the tape block id to start the read from. The block number + * is later compared to the current position to decide whether a + * locate block is required. If one is needed this block id is used + * to locate it. + */ + ((struct tape_34xx_block_id *) request->cpdata)->tbi_block = + req->sector >> TAPEBLOCK_HSEC_S2B; + + /* Setup ccws. */ + request->op = TO_BLOCK; + ccw = request->cpaddr; + ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte); + + /* + * We always setup a nop after the mode set ccw. This slot is + * used in tape_std_check_locate to insert a locate ccw if the + * current tape position doesn't match the start block to be read. + * The second nop will be filled with a read block id which is in + * turn used by tape_34xx_free_bread to populate the segment bid + * table. 
+ */ + ccw = tape_ccw_cc(ccw, NOP, 0, NULL); + ccw = tape_ccw_cc(ccw, NOP, 0, NULL); + + for(bh = req->bh; bh; bh = bh->b_reqnext) { + for(size = 0; size < bh->b_size; size += TAPEBLOCK_HSEC_SIZE) { + ccw->flags = CCW_FLAG_CC; + ccw->cmd_code = READ_FORWARD; + ccw->count = TAPEBLOCK_HSEC_SIZE; + set_normalized_cda(ccw, (void *) __pa(bh->b_data+size)); + ccw++; + } + } + + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + + return request; +} + +void +tape_34xx_free_bread (struct tape_request *request) +{ + ccw1_t* ccw = request->cpaddr; + + if((ccw + 2)->cmd_code == READ_BLOCK_ID) { + struct { + struct tape_34xx_block_id channel_block_id; + struct tape_34xx_block_id device_block_id; + } __attribute__ ((packed)) *rbi_data; + + rbi_data = request->cpdata; + + if(!request->device) + DBF_EVENT(6, "tape_34xx_free_bread: no device!\n"); + DBF_EVENT(6, "tape_34xx_free_bread: update_sbid\n"); + tape_34xx_add_sbid( + request->device, + rbi_data->channel_block_id + ); + } else { + DBF_EVENT(3, "tape_34xx_free_bread: no block info\n"); + } + + /* Last ccw is a nop and doesn't need clear_normalized_cda */ + for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++) + if (ccw->cmd_code == READ_FORWARD) + clear_normalized_cda(ccw); + tape_put_request(request); +} + +/* + * check_locate is called just before the tape request is passed to + * the common io layer for execution. It has to check the current + * tape position and insert a locate ccw if it doesn't match the + * start block for the request. + */ +void +tape_34xx_check_locate(struct tape_device *device, struct tape_request *request) +{ + struct tape_34xx_block_id *id; + struct tape_34xx_block_id *start; + + id = (struct tape_34xx_block_id *) request->cpdata; + + /* + * The tape is already at the correct position. No seek needed. + */ + if (id->tbi_block == device->blk_data.block_position) + return; + + /* + * In case that the block device image doesn't start at the beginning + * of the tape, adjust the blocknumber for the locate request. + */ + start = (struct tape_34xx_block_id *) &device->blk_data.start_block_id; + if(start->tbi_block) + id->tbi_block = id->tbi_block + start->tbi_block; + + /* + * Merge HW positioning information to the block id. This information + * is used by the device for faster seeks. + */ + tape_34xx_merge_sbid(device, id); + + /* + * Transform the NOP to a LOCATE entry. + */ + tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); + tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata); + + return; +} +#endif + +static int +tape_34xx_mtweof(struct tape_device *device, int count) +{ + tape_34xx_clear_sbid_list(device); + return tape_std_mtweof(device, count); +} + +/* + * List of 3480/3490 magnetic tape commands. 
+ */ +static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = +{ + [MTRESET] = tape_std_mtreset, + [MTFSF] = tape_std_mtfsf, + [MTBSF] = tape_std_mtbsf, + [MTFSR] = tape_std_mtfsr, + [MTBSR] = tape_std_mtbsr, + [MTWEOF] = tape_34xx_mtweof, + [MTREW] = tape_std_mtrew, + [MTOFFL] = tape_std_mtoffl, + [MTNOP] = tape_std_mtnop, + [MTRETEN] = tape_std_mtreten, + [MTBSFM] = tape_std_mtbsfm, + [MTFSFM] = tape_std_mtfsfm, + [MTEOM] = tape_std_mteom, + [MTERASE] = tape_std_mterase, + [MTRAS1] = NULL, + [MTRAS2] = NULL, + [MTRAS3] = NULL, + [MTSETBLK] = tape_std_mtsetblk, + [MTSETDENSITY] = NULL, + [MTSEEK] = tape_34xx_mtseek, + [MTTELL] = tape_34xx_mttell, + [MTSETDRVBUFFER] = NULL, + [MTFSS] = NULL, + [MTBSS] = NULL, + [MTWSM] = NULL, + [MTLOCK] = NULL, + [MTUNLOCK] = NULL, + [MTLOAD] = tape_std_mtload, + [MTUNLOAD] = tape_std_mtunload, + [MTCOMPRESSION] = tape_std_mtcompression, + [MTSETPART] = NULL, + [MTMKPART] = NULL +}; + +/* + * Tape discipline structures for 3480 and 3490. + */ +static struct tape_discipline tape_discipline_3480 = { + .owner = THIS_MODULE, + .cu_type = 0x3480, + .setup_device = tape_34xx_setup_device, + .cleanup_device = tape_34xx_cleanup_device, + .process_eov = tape_std_process_eov, + .irq = tape_34xx_irq, + .read_block = tape_std_read_block, + .write_block = tape_std_write_block, + .assign = tape_std_assign, + .unassign = tape_std_unassign, +#ifdef TAPE390_FORCE_UNASSIGN + .force_unassign = tape_std_force_unassign, +#endif +#ifdef CONFIG_S390_TAPE_BLOCK + .bread = tape_34xx_bread, + .free_bread = tape_34xx_free_bread, + .check_locate = tape_34xx_check_locate, +#endif + .ioctl_fn = tape_34xx_ioctl, + .mtop_array = tape_34xx_mtop +}; + +static struct tape_discipline tape_discipline_3490 = { + .owner = THIS_MODULE, + .cu_type = 0x3490, + .setup_device = tape_34xx_setup_device, + .cleanup_device = tape_34xx_cleanup_device, + .process_eov = tape_std_process_eov, + .irq = tape_34xx_irq, + .read_block = tape_std_read_block, + .write_block = tape_std_write_block, + .assign = tape_std_assign, + .unassign = tape_std_unassign, +#ifdef TAPE390_FORCE_UNASSIGN + .force_unassign = tape_std_force_unassign, +#endif +#ifdef CONFIG_S390_TAPE_BLOCK + .bread = tape_34xx_bread, + .free_bread = tape_34xx_free_bread, + .check_locate = tape_34xx_check_locate, +#endif + .ioctl_fn = tape_34xx_ioctl, + .mtop_array = tape_34xx_mtop +}; + +int +tape_34xx_init (void) +{ + int rc; + + DBF_EVENT(3, "34xx init: $Revision: 1.9 $\n"); + /* Register discipline. 
*/ + rc = tape_register_discipline(&tape_discipline_3480); + if (rc == 0) { + rc = tape_register_discipline(&tape_discipline_3490); + if (rc) + tape_unregister_discipline(&tape_discipline_3480); + } + if (rc) + DBF_EVENT(3, "34xx init failed\n"); + else + DBF_EVENT(3, "34xx registered\n"); + return rc; +} + +void +tape_34xx_exit(void) +{ + tape_unregister_discipline(&tape_discipline_3480); + tape_unregister_discipline(&tape_discipline_3490); +} + +MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH"); +MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape " + "device driver ($Revision: 1.9 $)"); +MODULE_LICENSE("GPL"); + +module_init(tape_34xx_init); +module_exit(tape_34xx_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape34xx.c linux.22-ac2/drivers/s390/char/tape34xx.c --- linux.vanilla/drivers/s390/char/tape34xx.c 2002-08-03 16:08:27.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape34xx.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,2389 +0,0 @@ -/*************************************************************************** - * - * drivers/s390/char/tape34xx.c - * common tape device discipline for 34xx tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_S390_TAPE_DYNAMIC -#include -#endif -#include -#include -#include "tape.h" -#include "tape34xx.h" - -#define PRINTK_HEADER "T34xx:" - -tape_event_handler_t tape34xx_event_handler_table[TS_SIZE][TE_SIZE] = -{ - /* {START , DONE, FAILED, ERROR, OTHER } */ - {NULL, tape34xx_unused_done, NULL, NULL, NULL}, /* TS_UNUSED */ - {NULL, tape34xx_idle_done, NULL, NULL, NULL}, /* TS_IDLE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_DONE */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_FAILED */ - {NULL, tape34xx_block_done, NULL, NULL, NULL}, /* TS_BLOCK_INIT */ - {NULL, tape34xx_bsb_init_done, NULL, NULL, NULL}, /* TS_BSB_INIT */ - {NULL, tape34xx_bsf_init_done, NULL, NULL, NULL}, /* TS_BSF_INIT */ - {NULL, tape34xx_dse_init_done, NULL, NULL, NULL}, /* TS_DSE_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_EGA_INIT */ - {NULL, tape34xx_fsb_init_done, NULL, NULL, NULL}, /* TS_FSB_INIT */ - {NULL, tape34xx_fsf_init_done, NULL, NULL, NULL}, /* TS_FSF_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_LDI_INIT */ - {NULL, tape34xx_lbl_init_done, NULL, NULL, NULL}, /* TS_LBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_MSE_INIT */ - {NULL, tape34xx_nop_init_done, NULL, NULL, NULL}, /* TS_NOP_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBA_INIT */ - {NULL, tape34xx_rbi_init_done, NULL, NULL, NULL}, /* TS_RBI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBU_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RBL_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RDC_INIT */ - {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RFO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_RSD_INIT */ - {NULL, tape34xx_rew_init_done, NULL, NULL, NULL}, /* TS_REW_INIT */ - {NULL, tape34xx_rew_release_init_done, NULL, NULL, NULL}, /* TS_REW_RELEASE_IMIT */ - {NULL, tape34xx_run_init_done, NULL, NULL, NULL}, /* TS_RUN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SEN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SID_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SNP_INIT */ - {NULL, NULL, NULL, NULL, 
NULL}, /* TS_SPG_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SWI_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SMR_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_SYN_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_TIO_INIT */ - {NULL, NULL, NULL, NULL, NULL}, /* TS_UNA_INIT */ - {NULL, tape34xx_wri_init_done, NULL, NULL, NULL}, /* TS_WRI_INIT */ - {NULL, tape34xx_wtm_init_done, NULL, NULL, NULL}, /* TS_WTM_INIT */ - {NULL, NULL, NULL, NULL, NULL}}; /* TS_NOT_OPER */ - - -int -tape34xx_ioctl_overload (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) -{ - return -EINVAL; // no additional ioctls - -} - -ccw_req_t * -tape34xx_write_block (const char *data, size_t count, tape_info_t * ti) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - void *mem; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xwbl nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - mem = kmalloc (count, GFP_KERNEL); - if (!mem) { - tape_free_request (cqr); -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xwbl nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - if (copy_from_user (mem, data, count)) { - kfree (mem); - tape_free_request (cqr); -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xwbl segf."); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - - ccw->cmd_code = WRITE_CMD; - ccw->flags = 0; - ccw->count = count; - set_normalized_cda (ccw, (unsigned long) mem); - if ((ccw->cda) == 0) { - kfree (mem); - tape_free_request (cqr); - return NULL; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = mem; - ti->userbuf = (void *) data; - tapestate_set (ti, TS_WRI_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xwbl ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -void -tape34xx_free_write_block (ccw_req_t * cqr, tape_info_t * ti) -{ - unsigned long lockflags; - ccw1_t *ccw; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ccw = cqr->cpaddr; - ccw++; - clear_normalized_cda (ccw); - kfree (ti->kernbuf); - tape_free_request (cqr); - ti->kernbuf = ti->userbuf = NULL; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfwb free"); -#endif /* TAPE_DEBUG */ -} - -ccw_req_t * -tape34xx_read_block (const char *data, size_t count, tape_info_t * ti) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - void *mem; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xrbl nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - mem = kmalloc (count, GFP_KERNEL); - if (!mem) { - tape_free_request (cqr); -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xrbl nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - - ccw->cmd_code = READ_FORWARD; - ccw->flags = 0; - ccw->count = count; - set_normalized_cda (ccw, (unsigned long) mem); - if ((ccw->cda) == 0) { - kfree (mem); - tape_free_request (cqr); - return NULL; - } - 
s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = mem; - ti->userbuf = (void *) data; - tapestate_set (ti, TS_RFO_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xrbl ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -ccw_req_t * -tape34xx_read_opposite (tape_info_t * ti,int novalue) -{ - ccw_req_t *cqr; - ccw1_t *ccw; - size_t count; - // first, retrieve the count from the old cqr. - cqr = ti->cqr; - ccw = cqr->cpaddr; - ccw++; - count=ccw->count; - // free old cqr. - clear_normalized_cda (ccw); - tape_free_request (cqr); - // build new cqr - cqr = tape_alloc_ccw_req (ti, 3, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xrop nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - - ccw->cmd_code = READ_BACKWARD; - ccw->flags = CCW_FLAG_CC; - ccw->count = count; - set_normalized_cda (ccw, (unsigned long) ti->kernbuf); - if ((ccw->cda) == 0) { - tape_free_request (cqr); - return NULL; - } - ccw++; - ccw->cmd_code = FORSPACEBLOCK; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - ccw->cda = (unsigned long)ccw; - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 1; - ccw->cda = (unsigned long)ccw; - tapestate_set (ti, TS_RBA_INIT); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xrop ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -void -tape34xx_free_read_block (ccw_req_t * cqr, tape_info_t * ti) -{ - unsigned long lockflags; - size_t cpysize; - ccw1_t *ccw; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ccw = cqr->cpaddr; - ccw++; - cpysize = ccw->count - ti->devstat.rescnt; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (copy_to_user (ti->userbuf, ti->kernbuf, cpysize)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfrb segf."); -#endif /* TAPE_DEBUG */ - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - clear_normalized_cda (ccw); - kfree (ti->kernbuf); - tape_free_request (cqr); - ti->kernbuf = ti->userbuf = NULL; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfrb free"); -#endif /* TAPE_DEBUG */ -} - -/* - * The IOCTL interface is implemented in the following section, - * excepted the MTRESET, MTSETBLK which are handled by tapechar.c - */ -/* - * MTFSF: Forward space over 'count' file marks. The tape is positioned - * at the EOT (End of Tape) side of the file mark. 
- */ -ccw_req_t * -tape34xx_mtfsf (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsf parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsf nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = FORSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_FSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfsf ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTBSF: Backward space over 'count' file marks. The tape is positioned at - * the EOT (End of Tape) side of the last skipped file mark. - */ -ccw_req_t * -tape34xx_mtbsf (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsf parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsf nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = BACKSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_BSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xbsf ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTFSR: Forward space over 'count' tape blocks (blocksize is set - * via MTSETBLK. 
- */ -ccw_req_t * -tape34xx_mtfsr (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsr parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsr nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = FORSPACEBLOCK; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_FSB_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfsr ccwgen"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTBSR: Backward space over 'count' tape blocks. - * (blocksize is set via MTSETBLK. - */ -ccw_req_t * -tape34xx_mtbsr (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsr parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsr nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = BACKSPACEBLOCK; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_BSB_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xbsr ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTWEOF: Write 'count' file marks at the current position. 
- */ -ccw_req_t * -tape34xx_mtweof (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xweo parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xweo nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = WRITETAPEMARK; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_WTM_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xweo ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTREW: Rewind the tape. - */ -ccw_req_t * -tape34xx_mtrew (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 3, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xrew nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = REWIND; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_REW_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xrew ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTOFFL: Rewind the tape and put the drive off-line. - * Implement 'rewind unload' - */ -ccw_req_t * -tape34xx_mtoffl (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 3, 32); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xoff nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = REWIND_UNLOAD; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = SENSE; - ccw->flags = 0; - ccw->count = 32; - ccw->cda = (unsigned long) cqr->cpaddr; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_RUN_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xoff ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTNOP: 'No operation'. 
- */ -ccw_req_t * -tape34xx_mtnop (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 1, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xnop nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) ccw->cmd_code; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xnop ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTBSFM: Backward space over 'count' file marks. - * The tape is positioned at the BOT (Begin Of Tape) side of the - * last skipped file mark. - */ -ccw_req_t * -tape34xx_mtbsfm (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsm parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbsm nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = BACKSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_BSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xbsm ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTFSFM: Forward space over 'count' file marks. - * The tape is positioned at the BOT (Begin Of Tape) side - * of the last skipped file mark. 
- */ -ccw_req_t * -tape34xx_mtfsfm (tape_info_t * ti, int count) -{ - long lockflags; - int i; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count == 0) || (count > 510)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsm parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - cqr = tape_alloc_ccw_req (ti, 2 + count, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xfsm nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - for (i = 0; i < count; i++) { - ccw->cmd_code = FORSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - } - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_FSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xfsm ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTEOM: positions at the end of the portion of the tape already used - * for recordind data. MTEOM positions after the last file mark, ready for - * appending another file. - * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind. - */ -ccw_req_t * -tape34xx_mteom (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 4, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xeom nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = FORSPACEFILE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = CCW_CMD_TIC; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (cqr->cpaddr); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_FSF_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xeom ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTERASE: erases the tape. 
- */ -ccw_req_t * -tape34xx_mterase (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 5, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xera nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = REWIND; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = ERASE_GAP; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = DATA_SEC_ERASE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_DSE_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xera ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTSETDENSITY: set tape density. - */ -ccw_req_t * -tape34xx_mtsetdensity (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xden nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xden ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTSEEK: seek to the specified block. 
- */ -ccw_req_t * -tape34xx_mtseek (tape_info_t * ti, int count) -{ - long lockflags; - __u8 *data; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((data = kmalloc (4 * sizeof (__u8), GFP_KERNEL)) == NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xsee nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - data[0] = 0x01; - data[1] = data[2] = data[3] = 0x00; - if (count >= 4194304) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xsee parm"); -#endif /* TAPE_DEBUG */ - kfree(data); - return NULL; - } - if (((tape34xx_disc_data_t *) ti->discdata)->modeset_byte & 0x08) // IDRC on - - data[1] = data[1] | 0x80; - data[3] += count % 256; - data[2] += (count / 256) % 256; - data[1] += (count / 65536); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xsee id:"); - debug_int_event (tape_debug_area,6,count); -#endif /* TAPE_DEBUG */ - cqr = tape_alloc_ccw_req (ti, 3, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xsee nomem"); -#endif /* TAPE_DEBUG */ - kfree (data); - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = LOCATE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 4; - set_normalized_cda (ccw, (unsigned long) data); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = data; - ti->userbuf = NULL; - tapestate_set (ti, TS_LBL_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xsee ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTTELL: Tell block. Return the number of block relative to current file. - */ -ccw_req_t * -tape34xx_mttell (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - void *mem; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xtel nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - mem = kmalloc (8, GFP_KERNEL); - if (!mem) { - tape_free_request (cqr); -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xtel nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - - ccw->cmd_code = READ_BLOCK_ID; - ccw->flags = 0; - ccw->count = 8; - set_normalized_cda (ccw, (unsigned long) mem); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = mem; - ti->userbuf = NULL; - tapestate_set (ti, TS_RBI_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xtel ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTSETDRVBUFFER: Set the tape drive buffer code to number. - * Implement NOP. 
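tape34xx_mtseek() above packs the target block number into the 4-byte LOCATE argument: byte 0 is 0x01, bit 0x80 of byte 1 carries the IDRC flag taken from the modeset byte, and the block number itself is stored big-endian in the remaining 22 bits. That is why block numbers of 4194304 (2^22) or more are rejected, and why tape34xx_rbi_init_done() masks byte 1 with 0x3F when it reads the position back for MTTELL. A minimal round-trip of that packing, written as ordinary user-space C and mirroring only what the removed code computes, not the 3480/3490 command specification:

#include <assert.h>
#include <stdio.h>

/* Pack a block number the way tape34xx_mtseek fills the LOCATE argument. */
static int pack_block_id(unsigned long block, int idrc, unsigned char data[4])
{
	if (block >= 4194304)	/* 2^22: does not fit in 22 bits */
		return -1;
	data[0] = 0x01;
	data[1] = (idrc ? 0x80 : 0x00) | (unsigned char) (block / 65536);
	data[2] = (unsigned char) ((block / 256) % 256);
	data[3] = (unsigned char) (block % 256);
	return 0;
}

/* Reverse of the packing, as done in tape34xx_rbi_init_done. */
static unsigned long unpack_block_id(const unsigned char data[4])
{
	return data[3] + 256UL * data[2] + 65536UL * (data[1] & 0x3F);
}

int main(void)
{
	unsigned char data[4];
	unsigned long block = 1234567;

	assert(pack_block_id(block, 1, data) == 0);
	assert(unpack_block_id(data) == block);
	printf("block %lu -> %02x %02x %02x %02x\n",
	       block, data[0], data[1], data[2], data[3]);
	return 0;
}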
- */ -ccw_req_t * -tape34xx_mtsetdrvbuffer (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xbuf nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xbuf ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTLOCK: Locks the tape drive door. - * Implement NOP CCW command. - */ -ccw_req_t * -tape34xx_mtlock (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xloc nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xloc ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTUNLOCK: Unlocks the tape drive door. - * Implement the NOP CCW command. - */ -ccw_req_t * -tape34xx_mtunlock (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xulk nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xulk ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTLOAD: Loads the tape. - * This function is not implemented and returns NULL, which causes the Frontend to wait for a medium being loaded. - * The 3480/3490 type Tapes do not support a load command - */ -ccw_req_t * -tape34xx_mtload (tape_info_t * ti, int count) -{ - return NULL; -} - -/* - * MTUNLOAD: Rewind the tape and unload it. 
- */ -ccw_req_t * -tape34xx_mtunload (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 3, 32); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xunl nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = REWIND_UNLOAD; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ccw++; - ccw->cmd_code = SENSE; - ccw->flags = 0; - ccw->count = 32; - ccw->cda = (unsigned long) cqr->cpaddr; - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_RUN_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xunl ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTCOMPRESSION: used to enable compression. - * Sets the IDRC on/off. - */ -ccw_req_t * -tape34xx_mtcompression (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - if ((count < 0) || (count > 1)) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xcom parm"); -#endif /* TAPE_DEBUG */ - return NULL; - } - if (count == 0) - ((tape34xx_disc_data_t *) ti->discdata)->modeset_byte = 0x00; // IDRC off - - else - ((tape34xx_disc_data_t *) ti->discdata)->modeset_byte = 0x08; // IDRC on - - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xcom nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xcom ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTSTPART: Move the tape head at the partition with the number 'count'. - * Implement the NOP CCW command. - */ -ccw_req_t * -tape34xx_mtsetpart (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xspa nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xspa ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTMKPART: .... dummy . - * Implement the NOP CCW command. 
- */ -ccw_req_t * -tape34xx_mtmkpart (tape_info_t * ti, int count) -{ - long lockflags; - ccw_req_t *cqr; - ccw1_t *ccw; - cqr = tape_alloc_ccw_req (ti, 2, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xnpa nomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = NULL; - ti->userbuf = NULL; - tapestate_set (ti, TS_NOP_INIT); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xnpa ccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} - -/* - * MTIOCGET: query the tape drive status. - */ -ccw_req_t * -tape34xx_mtiocget (tape_info_t * ti, int count) -{ - return NULL; -} - -/* - * MTIOCPOS: query the tape position. - */ -ccw_req_t * -tape34xx_mtiocpos (tape_info_t * ti, int count) -{ - return NULL; -} - -ccw_req_t * tape34xx_bread (struct request *req,tape_info_t* ti,int tapeblock_major) { - ccw_req_t *cqr; - ccw1_t *ccw; - __u8 *data; - int s2b = blksize_size[tapeblock_major][ti->blk_minor]/hardsect_size[tapeblock_major][ti->blk_minor]; - int realcount; - int size,bhct = 0; - struct buffer_head* bh; - for (bh = req->bh; bh; bh = bh->b_reqnext) { - if (bh->b_size > blksize_size[tapeblock_major][ti->blk_minor]) - for (size = 0; size < bh->b_size; size += blksize_size[tapeblock_major][ti->blk_minor]) - bhct++; - else - bhct++; - } - if ((data = kmalloc (4 * sizeof (__u8), GFP_ATOMIC)) == NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"xBREDnomem"); -#endif /* TAPE_DEBUG */ - return NULL; - } - data[0] = 0x01; - data[1] = data[2] = data[3] = 0x00; - realcount=req->sector/s2b; - if (((tape34xx_disc_data_t *) ti->discdata)->modeset_byte & 0x08) // IDRC on - - data[1] = data[1] | 0x80; - data[3] += realcount % 256; - data[2] += (realcount / 256) % 256; - data[1] += (realcount / 65536); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xBREDid:"); - debug_int_event (tape_debug_area,6,realcount); -#endif /* TAPE_DEBUG */ - cqr = tape_alloc_ccw_req (ti, 2+bhct+1, 0); - if (!cqr) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,6,"xBREDnomem"); -#endif /* TAPE_DEBUG */ - kfree(data); - return NULL; - } - ccw = cqr->cpaddr; - ccw->cmd_code = MODE_SET_DB; - ccw->flags = CCW_FLAG_CC; - ccw->count = 1; - set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte))); - if (realcount!=ti->position) { - ccw++; - ccw->cmd_code = LOCATE; - ccw->flags = CCW_FLAG_CC; - ccw->count = 4; - set_normalized_cda (ccw, (unsigned long) data); - } - ti->position=realcount+req->nr_sectors/s2b; - for (bh=req->bh;bh!=NULL;) { - ccw->flags = CCW_FLAG_CC; - if (bh->b_size >= blksize_size[tapeblock_major][ti->blk_minor]) { - for (size = 0; size < bh->b_size; size += blksize_size[tapeblock_major][ti->blk_minor]) { - ccw++; - ccw->flags = CCW_FLAG_CC; - ccw->cmd_code = READ_FORWARD; - ccw->count = blksize_size[tapeblock_major][ti->blk_minor]; - set_normalized_cda (ccw, __pa (bh->b_data + size)); - } - bh = bh->b_reqnext; - } else { /* group N bhs to fit into byt_per_blk */ - for (size = 0; bh != NULL && size < blksize_size[tapeblock_major][ti->blk_minor];) { - ccw++; - 
ccw->flags = CCW_FLAG_DC; - ccw->cmd_code = READ_FORWARD; - ccw->count = bh->b_size; - set_normalized_cda (ccw, __pa (bh->b_data)); - size += bh->b_size; - bh = bh->b_reqnext; - } - if (size != blksize_size[tapeblock_major][ti->blk_minor]) { - PRINT_WARN ("Cannot fulfill small request %d vs. %d (%ld sects)\n", - size, - blksize_size[tapeblock_major][ti->blk_minor], - req->nr_sectors); - kfree(data); - tape_free_request (cqr); - return NULL; - } - } - } - ccw -> flags &= ~(CCW_FLAG_DC); - ccw -> flags |= (CCW_FLAG_CC); - ccw++; - ccw->cmd_code = NOP; - ccw->flags = 0; - ccw->count = 0; - ccw->cda = (unsigned long) (&(ccw->cmd_code)); - ti->kernbuf = data; - ti->userbuf = NULL; - tapestate_set (ti, TS_BLOCK_INIT); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xBREDccwg"); -#endif /* TAPE_DEBUG */ - return cqr; -} -void tape34xx_free_bread (ccw_req_t* cqr,struct _tape_info_t* ti) { - ccw1_t* ccw; - for (ccw=(ccw1_t*)cqr->cpaddr;(ccw->flags & CCW_FLAG_CC)||(ccw->flags & CCW_FLAG_DC);ccw++) - if ((ccw->cmd_code == MODE_SET_DB) || - (ccw->cmd_code == LOCATE) || - (ccw->cmd_code == READ_FORWARD)) - clear_normalized_cda(ccw); - tape_free_request(cqr); - kfree(ti->kernbuf); - ti->kernbuf=NULL; -} - -/* event handlers */ -void -tape34xx_default_handler (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xdefhandle"); -#endif /* TAPE_DEBUG */ - PRINT_ERR ("TAPE34XX: An unexpected Unit Check occurred.\n"); - PRINT_ERR ("TAPE34XX: Please read Documentation/s390/TAPE and report it!\n"); - PRINT_ERR ("TAPE34XX: Current state is: %s", - (((tapestate_get (ti) < TS_SIZE) && (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : "->UNKNOWN STATE<-")); - tape_dump_sense (&ti->devstat); - ti->rc = -EIO; - ti->wanna_wakeup=1; - switch (tapestate_get(ti)) { - case TS_REW_RELEASE_INIT: - tapestate_set(ti,TS_FAILED); - wake_up (&ti->wq); - break; - case TS_BLOCK_INIT: - tapestate_set(ti,TS_FAILED); - schedule_tapeblock_exec_IO(ti); - break; - default: - tapestate_set(ti,TS_FAILED); - wake_up_interruptible (&ti->wq); - } -} - -void -tape34xx_unexpect_uchk_handler (tape_info_t * ti) -{ - if ((ti->devstat.ii.sense.data[0] == 0x40) && - (ti->devstat.ii.sense.data[1] == 0x40) && - (ti->devstat.ii.sense.data[3] == 0x43)) { - // no tape in the drive - PRINT_INFO ("Drive %d not ready. No volume loaded.\n", ti->rew_minor / 2); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"xuuh nomed"); -#endif /* TAPE_DEBUG */ - tapestate_set (ti, TS_FAILED); - ti->rc = -ENOMEDIUM; - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); - } else if ((ti->devstat.ii.sense.data[0] == 0x42) && - (ti->devstat.ii.sense.data[1] == 0x44) && - (ti->devstat.ii.sense.data[3] == 0x3b)) { - PRINT_INFO ("Media in drive %d was changed!\n", - ti->rew_minor / 2); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"xuuh medchg"); -#endif - /* nothing to do. chan end & dev end will be reported when io is finished */ - } else { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"xuuh unexp"); - debug_text_event (tape_debug_area,3,"state:"); - debug_text_event (tape_debug_area,3,((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : - "TS UNKNOWN"); -#endif /* TAPE_DEBUG */ - tape34xx_default_handler (ti); - } -} - -void -tape34xx_unused_done (tape_info_t * ti) -{ - if (ti->medium_is_unloaded) { - // A medium was inserted in the drive! 
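tape34xx_bread() above mixes the two CCW chaining modes: buffer heads that are at least one device block large get one command-chained (CCW_FLAG_CC) READ_FORWARD per block, while several smaller buffer heads are data-chained (CCW_FLAG_DC) so that together they receive the data of a single READ_FORWARD, and the request is refused if such a group does not add up to exactly one block; a LOCATE is only prepended when the request does not start at the drive's current position. The sketch below reproduces just that planning decision as a self-contained C program; no real channel I/O is involved and the flag names are only labels.

#include <stdio.h>

/* Print the chaining plan for a block-device read request: big buffers are
 * split into one command-chained READ_FORWARD per device block, groups of
 * small buffers are data-chained until they add up to one device block. */
static void plan_read(const int *bh_size, int nbh, int blksize)
{
	int i = 0, size;

	while (i < nbh) {
		if (bh_size[i] >= blksize) {
			for (size = 0; size < bh_size[i]; size += blksize)
				printf("READ_FORWARD %d bytes, flags CC\n", blksize);
			i++;
		} else {
			for (size = 0; i < nbh && size < blksize; size += bh_size[i], i++)
				printf("READ_FORWARD part %d bytes, flags DC\n", bh_size[i]);
			if (size != blksize)
				printf("cannot fulfill small request (%d of %d bytes)\n",
				       size, blksize);
		}
	}
	printf("NOP, end of chain\n");
}

int main(void)
{
	int sizes[] = { 4096, 1024, 1024, 1024, 1024 };

	plan_read(sizes, 5, 4096);	/* one full block, then four 1K pieces */
	return 0;
}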
-#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xuui med"); -#endif /* TAPE_DEBUG */ - PRINT_WARN ("A medium was inserted into the tape.\n"); - ti->medium_is_unloaded=0; - } else { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"unsol.irq!"); - debug_text_event (tape_debug_area,3,"dev end"); - debug_int_exception (tape_debug_area,3,ti->devinfo.irq); -#endif /* TAPE_DEBUG */ - PRINT_WARN ("Unsolicited IRQ (Device End) caught in unused state.\n"); - tape_dump_sense (&ti->devstat); - } -} - - -void -tape34xx_idle_done (tape_info_t * ti) -{ - if (ti->medium_is_unloaded) { - // A medium was inserted in the drive! -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"xuud med"); -#endif /* TAPE_DEBUG */ - PRINT_WARN ("A medium was inserted into the tape.\n"); - ti->medium_is_unloaded=0; - wake_up_interruptible (&ti->wq); - } else { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"unsol.irq!"); - debug_text_event (tape_debug_area,3,"dev end"); - debug_int_exception (tape_debug_area,3,ti->devinfo.irq); -#endif /* TAPE_DEBUG */ - PRINT_WARN ("Unsolicited IRQ (Device End) caught in idle state.\n"); - tape_dump_sense (&ti->devstat); - } -} - -void -tape34xx_block_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"x:bREQdone"); -#endif /* TAPE_DEBUG */ - tapestate_set(ti,TS_DONE); - schedule_tapeblock_exec_IO(ti); -} - -void -tape34xx_bsf_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"bsf done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); -} - -void -tape34xx_dse_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"dse done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); -} - -void -tape34xx_fsf_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"fsf done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); -} - -void -tape34xx_fsb_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"fsb done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); -} - -void -tape34xx_bsb_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"bsb done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - ti->wanna_wakeup=1; - wake_up (&ti->wq); -} - -void -tape34xx_lbl_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"lbl done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - //s390irq_spin_unlock(tape->devinfo.irq); - ti->wanna_wakeup=1; - wake_up (&ti->wq); -} - -void -tape34xx_nop_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"nop done.."); - debug_text_exception (tape_debug_area,6,"or rew/rel"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - //s390irq_spin_unlock(tape->devinfo.irq); - ti->wanna_wakeup=1; - wake_up (&ti->wq); -} - -void -tape34xx_rfo_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"rfo done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - ti->wanna_wakeup=1; - wake_up (&ti->wq); -} - -void -tape34xx_rbi_init_done (tape_info_t * ti) -{ - __u8 *data; -#ifdef TAPE_DEBUG - int i; -#endif - tapestate_set (ti, TS_FAILED); - data = ti->kernbuf; - ti->rc = 
data[3]; - ti->rc += 256 * data[2]; - ti->rc += 65536 * (data[1] & 0x3F); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"rbi done"); - debug_text_event (tape_debug_area,6,"data:"); - for (i=0;i<8;i++) - debug_int_event (tape_debug_area,6,data[i]); -#endif - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); -} - -void -tape34xx_rew_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"rew done"); -#endif - //BH: use irqsave - //s390irq_spin_lock(tape->devinfo.irq); - tapestate_set (ti, TS_DONE); - ti->rc = 0; - //s390irq_spin_unlock(tape->devinfo.irq); - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); -} - -void -tape34xx_rew_release_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"rewR done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - //s390irq_spin_unlock(tape->devinfo.irq); - ti->wanna_wakeup=1; - wake_up (&ti->wq); -} - -void -tape34xx_run_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"rew done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); -} - -void -tape34xx_wri_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"wri done"); -#endif - //BH: use irqsave - //s390irq_spin_lock(ti->devinfo.irq); - tapestate_set (ti, TS_DONE); - ti->rc = 0; - //s390irq_spin_unlock(ti->devinfo.irq); - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); -} - -void -tape34xx_wtm_init_done (tape_info_t * ti) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"wtm done"); -#endif - tapestate_set (ti, TS_DONE); - ti->rc = 0; - ti->wanna_wakeup=1; - wake_up_interruptible (&ti->wq); -} - -/* This function analyses the tape's sense-data in case of a unit-check. If possible, - it tries to recover from the error. Else the user is informed about the problem. */ -void -tape34xx_error_recovery (tape_info_t* ti) -{ - __u8* sense=ti->devstat.ii.sense.data; - int inhibit_cu_recovery=0; - int cu_type=ti->discipline->cu_type; - if ((((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)&0x80) inhibit_cu_recovery=1; - if (tapestate_get(ti)==TS_BLOCK_INIT) { - // no recovery for block device, bottom half will retry... - tape34xx_error_recovery_has_failed(ti,EIO); - return; - } - if (sense[0]&SENSE_COMMAND_REJECT) - switch (tapestate_get(ti)) { - case TS_BLOCK_INIT: - case TS_DSE_INIT: - case TS_EGA_INIT: - case TS_WRI_INIT: - case TS_WTM_INIT: - if (sense[1]&SENSE_WRITE_PROTECT) { - // trying to write, but medium is write protected - tape34xx_error_recovery_has_failed(ti,EACCES); - return; - } - default: - tape34xx_error_recovery_HWBUG(ti,1); - return; - } - // special cases for various tape-states when reaching end of recorded area - if (((sense[0]==0x08) || (sense[0]==0x10) || (sense[0]==0x12)) && - ((sense[1]==0x40) || (sense[1]==0x0c))) - switch (tapestate_get(ti)) { - case TS_FSF_INIT: - // Trying to seek beyond end of recorded area - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case TS_LBL_INIT: - // Block could not be located. 
- tape34xx_error_recovery_has_failed(ti,EIO); - return; - case TS_RFO_INIT: - // Try to read beyond end of recorded area -> 0 bytes read - tape34xx_error_recovery_has_failed(ti,0); - return; - } - // Sensing special bits - if (sense[0]&SENSE_BUS_OUT_CHECK) { - tape34xx_error_recovery_do_retry(ti); - return; - } - if (sense[0]&SENSE_DATA_CHECK) { - // hardware failure, damaged tape or improper operating conditions - switch (sense[3]) { - case 0x23: - // a read data check occurred - if ((sense[2]&SENSE_TAPE_SYNC_MODE) || - (inhibit_cu_recovery)) { - // data check is not permanent, may be recovered. - // We always use async-mode with cu-recovery, so this should *never* happen. - tape34xx_error_recovery_HWBUG(ti,2); - return; - } else { - // data check is permanent, CU recovery has failed - PRINT_WARN("Permanent read error, recovery failed!\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - } - case 0x25: - // a write data check occurred - if ((sense[2]&SENSE_TAPE_SYNC_MODE) || - (inhibit_cu_recovery)) { - // data check is not permanent, may be recovered. - // We always use async-mode with cu-recovery, so this should *never* happen. - tape34xx_error_recovery_HWBUG(ti,3); - return; - } else { - // data check is permanent, cu-recovery has failed - PRINT_WARN("Permanent write error, recovery failed!\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - } - case 0x26: - // Data Check (read opposite) occurred. We'll recover this. - tape34xx_error_recovery_read_opposite(ti); - return; - case 0x28: - // The ID-Mark at the beginning of the tape could not be written. This is fatal, we'll report and exit. - PRINT_WARN("ID-Mark could not be written. Check your hardware!\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x31: - // Tape void. Tried to read beyond end of device. We'll report and exit. - PRINT_WARN("Try to read beyond end of recorded area!\n"); - tape34xx_error_recovery_has_failed(ti,ENOSPC); - return; - case 0x41: - // Record sequence error. cu detected incorrect block-id sequence on tape. We'll report and exit. - PRINT_WARN("Illegal block-id sequence found!\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - default: - // well, all data checks for 3480 should result in one of the above erpa-codes. if not -> bug - // On 3490, other data-check conditions do exist. - if (cu_type==0x3480) { - tape34xx_error_recovery_HWBUG(ti,4); - return; - } - } - } - if (sense[0]&SENSE_OVERRUN) { - // A data overrun between cu and drive occurred. The channel speed is to slow! We'll report this and exit! - switch (sense[3]) { - case 0x40: // overrun error - PRINT_WARN ("Data overrun error between control-unit and drive. Use a faster channel connection, if possible! \n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - default: - // Overrun bit is set, but erpa does not show overrun error. This is a bug. - tape34xx_error_recovery_HWBUG(ti,5); - return; - } - } - if (sense[1]&SENSE_RECORD_SEQUENCE_ERR) { - switch (sense[3]) { - case 0x41: - // Record sequence error. cu detected incorrect block-id sequence on tape. We'll report and exit. - PRINT_WARN("Illegal block-id sequence found!\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - default: - // Record sequence error bit is set, but erpa does not show record sequence error. This is a bug. - tape34xx_error_recovery_HWBUG(ti,6); - return; - } - } - // Sensing erpa codes - switch (sense[3]) { - case 0x00: - // Everything is fine, but we got a unit check. Report and ignore! 
- PRINT_WARN ("Non-error sense was found. Unit-check will be ignored, expect errors...\n"); - return; - case 0x21: - // Data streaming not operational. Cu switches to interlock mode, we reissue the command. - PRINT_WARN ("Data streaming not operational. Switching to interlock-mode! \n"); - tape34xx_error_recovery_do_retry(ti); - return; - case 0x22: - // Path equipment check. Might be drive adapter error, buffer error on the lower interface, internal path not useable, or error during cartridge load. - // All of the above are not recoverable - PRINT_WARN ("A path equipment check occurred. One of the following conditions occurred:\n"); - PRINT_WARN ("drive adapter error,buffer error on the lower interface, internal path not useable, error during cartridge load.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x23: - // Read data check. Should have been be covered earlier -> Bug! - tape34xx_error_recovery_HWBUG(ti,7); - return; - case 0x24: - // Load display check. Load display was command was issued, but the drive is displaying a drive check message. Can be threated as "device end". - tape34xx_error_recovery_succeded(ti); - return; - case 0x25: - // Write data check. Should have been covered earlier -> Bug! - tape34xx_error_recovery_HWBUG(ti,8); - return; - case 0x26: - // Data check (read opposite). Should have been covered earlier -> Bug! - tape34xx_error_recovery_HWBUG(ti,9); - return; - case 0x27: - // Command reject. May indicate illegal channel program or buffer over/underrun. - // Since all channel programms are issued by this driver and ought be correct, - // we assume a over/underrun situaltion and retry the channel program. - tape34xx_error_recovery_do_retry(ti); - return; - case 0x28: - // Write id mark check. Should have beed covered earlier -> bug! - tape34xx_error_recovery_HWBUG(ti,10); - return; - case 0x29: - // Function incompatible. Either idrc is on but hardware not capable doing idrc - // or a perform subsystem func is issued and the cu is not online. Anyway, this - // cannot be recovered and is an I/O error. - PRINT_WARN ("Function incompatible. Try to switch off idrc! \n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x2a: - // Unsolicited environmental data. An internal counter overflows, we can ignore - // this and reissue the cmd. - tape34xx_error_recovery_do_retry(ti); - return; - case 0x2b: - // Environmental data present. Indicates either unload completed ok or read buffered - // log command completed ok. - if (tapestate_get(ti)==TS_RUN_INIT) { - // Rewind unload completed ok. - tape34xx_error_recovery_succeded(ti); - return; - } - // Since we do not issue read buffered log commands, this should never occur -> bug. - tape34xx_error_recovery_HWBUG(ti,11); - return; - case 0x2c: - // Permanent equipment check. cu has tried recovery, but did not succeed. This is an - // I/O error. - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x2d: - // Data security erase failure. - if (tapestate_get(ti)==TS_DSE_INIT) { - // report an I/O error - tape34xx_error_recovery_has_failed(ti,EIO); - return; - } - // Data security erase failure, but no such command issued. This is a bug. - tape34xx_error_recovery_HWBUG(ti,12); - return; - case 0x2e: - // Not capable. This indicates either that the drive fails reading the format id mark - // or that that format specified is not supported by the drive. We write a message and - // return an I/O error. 
- PRINT_WARN("Drive not capable processing the tape format!"); - tape34xx_error_recovery_has_failed(ti,EMEDIUMTYPE); - return; - case 0x2f: - // This erpa is reserved. This is a bug. - tape34xx_error_recovery_HWBUG(ti,13); - return; - case 0x30: - // The medium is write protected, while trying to write on it. We'll report this. - PRINT_WARN("Medium is write protected!\n"); - tape34xx_error_recovery_has_failed(ti,EACCES); - return; - case 0x31: - // Tape void. Should have beed covered ealier -> bug - tape34xx_error_recovery_HWBUG(ti,14); - return; - case 0x32: - // Tension loss. We cannot recover this, it's an I/O error. - PRINT_WARN("The drive lost tape tension.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x33: - // Load Failure. The catridge was not inserted correctly or the tape is not threaded - // correctly. We cannot recover this, the user has to reload the catridge. - PRINT_WARN("Cartridge load failure. Reload the cartridge and try again.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x34: - // Unload failure. The drive cannot maintain tape tension and control tape movement - // during an unload operation. - PRINT_WARN("Failure during cartridge unload. Please try manually.\n"); - if (tapestate_get(ti)!=TS_RUN_INIT) { - tape34xx_error_recovery_HWBUG(ti,15); - return; - } - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x35: - // Drive equipment check. One of the following: - // - cu cannot recover from a drive detected error - // - a check code message is displayed on drive message/load displays - // - the cartridge loader does not respond correctly - // - a failure occurs during an index, load, or unload cycle - PRINT_WARN("Equipment check! Please check the drive and the cartridge loader.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x36: - switch (cu_type) { - case 0x3480: - // This erpa is reserved for 3480 -> BUG - tape34xx_error_recovery_HWBUG(ti,16); - return; - case 0x3490: - // End of data. This is a permanent I/O error, which cannot be recovered. - // A read-type command has reached the end-of-data mark. - tape34xx_error_recovery_has_failed(ti,EIO); - return; - } - case 0x37: - // Tape length error. The tape is shorter than reported in the beginning-of-tape data. - PRINT_WARN("Tape length error.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x38: - // Physical end of tape. A read/write operation reached the physical end of tape. - if (tapestate_get(ti)==TS_WRI_INIT || - tapestate_get(ti)==TS_DSE_INIT || - tapestate_get(ti)==TS_EGA_INIT || - tapestate_get(ti)==TS_WTM_INIT){ - tape34xx_error_recovery_has_failed(ti,ENOSPC); - } else { - tape34xx_error_recovery_has_failed(ti,EIO); - } - return; - case 0x39: - // Backward at BOT. The drive is at BOT and is requestet to move backward. - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x3a: - // Drive switched not ready, but the command needs the drive to be ready. - PRINT_WARN("Drive not ready. Turn the ready/not ready switch to ready position and try again.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x3b: - // Manual rewind or unload. This causes an I/O error. - PRINT_WARN("Medium was rewound or unloaded manually. Expect errors! 
Please do only use the mtoffl and mtrew ioctl to unload tapes or rewind tapes.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x3c: - case 0x3d: - case 0x3e: - case 0x3f: - // These erpas are reserved -> BUG - tape34xx_error_recovery_HWBUG(ti,17); - return; - case 0x40: - // Overrun error. This should have been covered earlier -> bug. - tape34xx_error_recovery_HWBUG(ti,18); - return; - case 0x41: - // Record sequence error. This should have been covered earlier -> bug. - tape34xx_error_recovery_HWBUG(ti,19); - return; - case 0x42: - // Degraded mode. A condition that can cause degraded performace is detected. - PRINT_WARN("Subsystem is running in degraded mode. This may compromise your performace.\n"); - tape34xx_error_recovery_do_retry(ti); - return; - case 0x43: - // Drive not ready. Probably swith the ready/not ready switch to ready? - PRINT_WARN("The drive is not ready. Maybe no medium in?\n"); - tape34xx_error_recovery_has_failed(ti,ENOMEDIUM); - return; - case 0x44: - // Locate Block unsuccessfull. We'll report this. - if ((tapestate_get(ti)!=TS_BLOCK_INIT) && - (tapestate_get(ti)!=TS_LBL_INIT)) { - tape34xx_error_recovery_HWBUG(ti,20); // No locate block was issued... - return; - } - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x45: - // The drive is assigned elsewhere [to a different channel path/computer]. - PRINT_WARN("The drive is assigned elsewhere.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x46: - // Drive not online. Drive may be switched offline, the power supply may be switched off - // or the drive address may not be set correctly. - PRINT_WARN("The drive is not online."); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x47: - // Volume fenced. cu reports volume integrity is lost! - PRINT_WARN("Volume fenced. The volume integrity is lost! \n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x48: - // Log sense data and retry request. We'll do so... - tape34xx_error_recovery_do_retry(ti); - return; - case 0x49: - // Bus out check. A parity check error on the bus was found. PRINT_WARN("Bus out check. A data transfer over the bus was corrupted.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x4a: - // Control unit erp failed. We'll report this. - PRINT_WARN("The control unit failed recovering an I/O error.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x4b: - // Cu and drive incompatible. The drive requests micro-program patches, which are not available on the cu. - PRINT_WARN("The drive needs microprogram patches from the control unit, which are not available.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x4c: - // Recovered Check-One failure. Cu develops a hardware error, but is able to recover. We'll reissue the command. - tape34xx_error_recovery_do_retry(ti); - return; - case 0x4d: - switch (cu_type) { - case 0x3480: - // This erpa is reserved for 3480 -> bug - tape34xx_error_recovery_HWBUG(ti,21); - return; - case 0x3490: - // Resetting event received. Since the driver does not support resetting event recovery - // (which has to be handled by the I/O Layer), we'll report and retry our command. - tape34xx_error_recovery_do_retry(ti); - return; - } - case 0x4e: - switch (cu_type) { - case 0x3480: - // This erpa is reserved for 3480 -> bug. - tape34xx_error_recovery_HWBUG(ti,22); - return; - case 0x3490: - // Maximum block size exeeded. 
This indicates, that the block to be written is larger - // than allowed for buffered mode. We'll report this... - PRINT_WARN("Maximum block size for buffered mode exceeded.\n"); - tape34xx_error_recovery_has_failed(ti,ENOBUFS); - return; - } - case 0x4f: - // These erpas are reserved -> bug - tape34xx_error_recovery_HWBUG(ti,23); - return; - case 0x50: - // Read buffered log (Overflow). Cu is running in extended beffered log mode, and a counter overflows. - // This should never happen, since we're never running in extended buffered log mode -> bug. - tape34xx_error_recovery_do_retry(ti); - return; - case 0x51: - // Read buffered log (EOV). EOF processing occurs while the cu is in extended buffered log mode. - // This should never happen, since we're never running in extended buffered log mode -> bug. - tape34xx_error_recovery_do_retry(ti); - return; - case 0x52: - // End of Volume complete. Rewind unload completed ok. We'll report to the user... - if (tapestate_get(ti)!=TS_RUN_INIT) { - tape34xx_error_recovery_HWBUG(ti,24); - return; - } - tape34xx_error_recovery_succeded(ti); - return; - case 0x53: - // Global command intercept. We'll have to reissue our command. - tape34xx_error_recovery_do_retry(ti); - return; - case 0x54: - // Channel interface recovery (temporary). This can be recovered by reissuing the command. - tape34xx_error_recovery_do_retry(ti); - return; - case 0x55: - // Channel interface recovery (permanent). This cannot be recovered, we'll inform the user. - PRINT_WARN("A permanent channel interface error occurred.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x56: - // Channel protocol error. This cannot be recovered. - PRINT_WARN("A channel protocol error occurred.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x57: - switch (cu_type) { - case 0x3480: - // Attention intercept. We have to reissue the command. - PRINT_WARN("An attention intercept occurred, which will be recovered.\n"); - tape34xx_error_recovery_do_retry(ti); - return; - case 0x3490: - // Global status intercept. We have to reissue the command. - PRINT_WARN("An global status intercept was received, which will be recovered.\n"); - tape34xx_error_recovery_do_retry(ti); - return; - } - case 0x58: - case 0x59: - // These erpas are reserved -> bug. - tape34xx_error_recovery_HWBUG(ti,25); - return; - case 0x5a: - // Tape length incompatible. The tape inserted is too long, - // which could cause damage to the tape or the drive. - PRINT_WARN("Tape length incompatible [should be IBM Cartridge System Tape]. May cause damage to drive or tape.n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x5b: - // Format 3480 XF incompatible - if (sense[1]&SENSE_BEGINNING_OF_TAPE) { - // Everything is fine. The tape will be overwritten in a different format. - tape34xx_error_recovery_do_retry(ti); - return; - } - PRINT_WARN("Tape format is incompatible to the drive, which writes 3480-2 XF.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x5c: - // Format 3480-2 XF incompatible - PRINT_WARN("Tape format is incompatible to the drive. The drive cannot access 3480-2 XF volumes.\n"); - tape34xx_error_recovery_has_failed(ti,EIO); - return; - case 0x5d: - // Tape length violation. - PRINT_WARN("Tape length violation [should be IBM Enhanced Capacity Cartridge System Tape]. May cause damage to drive or tape.\n"); - tape34xx_error_recovery_has_failed(ti,EMEDIUMTYPE); - return; - case 0x5e: - // Compaction algorithm incompatible. 
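Taken together, the large switch in tape34xx_error_recovery() funnels every ERPA code (sense byte 3) into one of four outcomes: re-issue the channel program (tape34xx_error_recovery_do_retry), fail the request with a specific errno (tape34xx_error_recovery_has_failed), treat the interrupt as successful completion (tape34xx_error_recovery_succeded), or flag an unexpected condition (tape34xx_error_recovery_HWBUG). A table-driven sketch of that dispatch follows, with a handful of entries copied from the cases in this function; it is only meant to make the control flow visible, not to replace the full switch.

#include <stdio.h>
#include <errno.h>

/* Outcomes that every branch of tape34xx_error_recovery() funnels into. */
enum outcome { RECOVER_RETRY, RECOVER_FAIL, RECOVER_DONE, RECOVER_HWBUG };

struct erpa_action {
	unsigned char erpa;	/* sense byte 3 */
	enum outcome what;
	int err;		/* errno for RECOVER_FAIL */
};

/* Representative entries taken from the removed switch statement. */
static const struct erpa_action table[] = {
	{ 0x22, RECOVER_FAIL,  EIO },		/* path equipment check */
	{ 0x27, RECOVER_RETRY, 0 },		/* command reject / over-underrun */
	{ 0x2e, RECOVER_FAIL,  EMEDIUMTYPE },	/* drive not capable */
	{ 0x30, RECOVER_FAIL,  EACCES },	/* medium write protected */
	{ 0x43, RECOVER_FAIL,  ENOMEDIUM },	/* drive not ready */
	{ 0x54, RECOVER_RETRY, 0 },		/* channel interface recovery (temporary) */
};

static enum outcome dispatch(unsigned char erpa, int *err)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].erpa == erpa) {
			*err = table[i].err;
			return table[i].what;
		}
	return RECOVER_HWBUG;	/* reserved or unexpected code */
}

int main(void)
{
	int err = 0;
	enum outcome o = dispatch(0x30, &err);

	printf("erpa 0x30 -> outcome %d, errno %d\n", o, err);
	return 0;
}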
- PRINT_WARN("The volume is recorded using an incompatible compaction algorith, which is not supported by the control unit.\n"); - tape34xx_error_recovery_has_failed(ti,EMEDIUMTYPE); - return; - default: - // Reserved erpas -> bug - tape34xx_error_recovery_HWBUG(ti,26); - return; - } -} - -void tape34xx_error_recovery_has_failed (tape_info_t* ti,int error_id) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"xerp fail"); - debug_text_event (tape_debug_area,3,(((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : "UNKNOWN")); -#endif - if ((tapestate_get(ti)!=TS_UNUSED) && (tapestate_get(ti)!=TS_IDLE)) { - tape_dump_sense(&ti->devstat); - ti->rc = -error_id; - ti->wanna_wakeup=1; - switch (tapestate_get(ti)) { - case TS_REW_RELEASE_INIT: - case TS_RFO_INIT: - case TS_RBA_INIT: - tapestate_set(ti,TS_FAILED); - wake_up (&ti->wq); - break; - case TS_BLOCK_INIT: - tapestate_set(ti,TS_FAILED); - schedule_tapeblock_exec_IO(ti); - break; - default: - tapestate_set(ti,TS_FAILED); - wake_up_interruptible (&ti->wq); - } - } else { - PRINT_WARN("Recieved an unsolicited IRQ.\n"); - tape_dump_sense(&ti->devstat); - } -} - -void tape34xx_error_recovery_succeded(tape_info_t* ti) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"xerp done"); - debug_text_event (tape_debug_area,3,(((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : "UNKNOWN")); -#endif - if ((tapestate_get(ti)!=TS_UNUSED) && (tapestate_get(ti)!=TS_DONE)) { - tapestate_event (ti, TE_DONE); - } else { - PRINT_WARN("Recieved an unsolicited IRQ.\n"); - tape_dump_sense(&ti->devstat); - } -} - -void tape34xx_error_recovery_do_retry(tape_info_t* ti) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"xerp retr"); - debug_text_event (tape_debug_area,3,(((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : "UNKNOWN")); -#endif - if ((tapestate_get(ti)!=TS_UNUSED) && (tapestate_get(ti)!=TS_IDLE)) { - tape_dump_sense(&ti->devstat); - while (do_IO (ti->devinfo.irq, ti->cqr->cpaddr, (unsigned long) ti->cqr, 0x00, ti->cqr->options)); - } else { - PRINT_WARN("Recieved an unsolicited IRQ.\n"); - tape_dump_sense(&ti->devstat); - } -} - -void -tape34xx_error_recovery_read_opposite (tape_info_t* ti) { - switch (tapestate_get(ti)) { - case TS_RFO_INIT: - // We did read forward, but the data could not be read *correctly*. - // We will read backward and then skip forward again. - ti->cqr=tape34xx_read_opposite(ti,0); - if (ti->cqr==NULL) - tape34xx_error_recovery_has_failed(ti,EIO); - else - tape34xx_error_recovery_do_retry(ti); - break; - case TS_RBA_INIT: - // We tried to read forward and backward, but hat no success -> failed. - tape34xx_error_recovery_has_failed(ti,EIO); - break; - case TS_BLOCK_INIT: - tape34xx_error_recovery_do_retry(ti); - break; - default: - PRINT_WARN("read_opposite_recovery_called_with_state:%s\n", - (((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? - state_verbose[tapestate_get (ti)] : "UNKNOWN")); - } -} - -void -tape34xx_error_recovery_HWBUG (tape_info_t* ti,int condno) { - devstat_t* stat=&ti->devstat; - PRINT_WARN("An unexpected condition #%d was caught in tape error recovery.\n",condno); - PRINT_WARN("Please report this incident.\n"); - PRINT_WARN("State of the tape:%s\n", - (((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >= 0)) ? 
- state_verbose[tapestate_get (ti)] : "UNKNOWN")); - PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X " - " %02X%02X%02X%02X %02X%02X%02X%02X \n", - stat->ii.sense.data[0], stat->ii.sense.data[1], - stat->ii.sense.data[2], stat->ii.sense.data[3], - stat->ii.sense.data[4], stat->ii.sense.data[5], - stat->ii.sense.data[6], stat->ii.sense.data[7], - stat->ii.sense.data[8], stat->ii.sense.data[9], - stat->ii.sense.data[10], stat->ii.sense.data[11], - stat->ii.sense.data[12], stat->ii.sense.data[13], - stat->ii.sense.data[14], stat->ii.sense.data[15]); - PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X " - " %02X%02X%02X%02X %02X%02X%02X%02X \n", - stat->ii.sense.data[16], stat->ii.sense.data[17], - stat->ii.sense.data[18], stat->ii.sense.data[19], - stat->ii.sense.data[20], stat->ii.sense.data[21], - stat->ii.sense.data[22], stat->ii.sense.data[23], - stat->ii.sense.data[24], stat->ii.sense.data[25], - stat->ii.sense.data[26], stat->ii.sense.data[27], - stat->ii.sense.data[28], stat->ii.sense.data[29], - stat->ii.sense.data[30], stat->ii.sense.data[31]); - tape34xx_error_recovery_has_failed(ti,EIO); -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape34xx.h linux.22-ac2/drivers/s390/char/tape34xx.h --- linux.vanilla/drivers/s390/char/tape34xx.h 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape34xx.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,183 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tape34xx.h - * common tape device discipline for 34xx tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** - */ - -#ifndef _TAPE34XX_H - -#define _TAPE34XX_H - -/* - * The CCW commands for the Tape type of command. 
- */ - -#define INVALID_00 0x00 /* Invalid cmd */ -#define BACKSPACEBLOCK 0x27 /* Back Space block */ -#define BACKSPACEFILE 0x2f /* Back Space file */ -#define DATA_SEC_ERASE 0x97 /* Data security erase */ -#define ERASE_GAP 0x17 /* Erase Gap */ -#define FORSPACEBLOCK 0x37 /* Forward space block */ -#define FORSPACEFILE 0x3F /* Forward Space file */ -#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */ -#define NOP 0x03 /* No operation */ -#define READ_FORWARD 0x02 /* Read forward */ -#define REWIND 0x07 /* Rewind */ -#define REWIND_UNLOAD 0x0F /* Rewind and Unload */ -#define SENSE 0x04 /* Sense */ -#define NEW_MODE_SET 0xEB /* Guess it is Mode set */ -#define WRITE_CMD 0x01 /* Write */ -#define WRITETAPEMARK 0x1F /* Write Tape Mark */ - -#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */ -#define CONTROL_ACCESS 0xE3 /* Set high speed */ -#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT*/ -#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */ -#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */ -#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */ -#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */ -#define MODE_SET_C3 0xC3 /* for 3420 */ -#define MODE_SET_CB 0xCB /* for 3420 */ -#define MODE_SET_D3 0xD3 /* for 3420 */ -#define READ_BACKWARD 0x0C /* */ -#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */ -#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */ -#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */ -#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT*/ -#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT*/ -#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT*/ -#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */ -#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */ -#define READ_DEV_CHAR 0x64 /* Read device characteristics */ -#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT*/ -#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */ -#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */ -#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */ -#define SYNC 0x43 /* Synchronize (flush buffer) */ -#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */ -#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */ -#define READ_CONFIG_DATA 0xFA /* 3490 CMD */ -#define READ_MESSAGE_ID 0x4E /* 3490 CMD */ -#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */ -#define SET_INTERFACE_ID 0x73 /* 3490 CMD */ - -#ifndef MIN -#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) ) -#endif - - -#define BLOCKSIZE 4096 /* size of the tape rcds */ - -#define COMMAND_CHAIN CCW_FLAG_CC /* redefine from irq.h */ -#define CHANNEL_END DEV_STAT_CHN_END /* redefine from irq.h */ -#define DEVICE_END DEV_STAT_DEV_END /* redefine from irq.h */ -#define UNIT_CHECK DEV_STAT_UNIT_CHECK /* redefine from irq.h */ -#define UNIT_EXCEPTION DEV_STAT_UNIT_EXCEP /* redefine from irq.h */ -#define CONTROL_UNIT_END DEV_STAT_CU_END /* redefine from irq.h */ -#define INCORR_LEN SCHN_STAT_INCORR_LEN /* redefine from irq.h */ - -#define SENSE_COMMAND_REJECT 0x80 -#define SENSE_INTERVENTION_REQUIRED 0x40 -#define SENSE_BUS_OUT_CHECK 0x20 -#define SENSE_EQUIPMENT_CHECK 0x10 -#define SENSE_DATA_CHECK 0x08 -#define SENSE_OVERRUN 0x04 -#define SENSE_DEFERRED_UNIT_CHECK 0x02 -#define SENSE_ASSIGNED_ELSEWHERE 0x01 - -#define SENSE_LOCATE_FAILURE 0x80 -#define SENSE_DRIVE_ONLINE 0x40 -#define SENSE_RESERVED 0x20 -#define SENSE_RECORD_SEQUENCE_ERR 0x10 -#define SENSE_BEGINNING_OF_TAPE 0x08 -#define SENSE_WRITE_MODE 0x04 -#define SENSE_WRITE_PROTECT 0x02 -#define SENSE_NOT_CAPABLE 0x01 - -#define SENSE_CHANNEL_ADAPTER_CODE 0xE0 -#define SENSE_CHANNEL_ADAPTER_LOC 0x10 -#define SENSE_REPORTING_CU 0x08 -#define SENSE_AUTOMATIC_LOADER 0x04 -#define SENSE_TAPE_SYNC_MODE 0x02 -#define SENSE_TAPE_POSITIONING 0x01 - -typedef struct _tape34xx_disc_data_t { - __u8 modeset_byte; -} tape34xx_disc_data_t __attribute__ ((packed, aligned(8))); - -/* discipline functions */ -int tape34xx_ioctl_overload (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -ccw_req_t * tape34xx_write_block (const char *data, size_t count, tape_info_t * ti); -void tape34xx_free_write_block (ccw_req_t * cqr, tape_info_t * ti); -ccw_req_t * tape34xx_read_block (const char *data, size_t count, tape_info_t * ti); -void tape34xx_free_read_block (ccw_req_t * cqr, tape_info_t * ti); -void tape34xx_clear_read_block (ccw_req_t * cqr, tape_info_t * ti); -ccw_req_t * tape34xx_mtfsf (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtbsf (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtfsr (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtbsr (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtweof (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtrew (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtoffl (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtnop (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtbsfm (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtfsfm (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mteom (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mterase (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtsetdensity (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtseek (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mttell (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtsetdrvbuffer (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtlock (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtunlock (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtload (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtunload (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtcompression (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtsetpart (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtmkpart (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtiocget (tape_info_t * ti, int count); -ccw_req_t * tape34xx_mtiocpos (tape_info_t * ti, int count); -ccw_req_t * tape34xx_bread (struct request *req, tape_info_t* 
ti,int tapeblock_major); -ccw_req_t * tape34xx_bwrite (struct request *req, tape_info_t* ti,int tapeblock_major); -void tape34xx_free_bread (ccw_req_t*,struct _tape_info_t*); -void tape34xx_free_bwrite (ccw_req_t*,struct _tape_info_t*); - -/* Event handlers */ -void tape34xx_default_handler (tape_info_t * ti); -void tape34xx_unexpect_uchk_handler (tape_info_t * ti); -void tape34xx_unused_done(tape_info_t* ti); -void tape34xx_idle_done(tape_info_t* ti); -void tape34xx_block_done(tape_info_t* ti); -void tape34xx_bsf_init_done(tape_info_t* ti); -void tape34xx_dse_init_done(tape_info_t* ti); -void tape34xx_fsf_init_done(tape_info_t* ti); -void tape34xx_bsb_init_done(tape_info_t* ti); -void tape34xx_fsb_init_done(tape_info_t* ti); -void tape34xx_lbl_init_done(tape_info_t* ti); -void tape34xx_nop_init_done(tape_info_t* ti); -void tape34xx_rfo_init_done(tape_info_t* ti); -void tape34xx_rbi_init_done(tape_info_t* ti); -void tape34xx_rew_init_done(tape_info_t* ti); -void tape34xx_rew_release_init_done(tape_info_t* ti); -void tape34xx_run_init_done(tape_info_t* ti); -void tape34xx_wri_init_done(tape_info_t* ti); -void tape34xx_wtm_init_done(tape_info_t* ti); - -extern void schedule_tapeblock_exec_IO (tape_info_t *ti); - -// the error recovery stuff: -void tape34xx_error_recovery (tape_info_t* ti); -void tape34xx_error_recovery_has_failed (tape_info_t* ti,int error_id); -void tape34xx_error_recovery_succeded(tape_info_t* ti); -void tape34xx_error_recovery_do_retry(tape_info_t* ti); -void tape34xx_error_recovery_read_opposite (tape_info_t* ti); -void tape34xx_error_recovery_HWBUG (tape_info_t* ti,int condno); -#endif // _TAPE34XX_H diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape3590.c linux.22-ac2/drivers/s390/char/tape3590.c --- linux.vanilla/drivers/s390/char/tape3590.c 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape3590.c 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -// tbd diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape3590.h linux.22-ac2/drivers/s390/char/tape3590.h --- linux.vanilla/drivers/s390/char/tape3590.h 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape3590.h 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -// tbd diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape_block.c linux.22-ac2/drivers/s390/char/tape_block.c --- linux.vanilla/drivers/s390/char/tape_block.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape_block.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,676 @@ +/* + * drivers/s390/char/tape_block.c + * block device frontend for tape device driver + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Tuan Ngo-Anh + * Martin Schwidefsky + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "tape.h" +#include "tape_std.h" + +#define PRINTK_HEADER "TBLOCK:" + +#define TAPEBLOCK_DEVFSMODE 0060644 /* brwxrw-rw- */ +#define TAPEBLOCK_MAX_SEC 100 +#define TAPEBLOCK_MIN_REQUEUE 3 + +/* + * file operation structure for tape block frontend + */ +static int tapeblock_open(struct inode *, struct file *); +static int tapeblock_release(struct inode *, struct file *); +static int tapeblock_ioctl( + struct inode *, struct file *, unsigned int, unsigned long); + +static struct block_device_operations 
tapeblock_bdops = { + .owner = THIS_MODULE, + .open = tapeblock_open, + .release = tapeblock_release, + .ioctl = tapeblock_ioctl, +}; + +int tapeblock_major = 0; + +/* + * Some helper inlines + */ +static inline int tapeblock_size(int minor) { + return blk_size[tapeblock_major][minor]; +} +static inline int tapeblock_ssize(int minor) { + return blksize_size[tapeblock_major][minor]; +} +static inline int tapeblock_hw_ssize(int minor) { + return hardsect_size[tapeblock_major][minor]; +} + +/* + * Post finished request. + */ +static inline void +tapeblock_end_request(struct request *req, int uptodate) +{ + if (end_that_request_first(req, uptodate, "tBLK")) + BUG(); + end_that_request_last(req); +} + +static void +__tapeblock_end_request(struct tape_request *ccw_req, void *data) +{ + struct tape_device *device; + struct request *req; + + device = ccw_req->device; + req = (struct request *) data; + if(!device || !req) + BUG(); + + tapeblock_end_request(req, ccw_req->rc == 0); + if (ccw_req->rc == 0) + /* Update position. */ + device->blk_data.block_position = + (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B; + else + /* We lost the position information due to an error. */ + device->blk_data.block_position = -1; + + device->discipline->free_bread(ccw_req); + + if (!list_empty(&device->req_queue) || + !list_empty(&device->blk_data.request_queue.queue_head)) + tasklet_schedule(&device->blk_data.tasklet); +} + +/* + * Fetch requests from block device queue. + */ +static inline void +__tape_process_blk_queue(struct tape_device *device, struct list_head *new_req) +{ + request_queue_t *queue; + struct list_head *l; + struct request *req; + struct tape_request *ccw_req; + int nr_queued; + + if (!TAPE_BLOCKDEV(device)) { + PRINT_WARN("can't process queue. Not a tape blockdevice.\n"); + return; + } + + nr_queued = 0; + queue = &device->blk_data.request_queue; + + /* Count number of requests on ccw queue. */ + list_for_each(l, &device->req_queue) + nr_queued++; + + while ( + !queue->plugged && + !list_empty(&queue->queue_head) && + nr_queued < TAPEBLOCK_MIN_REQUEUE + ) { + /* tape_block_next_request(queue); */ + req = blkdev_entry_next_request(&queue->queue_head); + + if (req->cmd == WRITE) { + DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); + blkdev_dequeue_request(req); + tapeblock_end_request(req, 0); + continue; + } + ccw_req = device->discipline->bread(device, req); + if (IS_ERR(ccw_req)) { + if (PTR_ERR(ccw_req) == -ENOMEM) + break; /* don't try again */ + DBF_EVENT(1, "TBLOCK: bread failed\n"); + blkdev_dequeue_request(req); + tapeblock_end_request(req, 0); + continue; + } + blkdev_dequeue_request(req); + ccw_req->callback = __tapeblock_end_request; + ccw_req->callback_data = (void *) req; + ccw_req->retries = TAPEBLOCK_RETRIES; + + list_add_tail(&ccw_req->list, new_req); + nr_queued++; + } +} + +/* + * Feed requests to the tape device. + */ +static inline int +tape_queue_requests(struct tape_device *device, struct list_head *new_req) +{ + struct list_head *l, *n; + struct tape_request *ccw_req; + struct request *req; + int rc, fail; + + fail = 0; + list_for_each_safe(l, n, new_req) { + ccw_req = list_entry(l, struct tape_request, list); + list_del(&ccw_req->list); + + rc = tape_do_io_async(device, ccw_req); + if (rc) { + /* + * Start/enqueueing failed. No retries in + * this case. 
+ */ + DBF_EVENT(5, "enqueueing failed\n"); + req = (struct request *) ccw_req->callback_data; + tapeblock_end_request(req, 0); + device->discipline->free_bread(ccw_req); + fail = 1; + } + } + return fail; +} + +/* + * Tape request queue function. Called from ll_rw_blk.c + */ +static void +tapeblock_request_fn(request_queue_t *queue) +{ + struct list_head new_req; + struct tape_device *device; + + device = (struct tape_device *) queue->queuedata; + if(device == NULL) + BUG(); + + while (!list_empty(&queue->queue_head)) { + INIT_LIST_HEAD(&new_req); + spin_lock(get_irq_lock(device->devinfo.irq)); + __tape_process_blk_queue(device, &new_req); + spin_unlock(get_irq_lock(device->devinfo.irq)); + /* + * Now queue the new request to the tape. This needs to be + * done without the device lock held. + */ + if (tape_queue_requests(device, &new_req) == 0) + /* All requests queued. Thats enough for now. */ + break; + } +} + +/* + * Returns block frontend request queue for a tape device. + * FIXME: on shutdown make sure ll_rw_blk can put requests on a dead queue. + */ +static request_queue_t * +tapeblock_get_queue(kdev_t kdev) +{ + struct tape_device *device; + request_queue_t *queue; + + if (major(kdev) != tapeblock_major) + return NULL; + + device = tape_get_device(minor(kdev) >> 1); + if (IS_ERR(device)) + return NULL; + + queue = &device->blk_data.request_queue; + tape_put_device(device); + return queue; +} + +/* + * Acquire the device lock and process queues for the device. + */ +static void +tapeblock_tasklet(unsigned long data) +{ + struct list_head new_req; + struct tape_device *device; + + device = (struct tape_device *) data; + while (!list_empty(&device->blk_data.request_queue.queue_head)) { + INIT_LIST_HEAD(&new_req); + spin_lock_irq(get_irq_lock(device->devinfo.irq)); + __tape_process_blk_queue(device, &new_req); + spin_unlock_irq(get_irq_lock(device->devinfo.irq)); + /* + * Now queue the new request to the tape. This needs to be + * done without the device lock held. + */ + if (tape_queue_requests(device, &new_req) == 0) + /* All requests queued. Thats enough for now. */ + break; + } +} + +/* + * Create block directory with disc entries + */ +static int +tapeblock_mkdevfstree (struct tape_device *device) +{ +#ifdef CONFIG_DEVFS_FS + device->blk_data.devfs_block_dir = + devfs_mk_dir (device->devfs_dir, "block", device); + if (device->blk_data.devfs_block_dir == 0) + return -ENOENT; + device->blk_data.devfs_disc = + devfs_register(device->blk_data.devfs_block_dir, + "disc", DEVFS_FL_DEFAULT, + tapeblock_major, device->first_minor, + TAPEBLOCK_DEVFSMODE, &tapeblock_bdops, device); + if (device->blk_data.devfs_disc == NULL) { + devfs_unregister(device->blk_data.devfs_block_dir); + return -ENOENT; + } +#endif + return 0; +} + +/* + * Remove devfs entries + */ +static void +tapeblock_rmdevfstree (struct tape_device *device) +{ +#ifdef CONFIG_DEVFS_FS + if (device->blk_data.devfs_disc) + devfs_unregister(device->blk_data.devfs_disc); + if (device->blk_data.devfs_block_dir) + devfs_unregister(device->blk_data.devfs_block_dir); +#endif +} + +/* + * This function is called for every new tapedevice + */ +int +tapeblock_setup_device(struct tape_device * device) +{ + int rc; + + /* FIXME: We should be able to sense the sector size */ + blk_size[tapeblock_major][device->first_minor] = 0; + blksize_size[tapeblock_major][device->first_minor] = + hardsect_size[tapeblock_major][device->first_minor] = + TAPEBLOCK_HSEC_SIZE; + + /* Create devfs entries. 
*/ + rc = tapeblock_mkdevfstree(device); + if (rc) + return rc; + + /* Setup request queue and initialize gendisk for this device. */ + device->blk_data.request_queue.queuedata = tape_clone_device(device); + + + /* As long as the tasklet is running it may access the device */ + tasklet_init(&device->blk_data.tasklet, tapeblock_tasklet, + (unsigned long) tape_clone_device(device)); + + blk_init_queue(&device->blk_data.request_queue, tapeblock_request_fn); + blk_queue_headactive(&device->blk_data.request_queue, 0); + + tape_hotplug_event(device, tapeblock_major, TAPE_HOTPLUG_BLOCK_ADD); + + set_device_ro(mk_kdev(tapeblock_major, device->first_minor), 1); + return 0; +} + +void +tapeblock_cleanup_device(struct tape_device *device) +{ + /* Prevent further requests to the block request queue. */ + blk_size[tapeblock_major][device->first_minor] = 0; + + tapeblock_rmdevfstree(device); + + /* With the tasklet gone the reference is gone as well. */ + tasklet_kill(&device->blk_data.tasklet); + tape_put_device(device); + + /* Cleanup the request queue. */ + blk_cleanup_queue(&device->blk_data.request_queue); + + /* Remove reference in private data */ + device->blk_data.request_queue.queuedata = NULL; + tape_put_device(device); + + tape_hotplug_event(device, tapeblock_major, TAPE_HOTPLUG_BLOCK_REMOVE); +} + +/* + * Detect number of blocks of the tape. + * FIXME: can we extent this to detect the blocks size as well ? + * FIXME: (minor) On 34xx the block id also contains a format specification + * which is unknown before the block was skipped or read at + * least once. So detection is sometimes done a second time. + */ +int tapeblock_mediumdetect(struct tape_device *device) +{ + unsigned int bid; + unsigned int nr_of_blks; + int rc; + + /* + * Identify the first records format + */ + if((rc = tape_mtop(device, MTFSR, 1)) < 0) + return rc; + if((rc = tape_mtop(device, MTBSR, 1)) < 0) + return rc; + + device->blk_data.block_position = 0; + if (tape_std_read_block_id(device, &bid)) { + rc = tape_mtop(device, MTREW, 1); + if (rc) { + device->blk_data.block_position = -1; + blk_size[tapeblock_major][device->first_minor] = 0; + return rc; + } + bid = 0; + } + + if(bid != device->blk_data.start_block_id) { + device->blk_data.start_block_id = bid; + blk_size[tapeblock_major][device->first_minor] = 0; + } + + if(blk_size[tapeblock_major][device->first_minor] > 0) + return 0; + + PRINT_INFO("Detecting media size...\n"); + blk_size[tapeblock_major][device->first_minor] = 0; + + rc = tape_mtop(device, MTFSF, 1); + if (rc) + return rc; + + rc = tape_mtop(device, MTTELL, 1); + if (rc < 0) + return rc; + nr_of_blks = rc - 1; /* don't count FM */ + + if (device->blk_data.start_block_id) { + rc = tape_std_seek_block_id( + device, + device->blk_data.start_block_id); + } else { + rc = tape_mtop(device, MTREW, 1); + } + if (rc) + return rc; + + rc = tape_mtop(device, MTTELL, 1); + if (rc < 0) + return rc; + + /* Don't include start offset */ + nr_of_blks -= rc; + + PRINT_INFO("Found %i blocks on media\n", nr_of_blks); + if (tapeblock_hw_ssize(device->first_minor) > 1024) { + nr_of_blks *= tapeblock_hw_ssize(device->first_minor) / 1024; + } else { + nr_of_blks /= 1024 / tapeblock_hw_ssize(device->first_minor); + } + PRINT_INFO("Tape block device size is %i KB\n", nr_of_blks); + blk_size[tapeblock_major][device->first_minor] = nr_of_blks; + + return 0; +} + +/* + * This function has to be called whenever a new medium has been inserted + * into the drive. 
+ */ +void +tapeblock_medium_change(struct tape_device *device) { + device->blk_data.start_block_id = 0; + blk_size[tapeblock_major][device->first_minor] = 0; +} + +/* + * Block frontend tape device open function. + */ +int +tapeblock_open(struct inode *inode, struct file *filp) { + struct tape_device *device; + int rc; + + if (major(filp->f_dentry->d_inode->i_rdev) != tapeblock_major) + return -ENODEV; + + MOD_INC_USE_COUNT; + device = tape_get_device(minor(filp->f_dentry->d_inode->i_rdev) >> 1); + if (IS_ERR(device)) { + MOD_DEC_USE_COUNT; + return PTR_ERR(device); + } + + DBF_EVENT(6, "TBLOCK: open: %x\n", device->first_minor); + + if(device->required_tapemarks) { + DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); + PRINT_ERR("TBLOCK: Refusing to open tape with missing" + " end of file marks.\n"); + tape_put_device(device); + MOD_DEC_USE_COUNT; + return -EPERM; + } + + rc = tape_open(device); + if (rc == 0) { + rc = tape_assign(device, TAPE_STATUS_ASSIGN_A); + if (rc == 0) { + rc = tapeblock_mediumdetect(device); + if (rc == 0) { + TAPE_SET_STATE(device, TAPE_STATUS_BLOCKDEV); + tape_put_device(device); + return 0; + } + tape_unassign(device, TAPE_STATUS_ASSIGN_A); + } + tape_release(device); + } + tape_put_device(device); + MOD_DEC_USE_COUNT; + return rc; +} + +/* + * Block frontend tape device release function. + */ +int +tapeblock_release(struct inode *inode, struct file *filp) { + struct tape_device *device; + + device = tape_get_device(minor(inode->i_rdev) >> 1); + + DBF_EVENT(4, "TBLOCK: release %i\n", device->first_minor); + + /* Remove all buffers at device close. */ + /* FIXME: can we do that a tape unload ? */ + invalidate_buffers(inode->i_rdev); + + if (device->blk_data.start_block_id) { + tape_std_seek_block_id(device, device->blk_data.start_block_id); + } else { + tape_mtop(device, MTREW, 1); + } + TAPE_CLEAR_STATE(device, TAPE_STATUS_BLOCKDEV); + tape_unassign(device, TAPE_STATUS_ASSIGN_A); + tape_release(device); + tape_put_device(device); + MOD_DEC_USE_COUNT; + + return 0; +} + +int +tapeblock_ioctl( + struct inode *inode, + struct file *file, + unsigned int command, + unsigned long arg +) { + int rc = 0; + int minor = minor(inode->i_rdev); + + DBF_EVENT(6, "tapeblock_ioctl(%x)\n", command); + + switch(command) { + case BLKSSZGET: + if(put_user(tapeblock_ssize(minor), (int *) arg)) + rc = -EFAULT; + break; + case BLKGETSIZE: + if( + put_user( + tapeblock_size(minor), + (unsigned long *) arg + ) + ) + rc = -EFAULT; + break; +#ifdef BLKGETSIZE64 + case BLKGETSIZE64: + if(put_user(tapeblock_size(minor) << 9, (u64 *) arg)) + rc = -EFAULT; + break; +#endif + case CDROMMULTISESSION: + case CDROMREADTOCENTRY: + /* No message for these... */ + rc = -EINVAL; + break; + default: + PRINT_WARN("invalid ioctl 0x%x\n", command); + rc = -EINVAL; + } + return rc; +} + +/* + * Initialize block device frontend. 
+ */ +int +tapeblock_init(void) +{ + int rc; + + /* Register the tape major number to the kernel */ +#ifdef CONFIG_DEVFS_FS + if (tapeblock_major == 0) + tapeblock_major = devfs_alloc_major(DEVFS_SPECIAL_BLK); +#endif + rc = register_blkdev(tapeblock_major, "tBLK", &tapeblock_bdops); + if (rc < 0) { + PRINT_ERR("can't get major %d for block device\n", + tapeblock_major); + return rc; + } + if(tapeblock_major == 0) + tapeblock_major = rc; + + /* Allocate memory for kernel block device tables */ + rc = -ENOMEM; + blk_size[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL); + if(blk_size[tapeblock_major] == NULL) + goto tapeblock_init_fail; + memset(blk_size[tapeblock_major], 0, 256*sizeof(int)); + blksize_size[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL); + if(blksize_size[tapeblock_major] == NULL) + goto tapeblock_init_fail; + memset(blksize_size[tapeblock_major], 0, 256*sizeof(int)); + hardsect_size[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL); + if(hardsect_size[tapeblock_major] == NULL) + goto tapeblock_init_fail; + memset(hardsect_size[tapeblock_major], 0, 256*sizeof(int)); + max_sectors[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL); + if(max_sectors[tapeblock_major] == NULL) + goto tapeblock_init_fail; + memset(max_sectors[tapeblock_major], 0, 256*sizeof(int)); + + blk_dev[tapeblock_major].queue = tapeblock_get_queue; + + PRINT_INFO("tape gets major %d for block device\n", tapeblock_major); + DBF_EVENT(3, "TBLOCK: major = %d\n", tapeblock_major); + DBF_EVENT(3, "TBLOCK: init ok\n"); + + return 0; + +tapeblock_init_fail: + if(tapeblock_major > 0) { + if(blk_size[tapeblock_major]) { + kfree(blk_size[tapeblock_major]); + blk_size[tapeblock_major] = NULL; + } + if(blksize_size[tapeblock_major]) { + kfree(blksize_size[tapeblock_major]); + blksize_size[tapeblock_major] = NULL; + } + if(hardsect_size[tapeblock_major]) { + kfree(hardsect_size[tapeblock_major]); + hardsect_size[tapeblock_major] = NULL; + } + if(max_sectors[tapeblock_major]) { + kfree(max_sectors[tapeblock_major]); + max_sectors[tapeblock_major] = NULL; + } +#ifdef CONFIG_DEVFS_FS + devfs_unregister_blkdev(tapeblock_major, "tBLK"); +#else + unregister_blkdev(tapeblock_major, "tBLK"); +#endif + tapeblock_major = -1; + } + + DBF_EVENT(3, "TBLOCK: init failed(%d)\n", rc); + return rc; +} + +/* + * Deregister major for block device frontend + */ +void +tapeblock_exit(void) +{ + if(blk_size[tapeblock_major]) { + kfree(blk_size[tapeblock_major]); + blk_size[tapeblock_major] = NULL; + } + if(blksize_size[tapeblock_major]) { + kfree(blksize_size[tapeblock_major]); + blksize_size[tapeblock_major] = NULL; + } + if(hardsect_size[tapeblock_major]) { + kfree(hardsect_size[tapeblock_major]); + hardsect_size[tapeblock_major] = NULL; + } + if(max_sectors[tapeblock_major]) { + kfree(max_sectors[tapeblock_major]); + max_sectors[tapeblock_major] = NULL; + } + blk_dev[tapeblock_major].queue = NULL; + unregister_blkdev(tapeblock_major, "tBLK"); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tapeblock.c linux.22-ac2/drivers/s390/char/tapeblock.c --- linux.vanilla/drivers/s390/char/tapeblock.c 2001-10-25 21:58:35.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tapeblock.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,593 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tapeblock.c - * block device frontend for tape device driver - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - 
* Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include -#include -#include -#include -#include /* CCW allocations */ -#include -#include -#include -#ifdef MODULE -#define __NO_VERSION__ -#include -#endif -#include "tape.h" -#include "tapeblock.h" - -#define PRINTK_HEADER "TBLOCK:" - -/* - * file operation structure for tape devices - */ -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) -static struct block_device_operations tapeblock_fops = { -#else -static struct file_operations tapeblock_fops = { -#endif - owner : THIS_MODULE, - open : tapeblock_open, /* open */ - release : tapeblock_release, /* release */ - }; - -int tapeblock_major = 0; - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) -static void tape_request_fn (request_queue_t * queue); -#else -static void tape_request_fn (void); -#endif - -static request_queue_t* tapeblock_getqueue (kdev_t kdev); - -#ifdef CONFIG_DEVFS_FS -void -tapeblock_mkdevfstree (tape_info_t* ti) { - ti->devfs_block_dir=devfs_mk_dir (ti->devfs_dir, "block", ti); - ti->devfs_disc=devfs_register(ti->devfs_block_dir, "disc",DEVFS_FL_DEFAULT, - tapeblock_major, ti->blk_minor, - TAPEBLOCK_DEFAULTMODE, &tapeblock_fops, ti); -} - -void -tapeblock_rmdevfstree (tape_info_t* ti) { - devfs_unregister(ti->devfs_disc); - devfs_unregister(ti->devfs_block_dir); -} -#endif - -void -tapeblock_setup(tape_info_t* ti) { - blk_size[tapeblock_major][ti->blk_minor]=0; // this will be detected - blksize_size[tapeblock_major][ti->blk_minor]=2048; // blocks are 2k by default. - hardsect_size[tapeblock_major][ti->blk_minor]=512; - blk_init_queue (&ti->request_queue, tape_request_fn); - blk_queue_headactive (&ti->request_queue, 0); -#ifdef CONFIG_DEVFS_FS - tapeblock_mkdevfstree(ti); -#endif -} - -int -tapeblock_init(void) { - int result; - tape_frontend_t* blkfront,*temp; - tape_info_t* ti; - - tape_init(); - /* Register the tape major number to the kernel */ -#ifdef CONFIG_DEVFS_FS - result = devfs_register_blkdev(tapeblock_major, "tBLK", &tapeblock_fops); -#else - result = register_blkdev(tapeblock_major, "tBLK", &tapeblock_fops); -#endif - if (result < 0) { - PRINT_WARN(KERN_ERR "tape: can't get major %d for block device\n", tapeblock_major); - panic ("cannot get major number for tape block device"); - } - if (tapeblock_major == 0) tapeblock_major = result; /* accept dynamic major number*/ - INIT_BLK_DEV(tapeblock_major,tape_request_fn,tapeblock_getqueue,NULL); - read_ahead[tapeblock_major]=TAPEBLOCK_READAHEAD; - PRINT_WARN(KERN_ERR " tape gets major %d for block device\n", result); - blk_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC); - memset(blk_size[tapeblock_major],0,256*sizeof(int)); - blksize_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC); - memset(blksize_size[tapeblock_major],0,256*sizeof(int)); - hardsect_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC); - memset(hardsect_size[tapeblock_major],0,256*sizeof(int)); - max_sectors[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC); - memset(max_sectors[tapeblock_major],0,256*sizeof(int)); - blkfront = kmalloc(sizeof(tape_frontend_t),GFP_KERNEL); - if (blkfront==NULL) panic ("no mem for tape block device structure"); - blkfront->device_setup=tapeblock_setup; -#ifdef CONFIG_DEVFS_FS - blkfront->mkdevfstree = tapeblock_mkdevfstree; - blkfront->rmdevfstree = tapeblock_rmdevfstree; -#endif - blkfront->next=NULL; - if 
(first_frontend==NULL) { - first_frontend=blkfront; - } else { - temp=first_frontend; - while (temp->next!=NULL) - temp=temp->next; - temp->next=blkfront; - } - ti=first_tape_info; - while (ti!=NULL) { - tapeblock_setup(ti); - ti=ti->next; - } - return 0; -} - - -void -tapeblock_uninit(void) { - unregister_blkdev(tapeblock_major, "tBLK"); -} - -int -tapeblock_open(struct inode *inode, struct file *filp) { - tape_info_t *ti; - kdev_t dev; - int rc; - long lockflags; - - inode = filp->f_dentry->d_inode; - ti = first_tape_info; - while ((ti != NULL) && (ti->blk_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if (ti == NULL) - return -ENODEV; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:open:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) != TS_UNUSED) { - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:dbusy"); -#endif - return -EBUSY; - } - tapestate_set (ti, TS_IDLE); - ti->position=-1; - - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - rc=tapeblock_mediumdetect(ti); - if (rc) { - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_UNUSED); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return rc; // in case of errors, we don't have a size of the medium - } - dev = MKDEV (tapeblock_major, MINOR (inode->i_rdev)); /* Get the device */ - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->blk_filp = filp; - filp->private_data = ti; /* save the dev.info for later reference */ - ti->cqr=NULL; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - - return 0; -} - -int -tapeblock_release(struct inode *inode, struct file *filp) { - long lockflags; - tape_info_t *ti,*lastti; - ti = first_tape_info; - while ((ti != NULL) && (ti->blk_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if ((ti != NULL) && (tapestate_get (ti) == TS_NOT_OPER)) { - if (ti==first_tape_info) { - first_tape_info=ti->next; - } else { - lastti=first_tape_info; - while (lastti->next!=ti) lastti=lastti->next; - lastti->next=ti->next; - } - kfree(ti); - return 0; - } - if ((ti == NULL) || (tapestate_get (ti) != TS_IDLE)) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:notidle!"); -#endif - return -ENXIO; /* error in tape_release */ - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:release:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_UNUSED); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - invalidate_buffers(inode->i_rdev); - return 0; -} - -static void -tapeblock_end_request(tape_info_t* ti) { - struct buffer_head *bh; - int uptodate; - if ((tapestate_get(ti)!=TS_FAILED) && - (tapestate_get(ti)!=TS_DONE)) - BUG(); // A request has to be completed to end it - uptodate=(tapestate_get(ti)==TS_DONE); // is the buffer up to date? 
-#ifdef TAPE_DEBUG - if (uptodate) { - debug_text_event (tape_debug_area,6,"b:done:"); - debug_int_event (tape_debug_area,6,(long)ti->cqr); - } else { - debug_text_event (tape_debug_area,3,"b:failed:"); - debug_int_event (tape_debug_area,3,(long)ti->cqr); - } -#endif - // now inform ll_rw_block about a request status - while ((bh = ti->current_request->bh) != NULL) { - ti->current_request->bh = bh->b_reqnext; - bh->b_reqnext = NULL; - bh->b_end_io (bh, uptodate); - } - if (!end_that_request_first (ti->current_request, uptodate, "tBLK")) { -#ifndef DEVICE_NO_RANDOM - add_blkdev_randomness (MAJOR (ti->current_request->rq_dev)); -#endif - end_that_request_last (ti->current_request); - } - ti->discipline->free_bread(ti->cqr,ti); - ti->cqr=NULL; - ti->current_request=NULL; - if (tapestate_get(ti)!=TS_NOT_OPER) tapestate_set(ti,TS_IDLE); - return; -} - -static void -tapeblock_exec_IO (tape_info_t* ti) { - int rc; - struct request* req; - if (ti->cqr) { // process done/failed request - while ((tapestate_get(ti)==TS_FAILED) && - ti->blk_retries>0) { - ti->blk_retries--; - ti->position=-1; - tapestate_set(ti,TS_BLOCK_INIT); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:retryreq:"); - debug_int_event (tape_debug_area,3,(long)ti->cqr); -#endif - rc = do_IO (ti->devinfo.irq, ti->cqr->cpaddr, (unsigned long) ti->cqr, - 0x00, ti->cqr->options); - if (rc) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:doIOfail:"); - debug_int_event (tape_debug_area,3,(long)ti->cqr); -#endif - continue; // one retry lost 'cause doIO failed - } - return; - } - tapeblock_end_request (ti); // check state, inform user, free mem, dev=idl - } - if (ti->cqr!=NULL) BUG(); // tape should be idle now, request should be freed! - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - return; - } -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - if (list_empty (&ti->request_queue.queue_head)) { -#else - if (ti->request_queue==NULL) { -#endif - // nothing more to do or device has dissapeared;) -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:Qempty"); -#endif - tapestate_set(ti,TS_IDLE); - return; - } - // queue is not empty, fetch a request and start IO! - req=ti->current_request=tape_next_request(&ti->request_queue); - if (req==NULL) { - BUG(); // Yo. The queue was not reported empy, but no request found. This is _bad_. - } - if (req->cmd!=READ) { // we only support reading - tapestate_set(ti,TS_FAILED); - tapeblock_end_request (ti); // check state, inform user, free mem, dev=idl - tapestate_set(ti,TS_BLOCK_INIT); - schedule_tapeblock_exec_IO(ti); - return; - } - ti->cqr=ti->discipline->bread(req,ti,tapeblock_major); //build channel program from request - if (!ti->cqr) { - // ccw generation failed. we try again later. -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:cqrNULL"); -#endif - schedule_tapeblock_exec_IO(ti); - ti->current_request=NULL; - return; - } - ti->blk_retries = TAPEBLOCK_RETRIES; - rc= do_IO (ti->devinfo.irq, ti->cqr->cpaddr, - (unsigned long) ti->cqr, 0x00, ti->cqr->options); - if (rc) { - // okay. ssch failed. we try later. -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:doIOfail"); -#endif - ti->discipline->free_bread(ti->cqr,ti); - ti->cqr=NULL; - ti->current_request=NULL; - schedule_tapeblock_exec_IO(ti); - return; - } - // our request is in IO. 
we remove it from the queue and exit - tape_dequeue_request (&ti->request_queue,req); -} - -static void -do_tape_request (request_queue_t * queue) { - tape_info_t* ti; - long lockflags; - for (ti=first_tape_info; - ((ti!=NULL) && ((&ti->request_queue)!=queue)); - ti=ti->next); - if (ti==NULL) BUG(); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get(ti)!=TS_IDLE) { - s390irq_spin_unlock_irqrestore(ti->devinfo.irq,lockflags); - return; - } - if (tapestate_get(ti)!=TS_IDLE) BUG(); - tapestate_set(ti,TS_BLOCK_INIT); - tapeblock_exec_IO(ti); - s390irq_spin_unlock_irqrestore(ti->devinfo.irq,lockflags); -} - -static void -run_tapeblock_exec_IO (tape_info_t* ti) { - long flags_390irq,flags_ior; - spin_lock_irqsave (&io_request_lock, flags_ior); - s390irq_spin_lock_irqsave(ti->devinfo.irq,flags_390irq); - atomic_set(&ti->bh_scheduled,0); - tapeblock_exec_IO(ti); - s390irq_spin_unlock_irqrestore(ti->devinfo.irq,flags_390irq); - spin_unlock_irqrestore (&io_request_lock, flags_ior); -} - -void -schedule_tapeblock_exec_IO (tape_info_t *ti) -{ - /* Protect against rescheduling, when already running */ - if (atomic_compare_and_swap(0,1,&ti->bh_scheduled)) { - return; - } -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - INIT_LIST_HEAD(&ti->bh_tq.list); -#endif - ti->bh_tq.sync = 0; - ti->bh_tq.routine = (void *) (void *) run_tapeblock_exec_IO; - ti->bh_tq.data = ti; - - queue_task (&ti->bh_tq, &tq_immediate); - mark_bh (IMMEDIATE_BH); - return; -} - -/* wrappers around do_tape_request for different kernel versions */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98)) -static void tape_request_fn (void) { - tape_info_t* ti=first_tape_info; - while (ti!=NULL) { - do_tape_request(&ti->request_queue); - ti=ti->next; - } -} -#else -static void tape_request_fn (request_queue_t* queue) { - do_tape_request(queue); -} -#endif - -static request_queue_t* tapeblock_getqueue (kdev_t kdev) { - tape_info_t* ti=first_tape_info; - while ((ti!=NULL) && (MINOR(kdev)!=ti->blk_minor)) - ti=ti->next; - if (ti!=NULL) return &ti->request_queue; - return NULL; -} - -int tapeblock_mediumdetect(tape_info_t* ti) { - ccw_req_t* cqr; - int losize=1,hisize=1,rc; - long lockflags; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"b:medDet"); -#endif - PRINT_WARN("Detecting media size. 
This will take _long_, so get yourself a coffee...\n"); - while (1) { //is interruped by break - hisize=hisize << 1; // try twice the size tested before - cqr=ti->discipline->mtseek (ti, hisize); - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (rc) return -EIO; - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - if (ti->kernbuf) { - kfree (ti->kernbuf); - ti->kernbuf=NULL; - } - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - break; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - losize=hisize; - } - cqr = ti->discipline->mtrew (ti, 1); - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - while (losize!=hisize) { - cqr=ti->discipline->mtseek (ti, (hisize+losize)/2+1); - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (rc) return -EIO; - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - if (ti->kernbuf) { - kfree (ti->kernbuf); - ti->kernbuf=NULL; - } - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, 
lockflags); - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - hisize=(hisize+losize)/2; - cqr = ti->discipline->mtrew (ti, 1); - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"b:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - continue; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - losize=(hisize+losize)/2+1; - } - blk_size[tapeblock_major][ti->blk_minor]=(losize)*(blksize_size[tapeblock_major][ti->blk_minor]/1024); - return 0; -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tapeblock.h linux.22-ac2/drivers/s390/char/tapeblock.h --- linux.vanilla/drivers/s390/char/tapeblock.h 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tapeblock.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,36 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tapechar.h - * character device frontend for tape device driver - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - **************************************************************************** - */ - -#ifndef TAPEBLOCK_H -#define TAPEBLOCK_H -#include -#define PARTN_BITS 0 - -#define TAPEBLOCK_READAHEAD 30 -#define TAPEBLOCK_MAJOR 0 - -#define TAPEBLOCK_DEFAULTMODE 0060644 - -int tapeblock_open(struct inode *, struct file *); -int tapeblock_release(struct inode *, struct file *); -void tapeblock_setup(tape_info_t* ti); -void schedule_tapeblock_exec_IO (tape_info_t *ti); -int tapeblock_mediumdetect(tape_info_t* ti); -#ifdef CONFIG_DEVFS_FS -void tapeblock_mkdevfstree (tape_info_t* ti); -#endif -int tapeblock_init (void); -void tapeblock_uninit (void); -#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape.c linux.22-ac2/drivers/s390/char/tape.c --- linux.vanilla/drivers/s390/char/tape.c 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,1120 +0,0 @@ - -/*********************************************************************** - * drivers/s390/char/tape.c - * 
tape device driver for S/390 and zSeries tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - *********************************************************************** - */ - -#include "tapedefs.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef MODULE -#include -#endif -#include -#ifdef CONFIG_S390_TAPE_DYNAMIC -#include -#endif -#include "tape.h" -#ifdef CONFIG_S390_TAPE_3490 -#include "tape3490.h" -#endif -#ifdef CONFIG_S390_TAPE_3480 -#include "tape3480.h" -#endif -#ifdef CONFIG_S390_TAPE_BLOCK -#include "tapeblock.h" -#endif -#ifdef CONFIG_S390_TAPE_CHAR -#include "tapechar.h" -#endif -#ifdef CONFIG_PROC_FS -#include -#endif -#define PRINTK_HEADER "T390:" - - -/* state handling routines */ -inline void tapestate_set (tape_info_t * ti, int newstate); -inline int tapestate_get (tape_info_t * ti); -void tapestate_event (tape_info_t * ti, int event); - -/* our globals */ -tape_info_t *first_tape_info = NULL; -tape_discipline_t *first_discipline = NULL; -tape_frontend_t *first_frontend = NULL; -devreg_t* tape_devreg[128]; -int devregct=0; - -#ifdef TAPE_DEBUG -debug_info_t *tape_debug_area = NULL; -#endif - -char* state_verbose[TS_SIZE]={ - "TS_UNUSED", "TS_IDLE", "TS_DONE", "TS_FAILED", - "TS_BLOCK_INIT", - "TS_BSB_INIT", - "TS_BSF_INIT", - "TS_DSE_INIT", - "TS_EGA_INIT", - "TS_FSB_INIT", - "TS_FSF_INIT", - "TS_LDI_INIT", - "TS_LBL_INIT", - "TS_MSE_INIT", - "TS_NOP_INIT", - "TS_RBA_INIT", - "TS_RBI_INIT", - "TS_RBU_INIT", - "TS_RBL_INIT", - "TS_RDC_INIT", - "TS_RFO_INIT", - "TS_RSD_INIT", - "TS_REW_INIT", - "TS_REW_RELEASE_INIT", - "TS_RUN_INIT", - "TS_SEN_INIT", - "TS_SID_INIT", - "TS_SNP_INIT", - "TS_SPG_INIT", - "TS_SWI_INIT", - "TS_SMR_INIT", - "TS_SYN_INIT", - "TS_TIO_INIT", - "TS_UNA_INIT", - "TS_WRI_INIT", - "TS_WTM_INIT", - "TS_NOT_OPER"}; - -char* event_verbose[TE_SIZE]= { - "TE_START", "TE_DONE", "TE_FAILED", "TE_ERROR", "TE_OTHER"}; - -/* our root devfs handle */ -#ifdef CONFIG_DEVFS_FS -devfs_handle_t tape_devfs_root_entry; - -inline void -tape_mkdevfsroots (tape_info_t* ti) -{ - char devno [5]; - sprintf (devno,"%04x",ti->devinfo.devno); - ti->devfs_dir=devfs_mk_dir (tape_devfs_root_entry, devno, ti); -} - -inline void -tape_rmdevfsroots (tape_info_t* ti) -{ - devfs_unregister (ti->devfs_dir); -} -#endif - -#ifdef CONFIG_PROC_FS -/* our proc tapedevices entry */ -static struct proc_dir_entry *tape_devices_entry; - -typedef struct { - char *data; - int len; -} tempinfo_t; - - -static int -tape_devices_open (struct inode *inode, struct file *file) -{ - int size=80; - tape_info_t* ti; - tempinfo_t* tempinfo; - char* data; - int pos=0; - tempinfo = kmalloc (sizeof(tempinfo_t),GFP_KERNEL); - if (!tempinfo) - return -ENOMEM; - for (ti=first_tape_info;ti!=NULL;ti=ti->next) - size+=80; // FIXME: Guess better! - data=vmalloc(size); - if (!data) { - kfree (tempinfo); - return -ENOMEM; - } - pos+=sprintf(data+pos,"TapeNo\tDevNo\tCuType\tCuModel\tDevType\tDevModel\tState\n"); - for (ti=first_tape_info;ti!=NULL;ti=ti->next) { - pos+=sprintf(data+pos,"%d\t%04X\t%04X\t%02X\t%04X\t%02X\t\t%s\n",ti->rew_minor/2, - ti->devinfo.devno,ti->devinfo.sid_data.cu_type, - ti->devinfo.sid_data.cu_model,ti->devinfo.sid_data.dev_type, - ti->devinfo.sid_data.dev_model,((tapestate_get(ti) >= 0) && - (tapestate_get(ti) < TS_SIZE)) ? 
- state_verbose[tapestate_get (ti)] : "TS UNKNOWN"); - } - tempinfo->len=pos; - tempinfo->data=data; - file->private_data= (void*) tempinfo; -#ifdef MODULE - MOD_INC_USE_COUNT; -#endif - return 0; -} - -static ssize_t -tape_devices_read (struct file *file, char *user_buf, size_t user_len, loff_t * offset) -{ - loff_t len; - tempinfo_t *p_info = (tempinfo_t *) file->private_data; - - if (*offset >= p_info->len) { - return 0; /* EOF */ - } else { - len = user_len<(p_info->len - *offset)?user_len:(p_info->len - *offset); - if (copy_to_user (user_buf, &(p_info->data[*offset]), len)) - return -EFAULT; - (*offset) += len; - return len; /* number of bytes "read" */ - } -} - -static int -tape_devices_release (struct inode *inode, struct file *file) -{ - int rc = 0; - tempinfo_t *p_info = (tempinfo_t *) file->private_data; - if (p_info) { - if (p_info->data) - vfree (p_info->data); - kfree (p_info); - } -#ifdef MODULE - MOD_DEC_USE_COUNT; -#endif - return rc; -} - -static struct file_operations tape_devices_file_ops = -{ - read:tape_devices_read, /* read */ - open:tape_devices_open, /* open */ - release:tape_devices_release, /* close */ -}; - -static struct inode_operations tape_devices_inode_ops = -{ -#if !(LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - default_file_ops:&tape_devices_file_ops /* file ops */ -#endif /* LINUX_IS_24 */ -}; -#endif /* CONFIG_PROC_FS */ - -/* SECTION: Parameters for tape */ -char *tape[256] = { NULL, }; - -#ifndef MODULE -static char tape_parm_string[1024] __initdata = { 0, }; -static void -tape_split_parm_string (char *str) -{ - char *tmp = str; - int count = 0; - while (tmp != NULL && *tmp != '\0') { - char *end; - int len; - end = strchr (tmp, ','); - if (end == NULL) { - len = strlen (tmp) + 1; - } else { - len = (long) end - (long) tmp + 1; - *end = '\0'; - end++; - } - tape[count] = kmalloc (len * sizeof (char), GFP_ATOMIC); - if (tape[count] == NULL) { - printk (KERN_WARNING PRINTK_HEADER - "can't store tape= parameter no %d\n", - count + 1); - break; - } - memset (tape[count], 0, len * sizeof (char)); - memcpy (tape[count], tmp, len * sizeof (char)); - count++; - tmp = end; - }; -} - -void __init -tape_parm_setup (char *str, int *ints) -{ - int len = strlen (tape_parm_string); - if (len != 0) { - strcat (tape_parm_string, ","); - } - strcat (tape_parm_string, str); -} - -int __init -tape_parm_call_setup (char *str) -{ - int dummy; - tape_parm_setup (str, &dummy); - return 1; -} - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,2,16)) -__setup("tape=", tape_parm_call_setup); -#endif /* kernel <2.2.19 */ -#endif /* not defined MODULE */ - -static inline int -tape_parm_strtoul (char *str, char **stra) -{ - char *temp = str; - int val; - if (*temp == '0') { - temp++; /* strip leading zero */ - if (*temp == 'x') - temp++; /* strip leading x */ - } - val = simple_strtoul (temp, &temp, 16); /* interpret anything as hex */ - *stra = temp; - return val; -} - -static inline devreg_t * -tape_create_devreg (int devno) -{ - devreg_t *devreg = kmalloc (sizeof (devreg_t), GFP_KERNEL); - if (devreg != NULL) { - memset (devreg, 0, sizeof (devreg_t)); - devreg->ci.devno = devno; - devreg->flag = DEVREG_TYPE_DEVNO; - devreg->oper_func = tape_oper_handler; - } - return devreg; -} - -static inline void -tape_parm_parse (char **str) -{ - char *temp; - int from, to,i,irq=0,rc,retries=0,tape_num=0; - s390_dev_info_t dinfo; - tape_info_t* ti,*tempti; - tape_discipline_t* disc; - long lockflags; - if (*str==NULL) { - /* no params present -> leave */ - return; - } - while (*str) { - temp = 
*str; - from = 0; - to = 0; - - /* turn off autodetect mode, if any range is present */ - from = tape_parm_strtoul (temp, &temp); - to = from; - if (*temp == '-') { - temp++; - to = tape_parm_strtoul (temp, &temp); - } - for (i=from;i<=to;i++) { - retries=0; - // register for attch/detach of a devno - tape_devreg[devregct]=tape_create_devreg(i); - if (tape_devreg[devregct]==NULL) { - PRINT_WARN ("Could not create devreg for devno %04x, dyn. attach for this devno deactivated.\n",i); - } else { - s390_device_register (tape_devreg[devregct++]); - } - // we are activating a device if it is present - for (irq = get_irq_first(); irq!=-ENODEV; irq=get_irq_next(irq)) { - rc = get_dev_info_by_irq (irq, &dinfo); - - disc = first_discipline; - while ((dinfo.devno == i) && (disc != NULL) && (disc->cu_type != dinfo.sid_data.cu_type)) - disc = (tape_discipline_t *) (disc->next); - if ((disc == NULL) || (rc == -ENODEV) || (i!=dinfo.devno)) { - continue; - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"det irq: "); - debug_int_event (tape_debug_area,3,irq); - debug_text_event (tape_debug_area,3,"cu: "); - debug_int_event (tape_debug_area,3,disc->cu_type); -#endif /* TAPE_DEBUG */ - PRINT_INFO ("using devno %04x with discipline %04x on irq %d as tape device %d\n",dinfo.devno,dinfo.sid_data.cu_type,irq,tape_num/2); - /* Allocate tape structure */ - ti = kmalloc (sizeof (tape_info_t), GFP_ATOMIC); - if (ti == NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"ti:no mem "); -#endif /* TAPE_DEBUG */ - PRINT_INFO ("tape: can't allocate memory for " - "tape info structure\n"); - continue; - } - memset(ti,0,sizeof(tape_info_t)); - ti->discipline = disc; - disc->tape = ti; - rc = tape_setup (ti, irq, tape_num); - if (rc) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"tsetup err"); - debug_int_exception (tape_debug_area,3,rc); -#endif /* TAPE_DEBUG */ - kfree (ti); - } else { - s390irq_spin_lock_irqsave (irq, lockflags); - if (first_tape_info == NULL) { - first_tape_info = ti; - } else { - tempti = first_tape_info; - while (tempti->next != NULL) - tempti = tempti->next; - tempti->next = ti; - } - s390irq_spin_unlock_irqrestore (irq, lockflags); - } - } - tape_num+=2; - } - str++; - } -} - - -/* SECTION: Managing wrappers for ccwcache */ - -#define TAPE_EMERGENCY_REQUESTS 16 - -static ccw_req_t *tape_emergency_req[TAPE_EMERGENCY_REQUESTS] = -{NULL,}; -static spinlock_t tape_emergency_req_lock = SPIN_LOCK_UNLOCKED; - -static void -tape_init_emergency_req (void) -{ - int i; - for (i = 0; i < TAPE_EMERGENCY_REQUESTS; i++) { - tape_emergency_req[i] = (ccw_req_t *) get_free_page (GFP_KERNEL); - } -} - -#ifdef MODULE // We only cleanup the emergency requests on module unload. 
-static void -tape_cleanup_emergency_req (void) -{ - int i; - for (i = 0; i < TAPE_EMERGENCY_REQUESTS; i++) { - if (tape_emergency_req[i]) - free_page ((long) (tape_emergency_req[i])); - else - printk (KERN_WARNING PRINTK_HEADER "losing one page for 'in-use' emergency request\n"); - } -} -#endif - -ccw_req_t * -tape_alloc_request (char *magic, int cplength, int datasize) -{ - ccw_req_t *rv = NULL; - int i; - if ((rv = ccw_alloc_request (magic, cplength, datasize)) != NULL) { - return rv; - } - if (cplength * sizeof (ccw1_t) + datasize + sizeof (ccw_req_t) > PAGE_SIZE) { - return NULL; - } - spin_lock (&tape_emergency_req_lock); - for (i = 0; i < TAPE_EMERGENCY_REQUESTS; i++) { - if (tape_emergency_req[i] != NULL) { - rv = tape_emergency_req[i]; - tape_emergency_req[i] = NULL; - } - } - spin_unlock (&tape_emergency_req_lock); - if (rv) { - memset (rv, 0, PAGE_SIZE); - rv->cache = (kmem_cache_t *) (tape_emergency_req + i); - strncpy ((char *) (&rv->magic), magic, 4); - ASCEBC ((char *) (&rv->magic), 4); - rv->cplength = cplength; - rv->datasize = datasize; - rv->data = (void *) ((long) rv + PAGE_SIZE - datasize); - rv->cpaddr = (ccw1_t *) ((long) rv + sizeof (ccw_req_t)); - } - return rv; -} - -void -tape_free_request (ccw_req_t * request) -{ - if (request->cache >= (kmem_cache_t *) tape_emergency_req && - request->cache <= (kmem_cache_t *) (tape_emergency_req + TAPE_EMERGENCY_REQUESTS)) { - *((ccw_req_t **) (request->cache)) = request; - } else { - clear_normalized_cda ((ccw1_t *) (request->cpaddr)); // avoid memory leak caused by modeset_byte - ccw_free_request (request); - } -} - -/* - * Allocate a ccw request and reserve it for tape driver - */ -inline - ccw_req_t * -tape_alloc_ccw_req (tape_info_t * ti, int cplength, int datasize) -{ - char tape_magic_id[] = "tape"; - ccw_req_t *cqr = NULL; - - if (!ti) - return NULL; - cqr = tape_alloc_request (tape_magic_id, cplength, datasize); - - if (!cqr) { -#ifdef TAPE_DEBUG - PRINT_WARN ("empty CQR generated\n"); -#endif - } - cqr->magic = TAPE_MAGIC; /* sets an identifier for tape driver */ - cqr->device = ti; /* save pointer to tape info */ - return cqr; -} - -/* - * Find the tape_info_t structure associated with irq - */ -static inline tape_info_t * -tapedev_find_info (int irq) -{ - tape_info_t *ti; - - ti = first_tape_info; - if (ti != NULL) - do { - if (ti->devinfo.irq == irq) - break; - } while ((ti = (tape_info_t *) ti->next) != NULL); - return ti; -} - -#define QUEUE_THRESHOLD 5 - -/* - * Tape interrupt routine, called from Ingo's I/O layer - */ -void -tape_irq (int irq, void *int_parm, struct pt_regs *regs) -{ - tape_info_t *ti = tapedev_find_info (irq); - - /* analyse devstat and fire event */ - if (ti->devstat.dstat & DEV_STAT_UNIT_CHECK) { - tapestate_event (ti, TE_ERROR); - } else if (ti->devstat.dstat & (DEV_STAT_DEV_END)) { - tapestate_event (ti, TE_DONE); - } else - tapestate_event (ti, TE_OTHER); -} - -int -tape_oper_handler ( int irq, struct _devreg *dreg) { - tape_info_t* ti=first_tape_info; - tape_info_t* newtape; - int rc,tape_num,retries=0,i; - s390_dev_info_t dinfo; - tape_discipline_t* disc; -#ifdef CONFIG_DEVFS_FS - tape_frontend_t* frontend; -#endif - long lockflags; - while ((ti!=NULL) && (ti->devinfo.irq!=irq)) - ti=ti->next; - if (ti!=NULL) { - // irq is (still) used by tape. tell ingo to try again later - PRINT_WARN ("Oper handler for irq %d called while irq still (internaly?) 
used.\n",irq); - return -EAGAIN; - } - // irq is not used by tape - rc = get_dev_info_by_irq (irq, &dinfo); - if (rc == -ENODEV) { - retries++; - rc = get_dev_info_by_irq (irq, &dinfo); - if (retries > 5) { - PRINT_WARN ("No device information for new dev. could be retrieved.\n"); - return -ENODEV; - } - } - disc = first_discipline; - while ((disc != NULL) && (disc->cu_type != dinfo.sid_data.cu_type)) - disc = (tape_discipline_t *) (disc->next); - if (disc == NULL) - PRINT_WARN ("No matching discipline for cu_type %x found, ignoring device %04x.\n",dinfo.sid_data.cu_type,dinfo.devno); - if (rc == -ENODEV) - PRINT_WARN ("No device information for new dev. could be retrieved.\n"); - if ((disc == NULL) || (rc == -ENODEV)) - return -ENODEV; - - /* Allocate tape structure */ - ti = kmalloc (sizeof (tape_info_t), GFP_ATOMIC); - if (ti == NULL) { - PRINT_INFO ( "tape: can't allocate memory for " - "tape info structure\n"); - return -ENOBUFS; - } - memset(ti,0,sizeof(tape_info_t)); - ti->discipline = disc; - disc->tape = ti; - tape_num=0; - if (*tape) { - // we have static device ranges, so fingure out the tape_num of the attached tape - for (i=0;ici.devno==dinfo.devno) { - tape_num=2*i; - break; - } - } else { - // we are running in autoprobe mode, find a free tape_num - newtape=first_tape_info; - while (newtape!=NULL) { - if (newtape->rew_minor==tape_num) { - // tape num in use. try next one - tape_num+=2; - newtape=first_tape_info; - } else { - // tape num not used by newtape. look at next tape info - newtape=newtape->next; - } - } - } - rc = tape_setup (ti, irq, tape_num); - if (rc) { - kfree (ti); - return -ENOBUFS; - } -#ifdef CONFIG_DEVFS_FS - for (frontend=first_frontend;frontend!=NULL;frontend=frontend->next) - frontend->mkdevfstree(ti); -#endif - s390irq_spin_lock_irqsave (irq,lockflags); - if (first_tape_info == NULL) { - first_tape_info = ti; - } else { - newtape = first_tape_info; - while (newtape->next != NULL) - newtape = newtape->next; - newtape->next = ti; - } - s390irq_spin_unlock_irqrestore (irq, lockflags); - return 0; -} - - -static void -tape_noper_handler ( int irq, int status ) { - tape_info_t *ti=first_tape_info; - tape_info_t *lastti; -#ifdef CONFIG_DEVFS_FS - tape_frontend_t *frontend; -#endif - long lockflags; - s390irq_spin_lock_irqsave(irq,lockflags); - while (ti!=NULL && ti->devinfo.irq!=irq) ti=ti->next; - if (ti==NULL) return; - if (tapestate_get(ti)!=TS_UNUSED) { - // device is in use! - PRINT_WARN ("Tape #%d was detached while it was busy. Expect errors!",ti->blk_minor/2); - tapestate_set(ti,TS_NOT_OPER); - ti->rc=-ENODEV; - ti->wanna_wakeup=1; - switch (tapestate_get(ti)) { - case TS_REW_RELEASE_INIT: - tapestate_set(ti,TS_NOT_OPER); - wake_up (&ti->wq); - break; -#ifdef CONFIG_S390_TAPE_BLOCK - case TS_BLOCK_INIT: - tapestate_set(ti,TS_NOT_OPER); - schedule_tapeblock_exec_IO(ti); - break; -#endif - default: - tapestate_set(ti,TS_NOT_OPER); - wake_up_interruptible (&ti->wq); - } - } else { - // device is unused! 
- PRINT_WARN ("Tape #%d was detached.\n",ti->blk_minor/2); - if (ti==first_tape_info) { - first_tape_info=ti->next; - } else { - lastti=first_tape_info; - while (lastti->next!=ti) lastti=lastti->next; - lastti->next=ti->next; - } -#ifdef CONFIG_DEVFS_FS - for (frontend=first_frontend;frontend!=NULL;frontend=frontend->next) - frontend->rmdevfstree(ti); - tape_rmdevfsroots(ti); -#endif - kfree(ti); - } - s390irq_spin_unlock_irqrestore(irq,lockflags); - return; -} - - -void -tape_dump_sense (devstat_t * stat) -{ -#ifdef TAPE_DEBUG - int sl; -#endif -#if 0 - - PRINT_WARN ("------------I/O resulted in unit check:-----------\n"); - for (sl = 0; sl < 4; sl++) { - PRINT_WARN ("Sense:"); - for (sct = 0; sct < 8; sct++) { - PRINT_WARN (" %2d:0x%02X", 8 * sl + sct, - stat->ii.sense.data[8 * sl + sct]); - } - PRINT_WARN ("\n"); - } - PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X " - " %02X%02X%02X%02X %02X%02X%02X%02X \n", - stat->ii.sense.data[0], stat->ii.sense.data[1], - stat->ii.sense.data[2], stat->ii.sense.data[3], - stat->ii.sense.data[4], stat->ii.sense.data[5], - stat->ii.sense.data[6], stat->ii.sense.data[7], - stat->ii.sense.data[8], stat->ii.sense.data[9], - stat->ii.sense.data[10], stat->ii.sense.data[11], - stat->ii.sense.data[12], stat->ii.sense.data[13], - stat->ii.sense.data[14], stat->ii.sense.data[15]); - PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X " - " %02X%02X%02X%02X %02X%02X%02X%02X \n", - stat->ii.sense.data[16], stat->ii.sense.data[17], - stat->ii.sense.data[18], stat->ii.sense.data[19], - stat->ii.sense.data[20], stat->ii.sense.data[21], - stat->ii.sense.data[22], stat->ii.sense.data[23], - stat->ii.sense.data[24], stat->ii.sense.data[25], - stat->ii.sense.data[26], stat->ii.sense.data[27], - stat->ii.sense.data[28], stat->ii.sense.data[29], - stat->ii.sense.data[30], stat->ii.sense.data[31]); -#endif -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"SENSE:"); - for (sl=0;sl<31;sl++) { - debug_int_event (tape_debug_area,3,stat->ii.sense.data[sl]); - } - debug_int_exception (tape_debug_area,3,stat->ii.sense.data[31]); -#endif -} - -/* - * Setup tape_info_t structure of a tape device - */ -int -tape_setup (tape_info_t * ti, int irq, int minor) -{ - long lockflags; - int rc = 0; - - if (minor>254) { - PRINT_WARN ("Device id %d on irq %d will not be accessible since this driver is restricted to 128 devices.\n",minor/2,irq); - return -EINVAL; - } - rc = get_dev_info_by_irq (irq, &(ti->devinfo)); - if (rc == -ENODEV) { /* end of device list */ - return rc; - } - ti->rew_minor = minor; - ti->nor_minor = minor + 1; - ti->blk_minor = minor; -#ifdef CONFIG_DEVFS_FS - tape_mkdevfsroots(ti); -#endif - /* Register IRQ */ -#ifdef CONFIG_S390_TAPE_DYNAMIC - rc = s390_request_irq_special (irq, tape_irq, tape_noper_handler,0, "tape", &(ti->devstat)); -#else - rc = s390_request_irq (irq, tape_irq, 0, "tape", &(ti->devstat)); -#endif - s390irq_spin_lock_irqsave (irq, lockflags); - ti->next = NULL; - if (rc) - PRINT_WARN ("Cannot register irq %d, rc=%d\n", irq, rc); - init_waitqueue_head (&ti->wq); - ti->kernbuf = ti->userbuf = ti->discdata = NULL; - tapestate_set (ti, TS_UNUSED); - ti->discdata=NULL; - ti->discipline->setup_assist (ti); - ti->wanna_wakeup=0; - s390irq_spin_unlock_irqrestore (irq, lockflags); - return rc; -} - -/* - * tape_init will register the driver for each tape. 
- */ -int -tape_init (void) -{ - long lockflags; - s390_dev_info_t dinfo; - tape_discipline_t *disc; - tape_info_t *ti = NULL, *tempti = NULL; - char *opt_char,*opt_block,*opt_3490,*opt_3480; - int irq = 0, rc, retries = 0, tape_num = 0; - static int initialized=0; - - if (initialized) // Only init the devices once - return 0; - initialized=1; - -#ifdef TAPE_DEBUG - tape_debug_area = debug_register ( "tape", 3, 2, 10); - debug_register_view(tape_debug_area,&debug_hex_ascii_view); - debug_text_event (tape_debug_area,3,"begin init"); -#endif /* TAPE_DEBUG */ - - /* print banner */ - PRINT_WARN ("IBM S/390 Tape Device Driver (v1.01).\n"); - PRINT_WARN ("(C) IBM Deutschland Entwicklung GmbH, 2000\n"); - opt_char=opt_block=opt_3480=opt_3490="not present"; -#ifdef CONFIG_S390_TAPE_CHAR - opt_char="built in"; -#endif -#ifdef CONFIG_S390_TAPE_BLOCK - opt_block="built in"; -#endif -#ifdef CONFIG_S390_TAPE_3480 - opt_3480="built in"; -#endif -#ifdef CONFIG_S390_TAPE_3490 - opt_3490="built in"; -#endif - /* print feature info */ - PRINT_WARN ("character device frontend : %s\n",opt_char); - PRINT_WARN ("block device frontend : %s\n",opt_block); - PRINT_WARN ("support for 3480 compatible : %s\n",opt_3480); - PRINT_WARN ("support for 3490 compatible : %s\n",opt_3490); - -#ifndef MODULE - tape_split_parm_string(tape_parm_string); -#endif - if (*tape) - PRINT_INFO ("Using ranges supplied in parameters, disabling autoprobe mode.\n"); - else - PRINT_INFO ("No parameters supplied, enabling autoprobe mode for all supported devices.\n"); -#ifdef CONFIG_S390_TAPE_3490 - if (*tape) - first_discipline = tape3490_init (0); // no autoprobe for devices - else - first_discipline = tape3490_init (1); // do autoprobe since no parm specified - first_discipline->next = NULL; -#endif - -#ifdef CONFIG_S390_TAPE_3480 - if (first_discipline == NULL) { - if (*tape) - first_discipline = tape3480_init (0); // no autoprobe for devices - else - first_discipline = tape3480_init (1); // do autoprobe since no parm specified - first_discipline->next = NULL; - } else { - if (*tape) - first_discipline->next = tape3480_init (0); // no autoprobe for devices - else - first_discipline->next = tape3480_init (1); // do autoprobe since no parm specified - ((tape_discipline_t*) (first_discipline->next))->next=NULL; - } -#endif -#ifdef CONFIG_DEVFS_FS - tape_devfs_root_entry=devfs_mk_dir (NULL, "tape", NULL); -#endif CONFIG_DEVFS_FS - -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"dev detect"); -#endif /* TAPE_DEBUG */ - /* Allocate the tape structures */ - if (*tape!=NULL) { - // we have parameters, continue with parsing the parameters and set the devices online - tape_parm_parse (tape); - } else { - // we are running in autodetect mode, search all devices for compatibles - for (irq = get_irq_first(); irq!=-ENODEV; irq=get_irq_next(irq)) { - rc = get_dev_info_by_irq (irq, &dinfo); - disc = first_discipline; - while ((disc != NULL) && (disc->cu_type != dinfo.sid_data.cu_type)) - disc = (tape_discipline_t *) (disc->next); - if ((disc == NULL) || (rc == -ENODEV)) - continue; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"det irq: "); - debug_int_event (tape_debug_area,3,irq); - debug_text_event (tape_debug_area,3,"cu: "); - debug_int_event (tape_debug_area,3,disc->cu_type); -#endif /* TAPE_DEBUG */ - PRINT_INFO ("using devno %04x with discipline %04x on irq %d as tape device %d\n",dinfo.devno,dinfo.sid_data.cu_type,irq,tape_num/2); - /* Allocate tape structure */ - ti = kmalloc (sizeof (tape_info_t), GFP_ATOMIC); - if (ti == 
NULL) { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"ti:no mem "); -#endif /* TAPE_DEBUG */ - PRINT_INFO ("tape: can't allocate memory for " - "tape info structure\n"); - continue; - } - memset(ti,0,sizeof(tape_info_t)); - ti->discipline = disc; - disc->tape = ti; - rc = tape_setup (ti, irq, tape_num); - if (rc) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"tsetup err"); - debug_int_exception (tape_debug_area,3,rc); -#endif /* TAPE_DEBUG */ - kfree (ti); - } else { - s390irq_spin_lock_irqsave (irq, lockflags); - if (first_tape_info == NULL) { - first_tape_info = ti; - } else { - tempti = first_tape_info; - while (tempti->next != NULL) - tempti = tempti->next; - tempti->next = ti; - } - tape_num += 2; - s390irq_spin_unlock_irqrestore (irq, lockflags); - } - } - } - - /* Allocate local buffer for the ccwcache */ - tape_init_emergency_req (); -#ifdef CONFIG_PROC_FS -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - tape_devices_entry = create_proc_entry ("tapedevices", - S_IFREG | S_IRUGO | S_IWUSR, - &proc_root); - tape_devices_entry->proc_fops = &tape_devices_file_ops; - tape_devices_entry->proc_iops = &tape_devices_inode_ops; -#else - tape_devices_entry = (struct proc_dir_entry *) kmalloc - (sizeof (struct proc_dir_entry), GFP_ATOMIC); - if (tape_devices_entry) { - memset (tape_devices_entry, 0, sizeof (struct proc_dir_entry)); - tape_devices_entry->name = "tapedevices"; - tape_devices_entry->namelen = strlen ("tapedevices"); - tape_devices_entry->low_ino = 0; - tape_devices_entry->mode = (S_IFREG | S_IRUGO | S_IWUSR); - tape_devices_entry->nlink = 1; - tape_devices_entry->uid = 0; - tape_devices_entry->gid = 0; - tape_devices_entry->size = 0; - tape_devices_entry->get_info = NULL; - tape_devices_entry->ops = &tape_devices_inode_ops; - proc_register (&proc_root, tape_devices_entry); - } -#endif -#endif /* CONFIG_PROC_FS */ - - return 0; -} - -#ifdef MODULE -MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte (cotte@de.ibm.com)"); -MODULE_DESCRIPTION("Linux for S/390 channel attached tape device driver"); -MODULE_PARM (tape, "1-" __MODULE_STRING (256) "s"); - -int -init_module (void) -{ -#ifdef CONFIG_S390_TAPE_CHAR - tapechar_init (); -#endif -#ifdef CONFIG_S390_TAPE_BLOCK - tapeblock_init (); -#endif - return 0; -} - -void -cleanup_module (void) -{ - tape_info_t *ti ,*temp; - tape_frontend_t* frontend, *tempfe; - tape_discipline_t* disc ,*tempdi; - int i; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"cleaup mod"); -#endif /* TAPE_DEBUG */ - - if (*tape) { - // we are running with parameters. 
we'll now deregister from our devno's - for (i=0;inext; - //cleanup a device -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"free irq:"); - debug_int_event (tape_debug_area,6,temp->devinfo.irq); -#endif /* TAPE_DEBUG */ - free_irq (temp->devinfo.irq, &(temp->devstat)); - if (temp->discdata) kfree (temp->discdata); - if (temp->kernbuf) kfree (temp->kernbuf); - if (temp->cqr) tape_free_request(temp->cqr); -#ifdef CONFIG_DEVFS_FS - for (frontend=first_frontend;frontend!=NULL;frontend=frontend->next) - frontend->rmdevfstree(temp); - tape_rmdevfsroots(temp); -#endif - kfree (temp); - } -#ifdef CONFIG_DEVFS_FS - devfs_unregister (tape_devfs_root_entry); -#endif CONFIG_DEVFS_FS -#ifdef CONFIG_PROC_FS -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) - remove_proc_entry ("tapedevices", &proc_root); -#else - proc_unregister (&proc_root, tape_devices_entry->low_ino); - kfree (tape_devices_entry); -#endif /* LINUX_IS_24 */ -#endif -#ifdef CONFIG_S390_TAPE_CHAR - tapechar_uninit(); -#endif -#ifdef CONFIG_S390_TAPE_BLOCK - tapeblock_uninit(); -#endif - frontend=first_frontend; - while (frontend != NULL) { - tempfe = frontend; - frontend = frontend->next; - kfree (tempfe); - } - disc=first_discipline; - while (disc != NULL) { - if (*tape) - disc->shutdown(0); - else - disc->shutdown(1); - tempdi = disc; - disc = disc->next; - kfree (tempdi); - } - /* Deallocate the local buffer for the ccwcache */ - tape_cleanup_emergency_req (); -#ifdef TAPE_DEBUG - debug_unregister (tape_debug_area); -#endif /* TAPE_DEBUG */ -} -#endif /* MODULE */ - -inline void -tapestate_set (tape_info_t * ti, int newstate) -{ - if (ti->tape_state == TS_NOT_OPER) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"ts_set err"); - debug_text_exception (tape_debug_area,3,"dev n.oper"); -#endif /* TAPE_DEBUG */ - } else { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,4,"ts. dev: "); - debug_int_event (tape_debug_area,4,ti->blk_minor); - debug_text_event (tape_debug_area,4,"old ts: "); - debug_text_event (tape_debug_area,4,(((tapestate_get (ti) < TS_SIZE) && - (tapestate_get (ti) >=0 )) ? - state_verbose[tapestate_get (ti)] : - "UNKNOWN TS")); - debug_text_event (tape_debug_area,4,"new ts: "); - debug_text_event (tape_debug_area,4,(((newstate < TS_SIZE) && - (newstate >= 0)) ? - state_verbose[newstate] : - "UNKNOWN TS")); -#endif /* TAPE_DEBUG */ - ti->tape_state = newstate; - } -} - -inline int -tapestate_get (tape_info_t * ti) -{ - return (ti->tape_state); -} - -void -tapestate_event (tape_info_t * ti, int event) -{ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"te! dev: "); - debug_int_event (tape_debug_area,6,ti->blk_minor); - debug_text_event (tape_debug_area,6,"event:"); - debug_text_event (tape_debug_area,6,((event >=0) && - (event < TE_SIZE)) ? - event_verbose[event] : "TE UNKNOWN"); - debug_text_event (tape_debug_area,6,"state:"); - debug_text_event (tape_debug_area,6,((tapestate_get(ti) >= 0) && - (tapestate_get(ti) < TS_SIZE)) ? 
- state_verbose[tapestate_get (ti)] : - "TS UNKNOWN"); -#endif /* TAPE_DEBUG */ - if (event == TE_ERROR) { - ti->discipline->error_recovery(ti); - } else { - if ((event >= 0) && - (event < TE_SIZE) && - (tapestate_get (ti) >= 0) && - (tapestate_get (ti) < TS_SIZE) && - ((*(ti->discipline->event_table))[tapestate_get (ti)][event] != NULL)) - ((*(ti->discipline->event_table))[tapestate_get (ti)][event]) (ti); - else { -#ifdef TAPE_DEBUG - debug_text_exception (tape_debug_area,3,"TE UNEXPEC"); -#endif /* TAPE_DEBUG */ - ti->discipline->default_handler (ti); - } - } -} - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-indent-level: 4 - * c-brace-imaginary-offset: 0 - * c-brace-offset: -4 - * c-argdecl-indent: 4 - * c-label-offset: -4 - * c-continued-statement-offset: 4 - * c-continued-brace-offset: 0 - * indent-tabs-mode: nil - * tab-width: 8 - * End: - */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape_char.c linux.22-ac2/drivers/s390/char/tape_char.c --- linux.vanilla/drivers/s390/char/tape_char.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape_char.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,514 @@ +/* + * drivers/s390/char/tape_char.c + * character device frontend for tape device driver + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "tape.h" +#include "tape_std.h" + +#define PRINTK_HEADER "TCHAR:" + +#define TAPECHAR_DEVFSMODE 0020644 /* crwxrw-rw- */ +#define TAPECHAR_MAJOR 0 /* get dynamic major */ + +int tapechar_major = TAPECHAR_MAJOR; + +/* + * Prototypes for file operation functions + */ +static ssize_t tapechar_read(struct file *, char *, size_t, loff_t *); +static ssize_t tapechar_write(struct file *, const char *, size_t, loff_t *); +static int tapechar_open(struct inode *,struct file *); +static int tapechar_release(struct inode *,struct file *); +static int tapechar_ioctl(struct inode *, struct file *, unsigned int, + unsigned long); + +/* + * File operation structure for tape character frontend + */ +static struct file_operations tape_fops = +{ + .read = tapechar_read, + .write = tapechar_write, + .ioctl = tapechar_ioctl, + .open = tapechar_open, + .release = tapechar_release, +}; + +#ifdef CONFIG_DEVFS_FS +/* + * Create Char directory with (non)rewinding entries + */ +static int +tapechar_mkdevfstree(struct tape_device *device) +{ + device->char_data.devfs_char_dir = + devfs_mk_dir(device->devfs_dir, "char", device); + if (device->char_data.devfs_char_dir == NULL) + return -ENOENT; + device->char_data.devfs_nonrewinding = + devfs_register(device->char_data.devfs_char_dir, + "nonrewinding", DEVFS_FL_DEFAULT, + tapechar_major, device->first_minor, + TAPECHAR_DEVFSMODE, &tape_fops, device); + if (device->char_data.devfs_nonrewinding == NULL) { + devfs_unregister(device->char_data.devfs_char_dir); + return -ENOENT; + } + device->char_data.devfs_rewinding = + devfs_register(device->char_data.devfs_char_dir, + "rewinding", 
DEVFS_FL_DEFAULT, + tapechar_major, device->first_minor + 1, + TAPECHAR_DEVFSMODE, &tape_fops, device); + if (device->char_data.devfs_rewinding == NULL) { + devfs_unregister(device->char_data.devfs_nonrewinding); + devfs_unregister(device->char_data.devfs_char_dir); + return -ENOENT; + } + return 0; +} + +/* + * Remove devfs entries + */ +static void +tapechar_rmdevfstree (struct tape_device *device) +{ + if (device->char_data.devfs_nonrewinding) + devfs_unregister(device->char_data.devfs_nonrewinding); + if (device->char_data.devfs_rewinding) + devfs_unregister(device->char_data.devfs_rewinding); + if (device->char_data.devfs_char_dir) + devfs_unregister(device->char_data.devfs_char_dir); +} +#endif + +/* + * This function is called for every new tapedevice + */ +int +tapechar_setup_device(struct tape_device * device) +{ +#ifdef CONFIG_DEVFS_FS + int rc; + + rc = tapechar_mkdevfstree(device); + if (rc) + return rc; +#endif + + tape_hotplug_event(device, tapechar_major, TAPE_HOTPLUG_CHAR_ADD); + return 0; + +} + +void +tapechar_cleanup_device(struct tape_device* device) +{ +#ifdef CONFIG_DEVFS_FS + tapechar_rmdevfstree(device); +#endif + tape_hotplug_event(device, tapechar_major, TAPE_HOTPLUG_CHAR_REMOVE); +} + +static inline int +tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) +{ + struct idal_buffer *new; + + /* Idal buffer must be the same size as the requested block size! */ + if (device->char_data.idal_buf != NULL && + device->char_data.idal_buf->size == block_size) + return 0; + + /* The current idal buffer is not big enough. Allocate a new one. */ + new = idal_buffer_alloc(block_size, 0); + if (new == NULL) + return -ENOMEM; + if (device->char_data.idal_buf != NULL) + idal_buffer_free(device->char_data.idal_buf); + device->char_data.idal_buf = new; + return 0; +} + +/* + * Tape device read function + */ +ssize_t +tapechar_read (struct file *filp, char *data, size_t count, loff_t *ppos) +{ + struct tape_device *device; + struct tape_request *request; + size_t block_size; + int rc; + + DBF_EVENT(6, "TCHAR:read\n"); + device = (struct tape_device *) filp->private_data; + + /* Check position. */ + if (ppos != &filp->f_pos) { + /* + * "A request was outside the capabilities of the device." + * This check uses internal knowledge about how pread and + * read work... + */ + DBF_EVENT(6, "TCHAR:ppos wrong\n"); + return -EOVERFLOW; + } + + /* + * If the tape isn't terminated yet, do it now. And since we then + * are at the end of the tape there wouldn't be anything to read + * anyways. So we return immediatly. + */ + if(device->required_tapemarks) { + return tape_std_terminate_write(device); + } + + /* Find out block size to use */ + if (device->char_data.block_size != 0) { + if (count < device->char_data.block_size) { + DBF_EVENT(3, "TCHAR:read smaller than block " + "size was requested\n"); + return -EINVAL; + } + block_size = device->char_data.block_size; + } else { + block_size = count; + rc = tapechar_check_idalbuffer(device, block_size); + if (rc) + return rc; + } + DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); + /* Let the discipline build the ccw chain. */ + request = device->discipline->read_block(device, block_size); + if (IS_ERR(request)) + return PTR_ERR(request); + /* Execute it. */ + rc = tape_do_io(device, request); + if (rc == 0) { + rc = block_size - device->devstat.rescnt; + DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc); + filp->f_pos += rc; + /* Copy data from idal buffer to user space. 
*/ + if (idal_buffer_to_user(device->char_data.idal_buf, + data, rc) != 0) + rc = -EFAULT; + } + tape_put_request(request); + return rc; +} + +/* + * Tape device write function + */ +ssize_t +tapechar_write(struct file *filp, const char *data, size_t count, loff_t *ppos) +{ + struct tape_device *device; + struct tape_request *request; + size_t block_size; + size_t written; + int nblocks; + int i, rc; + + DBF_EVENT(6, "TCHAR:write\n"); + device = (struct tape_device *) filp->private_data; + /* Check position */ + if (ppos != &filp->f_pos) { + /* "A request was outside the capabilities of the device." */ + DBF_EVENT(6, "TCHAR:ppos wrong\n"); + return -EOVERFLOW; + } + /* Find out block size and number of blocks */ + if (device->char_data.block_size != 0) { + if (count < device->char_data.block_size) { + DBF_EVENT(3, "TCHAR:write smaller than block " + "size was requested\n"); + return -EINVAL; + } + block_size = device->char_data.block_size; + nblocks = count / block_size; + } else { + block_size = count; + rc = tapechar_check_idalbuffer(device, block_size); + if (rc) + return rc; + nblocks = 1; + } + DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); + DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); + /* Let the discipline build the ccw chain. */ + request = device->discipline->write_block(device, block_size); + if (IS_ERR(request)) + return PTR_ERR(request); + rc = 0; + written = 0; + for (i = 0; i < nblocks; i++) { + /* Copy data from user space to idal buffer. */ + if (idal_buffer_from_user(device->char_data.idal_buf, + data, block_size)) { + rc = -EFAULT; + break; + } + rc = tape_do_io(device, request); + if (rc) + break; + DBF_EVENT(6, "TCHAR:wbytes: %lx\n", + block_size - device->devstat.rescnt); + filp->f_pos += block_size - device->devstat.rescnt; + written += block_size - device->devstat.rescnt; + if (device->devstat.rescnt != 0) + break; + data += block_size; + } + tape_put_request(request); + + if (rc == -ENOSPC) { + /* + * Ok, the device has no more space. It has NOT written + * the block. + */ + if (device->discipline->process_eov) + device->discipline->process_eov(device); + if (written > 0) + rc = 0; + } + + /* + * After doing a write we always need two tapemarks to correctly + * terminate the tape (one to terminate the file, the second to + * flag the end of recorded data. + * Since process_eov positions the tape in front of the written + * tapemark it doesn't hurt to write two marks again. + */ + if(!rc) + device->required_tapemarks = 2; + + return rc ? rc : written; +} + +/* + * Character frontend tape device open function. + */ +int +tapechar_open (struct inode *inode, struct file *filp) +{ + struct tape_device *device; + int minor, rc; + + MOD_INC_USE_COUNT; + if (major(filp->f_dentry->d_inode->i_rdev) != tapechar_major) + return -ENODEV; + minor = minor(filp->f_dentry->d_inode->i_rdev); + device = tape_get_device(minor / TAPE_MINORS_PER_DEV); + if (IS_ERR(device)) { + MOD_DEC_USE_COUNT; + return PTR_ERR(device); + } + DBF_EVENT(6, "TCHAR:open: %x\n", minor(inode->i_rdev)); + rc = tape_open(device); + if (rc == 0) { + rc = tape_assign(device, TAPE_STATUS_ASSIGN_A); + if (rc == 0) { + filp->private_data = device; + return 0; + } + tape_release(device); + } + tape_put_device(device); + MOD_DEC_USE_COUNT; + return rc; +} + +/* + * Character frontend tape device release function. 
+ */ + +int +tapechar_release(struct inode *inode, struct file *filp) +{ + struct tape_device *device; + + device = (struct tape_device *) filp->private_data; + DBF_EVENT(6, "TCHAR:release: %x\n", minor(inode->i_rdev)); + + /* + * If this is the rewinding tape minor then rewind. In that case we + * write all required tapemarks. Otherwise only one to terminate the + * file. + */ + if ((minor(inode->i_rdev) & 1) != 0) { + if(device->required_tapemarks) + tape_std_terminate_write(device); + tape_mtop(device, MTREW, 1); + } else { + if(device->required_tapemarks > 1) { + if(tape_mtop(device, MTWEOF, 1) == 0) + device->required_tapemarks--; + } + } + + if (device->char_data.idal_buf != NULL) { + idal_buffer_free(device->char_data.idal_buf); + device->char_data.idal_buf = NULL; + } + tape_unassign(device, TAPE_STATUS_ASSIGN_A); + tape_release(device); + filp->private_data = NULL; tape_put_device(device); + MOD_DEC_USE_COUNT; + return 0; +} + +/* + * Tape device io controls. + */ +static int +tapechar_ioctl(struct inode *inp, struct file *filp, + unsigned int no, unsigned long data) +{ + struct tape_device *device; + int rc; + + DBF_EVENT(6, "TCHAR:ioct(%x)\n", no); + + device = (struct tape_device *) filp->private_data; + + if (no == MTIOCTOP) { + struct mtop op; + + if (copy_from_user(&op, (char *) data, sizeof(op)) != 0) + return -EFAULT; + if (op.mt_count < 0) + return -EINVAL; + + /* + * Operations that change tape position should write final + * tapemarks + */ + switch(op.mt_op) { + case MTFSF: + case MTBSF: + case MTFSR: + case MTBSR: + case MTREW: + case MTOFFL: + case MTEOM: + case MTRETEN: + case MTBSFM: + case MTFSFM: + case MTSEEK: + if(device->required_tapemarks) + tape_std_terminate_write(device); + default: + ; + } + rc = tape_mtop(device, op.mt_op, op.mt_count); + + if(op.mt_op == MTWEOF && rc == 0) { + if(op.mt_count > device->required_tapemarks) + device->required_tapemarks = 0; + else + device->required_tapemarks -= op.mt_count; + } + return rc; + } + if (no == MTIOCPOS) { + /* MTIOCPOS: query the tape position. */ + struct mtpos pos; + + rc = tape_mtop(device, MTTELL, 1); + if (rc < 0) + return rc; + pos.mt_blkno = rc; + if (copy_to_user((char *) data, &pos, sizeof(pos)) != 0) + return -EFAULT; + return 0; + } + if (no == MTIOCGET) { + /* MTIOCGET: query the tape drive status. */ + struct mtget get; + + memset(&get, 0, sizeof(get)); + get.mt_type = MT_ISUNKNOWN; + get.mt_resid = device->devstat.rescnt; + get.mt_dsreg = device->tape_status; + /* FIXME: mt_erreg, mt_fileno */ + get.mt_gstat = device->tape_generic_status; + + if(device->medium_state == MS_LOADED) { + rc = tape_mtop(device, MTTELL, 1); + + if(rc < 0) + return rc; + + if(rc == 0) + get.mt_gstat |= GMT_BOT(~0); + + get.mt_blkno = rc; + } + get.mt_erreg = 0; + if (copy_to_user((char *) data, &get, sizeof(get)) != 0) + return -EFAULT; + return 0; + } + /* Try the discipline ioctl function. */ + if (device->discipline->ioctl_fn == NULL) + return -EINVAL; + return device->discipline->ioctl_fn(device, no, data); +} + +/* + * Initialize character device frontend. 
+ */ +int +tapechar_init (void) +{ + int rc; + + /* Register the tape major number to the kernel */ +#ifdef CONFIG_DEVFS_FS + if (tapechar_major == 0) + tapechar_major = devfs_alloc_major(DEVFS_SPECIAL_CHR); +#endif + rc = register_chrdev(tapechar_major, "tape", &tape_fops); + if (rc < 0) { + PRINT_ERR("can't get major %d\n", tapechar_major); + DBF_EVENT(3, "TCHAR:initfail\n"); + return rc; + } + if (tapechar_major == 0) + tapechar_major = rc; /* accept dynamic major number */ + PRINT_INFO("Tape gets major %d for char device\n", tapechar_major); + DBF_EVENT(3, "Tape gets major %d for char device\n", rc); + DBF_EVENT(3, "TCHAR:init ok\n"); + return 0; +} + +/* + * cleanup + */ +void +tapechar_exit(void) +{ + unregister_chrdev (tapechar_major, "tape"); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tapechar.c linux.22-ac2/drivers/s390/char/tapechar.c --- linux.vanilla/drivers/s390/char/tapechar.c 2001-11-09 22:05:02.000000000 +0000 +++ linux.22-ac2/drivers/s390/char/tapechar.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,759 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tapechar.c - * character device frontend for tape device driver - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - **************************************************************************** - */ - -#include "tapedefs.h" -#include -#include -#include -#include -#include /* CCW allocations */ -#include -#include -#include -#include -#include -#ifdef MODULE -#define __NO_VERSION__ -#include -#endif -#include "tape.h" -#include "tapechar.h" - -#define PRINTK_HEADER "TCHAR:" - -/* - * file operation structure for tape devices - */ -static struct file_operations tape_fops = -{ - // owner : THIS_MODULE, - llseek:NULL, /* lseek - default */ - read:tape_read, /* read */ - write:tape_write, /* write */ - readdir:NULL, /* readdir - bad */ - poll:NULL, /* poll */ - ioctl:tape_ioctl, /* ioctl */ - mmap:NULL, /* mmap */ - open:tape_open, /* open */ - flush:NULL, /* flush */ - release:tape_release, /* release */ - fsync:NULL, /* fsync */ - fasync:NULL, /* fasync */ - lock:NULL, -}; - -int tape_major = TAPE_MAJOR; - -#ifdef CONFIG_DEVFS_FS -void -tapechar_mkdevfstree (tape_info_t* ti) { - ti->devfs_char_dir=devfs_mk_dir (ti->devfs_dir, "char", ti); - ti->devfs_nonrewinding=devfs_register(ti->devfs_char_dir, "nonrewinding", - DEVFS_FL_DEFAULT,tape_major, - ti->nor_minor, TAPECHAR_DEFAULTMODE, - &tape_fops, ti); - ti->devfs_rewinding=devfs_register(ti->devfs_char_dir, "rewinding", - DEVFS_FL_DEFAULT, tape_major, ti->rew_minor, - TAPECHAR_DEFAULTMODE, &tape_fops, ti); -} - -void -tapechar_rmdevfstree (tape_info_t* ti) { - devfs_unregister(ti->devfs_nonrewinding); - devfs_unregister(ti->devfs_rewinding); - devfs_unregister(ti->devfs_char_dir); -} -#endif - -void -tapechar_setup (tape_info_t * ti) -{ -#ifdef CONFIG_DEVFS_FS - tapechar_mkdevfstree(ti); -#endif -} - -void -tapechar_init (void) -{ - int result; - tape_frontend_t *charfront,*temp; - tape_info_t* ti; - - tape_init(); - - /* Register the tape major number to the kernel */ -#ifdef CONFIG_DEVFS_FS - result = devfs_register_chrdev (tape_major, "tape", &tape_fops); -#else - result = register_chrdev (tape_major, "tape", &tape_fops); -#endif - - if (result < 0) { - PRINT_WARN (KERN_ERR "tape: can't get major %d\n", tape_major); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"c:initfail"); - 
debug_text_event (tape_debug_area,3,"regchrfail"); -#endif /* TAPE_DEBUG */ - panic ("no major number available for tape char device"); - } - if (tape_major == 0) - tape_major = result; /* accept dynamic major number */ - PRINT_WARN (KERN_ERR " tape gets major %d for character device\n", result); - charfront = kmalloc (sizeof (tape_frontend_t), GFP_KERNEL); - if (charfront == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"c:initfail"); - debug_text_event (tape_debug_area,3,"no mem"); -#endif /* TAPE_DEBUG */ - panic ("no major number available for tape char device"); - } - charfront->device_setup = tapechar_setup; -#ifdef CONFIG_DEVFS_FS - charfront->mkdevfstree = tapechar_mkdevfstree; - charfront->rmdevfstree = tapechar_rmdevfstree; -#endif -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"c:init ok"); -#endif /* TAPE_DEBUG */ - charfront->next=NULL; - if (first_frontend==NULL) { - first_frontend=charfront; - } else { - temp=first_frontend; - while (temp->next!=NULL) - temp=temp->next; - temp->next=charfront; - } - ti=first_tape_info; - while (ti!=NULL) { - tapechar_setup(ti); - ti=ti->next; - } -} - -void -tapechar_uninit (void) -{ - unregister_chrdev (tape_major, "tape"); -} - -/* - * Tape device read function - */ -ssize_t -tape_read (struct file *filp, char *data, size_t count, loff_t * ppos) -{ - long lockflags; - tape_info_t *ti; - size_t block_size; - ccw_req_t *cqr; - int rc; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:read"); -#endif /* TAPE_DEBUG */ - ti = first_tape_info; - while ((ti != NULL) && (ti->rew_filp != filp) && (ti->nor_filp != filp)) - ti = (tape_info_t *) ti->next; - if (ti == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:nodev"); -#endif /* TAPE_DEBUG */ - return -ENODEV; - } - if (ppos != &filp->f_pos) { - /* "A request was outside the capabilities of the device." */ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ppos wrong"); -#endif /* TAPE_DEBUG */ - return -EOVERFLOW; /* errno=75 Value too large for def. 
data type */ - } - if (ti->block_size == 0) { - block_size = count; - } else { - block_size = ti->block_size; - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:nbytes:"); - debug_int_event (tape_debug_area,6,block_size); -#endif - cqr = ti->discipline->read_block (data, block_size, ti); - if (!cqr) { - return -ENOBUFS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - if (rc) { - tapestate_set(ti,TS_IDLE); - kfree (cqr); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return rc; - } - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - ti->discipline->free_read_block (cqr, ti); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return ti->rc; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:rbytes:"); - debug_int_event (tape_debug_area,6,block_size - ti->devstat.rescnt); -#endif /* TAPE_DEBUG */ - filp->f_pos += block_size - ti->devstat.rescnt; - return block_size - ti->devstat.rescnt; -} - -/* - * Tape device write function - */ -ssize_t -tape_write (struct file *filp, const char *data, size_t count, loff_t * ppos) -{ - long lockflags; - tape_info_t *ti; - size_t block_size; - ccw_req_t *cqr; - int nblocks, i, rc; - size_t written = 0; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:write"); -#endif - ti = first_tape_info; - while ((ti != NULL) && (ti->nor_filp != filp) && (ti->rew_filp != filp)) - ti = (tape_info_t *) ti->next; - if (ti == NULL) - return -ENODEV; - if (ppos != &filp->f_pos) { - /* "A request was outside the capabilities of the device." */ -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ppos wrong"); -#endif - return -EOVERFLOW; /* errno=75 Value too large for def. 
data type */ - } - if ((ti->block_size != 0) && (count % ti->block_size != 0)) - return -EIO; - if (ti->block_size == 0) { - block_size = count; - nblocks = 1; - } else { - block_size = ti->block_size; - nblocks = count / (ti->block_size); - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:nbytes:"); - debug_int_event (tape_debug_area,6,block_size); - debug_text_event (tape_debug_area,6,"c:nblocks:"); - debug_int_event (tape_debug_area,6,nblocks); -#endif - for (i = 0; i < nblocks; i++) { - cqr = ti->discipline->write_block (data + i * block_size, block_size, ti); - if (!cqr) { - return -ENOBUFS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - ti->discipline->free_write_block (cqr, ti); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if ((ti->rc==-ENOSPC) && (i!=0)) - return i*block_size; - return ti->rc; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:wbytes:"); - debug_int_event (tape_debug_area,6,block_size - ti->devstat.rescnt); -#endif - filp->f_pos += block_size - ti->devstat.rescnt; - written += block_size - ti->devstat.rescnt; - if (ti->devstat.rescnt > 0) - return written; - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:wtotal:"); - debug_int_event (tape_debug_area,6,written); -#endif - return written; -} - -static int -tape_mtioctop (struct file *filp, short mt_op, int mt_count) -{ - tape_info_t *ti; - ccw_req_t *cqr = NULL; - int rc; - long lockflags; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:mtio"); - debug_text_event (tape_debug_area,6,"c:ioop:"); - debug_int_event (tape_debug_area,6,mt_op); - debug_text_event (tape_debug_area,6,"c:arg:"); - debug_int_event (tape_debug_area,6,mt_count); -#endif - ti = first_tape_info; - while ((ti != NULL) && (ti->rew_filp != filp) && (ti->nor_filp != filp)) - ti = (tape_info_t *) ti->next; - if (ti == NULL) - return -ENODEV; - switch (mt_op) { - case MTREW: // rewind - - cqr = ti->discipline->mtrew (ti, mt_count); - break; - case MTOFFL: // put drive offline - - cqr = ti->discipline->mtoffl (ti, mt_count); - break; - case MTUNLOAD: // unload the tape - - cqr = ti->discipline->mtunload (ti, mt_count); - break; - case MTWEOF: // write tapemark - - cqr = ti->discipline->mtweof (ti, mt_count); - break; - case MTFSF: // forward space file - - cqr = ti->discipline->mtfsf (ti, mt_count); - break; - case MTBSF: // backward space file - - cqr = ti->discipline->mtbsf (ti, mt_count); - break; - case MTFSFM: // forward space file, stop at BOT side - - cqr = ti->discipline->mtfsfm (ti, mt_count); - break; - case MTBSFM: // backward space file, stop at 
BOT side - - cqr = ti->discipline->mtbsfm (ti, mt_count); - break; - case MTFSR: // forward space file - - cqr = ti->discipline->mtfsr (ti, mt_count); - break; - case MTBSR: // backward space file - - cqr = ti->discipline->mtbsr (ti, mt_count); - break; - case MTNOP: - cqr = ti->discipline->mtnop (ti, mt_count); - break; - case MTEOM: // postion at the end of portion - - case MTRETEN: // retension the tape - - cqr = ti->discipline->mteom (ti, mt_count); - break; - case MTERASE: - cqr = ti->discipline->mterase (ti, mt_count); - break; - case MTSETDENSITY: - cqr = ti->discipline->mtsetdensity (ti, mt_count); - break; - case MTSEEK: - cqr = ti->discipline->mtseek (ti, mt_count); - break; - case MTSETDRVBUFFER: - cqr = ti->discipline->mtsetdrvbuffer (ti, mt_count); - break; - case MTLOCK: - cqr = ti->discipline->mtsetdrvbuffer (ti, mt_count); - break; - case MTUNLOCK: - cqr = ti->discipline->mtsetdrvbuffer (ti, mt_count); - break; - case MTLOAD: - cqr = ti->discipline->mtload (ti, mt_count); - if (cqr!=NULL) break; // if backend driver has an load function ->use it - // if no medium is in, wait until it gets inserted - if (ti->medium_is_unloaded) { - wait_event_interruptible (ti->wq,ti->medium_is_unloaded==0); - } - return 0; - case MTCOMPRESSION: - cqr = ti->discipline->mtcompression (ti, mt_count); - break; - case MTSETPART: - cqr = ti->discipline->mtsetpart (ti, mt_count); - break; - case MTMKPART: - cqr = ti->discipline->mtmkpart (ti, mt_count); - break; - case MTTELL: // return number of block relative to current file - - cqr = ti->discipline->mttell (ti, mt_count); - break; - case MTSETBLK: - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->block_size = mt_count; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:setblk:"); - debug_int_event (tape_debug_area,6,mt_count); -#endif - return 0; - case MTRESET: - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->kernbuf = ti->userbuf = NULL; - tapestate_set (ti, TS_IDLE); - ti->block_size = 0; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:devreset:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - return 0; - default: -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:inv.mtio"); -#endif - return -EINVAL; - } - if (cqr == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ccwg fail"); -#endif - return -ENOSPC; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - if (ti->kernbuf != NULL) { - kfree (ti->kernbuf); - ti->kernbuf = NULL; - } - tape_free_request (cqr); - // if medium was unloaded, update the corresponding variable. 
- switch (mt_op) { - case MTOFFL: - case MTUNLOAD: - ti->medium_is_unloaded=1; - } - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (((mt_op == MTEOM) || (mt_op == MTRETEN)) && (tapestate_get (ti) == TS_FAILED)) - tapestate_set (ti, TS_DONE); - if (tapestate_get (ti) == TS_FAILED) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return ti->rc; - } - if (tapestate_get (ti) == TS_NOT_OPER) { - ti->blk_minor=ti->rew_minor=ti->nor_minor=-1; - ti->devinfo.irq=-1; - s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags); - return -ENODEV; - } - if (tapestate_get (ti) != TS_DONE) { - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - return -EIO; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - switch (mt_op) { - case MTRETEN: //need to rewind the tape after moving to eom - - return tape_mtioctop (filp, MTREW, 1); - case MTFSFM: //need to skip back over the filemark - - return tape_mtioctop (filp, MTBSFM, 1); - case MTBSF: //need to skip forward over the filemark - - return tape_mtioctop (filp, MTFSF, 1); - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:mtio done"); -#endif - return 0; -} - -/* - * Tape device io controls. - */ -int -tape_ioctl (struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - long lockflags; - tape_info_t *ti; - ccw_req_t *cqr; - struct mtop op; /* structure for MTIOCTOP */ - struct mtpos pos; /* structure for MTIOCPOS */ - struct mtget get; - - int rc; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ioct"); -#endif - ti = first_tape_info; - while ((ti != NULL) && - (ti->rew_minor != MINOR (inode->i_rdev)) && - (ti->nor_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if (ti == NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:nodev"); -#endif - return -ENODEV; - } - // check for discipline ioctl overloading - if ((rc = ti->discipline->discipline_ioctl_overload (inode, filp, cmd, arg)) - != -EINVAL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:ioverloa"); -#endif - return rc; - } - - switch (cmd) { - case MTIOCTOP: /* tape op command */ - if (copy_from_user (&op, (char *) arg, sizeof (struct mtop))) { - return -EFAULT; - } - return (tape_mtioctop (filp, op.mt_op, op.mt_count)); - case MTIOCPOS: /* query tape position */ - cqr = ti->discipline->mttell (ti, 0); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - pos.mt_blkno = ti->rc; - ti->cqr = NULL; - if (ti->kernbuf != NULL) { - kfree (ti->kernbuf); - ti->kernbuf = NULL; - } - tape_free_request (cqr); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (copy_to_user ((char *) arg, &pos, sizeof (struct mtpos))) - return -EFAULT; - return 0; - case MTIOCGET: - get.mt_erreg = ti->rc; - cqr = ti->discipline->mttell (ti, 0); - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - ti->cqr = cqr; - ti->wanna_wakeup=0; - do_IO 
(ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event_interruptible (ti->wq,ti->wanna_wakeup); - get.mt_blkno = ti->rc; - get.mt_fileno = 0; - get.mt_type = MT_ISUNKNOWN; - get.mt_resid = ti->devstat.rescnt; - get.mt_dsreg = ti->devstat.ii.sense.data[3]; - get.mt_gstat = 0; - if (ti->devstat.ii.sense.data[1] & 0x08) - get.mt_gstat &= GMT_BOT (1); // BOT - - if (ti->devstat.ii.sense.data[1] & 0x02) - get.mt_gstat &= GMT_WR_PROT (1); // write protected - - if (ti->devstat.ii.sense.data[1] & 0x40) - get.mt_gstat &= GMT_ONLINE (1); //drive online - - ti->cqr = NULL; - if (ti->kernbuf != NULL) { - kfree (ti->kernbuf); - ti->kernbuf = NULL; - } - tape_free_request (cqr); - if (signal_pending (current)) { - tapestate_set (ti, TS_IDLE); - return -ERESTARTSYS; - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - if (copy_to_user ((char *) arg, &get, sizeof (struct mtget))) - return -EFAULT; - return 0; - default: -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,3,"c:ioct inv"); -#endif - return -EINVAL; - } -} - -/* - * Tape device open function. - */ -int -tape_open (struct inode *inode, struct file *filp) -{ - tape_info_t *ti; - kdev_t dev; - long lockflags; - - inode = filp->f_dentry->d_inode; - ti = first_tape_info; - while ((ti != NULL) && - (ti->rew_minor != MINOR (inode->i_rdev)) && - (ti->nor_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if (ti == NULL) - return -ENODEV; -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:open:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (tapestate_get (ti) != TS_UNUSED) { - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:dbusy"); -#endif - return -EBUSY; - } - tapestate_set (ti, TS_IDLE); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - - dev = MKDEV (tape_major, MINOR (inode->i_rdev)); /* Get the device */ - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - if (ti->rew_minor == MINOR (inode->i_rdev)) - ti->rew_filp = filp; /* save for later reference */ - else - ti->nor_filp = filp; - filp->private_data = ti; /* save the dev.info for later reference */ - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - -#ifdef MODULE - MOD_INC_USE_COUNT; -#endif /* MODULE */ - return 0; -} - -/* - * Tape device release function. 
- */ -int -tape_release (struct inode *inode, struct file *filp) -{ - long lockflags; - tape_info_t *ti,*lastti; - ccw_req_t *cqr = NULL; - int rc = 0; - - ti = first_tape_info; - while ((ti != NULL) && (ti->rew_minor != MINOR (inode->i_rdev)) && (ti->nor_minor != MINOR (inode->i_rdev))) - ti = (tape_info_t *) ti->next; - if ((ti != NULL) && (tapestate_get (ti) == TS_NOT_OPER)) { - if (ti==first_tape_info) { - first_tape_info=ti->next; - } else { - lastti=first_tape_info; - while (lastti->next!=ti) lastti=lastti->next; - lastti->next=ti->next; - } - kfree(ti); - goto out; - } - if ((ti == NULL) || (tapestate_get (ti) != TS_IDLE)) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:notidle!"); -#endif - rc = -ENXIO; /* error in tape_release */ - goto out; - } -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:release:"); - debug_int_event (tape_debug_area,6,ti->blk_minor); -#endif - if (ti->rew_minor == MINOR (inode->i_rdev)) { - cqr = ti->discipline->mtrew (ti, 1); - if (cqr != NULL) { -#ifdef TAPE_DEBUG - debug_text_event (tape_debug_area,6,"c:rewrelea"); -#endif - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_REW_RELEASE_INIT); - ti->cqr = cqr; - ti->wanna_wakeup=0; - rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); - wait_event (ti->wq,ti->wanna_wakeup); - ti->cqr = NULL; - tape_free_request (cqr); - } - } - s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags); - tapestate_set (ti, TS_UNUSED); - s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags); -out: -#ifdef MODULE - MOD_DEC_USE_COUNT; -#endif /* MODULE */ - return rc; -} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tapechar.h linux.22-ac2/drivers/s390/char/tapechar.h --- linux.vanilla/drivers/s390/char/tapechar.h 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tapechar.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,34 +0,0 @@ - -/*************************************************************************** - * - * drivers/s390/char/tapechar.h - * character device frontend for tape device driver - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - **************************************************************************** - */ - -#ifndef TAPECHAR_H -#define TAPECHAR_H -#include -#define TAPECHAR_DEFAULTMODE 0020644 -#define TAPE_MAJOR 0 /* get dynamic major since no major officialy defined for tape */ -/* - * Prototypes for tape_fops - */ -ssize_t tape_read(struct file *, char *, size_t, loff_t *); -ssize_t tape_write(struct file *, const char *, size_t, loff_t *); -int tape_ioctl(struct inode *,struct file *,unsigned int,unsigned long); -int tape_open (struct inode *,struct file *); -int tape_release (struct inode *,struct file *); -#ifdef CONFIG_DEVFS_FS -void tapechar_mkdevfstree (tape_info_t* ti); -#endif -void tapechar_init (void); -void tapechar_uninit (void); -#endif /* TAPECHAR_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape_core.c linux.22-ac2/drivers/s390/char/tape_core.c --- linux.vanilla/drivers/s390/char/tape_core.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape_core.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,1345 @@ +/* + * drivers/s390/char/tape_core.c + * basic function of the tape device driver + * + * S390 and zSeries version + 
* Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#include +#include +#include +#include // for kernel parameters +#include // for requesting modules +#include // for locks +#include + +#include // for variable types +#include +#include +#include + +#include "tape.h" +#include "tape_std.h" + +#ifdef CONFIG_S390_TAPE_3590 +#include "tape_3590.h" +#endif + +#define PRINTK_HEADER "T390:" + +/* + * Prototypes for some static functions. + */ +static void __tape_do_irq (int, void *, struct pt_regs *); +static void __tape_remove_request(struct tape_device *, struct tape_request *); +static void tape_timeout_io (unsigned long); + +/* + * List of tape disciplines guarded by tape_discipline_lock. + */ +static struct list_head tape_disciplines = LIST_HEAD_INIT(tape_disciplines); +static spinlock_t tape_discipline_lock = SPIN_LOCK_UNLOCKED; + +/* + * Pointer to debug area. + */ +debug_info_t *tape_dbf_area = NULL; + +const char *tape_op_verbose[TO_SIZE] = +{ + [TO_BLOCK] = "BLK", + [TO_BSB] = "BSB", + [TO_BSF] = "BSF", + [TO_DSE] = "DSE", + [TO_FSB] = "FSB", + [TO_FSF] = "FSF", + [TO_LBL] = "LBL", + [TO_NOP] = "NOP", + [TO_RBA] = "RBA", + [TO_RBI] = "RBI", + [TO_RFO] = "RFO", + [TO_REW] = "REW", + [TO_RUN] = "RUN", + [TO_WRI] = "WRI", + [TO_WTM] = "WTM", + [TO_MSEN] = "MSN", + [TO_LOAD] = "LOA", + [TO_READ_CONFIG] = "RCF", + [TO_READ_ATTMSG] = "RAT", + [TO_DIS] = "DIS", + [TO_ASSIGN] = "ASS", + [TO_UNASSIGN] = "UAS", + [TO_BREAKASS] = "BRK" +}; + +/* + * Inline functions, that have to be defined. + */ + +/* + * I/O helper function. Adds the request to the request queue + * and starts it if the tape is idle. Has to be called with + * the device lock held. + */ +static inline int +__do_IO(struct tape_device *device, struct tape_request *request) +{ + int rc = 0; + + if(request->cpaddr == NULL) + BUG(); + + if(request->timeout.expires > 0) { + /* Init should be done by caller */ + DBF_EVENT(6, "(%04x): starting timed request\n", + device->devstat.devno); + + request->timeout.function = tape_timeout_io; + request->timeout.data = (unsigned long) + tape_clone_request(request); + add_timer(&request->timeout); + } + + rc = do_IO(device->devinfo.irq, request->cpaddr, + (unsigned long) request, 0x00, request->options); + + return rc; +} + +static void +__tape_process_queue(void *data) +{ + struct tape_device *device = (struct tape_device *) data; + struct list_head *l, *n; + struct tape_request *request; + int rc; + + DBF_EVENT(6, "tape_process_queue(%p)\n", device); + + /* + * We were told to be quiet. Do nothing for now. + */ + if (TAPE_NOACCESS(device)) { + return; + } + + /* + * Try to start each request on request queue until one is + * started successful. + */ + list_for_each_safe(l, n, &device->req_queue) { + request = list_entry(l, struct tape_request, list); + + /* Happens when new request arrive while still doing one. 
*/ + if (request->status == TAPE_REQUEST_IN_IO) + break; + +#ifdef CONFIG_S390_TAPE_BLOCK + if (request->op == TO_BLOCK) + device->discipline->check_locate(device, request); +#endif + switch(request->op) { + case TO_MSEN: + case TO_ASSIGN: + case TO_UNASSIGN: + case TO_BREAKASS: + break; + default: + if (TAPE_OPEN(device)) + break; + DBF_EVENT(3, + "TAPE(%04x): REQ in UNUSED state\n", + device->devstat.devno); + } + + rc = __do_IO(device, request); + if (rc == 0) { + DBF_EVENT(6, "tape: do_IO success\n"); + request->status = TAPE_REQUEST_IN_IO; + break; + } + /* Start failed. Remove request and indicate failure. */ + DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc); + + /* Set final status and remove. */ + request->rc = rc; + __tape_remove_request(device, request); + } +} + +static void +tape_process_queue(void *data) +{ + unsigned long flags; + struct tape_device * device; + + device = (struct tape_device *) data; + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + atomic_set(&device->bh_scheduled, 0); + __tape_process_queue(device); + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); +} + +void +tape_schedule_bh(struct tape_device *device) +{ + /* Protect against rescheduling, when already running. */ + if (atomic_compare_and_swap(0, 1, &device->bh_scheduled)) + return; + + INIT_LIST_HEAD(&device->bh_task.list); + device->bh_task.sync = 0; + device->bh_task.routine = tape_process_queue; + device->bh_task.data = device; + + queue_task(&device->bh_task, &tq_immediate); + mark_bh(IMMEDIATE_BH); + + return; +} + +/* + * Stop running ccw. Has to be called with the device lock held. + */ +static inline int +__tape_halt_io(struct tape_device *device, struct tape_request *request) +{ + int retries; + int rc; + + /* SMB: This should never happen */ + if(request->cpaddr == NULL) + BUG(); + + /* Check if interrupt has already been processed */ + if (request->callback == NULL) + return 0; + + /* Stop a possibly running timer */ + if(request->timeout.expires) { + if(del_timer(&request->timeout) > 0) { + tape_put_request(request); + request->timeout.data = 0L; + } + } + + rc = 0; + for (retries = 0; retries < 5; retries++) { + if (retries < 2) + rc = halt_IO(device->devinfo.irq, + (long) request, request->options); + else + rc = clear_IO(device->devinfo.irq, + (long) request, request->options); + if (rc == 0) + break; /* termination successful */ + if (rc == -ENODEV) + DBF_EXCEPTION(2, "device gone, retry\n"); + else if (rc == -EIO) + DBF_EXCEPTION(2, "I/O error, retry\n"); + else if (rc == -EBUSY) + DBF_EXCEPTION(2, "device busy, retry later\n"); + else + BUG(); + } + if (rc == 0) + request->status = TAPE_REQUEST_DONE; + return rc; +} + +static void +__tape_remove_request(struct tape_device *device, struct tape_request *request) +{ + /* First remove the request from the queue. */ + list_del(&request->list); + + /* This request isn't processed any further. */ + request->status = TAPE_REQUEST_DONE; + + /* Finally, if the callback hasn't been called, do it now. */ + if (request->callback != NULL) { + request->callback(request, request->callback_data); + request->callback = NULL; + } +} + +/* + * Tape state functions + */ +/* + * Printable strings for tape enumerations. + */ +const char *tape_state_string(struct tape_device *device) { + char *s = " ???? 
"; + + if (TAPE_NOT_OPER(device)) { + s = "NOT_OP"; + } else if (TAPE_NOACCESS(device)) { + s = "NO_ACC"; + } else if (TAPE_BOXED(device)) { + s = "BOXED "; + } else if (TAPE_OPEN(device)) { + s = "IN_USE"; + } else if (TAPE_ASSIGNED(device)) { + s = "ASSIGN"; + } else if (TAPE_INIT(device)) { + s = "INIT "; + } else if (TAPE_UNUSED(device)) { + s = "UNUSED"; + } + + return s; +} + +void +tape_state_set(struct tape_device *device, unsigned int status) +{ + const char *str; + + /* Maybe nothing changed. */ + if (device->tape_status == status) + return; + + DBF_EVENT(4, "ts. dev: %x\n", device->first_minor); + str = tape_state_string(device); + DBF_EVENT(4, "old ts: 0x%08x %s\n", device->tape_status, str); + + device->tape_status = status; + + str = tape_state_string(device); + DBF_EVENT(4, "new ts: 0x%08x %s\n", status, str); + + wake_up(&device->state_change_wq); +} + +void +tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) +{ + if (device->medium_state == newstate) + return; + + switch(newstate){ + case MS_UNLOADED: + device->tape_generic_status |= GMT_DR_OPEN(~0); + PRINT_INFO("(%04x): Tape is unloaded\n", + device->devstat.devno); + break; + case MS_LOADED: + device->tape_generic_status &= ~GMT_DR_OPEN(~0); + PRINT_INFO("(%04x): Tape has been mounted\n", + device->devstat.devno); + break; + default: + // print nothing + break; + } +#ifdef CONFIG_S390_TAPE_BLOCK + tapeblock_medium_change(device); +#endif + device->medium_state = newstate; + wake_up(&device->state_change_wq); +} + +static void +tape_timeout_io(unsigned long data) +{ + struct tape_request *request; + struct tape_device *device; + unsigned long flags; + + request = (struct tape_request *) data; + device = request->device; + + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + if(request->callback != NULL) { + DBF_EVENT(3, "TAPE(%04x): %s timeout\n", + device->devstat.devno, tape_op_verbose[request->op]); + PRINT_ERR("TAPE(%04x): %s timeout\n", + device->devstat.devno, tape_op_verbose[request->op]); + + if(__tape_halt_io(device, request) == 0) + DBF_EVENT(6, "tape_timeout_io: success\n"); + else { + DBF_EVENT(2, "tape_timeout_io: halt_io failed\n"); + PRINT_ERR("tape_timeout_io: halt_io failed\n"); + } + request->rc = -EIO; + + /* Remove from request queue. */ + __tape_remove_request(device, request); + + /* Start next request. */ + if (!list_empty(&device->req_queue)) + tape_schedule_bh(device); + } + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + tape_put_request(request); +} + +/* + * DEVFS Functions + */ +#ifdef CONFIG_DEVFS_FS +devfs_handle_t tape_devfs_root_entry; + +/* + * Create devfs root entry (devno in hex) for device td + */ +static int +tape_mkdevfsroot (struct tape_device* device) +{ + char devno [5]; + + sprintf(devno, "%04x", device->devinfo.devno); + device->devfs_dir = devfs_mk_dir(tape_devfs_root_entry, devno, device); + return (device->devfs_dir == NULL) ? -ENOMEM : 0; +} + +/* + * Remove devfs root entry for a device + */ +static void +tape_rmdevfsroot (struct tape_device *device) +{ + if (device->devfs_dir) { + devfs_unregister(device->devfs_dir); + device->devfs_dir = NULL; + } +} +#endif + +/* + * Enable tape device + */ +int +tape_enable_device(struct tape_device *device, + struct tape_discipline *discipline) +{ + int rc; + + if (!TAPE_INIT(device)) + return -EINVAL; + + /* Register IRQ. 
*/ + rc = s390_request_irq_special(device->devinfo.irq, __tape_do_irq, + tape_noper_handler, 0, + TAPE_MAGIC, &device->devstat); + if (rc) + return rc; + + s390_set_private_data(device->devinfo.irq, tape_clone_device(device)); + + device->discipline = discipline; + + /* Let the discipline have a go at the device. */ + rc = discipline->setup_device(device); + if (rc) { + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + free_irq(device->devinfo.irq, &device->devstat); + return rc; + } + +#ifdef CONFIG_DEVFS_FS + /* Create devfs entries */ + rc = tape_mkdevfsroot(device); + if (rc){ + PRINT_WARN ("Cannot create a devfs directory for " + "device %04x\n", device->devinfo.devno); + device->discipline->cleanup_device(device); + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + free_irq(device->devinfo.irq, &device->devstat); + return rc; + } +#endif + rc = tapechar_setup_device(device); + if (rc) { +#ifdef CONFIG_DEVFS_FS + tape_rmdevfsroot(device); +#endif + device->discipline->cleanup_device(device); + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + free_irq(device->devinfo.irq, &device->devstat); + return rc; + } +#ifdef CONFIG_S390_TAPE_BLOCK + rc = tapeblock_setup_device(device); + if (rc) { + tapechar_cleanup_device(device); +#ifdef CONFIG_DEVFS_FS + tape_rmdevfsroot(device); +#endif + device->discipline->cleanup_device(device); + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + free_irq(device->devinfo.irq, &device->devstat); + return rc; + } +#endif + + TAPE_CLEAR_STATE(device, TAPE_STATUS_INIT); + + return 0; +} + +/* + * Disable tape device. Check if there is a running request and + * terminate it. Post all queued requests with -EIO. + */ +void +tape_disable_device(struct tape_device *device) +{ + struct list_head *l, *n; + struct tape_request *request; + + spin_lock_irq(get_irq_lock(device->devinfo.irq)); + /* Post remaining requests with -EIO */ + list_for_each_safe(l, n, &device->req_queue) { + request = list_entry(l, struct tape_request, list); + if (request->status == TAPE_REQUEST_IN_IO) + __tape_halt_io(device, request); + + request->rc = -EIO; + __tape_remove_request(device, request); + } + + if (TAPE_ASSIGNED(device)) { + spin_unlock(get_irq_lock(device->devinfo.irq)); + if( + tape_unassign( + device, + TAPE_STATUS_ASSIGN_M|TAPE_STATUS_ASSIGN_A + ) == 0 + ) { + printk(KERN_WARNING "%04x: automatically unassigned\n", + device->devinfo.devno); + } + spin_lock_irq(get_irq_lock(device->devinfo.irq)); + } + + TAPE_SET_STATE(device, TAPE_STATUS_NOT_OPER); + spin_unlock_irq(get_irq_lock(device->devinfo.irq)); + + s390_set_private_data(device->devinfo.irq, NULL); + tape_put_device(device); + +#ifdef CONFIG_S390_TAPE_BLOCK + tapeblock_cleanup_device(device); +#endif + tapechar_cleanup_device(device); +#ifdef CONFIG_DEVFS_FS + tape_rmdevfsroot(device); +#endif + device->discipline->cleanup_device(device); + device->discipline = NULL; + free_irq(device->devinfo.irq, &device->devstat); +} + +/* + * Find discipline by cu_type. 
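tape_enable_device() above acquires its resources in a fixed order (IRQ, discipline setup, devfs directory, character frontend, block frontend) and, on every failure, releases whatever was already acquired in reverse order; tape_disable_device() walks the same list backwards. The driver writes the cleanup calls out inline at each failure point; the short self-contained sketch below expresses the same reverse-order unwinding with goto labels, using hypothetical stub functions rather than the driver's real ones.

#include <stdio.h>

/* Hypothetical stand-ins for the real setup steps; setup_char() is made
 * to fail so that the unwind path actually runs. */
static int grab_irq(void)            { return 0; }
static int setup_discipline(void)    { return 0; }
static int setup_char(void)          { return -1; }
static void release_irq(void)        { puts("release irq"); }
static void cleanup_discipline(void) { puts("cleanup discipline"); }

static int enable(void)
{
	int rc;

	if ((rc = grab_irq()) != 0)
		return rc;
	if ((rc = setup_discipline()) != 0)
		goto out_irq;
	if ((rc = setup_char()) != 0)
		goto out_disc;
	return 0;
out_disc:
	cleanup_discipline();	/* undo in reverse order of acquisition */
out_irq:
	release_irq();
	return rc;
}

int main(void)
{
	printf("enable() = %d\n", enable());
	return 0;
}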
+ */ +struct tape_discipline * +tape_get_discipline(int cu_type) +{ + struct list_head *l; + struct tape_discipline *discipline, *tmp; + + discipline = NULL; + spin_lock(&tape_discipline_lock); + list_for_each(l, &tape_disciplines) { + tmp = list_entry(l, struct tape_discipline, list); + if (tmp->cu_type == cu_type) { + discipline = tmp; + break; + } + } + if (discipline->owner != NULL) { + if (!try_inc_mod_count(discipline->owner)) + /* Discipline is currently unloaded! */ + discipline = NULL; + } + spin_unlock(&tape_discipline_lock); + return discipline; +} + +/* + * Decrement usage count for discipline. + */ +void +tape_put_discipline(struct tape_discipline *discipline) +{ + spin_lock(&tape_discipline_lock); + if (discipline->owner) + __MOD_DEC_USE_COUNT(discipline->owner); + spin_unlock(&tape_discipline_lock); +} + +/* + * Register backend discipline + */ +int +tape_register_discipline(struct tape_discipline *discipline) +{ + if (!try_inc_mod_count(THIS_MODULE)) + /* Tape module is currently unloaded! */ + return -ENOSYS; + spin_lock(&tape_discipline_lock); + list_add_tail(&discipline->list, &tape_disciplines); + spin_unlock(&tape_discipline_lock); + /* Now add the tape devices with matching cu_type. */ + tape_add_devices(discipline); + return 0; +} + +/* + * Unregister backend discipline + */ +void +__tape_unregister_discipline(struct tape_discipline *discipline) +{ + list_del(&discipline->list); + /* Remove tape devices with matching cu_type. */ + tape_remove_devices(discipline); + MOD_DEC_USE_COUNT; +} + +void +tape_unregister_discipline(struct tape_discipline *discipline) +{ + struct list_head *l; + + spin_lock(&tape_discipline_lock); + list_for_each(l, &tape_disciplines) { + if (list_entry(l, struct tape_discipline, list) == discipline){ + __tape_unregister_discipline(discipline); + break; + } + } + spin_unlock(&tape_discipline_lock); +} + +/* + * Allocate a new tape ccw request + */ +struct tape_request * +tape_alloc_request(int cplength, int datasize) +{ + struct tape_request *request; + + if (datasize > PAGE_SIZE || (cplength*sizeof(ccw1_t)) > PAGE_SIZE) + BUG(); + + DBF_EVENT(5, "tape_alloc_request(%d,%d)\n", cplength, datasize); + + request = (struct tape_request *) + kmalloc(sizeof(struct tape_request), GFP_KERNEL); + if (request == NULL) { + DBF_EXCEPTION(1, "cqra nomem\n"); + return ERR_PTR(-ENOMEM); + } + memset(request, 0, sizeof(struct tape_request)); + INIT_LIST_HEAD(&request->list); + atomic_set(&request->ref_count, 1); + + /* allocate channel program */ + if (cplength > 0) { + request->cpaddr = + kmalloc(cplength*sizeof(ccw1_t), GFP_ATOMIC | GFP_DMA); + if (request->cpaddr == NULL) { + DBF_EXCEPTION(1, "cqra nomem\n"); + kfree(request); + return ERR_PTR(-ENOMEM); + } + memset(request->cpaddr, 0, cplength*sizeof(ccw1_t)); + } + /* alloc small kernel buffer */ + if (datasize > 0) { + request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA); + if (request->cpdata == NULL) { + DBF_EXCEPTION(1, "cqra nomem\n"); + if (request->cpaddr != NULL) + kfree(request->cpaddr); + kfree(request); + return ERR_PTR(-ENOMEM); + } + memset(request->cpdata, 0, datasize); + } + + DBF_EVENT(5, "request=%p(%p/%p)\n", request, request->cpaddr, + request->cpdata); + + return request; +} + +/* + * Free tape ccw request + */ +void +tape_free_request (struct tape_request * request) +{ + DBF_EVENT(5, "tape_free_request(%p)\n", request); + + if (request->device != NULL) { + tape_put_device(request->device); + request->device = NULL; + } + if (request->cpdata != NULL) { + kfree(request->cpdata); + 
} + if (request->cpaddr != NULL) { + kfree(request->cpaddr); + } + kfree(request); +} + +struct tape_request * +tape_clone_request(struct tape_request *request) +{ + DBF_EVENT(5, "tape_clone_request(%p) = %i\n", request, + atomic_inc_return(&request->ref_count)); + return request; +} + +struct tape_request * +tape_put_request(struct tape_request *request) +{ + int remain; + + DBF_EVENT(4, "tape_put_request(%p)\n", request); + if((remain = atomic_dec_return(&request->ref_count)) > 0) { + DBF_EVENT(5, "remaining = %i\n", remain); + } else { + tape_free_request(request); + } + + return NULL; +} + +/* + * Write sense data to console/dbf + */ +void +tape_dump_sense(struct tape_device* device, struct tape_request *request) +{ + devstat_t *stat; + unsigned int *sptr; + + stat = &device->devstat; + PRINT_INFO("-------------------------------------------------\n"); + PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", + stat->dstat, stat->cstat, stat->cpa); + PRINT_INFO("DEVICE: %04x\n", device->devinfo.devno); + if (request != NULL) + PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); + + sptr = (unsigned int *) stat->ii.sense.data; + PRINT_INFO("Sense data: %08X %08X %08X %08X \n", + sptr[0], sptr[1], sptr[2], sptr[3]); + PRINT_INFO("Sense data: %08X %08X %08X %08X \n", + sptr[4], sptr[5], sptr[6], sptr[7]); + PRINT_INFO("--------------------------------------------------\n"); +} + +/* + * Write sense data to dbf + */ +void +tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request) +{ + devstat_t *stat = &device->devstat; + unsigned int *sptr; + const char* op; + + if (request != NULL) + op = tape_op_verbose[request->op]; + else + op = "---"; + DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", stat->dstat,stat->cstat); + DBF_EVENT(3, "DEVICE: %04x OP\t: %s\n", device->devinfo.devno,op); + sptr = (unsigned int *) stat->ii.sense.data; + DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); + DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]); + DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]); + DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]); +} + +static inline int +__tape_do_io(struct tape_device *device, struct tape_request *request) +{ + /* Some operations may happen even on an unused tape device */ + switch(request->op) { + case TO_MSEN: + case TO_ASSIGN: + case TO_UNASSIGN: + case TO_BREAKASS: + break; + default: + if (!TAPE_OPEN(device)) + return -ENODEV; + } + + /* Add reference to device to the request. This increases the reference + count. */ + request->device = tape_clone_device(device); + request->status = TAPE_REQUEST_QUEUED; + + list_add_tail(&request->list, &device->req_queue); + __tape_process_queue(device); + + return 0; +} + +/* + * Add the request to the request queue, try to start it if the + * tape is idle. Return without waiting for end of i/o. + */ +int +tape_do_io_async(struct tape_device *device, struct tape_request *request) +{ + int rc; + long flags; + + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + /* Add request to request queue and try to start it. */ + rc = __tape_do_io(device, request); + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + return rc; +} + +/* + * tape_do_io/__tape_wake_up + * Add the request to the request queue, try to start it if the + * tape is idle and wait uninterruptible for its completion. 
+ */ +static void +__tape_wake_up(struct tape_request *request, void *data) +{ + request->callback = NULL; + wake_up((wait_queue_head_t *) data); +} + +int +tape_do_io(struct tape_device *device, struct tape_request *request) +{ + wait_queue_head_t wq; + long flags; + int rc; + + DBF_EVENT(5, "tape: tape_do_io(%p, %p)\n", device, request); + + init_waitqueue_head(&wq); + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + /* Setup callback */ + request->callback = __tape_wake_up; + request->callback_data = &wq; + /* Add request to request queue and try to start it. */ + rc = __tape_do_io(device, request); + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + if (rc) + return rc; + /* Request added to the queue. Wait for its completion. */ + wait_event(wq, (request->callback == NULL)); + /* Get rc from request */ + return request->rc; +} + +/* + * tape_do_io_interruptible/__tape_wake_up_interruptible + * Add the request to the request queue, try to start it if the + * tape is idle and wait uninterruptible for its completion. + */ +static void +__tape_wake_up_interruptible(struct tape_request *request, void *data) +{ + request->callback = NULL; + wake_up_interruptible((wait_queue_head_t *) data); +} + +int +tape_do_io_interruptible(struct tape_device *device, + struct tape_request *request) +{ + wait_queue_head_t wq; + long flags; + int rc; + + DBF_EVENT(5, "tape: tape_do_io_int(%p, %p)\n", device, request); + + init_waitqueue_head(&wq); + // debug paranoia + if(!device) BUG(); + if(!request) BUG(); + + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + /* Setup callback */ + request->callback = __tape_wake_up_interruptible; + request->callback_data = &wq; + rc = __tape_do_io(device, request); + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + if (rc) + return rc; + /* Request added to the queue. Wait for its completion. */ + rc = wait_event_interruptible(wq, (request->callback == NULL)); + if (rc != -ERESTARTSYS) + /* Request finished normally. */ + return request->rc; + /* Interrupted by a signal. We have to stop the current request. */ + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + rc = __tape_halt_io(device, request); + if (rc == 0) { + DBF_EVENT(3, "IO stopped on irq %d\n", device->devinfo.irq); + rc = -ERESTARTSYS; + } + spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags); + return rc; +} + + +/* + * Tape interrupt routine, called from Ingo's I/O layer + */ +static void +__tape_do_irq (int irq, void *ds, struct pt_regs *regs) +{ + struct tape_device *device; + struct tape_request *request; + devstat_t *devstat; + int final; + int rc; + + devstat = (devstat_t *) ds; + device = (struct tape_device *) s390_get_private_data(irq); + if (device == NULL) { + PRINT_ERR("could not get device structure for irq %d " + "in interrupt\n", irq); + return; + } + request = (struct tape_request *) devstat->intparm; + + DBF_EVENT(5, "tape: __tape_do_irq(%p, %p)\n", device, request); + + if(request && request->timeout.expires) { + /* + * If the timer was not yet startet the reference to the + * request has to be dropped here. Otherwise it will be + * dropped by the timeout handler. 
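The timer reference mentioned above is one of several independent references held on a struct tape_request: the submitter owns one from tape_alloc_request(), and the timeout timer takes its own via tape_clone_request() so the request cannot disappear while the timer is pending; whichever tape_put_request() drops the count to zero frees the request. Below is a toy user-space model of that clone/put contract, with hypothetical names (struct req, req_alloc, ...) in place of the driver's kmalloc/atomic_t code.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	atomic_int ref_count;
};

static struct req *req_alloc(void)
{
	struct req *r = malloc(sizeof(*r));

	if (r != NULL)
		atomic_init(&r->ref_count, 1);	/* reference owned by the submitter */
	return r;
}

static struct req *req_clone(struct req *r)
{
	atomic_fetch_add(&r->ref_count, 1);	/* another holder, e.g. the timer */
	return r;
}

static void req_put(struct req *r)
{
	/* fetch_sub returns the old value; 1 means we were the last holder */
	if (atomic_fetch_sub(&r->ref_count, 1) == 1) {
		puts("last reference gone, freeing request");
		free(r);
	}
}

int main(void)
{
	struct req *r = req_alloc();
	struct req *timer_ref;

	if (r == NULL)
		return 1;
	timer_ref = req_clone(r);	/* taken when the timer is armed */
	req_put(timer_ref);		/* dropped when the timer fires or is deleted */
	req_put(r);			/* submitter done: the request is freed here */
	return 0;
}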
+ */ + if(del_timer(&request->timeout) > 0) + request->timeout.data = (unsigned long) + tape_put_request(request); + } + + if (device->devstat.cstat & SCHN_STAT_INCORR_LEN) + DBF_EVENT(4, "tape: incorrect blocksize\n"); + + if (device->devstat.dstat != 0x0c){ + /* + * Any request that does not come back with channel end + * and device end is unusual. Log the sense data. + */ + DBF_EVENT(3,"-- Tape Interrupthandler --\n"); + tape_dump_sense_dbf(device, request); + } + if (TAPE_NOT_OPER(device)) { + DBF_EVENT(6, "tape:device is not operational\n"); + return; + } + + /* Some status handling */ + if(devstat && devstat->dstat & DEV_STAT_UNIT_CHECK) { + unsigned char *sense = devstat->ii.sense.data; + + if(!(sense[1] & SENSE_DRIVE_ONLINE)) + device->tape_generic_status &= ~GMT_ONLINE(~0); + } else { + device->tape_generic_status |= GMT_ONLINE(~0); + } + + rc = device->discipline->irq(device, request); + /* + * rc < 0 : request finished unsuccessfully. + * rc == TAPE_IO_SUCCESS: request finished successfully. + * rc == TAPE_IO_PENDING: request is still running. Ignore rc. + * rc == TAPE_IO_RETRY: request finished but needs another go. + * rc == TAPE_IO_STOP: request needs to get terminated. + */ + final = 0; + switch (rc) { + case TAPE_IO_SUCCESS: + final = 1; + break; + case TAPE_IO_PENDING: + break; + case TAPE_IO_RETRY: +#ifdef CONFIG_S390_TAPE_BLOCK + if (request->op == TO_BLOCK) + device->discipline->check_locate(device, request); +#endif + rc = __do_IO(device, request); + if (rc) { + DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc); + final = 1; + } + break; + case TAPE_IO_STOP: + __tape_halt_io(device, request); + rc = -EIO; + final = 1; + break; + default: + if (rc > 0) { + DBF_EVENT(6, "xunknownrc\n"); + PRINT_ERR("Invalid return code from discipline " + "interrupt function.\n"); + rc = -EIO; + } + final = 1; + break; + } + if (final) { + /* This might be an unsolicited interrupt (no request) */ + if(request != NULL) { + /* Set ending status. */ + request->rc = rc; + __tape_remove_request(device, request); + } + /* Start next request. */ + if (!list_empty(&device->req_queue)) + tape_schedule_bh(device); + } +} + +/* + * Lock a shared tape for our exclusive use. + */ +int +tape_assign(struct tape_device *device, int type) +{ + int rc; + + spin_lock_irq(&device->assign_lock); + + /* The device is already assigned */ + rc = 0; + if (!TAPE_ASSIGNED(device)) { + rc = device->discipline->assign(device); + + spin_lock(get_irq_lock(device->devinfo.irq)); + if (rc) { + PRINT_WARN( + "(%04x): assign failed - " + "device might be busy\n", + device->devstat.devno); + DBF_EVENT(3, + "(%04x): assign failed " + "- device might be busy\n", + device->devstat.devno); + TAPE_SET_STATE(device, TAPE_STATUS_BOXED); + } else { + DBF_EVENT(3, "(%04x): assign lpum = %02x\n", + device->devstat.devno, device->devstat.lpum); + tape_state_set( + device, + (device->tape_status | type) & + (~TAPE_STATUS_BOXED) + ); + } + } else { + spin_lock(get_irq_lock(device->devinfo.irq)); + TAPE_SET_STATE(device, type); + } + spin_unlock(get_irq_lock(device->devinfo.irq)); + spin_unlock_irq(&device->assign_lock); + + return rc; +} + +/* + * Unlock a shared tape. 
+ */ +int +tape_unassign(struct tape_device *device, int type) +{ + int rc; + + spin_lock_irq(&device->assign_lock); + + rc = 0; + spin_lock(get_irq_lock(device->devinfo.irq)); + if (!TAPE_ASSIGNED(device)) { + spin_unlock(get_irq_lock(device->devinfo.irq)); + spin_unlock_irq(&device->assign_lock); + return 0; + } + TAPE_CLEAR_STATE(device, type); + spin_unlock(get_irq_lock(device->devinfo.irq)); + + if (!TAPE_ASSIGNED(device)) { + rc = device->discipline->unassign(device); + if (rc) { + PRINT_WARN("(%04x): unassign failed\n", + device->devstat.devno); + DBF_EVENT(3, "(%04x): unassign failed\n", + device->devstat.devno); + } else { + DBF_EVENT(3, "(%04x): unassign lpum = %02x\n", + device->devstat.devno, device->devstat.lpum); + } + } + + spin_unlock_irq(&device->assign_lock); + return rc; +} + +/* + * Tape device open function used by tape_char & tape_block frontends. + */ +int +tape_open(struct tape_device *device) +{ + int rc; + + spin_lock_irq(&tape_discipline_lock); + spin_lock(get_irq_lock(device->devinfo.irq)); + if (TAPE_NOT_OPER(device)) { + DBF_EVENT(6, "TAPE:nodev\n"); + rc = -ENODEV; + } else if (TAPE_OPEN(device)) { + DBF_EVENT(6, "TAPE:dbusy\n"); + rc = -EBUSY; + } else if (device->discipline != NULL && + !try_inc_mod_count(device->discipline->owner)) { + DBF_EVENT(6, "TAPE:nodisc\n"); + rc = -ENODEV; + } else { + TAPE_SET_STATE(device, TAPE_STATUS_OPEN); + rc = 0; + } + spin_unlock(get_irq_lock(device->devinfo.irq)); + spin_unlock_irq(&tape_discipline_lock); + return rc; +} + +/* + * Tape device release function used by tape_char & tape_block frontends. + */ +int +tape_release(struct tape_device *device) +{ + spin_lock_irq(&tape_discipline_lock); + spin_lock(get_irq_lock(device->devinfo.irq)); + + if (TAPE_OPEN(device)) { + TAPE_CLEAR_STATE(device, TAPE_STATUS_OPEN); + + if (device->discipline->owner) + __MOD_DEC_USE_COUNT(device->discipline->owner); + } + spin_unlock(get_irq_lock(device->devinfo.irq)); + spin_unlock_irq(&tape_discipline_lock); + + return 0; +} + +/* + * Execute a magnetic tape command a number of times. + */ +int +tape_mtop(struct tape_device *device, int mt_op, int mt_count) +{ + tape_mtop_fn fn; + int rc; + + DBF_EVENT(6, "TAPE:mtio\n"); + DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op); + DBF_EVENT(6, "TAPE:arg: %x\n", mt_count); + + if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS) + return -EINVAL; + fn = device->discipline->mtop_array[mt_op]; + if(fn == NULL) + return -EINVAL; + + /* We assume that the backends can handle count up to 500. */ + if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF || + mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) { + rc = 0; + for (; mt_count > 500; mt_count -= 500) + if ((rc = fn(device, 500)) != 0) + break; + if (rc == 0) + rc = fn(device, mt_count); + } else + rc = fn(device, mt_count); + return rc; + +} + +void +tape_init_disciplines(void) +{ +#ifdef CONFIG_S390_TAPE_34XX + tape_34xx_init(); +#endif +#ifdef CONFIG_S390_TAPE_34XX_MODULE + request_module("tape_34xx"); +#endif + +#ifdef CONFIG_S390_TAPE_3590 + tape_3590_init(); +#endif +#ifdef CONFIG_S390_TAPE_3590_MODULE + request_module("tape_3590"); +#endif + tape_auto_detect(); +} + +/* + * Tape init function. 
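tape_mtop() above splits large repeat counts for the spacing operations (MTFSF, MTBSR, ...) into slices of at most 500, since the backends are only assumed to handle counts up to 500 per call. The stand-alone sketch below mirrors that slicing loop; do_mtop() and backend_fsf() are made-up names, not driver functions.

#include <stdio.h>

/* Toy stand-in for a backend mtop function that accepts at most 500
 * repetitions per call. */
static int backend_fsf(int count)
{
	printf("backend called with count=%d\n", count);
	return 0;
}

/* Feed the backend slices of 500 until the remainder fits in one call. */
static int do_mtop(int mt_count)
{
	int rc;

	for (; mt_count > 500; mt_count -= 500)
		if ((rc = backend_fsf(500)) != 0)
			return rc;
	return backend_fsf(mt_count);
}

int main(void)
{
	return do_mtop(1234);	/* results in calls with 500, 500 and 234 */
}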
+ */ +static int +tape_init (void) +{ + tape_dbf_area = debug_register ( "tape", 1, 2, 4*sizeof(long)); + debug_register_view(tape_dbf_area, &debug_sprintf_view); + debug_set_level(tape_dbf_area, 6); /* FIXME */ + DBF_EVENT(3, "tape init: ($Revision: 1.6 $)\n"); +#ifdef CONFIG_DEVFS_FS + tape_devfs_root_entry = devfs_mk_dir (NULL, "tape", NULL); +#endif /* CONFIG_DEVFS_FS */ + DBF_EVENT(3, "dev detect\n"); + /* Parse the parameters. */ + tape_devmap_init(); +#ifdef CONFIG_PROC_FS + tape_proc_init(); +#endif /* CONFIG_PROC_FS */ + tapechar_init(); +#ifdef CONFIG_S390_TAPE_BLOCK + tapeblock_init(); +#endif + tape_init_disciplines(); + return 0; +} + +/* + * Tape exit function. + */ +void +tape_exit(void) +{ + struct list_head *l, *n; + struct tape_discipline *discipline; + + DBF_EVENT(6, "tape exit\n"); + + /* Cleanup registered disciplines. */ + spin_lock(&tape_discipline_lock); + list_for_each_safe(l, n, &tape_disciplines) { + discipline = list_entry(l, struct tape_discipline, list); + __tape_unregister_discipline(discipline); + } + spin_unlock(&tape_discipline_lock); + + /* Get rid of the frontends */ + tapechar_exit(); +#ifdef CONFIG_S390_TAPE_BLOCK + tapeblock_exit(); +#endif +#ifdef CONFIG_PROC_FS + tape_proc_cleanup(); +#endif + tape_devmap_exit(); +#ifdef CONFIG_DEVFS_FS + devfs_unregister (tape_devfs_root_entry); /* devfs checks for NULL */ +#endif /* CONFIG_DEVFS_FS */ + debug_unregister (tape_dbf_area); +} + +/* + * Issue an hotplug event + */ +void tape_hotplug_event(struct tape_device *device, int devmaj, int action) { +#ifdef CONFIG_HOTPLUG + char *argv[3]; + char *envp[8]; + char devno[20]; + char major[20]; + char minor[20]; + + sprintf(devno, "DEVNO=%04x", device->devinfo.devno); + sprintf(major, "MAJOR=%d", devmaj); + sprintf(minor, "MINOR=%d", device->first_minor); + + argv[0] = hotplug_path; + argv[1] = "tape"; + argv[2] = NULL; + + envp[0] = "HOME=/"; + envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + + switch(action) { + case TAPE_HOTPLUG_CHAR_ADD: + case TAPE_HOTPLUG_BLOCK_ADD: + envp[2] = "ACTION=add"; + break; + case TAPE_HOTPLUG_CHAR_REMOVE: + case TAPE_HOTPLUG_BLOCK_REMOVE: + envp[2] = "ACTION=remove"; + break; + default: + BUG(); + } + switch(action) { + case TAPE_HOTPLUG_CHAR_ADD: + case TAPE_HOTPLUG_CHAR_REMOVE: + envp[3] = "INTERFACE=char"; + break; + case TAPE_HOTPLUG_BLOCK_ADD: + case TAPE_HOTPLUG_BLOCK_REMOVE: + envp[3] = "INTERFACE=block"; + break; + } + envp[4] = devno; + envp[5] = major; + envp[6] = minor; + envp[7] = NULL; + + call_usermodehelper(argv[0], argv, envp); +#endif +} + +MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and " + "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)"); +MODULE_DESCRIPTION("Linux on zSeries channel attached " + "tape device driver ($Revision: 1.6 $)"); + +module_init(tape_init); +module_exit(tape_exit); + +EXPORT_SYMBOL(tape_dbf_area); +EXPORT_SYMBOL(tape_state_string); +EXPORT_SYMBOL(tape_op_verbose); +EXPORT_SYMBOL(tape_state_set); +EXPORT_SYMBOL(tape_med_state_set); +EXPORT_SYMBOL(tape_register_discipline); +EXPORT_SYMBOL(tape_unregister_discipline); +EXPORT_SYMBOL(tape_alloc_request); +EXPORT_SYMBOL(tape_put_request); +EXPORT_SYMBOL(tape_clone_request); +EXPORT_SYMBOL(tape_dump_sense); +EXPORT_SYMBOL(tape_dump_sense_dbf); +EXPORT_SYMBOL(tape_do_io); +EXPORT_SYMBOL(tape_do_io_free); +EXPORT_SYMBOL(tape_do_io_async); +EXPORT_SYMBOL(tape_do_io_interruptible); +EXPORT_SYMBOL(tape_mtop); +EXPORT_SYMBOL(tape_hotplug_event); + diff -u --new-file --recursive --exclude-from /usr/src/exclude 
linux.vanilla/drivers/s390/char/tapedefs.h linux.22-ac2/drivers/s390/char/tapedefs.h --- linux.vanilla/drivers/s390/char/tapedefs.h 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tapedefs.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -/*********************************************************************** - * drivers/s390/char/tapedefs.h - * tape device driver for S/390 and zSeries tapes. - * - * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation - * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - * - *********************************************************************** - */ - -/* Kernel Version Compatibility section */ -#include -#include -#include -#include - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,2,17)) -#define TAPE_DEBUG // use s390 debug feature -#else -#undef TAPE_DEBUG // debug feature not supported by our 2.2.16 code -static inline void set_normalized_cda ( ccw1_t * cp, unsigned long address ) { - cp -> cda = address; -} -static inline void clear_normalized_cda ( ccw1_t * ccw ) { - ccw -> cda = 0; -} -#define BUG() PRINT_FATAL("tape390: CRITICAL INTERNAL ERROR OCCURED. REPORT THIS BACK TO LINUX390@DE.IBM.COM\n") -#endif -#define CONFIG_S390_TAPE_DYNAMIC // allow devices to be attached or detached on the fly -#define TAPEBLOCK_RETRIES 20 // number of retries, when a block-dev request fails. - - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98)) -#define INIT_BLK_DEV(d_major,d_request_fn,d_queue_fn,d_current) \ -do { \ - blk_dev[d_major].queue = d_queue_fn; \ -} while(0) -static inline struct request * -tape_next_request( request_queue_t *queue ) -{ - return blkdev_entry_next_request(&queue->queue_head); -} -static inline void -tape_dequeue_request( request_queue_t * q, struct request *req ) -{ - blkdev_dequeue_request (req); -} -#else -#define s390_dev_info_t dev_info_t -typedef struct request *request_queue_t; -#ifndef init_waitqueue_head -#define init_waitqueue_head(x) do { *x = NULL; } while(0) -#endif -#define blk_init_queue(x,y) do {} while(0) -#define blk_queue_headactive(x,y) do {} while(0) -#define INIT_BLK_DEV(d_major,d_request_fn,d_queue_fn,d_current) \ -do { \ - blk_dev[d_major].request_fn = d_request_fn; \ - blk_dev[d_major].queue = d_queue_fn; \ - blk_dev[d_major].current_request = d_current; \ -} while(0) -static inline struct request * -tape_next_request( request_queue_t *queue ) -{ - return *queue; -} -static inline void -tape_dequeue_request( request_queue_t * q, struct request *req ) -{ - *q = req->next; - req->next = NULL; -} -#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape_devmap.c linux.22-ac2/drivers/s390/char/tape_devmap.c --- linux.vanilla/drivers/s390/char/tape_devmap.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape_devmap.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,895 @@ +/* + * drivers/s390/char/tape_devmap.c + * device mapping for tape device driver + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + * + * Device mapping and tape= parameter parsing functions. All devmap + * functions may not be called from interrupt context. In particular + * tape_get_device is a no-no from interrupt context. + */ + +#include +#include +#include +#include + +#include +#include +#include + +/* This is ugly... 
*/ +#define PRINTK_HEADER "tape_devmap:" + +#include "tape.h" + +struct tape_devmap { + struct list_head list; + unsigned int devindex; + unsigned short devno; + devreg_t devreg; + struct tape_device *device; +}; + +struct tape_discmap { + struct list_head list; + devreg_t devreg; + struct tape_discipline *discipline; +}; + +/* + * List of all registered tapes and disciplines. + */ +static struct list_head tape_devreg_list = LIST_HEAD_INIT(tape_devreg_list); +static struct list_head tape_disc_devreg_list = LIST_HEAD_INIT(tape_disc_devreg_list); +int tape_max_devindex = 0; + +/* + * Single spinlock to protect devmap structures and lists. + */ +static spinlock_t tape_devmap_lock = SPIN_LOCK_UNLOCKED; + +/* + * Module/Kernel Parameter Handling. The syntax of tape= is: + * : (0x)?[0-9a-fA-F]+ + * : (-)? + * : (,)* + */ +int tape_autodetect = 0; /* is true, when autodetection is active */ + +/* + * char *tape[] is intended to hold the ranges supplied by the tape= statement + * it is named 'tape' to directly be filled by insmod with the comma separated + * strings when running as a module. + */ +static char *tape[256]; +MODULE_PARM (tape, "1-" __MODULE_STRING (256) "s"); + +#ifndef MODULE +/* + * The parameter parsing functions for builtin-drivers are called + * before kmalloc works. Store the pointers to the parameters strings + * into tape[] for later processing. + */ +static int __init +tape_call_setup (char *str) +{ + static int count = 0; + + if (count < 256) + tape[count++] = str; + return 1; +} + +__setup("tape=", tape_call_setup); +#endif /* not defined MODULE */ + +/* + * Add a range of devices and create the corresponding devreg_t + * structures. The order of the ranges added by this function + * will define the kdevs for the individual devices. + */ +int +tape_add_range(int from, int to) +{ + struct tape_devmap *devmap, *tmp; + struct list_head *l; + int devno; + + if (from > to) { + PRINT_ERR("Invalid device range %04x-%04x", from, to); + return -EINVAL; + } + spin_lock(&tape_devmap_lock); + for (devno = from; devno <= to; devno++) { + devmap = NULL; + list_for_each(l, &tape_devreg_list) { + tmp = list_entry(l, struct tape_devmap, list); + if (tmp->devno == devno) { + devmap = tmp; + break; + } + } + if (devmap == NULL) { + /* This devno is new. */ + devmap = (struct tape_devmap *) + kmalloc(sizeof(struct tape_devmap), + GFP_KERNEL); + if (devmap == NULL) + return -ENOMEM; + memset(devmap, 0, sizeof(struct tape_devmap)); + devmap->devno = devno; + devmap->devindex = tape_max_devindex++; + list_add(&devmap->list, &tape_devreg_list); + devmap->devreg.ci.devno = devno; + devmap->devreg.flag = DEVREG_TYPE_DEVNO; + devmap->devreg.oper_func = tape_oper_handler; + s390_device_register(&devmap->devreg); + } + } + spin_unlock(&tape_devmap_lock); + return 0; +} + +/* + * Read device number from string. The number is always is hex, + * a leading 0x is accepted (and has to be removed for simple_stroul + * to work). + */ +static inline int +tape_devno(char *str, char **endp) +{ + /* remove leading '0x' */ + if (*str == '0') { + str++; + if (*str == 'x') + str++; + } + if (!isxdigit(*str)) + return -EINVAL; + return simple_strtoul(str, endp, 16); /* interpret anything as hex */ +} + +/* + * Parse Kernel/Module Parameters and create devregs for dynamic attach/detach + */ +static int +tape_parm_parse (char *str) +{ + int from, to, rc; + + while (1) { + to = from = tape_devno(str, &str); + if (*str == '-') { + str++; + to = tape_devno(str, &str); + } + /* Negative numbers in from/to indicate errors. 
*/ + if (from >= 0 && to >= 0) { + rc = tape_add_range(from, to); + if (rc) + return rc; + } + if (*str != ',') + break; + str++; + } + if (*str != '\0') { + PRINT_WARN("junk at end of tape parameter string: %s\n", str); + return -EINVAL; + } + return 0; +} + +/* + * Parse parameters stored in tape[]. + */ +static int +tape_parse(void) +{ + int rc, i; + + if (*tape == NULL) { + /* No parameters present */ + PRINT_INFO ("No parameters supplied, enabling auto detect " + "mode for all supported devices.\n"); + tape_autodetect = 1; + return 0; + } + PRINT_INFO("Using ranges supplied in parameters, " + "disabling auto detect mode.\n"); + rc = 0; + for (i = 0; i < 256; i++) { + if (tape[i] == NULL) + break; + rc = tape_parm_parse(tape[i]); + if (rc) { + PRINT_ERR("Invalid tape parameter found.\n"); + break; + } + } + return rc; +} + +/* + * Create a devreg for a discipline. This is only done if no explicit + * tape range is given. The tape_oper_handler will call tape_add_range + * for each device that appears. + */ +static int +tape_add_disc_devreg(struct tape_discipline *discipline) +{ + struct tape_discmap *discmap; + + discmap = (struct tape_discmap *) kmalloc(sizeof(struct tape_discmap), + GFP_KERNEL); + if (discmap == NULL) { + PRINT_WARN("Could not alloc devreg: Out of memory\n" + "Dynamic attach/detach will not work!\n"); + return -ENOMEM; + } + spin_lock(&tape_devmap_lock); + discmap->devreg.ci.hc.ctype = discipline->cu_type; + discmap->devreg.flag = DEVREG_MATCH_CU_TYPE | DEVREG_TYPE_DEVCHARS; + discmap->devreg.oper_func = tape_oper_handler; + s390_device_register(&discmap->devreg); + list_add(&discmap->list, &tape_disc_devreg_list); + spin_unlock(&tape_devmap_lock); + return 0; +} + +/* + * Free devregs for a discipline. + */ +static void +tape_del_disc_devreg(struct tape_discipline *discipline) +{ + struct list_head *l; + struct tape_discmap *discmap; + + spin_lock(&tape_devmap_lock); + list_for_each(l, &tape_disc_devreg_list) { + discmap = list_entry(l, struct tape_discmap, list); + if (discmap->discipline == discipline) { + s390_device_unregister(&discmap->devreg); + list_del(&discmap->list); + kfree(discmap); + break; + } + } + spin_unlock(&tape_devmap_lock); +} + + +/* + * Forget all about device numbers and disciplines. + * This may only be called at module unload or system shutdown. + */ +static void +tape_forget_devregs(void) +{ + struct list_head *l, *n; + struct tape_devmap *devmap; + struct tape_discmap *discmap; + + spin_lock(&tape_devmap_lock); + list_for_each_safe(l, n, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + if (devmap->device != NULL) + BUG(); + s390_device_unregister(&devmap->devreg); + list_del(&devmap->list); + kfree(devmap); + } + list_for_each_safe(l, n, &tape_disc_devreg_list) { + discmap = list_entry(l, struct tape_discmap, list); + s390_device_unregister(&discmap->devreg); + list_del(&discmap->list); + kfree(discmap); + } + spin_unlock(&tape_devmap_lock); +} + +/* + * Allocate memory for a new device structure. 
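Taken together, tape_devno() and tape_parm_parse() above accept a comma-separated list of hexadecimal device numbers or from-to ranges, with an optional 0x prefix, e.g. tape=0x0180-0x0183,0190. The small user-space sketch below walks the same grammar, using strtol in place of the kernel's simple_strtoul; read_devno() is a made-up name and error handling is omitted for brevity.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Read one hex device number with an optional "0x" prefix. */
static long read_devno(const char *s, char **end)
{
	if (s[0] == '0' && s[1] == 'x')
		s += 2;
	if (!isxdigit((unsigned char)*s))
		return -1;
	return strtol(s, end, 16);
}

int main(void)
{
	char *str = "0x0180-0x0183,0190";	/* example parameter string */
	char *p = str;

	while (*p) {
		long from, to;

		to = from = read_devno(p, &p);
		if (*p == '-')
			to = read_devno(p + 1, &p);
		printf("range %04lx-%04lx\n", from, to);
		if (*p != ',')
			break;
		p++;
	}
	return 0;
}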
+ */ +static struct tape_device * +tape_alloc_device(void) +{ + struct tape_device *device; + + device = (struct tape_device *) + kmalloc(sizeof(struct tape_device), GFP_KERNEL); + if (device == NULL) { + DBF_EXCEPTION(2, "ti:no mem\n"); + PRINT_INFO ("can't allocate memory for " + "tape info structure\n"); + return ERR_PTR(-ENOMEM); + } + memset(device, 0, sizeof(struct tape_device)); + device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA); + if (device->modeset_byte == NULL) { + DBF_EXCEPTION(2, "ti:no mem\n"); + PRINT_INFO("can't allocate memory for modeset byte\n"); + kfree(device); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&device->req_queue); + init_waitqueue_head(&device->state_change_wq); + spin_lock_init(&device->assign_lock); + atomic_set(&device->ref_count, 1); + TAPE_SET_STATE(device, TAPE_STATUS_INIT); + device->medium_state = MS_UNKNOWN; + *device->modeset_byte = 0; + + return device; +} + +/* + * Create a device structure. + */ +static struct tape_device * +tape_create_device(int devno) +{ + struct list_head *l; + struct tape_devmap *devmap, *tmp; + struct tape_device *device; + int rc; + + DBF_EVENT(4, "tape_create_device(0x%04x)\n", devno); + + device = tape_alloc_device(); + if (IS_ERR(device)) + return device; + /* Get devinfo from the common io layer. */ + rc = get_dev_info_by_devno(devno, &device->devinfo); + if (rc) { + tape_put_device(device); + return ERR_PTR(rc); + } + spin_lock(&tape_devmap_lock); + devmap = NULL; + list_for_each(l, &tape_devreg_list) { + tmp = list_entry(l, struct tape_devmap, list); + if (tmp->devno == devno) { + devmap = tmp; + break; + } + } + if (devmap != NULL && devmap->device == NULL) { + devmap->device = tape_clone_device(device); + device->first_minor = devmap->devindex * TAPE_MINORS_PER_DEV; + } else if (devmap == NULL) { + /* devno not in tape range. */ + DBF_EVENT(4, "No devmap for entry 0x%04x\n", devno); + tape_put_device(device); + device = ERR_PTR(-ENODEV); + } else { + /* Should not happen. */ + DBF_EVENT(4, "A devmap entry for 0x%04x already exists\n", + devno); + tape_put_device(device); + device = ERR_PTR(-EEXIST); + } + spin_unlock(&tape_devmap_lock); + + return device; +} + +struct tape_device * +tape_clone_device(struct tape_device *device) +{ + DBF_EVENT(4, "tape_clone_device(%p) = %i\n", device, + atomic_inc_return(&device->ref_count)); + return device; +} + +/* + * Find tape device by a device index. + */ +struct tape_device * +tape_get_device(int devindex) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + DBF_EVENT(5, "tape_get_device(%i)\n", devindex); + + device = ERR_PTR(-ENODEV); + spin_lock(&tape_devmap_lock); + /* Find devmap for device with device number devno. */ + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + if (devmap->devindex == devindex) { + if (devmap->device != NULL) { + device = tape_clone_device(devmap->device); + } + break; + } + } + spin_unlock(&tape_devmap_lock); + return device; +} + +/* + * Find tape handle by a devno. 
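tape_create_device() and the tape_get_device*() lookups above return either a referenced device structure or an error code folded into the pointer via ERR_PTR(), which callers test with IS_ERR(). The kernel supplies those helpers; the snippet below is only a user-space stand-in (err_ptr/is_err/ptr_err and lookup() are illustrative names) showing how a single pointer return can carry -ENODEV.

#include <errno.h>
#include <stdio.h>

/* Treat the highest few thousand pointer values as encoded negative
 * errno codes (the kernel's exact cut-off differs by version). */
static void *err_ptr(long error)      { return (void *)error; }
static long  ptr_err(const void *p)   { return (long)p; }
static int   is_err(const void *p)    { return (unsigned long)p >= (unsigned long)-4095L; }

static void *lookup(int devno)
{
	static int the_device = 0x0180;	/* pretend only 0x0180 exists */

	if (devno != 0x0180)
		return err_ptr(-ENODEV);
	return &the_device;
}

int main(void)
{
	void *dev = lookup(0x0190);

	if (is_err(dev))
		printf("lookup failed: %ld\n", ptr_err(dev));	/* -ENODEV on most systems */
	else
		printf("found device\n");
	return 0;
}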
+ */ +struct tape_device * +tape_get_device_by_devno(int devno) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + DBF_EVENT(5, "tape_get_device_by_devno(0x%04x)\n", devno); + + device = ERR_PTR(-ENODEV); + spin_lock(&tape_devmap_lock); + + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + if(devmap->device != NULL && devmap->devno == devno) { + device = tape_clone_device(devmap->device); + break; + } + } + spin_unlock(&tape_devmap_lock); + + return device; +} + +/* + * Find tape handle by a device irq. + */ +struct tape_device * +tape_get_device_by_irq(int irq) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + DBF_EVENT(5, "tape_get_device_by_irq(0x%02x)\n", irq); + + device = ERR_PTR(-ENODEV); + spin_lock(&tape_devmap_lock); + /* Find devmap for device with device number devno. */ + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + if (devmap->device != NULL && + devmap->device->devinfo.irq == irq) { + device = tape_clone_device(devmap->device); + break; + } + } + spin_unlock(&tape_devmap_lock); + return device; +} + +/* + * Decrease the reference counter of a devices structure. If the + * reference counter reaches zero free the device structure and + * wake up sleepers. + */ +void +tape_put_device(struct tape_device *device) +{ + int remain; + + DBF_EVENT(4, "tape_put_device(%p)\n", device); + + if ((remain = atomic_dec_return(&device->ref_count)) > 0) { + DBF_EVENT(5, "remaining = %i\n", remain); + return; + } + + /* + * Reference counter dropped to zero. This means + * that the device is deleted and the last user + * of the device structure is gone. That is what + * tape_delete_device is waiting for. Do a wake up. + */ + if(remain < 0) { + PRINT_ERR("put device without reference\n"); + return; + } + + /* + * Free memory of a device structure. + */ + kfree(device->modeset_byte); + kfree(device); +} + +/* + * Scan the device range for devices with matching cu_type, create + * their device structures and enable them. + */ +void +tape_add_devices(struct tape_discipline *discipline) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + /* + * Scan tape devices for matching cu type. + */ + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + device = tape_create_device(devmap->devno); + if (IS_ERR(device)) + continue; + + if (device->devinfo.sid_data.cu_type == discipline->cu_type) { + DBF_EVENT(4, "tape_add_devices(%p)\n", discipline); + DBF_EVENT(4, "det irq: %x\n", device->devinfo.irq); + DBF_EVENT(4, "cu : %x\n", discipline->cu_type); + tape_enable_device(device, discipline); + } else { + devmap->device = NULL; + tape_put_device(device); + } + tape_put_device(device); + } + if (tape_autodetect) + tape_add_disc_devreg(discipline); +} + +/* + * Scan the device range for devices with matching cu_type, disable them + * and remove their device structures. 
+ */ +void +tape_remove_devices(struct tape_discipline *discipline) +{ + struct list_head *l; + struct tape_devmap *devmap; + struct tape_device *device; + + if (tape_autodetect) + tape_del_disc_devreg(discipline); + /* + * Go through our tape info list and disable, deq and free + * all devices with matching discipline + */ + list_for_each(l, &tape_devreg_list) { + devmap = list_entry(l, struct tape_devmap, list); + device = devmap->device; + if (device == NULL) + continue; + if (device->discipline == discipline) { + tape_disable_device(device); + tape_put_device(device); + devmap->device = NULL; + } + } +} + +/* + * Auto detect tape devices. + */ +void +tape_auto_detect(void) +{ + struct tape_device *device; + struct tape_discipline *discipline; + s390_dev_info_t dinfo; + int irq, devno; + + if (!tape_autodetect) + return; + for (irq = get_irq_first(); irq != -ENODEV; irq = get_irq_next(irq)) { + /* Get device info block. */ + devno = get_devno_by_irq(irq); + if (get_dev_info_by_irq(irq, &dinfo) < 0) + continue; + /* Search discipline with matching cu_type */ + discipline = tape_get_discipline(dinfo.sid_data.cu_type); + if (discipline == NULL) + continue; + DBF_EVENT(4, "tape_auto_detect()\n"); + DBF_EVENT(4, "det irq: %x\n", irq); + DBF_EVENT(4, "cu : %x\n", dinfo.sid_data.cu_type); + if (tape_add_range(dinfo.devno, dinfo.devno) == 0) { + device = tape_create_device(devno); + if (!IS_ERR(device)) { + tape_enable_device(device, discipline); + tape_put_device(device); + } + } + tape_put_discipline(discipline); + } +} + +/* + * Private task queue for oper/noper handling... + */ +static DECLARE_TASK_QUEUE(tape_cio_tasks); + +/* + * Oper Handler is called from Ingo's I/O layer when a new tape device is + * attached. + */ +static void +do_tape_oper_handler(void *data) +{ + struct { + int devno; + int cu_type; + struct tq_struct task; + } *p; + struct tape_device *device; + struct tape_discipline *discipline; + unsigned long flags; + + p = (void *) data; + + /* + * Handling the path revalidation scheme or common IO. Devices that + * were detected before will be reactivated. + */ + if(!IS_ERR(device = tape_get_device_by_devno(p->devno))) { + spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags); + if (!TAPE_NOACCESS(device)) { + PRINT_ERR( + "Oper handler for irq %d called, " + "which is (still) internally used.\n", + device->devinfo.irq); + } else { + DBF_EVENT(3, + "T390(%04x): resume processing\n", + p->devno); + TAPE_CLEAR_STATE(device, TAPE_STATUS_NOACCESS); + tape_schedule_bh(device); + } + spin_unlock_irqrestore( + get_irq_lock(device->devinfo.irq), flags); + + tape_put_device(device); + kfree(p); + return; + } + + /* If we get here device is NULL. */ + if (tape_autodetect && tape_add_range(p->devno, p->devno) != 0) { + kfree(p); + return; + } + + /* Find discipline for this device. */ + discipline = tape_get_discipline(p->cu_type); + if (discipline == NULL) { + /* Strange. Should not happen. */ + kfree(p); + return; + } + + device = tape_create_device(p->devno); + if (IS_ERR(device)) { + tape_put_discipline(discipline); + kfree(p); + return; + } + tape_enable_device(device, discipline); + tape_put_device(device); + tape_put_discipline(discipline); + kfree(p); +} + +int +tape_oper_handler(int irq, devreg_t *devreg) +{ + struct { + int devno; + int cu_type; + struct tq_struct task; + } *p; + s390_dev_info_t dinfo; + int rc; + + rc = get_dev_info_by_irq (irq, &dinfo); + if (rc < 0) + return rc; + + /* No memory, we loose. 
*/ + if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) + return -ENOMEM; + + p->devno = dinfo.devno; + p->cu_type = dinfo.sid_data.cu_type; + memset(&p->task, 0, sizeof(struct tq_struct)); + p->task.routine = do_tape_oper_handler; + p->task.data = p; + + /* queue call to do_oper_handler. */ + queue_task(&p->task, &tape_cio_tasks); + run_task_queue(&tape_cio_tasks); + + return 0; +} + + +/* + * Not Oper Handler is called from Ingo's IO layer, when a tape device + * is detached. + */ +static void +do_tape_noper_handler(void *data) +{ + struct { + int irq; + int status; + struct tq_struct task; + } *p; + struct tape_device *device; + struct list_head *l; + struct tape_devmap *devmap; + unsigned long flags; + + p = data; + + /* + * find out devno of leaving device: CIO has already deleted + * this information so we need to find it by irq! + */ + device = tape_get_device_by_irq(p->irq); + if (IS_ERR(device)) { + kfree(p); + return; + } + + /* + * Handle the new path revalidation scheme of the common IO layer. + */ + switch(p->status) { + case DEVSTAT_DEVICE_GONE: + case DEVSTAT_REVALIDATE: /* FIXME: What to do? */ + tape_disable_device(device); + + /* + * Remove the device reference from the device map. + */ + spin_lock(&tape_devmap_lock); + list_for_each(l, &tape_devreg_list) { + devmap = list_entry( + l, struct tape_devmap, list + ); + if (devmap->device == device) { + tape_put_device(device); + devmap->device = NULL; + break; + } + } + spin_unlock(&tape_devmap_lock); + break; + case DEVSTAT_NOT_ACC: + /* + * Device shouldn't be accessed at the moment. The + * currently running request will complete. + */ + spin_lock_irqsave( + get_irq_lock(device->devinfo.irq), flags + ); + DBF_EVENT(3, "T390(%04x): suspend processing\n", + device->devinfo.devno); + TAPE_SET_STATE(device, TAPE_STATUS_NOACCESS); + spin_unlock_irqrestore( + get_irq_lock(device->devinfo.irq), flags + ); + break; + case DEVSTAT_NOT_ACC_ERR: { + struct tape_request *request; + + /* + * Device shouldn't be accessed at the moment. The + * request that was running is lost. + */ + spin_lock_irqsave( + get_irq_lock(device->devinfo.irq), flags + ); + + request = list_entry(device->req_queue.next, + struct tape_request, list); + if( + !list_empty(&device->req_queue) + && + request->status == TAPE_REQUEST_IN_IO + ) { + /* Argh! Might better belong to tape_core.c */ + list_del(&request->list); + request->rc = -EIO; + request->status = TAPE_REQUEST_DONE; + if (request->callback != NULL) { + request->callback( + request, + request->callback_data + ); + request->callback = NULL; + } + } + DBF_EVENT(3, "T390(%04x): suspend processing\n", + device->devinfo.devno); + DBF_EVENT(3, "T390(%04x): request lost\n", + device->devinfo.devno); + TAPE_SET_STATE(device, TAPE_STATUS_NOACCESS); + spin_unlock_irqrestore( + get_irq_lock(device->devinfo.irq), flags + ); + break; + } + default: + PRINT_WARN("T390(%04x): no operation handler called " + "with unknown status(0x%x)\n", + device->devinfo.devno, p->status); + tape_disable_device(device); + + /* + * Remove the device reference from the device map. 
+ */ + spin_lock(&tape_devmap_lock); + list_for_each(l, &tape_devreg_list) { + devmap = list_entry( + l, struct tape_devmap, list + ); + if (devmap->device == device) { + tape_put_device(device); + devmap->device = NULL; + break; + } + } + spin_unlock(&tape_devmap_lock); + } + + tape_put_device(device); + kfree(p); +} + +void +tape_noper_handler(int irq, int status) +{ + struct { + int irq; + int status; + struct tq_struct task; + } *p; + + /* No memory, we loose. */ + if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) + return; + + p->irq = irq; + p->status = status; + memset(&p->task, 0, sizeof(struct tq_struct)); + p->task.routine = do_tape_noper_handler; + p->task.data = p; + + /* queue call to do_oper_handler. */ + queue_task(&p->task, &tape_cio_tasks); + run_task_queue(&tape_cio_tasks); +} + + +int +tape_devmap_init(void) +{ + return tape_parse(); +} + +void +tape_devmap_exit(void) +{ + tape_forget_devregs(); +} + +EXPORT_SYMBOL(tape_get_device); +EXPORT_SYMBOL(tape_get_device_by_irq); +EXPORT_SYMBOL(tape_get_device_by_devno); +EXPORT_SYMBOL(tape_put_device); +EXPORT_SYMBOL(tape_clone_device); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape.h linux.22-ac2/drivers/s390/char/tape.h --- linux.vanilla/drivers/s390/char/tape.h 2001-07-25 22:12:02.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape.h 2003-06-29 16:10:26.000000000 +0100 @@ -1,203 +1,436 @@ -/*************************************************************************** - * +/* * drivers/s390/char/tape.h - * tape device driver for 3480/3490E tapes. + * tape device driver for 3480/3490E/3590 tapes. * * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Carsten Otte - * Tuan Ngo-Anh - * - **************************************************************************** + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader */ #ifndef _TAPE_H - #define _TAPE_H + #include #include - -#define MAX_TAPES 7 /* Max tapes supported is 7*/ -#define TAPE_MAGIC 0xE3C1D7C5 /* is ebcdic-"TAPE" */ - -typedef enum { - TS_UNUSED=0, TS_IDLE, TS_DONE, TS_FAILED, - TS_BLOCK_INIT, - TS_BSB_INIT, - TS_BSF_INIT, - TS_DSE_INIT, - TS_EGA_INIT, - TS_FSB_INIT, - TS_FSF_INIT, - TS_LDI_INIT, - TS_LBL_INIT, - TS_MSE_INIT, - TS_NOP_INIT, - TS_RBA_INIT, - TS_RBI_INIT, - TS_RBU_INIT, - TS_RBL_INIT, - TS_RDC_INIT, - TS_RFO_INIT, - TS_RSD_INIT, - TS_REW_INIT, - TS_REW_RELEASE_INIT, - TS_RUN_INIT, - TS_SEN_INIT, - TS_SID_INIT, - TS_SNP_INIT, - TS_SPG_INIT, - TS_SWI_INIT, - TS_SMR_INIT, - TS_SYN_INIT, - TS_TIO_INIT, - TS_UNA_INIT, - TS_WRI_INIT, - TS_WTM_INIT, - TS_NOT_OPER, - TS_SIZE } tape_stat; - -struct _tape_info_t; //Forward declaration - -typedef enum { - TE_START=0, TE_DONE, TE_FAILED, TE_ERROR, TE_OTHER, - TE_SIZE } tape_events; - -typedef void (*tape_disc_shutdown_t) (int); -typedef void (*tape_event_handler_t) (struct _tape_info_t*); -typedef ccw_req_t* (*tape_ccwgen_t)(struct _tape_info_t* ti,int count); -typedef ccw_req_t* (*tape_reqgen_t)(struct request* req,struct _tape_info_t* ti,int tapeblock_major); -typedef ccw_req_t* (*tape_rwblock_t)(const char* data,size_t count,struct _tape_info_t* ti); -typedef void (*tape_freeblock_t)(ccw_req_t* cqr,struct _tape_info_t* ti); -typedef void (*tape_setup_assist_t) (struct _tape_info_t*); +#include +#include +#include +#include +#include +#include +#include +#include +#include #ifdef CONFIG_DEVFS_FS -typedef void (*tape_devfs_handler_t) (struct 
_tape_info_t*); +#include #endif -typedef tape_event_handler_t tape_event_table_t[TS_SIZE][TE_SIZE]; -typedef struct _tape_discipline_t { - unsigned int cu_type; - tape_setup_assist_t setup_assist; - tape_event_handler_t error_recovery; - tape_reqgen_t bread; - tape_freeblock_t free_bread; - tape_rwblock_t write_block; - tape_freeblock_t free_write_block; - tape_rwblock_t read_block; - tape_freeblock_t free_read_block; - tape_ccwgen_t mtfsf; - tape_ccwgen_t mtbsf; - tape_ccwgen_t mtfsr; - tape_ccwgen_t mtbsr; - tape_ccwgen_t mtweof; - tape_ccwgen_t mtrew; - tape_ccwgen_t mtoffl; - tape_ccwgen_t mtnop; - tape_ccwgen_t mtbsfm; - tape_ccwgen_t mtfsfm; - tape_ccwgen_t mteom; - tape_ccwgen_t mterase; - tape_ccwgen_t mtsetdensity; - tape_ccwgen_t mtseek; - tape_ccwgen_t mttell; - tape_ccwgen_t mtsetdrvbuffer; - tape_ccwgen_t mtlock; - tape_ccwgen_t mtunlock; - tape_ccwgen_t mtload; - tape_ccwgen_t mtunload; - tape_ccwgen_t mtcompression; - tape_ccwgen_t mtsetpart; - tape_ccwgen_t mtmkpart; - tape_ccwgen_t mtiocget; - tape_ccwgen_t mtiocpos; - tape_disc_shutdown_t shutdown; - int (*discipline_ioctl_overload)(struct inode *,struct file*, unsigned int,unsigned long); - tape_event_table_t* event_table; - tape_event_handler_t default_handler; - struct _tape_info_t* tape; /* pointer for backreference */ - void* next; -} tape_discipline_t __attribute__ ((aligned(8))); -typedef struct _tape_frontend_t { - tape_setup_assist_t device_setup; +/* + * macros s390 debug feature (dbf) + */ +#define DBF_EVENT(d_level, d_str...) \ +do { \ + debug_sprintf_event(tape_dbf_area, d_level, d_str); \ +} while (0) + +#define DBF_EXCEPTION(d_level, d_str...) \ +do { \ + debug_sprintf_exception(tape_dbf_area, d_level, d_str); \ +} while (0) + +#define TAPE_VERSION_MAJOR 2 +#define TAPE_VERSION_MINOR 0 +#define TAPE_MAGIC "tape" + +#define TAPE_MINORS_PER_DEV 2 /* two minors per device */ +#define TAPEBLOCK_HSEC_SIZE 2048 +#define TAPEBLOCK_HSEC_S2B 2 +#define TAPEBLOCK_RETRIES 5 + +/* Event types for hotplug */ +#define TAPE_HOTPLUG_CHAR_ADD 1 +#define TAPE_HOTPLUG_BLOCK_ADD 2 +#define TAPE_HOTPLUG_CHAR_REMOVE 3 +#define TAPE_HOTPLUG_BLOCK_REMOVE 4 + +enum tape_medium_state { + MS_UNKNOWN, + MS_LOADED, + MS_UNLOADED, + MS_SIZE +}; + +enum tape_op { + TO_BLOCK, /* Block read */ + TO_BSB, /* Backward space block */ + TO_BSF, /* Backward space filemark */ + TO_DSE, /* Data security erase */ + TO_FSB, /* Forward space block */ + TO_FSF, /* Forward space filemark */ + TO_LBL, /* Locate block label */ + TO_NOP, /* No operation */ + TO_RBA, /* Read backward */ + TO_RBI, /* Read block information */ + TO_RFO, /* Read forward */ + TO_REW, /* Rewind tape */ + TO_RUN, /* Rewind and unload tape */ + TO_WRI, /* Write block */ + TO_WTM, /* Write tape mark */ + TO_MSEN, /* Medium sense */ + TO_LOAD, /* Load tape */ + TO_READ_CONFIG, /* Read configuration data */ + TO_READ_ATTMSG, /* Read attention message */ + TO_DIS, /* Tape display */ + TO_ASSIGN, /* Assign tape to channel path */ + TO_UNASSIGN, /* Unassign tape from channel path */ + TO_BREAKASS, /* Break the assignment of another host */ + TO_SIZE /* #entries in tape_op_t */ +}; + +/* Forward declaration */ +struct tape_device; + +/* The tape device list lock */ +extern rwlock_t tape_dev_lock; + +/* Tape CCW request */ +struct tape_request { + struct list_head list; /* list head for request queueing. */ + struct tape_device *device; /* tape device of this request */ + ccw1_t *cpaddr; /* address of the channel program. */ + void *cpdata; /* pointer to ccw data. 
*/ + char status; /* status of this request */ + int options; /* options for execution. */ + int retries; /* retry counter for error recovery. */ + + /* + * This timer can be used to automatically cancel a request after + * some time. Specifically the assign request seems to lockup under + * certain circumstances. + */ + struct timer_list timeout; + + enum tape_op op; + int rc; + atomic_t ref_count; + + /* Callback for delivering final status. */ + void (*callback)(struct tape_request *, void *); + void *callback_data; +}; + +/* tape_request->status can be: */ +#define TAPE_REQUEST_INIT 0x00 /* request is ready to be processed */ +#define TAPE_REQUEST_QUEUED 0x01 /* request is queued to be processed */ +#define TAPE_REQUEST_IN_IO 0x02 /* request is currently in IO */ +#define TAPE_REQUEST_DONE 0x03 /* request is completed. */ + +/* Function type for magnetic tape commands */ +typedef int (*tape_mtop_fn)(struct tape_device *, int); + +/* Size of the array containing the mtops for a discipline */ +#define TAPE_NR_MTOPS (MTMKPART+1) + +/* Tape Discipline */ +struct tape_discipline { + struct list_head list; + struct module *owner; + unsigned int cu_type; + int (*setup_device)(struct tape_device *); + void (*cleanup_device)(struct tape_device *); + int (*assign)(struct tape_device *); + int (*unassign)(struct tape_device *); + int (*force_unassign)(struct tape_device *); + int (*irq)(struct tape_device *, struct tape_request *); + struct tape_request *(*read_block)(struct tape_device *, size_t); + struct tape_request *(*write_block)(struct tape_device *, size_t); + void (*process_eov)(struct tape_device*); + /* Block device stuff. */ + struct tape_request *(*bread)(struct tape_device *, struct request *); + void (*check_locate)(struct tape_device *, struct tape_request *); + void (*free_bread)(struct tape_request *); + /* ioctl function for additional ioctls. */ + int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long); + /* Array of tape commands with TAPE_NR_MTOPS entries */ + tape_mtop_fn *mtop_array; +}; + +/* + * The discipline irq function either returns an error code (<0) which + * means that the request has failed with an error or one of the following: + */ +#define TAPE_IO_SUCCESS 0 /* request successful */ +#define TAPE_IO_PENDING 1 /* request still running */ +#define TAPE_IO_RETRY 2 /* retry to current request */ +#define TAPE_IO_STOP 3 /* stop the running request */ + +/* Char Frontend Data */ +struct tape_char_data { + /* Idal buffer to temporaily store character data */ + struct idal_buffer * idal_buf; + /* Block size (in bytes) of the character device (0=auto) */ + int block_size; +#ifdef CONFIG_DEVFS_FS + /* tape//char subdirectory in devfs */ + devfs_handle_t devfs_char_dir; + /* tape//char/nonrewinding entry in devfs */ + devfs_handle_t devfs_nonrewinding; + /* tape//char/rewinding entry in devfs */ + devfs_handle_t devfs_rewinding; +#endif /* CONFIG_DEVFS_FS */ +}; + +#ifdef CONFIG_S390_TAPE_BLOCK +/* Block Frontend Data */ +struct tape_blk_data +{ + /* Block device request queue. */ + request_queue_t request_queue; + /* Block frontend tasklet */ + struct tasklet_struct tasklet; + /* Current position on the tape. 
*/ + unsigned int block_position; + /* The start of the block device image file */ + unsigned int start_block_id; #ifdef CONFIG_DEVFS_FS - tape_devfs_handler_t mkdevfstree; - tape_devfs_handler_t rmdevfstree; + /* tape//block subdirectory in devfs */ + devfs_handle_t devfs_block_dir; + /* tape//block/disc entry in devfs */ + devfs_handle_t devfs_disc; +#endif /* CONFIG_DEVFS_FS */ +}; #endif - void* next; -} tape_frontend_t __attribute__ ((aligned(8))); +#define TAPE_STATUS_INIT 0x00000001 +#define TAPE_STATUS_ASSIGN_M 0x00000002 +#define TAPE_STATUS_ASSIGN_A 0x00000004 +#define TAPE_STATUS_OPEN 0x00000008 +#define TAPE_STATUS_BLOCKDEV 0x00000010 +#define TAPE_STATUS_BOXED 0x20000000 +#define TAPE_STATUS_NOACCESS 0x40000000 +#define TAPE_STATUS_NOT_OPER 0x80000000 + +#define TAPE_SET_STATE(td,st) \ + do { \ + tape_state_set(td, td->tape_status | (st)); \ + } while(0) +#define TAPE_CLEAR_STATE(td,st) \ + do { \ + tape_state_set(td, td->tape_status & ~(st)); \ + } while(0) + +#define TAPE_UNUSED(td) (!TAPE_OPEN(td)) +#define TAPE_INIT(td) (td->tape_status & TAPE_STATUS_INIT) +#define TAPE_ASSIGNED(td) ( \ + td->tape_status & ( \ + TAPE_STATUS_ASSIGN_M | \ + TAPE_STATUS_ASSIGN_A \ + ) \ + ) +#define TAPE_OPEN(td) (td->tape_status & TAPE_STATUS_OPEN) +#define TAPE_BLOCKDEV(td) (td->tape_status & TAPE_STATUS_BLOCKDEV) +#define TAPE_BOXED(td) (td->tape_status & TAPE_STATUS_BOXED) +#define TAPE_NOACCESS(td) (td->tape_status & TAPE_STATUS_NOACCESS) +#define TAPE_NOT_OPER(td) (td->tape_status & TAPE_STATUS_NOT_OPER) + +/* Tape Info */ +struct tape_device { + /* Device discipline information. */ + struct tape_discipline *discipline; + void * discdata; + + /* Generic status bits */ + long tape_generic_status; + unsigned int tape_status; + enum tape_medium_state medium_state; + + /* Number of tapemarks required for correct termination */ + int required_tapemarks; + + /* Waitqueue for state changes and device flags */ + wait_queue_head_t state_change_wq; + unsigned char * modeset_byte; + + /* Reference count. */ + atomic_t ref_count; + + /* For persistent assign */ + spinlock_t assign_lock; + + /* Request queue. */ + struct list_head req_queue; + atomic_t bh_scheduled; + struct tq_struct bh_task; + + /* Common i/o stuff. */ + s390_dev_info_t devinfo; + devstat_t devstat; + + /* each tape device has two minors */ + int first_minor; -typedef struct _tape_info_t { - wait_queue_head_t wq; - s390_dev_info_t devinfo; /* device info from Common I/O */ - int wanna_wakeup; - int rew_minor; /* minor number for the rewinding tape */ - int nor_minor; /* minor number for the nonrewinding tape */ - int blk_minor; /* minor number for the block device */ - devstat_t devstat; /* contains irq, devno, status */ - size_t block_size; /* block size of tape */ - int drive_type; /* Code indicating type of drive */ - struct file *rew_filp; /* backpointer to file structure */ - struct file *nor_filp; - struct file *blk_filp; - int tape_state; /* State of the device. See tape_stat */ - int rc; /* Return code. 
*/ - tape_discipline_t* discipline; - request_queue_t request_queue; - struct request* current_request; - int blk_retries; - long position; - int medium_is_unloaded; // Becomes true when a unload-type operation was issued, false again when medium-insert was detected - ccw_req_t* cqr; - atomic_t bh_scheduled; - struct tq_struct bh_tq; #ifdef CONFIG_DEVFS_FS - devfs_handle_t devfs_dir; /* devfs handle for tape/DEVNO directory */ - devfs_handle_t devfs_char_dir; /* devfs handle for tape/DEVNO/char directory */ - devfs_handle_t devfs_block_dir; /* devfs handle for tape/DEVNO/block directory */ - devfs_handle_t devfs_nonrewinding; /* devfs handle for tape/DEVNO/char/nonrewinding device */ - devfs_handle_t devfs_rewinding; /* devfs handle for tape/DEVNO/char/rewinding device */ - devfs_handle_t devfs_disc; /* devfs handle for tape/DEVNO/block/disc device */ + /* Toplevel devfs directory. */ + devfs_handle_t devfs_dir; +#endif /* CONFIG_DEVFS_FS */ + /* Character device frontend data */ + struct tape_char_data char_data; +#ifdef CONFIG_S390_TAPE_BLOCK + /* Block dev frontend data */ + struct tape_blk_data blk_data; #endif - void* discdata; - void* kernbuf; - void* userbuf; - void* next; -} tape_info_t __attribute__ ((aligned(8))); +}; -/* tape initialisation functions */ -int tape_init(void); -int tape_setup (tape_info_t * ti, int irq, int minor); +/* Externals from tape_core.c */ +struct tape_request *tape_alloc_request(int cplength, int datasize); +struct tape_request *tape_put_request(struct tape_request *); +struct tape_request *tape_clone_request(struct tape_request *); +int tape_do_io(struct tape_device *, struct tape_request *); +int tape_do_io_async(struct tape_device *, struct tape_request *); +int tape_do_io_interruptible(struct tape_device *, struct tape_request *); +void tape_schedule_bh(struct tape_device *); +void tape_hotplug_event(struct tape_device *, int, int); + +static inline int +tape_do_io_free(struct tape_device *device, struct tape_request *request) +{ + int rc; + + rc = tape_do_io(device, request); + tape_put_request(request); + return rc; +} + +int tape_oper_handler(int irq, devreg_t *devreg); +void tape_noper_handler(int irq, int status); +int tape_open(struct tape_device *); +int tape_release(struct tape_device *); +int tape_assign(struct tape_device *, int type); +int tape_unassign(struct tape_device *, int type); +int tape_mtop(struct tape_device *, int, int); + +/* Externals from tape_devmap.c */ +int tape_devmap_init(void); +void tape_devmap_exit(void); + +struct tape_device *tape_get_device(int devindex); +struct tape_device *tape_get_device_by_devno(int devno); +struct tape_device *tape_clone_device(struct tape_device *); +void tape_put_device(struct tape_device *); + +void tape_auto_detect(void); +void tape_add_devices(struct tape_discipline *); +void tape_remove_devices(struct tape_discipline *); + +extern int tape_max_devindex; + +/* Externals from tape_char.c */ +int tapechar_init(void); +void tapechar_exit(void); +int tapechar_setup_device(struct tape_device *); +void tapechar_cleanup_device(struct tape_device *); + +/* Externals from tape_block.c */ +int tapeblock_init (void); +void tapeblock_exit(void); +int tapeblock_setup_device(struct tape_device *); +void tapeblock_cleanup_device(struct tape_device *); +void tapeblock_medium_change(struct tape_device *); + +/* Discipline functions */ +int tape_register_discipline(struct tape_discipline *); +void tape_unregister_discipline(struct tape_discipline *); +struct tape_discipline *tape_get_discipline(int 
cu_type); +void tape_put_discipline(struct tape_discipline *); +int tape_enable_device(struct tape_device *, struct tape_discipline *); +void tape_disable_device(struct tape_device *device); -/* functoins for alloc'ing ccw stuff */ -inline ccw_req_t * tape_alloc_ccw_req (tape_info_t* ti, int cplength, int datasize); -void tape_free_request (ccw_req_t * request); +/* tape initialisation functions */ +void tape_proc_init (void); +void tape_proc_cleanup (void); /* a function for dumping device sense info */ -void tape_dump_sense (devstat_t * stat); - -#ifdef CONFIG_S390_TAPE_DYNAMIC -/* functions for dyn. dev. attach/detach */ -int tape_oper_handler ( int irq, struct _devreg *dreg); -#endif +void tape_dump_sense(struct tape_device *, struct tape_request *); +void tape_dump_sense_dbf(struct tape_device *, struct tape_request *); /* functions for handling the status of a device */ -inline void tapestate_set (tape_info_t * ti, int newstate); -inline int tapestate_get (tape_info_t * ti); -void tapestate_event (tape_info_t * ti, int event); -extern char* state_verbose[TS_SIZE]; -extern char* event_verbose[TE_SIZE]; - -/****************************************************************************/ - -/* Some linked lists for storing plugins and devices */ -extern tape_info_t *first_tape_info; -extern tape_discipline_t *first_discipline; -extern tape_frontend_t *first_frontend; +inline void tape_state_set (struct tape_device *, unsigned int status); +inline void tape_med_state_set(struct tape_device *, enum tape_medium_state); +const char *tape_state_string(struct tape_device *); + +/* Tape 3480/3490 init/exit functions. */ +int tape_34xx_init(void); +void tape_34xx_exit(void); /* The debug area */ -#ifdef TAPE_DEBUG -extern debug_info_t *tape_debug_area; -#endif +extern debug_info_t *tape_dbf_area; + +/* functions for building ccws */ +static inline ccw1_t * +tape_ccw_cc(ccw1_t *ccw, __u8 cmd_code, __u16 memsize, void *cda) +{ + ccw->cmd_code = cmd_code; + ccw->flags = CCW_FLAG_CC; + ccw->count = memsize; + ccw->cda = (__u32)(addr_t) cda; + return ccw + 1; +} + +static inline ccw1_t * +tape_ccw_end(ccw1_t *ccw, __u8 cmd_code, __u16 memsize, void *cda) +{ + ccw->cmd_code = cmd_code; + ccw->flags = 0; + ccw->count = memsize; + ccw->cda = (__u32)(addr_t) cda; + return ccw + 1; +} + +static inline ccw1_t * +tape_ccw_cmd(ccw1_t *ccw, __u8 cmd_code) +{ + ccw->cmd_code = cmd_code; + ccw->flags = 0; + ccw->count = 0; + ccw->cda = (__u32)(addr_t) &ccw->cmd_code; + return ccw + 1; +} + +static inline ccw1_t * +tape_ccw_repeat(ccw1_t *ccw, __u8 cmd_code, int count) +{ + while (count-- > 0) { + ccw->cmd_code = cmd_code; + ccw->flags = CCW_FLAG_CC; + ccw->count = 0; + ccw->cda = (__u32)(addr_t) &ccw->cmd_code; + ccw++; + } + return ccw; +} + +extern inline ccw1_t* +tape_ccw_cc_idal(ccw1_t *ccw, __u8 cmd_code, struct idal_buffer *idal) +{ + ccw->cmd_code = cmd_code; + ccw->flags = CCW_FLAG_CC; + idal_buffer_set_cda(idal, ccw); + return ccw++; +} + +extern inline ccw1_t* +tape_ccw_end_idal(ccw1_t *ccw, __u8 cmd_code, struct idal_buffer *idal) +{ + ccw->cmd_code = cmd_code; + ccw->flags = 0; + idal_buffer_set_cda(idal, ccw); + return ccw++; +} + +/* Global vars */ +extern const char *tape_op_verbose[]; #endif /* for ifdef tape.h */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape_proc.c linux.22-ac2/drivers/s390/char/tape_proc.c --- linux.vanilla/drivers/s390/char/tape_proc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape_proc.c 
2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,383 @@ +/* + * drivers/s390/char/tape.c + * tape device driver for S/390 and zSeries tapes. + * + * S390 and zSeries version + * Copyright (C) 2001 IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * + * PROCFS Functions + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "tape.h" + +#define PRINTK_HEADER "T390:" + +static const char *tape_med_st_verbose[MS_SIZE] = +{ + [MS_UNKNOWN] = "UNKNOWN ", + [MS_LOADED] = "LOADED ", + [MS_UNLOADED] = "UNLOADED" +}; + +/* our proc tapedevices entry */ +static struct proc_dir_entry *tape_proc_devices; + +static int tape_proc_show(struct seq_file *m, void *v) { + struct tape_device *device; + struct tape_request *request; + unsigned long n; + + n = ((unsigned long) v - 1); + + if(n == 0) { + seq_printf(m, + "TapeNo\tDevNo\tCuType\tCuModel\tDevType\t" + "DevMod\tBlkSize\tState\tOp\tMedState\n" + ); + } + + device = tape_get_device(n); + if(IS_ERR(device)) + return 0; + + spin_lock_irq(get_irq_lock(device->devinfo.irq)); + + seq_printf(m, + "%d\t%04X\t%04X\t%02X\t%04X\t%02X\t", + device->first_minor/TAPE_MINORS_PER_DEV, + device->devinfo.devno, + device->devinfo.sid_data.cu_type, + device->devinfo.sid_data.cu_model, + device->devinfo.sid_data.dev_type, + device->devinfo.sid_data.dev_model + ); + + /* + * the blocksize is either 'auto' or the blocksize as a decimal number + */ + if(device->char_data.block_size == 0) + seq_printf(m, "auto\t"); + else + seq_printf(m, "%i\t", device->char_data.block_size); + + seq_printf(m, "%s\t", tape_state_string(device)); + + /* + * verbose desciption of current tape operation + */ + if(!list_empty(&device->req_queue)) { + request = list_entry( + device->req_queue.next, struct tape_request, list + ); + + seq_printf(m, "%s\t", tape_op_verbose[request->op]); + } else { + seq_printf(m, "---\t"); + } + + seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]); + + spin_unlock_irq(get_irq_lock(device->devinfo.irq)); + tape_put_device(device); + + return 0; +} + +static void *tape_proc_start(struct seq_file *m, loff_t *pos) { + if(*pos < tape_max_devindex) + return (void *) ((unsigned long) (*pos) + 1); + return NULL; +} + +static void tape_proc_stop(struct seq_file *m, void *v) { +} + +static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos) { + (*pos)++; + return tape_proc_start(m, pos); +} + +static struct seq_operations tape_proc_seq = { + .start = tape_proc_start, + .next = tape_proc_next, + .stop = tape_proc_stop, + .show = tape_proc_show, +}; + +static int tape_proc_open(struct inode *inode, struct file *file) { + return seq_open(file, &tape_proc_seq); +} + +static int +tape_proc_assign(int devno) +{ + int rc; + struct tape_device *device; + + if(IS_ERR(device = tape_get_device_by_devno(devno))) { + DBF_EVENT(3, "TAPE(%04x): assign invalid device\n", devno); + PRINT_ERR("TAPE(%04x): assign invalid device\n", devno); + return PTR_ERR(device); + } + + rc = tape_assign(device, TAPE_STATUS_ASSIGN_M); + + tape_put_device(device); + + return rc; +} + +static int +tape_proc_unassign(int devno) +{ + int rc; + struct tape_device *device; + + if(IS_ERR(device = tape_get_device_by_devno(devno))) { + DBF_EVENT(3, "TAPE(%04x): unassign invalid device\n", devno); + PRINT_ERR("TAPE(%04x): unassign invalid device\n", devno); + return PTR_ERR(device); + } + + rc = tape_unassign(device, TAPE_STATUS_ASSIGN_M); + + tape_put_device(device); + + return rc; +} + +#ifdef SMB_DEBUG_BOX +static int 
+tape_proc_put_into_box(int devno) +{ + struct tape_device *device; + + if(IS_ERR(device = tape_get_device_by_devno(devno))) { + DBF_EVENT(3, "TAPE(%04x): invalid device\n", devno); + PRINT_ERR("TAPE(%04x): invalid device\n", devno); + return PTR_ERR(device); + } + + TAPE_SET_STATE(device, TAPE_STATUS_BOXED); + + tape_put_device(device); + + return 0; +} +#endif + +#ifdef TAPE390_FORCE_UNASSIGN +static int +tape_proc_force_unassign(int devno) +{ + int rc; + struct tape_device *device; + + if(IS_ERR(device = tape_get_device_by_devno(devno))) { + DBF_EVENT(3, "TAPE(%04x): force unassign invalid device\n", + devno); + PRINT_ERR("TAPE(%04x): force unassign invalid device\n", + devno); + return PTR_ERR(device); + } + + if (!TAPE_BOXED(device)) { + DBF_EVENT(3, "TAPE(%04x): forced unassignment only allowed for" + " boxed device\n", devno); + PRINT_ERR("TAPE(%04x): forced unassignment only allowed for" + " boxed device\n", devno); + rc = -EPERM; + } else if(device->discipline->force_unassign == NULL) { + DBF_EVENT(3, "TAPE(%04x: force unassign is not supported on" + " this device\n", devno); + PRINT_ERR("TAPE(%04x: force unassign is not supported on" + " this device\n", devno); + rc = -EPERM; + } else { + rc = device->discipline->force_unassign(device); + if(rc == 0) + spin_lock_irq(get_irq_lock(device->devinfo.irq)); + TAPE_CLEAR_STATE( + device, + TAPE_STATUS_BOXED + | TAPE_STATUS_ASSIGN_A + | TAPE_STATUS_ASSIGN_M + ); + spin_unlock_irq(get_irq_lock(device->devinfo.irq)); + } + + tape_put_device(device); + return rc; +} +#endif + +/* + * Skips over all characters to the position after a newline or beyond the + * last character of the string. + * Returns the number of characters skiped. + */ +static size_t +tape_proc_skip_eol(const char *buf, size_t len, loff_t *off) +{ + loff_t start = *off; + + while((*off - start) < len) { + if(*(buf+*off) == '\n') { + *off += 1; + break; + } + *off += 1; + } + + return (size_t) (*off - start); +} + +/* + * Skips over whitespace characters and returns the number of characters + * that where skiped. 
+ */ +static size_t +tape_proc_skip_ws(const char *buf, size_t len, loff_t *off) +{ + loff_t start = *off; + + while((*off - start) < len) { + if(*(buf + *off) != ' ' && *(buf + *off) != '\t') + break; + *off += 1; + } + + return (size_t) (*off - start); +} + +static size_t +tape_proc_get_hexvalue(char *buf, size_t len, loff_t *off, unsigned int *hex) +{ + int hexdigit; + loff_t start = *off; + + /* Skip possible space characters */ + tape_proc_skip_ws(buf, len, off); + + /* The hexvalue might start with '0x' or '0X' */ + if((*off - start)+1 < len && *(buf + *off) == '0') + if(*(buf + *off + 1) == 'x' || *(buf + *off + 1) == 'X') + *off += 2; + + *hex = 0; + while((*off - start) < len) { + if(*(buf + *off) >= '0' && *(buf + *off) <= '9') { + hexdigit = *(buf + *off) - '0'; + } else if(*(buf + *off) >= 'a' && *(buf + *off) <= 'f') { + hexdigit = *(buf + *off) - 'a' + 10; + } else if(*(buf + *off) >= 'A' && *(buf + *off) <= 'F') { + hexdigit = *(buf + *off) - 'A' + 10; + } else { + break; + } + *hex = (*hex << 4) + hexdigit; + *off += 1; + } + + return (size_t) (*off - start); +} + +static ssize_t tape_proc_write( + struct file *file, + const char *buf, + size_t len, + loff_t *off +) { + loff_t start = *off; + int devno; + char *s; + + if(PAGE_SIZE < len) + return -EINVAL; + + if((s = kmalloc(len, GFP_KERNEL)) == NULL) + return -ENOMEM; + + if(copy_from_user(s, buf, len) != 0) { + kfree(s); + return -EFAULT; + } + + if(strncmp(s+*off, "assign", 6) == 0) { + (*off) += 6; + tape_proc_get_hexvalue(s, len - 6, off, &devno); + if(devno > 0) + tape_proc_assign(devno); + } else if(strncmp(s+*off, "unassign", 8) == 0) { + (*off) += 8; + tape_proc_get_hexvalue(s, len - (*off - start), off, &devno); + if(devno > 0) + tape_proc_unassign(devno); +#ifdef TAPE390_FORCE_UNASSIGN + } else if(strncmp(s+*off, "forceunassign", 13) == 0) { + (*off) += 13; + tape_proc_get_hexvalue(s, len - (*off - start), off, &devno); + if(devno > 0) + tape_proc_force_unassign(devno); +#endif +#ifdef SMB_DEBUG_BOX + } else if(strncmp(s+*off, "putintobox", 10) == 0) { + (*off) += 10; + tape_proc_get_hexvalue(s, len - (*off - start), off, &devno); + if(devno > 0) + tape_proc_put_into_box(devno); +#endif + } else { + DBF_EVENT(3, "tape_proc_write() parse error\n"); + PRINT_ERR("Invalid /proc/tapedevices command.\n"); + } + tape_proc_skip_eol(s, len - (*off - start), off); + + kfree(s); + + /* Just pretend to have processed all the stuff */ + return len; +} + +static struct file_operations tape_proc_ops = +{ + .open = tape_proc_open, + .read = seq_read, + .write = tape_proc_write, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * Initialize procfs stuff on startup + */ +void tape_proc_init(void) { + tape_proc_devices = create_proc_entry( + "tapedevices", S_IFREG | S_IRUGO | S_IWUSR, &proc_root); + + if (tape_proc_devices == NULL) { + PRINT_WARN("tape: Cannot register procfs entry tapedevices\n"); + return; + } + tape_proc_devices->proc_fops = &tape_proc_ops; + tape_proc_devices->owner = THIS_MODULE; +} + +/* + * Cleanup all stuff registered to the procfs + */ +void tape_proc_cleanup(void) { + if(tape_proc_devices != NULL) + remove_proc_entry ("tapedevices", &proc_root); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape_std.c linux.22-ac2/drivers/s390/char/tape_std.c --- linux.vanilla/drivers/s390/char/tape_std.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape_std.c 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,768 @@ +/* + * 
drivers/s390/char/tape_std.c + * standard tape device functions for ibm tapes. + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Michael Holzheu + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#include +#include +#include +#include +#include +#ifdef CONFIG_S390_TAPE_BLOCK +#include +#endif + +#include +#include +#include +#include + +#include "tape.h" +#include "tape_std.h" + +#define PRINTK_HEADER "T3xxx:" +#define ZLINUX_PASSWD "zLinux PWD" + +/* + * tape_std_assign + */ +int +tape_std_assign(struct tape_device *device) +{ + struct tape_request *request; + + request = tape_alloc_request(2, 11); + if (IS_ERR(request)) + return PTR_ERR(request); + + request->op = TO_ASSIGN; + + /* + * From the documentation assign requests should fail with the + * 'assigned elsewhere' bit set if the tape is already assigned + * to another host. However, it seems, in reality the request + * hangs forever. Therfor we just set a timeout for this request. + */ + init_timer(&request->timeout); + request->timeout.expires = jiffies + 1 * HZ; + + /* Setup the CCWs */ + tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + + return tape_do_io_free(device, request); +} + +/* + * tape_std_unassign + */ +int +tape_std_unassign (struct tape_device *device) +{ + struct tape_request *request; + + request = tape_alloc_request(2, 11); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_UNASSIGN; + tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + return tape_do_io_free(device, request); +} + +#ifdef TAPE390_FORCE_UNASSIGN +/* + * tape_std_force_unassign: forces assignment from another host. + * (Since we need a password this works only with other zLinux hosts!) + */ +int +tape_std_force_unassign(struct tape_device *device) +{ + struct tape_request *request; + struct tape_ca_data *ca_data1; + struct tape_ca_data *ca_data2; + + request = tape_alloc_request(2, 24); + if (IS_ERR(request)) + return PTR_ERR(request); + + request->op = TO_BREAKASS; + ca_data1 = (struct tape_ca_data *) + (((char *) request->cpdata)); + ca_data2 = (struct tape_ca_data *) + (((char *) request->cpdata) + 12); + + ca_data1->function = 0x80; /* Conditional enable */ + strcpy(ca_data1->password, ZLINUX_PASSWD); + ASCEBC(ca_data1->password, 11); + ca_data2->function = 0x40; /* Conditional disable */ + memcpy(ca_data2->password, ca_data1->password, 11); + + tape_ccw_cc(request->cpaddr, CONTROL_ACCESS, 12, ca_data1); + tape_ccw_end(request->cpaddr + 1, CONTROL_ACCESS, 12, ca_data2); + + return tape_do_io_free(device, request); +} +#endif + +/* + * TAPE390_DISPLAY: Show a string on the tape display. 
+ */ +int +tape_std_display(struct tape_device *device, struct display_struct *disp) +{ + struct tape_request *request; + int rc; + + request = tape_alloc_request(2, 17); + if (IS_ERR(request)) { + DBF_EVENT(3, "TAPE: load display failed\n"); + return PTR_ERR(request); + } + + request->op = TO_DIS; + *(unsigned char *) request->cpdata = disp->cntrl; + DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl); + memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8); + memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8); + ASCEBC(((unsigned char*) request->cpdata) + 1, 16); + + tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + + rc = tape_do_io_interruptible(device, request); + tape_put_request(request); + return rc; +} + +/* + * Read block id. + */ +int +tape_std_read_block_id(struct tape_device *device, unsigned int *bid) +{ + struct tape_request *request; + struct { + unsigned int channel_block_id; + unsigned int device_block_id; + } __attribute__ ((packed)) *rbi_data; + int rc; + + request = tape_alloc_request(3, 8); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_RBI; + + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + + /* execute it */ + rc = tape_do_io(device, request); + if (rc == 0) { + /* Get result from read buffer. */ + DBF_EVENT(6, "rbi_data = 0x%08x%08x\n", + *((unsigned int *) request->cpdata), + *(((unsigned int *) request->cpdata)+1)); + rbi_data = (void *) request->cpdata; + *bid = rbi_data->channel_block_id; + } + tape_put_request(request); + return rc; +} + +/* Seek block id */ +int +tape_std_seek_block_id(struct tape_device *device, unsigned int bid) +{ + struct tape_request *request; + + request = tape_alloc_request(3, 4); + if (IS_ERR(request)) + return PTR_ERR(request); + + request->op = TO_LBL; + *(__u32 *) request->cpdata = bid; + + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + + /* execute it */ + return tape_do_io_free(device, request); +} + +int +tape_std_terminate_write(struct tape_device *device) +{ + int rc; + + if(device->required_tapemarks == 0) + return 0; + + DBF_EVENT(5, "(%04x): terminate_write %ixEOF\n", + device->devstat.devno, device->required_tapemarks); + + rc = tape_mtop(device, MTWEOF, device->required_tapemarks); + if (rc) + return rc; + + device->required_tapemarks = 0; + return tape_mtop(device, MTBSR, 1); +} + +/* + * MTLOAD: Loads the tape. + * The default implementation just wait until the tape medium state changes + * to MS_LOADED. + */ +int +tape_std_mtload(struct tape_device *device, int count) +{ + return wait_event_interruptible(device->state_change_wq, + (device->medium_state == MS_LOADED)); +} + +/* + * MTSETBLK: Set block size. + */ +int +tape_std_mtsetblk(struct tape_device *device, int count) +{ + struct idal_buffer *new; + + DBF_EVENT(6, "tape_std_mtsetblk(%d)\n", count); + if (count <= 0) { + /* + * Just set block_size to 0. tapechar_read/tapechar_write + * will realloc the idal buffer if a bigger one than the + * current is needed. 
+ */ + device->char_data.block_size = 0; + return 0; + } + if (device->char_data.idal_buf != NULL && + device->char_data.idal_buf->size == count) + /* We already have a idal buffer of that size. */ + return 0; + /* Allocate a new idal buffer. */ + new = idal_buffer_alloc(count, 0); + if (new == NULL) + return -ENOMEM; + if (device->char_data.idal_buf != NULL) + idal_buffer_free(device->char_data.idal_buf); + + device->char_data.idal_buf = new; + device->char_data.block_size = count; + DBF_EVENT(6, "new blocksize is %d\n", device->char_data.block_size); + return 0; +} + +/* + * MTRESET: Set block size to 0. + */ +int +tape_std_mtreset(struct tape_device *device, int count) +{ + DBF_EVENT(6, "TCHAR:devreset:\n"); + device->char_data.block_size = 0; + return 0; +} + +/* + * MTFSF: Forward space over 'count' file marks. The tape is positioned + * at the EOT (End of Tape) side of the file mark. + */ +int +tape_std_mtfsf(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_FSF; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTFSR: Forward space over 'count' tape blocks (blocksize is set + * via MTSETBLK. + */ +int +tape_std_mtfsr(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_FSB; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTBSR: Backward space over 'count' tape blocks. + * (blocksize is set via MTSETBLK. + */ +int +tape_std_mtbsr(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_BSB; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTWEOF: Write 'count' file marks at the current position. + */ +int +tape_std_mtweof(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_WTM; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTBSFM: Backward space over 'count' file marks. + * The tape is positioned at the BOT (Begin Of Tape) side of the + * last skipped file mark. 
+ */ +int +tape_std_mtbsfm(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_BSF; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTBSF: Backward space over 'count' file marks. The tape is positioned at + * the EOT (End of Tape) side of the last skipped file mark. + */ +int +tape_std_mtbsf(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + int rc; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_BSF; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + rc = tape_do_io(device, request); + if (rc == 0) { + request->op = TO_FSF; + /* need to skip forward over the filemark. */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ + rc = tape_do_io(device, request); + } + tape_put_request(request); + return rc; +} + +/* + * MTFSFM: Forward space over 'count' file marks. + * The tape is positioned at the BOT (Begin Of Tape) side + * of the last skipped file mark. + */ +int +tape_std_mtfsfm(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + ccw1_t *ccw; + int rc; + + request = tape_alloc_request(mt_count + 2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_FSF; + /* setup ccws */ + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count); + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ + rc = tape_do_io(device, request); + if (rc == 0) { + request->op = TO_BSF; + /* need to skip forward over the filemark. */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, BACKSPACEFILE, 0, NULL); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ + rc = tape_do_io(device, request); + } + tape_put_request(request); + return rc; +} + +/* + * MTREW: Rewind the tape. + */ +int +tape_std_mtrew(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(3, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_REW; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, + device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTOFFL: Rewind the tape and put the drive off-line. 
+ * Implement 'rewind unload' + */ +int +tape_std_mtoffl(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(3, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_RUN; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTNOP: 'No operation'. + */ +int +tape_std_mtnop(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_NOP; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTEOM: positions at the end of the portion of the tape already used + * for recordind data. MTEOM positions after the last file mark, ready for + * appending another file. + */ +int +tape_std_mteom(struct tape_device *device, int mt_count) +{ + int rc; + + /* + * Since there is currently no other way to seek, return to the + * BOT and start from there. + */ + if((rc = tape_mtop(device, MTREW, 1)) < 0) + return rc; + + do { + if((rc = tape_mtop(device, MTFSF, 1)) < 0) + return rc; + if((rc = tape_mtop(device, MTFSR, 1)) < 0) + return rc; + } while((device->devstat.dstat & DEV_STAT_UNIT_EXCEP) == 0); + + return tape_mtop(device, MTBSR, 1); +} + +/* + * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind. + */ +int +tape_std_mtreten(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + int rc; + + request = tape_alloc_request(4, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_FSF; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL); + tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL); + tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr); + /* execute it, MTRETEN rc gets ignored */ + rc = tape_do_io_interruptible(device, request); + tape_put_request(request); + return tape_std_mtrew(device, 1); +} + +/* + * MTERASE: erases the tape. + */ +int +tape_std_mterase(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(5, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_DSE; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL); + tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL); + tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL); + tape_ccw_end(request->cpaddr + 4, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTUNLOAD: Rewind the tape and unload it. 
+ */ +int +tape_std_mtunload(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + request = tape_alloc_request(3, 32); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_RUN; + /* setup ccws */ + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL); + tape_ccw_end(request->cpaddr + 2, SENSE, 32, request->cpdata); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * MTCOMPRESSION: used to enable compression. + * Sets the IDRC on/off. + */ +int +tape_std_mtcompression(struct tape_device *device, int mt_count) +{ + struct tape_request *request; + + if (mt_count < 0 || mt_count > 1) { + DBF_EXCEPTION(6, "xcom parm\n"); + if (*device->modeset_byte & 0x08) + PRINT_INFO("(%x) Compression is currently on\n", + device->devstat.devno); + else + PRINT_INFO("(%x) Compression is currently off\n", + device->devstat.devno); + PRINT_INFO("Use 1 to switch compression on, 0 to " + "switch it off\n"); + return -EINVAL; + } + request = tape_alloc_request(2, 0); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_NOP; + /* setup ccws */ + *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); + /* execute it */ + return tape_do_io_free(device, request); +} + +/* + * Read Block + */ +struct tape_request * +tape_std_read_block(struct tape_device *device, size_t count) +{ + struct tape_request *request; + + /* + * We have to alloc 4 ccws in order to be able to transform request + * into a read backward request in error case. + */ + request = tape_alloc_request(4, 0); + if (IS_ERR(request)) { + DBF_EXCEPTION(6, "xrbl fail"); + return request; + } + request->op = TO_RFO; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD, + device->char_data.idal_buf); + DBF_EVENT(6, "xrbl ccwg\n"); + return request; +} + +/* + * Read Block backward transformation function. + */ +void +tape_std_read_backward(struct tape_device *device, struct tape_request *request) +{ + /* + * We have allocated 4 ccws in tape_std_read, so we can now + * transform the request to a read backward, followed by a + * forward space block. 
+ */ + request->op = TO_RBA; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD, + device->char_data.idal_buf); + tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); + tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); + DBF_EVENT(6, "xrop ccwg");} + +/* + * Write Block + */ +struct tape_request * +tape_std_write_block(struct tape_device *device, size_t count) +{ + struct tape_request *request; + + request = tape_alloc_request(2, 0); + if (IS_ERR(request)) { + DBF_EXCEPTION(6, "xwbl fail\n"); + return request; + } + request->op = TO_WRI; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD, + device->char_data.idal_buf); + DBF_EVENT(6, "xwbl ccwg\n"); + return request; +} + +/* + * This routine is called by frontend after an ENOSP on write + */ +void +tape_std_process_eov(struct tape_device *device) +{ + /* + * End of volume: We have to backspace the last written record, then + * we TRY to write a tapemark and then backspace over the written TM + */ + if (tape_mtop(device, MTBSR, 1) < 0) + return; + if (tape_mtop(device, MTWEOF, 1) < 0) + return; + tape_mtop(device, MTBSR, 1); +} + +EXPORT_SYMBOL(tape_std_assign); +EXPORT_SYMBOL(tape_std_unassign); +#ifdef TAPE390_FORCE_UNASSIGN +EXPORT_SYMBOL(tape_std_force_unassign); +#endif +EXPORT_SYMBOL(tape_std_display); +EXPORT_SYMBOL(tape_std_read_block_id); +EXPORT_SYMBOL(tape_std_seek_block_id); +EXPORT_SYMBOL(tape_std_mtload); +EXPORT_SYMBOL(tape_std_mtsetblk); +EXPORT_SYMBOL(tape_std_mtreset); +EXPORT_SYMBOL(tape_std_mtfsf); +EXPORT_SYMBOL(tape_std_mtfsr); +EXPORT_SYMBOL(tape_std_mtbsr); +EXPORT_SYMBOL(tape_std_mtweof); +EXPORT_SYMBOL(tape_std_mtbsfm); +EXPORT_SYMBOL(tape_std_mtbsf); +EXPORT_SYMBOL(tape_std_mtfsfm); +EXPORT_SYMBOL(tape_std_mtrew); +EXPORT_SYMBOL(tape_std_mtoffl); +EXPORT_SYMBOL(tape_std_mtnop); +EXPORT_SYMBOL(tape_std_mteom); +EXPORT_SYMBOL(tape_std_mtreten); +EXPORT_SYMBOL(tape_std_mterase); +EXPORT_SYMBOL(tape_std_mtunload); +EXPORT_SYMBOL(tape_std_mtcompression); +EXPORT_SYMBOL(tape_std_read_block); +EXPORT_SYMBOL(tape_std_read_backward); +EXPORT_SYMBOL(tape_std_write_block); +EXPORT_SYMBOL(tape_std_process_eov); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tape_std.h linux.22-ac2/drivers/s390/char/tape_std.h --- linux.vanilla/drivers/s390/char/tape_std.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/s390/char/tape_std.h 2003-06-29 16:10:26.000000000 +0100 @@ -0,0 +1,155 @@ +/* + * drivers/s390/char/tape_std.h + * standard tape device functions for ibm tapes. + * + * S390 and zSeries version + * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Carsten Otte + * Tuan Ngo-Anh + * Martin Schwidefsky + * Stefan Bader + */ + +#ifndef _TAPE_STD_H +#define _TAPE_STD_H + +#include + +/* + * The CCW commands for the Tape type of command. 
+ */ +#define INVALID_00 0x00 /* Invalid cmd */ +#define BACKSPACEBLOCK 0x27 /* Back Space block */ +#define BACKSPACEFILE 0x2f /* Back Space file */ +#define DATA_SEC_ERASE 0x97 /* Data security erase */ +#define ERASE_GAP 0x17 /* Erase Gap */ +#define FORSPACEBLOCK 0x37 /* Forward space block */ +#define FORSPACEFILE 0x3F /* Forward Space file */ +#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */ +#define NOP 0x03 /* No operation */ +#define READ_FORWARD 0x02 /* Read forward */ +#define REWIND 0x07 /* Rewind */ +#define REWIND_UNLOAD 0x0F /* Rewind and Unload */ +#define SENSE 0x04 /* Sense */ +#define NEW_MODE_SET 0xEB /* Guess it is Mode set */ +#define WRITE_CMD 0x01 /* Write */ +#define WRITETAPEMARK 0x1F /* Write Tape Mark */ + +#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */ +#define CONTROL_ACCESS 0xE3 /* Set high speed */ +#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */ +#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */ +#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */ +#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */ +#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */ +#define MODE_SET_C3 0xC3 /* for 3420 */ +#define MODE_SET_CB 0xCB /* for 3420 */ +#define MODE_SET_D3 0xD3 /* for 3420 */ +#define READ_BACKWARD 0x0C /* */ +#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */ +#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */ +#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */ +#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */ +#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */ +#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */ +#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */ +#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */ +#define READ_DEV_CHAR 0x64 /* Read device characteristics */ +#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */ +#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */ +#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */ +#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */ +#define SYNC 0x43 /* Synchronize (flush buffer) */ +#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */ +#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */ +#define READ_CONFIG_DATA 0xFA /* 3490 CMD */ +#define READ_MESSAGE_ID 0x4E /* 3490 CMD */ +#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */ +#define SET_INTERFACE_ID 0x73 /* 3490 CMD */ + +#define SENSE_COMMAND_REJECT 0x80 +#define SENSE_INTERVENTION_REQUIRED 0x40 +#define SENSE_BUS_OUT_CHECK 0x20 +#define SENSE_EQUIPMENT_CHECK 0x10 +#define SENSE_DATA_CHECK 0x08 +#define SENSE_OVERRUN 0x04 +#define SENSE_DEFERRED_UNIT_CHECK 0x02 +#define SENSE_ASSIGNED_ELSEWHERE 0x01 + +#define SENSE_LOCATE_FAILURE 0x80 +#define SENSE_DRIVE_ONLINE 0x40 +#define SENSE_RESERVED 0x20 +#define SENSE_RECORD_SEQUENCE_ERR 0x10 +#define SENSE_BEGINNING_OF_TAPE 0x08 +#define SENSE_WRITE_MODE 0x04 +#define SENSE_WRITE_PROTECT 0x02 +#define SENSE_NOT_CAPABLE 0x01 + +#define SENSE_CHANNEL_ADAPTER_CODE 0xE0 +#define SENSE_CHANNEL_ADAPTER_LOC 0x10 +#define SENSE_REPORTING_CU 0x08 +#define SENSE_AUTOMATIC_LOADER 0x04 +#define SENSE_TAPE_SYNC_MODE 0x02 +#define SENSE_TAPE_POSITIONING 0x01 + +/* Data structure for the CONTROL_ACCESS call */ +struct tape_ca_data { + unsigned char function; + char password[11]; +} __attribute__ ((packed)); + +/* discipline functions */ +struct tape_request *tape_std_read_block(struct tape_device *, size_t); +void tape_std_read_backward(struct tape_device *device, + struct tape_request *request); +struct tape_request *tape_std_write_block(struct tape_device *, size_t); +struct tape_request *tape_std_bread(struct tape_device *, 
struct request *); +void tape_std_free_bread(struct tape_request *); +void tape_std_check_locate(struct tape_device *, struct tape_request *); +struct tape_request *tape_std_bwrite(struct request *, + struct tape_device *, int); + +/* Some non-mtop commands. */ +int tape_std_assign(struct tape_device *); +int tape_std_unassign(struct tape_device *); +int tape_std_force_unassign(struct tape_device *); +int tape_std_read_block_id(struct tape_device *, unsigned int *); +int tape_std_seek_block_id(struct tape_device *, unsigned int); +int tape_std_display(struct tape_device *, struct display_struct *); +int tape_std_terminate_write(struct tape_device *); + +/* Standard magnetic tape commands. */ +int tape_std_mtbsf(struct tape_device *, int); +int tape_std_mtbsfm(struct tape_device *, int); +int tape_std_mtbsr(struct tape_device *, int); +int tape_std_mtcompression(struct tape_device *, int); +int tape_std_mteom(struct tape_device *, int); +int tape_std_mterase(struct tape_device *, int); +int tape_std_mtfsf(struct tape_device *, int); +int tape_std_mtfsfm(struct tape_device *, int); +int tape_std_mtfsr(struct tape_device *, int); +int tape_std_mtload(struct tape_device *, int); +int tape_std_mtnop(struct tape_device *, int); +int tape_std_mtoffl(struct tape_device *, int); +int tape_std_mtreset(struct tape_device *, int); +int tape_std_mtreten(struct tape_device *, int); +int tape_std_mtrew(struct tape_device *, int); +int tape_std_mtsetblk(struct tape_device *, int); +int tape_std_mtunload(struct tape_device *, int); +int tape_std_mtweof(struct tape_device *, int); + +/* Event handlers */ +void tape_std_default_handler(struct tape_device *); +void tape_std_unexpect_uchk_handler(struct tape_device *); +void tape_std_irq(struct tape_device *); +void tape_std_process_eov(struct tape_device *); + +// the error recovery stuff: +void tape_std_error_recovery(struct tape_device *); +void tape_std_error_recovery_has_failed(struct tape_device *,int error_id); +void tape_std_error_recovery_succeded(struct tape_device *); +void tape_std_error_recovery_do_retry(struct tape_device *); +void tape_std_error_recovery_read_opposite(struct tape_device *); +void tape_std_error_recovery_HWBUG(struct tape_device *, int condno); + +#endif // _TAPE_STD_H diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/char/tubtty.c linux.22-ac2/drivers/s390/char/tubtty.c --- linux.vanilla/drivers/s390/char/tubtty.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/s390/char/tubtty.c 2003-06-29 16:10:26.000000000 +0100 @@ -10,6 +10,7 @@ * Author: Richard Hitt */ #include +#include #include "tubio.h" /* Initialization & uninitialization for tubtty */ @@ -118,6 +119,8 @@ td->read_proc = tty3270_read_proc; td->write_proc = tty3270_write_proc; + ctrlchar_init(); + rc = tty_register_driver(td); if (rc) { printk(KERN_ERR "tty3270 registration failed with %d\n", rc); @@ -875,23 +878,22 @@ { struct tty_struct *tty; int func = -1; + int is_console = 0; + unsigned int cchar; if ((tty = tubp->tty) == NULL) return; if (count < 0) return; - if (count == 2 && (cp[0] == '^' || cp[0] == '\252')) { - switch(cp[1]) { - case 'c': case 'C': - func = INTR_CHAR(tty); - break; - case 'd': case 'D': - func = EOF_CHAR(tty); - break; - case 'z': case 'Z': - func = SUSP_CHAR(tty); - break; - } +#ifdef CONFIG_TN3270_CONSOLE + if (CONSOLE_IS_3270 && tub3270_con_tubp == tubp) + is_console = 1; +#endif + cchar = ctrlchar_handle(cp, count, tty, is_console); + if ((cchar & CTRLCHAR_MASK) != CTRLCHAR_NONE) { + if 
((cchar & CTRLCHAR_MASK) != CTRLCHAR_CTRL) + return; + func = cchar & 0xFF; } else if (count == 2 && cp[0] == 0x1b) { /* if ESC */ int inc = 0; char buf[GEOM_INPLEN + 1]; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/Config.in linux.22-ac2/drivers/s390/Config.in --- linux.vanilla/drivers/s390/Config.in 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/s390/Config.in 2003-07-07 16:03:56.000000000 +0100 @@ -52,19 +52,20 @@ if [ "$CONFIG_TN3215" = "y" ]; then bool 'Support for console on 3215 line mode terminal' CONFIG_TN3215_CONSOLE fi -bool 'Support for HWC line mode terminal' CONFIG_HWC -if [ "$CONFIG_HWC" = "y" ]; then - bool ' console on HWC line mode terminal' CONFIG_HWC_CONSOLE - tristate ' Control-Program Identification' CONFIG_HWC_CPI +bool 'Support for SCLP' CONFIG_SCLP +if [ "$CONFIG_SCLP" = "y" ]; then + bool ' Support for SCLP line mode terminal' CONFIG_SCLP_TTY + if [ "$CONFIG_SCLP_TTY" = "y" ]; then + bool ' Support for console on SCLP line mode terminal' CONFIG_SCLP_CONSOLE + fi + tristate ' Control-Program Identification' CONFIG_SCLP_CPI fi tristate 'S/390 tape device support' CONFIG_S390_TAPE if [ "$CONFIG_S390_TAPE" != "n" ]; then comment 'S/390 tape interface support' - bool ' Support for tape character devices' CONFIG_S390_TAPE_CHAR bool ' Support for tape block devices' CONFIG_S390_TAPE_BLOCK comment 'S/390 tape hardware support' - bool ' Support for 3490 tape hardware' CONFIG_S390_TAPE_3490 - bool ' Support for 3480 tape hardware' CONFIG_S390_TAPE_3480 + dep_tristate ' Support for 3480/3490 tape hardware' CONFIG_S390_TAPE_34XX $CONFIG_S390_TAPE fi endmenu @@ -88,6 +89,23 @@ define_bool CONFIG_HOTPLUG y fi + if [ "$CONFIG_QDIO" != "n" -a "$CONFIG_CHANDEV" = "y" -a "$CONFIG_IP_MULTICAST" = "y" ]; then + dep_tristate 'Support for Gigabit Ethernet' CONFIG_QETH $CONFIG_QDIO + if [ "$CONFIG_QETH" != "n" ]; then + comment 'Gigabit Ethernet default settings' + if [ "$CONFIG_IPV6" = "y" -o "$CONFIG_IPV6" = "$CONFIG_QETH" ]; then + bool ' IPv6 support for qeth' CONFIG_QETH_IPV6 + else + define_bool CONFIG_QETH_IPV6 n + fi + if [ "$CONFIG_VLAN_8021Q" = "y" -o "$CONFIG_VLAN_8021Q" = "$CONFIG_QETH" ]; then + bool ' VLAN support for qeth' CONFIG_QETH_VLAN + else + define_bool CONFIG_QETH_VLAN n + fi + bool ' Performance statistics in /proc' CONFIG_QETH_PERF_STATS + fi + fi tristate 'CTC device support' CONFIG_CTC tristate 'IUCV device support (VM only)' CONFIG_IUCV fi diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/Makefile linux.22-ac2/drivers/s390/Makefile --- linux.vanilla/drivers/s390/Makefile 2003-06-14 00:11:34.000000000 +0100 +++ linux.22-ac2/drivers/s390/Makefile 2003-07-07 16:05:42.000000000 +0100 @@ -9,6 +9,8 @@ obj-y := s390io.o s390mach.o s390dyn.o ccwcache.o sysinfo.o export-objs += ccwcache.o s390dyn.o s390io.o +obj-$(CONFIG_QDIO) += qdio.o +export-objs += qdio.o obj-y += $(foreach dir,$(subdir-y),$(dir)/s390-$(dir).o) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/s390/net/ctctty.c linux.22-ac2/drivers/s390/net/ctctty.c --- linux.vanilla/drivers/s390/net/ctctty.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/s390/net/ctctty.c 2003-07-06 18:41:25.000000000 +0100 @@ -1111,7 +1111,7 @@ #endif return; } - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sbus/char/aurora.c linux.22-ac2/drivers/sbus/char/aurora.c --- linux.vanilla/drivers/sbus/char/aurora.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/sbus/char/aurora.c 2003-06-29 16:10:16.000000000 +0100 @@ -1504,7 +1504,7 @@ } bp = port_Board(port); - if ((tty->count == 1) && (port->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (port->count != 1)) { printk(KERN_DEBUG "aurora%d: aurora_close: bad port count; " "tty->count is 1, port count is %d\n", board_No(bp), port->count); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sbus/char/sab82532.c linux.22-ac2/drivers/sbus/char/sab82532.c --- linux.vanilla/drivers/sbus/char/sab82532.c 2002-08-03 16:08:27.000000000 +0100 +++ linux.22-ac2/drivers/sbus/char/sab82532.c 2003-06-29 16:10:16.000000000 +0100 @@ -1610,7 +1610,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("sab82532_close ttys%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sbus/char/su.c linux.22-ac2/drivers/sbus/char/su.c --- linux.vanilla/drivers/sbus/char/su.c 2002-08-03 16:08:27.000000000 +0100 +++ linux.22-ac2/drivers/sbus/char/su.c 2003-06-29 16:10:16.000000000 +0100 @@ -39,7 +39,7 @@ do { \ printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ kdevname(tty->device), (info->flags), serial_refcount, \ - info->count,tty->count,s); \ + info->count,atomic_read(&tty->count),s); \ } while (0) #else #define DBG_CNT(s) @@ -1756,7 +1756,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("su_close ttys%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sbus/char/zs.c linux.22-ac2/drivers/sbus/char/zs.c --- linux.vanilla/drivers/sbus/char/zs.c 2002-08-03 16:08:27.000000000 +0100 +++ linux.22-ac2/drivers/sbus/char/zs.c 2003-06-29 16:10:16.000000000 +0100 @@ -1547,7 +1547,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("zs_close tty-%d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. 
Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/aacraid/aachba.c linux.22-ac2/drivers/scsi/aacraid/aachba.c --- linux.vanilla/drivers/scsi/aacraid/aachba.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/scsi/aacraid/aachba.c 2003-08-17 17:56:09.000000000 +0100 @@ -221,9 +221,7 @@ static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* sgmap); static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg); static int aac_send_srb_fib(Scsi_Cmnd* scsicmd); -#ifdef AAC_DETAILED_STATUS_INFO static char *aac_get_status_string(u32 status); -#endif /* * Non dasd selection is handled entirely in aachba now @@ -231,8 +229,11 @@ MODULE_PARM(nondasd, "i"); MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on"); +MODULE_PARM(paemode, "i"); +MODULE_PARM_DESC(paemode, "Control whether dma addressing is using PAE. 0=off, 1=on"); static int nondasd = -1; +static int paemode = -1; /** * aac_get_containers - list containers @@ -298,6 +299,64 @@ } /** + * aac_get_container_name - get container name + */ +static int aac_get_container_name(struct aac_dev *dev, int cid, char * pid) +{ + struct fsa_scsi_hba *fsa_dev_ptr; + int status = 0; + struct aac_get_name *dinfo; + struct aac_get_name_resp *dresp; + struct fib * fibptr; + unsigned instance; + + fsa_dev_ptr = &(dev->fsa_dev); + instance = dev->scsi_host_ptr->unique_id; + + if (!(fibptr = fib_alloc(dev))) + return -ENOMEM; + + fib_init(fibptr); + dinfo = (struct aac_get_name *) fib_data(fibptr); + + dinfo->command = cpu_to_le32(VM_ContainerConfig); + dinfo->type = cpu_to_le32(CT_READ_NAME); + dinfo->cid = cpu_to_le32(cid); + dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); + + status = fib_send(ContainerCommand, + fibptr, + sizeof (struct aac_get_name), + FsaNormal, + 1, 1, + NULL, NULL); + if (status < 0 ) { + printk(KERN_WARNING "aac_get_container_name: SendFIB failed.\n"); + } else { + dresp = (struct aac_get_name_resp *)fib_data(fibptr); + + status = (le32_to_cpu(dresp->status) != CT_OK) + || (dresp->data[0] == '\0'); + if (status == 0) { + char * sp = dresp->data; + char * dp = pid; + do { + if ((*sp == '\0') + || ((dp - pid) >= sizeof(((struct aac_get_name_resp *)NULL)->data))) { + *dp = ' '; + } else { + *dp = *sp++; + } + } while (++dp < &pid[sizeof(((struct inquiry_data *)NULL)->inqd_pid)]); + } + } + fib_complete(fibptr); + fib_free(fibptr); + fsa_dev[instance] = fsa_dev_ptr; + return status; +} + +/** * probe_container - query a logical volume * @dev: device to query * @cid: container identifier @@ -569,8 +628,10 @@ if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ dev->pae_support = 1; } - /* TODO - dmb temporary until fw can set this bit */ - dev->pae_support = (BITS_PER_LONG >= 64); + + if(paemode != -1) + dev->pae_support = (paemode != 0); + if(dev->pae_support != 0) { printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id); @@ -997,7 +1058,6 @@ memset(inq_data_ptr, 0, sizeof (struct inquiry_data)); inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */ - inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */ inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ inq_data_ptr->inqd_len = 31; /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ @@ -1006,11 +1066,14 @@ * 
Set the Vendor, Product, and Revision Level * see: .c i.e. aac.c */ - setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]); - if (scsicmd->target == scsicmd->host->this_id) + if (scsicmd->target == scsicmd->host->this_id) { + setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), (sizeof(container_types)/sizeof(char *))); inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */ - else + } else { + setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]); + aac_get_container_name(dev, cid, inq_data_ptr->inqd_pid); inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ + } scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD; __aac_io_done(scsicmd); return 0; @@ -1123,7 +1186,7 @@ SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND, ASENCODE_INVALID_COMMAND, 0, 0, 0, 0); __aac_io_done(scsicmd); - return -1; + return 0; } } @@ -1425,9 +1488,7 @@ case SRB_STATUS_FORCE_ABORT: case SRB_STATUS_DOMAIN_VALIDATION_FAIL: default: -#ifdef AAC_DETAILED_STATUS_INFO printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",le32_to_cpu(srbreply->srb_status&0x3f),aac_get_status_string(le32_to_cpu(srbreply->srb_status)), scsicmd->cmnd[0], le32_to_cpu(srbreply->scsi_status) ); -#endif scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; break; } @@ -1525,7 +1586,7 @@ /* * Build Scatter/Gather list */ - fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64)); + fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) + ((srbcmd->sg.count & 0xff) * sizeof (struct sgentry64)); /* * Now send the Fib to the adapter @@ -1686,8 +1747,6 @@ return byte_count; } -#ifdef AAC_DETAILED_STATUS_INFO - struct aac_srb_status_info { u32 status; char *str; @@ -1742,4 +1801,3 @@ return "Bad Status Code"; } -#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/aacraid/aacraid.h linux.22-ac2/drivers/scsi/aacraid/aacraid.h --- linux.vanilla/drivers/scsi/aacraid/aacraid.h 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/scsi/aacraid/aacraid.h 2003-08-17 18:00:08.000000000 +0100 @@ -1,3 +1,6 @@ +#define AAC_DRIVER_VERSION 0x01010300 +#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__ + //#define dprintk(x) printk x #if (!defined(dprintk)) # define dprintk(x) @@ -759,7 +762,7 @@ */ dma_addr_t hw_fib_pa; struct hw_fib *hw_fib_va; - ulong fib_base_va; + struct hw_fib *aif_base_va; /* * Fib Headers */ @@ -1178,6 +1181,31 @@ struct aac_mntent mnt[1]; }; +#define CT_READ_NAME 130 +struct aac_get_name { + u32 command; + u32 type; // CT_READ_NAME + u32 cid; + u32 parm1; + u32 parm2; + u32 parm3; + u32 parm4; + u32 count; // sizeof(((struct aac_get_name_resp *)NULL)->data) +}; + +#define CT_OK 218 +struct aac_get_name_resp { + u32 dummy0; + u32 dummy1; + u32 status; // CT_OK + u32 parm1; + u32 parm2; + u32 parm3; + u32 parm4; + u32 parm5; + u8 data[16]; +}; + /* * The following command is sent to shut down each container. */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/aacraid/commctrl.c linux.22-ac2/drivers/scsi/aacraid/commctrl.c --- linux.vanilla/drivers/scsi/aacraid/commctrl.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/scsi/aacraid/commctrl.c 2003-08-17 18:00:39.000000000 +0100 @@ -362,7 +362,7 @@ * @dev: adapter * @arg: ioctl arguments * - * This routine returns the firmware version. + * This routine returns the driver version. 
* Under Linux, there have been no version incompatibilities, so this is simple! */ @@ -371,14 +371,223 @@ struct revision response; response.compat = 1; - response.version = dev->adapter_info.kernelrev; - response.build = dev->adapter_info.kernelbuild; + response.version = AAC_DRIVER_VERSION; + response.build = 9999; if (copy_to_user(arg, &response, sizeof(response))) return -EFAULT; return 0; } +/** + * + * aac_send_raw_scb + * + */ + +int aac_send_raw_srb(struct aac_dev* dev, void* arg) +{ + struct fib* srbfib; + int status; + struct aac_srb *srbcmd; + struct aac_srb *user_srb = arg; + struct aac_srb_reply* user_reply; + struct aac_srb_reply* reply; + u32 fibsize = 0; + u32 flags = 0; + s32 rcode = 0; + u32 data_dir; + ulong sg_user[32]; + ulong sg_list[32]; + u32 sg_indx = 0; + u32 byte_count = 0; + u32 actual_fibsize = 0; + int i; + + + if (!capable(CAP_SYS_ADMIN)){ + printk(KERN_DEBUG"aacraid: No permission to send raw srb\n"); + return -EPERM; + } + /* + * Allocate and initialize a Fib then setup a BlockWrite command + */ + if (!(srbfib = fib_alloc(dev))) { + return -1; + } + fib_init(srbfib); + + srbcmd = (struct aac_srb*) fib_data(srbfib); + + if(copy_from_user((void*)&fibsize, (void*)&user_srb->count,sizeof(u32))){ + printk(KERN_DEBUG"aacraid: Could not copy data size from user\n"); + rcode = -EFAULT; + goto cleanup; + } + + if(copy_from_user(srbcmd, user_srb,fibsize)){ + printk(KERN_DEBUG"aacraid: Could not copy srb from user\n"); + rcode = -EFAULT; + goto cleanup; + } + + user_reply = arg+fibsize; + + flags = srbcmd->flags; + // Fix up srb for endian and force some values + srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this + srbcmd->channel = cpu_to_le32(srbcmd->channel); + srbcmd->target = cpu_to_le32(srbcmd->target); + srbcmd->lun = cpu_to_le32(srbcmd->lun); + srbcmd->flags = cpu_to_le32(srbcmd->flags); + srbcmd->timeout = cpu_to_le32(srbcmd->timeout); + srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter + srbcmd->cdb_size = cpu_to_le32(srbcmd->cdb_size); + + switch(srbcmd->flags & (SRB_DataIn | SRB_DataOut)){ + case SRB_DataOut: + data_dir = SCSI_DATA_WRITE; + break; + case (SRB_DataIn | SRB_DataOut): + data_dir = SCSI_DATA_UNKNOWN; + break; + case SRB_DataIn: + data_dir = SCSI_DATA_READ; + break; + default: + data_dir = SCSI_DATA_NONE; + } + + if( dev->pae_support ==1 ) { + struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; + byte_count = 0; + + // This should also catch if user used the 32 bit sgmap + actual_fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64)); + if(actual_fibsize != fibsize){ // User made a mistake - should not continue + printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n"); + rcode = -EINVAL; + goto cleanup; + } + if ((data_dir == SCSI_DATA_NONE) && psg->count) { // Dogs and cats sleeping with eachother - should not continue + printk(KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"); + rcode = -EINVAL; + goto cleanup; + } + + for (i = 0; i < psg->count; i++) { + dma_addr_t addr; + u64 le_addr; + void* p; + p = kmalloc(psg->sg[i].count,GFP_KERNEL|__GFP_DMA); + if(p == 0) { + printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", + psg->sg[i].count,i,psg->count); + rcode = -ENOMEM; + goto cleanup; + } + sg_user[i] = (ulong)psg->sg[i].addr; + sg_list[i] = (ulong)p; // save so we can clean up later + sg_indx = i + 1; + + if( flags & SRB_DataOut ){ + if(copy_from_user(p,psg->sg[i].addr,psg->sg[i].count)){ + 
printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n"); + rcode = -EFAULT; + goto cleanup; + } + } + addr = pci_map_single(dev->pdev, p, psg->sg[i].count, scsi_to_pci_dma_dir(data_dir)); + + le_addr = cpu_to_le64(addr); + psg->sg[i].addr[1] = (u32)(le_addr>>32); + psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff); + psg->sg[i].count = cpu_to_le32(psg->sg[i].count); + byte_count += psg->sg[i].count; + } + + srbcmd->count = cpu_to_le32(byte_count); + status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,0,0); + } else { + struct sgmap* psg = &srbcmd->sg; + byte_count = 0; + + actual_fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry)); + if(actual_fibsize != fibsize){ // User made a mistake - should not continue + printk(KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n"); + rcode = -EINVAL; + goto cleanup; + } + if ((data_dir == SCSI_DATA_NONE) && psg->count) { // Dogs and cats sleeping with eachother - should not continue + printk(KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"); + rcode = -EINVAL; + goto cleanup; + } + for (i = 0; i < psg->count; i++) { + dma_addr_t addr; + void* p; + p = kmalloc(psg->sg[i].count,GFP_KERNEL); + if(p == 0) { + printk(KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", + psg->sg[i].count,i,psg->count); + rcode = -ENOMEM; + goto cleanup; + } + sg_user[i] = (ulong)(psg->sg[i].addr); + sg_list[i] = (ulong)p; // save so we can clean up later + sg_indx = i + 1; + + if( flags & SRB_DataOut ){ + if(copy_from_user((void*)p,(void*)(ulong)(psg->sg[i].addr),psg->sg[i].count)){ + printk(KERN_DEBUG"aacraid: Could not copy sg data from user\n"); + rcode = -EFAULT; + goto cleanup; + } + } + addr = pci_map_single(dev->pdev, p, psg->sg[i].count, scsi_to_pci_dma_dir(data_dir)); + + psg->sg[i].addr = cpu_to_le32(addr); + psg->sg[i].count = cpu_to_le32(psg->sg[i].count); + byte_count += psg->sg[i].count; + } + srbcmd->count = cpu_to_le32(byte_count); + status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, 0, 0); + } + + if (status != 0){ + printk(KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"); + rcode = -1; + goto cleanup; + } + + if( flags & SRB_DataIn ) { + for(i = 0 ; i < sg_indx; i++){ + if(copy_to_user((void*)(sg_user[i]),(void*)(sg_list[i]),le32_to_cpu(srbcmd->sg.sg[i].count))){ + printk(KERN_DEBUG"aacraid: Could not copy sg data to user\n"); + rcode = -EFAULT; + goto cleanup; + + } + } + } + + reply = (struct aac_srb_reply *) fib_data(srbfib); + if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){ + printk(KERN_DEBUG"aacraid: Could not copy reply to user\n"); + rcode = -EFAULT; + goto cleanup; + } + +cleanup: + for(i=0; i < sg_indx; i++){ + kfree((void*)sg_list[i]); + } + fib_complete(srbfib); + fib_free(srbfib); + + return rcode; +} + struct aac_pci_info { u32 bus; @@ -427,6 +636,9 @@ case FSACTL_CLOSE_GET_ADAPTER_FIB: status = close_getadapter_fib(dev, arg); break; + case FSACTL_SEND_RAW_SRB: + status = aac_send_raw_srb(dev,arg); + break; case FSACTL_GET_PCI_INFO: status = aac_get_pci_info(dev,arg); break; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/aacraid/comminit.c linux.22-ac2/drivers/scsi/aacraid/comminit.c --- linux.vanilla/drivers/scsi/aacraid/comminit.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/scsi/aacraid/comminit.c 2003-08-17 17:37:31.000000000 +0100 @@ -84,14 +84,14 @@ * Adapter Fibs are the first thing 
allocated so that they * start page aligned */ - dev->fib_base_va = (ulong)base; + dev->aif_base_va = (struct hw_fib *)base; /* We submit the physical address for AIF tags to limit to 32 bits */ - init->AdapterFibsVirtualAddress = cpu_to_le32((u32)phys); + init->AdapterFibsVirtualAddress = cpu_to_le32(0); init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys); init->AdapterFibsSize = cpu_to_le32(fibsize); init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib)); - init->HostPhysMemPages = cpu_to_le32(num_physpages); // number of 4k pages of host physical memory + init->HostPhysMemPages = cpu_to_le32((num_physpages << PAGE_SHIFT) / 4096); // number of 4k pages of host physical memory /* * Increment the base address by the amount already used diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/aacraid/commsup.c linux.22-ac2/drivers/scsi/aacraid/commsup.c --- linux.vanilla/drivers/scsi/aacraid/commsup.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/scsi/aacraid/commsup.c 2003-08-17 17:39:34.000000000 +0100 @@ -203,7 +203,7 @@ hw_fib->header.StructType = FIB_MAGIC; hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib)); hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable); - hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa); + hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */ hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa); hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib)); } @@ -450,8 +450,7 @@ * Map the fib into 32bits by using the fib number */ -// hw_fib->header.SenderFibAddress = ((u32)(fibptr-dev->fibs)) << 1; - hw_fib->header.SenderFibAddress = cpu_to_le32((u32)(ulong)fibptr->hw_fib_pa); + hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 1); hw_fib->header.SenderData = (u32)(fibptr - dev->fibs); /* * Set FIB state to indicate where it came from and if we want a @@ -792,7 +791,6 @@ struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data; int busy; u32 container; - mm_segment_t fs; /* Sniff for container changes */ dprintk ((KERN_INFO "AifCmdDriverNotify=%x\n", le32_to_cpu(*(u32 *)aifcmd->data))); @@ -859,18 +857,24 @@ * go away. We need to check the access_count for the * device since we are not wanting the devices to go away. 
*/ - if (busy == 0 && proc_scsi != NULL) { + if ((busy == 0) + && (proc_scsi != (struct proc_dir_entry *)NULL)) { struct proc_dir_entry * entry; dprintk((KERN_INFO "proc_scsi=%p ", proc_scsi)); - for (entry = proc_scsi->subdir; entry != (struct proc_dir_entry *)NULL; entry = entry->next) { + for (entry = proc_scsi->subdir; + entry != (struct proc_dir_entry *)NULL; + entry = entry->next) { dprintk(("\"%.*s\"[%d]=%x ", entry->namelen, entry->name, entry->namelen, entry->low_ino)); - if ((entry->low_ino != 0) && (entry->namelen == 4) && (memcmp ("scsi", entry->name, 4) == 0)) { + if ((entry->low_ino != 0) + && (entry->namelen == 4) + && (memcmp ("scsi", entry->name, 4) == 0)) { dprintk(("%p->write_proc=%p ", entry, entry->write_proc)); if (entry->write_proc != (int (*)(struct file *, const char *, unsigned long, void *))NULL) { char buffer[80]; int length; + mm_segment_t fs; sprintf (buffer, "scsi add-single-device %d %d %d %d\n", @@ -879,12 +883,17 @@ CONTAINER_TO_TARGET(container), CONTAINER_TO_LUN(container)); length = strlen (buffer); - dprintk((KERN_INFO "echo %.*s > /proc/scsi/scsi\n", length-1, buffer)); + dprintk((KERN_INFO + "echo %.*s > /proc/scsi/scsi\n", + length-1, + buffer)); fs = get_fs(); set_fs(get_ds()); - length = entry->write_proc(NULL, buffer, length, NULL); + length = entry->write_proc( + NULL, buffer, length, NULL); set_fs(fs); - dprintk((KERN_INFO "returns %d\n", length)); + dprintk((KERN_INFO + "returns %d\n", length)); } break; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/aacraid/dpcsup.c linux.22-ac2/drivers/scsi/aacraid/dpcsup.c --- linux.vanilla/drivers/scsi/aacraid/dpcsup.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/scsi/aacraid/dpcsup.c 2003-08-17 17:39:54.000000000 +0100 @@ -74,12 +74,12 @@ */ while(aac_consumer_get(dev, q, &entry)) { - u32 fast ; - fast = (entry->addr & cpu_to_le32(0x01)); -// fib = &dev->fibs[(entry->addr >> 1)]; -// hwfib = fib->hw_fib; - hwfib = bus_to_virt(le32_to_cpu(entry->addr & cpu_to_le32(~0x01))); - fib = &dev->fibs[hwfib->header.SenderData]; + int fast; + u32 index; + index = le32_to_cpu(entry->addr); + fast = index & 0x01; + fib = &dev->fibs[index >> 1]; + hwfib = fib->hw_fib; aac_consumer_free(dev, q, HostNormRespQueue); /* @@ -178,13 +178,12 @@ while(aac_consumer_get(dev, q, &entry)) { struct fib fibctx; + struct hw_fib * hw_fib; + u32 index; struct fib *fib = &fibctx; - u32 hw_fib_pa = le32_to_cpu(entry->addr & cpu_to_le32(~0x01)); - struct hw_fib * hw_fib_va = ((dev->comm_phys <= hw_fib_pa) - && (hw_fib_pa < (dev->comm_phys + dev->comm_size))) - ? dev->comm_addr + (hw_fib_pa - dev->comm_phys) - : /* inconceivable */ bus_to_virt(hw_fib_pa); - dprintk((KERN_INFO "hw_fib_pa=%x hw_fib_va=%p\n", hw_fib_pa, hw_fib_va)); + + index = le32_to_cpu(entry->addr / sizeof(struct hw_fib)); + hw_fib = &dev->aif_base_va[index]; /* * Allocate a FIB at all costs. 
For non queued stuff @@ -199,8 +198,8 @@ INIT_LIST_HEAD(&fib->fiblink); fib->type = FSAFS_NTC_FIB_CONTEXT; fib->size = sizeof(struct fib); - fib->hw_fib = hw_fib_va; - fib->data = hw_fib_va->data; + fib->hw_fib = hw_fib; + fib->data = hw_fib->data; fib->dev = dev; if (dev->aif_thread && fib != &fibctx) @@ -214,7 +213,7 @@ /* * Set the status of this FIB */ - *(u32 *)hw_fib_va->data = cpu_to_le32(ST_OK); + *(u32 *)hw_fib->data = cpu_to_le32(ST_OK); fib_adapter_complete(fib, sizeof(u32)); spin_lock_irqsave(q->lock, flags); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/aacraid/linit.c linux.22-ac2/drivers/scsi/aacraid/linit.c --- linux.vanilla/drivers/scsi/aacraid/linit.c 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/scsi/aacraid/linit.c 2003-08-17 17:59:51.000000000 +0100 @@ -35,9 +35,6 @@ * */ -#define AAC_DRIVER_VERSION "1.1.2" -#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__ - #include #include #include @@ -56,15 +53,11 @@ #include "aacraid.h" #include "sd.h" -#define AAC_DRIVERNAME "aacraid" +#define AAC_DRIVER_NAME "aacraid" MODULE_AUTHOR("Red Hat Inc and Adaptec"); MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, Adaptec Advanced Raid Products, and HP NetRAID-4M devices. http://domsch.com/linux/ or http://linux.adaptec.com"); MODULE_LICENSE("GPL"); -MODULE_PARM(paemode, "i"); -MODULE_PARM_DESC(paemode, "Control whether dma addressing is using PAE. 0=off, 1=on"); - -static int paemode = -1; struct aac_dev *aac_devices[MAXIMUM_NUM_ADAPTERS]; @@ -107,15 +100,19 @@ { 0x9005, 0x0285, 0x9005, 0x0292, aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 2 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */ { 0x9005, 0x0285, 0x9005, 0x0293, aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA ", 2 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */ { 0x9005, 0x0285, 0x9005, 0x0294, aac_rx_init, "aacraid", "ADAPTEC ", "SO-DIMM SATA ZCR ", 2 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */ - /* ServeRAID */ -/* { 0x9005, 0x0250, 0x1014, 0x0279, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec ", 2 }, */ /* (Marco) */ -/* { 0x9005, 0x0250, 0x1014, 0x028c, aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec ", 2 }, */ /* (Sebring)*/ + { 0x9005, 0x0285, 0x0E11, 0x0295, aac_rx_init, "aacraid", "ADAPTEC ", "SATA 6Channel ", 1 }, /* SATA 6Ch (Bearcat) */ { 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2 }, /* Perc 320/DC*/ { 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4 }, /* Adaptec 5400S (Mustang)*/ { 0x1011, 0x0046, 0x9005, 0x0364, aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4 }, /* Adaptec 5400S (Mustang)*/ { 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4 }, /* Dell PERC2 "Quad Channel" */ - { 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4 } /* HP NetRAID-4M */ + { 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4 }, /* HP NetRAID-4M */ + { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, + aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT },/* Dell Catchall */ + { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, + aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT },/* Legend Catchall */ + { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, + aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT } /* Adaptec Catch All */ }; #define NUM_AACTYPES (sizeof(aac_drivers) / sizeof(struct aac_driver_ident)) @@ -175,9 +172,15 @@ struct fsa_scsi_hba *fsa_dev_ptr; char *name = NULL; - 
printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n", AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE); - + printk(KERN_INFO "Red Hat/Adaptec %s driver (%d.%d-%d %s)\n", + AAC_DRIVER_NAME, + AAC_DRIVER_VERSION >> 24, + (AAC_DRIVER_VERSION >> 16) & 0xFF, + (AAC_DRIVER_VERSION >> 8) & 0xFF, + AAC_DRIVER_BUILD_DATE); + /* setting up the proc directory structure */ + template->proc_name = "aacraid"; spin_unlock_irq(&io_request_lock); @@ -607,7 +610,7 @@ static int aac_eh_reset(Scsi_Cmnd* cmd) { - printk(KERN_ERR "aacraid: Host adapter reset request. SCSI hang ?\n"); + printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVER_NAME); return FAILED; } @@ -741,13 +744,41 @@ static int aac_procinfo(char *proc_buffer, char **start_ptr,off_t offset, int bytes_available, int host_no, int write) { + struct aac_dev * dev; + int index, ret, tmp; + if(write || offset > 0) return 0; *start_ptr = proc_buffer; - return sprintf(proc_buffer, - "Adaptec Raid Controller %s %s, scsi hba number %d\n", - AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE, - host_no); + ret = sprintf(proc_buffer, + "Adaptec Raid Controller %d.%d-%d %s, scsi hba number %d\n", + AAC_DRIVER_VERSION >> 24, + (AAC_DRIVER_VERSION >> 16) & 0xFF, + (AAC_DRIVER_VERSION >> 8) & 0xFF, + AAC_DRIVER_BUILD_DATE, + host_no); + for (index = 0; index < aac_count; ++index) { + if (((dev = aac_devices[index]) != NULL) && dev->scsi_host_ptr->host_no == host_no) + break; + } + if (index >= aac_count || dev == NULL) + return ret; + tmp = dev->adapter_info.kernelrev; + ret += sprintf(proc_buffer + ret, "kernel: %d.%d-%d[%d]\n", + tmp >> 24, (tmp >> 16) & 0xff, (tmp >> 8) & 0xff, + dev->adapter_info.kernelbuild); + tmp = dev->adapter_info.monitorrev; + ret += sprintf(proc_buffer + ret, "monitor: %d.%d-%d[%d]\n", + tmp >> 24, (tmp >> 16) & 0xff, (tmp >> 8) & 0xff, + dev->adapter_info.monitorbuild); + tmp = dev->adapter_info.biosrev; + ret += sprintf(proc_buffer + ret, "bios: %d.%d-%d[%d]\n", + tmp >> 24, (tmp >> 16) & 0xff, (tmp >> 8) & 0xff, + dev->adapter_info.biosbuild); + ret += sprintf(proc_buffer + ret, "serial: %x%x\n", + dev->adapter_info.serial[0], + dev->adapter_info.serial[1]); + return ret; } EXPORT_NO_SYMBOLS; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/ata_piix.c linux.22-ac2/drivers/scsi/ata_piix.c --- linux.vanilla/drivers/scsi/ata_piix.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/scsi/ata_piix.c 2003-09-01 13:25:08.000000000 +0100 @@ -0,0 +1,647 @@ +/* + + ata_piix.c - Intel PATA/SATA controllers + + + Copyright 2003 Red Hat Inc + Copyright 2003 Jeff Garzik + + + Copyright header from piix.c: + + Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer + Copyright (C) 1998-2000 Andre Hedrick + Copyright (C) 2003 Red Hat Inc + + May be copied or modified under the terms of the GNU General Public License + + TODO: + * check traditional port enable/disable bits in pata port_probe + + */ +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "ata_piix" +#define DRV_VERSION "0.94" + +enum { + PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ + ICH5_PCS = 0x92, /* port control and status */ + + PIIX_FLAG_COMBINED = (1 << 30), /* combined mode possible */ + + PIIX_COMB_PRI = (1 << 0), /* combined mode, PATA primary */ + PIIX_COMB_SEC = (1 << 1), /* combined mode, PATA secondary */ + + PIIX_80C_PRI = (1 << 5) | (1 << 4), + PIIX_80C_SEC = (1 << 7) | (1 << 6), + + ich5_pata = 
0, + ich5_sata = 1, + piix4_pata = 2, +}; + +static int piix_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent); + +static void piix_pata_phy_probe(struct ata_port *ap); +static void piix_pata_port_probe(struct ata_port *ap); + +static void piix_sata_phy_probe(struct ata_port *ap); +static void piix_sata_port_probe(struct ata_port *ap); +static void piix_sata_port_disable(struct ata_port *ap); +static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio); +static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma); + +static unsigned int in_module_init = 1; + +static struct pci_device_id piix_pci_tbl[] = { +#ifdef CONFIG_SCSI_ATA_PATA + { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata }, + { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata }, + { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata }, +#endif + + { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + + { } /* terminate list */ +}; + +static struct pci_driver piix_pci_driver = { + .name = DRV_NAME, + .id_table = piix_pci_tbl, + .probe = piix_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template piix_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .detect = ata_scsi_detect, + .release = ata_scsi_release, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .use_new_eh_code = ATA_SHT_NEW_EH_CODE, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, +}; + +static struct ata_host_info piix_pata_ops = { + .port_probe = piix_pata_port_probe, + .port_disable = ata_port_disable, + .set_piomode = piix_set_piomode, + .set_udmamode = piix_set_udmamode, + + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + + .phy_probe = piix_pata_phy_probe, + + .bmdma_start = ata_bmdma_start_pio, +}; + +static struct ata_host_info piix_sata_ops = { + .port_probe = piix_sata_port_probe, + .port_disable = piix_sata_port_disable, + .set_piomode = piix_set_piomode, + .set_udmamode = piix_set_udmamode, + + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + + .phy_probe = piix_sata_phy_probe, + + .bmdma_start = ata_bmdma_start_pio, +}; + +static struct ata_board ata_board_tbl[] = { + /* ich5_pata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */ + .host_info = &piix_pata_ops, + }, + + /* ich5_sata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | + ATA_FLAG_SRST, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .host_info = &piix_sata_ops, + }, + + /* piix4_pata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */ + .host_info = &piix_pata_ops, + }, +}; + +static struct pci_bits piix_enable_bits[] = { + /* port 0 */ + { + .reg = 0x41, + .width = 1, + .mask = 0x80, + .val = 0x80, + }, + + /* port 1 */ + { + .reg = 0x43, + .width = 1, + .mask = 0x80, + .val = 0x80, + }, +}; + 
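
Editorial note, not part of the patch: the piix_enable_bits[] table just defined is consumed later by piix_pata_port_probe() and piix_sata_port_probe() through pci_test_config_bits(), a helper the new driver takes from the libata core rather than defining here. Each entry names a PCI config offset (0x41 for the primary channel, 0x43 for the secondary), an access width, a mask and an expected value; bit 7 at those offsets appears to be the IDE decode-enable bit of the PIIX IDETIM registers at 0x40/0x42. The sketch below only illustrates how such an entry is meant to be interpreted; the struct and function names are hypothetical, and it is written as free-standing C (the real helper reads the register from the struct pci_dev via pci_read_config_byte(), while here the already-read value is passed in so the example stays self-contained).

#include <stdint.h>

/* Mirror of the fields used by the piix_enable_bits[] entries above. */
struct example_pci_bits {
	unsigned int reg;   /* config space offset, e.g. 0x41 or 0x43 */
	unsigned int width; /* access width in bytes (1 in this table) */
	uint32_t mask;      /* bits of interest, 0x80 = port enable */
	uint32_t val;       /* value expected after masking */
};

/*
 * Non-zero means the masked register value matches, i.e. the port
 * decode is enabled and probing may continue; otherwise the driver
 * calls ata_port_disable() and skips the port, as seen in
 * piix_pata_port_probe()/piix_sata_port_probe() later in this file.
 */
int example_test_config_bits(uint32_t regval,
                             const struct example_pci_bits *bits)
{
	return (regval & bits->mask) == bits->val;
}

The table-driven form lets the same check serve both channels: the port number indexes the table, so the probe code stays identical for port 0 and port 1 and only the register offset differs.
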
+MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik"); +MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, piix_pci_tbl); + +/** + * piix_pata_cbl_detect - Probe host controller cable detect info + * @ap: Port for which cable detect info is desired + * + * Read 80c cable indicator from SATA PCI device's PCI config + * register. This register is normally set by firmware (BIOS). + * + * LOCKING: + * None (inherited from caller). + */ +static void piix_pata_cbl_detect(struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + u8 tmp, mask; + + /* no 80c support in host controller? */ + if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0) + goto cbl40; + + /* check BIOS cable detect results */ + mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; + pci_read_config_byte(pdev, PIIX_IOCFG, &tmp); + if ((tmp & mask) == 0) + goto cbl40; + + ap->cbl = ATA_CBL_PATA80; + return; + +cbl40: + ap->cbl = ATA_CBL_PATA40; + ap->udma_mask &= ATA_UDMA_MASK_40C; +} + +/** + * piix_pata_port_probe - Probe specified port on PATA host controller + * @ap: Port to probe + * + * Probe PATA phy. + * + * NOTES: + * This is routine is going away. Getting absorbed into + * piix_pata_phy_probe. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_pata_port_probe(struct ata_port *ap) +{ + if (!pci_test_config_bits(ap->host_set->pdev, + &piix_enable_bits[ap->port_no])) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); + return; + } + + ata_port_probe(ap); +} + +/** + * piix_pata_phy_probe - Probe specified port on PATA host controller + * @ap: Port to probe + * + * Perform all duties necessary to set up bus for read/write + * transactions. + * + * LOCKING: + * None (inherited from caller). + */ +static void piix_pata_phy_probe(struct ata_port *ap) +{ + /* FIXME: check port enable PCI config bits */ + + piix_pata_cbl_detect(ap); + + /* TODO: set timings here */ +} + +/** + * piix_pcs_probe - Probe SATA port configuration and status register + * @ap: Port to probe + * @have_port: (output) Non-zero if SATA port is enabled + * @have_device: (output) Non-zero if SATA phy indicates device present + * + * Reads SATA PCI device's PCI config register Port Configuration + * and Status (PCS) to determine port and device availability. + * + * LOCKING: + * None (inherited from caller). + */ +static void piix_pcs_probe (struct ata_port *ap, unsigned int *have_port, + unsigned int *have_device) +{ + struct pci_dev *pdev = ap->host_set->pdev; + u16 pcs; + + pci_read_config_word(pdev, ICH5_PCS, &pcs); + + /* is SATA port enabled? */ + if (pcs & (1 << ap->port_no)) { + *have_port = 1; + + if (pcs & (1 << (ap->port_no + 4))) + *have_device = 1; + } +} + +/** + * piix_pcs_disable - Disable SATA port + * @ap: Port to disable + * + * Disable SATA phy for specified port. + * + * LOCKING: + * None (inherited from caller). + */ +static void piix_pcs_disable (struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + u16 pcs; + + pci_read_config_word(pdev, ICH5_PCS, &pcs); + + if (pcs & (1 << ap->port_no)) { + pcs &= ~(1 << ap->port_no); + pci_write_config_word(pdev, ICH5_PCS, pcs); + } +} + +/** + * piix_sata_port_probe - Probe specified port on SATA host controller + * @ap: Port to probe + * + * Probe SATA phy. + * + * NOTES: + * This is routine is going away. Getting absorbed into + * piix_sata_phy_probe. + * + * LOCKING: + * None (inherited from caller). 
+ */ + +static void piix_sata_port_probe(struct ata_port *ap) +{ + unsigned int have_port = 0, have_dev = 0; + + if (!pci_test_config_bits(ap->host_set->pdev, + &piix_enable_bits[ap->port_no])) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); + return; + } + + piix_pcs_probe(ap, &have_port, &have_dev); + + /* if port not enabled, exit */ + if (!have_port) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: SATA port disabled. ignoring.\n", + ap->id); + return; + } + + /* if port enabled but no device, disable port and exit */ + if (!have_dev) { + piix_sata_port_disable(ap); + printk(KERN_INFO "ata%u: SATA port has no device. disabling.\n", + ap->id); + return; + } + + ata_port_probe(ap); +} + +/** + * piix_sata_port_disable - Disable SATA port + * @ap: Port to disable. + * + * Disable SATA port. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_sata_port_disable(struct ata_port *ap) +{ + ata_port_disable(ap); + piix_pcs_disable(ap); +} + +/** + * piix_sata_phy_probe - Probe specified port on SATA host controller + * @ap: Port to probe + * + * Probe SATA port. + * + * LOCKING: + * None (inherited from caller). + */ +static void piix_sata_phy_probe(struct ata_port *ap) +{ + /* FIXME: check port enable PCI config bits */ + + ap->cbl = ATA_CBL_SATA; + + /* FIXME: call piix_pcs_probe */ +} + +/** + * piix_set_piomode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: um + * @pio: PIO mode, 0 - 4 + * + * Set PIO mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio) +{ + struct pci_dev *dev = ap->host_set->pdev; + unsigned int is_slave = (adev->flags & ATA_DFLAG_MASTER) ? 0 : 1; + unsigned int master_port= ap->port_no ? 0x42 : 0x40; + unsigned int slave_port = 0x44; + u16 master_data; + u8 slave_data; + + static const /* ISP RTC */ + u8 timings[][2] = { { 0, 0 }, + { 0, 0 }, + { 1, 0 }, + { 2, 1 }, + { 2, 3 }, }; + + pci_read_config_word(dev, master_port, &master_data); + if (is_slave) { + master_data |= 0x4000; + /* enable PPE, IE and TIME */ + master_data |= 0x0070; + pci_read_config_byte(dev, slave_port, &slave_data); + slave_data &= (ap->port_no ? 0x0f : 0xf0); + slave_data |= + (timings[pio][0] << 2) | + (timings[pio][1] << (ap->port_no ? 4 : 0)); + } else { + master_data &= 0xccf8; + /* enable PPE, IE and TIME */ + master_data |= 0x0007; + master_data |= + (timings[pio][0] << 12) | + (timings[pio][1] << 8); + } + pci_write_config_word(dev, master_port, master_data); + if (is_slave) + pci_write_config_byte(dev, slave_port, slave_data); +} + +/** + * piix_set_udmamode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: um + * @udma: udma mode, 0 - 6 + * + * Set UDMA mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma) +{ + struct pci_dev *dev = ap->host_set->pdev; + u8 maslave = ap->port_no ? 0x42 : 0x40; + u8 speed = udma; + unsigned int drive_dn = (ap->port_no ? 
2 : 0) + adev->devno; + int a_speed = 3 << (drive_dn * 4); + int u_flag = 1 << drive_dn; + int v_flag = 0x01 << drive_dn; + int w_flag = 0x10 << drive_dn; + int u_speed = 0; + int sitre; + u16 reg4042, reg44, reg48, reg4a, reg54; + u8 reg55; + + pci_read_config_word(dev, maslave, ®4042); + DPRINTK("reg4042 = 0x%04x\n", reg4042); + sitre = (reg4042 & 0x4000) ? 1 : 0; + pci_read_config_word(dev, 0x44, ®44); + pci_read_config_word(dev, 0x48, ®48); + pci_read_config_word(dev, 0x4a, ®4a); + pci_read_config_word(dev, 0x54, ®54); + pci_read_config_byte(dev, 0x55, ®55); + + switch(speed) { + case XFER_UDMA_4: + case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break; + case XFER_UDMA_6: + case XFER_UDMA_5: + case XFER_UDMA_3: + case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break; + case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break; + default: + BUG(); + return; + } + + if (!(reg48 & u_flag)) + pci_write_config_word(dev, 0x48, reg48|u_flag); + if (speed == XFER_UDMA_5) { + pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); + } else { + pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); + } + if (!(reg4a & u_speed)) { + pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); + pci_write_config_word(dev, 0x4a, reg4a|u_speed); + } + if (speed > XFER_UDMA_2) { + if (!(reg54 & v_flag)) { + pci_write_config_word(dev, 0x54, reg54|v_flag); + } + } else { + pci_write_config_word(dev, 0x54, reg54 & ~v_flag); + } +} + +/** + * piix_probe_combined - Determine if PATA and SATA are combined + * @pdev: PCI device to examine + * @mask: (output) zero, %PIIX_COMB_PRI or %PIIX_COMB_SEC + * + * Determine if BIOS has secretly stuffed a PATA port into our + * otherwise-beautiful SATA PCI device. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + */ +static void piix_probe_combined (struct pci_dev *pdev, unsigned int *mask) +{ + u8 tmp; + + pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */ + tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */ + + /* backwards from what one might expect */ + if (tmp == 0x4) /* bits 10x */ + *mask |= PIIX_COMB_SEC; + if (tmp == 0x6) /* bits 11x */ + *mask |= PIIX_COMB_PRI; +} + +/** + * piix_init_one - Register PIIX ATA PCI device with kernel services + * @pdev: PCI device to register + * @ent: Entry in piix_pci_tbl matching with @pdev + * + * Called from kernel PCI layer. We probe for combined mode (sigh), + * and then hand over control to libata, for it to do the rest. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, or -ERRNO value. 
+ */ + +static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_board *boards[2]; + unsigned int combined = 0, n_boards = 1; + unsigned int pata_comb = 0, sata_comb = 0; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* no hotplugging support (FIXME) */ + if (!in_module_init) + return -ENODEV; + + boards[0] = &ata_board_tbl[ent->driver_data]; + boards[1] = NULL; + if (boards[0]->host_flags & PIIX_FLAG_COMBINED) + piix_probe_combined(pdev, &combined); + + if (combined & PIIX_COMB_PRI) + sata_comb = 1; + else if (combined & PIIX_COMB_SEC) + pata_comb = 1; + + if (pata_comb || sata_comb) { + boards[sata_comb] = &ata_board_tbl[ent->driver_data]; + boards[sata_comb]->host_flags |= ATA_FLAG_SLAVE_POSS; /* sigh */ + boards[pata_comb] = &ata_board_tbl[ich5_pata]; /*ich5-specific*/ + n_boards++; + + printk(KERN_WARNING DRV_NAME ": combined mode detected\n"); + } + + return ata_pci_init_one(pdev, boards, n_boards); +} + +/** + * piix_init - + * + * LOCKING: + * + * RETURNS: + * + */ + +static int __init piix_init(void) +{ + int rc; + + DPRINTK("pci_module_init\n"); + rc = pci_module_init(&piix_pci_driver); + if (rc) + return rc; + + in_module_init = 0; + + DPRINTK("scsi_register_host\n"); + rc = scsi_register_module(MODULE_SCSI_HA, &piix_sht); + if (rc) { + rc = -ENODEV; + goto err_out; + } + + DPRINTK("done\n"); + return 0; + +err_out: + pci_unregister_driver(&piix_pci_driver); + return rc; +} + +/** + * piix_exit - + * + * LOCKING: + * + */ + +static void __exit piix_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &piix_sht); + pci_unregister_driver(&piix_pci_driver); +} + +module_init(piix_init); +module_exit(piix_exit); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/Config.in linux.22-ac2/drivers/scsi/Config.in --- linux.vanilla/drivers/scsi/Config.in 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/scsi/Config.in 2003-09-01 13:25:08.000000000 +0100 @@ -66,7 +66,15 @@ dep_tristate 'AdvanSys SCSI support' CONFIG_SCSI_ADVANSYS $CONFIG_SCSI dep_tristate 'Always IN2000 SCSI support' CONFIG_SCSI_IN2000 $CONFIG_SCSI dep_tristate 'AM53/79C974 PCI SCSI support' CONFIG_SCSI_AM53C974 $CONFIG_SCSI $CONFIG_PCI -dep_tristate 'AMI MegaRAID support' CONFIG_SCSI_MEGARAID $CONFIG_SCSI +dep_tristate 'AMI MegaRAID support (old driver)' CONFIG_SCSI_MEGARAID $CONFIG_SCSI +if [ "$CONFIG_SCSI_MEGARAID" != "y" ]; then + dep_tristate 'AMI MegaRAID support (new driver)' CONFIG_SCSI_MEGARAID2 $CONFIG_SCSI +fi + +dep_bool 'SATA support' CONFIG_SCSI_ATA $CONFIG_SCSI +#dep_bool ' Parallel ATA support' CONFIG_SCSI_ATA_PATA $CONFIG_SCSI_ATA +dep_tristate ' Intel PIIX/ICH support' CONFIG_SCSI_ATA_PIIX $CONFIG_SCSI_ATA $CONFIG_PCI +dep_tristate ' VIA SATA support' CONFIG_SCSI_SATA_VIA $CONFIG_SCSI_ATA $CONFIG_PCI dep_tristate 'BusLogic SCSI support' CONFIG_SCSI_BUSLOGIC $CONFIG_SCSI if [ "$CONFIG_SCSI_BUSLOGIC" != "n" ]; then diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/ips.c linux.22-ac2/drivers/scsi/ips.c --- linux.vanilla/drivers/scsi/ips.c 2003-06-14 00:11:36.000000000 +0100 +++ linux.22-ac2/drivers/scsi/ips.c 2003-06-16 22:04:16.000000000 +0100 @@ -5,8 +5,8 @@ /* Jack Hammer, Adaptec, Inc. */ /* David Jeffery, Adaptec, Inc. */ /* */ -/* Copyright (C) 2000 IBM Corporation */ -/* Copyright (C) 2003 Adaptec, Inc. */ +/* Copyright (C) 2000 IBM Corporation */ +/* Copyright (C) 2002,2003 Adaptec, Inc. 
*/ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ @@ -83,7 +83,7 @@ /* 2.3.18 and later */ /* - Sync with other changes from the 2.3 kernels */ /* 4.00.06 - Fix timeout with initial FFDC command */ -/* 4.00.06a - Port to 2.4 (trivial) -- Christoph Hellwig */ +/* 4.00.06a - Port to 2.4 (trivial) -- Christoph Hellwig */ /* 4.10.00 - Add support for ServeRAID 4M/4L */ /* 4.10.13 - Fix for dynamic unload and proc file system */ /* 4.20.03 - Rename version to coincide with new release schedules */ @@ -130,6 +130,7 @@ /* 5.10.15 - remove unused code (sem, macros, etc.) */ /* 5.30.00 - use __devexit_p() */ /* 6.00.00 - Add 6x Adapters and Battery Flash */ +/* 6.10.00 - Remove 1G Addressing Limitations */ /*****************************************************************************/ /* @@ -150,7 +151,7 @@ * nommap - Don't use memory mapped I/O * ioctlsize - Initial size of the IOCTL buffer */ - + #include #include #include @@ -182,75 +183,35 @@ #include #include -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - #include - #include -#else - #include -#endif +#include +#include #include #ifdef MODULE - static char *ips = NULL; - MODULE_PARM(ips, "s"); +static char *ips = NULL; +MODULE_PARM(ips, "s"); #endif /* * DRIVER_VER */ -#define IPS_VERSION_HIGH "6.00" -#define IPS_VERSION_LOW ".26 " - - -#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) -struct proc_dir_entry proc_scsi_ips = { - 0, - 3, "ips", - S_IFDIR | S_IRUGO | S_IXUGO, 2 -}; -#endif +#define IPS_VERSION_HIGH "6.10" +#define IPS_VERSION_LOW ".24 " #if !defined(__i386__) && !defined(__ia64__) - #error "This driver has only been tested on the x86/ia64 platforms" -#endif - -#if LINUX_VERSION_CODE < LinuxVersionCode(2,2,0) - #error "This driver only works with kernel 2.2.0 and later" -#elif LINUX_VERSION_CODE <= LinuxVersionCode(2,3,18) - #define dma_addr_t uint32_t - - static inline void *pci_alloc_consistent(struct pci_dev *dev,int size, - dma_addr_t *dmahandle) { - void * ptr = kmalloc(size, GFP_ATOMIC); - if(ptr){ - *dmahandle = (uint32_t)virt_to_bus(ptr); - } - return ptr; - } - - #define pci_free_consistent(a,size,address,dmahandle) kfree(address) - - #define pci_map_sg(a,b,n,z) (n) - #define pci_unmap_sg(a,b,c,d) - #define pci_map_single(a,b,c,d) ((uint32_t)virt_to_bus(b)) - #define pci_unmap_single(a,b,c,d) - #ifndef sg_dma_address - #define sg_dma_address(x) ((uint32_t)virt_to_bus((x)->address)) - #define sg_dma_len(x) ((x)->length) - #endif - #define pci_unregister_driver(x) +#error "This driver has only been tested on the x86/ia64 platforms" #endif #if LINUX_VERSION_CODE <= LinuxVersionCode(2,5,0) - #define IPS_SG_ADDRESS(sg) ((sg)->address) - #define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags) - #define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags) +#define IPS_SG_ADDRESS(sg) ((sg)->address) +#define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags) +#define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags) #else - #define IPS_SG_ADDRESS(sg) (page_address((sg)->page) ? \ +#define IPS_SG_ADDRESS(sg) (page_address((sg)->page) ? 
\ page_address((sg)->page)+(sg)->offset : 0) - #define IPS_LOCK_SAVE(lock,flags) spin_lock(lock) - #define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock(lock) +#define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0) +#define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0) #endif #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \ @@ -259,218 +220,144 @@ scsi_to_pci_dma_dir(scb->scsi_cmd->sc_data_direction)) #ifdef IPS_DEBUG - #define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n"); - #define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n"); - #define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v); +#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n"); +#define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n"); +#define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v); #else - #define METHOD_TRACE(s, i) - #define DEBUG(i, s) - #define DEBUG_VAR(i, s, v...) +#define METHOD_TRACE(s, i) +#define DEBUG(i, s) +#define DEBUG_VAR(i, s, v...) #endif /* * global variables */ -static const char ips_name[] = "ips"; -static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS]; /* Array of host controller structures */ -static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS]; /* Array of HA structures */ -static unsigned int ips_next_controller = 0; -static unsigned int ips_num_controllers = 0; -static unsigned int ips_released_controllers = 0; -static int ips_cmd_timeout = 60; -static int ips_reset_timeout = 60 * 5; -static int ips_force_memio = 1; /* Always use Memory Mapped I/O */ -static int ips_force_i2o = 1; /* Always use I2O command delivery */ -static int ips_ioctlsize = IPS_IOCTL_SIZE; /* Size of the ioctl buffer */ -static int ips_cd_boot = 0; /* Booting from Manager CD */ -static char *ips_FlashData = NULL; /* CD Boot - Flash Data Buffer */ -static long ips_FlashDataInUse = 0; /* CD Boot - Flash Data In Use Flag */ -static uint32_t MaxLiteCmds = 32; /* Max Active Cmds for a Lite Adapter */ +static const char ips_name[] = "ips"; +static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS]; /* Array of host controller structures */ +static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS]; /* Array of HA structures */ +static unsigned int ips_next_controller; +static unsigned int ips_num_controllers; +static unsigned int ips_released_controllers; +static int ips_cmd_timeout = 60; +static int ips_reset_timeout = 60 * 5; +static int ips_force_memio = 1; /* Always use Memory Mapped I/O */ +static int ips_force_i2o = 1; /* Always use I2O command delivery */ +static int ips_ioctlsize = IPS_IOCTL_SIZE; /* Size of the ioctl buffer */ +static int ips_cd_boot; /* Booting from Manager CD */ +static char *ips_FlashData = NULL; /* CD Boot - Flash Data Buffer */ +static long ips_FlashDataInUse; /* CD Boot - Flash Data In Use Flag */ +static uint32_t MaxLiteCmds = 32; /* Max Active Cmds for a Lite Adapter */ +static Scsi_Host_Template ips_driver_template = IPS; -IPS_DEFINE_COMPAT_TABLE( Compatable ); /* Version Compatability Table */ +IPS_DEFINE_COMPAT_TABLE(Compatable); /* Version Compatability Table */ - -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) /* This table describes all ServeRAID Adapters */ - static struct pci_device_id ips_pci_table[] __devinitdata = { - { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0, } - }; - - /* This table describes only 
Anaconda Family Adapters */ - static struct pci_device_id ips_pci_table_anaconda[] __devinitdata = { - { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0, } - }; - - /* This table describes only Sarasota ( ServeRAID 5i ) Adapters */ - static struct pci_device_id ips_pci_table_5i[] __devinitdata = { - { 0x1014, 0x01BD, PCI_ANY_ID, 0x259, 0, 0 }, - { 0x1014, 0x01BD, PCI_ANY_ID, 0x258, 0, 0 }, - { 0, } - }; - - /* This table describes only Sebring ( ServeRAID 6i ) Adapters */ - static struct pci_device_id ips_pci_table_6i[] __devinitdata = { - { 0x9005, 0x0250, PCI_ANY_ID, 0x28C, 0, 0 }, - { 0, } - }; - - /* This table describes all i960 ( 4M, 4Mx, 4L, 4Lx ) Adapters */ - static struct pci_device_id ips_pci_table_i960[] __devinitdata = { - { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0, } - }; - - /* This table describes all Adaptec ( 6M ) Adapters */ - static struct pci_device_id ips_pci_table_adaptec[] __devinitdata = { - { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, - { 0, } - }; - - MODULE_DEVICE_TABLE( pci, ips_pci_table ); - - static char ips_hot_plug_name[] = "ips"; - - static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent); - static void ips_remove_device(struct pci_dev *pci_dev); - - struct pci_driver ips_pci_driver = { - name: ips_hot_plug_name, - id_table: ips_pci_table, - probe: ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_anaconda = { - name: ips_hot_plug_name, - id_table: ips_pci_table_anaconda, - probe: ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_5i = { - name: ips_hot_plug_name, - id_table: ips_pci_table_5i, - probe: ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_6i = { - name: ips_hot_plug_name, - id_table: ips_pci_table_6i, - probe: ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_i960 = { - name: ips_hot_plug_name, - id_table: ips_pci_table_i960, - probe: ips_insert_device, - remove: ips_remove_device, - }; - - struct pci_driver ips_pci_driver_adaptec = { - name: ips_hot_plug_name, - id_table: ips_pci_table_adaptec, - probe: ips_insert_device, - remove: ips_remove_device, - }; +static struct pci_device_id ips_pci_table[] __devinitdata = { + {0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, + {0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, + {0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, + {0,} +}; -#endif +MODULE_DEVICE_TABLE(pci, ips_pci_table); + +static char ips_hot_plug_name[] = "ips"; + +static int __devinit ips_insert_device(struct pci_dev *pci_dev, + const struct pci_device_id *ent); +static void ips_remove_device(struct pci_dev *pci_dev); + +struct pci_driver ips_pci_driver = { + .name = ips_hot_plug_name, + .id_table = ips_pci_table, + .probe = ips_insert_device, + .remove = ips_remove_device, +}; /* * Necessary forward function protoypes */ static int ips_halt(struct notifier_block *nb, ulong event, void *buf); -#define MAX_ADAPTER_NAME 11 +#define MAX_ADAPTER_NAME 15 static char ips_adapter_name[][30] = { - "ServeRAID", - "ServeRAID II", - "ServeRAID on motherboard", - "ServeRAID on motherboard", - "ServeRAID 3H", - "ServeRAID 3L", - "ServeRAID 4H", - "ServeRAID 4M", - "ServeRAID 4L", - "ServeRAID 4Mx", - "ServeRAID 4Lx", - "ServeRAID 5i", - "ServeRAID 5i", - "ServeRAID 6M", - "ServeRAID 6i" + "ServeRAID", + "ServeRAID II", + "ServeRAID on motherboard", + "ServeRAID on motherboard", + "ServeRAID 3H", + "ServeRAID 3L", + "ServeRAID 
4H", + "ServeRAID 4M", + "ServeRAID 4L", + "ServeRAID 4Mx", + "ServeRAID 4Lx", + "ServeRAID 5i", + "ServeRAID 5i", + "ServeRAID 6M", + "ServeRAID 6i" }; -/* Init State 0 means we're only looking for a device to provide us the BIOS Adapter Ordering Table */ -/* Init State 1 is when we are actually enumerating the devices. */ -static int InitState; -/* IF BIOS wants to tell us the enumeration order, it puts a table in NVRAM Page 5 */ -static uint8_t AdapterOrder[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - static struct notifier_block ips_notifier = { - ips_halt, NULL, 0 + ips_halt, NULL, 0 }; /* * Direction table */ static char ips_command_direction[] = { -IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, -IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, -IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, -IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT, -IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, -IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN, -IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK, -IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, -IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, -IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, -IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, -IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE, -IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, -IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE, -IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT, -IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE, -IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, 
IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, -IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK + IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, + IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, + IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT, + IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, + IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN, + IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, + IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, + IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, + IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE, + IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, + IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE, + IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT, + IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE, + IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, 
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, + IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK }; /* @@ -480,9 +367,9 @@ int ips_release(struct Scsi_Host *); int ips_eh_abort(Scsi_Cmnd *); int ips_eh_reset(Scsi_Cmnd *); -int ips_queue(Scsi_Cmnd *, void (*) (Scsi_Cmnd *)); +int ips_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *)); int ips_biosparam(Disk *, kdev_t, int *); -const char * ips_info(struct Scsi_Host *); +const char *ips_info(struct Scsi_Host *); void do_ipsintr(int, void *, struct pt_regs *); static int ips_hainit(ips_ha_t *); static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *); @@ -527,10 +414,9 @@ static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *); -static void ips_free_flash_copperhead(ips_ha_t *ha); +static void ips_free_flash_copperhead(ips_ha_t * ha); static void ips_get_bios_version(ips_ha_t *, int); static void ips_identify_controller(ips_ha_t *); -static void ips_select_queue_depth(struct Scsi_Host *, Scsi_Device *); static void ips_chkstatus(ips_ha_t *, IPS_STATUS *); static void ips_enable_int_copperhead(ips_ha_t *); static void ips_enable_int_copperhead_memio(ips_ha_t *); @@ -553,40 +439,43 @@ static uint32_t ips_statupd_copperhead(ips_ha_t *); static uint32_t ips_statupd_copperhead_memio(ips_ha_t *); static uint32_t ips_statupd_morpheus(ips_ha_t *); -static ips_scb_t * ips_getscb(ips_ha_t *); +static ips_scb_t *ips_getscb(ips_ha_t *); static inline void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *); static inline void ips_putq_scb_tail(ips_scb_queue_t *, ips_scb_t *); static inline void ips_putq_wait_head(ips_wait_queue_t *, Scsi_Cmnd *); static inline void ips_putq_wait_tail(ips_wait_queue_t *, Scsi_Cmnd *); -static inline void ips_putq_copp_head(ips_copp_queue_t *, ips_copp_wait_item_t *); -static inline void ips_putq_copp_tail(ips_copp_queue_t *, ips_copp_wait_item_t *); -static inline ips_scb_t * ips_removeq_scb_head(ips_scb_queue_t *); -static inline ips_scb_t * ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *); -static inline Scsi_Cmnd * ips_removeq_wait_head(ips_wait_queue_t *); -static inline Scsi_Cmnd * ips_removeq_wait(ips_wait_queue_t *, Scsi_Cmnd *); -static inline ips_copp_wait_item_t * ips_removeq_copp(ips_copp_queue_t *, ips_copp_wait_item_t *); -static inline ips_copp_wait_item_t * ips_removeq_copp_head(ips_copp_queue_t *); +static inline void ips_putq_copp_head(ips_copp_queue_t *, + ips_copp_wait_item_t *); +static inline void ips_putq_copp_tail(ips_copp_queue_t *, + ips_copp_wait_item_t *); +static inline ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *); +static inline ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *); +static 
inline Scsi_Cmnd *ips_removeq_wait_head(ips_wait_queue_t *); +static inline Scsi_Cmnd *ips_removeq_wait(ips_wait_queue_t *, Scsi_Cmnd *); +static inline ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *, + ips_copp_wait_item_t *); +static inline ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *); static int ips_is_passthru(Scsi_Cmnd *); static int ips_make_passthru(ips_ha_t *, Scsi_Cmnd *, ips_scb_t *, int); static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *); +static void ips_scmd_buf_write(Scsi_Cmnd * scmd, void *data, + unsigned int count); +static void ips_scmd_buf_read(Scsi_Cmnd * scmd, void *data, unsigned int count); -int ips_proc_info(char *, char **, off_t, int, int, int); +int ips_proc_info(char *, char **, off_t, int, int, int); static int ips_host_info(ips_ha_t *, char *, off_t, int); static void copy_mem_info(IPS_INFOSTR *, char *, int); static int copy_info(IPS_INFOSTR *, char *, ...); -static int ips_get_version_info(ips_ha_t *ha, IPS_VERSION_DATA *Buffer, int intr ); -static void ips_version_check(ips_ha_t *ha, int intr); -static int ips_abort_init(ips_ha_t *ha, struct Scsi_Host *sh, int index); -static int ips_init_phase2( int index ); - -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) -static int ips_init_phase1( struct pci_dev *pci_dev, int *indexPtr ); -#else -static int ips_init_oldphase1(Scsi_Host_Template *SHT); -#endif +static int ips_get_version_info(ips_ha_t * ha, IPS_VERSION_DATA * Buffer, + int intr); +static void ips_version_check(ips_ha_t * ha, int intr); +static int ips_abort_init(ips_ha_t * ha, int index); +static int ips_init_phase2(int index); +static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr); +static int ips_register_scsi(int index); /*--------------------------------------------------------------------------*/ /* Exported Functions */ /*--------------------------------------------------------------------------*/ @@ -600,92 +489,52 @@ /* setup parameters to the driver */ /* */ /****************************************************************************/ -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) static int -ips_setup(char *ips_str) { -#else -void -ips_setup(char *ips_str, int *dummy) { -#endif - - int i; - char *key; - char *value; - IPS_OPTION options[] = { - {"noi2o", &ips_force_i2o, 0}, - {"nommap", &ips_force_memio, 0}, - {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE}, - {"cdboot", &ips_cd_boot, 0}, - {"maxcmds", &MaxLiteCmds, 32}, - }; - - /* Don't use strtok() anymore ( if 2.4 Kernel or beyond ) */ -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - /* Search for value */ - while ((key = strsep(&ips_str, ",."))) { - if (!*key) - continue; - value = strchr(key, ':'); - if (value) - *value++ = '\0'; - /* - * We now have key/value pairs. - * Update the variables - */ - for (i = 0; i < (sizeof(options) / sizeof(options[0])); i++) { - if (strnicmp(key, options[i].option_name, strlen(options[i].option_name)) == 0) { - if (value) - *options[i].option_flag = simple_strtoul(value, NULL, 0); - else - *options[i].option_flag = options[i].option_value; - break; - } - } - } - - return (1); - -#else - - char *p; - char tokens[3] = {',', '.', 0}; - - for (key = strtok(ips_str, tokens); key; key = strtok(NULL, tokens)) { - p = key; - - /* Search for value */ - while ((p) && (*p != ':')) - p++; - - if (p) { - *p = '\0'; - value = p+1; - } else - value = NULL; - - /* - * We now have key/value pairs. 
- * Update the variables - */ - for (i = 0; i < (sizeof(options) / sizeof(options[0])); i++) { - if (strnicmp(key, options[i].option_name, strlen(ips_str)) == 0) { - if (value) - *options[i].option_flag = simple_strtoul(value, NULL, 0); - else - *options[i].option_flag = options[i].option_value; - - break; - } - } - } +ips_setup(char *ips_str) +{ -#endif + int i; + char *key; + char *value; + IPS_OPTION options[] = { + {"noi2o", &ips_force_i2o, 0}, + {"nommap", &ips_force_memio, 0}, + {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE}, + {"cdboot", &ips_cd_boot, 0}, + {"maxcmds", &MaxLiteCmds, 32}, + }; + + /* Don't use strtok() anymore ( if 2.4 Kernel or beyond ) */ + /* Search for value */ + while ((key = strsep(&ips_str, ",."))) { + if (!*key) + continue; + value = strchr(key, ':'); + if (value) + *value++ = '\0'; + /* + * We now have key/value pairs. + * Update the variables + */ + for (i = 0; i < (sizeof (options) / sizeof (options[0])); i++) { + if (strnicmp + (key, options[i].option_name, + strlen(options[i].option_name)) == 0) { + if (value) + *options[i].option_flag = + simple_strtoul(value, NULL, 0); + else + *options[i].option_flag = + options[i].option_value; + break; + } + } + } + return (1); } -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) __setup("ips=", ips_setup); -#endif /****************************************************************************/ /* */ @@ -699,682 +548,97 @@ /* */ /****************************************************************************/ int -ips_detect(Scsi_Host_Template *SHT) { -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - int i; -#endif +ips_detect(Scsi_Host_Template * SHT) +{ + int i; - METHOD_TRACE("ips_detect", 1); + METHOD_TRACE("ips_detect", 1); #ifdef MODULE - if (ips) -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - ips_setup(ips); -#else - ips_setup(ips, NULL); -#endif -#endif - - /* If Booting from the Manager CD, Allocate a large Flash */ - /* Buffer ( so we won't need to allocate one for each adapter ). */ - if ( ips_cd_boot ) { - ips_FlashData = ( char * ) __get_free_pages( GFP_KERNEL, 7 ); - if (ips_FlashData == NULL) { - /* The validity of this pointer is checked in ips_make_passthru() before it is used */ - printk( KERN_WARNING "ERROR: Can't Allocate Large Buffer for Flashing\n" ); - } - } - /* initalize number of controllers */ - ips_num_controllers = 0; - ips_next_controller = 0; - ips_released_controllers = 0; - - if (!pci_present()) - return (0); - -/**********************************************************************************/ -/* For Kernel Versions 2.4 or greater, use new PCI ( Hot Pluggable ) architecture */ -/**********************************************************************************/ - -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - #if LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) - spin_unlock_irq(&io_request_lock); - #endif - SHT->proc_info = ips_proc_info; - SHT->proc_name = "ips"; - - /* There are several special cases ( which are too complicated to enumerate here ) where, due */ - /* to System BIOS rules, the adapters must be enumerated in a certain order. If ServeRAID */ - /* BIOS tells us the order, then we will follow it. The first pass at init is simply to be */ - /* able to communicate with the first adapter to see if BIOS is telling us the order. */ - /* This does not apply to ia64 EFI BIOS. 
*/ - -#if !defined(__ia64__) - InitState = 0; - pci_module_init(&ips_pci_driver); /* Look for Any Adapter, to fill in the Adapter Order Table */ + if (ips) + ips_setup(ips); #endif - InitState = 1; - - if ( AdapterOrder[0] ) { - /* BIOS has dictated the order that we should enumerate Adapters */ - for ( i = 1; i <= AdapterOrder[0]; i++ ) { - switch (AdapterOrder[i]) { - case 'M': - pci_module_init(&ips_pci_driver_adaptec); /* Ask for Adaptec Adapters */ - break; - case 'S': - pci_module_init(&ips_pci_driver_5i); /* Ask for 5i Adapters */ - pci_module_init(&ips_pci_driver_6i); /* Ask for 6i Adapters */ - break; - case 'N': - pci_module_init(&ips_pci_driver_i960); /* Ask for i960 Adapters */ - break; - case 'A': - pci_module_init(&ips_pci_driver_anaconda); /* Ask for Anaconda Family Adapters */ - break; - default: - i = AdapterOrder[0] + 1; /* Premature End of List - Ensure Loop Ends */ - break; - } - } - } - else { - /* No Adapter Order Table from BIOS, so sort things the old-fashioned way */ - - /* By definition, an Internal ( 5i or 6i ) Adapter MUST be enumerated first */ - /* or the server may not boot properly. The adapters must be enumerated in */ - /* exactly the same order as BIOS for the machine to come up properly. */ - /* NOTE: There will never be both a 5i and a 6i in the same machine. */ - - pci_module_init(&ips_pci_driver_5i); /* Ask for 5i Adapters First */ - if (ips_num_controllers) { /* If there is a 5i Adapter */ - pci_module_init(&ips_pci_driver_i960); /* Get all i960's next */ - } - else { - pci_module_init(&ips_pci_driver_6i); /* Ask if any 6i Adapters */ - if (ips_num_controllers) /* If there is a 6i Adapter */ - pci_module_init(&ips_pci_driver_adaptec); /* Get all Adaptecs next */ - } - - pci_module_init(&ips_pci_driver); /* Get all remaining Adapters */ - /* ( in normal BUS order ) */ - } - - #if LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) - spin_lock_irq(&io_request_lock); - #endif - if (ips_num_controllers > 0) - register_reboot_notifier(&ips_notifier); - - return (ips_num_controllers); -#else - InitState = 1; - SHT->proc_info = ips_proc_info; - SHT->proc_dir = &proc_scsi_ips; - return ips_init_oldphase1(SHT); -#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) */ - -} - -#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) - -/***********************************************************************************/ -/* Sort the Device Structures */ -/* Devices are sorted by groups ( type ) and then PCI address within each group */ -/* This also results in the same ordering that results when using the 2.4 kernel */ -/* architecture for initialization. 
*/ -/***********************************************************************************/ - -static void -ips_sort_controllers(struct pci_dev *dev[]) { - struct pci_dev *tempdev[IPS_MAX_ADAPTERS]; - struct pci_dev *lowestdev; - int i, j; - int temp_index = 0; /* Index into tempdev[] array */ - int lowIndex = 0; - int newlowflag = 0; /* Flag to indicate when a new low address has been found */ - uint16_t subdevice_id; - unsigned char BusNumber; - - /* Clear the Temporary Dev Structure */ - for (i = 0; i < IPS_MAX_ADAPTERS; i++) - tempdev[i] = NULL; - - /* The Outer Loop goes thru each Adapter Type Supported */ - for (j = 0; j < IPS_MAX_ADAPTER_TYPES; j++) { - lowestdev = NULL; - /* The Inner Loop Checks each Device still in the List and */ - /* Finds the lowset adapter left ( by PCI boot order ) */ - for (i = 0; i < IPS_MAX_ADAPTERS; i++) { - if (dev[i]) { - if (lowestdev == NULL) { /* If this is the first one found, it must be the lowest ! */ - lowestdev = dev[i]; - lowIndex = i; - } - - /* If you find a Sarasota ( 5i ), it must always be treated as the first adapter */ - if (dev[i]->device == IPS_DEVICEID_MORPHEUS) { - pci_read_config_word(dev[i], PCI_SUBSYSTEM_ID, &subdevice_id); - if ((subdevice_id == IPS_SUBDEVICEID_5I1) || - (subdevice_id == IPS_SUBDEVICEID_5I2)) { - lowestdev = dev[i]; - lowIndex = i; - break; - } - } - - /* If you find a Sebring ( 6i ), it must always be treated as the first adapter */ - if (dev[i]->device == IPS_DEVICEID_MARCO) { - pci_read_config_word(dev[i], PCI_SUBSYSTEM_ID, &subdevice_id); - if (subdevice_id == IPS_SUBDEVICEID_6I) { - lowestdev = dev[i]; - lowIndex = i; - break; - } - } - - /* Determine if this device is at a lower PCI address than the current lowest device */ - newlowflag = 0; - - if (dev[i]->device == IPS_DEVICEID_MARCO) /* System BIOS adds 1 to Marco Bus Number */ - BusNumber = ( dev[i]->bus->number ) - 1; /* because of Bridge Chip */ - else - BusNumber = dev[i]->bus->number; - - if (BusNumber < lowestdev->bus->number) /* IF a lower BUS # */ - newlowflag = i; - - if ((BusNumber == lowestdev->bus->number) && /* If Same Bus #, but a lower device # */ - (dev[i]->devfn < lowestdev->devfn)) - newlowflag = i; - - if ( newlowflag ) { - lowestdev = dev[i]; - lowIndex = i; - } - } - } - - if (lowestdev) { /* If we found another adapter */ - tempdev[temp_index] = lowestdev; /* Add it in the list */ - dev[lowIndex] = NULL; /* Null it out so we don't find it again */ - temp_index++; - /* Now get all the adapters that are the same type as the low one . */ - /* They will already be in order, so they don't need any further sorting.*/ - for (i = 0; i < IPS_MAX_ADAPTERS; i++) { - if (dev[i]) { - if (dev[i]->device == lowestdev->device) { - tempdev[temp_index] = dev[i]; /* Add the same type adapter to the list */ - temp_index++; - dev[i] = NULL; /* Null it out so we don't find it again */ - } - } - } - } - } - - /* Copy the Sorted Adapter Pointers ( tempdev[] ) to the Original Structure */ - for (i = 0; i < IPS_MAX_ADAPTERS; i++) - dev[i] = tempdev[i]; + /* If Booting from the Manager CD, Allocate a large Flash */ + /* Buffer ( so we won't need to allocate one for each adapter ). 
*/ + if (ips_cd_boot) { + ips_FlashData = (char *) __get_free_pages(IPS_INIT_GFP, 7); + if (ips_FlashData == NULL) { + /* The validity of this pointer is checked in ips_make_passthru() before it is used */ + printk(KERN_WARNING + "ERROR: Can't Allocate Large Buffer for Flashing\n"); + } + } + + SHT->proc_info = ips_proc_info; + SHT->proc_name = "ips"; + + for (i = 0; i < ips_num_controllers; i++) { + if (ips_register_scsi(i)) + ips_free(ips_ha[i]); + ips_released_controllers++; + } + return (ips_num_controllers); } /****************************************************************************/ -/* Detect and initialize the driver for 2.2 kernels */ -/* */ -/* NOTE: this routine is called under the io_request_lock spinlock */ -/****************************************************************************/ -static int ips_init_oldphase1(Scsi_Host_Template *SHT){ - struct Scsi_Host *sh; - ips_ha_t *ha; - uint32_t io_addr; - uint32_t mem_addr; - uint32_t io_len; - uint32_t mem_len; - uint16_t planer; - uint8_t revision_id; - uint8_t bus; - uint8_t func; - uint8_t irq; - uint16_t subdevice_id; - int i; - int j; - uint32_t count; - char *ioremap_ptr; - char *mem_ptr; - struct pci_dev *dev[IPS_MAX_ADAPTERS]; - dma_addr_t dma_address; - uint32_t currbar; - uint32_t maskbar; - uint8_t barnum; - uint32_t IsDead; - - METHOD_TRACE("ips_init_oldphase1", 1); - - for ( i = 0; i < IPS_MAX_ADAPTERS; i++ ) - dev[i] = NULL; - - /* Find all the adapters that we support and save them in the dev[] structure */ - i = 0; - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_MORPHEUS, NULL); - while ( dev[i] ) { - i++; - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_MORPHEUS, dev[i-1]); - } - - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_COPPERHEAD, NULL); - while ( dev[i] ) { - i++; - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_COPPERHEAD, dev[i-1]); - } - - dev[i] = pci_find_device(IPS_VENDORID_ADAPTEC, IPS_DEVICEID_MARCO, NULL); - while ( dev[i] ) { - i++; - dev[i] = pci_find_device(IPS_VENDORID_IBM, IPS_DEVICEID_MARCO, dev[i-1]); - } - - /* Sort the Adapters */ - if ( dev[0] ) - ips_sort_controllers( dev ); - else - return (0); - - /* Now scan and Initialize the controllers */ - for ( i = 0; i < IPS_MAX_ADAPTERS; i++ ) { - if (!dev[i]) - break; - - if (ips_next_controller >= IPS_MAX_ADAPTERS) - break; - - /* stuff that we get in dev */ - irq = dev[i]->irq; - bus = dev[i]->bus->number; - func = dev[i]->devfn; - - /* Init MEM/IO addresses to 0 */ - mem_addr = 0; - io_addr = 0; - mem_len = 0; - io_len = 0; - - for (j = 0; j < 2; j++) { - if (!dev[i]->base_address[j]) - break; - - if ((dev[i]->base_address[j] & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { - barnum = PCI_BASE_ADDRESS_0 + (j * 4); - io_addr = dev[i]->base_address[j] & PCI_BASE_ADDRESS_IO_MASK; - - /* Get Size */ - pci_read_config_dword(dev[i], barnum, &currbar); - pci_write_config_dword(dev[i], barnum, ~0); - pci_read_config_dword(dev[i], barnum, &maskbar); - pci_write_config_dword(dev[i], barnum, currbar); - - io_len = ~(maskbar & PCI_BASE_ADDRESS_IO_MASK) + 1; - } else { - barnum = PCI_BASE_ADDRESS_0 + (j * 4); - mem_addr = dev[i]->base_address[j] & PCI_BASE_ADDRESS_MEM_MASK; - - /* Get Size */ - pci_read_config_dword(dev[i], barnum, &currbar); - pci_write_config_dword(dev[i], barnum, ~0); - pci_read_config_dword(dev[i], barnum, &maskbar); - pci_write_config_dword(dev[i], barnum, currbar); - - mem_len = ~(maskbar & PCI_BASE_ADDRESS_MEM_MASK) + 1; - } - } - - /* setup memory mapped area (if applicable) 
*/ - if (mem_addr) { - uint32_t base; - uint32_t offs; - - DEBUG_VAR(1, "(%s%d) detect, Memory region %x, size: %d", - ips_name, ips_next_controller, mem_addr, mem_len); - - base = mem_addr & PAGE_MASK; - offs = mem_addr - base; - - ioremap_ptr = ioremap(base, PAGE_SIZE); - mem_ptr = ioremap_ptr + offs; - } else { - ioremap_ptr = NULL; - mem_ptr = NULL; - } - - /* setup I/O mapped area (if applicable) */ - if (io_addr) { - DEBUG_VAR(1, "(%s%d) detect, IO region %x, size: %d", - ips_name, ips_next_controller, io_addr, io_len); - - if (check_region(io_addr, io_len)) { - /* Couldn't allocate io space */ - printk(KERN_WARNING "(%s%d) couldn't allocate IO space %x len %d.\n", - ips_name, ips_next_controller, io_addr, io_len); - - ips_next_controller++; - - continue; - } - - request_region(io_addr, io_len, "ips"); - } - - /* get planer status */ - if (pci_read_config_word(dev[i], 0x04, &planer)) { - printk(KERN_WARNING "(%s%d) can't get planer status.\n", - ips_name, ips_next_controller); - - ips_next_controller++; - - continue; - } - - /* check to see if an onboard planer controller is disabled */ - if (!(planer & 0x000C)) { - - DEBUG_VAR(1, "(%s%d) detect, Onboard controller disabled by BIOS", - ips_name, ips_next_controller); - - ips_next_controller++; - - continue; - } - - DEBUG_VAR(1, "(%s%d) detect bus %d, func %x, irq %d, io %x, mem: %x, ptr: %p", - ips_name, ips_next_controller, bus, func, irq, io_addr, mem_addr, mem_ptr); - - /* get the revision ID */ - if (pci_read_config_byte(dev[i], PCI_REVISION_ID, &revision_id)) { - printk(KERN_WARNING "(%s%d) can't get revision id.\n", - ips_name, ips_next_controller); - - ips_next_controller++; - continue; - } - - /* get the subdevice id */ - if (pci_read_config_word(dev[i], PCI_SUBSYSTEM_ID, &subdevice_id)) { - printk(KERN_WARNING "(%s%d) can't get subdevice id.\n", - ips_name, ips_next_controller); - - ips_next_controller++; - - continue; - } - - /* found a controller */ - sh = scsi_register(SHT, sizeof(ips_ha_t)); - - if (sh == NULL) { - printk(KERN_WARNING "(%s%d) Unable to register controller with SCSI subsystem - skipping controller\n", - ips_name, ips_next_controller); - - ips_next_controller++; - - continue; - } - - ha = IPS_HA(sh); - memset(ha, 0, sizeof(ips_ha_t)); - - ips_sh[ips_next_controller] = sh; - ips_ha[ips_next_controller] = ha; - ips_num_controllers++; - ha->active = 1; - - ha->enq = kmalloc(sizeof(IPS_ENQ), GFP_KERNEL); - - if (!ha->enq) { - printk(KERN_WARNING "(%s%d) Unable to allocate host inquiry structure - skipping contoller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - ha->adapt = pci_alloc_consistent(dev[i], sizeof(IPS_ADAPTER) + sizeof(IPS_IO_CMD), &dma_address); - - if (!ha->adapt) { - printk(KERN_WARNING "(%s%d) Unable to allocate host adapt and dummy structure - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - ha->adapt->hw_status_start = dma_address; - ha->dummy = (void *)ha->adapt + 1; - - ha->conf = kmalloc(sizeof(IPS_CONF), GFP_KERNEL); - - if (!ha->conf) { - printk(KERN_WARNING "(%s%d) Unable to allocate host conf structure - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - ha->nvram = kmalloc(sizeof(IPS_NVRAM_P5), GFP_KERNEL); - - if (!ha->nvram) { - 
printk(KERN_WARNING "(%s%d) Unable to allocate host nvram structure - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - ha->subsys = kmalloc(sizeof(IPS_SUBSYS), GFP_KERNEL); - - if (!ha->subsys) { - printk(KERN_WARNING "(%s%d) Unable to allocate host subsystem structure - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - for (count = PAGE_SIZE, ha->ioctl_order = 0; - count < ips_ioctlsize; - ha->ioctl_order++, count <<= 1); - - ha->ioctl_data = (char *) __get_free_pages(GFP_KERNEL, ha->ioctl_order); - ha->ioctl_datasize = count; - - if (!ha->ioctl_data) { - printk(KERN_WARNING "(%s%d) Unable to allocate ioctl data\n", - ips_name, ips_next_controller); - - ha->ioctl_data = NULL; - ha->ioctl_order = 0; - ha->ioctl_datasize = 0; - } - - /* Store away needed values for later use */ - sh->io_port = io_addr; - sh->n_io_port = io_addr ? 255 : 0; - sh->unique_id = (io_addr) ? io_addr : mem_addr; - sh->irq = irq; - sh->select_queue_depths = ips_select_queue_depth; - sh->sg_tablesize = sh->hostt->sg_tablesize; - sh->can_queue = sh->hostt->can_queue; - sh->cmd_per_lun = sh->hostt->cmd_per_lun; - sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; - sh->use_clustering = sh->hostt->use_clustering; - - sh->wish_block = FALSE; - - /* Store info in HA structure */ - ha->irq = irq; - ha->io_addr = io_addr; - ha->io_len = io_len; - ha->mem_addr = mem_addr; - ha->mem_len = mem_len; - ha->mem_ptr = mem_ptr; - ha->ioremap_ptr = ioremap_ptr; - ha->host_num = ips_next_controller; - ha->revision_id = revision_id; - ha->slot_num = PCI_SLOT(dev[i]->devfn); - ha->device_id = dev[i]->device; - ha->subdevice_id = subdevice_id; - ha->pcidev = dev[i]; - - /* - * Setup Functions - */ - ips_setup_funclist(ha); - - /* If Morpheus appears dead, reset it */ - if ( ( IPS_IS_MORPHEUS( ha ) ) || ( IPS_IS_MARCO( ha ) ) ) { - IsDead = readl( ha->mem_ptr + IPS_REG_I960_MSG1 ); - if ( IsDead == 0xDEADBEEF ) { - ips_reset_morpheus( ha ); - } - } - - /* - * Initialize the card if it isn't already - */ - - if (!(*ha->func.isinit)(ha)) { - if (!(*ha->func.init)(ha)) { - /* - * Initialization failed - */ - printk(KERN_WARNING "(%s%d) unable to initialize controller - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - } - - /* install the interrupt handler */ - if (request_irq(irq, do_ipsintr, SA_SHIRQ, ips_name, ha)) { - printk(KERN_WARNING "(%s%d) unable to install interrupt handler - skipping controller\n", - ips_name, ips_next_controller); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - /* - * Allocate a temporary SCB for initialization - */ - ha->max_cmds = 1; - if (!ips_allocatescbs(ha)) { - /* couldn't allocate a temp SCB */ - printk(KERN_WARNING "(%s%d) unable to allocate CCBs - skipping contoller\n", - ips_name, ips_next_controller); - free_irq(ha->irq, ha); - ips_abort_init(ha, sh, ips_next_controller); - ips_next_controller++; - ips_num_controllers--; - - continue; - } - - ips_next_controller++; - } - - /* - * Do Phase 2 Initialization - * Controller init - */ - for (i = 0; i < ips_next_controller; i++) { - - if (ips_ha[i] == 0) { - printk(KERN_WARNING "(%s%d) ignoring bad 
controller\n", ips_name, i); - continue; - } - - if (ips_init_phase2(i) != SUCCESS) - ips_num_controllers--; - - } - - if (ips_num_controllers > 0) - register_reboot_notifier(&ips_notifier); - - return (ips_num_controllers); -} -#endif - -/****************************************************************************/ /* configure the function pointers to use the functions that will work */ /* with the found version of the adapter */ /****************************************************************************/ -static void ips_setup_funclist(ips_ha_t *ha){ +static void +ips_setup_funclist(ips_ha_t * ha) +{ - /* - * Setup Functions - */ - if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) { - /* morpheus / marco / sebring */ - ha->func.isintr = ips_isintr_morpheus; - ha->func.isinit = ips_isinit_morpheus; - ha->func.issue = ips_issue_i2o_memio; - ha->func.init = ips_init_morpheus; - ha->func.statupd = ips_statupd_morpheus; - ha->func.reset = ips_reset_morpheus; - ha->func.intr = ips_intr_morpheus; - ha->func.enableint = ips_enable_int_morpheus; - } else if (IPS_USE_MEMIO(ha)) { - /* copperhead w/MEMIO */ - ha->func.isintr = ips_isintr_copperhead_memio; - ha->func.isinit = ips_isinit_copperhead_memio; - ha->func.init = ips_init_copperhead_memio; - ha->func.statupd = ips_statupd_copperhead_memio; - ha->func.statinit = ips_statinit_memio; - ha->func.reset = ips_reset_copperhead_memio; - ha->func.intr = ips_intr_copperhead; - ha->func.erasebios = ips_erase_bios_memio; - ha->func.programbios = ips_program_bios_memio; - ha->func.verifybios = ips_verify_bios_memio; - ha->func.enableint = ips_enable_int_copperhead_memio; - if (IPS_USE_I2O_DELIVER(ha)) - ha->func.issue = ips_issue_i2o_memio; - else - ha->func.issue = ips_issue_copperhead_memio; - } else { - /* copperhead */ - ha->func.isintr = ips_isintr_copperhead; - ha->func.isinit = ips_isinit_copperhead; - ha->func.init = ips_init_copperhead; - ha->func.statupd = ips_statupd_copperhead; - ha->func.statinit = ips_statinit; - ha->func.reset = ips_reset_copperhead; - ha->func.intr = ips_intr_copperhead; - ha->func.erasebios = ips_erase_bios; - ha->func.programbios = ips_program_bios; - ha->func.verifybios = ips_verify_bios; - ha->func.enableint = ips_enable_int_copperhead; - - if (IPS_USE_I2O_DELIVER(ha)) - ha->func.issue = ips_issue_i2o; - else - ha->func.issue = ips_issue_copperhead; - } + /* + * Setup Functions + */ + if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) { + /* morpheus / marco / sebring */ + ha->func.isintr = ips_isintr_morpheus; + ha->func.isinit = ips_isinit_morpheus; + ha->func.issue = ips_issue_i2o_memio; + ha->func.init = ips_init_morpheus; + ha->func.statupd = ips_statupd_morpheus; + ha->func.reset = ips_reset_morpheus; + ha->func.intr = ips_intr_morpheus; + ha->func.enableint = ips_enable_int_morpheus; + } else if (IPS_USE_MEMIO(ha)) { + /* copperhead w/MEMIO */ + ha->func.isintr = ips_isintr_copperhead_memio; + ha->func.isinit = ips_isinit_copperhead_memio; + ha->func.init = ips_init_copperhead_memio; + ha->func.statupd = ips_statupd_copperhead_memio; + ha->func.statinit = ips_statinit_memio; + ha->func.reset = ips_reset_copperhead_memio; + ha->func.intr = ips_intr_copperhead; + ha->func.erasebios = ips_erase_bios_memio; + ha->func.programbios = ips_program_bios_memio; + ha->func.verifybios = ips_verify_bios_memio; + ha->func.enableint = ips_enable_int_copperhead_memio; + if (IPS_USE_I2O_DELIVER(ha)) + ha->func.issue = ips_issue_i2o_memio; + else + ha->func.issue = ips_issue_copperhead_memio; + } else { + /* copperhead */ + 
ha->func.isintr = ips_isintr_copperhead; + ha->func.isinit = ips_isinit_copperhead; + ha->func.init = ips_init_copperhead; + ha->func.statupd = ips_statupd_copperhead; + ha->func.statinit = ips_statinit; + ha->func.reset = ips_reset_copperhead; + ha->func.intr = ips_intr_copperhead; + ha->func.erasebios = ips_erase_bios; + ha->func.programbios = ips_program_bios; + ha->func.verifybios = ips_verify_bios; + ha->func.enableint = ips_enable_int_copperhead; + + if (IPS_USE_I2O_DELIVER(ha)) + ha->func.issue = ips_issue_i2o; + else + ha->func.issue = ips_issue_copperhead; + } } /****************************************************************************/ @@ -1387,79 +651,72 @@ /* */ /****************************************************************************/ int -ips_release(struct Scsi_Host *sh) { - ips_scb_t *scb; - ips_ha_t *ha; - int i; - - METHOD_TRACE("ips_release", 1); - - for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++); - - if (i == IPS_MAX_ADAPTERS) { - printk(KERN_WARNING "(%s) release, invalid Scsi_Host pointer.\n", - ips_name); -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - BUG(); -#endif - return (FALSE); - } +ips_release(struct Scsi_Host *sh) +{ + ips_scb_t *scb; + ips_ha_t *ha; + int i; - ha = IPS_HA(sh); + METHOD_TRACE("ips_release", 1); - if (!ha) - return (FALSE); + for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ; - /* flush the cache on the controller */ - scb = &ha->scbs[ha->max_cmds-1]; + if (i == IPS_MAX_ADAPTERS) { + printk(KERN_WARNING + "(%s) release, invalid Scsi_Host pointer.\n", ips_name); + BUG(); + return (FALSE); + } - ips_init_scb(ha, scb); + ha = IPS_HA(sh); - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_FLUSH; + if (!ha) + return (FALSE); - scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; - scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flush_cache.state = IPS_NORM_STATE; - scb->cmd.flush_cache.reserved = 0; - scb->cmd.flush_cache.reserved2 = 0; - scb->cmd.flush_cache.reserved3 = 0; - scb->cmd.flush_cache.reserved4 = 0; + /* flush the cache on the controller */ + scb = &ha->scbs[ha->max_cmds - 1]; - if (InitState != 0) /* If Not just Searching for the Adapter Order Table */ - printk(KERN_NOTICE "(%s%d) Flushing Cache.\n", ips_name, ha->host_num); + ips_init_scb(ha, scb); - /* send command */ - if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE) - printk(KERN_NOTICE "(%s%d) Incomplete Flush.\n", ips_name, ha->host_num); + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; - if (InitState != 0) /* If Not just Searching for the Adapter Order Table */ - printk(KERN_NOTICE "(%s%d) Flushing Complete.\n", ips_name, ha->host_num); + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; - ips_sh[i] = NULL; - ips_ha[i] = NULL; + printk(KERN_NOTICE "(%s%d) Flushing Cache.\n", ips_name, ha->host_num); - /* free extra memory */ - ips_free(ha); + /* send command */ + if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE) + printk(KERN_NOTICE "(%s%d) Incomplete Flush.\n", ips_name, + ha->host_num); - /* Free I/O Region */ - if (ha->io_addr) - release_region(ha->io_addr, ha->io_len); + printk(KERN_NOTICE "(%s%d) Flushing Complete.\n", ips_name, + ha->host_num); - /* free IRQ */ - free_irq(ha->irq, ha); + ips_sh[i] = NULL; 
+ ips_ha[i] = NULL; - scsi_unregister(sh); + /* free extra memory */ + ips_free(ha); - if (InitState != 0) { - ips_released_controllers++; - if (ips_num_controllers == ips_released_controllers){ - unregister_reboot_notifier(&ips_notifier); - pci_unregister_driver(ha->pcidev->driver); - } - } + /* Free I/O Region */ + if (ha->io_addr) + release_region(ha->io_addr, ha->io_len); - return (FALSE); + /* free IRQ */ + free_irq(ha->irq, ha); + + scsi_unregister(sh); + + ips_released_controllers++; + + return (FALSE); } /****************************************************************************/ @@ -1472,50 +729,54 @@ /* */ /****************************************************************************/ static int -ips_halt(struct notifier_block *nb, ulong event, void *buf) { - ips_scb_t *scb; - ips_ha_t *ha; - int i; - - if ((event != SYS_RESTART) && (event != SYS_HALT) && - (event != SYS_POWER_OFF)) - return (NOTIFY_DONE); - - for (i = 0; i < ips_next_controller; i++) { - ha = (ips_ha_t *) ips_ha[i]; - - if (!ha) - continue; - - if (!ha->active) - continue; - - /* flush the cache on the controller */ - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_FLUSH; - - scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; - scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flush_cache.state = IPS_NORM_STATE; - scb->cmd.flush_cache.reserved = 0; - scb->cmd.flush_cache.reserved2 = 0; - scb->cmd.flush_cache.reserved3 = 0; - scb->cmd.flush_cache.reserved4 = 0; - - printk(KERN_NOTICE "(%s%d) Flushing Cache.\n", ips_name, ha->host_num); - - /* send command */ - if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE) - printk(KERN_NOTICE "(%s%d) Incomplete Flush.\n", ips_name, ha->host_num); - else - printk(KERN_NOTICE "(%s%d) Flushing Complete.\n", ips_name, ha->host_num); - } +ips_halt(struct notifier_block *nb, ulong event, void *buf) +{ + ips_scb_t *scb; + ips_ha_t *ha; + int i; + + if ((event != SYS_RESTART) && (event != SYS_HALT) && + (event != SYS_POWER_OFF)) return (NOTIFY_DONE); + + for (i = 0; i < ips_next_controller; i++) { + ha = (ips_ha_t *) ips_ha[i]; + + if (!ha) + continue; + + if (!ha->active) + continue; + + /* flush the cache on the controller */ + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; + + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; + + printk(KERN_NOTICE "(%s%d) Flushing Cache.\n", ips_name, + ha->host_num); + + /* send command */ + if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == + IPS_FAILURE) printk(KERN_NOTICE + "(%s%d) Incomplete Flush.\n", ips_name, + ha->host_num); + else + printk(KERN_NOTICE "(%s%d) Flushing Complete.\n", + ips_name, ha->host_num); + } - return (NOTIFY_OK); + return (NOTIFY_OK); } /****************************************************************************/ @@ -1528,50 +789,51 @@ /* Note: this routine is called under the io_request_lock */ /****************************************************************************/ int -ips_eh_abort(Scsi_Cmnd *SC) { - ips_ha_t *ha; - ips_copp_wait_item_t *item; - int ret; - - METHOD_TRACE("ips_eh_abort", 1); - - if (!SC) - return (FAILED); - - ha = (ips_ha_t *) 
SC->host->hostdata; - - if (!ha) - return (FAILED); - - if (!ha->active) - return (FAILED); - - if (SC->serial_number != SC->serial_number_at_timeout) { - /* HMM, looks like a bogus command */ - DEBUG(1, "Abort called with bogus scsi command"); - - return (FAILED); - } - - /* See if the command is on the copp queue */ - item = ha->copp_waitlist.head; - while ((item) && (item->scsi_cmd != SC)) - item = item->next; - - if (item) { - /* Found it */ - ips_removeq_copp(&ha->copp_waitlist, item); - ret = (SUCCESS); - - /* See if the command is on the wait queue */ - } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) { - /* command not sent yet */ - ret = (SUCCESS); - } else { - /* command must have already been sent */ - ret = (FAILED); - } - return ret; +ips_eh_abort(Scsi_Cmnd * SC) +{ + ips_ha_t *ha; + ips_copp_wait_item_t *item; + int ret; + + METHOD_TRACE("ips_eh_abort", 1); + + if (!SC) + return (FAILED); + + ha = (ips_ha_t *) SC->host->hostdata; + + if (!ha) + return (FAILED); + + if (!ha->active) + return (FAILED); + + if (SC->serial_number != SC->serial_number_at_timeout) { + /* HMM, looks like a bogus command */ + DEBUG(1, "Abort called with bogus scsi command"); + + return (FAILED); + } + + /* See if the command is on the copp queue */ + item = ha->copp_waitlist.head; + while ((item) && (item->scsi_cmd != SC)) + item = item->next; + + if (item) { + /* Found it */ + ips_removeq_copp(&ha->copp_waitlist, item); + ret = (SUCCESS); + + /* See if the command is on the wait queue */ + } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) { + /* command not sent yet */ + ret = (SUCCESS); + } else { + /* command must have already been sent */ + ret = (FAILED); + } + return ret; } /****************************************************************************/ @@ -1586,191 +848,194 @@ /* */ /****************************************************************************/ int -ips_eh_reset(Scsi_Cmnd *SC) { - int ret; - int i; - ips_ha_t *ha; - ips_scb_t *scb; - ips_copp_wait_item_t *item; +ips_eh_reset(Scsi_Cmnd * SC) +{ + int ret; + int i; + ips_ha_t *ha; + ips_scb_t *scb; + ips_copp_wait_item_t *item; - METHOD_TRACE("ips_eh_reset", 1); + METHOD_TRACE("ips_eh_reset", 1); #ifdef NO_IPS_RESET - return (FAILED); + return (FAILED); #else - if (!SC) { - DEBUG(1, "Reset called with NULL scsi command"); + if (!SC) { + DEBUG(1, "Reset called with NULL scsi command"); - return (FAILED); - } + return (FAILED); + } - ha = (ips_ha_t *) SC->host->hostdata; + ha = (ips_ha_t *) SC->host->hostdata; - if (!ha) { - DEBUG(1, "Reset called with NULL ha struct"); - - return (FAILED); - } - - if (!ha->active) - return (FAILED); - - /* See if the command is on the copp queue */ - item = ha->copp_waitlist.head; - while ((item) && (item->scsi_cmd != SC)) - item = item->next; - - if (item) { - /* Found it */ - ips_removeq_copp(&ha->copp_waitlist, item); - return (SUCCESS); - } - - /* See if the command is on the wait queue */ - if (ips_removeq_wait(&ha->scb_waitlist, SC)) { - /* command not sent yet */ - return (SUCCESS); - } - - /* An explanation for the casual observer: */ - /* Part of the function of a RAID controller is automatic error */ - /* detection and recovery. As such, the only problem that physically */ - /* resetting an adapter will ever fix is when, for some reason, */ - /* the driver is not successfully communicating with the adapter. */ - /* Therefore, we will attempt to flush this adapter. If that succeeds, */ - /* then there's no real purpose in a physical reset. 
This will complete */ - /* much faster and avoids any problems that might be caused by a */ - /* physical reset ( such as having to fail all the outstanding I/O's ). */ - - if (ha->ioctl_reset == 0) { /* IF Not an IOCTL Requested Reset */ - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_FLUSH; - - scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; - scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flush_cache.state = IPS_NORM_STATE; - scb->cmd.flush_cache.reserved = 0; - scb->cmd.flush_cache.reserved2 = 0; - scb->cmd.flush_cache.reserved3 = 0; - scb->cmd.flush_cache.reserved4 = 0; - - /* Attempt the flush command */ - ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL); - if (ret == IPS_SUCCESS) { - printk(KERN_NOTICE "(%s%d) Reset Request - Flushed Cache\n", ips_name, ha->host_num); - return (SUCCESS); - } - } - - /* Either we can't communicate with the adapter or it's an IOCTL request */ - /* from a utility. A physical reset is needed at this point. */ - - ha->ioctl_reset = 0; /* Reset the IOCTL Requested Reset Flag */ - - /* - * command must have already been sent - * reset the controller - */ - printk(KERN_NOTICE "(%s%d) Resetting controller.\n", - ips_name, ha->host_num); - ret = (*ha->func.reset)(ha); - - if (!ret) { - Scsi_Cmnd *scsi_cmd; - - printk(KERN_NOTICE - "(%s%d) Controller reset failed - controller now offline.\n", - ips_name, ha->host_num); - - /* Now fail all of the active commands */ - DEBUG_VAR(1, "(%s%d) Failing active commands", - ips_name, ha->host_num); - - while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - ips_freescb(ha, scb); - } - - /* Now fail all of the pending commands */ - DEBUG_VAR(1, "(%s%d) Failing pending commands", - ips_name, ha->host_num); - - while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { - scsi_cmd->result = DID_ERROR; - scsi_cmd->scsi_done(scsi_cmd); - } - - ha->active = FALSE; - return (FAILED); - } - - if (!ips_clear_adapter(ha, IPS_INTR_IORL)) { - Scsi_Cmnd *scsi_cmd; - - printk(KERN_NOTICE - "(%s%d) Controller reset failed - controller now offline.\n", - ips_name, ha->host_num); - - /* Now fail all of the active commands */ - DEBUG_VAR(1, "(%s%d) Failing active commands", - ips_name, ha->host_num); - - while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - ips_freescb(ha, scb); - } - - /* Now fail all of the pending commands */ - DEBUG_VAR(1, "(%s%d) Failing pending commands", - ips_name, ha->host_num); - - while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { - scsi_cmd->result = DID_ERROR << 16; - scsi_cmd->scsi_done(scsi_cmd); - } - - ha->active = FALSE; - return (FAILED); - } - - /* FFDC */ - if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) { - struct timeval tv; - - do_gettimeofday(&tv); - ha->last_ffdc = tv.tv_sec; - ha->reset_count++; - ips_ffdc_reset(ha, IPS_INTR_IORL); - } - - /* Now fail all of the active commands */ - DEBUG_VAR(1, "(%s%d) Failing active commands", - ips_name, ha->host_num); - - while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { - scb->scsi_cmd->result = (DID_RESET << 16) | (SUGGEST_RETRY << 24); - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - ips_freescb(ha, scb); - } - - /* Reset DCDB active command bits */ - for (i = 1; i < ha->nbus; i++) - ha->dcdb_active[i-1] = 0; + if (!ha) { + DEBUG(1, "Reset 
called with NULL ha struct"); + + return (FAILED); + } + + if (!ha->active) + return (FAILED); + + /* See if the command is on the copp queue */ + item = ha->copp_waitlist.head; + while ((item) && (item->scsi_cmd != SC)) + item = item->next; + + if (item) { + /* Found it */ + ips_removeq_copp(&ha->copp_waitlist, item); + return (SUCCESS); + } + + /* See if the command is on the wait queue */ + if (ips_removeq_wait(&ha->scb_waitlist, SC)) { + /* command not sent yet */ + return (SUCCESS); + } + + /* An explanation for the casual observer: */ + /* Part of the function of a RAID controller is automatic error */ + /* detection and recovery. As such, the only problem that physically */ + /* resetting an adapter will ever fix is when, for some reason, */ + /* the driver is not successfully communicating with the adapter. */ + /* Therefore, we will attempt to flush this adapter. If that succeeds, */ + /* then there's no real purpose in a physical reset. This will complete */ + /* much faster and avoids any problems that might be caused by a */ + /* physical reset ( such as having to fail all the outstanding I/O's ). */ + + if (ha->ioctl_reset == 0) { /* IF Not an IOCTL Requested Reset */ + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FLUSH; + + scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; + scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flush_cache.state = IPS_NORM_STATE; + scb->cmd.flush_cache.reserved = 0; + scb->cmd.flush_cache.reserved2 = 0; + scb->cmd.flush_cache.reserved3 = 0; + scb->cmd.flush_cache.reserved4 = 0; + + /* Attempt the flush command */ + ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL); + if (ret == IPS_SUCCESS) { + printk(KERN_NOTICE + "(%s%d) Reset Request - Flushed Cache\n", + ips_name, ha->host_num); + return (SUCCESS); + } + } + + /* Either we can't communicate with the adapter or it's an IOCTL request */ + /* from a utility. A physical reset is needed at this point. 
*/ + + ha->ioctl_reset = 0; /* Reset the IOCTL Requested Reset Flag */ + + /* + * command must have already been sent + * reset the controller + */ + printk(KERN_NOTICE "(%s%d) Resetting controller.\n", + ips_name, ha->host_num); + ret = (*ha->func.reset) (ha); + + if (!ret) { + Scsi_Cmnd *scsi_cmd; + + printk(KERN_NOTICE + "(%s%d) Controller reset failed - controller now offline.\n", + ips_name, ha->host_num); + + /* Now fail all of the active commands */ + DEBUG_VAR(1, "(%s%d) Failing active commands", + ips_name, ha->host_num); + + while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + ips_freescb(ha, scb); + } + + /* Now fail all of the pending commands */ + DEBUG_VAR(1, "(%s%d) Failing pending commands", + ips_name, ha->host_num); + + while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { + scsi_cmd->result = DID_ERROR; + scsi_cmd->scsi_done(scsi_cmd); + } + + ha->active = FALSE; + return (FAILED); + } + + if (!ips_clear_adapter(ha, IPS_INTR_IORL)) { + Scsi_Cmnd *scsi_cmd; + + printk(KERN_NOTICE + "(%s%d) Controller reset failed - controller now offline.\n", + ips_name, ha->host_num); + + /* Now fail all of the active commands */ + DEBUG_VAR(1, "(%s%d) Failing active commands", + ips_name, ha->host_num); + + while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + ips_freescb(ha, scb); + } + + /* Now fail all of the pending commands */ + DEBUG_VAR(1, "(%s%d) Failing pending commands", + ips_name, ha->host_num); + + while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { + scsi_cmd->result = DID_ERROR << 16; + scsi_cmd->scsi_done(scsi_cmd); + } + + ha->active = FALSE; + return (FAILED); + } + + /* FFDC */ + if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) { + struct timeval tv; + + do_gettimeofday(&tv); + ha->last_ffdc = tv.tv_sec; + ha->reset_count++; + ips_ffdc_reset(ha, IPS_INTR_IORL); + } + + /* Now fail all of the active commands */ + DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num); + + while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { + scb->scsi_cmd->result = + (DID_RESET << 16) | (SUGGEST_RETRY << 24); + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + ips_freescb(ha, scb); + } + + /* Reset DCDB active command bits */ + for (i = 1; i < ha->nbus; i++) + ha->dcdb_active[i - 1] = 0; - /* Reset the number of active IOCTLs */ - ha->num_ioctl = 0; + /* Reset the number of active IOCTLs */ + ha->num_ioctl = 0; - ips_next(ha, IPS_INTR_IORL); + ips_next(ha, IPS_INTR_IORL); - return (SUCCESS); -#endif /* NO_IPS_RESET */ + return (SUCCESS); +#endif /* NO_IPS_RESET */ } @@ -1787,103 +1052,92 @@ /* */ /****************************************************************************/ int -ips_queue(Scsi_Cmnd *SC, void (*done) (Scsi_Cmnd *)) { - ips_ha_t *ha; - ips_passthru_t *pt; - - METHOD_TRACE("ips_queue", 1); - - ha = (ips_ha_t *) SC->host->hostdata; - - if (!ha) - return (1); - - if (!ha->active) - return (DID_ERROR); - - if (ips_is_passthru(SC)) { - if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) { - SC->result = DID_BUS_BUSY << 16; - done(SC); - - return (0); - } - } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) { - SC->result = DID_BUS_BUSY << 16; - done(SC); - - return (0); - } - - SC->scsi_done = done; - - DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)", - ips_name, - ha->host_num, - SC->cmnd[0], - SC->channel, - SC->target, - SC->lun); - - /* 
Check for command to initiator IDs */ - if ((SC->channel > 0) && (SC->target == ha->ha_id[SC->channel])) { - SC->result = DID_NO_CONNECT << 16; - done(SC); - - return (0); - } - - if (ips_is_passthru(SC)) { - - ips_copp_wait_item_t *scratch; - - /* A Reset IOCTL is only sent by the boot CD in extreme cases. */ - /* There can never be any system activity ( network or disk ), but check */ - /* anyway just as a good practice. */ - pt = (ips_passthru_t *) SC->request_buffer; - if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) && - (pt->CoppCP.cmd.reset.adapter_flag == 1)) { - if (ha->scb_activelist.count != 0) { - SC->result = DID_BUS_BUSY << 16; - done(SC); - return (0); - } - ha->ioctl_reset = 1; /* This reset request is from an IOCTL */ - ips_eh_reset(SC); - SC->result = DID_OK << 16; - SC->scsi_done(SC); - return (0); - } - - /* allocate space for the scribble */ - scratch = kmalloc(sizeof(ips_copp_wait_item_t), GFP_ATOMIC); - - if (!scratch) { - SC->result = DID_ERROR << 16; - done(SC); - - return (0); - } - - scratch->scsi_cmd = SC; - scratch->next = NULL; - - ips_putq_copp_tail(&ha->copp_waitlist, scratch); - } - else { - ips_putq_wait_tail(&ha->scb_waitlist, SC); - } - - ips_next(ha, IPS_INTR_IORL); - - /* If We were using the CD Boot Flash Buffer, Restore the Old Values */ - if ( ips_FlashData == ha->ioctl_data ) { - ha->ioctl_data = ha->flash_data; - ha->ioctl_order = ha->flash_order; - ha->ioctl_datasize = ha->flash_datasize; - ips_FlashDataInUse = 0; - } - return (0); +ips_queue(Scsi_Cmnd * SC, void (*done) (Scsi_Cmnd *)) +{ + ips_ha_t *ha; + ips_passthru_t *pt; + + METHOD_TRACE("ips_queue", 1); + + ha = (ips_ha_t *) SC->host->hostdata; + + if (!ha) + return (1); + + if (!ha->active) + return (DID_ERROR); + + if (ips_is_passthru(SC)) { + if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) { + SC->result = DID_BUS_BUSY << 16; + done(SC); + + return (0); + } + } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) { + SC->result = DID_BUS_BUSY << 16; + done(SC); + + return (0); + } + + SC->scsi_done = done; + + DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)", + ips_name, + ha->host_num, SC->cmnd[0], SC->channel, SC->target, SC->lun); + + /* Check for command to initiator IDs */ + if ((SC->channel > 0) && (SC->target == ha->ha_id[SC->channel])) { + SC->result = DID_NO_CONNECT << 16; + done(SC); + + return (0); + } + + if (ips_is_passthru(SC)) { + + ips_copp_wait_item_t *scratch; + + /* A Reset IOCTL is only sent by the boot CD in extreme cases. */ + /* There can never be any system activity ( network or disk ), but check */ + /* anyway just as a good practice. 
*/ + pt = (ips_passthru_t *) SC->request_buffer; + if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) && + (pt->CoppCP.cmd.reset.adapter_flag == 1)) { + if (ha->scb_activelist.count != 0) { + SC->result = DID_BUS_BUSY << 16; + done(SC); + return (0); + } + ha->ioctl_reset = 1; /* This reset request is from an IOCTL */ + ips_eh_reset(SC); + SC->result = DID_OK << 16; + SC->scsi_done(SC); + return (0); + } + + /* allocate space for the scribble */ + scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC); + + if (!scratch) { + SC->result = DID_ERROR << 16; + done(SC); + + return (0); + } + + scratch->scsi_cmd = SC; + scratch->next = NULL; + + ips_putq_copp_tail(&ha->copp_waitlist, scratch); + } else { + ips_putq_wait_tail(&ha->scb_waitlist, SC); + } + + ips_next(ha, IPS_INTR_IORL); + + return (0); } /****************************************************************************/ @@ -1896,48 +1150,49 @@ /* */ /****************************************************************************/ int -ips_biosparam(Disk *disk, kdev_t dev, int geom[]) { - ips_ha_t *ha; - int heads; - int sectors; - int cylinders; - - METHOD_TRACE("ips_biosparam", 1); - - ha = (ips_ha_t *) disk->device->host->hostdata; - - if (!ha) - /* ?!?! host adater info invalid */ - return (0); - - if (!ha->active) - return (0); - - if (!ips_read_adapter_status(ha, IPS_INTR_ON)) - /* ?!?! Enquiry command failed */ - return (0); - - if ((disk->capacity > 0x400000) && - ((ha->enq->ucMiscFlag & 0x8) == 0)) { - heads = IPS_NORM_HEADS; - sectors = IPS_NORM_SECTORS; - } else { - heads = IPS_COMP_HEADS; - sectors = IPS_COMP_SECTORS; - } - - cylinders = disk->capacity / (heads * sectors); - - DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d", - heads, sectors, cylinders); - - geom[0] = heads; - geom[1] = sectors; - geom[2] = cylinders; +ips_biosparam(Disk * disk, kdev_t dev, int geom[]) +{ + ips_ha_t *ha; + int heads; + int sectors; + int cylinders; + + METHOD_TRACE("ips_biosparam", 1); - return (0); + ha = (ips_ha_t *) disk->device->host->hostdata; + + if (!ha) + /* ?!?! host adater info invalid */ + return (0); + + if (!ha->active) + return (0); + + if (!ips_read_adapter_status(ha, IPS_INTR_ON)) + /* ?!?! 
Enquiry command failed */ + return (0); + + if ((disk->capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) { + heads = IPS_NORM_HEADS; + sectors = IPS_NORM_SECTORS; + } else { + heads = IPS_COMP_HEADS; + sectors = IPS_COMP_SECTORS; + } + + cylinders = disk->capacity / (heads * sectors); + + DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d", + heads, sectors, cylinders); + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return (0); } +#if LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) /****************************************************************************/ /* */ /* Routine Name: ips_select_queue_depth */ @@ -1948,39 +1203,67 @@ /* */ /****************************************************************************/ static void -ips_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs) { - Scsi_Device *device; - ips_ha_t *ha; - int count = 0; - int min; - - ha = IPS_HA(host); - min = ha->max_cmds / 4; - - for (device = scsi_devs; device; device = device->next) { - if (device->host == host) { - if ((device->channel == 0) && (device->type == 0)) - count++; - } - } - - for (device = scsi_devs; device; device = device->next) { - if (device->host == host) { - if ((device->channel == 0) && (device->type == 0)) { - device->queue_depth = ( ha->max_cmds - 1 ) / count; - if (device->queue_depth < min) - device->queue_depth = min; - } - else { - device->queue_depth = 2; - } - - if (device->queue_depth < 2) - device->queue_depth = 2; - } - } +ips_select_queue_depth(struct Scsi_Host *host, Scsi_Device * scsi_devs) +{ + Scsi_Device *device; + ips_ha_t *ha; + int count = 0; + int min; + + ha = IPS_HA(host); + min = ha->max_cmds / 4; + + for (device = scsi_devs; device; device = device->next) { + if (device->host == host) { + if ((device->channel == 0) && (device->type == 0)) + count++; + } + } + + for (device = scsi_devs; device; device = device->next) { + if (device->host == host) { + if ((device->channel == 0) && (device->type == 0)) { + device->queue_depth = + (ha->max_cmds - 1) / count; + if (device->queue_depth < min) + device->queue_depth = min; + } else { + device->queue_depth = 2; + } + + if (device->queue_depth < 2) + device->queue_depth = 2; + } + } } +#else +/****************************************************************************/ +/* */ +/* Routine Name: ips_slave_configure */ +/* */ +/* Routine Description: */ +/* */ +/* Set queue depths on devices once scan is complete */ +/* */ +/****************************************************************************/ +int +ips_slave_configure(Scsi_Device * SDptr) +{ + ips_ha_t *ha; + int min; + + ha = IPS_HA(SDptr->host); + if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) { + min = ha->max_cmds / 2; + if (ha->enq->ucLogDriveCount <= 2) + min = ha->max_cmds - 1; + scsi_adjust_queue_depth(SDptr, MSG_ORDERED_TAG, min); + } + return 0; +} +#endif + /****************************************************************************/ /* */ /* Routine Name: do_ipsintr */ @@ -1991,30 +1274,37 @@ /* */ /****************************************************************************/ void -do_ipsintr(int irq, void *dev_id, struct pt_regs *regs) { - ips_ha_t *ha; - unsigned long cpu_flags; - struct Scsi_Host *host; - - METHOD_TRACE("do_ipsintr", 2); - - ha = (ips_ha_t *) dev_id; - if (!ha) - return; - host = ips_sh[ha->host_num]; - IPS_LOCK_SAVE(host->host_lock, cpu_flags); - - if (!ha->active) { - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); - return; - } +do_ipsintr(int irq, void *dev_id, struct 
pt_regs *regs) +{ + ips_ha_t *ha; + unsigned long cpu_flags; + struct Scsi_Host *host; + + METHOD_TRACE("do_ipsintr", 2); + + ha = (ips_ha_t *) dev_id; + if (!ha) + return; + host = ips_sh[ha->host_num]; + /* interrupt during initialization */ + if (!host) { + (*ha->func.intr) (ha); + return; + } + + IPS_LOCK_SAVE(host->host_lock, cpu_flags); + + if (!ha->active) { + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); + return; + } - (*ha->func.intr)(ha); + (*ha->func.intr) (ha); - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); - /* start the next command */ - ips_next(ha, IPS_INTR_ON); + /* start the next command */ + ips_next(ha, IPS_INTR_ON); } /****************************************************************************/ @@ -2029,54 +1319,55 @@ /* */ /****************************************************************************/ void -ips_intr_copperhead(ips_ha_t *ha) { - ips_stat_t *sp; - ips_scb_t *scb; - IPS_STATUS cstatus; - int intrstatus; - - METHOD_TRACE("ips_intr", 2); - - if (!ha) - return; - - if (!ha->active) - return; - - intrstatus = (*ha->func.isintr)(ha); - - if (!intrstatus) { - /* - * Unexpected/Shared interrupt - */ - - return; - } - - while (TRUE) { - sp = &ha->sp; - - intrstatus = (*ha->func.isintr)(ha); - - if (!intrstatus) - break; - else - cstatus.value = (*ha->func.statupd)(ha); - - if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { - /* Spurious Interupt ? */ - continue; - } - - ips_chkstatus(ha, &cstatus); - scb = (ips_scb_t *) sp->scb_addr; - - /* - * use the callback function to finish things up - * NOTE: interrupts are OFF for this - */ - (*scb->callback) (ha, scb); - } /* end while */ +ips_intr_copperhead(ips_ha_t * ha) +{ + ips_stat_t *sp; + ips_scb_t *scb; + IPS_STATUS cstatus; + int intrstatus; + + METHOD_TRACE("ips_intr", 2); + + if (!ha) + return; + + if (!ha->active) + return; + + intrstatus = (*ha->func.isintr) (ha); + + if (!intrstatus) { + /* + * Unexpected/Shared interrupt + */ + + return; + } + + while (TRUE) { + sp = &ha->sp; + + intrstatus = (*ha->func.isintr) (ha); + + if (!intrstatus) + break; + else + cstatus.value = (*ha->func.statupd) (ha); + + if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { + /* Spurious Interupt ? 
*/ + continue; + } + + ips_chkstatus(ha, &cstatus); + scb = (ips_scb_t *) sp->scb_addr; + + /* + * use the callback function to finish things up + * NOTE: interrupts are OFF for this + */ + (*scb->callback) (ha, scb); + } /* end while */ } /****************************************************************************/ @@ -2091,60 +1382,62 @@ /* */ /****************************************************************************/ void -ips_intr_morpheus(ips_ha_t *ha) { - ips_stat_t *sp; - ips_scb_t *scb; - IPS_STATUS cstatus; - int intrstatus; +ips_intr_morpheus(ips_ha_t * ha) +{ + ips_stat_t *sp; + ips_scb_t *scb; + IPS_STATUS cstatus; + int intrstatus; - METHOD_TRACE("ips_intr_morpheus", 2); + METHOD_TRACE("ips_intr_morpheus", 2); - if (!ha) - return; + if (!ha) + return; - if (!ha->active) - return; + if (!ha->active) + return; - intrstatus = (*ha->func.isintr)(ha); + intrstatus = (*ha->func.isintr) (ha); - if (!intrstatus) { - /* - * Unexpected/Shared interrupt - */ + if (!intrstatus) { + /* + * Unexpected/Shared interrupt + */ - return; - } + return; + } - while (TRUE) { - sp = &ha->sp; + while (TRUE) { + sp = &ha->sp; - intrstatus = (*ha->func.isintr)(ha); + intrstatus = (*ha->func.isintr) (ha); - if (!intrstatus) - break; - else - cstatus.value = (*ha->func.statupd)(ha); + if (!intrstatus) + break; + else + cstatus.value = (*ha->func.statupd) (ha); - if (cstatus.value == 0xffffffff) - /* No more to process */ - break; + if (cstatus.value == 0xffffffff) + /* No more to process */ + break; - if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { - printk(KERN_WARNING "(%s%d) Spurious interrupt; no ccb.\n", - ips_name, ha->host_num); + if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { + printk(KERN_WARNING + "(%s%d) Spurious interrupt; no ccb.\n", ips_name, + ha->host_num); - continue; - } + continue; + } - ips_chkstatus(ha, &cstatus); - scb = (ips_scb_t *) sp->scb_addr; + ips_chkstatus(ha, &cstatus); + scb = (ips_scb_t *) sp->scb_addr; - /* - * use the callback function to finish things up - * NOTE: interrupts are OFF for this - */ - (*scb->callback) (ha, scb); - } /* end while */ + /* + * use the callback function to finish things up + * NOTE: interrupts are OFF for this + */ + (*scb->callback) (ha, scb); + } /* end while */ } /****************************************************************************/ @@ -2157,31 +1450,32 @@ /* */ /****************************************************************************/ const char * -ips_info(struct Scsi_Host *SH) { - static char buffer[256]; - char *bp; - ips_ha_t *ha; +ips_info(struct Scsi_Host *SH) +{ + static char buffer[256]; + char *bp; + ips_ha_t *ha; - METHOD_TRACE("ips_info", 1); + METHOD_TRACE("ips_info", 1); - ha = IPS_HA(SH); + ha = IPS_HA(SH); - if (!ha) - return (NULL); + if (!ha) + return (NULL); - bp = &buffer[0]; - memset(bp, 0, sizeof(buffer)); + bp = &buffer[0]; + memset(bp, 0, sizeof (buffer)); - sprintf(bp, "%s%s%s", "IBM PCI ServeRAID ", IPS_VERSION_HIGH, IPS_VERSION_LOW ); + sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ", + IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT); - if (ha->ad_type > 0 && - ha->ad_type <= MAX_ADAPTER_NAME) { - strcat(bp, " <"); - strcat(bp, ips_adapter_name[ha->ad_type-1]); - strcat(bp, ">"); - } + if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) { + strcat(bp, " <"); + strcat(bp, ips_adapter_name[ha->ad_type - 1]); + strcat(bp, ">"); + } - return (bp); + return (bp); } /****************************************************************************/ @@ -2195,38 +1489,39 @@ 
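For context on the two interrupt paths reindented above: ips_intr_copperhead and ips_intr_morpheus both drain the adapter in a loop -- re-poll (*ha->func.isintr)(), pull one status word with (*ha->func.statupd)(), discard out-of-range command ids as spurious, and let the completed scb's callback finish the command with interrupts still off. A compressed, self-contained sketch of that drain pattern, using stand-in types and helpers rather than the driver's real ones:

#include <stdio.h>
#include <stdbool.h>

#define MAX_CMDS 4	/* stand-in for IPS_MAX_CMDS */

struct scb {
	void (*callback)(struct scb *);
	int id;
};

/* stand-ins for (*ha->func.isintr)() and (*ha->func.statupd)() */
static int  pending = 3;
static bool isintr(void)  { return pending > 0; }
static int  statupd(void) { return --pending; }	/* yields one command id */

static void done(struct scb *s) { printf("scb %d complete\n", s->id); }

static void intr_drain(struct scb *scbs)
{
	while (isintr()) {			/* keep going while work is posted */
		int id = statupd();		/* pop one completion status */
		if (id < 0 || id > MAX_CMDS - 1)
			continue;		/* spurious interrupt: no such command */
		scbs[id].callback(&scbs[id]);	/* finish with interrupts still off */
	}
}

int main(void)
{
	struct scb scbs[MAX_CMDS];
	for (int i = 0; i < MAX_CMDS; i++) {
		scbs[i].id = i;
		scbs[i].callback = done;
	}
	intr_drain(scbs);
	return 0;
}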
/****************************************************************************/ int ips_proc_info(char *buffer, char **start, off_t offset, - int length, int hostno, int func) { - int i; - int ret; - ips_ha_t *ha = NULL; - - METHOD_TRACE("ips_proc_info", 1); - - /* Find our host structure */ - for (i = 0; i < ips_next_controller; i++) { - if (ips_sh[i]) { - if (ips_sh[i]->host_no == hostno) { - ha = (ips_ha_t *) ips_sh[i]->hostdata; - break; - } - } - } - - if (!ha) - return (-EINVAL); - - if (func) { - /* write */ - return (0); - } else { - /* read */ - if (start) - *start = buffer; + int length, int hostno, int func) +{ + int i; + int ret; + ips_ha_t *ha = NULL; + + METHOD_TRACE("ips_proc_info", 1); + + /* Find our host structure */ + for (i = 0; i < ips_next_controller; i++) { + if (ips_sh[i]) { + if (ips_sh[i]->host_no == hostno) { + ha = (ips_ha_t *) ips_sh[i]->hostdata; + break; + } + } + } + + if (!ha) + return (-EINVAL); + + if (func) { + /* write */ + return (0); + } else { + /* read */ + if (start) + *start = buffer; - ret = ips_host_info(ha, buffer, offset, length); + ret = ips_host_info(ha, buffer, offset, length); - return (ret); - } + return (ret); + } } /*--------------------------------------------------------------------------*/ @@ -2243,32 +1538,65 @@ /* */ /****************************************************************************/ static int -ips_is_passthru(Scsi_Cmnd *SC) { - METHOD_TRACE("ips_is_passthru", 1); +ips_is_passthru(Scsi_Cmnd * SC) +{ + METHOD_TRACE("ips_is_passthru", 1); - if (!SC) - return (0); + if (!SC) + return (0); + + if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) && + (SC->channel == 0) && + (SC->target == IPS_ADAPTER_ID) && + (SC->lun == 0) && SC->request_buffer) { + if ((!SC->use_sg) && SC->request_bufflen && + (((char *) SC->request_buffer)[0] == 'C') && + (((char *) SC->request_buffer)[1] == 'O') && + (((char *) SC->request_buffer)[2] == 'P') && + (((char *) SC->request_buffer)[3] == 'P')) + return 1; + else if (SC->use_sg) { + struct scatterlist *sg = SC->request_buffer; + char *buffer = IPS_SG_ADDRESS(sg); + if (buffer && buffer[0] == 'C' && buffer[1] == 'O' && + buffer[2] == 'P' && buffer[3] == 'P') + return 1; + } + } + return 0; +} - if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) && - (SC->channel == 0) && - (SC->target == IPS_ADAPTER_ID) && - (SC->lun == 0) && - SC->request_buffer){ - if((!SC->use_sg) && SC->request_bufflen && - (((char *) SC->request_buffer)[0] == 'C') && - (((char *) SC->request_buffer)[1] == 'O') && - (((char *) SC->request_buffer)[2] == 'P') && - (((char *) SC->request_buffer)[3] == 'P')) - return 1; - else if(SC->use_sg){ - struct scatterlist *sg = SC->request_buffer; - char *buffer = IPS_SG_ADDRESS(sg); - if(buffer && buffer[0] == 'C' && buffer[1] == 'O' && - buffer[2] == 'P' && buffer[3] == 'P') - return 1; - } - } - return 0; +/****************************************************************************/ +/* */ +/* Routine Name: ips_alloc_passthru_buffer */ +/* */ +/* Routine Description: */ +/* allocate a buffer large enough for the ioctl data if the ioctl buffer */ +/* is too small or doesn't exist */ +/****************************************************************************/ +static int +ips_alloc_passthru_buffer(ips_ha_t * ha, int length) +{ + void *bigger_buf; + int count; + int order; + + if (ha->ioctl_data && length <= (PAGE_SIZE << ha->ioctl_order)) + return 0; + /* there is no buffer or it's not big enough, allocate a new one */ + for (count = PAGE_SIZE, order = 0; + count < length; order++, count <<= 1) ; + 
bigger_buf = (void *) __get_free_pages(IPS_ATOMIC_GFP, order); + if (bigger_buf) { + /* free the old memory */ + free_pages((unsigned long) ha->ioctl_data, ha->ioctl_order); + /* use the new memory */ + ha->ioctl_data = (char *) bigger_buf; + ha->ioctl_order = order; + } else { + return -1; + } + return 0; } /****************************************************************************/ @@ -2281,127 +1609,96 @@ /* */ /****************************************************************************/ static int -ips_make_passthru(ips_ha_t *ha, Scsi_Cmnd *SC, ips_scb_t *scb, int intr) { - ips_passthru_t *pt; - char *buffer; - int length = 0; - - METHOD_TRACE("ips_make_passthru", 1); - - if(!SC->use_sg){ - buffer = SC->request_buffer; - length = SC->request_bufflen; - }else{ - struct scatterlist *sg = SC->request_buffer; - int i; - for(i = 0; i < SC->use_sg; i++) - length += sg[i].length; - - if (length < sizeof(ips_passthru_t)) { - /* wrong size */ - DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", - ips_name, ha->host_num); - return (IPS_FAILURE); - }else if(!ha->ioctl_data || length > (PAGE_SIZE << ha->ioctl_order)){ - void *bigger_buf; - int count; - int order; - /* try to allocate a bigger buffer */ - for (count = PAGE_SIZE, order = 0; - count < length; - order++, count <<= 1); - bigger_buf = (void *) __get_free_pages(GFP_ATOMIC, order); - if (bigger_buf) { - /* free the old memory */ - free_pages((unsigned long) ha->ioctl_data, ha->ioctl_order); - /* use the new memory */ - ha->ioctl_data = (char *) bigger_buf; - ha->ioctl_order = order; - ha->ioctl_datasize = count; - } else { - pt = (ips_passthru_t*)IPS_SG_ADDRESS(sg); - pt->BasicStatus = 0x0B; - pt->ExtendedStatus = 0x00; - SC->result = DID_ERROR << 16; - return (IPS_FAILURE); - } - } - ha->ioctl_datasize = length; - length = 0; - for(i = 0; i < SC->use_sg; i++){ - memcpy(&ha->ioctl_data[length], IPS_SG_ADDRESS(&sg[i]), sg[i].length); - length += sg[i].length; - } - pt = (ips_passthru_t *)ha->ioctl_data; - buffer = ha->ioctl_data; - } - if (!length || !buffer) { - /* no data */ - DEBUG_VAR(1, "(%s%d) No passthru structure", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } - if (length < sizeof(ips_passthru_t)) { - /* wrong size */ - DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } - pt = (ips_passthru_t*) buffer; - /* - * Some notes about the passthru interface used - * - * IF the scsi op_code == 0x0d then we assume - * that the data came along with/goes with the - * packet we received from the sg driver. In this - * case the CmdBSize field of the pt structure is - * used for the size of the buffer. 
- */ - - switch (pt->CoppCmd) { - case IPS_NUMCTRLS: - memcpy(buffer + sizeof(ips_passthru_t), - &ips_num_controllers, sizeof(int)); - SC->result = DID_OK << 16; - - return (IPS_SUCCESS_IMM); - - case IPS_CTRLINFO: - memcpy(buffer + sizeof(ips_passthru_t), - ha, sizeof(ips_ha_t)); - SC->result = DID_OK << 16; - - return (IPS_SUCCESS_IMM); - - case IPS_COPPUSRCMD: - case IPS_COPPIOCCMD: - if (SC->cmnd[0] == IPS_IOCTL_COMMAND) { - if (length < (sizeof(ips_passthru_t) + pt->CmdBSize)) { - /* wrong size */ - DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } - - if(ha->device_id == IPS_DEVICEID_COPPERHEAD && - pt->CoppCP.cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW) - return ips_flash_copperhead(ha, pt, scb); - - if (ips_usrcmd(ha, pt, scb)) - return (IPS_SUCCESS); - else - return (IPS_FAILURE); - } - - break; +ips_make_passthru(ips_ha_t * ha, Scsi_Cmnd * SC, ips_scb_t * scb, int intr) +{ + ips_passthru_t *pt; + int length = 0; + int ret; + + METHOD_TRACE("ips_make_passthru", 1); + + if (!SC->use_sg) { + length = SC->request_bufflen; + } else { + struct scatterlist *sg = SC->request_buffer; + int i; + for (i = 0; i < SC->use_sg; i++) + length += sg[i].length; + } + if (length < sizeof (ips_passthru_t)) { + /* wrong size */ + DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", + ips_name, ha->host_num); + return (IPS_FAILURE); + } + if (ips_alloc_passthru_buffer(ha, length)) { + /* allocation failure! If ha->ioctl_data exists, use it to return + some error codes. Return a failed command to the scsi layer. */ + if (ha->ioctl_data) { + pt = (ips_passthru_t *) ha->ioctl_data; + ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t)); + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0x00; + ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t)); + } + return IPS_FAILURE; + } + ha->ioctl_datasize = length; + + ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize); + pt = (ips_passthru_t *) ha->ioctl_data; + + /* + * Some notes about the passthru interface used + * + * IF the scsi op_code == 0x0d then we assume + * that the data came along with/goes with the + * packet we received from the sg driver. In this + * case the CmdBSize field of the pt structure is + * used for the size of the buffer. 
+ */ + + switch (pt->CoppCmd) { + case IPS_NUMCTRLS: + memcpy(ha->ioctl_data + sizeof (ips_passthru_t), + &ips_num_controllers, sizeof (int)); + ips_scmd_buf_write(SC, ha->ioctl_data, + sizeof (ips_passthru_t) + sizeof (int)); + SC->result = DID_OK << 16; + + return (IPS_SUCCESS_IMM); + + case IPS_COPPUSRCMD: + case IPS_COPPIOCCMD: + if (SC->cmnd[0] == IPS_IOCTL_COMMAND) { + if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) { + /* wrong size */ + DEBUG_VAR(1, + "(%s%d) Passthru structure wrong size", + ips_name, ha->host_num); + + return (IPS_FAILURE); + } + + if (ha->device_id == IPS_DEVICEID_COPPERHEAD && + pt->CoppCP.cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW) { + ret = ips_flash_copperhead(ha, pt, scb); + ips_scmd_buf_write(SC, ha->ioctl_data, + sizeof (ips_passthru_t)); + return ret; + } + if (ips_usrcmd(ha, pt, scb)) + return (IPS_SUCCESS); + else + return (IPS_FAILURE); + } - } /* end switch */ + break; - return (IPS_FAILURE); - } + } /* end switch */ + + return (IPS_FAILURE); +} /****************************************************************************/ /* Routine Name: ips_flash_copperhead */ @@ -2409,62 +1706,65 @@ /* Flash the BIOS/FW on a Copperhead style controller */ /****************************************************************************/ static int -ips_flash_copperhead(ips_ha_t *ha, ips_passthru_t *pt, ips_scb_t *scb){ - int datasize, count; +ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + int datasize, count; - /* Trombone is the only copperhead that can do packet flash, but only - * for firmware. No one said it had to make sence. */ - if(IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE){ - if(ips_usrcmd(ha, pt, scb)) - return IPS_SUCCESS; - else - return IPS_FAILURE; - } - pt->BasicStatus = 0x0B; - pt->ExtendedStatus = 0; - scb->scsi_cmd->result = DID_OK <<16; - /* IF it's OK to Use the "CD BOOT" Flash Buffer, then you can */ - /* avoid allocating a huge buffer per adapter ( which can fail ). */ - if(pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && - pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS){ - pt->BasicStatus = 0; - return ips_flash_bios(ha, pt, scb); - }else if(pt->CoppCP.cmd.flashfw.packet_num == 0){ - if(ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){ - ha->flash_data = ips_FlashData; - ha->flash_order = 7; - ha->flash_datasize = 0; - }else if(!ha->flash_data){ - datasize = pt->CoppCP.cmd.flashfw.total_packets * - pt->CoppCP.cmd.flashfw.count; - for (count = PAGE_SIZE, ha->flash_order = 0; count < datasize; - ha->flash_order++, count <<= 1); - ha->flash_data = (char *)__get_free_pages(GFP_ATOMIC, ha->flash_order); - ha->flash_datasize = 0; - }else - return IPS_FAILURE; - }else{ - if(pt->CoppCP.cmd.flashfw.count + ha->flash_datasize > - (PAGE_SIZE << ha->flash_order)){ - ips_free_flash_copperhead(ha); - printk(KERN_WARNING "failed size sanity check\n"); - return IPS_FAILURE; - } - } - if(!ha->flash_data) - return IPS_FAILURE; - pt->BasicStatus = 0; - memcpy(&ha->flash_data[ha->flash_datasize], pt + 1, - pt->CoppCP.cmd.flashfw.count); - ha->flash_datasize += pt->CoppCP.cmd.flashfw.count; - if(pt->CoppCP.cmd.flashfw.packet_num == - pt->CoppCP.cmd.flashfw.total_packets - 1){ - if(pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE) - return ips_flash_bios(ha, pt, scb); - else if(pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) - return ips_flash_firmware(ha, pt, scb); - } - return IPS_SUCCESS_IMM; + /* Trombone is the only copperhead that can do packet flash, but only + * for firmware. 
No one said it had to make sence. */ + if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) { + if (ips_usrcmd(ha, pt, scb)) + return IPS_SUCCESS; + else + return IPS_FAILURE; + } + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0; + scb->scsi_cmd->result = DID_OK << 16; + /* IF it's OK to Use the "CD BOOT" Flash Buffer, then you can */ + /* avoid allocating a huge buffer per adapter ( which can fail ). */ + if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) { + pt->BasicStatus = 0; + return ips_flash_bios(ha, pt, scb); + } else if (pt->CoppCP.cmd.flashfw.packet_num == 0) { + if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)) { + ha->flash_data = ips_FlashData; + ha->flash_order = 7; + ha->flash_datasize = 0; + } else if (!ha->flash_data) { + datasize = pt->CoppCP.cmd.flashfw.total_packets * + pt->CoppCP.cmd.flashfw.count; + for (count = PAGE_SIZE, ha->flash_order = 0; + count < datasize; ha->flash_order++, count <<= 1) ; + ha->flash_data = + (char *) __get_free_pages(IPS_ATOMIC_GFP, + ha->flash_order); + ha->flash_datasize = 0; + } else + return IPS_FAILURE; + } else { + if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize > + (PAGE_SIZE << ha->flash_order)) { + ips_free_flash_copperhead(ha); + printk(KERN_WARNING "failed size sanity check\n"); + return IPS_FAILURE; + } + } + if (!ha->flash_data) + return IPS_FAILURE; + pt->BasicStatus = 0; + memcpy(&ha->flash_data[ha->flash_datasize], pt + 1, + pt->CoppCP.cmd.flashfw.count); + ha->flash_datasize += pt->CoppCP.cmd.flashfw.count; + if (pt->CoppCP.cmd.flashfw.packet_num == + pt->CoppCP.cmd.flashfw.total_packets - 1) { + if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE) + return ips_flash_bios(ha, pt, scb); + else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) + return ips_flash_firmware(ha, pt, scb); + } + return IPS_SUCCESS_IMM; } /****************************************************************************/ @@ -2473,46 +1773,95 @@ /* flashes the bios of a copperhead adapter */ /****************************************************************************/ static int -ips_flash_bios(ips_ha_t * ha, ips_passthru_t *pt, ips_scb_t *scb){ +ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ - if(pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && - pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS){ - if ((!ha->func.programbios) || (!ha->func.erasebios) || - (!ha->func.verifybios)) - goto error; - if((*ha->func.erasebios)(ha)){ - DEBUG_VAR(1, "(%s%d) flash bios failed - unable to erase flash", - ips_name, ha->host_num); - goto error; - }else if ((*ha->func.programbios)(ha, ha->flash_data + IPS_BIOS_HEADER, - ha->flash_datasize - IPS_BIOS_HEADER, 0 )) { - DEBUG_VAR(1, "(%s%d) flash bios failed - unable to flash", - ips_name, ha->host_num); - goto error; - }else if ((*ha->func.verifybios)(ha, ha->flash_data + IPS_BIOS_HEADER, - ha->flash_datasize - IPS_BIOS_HEADER, 0 )) { - DEBUG_VAR(1, "(%s%d) flash bios failed - unable to verify flash", - ips_name, ha->host_num); - goto error; - } - ips_free_flash_copperhead(ha); - return IPS_SUCCESS_IMM; - }else if(pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && - pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS){ - if(!ha->func.erasebios) - goto error; - if((*ha->func.erasebios)(ha)){ - DEBUG_VAR(1, "(%s%d) flash bios failed - unable to erase flash", - ips_name, ha->host_num); - goto error; - } - return IPS_SUCCESS_IMM; - } -error: - pt->BasicStatus = 0x0B; - pt->ExtendedStatus = 0x00; - 
ips_free_flash_copperhead(ha); - return IPS_FAILURE; + if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) { + if ((!ha->func.programbios) || (!ha->func.erasebios) || + (!ha->func.verifybios)) goto error; + if ((*ha->func.erasebios) (ha)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to erase flash", + ips_name, ha->host_num); + goto error; + } else + if ((*ha->func.programbios) + (ha, ha->flash_data + IPS_BIOS_HEADER, + ha->flash_datasize - IPS_BIOS_HEADER, 0)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to flash", + ips_name, ha->host_num); + goto error; + } else + if ((*ha->func.verifybios) + (ha, ha->flash_data + IPS_BIOS_HEADER, + ha->flash_datasize - IPS_BIOS_HEADER, 0)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to verify flash", + ips_name, ha->host_num); + goto error; + } + ips_free_flash_copperhead(ha); + return IPS_SUCCESS_IMM; + } else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) { + if (!ha->func.erasebios) + goto error; + if ((*ha->func.erasebios) (ha)) { + DEBUG_VAR(1, + "(%s%d) flash bios failed - unable to erase flash", + ips_name, ha->host_num); + goto error; + } + return IPS_SUCCESS_IMM; + } + error: + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0x00; + ips_free_flash_copperhead(ha); + return IPS_FAILURE; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_fill_scb_sg_single */ +/* */ +/* Routine Description: */ +/* Fill in a single scb sg_list element from an address */ +/* return a -1 if a breakup occured */ +/****************************************************************************/ +static inline int +ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr, + ips_scb_t * scb, int indx, unsigned int e_len) +{ + + int ret_val = 0; + + if ((scb->data_len + e_len) > ha->max_xfer) { + e_len = ha->max_xfer - scb->data_len; + scb->breakup = indx; + ++scb->sg_break; + ret_val = -1; + } else { + scb->breakup = 0; + scb->sg_break = 0; + } + if (IPS_USE_ENH_SGLIST(ha)) { + scb->sg_list.enh_list[indx].address_lo = + cpu_to_le32(pci_dma_lo32(busaddr)); + scb->sg_list.enh_list[indx].address_hi = + cpu_to_le32(pci_dma_hi32(busaddr)); + scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len); + } else { + scb->sg_list.std_list[indx].address = + cpu_to_le32(pci_dma_lo32(busaddr)); + scb->sg_list.std_list[indx].length = cpu_to_le32(e_len); + } + + ++scb->sg_len; + scb->data_len += e_len; + return ret_val; } /****************************************************************************/ @@ -2521,49 +1870,51 @@ /* flashes the firmware of a copperhead adapter */ /****************************************************************************/ static int -ips_flash_firmware(ips_ha_t * ha, ips_passthru_t *pt, ips_scb_t *scb){ - IPS_SG_LIST *sg_list; - uint32_t cmd_busaddr; - - if(pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE && - pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW ){ - memset(&pt->CoppCP.cmd, 0, sizeof(IPS_HOST_COMMAND)); - pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD; - pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize); - }else{ - pt->BasicStatus = 0x0B; - pt->ExtendedStatus = 0x00; - ips_free_flash_copperhead(ha); - return IPS_FAILURE; - } - /* Save the S/G list pointer so it doesn't get clobbered */ - sg_list = scb->sg_list; - cmd_busaddr = scb->scb_busaddr; - /* copy in the CP */ - memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof(IPS_IOCTL_CMD)); - /* FIX 
stuff that might be wrong */ - scb->sg_list = sg_list; - scb->scb_busaddr = cmd_busaddr; - scb->bus = scb->scsi_cmd->channel; - scb->target_id = scb->scsi_cmd->target; - scb->lun = scb->scsi_cmd->lun; - scb->sg_len = 0; - scb->data_len = 0; - scb->flags = 0; - scb->op_code = 0; - scb->callback = ipsintr_done; - scb->timeout = ips_cmd_timeout; - - scb->data_len = ha->flash_datasize; - scb->data_busaddr = pci_map_single(ha->pcidev, ha->flash_data, scb->data_len, - IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flashfw.buffer_addr = scb->data_busaddr; - if (pt->TimeOut) - scb->timeout = pt->TimeOut; - scb->scsi_cmd->result = DID_OK <<16; - return IPS_SUCCESS; +ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + IPS_SG_LIST sg_list; + uint32_t cmd_busaddr; + + if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE && + pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) { + memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND)); + pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD; + pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize); + } else { + pt->BasicStatus = 0x0B; + pt->ExtendedStatus = 0x00; + ips_free_flash_copperhead(ha); + return IPS_FAILURE; + } + /* Save the S/G list pointer so it doesn't get clobbered */ + sg_list.list = scb->sg_list.list; + cmd_busaddr = scb->scb_busaddr; + /* copy in the CP */ + memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD)); + /* FIX stuff that might be wrong */ + scb->sg_list.list = sg_list.list; + scb->scb_busaddr = cmd_busaddr; + scb->bus = scb->scsi_cmd->channel; + scb->target_id = scb->scsi_cmd->target; + scb->lun = scb->scsi_cmd->lun; + scb->sg_len = 0; + scb->data_len = 0; + scb->flags = 0; + scb->op_code = 0; + scb->callback = ipsintr_done; + scb->timeout = ips_cmd_timeout; + + scb->data_len = ha->flash_datasize; + scb->data_busaddr = + pci_map_single(ha->pcidev, ha->flash_data, scb->data_len, + IPS_DMA_DIR(scb)); + scb->flags |= IPS_SCB_MAP_SINGLE; + scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr); + if (pt->TimeOut) + scb->timeout = pt->TimeOut; + scb->scsi_cmd->result = DID_OK << 16; + return IPS_SUCCESS; } /****************************************************************************/ @@ -2572,12 +1923,13 @@ /* release the memory resources used to hold the flash image */ /****************************************************************************/ static void -ips_free_flash_copperhead(ips_ha_t *ha){ - if(ha->flash_data == ips_FlashData) - test_and_clear_bit(0, &ips_FlashDataInUse); - else if(ha->flash_data) - free_pages((unsigned long)ha->flash_data, ha->flash_order); - ha->flash_data = NULL; +ips_free_flash_copperhead(ips_ha_t * ha) +{ + if (ha->flash_data == ips_FlashData) + test_and_clear_bit(0, &ips_FlashDataInUse); + else if (ha->flash_data) + free_pages((unsigned long) ha->flash_data, ha->flash_order); + ha->flash_data = NULL; } /****************************************************************************/ @@ -2590,93 +1942,87 @@ /* */ /****************************************************************************/ static int -ips_usrcmd(ips_ha_t *ha, ips_passthru_t *pt, ips_scb_t *scb) { - IPS_SG_LIST *sg_list; - uint32_t cmd_busaddr; - - METHOD_TRACE("ips_usrcmd", 1); - - if ((!scb) || (!pt) || (!ha)) - return (0); - - /* Save the S/G list pointer so it doesn't get clobbered */ - sg_list = scb->sg_list; - cmd_busaddr = scb->scb_busaddr; - /* copy in the CP */ 
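A note on the sg_list handling in ips_flash_firmware and ips_usrcmd: the scb's S/G reference is now addressed through union members (.list here, .std_list/.enh_list in ips_fill_scb_sg_single), so only the pointer member is saved and restored around the memcpy that overlays the caller's command packet. The sketch below imitates that save/copy/restore idiom with simplified stand-in types; the driver's real layouts live in ips.h, so the field shapes here are assumptions:

#include <stdint.h>
#include <string.h>

/* simplified stand-ins -- not the driver's real definitions */
typedef struct { uint32_t address, length; }                 std_sg_t;
typedef struct { uint32_t address_lo, address_hi, length; }  enh_sg_t;

typedef union {
	void     *list;		/* generic handle, what gets saved/restored */
	std_sg_t *std_list;	/* classic 32-bit S/G elements */
	enh_sg_t *enh_list;	/* enhanced (64-bit capable) S/G elements */
} sg_list_t;

struct scb {
	unsigned char cmd[64];	/* command packet overlaid by the IOCTL */
	sg_list_t sg_list;	/* driver-owned, must survive the overlay */
	uint32_t  scb_busaddr;	/* driver-owned bus address of this scb */
};

static void usrcmd_copy(struct scb *scb, const unsigned char *copp_cp, size_t len)
{
	sg_list_t sg_list;
	uint32_t  cmd_busaddr;

	if (len > sizeof(scb->cmd))
		len = sizeof(scb->cmd);

	/* save the driver-owned fields */
	sg_list.list = scb->sg_list.list;
	cmd_busaddr  = scb->scb_busaddr;

	/* overlay the caller-supplied command packet */
	memcpy(scb->cmd, copp_cp, len);

	/* put the driver-owned values back ("FIX stuff that might be wrong") */
	scb->sg_list.list = sg_list.list;
	scb->scb_busaddr  = cmd_busaddr;
}

int main(void)
{
	struct scb scb;
	unsigned char cp[64] = { 0xAA };

	scb.sg_list.list = (void *)&scb;	/* any recognizable value */
	scb.scb_busaddr  = 0x1000;

	usrcmd_copy(&scb, cp, sizeof(cp));

	/* both driver-owned fields survived the overlay */
	return (scb.sg_list.list == (void *)&scb && scb.scb_busaddr == 0x1000) ? 0 : 1;
}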
- memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof(IPS_IOCTL_CMD)); - memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof(IPS_DCDB_TABLE)); - - /* FIX stuff that might be wrong */ - scb->sg_list = sg_list; - scb->scb_busaddr = cmd_busaddr; - scb->bus = scb->scsi_cmd->channel; - scb->target_id = scb->scsi_cmd->target; - scb->lun = scb->scsi_cmd->lun; - scb->sg_len = 0; - scb->data_len = 0; - scb->flags = 0; - scb->op_code = 0; - scb->callback = ipsintr_done; - scb->timeout = ips_cmd_timeout; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - - /* we don't support DCDB/READ/WRITE Scatter Gather */ - if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) || - (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) || - (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG)) - return (0); - - if (pt->CmdBSize) { - if(!scb->scsi_cmd->use_sg){ - scb->data_len = pt->CmdBSize; - scb->data_busaddr = pci_map_single(ha->pcidev, - scb->scsi_cmd->request_buffer + - sizeof(ips_passthru_t), - pt->CmdBSize, - IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - } else { - scb->data_len = pt->CmdBSize; - scb->data_busaddr = pci_map_single(ha->pcidev, - ha->ioctl_data + - sizeof(ips_passthru_t), - pt->CmdBSize, - IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - } - } else { - scb->data_busaddr = 0L; - } - - if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) - scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + - (unsigned long)&scb->dcdb - - (unsigned long)scb); - - if (pt->CmdBSize) { - if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) - scb->dcdb.buffer_pointer = cpu_to_le32(scb->data_busaddr); - else - scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr); - } - - /* set timeouts */ - if (pt->TimeOut) { - scb->timeout = pt->TimeOut; - - if (pt->TimeOut <= 10) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; - else if (pt->TimeOut <= 60) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; - else - scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; - } +ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) +{ + IPS_SG_LIST sg_list; + uint32_t cmd_busaddr; + + METHOD_TRACE("ips_usrcmd", 1); + + if ((!scb) || (!pt) || (!ha)) + return (0); + + /* Save the S/G list pointer so it doesn't get clobbered */ + sg_list.list = scb->sg_list.list; + cmd_busaddr = scb->scb_busaddr; + /* copy in the CP */ + memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD)); + memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE)); + + /* FIX stuff that might be wrong */ + scb->sg_list.list = sg_list.list; + scb->scb_busaddr = cmd_busaddr; + scb->bus = scb->scsi_cmd->channel; + scb->target_id = scb->scsi_cmd->target; + scb->lun = scb->scsi_cmd->lun; + scb->sg_len = 0; + scb->data_len = 0; + scb->flags = 0; + scb->op_code = 0; + scb->callback = ipsintr_done; + scb->timeout = ips_cmd_timeout; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + + /* we don't support DCDB/READ/WRITE Scatter Gather */ + if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) || + (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) || + (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG)) + return (0); + + if (pt->CmdBSize) { + scb->data_len = pt->CmdBSize; + scb->data_busaddr = pci_map_single(ha->pcidev, + ha->ioctl_data + + sizeof (ips_passthru_t), + pt->CmdBSize, + IPS_DMA_DIR(scb)); + scb->flags |= IPS_SCB_MAP_SINGLE; + } else { + scb->data_busaddr = 0L; + } + + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) + scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + + (unsigned long) &scb-> + dcdb - + (unsigned long) scb); + + if (pt->CmdBSize) { + if (scb->cmd.dcdb.op_code 
== IPS_CMD_DCDB) + scb->dcdb.buffer_pointer = + cpu_to_le32(scb->data_busaddr); + else + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->data_busaddr); + } + + /* set timeouts */ + if (pt->TimeOut) { + scb->timeout = pt->TimeOut; + + if (pt->TimeOut <= 10) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; + else if (pt->TimeOut <= 60) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; + else + scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; + } - /* assume success */ - scb->scsi_cmd->result = DID_OK << 16; + /* assume success */ + scb->scsi_cmd->result = DID_OK << 16; - /* success */ - return (1); + /* success */ + return (1); } /****************************************************************************/ @@ -2689,43 +2035,34 @@ /* */ /****************************************************************************/ static void -ips_cleanup_passthru(ips_ha_t *ha, ips_scb_t *scb) { - ips_passthru_t *pt; +ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb) +{ + ips_passthru_t *pt; + + METHOD_TRACE("ips_cleanup_passthru", 1); + + if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) { + DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru", + ips_name, ha->host_num); - METHOD_TRACE("ips_cleanup_passthru", 1); + return; + } + pt = (ips_passthru_t *) ha->ioctl_data; - if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) { - DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru", - ips_name, ha->host_num); - - return ; - } - if(!scb->scsi_cmd->use_sg) - pt = (ips_passthru_t *) scb->scsi_cmd->request_buffer; - else - pt = (ips_passthru_t *) ha->ioctl_data; - - /* Copy data back to the user */ - if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) /* Copy DCDB Back to Caller's Area */ - memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof(IPS_DCDB_TABLE)); - - pt->BasicStatus = scb->basic_status; - pt->ExtendedStatus = scb->extended_status; - pt->AdapterType = ha->ad_type; - - if(ha->device_id == IPS_DEVICEID_COPPERHEAD && - (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD || - scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW)) - ips_free_flash_copperhead(ha); - - if(scb->scsi_cmd->use_sg){ - int i, length = 0; - struct scatterlist *sg = scb->scsi_cmd->request_buffer; - for(i = 0; i < scb->scsi_cmd->use_sg; i++){ - memcpy(IPS_SG_ADDRESS(&sg[i]), &ha->ioctl_data[length], sg[i].length); - length += sg[i].length; - } - } + /* Copy data back to the user */ + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) /* Copy DCDB Back to Caller's Area */ + memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE)); + + pt->BasicStatus = scb->basic_status; + pt->ExtendedStatus = scb->extended_status; + pt->AdapterType = ha->ad_type; + + if (ha->device_id == IPS_DEVICEID_COPPERHEAD && + (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD || + scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW)) + ips_free_flash_copperhead(ha); + + ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize); } /****************************************************************************/ @@ -2738,75 +2075,88 @@ /* */ /****************************************************************************/ static int -ips_host_info(ips_ha_t *ha, char *ptr, off_t offset, int len) { - IPS_INFOSTR info; +ips_host_info(ips_ha_t * ha, char *ptr, off_t offset, int len) +{ + IPS_INFOSTR info; - METHOD_TRACE("ips_host_info", 1); + METHOD_TRACE("ips_host_info", 1); - info.buffer = ptr; - info.length = len; - info.offset = offset; - info.pos = 0; - info.localpos = 0; - - copy_info(&info, "\nIBM ServeRAID General Information:\n\n"); - - if ((le32_to_cpu(ha->nvram->signature) 
== IPS_NVRAM_P5_SIG) && - (le16_to_cpu(ha->nvram->adapter_type) != 0)) - copy_info(&info, "\tController Type : %s\n", ips_adapter_name[ha->ad_type-1]); - else - copy_info(&info, "\tController Type : Unknown\n"); - - if (ha->io_addr) - copy_info(&info, "\tIO region : 0x%lx (%d bytes)\n", - ha->io_addr, ha->io_len); - - if (ha->mem_addr) { - copy_info(&info, "\tMemory region : 0x%lx (%d bytes)\n", - ha->mem_addr, ha->mem_len); - copy_info(&info, "\tShared memory address : 0x%lx\n", ha->mem_ptr); - } - - copy_info(&info, "\tIRQ number : %d\n", ha->irq); - - if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) - copy_info(&info, "\tBIOS Version : %c%c%c%c%c%c%c%c\n", - ha->nvram->bios_high[0], ha->nvram->bios_high[1], - ha->nvram->bios_high[2], ha->nvram->bios_high[3], - ha->nvram->bios_low[0], ha->nvram->bios_low[1], - ha->nvram->bios_low[2], ha->nvram->bios_low[3]); - - copy_info(&info, "\tFirmware Version : %c%c%c%c%c%c%c%c\n", - ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], - ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], - ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], - ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]); - - copy_info(&info, "\tBoot Block Version : %c%c%c%c%c%c%c%c\n", - ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], - ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], - ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], - ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]); - - copy_info(&info, "\tDriver Version : %s%s\n", - IPS_VERSION_HIGH, IPS_VERSION_LOW); - - copy_info(&info, "\tMax Physical Devices : %d\n", - ha->enq->ucMaxPhysicalDevices); - copy_info(&info, "\tMax Active Commands : %d\n", - ha->max_cmds); - copy_info(&info, "\tCurrent Queued Commands : %d\n", - ha->scb_waitlist.count); - copy_info(&info, "\tCurrent Active Commands : %d\n", - ha->scb_activelist.count - ha->num_ioctl); - copy_info(&info, "\tCurrent Queued PT Commands : %d\n", - ha->copp_waitlist.count); - copy_info(&info, "\tCurrent Active PT Commands : %d\n", - ha->num_ioctl); + info.buffer = ptr; + info.length = len; + info.offset = offset; + info.pos = 0; + info.localpos = 0; + + copy_info(&info, "\nIBM ServeRAID General Information:\n\n"); + + if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && + (le16_to_cpu(ha->nvram->adapter_type) != 0)) + copy_info(&info, "\tController Type : %s\n", + ips_adapter_name[ha->ad_type - 1]); + else + copy_info(&info, + "\tController Type : Unknown\n"); + + if (ha->io_addr) + copy_info(&info, + "\tIO region : 0x%lx (%d bytes)\n", + ha->io_addr, ha->io_len); + + if (ha->mem_addr) { + copy_info(&info, + "\tMemory region : 0x%lx (%d bytes)\n", + ha->mem_addr, ha->mem_len); + copy_info(&info, + "\tShared memory address : 0x%lx\n", + ha->mem_ptr); + } + + copy_info(&info, "\tIRQ number : %d\n", ha->irq); + + if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) + copy_info(&info, + "\tBIOS Version : %c%c%c%c%c%c%c%c\n", + ha->nvram->bios_high[0], ha->nvram->bios_high[1], + ha->nvram->bios_high[2], ha->nvram->bios_high[3], + ha->nvram->bios_low[0], ha->nvram->bios_low[1], + ha->nvram->bios_low[2], ha->nvram->bios_low[3]); + + copy_info(&info, + "\tFirmware Version : %c%c%c%c%c%c%c%c\n", + ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], + ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], + ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], + ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]); + + copy_info(&info, + "\tBoot Block Version : %c%c%c%c%c%c%c%c\n", + 
ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], + ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], + ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], + ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]); + + copy_info(&info, "\tDriver Version : %s%s\n", + IPS_VERSION_HIGH, IPS_VERSION_LOW); + + copy_info(&info, "\tDriver Build : %d\n", + IPS_BUILD_IDENT); + + copy_info(&info, "\tMax Physical Devices : %d\n", + ha->enq->ucMaxPhysicalDevices); + copy_info(&info, "\tMax Active Commands : %d\n", + ha->max_cmds); + copy_info(&info, "\tCurrent Queued Commands : %d\n", + ha->scb_waitlist.count); + copy_info(&info, "\tCurrent Active Commands : %d\n", + ha->scb_activelist.count - ha->num_ioctl); + copy_info(&info, "\tCurrent Queued PT Commands : %d\n", + ha->copp_waitlist.count); + copy_info(&info, "\tCurrent Active PT Commands : %d\n", + ha->num_ioctl); - copy_info(&info, "\n"); + copy_info(&info, "\n"); - return (info.localpos); + return (info.localpos); } /****************************************************************************/ @@ -2819,28 +2169,29 @@ /* */ /****************************************************************************/ static void -copy_mem_info(IPS_INFOSTR *info, char *data, int len) { - METHOD_TRACE("copy_mem_info", 1); +copy_mem_info(IPS_INFOSTR * info, char *data, int len) +{ + METHOD_TRACE("copy_mem_info", 1); - if (info->pos + len < info->offset) { - info->pos += len; - return; - } - - if (info->pos < info->offset) { - data += (info->offset - info->pos); - len -= (info->offset - info->pos); - info->pos += (info->offset - info->pos); - } - - if (info->localpos + len > info->length) - len = info->length - info->localpos; - - if (len > 0) { - memcpy(info->buffer + info->localpos, data, len); - info->pos += len; - info->localpos += len; - } + if (info->pos + len < info->offset) { + info->pos += len; + return; + } + + if (info->pos < info->offset) { + data += (info->offset - info->pos); + len -= (info->offset - info->pos); + info->pos += (info->offset - info->pos); + } + + if (info->localpos + len > info->length) + len = info->length - info->localpos; + + if (len > 0) { + memcpy(info->buffer + info->localpos, data, len); + info->pos += len; + info->localpos += len; + } } /****************************************************************************/ @@ -2853,20 +2204,21 @@ /* */ /****************************************************************************/ static int -copy_info(IPS_INFOSTR *info, char *fmt, ...) { - va_list args; - char buf[128]; - int len; +copy_info(IPS_INFOSTR * info, char *fmt, ...) 
+{ + va_list args; + char buf[128]; + int len; - METHOD_TRACE("copy_info", 1); + METHOD_TRACE("copy_info", 1); - va_start(args, fmt); - len = vsprintf(buf, fmt, args); - va_end(args); + va_start(args, fmt); + len = vsprintf(buf, fmt, args); + va_end(args); - copy_mem_info(info, buf, len); + copy_mem_info(info, buf, len); - return (len); + return (len); } /****************************************************************************/ @@ -2879,71 +2231,73 @@ /* */ /****************************************************************************/ static void -ips_identify_controller(ips_ha_t *ha) { - METHOD_TRACE("ips_identify_controller", 1); +ips_identify_controller(ips_ha_t * ha) +{ + METHOD_TRACE("ips_identify_controller", 1); - switch (ha->device_id) { - case IPS_DEVICEID_COPPERHEAD: - if (ha->revision_id <= IPS_REVID_SERVERAID) { - ha->ad_type = IPS_ADTYPE_SERVERAID; - } else if (ha->revision_id == IPS_REVID_SERVERAID2) { - ha->ad_type = IPS_ADTYPE_SERVERAID2; - } else if (ha->revision_id == IPS_REVID_NAVAJO) { - ha->ad_type = IPS_ADTYPE_NAVAJO; - } else if ((ha->revision_id == IPS_REVID_SERVERAID2) && (ha->slot_num == 0)) { - ha->ad_type = IPS_ADTYPE_KIOWA; - } else if ((ha->revision_id >= IPS_REVID_CLARINETP1) && - (ha->revision_id <= IPS_REVID_CLARINETP3)) { - if (ha->enq->ucMaxPhysicalDevices == 15) - ha->ad_type = IPS_ADTYPE_SERVERAID3L; - else - ha->ad_type = IPS_ADTYPE_SERVERAID3; - } else if ((ha->revision_id >= IPS_REVID_TROMBONE32) && - (ha->revision_id <= IPS_REVID_TROMBONE64)) { - ha->ad_type = IPS_ADTYPE_SERVERAID4H; - } - break; - - case IPS_DEVICEID_MORPHEUS: - switch (ha->subdevice_id) { - case IPS_SUBDEVICEID_4L: - ha->ad_type = IPS_ADTYPE_SERVERAID4L; - break; - - case IPS_SUBDEVICEID_4M: - ha->ad_type = IPS_ADTYPE_SERVERAID4M; - break; - - case IPS_SUBDEVICEID_4MX: - ha->ad_type = IPS_ADTYPE_SERVERAID4MX; - break; - - case IPS_SUBDEVICEID_4LX: - ha->ad_type = IPS_ADTYPE_SERVERAID4LX; - break; - - case IPS_SUBDEVICEID_5I2: - ha->ad_type = IPS_ADTYPE_SERVERAID5I2; - break; - - case IPS_SUBDEVICEID_5I1: - ha->ad_type = IPS_ADTYPE_SERVERAID5I1; - break; - } - - break; - - case IPS_DEVICEID_MARCO: - switch (ha->subdevice_id) { - case IPS_SUBDEVICEID_6M: - ha->ad_type = IPS_ADTYPE_SERVERAID6M; - break; - case IPS_SUBDEVICEID_6I: - ha->ad_type = IPS_ADTYPE_SERVERAID6I; - break; - } - break; - } + switch (ha->device_id) { + case IPS_DEVICEID_COPPERHEAD: + if (ha->revision_id <= IPS_REVID_SERVERAID) { + ha->ad_type = IPS_ADTYPE_SERVERAID; + } else if (ha->revision_id == IPS_REVID_SERVERAID2) { + ha->ad_type = IPS_ADTYPE_SERVERAID2; + } else if (ha->revision_id == IPS_REVID_NAVAJO) { + ha->ad_type = IPS_ADTYPE_NAVAJO; + } else if ((ha->revision_id == IPS_REVID_SERVERAID2) + && (ha->slot_num == 0)) { + ha->ad_type = IPS_ADTYPE_KIOWA; + } else if ((ha->revision_id >= IPS_REVID_CLARINETP1) && + (ha->revision_id <= IPS_REVID_CLARINETP3)) { + if (ha->enq->ucMaxPhysicalDevices == 15) + ha->ad_type = IPS_ADTYPE_SERVERAID3L; + else + ha->ad_type = IPS_ADTYPE_SERVERAID3; + } else if ((ha->revision_id >= IPS_REVID_TROMBONE32) && + (ha->revision_id <= IPS_REVID_TROMBONE64)) { + ha->ad_type = IPS_ADTYPE_SERVERAID4H; + } + break; + + case IPS_DEVICEID_MORPHEUS: + switch (ha->subdevice_id) { + case IPS_SUBDEVICEID_4L: + ha->ad_type = IPS_ADTYPE_SERVERAID4L; + break; + + case IPS_SUBDEVICEID_4M: + ha->ad_type = IPS_ADTYPE_SERVERAID4M; + break; + + case IPS_SUBDEVICEID_4MX: + ha->ad_type = IPS_ADTYPE_SERVERAID4MX; + break; + + case IPS_SUBDEVICEID_4LX: + ha->ad_type = IPS_ADTYPE_SERVERAID4LX; + 
break; + + case IPS_SUBDEVICEID_5I2: + ha->ad_type = IPS_ADTYPE_SERVERAID5I2; + break; + + case IPS_SUBDEVICEID_5I1: + ha->ad_type = IPS_ADTYPE_SERVERAID5I1; + break; + } + + break; + + case IPS_DEVICEID_MARCO: + switch (ha->subdevice_id) { + case IPS_SUBDEVICEID_6M: + ha->ad_type = IPS_ADTYPE_SERVERAID6M; + break; + case IPS_SUBDEVICEID_6I: + ha->ad_type = IPS_ADTYPE_SERVERAID6I; + break; + } + break; + } } /****************************************************************************/ @@ -2956,158 +2310,164 @@ /* */ /****************************************************************************/ static void -ips_get_bios_version(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int ret; - uint8_t major; - uint8_t minor; - uint8_t subminor; - uint8_t *buffer; - char hexDigits[] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'}; - - METHOD_TRACE("ips_get_bios_version", 1); - - major = 0; - minor = 0; - - strncpy(ha->bios_version, " ?", 8); - - if (ha->device_id == IPS_DEVICEID_COPPERHEAD) { - if (IPS_USE_MEMIO(ha)) { - /* Memory Mapped I/O */ - - /* test 1st byte */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) - return; - - writel(1, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) - return; - - /* Get Major version */ - writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - major = readb(ha->mem_ptr + IPS_REG_FLDP); - - /* Get Minor version */ - writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - minor = readb(ha->mem_ptr + IPS_REG_FLDP); - - /* Get SubMinor version */ - writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - subminor = readb(ha->mem_ptr + IPS_REG_FLDP); - - } else { - /* Programmed I/O */ - - /* test 1st byte */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) - return ; - - outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) - return ; - - /* Get Major version */ - outl(cpu_to_le32(0x1FF), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - major = inb(ha->io_addr + IPS_REG_FLDP); - - /* Get Minor version */ - outl(cpu_to_le32(0x1FE), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - minor = inb(ha->io_addr + IPS_REG_FLDP); - - /* Get SubMinor version */ - outl(cpu_to_le32(0x1FD), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - subminor = inb(ha->io_addr + IPS_REG_FLDP); - - } - } else { - /* Morpheus Family - Send Command to the card */ - - buffer = kmalloc(0x1000, GFP_ATOMIC); - if (!buffer) - return; - - memset(buffer, 0, 0x1000); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_RW_BIOSFW; - - scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW; - scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.flashfw.type = 1; - scb->cmd.flashfw.direction = 0; - scb->cmd.flashfw.count 
= cpu_to_le32(0x800); - scb->cmd.flashfw.total_packets = 1; - scb->cmd.flashfw.packet_num = 0; - scb->data_len = 0x1000; - scb->data_busaddr = pci_map_single(ha->pcidev, buffer, scb->data_len, - IPS_DMA_DIR(scb)); - scb->cmd.flashfw.buffer_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* issue the command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { - /* Error occurred */ - kfree(buffer); - - return; - } - - if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) { - major = buffer[0x1ff + 0xC0]; /* Offset 0x1ff after the header (0xc0) */ - minor = buffer[0x1fe + 0xC0]; /* Offset 0x1fe after the header (0xc0) */ - subminor = buffer[0x1fd + 0xC0]; /* Offset 0x1fd after the header (0xc0) */ - } else { - return; - } - - kfree(buffer); - } - - ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4]; - ha->bios_version[1] = '.'; - ha->bios_version[2] = hexDigits[major & 0x0F]; - ha->bios_version[3] = hexDigits[subminor]; - ha->bios_version[4] = '.'; - ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4]; - ha->bios_version[6] = hexDigits[minor & 0x0F]; - ha->bios_version[7] = 0; +ips_get_bios_version(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; + uint8_t major; + uint8_t minor; + uint8_t subminor; + uint8_t *buffer; + char hexDigits[] = + { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', + 'D', 'E', 'F' }; + + METHOD_TRACE("ips_get_bios_version", 1); + + major = 0; + minor = 0; + + strncpy(ha->bios_version, " ?", 8); + + if (ha->device_id == IPS_DEVICEID_COPPERHEAD) { + if (IPS_USE_MEMIO(ha)) { + /* Memory Mapped I/O */ + + /* test 1st byte */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) + return; + + writel(1, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) + return; + + /* Get Major version */ + writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + major = readb(ha->mem_ptr + IPS_REG_FLDP); + + /* Get Minor version */ + writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + minor = readb(ha->mem_ptr + IPS_REG_FLDP); + + /* Get SubMinor version */ + writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + subminor = readb(ha->mem_ptr + IPS_REG_FLDP); + + } else { + /* Programmed I/O */ + + /* test 1st byte */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) + return; + + outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) + return; + + /* Get Major version */ + outl(cpu_to_le32(0x1FF), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + major = inb(ha->io_addr + IPS_REG_FLDP); + + /* Get Minor version */ + outl(cpu_to_le32(0x1FE), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + minor = inb(ha->io_addr + IPS_REG_FLDP); + + /* Get SubMinor version */ + outl(cpu_to_le32(0x1FD), ha->io_addr 
+ IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + subminor = inb(ha->io_addr + IPS_REG_FLDP); + + } + } else { + /* Morpheus Family - Send Command to the card */ + + buffer = kmalloc(0x1000, IPS_ATOMIC_GFP); + if (!buffer) + return; + + memset(buffer, 0, 0x1000); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_RW_BIOSFW; + + scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW; + scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.flashfw.type = 1; + scb->cmd.flashfw.direction = 0; + scb->cmd.flashfw.count = cpu_to_le32(0x800); + scb->cmd.flashfw.total_packets = 1; + scb->cmd.flashfw.packet_num = 0; + scb->data_len = 0x1000; + scb->data_busaddr = + pci_map_single(ha->pcidev, buffer, scb->data_len, + IPS_DMA_DIR(scb)); + scb->cmd.flashfw.buffer_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* issue the command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { + /* Error occurred */ + kfree(buffer); + + return; + } + + if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) { + major = buffer[0x1ff + 0xC0]; /* Offset 0x1ff after the header (0xc0) */ + minor = buffer[0x1fe + 0xC0]; /* Offset 0x1fe after the header (0xc0) */ + subminor = buffer[0x1fd + 0xC0]; /* Offset 0x1fd after the header (0xc0) */ + } else { + kfree(buffer); + return; + } + + kfree(buffer); + } + + ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4]; + ha->bios_version[1] = '.'; + ha->bios_version[2] = hexDigits[major & 0x0F]; + ha->bios_version[3] = hexDigits[subminor]; + ha->bios_version[4] = '.'; + ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4]; + ha->bios_version[6] = hexDigits[minor & 0x0F]; + ha->bios_version[7] = 0; } /****************************************************************************/ @@ -3122,129 +2482,134 @@ /* */ /****************************************************************************/ static int -ips_hainit(ips_ha_t *ha) { - int i; - struct timeval tv; - - METHOD_TRACE("ips_hainit", 1); - - if (!ha) - return (0); - - if (ha->func.statinit) - (*ha->func.statinit)(ha); - - if (ha->func.enableint) - (*ha->func.enableint)(ha); - - /* Send FFDC */ - ha->reset_count = 1; - do_gettimeofday(&tv); - ha->last_ffdc = tv.tv_sec; - ips_ffdc_reset(ha, IPS_INTR_IORL); - - if (!ips_read_config(ha, IPS_INTR_IORL)) { - printk(KERN_WARNING "(%s%d) unable to read config from controller.\n", - ips_name, ha->host_num); - - return (0); - } /* end if */ - - if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) { - printk(KERN_WARNING "(%s%d) unable to read controller status.\n", - ips_name, ha->host_num); - - return (0); - } - - /* Identify this controller */ - ips_identify_controller(ha); - - if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) { - printk(KERN_WARNING "(%s%d) unable to read subsystem parameters.\n", - ips_name, ha->host_num); - - return (0); - } - - /* write nvram user page 5 */ - if (!ips_write_driver_status(ha, IPS_INTR_IORL)) { - printk(KERN_WARNING "(%s%d) unable to write driver info to controller.\n", - ips_name, ha->host_num); - - return (0); - } - - /* If there are Logical Drives and a Reset Occurred, then an EraseStripeLock is Needed */ - if ( (ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1) ) - ips_clear_adapter(ha, IPS_INTR_IORL); - - /* set limits on SID, LUN, BUS */ - ha->ntargets = IPS_MAX_TARGETS + 1; - ha->nlun = 
1; - ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1; - - switch (ha->conf->logical_drive[0].ucStripeSize) { - case 4: - ha->max_xfer = 0x10000; - break; - - case 5: - ha->max_xfer = 0x20000; - break; - - case 6: - ha->max_xfer = 0x40000; - break; - - case 7: - default: - ha->max_xfer = 0x80000; - break; - } - - /* setup max concurrent commands */ - if (le32_to_cpu(ha->subsys->param[4]) & 0x1) { - /* Use the new method */ - ha->max_cmds = ha->enq->ucConcurrentCmdCount; - } else { - /* use the old method */ - switch (ha->conf->logical_drive[0].ucStripeSize) { - case 4: - ha->max_cmds = 32; - break; - - case 5: - ha->max_cmds = 16; - break; - - case 6: - ha->max_cmds = 8; - break; - - case 7: - default: - ha->max_cmds = 4; - break; - } - } - - /* Limit the Active Commands on a Lite Adapter */ - if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) || - (ha->ad_type == IPS_ADTYPE_SERVERAID4L) || - (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) { - if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds)) - ha->max_cmds = MaxLiteCmds; - } - - /* set controller IDs */ - ha->ha_id[0] = IPS_ADAPTER_ID; - for (i = 1; i < ha->nbus; i++) { - ha->ha_id[i] = ha->conf->init_id[i-1] & 0x1f; - ha->dcdb_active[i-1] = 0; - } +ips_hainit(ips_ha_t * ha) +{ + int i; + struct timeval tv; + + METHOD_TRACE("ips_hainit", 1); - return (1); + if (!ha) + return (0); + + if (ha->func.statinit) + (*ha->func.statinit) (ha); + + if (ha->func.enableint) + (*ha->func.enableint) (ha); + + /* Send FFDC */ + ha->reset_count = 1; + do_gettimeofday(&tv); + ha->last_ffdc = tv.tv_sec; + ips_ffdc_reset(ha, IPS_INTR_IORL); + + if (!ips_read_config(ha, IPS_INTR_IORL)) { + printk(KERN_WARNING + "(%s%d) unable to read config from controller.\n", + ips_name, ha->host_num); + + return (0); + } + /* end if */ + if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) { + printk(KERN_WARNING + "(%s%d) unable to read controller status.\n", ips_name, + ha->host_num); + + return (0); + } + + /* Identify this controller */ + ips_identify_controller(ha); + + if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) { + printk(KERN_WARNING + "(%s%d) unable to read subsystem parameters.\n", + ips_name, ha->host_num); + + return (0); + } + + /* write nvram user page 5 */ + if (!ips_write_driver_status(ha, IPS_INTR_IORL)) { + printk(KERN_WARNING + "(%s%d) unable to write driver info to controller.\n", + ips_name, ha->host_num); + + return (0); + } + + /* If there are Logical Drives and a Reset Occurred, then an EraseStripeLock is Needed */ + if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1)) + ips_clear_adapter(ha, IPS_INTR_IORL); + + /* set limits on SID, LUN, BUS */ + ha->ntargets = IPS_MAX_TARGETS + 1; + ha->nlun = 1; + ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1; + + switch (ha->conf->logical_drive[0].ucStripeSize) { + case 4: + ha->max_xfer = 0x10000; + break; + + case 5: + ha->max_xfer = 0x20000; + break; + + case 6: + ha->max_xfer = 0x40000; + break; + + case 7: + default: + ha->max_xfer = 0x80000; + break; + } + + /* setup max concurrent commands */ + if (le32_to_cpu(ha->subsys->param[4]) & 0x1) { + /* Use the new method */ + ha->max_cmds = ha->enq->ucConcurrentCmdCount; + } else { + /* use the old method */ + switch (ha->conf->logical_drive[0].ucStripeSize) { + case 4: + ha->max_cmds = 32; + break; + + case 5: + ha->max_cmds = 16; + break; + + case 6: + ha->max_cmds = 8; + break; + + case 7: + default: + ha->max_cmds = 4; + break; + } + } + + /* Limit the Active Commands on a Lite Adapter */ + if ((ha->ad_type == 
IPS_ADTYPE_SERVERAID3L) || + (ha->ad_type == IPS_ADTYPE_SERVERAID4L) || + (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) { + if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds)) + ha->max_cmds = MaxLiteCmds; + } + + /* set controller IDs */ + ha->ha_id[0] = IPS_ADAPTER_ID; + for (i = 1; i < ha->nbus; i++) { + ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f; + ha->dcdb_active[i - 1] = 0; + } + + return (1); } /****************************************************************************/ @@ -3257,275 +2622,238 @@ /* */ /****************************************************************************/ static void -ips_next(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - Scsi_Cmnd *SC; - Scsi_Cmnd *p; - Scsi_Cmnd *q; - ips_copp_wait_item_t *item; - int ret; - unsigned long cpu_flags = 0; - struct Scsi_Host *host; - METHOD_TRACE("ips_next", 1); - - if (!ha) - return ; - host = ips_sh[ha->host_num]; - /* - * Block access to the queue function so - * this command won't time out - */ - if(intr == IPS_INTR_ON) - IPS_LOCK_SAVE(host->host_lock, cpu_flags); - - if ((ha->subsys->param[3] & 0x300000) && ( ha->scb_activelist.count == 0 )) { - struct timeval tv; - - do_gettimeofday(&tv); - - if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) { - ha->last_ffdc = tv.tv_sec; - ips_ffdc_time(ha); - } - } - - /* - * Send passthru commands - * These have priority over normal I/O - * but shouldn't affect performance too much - * since we limit the number that can be active - * on the card at any one time - */ - while ((ha->num_ioctl < IPS_MAX_IOCTL) && - (ha->copp_waitlist.head) && - (scb = ips_getscb(ha))) { - - item = ips_removeq_copp_head(&ha->copp_waitlist); - ha->num_ioctl++; - if(intr == IPS_INTR_ON) - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); - scb->scsi_cmd = item->scsi_cmd; - kfree(item); - - ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr); - - if(intr == IPS_INTR_ON) - IPS_LOCK_SAVE(host->host_lock, cpu_flags); - switch (ret) { - case IPS_FAILURE: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - ips_freescb(ha, scb); - break; - case IPS_SUCCESS_IMM: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_OK << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - ips_freescb(ha, scb); - break; - default: - break; - } /* end case */ - - if (ret != IPS_SUCCESS) { - ha->num_ioctl--; - continue; - } - - ret = ips_send_cmd(ha, scb); - - if (ret == IPS_SUCCESS) - ips_putq_scb_head(&ha->scb_activelist, scb); - else - ha->num_ioctl--; - - switch(ret) { - case IPS_FAILURE: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - } - - ips_freescb(ha, scb); - break; - case IPS_SUCCESS_IMM: - ips_freescb(ha, scb); - break; - default: - break; - } /* end case */ - - } - - - /* - * Send "Normal" I/O commands - */ - - p = ha->scb_waitlist.head; - while ((p) && (scb = ips_getscb(ha))) { - if ((p->channel > 0) && (ha->dcdb_active[p->channel-1] & (1 << p->target))) { - ips_freescb(ha, scb); - p = (Scsi_Cmnd *) p->host_scribble; - continue; - } - - q = p; - SC = ips_removeq_wait(&ha->scb_waitlist, q); - - if(intr == IPS_INTR_ON) - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); /* Unlock HA after command is taken off queue */ - - SC->result = DID_OK; - SC->host_scribble = NULL; - - memset(SC->sense_buffer, 0, sizeof(SC->sense_buffer)); - - scb->target_id = SC->target; - scb->lun = SC->lun; - scb->bus = SC->channel; - scb->scsi_cmd = SC; - scb->breakup = 0; - scb->data_len = 0; - scb->callback = ipsintr_done; - scb->timeout = ips_cmd_timeout; - 
memset(&scb->cmd, 0, 16); - - /* copy in the CDB */ - memcpy(scb->cdb, SC->cmnd, SC->cmd_len); - - /* Now handle the data buffer */ - if (SC->use_sg) { - struct scatterlist *sg; - int i; - - sg = SC->request_buffer; - scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg, - scsi_to_pci_dma_dir(SC->sc_data_direction)); - scb->flags |= IPS_SCB_MAP_SG; - if (scb->sg_count == 1) { - if (sg_dma_len(sg) > ha->max_xfer) { - scb->breakup = 1; - scb->data_len = ha->max_xfer; - } else - scb->data_len = sg_dma_len(sg); - - scb->dcdb.transfer_length = scb->data_len; - scb->data_busaddr = sg_dma_address(sg); - scb->sg_len = 0; - } else { - /* Check for the first Element being bigger than MAX_XFER */ - if (sg_dma_len(&sg[0]) > ha->max_xfer) { - scb->sg_list[0].address = cpu_to_le32(sg_dma_address(&sg[0])); - scb->sg_list[0].length = ha->max_xfer; - scb->data_len = ha->max_xfer; - scb->breakup = 0; - scb->sg_break=1; - scb->sg_len = 1; - } else { - for (i = 0; i < scb->sg_count; i++) { - scb->sg_list[i].address = cpu_to_le32(sg_dma_address(&sg[i])); - scb->sg_list[i].length = cpu_to_le32(sg_dma_len(&sg[i])); - - if (scb->data_len + sg_dma_len(&sg[i]) > ha->max_xfer) { - /* - * Data Breakup required - */ - scb->breakup = i; - break; - } - - scb->data_len += sg_dma_len(&sg[i]); - } - - if (!scb->breakup) - scb->sg_len = scb->sg_count; - else - scb->sg_len = scb->breakup; - } - - scb->dcdb.transfer_length = scb->data_len; - scb->data_busaddr = scb->sg_busaddr; - } - } else { - if (SC->request_bufflen) { - if (SC->request_bufflen > ha->max_xfer) { - /* - * Data breakup required - */ - scb->breakup = 1; - scb->data_len = ha->max_xfer; - } else { - scb->data_len = SC->request_bufflen; - } - - scb->dcdb.transfer_length = scb->data_len; - scb->data_busaddr = pci_map_single(ha->pcidev, SC->request_buffer, - scb->data_len, - scsi_to_pci_dma_dir(SC->sc_data_direction)); - scb->flags |= IPS_SCB_MAP_SINGLE; - scb->sg_len = 0; - } else { - scb->data_busaddr = 0L; - scb->sg_len = 0; - scb->data_len = 0; - scb->dcdb.transfer_length = 0; - } - - } - - scb->dcdb.cmd_attribute = ips_command_direction[scb->scsi_cmd->cmnd[0]]; - - if (!scb->dcdb.cmd_attribute & 0x3) - scb->dcdb.transfer_length = 0; - - if (scb->data_len >= IPS_MAX_XFER) { - scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; - scb->dcdb.transfer_length = 0; - } - if(intr == IPS_INTR_ON) - IPS_LOCK_SAVE(host->host_lock, cpu_flags); - - ret = ips_send_cmd(ha, scb); - - switch(ret) { - case IPS_SUCCESS: - ips_putq_scb_head(&ha->scb_activelist, scb); - break; - case IPS_FAILURE: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - if (scb->bus) - ha->dcdb_active[scb->bus-1] &= ~(1 << scb->target_id); - - ips_freescb(ha, scb); - break; - case IPS_SUCCESS_IMM: - if (scb->scsi_cmd) - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - - if (scb->bus) - ha->dcdb_active[scb->bus-1] &= ~(1 << scb->target_id); - - ips_freescb(ha, scb); - break; - default: - break; - } /* end case */ +ips_next(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + Scsi_Cmnd *SC; + Scsi_Cmnd *p; + Scsi_Cmnd *q; + ips_copp_wait_item_t *item; + int ret; + unsigned long cpu_flags = 0; + struct Scsi_Host *host; + METHOD_TRACE("ips_next", 1); + + if (!ha) + return; + host = ips_sh[ha->host_num]; + /* + * Block access to the queue function so + * this command won't time out + */ + if (intr == IPS_INTR_ON) + IPS_LOCK_SAVE(host->host_lock, cpu_flags); + + if ((ha->subsys->param[3] & 0x300000) + && (ha->scb_activelist.count == 0)) { + struct timeval tv; + + 
do_gettimeofday(&tv); + + if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) { + ha->last_ffdc = tv.tv_sec; + ips_ffdc_time(ha); + } + } + + /* + * Send passthru commands + * These have priority over normal I/O + * but shouldn't affect performance too much + * since we limit the number that can be active + * on the card at any one time + */ + while ((ha->num_ioctl < IPS_MAX_IOCTL) && + (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) { + + item = ips_removeq_copp_head(&ha->copp_waitlist); + ha->num_ioctl++; + if (intr == IPS_INTR_ON) + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); + scb->scsi_cmd = item->scsi_cmd; + kfree(item); + + ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr); + + if (intr == IPS_INTR_ON) + IPS_LOCK_SAVE(host->host_lock, cpu_flags); + switch (ret) { + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_OK << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + if (ret != IPS_SUCCESS) { + ha->num_ioctl--; + continue; + } + + ret = ips_send_cmd(ha, scb); + + if (ret == IPS_SUCCESS) + ips_putq_scb_head(&ha->scb_activelist, scb); + else + ha->num_ioctl--; + + switch (ret) { + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + } + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + } + + /* + * Send "Normal" I/O commands + */ + + p = ha->scb_waitlist.head; + while ((p) && (scb = ips_getscb(ha))) { + if ((p->channel > 0) + && (ha->dcdb_active[p->channel - 1] & (1 << p->target))) { + ips_freescb(ha, scb); + p = (Scsi_Cmnd *) p->host_scribble; + continue; + } + + q = p; + SC = ips_removeq_wait(&ha->scb_waitlist, q); + + if (intr == IPS_INTR_ON) + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); /* Unlock HA after command is taken off queue */ + + SC->result = DID_OK; + SC->host_scribble = NULL; + + memset(SC->sense_buffer, 0, sizeof (SC->sense_buffer)); + + scb->target_id = SC->target; + scb->lun = SC->lun; + scb->bus = SC->channel; + scb->scsi_cmd = SC; + scb->breakup = 0; + scb->data_len = 0; + scb->callback = ipsintr_done; + scb->timeout = ips_cmd_timeout; + memset(&scb->cmd, 0, 16); + + /* copy in the CDB */ + memcpy(scb->cdb, SC->cmnd, SC->cmd_len); + + /* Now handle the data buffer */ + if (SC->use_sg) { + struct scatterlist *sg; + int i; + + sg = SC->request_buffer; + scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg, + scsi_to_pci_dma_dir(SC-> + sc_data_direction)); + scb->flags |= IPS_SCB_MAP_SG; + for (i = 0; i < scb->sg_count; i++) { + if (ips_fill_scb_sg_single + (ha, sg_dma_address(&sg[i]), scb, i, + sg_dma_len(&sg[i])) < 0) + break; + } + scb->dcdb.transfer_length = scb->data_len; + } else { + if (SC->request_bufflen) { + scb->data_busaddr = + pci_map_single(ha->pcidev, + SC->request_buffer, + SC->request_bufflen, + scsi_to_pci_dma_dir(SC-> + sc_data_direction)); + scb->flags |= IPS_SCB_MAP_SINGLE; + ips_fill_scb_sg_single(ha, scb->data_busaddr, + scb, 0, + SC->request_bufflen); + scb->dcdb.transfer_length = scb->data_len; + } else { + scb->data_busaddr = 0L; + scb->sg_len = 0; + scb->data_len = 0; + scb->dcdb.transfer_length = 0; + } + + } + + scb->dcdb.cmd_attribute = + ips_command_direction[scb->scsi_cmd->cmnd[0]]; + + if (!(scb->dcdb.cmd_attribute & 0x3)) + 
scb->dcdb.transfer_length = 0; + + if (scb->data_len >= IPS_MAX_XFER) { + scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; + scb->dcdb.transfer_length = 0; + } + if (intr == IPS_INTR_ON) + IPS_LOCK_SAVE(host->host_lock, cpu_flags); + + ret = ips_send_cmd(ha, scb); + + switch (ret) { + case IPS_SUCCESS: + ips_putq_scb_head(&ha->scb_activelist, scb); + break; + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + if (scb->bus) + ha->dcdb_active[scb->bus - 1] &= + ~(1 << scb->target_id); + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + if (scb->scsi_cmd) + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + + if (scb->bus) + ha->dcdb_active[scb->bus - 1] &= + ~(1 << scb->target_id); + + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ - p = (Scsi_Cmnd *) p->host_scribble; + p = (Scsi_Cmnd *) p->host_scribble; - } /* end while */ + } /* end while */ - if(intr == IPS_INTR_ON) - IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); + if (intr == IPS_INTR_ON) + IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); } /****************************************************************************/ @@ -3540,19 +2868,20 @@ /* */ /****************************************************************************/ static inline void -ips_putq_scb_head(ips_scb_queue_t *queue, ips_scb_t *item) { - METHOD_TRACE("ips_putq_scb_head", 1); +ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item) +{ + METHOD_TRACE("ips_putq_scb_head", 1); - if (!item) - return ; + if (!item) + return; - item->q_next = queue->head; - queue->head = item; + item->q_next = queue->head; + queue->head = item; - if (!queue->tail) - queue->tail = item; + if (!queue->tail) + queue->tail = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3567,23 +2896,24 @@ /* */ /****************************************************************************/ static inline void -ips_putq_scb_tail(ips_scb_queue_t *queue, ips_scb_t *item) { - METHOD_TRACE("ips_putq_scb_tail", 1); +ips_putq_scb_tail(ips_scb_queue_t * queue, ips_scb_t * item) +{ + METHOD_TRACE("ips_putq_scb_tail", 1); - if (!item) - return ; + if (!item) + return; - item->q_next = NULL; + item->q_next = NULL; - if (queue->tail) - queue->tail->q_next = item; + if (queue->tail) + queue->tail->q_next = item; - queue->tail = item; + queue->tail = item; - if (!queue->head) - queue->head = item; + if (!queue->head) + queue->head = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3598,26 +2928,27 @@ /* */ /****************************************************************************/ static inline ips_scb_t * -ips_removeq_scb_head(ips_scb_queue_t *queue) { - ips_scb_t *item; +ips_removeq_scb_head(ips_scb_queue_t * queue) +{ + ips_scb_t *item; - METHOD_TRACE("ips_removeq_scb_head", 1); + METHOD_TRACE("ips_removeq_scb_head", 1); - item = queue->head; + item = queue->head; - if (!item) { - return (NULL); - } + if (!item) { + return (NULL); + } - queue->head = item->q_next; - item->q_next = NULL; + queue->head = item->q_next; + item->q_next = NULL; - if (queue->tail == item) - queue->tail = NULL; + if (queue->tail == item) + queue->tail = NULL; - queue->count--; + queue->count--; - return (item); + return (item); } /****************************************************************************/ @@ -3632,37 +2963,38 @@ /* */ 
/****************************************************************************/ static inline ips_scb_t * -ips_removeq_scb(ips_scb_queue_t *queue, ips_scb_t *item) { - ips_scb_t *p; +ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item) +{ + ips_scb_t *p; - METHOD_TRACE("ips_removeq_scb", 1); + METHOD_TRACE("ips_removeq_scb", 1); - if (!item) - return (NULL); + if (!item) + return (NULL); - if (item == queue->head) { - return (ips_removeq_scb_head(queue)); - } + if (item == queue->head) { + return (ips_removeq_scb_head(queue)); + } - p = queue->head; + p = queue->head; - while ((p) && (item != p->q_next)) - p = p->q_next; + while ((p) && (item != p->q_next)) + p = p->q_next; - if (p) { - /* found a match */ - p->q_next = item->q_next; + if (p) { + /* found a match */ + p->q_next = item->q_next; - if (!item->q_next) - queue->tail = p; + if (!item->q_next) + queue->tail = p; - item->q_next = NULL; - queue->count--; + item->q_next = NULL; + queue->count--; - return (item); - } + return (item); + } - return (NULL); + return (NULL); } /****************************************************************************/ @@ -3677,19 +3009,20 @@ /* */ /****************************************************************************/ static inline void -ips_putq_wait_head(ips_wait_queue_t *queue, Scsi_Cmnd *item) { - METHOD_TRACE("ips_putq_wait_head", 1); +ips_putq_wait_head(ips_wait_queue_t * queue, Scsi_Cmnd * item) +{ + METHOD_TRACE("ips_putq_wait_head", 1); - if (!item) - return ; + if (!item) + return; - item->host_scribble = (char *) queue->head; - queue->head = item; + item->host_scribble = (char *) queue->head; + queue->head = item; - if (!queue->tail) - queue->tail = item; + if (!queue->tail) + queue->tail = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3704,23 +3037,24 @@ /* */ /****************************************************************************/ static inline void -ips_putq_wait_tail(ips_wait_queue_t *queue, Scsi_Cmnd *item) { - METHOD_TRACE("ips_putq_wait_tail", 1); +ips_putq_wait_tail(ips_wait_queue_t * queue, Scsi_Cmnd * item) +{ + METHOD_TRACE("ips_putq_wait_tail", 1); - if (!item) - return ; + if (!item) + return; - item->host_scribble = NULL; + item->host_scribble = NULL; - if (queue->tail) - queue->tail->host_scribble = (char *)item; + if (queue->tail) + queue->tail->host_scribble = (char *) item; - queue->tail = item; + queue->tail = item; - if (!queue->head) - queue->head = item; + if (!queue->head) + queue->head = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3735,26 +3069,27 @@ /* */ /****************************************************************************/ static inline Scsi_Cmnd * -ips_removeq_wait_head(ips_wait_queue_t *queue) { - Scsi_Cmnd *item; +ips_removeq_wait_head(ips_wait_queue_t * queue) +{ + Scsi_Cmnd *item; - METHOD_TRACE("ips_removeq_wait_head", 1); + METHOD_TRACE("ips_removeq_wait_head", 1); - item = queue->head; + item = queue->head; - if (!item) { - return (NULL); - } + if (!item) { + return (NULL); + } - queue->head = (Scsi_Cmnd *) item->host_scribble; - item->host_scribble = NULL; + queue->head = (Scsi_Cmnd *) item->host_scribble; + item->host_scribble = NULL; - if (queue->tail == item) - queue->tail = NULL; + if (queue->tail == item) + queue->tail = NULL; - queue->count--; + queue->count--; - return (item); + return (item); } 
/****************************************************************************/ @@ -3769,37 +3104,38 @@ /* */ /****************************************************************************/ static inline Scsi_Cmnd * -ips_removeq_wait(ips_wait_queue_t *queue, Scsi_Cmnd *item) { - Scsi_Cmnd *p; +ips_removeq_wait(ips_wait_queue_t * queue, Scsi_Cmnd * item) +{ + Scsi_Cmnd *p; - METHOD_TRACE("ips_removeq_wait", 1); + METHOD_TRACE("ips_removeq_wait", 1); - if (!item) - return (NULL); + if (!item) + return (NULL); - if (item == queue->head) { - return (ips_removeq_wait_head(queue)); - } + if (item == queue->head) { + return (ips_removeq_wait_head(queue)); + } - p = queue->head; + p = queue->head; - while ((p) && (item != (Scsi_Cmnd *) p->host_scribble)) - p = (Scsi_Cmnd *) p->host_scribble; + while ((p) && (item != (Scsi_Cmnd *) p->host_scribble)) + p = (Scsi_Cmnd *) p->host_scribble; - if (p) { - /* found a match */ - p->host_scribble = item->host_scribble; + if (p) { + /* found a match */ + p->host_scribble = item->host_scribble; - if (!item->host_scribble) - queue->tail = p; + if (!item->host_scribble) + queue->tail = p; - item->host_scribble = NULL; - queue->count--; + item->host_scribble = NULL; + queue->count--; - return (item); - } + return (item); + } - return (NULL); + return (NULL); } /****************************************************************************/ @@ -3814,19 +3150,20 @@ /* */ /****************************************************************************/ static inline void -ips_putq_copp_head(ips_copp_queue_t *queue, ips_copp_wait_item_t *item) { - METHOD_TRACE("ips_putq_copp_head", 1); +ips_putq_copp_head(ips_copp_queue_t * queue, ips_copp_wait_item_t * item) +{ + METHOD_TRACE("ips_putq_copp_head", 1); - if (!item) - return ; + if (!item) + return; - item->next = queue->head; - queue->head = item; + item->next = queue->head; + queue->head = item; - if (!queue->tail) - queue->tail = item; + if (!queue->tail) + queue->tail = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3841,23 +3178,24 @@ /* */ /****************************************************************************/ static inline void -ips_putq_copp_tail(ips_copp_queue_t *queue, ips_copp_wait_item_t *item) { - METHOD_TRACE("ips_putq_copp_tail", 1); +ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item) +{ + METHOD_TRACE("ips_putq_copp_tail", 1); - if (!item) - return ; + if (!item) + return; - item->next = NULL; + item->next = NULL; - if (queue->tail) - queue->tail->next = item; + if (queue->tail) + queue->tail->next = item; - queue->tail = item; + queue->tail = item; - if (!queue->head) - queue->head = item; + if (!queue->head) + queue->head = item; - queue->count++; + queue->count++; } /****************************************************************************/ @@ -3872,26 +3210,27 @@ /* */ /****************************************************************************/ static inline ips_copp_wait_item_t * -ips_removeq_copp_head(ips_copp_queue_t *queue) { - ips_copp_wait_item_t *item; +ips_removeq_copp_head(ips_copp_queue_t * queue) +{ + ips_copp_wait_item_t *item; - METHOD_TRACE("ips_removeq_copp_head", 1); + METHOD_TRACE("ips_removeq_copp_head", 1); - item = queue->head; + item = queue->head; - if (!item) { - return (NULL); - } + if (!item) { + return (NULL); + } - queue->head = item->next; - item->next = NULL; + queue->head = item->next; + item->next = NULL; - if (queue->tail == item) - queue->tail = NULL; + if 
(queue->tail == item) + queue->tail = NULL; - queue->count--; + queue->count--; - return (item); + return (item); } /****************************************************************************/ @@ -3906,37 +3245,38 @@ /* */ /****************************************************************************/ static inline ips_copp_wait_item_t * -ips_removeq_copp(ips_copp_queue_t *queue, ips_copp_wait_item_t *item) { - ips_copp_wait_item_t *p; +ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item) +{ + ips_copp_wait_item_t *p; - METHOD_TRACE("ips_removeq_copp", 1); + METHOD_TRACE("ips_removeq_copp", 1); - if (!item) - return (NULL); + if (!item) + return (NULL); - if (item == queue->head) { - return (ips_removeq_copp_head(queue)); - } + if (item == queue->head) { + return (ips_removeq_copp_head(queue)); + } - p = queue->head; + p = queue->head; - while ((p) && (item != p->next)) - p = p->next; + while ((p) && (item != p->next)) + p = p->next; - if (p) { - /* found a match */ - p->next = item->next; + if (p) { + /* found a match */ + p->next = item->next; - if (!item->next) - queue->tail = p; + if (!item->next) + queue->tail = p; - item->next = NULL; - queue->count--; + item->next = NULL; + queue->count--; - return (item); - } + return (item); + } - return (NULL); + return (NULL); } /****************************************************************************/ @@ -3949,16 +3289,16 @@ /* */ /****************************************************************************/ static void -ipsintr_blocking(ips_ha_t *ha, ips_scb_t *scb) { - METHOD_TRACE("ipsintr_blocking", 2); +ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb) +{ + METHOD_TRACE("ipsintr_blocking", 2); - ips_freescb(ha, scb); - if ((ha->waitflag == TRUE) && - (ha->cmd_in_progress == scb->cdb[0])) { - ha->waitflag = FALSE; + ips_freescb(ha, scb); + if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) { + ha->waitflag = FALSE; - return ; - } + return; + } } /****************************************************************************/ @@ -3971,25 +3311,27 @@ /* */ /****************************************************************************/ static void -ipsintr_done(ips_ha_t *ha, ips_scb_t *scb) { - METHOD_TRACE("ipsintr_done", 2); +ipsintr_done(ips_ha_t * ha, ips_scb_t * scb) +{ + METHOD_TRACE("ipsintr_done", 2); - if (!scb) { - printk(KERN_WARNING "(%s%d) Spurious interrupt; scb NULL.\n", - ips_name, ha->host_num); + if (!scb) { + printk(KERN_WARNING "(%s%d) Spurious interrupt; scb NULL.\n", + ips_name, ha->host_num); - return ; - } + return; + } - if (scb->scsi_cmd == NULL) { - /* unexpected interrupt */ - printk(KERN_WARNING "(%s%d) Spurious interrupt; scsi_cmd not set.\n", - ips_name, ha->host_num); + if (scb->scsi_cmd == NULL) { + /* unexpected interrupt */ + printk(KERN_WARNING + "(%s%d) Spurious interrupt; scsi_cmd not set.\n", + ips_name, ha->host_num); - return; - } + return; + } - ips_done(ha, scb); + ips_done(ha, scb); } /****************************************************************************/ @@ -4002,180 +3344,118 @@ /* ASSUMED to be called form within the request lock */ /****************************************************************************/ static void -ips_done(ips_ha_t *ha, ips_scb_t *scb) { - int ret; +ips_done(ips_ha_t * ha, ips_scb_t * scb) +{ + int ret; - METHOD_TRACE("ips_done", 1); + METHOD_TRACE("ips_done", 1); - if (!scb) - return ; + if (!scb) + return; - if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) { - ips_cleanup_passthru(ha, scb); - ha->num_ioctl--; - } else 
{ - /* - * Check to see if this command had too much - * data and had to be broke up. If so, queue - * the rest of the data and continue. - */ - if ((scb->breakup) || (scb->sg_break)) { - /* we had a data breakup */ - uint8_t bk_save; - - bk_save = scb->breakup; - scb->breakup = 0; - - if (scb->sg_count) { - /* S/G request */ - struct scatterlist *sg; - int i; - - sg = scb->scsi_cmd->request_buffer; - - if (scb->sg_count == 1) { - if (sg_dma_len(sg) - (bk_save * ha->max_xfer) > ha->max_xfer) { - /* Further breakup required */ - scb->data_len = ha->max_xfer; - scb->data_busaddr = sg_dma_address(sg) + (bk_save * ha->max_xfer); - scb->breakup = bk_save + 1; - } else { - scb->data_len = sg_dma_len(sg) - (bk_save * ha->max_xfer); - scb->data_busaddr = sg_dma_address(sg) + (bk_save * ha->max_xfer); - } - - scb->dcdb.transfer_length = scb->data_len; - scb->sg_len = 0; - } else { - /* We're here because there was MORE than one s/g unit. */ - /* bk_save points to which sg unit to look at */ - /* sg_break points to how far through this unit we are */ - /* NOTE: We will not move from one sg to another here, */ - /* just finish the one we are in. Not the most */ - /* efficient, but it keeps it from getting too hacky */ - - /* IF sg_break is non-zero, then just work on this current sg piece, */ - /* pointed to by bk_save */ - if (scb->sg_break) { - scb->sg_len = 1; - scb->sg_list[0].address = sg_dma_address(&sg[bk_save]) - + ha->max_xfer*scb->sg_break; - if (ha->max_xfer > sg_dma_len(&sg[bk_save]) - ha->max_xfer * scb->sg_break) - scb->sg_list[0].length = sg_dma_len(&sg[bk_save]) - ha->max_xfer * scb->sg_break; - else - scb->sg_list[0].length = ha->max_xfer; - scb->sg_break++; /* MUST GO HERE for math below to work */ - scb->data_len = scb->sg_list[0].length;; - - if (sg_dma_len(&sg[bk_save]) <= ha->max_xfer * scb->sg_break ) { - scb->sg_break = 0; /* No more work in this unit */ - if (( bk_save + 1 ) >= scb->sg_count) - scb->breakup = 0; - else - scb->breakup = bk_save + 1; - } - } else { - /* ( sg_break == 0 ), so this is our first look at a new sg piece */ - if (sg_dma_len(&sg[bk_save]) > ha->max_xfer) { - scb->sg_list[0].address = sg_dma_address(&sg[bk_save]); - scb->sg_list[0].length = ha->max_xfer; - scb->breakup = bk_save; - scb->sg_break = 1; - scb->data_len = ha->max_xfer; - scb->sg_len = 1; - } else { - /* OK, the next sg is a short one, so loop until full */ - scb->data_len = 0; - scb->sg_len = 0; - scb->sg_break = 0; - /* We're only doing full units here */ - for (i = bk_save; i < scb->sg_count; i++) { - scb->sg_list[i - bk_save].address = sg_dma_address(&sg[i]); - scb->sg_list[i - bk_save].length = cpu_to_le32(sg_dma_len(&sg[i])); - if (scb->data_len + sg_dma_len(&sg[i]) > ha->max_xfer) { - scb->breakup = i; /* sneaky, if not more work, than breakup is 0 */ - break; - } - scb->data_len += sg_dma_len(&sg[i]); - scb->sg_len++; /* only if we didn't get too big */ - } - } - } - - /* Also, we need to be sure we don't queue work ( breakup != 0 ) - if no more sg units for next time */ - scb->dcdb.transfer_length = scb->data_len; - scb->data_busaddr = scb->sg_busaddr; - } - - } else { - /* Non S/G Request */ - pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len, - IPS_DMA_DIR(scb)); - if ((scb->scsi_cmd->request_bufflen - (bk_save * ha->max_xfer)) > ha->max_xfer) { - /* Further breakup required */ - scb->data_len = ha->max_xfer; - scb->data_busaddr = pci_map_single(ha->pcidev, - scb->scsi_cmd->request_buffer + - (bk_save * ha->max_xfer), - scb->data_len, IPS_DMA_DIR(scb)); - scb->breakup 
= bk_save + 1; - } else { - scb->data_len = scb->scsi_cmd->request_bufflen - (bk_save * ha->max_xfer); - scb->data_busaddr = pci_map_single(ha->pcidev, - scb->scsi_cmd->request_buffer + - (bk_save * ha->max_xfer), - scb->data_len, IPS_DMA_DIR(scb)); - } - - scb->dcdb.transfer_length = scb->data_len; - scb->sg_len = 0; - } - - scb->dcdb.cmd_attribute |= ips_command_direction[scb->scsi_cmd->cmnd[0]]; - - if (!scb->dcdb.cmd_attribute & 0x3) - scb->dcdb.transfer_length = 0; - - if (scb->data_len >= IPS_MAX_XFER) { - scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; - scb->dcdb.transfer_length = 0; - } - - ret = ips_send_cmd(ha, scb); - - switch(ret) { - case IPS_FAILURE: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - ips_freescb(ha, scb); - break; - case IPS_SUCCESS_IMM: - if (scb->scsi_cmd) { - scb->scsi_cmd->result = DID_ERROR << 16; - scb->scsi_cmd->scsi_done(scb->scsi_cmd); - } - - ips_freescb(ha, scb); - break; - default: - break; - } /* end case */ - - return ; - } - } /* end if passthru */ - - if (scb->bus) { - ha->dcdb_active[scb->bus-1] &= ~(1 << scb->target_id); - } + if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) { + ips_cleanup_passthru(ha, scb); + ha->num_ioctl--; + } else { + /* + * Check to see if this command had too much + * data and had to be broke up. If so, queue + * the rest of the data and continue. + */ + if ((scb->breakup) || (scb->sg_break)) { + /* we had a data breakup */ + scb->data_len = 0; + + if (scb->sg_count) { + /* S/G request */ + struct scatterlist *sg; + int ips_sg_index = 0; + int sg_dma_index; + + sg = scb->scsi_cmd->request_buffer; + + /* Spin forward to last dma chunk */ + sg_dma_index = scb->breakup; + + /* Take care of possible partial on last chunk */ + ips_fill_scb_sg_single(ha, + sg_dma_address(&sg + [sg_dma_index]), + scb, ips_sg_index++, + sg_dma_len(&sg + [sg_dma_index])); + + for (; sg_dma_index < scb->sg_count; + sg_dma_index++) { + if (ips_fill_scb_sg_single + (ha, + sg_dma_address(&sg[sg_dma_index]), + scb, ips_sg_index++, + sg_dma_len(&sg[sg_dma_index])) < 0) + break; + + } + + } else { + /* Non S/G Request */ + (void) ips_fill_scb_sg_single(ha, + scb-> + data_busaddr + + (scb->sg_break * + ha->max_xfer), + scb, 0, + scb->scsi_cmd-> + request_bufflen - + (scb->sg_break * + ha->max_xfer)); + } + + scb->dcdb.transfer_length = scb->data_len; + scb->dcdb.cmd_attribute |= + ips_command_direction[scb->scsi_cmd->cmnd[0]]; + + if (!(scb->dcdb.cmd_attribute & 0x3)) + scb->dcdb.transfer_length = 0; + + if (scb->data_len >= IPS_MAX_XFER) { + scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; + scb->dcdb.transfer_length = 0; + } + + ret = ips_send_cmd(ha, scb); + + switch (ret) { + case IPS_FAILURE: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + case IPS_SUCCESS_IMM: + if (scb->scsi_cmd) { + scb->scsi_cmd->result = DID_ERROR << 16; + scb->scsi_cmd->scsi_done(scb->scsi_cmd); + } + + ips_freescb(ha, scb); + break; + default: + break; + } /* end case */ + + return; + } + } /* end if passthru */ + + if (scb->bus) { + ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id); + } - scb->scsi_cmd->scsi_done(scb->scsi_cmd); + scb->scsi_cmd->scsi_done(scb->scsi_cmd); - ips_freescb(ha, scb); + ips_freescb(ha, scb); } /****************************************************************************/ @@ -4188,118 +3468,130 @@ /* */ 
/****************************************************************************/ static int -ips_map_status(ips_ha_t *ha, ips_scb_t *scb, ips_stat_t *sp) { - int errcode; - int device_error; - uint32_t transfer_len; - IPS_DCDB_TABLE_TAPE *tapeDCDB; - - METHOD_TRACE("ips_map_status", 1); - - if (scb->bus) { - DEBUG_VAR(2, "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x", - ips_name, - ha->host_num, - scb->scsi_cmd->channel, - scb->scsi_cmd->target, - scb->scsi_cmd->lun, - scb->basic_status, - scb->extended_status, - scb->extended_status == IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0, - scb->extended_status == IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0, - scb->extended_status == IPS_ERR_CKCOND ? scb->dcdb.sense_info[13] : 0); - } - - /* default driver error */ - errcode = DID_ERROR; - device_error = 0; - - switch (scb->basic_status & IPS_GSC_STATUS_MASK) { - case IPS_CMD_TIMEOUT: - errcode = DID_TIME_OUT; - break; - - case IPS_INVAL_OPCO: - case IPS_INVAL_CMD_BLK: - case IPS_INVAL_PARM_BLK: - case IPS_LD_ERROR: - case IPS_CMD_CMPLT_WERROR: - break; - - case IPS_PHYS_DRV_ERROR: - switch (scb->extended_status) { - case IPS_ERR_SEL_TO: - if (scb->bus) - errcode = DID_NO_CONNECT; - - break; - - case IPS_ERR_OU_RUN: - if ( ( scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB ) || - ( scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG ) ) { - tapeDCDB = ( IPS_DCDB_TABLE_TAPE * ) &scb->dcdb; - transfer_len = tapeDCDB->transfer_length; - } else { - transfer_len = ( uint32_t ) scb->dcdb.transfer_length; - } - - if ((scb->bus) && (transfer_len < scb->data_len)) { - /* Underrun - set default to no error */ - errcode = DID_OK; - - /* Restrict access to physical DASD */ - if ((scb->scsi_cmd->cmnd[0] == INQUIRY) && - ((((char *) scb->scsi_cmd->buffer)[0] & 0x1f) == TYPE_DISK)) { - /* underflow -- no error */ - /* restrict access to physical DASD */ - errcode = DID_TIME_OUT; - break; - } - } else - errcode = DID_ERROR; - - break; - - case IPS_ERR_RECOVERY: - /* don't fail recovered errors */ - if (scb->bus) - errcode = DID_OK; - - break; - - case IPS_ERR_HOST_RESET: - case IPS_ERR_DEV_RESET: - errcode = DID_RESET; - break; - - case IPS_ERR_CKCOND: - if (scb->bus) { - if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) || - (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)) { - tapeDCDB = (IPS_DCDB_TABLE_TAPE *) &scb->dcdb; - memcpy(scb->scsi_cmd->sense_buffer, tapeDCDB->sense_info, - sizeof(scb->scsi_cmd->sense_buffer)); - } else { - memcpy(scb->scsi_cmd->sense_buffer, scb->dcdb.sense_info, - sizeof(scb->scsi_cmd->sense_buffer)); - } - device_error = 2; /* check condition */ - } - - errcode = DID_OK; - - break; - - default: - errcode = DID_ERROR; - break; +ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp) +{ + int errcode; + int device_error; + uint32_t transfer_len; + IPS_DCDB_TABLE_TAPE *tapeDCDB; + + METHOD_TRACE("ips_map_status", 1); + + if (scb->bus) { + DEBUG_VAR(2, + "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x", + ips_name, ha->host_num, scb->scsi_cmd->channel, + scb->scsi_cmd->target, scb->scsi_cmd->lun, + scb->basic_status, scb->extended_status, + scb->extended_status == + IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0, + scb->extended_status == + IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0, + scb->extended_status == + IPS_ERR_CKCOND ? 
scb->dcdb.sense_info[13] : 0); + } + + /* default driver error */ + errcode = DID_ERROR; + device_error = 0; + + switch (scb->basic_status & IPS_GSC_STATUS_MASK) { + case IPS_CMD_TIMEOUT: + errcode = DID_TIME_OUT; + break; + + case IPS_INVAL_OPCO: + case IPS_INVAL_CMD_BLK: + case IPS_INVAL_PARM_BLK: + case IPS_LD_ERROR: + case IPS_CMD_CMPLT_WERROR: + break; + + case IPS_PHYS_DRV_ERROR: + switch (scb->extended_status) { + case IPS_ERR_SEL_TO: + if (scb->bus) + errcode = DID_NO_CONNECT; + + break; + + case IPS_ERR_OU_RUN: + if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) || + (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)) { + tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; + transfer_len = tapeDCDB->transfer_length; + } else { + transfer_len = + (uint32_t) scb->dcdb.transfer_length; + } + + if ((scb->bus) && (transfer_len < scb->data_len)) { + /* Underrun - set default to no error */ + errcode = DID_OK; + + /* Restrict access to physical DASD */ + if ((scb->scsi_cmd->cmnd[0] == INQUIRY) && + ((((char + *) scb->scsi_cmd->buffer)[0] & 0x1f) == + TYPE_DISK)) { + /* underflow -- no error */ + /* restrict access to physical DASD */ + errcode = DID_TIME_OUT; + break; + } + } else + errcode = DID_ERROR; + + break; + + case IPS_ERR_RECOVERY: + /* don't fail recovered errors */ + if (scb->bus) + errcode = DID_OK; + + break; + + case IPS_ERR_HOST_RESET: + case IPS_ERR_DEV_RESET: + errcode = DID_RESET; + break; + + case IPS_ERR_CKCOND: + if (scb->bus) { + if ( + (scb->cmd.dcdb.op_code == + IPS_CMD_EXTENDED_DCDB) + || (scb->cmd.dcdb.op_code == + IPS_CMD_EXTENDED_DCDB_SG)) { + tapeDCDB = + (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; + memcpy(scb->scsi_cmd->sense_buffer, + tapeDCDB->sense_info, + sizeof (scb->scsi_cmd-> + sense_buffer)); + } else { + memcpy(scb->scsi_cmd->sense_buffer, + scb->dcdb.sense_info, + sizeof (scb->scsi_cmd-> + sense_buffer)); + } + device_error = 2; /* check condition */ + } + + errcode = DID_OK; + + break; + + default: + errcode = DID_ERROR; + break; - } /* end switch */ - } /* end switch */ + } /* end switch */ + } /* end switch */ - scb->scsi_cmd->result = device_error | (errcode << 16); + scb->scsi_cmd->result = device_error | (errcode << 16); - return (1); + return (1); } /****************************************************************************/ @@ -4314,25 +3606,90 @@ /* actually need to wait. 
*/ /****************************************************************************/ static int -ips_send_wait(ips_ha_t *ha, ips_scb_t *scb, int timeout, int intr) { - int ret; +ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr) +{ + int ret; - METHOD_TRACE("ips_send_wait", 1); + METHOD_TRACE("ips_send_wait", 1); - if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */ - ha->waitflag = TRUE; - ha->cmd_in_progress = scb->cdb[0]; - } - scb->callback = ipsintr_blocking; - ret = ips_send_cmd(ha, scb); + if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */ + ha->waitflag = TRUE; + ha->cmd_in_progress = scb->cdb[0]; + } + scb->callback = ipsintr_blocking; + ret = ips_send_cmd(ha, scb); - if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM)) - return (ret); + if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM)) + return (ret); - if (intr != IPS_FFDC) /* Don't Wait around if this is a Time Stamp */ - ret = ips_wait(ha, timeout, intr); + if (intr != IPS_FFDC) /* Don't Wait around if this is a Time Stamp */ + ret = ips_wait(ha, timeout, intr); - return (ret); + return (ret); +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_scmd_buf_write */ +/* */ +/* Routine Description: */ +/* Write data to Scsi_Cmnd request_buffer at proper offsets */ +/****************************************************************************/ +static void +ips_scmd_buf_write(Scsi_Cmnd * scmd, void *data, unsigned + int count) +{ + if (scmd->use_sg) { + int i; + unsigned int min_cnt, xfer_cnt; + char *cdata = (char *) data; + struct scatterlist *sg = scmd->request_buffer; + for (i = 0, xfer_cnt = 0; + (i < scmd->use_sg) && (xfer_cnt < count); i++) { + if (!IPS_SG_ADDRESS(&sg[i])) + return; + min_cnt = min(count - xfer_cnt, sg[i].length); + memcpy(IPS_SG_ADDRESS(&sg[i]), &cdata[xfer_cnt], + min_cnt); + xfer_cnt += min_cnt; + } + + } else { + unsigned int min_cnt = min(count, scmd->request_bufflen); + memcpy(scmd->request_buffer, data, min_cnt); + } +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_scmd_buf_read */ +/* */ +/* Routine Description: */ +/* Copy data from a Scsi_Cmnd to a new, linear buffer */ +/****************************************************************************/ +static void +ips_scmd_buf_read(Scsi_Cmnd * scmd, void *data, unsigned + int count) +{ + if (scmd->use_sg) { + int i; + unsigned int min_cnt, xfer_cnt; + char *cdata = (char *) data; + struct scatterlist *sg = scmd->request_buffer; + for (i = 0, xfer_cnt = 0; + (i < scmd->use_sg) && (xfer_cnt < count); i++) { + if (!IPS_SG_ADDRESS(&sg[i])) + return; + min_cnt = min(count - xfer_cnt, sg[i].length); + memcpy(&cdata[xfer_cnt], IPS_SG_ADDRESS(&sg[i]), + min_cnt); + xfer_cnt += min_cnt; + } + + } else { + unsigned int min_cnt = min(count, scmd->request_bufflen); + memcpy(data, scmd->request_buffer, min_cnt); + } } /****************************************************************************/ @@ -4345,288 +3702,402 @@ /* */ /****************************************************************************/ static int -ips_send_cmd(ips_ha_t *ha, ips_scb_t *scb) { - int ret; - char *sp; - int device_error; - IPS_DCDB_TABLE_TAPE *tapeDCDB; - int TimeOut; - - METHOD_TRACE("ips_send_cmd", 1); - - ret = IPS_SUCCESS; - - if (!scb->scsi_cmd) { - /* internal command */ - - if (scb->bus > 0) { - /* Controller commands can't be issued */ - /* to real devices -- fail them */ - if ((ha->waitflag 
== TRUE) && - (ha->cmd_in_progress == scb->cdb[0])) { - ha->waitflag = FALSE; - } - - return (1); - } - } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) { - /* command to logical bus -- interpret */ - if(ha->scb_waitlist.count + ha->scb_activelist.count > 32) - mod_timer(&scb->scsi_cmd->eh_timeout, jiffies + 120 * HZ); - ret = IPS_SUCCESS_IMM; - - switch (scb->scsi_cmd->cmnd[0]) { - case ALLOW_MEDIUM_REMOVAL: - case REZERO_UNIT: - case ERASE: - case WRITE_FILEMARKS: - case SPACE: - scb->scsi_cmd->result = DID_ERROR << 16; - break; - - case START_STOP: - scb->scsi_cmd->result = DID_OK << 16; - - case TEST_UNIT_READY: - case INQUIRY: - scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; - scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.logical_info.reserved = 0; - scb->cmd.logical_info.reserved2 = 0; - scb->data_len = sizeof(ha->adapt->logical_drive_info); - scb->data_busaddr = pci_map_single(ha->pcidev, - &ha->adapt->logical_drive_info, - scb->data_len, IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - scb->cmd.logical_info.buffer_addr = scb->data_busaddr; - ret = IPS_SUCCESS; - break; - - case REQUEST_SENSE: - ips_reqsen(ha, scb); - scb->scsi_cmd->result = DID_OK << 16; - break; - - case READ_6: - case WRITE_6: - if (!scb->sg_len) { - scb->cmd.basic_io.op_code = - (scb->scsi_cmd->cmnd[0] == READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE; - } else { - scb->cmd.basic_io.op_code = - (scb->scsi_cmd->cmnd[0] == READ_6) ? IPS_CMD_READ_SG : IPS_CMD_WRITE_SG; - } - - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.basic_io.log_drv = scb->target_id; - scb->cmd.basic_io.sg_count = scb->sg_len; - scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr); - - if (scb->cmd.basic_io.lba) - scb->cmd.basic_io.lba = cpu_to_le32(le32_to_cpu(scb->cmd.basic_io.lba) + - le16_to_cpu(scb->cmd.basic_io.sector_count)); - else - scb->cmd.basic_io.lba = (((scb->scsi_cmd->cmnd[1] & 0x1f) << 16) | - (scb->scsi_cmd->cmnd[2] << 8) | - (scb->scsi_cmd->cmnd[3])); - - scb->cmd.basic_io.sector_count = cpu_to_le16(scb->data_len / IPS_BLKSIZE); - - if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) - scb->cmd.basic_io.sector_count = cpu_to_le16(256); - - scb->cmd.basic_io.reserved = 0; - ret = IPS_SUCCESS; - break; - - case READ_10: - case WRITE_10: - if (!scb->sg_len) { - scb->cmd.basic_io.op_code = - (scb->scsi_cmd->cmnd[0] == READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE; - } else { - scb->cmd.basic_io.op_code = - (scb->scsi_cmd->cmnd[0] == READ_10) ? 
IPS_CMD_READ_SG : IPS_CMD_WRITE_SG; - } - - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.basic_io.log_drv = scb->target_id; - scb->cmd.basic_io.sg_count = scb->sg_len; - scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr); - - if (scb->cmd.basic_io.lba) - scb->cmd.basic_io.lba = cpu_to_le32(le32_to_cpu(scb->cmd.basic_io.lba) + - le16_to_cpu(scb->cmd.basic_io.sector_count)); - else - scb->cmd.basic_io.lba = ((scb->scsi_cmd->cmnd[2] << 24) | - (scb->scsi_cmd->cmnd[3] << 16) | - (scb->scsi_cmd->cmnd[4] << 8) | - scb->scsi_cmd->cmnd[5]); - - scb->cmd.basic_io.sector_count = cpu_to_le16(scb->data_len / IPS_BLKSIZE); - - scb->cmd.basic_io.reserved = 0; - - if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) { - /* - * This is a null condition - * we don't have to do anything - * so just return - */ - scb->scsi_cmd->result = DID_OK << 16; - } else - ret = IPS_SUCCESS; - - break; - - case RESERVE: - case RELEASE: - scb->scsi_cmd->result = DID_OK << 16; - break; - - case MODE_SENSE: - scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->data_len = sizeof(*ha->enq); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.basic_io.sg_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - ret = IPS_SUCCESS; - break; - - case READ_CAPACITY: - scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; - scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.logical_info.reserved = 0; - scb->cmd.logical_info.reserved2 = 0; - scb->cmd.logical_info.reserved3 = 0; - scb->data_len = sizeof(ha->adapt->logical_drive_info); - scb->data_busaddr = pci_map_single(ha->pcidev, - &ha->adapt->logical_drive_info, - scb->data_len, IPS_DMA_DIR(scb)); - scb->flags |= IPS_SCB_MAP_SINGLE; - scb->cmd.logical_info.buffer_addr = scb->data_busaddr; - ret = IPS_SUCCESS; - break; - - case SEND_DIAGNOSTIC: - case REASSIGN_BLOCKS: - case FORMAT_UNIT: - case SEEK_10: - case VERIFY: - case READ_DEFECT_DATA: - case READ_BUFFER: - case WRITE_BUFFER: - scb->scsi_cmd->result = DID_OK << 16; - break; - - default: - /* Set the Return Info to appear like the Command was */ - /* attempted, a Check Condition occurred, and Sense */ - /* Data indicating an Invalid CDB OpCode is returned. */ - sp = (char *) scb->scsi_cmd->sense_buffer; - memset(sp, 0, sizeof(scb->scsi_cmd->sense_buffer)); - - sp[0] = 0x70; /* Error Code */ - sp[2] = ILLEGAL_REQUEST; /* Sense Key 5 Illegal Req. 
*/ - sp[7] = 0x0A; /* Additional Sense Length */ - sp[12] = 0x20; /* ASC = Invalid OpCode */ - sp[13] = 0x00; /* ASCQ */ - - device_error = 2; /* Indicate Check Condition */ - scb->scsi_cmd->result = device_error | (DID_OK << 16); - break; - } /* end switch */ - } /* end if */ - - if (ret == IPS_SUCCESS_IMM) - return (ret); - - /* setup DCDB */ - if (scb->bus > 0) { - if (!scb->sg_len) - scb->cmd.dcdb.op_code = IPS_CMD_DCDB; - else - scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG; - - /* If we already know the Device is Not there, no need to attempt a Command */ - /* This also protects an NT FailOver Controller from getting CDB's sent to it */ - if ( ha->conf->dev[scb->bus-1][scb->target_id].ucState == 0 ) { - scb->scsi_cmd->result = DID_NO_CONNECT << 16; - return (IPS_SUCCESS_IMM); - } - - ha->dcdb_active[scb->bus-1] |= (1 << scb->target_id); - scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + - (unsigned long)&scb->dcdb - - (unsigned long)scb); - scb->cmd.dcdb.reserved = 0; - scb->cmd.dcdb.reserved2 = 0; - scb->cmd.dcdb.reserved3 = 0; - - TimeOut = scb->scsi_cmd->timeout_per_command; - - if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ - if (!scb->sg_len) - scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB; - else - scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB_SG; - - tapeDCDB = (IPS_DCDB_TABLE_TAPE *) &scb->dcdb; /* Use Same Data Area as Old DCDB Struct */ - tapeDCDB->device_address = ((scb->bus - 1) << 4) | scb->target_id; - tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED; - tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K; /* Always Turn OFF 64K Size Flag */ - - if (TimeOut) { - if (TimeOut < ( 10 * HZ )) - tapeDCDB->cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ - else if (TimeOut < (60 * HZ)) - tapeDCDB->cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ - else if (TimeOut < (1200 * HZ)) - tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ - } - - tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len; - tapeDCDB->reserved_for_LUN = 0; - tapeDCDB->transfer_length = scb->data_len; - tapeDCDB->buffer_pointer = cpu_to_le32(scb->data_busaddr); - tapeDCDB->sg_count = scb->sg_len; - tapeDCDB->sense_length = sizeof(tapeDCDB->sense_info); - tapeDCDB->scsi_status = 0; - tapeDCDB->reserved = 0; - memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd, scb->scsi_cmd->cmd_len); - } else { - scb->dcdb.device_address = ((scb->bus - 1) << 4) | scb->target_id; - scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED; - - if (TimeOut) { - if (TimeOut < (10 * HZ)) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ - else if (TimeOut < (60 * HZ)) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ - else if (TimeOut < (1200 * HZ)) - scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ - } - - scb->dcdb.transfer_length = scb->data_len; - if ( scb->dcdb.cmd_attribute & IPS_TRANSFER64K ) - scb->dcdb.transfer_length = 0; - scb->dcdb.buffer_pointer = cpu_to_le32(scb->data_busaddr); - scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len; - scb->dcdb.sense_length = sizeof(scb->dcdb.sense_info); - scb->dcdb.sg_count = scb->sg_len; - scb->dcdb.reserved = 0; - memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd, scb->scsi_cmd->cmd_len); - scb->dcdb.scsi_status = 0; - scb->dcdb.reserved2[0] = 0; - scb->dcdb.reserved2[1] = 0; - scb->dcdb.reserved2[2] = 0; - } - } +ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) +{ + int ret; + char *sp; + int device_error; + 
IPS_DCDB_TABLE_TAPE *tapeDCDB; + int TimeOut; + + METHOD_TRACE("ips_send_cmd", 1); + + ret = IPS_SUCCESS; + + if (!scb->scsi_cmd) { + /* internal command */ + + if (scb->bus > 0) { + /* Controller commands can't be issued */ + /* to real devices -- fail them */ + if ((ha->waitflag == TRUE) && + (ha->cmd_in_progress == scb->cdb[0])) { + ha->waitflag = FALSE; + } + + return (1); + } + } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) { + /* command to logical bus -- interpret */ + ret = IPS_SUCCESS_IMM; + + switch (scb->scsi_cmd->cmnd[0]) { + case ALLOW_MEDIUM_REMOVAL: + case REZERO_UNIT: + case ERASE: + case WRITE_FILEMARKS: + case SPACE: + scb->scsi_cmd->result = DID_ERROR << 16; + break; + + case START_STOP: + scb->scsi_cmd->result = DID_OK << 16; + + case TEST_UNIT_READY: + case INQUIRY: + if (scb->target_id == IPS_ADAPTER_ID) { + /* + * Either we have a TUR + * or we have a SCSI inquiry + */ + if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY) + scb->scsi_cmd->result = DID_OK << 16; + + if (scb->scsi_cmd->cmnd[0] == INQUIRY) { + IPS_SCSI_INQ_DATA inquiry; + + memset(&inquiry, 0, + sizeof (IPS_SCSI_INQ_DATA)); + + inquiry.DeviceType = + IPS_SCSI_INQ_TYPE_PROCESSOR; + inquiry.DeviceTypeQualifier = + IPS_SCSI_INQ_LU_CONNECTED; + inquiry.Version = IPS_SCSI_INQ_REV2; + inquiry.ResponseDataFormat = + IPS_SCSI_INQ_RD_REV2; + inquiry.AdditionalLength = 31; + inquiry.Flags[0] = + IPS_SCSI_INQ_Address16; + inquiry.Flags[1] = + IPS_SCSI_INQ_WBus16 | + IPS_SCSI_INQ_Sync; + strncpy(inquiry.VendorId, "IBM ", + 8); + strncpy(inquiry.ProductId, + "SERVERAID ", 16); + strncpy(inquiry.ProductRevisionLevel, + "1.00", 4); + + ips_scmd_buf_write(scb->scsi_cmd, + &inquiry, + sizeof (inquiry)); + + scb->scsi_cmd->result = DID_OK << 16; + } + } else { + scb->cmd.logical_info.op_code = + IPS_CMD_GET_LD_INFO; + scb->cmd.logical_info.command_id = + IPS_COMMAND_ID(ha, scb); + scb->cmd.logical_info.reserved = 0; + scb->cmd.logical_info.reserved2 = 0; + scb->data_len = + sizeof (ha->adapt->logical_drive_info); + scb->data_busaddr = + pci_map_single(ha->pcidev, + &ha->adapt-> + logical_drive_info, + scb->data_len, + IPS_DMA_DIR(scb)); + scb->flags |= IPS_SCB_MAP_SINGLE; + scb->cmd.logical_info.buffer_addr = + scb->data_busaddr; + ret = IPS_SUCCESS; + } + + break; + + case REQUEST_SENSE: + ips_reqsen(ha, scb); + scb->scsi_cmd->result = DID_OK << 16; + break; + + case READ_6: + case WRITE_6: + if (!scb->sg_len) { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE; + scb->cmd.basic_io.enhanced_sg = 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->data_busaddr); + } else { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_6) ? IPS_CMD_READ_SG : + IPS_CMD_WRITE_SG; + scb->cmd.basic_io.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->sg_busaddr); + } + + scb->cmd.basic_io.segment_4G = 0; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.log_drv = scb->target_id; + scb->cmd.basic_io.sg_count = scb->sg_len; + + if (scb->cmd.basic_io.lba) + scb->cmd.basic_io.lba = + cpu_to_le32(le32_to_cpu + (scb->cmd.basic_io.lba) + + le16_to_cpu(scb->cmd.basic_io. 
+ sector_count)); + else + scb->cmd.basic_io.lba = + (((scb-> + scsi_cmd->cmnd[1] & 0x1f) << 16) | (scb-> + scsi_cmd-> + cmnd + [2] + << 8) + | (scb->scsi_cmd->cmnd[3])); + + scb->cmd.basic_io.sector_count = + cpu_to_le16(scb->data_len / IPS_BLKSIZE); + + if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) + scb->cmd.basic_io.sector_count = + cpu_to_le16(256); + + ret = IPS_SUCCESS; + break; + + case READ_10: + case WRITE_10: + if (!scb->sg_len) { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE; + scb->cmd.basic_io.enhanced_sg = 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->data_busaddr); + } else { + scb->cmd.basic_io.op_code = + (scb->scsi_cmd->cmnd[0] == + READ_10) ? IPS_CMD_READ_SG : + IPS_CMD_WRITE_SG; + scb->cmd.basic_io.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; + scb->cmd.basic_io.sg_addr = + cpu_to_le32(scb->sg_busaddr); + } + + scb->cmd.basic_io.segment_4G = 0; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.log_drv = scb->target_id; + scb->cmd.basic_io.sg_count = scb->sg_len; + + if (scb->cmd.basic_io.lba) + scb->cmd.basic_io.lba = + cpu_to_le32(le32_to_cpu + (scb->cmd.basic_io.lba) + + le16_to_cpu(scb->cmd.basic_io. + sector_count)); + else + scb->cmd.basic_io.lba = + ((scb-> + scsi_cmd->cmnd[2] << 24) | (scb-> + scsi_cmd-> + cmnd[3] << 16) + | (scb->scsi_cmd->cmnd[4] << 8) | scb-> + scsi_cmd->cmnd[5]); + + scb->cmd.basic_io.sector_count = + cpu_to_le16(scb->data_len / IPS_BLKSIZE); + + if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) { + /* + * This is a null condition + * we don't have to do anything + * so just return + */ + scb->scsi_cmd->result = DID_OK << 16; + } else + ret = IPS_SUCCESS; + + break; + + case RESERVE: + case RELEASE: + scb->scsi_cmd->result = DID_OK << 16; + break; + + case MODE_SENSE: + scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.segment_4G = 0; + scb->cmd.basic_io.enhanced_sg = 0; + scb->data_len = sizeof (*ha->enq); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq, + scb->data_len, + IPS_DMA_DIR(scb)); + scb->cmd.basic_io.sg_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + ret = IPS_SUCCESS; + break; + + case READ_CAPACITY: + scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; + scb->cmd.logical_info.command_id = + IPS_COMMAND_ID(ha, scb); + scb->cmd.logical_info.reserved = 0; + scb->cmd.logical_info.reserved2 = 0; + scb->cmd.logical_info.reserved3 = 0; + scb->data_len = sizeof (ha->adapt->logical_drive_info); + scb->data_busaddr = pci_map_single(ha->pcidev, + &ha->adapt-> + logical_drive_info, + scb->data_len, + IPS_DMA_DIR(scb)); + scb->flags |= IPS_SCB_MAP_SINGLE; + scb->cmd.logical_info.buffer_addr = scb->data_busaddr; + ret = IPS_SUCCESS; + break; + + case SEND_DIAGNOSTIC: + case REASSIGN_BLOCKS: + case FORMAT_UNIT: + case SEEK_10: + case VERIFY: + case READ_DEFECT_DATA: + case READ_BUFFER: + case WRITE_BUFFER: + scb->scsi_cmd->result = DID_OK << 16; + break; + + default: + /* Set the Return Info to appear like the Command was */ + /* attempted, a Check Condition occurred, and Sense */ + /* Data indicating an Invalid CDB OpCode is returned. */ + sp = (char *) scb->scsi_cmd->sense_buffer; + memset(sp, 0, sizeof (scb->scsi_cmd->sense_buffer)); + + sp[0] = 0x70; /* Error Code */ + sp[2] = ILLEGAL_REQUEST; /* Sense Key 5 Illegal Req. 
*/ + sp[7] = 0x0A; /* Additional Sense Length */ + sp[12] = 0x20; /* ASC = Invalid OpCode */ + sp[13] = 0x00; /* ASCQ */ + + device_error = 2; /* Indicate Check Condition */ + scb->scsi_cmd->result = device_error | (DID_OK << 16); + break; + } /* end switch */ + } + /* end if */ + if (ret == IPS_SUCCESS_IMM) + return (ret); + + /* setup DCDB */ + if (scb->bus > 0) { + + /* If we already know the Device is Not there, no need to attempt a Command */ + /* This also protects an NT FailOver Controller from getting CDB's sent to it */ + if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) { + scb->scsi_cmd->result = DID_NO_CONNECT << 16; + return (IPS_SUCCESS_IMM); + } + + ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id); + scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + + (unsigned long) &scb-> + dcdb - + (unsigned long) scb); + scb->cmd.dcdb.reserved = 0; + scb->cmd.dcdb.reserved2 = 0; + scb->cmd.dcdb.reserved3 = 0; + scb->cmd.dcdb.segment_4G = 0; + scb->cmd.dcdb.enhanced_sg = 0; + + TimeOut = scb->scsi_cmd->timeout_per_command; + + if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ + if (!scb->sg_len) { + scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB; + } else { + scb->cmd.dcdb.op_code = + IPS_CMD_EXTENDED_DCDB_SG; + scb->cmd.dcdb.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; + } + + tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; /* Use Same Data Area as Old DCDB Struct */ + tapeDCDB->device_address = + ((scb->bus - 1) << 4) | scb->target_id; + tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED; + tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K; /* Always Turn OFF 64K Size Flag */ + + if (TimeOut) { + if (TimeOut < (10 * HZ)) + tapeDCDB->cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ + else if (TimeOut < (60 * HZ)) + tapeDCDB->cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ + else if (TimeOut < (1200 * HZ)) + tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ + } + + tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len; + tapeDCDB->reserved_for_LUN = 0; + tapeDCDB->transfer_length = scb->data_len; + if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG) + tapeDCDB->buffer_pointer = + cpu_to_le32(scb->sg_busaddr); + else + tapeDCDB->buffer_pointer = + cpu_to_le32(scb->data_busaddr); + tapeDCDB->sg_count = scb->sg_len; + tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info); + tapeDCDB->scsi_status = 0; + tapeDCDB->reserved = 0; + memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd, + scb->scsi_cmd->cmd_len); + } else { + if (!scb->sg_len) { + scb->cmd.dcdb.op_code = IPS_CMD_DCDB; + } else { + scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG; + scb->cmd.dcdb.enhanced_sg = + IPS_USE_ENH_SGLIST(ha) ? 
0xFF : 0; + } + + scb->dcdb.device_address = + ((scb->bus - 1) << 4) | scb->target_id; + scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED; + + if (TimeOut) { + if (TimeOut < (10 * HZ)) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ + else if (TimeOut < (60 * HZ)) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ + else if (TimeOut < (1200 * HZ)) + scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ + } + + scb->dcdb.transfer_length = scb->data_len; + if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K) + scb->dcdb.transfer_length = 0; + if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG) + scb->dcdb.buffer_pointer = + cpu_to_le32(scb->sg_busaddr); + else + scb->dcdb.buffer_pointer = + cpu_to_le32(scb->data_busaddr); + scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len; + scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info); + scb->dcdb.sg_count = scb->sg_len; + scb->dcdb.reserved = 0; + memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd, + scb->scsi_cmd->cmd_len); + scb->dcdb.scsi_status = 0; + scb->dcdb.reserved2[0] = 0; + scb->dcdb.reserved2[1] = 0; + scb->dcdb.reserved2[2] = 0; + } + } - return ((*ha->func.issue)(ha, scb)); + return ((*ha->func.issue) (ha, scb)); } /****************************************************************************/ @@ -4639,169 +4110,151 @@ /* Assumed to be called with the HA lock */ /****************************************************************************/ static void -ips_chkstatus(ips_ha_t *ha, IPS_STATUS *pstatus) { - ips_scb_t *scb; - ips_stat_t *sp; - uint8_t basic_status; - uint8_t ext_status; - int errcode; - - METHOD_TRACE("ips_chkstatus", 1); - - scb = &ha->scbs[pstatus->fields.command_id]; - scb->basic_status = basic_status = pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK; - scb->extended_status = ext_status = pstatus->fields.extended_status; - - sp = &ha->sp; - sp->residue_len = 0; - sp->scb_addr = (void *) scb; - - /* Remove the item from the active queue */ - ips_removeq_scb(&ha->scb_activelist, scb); - - if (!scb->scsi_cmd) - /* internal commands are handled in do_ipsintr */ - return ; - - DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - - if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) - /* passthru - just returns the raw result */ - return ; - - errcode = DID_OK; - - if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) || - ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) { - - if (scb->bus == 0) { - if ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR) { - DEBUG_VAR(1, "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", - ips_name, ha->host_num, - scb->cmd.basic_io.op_code, basic_status, ext_status); - } - - switch (scb->scsi_cmd->cmnd[0]) { - case ALLOW_MEDIUM_REMOVAL: - case REZERO_UNIT: - case ERASE: - case WRITE_FILEMARKS: - case SPACE: - errcode = DID_ERROR; - break; - - case START_STOP: - break; - - case TEST_UNIT_READY: - if (scb->target_id == IPS_ADAPTER_ID) - break; - - if (!ips_online(ha, scb)) { - errcode = DID_TIME_OUT; - } - break; - - case INQUIRY: - if (scb->target_id == IPS_ADAPTER_ID) { - IPS_SCSI_INQ_DATA inquiry; - - memset(&inquiry, 0, sizeof(IPS_SCSI_INQ_DATA)); - - inquiry.DeviceType = IPS_SCSI_INQ_TYPE_PROCESSOR; - inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED; - inquiry.Version = IPS_SCSI_INQ_REV2; - inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2; - 
inquiry.AdditionalLength = 31; - inquiry.Flags[0] = IPS_SCSI_INQ_Address16; - inquiry.Flags[1] = IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync; - strncpy(inquiry.VendorId, "IBM ", 8); - strncpy(inquiry.ProductId, "SERVERAID ", 16); - strncpy(inquiry.ProductRevisionLevel, "1.00", 4); - - memcpy(scb->scsi_cmd->request_buffer, &inquiry, scb->scsi_cmd->request_bufflen); - - scb->scsi_cmd->result = DID_OK << 16; - break; - } - - if (ips_online(ha, scb)) { - ips_inquiry(ha, scb); - } else { - errcode = DID_TIME_OUT; - } - break; - - case REQUEST_SENSE: - ips_reqsen(ha, scb); - break; - - case READ_6: - case WRITE_6: - case READ_10: - case WRITE_10: - case RESERVE: - case RELEASE: - break; - - case MODE_SENSE: - if (!ips_online(ha, scb) || !ips_msense(ha, scb)) { - errcode = DID_ERROR; - } - break; - - case READ_CAPACITY: - if (ips_online(ha, scb)) - ips_rdcap(ha, scb); - else { - errcode = DID_TIME_OUT; - } - break; - - case SEND_DIAGNOSTIC: - case REASSIGN_BLOCKS: - break; - - case FORMAT_UNIT: - errcode = DID_ERROR; - break; - - case SEEK_10: - case VERIFY: - case READ_DEFECT_DATA: - case READ_BUFFER: - case WRITE_BUFFER: - break; - - default: - errcode = DID_ERROR; - } /* end switch */ - - scb->scsi_cmd->result = errcode << 16; - } else { /* bus == 0 */ - /* restrict access to physical drives */ - if ((scb->scsi_cmd->cmnd[0] == INQUIRY) && - ((((char *) scb->scsi_cmd->buffer)[0] & 0x1f) == TYPE_DISK)) { - - scb->scsi_cmd->result = DID_TIME_OUT << 16; - } - } /* else */ - } else { /* recovered error / success */ - if (scb->bus == 0) { - DEBUG_VAR(1, "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", - ips_name, ha->host_num, - scb->cmd.basic_io.op_code, basic_status, ext_status); - } +ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus) +{ + ips_scb_t *scb; + ips_stat_t *sp; + uint8_t basic_status; + uint8_t ext_status; + int errcode; + + METHOD_TRACE("ips_chkstatus", 1); + + scb = &ha->scbs[pstatus->fields.command_id]; + scb->basic_status = basic_status = + pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK; + scb->extended_status = ext_status = pstatus->fields.extended_status; + + sp = &ha->sp; + sp->residue_len = 0; + sp->scb_addr = (void *) scb; + + /* Remove the item from the active queue */ + ips_removeq_scb(&ha->scb_activelist, scb); + + if (!scb->scsi_cmd) + /* internal commands are handled in do_ipsintr */ + return; + + DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + + if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) + /* passthru - just returns the raw result */ + return; + + errcode = DID_OK; + + if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) || + ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) { + + if (scb->bus == 0) { + if ((basic_status & IPS_GSC_STATUS_MASK) == + IPS_CMD_RECOVERED_ERROR) { + DEBUG_VAR(1, + "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", + ips_name, ha->host_num, + scb->cmd.basic_io.op_code, + basic_status, ext_status); + } + + switch (scb->scsi_cmd->cmnd[0]) { + case ALLOW_MEDIUM_REMOVAL: + case REZERO_UNIT: + case ERASE: + case WRITE_FILEMARKS: + case SPACE: + errcode = DID_ERROR; + break; + + case START_STOP: + break; + + case TEST_UNIT_READY: + if (!ips_online(ha, scb)) { + errcode = DID_TIME_OUT; + } + break; + + case INQUIRY: + if (ips_online(ha, scb)) { + ips_inquiry(ha, scb); + } else { + errcode = DID_TIME_OUT; + } + break; + + case REQUEST_SENSE: 
+ ips_reqsen(ha, scb); + break; + + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case RESERVE: + case RELEASE: + break; + + case MODE_SENSE: + if (!ips_online(ha, scb) + || !ips_msense(ha, scb)) { + errcode = DID_ERROR; + } + break; + + case READ_CAPACITY: + if (ips_online(ha, scb)) + ips_rdcap(ha, scb); + else { + errcode = DID_TIME_OUT; + } + break; + + case SEND_DIAGNOSTIC: + case REASSIGN_BLOCKS: + break; + + case FORMAT_UNIT: + errcode = DID_ERROR; + break; + + case SEEK_10: + case VERIFY: + case READ_DEFECT_DATA: + case READ_BUFFER: + case WRITE_BUFFER: + break; + + default: + errcode = DID_ERROR; + } /* end switch */ + + scb->scsi_cmd->result = errcode << 16; + } else { /* bus == 0 */ + /* restrict access to physical drives */ + if ((scb->scsi_cmd->cmnd[0] == INQUIRY) && + ((((char *) scb->scsi_cmd->buffer)[0] & 0x1f) == + TYPE_DISK)) { + + scb->scsi_cmd->result = DID_TIME_OUT << 16; + } + } /* else */ + } else { /* recovered error / success */ + if (scb->bus == 0) { + DEBUG_VAR(1, + "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", + ips_name, ha->host_num, + scb->cmd.basic_io.op_code, basic_status, + ext_status); + } - ips_map_status(ha, scb, sp); - } /* else */ + ips_map_status(ha, scb, sp); + } /* else */ } /****************************************************************************/ @@ -4814,25 +4267,30 @@ /* */ /****************************************************************************/ static int -ips_online(ips_ha_t *ha, ips_scb_t *scb) { - METHOD_TRACE("ips_online", 1); - - if (scb->target_id >= IPS_MAX_LD) - return (0); +ips_online(ips_ha_t * ha, ips_scb_t * scb) +{ + METHOD_TRACE("ips_online", 1); - if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) { - memset(&ha->adapt->logical_drive_info, 0, sizeof(ha->adapt->logical_drive_info)); + if (scb->target_id >= IPS_MAX_LD) + return (0); - return (0); - } - - if (ha->adapt->logical_drive_info.drive_info[scb->target_id].state != IPS_LD_OFFLINE && - ha->adapt->logical_drive_info.drive_info[scb->target_id].state != IPS_LD_FREE && - ha->adapt->logical_drive_info.drive_info[scb->target_id].state != IPS_LD_CRS && - ha->adapt->logical_drive_info.drive_info[scb->target_id].state != IPS_LD_SYS) - return (1); - else - return (0); + if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) { + memset(&ha->adapt->logical_drive_info, 0, + sizeof (ha->adapt->logical_drive_info)); + + return (0); + } + + if (ha->adapt->logical_drive_info.drive_info[scb->target_id].state != + IPS_LD_OFFLINE + && ha->adapt->logical_drive_info.drive_info[scb->target_id].state != + IPS_LD_FREE + && ha->adapt->logical_drive_info.drive_info[scb->target_id].state != + IPS_LD_CRS + && ha->adapt->logical_drive_info.drive_info[scb->target_id].state != + IPS_LD_SYS) return (1); + else + return (0); } /****************************************************************************/ @@ -4845,27 +4303,29 @@ /* */ /****************************************************************************/ static int -ips_inquiry(ips_ha_t *ha, ips_scb_t *scb) { - IPS_SCSI_INQ_DATA inquiry; +ips_inquiry(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SCSI_INQ_DATA inquiry; - METHOD_TRACE("ips_inquiry", 1); + METHOD_TRACE("ips_inquiry", 1); - memset(&inquiry, 0, sizeof(IPS_SCSI_INQ_DATA)); + memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA)); - inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD; - inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED; - inquiry.Version = IPS_SCSI_INQ_REV2; - inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2; - inquiry.AdditionalLength = 
31; - inquiry.Flags[0] = IPS_SCSI_INQ_Address16; - inquiry.Flags[1] = IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync; - strncpy(inquiry.VendorId, "IBM ", 8); - strncpy(inquiry.ProductId, "SERVERAID ", 16); - strncpy(inquiry.ProductRevisionLevel, "1.00", 4); + inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD; + inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED; + inquiry.Version = IPS_SCSI_INQ_REV2; + inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2; + inquiry.AdditionalLength = 31; + inquiry.Flags[0] = IPS_SCSI_INQ_Address16; + inquiry.Flags[1] = + IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue; + strncpy(inquiry.VendorId, "IBM ", 8); + strncpy(inquiry.ProductId, "SERVERAID ", 16); + strncpy(inquiry.ProductRevisionLevel, "1.00", 4); - memcpy(scb->scsi_cmd->request_buffer, &inquiry, scb->scsi_cmd->request_bufflen); + ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry)); - return (1); + return (1); } /****************************************************************************/ @@ -4878,20 +4338,24 @@ /* */ /****************************************************************************/ static int -ips_rdcap(ips_ha_t *ha, ips_scb_t *scb) { - IPS_SCSI_CAPACITY *cap; +ips_rdcap(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SCSI_CAPACITY cap; - METHOD_TRACE("ips_rdcap", 1); + METHOD_TRACE("ips_rdcap", 1); - if (scb->scsi_cmd->bufflen < 8) - return (0); + if (scb->scsi_cmd->bufflen < 8) + return (0); - cap = (IPS_SCSI_CAPACITY *) scb->scsi_cmd->request_buffer; + cap.lba = + cpu_to_be32(le32_to_cpu + (ha->adapt->logical_drive_info. + drive_info[scb->target_id].sector_count) - 1); + cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE); - cap->lba = cpu_to_be32(le32_to_cpu(ha->adapt->logical_drive_info.drive_info[scb->target_id].sector_count) - 1); - cap->len = cpu_to_be32((uint32_t) IPS_BLKSIZE); + ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap)); - return (1); + return (1); } /****************************************************************************/ @@ -4904,72 +4368,78 @@ /* */ /****************************************************************************/ static int -ips_msense(ips_ha_t *ha, ips_scb_t *scb) { - uint16_t heads; - uint16_t sectors; - uint32_t cylinders; - IPS_SCSI_MODE_PAGE_DATA mdata; - - METHOD_TRACE("ips_msense", 1); - - if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 && - (ha->enq->ucMiscFlag & 0x8) == 0) { - heads = IPS_NORM_HEADS; - sectors = IPS_NORM_SECTORS; - } else { - heads = IPS_COMP_HEADS; - sectors = IPS_COMP_SECTORS; - } - - cylinders = (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) - 1) / (heads * sectors); - - memset(&mdata, 0, sizeof(IPS_SCSI_MODE_PAGE_DATA)); - - mdata.hdr.BlockDescLength = 8; - - switch (scb->scsi_cmd->cmnd[2] & 0x3f) { - case 0x03: /* page 3 */ - mdata.pdata.pg3.PageCode = 3; - mdata.pdata.pg3.PageLength = sizeof(IPS_SCSI_MODE_PAGE3); - mdata.hdr.DataLength = 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength; - mdata.pdata.pg3.TracksPerZone = 0; - mdata.pdata.pg3.AltSectorsPerZone = 0; - mdata.pdata.pg3.AltTracksPerZone = 0; - mdata.pdata.pg3.AltTracksPerVolume = 0; - mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors); - mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE); - mdata.pdata.pg3.Interleave = cpu_to_be16(1); - mdata.pdata.pg3.TrackSkew = 0; - mdata.pdata.pg3.CylinderSkew = 0; - mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector; - break; - - case 0x4: - mdata.pdata.pg4.PageCode = 4; - mdata.pdata.pg4.PageLength = sizeof(IPS_SCSI_MODE_PAGE4); - mdata.hdr.DataLength = 3 + 
mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength; - mdata.pdata.pg4.CylindersHigh = cpu_to_be16((cylinders >> 8) & 0xFFFF); - mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF); - mdata.pdata.pg4.Heads = heads; - mdata.pdata.pg4.WritePrecompHigh = 0; - mdata.pdata.pg4.WritePrecompLow = 0; - mdata.pdata.pg4.ReducedWriteCurrentHigh = 0; - mdata.pdata.pg4.ReducedWriteCurrentLow = 0; - mdata.pdata.pg4.StepRate = cpu_to_be16(1); - mdata.pdata.pg4.LandingZoneHigh = 0; - mdata.pdata.pg4.LandingZoneLow = 0; - mdata.pdata.pg4.flags = 0; - mdata.pdata.pg4.RotationalOffset = 0; - mdata.pdata.pg4.MediumRotationRate = 0; - break; - - default: - return (0); - } /* end switch */ +ips_msense(ips_ha_t * ha, ips_scb_t * scb) +{ + uint16_t heads; + uint16_t sectors; + uint32_t cylinders; + IPS_SCSI_MODE_PAGE_DATA mdata; + + METHOD_TRACE("ips_msense", 1); + + if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 && + (ha->enq->ucMiscFlag & 0x8) == 0) { + heads = IPS_NORM_HEADS; + sectors = IPS_NORM_SECTORS; + } else { + heads = IPS_COMP_HEADS; + sectors = IPS_COMP_SECTORS; + } + + cylinders = + (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) - + 1) / (heads * sectors); + + memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA)); + + mdata.hdr.BlockDescLength = 8; + + switch (scb->scsi_cmd->cmnd[2] & 0x3f) { + case 0x03: /* page 3 */ + mdata.pdata.pg3.PageCode = 3; + mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3); + mdata.hdr.DataLength = + 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength; + mdata.pdata.pg3.TracksPerZone = 0; + mdata.pdata.pg3.AltSectorsPerZone = 0; + mdata.pdata.pg3.AltTracksPerZone = 0; + mdata.pdata.pg3.AltTracksPerVolume = 0; + mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors); + mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE); + mdata.pdata.pg3.Interleave = cpu_to_be16(1); + mdata.pdata.pg3.TrackSkew = 0; + mdata.pdata.pg3.CylinderSkew = 0; + mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector; + break; + + case 0x4: + mdata.pdata.pg4.PageCode = 4; + mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4); + mdata.hdr.DataLength = + 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength; + mdata.pdata.pg4.CylindersHigh = + cpu_to_be16((cylinders >> 8) & 0xFFFF); + mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF); + mdata.pdata.pg4.Heads = heads; + mdata.pdata.pg4.WritePrecompHigh = 0; + mdata.pdata.pg4.WritePrecompLow = 0; + mdata.pdata.pg4.ReducedWriteCurrentHigh = 0; + mdata.pdata.pg4.ReducedWriteCurrentLow = 0; + mdata.pdata.pg4.StepRate = cpu_to_be16(1); + mdata.pdata.pg4.LandingZoneHigh = 0; + mdata.pdata.pg4.LandingZoneLow = 0; + mdata.pdata.pg4.flags = 0; + mdata.pdata.pg4.RotationalOffset = 0; + mdata.pdata.pg4.MediumRotationRate = 0; + break; + + default: + return (0); + } /* end switch */ - memcpy(scb->scsi_cmd->request_buffer, &mdata, scb->scsi_cmd->request_bufflen); + ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata)); - return (1); + return (1); } /****************************************************************************/ @@ -4982,21 +4452,23 @@ /* */ /****************************************************************************/ static int -ips_reqsen(ips_ha_t *ha, ips_scb_t *scb) { - IPS_SCSI_REQSEN reqsen; +ips_reqsen(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SCSI_REQSEN reqsen; - METHOD_TRACE("ips_reqsen", 1); + METHOD_TRACE("ips_reqsen", 1); - memset(&reqsen, 0, sizeof(IPS_SCSI_REQSEN)); + memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN)); - reqsen.ResponseCode = IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR; 
- reqsen.AdditionalLength = 10; - reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE; - reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE; + reqsen.ResponseCode = + IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR; + reqsen.AdditionalLength = 10; + reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE; + reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE; - memcpy(scb->scsi_cmd->request_buffer, &reqsen, scb->scsi_cmd->request_bufflen); + ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen)); - return (1); + return (1); } /****************************************************************************/ @@ -5009,60 +4481,63 @@ /* */ /****************************************************************************/ static void -ips_free(ips_ha_t *ha) { +ips_free(ips_ha_t * ha) +{ - METHOD_TRACE("ips_free", 1); + METHOD_TRACE("ips_free", 1); - if (ha) { - if (ha->enq) { - kfree(ha->enq); - ha->enq = NULL; - } - - if (ha->conf) { - kfree(ha->conf); - ha->conf = NULL; - } - - if (ha->adapt) { - pci_free_consistent(ha->pcidev,sizeof(IPS_ADAPTER)+ sizeof(IPS_IO_CMD), - ha->adapt, ha->adapt->hw_status_start); - ha->adapt = NULL; - } - - if (ha->nvram) { - kfree(ha->nvram); - ha->nvram = NULL; - } - - if (ha->subsys) { - kfree(ha->subsys); - ha->subsys = NULL; - } - - if (ha->ioctl_data) { - free_pages((unsigned long) ha->ioctl_data, ha->ioctl_order); - ha->ioctl_data = NULL; - ha->ioctl_datasize = 0; - ha->ioctl_order = 0; - } - ips_deallocatescbs(ha, ha->max_cmds); - - /* free memory mapped (if applicable) */ - if (ha->mem_ptr) { - iounmap(ha->ioremap_ptr); - ha->ioremap_ptr = NULL; - ha->mem_ptr = NULL; - } - -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) - if (ha->mem_addr) - release_mem_region(ha->mem_addr, ha->mem_len); -#endif - ha->mem_addr = 0; + if (ha) { + if (ha->enq) { + kfree(ha->enq); + ha->enq = NULL; + } + + if (ha->conf) { + kfree(ha->conf); + ha->conf = NULL; + } + + if (ha->adapt) { + pci_free_consistent(ha->pcidev, + sizeof (IPS_ADAPTER) + + sizeof (IPS_IO_CMD), ha->adapt, + ha->adapt->hw_status_start); + ha->adapt = NULL; + } + + if (ha->nvram) { + kfree(ha->nvram); + ha->nvram = NULL; + } + + if (ha->subsys) { + kfree(ha->subsys); + ha->subsys = NULL; + } + + if (ha->ioctl_data) { + free_pages((unsigned long) ha->ioctl_data, + ha->ioctl_order); + ha->ioctl_data = NULL; + ha->ioctl_datasize = 0; + ha->ioctl_order = 0; + } + ips_deallocatescbs(ha, ha->max_cmds); + + /* free memory mapped (if applicable) */ + if (ha->mem_ptr) { + iounmap(ha->ioremap_ptr); + ha->ioremap_ptr = NULL; + ha->mem_ptr = NULL; + } + + if (ha->mem_addr) + release_mem_region(ha->mem_addr, ha->mem_len); + ha->mem_addr = 0; - } + } } + /****************************************************************************/ /* */ /* Routine Name: ips_deallocatescbs */ @@ -5073,15 +4548,18 @@ /* */ /****************************************************************************/ static int -ips_deallocatescbs(ips_ha_t *ha, int cmds) { - if (ha->scbs) { - pci_free_consistent(ha->pcidev,sizeof(IPS_SG_LIST) * IPS_MAX_SG * - cmds, ha->scbs->sg_list, ha->scbs->sg_busaddr); - pci_free_consistent(ha->pcidev, sizeof(ips_scb_t) * cmds, - ha->scbs, ha->scbs->scb_busaddr); - ha->scbs = NULL; - } /* end if */ -return 1; +ips_deallocatescbs(ips_ha_t * ha, int cmds) +{ + if (ha->scbs) { + pci_free_consistent(ha->pcidev, + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds, + ha->scbs->sg_list.list, + ha->scbs->sg_busaddr); + pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds, + ha->scbs, ha->scbs->scb_busaddr); + 
ha->scbs = NULL; + } /* end if */ + return 1; } /****************************************************************************/ @@ -5094,44 +4572,59 @@ /* */ /****************************************************************************/ static int -ips_allocatescbs(ips_ha_t *ha) { - ips_scb_t *scb_p; - IPS_SG_LIST* ips_sg; - int i; - dma_addr_t command_dma, sg_dma; - - METHOD_TRACE("ips_allocatescbs", 1); - - /* Allocate memory for the SCBs */ - ha->scbs = pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof(ips_scb_t), - &command_dma); - if (ha->scbs == NULL) - return 0; - ips_sg = pci_alloc_consistent(ha->pcidev, sizeof(IPS_SG_LIST) * IPS_MAX_SG * - ha->max_cmds, &sg_dma); - if(ips_sg == NULL){ - pci_free_consistent(ha->pcidev,ha->max_cmds * sizeof(ips_scb_t),ha->scbs, command_dma); - return 0; - } - - memset(ha->scbs, 0, ha->max_cmds * sizeof(ips_scb_t)); - - for (i = 0; i < ha->max_cmds; i++) { - scb_p = &ha->scbs[i]; - scb_p->scb_busaddr = command_dma + sizeof(ips_scb_t) * i; - /* set up S/G list */ - scb_p->sg_list = ips_sg + i * IPS_MAX_SG; - scb_p->sg_busaddr = sg_dma + sizeof(IPS_SG_LIST) * IPS_MAX_SG * i; - - /* add to the free list */ - if (i < ha->max_cmds - 1) { - scb_p->q_next = ha->scb_freelist; - ha->scb_freelist = scb_p; - } - } +ips_allocatescbs(ips_ha_t * ha) +{ + ips_scb_t *scb_p; + IPS_SG_LIST ips_sg; + int i; + dma_addr_t command_dma, sg_dma; + + METHOD_TRACE("ips_allocatescbs", 1); + + /* Allocate memory for the SCBs */ + ha->scbs = + pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t), + &command_dma); + if (ha->scbs == NULL) + return 0; + ips_sg.list = + pci_alloc_consistent(ha->pcidev, + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * + ha->max_cmds, &sg_dma); + if (ips_sg.list == NULL) { + pci_free_consistent(ha->pcidev, + ha->max_cmds * sizeof (ips_scb_t), ha->scbs, + command_dma); + return 0; + } + + memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t)); + + for (i = 0; i < ha->max_cmds; i++) { + scb_p = &ha->scbs[i]; + scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i; + /* set up S/G list */ + if (IPS_USE_ENH_SGLIST(ha)) { + scb_p->sg_list.enh_list = + ips_sg.enh_list + i * IPS_MAX_SG; + scb_p->sg_busaddr = + sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i; + } else { + scb_p->sg_list.std_list = + ips_sg.std_list + i * IPS_MAX_SG; + scb_p->sg_busaddr = + sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i; + } + + /* add to the free list */ + if (i < ha->max_cmds - 1) { + scb_p->q_next = ha->scb_freelist; + ha->scb_freelist = scb_p; + } + } - /* success */ - return (1); + /* success */ + return (1); } /****************************************************************************/ @@ -5144,36 +4637,37 @@ /* */ /****************************************************************************/ static void -ips_init_scb(ips_ha_t *ha, ips_scb_t *scb) { - IPS_SG_LIST *sg_list; - uint32_t cmd_busaddr, sg_busaddr; - METHOD_TRACE("ips_init_scb", 1); - - if (scb == NULL) - return ; - - sg_list = scb->sg_list; - cmd_busaddr = scb->scb_busaddr; - sg_busaddr = scb->sg_busaddr; - /* zero fill */ - memset(scb, 0, sizeof(ips_scb_t)); - memset(ha->dummy, 0, sizeof(IPS_IO_CMD)); - - /* Initialize dummy command bucket */ - ha->dummy->op_code = 0xFF; - ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start - + sizeof(IPS_ADAPTER)); - ha->dummy->command_id = IPS_MAX_CMDS; - - /* set bus address of scb */ - scb->scb_busaddr = cmd_busaddr; - scb->sg_busaddr = sg_busaddr; - scb->sg_list = sg_list; - - /* Neptune Fix */ - scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) 
IPS_BIT_ILE); - scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start - + sizeof(IPS_ADAPTER)); +ips_init_scb(ips_ha_t * ha, ips_scb_t * scb) +{ + IPS_SG_LIST sg_list; + uint32_t cmd_busaddr, sg_busaddr; + METHOD_TRACE("ips_init_scb", 1); + + if (scb == NULL) + return; + + sg_list.list = scb->sg_list.list; + cmd_busaddr = scb->scb_busaddr; + sg_busaddr = scb->sg_busaddr; + /* zero fill */ + memset(scb, 0, sizeof (ips_scb_t)); + memset(ha->dummy, 0, sizeof (IPS_IO_CMD)); + + /* Initialize dummy command bucket */ + ha->dummy->op_code = 0xFF; + ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start + + sizeof (IPS_ADAPTER)); + ha->dummy->command_id = IPS_MAX_CMDS; + + /* set bus address of scb */ + scb->scb_busaddr = cmd_busaddr; + scb->sg_busaddr = sg_busaddr; + scb->sg_list.list = sg_list.list; + + /* Neptune Fix */ + scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE); + scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start + + sizeof (IPS_ADAPTER)); } /****************************************************************************/ @@ -5188,22 +4682,23 @@ /* */ /****************************************************************************/ static ips_scb_t * -ips_getscb(ips_ha_t *ha) { - ips_scb_t *scb; +ips_getscb(ips_ha_t * ha) +{ + ips_scb_t *scb; - METHOD_TRACE("ips_getscb", 1); + METHOD_TRACE("ips_getscb", 1); - if ((scb = ha->scb_freelist) == NULL) { + if ((scb = ha->scb_freelist) == NULL) { - return (NULL); - } + return (NULL); + } - ha->scb_freelist = scb->q_next; - scb->q_next = NULL; + ha->scb_freelist = scb->q_next; + scb->q_next = NULL; - ips_init_scb(ha, scb); + ips_init_scb(ha, scb); - return (scb); + return (scb); } /****************************************************************************/ @@ -5218,22 +4713,22 @@ /* */ /****************************************************************************/ static void -ips_freescb(ips_ha_t *ha, ips_scb_t *scb) { +ips_freescb(ips_ha_t * ha, ips_scb_t * scb) +{ - METHOD_TRACE("ips_freescb", 1); - if(scb->flags & IPS_SCB_MAP_SG) - pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer, - scb->scsi_cmd->use_sg, - IPS_DMA_DIR(scb)); - else if(scb->flags & IPS_SCB_MAP_SINGLE) - pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len, - IPS_DMA_DIR(scb)); - - /* check to make sure this is not our "special" scb */ - if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) { - scb->q_next = ha->scb_freelist; - ha->scb_freelist = scb; - } + METHOD_TRACE("ips_freescb", 1); + if (scb->flags & IPS_SCB_MAP_SG) + pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer, + scb->scsi_cmd->use_sg, IPS_DMA_DIR(scb)); + else if (scb->flags & IPS_SCB_MAP_SINGLE) + pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len, + IPS_DMA_DIR(scb)); + + /* check to make sure this is not our "special" scb */ + if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) { + scb->q_next = ha->scb_freelist; + ha->scb_freelist = scb; + } } /****************************************************************************/ @@ -5246,19 +4741,20 @@ /* */ /****************************************************************************/ static int -ips_isinit_copperhead(ips_ha_t *ha) { - uint8_t scpr; - uint8_t isr; - - METHOD_TRACE("ips_isinit_copperhead", 1); - - isr = inb(ha->io_addr + IPS_REG_HISR); - scpr = inb(ha->io_addr + IPS_REG_SCPR); - - if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) - return (0); - else - return (1); +ips_isinit_copperhead(ips_ha_t * ha) +{ + uint8_t scpr; + uint8_t isr; + + 
METHOD_TRACE("ips_isinit_copperhead", 1); + + isr = inb(ha->io_addr + IPS_REG_HISR); + scpr = inb(ha->io_addr + IPS_REG_SCPR); + + if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) + return (0); + else + return (1); } /****************************************************************************/ @@ -5271,19 +4767,20 @@ /* */ /****************************************************************************/ static int -ips_isinit_copperhead_memio(ips_ha_t *ha) { - uint8_t isr=0; - uint8_t scpr; - - METHOD_TRACE("ips_is_init_copperhead_memio", 1); - - isr = readb(ha->mem_ptr + IPS_REG_HISR); - scpr = readb(ha->mem_ptr + IPS_REG_SCPR); - - if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) - return (0); - else - return (1); +ips_isinit_copperhead_memio(ips_ha_t * ha) +{ + uint8_t isr = 0; + uint8_t scpr; + + METHOD_TRACE("ips_is_init_copperhead_memio", 1); + + isr = readb(ha->mem_ptr + IPS_REG_HISR); + scpr = readb(ha->mem_ptr + IPS_REG_SCPR); + + if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) + return (0); + else + return (1); } /****************************************************************************/ @@ -5296,21 +4793,22 @@ /* */ /****************************************************************************/ static int -ips_isinit_morpheus(ips_ha_t *ha) { - uint32_t post; - uint32_t bits; - - METHOD_TRACE("ips_is_init_morpheus", 1); - - post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); - bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR); - - if (post == 0) - return (0); - else if (bits & 0x3) - return (0); - else - return (1); +ips_isinit_morpheus(ips_ha_t * ha) +{ + uint32_t post; + uint32_t bits; + + METHOD_TRACE("ips_is_init_morpheus", 1); + + post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); + bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (post == 0) + return (0); + else if (bits & 0x3) + return (0); + else + return (1); } /****************************************************************************/ @@ -5322,10 +4820,12 @@ /* */ /****************************************************************************/ static void -ips_enable_int_copperhead(ips_ha_t *ha) { - METHOD_TRACE("ips_enable_int_copperhead", 1); +ips_enable_int_copperhead(ips_ha_t * ha) +{ + METHOD_TRACE("ips_enable_int_copperhead", 1); - outb(ha->io_addr + IPS_REG_HISR, IPS_BIT_EI); + outb(ha->io_addr + IPS_REG_HISR, IPS_BIT_EI); + inb(ha->io_addr + IPS_REG_HISR); // Ensure PCI Posting Completes } /****************************************************************************/ @@ -5337,10 +4837,12 @@ /* */ /****************************************************************************/ static void -ips_enable_int_copperhead_memio(ips_ha_t *ha) { - METHOD_TRACE("ips_enable_int_copperhead_memio", 1); +ips_enable_int_copperhead_memio(ips_ha_t * ha) +{ + METHOD_TRACE("ips_enable_int_copperhead_memio", 1); - writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); + writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); + readb(ha->mem_ptr + IPS_REG_HISR); // Ensure PCI Posting Completes } /****************************************************************************/ @@ -5352,14 +4854,16 @@ /* */ /****************************************************************************/ static void -ips_enable_int_morpheus(ips_ha_t *ha) { - uint32_t Oimr; +ips_enable_int_morpheus(ips_ha_t * ha) +{ + uint32_t Oimr; - METHOD_TRACE("ips_enable_int_morpheus", 1); + METHOD_TRACE("ips_enable_int_morpheus", 1); - Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); - Oimr &= ~0x08; - writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); + Oimr = 
readl(ha->mem_ptr + IPS_REG_I960_OIMR); + Oimr &= ~0x08; + writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); + readl(ha->mem_ptr + IPS_REG_I960_OIMR); // Ensure PCI Posting Completes } /****************************************************************************/ @@ -5372,86 +4876,88 @@ /* */ /****************************************************************************/ static int -ips_init_copperhead(ips_ha_t *ha) { - uint8_t Isr; - uint8_t Cbsp; - uint8_t PostByte[IPS_MAX_POST_BYTES]; - uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES]; - int i, j; - - METHOD_TRACE("ips_init_copperhead", 1); - - for (i = 0; i < IPS_MAX_POST_BYTES; i++) { - for (j = 0; j < 45; j++) { - Isr = inb(ha->io_addr + IPS_REG_HISR); - if (Isr & IPS_BIT_GHI) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (j >= 45) - /* error occurred */ - return (0); - - PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR); - outb(Isr, ha->io_addr + IPS_REG_HISR); - } - - if (PostByte[0] < IPS_GOOD_POST_STATUS) { - printk(KERN_WARNING "(%s%d) reset controller fails (post status %x %x).\n", - ips_name, ha->host_num, PostByte[0], PostByte[1]); - - return (0); - } - - for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { - for (j = 0; j < 240; j++) { - Isr = inb(ha->io_addr + IPS_REG_HISR); - if (Isr & IPS_BIT_GHI) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (j >= 240) - /* error occurred */ - return (0); - - ConfigByte[i] = inb(ha->io_addr + IPS_REG_ISPR); - outb(Isr, ha->io_addr + IPS_REG_HISR); - } - - for (i = 0; i < 240; i++) { - Cbsp = inb(ha->io_addr + IPS_REG_CBSP); - - if ((Cbsp & IPS_BIT_OP) == 0) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 240) - /* reset failed */ - return (0); - - /* setup CCCR */ - outl(cpu_to_le32(0x1010), ha->io_addr + IPS_REG_CCCR); - - /* Enable busmastering */ - outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR); - - if (ha->revision_id == IPS_REVID_TROMBONE64) - /* fix for anaconda64 */ - outl(0, ha->io_addr + IPS_REG_NDAE); +ips_init_copperhead(ips_ha_t * ha) +{ + uint8_t Isr; + uint8_t Cbsp; + uint8_t PostByte[IPS_MAX_POST_BYTES]; + uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES]; + int i, j; + + METHOD_TRACE("ips_init_copperhead", 1); + + for (i = 0; i < IPS_MAX_POST_BYTES; i++) { + for (j = 0; j < 45; j++) { + Isr = inb(ha->io_addr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 45) + /* error occurred */ + return (0); + + PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR); + outb(Isr, ha->io_addr + IPS_REG_HISR); + } + + if (PostByte[0] < IPS_GOOD_POST_STATUS) { + printk(KERN_WARNING + "(%s%d) reset controller fails (post status %x %x).\n", + ips_name, ha->host_num, PostByte[0], PostByte[1]); + + return (0); + } + + for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { + for (j = 0; j < 240; j++) { + Isr = inb(ha->io_addr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 240) + /* error occurred */ + return (0); + + ConfigByte[i] = inb(ha->io_addr + IPS_REG_ISPR); + outb(Isr, ha->io_addr + IPS_REG_HISR); + } + + for (i = 0; i < 240; i++) { + Cbsp = inb(ha->io_addr + IPS_REG_CBSP); + + if ((Cbsp & IPS_BIT_OP) == 0) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 240) + /* reset failed */ + return (0); + + /* setup CCCR */ + outl(cpu_to_le32(0x1010), ha->io_addr + IPS_REG_CCCR); + + /* Enable busmastering */ + outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR); + + if (ha->revision_id == 
IPS_REVID_TROMBONE64) + /* fix for anaconda64 */ + outl(0, ha->io_addr + IPS_REG_NDAE); - /* Enable interrupts */ - outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR); + /* Enable interrupts */ + outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR); - return (1); + return (1); } /****************************************************************************/ @@ -5464,87 +4970,89 @@ /* */ /****************************************************************************/ static int -ips_init_copperhead_memio(ips_ha_t *ha) { - uint8_t Isr=0; - uint8_t Cbsp; - uint8_t PostByte[IPS_MAX_POST_BYTES]; - uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES]; - int i, j; - - METHOD_TRACE("ips_init_copperhead_memio", 1); - - for (i = 0; i < IPS_MAX_POST_BYTES; i++) { - for (j = 0; j < 45; j++) { - Isr = readb(ha->mem_ptr + IPS_REG_HISR); - if (Isr & IPS_BIT_GHI) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (j >= 45) - /* error occurred */ - return (0); - - PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); - writeb(Isr, ha->mem_ptr + IPS_REG_HISR); - } - - if (PostByte[0] < IPS_GOOD_POST_STATUS) { - printk(KERN_WARNING "(%s%d) reset controller fails (post status %x %x).\n", - ips_name, ha->host_num, PostByte[0], PostByte[1]); - - return (0); - } - - for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { - for (j = 0; j < 240; j++) { - Isr = readb(ha->mem_ptr + IPS_REG_HISR); - if (Isr & IPS_BIT_GHI) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (j >= 240) - /* error occurred */ - return (0); - - ConfigByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); - writeb(Isr, ha->mem_ptr + IPS_REG_HISR); - } - - for (i = 0; i < 240; i++) { - Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP); - - if ((Cbsp & IPS_BIT_OP) == 0) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 240) - /* error occurred */ - return (0); - - /* setup CCCR */ - writel(0x1010, ha->mem_ptr + IPS_REG_CCCR); - - /* Enable busmastering */ - writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR); - - if (ha->revision_id == IPS_REVID_TROMBONE64) - /* fix for anaconda64 */ - writel(0, ha->mem_ptr + IPS_REG_NDAE); +ips_init_copperhead_memio(ips_ha_t * ha) +{ + uint8_t Isr = 0; + uint8_t Cbsp; + uint8_t PostByte[IPS_MAX_POST_BYTES]; + uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES]; + int i, j; + + METHOD_TRACE("ips_init_copperhead_memio", 1); + + for (i = 0; i < IPS_MAX_POST_BYTES; i++) { + for (j = 0; j < 45; j++) { + Isr = readb(ha->mem_ptr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 45) + /* error occurred */ + return (0); + + PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); + writeb(Isr, ha->mem_ptr + IPS_REG_HISR); + } + + if (PostByte[0] < IPS_GOOD_POST_STATUS) { + printk(KERN_WARNING + "(%s%d) reset controller fails (post status %x %x).\n", + ips_name, ha->host_num, PostByte[0], PostByte[1]); + + return (0); + } + + for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { + for (j = 0; j < 240; j++) { + Isr = readb(ha->mem_ptr + IPS_REG_HISR); + if (Isr & IPS_BIT_GHI) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (j >= 240) + /* error occurred */ + return (0); + + ConfigByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); + writeb(Isr, ha->mem_ptr + IPS_REG_HISR); + } + + for (i = 0; i < 240; i++) { + Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP); + + if ((Cbsp & IPS_BIT_OP) == 0) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 240) + /* error occurred */ + return (0); + + /* setup CCCR */ + writel(0x1010, ha->mem_ptr + 
IPS_REG_CCCR); + + /* Enable busmastering */ + writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR); + + if (ha->revision_id == IPS_REVID_TROMBONE64) + /* fix for anaconda64 */ + writel(0, ha->mem_ptr + IPS_REG_NDAE); - /* Enable interrupts */ - writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); + /* Enable interrupts */ + writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); - /* if we get here then everything went OK */ - return (1); + /* if we get here then everything went OK */ + return (1); } /****************************************************************************/ @@ -5557,109 +5065,112 @@ /* */ /****************************************************************************/ static int -ips_init_morpheus(ips_ha_t *ha) { - uint32_t Post; - uint32_t Config; - uint32_t Isr; - uint32_t Oimr; - int i; - - METHOD_TRACE("ips_init_morpheus", 1); - - /* Wait up to 45 secs for Post */ - for (i = 0; i < 45; i++) { - Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); - - if (Isr & IPS_BIT_I960_MSG0I) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 45) { - /* error occurred */ - printk(KERN_WARNING "(%s%d) timeout waiting for post.\n", - ips_name, ha->host_num); - - return (0); - } - - Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); - - if (Post == 0x4F00) { /* If Flashing the Battery PIC */ - printk(KERN_WARNING "Flashing Battery PIC, Please wait ...\n" ); - - /* Clear the interrupt bit */ - Isr = (uint32_t) IPS_BIT_I960_MSG0I; - writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); - - for (i = 0; i < 120; i++) { /* Wait Up to 2 Min. for Completion */ - Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); - if (Post != 0x4F00) - break; - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 120) { - printk(KERN_WARNING "(%s%d) timeout waiting for Battery PIC Flash\n", - ips_name, ha->host_num); - return (0); - } - - } - - /* Clear the interrupt bit */ - Isr = (uint32_t) IPS_BIT_I960_MSG0I; - writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); - - if (Post < (IPS_GOOD_POST_STATUS << 8)) { - printk(KERN_WARNING "(%s%d) reset controller fails (post status %x).\n", - ips_name, ha->host_num, Post); - - return (0); - } - - /* Wait up to 240 secs for config bytes */ - for (i = 0; i < 240; i++) { - Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); - - if (Isr & IPS_BIT_I960_MSG1I) - break; - - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - } - - if (i >= 240) { - /* error occurred */ - printk(KERN_WARNING "(%s%d) timeout waiting for config.\n", - ips_name, ha->host_num); - - return (0); - } - - Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1); - - /* Clear interrupt bit */ - Isr = (uint32_t) IPS_BIT_I960_MSG1I; - writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); - - /* Turn on the interrupts */ - Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); - Oimr &= ~0x8; - writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); - - /* if we get here then everything went OK */ - - /* Since we did a RESET, an EraseStripeLock may be needed */ - if (Post == 0xEF10) { - if ( (Config == 0x000F) || (Config == 0x0009) ) - ha->requires_esl = 1; - } +ips_init_morpheus(ips_ha_t * ha) +{ + uint32_t Post; + uint32_t Config; + uint32_t Isr; + uint32_t Oimr; + int i; + + METHOD_TRACE("ips_init_morpheus", 1); + + /* Wait up to 45 secs for Post */ + for (i = 0; i < 45; i++) { + Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Isr & IPS_BIT_I960_MSG0I) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 45) { + /* error occurred */ + printk(KERN_WARNING "(%s%d) timeout waiting for post.\n", + ips_name, ha->host_num); + + 
return (0); + } + + Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); + + if (Post == 0x4F00) { /* If Flashing the Battery PIC */ + printk(KERN_WARNING "Flashing Battery PIC, Please wait ...\n"); + + /* Clear the interrupt bit */ + Isr = (uint32_t) IPS_BIT_I960_MSG0I; + writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); + + for (i = 0; i < 120; i++) { /* Wait Up to 2 Min. for Completion */ + Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); + if (Post != 0x4F00) + break; + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 120) { + printk(KERN_WARNING + "(%s%d) timeout waiting for Battery PIC Flash\n", + ips_name, ha->host_num); + return (0); + } + + } + + /* Clear the interrupt bit */ + Isr = (uint32_t) IPS_BIT_I960_MSG0I; + writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Post < (IPS_GOOD_POST_STATUS << 8)) { + printk(KERN_WARNING + "(%s%d) reset controller fails (post status %x).\n", + ips_name, ha->host_num, Post); + + return (0); + } + + /* Wait up to 240 secs for config bytes */ + for (i = 0; i < 240; i++) { + Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + + if (Isr & IPS_BIT_I960_MSG1I) + break; + + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); + } + + if (i >= 240) { + /* error occurred */ + printk(KERN_WARNING "(%s%d) timeout waiting for config.\n", + ips_name, ha->host_num); + + return (0); + } + + Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1); + + /* Clear interrupt bit */ + Isr = (uint32_t) IPS_BIT_I960_MSG1I; + writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); + + /* Turn on the interrupts */ + Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); + Oimr &= ~0x8; + writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); + + /* if we get here then everything went OK */ + + /* Since we did a RESET, an EraseStripeLock may be needed */ + if (Post == 0xEF10) { + if ((Config == 0x000F) || (Config == 0x0009)) + ha->requires_esl = 1; + } - return (1); + return (1); } /****************************************************************************/ @@ -5672,38 +5183,39 @@ /* */ /****************************************************************************/ static int -ips_reset_copperhead(ips_ha_t *ha) { - int reset_counter; +ips_reset_copperhead(ips_ha_t * ha) +{ + int reset_counter; - METHOD_TRACE("ips_reset_copperhead", 1); + METHOD_TRACE("ips_reset_copperhead", 1); - DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d", - ips_name, ha->host_num, ha->io_addr, ha->irq); + DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d", + ips_name, ha->host_num, ha->io_addr, ha->irq); - reset_counter = 0; + reset_counter = 0; - while (reset_counter < 2) { - reset_counter++; + while (reset_counter < 2) { + reset_counter++; - outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR); + outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR); - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - - outb(0, ha->io_addr + IPS_REG_SCPR); + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - - if ((*ha->func.init)(ha)) - break; - else if (reset_counter >= 2) { + outb(0, ha->io_addr + IPS_REG_SCPR); - return (0); - } - } + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); - return (1); + if ((*ha->func.init) (ha)) + break; + else if (reset_counter >= 2) { + + return (0); + } + } + + return (1); } /****************************************************************************/ @@ -5716,38 +5228,39 @@ /* */ /****************************************************************************/ static int -ips_reset_copperhead_memio(ips_ha_t *ha) { - int reset_counter; 
+ips_reset_copperhead_memio(ips_ha_t * ha) +{ + int reset_counter; - METHOD_TRACE("ips_reset_copperhead_memio", 1); + METHOD_TRACE("ips_reset_copperhead_memio", 1); - DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d", - ips_name, ha->host_num, ha->mem_addr, ha->irq); + DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d", + ips_name, ha->host_num, ha->mem_addr, ha->irq); - reset_counter = 0; + reset_counter = 0; - while (reset_counter < 2) { - reset_counter++; + while (reset_counter < 2) { + reset_counter++; - writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR); + writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR); - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - - writeb(0, ha->mem_ptr + IPS_REG_SCPR); + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); - /* Delay for 1 Second */ - MDELAY(IPS_ONE_SEC); - - if ((*ha->func.init)(ha)) - break; - else if (reset_counter >= 2) { + writeb(0, ha->mem_ptr + IPS_REG_SCPR); - return (0); - } - } + /* Delay for 1 Second */ + MDELAY(IPS_ONE_SEC); - return (1); + if ((*ha->func.init) (ha)) + break; + else if (reset_counter >= 2) { + + return (0); + } + } + + return (1); } /****************************************************************************/ @@ -5760,37 +5273,38 @@ /* */ /****************************************************************************/ static int -ips_reset_morpheus(ips_ha_t *ha) { - int reset_counter; - uint8_t junk; +ips_reset_morpheus(ips_ha_t * ha) +{ + int reset_counter; + uint8_t junk; + + METHOD_TRACE("ips_reset_morpheus", 1); - METHOD_TRACE("ips_reset_morpheus", 1); + DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d", + ips_name, ha->host_num, ha->mem_addr, ha->irq); - DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d", - ips_name, ha->host_num, ha->mem_addr, ha->irq); + reset_counter = 0; - reset_counter = 0; + while (reset_counter < 2) { + reset_counter++; - while (reset_counter < 2) { - reset_counter++; + writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR); - writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR); + /* Delay for 5 Seconds */ + MDELAY(5 * IPS_ONE_SEC); - /* Delay for 5 Seconds */ - MDELAY(5 * IPS_ONE_SEC); - - /* Do a PCI config read to wait for adapter */ - pci_read_config_byte(ha->pcidev, 4, &junk); + /* Do a PCI config read to wait for adapter */ + pci_read_config_byte(ha->pcidev, 4, &junk); - if ((*ha->func.init)(ha)) - break; - else if (reset_counter >= 2) { + if ((*ha->func.init) (ha)) + break; + else if (reset_counter >= 2) { - return (0); - } - } + return (0); + } + } - return (1); + return (1); } /****************************************************************************/ @@ -5803,22 +5317,25 @@ /* */ /****************************************************************************/ static void -ips_statinit(ips_ha_t *ha) { - uint32_t phys_status_start; +ips_statinit(ips_ha_t * ha) +{ + uint32_t phys_status_start; - METHOD_TRACE("ips_statinit", 1); + METHOD_TRACE("ips_statinit", 1); - ha->adapt->p_status_start = ha->adapt->status; - ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; - ha->adapt->p_status_tail = ha->adapt->status; - - phys_status_start = ha->adapt->hw_status_start; - outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQSR); - outl(cpu_to_le32(phys_status_start + IPS_STATUS_Q_SIZE), ha->io_addr + IPS_REG_SQER); - outl(cpu_to_le32(phys_status_start + IPS_STATUS_SIZE), ha->io_addr + IPS_REG_SQHR); - outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQTR); + ha->adapt->p_status_start = ha->adapt->status; 
+ ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; + ha->adapt->p_status_tail = ha->adapt->status; + + phys_status_start = ha->adapt->hw_status_start; + outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQSR); + outl(cpu_to_le32(phys_status_start + IPS_STATUS_Q_SIZE), + ha->io_addr + IPS_REG_SQER); + outl(cpu_to_le32(phys_status_start + IPS_STATUS_SIZE), + ha->io_addr + IPS_REG_SQHR); + outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQTR); - ha->adapt->hw_status_tail = phys_status_start; + ha->adapt->hw_status_tail = phys_status_start; } /****************************************************************************/ @@ -5831,22 +5348,24 @@ /* */ /****************************************************************************/ static void -ips_statinit_memio(ips_ha_t *ha) { - uint32_t phys_status_start; +ips_statinit_memio(ips_ha_t * ha) +{ + uint32_t phys_status_start; - METHOD_TRACE("ips_statinit_memio", 1); + METHOD_TRACE("ips_statinit_memio", 1); - ha->adapt->p_status_start = ha->adapt->status; - ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; - ha->adapt->p_status_tail = ha->adapt->status; - - phys_status_start = ha->adapt->hw_status_start; - writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR); - writel(phys_status_start + IPS_STATUS_Q_SIZE, ha->mem_ptr + IPS_REG_SQER); - writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR); - writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR); + ha->adapt->p_status_start = ha->adapt->status; + ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; + ha->adapt->p_status_tail = ha->adapt->status; + + phys_status_start = ha->adapt->hw_status_start; + writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR); + writel(phys_status_start + IPS_STATUS_Q_SIZE, + ha->mem_ptr + IPS_REG_SQER); + writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR); + writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR); - ha->adapt->hw_status_tail = phys_status_start; + ha->adapt->hw_status_tail = phys_status_start; } /****************************************************************************/ @@ -5859,20 +5378,22 @@ /* */ /****************************************************************************/ static uint32_t -ips_statupd_copperhead(ips_ha_t *ha) { - METHOD_TRACE("ips_statupd_copperhead", 1); +ips_statupd_copperhead(ips_ha_t * ha) +{ + METHOD_TRACE("ips_statupd_copperhead", 1); - if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { - ha->adapt->p_status_tail++; - ha->adapt->hw_status_tail += sizeof(IPS_STATUS); - } else { - ha->adapt->p_status_tail = ha->adapt->p_status_start; - ha->adapt->hw_status_tail = ha->adapt->hw_status_start; - } + if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { + ha->adapt->p_status_tail++; + ha->adapt->hw_status_tail += sizeof (IPS_STATUS); + } else { + ha->adapt->p_status_tail = ha->adapt->p_status_start; + ha->adapt->hw_status_tail = ha->adapt->hw_status_start; + } - outl(cpu_to_le32(ha->adapt->hw_status_tail), ha->io_addr + IPS_REG_SQTR); + outl(cpu_to_le32(ha->adapt->hw_status_tail), + ha->io_addr + IPS_REG_SQTR); - return (ha->adapt->p_status_tail->value); + return (ha->adapt->p_status_tail->value); } /****************************************************************************/ @@ -5885,20 +5406,21 @@ /* */ /****************************************************************************/ static uint32_t -ips_statupd_copperhead_memio(ips_ha_t *ha) { - METHOD_TRACE("ips_statupd_copperhead_memio", 1); +ips_statupd_copperhead_memio(ips_ha_t * 
ha) +{ + METHOD_TRACE("ips_statupd_copperhead_memio", 1); - if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { - ha->adapt->p_status_tail++; - ha->adapt->hw_status_tail += sizeof(IPS_STATUS); - } else { - ha->adapt->p_status_tail = ha->adapt->p_status_start; - ha->adapt->hw_status_tail = ha->adapt->hw_status_start; - } + if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { + ha->adapt->p_status_tail++; + ha->adapt->hw_status_tail += sizeof (IPS_STATUS); + } else { + ha->adapt->p_status_tail = ha->adapt->p_status_start; + ha->adapt->hw_status_tail = ha->adapt->hw_status_start; + } - writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR); + writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR); - return (ha->adapt->p_status_tail->value); + return (ha->adapt->p_status_tail->value); } /****************************************************************************/ @@ -5911,14 +5433,15 @@ /* */ /****************************************************************************/ static uint32_t -ips_statupd_morpheus(ips_ha_t *ha) { - uint32_t val; +ips_statupd_morpheus(ips_ha_t * ha) +{ + uint32_t val; - METHOD_TRACE("ips_statupd_morpheus", 1); + METHOD_TRACE("ips_statupd_morpheus", 1); - val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ); + val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ); - return (val); + return (val); } /****************************************************************************/ @@ -5931,50 +5454,49 @@ /* */ /****************************************************************************/ static int -ips_issue_copperhead(ips_ha_t *ha, ips_scb_t *scb) { - uint32_t TimeOut; - uint32_t val; - - METHOD_TRACE("ips_issue_copperhead", 1); - - if (scb->scsi_cmd) { - DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - } else { - DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d", - ips_name, - ha->host_num, - scb->cmd.basic_io.command_id); - } - - TimeOut = 0; - - while ((val = le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) { - udelay(1000); - - if (++TimeOut >= IPS_SEM_TIMEOUT) { - if (!(val & IPS_BIT_START_STOP)) - break; - - printk(KERN_WARNING "(%s%d) ips_issue val [0x%x].\n", - ips_name, ha->host_num, val); - printk(KERN_WARNING "(%s%d) ips_issue semaphore chk timeout.\n", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } /* end if */ - } /* end while */ +ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb) +{ + uint32_t TimeOut; + uint32_t val; - outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_CCSAR); - outw(cpu_to_le32(IPS_BIT_START_CMD), ha->io_addr + IPS_REG_CCCR); + METHOD_TRACE("ips_issue_copperhead", 1); - return (IPS_SUCCESS); + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } + + TimeOut = 0; + + while ((val = le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & + IPS_BIT_SEM) { + udelay(1000); + + if (++TimeOut >= IPS_SEM_TIMEOUT) { + if (!(val & IPS_BIT_START_STOP)) + break; + + printk(KERN_WARNING "(%s%d) ips_issue val [0x%x].\n", + ips_name, ha->host_num, val); + printk(KERN_WARNING + "(%s%d) ips_issue semaphore chk timeout.\n", + ips_name, ha->host_num); + + return (IPS_FAILURE); + } /* end if */ + } /* end 
while */ + + outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_CCSAR); + outw(cpu_to_le32(IPS_BIT_START_CMD), ha->io_addr + IPS_REG_CCCR); + + return (IPS_SUCCESS); } /****************************************************************************/ @@ -5987,50 +5509,48 @@ /* */ /****************************************************************************/ static int -ips_issue_copperhead_memio(ips_ha_t *ha, ips_scb_t *scb) { - uint32_t TimeOut; - uint32_t val; - - METHOD_TRACE("ips_issue_copperhead_memio", 1); - - if (scb->scsi_cmd) { - DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - } else { - DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", - ips_name, - ha->host_num, - scb->cmd.basic_io.command_id); - } - - TimeOut = 0; - - while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) { - udelay(1000); - - if (++TimeOut >= IPS_SEM_TIMEOUT) { - if (!(val & IPS_BIT_START_STOP)) - break; - - printk(KERN_WARNING "(%s%d) ips_issue val [0x%x].\n", - ips_name, ha->host_num, val); - printk(KERN_WARNING "(%s%d) ips_issue semaphore chk timeout.\n", - ips_name, ha->host_num); - - return (IPS_FAILURE); - } /* end if */ - } /* end while */ +ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb) +{ + uint32_t TimeOut; + uint32_t val; - writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR); - writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR); + METHOD_TRACE("ips_issue_copperhead_memio", 1); - return (IPS_SUCCESS); + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } + + TimeOut = 0; + + while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) { + udelay(1000); + + if (++TimeOut >= IPS_SEM_TIMEOUT) { + if (!(val & IPS_BIT_START_STOP)) + break; + + printk(KERN_WARNING "(%s%d) ips_issue val [0x%x].\n", + ips_name, ha->host_num, val); + printk(KERN_WARNING + "(%s%d) ips_issue semaphore chk timeout.\n", + ips_name, ha->host_num); + + return (IPS_FAILURE); + } /* end if */ + } /* end while */ + + writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR); + writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR); + + return (IPS_SUCCESS); } /****************************************************************************/ @@ -6043,29 +5563,26 @@ /* */ /****************************************************************************/ static int -ips_issue_i2o(ips_ha_t *ha, ips_scb_t *scb) { +ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb) +{ - METHOD_TRACE("ips_issue_i2o", 1); + METHOD_TRACE("ips_issue_i2o", 1); - if (scb->scsi_cmd) { - DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - } else { - DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", - ips_name, - ha->host_num, - scb->cmd.basic_io.command_id); - } + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } - outl(cpu_to_le32(scb->scb_busaddr), 
ha->io_addr + IPS_REG_I2O_INMSGQ); + outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_I2O_INMSGQ); - return (IPS_SUCCESS); + return (IPS_SUCCESS); } /****************************************************************************/ @@ -6078,29 +5595,26 @@ /* */ /****************************************************************************/ static int -ips_issue_i2o_memio(ips_ha_t *ha, ips_scb_t *scb) { +ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb) +{ - METHOD_TRACE("ips_issue_i2o_memio", 1); + METHOD_TRACE("ips_issue_i2o_memio", 1); - if (scb->scsi_cmd) { - DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", - ips_name, - ha->host_num, - scb->cdb[0], - scb->cmd.basic_io.command_id, - scb->bus, - scb->target_id, - scb->lun); - } else { - DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", - ips_name, - ha->host_num, - scb->cmd.basic_io.command_id); - } + if (scb->scsi_cmd) { + DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", + ips_name, + ha->host_num, + scb->cdb[0], + scb->cmd.basic_io.command_id, + scb->bus, scb->target_id, scb->lun); + } else { + DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", + ips_name, ha->host_num, scb->cmd.basic_io.command_id); + } - writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ); + writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ); - return (IPS_SUCCESS); + return (IPS_SUCCESS); } /****************************************************************************/ @@ -6113,26 +5627,27 @@ /* */ /****************************************************************************/ static int -ips_isintr_copperhead(ips_ha_t *ha) { - uint8_t Isr; +ips_isintr_copperhead(ips_ha_t * ha) +{ + uint8_t Isr; + + METHOD_TRACE("ips_isintr_copperhead", 2); - METHOD_TRACE("ips_isintr_copperhead", 2); + Isr = inb(ha->io_addr + IPS_REG_HISR); - Isr = inb(ha->io_addr + IPS_REG_HISR); + if (Isr == 0xFF) + /* ?!?! Nothing really there */ + return (0); - if (Isr == 0xFF) - /* ?!?! Nothing really there */ - return (0); - - if (Isr & IPS_BIT_SCE) - return (1); - else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { - /* status queue overflow or GHI */ - /* just clear the interrupt */ - outb(Isr, ha->io_addr + IPS_REG_HISR); - } + if (Isr & IPS_BIT_SCE) + return (1); + else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { + /* status queue overflow or GHI */ + /* just clear the interrupt */ + outb(Isr, ha->io_addr + IPS_REG_HISR); + } - return (0); + return (0); } /****************************************************************************/ @@ -6145,26 +5660,27 @@ /* */ /****************************************************************************/ static int -ips_isintr_copperhead_memio(ips_ha_t *ha) { - uint8_t Isr; +ips_isintr_copperhead_memio(ips_ha_t * ha) +{ + uint8_t Isr; - METHOD_TRACE("ips_isintr_memio", 2); + METHOD_TRACE("ips_isintr_memio", 2); - Isr = readb(ha->mem_ptr + IPS_REG_HISR); + Isr = readb(ha->mem_ptr + IPS_REG_HISR); - if (Isr == 0xFF) - /* ?!?! Nothing really there */ - return (0); - - if (Isr & IPS_BIT_SCE) - return (1); - else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { - /* status queue overflow or GHI */ - /* just clear the interrupt */ - writeb(Isr, ha->mem_ptr + IPS_REG_HISR); - } + if (Isr == 0xFF) + /* ?!?! 
Nothing really there */ + return (0); - return (0); + if (Isr & IPS_BIT_SCE) + return (1); + else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { + /* status queue overflow or GHI */ + /* just clear the interrupt */ + writeb(Isr, ha->mem_ptr + IPS_REG_HISR); + } + + return (0); } /****************************************************************************/ @@ -6177,17 +5693,18 @@ /* */ /****************************************************************************/ static int -ips_isintr_morpheus(ips_ha_t *ha) { - uint32_t Isr; +ips_isintr_morpheus(ips_ha_t * ha) +{ + uint32_t Isr; - METHOD_TRACE("ips_isintr_morpheus", 2); + METHOD_TRACE("ips_isintr_morpheus", 2); - Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); + Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); - if (Isr & IPS_BIT_I2O_OPQI) - return (1); - else - return (0); + if (Isr & IPS_BIT_I2O_OPQI) + return (1); + else + return (0); } /****************************************************************************/ @@ -6200,51 +5717,52 @@ /* */ /****************************************************************************/ static int -ips_wait(ips_ha_t *ha, int time, int intr) { - int ret; - int done; - - METHOD_TRACE("ips_wait", 1); - - ret = IPS_FAILURE; - done = FALSE; - - time *= IPS_ONE_SEC; /* convert seconds */ - - while ((time > 0) && (!done)) { - if (intr == IPS_INTR_ON) { - if (ha->waitflag == FALSE) { - ret = IPS_SUCCESS; - done = TRUE; - break; - } - } else if (intr == IPS_INTR_IORL) { - if (ha->waitflag == FALSE) { - /* - * controller generated an interrupt to - * acknowledge completion of the command - * and ips_intr() has serviced the interrupt. - */ - ret = IPS_SUCCESS; - done = TRUE; - break; - } - - /* - * NOTE: we already have the io_request_lock so - * even if we get an interrupt it won't get serviced - * until after we finish. - */ - - (*ha->func.intr)(ha); - } - - /* This looks like a very evil loop, but it only does this during start-up */ - udelay(1000); - time--; - } +ips_wait(ips_ha_t * ha, int time, int intr) +{ + int ret; + int done; + + METHOD_TRACE("ips_wait", 1); - return (ret); + ret = IPS_FAILURE; + done = FALSE; + + time *= IPS_ONE_SEC; /* convert seconds */ + + while ((time > 0) && (!done)) { + if (intr == IPS_INTR_ON) { + if (ha->waitflag == FALSE) { + ret = IPS_SUCCESS; + done = TRUE; + break; + } + } else if (intr == IPS_INTR_IORL) { + if (ha->waitflag == FALSE) { + /* + * controller generated an interrupt to + * acknowledge completion of the command + * and ips_intr() has serviced the interrupt. + */ + ret = IPS_SUCCESS; + done = TRUE; + break; + } + + /* + * NOTE: we already have the io_request_lock so + * even if we get an interrupt it won't get serviced + * until after we finish. 
+ */ + + (*ha->func.intr) (ha); + } + + /* This looks like a very evil loop, but it only does this during start-up */ + udelay(1000); + time--; + } + + return (ret); } /****************************************************************************/ @@ -6257,61 +5775,59 @@ /* */ /****************************************************************************/ static int -ips_write_driver_status(ips_ha_t *ha, int intr) { - METHOD_TRACE("ips_write_driver_status", 1); - - if (!ips_readwrite_page5(ha, FALSE, intr)) { - printk(KERN_WARNING "(%s%d) unable to read NVRAM page 5.\n", - ips_name, ha->host_num); - - return (0); - } - - /* check to make sure the page has a valid */ - /* signature */ - if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) { - DEBUG_VAR(1, "(%s%d) NVRAM page 5 has an invalid signature: %X.", - ips_name, ha->host_num, ha->nvram->signature); - ha->nvram->signature = IPS_NVRAM_P5_SIG; - } - - DEBUG_VAR(2, "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.", - ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type), - ha->nvram->adapter_slot, - ha->nvram->bios_high[0], ha->nvram->bios_high[1], - ha->nvram->bios_high[2], ha->nvram->bios_high[3], - ha->nvram->bios_low[0], ha->nvram->bios_low[1], - ha->nvram->bios_low[2], ha->nvram->bios_low[3]); - - ips_get_bios_version(ha, intr); - - /* change values (as needed) */ - ha->nvram->operating_system = IPS_OS_LINUX; - ha->nvram->adapter_type = ha->ad_type; - strncpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4); - strncpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4); - strncpy((char *) ha->nvram->bios_high, ha->bios_version, 4); - strncpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4); - - ips_version_check(ha, intr); /* Check BIOS/FW/Driver Versions */ - - /* Save the First Copy of the Adapter Order that BIOS put in Page 5 */ - if ( (InitState == 0) && (AdapterOrder[0] == 0) ) - strncpy((char *) AdapterOrder, (char *) ha->nvram->adapter_order, sizeof(AdapterOrder) ); - - /* now update the page */ - if (!ips_readwrite_page5(ha, TRUE, intr)) { - printk(KERN_WARNING "(%s%d) unable to write NVRAM page 5.\n", - ips_name, ha->host_num); +ips_write_driver_status(ips_ha_t * ha, int intr) +{ + METHOD_TRACE("ips_write_driver_status", 1); - return (0); - } + if (!ips_readwrite_page5(ha, FALSE, intr)) { + printk(KERN_WARNING "(%s%d) unable to read NVRAM page 5.\n", + ips_name, ha->host_num); + + return (0); + } + + /* check to make sure the page has a valid */ + /* signature */ + if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) { + DEBUG_VAR(1, + "(%s%d) NVRAM page 5 has an invalid signature: %X.", + ips_name, ha->host_num, ha->nvram->signature); + ha->nvram->signature = IPS_NVRAM_P5_SIG; + } + + DEBUG_VAR(2, + "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.", + ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type), + ha->nvram->adapter_slot, ha->nvram->bios_high[0], + ha->nvram->bios_high[1], ha->nvram->bios_high[2], + ha->nvram->bios_high[3], ha->nvram->bios_low[0], + ha->nvram->bios_low[1], ha->nvram->bios_low[2], + ha->nvram->bios_low[3]); + + ips_get_bios_version(ha, intr); + + /* change values (as needed) */ + ha->nvram->operating_system = IPS_OS_LINUX; + ha->nvram->adapter_type = ha->ad_type; + strncpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4); + strncpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4); + strncpy((char *) ha->nvram->bios_high, ha->bios_version, 4); + strncpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4); + + ips_version_check(ha, 
intr); /* Check BIOS/FW/Driver Versions */ + + /* now update the page */ + if (!ips_readwrite_page5(ha, TRUE, intr)) { + printk(KERN_WARNING "(%s%d) unable to write NVRAM page 5.\n", + ips_name, ha->host_num); - /* IF NVRAM Page 5 is OK, Use it for Slot Number Info Because Linux Doesn't Do Slots */ - ha->slot_num = ha->nvram->adapter_slot; + return (0); + } + /* IF NVRAM Page 5 is OK, Use it for Slot Number Info Because Linux Doesn't Do Slots */ + ha->slot_num = ha->nvram->adapter_slot; - return (1); + return (1); } /****************************************************************************/ @@ -6324,39 +5840,40 @@ /* */ /****************************************************************************/ static int -ips_read_adapter_status(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int ret; - - METHOD_TRACE("ips_read_adapter_status", 1); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_ENQUIRY; - - scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.basic_io.sg_count = 0; - scb->cmd.basic_io.lba = 0; - scb->cmd.basic_io.sector_count = 0; - scb->cmd.basic_io.log_drv = 0; - scb->cmd.basic_io.reserved = 0; - scb->data_len = sizeof(*ha->enq); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq, scb->data_len, - IPS_DMA_DIR(scb)); - scb->cmd.basic_io.sg_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* send command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) - return (0); +ips_read_adapter_status(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; + + METHOD_TRACE("ips_read_adapter_status", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; - return (1); + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_ENQUIRY; + + scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.sg_count = 0; + scb->cmd.basic_io.lba = 0; + scb->cmd.basic_io.sector_count = 0; + scb->cmd.basic_io.log_drv = 0; + scb->data_len = sizeof (*ha->enq); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->enq, scb->data_len, + IPS_DMA_DIR(scb)); + scb->cmd.basic_io.sg_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* send command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + return (1); } /****************************************************************************/ @@ -6369,39 +5886,40 @@ /* */ /****************************************************************************/ static int -ips_read_subsystem_parameters(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int ret; - - METHOD_TRACE("ips_read_subsystem_parameters", 1); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_GET_SUBSYS; - - scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.basic_io.sg_count = 0; - scb->cmd.basic_io.lba = 0; - scb->cmd.basic_io.sector_count = 0; - scb->cmd.basic_io.log_drv = 0; - scb->cmd.basic_io.reserved = 0; - scb->data_len = sizeof(*ha->subsys); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->subsys, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.basic_io.sg_addr = 
scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* send command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) - return (0); +ips_read_subsystem_parameters(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; - return (1); + METHOD_TRACE("ips_read_subsystem_parameters", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_GET_SUBSYS; + + scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.basic_io.sg_count = 0; + scb->cmd.basic_io.lba = 0; + scb->cmd.basic_io.sector_count = 0; + scb->cmd.basic_io.log_drv = 0; + scb->data_len = sizeof (*ha->subsys); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->subsys, + scb->data_len, IPS_DMA_DIR(scb)); + scb->cmd.basic_io.sg_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* send command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + return (1); } /****************************************************************************/ @@ -6414,51 +5932,53 @@ /* */ /****************************************************************************/ static int -ips_read_config(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int i; - int ret; - - METHOD_TRACE("ips_read_config", 1); - - /* set defaults for initiator IDs */ - for (i = 0; i < 4; i++) - ha->conf->init_id[i] = 7; - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_READ_CONF; - - scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF; - scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); - scb->data_len = sizeof(*ha->conf); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->conf, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.basic_io.sg_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* send command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { - - memset(ha->conf, 0, sizeof(IPS_CONF)); - - /* reset initiator IDs */ - for (i = 0; i < 4; i++) - ha->conf->init_id[i] = 7; - - /* Allow Completed with Errors, so JCRM can access the Adapter to fix the problems */ - if ((scb->basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_CMPLT_WERROR) - return (1); - - return (0); - } +ips_read_config(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int i; + int ret; + + METHOD_TRACE("ips_read_config", 1); - return (1); + /* set defaults for initiator IDs */ + for (i = 0; i < 4; i++) + ha->conf->init_id[i] = 7; + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_READ_CONF; + + scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF; + scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); + scb->data_len = sizeof (*ha->conf); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->conf, + scb->data_len, IPS_DMA_DIR(scb)); + scb->cmd.basic_io.sg_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* send command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { + + memset(ha->conf, 0, sizeof (IPS_CONF)); + + /* 
reset initiator IDs */ + for (i = 0; i < 4; i++) + ha->conf->init_id[i] = 7; + + /* Allow Completed with Errors, so JCRM can access the Adapter to fix the problems */ + if ((scb->basic_status & IPS_GSC_STATUS_MASK) == + IPS_CMD_CMPLT_WERROR) return (1); + + return (0); + } + + return (1); } /****************************************************************************/ @@ -6471,42 +5991,44 @@ /* */ /****************************************************************************/ static int -ips_readwrite_page5(ips_ha_t *ha, int write, int intr) { - ips_scb_t *scb; - int ret; - - METHOD_TRACE("ips_readwrite_page5", 1); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE; - - scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE; - scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.nvram.page = 5; - scb->cmd.nvram.write = write; - scb->cmd.nvram.reserved = 0; - scb->cmd.nvram.reserved2 = 0; - scb->data_len = sizeof(*ha->nvram); - scb->data_busaddr = pci_map_single(ha->pcidev, ha->nvram, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.nvram.buffer_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* issue the command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { +ips_readwrite_page5(ips_ha_t * ha, int write, int intr) +{ + ips_scb_t *scb; + int ret; + + METHOD_TRACE("ips_readwrite_page5", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; - memset(ha->nvram, 0, sizeof(IPS_NVRAM_P5)); + ips_init_scb(ha, scb); - return (0); - } + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE; + + scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE; + scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.nvram.page = 5; + scb->cmd.nvram.write = write; + scb->cmd.nvram.reserved = 0; + scb->cmd.nvram.reserved2 = 0; + scb->data_len = sizeof (*ha->nvram); + scb->data_busaddr = pci_map_single(ha->pcidev, ha->nvram, + scb->data_len, IPS_DMA_DIR(scb)); + scb->cmd.nvram.buffer_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* issue the command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { - return (1); + memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5)); + + return (0); + } + + return (1); } /****************************************************************************/ @@ -6519,54 +6041,57 @@ /* */ /****************************************************************************/ static int -ips_clear_adapter(ips_ha_t *ha, int intr) { - ips_scb_t *scb; - int ret; - - METHOD_TRACE("ips_clear_adapter", 1); - - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_reset_timeout; - scb->cdb[0] = IPS_CMD_CONFIG_SYNC; - - scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC; - scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.config_sync.channel = 0; - scb->cmd.config_sync.source_target = IPS_POCL; - scb->cmd.config_sync.reserved = 0; - scb->cmd.config_sync.reserved2 = 0; - scb->cmd.config_sync.reserved3 = 0; - - /* issue command */ - if (((ret = ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) - return (0); - - /* send unlock stripe command */ - ips_init_scb(ha, scb); - - scb->cdb[0] = IPS_CMD_ERROR_TABLE; - 
scb->timeout = ips_reset_timeout; - - scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE; - scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.unlock_stripe.log_drv = 0; - scb->cmd.unlock_stripe.control = IPS_CSL; - scb->cmd.unlock_stripe.reserved = 0; - scb->cmd.unlock_stripe.reserved2 = 0; - scb->cmd.unlock_stripe.reserved3 = 0; - - /* issue command */ - if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || - (ret == IPS_SUCCESS_IMM) || - ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) - return (0); +ips_clear_adapter(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; + int ret; + + METHOD_TRACE("ips_clear_adapter", 1); + + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); + + scb->timeout = ips_reset_timeout; + scb->cdb[0] = IPS_CMD_CONFIG_SYNC; + + scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC; + scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.config_sync.channel = 0; + scb->cmd.config_sync.source_target = IPS_POCL; + scb->cmd.config_sync.reserved = 0; + scb->cmd.config_sync.reserved2 = 0; + scb->cmd.config_sync.reserved3 = 0; + + /* issue command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_reset_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); + + /* send unlock stripe command */ + ips_init_scb(ha, scb); + + scb->cdb[0] = IPS_CMD_ERROR_TABLE; + scb->timeout = ips_reset_timeout; + + scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE; + scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.unlock_stripe.log_drv = 0; + scb->cmd.unlock_stripe.control = IPS_CSL; + scb->cmd.unlock_stripe.reserved = 0; + scb->cmd.unlock_stripe.reserved2 = 0; + scb->cmd.unlock_stripe.reserved3 = 0; + + /* issue command */ + if ( + ((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == + IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) + || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) + return (0); - return (1); + return (1); } /****************************************************************************/ @@ -6579,27 +6104,28 @@ /* */ /****************************************************************************/ static void -ips_ffdc_reset(ips_ha_t *ha, int intr) { - ips_scb_t *scb; +ips_ffdc_reset(ips_ha_t * ha, int intr) +{ + ips_scb_t *scb; - METHOD_TRACE("ips_ffdc_reset", 1); + METHOD_TRACE("ips_ffdc_reset", 1); - scb = &ha->scbs[ha->max_cmds-1]; + scb = &ha->scbs[ha->max_cmds - 1]; - ips_init_scb(ha, scb); + ips_init_scb(ha, scb); - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_FFDC; - scb->cmd.ffdc.op_code = IPS_CMD_FFDC; - scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.ffdc.reset_count = ha->reset_count; - scb->cmd.ffdc.reset_type = 0x80; + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FFDC; + scb->cmd.ffdc.op_code = IPS_CMD_FFDC; + scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.ffdc.reset_count = ha->reset_count; + scb->cmd.ffdc.reset_type = 0x80; - /* convert time to what the card wants */ - ips_fix_ffdc_time(ha, scb, ha->last_ffdc); + /* convert time to what the card wants */ + ips_fix_ffdc_time(ha, scb, ha->last_ffdc); - /* issue command */ - ips_send_wait(ha, scb, ips_cmd_timeout, intr); + /* issue command */ + ips_send_wait(ha, scb, ips_cmd_timeout, intr); } /****************************************************************************/ @@ -6612,30 +6138,30 @@ /* */ /****************************************************************************/ static void 
-ips_ffdc_time(ips_ha_t *ha) { - ips_scb_t *scb; +ips_ffdc_time(ips_ha_t * ha) +{ + ips_scb_t *scb; - METHOD_TRACE("ips_ffdc_time", 1); + METHOD_TRACE("ips_ffdc_time", 1); - DEBUG_VAR(1, "(%s%d) Sending time update.", - ips_name, ha->host_num); + DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num); - scb = &ha->scbs[ha->max_cmds-1]; + scb = &ha->scbs[ha->max_cmds - 1]; - ips_init_scb(ha, scb); + ips_init_scb(ha, scb); - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_FFDC; - scb->cmd.ffdc.op_code = IPS_CMD_FFDC; - scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.ffdc.reset_count = 0; - scb->cmd.ffdc.reset_type = 0x80; + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_FFDC; + scb->cmd.ffdc.op_code = IPS_CMD_FFDC; + scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.ffdc.reset_count = 0; + scb->cmd.ffdc.reset_type = 0x80; - /* convert time to what the card wants */ - ips_fix_ffdc_time(ha, scb, ha->last_ffdc); + /* convert time to what the card wants */ + ips_fix_ffdc_time(ha, scb, ha->last_ffdc); - /* issue command */ - ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC); + /* issue command */ + ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC); } /****************************************************************************/ @@ -6647,57 +6173,59 @@ /* */ /****************************************************************************/ static void -ips_fix_ffdc_time(ips_ha_t *ha, ips_scb_t *scb, time_t current_time) { - long days; - long rem; - int i; - int year; - int yleap; - int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR }; - int month_lengths[12][2] = { {31, 31}, - {28, 29}, - {31, 31}, - {30, 30}, - {31, 31}, - {30, 30}, - {31, 31}, - {31, 31}, - {30, 30}, - {31, 31}, - {30, 30}, - {31, 31} }; - - METHOD_TRACE("ips_fix_ffdc_time", 1); - - days = current_time / IPS_SECS_DAY; - rem = current_time % IPS_SECS_DAY; - - scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR); - rem = rem % IPS_SECS_HOUR; - scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN); - scb->cmd.ffdc.second = (rem % IPS_SECS_MIN); - - year = IPS_EPOCH_YEAR; - while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) { - int newy; - - newy = year + (days / IPS_DAYS_NORMAL_YEAR); - if (days < 0) - --newy; - days -= (newy - year) * IPS_DAYS_NORMAL_YEAR + - IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) - - IPS_NUM_LEAP_YEARS_THROUGH(year - 1); - year = newy; - } +ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time_t current_time) +{ + long days; + long rem; + int i; + int year; + int yleap; + int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR }; + int month_lengths[12][2] = { {31, 31}, + {28, 29}, + {31, 31}, + {30, 30}, + {31, 31}, + {30, 30}, + {31, 31}, + {31, 31}, + {30, 30}, + {31, 31}, + {30, 30}, + {31, 31} + }; + + METHOD_TRACE("ips_fix_ffdc_time", 1); + + days = current_time / IPS_SECS_DAY; + rem = current_time % IPS_SECS_DAY; + + scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR); + rem = rem % IPS_SECS_HOUR; + scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN); + scb->cmd.ffdc.second = (rem % IPS_SECS_MIN); + + year = IPS_EPOCH_YEAR; + while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) { + int newy; + + newy = year + (days / IPS_DAYS_NORMAL_YEAR); + if (days < 0) + --newy; + days -= (newy - year) * IPS_DAYS_NORMAL_YEAR + + IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) - + IPS_NUM_LEAP_YEARS_THROUGH(year - 1); + year = newy; + } - scb->cmd.ffdc.yearH = year / 100; - scb->cmd.ffdc.yearL = year % 100; + scb->cmd.ffdc.yearH = year / 100; + 
scb->cmd.ffdc.yearL = year % 100; - for (i = 0; days >= month_lengths[i][yleap]; ++i) - days -= month_lengths[i][yleap]; + for (i = 0; days >= month_lengths[i][yleap]; ++i) + days -= month_lengths[i][yleap]; - scb->cmd.ffdc.month = i + 1; - scb->cmd.ffdc.day = days + 1; + scb->cmd.ffdc.month = i + 1; + scb->cmd.ffdc.day = days + 1; } /**************************************************************************** @@ -6713,106 +6241,107 @@ /* */ /****************************************************************************/ static int -ips_erase_bios(ips_ha_t *ha) { - int timeout; - uint8_t status=0; - - METHOD_TRACE("ips_erase_bios", 1); - - status = 0; - - /* Clear the status register */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(0x50, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Setup */ - outb(0x20, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Confirm */ - outb(0xD0, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Status */ - outb(0x70, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - timeout = 80000; /* 80 seconds */ - - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - outl(0, ha->io_addr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = inb(ha->io_addr + IPS_REG_FLDP); - - if (status & 0x80) - break; - - MDELAY(1); - timeout--; - } - - /* check for timeout */ - if (timeout <= 0) { - /* timeout */ - - /* try to suspend the erase */ - outb(0xB0, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* wait for 10 seconds */ - timeout = 10000; - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - outl(0, ha->io_addr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = inb(ha->io_addr + IPS_REG_FLDP); - - if (status & 0xC0) - break; - - MDELAY(1); - timeout--; - } - - return (1); - } - - /* check for valid VPP */ - if (status & 0x08) - /* VPP failure */ - return (1); - - /* check for succesful flash */ - if (status & 0x30) - /* sequence error */ - return (1); - - /* Otherwise, we were successful */ - /* clear status */ - outb(0x50, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* enable reads */ - outb(0xFF, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ +ips_erase_bios(ips_ha_t * ha) +{ + int timeout; + uint8_t status = 0; + + METHOD_TRACE("ips_erase_bios", 1); + + status = 0; - return (0); + /* Clear the status register */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0x50, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Setup */ + outb(0x20, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Confirm */ + outb(0xD0, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Status */ + outb(0x70, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + timeout = 80000; /* 80 seconds */ + + while (timeout > 0) { + if 
(ha->revision_id == IPS_REVID_TROMBONE64) { + outl(0, ha->io_addr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = inb(ha->io_addr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + /* check for timeout */ + if (timeout <= 0) { + /* timeout */ + + /* try to suspend the erase */ + outb(0xB0, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait for 10 seconds */ + timeout = 10000; + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + outl(0, ha->io_addr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = inb(ha->io_addr + IPS_REG_FLDP); + + if (status & 0xC0) + break; + + MDELAY(1); + timeout--; + } + + return (1); + } + + /* check for valid VPP */ + if (status & 0x08) + /* VPP failure */ + return (1); + + /* check for succesful flash */ + if (status & 0x30) + /* sequence error */ + return (1); + + /* Otherwise, we were successful */ + /* clear status */ + outb(0x50, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* enable reads */ + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (0); } /****************************************************************************/ @@ -6824,106 +6353,107 @@ /* */ /****************************************************************************/ static int -ips_erase_bios_memio(ips_ha_t *ha) { - int timeout; - uint8_t status; - - METHOD_TRACE("ips_erase_bios_memio", 1); - - status = 0; - - /* Clear the status register */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Setup */ - writeb(0x20, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Confirm */ - writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* Erase Status */ - writeb(0x70, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - timeout = 80000; /* 80 seconds */ - - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - writel(0, ha->mem_ptr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = readb(ha->mem_ptr + IPS_REG_FLDP); - - if (status & 0x80) - break; - - MDELAY(1); - timeout--; - } - - /* check for timeout */ - if (timeout <= 0) { - /* timeout */ - - /* try to suspend the erase */ - writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* wait for 10 seconds */ - timeout = 10000; - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - writel(0, ha->mem_ptr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = readb(ha->mem_ptr + IPS_REG_FLDP); - - if (status & 0xC0) - break; - - MDELAY(1); - timeout--; - } - - return (1); - } - - /* check for valid VPP */ - if (status & 0x08) - /* VPP failure */ - return (1); - - /* check for succesful flash */ - if (status & 0x30) - /* sequence error */ - return (1); - - /* Otherwise, we were successful */ - /* clear status */ - writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* enable reads */ - writeb(0xFF, ha->mem_ptr + 
IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ +ips_erase_bios_memio(ips_ha_t * ha) +{ + int timeout; + uint8_t status; + + METHOD_TRACE("ips_erase_bios_memio", 1); + + status = 0; - return (0); + /* Clear the status register */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Setup */ + writeb(0x20, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Confirm */ + writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* Erase Status */ + writeb(0x70, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + timeout = 80000; /* 80 seconds */ + + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + writel(0, ha->mem_ptr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = readb(ha->mem_ptr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + /* check for timeout */ + if (timeout <= 0) { + /* timeout */ + + /* try to suspend the erase */ + writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait for 10 seconds */ + timeout = 10000; + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + writel(0, ha->mem_ptr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = readb(ha->mem_ptr + IPS_REG_FLDP); + + if (status & 0xC0) + break; + + MDELAY(1); + timeout--; + } + + return (1); + } + + /* check for valid VPP */ + if (status & 0x08) + /* VPP failure */ + return (1); + + /* check for succesful flash */ + if (status & 0x30) + /* sequence error */ + return (1); + + /* Otherwise, we were successful */ + /* clear status */ + writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* enable reads */ + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (0); } /****************************************************************************/ @@ -6935,84 +6465,86 @@ /* */ /****************************************************************************/ static int -ips_program_bios(ips_ha_t *ha, char *buffer, uint32_t buffersize, uint32_t offset) { - int i; - int timeout; - uint8_t status=0; - - METHOD_TRACE("ips_program_bios", 1); - - status = 0; - - for (i = 0; i < buffersize; i++) { - /* write a byte */ - outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(0x40, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(buffer[i], ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* wait up to one second */ - timeout = 1000; - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - outl(0, ha->io_addr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = inb(ha->io_addr + IPS_REG_FLDP); - - if (status & 0x80) - break; - - MDELAY(1); - timeout--; - } - - if (timeout == 0) { - /* timeout error */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us 
*/ - - outb(0xFF, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - return (1); - } - - /* check the status */ - if (status & 0x18) { - /* programming error */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(0xFF, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - return (1); - } - } /* end for */ - - /* Enable reading */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - outb(0xFF, ha->io_addr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ +ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + int i; + int timeout; + uint8_t status = 0; + + METHOD_TRACE("ips_program_bios", 1); + + status = 0; + + for (i = 0; i < buffersize; i++) { + /* write a byte */ + outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0x40, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(buffer[i], ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait up to one second */ + timeout = 1000; + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + outl(0, ha->io_addr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = inb(ha->io_addr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + if (timeout == 0) { + /* timeout error */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + + /* check the status */ + if (status & 0x18) { + /* programming error */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + } /* end for */ + + /* Enable reading */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + outb(0xFF, ha->io_addr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ - return (0); + return (0); } /****************************************************************************/ @@ -7024,84 +6556,86 @@ /* */ /****************************************************************************/ static int -ips_program_bios_memio(ips_ha_t *ha, char *buffer, uint32_t buffersize, uint32_t offset) { - int i; - int timeout; - uint8_t status=0; - - METHOD_TRACE("ips_program_bios_memio", 1); - - status = 0; - - for (i = 0; i < buffersize; i++) { - /* write a byte */ - writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0x40, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - /* wait up to one second */ - timeout = 1000; - while (timeout > 0) { - if (ha->revision_id == IPS_REVID_TROMBONE64) { - writel(0, 
ha->mem_ptr + IPS_REG_FLAP); - udelay(25); /* 25 us */ - } - - status = readb(ha->mem_ptr + IPS_REG_FLDP); - - if (status & 0x80) - break; - - MDELAY(1); - timeout--; - } - - if (timeout == 0) { - /* timeout error */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - return (1); - } - - /* check the status */ - if (status & 0x18) { - /* programming error */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - return (1); - } - } /* end for */ - - /* Enable reading */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ +ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + int i; + int timeout; + uint8_t status = 0; + + METHOD_TRACE("ips_program_bios_memio", 1); + + status = 0; + + for (i = 0; i < buffersize; i++) { + /* write a byte */ + writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0x40, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + /* wait up to one second */ + timeout = 1000; + while (timeout > 0) { + if (ha->revision_id == IPS_REVID_TROMBONE64) { + writel(0, ha->mem_ptr + IPS_REG_FLAP); + udelay(25); /* 25 us */ + } + + status = readb(ha->mem_ptr + IPS_REG_FLDP); + + if (status & 0x80) + break; + + MDELAY(1); + timeout--; + } + + if (timeout == 0) { + /* timeout error */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + + /* check the status */ + if (status & 0x18) { + /* programming error */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + return (1); + } + } /* end for */ + + /* Enable reading */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ - return (0); + return (0); } /****************************************************************************/ @@ -7113,42 +6647,44 @@ /* */ /****************************************************************************/ static int -ips_verify_bios(ips_ha_t *ha, char *buffer, uint32_t buffersize, uint32_t offset) { - uint8_t checksum; - int i; - - METHOD_TRACE("ips_verify_bios", 1); - - /* test 1st byte */ - outl(0, ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) - return (1); - - outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); - 
if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) - return (1); - - checksum = 0xff; - for (i = 2; i < buffersize; i++) { - - outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP); - } - - if (checksum != 0) - /* failure */ - return (1); - else - /* success */ - return (0); +ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + uint8_t checksum; + int i; + + METHOD_TRACE("ips_verify_bios", 1); + + /* test 1st byte */ + outl(0, ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) + return (1); + + outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) + return (1); + + checksum = 0xff; + for (i = 2; i < buffersize; i++) { + + outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP); + } + + if (checksum != 0) + /* failure */ + return (1); + else + /* success */ + return (0); } /****************************************************************************/ @@ -7160,42 +6696,45 @@ /* */ /****************************************************************************/ static int -ips_verify_bios_memio(ips_ha_t *ha, char *buffer, uint32_t buffersize, uint32_t offset) { - uint8_t checksum; - int i; - - METHOD_TRACE("ips_verify_bios_memio", 1); - - /* test 1st byte */ - writel(0, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) - return (1); - - writel(1, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) - return (1); - - checksum = 0xff; - for (i = 2; i < buffersize; i++) { - - writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); - if (ha->revision_id == IPS_REVID_TROMBONE64) - udelay(25); /* 25 us */ - - checksum = (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP); - } - - if (checksum != 0) - /* failure */ - return (1); - else - /* success */ - return (0); +ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize, + uint32_t offset) +{ + uint8_t checksum; + int i; + + METHOD_TRACE("ips_verify_bios_memio", 1); + + /* test 1st byte */ + writel(0, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) + return (1); + + writel(1, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) + return (1); + + checksum = 0xff; + for (i = 2; i < buffersize; i++) { + + writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); + if (ha->revision_id == IPS_REVID_TROMBONE64) + udelay(25); /* 25 us */ + + checksum = + (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP); + } + + if (checksum != 0) + /* failure */ + return (1); + else + /* success */ + return (0); } /*---------------------------------------------------------------------------*/ @@ -7208,77 +6747,82 @@ /* Data is available. 
*/ /* */ /*---------------------------------------------------------------------------*/ -static void ips_version_check(ips_ha_t *ha, int intr) { - IPS_VERSION_DATA VersionInfo; - uint8_t FirmwareVersion[ IPS_COMPAT_ID_LENGTH + 1 ]; - uint8_t BiosVersion[ IPS_COMPAT_ID_LENGTH + 1]; - int MatchError; - int rc; - char BiosString[10]; - char FirmwareString[10]; - - METHOD_TRACE("ips_version_check", 1); - - memset(FirmwareVersion, 0, IPS_COMPAT_ID_LENGTH + 1); - memset(BiosVersion, 0, IPS_COMPAT_ID_LENGTH + 1); - - /* Get the Compatible BIOS Version from NVRAM Page 5 */ - memcpy(BiosVersion, ha->nvram->BiosCompatibilityID, IPS_COMPAT_ID_LENGTH); - - rc = IPS_FAILURE; - if (ha->subsys->param[4] & IPS_GET_VERSION_SUPPORT) /* If Versioning is Supported */ - { - /* Get the Version Info with a Get Version Command */ - rc = ips_get_version_info(ha, &VersionInfo, intr); - if (rc == IPS_SUCCESS) - memcpy(FirmwareVersion, VersionInfo.compatibilityId, IPS_COMPAT_ID_LENGTH); - } - - if (rc != IPS_SUCCESS) /* If Data Not Obtainable from a GetVersion Command */ - { - /* Get the Firmware Version from Enquiry Data */ - memcpy(FirmwareVersion, ha->enq->CodeBlkVersion, IPS_COMPAT_ID_LENGTH); - } - - /* printk(KERN_WARNING "Adapter's BIOS Version = %s\n", BiosVersion); */ - /* printk(KERN_WARNING "BIOS Compatible Version = %s\n", IPS_COMPAT_BIOS); */ - /* printk(KERN_WARNING "Adapter's Firmware Version = %s\n", FirmwareVersion); */ - /* printk(KERN_WARNING "Firmware Compatible Version = %s \n", Compatable[ ha->nvram->adapter_type ]); */ - - MatchError = 0; - - if (strncmp(FirmwareVersion, Compatable[ ha->nvram->adapter_type ], IPS_COMPAT_ID_LENGTH) != 0) - MatchError = 1; - - if (strncmp(BiosVersion, IPS_COMPAT_BIOS, IPS_COMPAT_ID_LENGTH) != 0) - MatchError = 1; - - ha->nvram->versioning = 1; /* Indicate the Driver Supports Versioning */ - - if (MatchError) - { - ha->nvram->version_mismatch = 1; - if (ips_cd_boot == 0) - { - strncpy(&BiosString[0], ha->nvram->bios_high, 4); - strncpy(&BiosString[4], ha->nvram->bios_low, 4); - BiosString[8] = 0; - - strncpy(&FirmwareString[0], ha->enq->CodeBlkVersion, 8); - FirmwareString[8] = 0; - - printk(KERN_WARNING "Warning ! ! ! 
ServeRAID Version Mismatch\n"); - printk(KERN_WARNING "Bios = %s, Firmware = %s, Device Driver = %s%s\n", - BiosString, FirmwareString, IPS_VERSION_HIGH, IPS_VERSION_LOW ); - printk(KERN_WARNING "These levels should match to avoid possible compatibility problems.\n" ); - } - } - else - { - ha->nvram->version_mismatch = 0; - } +static void +ips_version_check(ips_ha_t * ha, int intr) +{ + IPS_VERSION_DATA VersionInfo; + uint8_t FirmwareVersion[IPS_COMPAT_ID_LENGTH + 1]; + uint8_t BiosVersion[IPS_COMPAT_ID_LENGTH + 1]; + int MatchError; + int rc; + char BiosString[10]; + char FirmwareString[10]; + + METHOD_TRACE("ips_version_check", 1); + + memset(FirmwareVersion, 0, IPS_COMPAT_ID_LENGTH + 1); + memset(BiosVersion, 0, IPS_COMPAT_ID_LENGTH + 1); + + /* Get the Compatible BIOS Version from NVRAM Page 5 */ + memcpy(BiosVersion, ha->nvram->BiosCompatibilityID, + IPS_COMPAT_ID_LENGTH); + + rc = IPS_FAILURE; + if (ha->subsys->param[4] & IPS_GET_VERSION_SUPPORT) { /* If Versioning is Supported */ + /* Get the Version Info with a Get Version Command */ + rc = ips_get_version_info(ha, &VersionInfo, intr); + if (rc == IPS_SUCCESS) + memcpy(FirmwareVersion, VersionInfo.compatibilityId, + IPS_COMPAT_ID_LENGTH); + } + + if (rc != IPS_SUCCESS) { /* If Data Not Obtainable from a GetVersion Command */ + /* Get the Firmware Version from Enquiry Data */ + memcpy(FirmwareVersion, ha->enq->CodeBlkVersion, + IPS_COMPAT_ID_LENGTH); + } + + /* printk(KERN_WARNING "Adapter's BIOS Version = %s\n", BiosVersion); */ + /* printk(KERN_WARNING "BIOS Compatible Version = %s\n", IPS_COMPAT_BIOS); */ + /* printk(KERN_WARNING "Adapter's Firmware Version = %s\n", FirmwareVersion); */ + /* printk(KERN_WARNING "Firmware Compatible Version = %s \n", Compatable[ ha->nvram->adapter_type ]); */ + + MatchError = 0; + + if (strncmp + (FirmwareVersion, Compatable[ha->nvram->adapter_type], + IPS_COMPAT_ID_LENGTH) != 0) + MatchError = 1; + + if (strncmp(BiosVersion, IPS_COMPAT_BIOS, IPS_COMPAT_ID_LENGTH) != 0) + MatchError = 1; + + ha->nvram->versioning = 1; /* Indicate the Driver Supports Versioning */ + + if (MatchError) { + ha->nvram->version_mismatch = 1; + if (ips_cd_boot == 0) { + strncpy(&BiosString[0], ha->nvram->bios_high, 4); + strncpy(&BiosString[4], ha->nvram->bios_low, 4); + BiosString[8] = 0; + + strncpy(&FirmwareString[0], ha->enq->CodeBlkVersion, 8); + FirmwareString[8] = 0; + + printk(KERN_WARNING + "Warning ! ! ! 
ServeRAID Version Mismatch\n"); + printk(KERN_WARNING + "Bios = %s, Firmware = %s, Device Driver = %s%s\n", + BiosString, FirmwareString, IPS_VERSION_HIGH, + IPS_VERSION_LOW); + printk(KERN_WARNING + "These levels should match to avoid possible compatibility problems.\n"); + } + } else { + ha->nvram->version_mismatch = 0; + } - return; + return; } /*---------------------------------------------------------------------------*/ @@ -7290,52 +6834,227 @@ /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ -static int ips_get_version_info(ips_ha_t *ha, IPS_VERSION_DATA *Buffer, int intr ) { - ips_scb_t *scb; - int rc; - - METHOD_TRACE("ips_get_version_info", 1); - - memset(Buffer, 0, sizeof(IPS_VERSION_DATA)); - scb = &ha->scbs[ha->max_cmds-1]; - - ips_init_scb(ha, scb); - - scb->timeout = ips_cmd_timeout; - scb->cdb[0] = IPS_CMD_GET_VERSION_INFO; - scb->cmd.version_info.op_code = IPS_CMD_GET_VERSION_INFO; - scb->cmd.version_info.command_id = IPS_COMMAND_ID(ha, scb); - scb->cmd.version_info.reserved = 0; - scb->cmd.version_info.count = sizeof( IPS_VERSION_DATA); - scb->cmd.version_info.reserved2 = 0; - scb->data_len = sizeof(*Buffer); - scb->data_busaddr = pci_map_single(ha->pcidev, Buffer, - scb->data_len, IPS_DMA_DIR(scb)); - scb->cmd.version_info.buffer_addr = scb->data_busaddr; - scb->flags |= IPS_SCB_MAP_SINGLE; - - /* issue command */ - rc = ips_send_wait(ha, scb, ips_cmd_timeout, intr); - return( rc ); -} - - - -#if defined (MODULE) || (LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0)) -static Scsi_Host_Template driver_template = IPS; -#include "scsi_module.c" -#endif +static int +ips_get_version_info(ips_ha_t * ha, IPS_VERSION_DATA * Buffer, int intr) +{ + ips_scb_t *scb; + int rc; + + METHOD_TRACE("ips_get_version_info", 1); + + memset(Buffer, 0, sizeof (IPS_VERSION_DATA)); + scb = &ha->scbs[ha->max_cmds - 1]; + + ips_init_scb(ha, scb); -static int ips_abort_init(ips_ha_t *ha, struct Scsi_Host *sh, int index){ - ha->active = 0; - ips_free(ha); - scsi_unregister(sh); - ips_ha[index] = 0; - ips_sh[index] = 0; - return -1; + scb->timeout = ips_cmd_timeout; + scb->cdb[0] = IPS_CMD_GET_VERSION_INFO; + scb->cmd.version_info.op_code = IPS_CMD_GET_VERSION_INFO; + scb->cmd.version_info.command_id = IPS_COMMAND_ID(ha, scb); + scb->cmd.version_info.reserved = 0; + scb->cmd.version_info.count = sizeof (IPS_VERSION_DATA); + scb->cmd.version_info.reserved2 = 0; + scb->data_len = sizeof (*Buffer); + scb->data_busaddr = pci_map_single(ha->pcidev, Buffer, + scb->data_len, IPS_DMA_DIR(scb)); + scb->cmd.version_info.buffer_addr = scb->data_busaddr; + scb->flags |= IPS_SCB_MAP_SINGLE; + + /* issue command */ + rc = ips_send_wait(ha, scb, ips_cmd_timeout, intr); + return (rc); } -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0) +/****************************************************************************/ +/* */ +/* Routine Name: ips_abort_init */ +/* */ +/* Routine Description: */ +/* cleanup routine for a failed adapter initialization */ +/****************************************************************************/ +static int +ips_abort_init(ips_ha_t * ha, int index) +{ + ha->active = 0; + ips_free(ha); + ips_ha[index] = 0; + ips_sh[index] = 0; + return -1; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_shift_controllers */ +/* */ +/* Routine Description: */ +/* helper function for ordering adapters */ 
+/****************************************************************************/ +static void +ips_shift_controllers(int lowindex, int highindex) +{ + ips_ha_t *ha_sav = ips_ha[highindex]; + struct Scsi_Host *sh_sav = ips_sh[highindex]; + int i; + + for (i = highindex; i > lowindex; i--) { + ips_ha[i] = ips_ha[i - 1]; + ips_sh[i] = ips_sh[i - 1]; + ips_ha[i]->host_num = i; + } + ha_sav->host_num = lowindex; + ips_ha[lowindex] = ha_sav; + ips_sh[lowindex] = sh_sav; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_order_controllers */ +/* */ +/* Routine Description: */ +/* place controllers is the "proper" boot order */ +/****************************************************************************/ +static void +ips_order_controllers(void) +{ + int i, j, tmp, position = 0; + IPS_NVRAM_P5 *nvram; + if (!ips_ha[0]) + return; + nvram = ips_ha[0]->nvram; + + if (nvram->adapter_order[0]) { + for (i = 1; i <= nvram->adapter_order[0]; i++) { + for (j = position; j < ips_num_controllers; j++) { + switch (ips_ha[j]->ad_type) { + case IPS_ADTYPE_SERVERAID6M: + if (nvram->adapter_order[i] == 'M') { + ips_shift_controllers(position, + j); + position++; + } + break; + case IPS_ADTYPE_SERVERAID4L: + case IPS_ADTYPE_SERVERAID4M: + case IPS_ADTYPE_SERVERAID4MX: + case IPS_ADTYPE_SERVERAID4LX: + if (nvram->adapter_order[i] == 'N') { + ips_shift_controllers(position, + j); + position++; + } + break; + case IPS_ADTYPE_SERVERAID6I: + case IPS_ADTYPE_SERVERAID5I2: + case IPS_ADTYPE_SERVERAID5I1: + if (nvram->adapter_order[i] == 'S') { + ips_shift_controllers(position, + j); + position++; + } + break; + case IPS_ADTYPE_SERVERAID: + case IPS_ADTYPE_SERVERAID2: + case IPS_ADTYPE_NAVAJO: + case IPS_ADTYPE_KIOWA: + case IPS_ADTYPE_SERVERAID3L: + case IPS_ADTYPE_SERVERAID3: + case IPS_ADTYPE_SERVERAID4H: + if (nvram->adapter_order[i] == 'A') { + ips_shift_controllers(position, + j); + position++; + } + break; + default: + break; + } + } + } + /* if adapter_order[0], then ordering is complete */ + return; + } + /* old bios, use older ordering */ + tmp = 0; + for (i = position; i < ips_num_controllers; i++) { + if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) { + ips_shift_controllers(position, i); + position++; + tmp = 1; + } + } + /* if there were no 5I cards, then don't do any extra ordering */ + if (!tmp) + return; + for (i = position; i < ips_num_controllers; i++) { + if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX || + ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) { + ips_shift_controllers(position, i); + position++; + } + } + + return; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_register_scsi */ +/* */ +/* Routine Description: */ +/* perform any registration and setup with the scsi layer */ +/****************************************************************************/ +static int +ips_register_scsi(int index) +{ + struct Scsi_Host *sh; + ips_ha_t *ha, *oldha; + sh = scsi_register(&ips_driver_template, sizeof (ips_ha_t)); + if (!sh) { + printk(KERN_WARNING + "Unable to register controller with SCSI subsystem\n"); + return -1; + } + oldha = ips_ha[index]; + ha = IPS_HA(sh); + memcpy(ha, oldha, sizeof (ips_ha_t)); + free_irq(oldha->irq, oldha); + /* Install the interrupt handler with the new ha */ + if (request_irq(ha->irq, 
do_ipsintr, SA_SHIRQ, ips_name, ha)) { + printk(KERN_WARNING "Unable to install interrupt handler\n"); + scsi_unregister(sh); + return -1; + } + + kfree(oldha); + ips_sh[index] = sh; + ips_ha[index] = ha; + scsi_set_pci_device(sh, ha->pcidev); + + /* Store away needed values for later use */ + sh->io_port = ha->io_addr; + sh->n_io_port = ha->io_addr ? 255 : 0; + sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr; + sh->irq = ha->irq; + sh->sg_tablesize = sh->hostt->sg_tablesize; + sh->can_queue = sh->hostt->can_queue; + sh->cmd_per_lun = sh->hostt->cmd_per_lun; + sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; + sh->use_clustering = sh->hostt->use_clustering; + +#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,7) + sh->max_sectors = 128; +#endif + + sh->max_id = ha->ntargets; + sh->max_lun = ha->nlun; + sh->max_channel = ha->nbus - 1; + sh->can_queue = ha->max_cmds - 1; + + return 0; +} /*---------------------------------------------------------------------------*/ /* Routine Name: ips_remove_device */ @@ -7343,24 +7062,64 @@ /* Routine Description: */ /* Remove one Adapter ( Hot Plugging ) */ /*---------------------------------------------------------------------------*/ -static void ips_remove_device(struct pci_dev *pci_dev) +static void +ips_remove_device(struct pci_dev *pci_dev) { - int i; - struct Scsi_Host *sh; - ips_ha_t *ha; - - for (i = 0; i < IPS_MAX_ADAPTERS; i++) { - ha = ips_ha[i]; - if (ha) { - if ( (pci_dev->bus->number == ha->pcidev->bus->number) && - (pci_dev->devfn == ha->pcidev->devfn)) { - sh = ips_sh[i]; - ips_release(sh); - } - } - } + int i; + struct Scsi_Host *sh; + ips_ha_t *ha; + + for (i = 0; i < IPS_MAX_ADAPTERS; i++) { + ha = ips_ha[i]; + if (ha) { + if ((pci_dev->bus->number == ha->pcidev->bus->number) && + (pci_dev->devfn == ha->pcidev->devfn)) { + sh = ips_sh[i]; + ips_release(sh); + } + } + } } +/****************************************************************************/ +/* */ +/* Routine Name: ips_module_init */ +/* */ +/* Routine Description: */ +/* function called on module load */ +/****************************************************************************/ +static int __init +ips_module_init(void) +{ + if (pci_module_init(&ips_pci_driver) < 0) + return -ENODEV; + ips_driver_template.module = THIS_MODULE; + ips_order_controllers(); + if (scsi_register_module(MODULE_SCSI_HA, &ips_driver_template)) { + pci_unregister_driver(&ips_pci_driver); + return -ENODEV; + } + register_reboot_notifier(&ips_notifier); + return 0; +} + +/****************************************************************************/ +/* */ +/* Routine Name: ips_module_exit */ +/* */ +/* Routine Description: */ +/* function called on module unload */ +/****************************************************************************/ +static void __exit +ips_module_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &ips_driver_template); + pci_unregister_driver(&ips_pci_driver); + unregister_reboot_notifier(&ips_notifier); +} + +module_init(ips_module_init); +module_exit(ips_module_exit); /*---------------------------------------------------------------------------*/ /* Routine Name: ips_insert_device */ @@ -7371,40 +7130,28 @@ /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ -static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent) +static int __devinit +ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent) { - int index; - 
int rc; + int index; + int rc; - METHOD_TRACE("ips_insert_device", 1); + METHOD_TRACE("ips_insert_device", 1); - /* If we're still in Init State 0, and we've already found the Adapter */ - /* Ordering Table, there is no reason to continue. */ - if ( (InitState == 0) && (AdapterOrder[0]) ) - return -1; - - if (pci_enable_device(pci_dev)) + if (pci_enable_device(pci_dev)) return -1; - rc = ips_init_phase1(pci_dev, &index); - if (rc == SUCCESS) - rc = ips_init_phase2(index); - - /* If we're in Init State 0, we're done with the device for now. */ - /* Release the device and don't count it. */ - if ( InitState == 0 ) { - ips_remove_device(pci_dev); - return -1; - } + rc = ips_init_phase1(pci_dev, &index); + if (rc == SUCCESS) + rc = ips_init_phase2(index); - if (rc == SUCCESS) - ips_num_controllers++; + if (rc == SUCCESS) + ips_num_controllers++; - ips_next_controller = ips_num_controllers; - return rc; + ips_next_controller = ips_num_controllers; + return rc; } - /*---------------------------------------------------------------------------*/ /* Routine Name: ips_init_phase1 */ /* */ @@ -7414,248 +7161,232 @@ /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ -static int ips_init_phase1( struct pci_dev *pci_dev, int *indexPtr ) -{ - struct Scsi_Host *sh; - ips_ha_t *ha; - uint32_t io_addr; - uint32_t mem_addr; - uint32_t io_len; - uint32_t mem_len; - uint8_t revision_id; - uint8_t bus; - uint8_t func; - uint8_t irq; - uint16_t subdevice_id; - int j; - int index; - uint32_t count; - dma_addr_t dma_address; - char *ioremap_ptr; - char *mem_ptr; - uint32_t IsDead - - METHOD_TRACE("ips_init_phase1", 1); - index = IPS_MAX_ADAPTERS; - for (j = 0; j < IPS_MAX_ADAPTERS; j++) { - if (ips_ha[j] ==0) { - index = j; - break; - } - } - - if (index >= IPS_MAX_ADAPTERS) - return -1; - - /* stuff that we get in dev */ - irq = pci_dev->irq; - bus = pci_dev->bus->number; - func = pci_dev->devfn; - - /* Init MEM/IO addresses to 0 */ - mem_addr = 0; - io_addr = 0; - mem_len = 0; - io_len = 0; - - for (j = 0; j < 2; j++) { - if (!pci_resource_start(pci_dev, j)) - break; - - if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) { - io_addr = pci_resource_start(pci_dev, j); - io_len = pci_resource_len(pci_dev, j); - } else { - mem_addr = pci_resource_start(pci_dev, j); - mem_len = pci_resource_len(pci_dev, j); - } - } - - /* setup memory mapped area (if applicable) */ - if (mem_addr) { - uint32_t base; - uint32_t offs; - - if (check_mem_region(mem_addr, mem_len)) { - printk(KERN_WARNING "Couldn't allocate IO Memory space %x len %d.\n", mem_addr, mem_len); - return -1; - } - - request_mem_region(mem_addr, mem_len, "ips"); - base = mem_addr & PAGE_MASK; - offs = mem_addr - base; - ioremap_ptr = ioremap(base, PAGE_SIZE); - mem_ptr = ioremap_ptr + offs; - } else { - ioremap_ptr = NULL; - mem_ptr = NULL; - } - - /* setup I/O mapped area (if applicable) */ - if (io_addr) { - if (check_region(io_addr, io_len)) { - printk(KERN_WARNING "Couldn't allocate IO space %x len %d.\n", io_addr, io_len); - return -1; - } - request_region(io_addr, io_len, "ips"); - } - - /* get the revision ID */ - if (pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id)) { - printk(KERN_WARNING "Can't get revision id.\n" ); - return -1; - } - - subdevice_id = pci_dev->subsystem_device; - - /* found a controller */ - sh = scsi_register(&driver_template, sizeof(ips_ha_t)); -#if LINUX_VERSION_CODE > LinuxVersionCode(2,5,0) - pci_set_dma_mask(pci_dev, 
(u64)0xffffffff); - scsi_set_pci_device(sh, pci_dev); -#endif - if (sh == NULL) { - printk(KERN_WARNING "Unable to register controller with SCSI subsystem\n" ); - return -1; - } - - ha = IPS_HA(sh); - memset(ha, 0, sizeof(ips_ha_t)); - - ips_sh[index] = sh; - ips_ha[index] = ha; - ha->active = 1; - - ha->enq = kmalloc(sizeof(IPS_ENQ), GFP_KERNEL); - - if (!ha->enq) { - printk(KERN_WARNING "Unable to allocate host inquiry structure\n" ); - return ips_abort_init(ha, sh, index); - } - - ha->adapt = pci_alloc_consistent(pci_dev, sizeof(IPS_ADAPTER) + - sizeof(IPS_IO_CMD), &dma_address); - if (!ha->adapt) { - printk(KERN_WARNING "Unable to allocate host adapt & dummy structures\n"); - return ips_abort_init(ha, sh, index); - } - ha->adapt->hw_status_start = dma_address; - ha->dummy = (void *)(ha->adapt + 1); - - ha->conf = kmalloc(sizeof(IPS_CONF), GFP_KERNEL); - - if (!ha->conf) { - printk(KERN_WARNING "Unable to allocate host conf structure\n" ); - return ips_abort_init(ha, sh, index); - } - - ha->nvram = kmalloc(sizeof(IPS_NVRAM_P5), GFP_KERNEL); - - if (!ha->nvram) { - printk(KERN_WARNING "Unable to allocate host NVRAM structure\n" ); - return ips_abort_init(ha, sh, index); - } - - ha->subsys = kmalloc(sizeof(IPS_SUBSYS), GFP_KERNEL); - - if (!ha->subsys) { - printk(KERN_WARNING "Unable to allocate host subsystem structure\n" ); - return ips_abort_init(ha, sh, index); - } - - for (count = PAGE_SIZE, ha->ioctl_order = 0; - count < ips_ioctlsize; - ha->ioctl_order++, count <<= 1); - - ha->ioctl_data = (char *) __get_free_pages(GFP_KERNEL, ha->ioctl_order); - ha->ioctl_datasize = count; - - if (!ha->ioctl_data) { - printk(KERN_WARNING "Unable to allocate IOCTL data\n" ); - ha->ioctl_data = NULL; - ha->ioctl_order = 0; - ha->ioctl_datasize = 0; - } - - /* Store away needed values for later use */ - sh->io_port = io_addr; - sh->n_io_port = io_addr ? 255 : 0; - sh->unique_id = (io_addr) ? 
io_addr : mem_addr; - sh->irq = irq; - sh->select_queue_depths = ips_select_queue_depth; - sh->sg_tablesize = sh->hostt->sg_tablesize; - sh->can_queue = sh->hostt->can_queue; - sh->cmd_per_lun = sh->hostt->cmd_per_lun; - sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; - sh->use_clustering = sh->hostt->use_clustering; +static int +ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr) +{ + ips_ha_t *ha; + uint32_t io_addr; + uint32_t mem_addr; + uint32_t io_len; + uint32_t mem_len; + uint8_t revision_id; + uint8_t bus; + uint8_t func; + uint8_t irq; + uint16_t subdevice_id; + int j; + int index; + uint32_t count; + dma_addr_t dma_address; + char *ioremap_ptr; + char *mem_ptr; + uint32_t IsDead; + + METHOD_TRACE("ips_init_phase1", 1); + index = IPS_MAX_ADAPTERS; + for (j = 0; j < IPS_MAX_ADAPTERS; j++) { + if (ips_ha[j] == 0) { + index = j; + break; + } + } -#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,7) - sh->max_sectors = 128; -#endif + if (index >= IPS_MAX_ADAPTERS) + return -1; - /* Store info in HA structure */ - ha->irq = irq; - ha->io_addr = io_addr; - ha->io_len = io_len; - ha->mem_addr = mem_addr; - ha->mem_len = mem_len; - ha->mem_ptr = mem_ptr; - ha->ioremap_ptr = ioremap_ptr; - ha->host_num = ( uint32_t) index; - ha->revision_id = revision_id; - ha->slot_num = PCI_SLOT(pci_dev->devfn); - ha->device_id = pci_dev->device; - ha->subdevice_id = subdevice_id; - ha->pcidev = pci_dev; - - /* - * Setup Functions - */ - ips_setup_funclist(ha); - - if ( ( IPS_IS_MORPHEUS( ha ) ) || ( IPS_IS_MARCO( ha ) ) ) { - /* If Morpheus appears dead, reset it */ - IsDead = readl( ha->mem_ptr + IPS_REG_I960_MSG1 ); - if ( IsDead == 0xDEADBEEF ) { - ips_reset_morpheus( ha ); - } - } - - /* - * Initialize the card if it isn't already - */ - - if (!(*ha->func.isinit)(ha)) { - if (!(*ha->func.init)(ha)) { - /* - * Initialization failed - */ - printk(KERN_WARNING "Unable to initialize controller\n" ); - return ips_abort_init(ha, sh, index); - } - } - - /* Install the interrupt handler */ - if (request_irq(irq, do_ipsintr, SA_SHIRQ, ips_name, ha)) { - printk(KERN_WARNING "Unable to install interrupt handler\n" ); - return ips_abort_init(ha, sh, index); - } - - /* - * Allocate a temporary SCB for initialization - */ - ha->max_cmds = 1; - if (!ips_allocatescbs(ha)) { - printk(KERN_WARNING "Unable to allocate a CCB\n" ); - free_irq(ha->irq, ha); - return ips_abort_init(ha, sh, index); - } + /* stuff that we get in dev */ + irq = pci_dev->irq; + bus = pci_dev->bus->number; + func = pci_dev->devfn; + + /* Init MEM/IO addresses to 0 */ + mem_addr = 0; + io_addr = 0; + mem_len = 0; + io_len = 0; + + for (j = 0; j < 2; j++) { + if (!pci_resource_start(pci_dev, j)) + break; + + if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) { + io_addr = pci_resource_start(pci_dev, j); + io_len = pci_resource_len(pci_dev, j); + } else { + mem_addr = pci_resource_start(pci_dev, j); + mem_len = pci_resource_len(pci_dev, j); + } + } + + /* setup memory mapped area (if applicable) */ + if (mem_addr) { + uint32_t base; + uint32_t offs; + + if (!request_mem_region(mem_addr, mem_len, "ips")) { + printk(KERN_WARNING + "Couldn't allocate IO Memory space %x len %d.\n", + mem_addr, mem_len); + return -1; + } + + base = mem_addr & PAGE_MASK; + offs = mem_addr - base; + ioremap_ptr = ioremap(base, PAGE_SIZE); + mem_ptr = ioremap_ptr + offs; + } else { + ioremap_ptr = NULL; + mem_ptr = NULL; + } + + /* setup I/O mapped area (if applicable) */ + if (io_addr) { + if (!request_region(io_addr, io_len, "ips")) { + printk(KERN_WARNING + 
"Couldn't allocate IO space %x len %d.\n", + io_addr, io_len); + return -1; + } + } + + /* get the revision ID */ + if (pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id)) { + printk(KERN_WARNING "Can't get revision id.\n"); + return -1; + } - *indexPtr = index; - return SUCCESS; -} + subdevice_id = pci_dev->subsystem_device; -#endif + /* found a controller */ + ha = kmalloc(sizeof (ips_ha_t), GFP_KERNEL); + if (ha == NULL) { + printk(KERN_WARNING "Unable to allocate temporary ha struct\n"); + return -1; + } + + memset(ha, 0, sizeof (ips_ha_t)); + + ips_sh[index] = NULL; + ips_ha[index] = ha; + ha->active = 1; + + /* Store info in HA structure */ + ha->irq = irq; + ha->io_addr = io_addr; + ha->io_len = io_len; + ha->mem_addr = mem_addr; + ha->mem_len = mem_len; + ha->mem_ptr = mem_ptr; + ha->ioremap_ptr = ioremap_ptr; + ha->host_num = (uint32_t) index; + ha->revision_id = revision_id; + ha->slot_num = PCI_SLOT(pci_dev->devfn); + ha->device_id = pci_dev->device; + ha->subdevice_id = subdevice_id; + ha->pcidev = pci_dev; + + /* + * Set the pci_dev's dma_mask. Not all adapters support 64bit + * addressing so don't enable it if the adapter can't support + * it! Also, don't use 64bit addressing if dma addresses + * are guaranteed to be < 4G. + */ + if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) && + !pci_set_dma_mask(ha->pcidev, (u64) 0xffffffffffffffff)) { + (ha)->flags |= IPS_HA_ENH_SG; + } else { + if (pci_set_dma_mask(ha->pcidev, (u64) 0xffffffff) != 0) { + printk(KERN_WARNING "Unable to set DMA Mask\n"); + return ips_abort_init(ha, index); + } + } + + ha->enq = kmalloc(sizeof (IPS_ENQ), IPS_INIT_GFP); + + if (!ha->enq) { + printk(KERN_WARNING + "Unable to allocate host inquiry structure\n"); + return ips_abort_init(ha, index); + } + + ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) + + sizeof (IPS_IO_CMD), &dma_address); + if (!ha->adapt) { + printk(KERN_WARNING + "Unable to allocate host adapt & dummy structures\n"); + return ips_abort_init(ha, index); + } + ha->adapt->hw_status_start = dma_address; + ha->dummy = (void *) (ha->adapt + 1); + + ha->conf = kmalloc(sizeof (IPS_CONF), IPS_INIT_GFP); + + if (!ha->conf) { + printk(KERN_WARNING "Unable to allocate host conf structure\n"); + return ips_abort_init(ha, index); + } + + ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), IPS_INIT_GFP); + + if (!ha->nvram) { + printk(KERN_WARNING + "Unable to allocate host NVRAM structure\n"); + return ips_abort_init(ha, index); + } + + ha->subsys = kmalloc(sizeof (IPS_SUBSYS), IPS_INIT_GFP); + + if (!ha->subsys) { + printk(KERN_WARNING + "Unable to allocate host subsystem structure\n"); + return ips_abort_init(ha, index); + } + + for (count = PAGE_SIZE, ha->ioctl_order = 0; + count < ips_ioctlsize; ha->ioctl_order++, count <<= 1) ; + + ha->ioctl_data = + (char *) __get_free_pages(IPS_INIT_GFP, ha->ioctl_order); + ha->ioctl_datasize = count; + + if (!ha->ioctl_data) { + printk(KERN_WARNING "Unable to allocate IOCTL data\n"); + ha->ioctl_data = NULL; + ha->ioctl_order = 0; + ha->ioctl_datasize = 0; + } + + /* + * Setup Functions + */ + ips_setup_funclist(ha); + + if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) { + /* If Morpheus appears dead, reset it */ + IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1); + if (IsDead == 0xDEADBEEF) { + ips_reset_morpheus(ha); + } + } + + /* + * Initialize the card if it isn't already + */ + + if (!(*ha->func.isinit) (ha)) { + if (!(*ha->func.init) (ha)) { + /* + * Initialization failed + */ + printk(KERN_WARNING + "Unable to initialize controller\n"); + 
return ips_abort_init(ha, index); + } + } + + *indexPtr = index; + return SUCCESS; +} /*---------------------------------------------------------------------------*/ /* Routine Name: ips_init_phase2 */ @@ -7666,46 +7397,52 @@ /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ -static int ips_init_phase2( int index ) -{ - struct Scsi_Host *sh; - ips_ha_t *ha; - - ha = ips_ha[index]; - sh = ips_sh[index]; - - METHOD_TRACE("ips_init_phase2", 1); - if (!ha->active) { - scsi_unregister(sh); - ips_ha[index] = NULL; - ips_sh[index] = NULL; - return -1;; - } - - if (!ips_hainit(ha)) { - printk(KERN_WARNING "Unable to initialize controller\n" ); - free_irq(ha->irq, ha); - return ips_abort_init(ha, sh, index); - } - /* Free the temporary SCB */ - ips_deallocatescbs(ha, 1); - - /* allocate CCBs */ - if (!ips_allocatescbs(ha)) { - printk(KERN_WARNING "Unable to allocate CCBs\n" ); - free_irq(ha->irq, ha); - return ips_abort_init(ha, sh, index); - } - - /* finish setting values */ - sh->max_id = ha->ntargets; - sh->max_lun = ha->nlun; - sh->max_channel = ha->nbus - 1; - sh->can_queue = ha->max_cmds-1; +static int +ips_init_phase2(int index) +{ + ips_ha_t *ha; - return SUCCESS; -} + ha = ips_ha[index]; + + METHOD_TRACE("ips_init_phase2", 1); + if (!ha->active) { + ips_ha[index] = NULL; + return -1; + } + + /* Install the interrupt handler */ + if (request_irq(ha->irq, do_ipsintr, SA_SHIRQ, ips_name, ha)) { + printk(KERN_WARNING "Unable to install interrupt handler\n"); + return ips_abort_init(ha, index); + } + + /* + * Allocate a temporary SCB for initialization + */ + ha->max_cmds = 1; + if (!ips_allocatescbs(ha)) { + printk(KERN_WARNING "Unable to allocate a CCB\n"); + free_irq(ha->irq, ha); + return ips_abort_init(ha, index); + } + + if (!ips_hainit(ha)) { + printk(KERN_WARNING "Unable to initialize controller\n"); + free_irq(ha->irq, ha); + return ips_abort_init(ha, index); + } + /* Free the temporary SCB */ + ips_deallocatescbs(ha, 1); + + /* allocate CCBs */ + if (!ips_allocatescbs(ha)) { + printk(KERN_WARNING "Unable to allocate CCBs\n"); + free_irq(ha->irq, ha); + return ips_abort_init(ha, index); + } + return SUCCESS; +} #if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,9) MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/ips.h linux.22-ac2/drivers/scsi/ips.h --- linux.vanilla/drivers/scsi/ips.h 2003-06-14 00:11:36.000000000 +0100 +++ linux.22-ac2/drivers/scsi/ips.h 2003-09-01 13:54:30.000000000 +0100 @@ -69,6 +69,13 @@ #define LinuxVersionCode(x,y,z) (((x)<<16)+((y)<<8)+(z)) #endif + #if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,20) || defined CONFIG_HIGHIO + #define IPS_HIGHIO + #define IPS_HIGHMEM_IO .highmem_io = 1, + #else + #define IPS_HIGHMEM_IO + #endif + #define IPS_HA(x) ((ips_ha_t *) x->hostdata) #define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs) #define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \ @@ -86,10 +93,42 @@ ((IPS_IS_TROMBONE(ha) || IPS_IS_CLARINET(ha)) && \ (ips_force_memio))) ? 1 : 0) + #define IPS_HAS_ENH_SGLIST(ha) (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) + #define IPS_USE_ENH_SGLIST(ha) ((ha)->flags & IPS_HA_ENH_SG) + #define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \ + sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST)) + + #if LINUX_VERSION_CODE < LinuxVersionCode(2,4,4) + #define pci_set_dma_mask(dev,mask) ( mask > 0xffffffff ? 
1:0 ) + #define scsi_set_pci_device(sh,dev) (0) + #endif + #ifndef MDELAY #define MDELAY mdelay #endif - + + #ifndef min + #define min(x,y) ((x) < (y) ? x : y) + #endif + + #define pci_dma_lo32(a) (a & 0xffffffff) + + #if (BITS_PER_LONG > 32) || (defined CONFIG_HIGHMEM64G && defined IPS_HIGHIO) + #define IPS_ENABLE_DMA64 (1) + #define pci_dma_hi32(a) (a >> 32) + #else + #define IPS_ENABLE_DMA64 (0) + #define pci_dma_hi32(a) (0) + #endif + + #if defined(__ia64__) + #define IPS_ATOMIC_GFP (GFP_DMA | GFP_ATOMIC) + #define IPS_INIT_GFP GFP_DMA + #else + #define IPS_ATOMIC_GFP GFP_ATOMIC + #define IPS_INIT_GFP GFP_KERNEL + #endif + /* * Adapter address map equates */ @@ -354,6 +393,12 @@ #define IPS_SCSI_MP3_AllocateSurface 0x08 /* + * HA Flags + */ + + #define IPS_HA_ENH_SG 0x1 + + /* * SCB Flags */ #define IPS_SCB_MAP_SG 0x00008 @@ -390,93 +435,45 @@ /* * Scsi_Host Template */ -#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0) - #define IPS { \ - next : NULL, \ - module : NULL, \ - proc_info : NULL, \ - proc_dir : NULL, \ - name : NULL, \ - detect : ips_detect, \ - release : ips_release, \ - info : ips_info, \ - command : NULL, \ - queuecommand : ips_queue, \ - eh_strategy_handler : NULL, \ - eh_abort_handler : ips_eh_abort, \ - eh_device_reset_handler : NULL, \ - eh_bus_reset_handler : NULL, \ - eh_host_reset_handler : ips_eh_reset, \ - abort : NULL, \ - reset : NULL, \ - slave_attach : NULL, \ - bios_param : ips_biosparam, \ - can_queue : 0, \ - this_id: -1, \ - sg_tablesize : IPS_MAX_SG, \ - cmd_per_lun: 16, \ - present : 0, \ - unchecked_isa_dma : 0, \ - use_clustering : ENABLE_CLUSTERING, \ - use_new_eh_code : 1 \ -} -#elif LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) - #define IPS { \ - next : NULL, \ - module : NULL, \ - proc_info : NULL, \ - name : NULL, \ - detect : ips_detect, \ - release : ips_release, \ - info : ips_info, \ - command : NULL, \ - queuecommand : ips_queue, \ - eh_strategy_handler : NULL, \ - eh_abort_handler : ips_eh_abort, \ - eh_device_reset_handler : NULL, \ - eh_bus_reset_handler : NULL, \ - eh_host_reset_handler : ips_eh_reset, \ - abort : NULL, \ - reset : NULL, \ - slave_attach : NULL, \ - bios_param : ips_biosparam, \ - can_queue : 0, \ - this_id: -1, \ - sg_tablesize : IPS_MAX_SG, \ - cmd_per_lun: 16, \ - present : 0, \ - unchecked_isa_dma : 0, \ - use_clustering : ENABLE_CLUSTERING, \ - use_new_eh_code : 1 \ +#if LINUX_VERSION_CODE < LinuxVersionCode(2,5,0) + static void ips_select_queue_depth(struct Scsi_Host *, Scsi_Device *); +#define IPS { \ + .detect = ips_detect, \ + .release = ips_release, \ + .info = ips_info, \ + .queuecommand = ips_queue, \ + .eh_abort_handler = ips_eh_abort, \ + .eh_host_reset_handler = ips_eh_reset, \ + .bios_param = ips_biosparam,\ + .select_queue_depths = ips_select_queue_depth, \ + .can_queue = 0, \ + .this_id = -1, \ + .sg_tablesize = IPS_MAX_SG, \ + .cmd_per_lun = 16, \ + .present = 0, \ + .unchecked_isa_dma = 0, \ + .use_clustering = ENABLE_CLUSTERING,\ + .use_new_eh_code = 1, \ + IPS_HIGHMEM_IO \ } #else - #define IPS { \ - next : NULL, \ - module : NULL, \ - proc_info : NULL, \ - name : NULL, \ - detect : ips_detect, \ - release : ips_release, \ - info : ips_info, \ - command : NULL, \ - queuecommand : ips_queue, \ - eh_strategy_handler : NULL, \ - eh_abort_handler : ips_eh_abort, \ - eh_device_reset_handler : NULL, \ - eh_bus_reset_handler : NULL, \ - eh_host_reset_handler : ips_eh_reset, \ - abort : NULL, \ - reset : NULL, \ - slave_attach : NULL, \ - bios_param : ips_biosparam, \ - can_queue : 0, \ - this_id: -1, \ - 
sg_tablesize : IPS_MAX_SG, \ - cmd_per_lun: 16, \ - present : 0, \ - unchecked_isa_dma : 0, \ - use_clustering : ENABLE_CLUSTERING, \ - highmem_io : 1 \ +#define IPS { \ + .detect = ips_detect, \ + .release = ips_release, \ + .info = ips_info, \ + .queuecommand = ips_queue, \ + .eh_abort_handler = ips_eh_abort, \ + .eh_host_reset_handler = ips_eh_reset, \ + .slave_configure = ips_slave_configure, \ + .bios_param = ips_biosparam, \ + .can_queue = 0, \ + .this_id = -1, \ + .sg_tablesize = IPS_MAX_SG, \ + .cmd_per_lun = 3, \ + .present = 0, \ + .unchecked_isa_dma = 0, \ + .use_clustering = ENABLE_CLUSTERING, \ + .highmem_io = 1 \ } #endif @@ -491,7 +488,8 @@ uint32_t lba; uint32_t sg_addr; uint16_t sector_count; - uint16_t reserved; + uint8_t segment_4G; + uint8_t enhanced_sg; uint32_t ccsar; uint32_t cccr; } IPS_IO_CMD, *PIPS_IO_CMD; @@ -542,7 +540,9 @@ uint16_t reserved; uint32_t reserved2; uint32_t dcdb_address; - uint32_t reserved3; + uint16_t reserved3; + uint8_t segment_4G; + uint8_t enhanced_sg; uint32_t ccsar; uint32_t cccr; } IPS_DCDB_CMD, *PIPS_DCDB_CMD; @@ -986,7 +986,20 @@ typedef struct ips_sglist { uint32_t address; uint32_t length; -} IPS_SG_LIST, *PIPS_SG_LIST; +} IPS_STD_SG_LIST; + +typedef struct ips_enh_sglist { + uint32_t address_lo; + uint32_t address_hi; + uint32_t length; + uint32_t reserved; +} IPS_ENH_SG_LIST; + +typedef union { + void *list; + IPS_STD_SG_LIST *std_list; + IPS_ENH_SG_LIST *enh_list; +} IPS_SG_LIST; typedef struct _IPS_INFOSTR { char *buffer; @@ -1086,6 +1099,7 @@ char *ioctl_data; /* IOCTL data area */ uint32_t ioctl_datasize; /* IOCTL data size */ uint32_t cmd_in_progress; /* Current command in progress*/ + int flags; /* */ uint8_t waitflag; /* are we waiting for cmd */ uint8_t active; int ioctl_reset; /* IOCTL Requested Reset Flag */ @@ -1133,7 +1147,7 @@ uint32_t sg_len; uint32_t flags; uint32_t op_code; - IPS_SG_LIST *sg_list; + IPS_SG_LIST sg_list; Scsi_Cmnd *scsi_cmd; struct ips_scb *q_next; ips_scb_callback callback; @@ -1194,11 +1208,13 @@ #define IPS_VER_MAJOR 6 #define IPS_VER_MAJOR_STRING "6" -#define IPS_VER_MINOR 00 -#define IPS_VER_MINOR_STRING "00" -#define IPS_VER_BUILD 26 -#define IPS_VER_BUILD_STRING "26" -#define IPS_VER_STRING "6.00.26" +#define IPS_VER_MINOR 10 +#define IPS_VER_MINOR_STRING "10" +#define IPS_VER_BUILD 24 +#define IPS_VER_BUILD_STRING "24" +#define IPS_VER_STRING "6.10.24" +#define IPS_RELEASE_ID 0x00010000 +#define IPS_BUILD_IDENT 1250 #define IPS_LEGALCOPYRIGHT_STRING "(C) Copyright IBM Corp. 1994, 2003. All Rights Reserved." #define IPS_ADAPTECCOPYRIGHT_STRING "(c) Copyright Adaptec, Inc. 2002 to present. All Rights Reserved." #define IPS_NT_LEGALCOPYRIGHT_STRING "(C) Copyright IBM Corp. 1994, 2003." 
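The ips.h hunk above replaces the driver's flat scatter-gather struct with a union of standard 32-bit entries and enhanced 64-bit-capable entries (IPS_STD_SG_LIST / IPS_ENH_SG_LIST), chosen per adapter via the new IPS_HA_ENH_SG flag and the IPS_USE_ENH_SGLIST()/IPS_SGLIST_SIZE() macros. The standalone C sketch below is only an illustration of how one entry of either format could be filled through that union, not code from the patch: the struct and union layouts are copied from the hunk, while the helper name ips_fill_sg_entry() and its use_enh parameter are hypothetical and do not appear in the driver.

/* Standalone illustration; the real driver splits bus addresses with the
 * pci_dma_lo32()/pci_dma_hi32() macros added in the same hunk. */
#include <stdint.h>

typedef struct ips_sglist {
	uint32_t address;
	uint32_t length;
} IPS_STD_SG_LIST;

typedef struct ips_enh_sglist {
	uint32_t address_lo;
	uint32_t address_hi;
	uint32_t length;
	uint32_t reserved;
} IPS_ENH_SG_LIST;

typedef union {
	void *list;
	IPS_STD_SG_LIST *std_list;
	IPS_ENH_SG_LIST *enh_list;
} IPS_SG_LIST;

/* Hypothetical helper: fill entry i of a scatter-gather list in whichever
 * format the adapter was flagged to use. */
static void ips_fill_sg_entry(IPS_SG_LIST sg, int i, uint64_t busaddr,
			      uint32_t len, int use_enh)
{
	if (use_enh) {
		/* enhanced entry: 64-bit address split into low/high words */
		sg.enh_list[i].address_lo = (uint32_t) (busaddr & 0xffffffff);
		sg.enh_list[i].address_hi = (uint32_t) (busaddr >> 32);
		sg.enh_list[i].length = len;
		sg.enh_list[i].reserved = 0;
	} else {
		/* legacy entry: 32-bit address only */
		sg.std_list[i].address = (uint32_t) busaddr;
		sg.std_list[i].length = len;
	}
}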
@@ -1207,31 +1223,33 @@ #define IPS_VER_SERVERAID1 "2.25.01" #define IPS_VER_SERVERAID2 "2.88.13" #define IPS_VER_NAVAJO "2.88.13" -#define IPS_VER_SERVERAID3 "6.00.26" -#define IPS_VER_SERVERAID4H "6.00.26" -#define IPS_VER_SERVERAID4MLx "6.00.26" -#define IPS_VER_SARASOTA "6.00.26" -#define IPS_VER_MARCO "6.00.26" +#define IPS_VER_SERVERAID3 "6.10.24" +#define IPS_VER_SERVERAID4H "6.10.24" +#define IPS_VER_SERVERAID4MLx "6.10.24" +#define IPS_VER_SARASOTA "6.10.24" +#define IPS_VER_MARCO "6.10.24" +#define IPS_VER_SEBRING "6.10.24" /* Compatability IDs for various adapters */ #define IPS_COMPAT_UNKNOWN "" -#define IPS_COMPAT_CURRENT "MR600" +#define IPS_COMPAT_CURRENT "SB610" #define IPS_COMPAT_SERVERAID1 "2.25.01" #define IPS_COMPAT_SERVERAID2 "2.88.13" #define IPS_COMPAT_NAVAJO "2.88.13" #define IPS_COMPAT_KIOWA "2.88.13" -#define IPS_COMPAT_SERVERAID3H "MR600" -#define IPS_COMPAT_SERVERAID3L "MR600" -#define IPS_COMPAT_SERVERAID4H "MR600" -#define IPS_COMPAT_SERVERAID4M "MR600" -#define IPS_COMPAT_SERVERAID4L "MR600" -#define IPS_COMPAT_SERVERAID4Mx "MR600" -#define IPS_COMPAT_SERVERAID4Lx "MR600" -#define IPS_COMPAT_SARASOTA "MR600" -#define IPS_COMPAT_MARCO "MR600" -#define IPS_COMPAT_BIOS "MR600" +#define IPS_COMPAT_SERVERAID3H "SB610" +#define IPS_COMPAT_SERVERAID3L "SB610" +#define IPS_COMPAT_SERVERAID4H "SB610" +#define IPS_COMPAT_SERVERAID4M "SB610" +#define IPS_COMPAT_SERVERAID4L "SB610" +#define IPS_COMPAT_SERVERAID4Mx "SB610" +#define IPS_COMPAT_SERVERAID4Lx "SB610" +#define IPS_COMPAT_SARASOTA "SB610" +#define IPS_COMPAT_MARCO "SB610" +#define IPS_COMPAT_SEBRING "SB610" +#define IPS_COMPAT_BIOS "SB610" -#define IPS_COMPAT_MAX_ADAPTER_TYPE 15 +#define IPS_COMPAT_MAX_ADAPTER_TYPE 16 #define IPS_COMPAT_ID_LENGTH 8 #define IPS_DEFINE_COMPAT_TABLE(tablename) \ @@ -1250,7 +1268,8 @@ IPS_COMPAT_SERVERAID4Lx, \ IPS_COMPAT_SARASOTA, /* one-channel variety of SARASOTA */ \ IPS_COMPAT_SARASOTA, /* two-channel variety of SARASOTA */ \ - IPS_COMPAT_MARCO \ + IPS_COMPAT_MARCO, \ + IPS_COMPAT_SEBRING \ } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/libata-core.c linux.22-ac2/drivers/scsi/libata-core.c --- linux.vanilla/drivers/scsi/libata-core.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/scsi/libata-core.c 2003-09-01 13:25:08.000000000 +0100 @@ -0,0 +1,3128 @@ +/* + libata-core.c - helper library for ATA + + Copyright 2003 Red Hat, Inc. All rights reserved. + Copyright 2003 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. 
+ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include +#include +#include + +#include "libata.h" + +static void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat, + unsigned int done_late); +static void atapi_cdb_send(struct ata_port *ap); +static unsigned int ata_busy_sleep (struct ata_port *ap, + unsigned long tmout_pat, + unsigned long tmout); +static u8 __ata_dev_select (struct ata_port *ap, unsigned int device); +static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append); +static void ata_dma_complete(struct ata_port *ap, u8 host_stat, + unsigned int done_late); + +static unsigned int ata_unique_id = 1; +static LIST_HEAD(ata_probe_list); +static spinlock_t ata_module_lock = SPIN_LOCK_UNLOCKED; + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("Library module for ATA devices"); +MODULE_LICENSE("GPL"); + +static const char * thr_state_name[] = { + "THR_UNKNOWN", + "THR_CHECKPORT", + "THR_BUS_RESET", + "THR_AWAIT_DEATH", + "THR_IDENTIFY", + "THR_CONFIG_TIMINGS", + "THR_CONFIG_DMA", + "THR_PROBE_FAILED", + "THR_IDLE", + "THR_PROBE_SUCCESS", + "THR_PROBE_START", + "THR_CONFIG_FORCE_PIO", + "THR_PIO_POLL", + "THR_PIO_TMOUT", + "THR_PIO", + "THR_PIO_LAST", + "THR_PIO_LAST_POLL", + "THR_PIO_ERR", + "THR_PACKET", +}; + +/** + * ata_thr_state_name - convert thread state enum to string + * @thr_state: thread state to be converted to string + * + * Converts the specified thread state id to a constant C string. + * + * LOCKING: + * None. + * + * RETURNS: + * The THR_xxx-prefixed string naming the specified thread + * state id, or the string "". + */ + +static const char *ata_thr_state_name(unsigned int thr_state) +{ + if (thr_state < ARRAY_SIZE(thr_state_name)) + return thr_state_name[thr_state]; + return ""; +} + +/** + * msleep - sleep for a number of milliseconds + * @msecs: number of milliseconds to sleep + * + * Issues schedule_timeout call for the specified number + * of milliseconds. + * + * LOCKING: + * None. + */ + +static void msleep(unsigned long msecs) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(msecs)); +} + +/** + * ata_tf_load_pio - send taskfile registers to host controller + * @ioaddr: set of IO ports to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller using PIO. + * + * LOCKING: + * Inherited from caller. 
+ */ + +void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + outb(tf->ctl, ioaddr->ctl_addr); + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + outb(tf->hob_feature, ioaddr->cmd_addr + ATA_REG_FEATURE); + outb(tf->hob_nsect, ioaddr->cmd_addr + ATA_REG_NSECT); + outb(tf->hob_lbal, ioaddr->cmd_addr + ATA_REG_LBAL); + outb(tf->hob_lbam, ioaddr->cmd_addr + ATA_REG_LBAM); + outb(tf->hob_lbah, ioaddr->cmd_addr + ATA_REG_LBAH); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + outb(tf->feature, ioaddr->cmd_addr + ATA_REG_FEATURE); + outb(tf->nsect, ioaddr->cmd_addr + ATA_REG_NSECT); + outb(tf->lbal, ioaddr->cmd_addr + ATA_REG_LBAL); + outb(tf->lbam, ioaddr->cmd_addr + ATA_REG_LBAM); + outb(tf->lbah, ioaddr->cmd_addr + ATA_REG_LBAH); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + outb(tf->device, ioaddr->cmd_addr + ATA_REG_DEVICE); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +/** + * ata_tf_load_mmio - send taskfile registers to host controller + * @ioaddr: set of IO ports to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller using MMIO. + * + * LOCKING: + * Inherited from caller. + */ + +void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + void *mmio = (void *) ap->ioaddr.cmd_addr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + writeb(tf->ctl, ap->ioaddr.ctl_addr); + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + writeb(tf->hob_feature, mmio + ATA_REG_FEATURE); + writeb(tf->hob_nsect, mmio + ATA_REG_NSECT); + writeb(tf->hob_lbal, mmio + ATA_REG_LBAL); + writeb(tf->hob_lbam, mmio + ATA_REG_LBAM); + writeb(tf->hob_lbah, mmio + ATA_REG_LBAH); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + writeb(tf->feature, mmio + ATA_REG_FEATURE); + writeb(tf->nsect, mmio + ATA_REG_NSECT); + writeb(tf->lbal, mmio + ATA_REG_LBAL); + writeb(tf->lbam, mmio + ATA_REG_LBAM); + writeb(tf->lbah, mmio + ATA_REG_LBAH); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + writeb(tf->device, mmio + ATA_REG_DEVICE); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +/** + * __ata_exec - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues PIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static inline void __ata_exec(struct ata_port *ap, struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.cmd_addr; + writeb(tf->command, mmio + ATA_REG_CMD); + } else + outb(tf->command, ap->ioaddr.cmd_addr + ATA_REG_CMD); + ata_pause(ap); +} + +/** + * ata_exec - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues PIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. + * + * LOCKING: + * Obtains host_set lock. + */ + +static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf) +{ + unsigned long flags; + + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + spin_lock_irqsave(&ap->host_set->lock, flags); + __ata_exec(ap, tf); + spin_unlock_irqrestore(&ap->host_set->lock, flags); +} + +/** + * ata_tf_to_host - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * via PIO, with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * Obtains host_set lock. + */ + +static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf) +{ + init_MUTEX_LOCKED(&ap->sem); + + ap->ops->tf_load(ap, tf); + + ata_exec(ap, tf); +} + +/** + * ata_tf_to_host_nolock - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * via PIO, with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf) +{ + init_MUTEX_LOCKED(&ap->sem); + + ap->ops->tf_load(ap, tf); + + __ata_exec(ap, tf); +} + +/** + * ata_tf_read_pio - input device's ATA taskfile shadow registers + * @ioaddr: set of IO ports from which input is read + * @tf: ATA taskfile register set for storing input + * + * Reads ATA taskfile registers for currently-selected device + * into @tf via PIO. + * + * LOCKING: + * Inherited from caller. + */ + +void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->nsect = inb(ioaddr->cmd_addr + ATA_REG_NSECT); + tf->lbal = inb(ioaddr->cmd_addr + ATA_REG_LBAL); + tf->lbam = inb(ioaddr->cmd_addr + ATA_REG_LBAM); + tf->lbah = inb(ioaddr->cmd_addr + ATA_REG_LBAH); + tf->device = inb(ioaddr->cmd_addr + ATA_REG_DEVICE); + + if (tf->flags & ATA_TFLAG_LBA48) { + outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr); + tf->hob_feature = inb(ioaddr->cmd_addr + ATA_REG_FEATURE); + tf->hob_nsect = inb(ioaddr->cmd_addr + ATA_REG_NSECT); + tf->hob_lbal = inb(ioaddr->cmd_addr + ATA_REG_LBAL); + tf->hob_lbam = inb(ioaddr->cmd_addr + ATA_REG_LBAM); + tf->hob_lbah = inb(ioaddr->cmd_addr + ATA_REG_LBAH); + } +} + +/** + * ata_tf_read_mmio - input device's ATA taskfile shadow registers + * @ioaddr: set of IO ports from which input is read + * @tf: ATA taskfile register set for storing input + * + * Reads ATA taskfile registers for currently-selected device + * into @tf via MMIO. + * + * LOCKING: + * Inherited from caller. 
+ */ + +void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + void *mmio = (void *) ap->ioaddr.cmd_addr; + + tf->nsect = readb(mmio + ATA_REG_NSECT); + tf->lbal = readb(mmio + ATA_REG_LBAL); + tf->lbam = readb(mmio + ATA_REG_LBAM); + tf->lbah = readb(mmio + ATA_REG_LBAH); + tf->device = readb(mmio + ATA_REG_DEVICE); + + if (tf->flags & ATA_TFLAG_LBA48) { + writeb(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr); + tf->hob_feature = readb(mmio + ATA_REG_FEATURE); + tf->hob_nsect = readb(mmio + ATA_REG_NSECT); + tf->hob_lbal = readb(mmio + ATA_REG_LBAL); + tf->hob_lbam = readb(mmio + ATA_REG_LBAM); + tf->hob_lbah = readb(mmio + ATA_REG_LBAH); + } +} + +static const char * udma_str[] = { + "UDMA/16", + "UDMA/25", + "UDMA/33", + "UDMA/44", + "UDMA/66", + "UDMA/100", + "UDMA/133", + "UDMA7", +}; + +/** + * ata_udma_string - convert UDMA bit offset to string + * @udma_mask: mask of bits supported; only highest bit counts. + * + * Determine string which represents the highest speed + * (highest bit in @udma_mask). + * + * LOCKING: + * None. + * + * RETURNS: + * Constant C string representing highest speed listed in + * @udma_mask, or the constant C string "". + */ + +static const char *ata_udma_string(unsigned int udma_mask) +{ + unsigned int i; + + for (i = 7; i >= 0; i--) { + if (udma_mask & (1 << i)) + return udma_str[i]; + } + + return ""; +} + +/** + * ata_pio_devchk - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static unsigned int ata_pio_devchk(struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + __ata_dev_select(ap, device); + + outb(0x55, ioaddr->cmd_addr + ATA_REG_NSECT); + outb(0xaa, ioaddr->cmd_addr + ATA_REG_LBAL); + + outb(0xaa, ioaddr->cmd_addr + ATA_REG_NSECT); + outb(0x55, ioaddr->cmd_addr + ATA_REG_LBAL); + + outb(0x55, ioaddr->cmd_addr + ATA_REG_NSECT); + outb(0xaa, ioaddr->cmd_addr + ATA_REG_LBAL); + + nsect = inb(ioaddr->cmd_addr + ATA_REG_NSECT); + lbal = inb(ioaddr->cmd_addr + ATA_REG_LBAL); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * ata_mmio_devchk - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static unsigned int ata_mmio_devchk(struct ata_port *ap, + unsigned int device) +{ + void *mmio = (void *) ap->ioaddr.cmd_addr; + u8 nsect, lbal; + + __ata_dev_select(ap, device); + + writeb(0x55, mmio + ATA_REG_NSECT); + writeb(0xaa, mmio + ATA_REG_LBAL); + + writeb(0xaa, mmio + ATA_REG_NSECT); + writeb(0x55, mmio + ATA_REG_LBAL); + + writeb(0x55, mmio + ATA_REG_NSECT); + writeb(0xaa, mmio + ATA_REG_LBAL); + + nsect = readb(mmio + ATA_REG_NSECT); + lbal = readb(mmio + ATA_REG_LBAL); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * ata_dev_devchk - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static unsigned int ata_dev_devchk(struct ata_port *ap, + unsigned int device) +{ + if (ap->flags & ATA_FLAG_MMIO) + return ata_mmio_devchk(ap, device); + return ata_pio_devchk(ap, device); +} + +/** + * ata_dev_classify - determine device type based on ATA-spec signature + * @tf: ATA taskfile register set for device to be identified + * + * Determine from taskfile register contents whether a device is + * ATA or ATAPI, as per "Signature and persistence" section + * of ATA/PI spec (volume 1, sect 5.14). + * + * LOCKING: + * None. + * + * RETURNS: + * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN + * the event of failure. 
+ */ + +static unsigned int ata_dev_classify(struct ata_taskfile *tf) +{ + /* Apple's open source Darwin code hints that some devices only + * put a proper signature into the LBA mid/high registers, + * So, we only check those. It's sufficient for uniqueness. + */ + + if (((tf->lbam == 0) && (tf->lbah == 0)) || + ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) { + DPRINTK("found ATA device by sig\n"); + return ATA_DEV_ATA; + } + + if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) || + ((tf->lbam == 0x69) && (tf->lbah == 0x96))) { + DPRINTK("found ATAPI device by sig\n"); + return ATA_DEV_ATAPI; + } + + DPRINTK("unknown device\n"); + return ATA_DEV_UNKNOWN; +} + +/** + * ata_dev_try_classify - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device, + unsigned int maybe_have_dev) +{ + struct ata_device *dev = &ap->device[device]; + struct ata_taskfile tf; + unsigned int class; + u8 err; + + __ata_dev_select(ap, device); + + memset(&tf, 0, sizeof(tf)); + + err = ata_chk_err(ap); + ap->ops->tf_read(ap, &tf); + + dev->class = ATA_DEV_NONE; + + /* see if device passed diags */ + if (err == 1) + /* do nothing */ ; + else if ((device == 0) && (err == 0x81)) + /* do nothing */ ; + else + return err; + + /* determine if device if ATA or ATAPI */ + class = ata_dev_classify(&tf); + if (class == ATA_DEV_UNKNOWN) + return err; + if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) + return err; + + dev->class = class; + + return err; +} + +/** + * ata_dev_id_string - + * @dev: + * @s: + * @ofs: + * @len: + * + * LOCKING: + * + * RETURNS: + * + */ + +unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s, + unsigned int ofs, unsigned int len) +{ + unsigned int c, ret = 0; + + while (len > 0) { + c = dev->id[ofs] >> 8; + *s = c; + s++; + + ret = c = dev->id[ofs] & 0xff; + *s = c; + s++; + + ofs++; + len -= 2; + } + + return ret; +} + +/** + * ata_dev_parse_strings - + * @dev: + * + * LOCKING: + */ + +static void ata_dev_parse_strings(struct ata_device *dev) +{ + assert (dev->class == ATA_DEV_ATA); + memcpy(dev->vendor, "ATA ", 8); + + ata_dev_id_string(dev, dev->product, ATA_ID_PROD_OFS, + sizeof(dev->product)); +} + +/** + * __ata_dev_select - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static u8 __ata_dev_select (struct ata_port *ap, unsigned int device) +{ + u8 tmp; + + if (device == 0) + tmp = ATA_DEVICE_OBS; + else + tmp = ATA_DEVICE_OBS | ATA_DEV1; + + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.cmd_addr; + writeb(tmp, mmio + ATA_REG_DEVICE); + } else { + outb(tmp, ap->ioaddr.cmd_addr + ATA_REG_DEVICE); + } + ata_pause(ap); /* needed; also flushes, for mmio */ + + return tmp; +} + +/** + * ata_dev_select - + * @ap: + * @device: + * @wait: + * @can_sleep: + * + * LOCKING: + * + * RETURNS: + * + */ + +void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep) +{ + VPRINTK("ENTER, ata%u: device %u, wait %u\n", + ap->id, device, wait); + + if (wait) + ata_wait_idle(ap); + + ap->devsel = __ata_dev_select(ap, device); + + if (wait) { + if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI) + msleep(150); + ata_wait_idle(ap); + } +} + +/** + * ata_dump_id - + * @dev: + * + * LOCKING: + */ + +static inline void ata_dump_id(struct ata_device *dev) +{ + DPRINTK("49==0x%04x " + "53==0x%04x " + "63==0x%04x " + "64==0x%04x " + "75==0x%04x \n", + dev->id[49], + dev->id[53], + dev->id[63], + dev->id[64], + dev->id[75]); + DPRINTK("80==0x%04x " + 
"81==0x%04x " + "82==0x%04x " + "83==0x%04x " + "84==0x%04x \n", + dev->id[80], + dev->id[81], + dev->id[82], + dev->id[83], + dev->id[84]); + DPRINTK("88==0x%04x " + "93==0x%04x\n", + dev->id[88], + dev->id[93]); +} + +/** + * ata_dev_identify - obtain IDENTIFY x DEVICE page + * @ap: port on which device we wish to probe resides + * @device: device bus address, starting at zero + * + * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE + * command, and read back the 512-byte device information page. + * The device information page is fed to us via the standard + * PIO-IN protocol, but we hand-code it here. (TODO: investigate + * using standard PIO-IN paths) + * + * After reading the device information page, we use several + * bits of information from it to initialize data structures + * that will be used during the lifetime of the ata_device. + * Other data from the info page is used to disqualify certain + * older ATA devices we do not wish to support. + * + * LOCKING: + * Inherited from caller. Some functions called by this function + * obtain the host_set lock. + */ + +static void ata_dev_identify(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + unsigned int i; + u16 tmp, udma_modes; + u8 status; + struct ata_taskfile tf; +#if 0 + unsigned int use_software_reset = ap->flags & ATA_FLAG_SRST; +#else + unsigned int use_software_reset = 0; +#endif + + if (!ata_dev_present(dev)) { + DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", + ap->id, device); + return; + } + + DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); + + assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI || + dev->class == ATA_DEV_NONE); + + ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ + +retry: + ata_tf_init(ap, &tf); + tf.ctl |= ATA_NIEN; + tf.protocol = ATA_PROT_PIO_READ; + + if (dev->class == ATA_DEV_ATA) { + tf.command = ATA_CMD_ID_ATA; + DPRINTK("do ATA identify\n"); + } else { + tf.command = ATA_CMD_ID_ATAPI; + DPRINTK("do ATAPI identify\n"); + } + + ata_tf_to_host(ap, &tf); + + /* crazy ATAPI devices... */ + if (dev->class == ATA_DEV_ATAPI) + msleep(150); + + if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) + goto err_out; + + status = ata_chk_status(ap); + if (status & ATA_ERR) { + /* + * arg! ATA software reset (SRST) correctly places the + * the signatures in the taskfile registers... but kills + * one of my test devices. EDD works for all test cases, + * but seems to return the ATA signature for some ATAPI + * devices. Until the reason for this is found and fixed, + * we fix up the mess here. If IDENTIFY DEVICE returns + * command aborted (as ATAPI devices do), then we + * issue an IDENTIFY PACKET DEVICE. + */ + if ((!use_software_reset) && (tf.command == ATA_CMD_ID_ATA)) { + u8 err = ata_chk_err(ap); + if (err & ATA_ABORTED) { + dev->class = ATA_DEV_ATAPI; + goto retry; + } + } + goto err_out; + } + + /* make sure we have BSY=0, DRQ=1 */ + if ((status & ATA_DRQ) == 0) { + printk(KERN_WARNING "ata%u: dev %u (ATA%s?) not returning id page (0x%x)\n", + ap->id, device, + dev->class == ATA_DEV_ATA ? 
"" : "PI", + status); + goto err_out; + } + + /* read IDENTIFY [X] DEVICE page */ + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.cmd_addr; + for (i = 0; i < ATA_ID_WORDS; i++) + dev->id[i] = readw(mmio + ATA_REG_DATA); + } else + for (i = 0; i < ATA_ID_WORDS; i++) + dev->id[i] = inw(ap->ioaddr.cmd_addr + ATA_REG_DATA); + + /* wait for host_idle */ + status = ata_wait_idle(ap); + if (status & (ATA_BUSY | ATA_DRQ)) { + printk(KERN_WARNING "ata%u: dev %u (ATA%s?) error after id page (0x%x)\n", + ap->id, device, + dev->class == ATA_DEV_ATA ? "" : "PI", + status); + goto err_out; + } + + ata_irq_on(ap); /* re-enable interrupts */ + + /* print device capabilities */ + printk(KERN_DEBUG "ata%u: dev %u cfg " + "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", + ap->id, device, dev->id[49], + dev->id[82], dev->id[83], dev->id[84], + dev->id[85], dev->id[86], dev->id[87], + dev->id[88]); + + /* + * common ATA, ATAPI feature tests + */ + + /* we require LBA and DMA support (bits 8 & 9 of word 49) */ + if (!ata_id_has_dma(dev) || !ata_id_has_lba(dev)) { + printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id); + goto err_out_nosup; + } + + /* we require UDMA support */ + udma_modes = + tmp = dev->id[ATA_ID_UDMA_MODES]; + if ((tmp & 0xff) == 0) { + printk(KERN_DEBUG "ata%u: no udma\n", ap->id); + goto err_out_nosup; + } + + ata_dump_id(dev); + + /* ATA-specific feature tests */ + if (dev->class == ATA_DEV_ATA) { + if (!ata_id_is_ata(dev)) /* sanity check */ + goto err_out_nosup; + + tmp = dev->id[ATA_ID_MAJOR_VER]; + for (i = 14; i >= 1; i--) + if (tmp & (1 << i)) + break; + + /* we require at least ATA-3 */ + if (i < 3) { + printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); + goto err_out_nosup; + } + + if (ata_id_has_lba48(dev)) { + dev->flags |= ATA_DFLAG_LBA48; + dev->n_sectors = ata_id_u64(dev, 100); + } else { + dev->n_sectors = ata_id_u32(dev, 60); + } + + ata_dev_parse_strings(dev); + + ap->host->max_cmd_len = 16; + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors%s\n", + ap->id, device, + ata_udma_string(udma_modes), + dev->n_sectors, + dev->flags & ATA_DFLAG_LBA48 ? 
" (lba48)" : ""); + } + + /* ATAPI-specific feature tests */ + else { + if (ata_id_is_ata(dev)) /* sanity check */ + goto err_out_nosup; + + /* see if 16-byte commands supported */ + tmp = dev->id[0] & 0x3; + if (tmp == 1) + ap->host->max_cmd_len = 16; + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", + ap->id, device, + ata_udma_string(udma_modes)); + } + + DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); + return; + +err_out_nosup: + printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", + ap->id, device); +err_out: + ata_irq_on(ap); /* re-enable interrupts */ + dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */ + DPRINTK("EXIT, err\n"); +} + +/** + * ata_port_probe - + * @ap: + * + * LOCKING: + */ + +void ata_port_probe(struct ata_port *ap) +{ + ap->flags &= ~ATA_FLAG_PORT_DISABLED; +} + +/** + * ata_port_disable - + * @ap: + * + * LOCKING: + */ + +void ata_port_disable(struct ata_port *ap) +{ + ap->device[0].class = ATA_DEV_NONE; + ap->device[1].class = ATA_DEV_NONE; + ap->flags |= ATA_FLAG_PORT_DISABLED; +} + +/** + * ata_busy_sleep - sleep until BSY clears, or timeout + * @ap: port containing status register to be polled + * @tmout_pat: impatience timeout + * @tmout: overall timeout + * + * LOCKING: + * + */ + +static unsigned int ata_busy_sleep (struct ata_port *ap, + unsigned long tmout_pat, + unsigned long tmout) +{ + unsigned long timer_start, timeout; + u8 status; + + status = ata_busy_wait(ap, ATA_BUSY, 300); + timer_start = jiffies; + timeout = timer_start + tmout_pat; + while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { + msleep(50); + status = ata_busy_wait(ap, ATA_BUSY, 3); + } + + if (status & ATA_BUSY) + printk(KERN_WARNING "ata%u is slow to respond, " + "please be patient\n", ap->id); + + timeout = timer_start + tmout; + while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { + msleep(50); + status = ata_chk_status(ap); + } + + if (status & ATA_BUSY) { + printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", + ap->id, tmout / HZ); + return 1; + } + + return 0; +} + +static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + unsigned long timeout; + + /* if device 0 was found in ata_dev_devchk, wait for its + * BSY bit to clear + */ + if (dev0) + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* if device 1 was found in ata_dev_devchk, wait for + * register access, then wait for BSY to clear + */ + timeout = jiffies + ATA_TMOUT_BOOT; + while (dev1) { + u8 nsect, lbal; + + __ata_dev_select(ap, 1); + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ioaddr->cmd_addr; + nsect = readb(mmio + ATA_REG_NSECT); + lbal = readb(mmio + ATA_REG_LBAL); + } else { + nsect = inb(ioaddr->cmd_addr + ATA_REG_NSECT); + lbal = inb(ioaddr->cmd_addr + ATA_REG_LBAL); + } + if ((nsect == 1) && (lbal == 1)) + break; + if (time_after(jiffies, timeout)) { + dev1 = 0; + break; + } + msleep(50); /* give drive a breather */ + } + if (dev1) + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* is all this really necessary? 
*/ + __ata_dev_select(ap, 0); + if (dev1) + __ata_dev_select(ap, 1); + if (dev0) + __ata_dev_select(ap, 0); +} + +/** + * ata_bus_edd - + * @ap: + * + * LOCKING: + * + */ + +static unsigned int ata_bus_edd(struct ata_port *ap) +{ + struct ata_taskfile tf; + + /* set up execute-device-diag (bus reset) taskfile */ + /* also, take interrupts to a known state (disabled) */ + DPRINTK("execute-device-diag\n"); + ata_tf_init(ap, &tf); + tf.ctl |= ATA_NIEN; + tf.command = ATA_CMD_EDD; + tf.protocol = ATA_PROT_NODATA; + + /* do bus reset */ + ata_tf_to_host(ap, &tf); + + /* spec says at least 2ms. but who knows with those + * crazy ATAPI devices... + */ + msleep(150); + + return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); +} + +static unsigned int ata_bus_softreset(struct ata_port *ap, + unsigned int devmask) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + printk(KERN_DEBUG "ata%u: bus reset via SRST\n", ap->id); + + /* software reset. causes dev0 to be selected */ + if (ap->flags & ATA_FLAG_MMIO) { + writeb(ap->ctl, ioaddr->ctl_addr); + udelay(10); /* FIXME: flush */ + writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(10); /* FIXME: flush */ + writeb(ap->ctl, ioaddr->ctl_addr); + } else { + outb(ap->ctl, ioaddr->ctl_addr); + udelay(10); + outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(10); + outb(ap->ctl, ioaddr->ctl_addr); + } + + /* spec mandates ">= 2ms" before checking status. + * We wait 150ms, because that was the magic delay used for + * ATAPI devices in Hale Landis's ATADRVR, for the period of time + * between when the ATA command register is written, and then + * status is checked. Because waiting for "a while" before + * checking status is fine, post SRST, we perform this magic + * delay here as well. + */ + msleep(150); + + ata_bus_post_reset(ap, devmask); + + return 0; +} + +/** + * ata_bus_reset - reset host port and associated ATA channel + * @ap: port to reset + * + * This is typically the first time we actually start issuing + * commands to the ATA channel. We wait for BSY to clear, then + * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its + * result. Determine what devices, if any, are on the channel + * by looking at the device 0/1 error register. Look at the signature + * stored in each device's taskfile registers, to determine if + * the device is ATA or ATAPI. + * + * LOCKING: + * Inherited from caller. Some functions called by this function + * obtain the host_set lock. + * + * SIDE EFFECTS: + * Sets ATA_FLAG_PORT_DISABLED if bus reset fails. 
+ */ + +static void ata_bus_reset(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; + u8 err; + unsigned int dev0, dev1 = 0, rc, devmask = 0; + unsigned int use_software_reset = ap->flags & ATA_FLAG_SRST; + + DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no); + + /* set up device control */ + if (ap->flags & ATA_FLAG_MMIO) + writeb(ap->ctl, ioaddr->ctl_addr); + else + outb(ap->ctl, ioaddr->ctl_addr); + + /* determine if device 0/1 are present */ + dev0 = ata_dev_devchk(ap, 0); + if (slave_possible) + dev1 = ata_dev_devchk(ap, 1); + + if (dev0) + devmask |= (1 << 0); + if (dev1) + devmask |= (1 << 1); + + /* select device 0 again */ + __ata_dev_select(ap, 0); + + /* issue bus reset */ + if (use_software_reset) + rc = ata_bus_softreset(ap, devmask); + else + rc = ata_bus_edd(ap); + + if (rc) + goto err_out; + + /* + * determine by signature whether we have ATA or ATAPI devices + */ + err = ata_dev_try_classify(ap, 0, dev0); + if ((slave_possible) && (err != 0x81)) + ata_dev_try_classify(ap, 1, dev1); + + /* re-enable interrupts */ + ata_irq_on(ap); + + /* is double-select really necessary? */ + if (ap->device[1].class != ATA_DEV_NONE) + __ata_dev_select(ap, 1); + if (ap->device[0].class != ATA_DEV_NONE) + __ata_dev_select(ap, 0); + + /* if no devices were detected, disable this port */ + if ((ap->device[0].class == ATA_DEV_NONE) && + (ap->device[1].class == ATA_DEV_NONE)) + goto err_out; + + DPRINTK("EXIT\n"); + return; + +err_out: + printk(KERN_ERR "ata%u: disabling port\n", ap->id); + ap->ops->port_disable(ap); + + DPRINTK("EXIT\n"); +} + +/** + * ata_host_set_pio - + * @ap: + * + * LOCKING: + */ + +static void ata_host_set_pio(struct ata_port *ap) +{ + struct ata_device *master, *slave; + unsigned int pio, i; + u16 mask; + + master = &ap->device[0]; + slave = &ap->device[1]; + + assert (ata_dev_present(master) || ata_dev_present(slave)); + + mask = ap->pio_mask; + if (ata_dev_present(master)) + mask &= (master->id[ATA_ID_PIO_MODES] & 0x03); + if (ata_dev_present(slave)) + mask &= (slave->id[ATA_ID_PIO_MODES] & 0x03); + + /* require pio mode 3 or 4 support for host and all devices */ + if (mask == 0) { + printk(KERN_WARNING "ata%u: no PIO3/4 support, ignoring\n", + ap->id); + goto err_out; + } + + pio = (mask & ATA_ID_PIO4) ? 4 : 3; + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (ata_dev_present(&ap->device[i])) { + ap->device[i].pio_mode = (pio == 3) ? + XFER_PIO_3 : XFER_PIO_4; + ap->ops->set_piomode(ap, &ap->device[i], pio); + } + + return; + +err_out: + ap->ops->port_disable(ap); +} + +/** + * ata_host_set_udma - + * @ap: + * + * LOCKING: + */ + +static void ata_host_set_udma(struct ata_port *ap) +{ + struct ata_device *master, *slave; + u16 mask; + unsigned int i, j; + int udma_mode = -1; + + master = &ap->device[0]; + slave = &ap->device[1]; + + assert (ata_dev_present(master) || ata_dev_present(slave)); + assert ((ap->flags & ATA_FLAG_PORT_DISABLED) == 0); + + DPRINTK("udma masks: host 0x%X, master 0x%X, slave 0x%X\n", + ap->udma_mask, + (!ata_dev_present(master)) ? 0xff : + (master->id[ATA_ID_UDMA_MODES] & 0xff), + (!ata_dev_present(slave)) ? 
0xff : + (slave->id[ATA_ID_UDMA_MODES] & 0xff)); + + mask = ap->udma_mask; + if (ata_dev_present(master)) + mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); + if (ata_dev_present(slave)) + mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); + + i = XFER_UDMA_7; + while (i >= XFER_UDMA_0) { + j = i - XFER_UDMA_0; + DPRINTK("mask 0x%X i 0x%X j %u\n", mask, i, j); + if (mask & (1 << j)) { + udma_mode = i; + break; + } + + i--; + } + + /* require udma for host and all attached devices */ + if (udma_mode < 0) { + printk(KERN_WARNING "ata%u: no UltraDMA support, ignoring\n", + ap->id); + goto err_out; + } + + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (ata_dev_present(&ap->device[i])) { + ap->device[i].udma_mode = udma_mode; + ap->ops->set_udmamode(ap, &ap->device[i], udma_mode); + } + + return; + +err_out: + ap->ops->port_disable(ap); +} + +/** + * ata_dev_set_xfermode - + * @ap: + * @dev: + * + * LOCKING: + */ + +static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) +{ + struct ata_taskfile tf; + + /* set up set-features taskfile */ + DPRINTK("set features - xfer mode\n"); + ata_tf_init(ap, &tf); + tf.ctl |= ATA_NIEN; + tf.command = ATA_CMD_SET_FEATURES; + tf.feature = SETFEATURES_XFER; + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.protocol = ATA_PROT_NODATA; + if (dev->flags & ATA_DFLAG_PIO) + tf.nsect = dev->pio_mode; + else + tf.nsect = dev->udma_mode; + + /* do bus reset */ + ata_tf_to_host(ap, &tf); + + /* crazy ATAPI devices... */ + if (dev->class == ATA_DEV_ATAPI) + msleep(150); + + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + ata_irq_on(ap); /* re-enable interrupts */ + + ata_wait_idle(ap); + + DPRINTK("EXIT\n"); +} + +/** + * ata_dev_set_udma - + * @ap: + * @device: + * + * LOCKING: + */ + +static void ata_dev_set_udma(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + + if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + return; + + ata_dev_set_xfermode(ap, dev); + + assert((dev->udma_mode >= XFER_UDMA_0) && + (dev->udma_mode <= XFER_UDMA_7)); + printk(KERN_INFO "ata%u: dev %u configured for %s\n", + ap->id, device, + udma_str[dev->udma_mode - XFER_UDMA_0]); +} + +/** + * ata_dev_set_pio - + * @ap: + * @device: + * + * LOCKING: + */ + +static void ata_dev_set_pio(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + + if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + return; + + /* force PIO mode */ + dev->flags |= ATA_DFLAG_PIO; + + ata_dev_set_xfermode(ap, dev); + + assert((dev->pio_mode >= XFER_PIO_3) && + (dev->pio_mode <= XFER_PIO_4)); + printk(KERN_INFO "ata%u: dev %u configured for PIO%c\n", + ap->id, device, + dev->pio_mode == 3 ? 
'3' : '4'); +} + +/** + * ata_sg_clean - + * @qc: + * + * LOCKING: + */ + +static void ata_sg_clean(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + struct scatterlist *sg; + int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); + + assert(dir == SCSI_DATA_READ || dir == SCSI_DATA_WRITE); + assert(qc->flags & ATA_QCFLAG_SG); + + if (cmd->use_sg) { + sg = (struct scatterlist *)qc->scsicmd->request_buffer; + } else { + sg = &qc->sgent; + assert(qc->n_elem == 1); + } + + DPRINTK("unmapping %u sg elements\n", qc->n_elem); + + if (cmd->use_sg) + pci_unmap_sg(ap->host_set->pdev, sg, qc->n_elem, dir); + else + pci_unmap_single(ap->host_set->pdev, sg[0].dma_address, + sg[0].length, dir); + + qc->flags &= ~ATA_QCFLAG_SG; +} + +/** + * ata_sg_setup_one - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static int ata_sg_setup_one(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); + struct scatterlist *sg = &qc->sgent; + unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG); + + qc->n_elem = 1; + sg->address = cmd->request_buffer; + sg->page = virt_to_page(cmd->request_buffer); + sg->offset = (unsigned long) cmd->request_buffer & ~PAGE_MASK; + sg->length = cmd->request_bufflen; + + if (!have_sg) + return 0; + + sg->dma_address = pci_map_single(ap->host_set->pdev, + cmd->request_buffer, + cmd->request_bufflen, dir); + + DPRINTK("mapped buffer of %d bytes for %s\n", cmd->request_bufflen, + qc->flags & ATA_QCFLAG_WRITE ? "write" : "read"); + + ap->prd[0].addr = cpu_to_le32(sg->dma_address); + ap->prd[0].flags_len = cpu_to_le32(sg->length | ATA_PRD_EOT); + VPRINTK("PRD[0] = (0x%X, 0x%X)\n", + ap->prd[0].addr, ap->prd[0].flags_len); + + return 0; +} + +/** + * ata_sg_setup - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static int ata_sg_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + struct scatterlist *sg; + int n_elem; + unsigned int i; + unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG); + + VPRINTK("ENTER, ata%u, use_sg %d\n", ap->id, cmd->use_sg); + assert(cmd->use_sg > 0); + + sg = (struct scatterlist *)cmd->request_buffer; + if (have_sg) { + int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); + n_elem = pci_map_sg(ap->host_set->pdev, sg, cmd->use_sg, dir); + if (n_elem < 1) + return -1; + DPRINTK("%d sg elements mapped\n", n_elem); + } else { + n_elem = cmd->use_sg; + } + qc->n_elem = n_elem; + + +#ifndef ATA_DEBUG + if (!have_sg) + return 0; +#endif + + for (i = 0; i < n_elem; i++) { + ap->prd[i].addr = cpu_to_le32(sg[i].dma_address); + ap->prd[i].flags_len = cpu_to_le32(sg[i].length); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", + i, ap->prd[i].addr, ap->prd[i].flags_len); + } + ap->prd[n_elem - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); + +#ifdef ATA_DEBUG + i = n_elem - 1; + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", + i, ap->prd[i].addr, ap->prd[i].flags_len); + + for (i = n_elem; i < ATA_MAX_PRD; i++) { + ap->prd[i].addr = 0; + ap->prd[i].flags_len = cpu_to_le32(ATA_PRD_EOT); + } +#endif + + return 0; +} + +/** + * ata_pio_poll - + * @ap: + * + * LOCKING: + * + * RETURNS: + * + */ + +static unsigned long ata_pio_poll(struct ata_port *ap) +{ + u8 status; + unsigned int poll_state = THR_UNKNOWN; + unsigned int reg_state = THR_UNKNOWN; + const unsigned int tmout_state = THR_PIO_TMOUT; + + switch (ap->thr_state) { + case 
THR_PIO: + case THR_PIO_POLL: + poll_state = THR_PIO_POLL; + reg_state = THR_PIO; + break; + case THR_PIO_LAST: + case THR_PIO_LAST_POLL: + poll_state = THR_PIO_LAST_POLL; + reg_state = THR_PIO_LAST; + break; + default: + BUG(); + break; + } + + status = ata_chk_status(ap); + if (status & ATA_BUSY) { + if (time_after(jiffies, ap->thr_timeout)) { + ap->thr_state = tmout_state; + return 0; + } + ap->thr_state = poll_state; + if (current->need_resched) + return 0; + return ATA_SHORT_PAUSE; + } + + ap->thr_state = reg_state; + return 0; +} + +/** + * ata_pio_start - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static void ata_pio_start (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + assert((qc->tf.protocol == ATA_PROT_PIO_READ) || + (qc->tf.protocol == ATA_PROT_PIO_WRITE)); + + qc->flags |= ATA_QCFLAG_POLL; + qc->tf.ctl |= ATA_NIEN; /* disable interrupts */ + ata_tf_to_host_nolock(ap, &qc->tf); + ata_thread_wake(ap, THR_PIO); +} + +/** + * ata_pio_complete - + * @ap: + * + * LOCKING: + */ + +static void ata_pio_complete (struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + unsigned long flags; + u8 drv_stat; + + /* + * This is purely heuristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, fall back to + * THR_PIO_POLL state. + */ + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + msleep(2); + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + ap->thr_state = THR_PIO_LAST_POLL; + ap->thr_timeout = jiffies + ATA_TMOUT_PIO; + return; + } + } + + drv_stat = ata_wait_idle(ap); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + ap->thr_state = THR_PIO_ERR; + return; + } + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + + spin_lock_irqsave(&ap->host_set->lock, flags); + ap->thr_state = THR_IDLE; + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + ata_irq_on(ap); + + ata_qc_complete(qc, drv_stat, 0); +} + +/** + * ata_pio_sector - + * @ap: + * + * LOCKING: + */ + +static void ata_pio_sector(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + struct scatterlist *sg; + Scsi_Cmnd *cmd; + unsigned char *buf; + u8 status; + + /* + * This is purely heuristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, fall back to + * THR_PIO_POLL state.
+ */ + status = ata_busy_wait(ap, ATA_BUSY, 5); + if (status & ATA_BUSY) { + msleep(2); + status = ata_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + ap->thr_state = THR_PIO_POLL; + ap->thr_timeout = jiffies + ATA_TMOUT_PIO; + return; + } + } + + /* handle BSY=0, DRQ=0 as error */ + if ((status & ATA_DRQ) == 0) { + ap->thr_state = THR_PIO_ERR; + return; + } + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + + cmd = qc->scsicmd; + if (cmd->use_sg) + sg = (struct scatterlist *)cmd->request_buffer; + else + sg = &qc->sgent; + if (qc->cursect == (qc->nsect - 1)) + ap->thr_state = THR_PIO_LAST; + + buf = kmap(sg[qc->cursg].page) + + sg[qc->cursg].offset + (qc->cursg_ofs * ATA_SECT_SIZE); + + qc->cursect++; + qc->cursg_ofs++; + + if (cmd->use_sg) + if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg[qc->cursg].length) { + qc->cursg++; + qc->cursg_ofs = 0; + } + + DPRINTK("data %s, drv_stat 0x%X\n", + qc->flags & ATA_QCFLAG_WRITE ? "write" : "read", + status); + + /* do the actual data transfer */ + /* FIXME: mmio-ize */ + if (qc->flags & ATA_QCFLAG_WRITE) + outsl(ap->ioaddr.cmd_addr + ATA_REG_DATA, buf, ATA_SECT_DWORDS); + else + insl(ap->ioaddr.cmd_addr + ATA_REG_DATA, buf, ATA_SECT_DWORDS); + + kunmap(sg[qc->cursg].page); +} + +/** + * ata_eng_schedule - run an iteration of the pio/dma/whatever engine + * @ap: port on which activity will occur + * @eng: instance of engine + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ +static void ata_eng_schedule (struct ata_port *ap, struct ata_engine *eng) +{ + /* FIXME */ +} + +/** + * ata_eng_timeout - Handle timeout of queued command + * @ap: Port on which timed-out command is active + * + * Some part of the kernel (currently, only the SCSI layer) + * has noticed that the active command on port @ap has not + * completed after a specified length of time. Handle this + * condition by disabling DMA (if necessary) and completing + * transactions, with error if necessary. + * + * This also handles the case of the "lost interrupt", where + * for some reason (possibly hardware bug, possibly driver bug) + * an interrupt was not delivered to the driver, even though the + * transaction completed successfully. 
+ * + * LOCKING: + * Inherited from SCSI layer (none, can sleep) + */ + +void ata_eng_timeout(struct ata_port *ap) +{ + u8 host_stat, drv_stat; + struct ata_queued_cmd *qc; + + DPRINTK("ENTER\n"); + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (!qc) { + printk(KERN_ERR "ata%u: BUG: timeout without command\n", + ap->id); + goto out; + } + + switch (qc->tf.protocol) { + case ATA_PROT_DMA_READ: + case ATA_PROT_DMA_WRITE: + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + host_stat = readb(mmio + ATA_DMA_STATUS); + } else + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + + printk(KERN_ERR "ata%u: DMA timeout, stat 0x%x\n", + ap->id, host_stat); + + ata_dma_complete(ap, host_stat, 1); + break; + + case ATA_PROT_NODATA: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + + default: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + } + +out: + DPRINTK("EXIT\n"); +} + +/** + * ata_qc_new - + * @ap: + * @dev: + * + * LOCKING: + */ + +static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) +{ + struct ata_queued_cmd *qc = NULL; + unsigned int i; + + for (i = 0; i < ATA_MAX_QUEUE; i++) + if (!test_and_set_bit(i, &ap->qactive)) { + qc = ata_qc_from_tag(ap, i); + break; + } + + if (qc) + qc->tag = i; + + return qc; +} + +/** + * ata_qc_new_init - + * @ap: + * @cmd: + * @done: + * + * LOCKING: + */ + +struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, + struct ata_device *dev, + Scsi_Cmnd *cmd, + void (*done)(Scsi_Cmnd *)) +{ + struct ata_queued_cmd *qc; + + qc = ata_qc_new(ap); + if (!qc) { + cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); + done(cmd); + } else { + qc->flags = 0; + qc->scsicmd = cmd; + qc->scsidone = done; + qc->ap = ap; + qc->dev = dev; + INIT_LIST_HEAD(&qc->node); + init_MUTEX_LOCKED(&qc->sem); + + ata_tf_init(ap, &qc->tf); + + if (likely((dev->flags & ATA_DFLAG_PIO) == 0)) + qc->flags |= ATA_QCFLAG_DMA; + if (dev->flags & ATA_DFLAG_LBA48) + qc->tf.flags |= ATA_TFLAG_LBA48; + } + + return qc; +} + +/** + * ata_qc_complete - + * @qc: + * @drv_stat: + * @done_late: + * + * LOCKING: + * + */ + +static void ata_qc_complete(struct ata_queued_cmd *qc, + u8 drv_stat, unsigned int done_late) +{ + struct ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + unsigned int tag, do_clear = 0; + + assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ + assert(qc->flags & ATA_QCFLAG_ACTIVE); + + if (likely(qc->flags & ATA_QCFLAG_SG)) + ata_sg_clean(qc); + + if (cmd) { + if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { + if (qc->flags & ATA_QCFLAG_ATAPI) + cmd->result = SAM_STAT_CHECK_CONDITION; + else + ata_to_sense_error(qc); + } else { + if (done_late) + cmd->done_late = 1; + cmd->result = SAM_STAT_GOOD; + } + + qc->scsidone(cmd); + } + + qc->flags &= ~ATA_QCFLAG_ACTIVE; + tag = qc->tag; + if (likely(ata_tag_valid(tag))) { + if (tag == ap->active_tag) + ap->active_tag = ATA_TAG_POISON; + qc->tag = ATA_TAG_POISON; + do_clear = 1; + } + + up(&qc->sem); + + if (likely(do_clear)) + clear_bit(tag, &ap->qactive); +} + +/** + * ata_qc_push - + * @qc: + * @append: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ +static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append) +{ + struct 
ata_port *ap = qc->ap; + struct ata_engine *eng = &ap->eng; + + if (likely(append)) + list_add_tail(&qc->node, &eng->q); + else + list_add(&qc->node, &eng->q); + + if (!test_and_set_bit(ATA_EFLG_ACTIVE, &eng->flags)) + ata_eng_schedule(ap, eng); +} + +/** + * ata_qc_issue - + * @qc: + * + * LOCKING: + * + * RETURNS: + * + */ +int ata_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + Scsi_Cmnd *cmd = qc->scsicmd; + unsigned int dma = qc->flags & ATA_QCFLAG_DMA; + + ata_dev_select(ap, qc->dev->devno, 1, 0); + + /* set up SG table */ + if (cmd->use_sg) { + if (ata_sg_setup(qc)) + goto err_out; + } else { + if (ata_sg_setup_one(qc)) + goto err_out; + } + + qc->ap->active_tag = qc->tag; + qc->flags |= ATA_QCFLAG_ACTIVE; + + if (likely(dma)) { + ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->bmdma_start(qc); /* initiate bmdma */ + } else + /* load tf registers, initiate polling pio */ + ata_pio_start(qc); + + return 0; + +err_out: + return -1; +} + +/** + * ata_bmdma_start_mmio - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE); + u8 host_stat, dmactl; + void *mmio = (void *) ap->ioaddr.bmdma_addr; + + /* load PRD table addr. */ + writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); + + /* specify data direction */ + /* FIXME: redundant to later start-dma command? */ + writeb(rw ? 0 : ATA_DMA_WR, mmio + ATA_DMA_CMD); + + /* clear interrupt, error bits */ + host_stat = readb(mmio + ATA_DMA_STATUS); + writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, mmio + ATA_DMA_STATUS); + + /* issue r/w command */ + __ata_exec(ap, &qc->tf); + + /* start host DMA transaction */ + dmactl = readb(mmio + ATA_DMA_CMD); + writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); + + /* Strictly, one may wish to issue a readb() here, to + * flush the mmio write. However, control also passes + * to the hardware at this point, and it will interrupt + * us when we are to resume control. So, in effect, + * we don't care when the mmio write flushes. + * Further, a read of the DMA status register _immediately_ + * following the write may not be what certain flaky hardware + * is expecting, so I think it is best to not add a readb() + * without first checking all the MMIO ATA cards/mobos. + * Or maybe I'm just being paranoid. + */ +} + +/** + * ata_bmdma_start_pio - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_bmdma_start_pio (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE); + u8 host_stat, dmactl; + + /* load PRD table addr. */ + outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); + + /* specify data direction */ + /* FIXME: redundant to later start-dma command? */ + outb(rw ?
0 : ATA_DMA_WR, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + + /* clear interrupt, error bits */ + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + + /* issue r/w command */ + __ata_exec(ap, &qc->tf); + + /* start host DMA transaction */ + dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + outb(dmactl | ATA_DMA_START, + ap->ioaddr.bmdma_addr + ATA_DMA_CMD); +} + +/** + * ata_dma_complete - + * @ap: + * @host_stat: + * @done_late: + * + * LOCKING: + */ + +static void ata_dma_complete(struct ata_port *ap, u8 host_stat, + unsigned int done_late) +{ + VPRINTK("ENTER\n"); + + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + + /* clear start/stop bit */ + writeb(0, mmio + ATA_DMA_CMD); + + /* ack intr, err bits */ + writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + mmio + ATA_DMA_STATUS); + } else { + /* clear start/stop bit */ + outb(0, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + + /* ack intr, err bits */ + outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + } + + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_altstatus(ap); /* dummy read */ + + DPRINTK("host %u, host_stat==0x%X, drv_stat==0x%X\n", + ap->id, (u32) host_stat, (u32) ata_chk_status(ap)); + + /* get drive status; clear intr; complete txn */ + ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag), + ata_wait_idle(ap), done_late); +} + +/** + * ata_host_intr - Handle host interrupt for given (port, task) + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine + * + * Handle host interrupt for given queued command. Currently, + * only DMA interrupts are handled. All other commands are + * handled via polling with interrupts disabled (nIEN bit). + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * One if interrupt was handled, zero if not (shared irq). 
+ */ + +static inline unsigned int ata_host_intr (struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + u8 status, host_stat; + unsigned int handled = 0; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA_READ: + case ATA_PROT_DMA_WRITE: + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + host_stat = readb(mmio + ATA_DMA_STATUS); + } else + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + VPRINTK("BUS_DMA (host_stat 0x%X)\n", host_stat); + + if (!(host_stat & ATA_DMA_INTR)) { + ap->stats.idle_irq++; + break; + } + + ata_dma_complete(ap, host_stat, 0); + handled = 1; + break; + + case ATA_PROT_NODATA: /* command completion, but no data xfer */ + status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); + ata_qc_complete(qc, status, 0); + handled = 1; + break; + + default: + ap->stats.idle_irq++; + +#ifdef ATA_IRQ_TRAP + if ((ap->stats.idle_irq % 1000) == 0) { + handled = 1; + ata_irq_ack(ap, 0); /* debug trap */ + printk(KERN_WARNING "ata%d: irq trap\n", ap->id); + } +#endif + break; + } + + return handled; +} + +/** + * ata_interrupt - + * @irq: + * @dev_instance: + * @regs: + * + * LOCKING: + * + * RETURNS: + * + */ + +static irqreturn_t ata_interrupt (int irq, void *dev_instance, + struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + unsigned int i; + unsigned int handled = 0; + unsigned long flags; + + /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ + spin_lock_irqsave(&host_set->lock, flags); + + for (i = 0; i < host_set->n_hosts; i++) { + struct ata_port *ap; + + ap = host_set->hosts[i]; + if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0)) + handled += ata_host_intr(ap, qc); + } + } + + spin_unlock_irqrestore(&host_set->lock, flags); + + return IRQ_RETVAL(handled); +} + +/** + * ata_thread_wake - + * @ap: + * @thr_state: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_thread_wake(struct ata_port *ap, unsigned int thr_state) +{ + assert(ap->thr_state == THR_IDLE); + ap->thr_state = thr_state; + up(&ap->thr_sem); +} + +/** + * ata_thread_timer - + * @opaque: + * + * LOCKING: + */ + +static void ata_thread_timer(unsigned long opaque) +{ + struct ata_port *ap = (struct ata_port *) opaque; + + up(&ap->thr_sem); +} + +/** + * ata_thread_iter - + * @ap: + * + * LOCKING: + * + * RETURNS: + * + */ + +static unsigned long ata_thread_iter(struct ata_port *ap) +{ + long timeout = 0; + + DPRINTK("ata%u: thr_state %s\n", + ap->id, ata_thr_state_name(ap->thr_state)); + + switch (ap->thr_state) { + case THR_UNKNOWN: + ap->thr_state = THR_CHECKPORT; + break; + + case THR_PROBE_START: + down(&ap->sem); + ap->thr_state = THR_CHECKPORT; + break; + + case THR_CHECKPORT: + ap->ops->port_probe(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + ap->thr_state = THR_PROBE_FAILED; + else + ap->thr_state = THR_BUS_RESET; + break; + + case THR_BUS_RESET: + ata_bus_reset(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + ap->thr_state = THR_PROBE_FAILED; + else + ap->thr_state = THR_IDENTIFY; + break; + + case THR_IDENTIFY: + ata_dev_identify(ap, 0); + ata_dev_identify(ap, 1); + + if (!ata_dev_present(&ap->device[0]) && + !ata_dev_present(&ap->device[1])) { + ap->ops->port_disable(ap); + ap->thr_state = THR_PROBE_FAILED; + } else + ap->thr_state = THR_CONFIG_TIMINGS; + break; + + case THR_CONFIG_TIMINGS: + ata_host_set_pio(ap); + if ((ap->flags 
& ATA_FLAG_PORT_DISABLED) == 0) + ata_host_set_udma(ap); + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + ap->thr_state = THR_PROBE_FAILED; + else +#ifdef ATA_FORCE_PIO + ap->thr_state = THR_CONFIG_FORCE_PIO; +#else + ap->thr_state = THR_CONFIG_DMA; +#endif + break; + + case THR_CONFIG_FORCE_PIO: + ata_dev_set_pio(ap, 0); + ata_dev_set_pio(ap, 1); + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + ap->thr_state = THR_PROBE_FAILED; + else + ap->thr_state = THR_PROBE_SUCCESS; + break; + + case THR_CONFIG_DMA: + ata_dev_set_udma(ap, 0); + ata_dev_set_udma(ap, 1); + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + ap->thr_state = THR_PROBE_FAILED; + else + ap->thr_state = THR_PROBE_SUCCESS; + break; + + case THR_PROBE_SUCCESS: + up(&ap->probe_sem); + ap->thr_state = THR_IDLE; + break; + + case THR_PROBE_FAILED: + up(&ap->probe_sem); + ap->thr_state = THR_AWAIT_DEATH; + break; + + case THR_AWAIT_DEATH: + timeout = -1; + break; + + case THR_IDLE: + timeout = 30 * HZ; + break; + + case THR_PIO: + ata_pio_sector(ap); + break; + + case THR_PIO_LAST: + ata_pio_complete(ap); + break; + + case THR_PIO_POLL: + case THR_PIO_LAST_POLL: + timeout = ata_pio_poll(ap); + break; + + case THR_PIO_TMOUT: + printk(KERN_ERR "ata%d: FIXME: THR_PIO_TMOUT\n", /* FIXME */ + ap->id); + timeout = 11 * HZ; + break; + + case THR_PIO_ERR: + printk(KERN_ERR "ata%d: FIXME: THR_PIO_ERR\n", /* FIXME */ + ap->id); + timeout = 11 * HZ; + break; + + case THR_PACKET: + atapi_cdb_send(ap); + break; + + default: + printk(KERN_DEBUG "ata%u: unknown thr state %s\n", + ap->id, ata_thr_state_name(ap->thr_state)); + break; + } + + DPRINTK("ata%u: new thr_state %s, returning %ld\n", + ap->id, ata_thr_state_name(ap->thr_state), timeout); + return timeout; +} + +/** + * ata_thread - + * @data: + * + * LOCKING: + * + * RETURNS: + * + */ + +static int ata_thread (void *data) +{ + struct ata_port *ap = data; + long timeout; + + daemonize (); + reparent_to_init(); + spin_lock_irq(&current->sigmask_lock); + sigemptyset(&current->blocked); + recalc_sigpending(current); + spin_unlock_irq(&current->sigmask_lock); + + sprintf(current->comm, "katad-%u", ap->id); + + while (1) { + cond_resched(); + + timeout = ata_thread_iter(ap); + + if (signal_pending (current)) { + spin_lock_irq(&current->sigmask_lock); + flush_signals(current); + spin_unlock_irq(&current->sigmask_lock); + } + + if ((timeout < 0) || (ap->time_to_die)) + break; + + /* note sleeping for full timeout not guaranteed (that's ok) */ + if (timeout) { + mod_timer(&ap->thr_timer, jiffies + timeout); + down_interruptible(&ap->thr_sem); + + if (signal_pending (current)) { + spin_lock_irq(&current->sigmask_lock); + flush_signals(current); + spin_unlock_irq(&current->sigmask_lock); + } + + if (ap->time_to_die) + break; + } + } + + printk(KERN_DEBUG "ata%u: thread exiting\n", ap->id); + ap->thr_pid = -1; + complete_and_exit (&ap->thr_exited, 0); +} + +/** + * ata_thread_kill - kill per-port kernel thread + * @ap: port whose thread is to be killed + * + * LOCKING: + * + */ + +static int ata_thread_kill(struct ata_port *ap) +{ + int ret = 0; + + if (ap->thr_pid >= 0) { + ap->time_to_die = 1; + wmb(); + ret = kill_proc(ap->thr_pid, SIGTERM, 1); + if (ret) + printk(KERN_ERR "ata%d: unable to kill kernel thread\n", + ap->id); + else + wait_for_completion(&ap->thr_exited); + } + + return ret; +} + +/** + * atapi_cdb_send - Write CDB bytes to hardware + * @ap: Port to which ATAPI device is attached. + * + * When device has indicated its readiness to accept + * a CDB, this function is called. Send the CDB. + * If DMA is to be performed, exit immediately.
+ * Otherwise, we are in polling mode, so poll + * status until operation succeeds or fails. + * + * LOCKING: + * Kernel thread context (may sleep) + */ + +static void atapi_cdb_send(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + u8 status; + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + assert(qc->flags & ATA_QCFLAG_ACTIVE); + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) + goto err_out; + + /* make sure DRQ is set */ + status = ata_chk_status(ap); + if ((status & ATA_DRQ) == 0) + goto err_out; + + /* send SCSI cdb */ + /* FIXME: mmio-ize */ + DPRINTK("send cdb\n"); + outsl(ap->ioaddr.cmd_addr + ATA_REG_DATA, + qc->scsicmd->cmnd, ap->host->max_cmd_len / 4); + + /* if we are DMA'ing, irq handler takes over from here */ + if (qc->tf.feature == ATAPI_PKT_DMA) + goto out; + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait 2\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) + goto err_out; + + /* wait for BSY,DRQ to clear */ + status = ata_wait_idle(ap); + if (status & (ATA_BUSY | ATA_DRQ)) + goto err_out; + + /* transaction completed, indicate such to scsi stack */ + ata_qc_complete(qc, status, 0); + ata_irq_on(ap); + +out: + ap->thr_state = THR_IDLE; + return; + +err_out: + ata_qc_complete(qc, ATA_ERR, 0); + goto out; +} + +/** + * ata_host_remove - + * @ap: + * @do_unregister: + * + * LOCKING: + */ + +static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) +{ + struct Scsi_Host *sh = ap->host; + + DPRINTK("ENTER\n"); + + if (do_unregister) + scsi_unregister(sh); + + pci_free_consistent(ap->host_set->pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); +} + +/** + * ata_host_init - + * @host: + * @ent: + * @port_no: + * + * LOCKING: + * + */ + +static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, + struct ata_host_set *host_set, + struct ata_probe_ent *ent, unsigned int port_no) +{ + unsigned int i; + + host->max_id = 16; + host->max_lun = 1; + host->max_channel = 1; + host->unique_id = ata_unique_id++; + host->max_cmd_len = 12; + host->pci_dev = ent->pdev; + + ap->flags = ATA_FLAG_PORT_DISABLED; + ap->id = host->unique_id; + ap->host = host; + ap->ctl = ATA_DEVCTL_OBS; + ap->host_set = host_set; + ap->port_no = port_no; + ap->pio_mask = ent->pio_mask; + ap->udma_mask = ent->udma_mask; + ap->flags |= ent->host_flags; + ap->ops = ent->host_info; + ap->thr_state = THR_PROBE_START; + ap->cbl = ATA_CBL_NONE; + ap->device[0].flags = ATA_DFLAG_MASTER; + ap->active_tag = ATA_TAG_POISON; + + /* ata_engine init */ + ap->eng.flags = 0; + INIT_LIST_HEAD(&ap->eng.q); + + for (i = 0; i < ATA_MAX_DEVICES; i++) + ap->device[i].devno = i; + + init_completion(&ap->thr_exited); + init_MUTEX_LOCKED(&ap->probe_sem); + init_MUTEX_LOCKED(&ap->sem); + init_MUTEX_LOCKED(&ap->thr_sem); + + init_timer(&ap->thr_timer); + ap->thr_timer.function = ata_thread_timer; + ap->thr_timer.data = (unsigned long) ap; + +#ifdef ATA_IRQ_TRAP + ap->stats.unhandled_irq = 1; + ap->stats.idle_irq = 1; +#endif + + memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports)); +} + +/** + * ata_host_add - + * @ent: + * @host_set: + * @port_no: + * + * LOCKING: + * + * RETURNS: + * + */ + +static struct ata_port * ata_host_add(struct ata_probe_ent *ent, + struct ata_host_set *host_set, + unsigned int port_no) +{ + struct pci_dev *pdev = ent->pdev; + struct Scsi_Host *host; + struct ata_port *ap; + + DPRINTK("ENTER\n"); + host = scsi_register(ent->sht, sizeof(struct
ata_port)); + if (!host) + return NULL; + + ap = (struct ata_port *) &host->hostdata[0]; + + ata_host_init(ap, host, host_set, ent, port_no); + + ap->prd = pci_alloc_consistent(pdev, ATA_PRD_TBL_SZ, &ap->prd_dma); + if (!ap->prd) + goto err_out; + DPRINTK("prd alloc, virt %p, dma %x\n", ap->prd, ap->prd_dma); + + ap->thr_pid = kernel_thread(ata_thread, ap, CLONE_FS | CLONE_FILES); + if (ap->thr_pid < 0) { + printk(KERN_ERR "ata%d: unable to start kernel thread\n", + ap->id); + goto err_out_free; + } + + return ap; + +err_out_free: + pci_free_consistent(ap->host_set->pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); +err_out: + scsi_unregister(host); + return NULL; +} + +/** + * ata_device_add - + * @ent: + * + * LOCKING: + * + * RETURNS: + * + */ + +static int ata_device_add(struct ata_probe_ent *ent) +{ + unsigned int count = 0, i; + struct pci_dev *pdev = ent->pdev; + struct ata_host_set *host_set; + + DPRINTK("ENTER\n"); + /* alloc a container for our list of ATA ports (buses) */ + host_set = kmalloc(sizeof(struct ata_host_set) + + (ent->n_ports * sizeof(void *)), GFP_KERNEL); + if (!host_set) + return 0; + memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *))); + spin_lock_init(&host_set->lock); + + host_set->pdev = pdev; + host_set->n_hosts = ent->n_ports; + host_set->irq = ent->irq; + + /* register each port bound to this device */ + for (i = 0; i < ent->n_ports; i++) { + struct ata_port *ap; + + ap = ata_host_add(ent, host_set, i); + if (!ap) + goto err_out; + + host_set->hosts[i] = ap; + + /* print per-port info to dmesg */ + printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX " + "bmdma 0x%lX irq %lu\n", + ap->id, + ap->flags & ATA_FLAG_SATA ? 'S' : 'P', + ata_udma_string(ent->udma_mask), + ap->ioaddr.cmd_addr, + ap->ioaddr.ctl_addr, + ap->ioaddr.bmdma_addr, + ent->irq); + + count++; + } + + if (!count) { + kfree(host_set); + return 0; + } + + /* obtain irq, that is shared between channels */ + if (request_irq(ent->irq, ata_interrupt, ent->irq_flags, + DRV_NAME, host_set)) + goto err_out; + + /* perform each probe synchronously */ + DPRINTK("probe begin\n"); + for (i = 0; i < count; i++) { + struct ata_port *ap; + + ap = host_set->hosts[i]; + + DPRINTK("ata%u: probe begin\n", ap->id); + up(&ap->sem); /* start probe */ + + DPRINTK("ata%u: probe-wait begin\n", ap->id); + down(&ap->probe_sem); /* wait for end */ + + DPRINTK("ata%u: probe-wait end\n", ap->id); + } + + pci_set_drvdata(pdev, host_set); + + VPRINTK("EXIT, returning %u\n", ent->n_ports); + return ent->n_ports; /* success */ + +err_out: + for (i = 0; i < count; i++) { + ata_host_remove(host_set->hosts[i], 1); + } + kfree(host_set); + VPRINTK("EXIT, returning 0\n"); + return 0; +} + +/** + * ata_scsi_detect - + * @sht: + * + * LOCKING: + * + * RETURNS: + * + */ + +int ata_scsi_detect(Scsi_Host_Template *sht) +{ + struct list_head *node; + struct ata_probe_ent *ent; + int count = 0; + + VPRINTK("ENTER\n"); + + sht->use_new_eh_code = 1; /* IORL hack, part deux */ + + spin_lock(&ata_module_lock); + while (!list_empty(&ata_probe_list)) { + node = ata_probe_list.next; + ent = list_entry(node, struct ata_probe_ent, node); + list_del(node); + + spin_unlock(&ata_module_lock); + + count += ata_device_add(ent); + kfree(ent); + + spin_lock(&ata_module_lock); + } + spin_unlock(&ata_module_lock); + + VPRINTK("EXIT, returning %d\n", count); + return count; +} + +/** + * ata_scsi_release - SCSI layer callback hook for host unload + * @host: libata host to be unloaded + * + * Performs all duties necessary to 
shut down a libata port: + * Kill port kthread, disable port, and release resources. + * + * LOCKING: + * Inherited from SCSI layer. + * + * RETURNS: + * One. + */ + +int ata_scsi_release(struct Scsi_Host *host) +{ + struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; + + DPRINTK("ENTER\n"); + + ata_thread_kill(ap); /* FIXME: check return val */ + + ap->ops->port_disable(ap); + ata_host_remove(ap, 0); + + DPRINTK("EXIT\n"); + return 1; +} + +/** + * ata_pci_init_one - + * @pdev: + * @boards: + * @n_boards: + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * + */ + +int ata_pci_init_one (struct pci_dev *pdev, struct ata_board **boards, + unsigned int n_boards) +{ + struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; + struct ata_board *board1, *board2; + u8 tmp8, mask; + unsigned int legacy_mode = 0; + int rc; + + DPRINTK("ENTER\n"); + + board1 = boards[0]; + if (n_boards > 1) + board2 = boards[1]; + else + board2 = board1; + + if ((board1->host_flags & ATA_FLAG_NO_LEGACY) == 0) { + /* TODO: support transitioning to native mode? */ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = 1; + } + + /* FIXME... */ + if ((!legacy_mode) && (n_boards > 1)) { + printk(KERN_ERR "ata: BUG: native mode, n_boards > 1\n"); + return -EINVAL; + } + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->pdev = pdev; + INIT_LIST_HEAD(&probe_ent->node); + + if (legacy_mode) { + probe_ent2 = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent2) { + rc = -ENOMEM; + goto err_out_free_ent; + } + + memset(probe_ent2, 0, sizeof(*probe_ent)); + probe_ent2->pdev = pdev; + INIT_LIST_HEAD(&probe_ent2->node); + } + + probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); + probe_ent->sht = board1->sht; + probe_ent->host_flags = board1->host_flags; + probe_ent->pio_mask = board1->pio_mask; + probe_ent->udma_mask = board1->udma_mask; + probe_ent->host_info = board1->host_info; + + if (legacy_mode) { + probe_ent->port[0].cmd_addr = 0x1f0; + probe_ent->port[0].ctl_addr = 0x3f6; + probe_ent->n_ports = 1; + probe_ent->irq = 14; + + probe_ent2->port[0].cmd_addr = 0x170; + probe_ent2->port[0].ctl_addr = 0x376; + probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; + probe_ent2->n_ports = 1; + probe_ent2->irq = 15; + + probe_ent2->sht = board2->sht; + probe_ent2->host_flags = board2->host_flags; + probe_ent2->pio_mask = board2->pio_mask; + probe_ent2->udma_mask = board2->udma_mask; + probe_ent2->host_info = board2->host_info; + } else { + probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); + probe_ent->port[0].ctl_addr = + pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; + + probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); + probe_ent->port[1].ctl_addr = + pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; + probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8; + + probe_ent->n_ports = 2; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + } + + pci_set_master(pdev); + + spin_lock(&ata_module_lock); + list_add_tail(&probe_ent->node, &ata_probe_list); + if (legacy_mode) + list_add_tail(&probe_ent2->node, &ata_probe_list); + 
spin_unlock(&ata_module_lock); + + return 0; + +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +/** + * ata_pci_remove_one - PCI layer callback for device removal + * @pdev: PCI device that was removed + * + * PCI layer indicates to libata via this hook that + * a hot-unplug or module unload event has occurred. + * Handle this by unregistering all objects associated + * with this PCI device. Free those objects. Then finally + * release PCI resources and disable device. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + */ + +void ata_pci_remove_one (struct pci_dev *pdev) +{ + struct ata_host_set *host_set = pci_get_drvdata(pdev); + struct ata_port *ap; + Scsi_Host_Template *sht; + int rc; + + /* FIXME: this unregisters all hosts attached to the + * Scsi_Host_Template given. We _might_ have multiple + * templates (though we don't ATM), so this is ok... for now. + */ + ap = host_set->hosts[0]; + sht = ap->host->hostt; + rc = scsi_unregister_module(MODULE_SCSI_HA, sht); + /* FIXME: handle 'rc' failure? */ + + free_irq(host_set->irq, host_set); + kfree(host_set); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +/* move to PCI subsystem */ +int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits) +{ + unsigned long tmp = 0; + + switch (bits->width) { + case 1: { + u8 tmp8 = 0; + pci_read_config_byte(pdev, bits->reg, &tmp8); + tmp = tmp8; + break; + } + case 2: { + u16 tmp16 = 0; + pci_read_config_word(pdev, bits->reg, &tmp16); + tmp = tmp16; + break; + } + case 4: { + u32 tmp32 = 0; + pci_read_config_dword(pdev, bits->reg, &tmp32); + tmp = tmp32; + break; + } + + default: + return -EINVAL; + } + + tmp &= bits->mask; + + return (tmp == bits->val) ? 1 : 0; +} + +/** + * ata_init - + * + * LOCKING: + * + * RETURNS: + * + */ + +static int __init ata_init(void) +{ + printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); + return 0; +} + +module_init(ata_init); + +EXPORT_SYMBOL(pci_test_config_bits); +EXPORT_SYMBOL(ata_tf_load_pio); +EXPORT_SYMBOL(ata_tf_load_mmio); +EXPORT_SYMBOL(ata_tf_read_pio); +EXPORT_SYMBOL(ata_tf_read_mmio); +EXPORT_SYMBOL(ata_bmdma_start_pio); +EXPORT_SYMBOL(ata_bmdma_start_mmio); +EXPORT_SYMBOL(ata_port_probe); +EXPORT_SYMBOL(ata_port_disable); +EXPORT_SYMBOL(ata_pci_init_one); +EXPORT_SYMBOL(ata_pci_remove_one); +EXPORT_SYMBOL(ata_scsi_detect); +EXPORT_SYMBOL(ata_scsi_release); +EXPORT_SYMBOL(ata_scsi_queuecmd); +EXPORT_SYMBOL(ata_scsi_error); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/libata.h linux.22-ac2/drivers/scsi/libata.h --- linux.vanilla/drivers/scsi/libata.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/scsi/libata.h 2003-09-01 13:25:08.000000000 +0100 @@ -0,0 +1,97 @@ +/* + libata.h - helper library for ATA + + Copyright 2003 Red Hat, Inc. All rights reserved. + Copyright 2003 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above.
If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#ifndef __LIBATA_H__ +#define __LIBATA_H__ + +#define DRV_NAME "libata" +#define DRV_VERSION "0.71" /* must be exactly four chars */ + +struct ata_scsi_args { + struct ata_port *ap; + struct ata_device *dev; + Scsi_Cmnd *cmd; + void (*done)(Scsi_Cmnd *); +}; + + +/* libata-core.c */ +extern unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s, + unsigned int ofs, unsigned int len); +extern void ata_eng_timeout(struct ata_port *ap); +extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, + struct ata_device *dev, + Scsi_Cmnd *cmd, + void (*done)(Scsi_Cmnd *)); +extern int ata_qc_issue(struct ata_queued_cmd *qc); +extern void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep); +extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_thread_wake(struct ata_port *ap, unsigned int thr_state); + + +/* libata-scsi.c */ +extern void ata_to_sense_error(struct ata_queued_cmd *qc); +extern void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev, + Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *), + unsigned int cmd_size); +extern int ata_scsi_error(struct Scsi_Host *host); +extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); + +extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); + +extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern void ata_scsi_badcmd(Scsi_Cmnd *cmd, + void (*done)(Scsi_Cmnd *), + u8 asc, u8 ascq); +extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, + unsigned int (*actor) (struct ata_scsi_args *args, + u8 *rbuf, unsigned int buflen)); + +static inline void ata_bad_scsiop(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +{ + ata_scsi_badcmd(cmd, done, 0x20, 0x00); +} + +static inline void ata_bad_cdb(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +{ + ata_scsi_badcmd(cmd, done, 0x24, 0x00); +} + +#endif /* __LIBATA_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/libata-scsi.c linux.22-ac2/drivers/scsi/libata-scsi.c --- linux.vanilla/drivers/scsi/libata-scsi.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/scsi/libata-scsi.c 2003-09-01 13:25:08.000000000 +0100 @@ -0,0 +1,1074 @@ +/* + libata-scsi.c - helper library for ATA + + Copyright 2003 Red Hat, Inc. All rights reserved. 
+ Copyright 2003 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#include "libata.h" + +/** + * ata_to_sense_error - + * @qc: + * @cmd: + * + * LOCKING: + */ + +void ata_to_sense_error(struct ata_queued_cmd *qc) +{ + Scsi_Cmnd *cmd = qc->scsicmd; + + cmd->result = SAM_STAT_CHECK_CONDITION; + + cmd->sense_buffer[0] = 0x70; + cmd->sense_buffer[2] = MEDIUM_ERROR; + cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */ + + /* additional-sense-code[-qualifier] */ + if ((qc->flags & ATA_QCFLAG_WRITE) == 0) { + cmd->sense_buffer[12] = 0x11; /* "unrecovered read error" */ + cmd->sense_buffer[13] = 0x04; + } else { + cmd->sense_buffer[12] = 0x0C; /* "write error - */ + cmd->sense_buffer[13] = 0x02; /* auto-reallocation failed" */ + } +} + +/** + * ata_scsi_error - SCSI layer error handler callback + * @host: SCSI host on which error occurred + * + * Handles SCSI-layer-thrown error events. + * + * LOCKING: + * Inherited from SCSI layer (none, can sleep) + * + * RETURNS: + * Zero. 
+ */ + +int ata_scsi_error(struct Scsi_Host *host) +{ + struct ata_port *ap; + + DPRINTK("ENTER\n"); + + ap = (struct ata_port *) &host->hostdata[0]; + ata_eng_timeout(ap); + + DPRINTK("EXIT\n"); + return 0; +} + +/** + * ata_scsi_rw_xlat - + * @qc: + * @scsicmd: + * @cmd_size: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd, + unsigned int cmd_size) +{ + struct ata_taskfile *tf = &qc->tf; + unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; + unsigned int dma = qc->flags & ATA_QCFLAG_DMA; + + qc->cursect = qc->cursg = qc->cursg_ofs = 0; + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->hob_nsect = 0; + tf->hob_lbal = 0; + tf->hob_lbam = 0; + tf->hob_lbah = 0; + + if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || + scsicmd[0] == READ_16) { + if (likely(dma)) { + if (lba48) + tf->command = ATA_CMD_READ_EXT; + else + tf->command = ATA_CMD_READ; + tf->protocol = ATA_PROT_DMA_READ; + } else { + if (lba48) + tf->command = ATA_CMD_PIO_READ_EXT; + else + tf->command = ATA_CMD_PIO_READ; + tf->protocol = ATA_PROT_PIO_READ; + } + qc->flags &= ~ATA_QCFLAG_WRITE; + VPRINTK("reading\n"); + } else { + if (likely(dma)) { + if (lba48) + tf->command = ATA_CMD_WRITE_EXT; + else + tf->command = ATA_CMD_WRITE; + tf->protocol = ATA_PROT_DMA_WRITE; + } else { + if (lba48) + tf->command = ATA_CMD_PIO_WRITE_EXT; + else + tf->command = ATA_CMD_PIO_WRITE; + tf->protocol = ATA_PROT_PIO_WRITE; + } + qc->flags |= ATA_QCFLAG_WRITE; + VPRINTK("writing\n"); + } + + if (cmd_size == 10) { + if (lba48) { + tf->hob_nsect = scsicmd[7]; + tf->hob_lbal = scsicmd[2]; + + qc->nsect = ((unsigned int)scsicmd[7] << 8) | + scsicmd[8]; + } else { + /* if we don't support LBA48 addressing, the request + * -may- be too large. 
*/ + if ((scsicmd[2] & 0xf0) || scsicmd[7]) + return 1; + + /* stores LBA27:24 in lower 4 bits of device reg */ + tf->device |= scsicmd[2]; + + qc->nsect = scsicmd[8]; + } + tf->device |= ATA_LBA; + + tf->nsect = scsicmd[8]; + tf->lbal = scsicmd[5]; + tf->lbam = scsicmd[4]; + tf->lbah = scsicmd[3]; + + VPRINTK("ten-byte command\n"); + return 0; + } + + if (cmd_size == 6) { + qc->nsect = tf->nsect = scsicmd[4]; + tf->lbal = scsicmd[3]; + tf->lbam = scsicmd[2]; + tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ + + VPRINTK("six-byte command\n"); + return 0; + } + + if (cmd_size == 16) { + /* rule out impossible LBAs and sector counts */ + if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11]) + return 1; + + if (lba48) { + tf->hob_nsect = scsicmd[12]; + tf->hob_lbal = scsicmd[6]; + tf->hob_lbam = scsicmd[5]; + tf->hob_lbah = scsicmd[4]; + + qc->nsect = ((unsigned int)scsicmd[12] << 8) | + scsicmd[13]; + } else { + /* once again, filter out impossible non-zero values */ + if (scsicmd[4] || scsicmd[5] || scsicmd[12] || + (scsicmd[6] & 0xf0)) + return 1; + + /* stores LBA27:24 in lower 4 bits of device reg */ + tf->device |= scsicmd[2]; + + qc->nsect = scsicmd[13]; + } + tf->device |= ATA_LBA; + + tf->nsect = scsicmd[13]; + tf->lbal = scsicmd[9]; + tf->lbam = scsicmd[8]; + tf->lbah = scsicmd[7]; + + VPRINTK("sixteen-byte command\n"); + return 0; + } + + DPRINTK("no-byte command\n"); + return 1; +} + +/** + * ata_scsi_rw_queue - + * @ap: + * @dev: + * @cmd: + * @done: + * @cmd_size: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev, + Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *), + unsigned int cmd_size) +{ + struct ata_queued_cmd *qc; + u8 *scsicmd = cmd->cmnd; + + VPRINTK("ENTER\n"); + + if (unlikely(cmd->request_bufflen < 1)) { + printk(KERN_WARNING "ata%u(%u): empty request buffer\n", + ap->id, dev->devno); + goto err_out; + } + + qc = ata_qc_new_init(ap, dev, cmd, done); + if (!qc) + return; + + qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */ + + if (ata_scsi_rw_xlat(qc, scsicmd, cmd_size)) + goto err_out; + + /* select device, send command to hardware */ + if (ata_qc_issue(qc)) + goto err_out; + + VPRINTK("EXIT\n"); + return; + +err_out: + ata_bad_cdb(cmd, done); + DPRINTK("EXIT - badcmd\n"); +} + +/** + * ata_scsi_rbuf_get - Map response buffer. + * @cmd: SCSI command containing buffer to be mapped. + * @buf_out: Pointer to mapped area. + * + * Maps buffer contained within SCSI command @cmd. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * FIXME: kmap inside spin_lock_irqsave ok? + * + * RETURNS: + * Length of response buffer. + */ + +static unsigned int ata_scsi_rbuf_get(Scsi_Cmnd *cmd, u8 **buf_out) +{ + u8 *buf; + unsigned int buflen; + + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + buf = kmap(sg->page) + sg->offset; + buflen = sg->length; + } else { + buf = cmd->request_buffer; + buflen = cmd->request_bufflen; + } + + memset(buf, 0, buflen); + *buf_out = buf; + return buflen; +} + +/** + * ata_scsi_rbuf_put - Unmap response buffer. + * @cmd: SCSI command containing buffer to be unmapped. + * + * Unmaps response buffer contained within @cmd. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static inline void ata_scsi_rbuf_put(Scsi_Cmnd *cmd) +{ + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + kunmap(sg->page); + } +} + +/** + * ata_scsi_rbuf_fill - wrapper for SCSI command simulators + * @args: Port / device / SCSI command of interest. + * @actor: Callback hook for desired SCSI command simulator + * + * Takes care of the hard work of simulating a SCSI command... + * Mapping the response buffer, calling the command's handler, + * and handling the handler's return value. This return value + * indicates whether the handler wishes the SCSI command to be + * completed successfully, or not. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_scsi_rbuf_fill(struct ata_scsi_args *args, + unsigned int (*actor) (struct ata_scsi_args *args, + u8 *rbuf, unsigned int buflen)) +{ + u8 *rbuf; + unsigned int buflen, rc; + Scsi_Cmnd *cmd = args->cmd; + + buflen = ata_scsi_rbuf_get(cmd, &rbuf); + rc = actor(args, rbuf, buflen); + ata_scsi_rbuf_put(cmd); + + if (rc) + ata_bad_cdb(cmd, args->done); + else { + cmd->result = SAM_STAT_GOOD; + args->done(cmd); + } +} + +/** + * ata_scsiop_inq_std - Simulate INQUIRY command + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns standard device identification data associated + * with non-EVPD INQUIRY command output. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + const u8 hdr[] = { + TYPE_DISK, + 0, + 0x5, /* claim SPC-3 version compatibility */ + 2, + 96 - 4 + }; + + VPRINTK("ENTER\n"); + + memcpy(rbuf, hdr, sizeof(hdr)); + + if (buflen > 36) { + memcpy(&rbuf[8], args->dev->vendor, 8); + memcpy(&rbuf[16], args->dev->product, 16); + memcpy(&rbuf[32], DRV_VERSION, 4); + } + + if (buflen > 63) { + const u8 versions[] = { + 0x60, /* SAM-3 (no version claimed) */ + + 0x03, + 0x20, /* SBC-2 (no version claimed) */ + + 0x02, + 0x60 /* SPC-3 (no version claimed) */ + }; + + memcpy(rbuf + 59, versions, sizeof(versions)); + } + + return 0; +} + +/** + * ata_scsiop_inq_00 - Simulate INQUIRY EVPD page 0, list of pages + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns list of inquiry EVPD pages available. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + const u8 pages[] = { + 0x00, /* page 0x00, this page */ + 0x80, /* page 0x80, unit serial no page */ + 0x83 /* page 0x83, device ident page */ + }; + rbuf[3] = sizeof(pages); /* number of supported EVPD pages */ + + if (buflen > 6) + memcpy(rbuf + 4, pages, sizeof(pages)); + + return 0; +} + +/** + * ata_scsiop_inq_80 - Simulate INQUIRY EVPD page 80, device serial number + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns ATA device serial number. 
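ata_scsi_rbuf_fill() above reduces every simulated command to a callback taking (args, rbuf, buflen) and returning zero on success. A hypothetical handler, named here purely for illustration and not part of this patch, would plug in like this:

/* Illustrative sketch: a do-nothing simulator that completes the
 * command successfully; rbuf has already been zeroed by
 * ata_scsi_rbuf_get().
 */
static unsigned int ata_scsiop_example(struct ata_scsi_args *args,
				       u8 *rbuf, unsigned int buflen)
{
	return 0;
}

/* caller side, mirroring the existing simulators:
 *	ata_scsi_rbuf_fill(&args, ata_scsiop_example);
 */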
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + const u8 hdr[] = { + 0, + 0x80, /* this page code */ + 0, + ATA_SERNO_LEN, /* page len */ + }; + memcpy(rbuf, hdr, sizeof(hdr)); + + if (buflen > (ATA_SERNO_LEN + 4)) + ata_dev_id_string(args->dev, (unsigned char *) &rbuf[4], + ATA_ID_SERNO_OFS, ATA_SERNO_LEN); + + return 0; +} + +static const char *inq_83_str = "Linux ATA-SCSI simulator"; + +/** + * ata_scsiop_inq_83 - Simulate INQUIRY EVPD page 83, device identity + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns device identification. Currently hardcoded to + * return "Linux ATA-SCSI simulator". + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + rbuf[1] = 0x83; /* this page code */ + rbuf[3] = 4 + strlen(inq_83_str); /* page len */ + + /* our one and only identification descriptor (vendor-specific) */ + if (buflen > (strlen(inq_83_str) + 4 + 4)) { + rbuf[4 + 0] = 2; /* code set: ASCII */ + rbuf[4 + 3] = strlen(inq_83_str); + memcpy(rbuf + 4 + 4, inq_83_str, strlen(inq_83_str)); + } + + return 0; +} + +/** + * ata_scsiop_noop - + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * No operation. Simply returns success to caller, to indicate + * that the caller should successfully complete this SCSI command. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + VPRINTK("ENTER\n"); + return 0; +} + +/** + * ata_scsiop_sync_cache - Simulate SYNCHRONIZE CACHE command + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Initiates flush of device's cache. + * + * TODO: + * Actually do this :) + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + VPRINTK("ENTER\n"); + + /* FIXME */ + return 1; +} + +/** + * ata_msense_push - Push data onto MODE SENSE data output buffer + * @ptr_io: (input/output) Location to store more output data + * @last: End of output data buffer + * @buf: Pointer to BLOB being added to output buffer + * @buflen: Length of BLOB + * + * Store MODE SENSE data on an output buffer. + * + * LOCKING: + * None. + */ + +static void ata_msense_push(u8 **ptr_io, const u8 *last, + const u8 *buf, unsigned int buflen) +{ + u8 *ptr = *ptr_io; + + if ((ptr + buflen - 1) > last) + return; + + memcpy(ptr, buf, buflen); + + ptr += buflen; + + *ptr_io = ptr; +} + +/** + * ata_msense_caching - Simulate MODE SENSE caching info page + * @dev: + * @ptr_io: + * @last: + * + * Generate a caching info page, which conditionally indicates + * write caching to the SCSI layer, depending on device + * capabilities. + * + * LOCKING: + * None. 
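ata_msense_push() above is a bounds-checked append: a block that would run past @last is silently dropped instead of overflowing the response buffer. A sketch of the calling pattern, with illustrative data that is not part of this patch:

/* Illustrative sketch: build a response from two blobs without ever
 * writing past the end of rbuf.
 */
static void example_msense_fill(u8 *rbuf, unsigned int buflen)
{
	static const u8 blob_a[] = { 0x01, 0x02 };
	static const u8 blob_b[] = { 0x03, 0x04, 0x05 };
	u8 *p = rbuf;
	const u8 *last = rbuf + buflen - 1;

	ata_msense_push(&p, last, blob_a, sizeof(blob_a));
	ata_msense_push(&p, last, blob_b, sizeof(blob_b)); /* dropped if too big */
}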
+ */ + +static unsigned int ata_msense_caching(struct ata_device *dev, u8 **ptr_io, + const u8 *last) +{ + u8 page[7] = { 0xf, 0, 0x10, 0, 0x8, 0xa, 0 }; + if (dev->flags & ATA_DFLAG_WCACHE) + page[6] = 0x4; + + ata_msense_push(ptr_io, last, page, sizeof(page)); + return sizeof(page); +} + +/** + * ata_msense_ctl_mode - Simulate MODE SENSE control mode page + * @dev: + * @ptr_io: + * @last: + * + * Generate a generic MODE SENSE control mode page. + * + * LOCKING: + * None. + */ + +static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last) +{ + const u8 page[] = {0xa, 0xa, 2, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 30}; + + ata_msense_push(ptr_io, last, page, sizeof(page)); + return sizeof(page); +} + +/** + * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Simulate MODE SENSE commands. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + u8 *scsicmd = args->cmd->cmnd, *p, *last; + struct ata_device *dev = args->dev; + unsigned int page_control, six_byte, output_len; + + VPRINTK("ENTER\n"); + + six_byte = (scsicmd[0] == MODE_SENSE); + + /* we only support saved and current values (which we treat + * in the same manner) + */ + page_control = scsicmd[2] >> 6; + if ((page_control != 0) && (page_control != 3)) + return 1; + + if (six_byte) + output_len = 4; + else + output_len = 8; + + p = rbuf + output_len; + last = rbuf + buflen - 1; + + switch(scsicmd[2] & 0x3f) { + case 0x08: /* caching */ + output_len += ata_msense_caching(dev, &p, last); + break; + + case 0x0a: { /* control mode */ + output_len += ata_msense_ctl_mode(&p, last); + break; + } + + case 0x3f: /* all pages */ + output_len += ata_msense_caching(dev, &p, last); + output_len += ata_msense_ctl_mode(&p, last); + break; + + default: /* invalid page code */ + return 1; + } + + if (six_byte) { + output_len--; + rbuf[0] = output_len; + } else { + output_len -= 2; + rbuf[0] = output_len >> 8; + rbuf[1] = output_len; + } + + return 0; +} + +/** + * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Simulate READ CAPACITY commands. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + u64 n_sectors = args->dev->n_sectors; + u32 tmp; + + VPRINTK("ENTER\n"); + + n_sectors--; /* one off */ + + tmp = n_sectors; /* note: truncates, if lba48 */ + if (args->cmd->cmnd[0] == READ_CAPACITY) { + rbuf[0] = tmp >> (8 * 3); + rbuf[1] = tmp >> (8 * 2); + rbuf[2] = tmp >> (8 * 1); + rbuf[3] = tmp; + + tmp = ATA_SECT_SIZE; + rbuf[6] = tmp >> 8; + rbuf[7] = tmp; + + } else { + rbuf[2] = n_sectors >> (8 * 7); + rbuf[3] = n_sectors >> (8 * 6); + rbuf[4] = n_sectors >> (8 * 5); + rbuf[5] = n_sectors >> (8 * 4); + rbuf[6] = tmp >> (8 * 3); + rbuf[7] = tmp >> (8 * 2); + rbuf[8] = tmp >> (8 * 1); + rbuf[9] = tmp; + + tmp = ATA_SECT_SIZE; + rbuf[12] = tmp >> 8; + rbuf[13] = tmp; + } + + return 0; +} + +/** + * ata_scsiop_report_luns - Simulate REPORT LUNS command + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 
+ * @buflen: Response buffer length. + * + * Simulate REPORT LUNS command. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + VPRINTK("ENTER\n"); + rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ + + return 0; +} + +/** + * ata_scsi_badcmd - + * @cmd: + * @done: + * @asc: + * @ascq: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_scsi_badcmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *), u8 asc, u8 ascq) +{ + DPRINTK("ENTER\n"); + cmd->result = SAM_STAT_CHECK_CONDITION; + + cmd->sense_buffer[0] = 0x70; + cmd->sense_buffer[2] = ILLEGAL_REQUEST; + cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */ + cmd->sense_buffer[12] = asc; + cmd->sense_buffer[13] = ascq; + + done(cmd); +} + +/** + * atapi_scsi_queuecmd - Send CDB to ATAPI device + * @ap: Port to which ATAPI device is attached. + * @dev: Target device for CDB. + * @cmd: SCSI command being sent to device. + * @done: SCSI command completion function. + * + * Sends CDB to ATAPI device. If the Linux SCSI layer sends a + * non-data command, then this function handles the command + * directly, via polling. Otherwise, the bmdma engine is started. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static void atapi_scsi_queuecmd(struct ata_port *ap, struct ata_device *dev, + Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +{ + struct ata_queued_cmd *qc; + u8 *scsicmd = cmd->cmnd, status; + unsigned int doing_dma = 0; + + VPRINTK("ENTER, drv_stat = 0x%x\n", ata_chk_status(ap)); + + if (cmd->sc_data_direction == SCSI_DATA_UNKNOWN) { + DPRINTK("unknown data, scsicmd 0x%x\n", scsicmd[0]); + ata_bad_cdb(cmd, done); + return; + } + + switch(scsicmd[0]) { + case READ_6: + case WRITE_6: + case MODE_SELECT: + case MODE_SENSE: + DPRINTK("read6/write6/modesel/modesense trap\n"); + ata_bad_scsiop(cmd, done); + return; + + default: + /* do nothing */ + break; + } + + qc = ata_qc_new_init(ap, dev, cmd, done); + if (!qc) { + printk(KERN_ERR "ata%u: command queue empty\n", ap->id); + return; + } + + qc->flags |= ATA_QCFLAG_ATAPI; + + qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + if (cmd->sc_data_direction == SCSI_DATA_WRITE) { + qc->flags |= ATA_QCFLAG_WRITE; + DPRINTK("direction: write\n"); + } + + qc->tf.command = ATA_CMD_PACKET; + + /* set up SG table */ + if (cmd->sc_data_direction == SCSI_DATA_NONE) { + ap->active_tag = qc->tag; + qc->flags |= ATA_QCFLAG_ACTIVE | ATA_QCFLAG_POLL; + qc->tf.protocol = ATA_PROT_ATAPI; + + ata_dev_select(ap, dev->devno, 1, 0); + + DPRINTK("direction: none\n"); + qc->tf.ctl |= ATA_NIEN; /* disable interrupts */ + ata_tf_to_host_nolock(ap, &qc->tf); + } else { + qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */ + qc->tf.feature = ATAPI_PKT_DMA; + qc->tf.protocol = ATA_PROT_ATAPI_DMA; + + doing_dma = 1; + + /* select device, send command to hardware */ + if (ata_qc_issue(qc)) + goto err_out; + } + + status = ata_busy_wait(ap, ATA_BUSY, 1000); + if (status & ATA_BUSY) { + ata_thread_wake(ap, THR_PACKET); + return; + } + if ((status & ATA_DRQ) == 0) + goto err_out; + + /* FIXME: mmio-ize */ + DPRINTK("writing cdb\n"); + outsl(ap->ioaddr.cmd_addr + ATA_REG_DATA, + scsicmd, ap->host->max_cmd_len / 4); + + if (!doing_dma) + ata_thread_wake(ap, THR_PACKET); + + VPRINTK("EXIT\n"); + return; + +err_out: + if (!doing_dma) + ata_irq_on(ap); /* re-enable interrupts */ + ata_bad_cdb(cmd, done); + DPRINTK("EXIT - badcmd\n"); +} + +/** + * ata_scsi_queuecmd - 
Issue SCSI cdb to libata-managed device + * @cmd: SCSI command to be sent + * @done: Completion function, called when command is complete + * + * In some cases, this function translates SCSI commands into + * ATA taskfiles, and queues the taskfiles to be sent to + * hardware. In other cases, this function simulates a + * SCSI device by evaluating and responding to certain + * SCSI commands. This creates the overall effect of + * ATA and ATAPI devices appearing as SCSI devices. + * + * LOCKING: + * Releases scsi-layer-held lock, and obtains host_set lock. + * + * RETURNS: + * Zero. + */ + +int ata_scsi_queuecmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +{ + u8 *scsicmd = cmd->cmnd; + struct ata_port *ap; + struct ata_device *dev; + struct ata_scsi_args args; + const unsigned int atapi_support = +#ifdef CONFIG_SCSI_ATA_ATAPI + 1; +#else + 0; +#endif + + /* Note: spin_lock_irqsave is held by caller... */ + spin_unlock(&io_request_lock); + + ap = (struct ata_port *) &cmd->host->hostdata[0]; + + DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + ap->id, + cmd->channel, cmd->target, cmd->lun, + scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3], + scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7], + scsicmd[8]); + + /* skip commands not addressed to targets we care about */ + if ((cmd->channel != 0) || (cmd->lun != 0) || + (cmd->target >= ATA_MAX_DEVICES)) { + cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? */ + done(cmd); + goto out; + } + + spin_lock(&ap->host_set->lock); + + dev = &ap->device[cmd->target]; + + if (!ata_dev_present(dev)) { + DPRINTK("no device\n"); + cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? */ + done(cmd); + goto out_unlock; + } + + if (dev->class == ATA_DEV_ATAPI) { + if (atapi_support) + atapi_scsi_queuecmd(ap, dev, cmd, done); + else { + cmd->result = (DID_BAD_TARGET << 16); /* correct? */ + done(cmd); + } + goto out_unlock; + } + + /* fast path */ + switch(scsicmd[0]) { + case READ_6: + case WRITE_6: + ata_scsi_rw_queue(ap, dev, cmd, done, 6); + goto out_unlock; + + case READ_10: + case WRITE_10: + ata_scsi_rw_queue(ap, dev, cmd, done, 10); + goto out_unlock; + + case READ_16: + case WRITE_16: + ata_scsi_rw_queue(ap, dev, cmd, done, 16); + goto out_unlock; + + default: + /* do nothing */ + break; + } + + /* + * slow path + */ + + args.ap = ap; + args.dev = dev; + args.cmd = cmd; + args.done = done; + + switch(scsicmd[0]) { + case TEST_UNIT_READY: /* FIXME: correct? */ + case FORMAT_UNIT: /* FIXME: correct? */ + case SEND_DIAGNOSTIC: /* FIXME: correct? */ + ata_scsi_rbuf_fill(&args, ata_scsiop_noop); + break; + + case INQUIRY: + if (scsicmd[1] & 2) /* is CmdDt set? */ + ata_bad_cdb(cmd, done); + else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); + else if (scsicmd[2] == 0x00) + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); + else if (scsicmd[2] == 0x80) + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); + else if (scsicmd[2] == 0x83) + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); + else + ata_bad_cdb(cmd, done); + break; + + case MODE_SENSE: + case MODE_SENSE_10: + ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); + break; + + case MODE_SELECT: /* unconditionally return */ + case MODE_SELECT_10: /* bad-field-in-cdb */ + ata_bad_cdb(cmd, done); + break; + + case SYNCHRONIZE_CACHE: + if ((dev->flags & ATA_DFLAG_WCACHE) == 0) + ata_bad_scsiop(cmd, done); + else + ata_scsi_rbuf_fill(&args, ata_scsiop_sync_cache); + break; + + case READ_CAPACITY: + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); + break; + + case SERVICE_ACTION_IN: + if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); + else + ata_bad_cdb(cmd, done); + break; + + case REPORT_LUNS: + ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); + break; + + /* mandantory commands we haven't implemented yet */ + case REQUEST_SENSE: + + /* all other commands */ + default: + ata_bad_scsiop(cmd, done); + break; + } + +out_unlock: + spin_unlock(&ap->host_set->lock); +out: + spin_lock(&io_request_lock); + return 0; +} + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/Makefile linux.22-ac2/drivers/scsi/Makefile --- linux.vanilla/drivers/scsi/Makefile 2003-08-28 16:45:37.000000000 +0100 +++ linux.22-ac2/drivers/scsi/Makefile 2003-09-01 13:25:08.000000000 +0100 @@ -21,7 +21,7 @@ O_TARGET := scsidrv.o -export-objs := scsi_syms.o 53c700.o +export-objs := scsi_syms.o 53c700.o libata-core.o mod-subdirs := pcmcia ../acorn/scsi @@ -110,6 +110,7 @@ obj-$(CONFIG_SCSI_DC390T) += tmscsim.o obj-$(CONFIG_SCSI_AM53C974) += AM53C974.o obj-$(CONFIG_SCSI_MEGARAID) += megaraid.o +obj-$(CONFIG_SCSI_MEGARAID2) += megaraid2.o obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_SUNESP) += esp.o obj-$(CONFIG_SCSI_GDTH) += gdth.o @@ -132,6 +133,8 @@ obj-$(CONFIG_SCSI_CPQFCTS) += cpqfc.o obj-$(CONFIG_SCSI_LASI700) += lasi700.o 53c700.o obj-$(CONFIG_SCSI_NSP32) += nsp32.o +obj-$(CONFIG_SCSI_ATA_PIIX) += ata_piix.o libata.o +obj-$(CONFIG_SCSI_SATA_VIA) += sata_via.o libata.o subdir-$(CONFIG_ARCH_ACORN) += ../acorn/scsi obj-$(CONFIG_ARCH_ACORN) += ../acorn/scsi/acorn-scsi.o @@ -143,7 +146,7 @@ obj-$(CONFIG_CHR_DEV_SG) += sg.o list-multi := scsi_mod.o sd_mod.o sr_mod.o initio.o a100u2w.o cpqfc.o \ - zalon7xx_mod.o + zalon7xx_mod.o libata.o scsi_mod-objs := scsi.o hosts.o scsi_ioctl.o constants.o \ scsicam.o scsi_proc.o scsi_error.o \ scsi_obsolete.o scsi_queue.o scsi_lib.o \ @@ -156,6 +159,7 @@ zalon7xx_mod-objs := zalon7xx.o ncr53c8xx.o cpqfc-objs := cpqfcTSinit.o cpqfcTScontrol.o cpqfcTSi2c.o \ cpqfcTSworker.o cpqfcTStrigger.o +libata-objs := libata-core.o libata-scsi.o include $(TOPDIR)/Rules.make @@ -181,6 +185,9 @@ cpqfc.o: $(cpqfc-objs) $(LD) -r -o $@ $(cpqfc-objs) +libata.o: $(libata-objs) + $(LD) -r -o $@ $(libata-objs) + 53c8xx_d.h: 53c7,8xx.scr script_asm.pl ln -sf 53c7,8xx.scr fake8.c $(CPP) $(CPPFLAGS) -traditional -DCHIP=810 fake8.c | grep -v '^#' | $(PERL) script_asm.pl diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/megaraid2.c linux.22-ac2/drivers/scsi/megaraid2.c --- linux.vanilla/drivers/scsi/megaraid2.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/scsi/megaraid2.c 2003-08-16 20:38:17.000000000 +0100 
@@ -0,0 +1,5615 @@ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2002 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Copyright (c) 2002 Red Hat, Inc. All rights reserved. + * - fixes + * - speed-ups (list handling fixes, issued_list, optimizations.) + * - lots of cleanups. + * + * Version : v2.00.7 (Aug 01, 2003) - Atul Mukker + * + * Description: Linux device driver for LSI Logic MegaRAID controller + * + * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493 + * 518, 520, 531, 532 + * + * This driver is supported by LSI Logic, with assistance from Red Hat, Dell, + * and others. Please send updates to the public mailing list + * linux-megaraid-devel@dell.com, and subscribe to and read archives of this + * list at http://lists.us.dell.com/. + * + * For history of changes, see ChangeLog.megaraid. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sd.h" +#include "scsi.h" +#include "hosts.h" + +#include "megaraid2.h" + +MODULE_AUTHOR ("LSI Logic Corporation"); +MODULE_DESCRIPTION ("LSI Logic MegaRAID driver"); +MODULE_LICENSE ("GPL"); + +static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN; +MODULE_PARM(max_cmd_per_lun, "i"); +MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)"); + +static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO; +MODULE_PARM(max_sectors_per_io, "h"); +MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)"); + + +static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT; +MODULE_PARM(max_mbox_busy_wait, "h"); +MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)"); + +#define RDINDOOR(adapter) readl((adapter)->base + 0x20) +#define RDOUTDOOR(adapter) readl((adapter)->base + 0x2C) +#define WRINDOOR(adapter,value) writel(value, (adapter)->base + 0x20) +#define WROUTDOOR(adapter,value) writel(value, (adapter)->base + 0x2C) + +/* + * Global variables + */ + +static int hba_count; +static adapter_t *hba_soft_state[MAX_CONTROLLERS]; +#ifdef CONFIG_PROC_FS +static struct proc_dir_entry *mega_proc_dir_entry; +#endif + +static struct notifier_block mega_notifier = { + .notifier_call = megaraid_reboot_notify +}; + +/* For controller re-ordering */ +static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; + +/* + * The File Operations structure for the serial/ioctl interface of the driver + */ +static struct file_operations megadev_fops = { + .ioctl = megadev_ioctl, + .open = megadev_open, + .release = megadev_close, + .owner = THIS_MODULE, +}; + +/* + * Array to structures for storing the information about the controllers. This + * information is sent to the user level applications, when they do an ioctl + * for this information. 
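The megadev_fops table above is what the driver later passes to register_chrdev() so management applications can reach its ioctl interface; a major of 0 asks for a dynamically assigned number. A minimal 2.4-style sketch of that registration, with an illustrative device name that is not part of this patch:

/* Illustrative sketch: 2.4-era dynamic character device registration. */
static int example_register_ctrl_dev(struct file_operations *fops)
{
	int major = register_chrdev(0, "example_megadev", fops);

	if (major < 0)
		printk(KERN_WARNING "example: char device registration failed\n");

	return major;	/* assigned major, or negative error */
}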
+ */ +static struct mcontroller mcontroller[MAX_CONTROLLERS]; + +/* The current driver version */ +static u32 driver_ver = 0x02000000; + +/* major number used by the device for character interface */ +static int major; + +#define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01) + + +/* + * Debug variable to print some diagnostic messages + */ +static int trace_level; + +/* + * megaraid_validate_parms() + * + * Validate that any module parms passed in + * have proper values. + */ +static void +megaraid_validate_parms(void) +{ + if( (max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN) ) + max_cmd_per_lun = MAX_CMD_PER_LUN; + if( max_mbox_busy_wait > MBOX_BUSY_WAIT ) + max_mbox_busy_wait = MBOX_BUSY_WAIT; +} + + +/** + * megaraid_detect() + * @host_template - Our soft state maintained by mid-layer + * + * the detect entry point for the mid-layer. + * We scan the PCI bus for our controllers and start them. + * + * Note: PCI_DEVICE_ID_PERC4_DI below represents the PERC4/Di class of + * products. All of them share the same vendor id, device id, and subsystem + * vendor id but different subsystem ids. As of now, driver does not use the + * subsystem id. + */ +static int +megaraid_detect(Scsi_Host_Template *host_template) +{ + int i; + u16 dev_sw_table[] = { /* Table of all supported + vendor/device ids */ + + PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DISCOVERY, + PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4_DI, + PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4_QC_VERDE, + PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID, + PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2, + PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_AMI_MEGARAID3 }; + + + printk(KERN_NOTICE "megaraid: " MEGARAID_VERSION); + + megaraid_validate_parms(); + + /* + * Scan PCI bus for our all devices. + */ + for( i = 0; i < sizeof(dev_sw_table)/sizeof(u16); i += 2 ) { + + mega_find_card(host_template, dev_sw_table[i], + dev_sw_table[i+1]); + } + + if(hba_count) { + /* + * re-order hosts so that one with bootable logical drive + * comes first + */ + mega_reorder_hosts(); + +#ifdef CONFIG_PROC_FS + mega_proc_dir_entry = proc_mkdir("megaraid", &proc_root); + + if(!mega_proc_dir_entry) { + printk(KERN_WARNING + "megaraid: failed to create megaraid root\n"); + } + else { + for(i = 0; i < hba_count; i++) { + mega_create_proc_entry(i, mega_proc_dir_entry); + } + } +#endif + + /* + * Register the driver as a character device, for applications + * to access it for ioctls. + * First argument (major) to register_chrdev implies a dynamic + * major number allocation. + */ + major = register_chrdev(0, "megadev", &megadev_fops); + + /* + * Register the Shutdown Notification hook in kernel + */ + if(register_reboot_notifier(&mega_notifier)) { + printk(KERN_WARNING + "MegaRAID Shutdown routine not registered!!\n"); + } + + } + + return hba_count; +} + + + +/** + * mega_find_card() - find and start this controller + * @host_template - Our soft state maintained by mid-layer + * @pci_vendor - pci vendor id for this controller + * @pci_device - pci device id for this controller + * + * Scans the PCI bus for this vendor and device id combination, setup the + * resources, and register ourselves as a SCSI HBA driver, and setup all + * parameters for our soft state. + * + * This routine also checks for some buggy firmware and ajust the flags + * accordingly. 
+ */ +static void +mega_find_card(Scsi_Host_Template *host_template, u16 pci_vendor, + u16 pci_device) +{ + struct Scsi_Host *host = NULL; + adapter_t *adapter = NULL; + u32 magic64; + unsigned long mega_baseport; + u16 subsysid, subsysvid; + u8 pci_bus; + u8 pci_dev_func; + u8 irq; + struct pci_dev *pdev = NULL; + u8 did_ioremap_f = 0; + u8 did_req_region_f = 0; + u8 did_scsi_reg_f = 0; + u8 got_ipdev_f = 0; + u8 alloc_int_buf_f = 0; + u8 alloc_scb_f = 0; + u8 got_irq_f = 0; + u8 did_setup_mbox_f = 0; + unsigned long tbase; + unsigned long flag = 0; + int i, j; + + while((pdev = pci_find_device(pci_vendor, pci_device, pdev))) { + + // reset flags for all controllers in this class + did_ioremap_f = 0; + did_req_region_f = 0; + did_scsi_reg_f = 0; + got_ipdev_f = 0; + alloc_int_buf_f = 0; + alloc_scb_f = 0; + got_irq_f = 0; + did_setup_mbox_f = 0; + + if(pci_enable_device (pdev)) continue; + + pci_bus = pdev->bus->number; + pci_dev_func = pdev->devfn; + + /* + * For these vendor and device ids, signature offsets are not + * valid and 64 bit is implicit + */ + if( (pci_vendor == PCI_VENDOR_ID_DELL && + pci_device == PCI_DEVICE_ID_PERC4_DI) || + (pci_vendor == PCI_VENDOR_ID_LSI_LOGIC && + pci_device == PCI_DEVICE_ID_PERC4_QC_VERDE) ) { + + flag |= BOARD_64BIT; + } + else { + pci_read_config_dword(pdev, PCI_CONF_AMISIG64, + &magic64); + + if (magic64 == HBA_SIGNATURE_64BIT) + flag |= BOARD_64BIT; + } + + subsysvid = pdev->subsystem_vendor; + subsysid = pdev->subsystem_device; + + /* + * If we do not find the valid subsys vendor id, refuse to + * load the driver. This is part of PCI200X compliance + * We load the driver if subsysvid is 0. + */ + if( subsysvid && (subsysvid != AMI_SUBSYS_VID) && + (subsysvid != DELL_SUBSYS_VID) && + (subsysvid != HP_SUBSYS_VID) && + (subsysvid != INTEL_SUBSYS_VID) && + (subsysvid != LSI_SUBSYS_VID) ) continue; + + + printk(KERN_NOTICE "megaraid: found 0x%4.04x:0x%4.04x:bus %d:", + pci_vendor, pci_device, pci_bus); + + printk("slot %d:func %d\n", + PCI_SLOT(pci_dev_func), PCI_FUNC(pci_dev_func)); + + /* Read the base port and IRQ from PCI */ + mega_baseport = pci_resource_start(pdev, 0); + irq = pdev->irq; + + tbase = mega_baseport; + + if( pci_resource_flags(pdev, 0) & IORESOURCE_MEM ) { + + if( check_mem_region(mega_baseport, 128) ) { + printk(KERN_WARNING + "megaraid: mem region busy!\n"); + continue; + } + request_mem_region(mega_baseport, 128, + "MegaRAID: LSI Logic Corporation."); + + mega_baseport = + (unsigned long)ioremap(mega_baseport, 128); + + if( !mega_baseport ) { + printk(KERN_WARNING + "megaraid: could not map hba memory\n"); + + release_mem_region(tbase, 128); + + continue; + } + + flag |= BOARD_MEMMAP; + + did_ioremap_f = 1; + } + else { + mega_baseport += 0x10; + + if( !request_region(mega_baseport, 16, "megaraid") ) + goto fail_attach; + + flag |= BOARD_IOMAP; + + did_req_region_f = 1; + } + + /* Initialize SCSI Host structure */ + host = scsi_register(host_template, sizeof(adapter_t)); + + if(!host) goto fail_attach; + + did_scsi_reg_f = 1; + + scsi_set_pci_device(host, pdev); + + adapter = (adapter_t *)host->hostdata; + memset(adapter, 0, sizeof(adapter_t)); + + /* + * Allocate a pci device structure for allocations done + * internally - all of which would be in memory <4GB + */ + adapter->ipdev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL); + + if( adapter->ipdev == NULL ) goto fail_attach; + + got_ipdev_f = 1; + + memcpy(adapter->ipdev, pdev, sizeof(struct pci_dev)); + + if( pci_set_dma_mask(adapter->ipdev, 0xffffffff) != 0 ) + goto 
fail_attach; + + printk(KERN_NOTICE + "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", + host->host_no, mega_baseport, irq); + + adapter->base = mega_baseport; + + /* Copy resource info into structure */ + INIT_LIST_HEAD(&adapter->free_list); + INIT_LIST_HEAD(&adapter->pending_list); + + adapter->flag = flag; + spin_lock_init(&adapter->lock); + + adapter->host_lock = &io_request_lock; + + host->cmd_per_lun = max_cmd_per_lun; + host->max_sectors = max_sectors_per_io; + + adapter->dev = pdev; + adapter->host = host; + + adapter->host->irq = irq; + + if( flag & BOARD_MEMMAP ) { + adapter->host->base = tbase; + } + else { + adapter->host->io_port = tbase; + adapter->host->n_io_port = 16; + } + + adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; + + /* + * Allocate buffer to issue internal commands. + */ + adapter->mega_buffer = pci_alloc_consistent(adapter->dev, + MEGA_BUFFER_SIZE, &adapter->buf_dma_handle); + + if( !adapter->mega_buffer ) { + printk(KERN_WARNING "megaraid: out of RAM.\n"); + goto fail_attach; + } + alloc_int_buf_f = 1; + + adapter->scb_list = kmalloc(sizeof(scb_t)*MAX_COMMANDS, + GFP_KERNEL); + + if(!adapter->scb_list) { + printk(KERN_WARNING "megaraid: out of RAM.\n"); + goto fail_attach; + } + + alloc_scb_f = 1; + + /* Request our IRQ */ + if( adapter->flag & BOARD_MEMMAP ) { + if(request_irq(irq, megaraid_isr_memmapped, SA_SHIRQ, + "megaraid", adapter)) { + printk(KERN_WARNING + "megaraid: Couldn't register IRQ %d!\n", + irq); + goto fail_attach; + } + } + else { + if(request_irq(irq, megaraid_isr_iomapped, SA_SHIRQ, + "megaraid", adapter)) { + printk(KERN_WARNING + "megaraid: Couldn't register IRQ %d!\n", + irq); + goto fail_attach; + } + } + got_irq_f = 1; + + if( mega_setup_mailbox(adapter) != 0 ) + goto fail_attach; + + did_setup_mbox_f = 1; + + if( mega_query_adapter(adapter) != 0 ) + goto fail_attach; + + /* + * Have checks for some buggy f/w + */ + if((subsysid == 0x1111) && (subsysvid == 0x1111)) { + /* + * Which firmware + */ + if (!strcmp(adapter->fw_version, "3.00") || + !strcmp(adapter->fw_version, "3.01")) { + + printk( KERN_WARNING + "megaraid: Your card is a Dell PERC " + "2/SC RAID controller with " + "firmware\nmegaraid: 3.00 or 3.01. " + "This driver is known to have " + "corruption issues\nmegaraid: with " + "those firmware versions on this " + "specific card. In order\nmegaraid: " + "to protect your data, please upgrade " + "your firmware to version\nmegaraid: " + "3.10 or later, available from the " + "Dell Technical Support web\n" + "megaraid: site at\nhttp://support." + "dell.com/us/en/filelib/download/" + "index.asp?fileid=2940\n" + ); + } + } + + /* + * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with + * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit + * support, since this firmware cannot handle 64 bit + * addressing + */ + + if((subsysvid == HP_SUBSYS_VID) && + ((subsysid == 0x60E7)||(subsysid == 0x60E8))) { + + /* + * which firmware + */ + if( !strcmp(adapter->fw_version, "H01.07") || + !strcmp(adapter->fw_version, "H01.08") || + !strcmp(adapter->fw_version, "H01.09") ) { + + printk(KERN_WARNING + "megaraid: Firmware H.01.07, " + "H.01.08, and H.01.09 on 1M/2M " + "controllers\n" + "megaraid: do not support 64 bit " + "addressing.\nmegaraid: DISABLING " + "64 bit support.\n"); + adapter->flag &= ~BOARD_64BIT; + } + } + + + if(mega_is_bios_enabled(adapter)) { + mega_hbas[hba_count].is_bios_enabled = 1; + } + mega_hbas[hba_count].hostdata_addr = adapter; + + /* + * Find out which channel is raid and which is scsi. 
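mega_find_card() above follows the 2.4-era probe idiom: walk all matching devices with pci_find_device(), enable each one, then read the first BAR and the IRQ straight out of the pci_dev. Stripped of the driver specifics, the skeleton looks like this (illustrative only, not part of this patch):

/* Illustrative sketch: bare 2.4 PCI probe loop. */
static void example_probe_all(u16 vendor, u16 device)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_find_device(vendor, device, pdev))) {
		unsigned long base;
		u8 irq;

		if (pci_enable_device(pdev))
			continue;

		base = pci_resource_start(pdev, 0);
		irq = pdev->irq;

		/* ... map the region, register the SCSI host, etc. ... */
	}
}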
This is + * for ROMB support. + */ + mega_enum_raid_scsi(adapter); + + /* + * Find out if a logical drive is set as the boot drive. If + * there is one, will make that as the first logical drive. + * ROMB: Do we have to boot from a physical drive. Then all + * the physical drives would appear before the logical disks. + * Else, all the physical drives would be exported to the mid + * layer after logical drives. + */ + mega_get_boot_drv(adapter); + + if( ! adapter->boot_pdrv_enabled ) { + for( i = 0; i < NVIRT_CHAN; i++ ) + adapter->logdrv_chan[i] = 1; + + for( i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++ ) + adapter->logdrv_chan[i] = 0; + + adapter->mega_ch_class <<= NVIRT_CHAN; + } + else { + j = adapter->product_info.nchannels; + for( i = 0; i < j; i++ ) + adapter->logdrv_chan[i] = 0; + + for( i = j; i < NVIRT_CHAN + j; i++ ) + adapter->logdrv_chan[i] = 1; + } + + + /* + * Do we support random deletion and addition of logical + * drives + */ + adapter->read_ldidmap = 0; /* set it after first logdrv + delete cmd */ + adapter->support_random_del = mega_support_random_del(adapter); + + /* Initialize SCBs */ + if(mega_init_scb(adapter)) { + goto fail_attach; + } + + /* + * Reset the pending commands counter + */ + atomic_set(&adapter->pend_cmds, 0); + + /* + * Reset the adapter quiescent flag + */ + atomic_set(&adapter->quiescent, 0); + + hba_soft_state[hba_count] = adapter; + + /* + * Fill in the structure which needs to be passed back to the + * application when it does an ioctl() for controller related + * information. + */ + i = hba_count; + + mcontroller[i].base = mega_baseport; + mcontroller[i].irq = irq; + mcontroller[i].numldrv = adapter->numldrv; + mcontroller[i].pcibus = pci_bus; + mcontroller[i].pcidev = pci_device; + mcontroller[i].pcifun = PCI_FUNC (pci_dev_func); + mcontroller[i].pciid = -1; + mcontroller[i].pcivendor = pci_vendor; + mcontroller[i].pcislot = PCI_SLOT (pci_dev_func); + mcontroller[i].uid = (pci_bus << 8) | pci_dev_func; + + + /* Set the Mode of addressing to 64 bit if we can */ + if((adapter->flag & BOARD_64BIT)&&(sizeof(dma_addr_t) == 8)) { + pci_set_dma_mask(pdev, 0xffffffffffffffff); + adapter->has_64bit_addr = 1; + } + else { + pci_set_dma_mask(pdev, 0xffffffff); + adapter->has_64bit_addr = 0; + } + + init_MUTEX(&adapter->int_mtx); + init_waitqueue_head(&adapter->int_waitq); + + adapter->this_id = DEFAULT_INITIATOR_ID; + adapter->host->this_id = DEFAULT_INITIATOR_ID; + +#if MEGA_HAVE_CLUSTERING + /* + * Is cluster support enabled on this controller + * Note: In a cluster the HBAs ( the initiators ) will have + * different target IDs and we cannot assume it to be 7.
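The sizeof(dma_addr_t) check a few lines above matters because on 2.4 dma_addr_t is only 64 bits wide on configurations that can actually express such bus addresses; on a plain 32-bit build the wide mask could not be represented, so the driver falls back to 32-bit DMA even on a 64-bit capable board. Condensed into a sketch (illustrative only, not part of this patch):

/* Illustrative sketch: pick the widest DMA mask both the board and the
 * kernel configuration support.
 */
static void example_set_dma_mask(struct pci_dev *pdev, int board_is_64bit)
{
	if (board_is_64bit && sizeof(dma_addr_t) == 8)
		pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
	else
		pci_set_dma_mask(pdev, 0xffffffffUL);
}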
Call + * to mega_support_cluster() will get the target ids also if + * the cluster support is available + */ + adapter->has_cluster = mega_support_cluster(adapter); + + if( adapter->has_cluster ) { + printk(KERN_NOTICE + "megaraid: Cluster driver, initiator id:%d\n", + adapter->this_id); + } +#endif + + hba_count++; + continue; + +fail_attach: + if( did_setup_mbox_f ) { + pci_free_consistent(adapter->dev, sizeof(mbox64_t), + (void *)adapter->una_mbox64, + adapter->una_mbox64_dma); + } + + if( got_irq_f ) { + irq_disable(adapter); + free_irq(adapter->host->irq, adapter); + } + + if( alloc_scb_f ) { + kfree(adapter->scb_list); + } + + if( alloc_int_buf_f ) { + pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE, + (void *)adapter->mega_buffer, + adapter->buf_dma_handle); + } + + if( got_ipdev_f ) kfree(adapter->ipdev); + + if( did_scsi_reg_f ) scsi_unregister(host); + + if( did_ioremap_f ) { + iounmap((void *)mega_baseport); + release_mem_region(tbase, 128); + } + + if( did_req_region_f ) + release_region(mega_baseport, 16); + } + + return; +} + + +/** + * mega_setup_mailbox() + * @adapter - pointer to our soft state + * + * Allocates a 8 byte aligned memory for the handshake mailbox. + */ +static int +mega_setup_mailbox(adapter_t *adapter) +{ + unsigned long align; + + adapter->una_mbox64 = pci_alloc_consistent(adapter->dev, + sizeof(mbox64_t), &adapter->una_mbox64_dma); + + if( !adapter->una_mbox64 ) return -1; + + adapter->mbox = &adapter->una_mbox64->mbox; + + adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) & + (~0UL ^ 0xFUL)); + + adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8); + + align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox); + + adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align; + + /* + * Register the mailbox if the controller is an io-mapped controller + */ + if( adapter->flag & BOARD_IOMAP ) { + + outb_p(adapter->mbox_dma & 0xFF, + adapter->host->io_port + MBOX_PORT0); + + outb_p((adapter->mbox_dma >> 8) & 0xFF, + adapter->host->io_port + MBOX_PORT1); + + outb_p((adapter->mbox_dma >> 16) & 0xFF, + adapter->host->io_port + MBOX_PORT2); + + outb_p((adapter->mbox_dma >> 24) & 0xFF, + adapter->host->io_port + MBOX_PORT3); + + outb_p(ENABLE_MBOX_BYTE, + adapter->host->io_port + ENABLE_MBOX_REGION); + + irq_ack(adapter); + + irq_enable(adapter); + } + + return 0; +} + + +/* + * mega_query_adapter() + * @adapter - pointer to our soft state + * + * Issue the adapter inquiry commands to the controller and find out + * information and parameter about the devices attached + */ +static int +mega_query_adapter(adapter_t *adapter) +{ + dma_addr_t prod_info_dma_handle; + mega_inquiry3 *inquiry3; + u8 raw_mbox[16]; + mbox_t *mbox; + int retval; + + /* Initialize adapter inquiry mailbox */ + + mbox = (mbox_t *)raw_mbox; + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + memset(mbox, 0, 16); + + /* + * Try to issue Inquiry3 command + * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and + * update enquiry3 structure + */ + mbox->xferaddr = (u32)adapter->buf_dma_handle; + + inquiry3 = (mega_inquiry3 *)adapter->mega_buffer; + + raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ + raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */ + raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 
0x02 */ + + /* Issue a blocking command to the card */ + if ((retval = issue_scb_block(adapter, raw_mbox))) { + /* the adapter does not support 40ld */ + + mraid_ext_inquiry *ext_inq; + mraid_inquiry *inq; + dma_addr_t dma_handle; + + ext_inq = pci_alloc_consistent(adapter->dev, + sizeof(mraid_ext_inquiry), &dma_handle); + + if( ext_inq == NULL ) return -1; + + inq = &ext_inq->raid_inq; + + mbox->xferaddr = (u32)dma_handle; + + /*issue old 0x04 command to adapter */ + mbox->cmd = MEGA_MBOXCMD_ADPEXTINQ; + + issue_scb_block(adapter, raw_mbox); + + /* + * update Enquiry3 and ProductInfo structures with + * mraid_inquiry structure + */ + mega_8_to_40ld(inq, inquiry3, + (mega_product_info *)&adapter->product_info); + + pci_free_consistent(adapter->dev, sizeof(mraid_ext_inquiry), + ext_inq, dma_handle); + + } else { /*adapter supports 40ld */ + adapter->flag |= BOARD_40LD; + + /* + * get product_info, which is static information and will be + * unchanged + */ + prod_info_dma_handle = pci_map_single(adapter->dev, (void *) + &adapter->product_info, + sizeof(mega_product_info), PCI_DMA_FROMDEVICE); + + mbox->xferaddr = prod_info_dma_handle; + + raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ + raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */ + + if ((retval = issue_scb_block(adapter, raw_mbox))) + printk(KERN_WARNING + "megaraid: Product_info cmd failed with error: %d\n", + retval); + + pci_dma_sync_single(adapter->dev, prod_info_dma_handle, + sizeof(mega_product_info), + PCI_DMA_FROMDEVICE); + + pci_unmap_single(adapter->dev, prod_info_dma_handle, + sizeof(mega_product_info), PCI_DMA_FROMDEVICE); + } + + + /* + * kernel scans the channels from 0 to <= max_channel + */ + adapter->host->max_channel = + adapter->product_info.nchannels + NVIRT_CHAN -1; + + adapter->host->max_id = 16; /* max targets per channel */ + + adapter->host->max_lun = 7; /* Upto 7 luns for non disk devices */ + + adapter->host->cmd_per_lun = max_cmd_per_lun; + + adapter->numldrv = inquiry3->num_ldrv; + + adapter->max_cmds = adapter->product_info.max_commands; + + if(adapter->max_cmds > MAX_COMMANDS) + adapter->max_cmds = MAX_COMMANDS; + + adapter->host->can_queue = adapter->max_cmds - 1; + + /* + * Get the maximum number of scatter-gather elements supported by this + * firmware + */ + mega_get_max_sgl(adapter); + + adapter->host->sg_tablesize = adapter->sglen; + + + /* use HP firmware and bios version encoding */ + if (adapter->product_info.subsysvid == HP_SUBSYS_VID) { + sprintf (adapter->fw_version, "%c%d%d.%d%d", + adapter->product_info.fw_version[2], + adapter->product_info.fw_version[1] >> 8, + adapter->product_info.fw_version[1] & 0x0f, + adapter->product_info.fw_version[0] >> 8, + adapter->product_info.fw_version[0] & 0x0f); + sprintf (adapter->bios_version, "%c%d%d.%d%d", + adapter->product_info.bios_version[2], + adapter->product_info.bios_version[1] >> 8, + adapter->product_info.bios_version[1] & 0x0f, + adapter->product_info.bios_version[0] >> 8, + adapter->product_info.bios_version[0] & 0x0f); + } else { + memcpy(adapter->fw_version, + (char *)adapter->product_info.fw_version, 4); + adapter->fw_version[4] = 0; + + memcpy(adapter->bios_version, + (char *)adapter->product_info.bios_version, 4); + + adapter->bios_version[4] = 0; + } + + printk(KERN_NOTICE "megaraid: [%s:%s] detected %d logical drives.\n", + adapter->fw_version, adapter->bios_version, adapter->numldrv); + + /* + * Do we support extended (>10 bytes) cdbs + */ + adapter->support_ext_cdb = mega_support_ext_cdb(adapter); + if 
(adapter->support_ext_cdb) + printk(KERN_NOTICE "megaraid: supports extended CDBs.\n"); + + + return 0; +} + + +/* + * megaraid_queue() + * @scmd - Issue this scsi command + * @done - the callback hook into the scsi mid-layer + * + * The command queuing entry point for the mid-layer. + */ +static int +megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) +{ + adapter_t *adapter; + scb_t *scb; + int busy=0; + + adapter = (adapter_t *)scmd->host->hostdata; + + scmd->scsi_done = done; + + + /* + * Allocate and build a SCB request + * busy flag will be set if mega_build_cmd() command could not + * allocate scb. We will return non-zero status in that case. + * NOTE: scb can be null even though certain commands completed + * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, we would + * return 0 in that case. + */ + + scb = mega_build_cmd(adapter, scmd, &busy); + + if(scb) { + scb->state |= SCB_PENDQ; + list_add_tail(&scb->list, &adapter->pending_list); + + /* + * Check if the HBA is in quiescent state, e.g., during a + * delete logical drive opertion. If it is, don't run + * the pending_list. + */ + if(atomic_read(&adapter->quiescent) == 0) { + mega_runpendq(adapter); + } + return 0; + } + + return busy; +} + + +/** + * mega_build_cmd() + * @adapter - pointer to our soft state + * @cmd - Prepare using this scsi command + * @busy - busy flag if no resources + * + * Prepares a command and scatter gather list for the controller. This routine + * also finds out if the commands is intended for a logical drive or a + * physical device and prepares the controller command accordingly. + * + * We also re-order the logical drives and physical devices based on their + * boot settings. + */ +static scb_t * +mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy) +{ + mega_ext_passthru *epthru; + mega_passthru *pthru; + scb_t *scb; + mbox_t *mbox; + long seg; + char islogical; + int max_ldrv_num; + int channel = 0; + int target = 0; + int ldrv_num = 0; /* logical drive number */ + + + /* + * filter the internal and ioctl commands + */ + if((cmd->cmnd[0] == MEGA_INTERNAL_CMD)) { + return cmd->buffer; + } + + + /* + * We know what channels our logical drives are on - mega_find_card() + */ + islogical = adapter->logdrv_chan[cmd->channel]; + + /* + * The theory: If physical drive is chosen for boot, all the physical + * devices are exported before the logical drives, otherwise physical + * devices are pushed after logical drives, in which case - Kernel sees + * the physical devices on virtual channel which is obviously converted + * to actual channel on the HBA. + */ + if( adapter->boot_pdrv_enabled ) { + if( islogical ) { + /* logical channel */ + channel = cmd->channel - + adapter->product_info.nchannels; + } + else { + channel = cmd->channel; /* this is physical channel */ + target = cmd->target; + + /* + * boot from a physical disk, that disk needs to be + * exposed first IF both the channels are SCSI, then + * booting from the second channel is not allowed. 
+ */ + if( target == 0 ) { + target = adapter->boot_pdrv_tgt; + } + else if( target == adapter->boot_pdrv_tgt ) { + target = 0; + } + } + } + else { + if( islogical ) { + channel = cmd->channel; /* this is the logical channel + */ + } + else { + channel = cmd->channel - NVIRT_CHAN; /* physical + channel */ + target = cmd->target; + } + } + + + if(islogical) { + + /* have just LUN 0 for each target on virtual channels */ + if (cmd->lun) { + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + + ldrv_num = mega_get_ldrv_num(adapter, cmd, channel); + + + max_ldrv_num = (adapter->flag & BOARD_40LD) ? + MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD; + + /* + * max_ldrv_num increases by 0x80 if some logical drive was + * deleted. + */ + if(adapter->read_ldidmap) + max_ldrv_num += 0x80; + + if(ldrv_num > max_ldrv_num ) { + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + + } + else { + if( cmd->lun > 7) { + /* + * Do not support lun >7 for physically accessed + * devices + */ + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + } + + /* + * + * Logical drive commands + * + */ + if(islogical) { + switch (cmd->cmnd[0]) { + case TEST_UNIT_READY: + memset(cmd->request_buffer, 0, cmd->request_bufflen); + +#if MEGA_HAVE_CLUSTERING + /* + * Do we support clustering and is the support enabled + * If no, return success always + */ + if( !adapter->has_cluster ) { + cmd->result = (DID_OK << 16); + cmd->scsi_done(cmd); + return NULL; + } + + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + + scb->raw_mbox[0] = MEGA_CLUSTER_CMD; + scb->raw_mbox[2] = MEGA_RESERVATION_STATUS; + scb->raw_mbox[3] = ldrv_num; + + scb->dma_direction = PCI_DMA_NONE; + + return scb; +#else + cmd->result = (DID_OK << 16); + cmd->scsi_done(cmd); + return NULL; +#endif + + case MODE_SENSE: + memset(cmd->request_buffer, 0, cmd->cmnd[4]); + cmd->result = (DID_OK << 16); + cmd->scsi_done(cmd); + return NULL; + + case READ_CAPACITY: + case INQUIRY: + + if(!(adapter->flag & (1L << cmd->channel))) { + + printk(KERN_NOTICE + "scsi%d: scanning scsi channel %d ", + adapter->host->host_no, + cmd->channel); + printk("for logical drives.\n"); + + adapter->flag |= (1L << cmd->channel); + } + + /* Allocate a SCB and initialize passthru */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + pthru = scb->pthru; + + mbox = (mbox_t *)scb->raw_mbox; + memset(mbox, 0, sizeof(scb->raw_mbox)); + memset(pthru, 0, sizeof(mega_passthru)); + + pthru->timeout = 0; + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 1; + pthru->logdrv = ldrv_num; + pthru->cdblen = cmd->cmd_len; + memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); + + if( adapter->has_64bit_addr ) { + mbox->cmd = MEGA_MBOXCMD_PASSTHRU64; + } + else { + mbox->cmd = MEGA_MBOXCMD_PASSTHRU; + } + + scb->dma_direction = PCI_DMA_FROMDEVICE; + + pthru->numsgelements = mega_build_sglist(adapter, scb, + &pthru->dataxferaddr, &pthru->dataxferlen); + + mbox->xferaddr = scb->pthru_dma_addr; + + return scb; + + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + + /* Allocate a SCB and initialize mailbox */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + mbox = (mbox_t *)scb->raw_mbox; 
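The target remapping earlier in mega_build_cmd() is a plain two-way swap: the configured boot physical drive is presented to the mid-layer as ID 0 and whatever really answers at ID 0 takes the boot drive's ID, so the boot disk is enumerated first. As a standalone sketch (the helper name is illustrative, not part of this patch):

/* Illustrative sketch: expose the boot physical drive as target 0. */
static inline int example_map_boot_target(int target, int boot_tgt)
{
	if (target == 0)
		return boot_tgt;
	if (target == boot_tgt)
		return 0;
	return target;
}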
+ + memset(mbox, 0, sizeof(scb->raw_mbox)); + mbox->logdrv = ldrv_num; + + /* + * A little hack: 2nd bit is zero for all scsi read + * commands and is set for all scsi write commands + */ + if( adapter->has_64bit_addr ) { + mbox->cmd = (*cmd->cmnd & 0x02) ? + MEGA_MBOXCMD_LWRITE64: + MEGA_MBOXCMD_LREAD64 ; + } + else { + mbox->cmd = (*cmd->cmnd & 0x02) ? + MEGA_MBOXCMD_LWRITE: + MEGA_MBOXCMD_LREAD ; + } + + /* + * 6-byte READ(0x08) or WRITE(0x0A) cdb + */ + if( cmd->cmd_len == 6 ) { + mbox->numsectors = (u32) cmd->cmnd[4]; + mbox->lba = + ((u32)cmd->cmnd[1] << 16) | + ((u32)cmd->cmnd[2] << 8) | + (u32)cmd->cmnd[3]; + + mbox->lba &= 0x1FFFFF; + +#if MEGA_HAVE_STATS + /* + * Take modulo 0x80, since the logical drive + * number increases by 0x80 when a logical + * drive was deleted + */ + if (*cmd->cmnd == READ_6) { + adapter->nreads[ldrv_num%0x80]++; + adapter->nreadblocks[ldrv_num%0x80] += + mbox->numsectors; + } else { + adapter->nwrites[ldrv_num%0x80]++; + adapter->nwriteblocks[ldrv_num%0x80] += + mbox->numsectors; + } +#endif + } + + /* + * 10-byte READ(0x28) or WRITE(0x2A) cdb + */ + if( cmd->cmd_len == 10 ) { + mbox->numsectors = + (u32)cmd->cmnd[8] | + ((u32)cmd->cmnd[7] << 8); + mbox->lba = + ((u32)cmd->cmnd[2] << 24) | + ((u32)cmd->cmnd[3] << 16) | + ((u32)cmd->cmnd[4] << 8) | + (u32)cmd->cmnd[5]; + +#if MEGA_HAVE_STATS + if (*cmd->cmnd == READ_10) { + adapter->nreads[ldrv_num%0x80]++; + adapter->nreadblocks[ldrv_num%0x80] += + mbox->numsectors; + } else { + adapter->nwrites[ldrv_num%0x80]++; + adapter->nwriteblocks[ldrv_num%0x80] += + mbox->numsectors; + } +#endif + } + + /* + * 12-byte READ(0xA8) or WRITE(0xAA) cdb + */ + if( cmd->cmd_len == 12 ) { + mbox->lba = + ((u32)cmd->cmnd[2] << 24) | + ((u32)cmd->cmnd[3] << 16) | + ((u32)cmd->cmnd[4] << 8) | + (u32)cmd->cmnd[5]; + + mbox->numsectors = + ((u32)cmd->cmnd[6] << 24) | + ((u32)cmd->cmnd[7] << 16) | + ((u32)cmd->cmnd[8] << 8) | + (u32)cmd->cmnd[9]; + +#if MEGA_HAVE_STATS + if (*cmd->cmnd == READ_12) { + adapter->nreads[ldrv_num%0x80]++; + adapter->nreadblocks[ldrv_num%0x80] += + mbox->numsectors; + } else { + adapter->nwrites[ldrv_num%0x80]++; + adapter->nwriteblocks[ldrv_num%0x80] += + mbox->numsectors; + } +#endif + } + + /* + * If it is a read command + */ + if( (*cmd->cmnd & 0x0F) == 0x08 ) { + scb->dma_direction = PCI_DMA_FROMDEVICE; + } + else { + scb->dma_direction = PCI_DMA_TODEVICE; + } + + /* Calculate Scatter-Gather info */ + mbox->numsgelements = mega_build_sglist(adapter, scb, + (u32 *)&mbox->xferaddr, (u32 *)&seg); + + return scb; + +#if MEGA_HAVE_CLUSTERING + case RESERVE: /* Fall through */ + case RELEASE: + + /* + * Do we support clustering and is the support enabled + */ + if( ! adapter->has_cluster ) { + + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + + /* Allocate a SCB and initialize mailbox */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + + scb->raw_mbox[0] = MEGA_CLUSTER_CMD; + scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ? 
+ MEGA_RESERVE_LD : MEGA_RELEASE_LD; + + scb->raw_mbox[3] = ldrv_num; + + scb->dma_direction = PCI_DMA_NONE; + + return scb; +#endif + + default: + cmd->result = (DID_BAD_TARGET << 16); + cmd->scsi_done(cmd); + return NULL; + } + } + + /* + * Passthru drive commands + */ + else { + /* Allocate a SCB and initialize passthru */ + if(!(scb = mega_allocate_scb(adapter, cmd))) { + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + *busy = 1; + + return NULL; + } + + mbox = (mbox_t *)scb->raw_mbox; + memset(mbox, 0, sizeof(scb->raw_mbox)); + + if( adapter->support_ext_cdb ) { + + epthru = mega_prepare_extpassthru(adapter, scb, cmd, + channel, target); + + mbox->cmd = MEGA_MBOXCMD_EXTPTHRU; + + mbox->xferaddr = scb->epthru_dma_addr; + + } + else { + + pthru = mega_prepare_passthru(adapter, scb, cmd, + channel, target); + + /* Initialize mailbox */ + if( adapter->has_64bit_addr ) { + mbox->cmd = MEGA_MBOXCMD_PASSTHRU64; + } + else { + mbox->cmd = MEGA_MBOXCMD_PASSTHRU; + } + + mbox->xferaddr = scb->pthru_dma_addr; + + } + return scb; + } + return NULL; +} + + +/** + * mega_prepare_passthru() + * @adapter - pointer to our soft state + * @scb - our scsi control block + * @cmd - scsi command from the mid-layer + * @channel - actual channel on the controller + * @target - actual id on the controller. + * + * prepare a command for the scsi physical devices. + */ +static mega_passthru * +mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd, + int channel, int target) +{ + mega_passthru *pthru; + + pthru = scb->pthru; + memset(pthru, 0, sizeof (mega_passthru)); + + /* 0=6sec/1=60sec/2=10min/3=3hrs */ + pthru->timeout = 2; + + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 0; + + pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; + + pthru->target = (adapter->flag & BOARD_40LD) ? + (channel << 4) | target : target; + + pthru->cdblen = cmd->cmd_len; + pthru->logdrv = cmd->lun; + + memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); + + /* Not sure about the direction */ + scb->dma_direction = PCI_DMA_BIDIRECTIONAL; + + /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */ + switch (cmd->cmnd[0]) { + case INQUIRY: + case READ_CAPACITY: + if(!(adapter->flag & (1L << cmd->channel))) { + + printk(KERN_NOTICE + "scsi%d: scanning scsi channel %d [P%d] ", + adapter->host->host_no, + cmd->channel, channel); + printk("for physical devices.\n"); + + adapter->flag |= (1L << cmd->channel); + } + /* Fall through */ + default: + pthru->numsgelements = mega_build_sglist(adapter, scb, + &pthru->dataxferaddr, &pthru->dataxferlen); + break; + } + return pthru; +} + + +/** + * mega_prepare_extpassthru() + * @adapter - pointer to our soft state + * @scb - our scsi control block + * @cmd - scsi command from the mid-layer + * @channel - actual channel on the controller + * @target - actual id on the controller. + * + * prepare a command for the scsi physical devices. This rountine prepares + * commands for devices which can take extended CDBs (>10 bytes) + */ +static mega_ext_passthru * +mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd, + int channel, int target) +{ + mega_ext_passthru *epthru; + + epthru = scb->epthru; + memset(epthru, 0, sizeof(mega_ext_passthru)); + + /* 0=6sec/1=60sec/2=10min/3=3hrs */ + epthru->timeout = 2; + + epthru->ars = 1; + epthru->reqsenselen = 14; + epthru->islogical = 0; + + epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; + epthru->target = (adapter->flag & BOARD_40LD) ? 
+ (channel << 4) | target : target; + + epthru->cdblen = cmd->cmd_len; + epthru->logdrv = cmd->lun; + + memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len); + + /* Not sure about the direction */ + scb->dma_direction = PCI_DMA_BIDIRECTIONAL; + + switch(cmd->cmnd[0]) { + case INQUIRY: + case READ_CAPACITY: + if(!(adapter->flag & (1L << cmd->channel))) { + + printk(KERN_NOTICE + "scsi%d: scanning scsi channel %d [P%d] ", + adapter->host->host_no, + cmd->channel, channel); + printk("for physical devices.\n"); + + adapter->flag |= (1L << cmd->channel); + } + /* Fall through */ + default: + epthru->numsgelements = mega_build_sglist(adapter, scb, + &epthru->dataxferaddr, &epthru->dataxferlen); + break; + } + + return epthru; +} + + +/** + * mega_allocate_scb() + * @adapter - pointer to our soft state + * @cmd - scsi command from the mid-layer + * + * Allocate a SCB structure. This is the central structure for controller + * commands. + */ +static inline scb_t * +mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd) +{ + struct list_head *head = &adapter->free_list; + scb_t *scb; + + /* Unlink command from Free List */ + if( !list_empty(head) ) { + + scb = list_entry(head->next, scb_t, list); + + list_del_init(head->next); + + scb->state = SCB_ACTIVE; + scb->cmd = cmd; + scb->dma_type = MEGA_DMA_TYPE_NONE; + + return scb; + } + + return NULL; +} + + +/** + * mega_runpendq() + * @adapter - pointer to our soft state + * + * Runs through the list of pending requests. + */ +static inline void +mega_runpendq(adapter_t *adapter) +{ + if(!list_empty(&adapter->pending_list)) + __mega_runpendq(adapter); +} + +static void +__mega_runpendq(adapter_t *adapter) +{ + scb_t *scb; + struct list_head *pos, *next; + + /* Issue any pending commands to the card */ + list_for_each_safe(pos, next, &adapter->pending_list) { + + scb = list_entry(pos, scb_t, list); + + if( !(scb->state & SCB_ISSUED) ) { + + if( issue_scb(adapter, scb) != 0 ) + return; + } + } + + return; +} + + +/** + * issue_scb() + * @adapter - pointer to our soft state + * @scb - scsi control block + * + * Post a command to the card if the mailbox is available, otherwise return + * busy. We also take the scb from the pending list if the mailbox is + * available. 
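mega_allocate_scb() above and the pending list run by mega_runpendq() form a simple two-queue scheme: an SCB sits on free_list until a command claims it, moves to pending_list until the mailbox accepts it, and is returned to free_list when the firmware completes it. The list_head idiom behind the allocation, as a bare sketch (illustrative only, not part of this patch):

/* Illustrative sketch: pop the first entry off a free list, 2.4 list.h style. */
static scb_t *example_get_free_scb(struct list_head *free_list)
{
	scb_t *scb = NULL;

	if (!list_empty(free_list)) {
		scb = list_entry(free_list->next, scb_t, list);
		list_del_init(&scb->list);
	}

	return scb;
}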
+ */ +static inline int +issue_scb(adapter_t *adapter, scb_t *scb) +{ + volatile mbox64_t *mbox64 = adapter->mbox64; + volatile mbox_t *mbox = adapter->mbox; + unsigned int i = 0; + + if(unlikely(mbox->busy)) { + do { + udelay(1); + i++; + } while( mbox->busy && (i < max_mbox_busy_wait) ); + + if(mbox->busy) return -1; + } + + /* Copy mailbox data into host structure */ + memcpy((char *)mbox, (char *)scb->raw_mbox, 16); + + mbox->cmdid = scb->idx; /* Set cmdid */ + mbox->busy = 1; /* Set busy */ + + + /* + * Increment the pending queue counter + */ + atomic_inc((atomic_t *)&adapter->pend_cmds); + + switch (mbox->cmd) { + case MEGA_MBOXCMD_LREAD64: + case MEGA_MBOXCMD_LWRITE64: + case MEGA_MBOXCMD_PASSTHRU64: + case MEGA_MBOXCMD_EXTPTHRU: + mbox64->xfer_segment_lo = mbox->xferaddr; + mbox64->xfer_segment_hi = 0; + mbox->xferaddr = 0xFFFFFFFF; + break; + default: + mbox64->xfer_segment_lo = 0; + mbox64->xfer_segment_hi = 0; + } + + /* + * post the command + */ + scb->state |= SCB_ISSUED; + + if( likely(adapter->flag & BOARD_MEMMAP) ) { + mbox->poll = 0; + mbox->ack = 0; + WRINDOOR(adapter, adapter->mbox_dma | 0x1); + } + else { + irq_enable(adapter); + issue_command(adapter); + } + + return 0; +} + + +/** + * issue_scb_block() + * @adapter - pointer to our soft state + * @raw_mbox - the mailbox + * + * Issue a scb in synchronous and non-interrupt mode + */ +static int +issue_scb_block(adapter_t *adapter, u_char *raw_mbox) +{ + volatile mbox64_t *mbox64 = adapter->mbox64; + volatile mbox_t *mbox = adapter->mbox; + u8 byte; + + raw_mbox[0x1] = 0xFE; /* Set cmdid */ + raw_mbox[0xF] = 1; /* Set busy */ + + /* Wait until mailbox is free */ + if(mega_busywait_mbox (adapter)) + goto bug_blocked_mailbox; + + /* Copy mailbox data into host structure */ + memcpy((char *) mbox, raw_mbox, 16); + + switch (raw_mbox[0]) { + case MEGA_MBOXCMD_LREAD64: + case MEGA_MBOXCMD_LWRITE64: + case MEGA_MBOXCMD_PASSTHRU64: + case MEGA_MBOXCMD_EXTPTHRU: + mbox64->xfer_segment_lo = mbox->xferaddr; + mbox64->xfer_segment_hi = 0; + mbox->xferaddr = 0xFFFFFFFF; + break; + default: + mbox64->xfer_segment_lo = 0; + mbox64->xfer_segment_hi = 0; + } + + if( likely(adapter->flag & BOARD_MEMMAP) ) { + mbox->poll = 0; + mbox->ack = 0; + mbox->numstatus = 0xFF; + mbox->status = 0xFF; + WRINDOOR(adapter, adapter->mbox_dma | 0x1); + + while((u8)mbox->numstatus == 0xFF) + cpu_relax(); + + mbox->numstatus = 0xFF; + + while((u8)mbox->status == 0xFF) + cpu_relax(); + + while( (u8)mbox->poll != 0x77 ) + cpu_relax(); + + mbox->poll = 0; + mbox->ack = 0x77; + + WRINDOOR(adapter, adapter->mbox_dma | 0x2); + + while(RDINDOOR(adapter) & 0x2) + cpu_relax(); + } + else { + irq_disable(adapter); + issue_command(adapter); + + while (!((byte = irq_state(adapter)) & INTR_VALID)) + cpu_relax(); + + set_irq_state(adapter, byte); + irq_enable(adapter); + irq_ack(adapter); + } + + return mbox->status; + +bug_blocked_mailbox: + printk(KERN_WARNING "megaraid: Blocked mailbox......!!\n"); + udelay (1000); + return -1; +} + + +/** + * megaraid_isr_iomapped() + * @irq - irq + * @devp - pointer to our soft state + * @regs - unused + * + * Interrupt service routine for io-mapped controllers. + * Find out if our device is interrupting. If yes, acknowledge the interrupt + * and service the completed commands. 
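+ *
+ * A minimal sketch of the shared shape of both acknowledge sequences
+ * below (not part of this patch; "interrupt pending" and "ack" stand in
+ * for irq_state()/irq_ack() on I/O mapped boards and RDOUTDOOR()/WRINDOOR()
+ * on memory mapped boards):
+ *
+ *   while (interrupt pending) {
+ *       wait until mbox->numstatus != 0xFF, latch it, reset it to 0xFF;
+ *       wait until mbox->status != 0xFF;
+ *       atomic_sub(nstatus, &adapter->pend_cmds);
+ *       memcpy(completed, mbox->completed, nstatus);
+ *       ack the interrupt;
+ *       mega_cmd_done(adapter, completed, nstatus, status);
+ *   }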
+ */ +static void +megaraid_isr_iomapped(int irq, void *devp, struct pt_regs *regs) +{ + adapter_t *adapter = devp; + unsigned long flags; + + + spin_lock_irqsave(adapter->host_lock, flags); + + megaraid_iombox_ack_sequence(adapter); + + /* Loop through any pending requests */ + if( atomic_read(&adapter->quiescent ) == 0) { + mega_runpendq(adapter); + } + + spin_unlock_irqrestore(adapter->host_lock, flags); + + return; +} + + +/** + * megaraid_iombox_ack_sequence - interrupt ack sequence for IO mapped HBAs + * @adapter - controller's soft state + * + * Interrupt ackrowledgement sequence for IO mapped HBAs + */ +static inline void +megaraid_iombox_ack_sequence(adapter_t *adapter) +{ + u8 status; + u8 nstatus; + u8 completed[MAX_FIRMWARE_STATUS]; + u8 byte; + + + /* + * loop till F/W has more commands for us to complete. + */ + do { + /* Check if a valid interrupt is pending */ + byte = irq_state(adapter); + if( (byte & VALID_INTR_BYTE) == 0 ) { + return; + } + set_irq_state(adapter, byte); + + while((nstatus = (u8)adapter->mbox->numstatus) + == 0xFF) + cpu_relax(); + adapter->mbox->numstatus = 0xFF; + + while((status = (u8)adapter->mbox->status) == 0xFF) + cpu_relax(); + + /* + * decrement the pending queue counter + */ + atomic_sub(nstatus, (atomic_t *)&adapter->pend_cmds); + + memcpy(completed, (void *)adapter->mbox->completed, nstatus); + + /* Acknowledge interrupt */ + irq_ack(adapter); + + mega_cmd_done(adapter, completed, nstatus, status); + + } while(1); +} + + +/** + * megaraid_isr_memmapped() + * @irq - irq + * @devp - pointer to our soft state + * @regs - unused + * + * Interrupt service routine for memory-mapped controllers. + * Find out if our device is interrupting. If yes, acknowledge the interrupt + * and service the completed commands. + */ +static void +megaraid_isr_memmapped(int irq, void *devp, struct pt_regs *regs) +{ + adapter_t *adapter = devp; + unsigned long flags; + + + spin_lock_irqsave(adapter->host_lock, flags); + + megaraid_memmbox_ack_sequence(adapter); + + /* Loop through any pending requests */ + if(atomic_read(&adapter->quiescent) == 0) { + mega_runpendq(adapter); + } + + spin_unlock_irqrestore(adapter->host_lock, flags); + + return; +} + + +/** + * megaraid_memmbox_ack_sequence - interrupt ack sequence for memory mapped HBAs + * @adapter - controller's soft state + * + * Interrupt ackrowledgement sequence for memory mapped HBAs + */ +static inline void +megaraid_memmbox_ack_sequence(adapter_t *adapter) +{ + u8 status; + u32 dword = 0; + u8 nstatus; + u8 completed[MAX_FIRMWARE_STATUS]; + + + /* + * loop till F/W has more commands for us to complete. 
+ */ + do { + /* Check if a valid interrupt is pending */ + dword = RDOUTDOOR(adapter); + if( dword != 0x10001234 ) { + /* + * No more pending commands + */ + return; + } + WROUTDOOR(adapter, 0x10001234); + + while((nstatus = (u8)adapter->mbox->numstatus) + == 0xFF) + cpu_relax(); + adapter->mbox->numstatus = 0xFF; + + while((status = (u8)adapter->mbox->status) == 0xFF) + cpu_relax(); + + /* + * decrement the pending queue counter + */ + atomic_sub(nstatus, (atomic_t *)&adapter->pend_cmds); + + memcpy(completed, (void *)adapter->mbox->completed, nstatus); + + /* Acknowledge interrupt */ + WRINDOOR(adapter, 0x2); + + while( RDINDOOR(adapter) & 0x02 ) cpu_relax(); + + mega_cmd_done(adapter, completed, nstatus, status); + + } while(1); +} + + +/** + * mega_cmd_done() + * @adapter - pointer to our soft state + * @completed - array of ids of completed commands + * @nstatus - number of completed commands + * @status - status of the last command completed + * + * Complete the comamnds and call the scsi mid-layer callback hooks. + */ +static inline void +mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) +{ + mega_ext_passthru *epthru = NULL; + struct scatterlist *sgl; + Scsi_Cmnd *cmd = NULL; + mega_passthru *pthru = NULL; + mbox_t *mbox = NULL; + u8 c; + scb_t *scb; + int cmdid; + int i; + + /* + * for all the commands completed, call the mid-layer callback routine + * and free the scb. + */ + for( i = 0; i < nstatus; i++ ) { + + cmdid = completed[i]; + + if( cmdid == CMDID_INT_CMDS ) { /* internal command */ + scb = &adapter->int_scb; + cmd = scb->cmd; + mbox = (mbox_t *)scb->raw_mbox; + + /* + * Internal command interface do not fire the extended + * passthru or 64-bit passthru + */ + pthru = scb->pthru; + + } + else { + scb = &adapter->scb_list[cmdid]; + cmd = scb->cmd; + pthru = scb->pthru; + epthru = scb->epthru; + mbox = (mbox_t *)scb->raw_mbox; + + /* + * Make sure f/w has completed a valid command + */ + if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) { + printk(KERN_CRIT + "megaraid: invalid command "); + printk("Id %d, scb->state:%x, scsi cmd:%p\n", + cmdid, scb->state, scb->cmd); + + continue; + } + + /* + * Was an abort issued for this command + */ + if( scb->state & SCB_ABORT ) { + + printk(KERN_NOTICE + "megaraid: aborted cmd %lx[%x] complete.\n", + scb->cmd->serial_number, scb->idx); + + cmd->result = (DID_ABORT << 16); + + mega_free_scb(adapter, scb); + + cmd->scsi_done(cmd); + + continue; + } + + /* + * Was a reset issued for this command + */ + if( scb->state & SCB_RESET ) { + + printk(KERN_WARNING + "megaraid: reset cmd %lx[%x] complete.\n", + scb->cmd->serial_number, scb->idx); + + scb->cmd->result = (DID_RESET << 16); + + mega_free_scb (adapter, scb); + + cmd->scsi_done(cmd); + + continue; + } + +#if MEGA_HAVE_STATS + { + + int islogical = adapter->logdrv_chan[cmd->channel]; + int logdrv = mbox->logdrv; + + /* + * Maintain an error counter for the logical drive. 
+ * Some application like SNMP agent need such + * statistics + */ + if( status && islogical && (cmd->cmnd[0] == READ_6 || + cmd->cmnd[0] == READ_10 || + cmd->cmnd[0] == READ_12)) { + /* + * Logical drive number increases by 0x80 when + * a logical drive is deleted + */ + adapter->rd_errors[logdrv%0x80]++; + } + + if( status && islogical && (cmd->cmnd[0] == WRITE_6 || + cmd->cmnd[0] == WRITE_10 || + cmd->cmnd[0] == WRITE_12)) { + /* + * Logical drive number increases by 0x80 when + * a logical drive is deleted + */ + adapter->wr_errors[logdrv%0x80]++; + } + + } +#endif + } + + /* + * Do not return the presence of hard disk on the channel so, + * inquiry sent, and returned data==hard disk or removable + * hard disk and not logical, request should return failure! - + * PJ + */ + if(cmd->cmnd[0] == INQUIRY) { + int islogical = adapter->logdrv_chan[cmd->channel]; + + if(!islogical) { + if( cmd->use_sg ) { + sgl = (struct scatterlist *) + cmd->request_buffer; + c = *(u8 *)sgl[0].address; + } + else { + c = *(u8 *)cmd->request_buffer; + } + + if(IS_RAID_CH(adapter, cmd->channel) && + ((c & 0x1F ) == TYPE_DISK)) { + status = 0xF0; + } + } + } + + /* clear result; otherwise, success returns corrupt value */ + cmd->result = 0; + + /* Convert MegaRAID status to Linux error code */ + switch (status) { + case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */ + cmd->result |= (DID_OK << 16); + break; + + case 0x02: /* ERROR_ABORTED, i.e. + SCSI_STATUS_CHECK_CONDITION */ + + /* set sense_buffer and result fields */ + if( mbox->cmd == MEGA_MBOXCMD_PASSTHRU || + mbox->cmd == MEGA_MBOXCMD_PASSTHRU64 ) { + + memcpy(cmd->sense_buffer, pthru->reqsensearea, + 14); + + cmd->result = (DRIVER_SENSE << 24) | + (DID_OK << 16) | + (CHECK_CONDITION << 1); + } + else { + if (mbox->cmd == MEGA_MBOXCMD_EXTPTHRU) { + + memcpy(cmd->sense_buffer, + epthru->reqsensearea, 14); + + cmd->result = (DRIVER_SENSE << 24) | + (DID_OK << 16) | + (CHECK_CONDITION << 1); + } else { + cmd->sense_buffer[0] = 0x70; + cmd->sense_buffer[2] = ABORTED_COMMAND; + cmd->result |= (CHECK_CONDITION << 1); + } + } + break; + + case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e. + SCSI_STATUS_BUSY */ + cmd->result |= (DID_BUS_BUSY << 16) | status; + break; + + default: +#if MEGA_HAVE_CLUSTERING + /* + * If TEST_UNIT_READY fails, we know + * MEGA_RESERVATION_STATUS failed + */ + if( cmd->cmnd[0] == TEST_UNIT_READY ) { + cmd->result |= (DID_ERROR << 16) | + (RESERVATION_CONFLICT << 1); + } + else + /* + * Error code returned is 1 if Reserve or Release + * failed or the input parameter is invalid + */ + if( status == 1 && + (cmd->cmnd[0] == RESERVE || + cmd->cmnd[0] == RELEASE) ) { + + cmd->result |= (DID_ERROR << 16) | + (RESERVATION_CONFLICT << 1); + } + else +#endif + cmd->result |= (DID_BAD_TARGET << 16)|status; + } + + /* + * Only free SCBs for the commands coming down from the + * mid-layer, not for which were issued internally + * + * For internal command, restore the status returned by the + * firmware so that user can interpret it. + */ + if( cmdid == CMDID_INT_CMDS ) { /* internal command */ + cmd->result = status; + + /* + * Remove the internal command from the pending list + */ + list_del_init(&scb->list); + scb->state = SCB_FREE; + } + else { + mega_free_scb(adapter, scb); + } + + /* + * Call the mid-layer callback for this command + */ + cmd->scsi_done(cmd); + } +} + + +/* + * Free a SCB structure + * Note: We assume the scsi commands associated with this scb is not free yet. 
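+ *
+ * For reference, a condensed view of the firmware-status handling in
+ * mega_cmd_done() above; the helper name is made up for this sketch and
+ * does not exist in the driver (sense data is additionally copied from
+ * the passthru structure in the 0x02 case, and clustering builds treat
+ * TEST_UNIT_READY/RESERVE/RELEASE failures as reservation conflicts):
+ *
+ *   static inline u32 mega_fw_to_scsi_result(u8 fw_status)
+ *   {
+ *       switch (fw_status) {
+ *       case 0x00: return DID_OK << 16;
+ *       case 0x02: return (DRIVER_SENSE << 24) | (DID_OK << 16) |
+ *                         (CHECK_CONDITION << 1);
+ *       case 0x08: return (DID_BUS_BUSY << 16) | fw_status;
+ *       default:   return (DID_BAD_TARGET << 16) | fw_status;
+ *       }
+ *   }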
+ */ +static void +mega_free_scb(adapter_t *adapter, scb_t *scb) +{ + switch( scb->dma_type ) { + + case MEGA_DMA_TYPE_NONE: + break; + + case MEGA_BULK_DATA: + pci_unmap_page(adapter->host->pci_dev, scb->dma_h_bulkdata, + scb->cmd->request_bufflen, scb->dma_direction); + + if( scb->dma_direction == PCI_DMA_FROMDEVICE ) { + pci_dma_sync_single(adapter->host->pci_dev, + scb->dma_h_bulkdata, + scb->cmd->request_bufflen, + PCI_DMA_FROMDEVICE); + } + + break; + + case MEGA_SGLIST: + pci_unmap_sg(adapter->host->pci_dev, scb->cmd->request_buffer, + scb->cmd->use_sg, scb->dma_direction); + + if( scb->dma_direction == PCI_DMA_FROMDEVICE ) { + pci_dma_sync_sg(adapter->host->pci_dev, + scb->cmd->request_buffer, + scb->cmd->use_sg, PCI_DMA_FROMDEVICE); + } + + break; + + default: + break; + } + + /* + * Remove from the pending list + */ + list_del_init(&scb->list); + + /* Link the scb back into free list */ + scb->state = SCB_FREE; + scb->cmd = NULL; + + list_add(&scb->list, &adapter->free_list); +} + + +/* + * Wait until the controller's mailbox is available + */ +static inline int +mega_busywait_mbox (adapter_t *adapter) +{ + if (adapter->mbox->busy) + return __mega_busywait_mbox(adapter); + return 0; +} + +static int +__mega_busywait_mbox (adapter_t *adapter) +{ + volatile mbox_t *mbox = adapter->mbox; + long counter; + + for (counter = 0; counter < 10000; counter++) { + if (!mbox->busy) + return 0; + udelay(100); yield(); + } + return -1; /* give up after 1 second */ +} + +/* + * Copies data to SGLIST + * Note: For 64 bit cards, we need a minimum of one SG element for read/write + */ +static int +mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) +{ + struct scatterlist *sgl; + struct page *page; + unsigned long offset; + Scsi_Cmnd *cmd; + int sgcnt; + int idx; + + cmd = scb->cmd; + + /* Scatter-gather not used */ + if( !cmd->use_sg ) { + + page = virt_to_page(cmd->request_buffer); + + offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK); + + scb->dma_h_bulkdata = pci_map_page(adapter->host->pci_dev, + page, offset, + cmd->request_bufflen, + scb->dma_direction); + scb->dma_type = MEGA_BULK_DATA; + + /* + * We need to handle special 64-bit commands that need a + * minimum of 1 SG + */ + if( adapter->has_64bit_addr ) { + scb->sgl64[0].address = scb->dma_h_bulkdata; + scb->sgl64[0].length = cmd->request_bufflen; + *buf = (u32)scb->sgl_dma_addr; + *len = (u32)cmd->request_bufflen; + return 1; + } + else { + *buf = (u32)scb->dma_h_bulkdata; + *len = (u32)cmd->request_bufflen; + } + + if( scb->dma_direction == PCI_DMA_TODEVICE ) { + pci_dma_sync_single(adapter->host->pci_dev, + scb->dma_h_bulkdata, + cmd->request_bufflen, + PCI_DMA_TODEVICE); + } + + return 0; + } + + sgl = (struct scatterlist *)cmd->request_buffer; + + /* + * Copy Scatter-Gather list info into controller structure. 
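+ *
+ * (Sketch of the DMA API pairing, for reference only -- not part of this
+ * patch.  The mappings created here are the ones torn down later in
+ * mega_free_scb():
+ *
+ *   MEGA_BULK_DATA : pci_map_page()  ...  pci_unmap_page()
+ *   MEGA_SGLIST    : pci_map_sg()    ...  pci_unmap_sg()
+ *
+ * with pci_dma_sync_single()/pci_dma_sync_sg() run on the TODEVICE side
+ * at submission and on the FROMDEVICE side at completion.)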
+ * + * The number of sg elements returned must not exceed our limit + */ + sgcnt = pci_map_sg(adapter->host->pci_dev, sgl, cmd->use_sg, + scb->dma_direction); + + scb->dma_type = MEGA_SGLIST; + + if( sgcnt > adapter->sglen ) BUG(); + + for( idx = 0; idx < sgcnt; idx++, sgl++ ) { + + if( adapter->has_64bit_addr ) { + scb->sgl64[idx].address = sg_dma_address(sgl); + scb->sgl64[idx].length = sg_dma_len(sgl); + } + else { + scb->sgl[idx].address = sg_dma_address(sgl); + scb->sgl[idx].length = sg_dma_len(sgl); + } + } + + /* Reset pointer and length fields */ + *buf = scb->sgl_dma_addr; + + /* + * For passthru command, dataxferlen must be set, even for commands + * with a sg list + */ + *len = (u32)cmd->request_bufflen; + + if( scb->dma_direction == PCI_DMA_TODEVICE ) { + pci_dma_sync_sg(adapter->host->pci_dev, sgl, cmd->use_sg, + PCI_DMA_TODEVICE); + } + + /* Return count of SG requests */ + return sgcnt; +} + + +/* + * mega_8_to_40ld() + * + * takes all info in AdapterInquiry structure and puts it into ProductInfo and + * Enquiry3 structures for later use + */ +static void +mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3, + mega_product_info *product_info) +{ + int i; + + product_info->max_commands = inquiry->adapter_info.max_commands; + enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate; + product_info->nchannels = inquiry->adapter_info.nchannels; + + for (i = 0; i < 4; i++) { + product_info->fw_version[i] = + inquiry->adapter_info.fw_version[i]; + + product_info->bios_version[i] = + inquiry->adapter_info.bios_version[i]; + } + enquiry3->cache_flush_interval = + inquiry->adapter_info.cache_flush_interval; + + product_info->dram_size = inquiry->adapter_info.dram_size; + + enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv; + + for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) { + enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i]; + enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i]; + enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i]; + } + + for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++) + enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i]; +} + + +/* + * Release the controller's resources + */ +static int +megaraid_release(struct Scsi_Host *host) +{ + adapter_t *adapter; + mbox_t *mbox; + u_char raw_mbox[16]; +#ifdef CONFIG_PROC_FS + char buf[12] = { 0 }; +#endif + + adapter = (adapter_t *)host->hostdata; + mbox = (mbox_t *)raw_mbox; + + printk(KERN_NOTICE "megaraid: being unloaded..."); + + /* Flush adapter cache */ + memset(mbox, 0, 16); + raw_mbox[0] = FLUSH_ADAPTER; + + irq_disable(adapter); + free_irq(adapter->host->irq, adapter); + + /* Issue a blocking (interrupts disabled) command to the card */ + issue_scb_block(adapter, raw_mbox); + + /* Flush disks cache */ + memset(mbox, 0, 16); + raw_mbox[0] = FLUSH_SYSTEM; + + /* Issue a blocking (interrupts disabled) command to the card */ + issue_scb_block(adapter, raw_mbox); + + + /* Free our resources */ + if( adapter->flag & BOARD_MEMMAP ) { + iounmap((void *)adapter->base); + release_mem_region(adapter->host->base, 128); + } + else { + release_region(adapter->base, 16); + } + + mega_free_sgl(adapter); + +#ifdef CONFIG_PROC_FS + if( adapter->controller_proc_dir_entry ) { + remove_proc_entry("stat", adapter->controller_proc_dir_entry); + remove_proc_entry("config", + adapter->controller_proc_dir_entry); + remove_proc_entry("mailbox", + adapter->controller_proc_dir_entry); +#if MEGA_HAVE_ENH_PROC + remove_proc_entry("rebuild-rate", + adapter->controller_proc_dir_entry); + 
remove_proc_entry("battery-status", + adapter->controller_proc_dir_entry); + + remove_proc_entry("diskdrives-ch0", + adapter->controller_proc_dir_entry); + remove_proc_entry("diskdrives-ch1", + adapter->controller_proc_dir_entry); + remove_proc_entry("diskdrives-ch2", + adapter->controller_proc_dir_entry); + remove_proc_entry("diskdrives-ch3", + adapter->controller_proc_dir_entry); + + remove_proc_entry("raiddrives-0-9", + adapter->controller_proc_dir_entry); + remove_proc_entry("raiddrives-10-19", + adapter->controller_proc_dir_entry); + remove_proc_entry("raiddrives-20-29", + adapter->controller_proc_dir_entry); + remove_proc_entry("raiddrives-30-39", + adapter->controller_proc_dir_entry); +#endif + + sprintf(buf, "hba%d", adapter->host->host_no); + remove_proc_entry(buf, mega_proc_dir_entry); + } +#endif + + pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE, + adapter->mega_buffer, adapter->buf_dma_handle); + kfree(adapter->scb_list); + pci_free_consistent(adapter->dev, sizeof(mbox64_t), + (void *)adapter->una_mbox64, adapter->una_mbox64_dma); + + kfree(adapter->ipdev); + + hba_count--; + + if( hba_count == 0 ) { + + /* + * Unregister the character device interface to the driver. + */ + unregister_chrdev(major, "megadev"); + + unregister_reboot_notifier(&mega_notifier); + +#ifdef CONFIG_PROC_FS + if( adapter->controller_proc_dir_entry ) { + remove_proc_entry ("megaraid", &proc_root); + } +#endif + + } + + /* + * Release the controller memory. A word of warning this frees + * hostdata and that includes adapter-> so be careful what you + * dereference beyond this point + */ + scsi_unregister(host); + + + printk("ok.\n"); + + return 0; +} + +static inline void +mega_free_sgl(adapter_t *adapter) +{ + scb_t *scb; + int i; + + for(i = 0; i < adapter->max_cmds; i++) { + + scb = &adapter->scb_list[i]; + + if( scb->sgl64 ) { + pci_free_consistent(adapter->dev, + sizeof(mega_sgl64) * adapter->sglen, + scb->sgl64, + scb->sgl_dma_addr); + + scb->sgl64 = NULL; + } + + if( scb->pthru ) { + pci_free_consistent(adapter->dev, sizeof(mega_passthru), + scb->pthru, scb->pthru_dma_addr); + + scb->pthru = NULL; + } + + if( scb->epthru ) { + pci_free_consistent(adapter->dev, + sizeof(mega_ext_passthru), + scb->epthru, scb->epthru_dma_addr); + + scb->epthru = NULL; + } + + } +} + + +/* + * Get information about the card/driver + */ +const char * +megaraid_info(struct Scsi_Host *host) +{ + static char buffer[512]; + adapter_t *adapter; + + adapter = (adapter_t *)host->hostdata; + + sprintf (buffer, + "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns", + adapter->fw_version, adapter->product_info.max_commands, + adapter->host->max_id, adapter->host->max_channel, + adapter->host->max_lun); + return buffer; +} + +/* shouldn't be used, but included for completeness */ +static int +megaraid_command (Scsi_Cmnd *cmd) +{ + printk(KERN_WARNING + "megaraid critcal error: synchronous interface is not implemented.\n"); + + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + + return 1; +} + + +/** + * megaraid_abort - abort the scsi command + * @scp - command to be aborted + * + * Abort a previous SCSI request. Only commands on the pending list can be + * aborted. All the commands issued to the F/W must complete. 
+ */
+static int
+megaraid_abort(Scsi_Cmnd *scp)
+{
+ adapter_t *adapter;
+ struct list_head *pos, *next;
+ scb_t *scb;
+ long iter;
+ int rval = SUCCESS;
+
+ adapter = (adapter_t *)scp->host->hostdata;
+
+ ASSERT( spin_is_locked(adapter->host_lock) );
+
+ printk("megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n",
+ scp->serial_number, scp->cmnd[0], scp->channel, scp->target,
+ scp->lun);
+
+
+ list_for_each_safe( pos, next, &adapter->pending_list ) {
+
+ scb = list_entry(pos, scb_t, list);
+
+ if( scb->cmd == scp ) { /* Found command */
+
+ scb->state |= SCB_ABORT;
+
+ /*
+ * Check if this command was never issued. If this is
+ * the case, take it off from the pending list and
+ * complete.
+ */
+ if( !(scb->state & SCB_ISSUED) ) {
+
+ printk(KERN_WARNING
+ "megaraid: %ld:%d, driver owner.\n",
+ scp->serial_number, scb->idx);
+
+ scp->result = (DID_ABORT << 16);
+
+ mega_free_scb(adapter, scb);
+
+ scp->scsi_done(scp);
+
+ break;
+ }
+ }
+ }
+
+ /*
+ * By this time, either all commands are completed or aborted by
+ * mid-layer. Do not return until all the commands are actually
+ * completed by the firmware
+ */
+ iter = 0;
+ while( atomic_read(&adapter->pend_cmds) > 0 ) {
+ /*
+ * Perform the ack sequence, since interrupts are not
+ * available right now!
+ */
+ if( adapter->flag & BOARD_MEMMAP ) {
+ megaraid_memmbox_ack_sequence(adapter);
+ }
+ else {
+ megaraid_iombox_ack_sequence(adapter);
+ }
+
+ /*
+ * print a message once every second only
+ */
+ if( !(iter % 1000) ) {
+ printk(
+ "megaraid: Waiting for %d commands to flush: iter:%ld\n",
+ atomic_read(&adapter->pend_cmds), iter);
+ }
+
+ if( iter++ < MBOX_ABORT_SLEEP*1000 ) {
+ mdelay(1);
+ }
+ else {
+ printk(KERN_WARNING
+ "megaraid: critical hardware error!\n");
+
+ rval = FAILED;
+
+ break;
+ }
+ }
+
+ if( rval == SUCCESS ) {
+ printk(KERN_INFO
+ "megaraid: abort sequence successfully complete.\n");
+ }
+
+ return rval;
+}
+
+
+static int
+megaraid_reset(Scsi_Cmnd *cmd)
+{
+ adapter_t *adapter;
+ megacmd_t mc;
+ long iter;
+ int rval = SUCCESS;
+
+ adapter = (adapter_t *)cmd->host->hostdata;
+
+ ASSERT( spin_is_locked(adapter->host_lock) );
+
+ printk("megaraid: reset-%ld cmd=%x <c=%d t=%d l=%d>\n",
+ cmd->serial_number, cmd->cmnd[0], cmd->channel, cmd->target,
+ cmd->lun);
+
+
+#if MEGA_HAVE_CLUSTERING
+ mc.cmd = MEGA_CLUSTER_CMD;
+ mc.opcode = MEGA_RESET_RESERVATIONS;
+
+ spin_unlock_irq(adapter->host_lock);
+ if( mega_internal_command(adapter, LOCK_INT, &mc, NULL) != 0 ) {
+ printk(KERN_WARNING
+ "megaraid: reservation reset failed.\n");
+ }
+ else {
+ printk(KERN_INFO "megaraid: reservation reset.\n");
+ }
+ spin_lock_irq(adapter->host_lock);
+#endif
+
+ /*
+ * Do not return until all the commands are actually completed by the
+ * firmware
+ */
+ iter = 0;
+ while( atomic_read(&adapter->pend_cmds) > 0 ) {
+ /*
+ * Perform the ack sequence, since interrupts are not
+ * available right now!
+ */
+ if( adapter->flag & BOARD_MEMMAP ) {
+ megaraid_memmbox_ack_sequence(adapter);
+ }
+ else {
+ megaraid_iombox_ack_sequence(adapter);
+ }
+
+ /*
+ * print a message once every second only
+ */
+ if( !(iter % 1000) ) {
+ printk(
+ "megaraid: Waiting for %d commands to flush: iter:%ld\n",
+ atomic_read(&adapter->pend_cmds), iter);
+ }
+
+ if( iter++ < MBOX_RESET_SLEEP*1000 ) {
+ mdelay(1);
+ }
+ else {
+ printk(KERN_WARNING
+ "megaraid: critical hardware error!\n");
+
+ rval = FAILED;
+
+ break;
+ }
+ }
+
+ if( rval == SUCCESS ) {
+ printk(KERN_INFO
+ "megaraid: reset sequence successfully complete.\n");
+ }
+
+ return rval;
+}
+
+
+#ifdef CONFIG_PROC_FS
+/* Following code handles /proc fs */
+
+#define CREATE_READ_PROC(string, func) create_proc_read_entry(string, \
+ S_IRUSR | S_IFREG, \
+ controller_proc_dir_entry, \
+ func, adapter)
+
+/**
+ * mega_create_proc_entry()
+ * @index - index in soft state array
+ * @parent - parent node for this /proc entry
+ *
+ * Creates /proc entries for our controllers.
+ */
+static void
+mega_create_proc_entry(int index, struct proc_dir_entry *parent)
+{
+ struct proc_dir_entry *controller_proc_dir_entry = NULL;
+ u8 string[64] = { 0 };
+ adapter_t *adapter = hba_soft_state[index];
+
+ sprintf(string, "hba%d", adapter->host->host_no);
+
+ controller_proc_dir_entry =
+ adapter->controller_proc_dir_entry = proc_mkdir(string, parent);
+
+ if(!controller_proc_dir_entry) {
+ printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n");
+ return;
+ }
+ adapter->proc_read = CREATE_READ_PROC("config", proc_read_config);
+ adapter->proc_stat = CREATE_READ_PROC("stat", proc_read_stat);
+ adapter->proc_mbox = CREATE_READ_PROC("mailbox", proc_read_mbox);
+#if MEGA_HAVE_ENH_PROC
+ adapter->proc_rr = CREATE_READ_PROC("rebuild-rate", proc_rebuild_rate);
+ adapter->proc_battery = CREATE_READ_PROC("battery-status",
+ proc_battery);
+
+ /*
+ * Display each physical drive on its channel
+ */
+ adapter->proc_pdrvstat[0] = CREATE_READ_PROC("diskdrives-ch0",
+ proc_pdrv_ch0);
+ adapter->proc_pdrvstat[1] = CREATE_READ_PROC("diskdrives-ch1",
+ proc_pdrv_ch1);
+ adapter->proc_pdrvstat[2] = CREATE_READ_PROC("diskdrives-ch2",
+ proc_pdrv_ch2);
+ adapter->proc_pdrvstat[3] = CREATE_READ_PROC("diskdrives-ch3",
+ proc_pdrv_ch3);
+
+ /*
+ * Display a set of up to 10 logical drives through each of the
+ * following /proc entries
+ */
+ adapter->proc_rdrvstat[0] = CREATE_READ_PROC("raiddrives-0-9",
+ proc_rdrv_10);
+ adapter->proc_rdrvstat[1] = CREATE_READ_PROC("raiddrives-10-19",
+ proc_rdrv_20);
+ adapter->proc_rdrvstat[2] = CREATE_READ_PROC("raiddrives-20-29",
+ proc_rdrv_30);
+ adapter->proc_rdrvstat[3] = CREATE_READ_PROC("raiddrives-30-39",
+ proc_rdrv_40);
+#endif
+}
+
+
+/**
+ * proc_read_config()
+ * @page - buffer to write the data in
+ * @start - where the actual data has been written in page
+ * @offset - same meaning as the read system call
+ * @count - same meaning as the read system call
+ * @eof - set if no more data needs to be returned
+ * @data - pointer to our soft state
+ *
+ * Display configuration information about the controller.
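+ *
+ * Every handler registered through CREATE_READ_PROC above follows the
+ * 2.4 read_proc contract.  As a sketch (expansion shown for reference
+ * only, it is not spelled out elsewhere in this patch), the "config"
+ * entry amounts to:
+ *
+ *   create_proc_read_entry("config", S_IRUSR | S_IFREG,
+ *           controller_proc_dir_entry, proc_read_config, adapter);
+ *
+ * The handler fills 'page', sets *eof = 1 because the whole report fits
+ * in a single page, returns the number of bytes written, and gets the
+ * adapter soft state back through 'data'.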
+ */ +static int +proc_read_config(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + + adapter_t *adapter = (adapter_t *)data; + int len = 0; + + len += sprintf(page+len, "%s", MEGARAID_VERSION); + + if(adapter->product_info.product_name[0]) + len += sprintf(page+len, "%s\n", + adapter->product_info.product_name); + + len += sprintf(page+len, "Controller Type: "); + + if( adapter->flag & BOARD_MEMMAP ) { + len += sprintf(page+len, + "438/466/467/471/493/518/520/531/532\n"); + } + else { + len += sprintf(page+len, + "418/428/434\n"); + } + + if(adapter->flag & BOARD_40LD) { + len += sprintf(page+len, + "Controller Supports 40 Logical Drives\n"); + } + + if(adapter->flag & BOARD_64BIT) { + len += sprintf(page+len, + "Controller capable of 64-bit memory addressing\n"); + } + if( adapter->has_64bit_addr ) { + len += sprintf(page+len, + "Controller using 64-bit memory addressing\n"); + } + else { + len += sprintf(page+len, + "Controller is not using 64-bit memory addressing\n"); + } + + len += sprintf(page+len, "Base = %08lx, Irq = %d, ", adapter->base, + adapter->host->irq); + + len += sprintf(page+len, "Logical Drives = %d, Channels = %d\n", + adapter->numldrv, adapter->product_info.nchannels); + + len += sprintf(page+len, "Version =%s:%s, DRAM = %dMb\n", + adapter->fw_version, adapter->bios_version, + adapter->product_info.dram_size); + + len += sprintf(page+len, + "Controller Queue Depth = %d, Driver Queue Depth = %d\n", + adapter->product_info.max_commands, adapter->max_cmds); + + len += sprintf(page+len, "support_ext_cdb = %d\n", + adapter->support_ext_cdb); + len += sprintf(page+len, "support_random_del = %d\n", + adapter->support_random_del); + len += sprintf(page+len, "boot_ldrv_enabled = %d\n", + adapter->boot_ldrv_enabled); + len += sprintf(page+len, "boot_ldrv = %d\n", + adapter->boot_ldrv); + len += sprintf(page+len, "boot_pdrv_enabled = %d\n", + adapter->boot_pdrv_enabled); + len += sprintf(page+len, "boot_pdrv_ch = %d\n", + adapter->boot_pdrv_ch); + len += sprintf(page+len, "boot_pdrv_tgt = %d\n", + adapter->boot_pdrv_tgt); + len += sprintf(page+len, "quiescent = %d\n", + atomic_read(&adapter->quiescent)); + len += sprintf(page+len, "has_cluster = %d\n", + adapter->has_cluster); + + len += sprintf(page+len, "\nModule Parameters:\n"); + len += sprintf(page+len, "max_cmd_per_lun = %d\n", + max_cmd_per_lun); + len += sprintf(page+len, "max_sectors_per_io = %d\n", + max_sectors_per_io); + + *eof = 1; + + return len; +} + + + +/** + * proc_read_stat() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Diaplay statistical information about the I/O activity. 
+ */ +static int +proc_read_stat(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter; + int len; + int i; + + i = 0; /* avoid compilation warnings */ + len = 0; + adapter = (adapter_t *)data; + + len = sprintf(page, "Statistical Information for this controller\n"); + len += sprintf(page+len, "pend_cmds = %d\n", + atomic_read(&adapter->pend_cmds)); +#if MEGA_HAVE_STATS + for(i = 0; i < adapter->numldrv; i++) { + len += sprintf(page+len, "Logical Drive %d:\n", i); + + len += sprintf(page+len, + "\tReads Issued = %lu, Writes Issued = %lu\n", + adapter->nreads[i], adapter->nwrites[i]); + + len += sprintf(page+len, + "\tSectors Read = %lu, Sectors Written = %lu\n", + adapter->nreadblocks[i], adapter->nwriteblocks[i]); + + len += sprintf(page+len, + "\tRead errors = %lu, Write errors = %lu\n\n", + adapter->rd_errors[i], adapter->wr_errors[i]); + } +#else + len += sprintf(page+len, + "IO and error counters not compiled in driver.\n"); +#endif + + *eof = 1; + + return len; +} + + +/** + * proc_read_mbox() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display mailbox information for the last command issued. This information + * is good for debugging. + */ +static int +proc_read_mbox(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + + adapter_t *adapter = (adapter_t *)data; + volatile mbox_t *mbox = adapter->mbox; + int len = 0; + + len = sprintf(page, "Contents of Mail Box Structure\n"); + len += sprintf(page+len, " Fw Command = 0x%02x\n", mbox->cmd); + len += sprintf(page+len, " Cmd Sequence = 0x%02x\n", mbox->cmdid); + len += sprintf(page+len, " No of Sectors= %04d\n", mbox->numsectors); + len += sprintf(page+len, " LBA = 0x%02x\n", mbox->lba); + len += sprintf(page+len, " DTA = 0x%08x\n", mbox->xferaddr); + len += sprintf(page+len, " Logical Drive= 0x%02x\n", mbox->logdrv); + len += sprintf(page+len, " No of SG Elmt= 0x%02x\n", + mbox->numsgelements); + len += sprintf(page+len, " Busy = %01x\n", mbox->busy); + len += sprintf(page+len, " Status = 0x%02x\n", mbox->status); + + *eof = 1; + + return len; +} + + +/** + * proc_rebuild_rate() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display current rebuild rate + */ +static int +proc_rebuild_rate(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + dma_addr_t dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + int len = 0; + + pdev = adapter->ipdev; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { + *eof = 1; + return len; + } + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + + len = sprintf(page, "Adapter inquiry failed.\n"); + + printk(KERN_WARNING "megaraid: inquiry failed.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + *eof = 1; + + return len; + } + + if( adapter->flag & BOARD_40LD ) { + len = sprintf(page, "Rebuild Rate: [%d%%]\n", + ((mega_inquiry3 *)inquiry)->rebuild_rate); + } + else { + len = sprintf(page, "Rebuild Rate: [%d%%]\n", + ((mraid_ext_inquiry 
*) + inquiry)->raid_inq.adapter_info.rebuild_rate); + } + + + mega_free_inquiry(inquiry, dma_handle, pdev); + + *eof = 1; + + return len; +} + + +/** + * proc_battery() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the battery module on the controller. + */ +static int +proc_battery(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + dma_addr_t dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + u8 battery_status = 0; + char str[256]; + int len = 0; + + pdev = adapter->ipdev; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { + *eof = 1; + return len; + } + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + + len = sprintf(page, "Adapter inquiry failed.\n"); + + printk(KERN_WARNING "megaraid: inquiry failed.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + *eof = 1; + + return len; + } + + if( adapter->flag & BOARD_40LD ) { + battery_status = ((mega_inquiry3 *)inquiry)->battery_status; + } + else { + battery_status = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.adapter_info.battery_status; + } + + /* + * Decode the battery status + */ + sprintf(str, "Battery Status:[%d]", battery_status); + + if(battery_status == MEGA_BATT_CHARGE_DONE) + strcat(str, " Charge Done"); + + if(battery_status & MEGA_BATT_MODULE_MISSING) + strcat(str, " Module Missing"); + + if(battery_status & MEGA_BATT_LOW_VOLTAGE) + strcat(str, " Low Voltage"); + + if(battery_status & MEGA_BATT_TEMP_HIGH) + strcat(str, " Temperature High"); + + if(battery_status & MEGA_BATT_PACK_MISSING) + strcat(str, " Pack Missing"); + + if(battery_status & MEGA_BATT_CHARGE_INPROG) + strcat(str, " Charge In-progress"); + + if(battery_status & MEGA_BATT_CHARGE_FAIL) + strcat(str, " Charge Fail"); + + if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) + strcat(str, " Cycles Exceeded"); + + len = sprintf(page, "%s\n", str); + + + mega_free_inquiry(inquiry, dma_handle, pdev); + + *eof = 1; + + return len; +} + + +/** + * proc_pdrv_ch0() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the physical drives on physical channel 0. + */ +static int +proc_pdrv_ch0(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_pdrv(adapter, page, 0)); +} + + +/** + * proc_pdrv_ch1() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the physical drives on physical channel 1. 
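+ *
+ * For reference, the battery_status flags decoded by proc_battery()
+ * above could also be driven from a table like the sketch below
+ * (illustrative only; the driver keeps the strcat chain shown there):
+ *
+ *   static const struct { u8 bit; const char *txt; } batt_bits[] = {
+ *       { MEGA_BATT_MODULE_MISSING,  " Module Missing"     },
+ *       { MEGA_BATT_LOW_VOLTAGE,     " Low Voltage"        },
+ *       { MEGA_BATT_TEMP_HIGH,       " Temperature High"   },
+ *       { MEGA_BATT_PACK_MISSING,    " Pack Missing"       },
+ *       { MEGA_BATT_CHARGE_INPROG,   " Charge In-progress" },
+ *       { MEGA_BATT_CHARGE_FAIL,     " Charge Fail"        },
+ *       { MEGA_BATT_CYCLES_EXCEEDED, " Cycles Exceeded"    },
+ *   };
+ *
+ * MEGA_BATT_CHARGE_DONE is still compared for equality rather than
+ * tested as a bit, exactly as above.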
+ */ +static int +proc_pdrv_ch1(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_pdrv(adapter, page, 1)); +} + + +/** + * proc_pdrv_ch2() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the physical drives on physical channel 2. + */ +static int +proc_pdrv_ch2(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_pdrv(adapter, page, 2)); +} + + +/** + * proc_pdrv_ch3() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display information about the physical drives on physical channel 3. + */ +static int +proc_pdrv_ch3(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_pdrv(adapter, page, 3)); +} + + +/** + * proc_pdrv() + * @page - buffer to write the data in + * @adapter - pointer to our soft state + * + * Display information about the physical drives. + */ +static int +proc_pdrv(adapter_t *adapter, char *page, int channel) +{ + dma_addr_t dma_handle; + char *scsi_inq; + dma_addr_t scsi_inq_dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + u8 *pdrv_state; + u8 state; + int tgt; + int max_channels; + int len = 0; + char str[80]; + int i; + + pdev = adapter->ipdev; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { + return len; + } + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + + len = sprintf(page, "Adapter inquiry failed.\n"); + + printk(KERN_WARNING "megaraid: inquiry failed.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + return len; + } + + + scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle); + + if( scsi_inq == NULL ) { + len = sprintf(page, "memory not available for scsi inq.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + return len; + } + + if( adapter->flag & BOARD_40LD ) { + pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; + } + else { + pdrv_state = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.pdrv_info.pdrv_state; + } + + max_channels = adapter->product_info.nchannels; + + if( channel >= max_channels ) return 0; + + for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { + + i = channel*16 + tgt; + + state = *(pdrv_state + i); + + switch( state & 0x0F ) { + + case PDRV_ONLINE: + sprintf(str, + "Channel:%2d Id:%2d State: Online", + channel, tgt); + break; + + case PDRV_FAILED: + sprintf(str, + "Channel:%2d Id:%2d State: Failed", + channel, tgt); + break; + + case PDRV_RBLD: + sprintf(str, + "Channel:%2d Id:%2d State: Rebuild", + channel, tgt); + break; + + case PDRV_HOTSPARE: + sprintf(str, + "Channel:%2d Id:%2d State: Hot spare", + channel, tgt); + break; + + default: + sprintf(str, + "Channel:%2d Id:%2d State: Un-configured", + channel, tgt); + break; + + } + + /* + * This interface displays inquiries for disk drives + * only. 
Inquries for logical drives and non-disk + * devices are available through /proc/scsi/scsi + */ + memset(scsi_inq, 0, 256); + if( mega_internal_dev_inquiry(adapter, channel, tgt, + scsi_inq_dma_handle) || + (scsi_inq[0] & 0x1F) != TYPE_DISK ) { + continue; + } + + /* + * Check for overflow. We print less than 240 + * characters for inquiry + */ + if( (len + 240) >= PAGE_SIZE ) break; + + len += sprintf(page+len, "%s.\n", str); + + len += mega_print_inquiry(page+len, scsi_inq); + } + + pci_free_consistent(pdev, 256, scsi_inq, scsi_inq_dma_handle); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + return len; +} + + +/* + * Display scsi inquiry + */ +static int +mega_print_inquiry(char *page, char *scsi_inq) +{ + int len = 0; + int i; + + len = sprintf(page, " Vendor: "); + for( i = 8; i < 16; i++ ) { + len += sprintf(page+len, "%c", scsi_inq[i]); + } + + len += sprintf(page+len, " Model: "); + + for( i = 16; i < 32; i++ ) { + len += sprintf(page+len, "%c", scsi_inq[i]); + } + + len += sprintf(page+len, " Rev: "); + + for( i = 32; i < 36; i++ ) { + len += sprintf(page+len, "%c", scsi_inq[i]); + } + + len += sprintf(page+len, "\n"); + + i = scsi_inq[0] & 0x1f; + + len += sprintf(page+len, " Type: %s ", + i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : + "Unknown "); + + len += sprintf(page+len, + " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); + + if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) + len += sprintf(page+len, " CCS\n"); + else + len += sprintf(page+len, "\n"); + + return len; +} + + +/** + * proc_rdrv_10() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_rdrv_10(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_rdrv(adapter, page, 0, 9)); +} + + +/** + * proc_rdrv_20() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_rdrv_20(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_rdrv(adapter, page, 10, 19)); +} + + +/** + * proc_rdrv_30() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display real time information about the logical drives 0 through 9. 
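+ *
+ * For reference (not part of this patch): the fixed offsets used by
+ * mega_print_inquiry() above are the standard INQUIRY data fields,
+ *
+ *   byte  0, bits 0-4  : peripheral device type
+ *   byte  2, bits 0-2  : ANSI SCSI revision
+ *   bytes 8..15        : vendor identification
+ *   bytes 16..31       : product identification
+ *   bytes 32..35       : product revision level
+ *
+ * which is why only TYPE_DISK devices (low five bits of byte 0) make it
+ * into the diskdrives-chN listings.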
+ */ +static int +proc_rdrv_30(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_rdrv(adapter, page, 20, 29)); +} + + +/** + * proc_rdrv_40() + * @page - buffer to write the data in + * @start - where the actual data has been written in page + * @offset - same meaning as the read system call + * @count - same meaning as the read system call + * @eof - set if no more data needs to be returned + * @data - pointer to our soft state + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_rdrv_40(char *page, char **start, off_t offset, int count, int *eof, + void *data) +{ + adapter_t *adapter = (adapter_t *)data; + + *eof = 1; + + return (proc_rdrv(adapter, page, 30, 39)); +} + + +/** + * proc_rdrv() + * @page - buffer to write the data in + * @adapter - pointer to our soft state + * @start - starting logical drive to display + * @end - ending logical drive to display + * + * We do not print the inquiry information since its already available through + * /proc/scsi/scsi interface + */ +static int +proc_rdrv(adapter_t *adapter, char *page, int start, int end ) +{ + dma_addr_t dma_handle; + logdrv_param *lparam; + megacmd_t mc; + char *disk_array; + dma_addr_t disk_array_dma_handle; + caddr_t inquiry; + struct pci_dev *pdev; + u8 *rdrv_state; + int num_ldrv; + u32 array_sz; + int len = 0; + int i; + u8 span8_flag = 1; + + pdev = adapter->ipdev; + + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { + return len; + } + + if( mega_adapinq(adapter, dma_handle) != 0 ) { + + len = sprintf(page, "Adapter inquiry failed.\n"); + + printk(KERN_WARNING "megaraid: inquiry failed.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + return len; + } + + memset(&mc, 0, sizeof(megacmd_t)); + + if( adapter->flag & BOARD_40LD ) { + + array_sz = sizeof(disk_array_40ld); + + rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state; + + num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv; + } + else { + /* + * 'array_sz' is either the size of diskarray_span4_t or the + * size of disk_array_span8_t. We use span8_t's size because + * it is bigger of the two. 
+ */ + array_sz = sizeof( diskarray_span8_t ); + + rdrv_state = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.logdrv_info.ldrv_state; + + num_ldrv = ((mraid_ext_inquiry *)inquiry)-> + raid_inq.logdrv_info.num_ldrv; + } + + disk_array = pci_alloc_consistent(pdev, array_sz, + &disk_array_dma_handle); + + if( disk_array == NULL ) { + len = sprintf(page, "memory not available.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + return len; + } + + mc.xferaddr = (u32)disk_array_dma_handle; + + if( adapter->flag & BOARD_40LD ) { + mc.cmd = FC_NEW_CONFIG; + mc.opcode = OP_DCMD_READ_CONFIG; + + if( mega_internal_command(adapter, LOCK_INT, &mc, NULL) ) { + + len = sprintf(page, "40LD read config failed.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + pci_free_consistent(pdev, array_sz, disk_array, + disk_array_dma_handle); + + return len; + } + + } + else { + /* + * Try 8-Span "read config" command + */ + mc.cmd = NEW_READ_CONFIG_8LD; + + if( mega_internal_command(adapter, LOCK_INT, &mc, NULL) ) { + + /* + * 8-Span command failed; try 4-Span command + */ + span8_flag = 0; + mc.cmd = READ_CONFIG_8LD; + + if( mega_internal_command(adapter, LOCK_INT, &mc, + NULL) ){ + + len = sprintf(page, + "8LD read config failed.\n"); + + mega_free_inquiry(inquiry, dma_handle, pdev); + + pci_free_consistent(pdev, array_sz, + disk_array, + disk_array_dma_handle); + + return len; + } + } + } + + for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { + + if( adapter->flag & BOARD_40LD ) { + lparam = + &((disk_array_40ld *)disk_array)->ldrv[i].lparam; + } + else { + if( span8_flag ) { + lparam = (logdrv_param*) &((diskarray_span8_t*) + (disk_array))->log_drv[i]; + } + else { + lparam = (logdrv_param*) &((diskarray_span4_t*) + (disk_array))->log_drv[i]; + } + } + + /* + * Check for overflow. We print less than 240 characters for + * information about each logical drive. + */ + if( (len + 240) >= PAGE_SIZE ) break; + + len += sprintf(page+len, "Logical drive:%2d:, ", i); + + switch( rdrv_state[i] & 0x0F ) { + case RDRV_OFFLINE: + len += sprintf(page+len, "state: offline"); + break; + + case RDRV_DEGRADED: + len += sprintf(page+len, "state: degraded"); + break; + + case RDRV_OPTIMAL: + len += sprintf(page+len, "state: optimal"); + break; + + case RDRV_DELETED: + len += sprintf(page+len, "state: deleted"); + break; + + default: + len += sprintf(page+len, "state: unknown"); + break; + } + + /* + * Check if check consistency or initialization is going on + * for this logical drive. + */ + if( (rdrv_state[i] & 0xF0) == 0x20 ) { + len += sprintf(page+len, + ", check-consistency in progress"); + } + else if( (rdrv_state[i] & 0xF0) == 0x10 ) { + len += sprintf(page+len, + ", initialization in progress"); + } + + len += sprintf(page+len, "\n"); + + len += sprintf(page+len, "Span depth:%3d, ", + lparam->span_depth); + + len += sprintf(page+len, "RAID level:%3d, ", + lparam->level); + + len += sprintf(page+len, "Stripe size:%3d, ", + lparam->stripe_sz ? 
lparam->stripe_sz/2: 128); + + len += sprintf(page+len, "Row size:%3d\n", + lparam->row_size); + + + len += sprintf(page+len, "Read Policy: "); + + switch(lparam->read_ahead) { + + case NO_READ_AHEAD: + len += sprintf(page+len, "No read ahead, "); + break; + + case READ_AHEAD: + len += sprintf(page+len, "Read ahead, "); + break; + + case ADAP_READ_AHEAD: + len += sprintf(page+len, "Adaptive, "); + break; + + } + + len += sprintf(page+len, "Write Policy: "); + + switch(lparam->write_mode) { + + case WRMODE_WRITE_THRU: + len += sprintf(page+len, "Write thru, "); + break; + + case WRMODE_WRITE_BACK: + len += sprintf(page+len, "Write back, "); + break; + } + + len += sprintf(page+len, "Cache Policy: "); + + switch(lparam->direct_io) { + + case CACHED_IO: + len += sprintf(page+len, "Cached IO\n\n"); + break; + + case DIRECT_IO: + len += sprintf(page+len, "Direct IO\n\n"); + break; + } + } + + mega_free_inquiry(inquiry, dma_handle, pdev); + + pci_free_consistent(pdev, array_sz, disk_array, + disk_array_dma_handle); + + return len; +} + +#endif + + +/** + * megaraid_biosparam() + * @disk + * @dev + * @geom + * + * Return the disk geometry for a particular disk + * Input: + * Disk *disk - Disk geometry + * kdev_t dev - Device node + * int *geom - Returns geometry fields + * geom[0] = heads + * geom[1] = sectors + * geom[2] = cylinders + */ +static int +megaraid_biosparam(Disk *disk, kdev_t dev, int *geom) +{ + int heads, sectors, cylinders; + adapter_t *adapter; + + /* Get pointer to host config structure */ + adapter = (adapter_t *)disk->device->host->hostdata; + + if (IS_RAID_CH(adapter, disk->device->channel)) { + /* Default heads (64) & sectors (32) */ + heads = 64; + sectors = 32; + cylinders = disk->capacity / (heads * sectors); + + /* + * Handle extended translation size for logical drives + * > 1Gb + */ + if (disk->capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = disk->capacity / (heads * sectors); + } + + /* return result */ + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + } + else { + if( !mega_partsize(disk, dev, geom) ) + return 0; + + printk(KERN_WARNING + "megaraid: invalid partition on this disk on channel %d\n", + disk->device->channel); + + /* Default heads (64) & sectors (32) */ + heads = 64; + sectors = 32; + cylinders = disk->capacity / (heads * sectors); + + /* Handle extended translation size for logical drives > 1Gb */ + if (disk->capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = disk->capacity / (heads * sectors); + } + + /* return result */ + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + } + + return 0; +} + +/* + * mega_partsize() + * @disk + * @geom + * + * Purpose : to determine the BIOS mapping used to create the partition + * table, storing the results (cyls, hds, and secs) in geom + * + * Note: Code is picked from scsicam.h + * + * Returns : -1 on failure, 0 on success. 
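+ *
+ * Worked example for the RAID-channel branch of megaraid_biosparam()
+ * above (not part of this patch): for an 8388608-sector (4 GB) logical
+ * drive,
+ *
+ *   capacity >= 0x200000 sectors (1 GB)  ->  heads = 255, sectors = 63
+ *   cylinders = 8388608 / (255 * 63)     =   522
+ *
+ * while drives below 1 GB keep the 64/32 mapping (cylinders =
+ * capacity / 2048).  Only non-RAID channels fall back to reading the
+ * partition table through mega_partsize() below.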
+ */ +static int +mega_partsize(Disk *disk, kdev_t dev, int *geom) +{ + struct buffer_head *bh; + struct partition *p, *largest = NULL; + int i, largest_cyl; + int heads, cyls, sectors; + int capacity = disk->capacity; + + int ma = MAJOR(dev); + int mi = (MINOR(dev) & ~0xf); + + int block = 1024; + + if (blksize_size[ma]) + block = blksize_size[ma][mi]; + + if (!(bh = bread(MKDEV(ma,mi), 0, block))) + return -1; + + if (*(unsigned short *)(bh->b_data + 510) == 0xAA55 ) { + + for (largest_cyl = -1, + p = (struct partition *)(0x1BE + bh->b_data), i = 0; + i < 4; ++i, ++p) { + + if (!p->sys_ind) continue; + + cyls = p->end_cyl + ((p->end_sector & 0xc0) << 2); + + if (cyls >= largest_cyl) { + largest_cyl = cyls; + largest = p; + } + } + } + + if (largest) { + heads = largest->end_head + 1; + sectors = largest->end_sector & 0x3f; + + if (!heads || !sectors) { + brelse(bh); + return -1; + } + + cyls = capacity/(heads * sectors); + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cyls; + + brelse(bh); + return 0; + } + + brelse(bh); + return -1; +} + + +/** + * megaraid_reboot_notify() + * @this - unused + * @code - shutdown code + * @unused - unused + * + * This routine will be called when the use has done a forced shutdown on the + * system. Flush the Adapter and disks cache. + */ +static int +megaraid_reboot_notify (struct notifier_block *this, unsigned long code, + void *unused) +{ + adapter_t *adapter; + struct Scsi_Host *host; + u8 raw_mbox[16]; + mbox_t *mbox; + int i; + + /* + * Flush the controller's cache irrespective of the codes coming down. + * SYS_DOWN, SYS_HALT, SYS_RESTART, SYS_POWER_OFF + */ + for( i = 0; i < hba_count; i++ ) { + printk(KERN_INFO "megaraid: flushing adapter %d..", i); + host = hba_soft_state[i]->host; + + adapter = (adapter_t *)host->hostdata; + mbox = (mbox_t *)raw_mbox; + + /* Flush adapter cache */ + memset(mbox, 0, 16); + raw_mbox[0] = FLUSH_ADAPTER; + + irq_disable(adapter); + free_irq(adapter->host->irq, adapter); + + /* + * Issue a blocking (interrupts disabled) command to + * the card + */ + issue_scb_block(adapter, raw_mbox); + + /* Flush disks cache */ + memset(mbox, 0, 16); + raw_mbox[0] = FLUSH_SYSTEM; + + issue_scb_block(adapter, raw_mbox); + + printk("Done.\n"); + + if( atomic_read(&adapter->pend_cmds) > 0 ) { + printk(KERN_WARNING "megaraid: pending commands!!\n"); + } + } + + /* + * Have a delibrate delay to make sure all the caches are + * actually flushed. + */ + printk(KERN_INFO "megaraid: cache flush delay: "); + for( i = 9; i >= 0; i-- ) { + printk("\b\b\b[%d]", i); + mdelay(1000); + } + printk("\b\b\b[done]\n"); + mdelay(1000); + + return NOTIFY_DONE; +} + +/** + * mega_init_scb() + * @adapter - pointer to our soft state + * + * Allocate memory for the various pointers in the scb structures: + * scatter-gather list pointer, passthru and extended passthru structure + * pointers. 
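+ *
+ * (Aside, with an assumed initializer: megaraid_reboot_notify() above is
+ * the callback behind the mega_notifier block that megaraid_release()
+ * unregisters.  The registration itself is outside this hunk, but in 2.4
+ * style it looks roughly like
+ *
+ *   static struct notifier_block mega_notifier = {
+ *       .notifier_call = megaraid_reboot_notify,
+ *   };
+ *   register_reboot_notifier(&mega_notifier);
+ *
+ * done once at driver initialisation.)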
+ */ +static int +mega_init_scb(adapter_t *adapter) +{ + scb_t *scb; + int i; + + for( i = 0; i < adapter->max_cmds; i++ ) { + + scb = &adapter->scb_list[i]; + + scb->sgl64 = NULL; + scb->sgl = NULL; + scb->pthru = NULL; + scb->epthru = NULL; + } + + for( i = 0; i < adapter->max_cmds; i++ ) { + + scb = &adapter->scb_list[i]; + + scb->idx = i; + + scb->sgl64 = pci_alloc_consistent(adapter->dev, + sizeof(mega_sgl64) * adapter->sglen, + &scb->sgl_dma_addr); + + scb->sgl = (mega_sglist *)scb->sgl64; + + if( !scb->sgl ) { + printk(KERN_WARNING "RAID: Can't allocate sglist.\n"); + mega_free_sgl(adapter); + return -1; + } + + scb->pthru = pci_alloc_consistent(adapter->dev, + sizeof(mega_passthru), + &scb->pthru_dma_addr); + + if( !scb->pthru ) { + printk(KERN_WARNING "RAID: Can't allocate passthru.\n"); + mega_free_sgl(adapter); + return -1; + } + + scb->epthru = pci_alloc_consistent(adapter->dev, + sizeof(mega_ext_passthru), + &scb->epthru_dma_addr); + + if( !scb->epthru ) { + printk(KERN_WARNING + "Can't allocate extended passthru.\n"); + mega_free_sgl(adapter); + return -1; + } + + + scb->dma_type = MEGA_DMA_TYPE_NONE; + + /* + * Link to free list + * lock not required since we are loading the driver, so no + * commands possible right now. + */ + scb->state = SCB_FREE; + scb->cmd = NULL; + list_add(&scb->list, &adapter->free_list); + } + + return 0; +} + + +/** + * megadev_open() + * @inode - unused + * @filep - unused + * + * Routines for the character/ioctl interface to the driver. Find out if this + * is a valid open. If yes, increment the module use count so that it cannot + * be unloaded. + */ +static int +megadev_open (struct inode *inode, struct file *filep) +{ + /* + * Only allow superuser to access private ioctl interface + */ + if( !capable(CAP_SYS_ADMIN) ) return -EACCES; + + MOD_INC_USE_COUNT; + return 0; +} + + +/** + * megadev_ioctl() + * @inode - Our device inode + * @filep - unused + * @cmd - ioctl command + * @arg - user buffer + * + * ioctl entry point for our private ioctl interface. We move the data in from + * the user space, prepare the command (if necessary, convert the old MIMD + * ioctl to new ioctl command), and issue a synchronous command to the + * controller. + */ +static int +megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, + unsigned long arg) +{ + adapter_t *adapter; + nitioctl_t uioc; + int adapno; + int rval; + mega_passthru *upthru; /* user address for passthru */ + mega_passthru *pthru; /* copy user passthru here */ + dma_addr_t pthru_dma_hndl; + void *data = NULL; /* data to be transferred */ + dma_addr_t data_dma_hndl; /* dma handle for data xfer area */ + megacmd_t mc; + megastat_t *ustats; + int num_ldrv; + u32 uxferaddr = 0; + struct pci_dev *pdev; + + ustats = NULL; /* avoid compilation warnings */ + num_ldrv = 0; + + /* + * Make sure only USCSICMD are issued through this interface. + * MIMD application would still fire different command. + */ + if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) { + return -EINVAL; + } + + /* + * Check and convert a possible MIMD command to NIT command. + * mega_m_to_n() copies the data from the user space, so we do not + * have to do it here. + * NOTE: We will need some user address to copyout the data, therefore + * the inteface layer will also provide us with the required user + * addresses. 
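+ *
+ * A sketch of the data movement for a regular passthru handled below
+ * (summary only, not additional code in this patch):
+ *
+ *   copy_from_user(pthru, upthru, sizeof(mega_passthru));
+ *   data = pci_alloc_consistent(pdev, pthru->dataxferlen, &data_dma_hndl);
+ *   copy_from_user(data, uxferaddr, pthru->dataxferlen);    (UIOC_WR only)
+ *   mega_internal_command(adapter, LOCK_INT, &mc, pthru);
+ *   copy_to_user(uxferaddr, data, pthru->dataxferlen);      (UIOC_RD only)
+ *   copy_to_user(upthru->reqsensearea, pthru->reqsensearea, 14);
+ *
+ * All DMA buffers come from pci_alloc_consistent() against adapter->ipdev
+ * so they stay below 4GB for the firmware.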
+ */ + memset(&uioc, 0, sizeof(nitioctl_t)); + if( (rval = mega_m_to_n( (void *)arg, &uioc)) != 0 ) + return rval; + + + switch( uioc.opcode ) { + + case GET_DRIVER_VER: + if( put_user(driver_ver, (u32 *)uioc.uioc_uaddr) ) + return (-EFAULT); + + break; + + case GET_N_ADAP: + if( put_user(hba_count, (u32 *)uioc.uioc_uaddr) ) + return (-EFAULT); + + /* + * Shucks. MIMD interface returns a positive value for number + * of adapters. TODO: Change it to return 0 when there is no + * applicatio using mimd interface. + */ + return hba_count; + + case GET_ADAP_INFO: + + /* + * Which adapter + */ + if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) + return (-ENODEV); + + if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno, + sizeof(struct mcontroller)) ) + return (-EFAULT); + break; + +#if MEGA_HAVE_STATS + + case GET_STATS: + /* + * Which adapter + */ + if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) + return (-ENODEV); + + adapter = hba_soft_state[adapno]; + + ustats = (megastat_t *)uioc.uioc_uaddr; + + if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) ) + return (-EFAULT); + + /* + * Check for the validity of the logical drive number + */ + if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL; + + if( copy_to_user(ustats->nreads, adapter->nreads, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->nwrites, adapter->nwrites, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->rd_errors, adapter->rd_errors, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + if( copy_to_user(ustats->wr_errors, adapter->wr_errors, + num_ldrv*sizeof(u32)) ) + return -EFAULT; + + return 0; + +#endif + case MBOX_CMD: + + /* + * Which adapter + */ + if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) + return (-ENODEV); + + adapter = hba_soft_state[adapno]; + + /* + * Deletion of logical drive is a special case. The adapter + * should be quiescent before this command is issued. + */ + if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV && + uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) { + + /* + * Do we support this feature + */ + if( !adapter->support_random_del ) { + printk(KERN_WARNING "megaraid: logdrv "); + printk("delete on non-supporting F/W.\n"); + + return (-EINVAL); + } + + rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] ); + + if( rval == 0 ) { + memset(&mc, 0, sizeof(megacmd_t)); + + mc.status = rval; + + rval = mega_n_to_m((void *)arg, &mc); + } + + return rval; + } + /* + * This interface only support the regular passthru commands. + * Reject extended passthru and 64-bit passthru + */ + if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || + uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { + + printk(KERN_WARNING "megaraid: rejected passthru.\n"); + + return (-EINVAL); + } + + /* + * For all internal commands, the buffer must be allocated in + * <4GB address range + */ + pdev = adapter->ipdev; + + /* Is it a passthru command or a DCMD */ + if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { + /* Passthru commands */ + + pthru = pci_alloc_consistent(pdev, + sizeof(mega_passthru), + &pthru_dma_hndl); + + if( pthru == NULL ) { + return (-ENOMEM); + } + + /* + * The user passthru structure + */ + upthru = (mega_passthru *)MBOX(uioc)->xferaddr; + + /* + * Copy in the user passthru here. 
+ */ + if( copy_from_user(pthru, (char *)upthru, + sizeof(mega_passthru)) ) { + + pci_free_consistent(pdev, + sizeof(mega_passthru), pthru, + pthru_dma_hndl); + + return (-EFAULT); + } + + /* + * Is there a data transfer + */ + if( pthru->dataxferlen ) { + data = pci_alloc_consistent(pdev, + pthru->dataxferlen, + &data_dma_hndl); + + if( data == NULL ) { + pci_free_consistent(pdev, + sizeof(mega_passthru), + pthru, + pthru_dma_hndl); + + return (-ENOMEM); + } + + /* + * Save the user address and point the kernel + * address at just allocated memory + */ + uxferaddr = pthru->dataxferaddr; + pthru->dataxferaddr = data_dma_hndl; + } + + + /* + * Is data coming down-stream + */ + if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { + /* + * Get the user data + */ + if( copy_from_user(data, (char *)uxferaddr, + pthru->dataxferlen) ) { + rval = (-EFAULT); + goto freemem_and_return; + } + } + + memset(&mc, 0, sizeof(megacmd_t)); + + mc.cmd = MEGA_MBOXCMD_PASSTHRU; + mc.xferaddr = (u32)pthru_dma_hndl; + + /* + * Issue the command + */ + mega_internal_command(adapter, LOCK_INT, &mc, pthru); + + rval = mega_n_to_m((void *)arg, &mc); + + if( rval ) goto freemem_and_return; + + + /* + * Is data going up-stream + */ + if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { + if( copy_to_user((char *)uxferaddr, data, + pthru->dataxferlen) ) { + rval = (-EFAULT); + } + } + + /* + * Send the request sense data also, irrespective of + * whether the user has asked for it or not. + */ + copy_to_user(upthru->reqsensearea, + pthru->reqsensearea, 14); + +freemem_and_return: + if( pthru->dataxferlen ) { + pci_free_consistent(pdev, + pthru->dataxferlen, data, + data_dma_hndl); + } + + pci_free_consistent(pdev, sizeof(mega_passthru), + pthru, pthru_dma_hndl); + + return rval; + } + else { + /* DCMD commands */ + + /* + * Is there a data transfer + */ + if( uioc.xferlen ) { + data = pci_alloc_consistent(pdev, + uioc.xferlen, &data_dma_hndl); + + if( data == NULL ) { + return (-ENOMEM); + } + + uxferaddr = MBOX(uioc)->xferaddr; + } + + /* + * Is data coming down-stream + */ + if( uioc.xferlen && (uioc.flags & UIOC_WR) ) { + /* + * Get the user data + */ + if( copy_from_user(data, (char *)uxferaddr, + uioc.xferlen) ) { + + pci_free_consistent(pdev, + uioc.xferlen, + data, data_dma_hndl); + + return (-EFAULT); + } + } + + memcpy(&mc, MBOX(uioc), sizeof(megacmd_t)); + + mc.xferaddr = (u32)data_dma_hndl; + + /* + * Issue the command + */ + mega_internal_command(adapter, LOCK_INT, &mc, NULL); + + rval = mega_n_to_m((void *)arg, &mc); + + if( rval ) { + if( uioc.xferlen ) { + pci_free_consistent(pdev, + uioc.xferlen, data, + data_dma_hndl); + } + + return rval; + } + + /* + * Is data going up-stream + */ + if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { + if( copy_to_user((char *)uxferaddr, data, + uioc.xferlen) ) { + + rval = (-EFAULT); + } + } + + if( uioc.xferlen ) { + pci_free_consistent(pdev, + uioc.xferlen, data, + data_dma_hndl); + } + + return rval; + } + + default: + return (-EINVAL); + } + + return 0; +} + +/** + * mega_m_to_n() + * @arg - user address + * @uioc - new ioctl structure + * + * A thin layer to convert older mimd interface ioctl structure to NIT ioctl + * structure + * + * Converts the older mimd ioctl structure to newer NIT structure + */ +static int +mega_m_to_n(void *arg, nitioctl_t *uioc) +{ + struct uioctl_t uioc_mimd; + char signature[8] = {0}; + u8 opcode; + u8 subopcode; + + + /* + * check is the application conforms to NIT. We do not have to do much + * in that case. 
+ * We exploit the fact that the signature is stored in the very + * begining of the structure. + */ + + if( copy_from_user(signature, (char *)arg, 7) ) + return (-EFAULT); + + if( memcmp(signature, "MEGANIT", 7) == 0 ) { + + /* + * NOTE NOTE: The nit ioctl is still under flux because of + * change of mailbox definition, in HPE. No applications yet + * use this interface and let's not have applications use this + * interface till the new specifitions are in place. + */ + return -EINVAL; +#if 0 + if( copy_from_user(uioc, (char *)arg, sizeof(nitioctl_t)) ) + return (-EFAULT); + return 0; +#endif + } + + /* + * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t + * + * Get the user ioctl structure + */ + if( copy_from_user(&uioc_mimd, (char *)arg, sizeof(struct uioctl_t)) ) + return (-EFAULT); + + + /* + * Get the opcode and subopcode for the commands + */ + opcode = uioc_mimd.ui.fcs.opcode; + subopcode = uioc_mimd.ui.fcs.subopcode; + + switch (opcode) { + case 0x82: + + switch (subopcode) { + + case MEGAIOC_QDRVRVER: /* Query driver version */ + uioc->opcode = GET_DRIVER_VER; + uioc->uioc_uaddr = uioc_mimd.data; + break; + + case MEGAIOC_QNADAP: /* Get # of adapters */ + uioc->opcode = GET_N_ADAP; + uioc->uioc_uaddr = uioc_mimd.data; + break; + + case MEGAIOC_QADAPINFO: /* Get adapter information */ + uioc->opcode = GET_ADAP_INFO; + uioc->adapno = uioc_mimd.ui.fcs.adapno; + uioc->uioc_uaddr = uioc_mimd.data; + break; + + default: + return(-EINVAL); + } + + break; + + + case 0x81: + + uioc->opcode = MBOX_CMD; + uioc->adapno = uioc_mimd.ui.fcs.adapno; + + memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); + + uioc->xferlen = uioc_mimd.ui.fcs.length; + + if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; + if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; + + break; + + case 0x80: + + uioc->opcode = MBOX_CMD; + uioc->adapno = uioc_mimd.ui.fcs.adapno; + + memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); + + /* + * Choose the xferlen bigger of input and output data + */ + uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ? + uioc_mimd.outlen : uioc_mimd.inlen; + + if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; + if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; + + break; + + default: + return (-EINVAL); + + } + + return 0; +} + +/* + * mega_n_to_m() + * @arg - user address + * @mc - mailbox command + * + * Updates the status information to the application, depending on application + * conforms to older mimd ioctl interface or newer NIT ioctl interface + */ +static int +mega_n_to_m(void *arg, megacmd_t *mc) +{ + nitioctl_t *uiocp; + megacmd_t *umc; + mega_passthru *upthru; + struct uioctl_t *uioc_mimd; + char signature[8] = {0}; + + /* + * check is the application conforms to NIT. 
+ */ + if( copy_from_user(signature, (char *)arg, 7) ) + return -EFAULT; + + if( memcmp(signature, "MEGANIT", 7) == 0 ) { + + uiocp = (nitioctl_t *)arg; + + if( put_user(mc->status, (u8 *)&MBOX_P(uiocp)->status) ) + return (-EFAULT); + + if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { + + umc = MBOX_P(uiocp); + + upthru = (mega_passthru *)umc->xferaddr; + + if( put_user(mc->status, (u8 *)&upthru->scsistatus) ) + return (-EFAULT); + } + } + else { + uioc_mimd = (struct uioctl_t *)arg; + + if( put_user(mc->status, (u8 *)&uioc_mimd->mbox[17]) ) + return (-EFAULT); + + if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { + + umc = (megacmd_t *)uioc_mimd->mbox; + + upthru = (mega_passthru *)umc->xferaddr; + + if( put_user(mc->status, (u8 *)&upthru->scsistatus) ) + return (-EFAULT); + } + } + + return 0; +} + + +static int +megadev_close (struct inode *inode, struct file *filep) +{ + MOD_DEC_USE_COUNT; + return 0; +} + + +/* + * MEGARAID 'FW' commands. + */ + +/** + * mega_is_bios_enabled() + * @adapter - pointer to our soft state + * + * issue command to find out if the BIOS is enabled for this controller + */ +static int +mega_is_bios_enabled(adapter_t *adapter) +{ + unsigned char raw_mbox[16]; + mbox_t *mbox; + int ret; + + mbox = (mbox_t *)raw_mbox; + + memset(mbox, 0, 16); + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox->xferaddr = (u32)adapter->buf_dma_handle; + + raw_mbox[0] = IS_BIOS_ENABLED; + raw_mbox[2] = GET_BIOS; + + + ret = issue_scb_block(adapter, raw_mbox); + + return *(char *)adapter->mega_buffer; +} + + +/** + * mega_enum_raid_scsi() + * @adapter - pointer to our soft state + * + * Find out what channels are RAID/SCSI. This information is used to + * differentiate the virtual channels and physical channels and to support + * ROMB feature and non-disk devices. + */ +static void +mega_enum_raid_scsi(adapter_t *adapter) +{ + unsigned char raw_mbox[16]; + mbox_t *mbox; + int i; + + mbox = (mbox_t *)raw_mbox; + + memset(mbox, 0, 16); + + /* + * issue command to find out what channels are raid/scsi + */ + raw_mbox[0] = CHNL_CLASS; + raw_mbox[2] = GET_CHNL_CLASS; + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox->xferaddr = (u32)adapter->buf_dma_handle; + + /* + * Non-ROMB firware fail this command, so all channels + * must be shown RAID + */ + adapter->mega_ch_class = 0xFF; + + if(!issue_scb_block(adapter, raw_mbox)) { + adapter->mega_ch_class = *((char *)adapter->mega_buffer); + + } + + for( i = 0; i < adapter->product_info.nchannels; i++ ) { + if( (adapter->mega_ch_class >> i) & 0x01 ) { + printk(KERN_INFO "megaraid: channel[%d] is raid.\n", + i); + } + else { + printk(KERN_INFO "megaraid: channel[%d] is scsi.\n", + i); + } + } + + return; +} + + +/** + * mega_get_boot_drv() + * @adapter - pointer to our soft state + * + * Find out which device is the boot device. Note, any logical drive or any + * phyical device (e.g., a CDROM) can be designated as a boot device. 
+ */ +static void +mega_get_boot_drv(adapter_t *adapter) +{ + struct private_bios_data *prv_bios_data; + unsigned char raw_mbox[16]; + mbox_t *mbox; + u16 cksum = 0; + u8 *cksum_p; + u8 boot_pdrv; + int i; + + mbox = (mbox_t *)raw_mbox; + + memset(mbox, 0, sizeof(raw_mbox)); + + raw_mbox[0] = BIOS_PVT_DATA; + raw_mbox[2] = GET_BIOS_PVT_DATA; + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox->xferaddr = (u32)adapter->buf_dma_handle; + + adapter->boot_ldrv_enabled = 0; + adapter->boot_ldrv = 0; + + adapter->boot_pdrv_enabled = 0; + adapter->boot_pdrv_ch = 0; + adapter->boot_pdrv_tgt = 0; + + if(issue_scb_block(adapter, raw_mbox) == 0) { + prv_bios_data = + (struct private_bios_data *)adapter->mega_buffer; + + cksum = 0; + cksum_p = (char *)prv_bios_data; + for (i = 0; i < 14; i++ ) { + cksum += (u16)(*cksum_p++); + } + + if (prv_bios_data->cksum == (u16)(0-cksum) ) { + + /* + * If MSB is set, a physical drive is set as boot + * device + */ + if( prv_bios_data->boot_drv & 0x80 ) { + adapter->boot_pdrv_enabled = 1; + boot_pdrv = prv_bios_data->boot_drv & 0x7F; + adapter->boot_pdrv_ch = boot_pdrv / 16; + adapter->boot_pdrv_tgt = boot_pdrv % 16; + } + else { + adapter->boot_ldrv_enabled = 1; + adapter->boot_ldrv = prv_bios_data->boot_drv; + } + } + } + +} + +/** + * mega_support_random_del() + * @adapter - pointer to our soft state + * + * Find out if this controller supports random deletion and addition of + * logical drives + */ +static int +mega_support_random_del(adapter_t *adapter) +{ + unsigned char raw_mbox[16]; + mbox_t *mbox; + int rval; + + mbox = (mbox_t *)raw_mbox; + + memset(mbox, 0, 16); + + /* + * issue command + */ + raw_mbox[0] = FC_DEL_LOGDRV; + raw_mbox[2] = OP_SUP_DEL_LOGDRV; + + rval = issue_scb_block(adapter, raw_mbox); + + return !rval; +} + + +/** + * mega_support_ext_cdb() + * @adapter - pointer to our soft state + * + * Find out if this firmware support cdblen > 10 + */ +static int +mega_support_ext_cdb(adapter_t *adapter) +{ + unsigned char raw_mbox[16]; + mbox_t *mbox; + int rval; + + mbox = (mbox_t *)raw_mbox; + + memset(mbox, 0, 16); + /* + * issue command to find out if controller supports extended CDBs. + */ + raw_mbox[0] = 0xA4; + raw_mbox[2] = 0x16; + + rval = issue_scb_block(adapter, raw_mbox); + + return !rval; +} + + +/** + * mega_del_logdrv() + * @adapter - pointer to our soft state + * @logdrv - logical drive to be deleted + * + * Delete the specified logical drive. It is the responsibility of the user + * app to let the OS know about this operation. + */ +static int +mega_del_logdrv(adapter_t *adapter, int logdrv) +{ + DECLARE_WAIT_QUEUE_HEAD(wq); + unsigned long flags; + scb_t *scb; + int rval; + + ASSERT( !spin_is_locked(adapter->host_lock) ); + + /* + * Stop sending commands to the controller, queue them internally. + * When deletion is complete, ISR will flush the queue. + */ + atomic_set(&adapter->quiescent, 1); + + /* + * Wait till all the issued commands are complete and there are no + * commands in the pending queue + */ + while( atomic_read(&adapter->pend_cmds) > 0 ) { + + sleep_on_timeout( &wq, 1*HZ ); /* sleep for 1s */ + } + + rval = mega_do_del_logdrv(adapter, logdrv); + + + spin_lock_irqsave(adapter->host_lock, flags); + + /* + * If delete operation was successful, add 0x80 to the logical drive + * ids for commands in the pending queue. 
+ */ + if (adapter->read_ldidmap) { + struct list_head *pos; + list_for_each(pos, &adapter->pending_list) { + scb = list_entry(pos, scb_t, list); + if (((mbox_t *)scb->raw_mbox)->logdrv < 0x80 ) + ((mbox_t *)scb->raw_mbox)->logdrv += 0x80 ; + } + } + + atomic_set(&adapter->quiescent, 0); + + mega_runpendq(adapter); + + spin_unlock_irqrestore(adapter->host_lock, flags); + + return rval; +} + + +static int +mega_do_del_logdrv(adapter_t *adapter, int logdrv) +{ + int rval; + u8 raw_mbox[16]; + + raw_mbox[0] = FC_DEL_LOGDRV; + raw_mbox[2] = OP_DEL_LOGDRV; + raw_mbox[3] = logdrv; + + /* Issue a blocking command to the card */ + rval = issue_scb_block(adapter, raw_mbox); + + /* log this event */ + if(rval) { + printk(KERN_WARNING "megaraid: Delete LD-%d failed.", logdrv); + return rval; + } + + /* + * After deleting first logical drive, the logical drives must be + * addressed by adding 0x80 to the logical drive id. + */ + adapter->read_ldidmap = 1; + + return rval; +} + + +/** + * mega_get_max_sgl() + * @adapter - pointer to our soft state + * + * Find out the maximum number of scatter-gather elements supported by this + * version of the firmware + */ +static void +mega_get_max_sgl(adapter_t *adapter) +{ + unsigned char raw_mbox[16]; + mbox_t *mbox; + + mbox = (mbox_t *)raw_mbox; + + memset(mbox, 0, sizeof(raw_mbox)); + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox->xferaddr = (u32)adapter->buf_dma_handle; + + raw_mbox[0] = MAIN_MISC_OPCODE; + raw_mbox[2] = GET_MAX_SG_SUPPORT; + + + if( issue_scb_block(adapter, raw_mbox) ) { + /* + * f/w does not support this command. Choose the default value + */ + adapter->sglen = MIN_SGLIST; + } + else { + adapter->sglen = *((char *)adapter->mega_buffer); + + /* + * Make sure this is not more than the resources we are + * planning to allocate + */ + if ( adapter->sglen > MAX_SGLIST ) + adapter->sglen = MAX_SGLIST; + } + + return; +} + + +/** + * mega_support_cluster() + * @adapter - pointer to our soft state + * + * Find out if this firmware support cluster calls. + */ +static int +mega_support_cluster(adapter_t *adapter) +{ + unsigned char raw_mbox[16]; + mbox_t *mbox; + + mbox = (mbox_t *)raw_mbox; + + memset(mbox, 0, sizeof(raw_mbox)); + + memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); + + mbox->xferaddr = (u32)adapter->buf_dma_handle; + + /* + * Try to get the initiator id. This command will succeed iff the + * clustering is available on this HBA. + */ + raw_mbox[0] = MEGA_GET_TARGET_ID; + + if( issue_scb_block(adapter, raw_mbox) == 0 ) { + + /* + * Cluster support available. Get the initiator target id. + * Tell our id to mid-layer too. + */ + adapter->this_id = *(u32 *)adapter->mega_buffer; + adapter->host->this_id = adapter->this_id; + + return 1; + } + + return 0; +} + + + +/** + * mega_get_ldrv_num() + * @adapter - pointer to our soft state + * @cmd - scsi mid layer command + * @channel - channel on the controller + * + * Calculate the logical drive number based on the information in scsi command + * and the channel number. 
+ */ +static inline int +mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel) +{ + int tgt; + int ldrv_num; + + tgt = cmd->target; + + if ( tgt > adapter->this_id ) + tgt--; /* we do not get inquires for initiator id */ + + ldrv_num = (channel * 15) + tgt; + + + /* + * If we have a logical drive with boot enabled, project it first + */ + if( adapter->boot_ldrv_enabled ) { + if( ldrv_num == 0 ) { + ldrv_num = adapter->boot_ldrv; + } + else { + if( ldrv_num <= adapter->boot_ldrv ) { + ldrv_num--; + } + } + } + + /* + * If "delete logical drive" feature is enabled on this controller. + * Do only if at least one delete logical drive operation was done. + * + * Also, after logical drive deletion, instead of logical drive number, + * the value returned should be 0x80+logical drive id. + * + * These is valid only for IO commands. + */ + + if (adapter->support_random_del && adapter->read_ldidmap ) + switch (cmd->cmnd[0]) { + case READ_6: /* fall through */ + case WRITE_6: /* fall through */ + case READ_10: /* fall through */ + case WRITE_10: + ldrv_num += 0x80; + } + + return ldrv_num; +} + + +/** + * mega_reorder_hosts() + * + * Hack: reorder the scsi hosts in mid-layer so that the controller with the + * boot device on it appears first in the list. + */ +static void +mega_reorder_hosts(void) +{ + struct Scsi_Host *shpnt; + struct Scsi_Host *shone; + struct Scsi_Host *shtwo; + adapter_t *boot_host; + int i; + + /* + * Find the (first) host which has it's BIOS enabled + */ + boot_host = NULL; + for (i = 0; i < MAX_CONTROLLERS; i++) { + if (mega_hbas[i].is_bios_enabled) { + boot_host = mega_hbas[i].hostdata_addr; + break; + } + } + + if (!boot_host) { + printk(KERN_NOTICE "megaraid: no BIOS enabled.\n"); + return; + } + + /* + * Traverse through the list of SCSI hosts for our HBA locations + */ + shone = shtwo = NULL; + for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) { + /* Is it one of ours? */ + for (i = 0; i < MAX_CONTROLLERS; i++) { + if ((adapter_t *) shpnt->hostdata == + mega_hbas[i].hostdata_addr) { + /* Does this one has BIOS enabled */ + if (mega_hbas[i].hostdata_addr == boot_host) { + + /* Are we first */ + if (!shtwo) /* Yes! */ + return; + else /* :-( */ + shone = shpnt; + } else { + if (!shtwo) { + /* were we here before? xchng + * first */ + shtwo = shpnt; + } + } + break; + } + } + /* + * Have we got the boot host and one which does not have the + * bios enabled. 
+ */ + if (shone && shtwo) + break; + } + if (shone && shtwo) { + mega_swap_hosts (shone, shtwo); + } + + return; +} + + +static void +mega_swap_hosts (struct Scsi_Host *shone, struct Scsi_Host *shtwo) +{ + struct Scsi_Host *prevtoshtwo; + struct Scsi_Host *prevtoshone; + struct Scsi_Host *save = NULL; + + /* Are these two nodes adjacent */ + if (shtwo->next == shone) { + + if (shtwo == scsi_hostlist && !shone->next) { + + /* just two nodes */ + scsi_hostlist = shone; + shone->next = shtwo; + shtwo->next = NULL; + } else if (shtwo == scsi_hostlist) { + /* first two nodes of the list */ + + scsi_hostlist = shone; + shtwo->next = shone->next; + scsi_hostlist->next = shtwo; + } else if (!shone->next) { + /* last two nodes of the list */ + + prevtoshtwo = scsi_hostlist; + + while (prevtoshtwo->next != shtwo) + prevtoshtwo = prevtoshtwo->next; + + prevtoshtwo->next = shone; + shone->next = shtwo; + shtwo->next = NULL; + } else { + prevtoshtwo = scsi_hostlist; + + while (prevtoshtwo->next != shtwo) + prevtoshtwo = prevtoshtwo->next; + + prevtoshtwo->next = shone; + shtwo->next = shone->next; + shone->next = shtwo; + } + + } else if (shtwo == scsi_hostlist && !shone->next) { + /* shtwo at head, shone at tail, not adjacent */ + + prevtoshone = scsi_hostlist; + + while (prevtoshone->next != shone) + prevtoshone = prevtoshone->next; + + scsi_hostlist = shone; + shone->next = shtwo->next; + prevtoshone->next = shtwo; + shtwo->next = NULL; + } else if (shtwo == scsi_hostlist && shone->next) { + /* shtwo at head, shone is not at tail */ + + prevtoshone = scsi_hostlist; + while (prevtoshone->next != shone) + prevtoshone = prevtoshone->next; + + scsi_hostlist = shone; + prevtoshone->next = shtwo; + save = shtwo->next; + shtwo->next = shone->next; + shone->next = save; + } else if (!shone->next) { + /* shtwo not at head, shone at tail */ + + prevtoshtwo = scsi_hostlist; + prevtoshone = scsi_hostlist; + + while (prevtoshtwo->next != shtwo) + prevtoshtwo = prevtoshtwo->next; + while (prevtoshone->next != shone) + prevtoshone = prevtoshone->next; + + prevtoshtwo->next = shone; + shone->next = shtwo->next; + prevtoshone->next = shtwo; + shtwo->next = NULL; + + } else { + prevtoshtwo = scsi_hostlist; + prevtoshone = scsi_hostlist; + save = NULL; + + while (prevtoshtwo->next != shtwo) + prevtoshtwo = prevtoshtwo->next; + while (prevtoshone->next != shone) + prevtoshone = prevtoshone->next; + + prevtoshtwo->next = shone; + save = shone->next; + shone->next = shtwo->next; + prevtoshone->next = shtwo; + shtwo->next = save; + } + return; +} + + + +#ifdef CONFIG_PROC_FS +/** + * mega_adapinq() + * @adapter - pointer to our soft state + * @dma_handle - DMA address of the buffer + * + * Issue internal comamnds while interrupts are available. + * We only issue direct mailbox commands from within the driver. ioctl() + * interface using these routines can issue passthru commands. 
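+ *
+ * A minimal usage sketch (assumptions: caller is in process context and the
+ * adapter's own pci_dev is a suitable device for the coherent allocation):
+ *
+ *	dma_addr_t dma_handle;
+ *	caddr_t inquiry;
+ *
+ *	inquiry = mega_allocate_inquiry(&dma_handle, adapter->dev);
+ *	if (inquiry != NULL) {
+ *		if (mega_adapinq(adapter, dma_handle) == 0)
+ *			... parse the returned enquiry data ...
+ *		mega_free_inquiry(inquiry, dma_handle, adapter->dev);
+ *	}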
+ */ +static int +mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) +{ + megacmd_t mc; + + memset(&mc, 0, sizeof(megacmd_t)); + + if( adapter->flag & BOARD_40LD ) { + mc.cmd = FC_NEW_CONFIG; + mc.opcode = NC_SUBOP_ENQUIRY3; + mc.subopcode = ENQ3_GET_SOLICITED_FULL; + } + else { + mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; + } + + mc.xferaddr = (u32)dma_handle; + + if ( mega_internal_command(adapter, LOCK_INT, &mc, NULL) != 0 ) { + return -1; + } + + return 0; +} + + +/** + * mega_allocate_inquiry() + * @dma_handle - handle returned for dma address + * @pdev - handle to pci device + * + * allocates memory for inquiry structure + */ +static inline caddr_t +mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev) +{ + return pci_alloc_consistent(pdev, sizeof(mega_inquiry3), dma_handle); +} + + +static inline void +mega_free_inquiry(caddr_t inquiry, dma_addr_t dma_handle, struct pci_dev *pdev) +{ + pci_free_consistent(pdev, sizeof(mega_inquiry3), inquiry, dma_handle); +} + + +/** mega_internal_dev_inquiry() + * @adapter - pointer to our soft state + * @ch - channel for this device + * @tgt - ID of this device + * @buf_dma_handle - DMA address of the buffer + * + * Issue the scsi inquiry for the specified device. + */ +static int +mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, + dma_addr_t buf_dma_handle) +{ + mega_passthru *pthru; + dma_addr_t pthru_dma_handle; + megacmd_t mc; + int rval; + struct pci_dev *pdev; + + + /* + * For all internal commands, the buffer must be allocated in <4GB + * address range + */ + pdev = adapter->ipdev; + + pthru = pci_alloc_consistent(pdev, sizeof(mega_passthru), + &pthru_dma_handle); + + if( pthru == NULL ) { + return -1; + } + + pthru->timeout = 2; + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 0; + + pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; + + pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; + + pthru->cdblen = 6; + + pthru->cdb[0] = INQUIRY; + pthru->cdb[1] = 0; + pthru->cdb[2] = 0; + pthru->cdb[3] = 0; + pthru->cdb[4] = 255; + pthru->cdb[5] = 0; + + + pthru->dataxferaddr = (u32)buf_dma_handle; + pthru->dataxferlen = 256; + + memset(&mc, 0, sizeof(megacmd_t)); + + mc.cmd = MEGA_MBOXCMD_PASSTHRU; + mc.xferaddr = (u32)pthru_dma_handle; + + rval = mega_internal_command(adapter, LOCK_INT, &mc, pthru); + + pci_free_consistent(pdev, sizeof(mega_passthru), pthru, + pthru_dma_handle); + + return rval; +} +#endif // #ifdef CONFIG_PROC_FS + + +/** + * mega_internal_command() + * @adapter - pointer to our soft state + * @ls - the scope of the exclusion lock. + * @mc - the mailbox command + * @pthru - Passthru structure for DCDB commands + * + * Issue the internal commands in interrupt mode. + * The last argument is the address of the passthru structure if the command + * to be fired is a passthru command + * + * lockscope specifies whether the caller has already acquired the lock. Of + * course, the caller must know which lock we are talking about. + * + * Note: parameter 'pthru' is null for non-passthru commands. + */ +static int +mega_internal_command(adapter_t *adapter, lockscope_t ls, megacmd_t *mc, + mega_passthru *pthru ) +{ + Scsi_Cmnd *scmd; + unsigned long flags = 0; + scb_t *scb; + int rval; + + /* + * The internal commands share one command id and hence are + * serialized. This is so because we want to reserve maximum number of + * available command ids for the I/O commands. 
+ */ + down(&adapter->int_mtx); + + scb = &adapter->int_scb; + memset(scb, 0, sizeof(scb_t)); + + scmd = &adapter->int_scmd; + memset(scmd, 0, sizeof(Scsi_Cmnd)); + + scmd->host = adapter->host; + scmd->buffer = (void *)scb; + scmd->cmnd[0] = MEGA_INTERNAL_CMD; + + scb->state |= SCB_ACTIVE; + scb->cmd = scmd; + + memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); + + /* + * Is it a passthru command + */ + if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { + + scb->pthru = pthru; + } + + scb->idx = CMDID_INT_CMDS; + + scmd->state = 0; + + /* + * Get the lock only if the caller has not acquired it already + */ + if( ls == LOCK_INT ) spin_lock_irqsave(adapter->host_lock, flags); + + megaraid_queue(scmd, mega_internal_done); + + if( ls == LOCK_INT ) spin_unlock_irqrestore(adapter->host_lock, flags); + + /* + * Wait till this command finishes. Do not use + * wait_event_interruptible(). It causes panic if CTRL-C is hit when + * dumping e.g., physical disk information through /proc interface. + * Catching the return value should solve the issue but for now keep + * the call non-interruptible. + */ +#if 0 + wait_event_interruptible(adapter->int_waitq, scmd->state); +#endif + wait_event(adapter->int_waitq, scmd->state); + + rval = scmd->result; + mc->status = scmd->result; + + /* + * Print a debug message for all failed commands. Applications can use + * this information. + */ + if( scmd->result && trace_level ) { + printk("megaraid: cmd [%x, %x, %x] status:[%x]\n", + mc->cmd, mc->opcode, mc->subopcode, scmd->result); + } + + up(&adapter->int_mtx); + + return rval; +} + + +/** + * mega_internal_done() + * @scmd - internal scsi command + * + * Callback routine for internal commands. + */ +static void +mega_internal_done(Scsi_Cmnd *scmd) +{ + adapter_t *adapter; + + adapter = (adapter_t *)scmd->host->hostdata; + + scmd->state = 1; /* thread waiting for its command to complete */ + + /* + * See comment in mega_internal_command() routine for + * wait_event_interruptible() + */ +#if 0 + wake_up_interruptible(&adapter->int_waitq); +#endif + wake_up(&adapter->int_waitq); + +} + +static Scsi_Host_Template driver_template = MEGARAID; + +#include "scsi_module.c" + +/* vi: set ts=8 sw=8 tw=78: */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/megaraid2.h linux.22-ac2/drivers/scsi/megaraid2.h --- linux.vanilla/drivers/scsi/megaraid2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/scsi/megaraid2.h 2003-09-01 13:54:30.000000000 +0100 @@ -0,0 +1,1187 @@ +#ifndef __MEGARAID_H__ +#define __MEGARAID_H__ + +#include +#include + + +#define MEGARAID_VERSION \ + "v2.00.7 (Release Date: Fri Aug 1 11:01:11 EDT 2003)\n" + +/* + * Driver features - change the values to enable or disable features in the + * driver. + */ + +/* + * Comand coalescing - This feature allows the driver to be able to combine + * two or more commands and issue as one command in order to boost I/O + * performance. Useful if the nature of the I/O is sequential. It is not very + * useful for random natured I/Os. + */ +#define MEGA_HAVE_COALESCING 0 + +/* + * Clustering support - Set this flag if you are planning to use the + * clustering services provided by the megaraid controllers and planning to + * setup a cluster + */ +#define MEGA_HAVE_CLUSTERING 1 + +/* + * Driver statistics - Set this flag if you are interested in statics about + * number of I/O completed on each logical drive and how many interrupts + * generated. 
If enabled, this information is available through /proc + * interface and through the private ioctl. Setting this flag has a + * performance penalty. + */ +#define MEGA_HAVE_STATS 0 + +/* + * Enhanced /proc interface - This feature will allow you to have a more + * detailed /proc interface for megaraid driver. E.g., a real time update of + * the status of the logical drives, battery status, physical drives etc. + */ +#define MEGA_HAVE_ENH_PROC 1 + +#define MAX_DEV_TYPE 32 + +#ifndef PCI_VENDOR_ID_LSI_LOGIC +#define PCI_VENDOR_ID_LSI_LOGIC 0x1000 +#endif + +#ifndef PCI_VENDOR_ID_AMI +#define PCI_VENDOR_ID_AMI 0x101E +#endif + +#ifndef PCI_VENDOR_ID_DELL +#define PCI_VENDOR_ID_DELL 0x1028 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef PCI_DEVICE_ID_AMI_MEGARAID +#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010 +#endif + +#ifndef PCI_DEVICE_ID_AMI_MEGARAID2 +#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060 +#endif + +#ifndef PCI_DEVICE_ID_AMI_MEGARAID3 +#define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960 +#endif + +#define PCI_DEVICE_ID_DISCOVERY 0x000E +#define PCI_DEVICE_ID_PERC4_DI 0x000F +#define PCI_DEVICE_ID_PERC4_QC_VERDE 0x0407 + +/* Sub-System Vendor IDs */ +#define AMI_SUBSYS_VID 0x101E +#define DELL_SUBSYS_VID 0x1028 +#define HP_SUBSYS_VID 0x103C +#define LSI_SUBSYS_VID 0x1000 +#define INTEL_SUBSYS_VID 0x8086 + +#define HBA_SIGNATURE 0x3344 +#define HBA_SIGNATURE_471 0xCCCC +#define HBA_SIGNATURE_64BIT 0x0299 + +#define MBOX_BUSY_WAIT 10 /* wait for up to 10 usec for + mailbox to be free */ +#define DEFAULT_INITIATOR_ID 7 + +#define MAX_SGLIST 64 /* max supported in f/w */ +#define MIN_SGLIST 26 /* guaranteed to support these many */ +#define MAX_COMMANDS 126 +#define CMDID_INT_CMDS MAX_COMMANDS+1 /* make sure CMDID_INT_CMDS + is less than max commands + supported by any f/w */ + +#define MAX_CDB_LEN 10 +#define MAX_EXT_CDB_LEN 16 /* we support cdb length up to 16 */ + +#define DEF_CMD_PER_LUN 63 +#define MAX_CMD_PER_LUN MAX_COMMANDS +#define MAX_FIRMWARE_STATUS 46 +#define MAX_XFER_PER_CMD (64*1024) +#define MAX_SECTORS_PER_IO 128 + +#define MAX_LOGICAL_DRIVES_40LD 40 +#define FC_MAX_PHYSICAL_DEVICES 256 +#define MAX_LOGICAL_DRIVES_8LD 8 +#define MAX_CHANNELS 5 +#define MAX_TARGET 15 +#define MAX_PHYSICAL_DRIVES MAX_CHANNELS*MAX_TARGET +#define MAX_ROW_SIZE_40LD 32 +#define MAX_ROW_SIZE_8LD 8 +#define MAX_SPAN_DEPTH 8 + +#define NVIRT_CHAN 4 /* # of virtual channels to represent + up to 60 logical drives */ + +#define MEGARAID \ +{ \ + .name = "MegaRAID", \ + .proc_name = "megaraid", \ + .detect = megaraid_detect, \ + .release = megaraid_release, \ + .info = megaraid_info, \ + .command = megaraid_command, \ + .queuecommand = megaraid_queue, \ + .bios_param = megaraid_biosparam, \ + .max_sectors = MAX_SECTORS_PER_IO, \ + .can_queue = MAX_COMMANDS, \ + .this_id = DEFAULT_INITIATOR_ID, \ + .sg_tablesize = MAX_SGLIST, \ + .cmd_per_lun = DEF_CMD_PER_LUN, \ + .present = 0, \ + .unchecked_isa_dma = 0, \ + .use_clustering = ENABLE_CLUSTERING, \ + .use_new_eh_code = 1, \ + .eh_abort_handler = megaraid_abort, \ + .eh_device_reset_handler = megaraid_reset, \ + .eh_bus_reset_handler = megaraid_reset, \ + .eh_host_reset_handler = megaraid_reset, \ + .highmem_io = 1 \ +} + + + +typedef struct { + /* 0x0 */ u8 cmd; + /* 0x1 */ u8 cmdid; + /* 0x2 */ u16 numsectors; + /* 0x4 */ u32 lba; + /* 0x8 */ u32 xferaddr; + /* 0xC */ u8 logdrv; + /* 0xD */ u8 numsgelements; + /* 0xE */ u8 resvd; + /* 0xF */ volatile u8 busy; + /* 0x10 */ volatile u8 numstatus; + /* 0x11 */ volatile 
u8 status; + /* 0x12 */ volatile u8 completed[MAX_FIRMWARE_STATUS]; + volatile u8 poll; + volatile u8 ack; +} __attribute__ ((packed)) mbox_t; + +typedef struct { + u32 xfer_segment_lo; + u32 xfer_segment_hi; + mbox_t mbox; +} __attribute__ ((packed)) mbox64_t; + + +/* + * Passthru definitions + */ +#define MAX_REQ_SENSE_LEN 0x20 + +typedef struct { + u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */ + u8 ars:1; + u8 reserved:3; + u8 islogical:1; + u8 logdrv; /* if islogical == 1 */ + u8 channel; /* if islogical == 0 */ + u8 target; /* if islogical == 0 */ + u8 queuetag; /* unused */ + u8 queueaction; /* unused */ + u8 cdb[MAX_CDB_LEN]; + u8 cdblen; + u8 reqsenselen; + u8 reqsensearea[MAX_REQ_SENSE_LEN]; + u8 numsgelements; + u8 scsistatus; + u32 dataxferaddr; + u32 dataxferlen; +} __attribute__ ((packed)) mega_passthru; + + +/* + * Extended passthru: support CDB > 10 bytes + */ +typedef struct { + u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */ + u8 ars:1; + u8 rsvd1:1; + u8 cd_rom:1; + u8 rsvd2:1; + u8 islogical:1; + u8 logdrv; /* if islogical == 1 */ + u8 channel; /* if islogical == 0 */ + u8 target; /* if islogical == 0 */ + u8 queuetag; /* unused */ + u8 queueaction; /* unused */ + u8 cdblen; + u8 rsvd3; + u8 cdb[MAX_EXT_CDB_LEN]; + u8 numsgelements; + u8 status; + u8 reqsenselen; + u8 reqsensearea[MAX_REQ_SENSE_LEN]; + u8 rsvd4; + u32 dataxferaddr; + u32 dataxferlen; +} __attribute__ ((packed)) mega_ext_passthru; + +typedef struct { + u64 address; + u32 length; +} __attribute__ ((packed)) mega_sgl64; + +typedef struct { + u32 address; + u32 length; +} __attribute__ ((packed)) mega_sglist; + + +/* Queued command data */ +typedef struct { + int idx; + u32 state; + struct list_head list; + u8 raw_mbox[66]; + u32 dma_type; + u32 dma_direction; + + Scsi_Cmnd *cmd; + dma_addr_t dma_h_bulkdata; + dma_addr_t dma_h_sgdata; + + mega_sglist *sgl; + mega_sgl64 *sgl64; + dma_addr_t sgl_dma_addr; + + mega_passthru *pthru; + dma_addr_t pthru_dma_addr; + mega_ext_passthru *epthru; + dma_addr_t epthru_dma_addr; +} scb_t; + +/* + * Flags to follow the scb as it transitions between various stages + */ +#define SCB_FREE 0x0000 /* on the free list */ +#define SCB_ACTIVE 0x0001 /* off the free list */ +#define SCB_PENDQ 0x0002 /* on the pending queue */ +#define SCB_ISSUED 0x0004 /* issued - owner f/w */ +#define SCB_ABORT 0x0008 /* Got an abort for this one */ +#define SCB_RESET 0x0010 /* Got a reset for this one */ + +/* + * Utilities declare this strcture size as 1024 bytes. So more fields can + * be added in future. + */ +typedef struct { + u32 data_size; /* current size in bytes (not including resvd) */ + + u32 config_signature; + /* Current value is 0x00282008 + * 0x28=MAX_LOGICAL_DRIVES, + * 0x20=Number of stripes and + * 0x08=Number of spans */ + + u8 fw_version[16]; /* printable ASCI string */ + u8 bios_version[16]; /* printable ASCI string */ + u8 product_name[80]; /* printable ASCI string */ + + u8 max_commands; /* Max. 
concurrent commands supported */ + u8 nchannels; /* Number of SCSI Channels detected */ + u8 fc_loop_present; /* Number of Fibre Loops detected */ + u8 mem_type; /* EDO, FPM, SDRAM etc */ + + u32 signature; + u16 dram_size; /* In terms of MB */ + u16 subsysid; + + u16 subsysvid; + u8 notify_counters; + u8 pad1k[889]; /* 135 + 889 resvd = 1024 total size */ +} __attribute__ ((packed)) mega_product_info; + +struct notify { + u32 global_counter; /* Any change increments this counter */ + + u8 param_counter; /* Indicates any params changed */ + u8 param_id; /* Param modified - defined below */ + u16 param_val; /* New val of last param modified */ + + u8 write_config_counter; /* write config occurred */ + u8 write_config_rsvd[3]; + + u8 ldrv_op_counter; /* Indicates ldrv op started/completed */ + u8 ldrv_opid; /* ldrv num */ + u8 ldrv_opcmd; /* ldrv operation - defined below */ + u8 ldrv_opstatus; /* status of the operation */ + + u8 ldrv_state_counter; /* Indicates change of ldrv state */ + u8 ldrv_state_id; /* ldrv num */ + u8 ldrv_state_new; /* New state */ + u8 ldrv_state_old; /* old state */ + + u8 pdrv_state_counter; /* Indicates change of ldrv state */ + u8 pdrv_state_id; /* pdrv id */ + u8 pdrv_state_new; /* New state */ + u8 pdrv_state_old; /* old state */ + + u8 pdrv_fmt_counter; /* Indicates pdrv format started/over */ + u8 pdrv_fmt_id; /* pdrv id */ + u8 pdrv_fmt_val; /* format started/over */ + u8 pdrv_fmt_rsvd; + + u8 targ_xfer_counter; /* Indicates SCSI-2 Xfer rate change */ + u8 targ_xfer_id; /* pdrv Id */ + u8 targ_xfer_val; /* new Xfer params of last pdrv */ + u8 targ_xfer_rsvd; + + u8 fcloop_id_chg_counter; /* Indicates loopid changed */ + u8 fcloopid_pdrvid; /* pdrv id */ + u8 fcloop_id0; /* loopid on fc loop 0 */ + u8 fcloop_id1; /* loopid on fc loop 1 */ + + u8 fcloop_state_counter; /* Indicates loop state changed */ + u8 fcloop_state0; /* state of fc loop 0 */ + u8 fcloop_state1; /* state of fc loop 1 */ + u8 fcloop_state_rsvd; +} __attribute__ ((packed)); + +#define MAX_NOTIFY_SIZE 0x80 +#define CUR_NOTIFY_SIZE sizeof(struct notify) + +typedef struct { + u32 data_size; /* current size in bytes (not including resvd) */ + + struct notify notify; + + u8 notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE]; + + u8 rebuild_rate; /* Rebuild rate (0% - 100%) */ + u8 cache_flush_interval; /* In terms of Seconds */ + u8 sense_alert; + u8 drive_insert_count; /* drive insertion count */ + + u8 battery_status; + u8 num_ldrv; /* No. of Log Drives configured */ + u8 recon_state[MAX_LOGICAL_DRIVES_40LD / 8]; /* State of + reconstruct */ + u16 ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8]; /* logdrv + Status */ + + u32 ldrv_size[MAX_LOGICAL_DRIVES_40LD];/* Size of each log drv */ + u8 ldrv_prop[MAX_LOGICAL_DRIVES_40LD]; + u8 ldrv_state[MAX_LOGICAL_DRIVES_40LD];/* State of log drives */ + u8 pdrv_state[FC_MAX_PHYSICAL_DEVICES];/* State of phys drvs. 
*/ + u16 pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16]; + + u8 targ_xfer[80]; /* phys device transfer rate */ + u8 pad1k[263]; /* 761 + 263reserved = 1024 bytes total size */ +} __attribute__ ((packed)) mega_inquiry3; + + +/* Structures */ +typedef struct { + u8 max_commands; /* Max concurrent commands supported */ + u8 rebuild_rate; /* Rebuild rate - 0% thru 100% */ + u8 max_targ_per_chan; /* Max targ per channel */ + u8 nchannels; /* Number of channels on HBA */ + u8 fw_version[4]; /* Firmware version */ + u16 age_of_flash; /* Number of times FW has been flashed */ + u8 chip_set_value; /* Contents of 0xC0000832 */ + u8 dram_size; /* In MB */ + u8 cache_flush_interval; /* in seconds */ + u8 bios_version[4]; + u8 board_type; + u8 sense_alert; + u8 write_config_count; /* Increase with every configuration + change */ + u8 drive_inserted_count; /* Increase with every drive inserted + */ + u8 inserted_drive; /* Channel:Id of inserted drive */ + u8 battery_status; /* + * BIT 0: battery module missing + * BIT 1: VBAD + * BIT 2: temprature high + * BIT 3: battery pack missing + * BIT 4,5: + * 00 - charge complete + * 01 - fast charge in progress + * 10 - fast charge fail + * 11 - undefined + * Bit 6: counter > 1000 + * Bit 7: Undefined + */ + u8 dec_fault_bus_info; +} __attribute__ ((packed)) mega_adp_info; + + +typedef struct { + u8 num_ldrv; /* Number of logical drives configured */ + u8 rsvd[3]; + u32 ldrv_size[MAX_LOGICAL_DRIVES_8LD]; + u8 ldrv_prop[MAX_LOGICAL_DRIVES_8LD]; + u8 ldrv_state[MAX_LOGICAL_DRIVES_8LD]; +} __attribute__ ((packed)) mega_ldrv_info; + +typedef struct { + u8 pdrv_state[MAX_PHYSICAL_DRIVES]; + u8 rsvd; +} __attribute__ ((packed)) mega_pdrv_info; + +/* RAID inquiry: Mailbox command 0x05*/ +typedef struct { + mega_adp_info adapter_info; + mega_ldrv_info logdrv_info; + mega_pdrv_info pdrv_info; +} __attribute__ ((packed)) mraid_inquiry; + + +/* RAID extended inquiry: Mailbox command 0x04*/ +typedef struct { + mraid_inquiry raid_inq; + u16 phys_drv_format[MAX_CHANNELS]; + u8 stack_attn; + u8 modem_status; + u8 rsvd[2]; +} __attribute__ ((packed)) mraid_ext_inquiry; + + +typedef struct { + u8 channel; + u8 target; +}__attribute__ ((packed)) adp_device; + +typedef struct { + u32 start_blk; /* starting block */ + u32 num_blks; /* # of blocks */ + adp_device device[MAX_ROW_SIZE_40LD]; +}__attribute__ ((packed)) adp_span_40ld; + +typedef struct { + u32 start_blk; /* starting block */ + u32 num_blks; /* # of blocks */ + adp_device device[MAX_ROW_SIZE_8LD]; +}__attribute__ ((packed)) adp_span_8ld; + +typedef struct { + u8 span_depth; /* Total # of spans */ + u8 level; /* RAID level */ + u8 read_ahead; /* read ahead, no read ahead, adaptive read + ahead */ + u8 stripe_sz; /* Encoded stripe size */ + u8 status; /* Status of the logical drive */ + u8 write_mode; /* write mode, write_through/write_back */ + u8 direct_io; /* direct io or through cache */ + u8 row_size; /* Number of stripes in a row */ +} __attribute__ ((packed)) logdrv_param; + +typedef struct { + logdrv_param lparam; + adp_span_40ld span[MAX_SPAN_DEPTH]; +}__attribute__ ((packed)) logdrv_40ld; + +typedef struct { + logdrv_param lparam; + adp_span_8ld span[MAX_SPAN_DEPTH]; +}__attribute__ ((packed)) logdrv_8ld; + +typedef struct { + u8 type; /* Type of the device */ + u8 cur_status; /* current status of the device */ + u8 tag_depth; /* Level of tagging */ + u8 sync_neg; /* sync negotiation - ENABLE or DISBALE */ + u32 size; /* configurable size in terms of 512 byte + blocks */ +}__attribute__ ((packed)) phys_drv; + 
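+/*
+ * Note: these firmware-visible structures are declared
+ * __attribute__ ((packed)) because the controller consumes them
+ * byte-for-byte.  For example, mbox_t above works out to 66 bytes
+ * (0x12 bytes of fixed fields, the 46-byte completed[] array, plus
+ * poll and ack), matching the 66-byte raw_mbox[] embedded in scb_t.
+ * A compile-time sketch of that invariant (the macro name below is
+ * hypothetical and not used elsewhere in the driver):
+ *
+ *	#define MEGA_SIZE_CHECK(name, cond) \
+ *		typedef char name[(cond) ? 1 : -1]
+ *
+ *	MEGA_SIZE_CHECK(mega_mbox_is_66_bytes, sizeof(mbox_t) == 66);
+ */
+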
+typedef struct { + u8 nlog_drives; /* number of logical drives */ + u8 resvd[3]; + logdrv_40ld ldrv[MAX_LOGICAL_DRIVES_40LD]; + phys_drv pdrv[MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_40ld; + +typedef struct { + u8 nlog_drives; /* number of logical drives */ + u8 resvd[3]; + logdrv_8ld ldrv[MAX_LOGICAL_DRIVES_8LD]; + phys_drv pdrv[MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_8ld; + +/* + * FW Definitions & Data Structures for 8LD 4-Span and 8-Span Controllers + */ +#define MAX_STRIPES 8 +#define SPAN4_DEPTH 4 +#define SPAN8_DEPTH 8 +#define MAX_PHYDRVS 5 * 16 /* 5 Channels * 16 Targets */ + +typedef struct { + unsigned char channel; + unsigned char target; +}__attribute__ ((packed)) device_t; + +typedef struct { + unsigned long start_blk; + unsigned long total_blks; + device_t device[ MAX_STRIPES ]; +}__attribute__ ((packed)) span_t; + +typedef struct { + unsigned char type; + unsigned char curr_status; + unsigned char tag_depth; + unsigned char resvd1; + unsigned long size; +}__attribute__ ((packed)) phydrv_t; + +typedef struct { + unsigned char span_depth; + unsigned char raid; + unsigned char read_ahead; /* 0=No rdahead,1=RDAHEAD,2=adaptive */ + unsigned char stripe_sz; + unsigned char status; + unsigned char write_policy; /* 0=wrthru,1=wrbak */ + unsigned char direct_io; /* 1=directio,0=cached */ + unsigned char no_stripes; + span_t span[ SPAN4_DEPTH ]; +}__attribute__ ((packed)) ld_span4_t; + +typedef struct { + unsigned char span_depth; + unsigned char raid; + unsigned char read_ahead; /* 0=No rdahead,1=RDAHEAD,2=adaptive */ + unsigned char stripe_sz; + unsigned char status; + unsigned char write_policy; /* 0=wrthru,1=wrbak */ + unsigned char direct_io; /* 1=directio,0=cached */ + unsigned char no_stripes; + span_t span[ SPAN8_DEPTH ]; +}__attribute__ ((packed)) ld_span8_t; + +typedef struct { + unsigned char no_log_drives; + unsigned char pad[3]; + ld_span4_t log_drv[ MAX_LOGICAL_DRIVES_8LD ]; + phydrv_t phys_drv[ MAX_PHYDRVS ]; +}__attribute__ ((packed)) diskarray_span4_t; + +typedef struct { + unsigned char no_log_drives; + unsigned char pad[3]; + ld_span8_t log_drv[ MAX_LOGICAL_DRIVES_8LD ]; + phydrv_t phys_drv[ MAX_PHYDRVS ]; +}__attribute__ ((packed)) diskarray_span8_t; + +/* + * User ioctl structure. + * This structure will be used for Traditional Method ioctl interface + * commands (0x80),Alternate Buffer Method (0x81) ioctl commands and the + * Driver ioctls. + * The Driver ioctl interface handles the commands at the driver level, + * without being sent to the card. + */ +/* system call imposed limit. Change accordingly */ +#define IOCTL_MAX_DATALEN 4096 + +struct uioctl_t { + u32 inlen; + u32 outlen; + union { + u8 fca[16]; + struct { + u8 opcode; + u8 subopcode; + u16 adapno; +#if BITS_PER_LONG == 32 + u8 *buffer; + u8 pad[4]; +#endif +#if BITS_PER_LONG == 64 + u8 *buffer; +#endif + u32 length; + } __attribute__ ((packed)) fcs; + } __attribute__ ((packed)) ui; + u8 mbox[18]; /* 16 bytes + 2 status bytes */ + mega_passthru pthru; +#if BITS_PER_LONG == 32 + char *data; /* buffer <= 4096 for 0x80 commands */ + char pad[4]; +#endif +#if BITS_PER_LONG == 64 + char *data; +#endif +} __attribute__ ((packed)); + +/* + * struct mcontroller is used to pass information about the controllers in the + * system. Its upto the application how to use the information. We are passing + * as much info about the cards as possible and useful. 
Before issuing the + * call to find information about the cards, the applicaiton needs to issue a + * ioctl first to find out the number of controllers in the system. + */ +#define MAX_CONTROLLERS 32 + +struct mcontroller { + u64 base; + u8 irq; + u8 numldrv; + u8 pcibus; + u16 pcidev; + u8 pcifun; + u16 pciid; + u16 pcivendor; + u8 pcislot; + u32 uid; +}; + +/* + * mailbox structure used for internal commands + */ +typedef struct { + u8 cmd; + u8 cmdid; + u8 opcode; + u8 subopcode; + u32 lba; + u32 xferaddr; + u8 logdrv; + u8 rsvd[3]; + u8 numstatus; + u8 status; +} __attribute__ ((packed)) megacmd_t; + +/* + * Defines for Driver IOCTL interface + */ +#define MEGAIOC_MAGIC 'm' + +#define MEGAIOC_QNADAP 'm' /* Query # of adapters */ +#define MEGAIOC_QDRVRVER 'e' /* Query driver version */ +#define MEGAIOC_QADAPINFO 'g' /* Query adapter information */ +#define MKADAP(adapno) (MEGAIOC_MAGIC << 8 | (adapno) ) +#define GETADAP(mkadap) ( (mkadap) ^ MEGAIOC_MAGIC << 8 ) + +/* + * Definition for the new ioctl interface (NIT) + */ + +/* + * Vendor specific Group-7 commands + */ +#define VENDOR_SPECIFIC_COMMANDS 0xE0 +#define MEGA_INTERNAL_CMD VENDOR_SPECIFIC_COMMANDS + 0x01 + +/* + * The ioctl command. No other command shall be used for this interface + */ +#define USCSICMD VENDOR_SPECIFIC_COMMANDS + +/* + * Data direction flags + */ +#define UIOC_RD 0x00001 +#define UIOC_WR 0x00002 + +/* + * ioctl opcodes + */ +#define MBOX_CMD 0x00000 /* DCMD or passthru command */ +#define GET_DRIVER_VER 0x10000 /* Get driver version */ +#define GET_N_ADAP 0x20000 /* Get number of adapters */ +#define GET_ADAP_INFO 0x30000 /* Get information about a adapter */ +#define GET_CAP 0x40000 /* Get ioctl capabilities */ +#define GET_STATS 0x50000 /* Get statistics, including error info */ + + +/* + * The ioctl structure. + * MBOX macro converts a nitioctl_t structure to megacmd_t pointer and + * MBOX_P macro converts a nitioctl_t pointer to megacmd_t pointer. + */ +typedef struct { + char signature[8]; /* Must contain "MEGANIT" */ + u32 opcode; /* opcode for the command */ + u32 adapno; /* adapter number */ + union { + u8 __raw_mbox[18]; + caddr_t __uaddr; /* xferaddr for non-mbox cmds */ + }__ua; + +#define uioc_rmbox __ua.__raw_mbox +#define MBOX(uioc) ((megacmd_t *)&((uioc).__ua.__raw_mbox[0])) +#define MBOX_P(uioc) ((megacmd_t *)&((uioc)->__ua.__raw_mbox[0])) +#define uioc_uaddr __ua.__uaddr + + u32 xferlen; /* xferlen for DCMD and non-mbox + commands */ + u32 flags; /* data direction flags */ +}nitioctl_t; + + +/* + * I/O statistics for some applications like SNMP agent. The caller must + * provide the number of logical drives for which status should be reported. + */ +typedef struct { + int num_ldrv; /* Number for logical drives for which the + status should be reported. 
*/ + u32 nreads[MAX_LOGICAL_DRIVES_40LD]; /* number of reads for + each logical drive */ + u32 nreadblocks[MAX_LOGICAL_DRIVES_40LD]; /* number of blocks + read for each logical + drive */ + u32 nwrites[MAX_LOGICAL_DRIVES_40LD]; /* number of writes + for each logical + drive */ + u32 nwriteblocks[MAX_LOGICAL_DRIVES_40LD]; /* number of blocks + writes for each + logical drive */ + u32 rd_errors[MAX_LOGICAL_DRIVES_40LD]; /* number of read + errors for each + logical drive */ + u32 wr_errors[MAX_LOGICAL_DRIVES_40LD]; /* number of write + errors for each + logical drive */ +}megastat_t; + + +struct private_bios_data { + u8 geometry:4; /* + * bits 0-3 - BIOS geometry + * 0x0001 - 1GB + * 0x0010 - 2GB + * 0x1000 - 8GB + * Others values are invalid + */ + u8 unused:4; /* bits 4-7 are unused */ + u8 boot_drv; /* + * logical drive set as boot drive + * 0..7 - for 8LD cards + * 0..39 - for 40LD cards + */ + u8 rsvd[12]; + u16 cksum; /* 0-(sum of first 13 bytes of this structure) */ +} __attribute__ ((packed)); + + + + +/* + * Mailbox and firmware commands and subopcodes used in this driver. + */ + +#define MEGA_MBOXCMD_LREAD 0x01 +#define MEGA_MBOXCMD_LWRITE 0x02 +#define MEGA_MBOXCMD_PASSTHRU 0x03 +#define MEGA_MBOXCMD_ADPEXTINQ 0x04 +#define MEGA_MBOXCMD_ADAPTERINQ 0x05 +#define MEGA_MBOXCMD_LREAD64 0xA7 +#define MEGA_MBOXCMD_LWRITE64 0xA8 +#define MEGA_MBOXCMD_PASSTHRU64 0xC3 +#define MEGA_MBOXCMD_EXTPTHRU 0xE3 + +#define MAIN_MISC_OPCODE 0xA4 /* f/w misc opcode */ +#define GET_MAX_SG_SUPPORT 0x01 /* get max sg len supported by f/w */ + +#define FC_NEW_CONFIG 0xA1 +#define NC_SUBOP_PRODUCT_INFO 0x0E +#define NC_SUBOP_ENQUIRY3 0x0F +#define ENQ3_GET_SOLICITED_FULL 0x02 +#define OP_DCMD_READ_CONFIG 0x04 +#define NEW_READ_CONFIG_8LD 0x67 +#define READ_CONFIG_8LD 0x07 +#define FLUSH_ADAPTER 0x0A +#define FLUSH_SYSTEM 0xFE + +/* + * Command for random deletion of logical drives + */ +#define FC_DEL_LOGDRV 0xA4 /* f/w command */ +#define OP_SUP_DEL_LOGDRV 0x2A /* is feature supported */ +#define OP_GET_LDID_MAP 0x18 /* get ldid and logdrv number map */ +#define OP_DEL_LOGDRV 0x1C /* delete logical drive */ + +/* + * BIOS commands + */ +#define IS_BIOS_ENABLED 0x62 +#define GET_BIOS 0x01 +#define CHNL_CLASS 0xA9 +#define GET_CHNL_CLASS 0x00 +#define SET_CHNL_CLASS 0x01 +#define CH_RAID 0x01 +#define CH_SCSI 0x00 +#define BIOS_PVT_DATA 0x40 +#define GET_BIOS_PVT_DATA 0x00 + + +/* + * Commands to support clustering + */ +#define MEGA_GET_TARGET_ID 0x7D +#define MEGA_CLUSTER_OP 0x70 +#define MEGA_GET_CLUSTER_MODE 0x02 +#define MEGA_CLUSTER_CMD 0x6E +#define MEGA_RESERVE_LD 0x01 +#define MEGA_RELEASE_LD 0x02 +#define MEGA_RESET_RESERVATIONS 0x03 +#define MEGA_RESERVATION_STATUS 0x04 +#define MEGA_RESERVE_PD 0x05 +#define MEGA_RELEASE_PD 0x06 + + +/* + * Module battery status + */ +#define MEGA_BATT_MODULE_MISSING 0x01 +#define MEGA_BATT_LOW_VOLTAGE 0x02 +#define MEGA_BATT_TEMP_HIGH 0x04 +#define MEGA_BATT_PACK_MISSING 0x08 +#define MEGA_BATT_CHARGE_MASK 0x30 +#define MEGA_BATT_CHARGE_DONE 0x00 +#define MEGA_BATT_CHARGE_INPROG 0x10 +#define MEGA_BATT_CHARGE_FAIL 0x20 +#define MEGA_BATT_CYCLES_EXCEEDED 0x40 + +/* + * Physical drive states. + */ +#define PDRV_UNCNF 0 +#define PDRV_ONLINE 3 +#define PDRV_FAILED 4 +#define PDRV_RBLD 5 +#define PDRV_HOTSPARE 6 + + +/* + * Raid logical drive states. 
+ */ +#define RDRV_OFFLINE 0 +#define RDRV_DEGRADED 1 +#define RDRV_OPTIMAL 2 +#define RDRV_DELETED 3 + +/* + * Read, write and cache policies + */ +#define NO_READ_AHEAD 0 +#define READ_AHEAD 1 +#define ADAP_READ_AHEAD 2 +#define WRMODE_WRITE_THRU 0 +#define WRMODE_WRITE_BACK 1 +#define CACHED_IO 0 +#define DIRECT_IO 1 + + +/* + * Each controller's soft state + */ +typedef struct { + int this_id; /* our id, may set to different than 7 if + clustering is available */ + u32 flag; + + unsigned long base; + + /* mbox64 with mbox not aligned on 16-byte boundry */ + mbox64_t *una_mbox64; + dma_addr_t una_mbox64_dma; + + volatile mbox64_t *mbox64;/* ptr to 64-bit mailbox */ + volatile mbox_t *mbox; /* ptr to standard mailbox */ + dma_addr_t mbox_dma; + + struct pci_dev *dev; + struct pci_dev *ipdev; /* for internal allocation */ + + struct list_head free_list; + struct list_head pending_list; + + struct Scsi_Host *host; + +#define MEGA_BUFFER_SIZE (2*1024) + u8 *mega_buffer; + dma_addr_t buf_dma_handle; + + mega_product_info product_info; + + u8 max_cmds; + scb_t *scb_list; + + atomic_t pend_cmds; /* maintain a counter for + pending commands in firmware */ + +#if MEGA_HAVE_STATS + u32 nreads[MAX_LOGICAL_DRIVES_40LD]; + u32 nreadblocks[MAX_LOGICAL_DRIVES_40LD]; + u32 nwrites[MAX_LOGICAL_DRIVES_40LD]; + u32 nwriteblocks[MAX_LOGICAL_DRIVES_40LD]; + u32 rd_errors[MAX_LOGICAL_DRIVES_40LD]; + u32 wr_errors[MAX_LOGICAL_DRIVES_40LD]; +#endif + + /* Host adapter parameters */ + u8 numldrv; + u8 fw_version[7]; + u8 bios_version[7]; + +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *controller_proc_dir_entry; + struct proc_dir_entry *proc_read; + struct proc_dir_entry *proc_stat; + struct proc_dir_entry *proc_mbox; + +#if MEGA_HAVE_ENH_PROC + struct proc_dir_entry *proc_rr; + struct proc_dir_entry *proc_battery; +#define MAX_PROC_CHANNELS 4 + struct proc_dir_entry *proc_pdrvstat[MAX_PROC_CHANNELS]; + struct proc_dir_entry *proc_rdrvstat[MAX_PROC_CHANNELS]; +#endif + +#endif + + int has_64bit_addr; /* are we using 64-bit addressing */ + int support_ext_cdb; + int boot_ldrv_enabled; + int boot_ldrv; + int boot_pdrv_enabled; /* boot from physical drive */ + int boot_pdrv_ch; /* boot physical drive channel */ + int boot_pdrv_tgt; /* boot physical drive target */ + + + int support_random_del; /* Do we support random deletion of + logdrvs */ + int read_ldidmap; /* set after logical drive deltion. The + logical drive number must be read from the + map */ + atomic_t quiescent; /* a stage reached when delete logical + drive needs to be done. Stop + sending requests to the hba till + delete operation is completed */ + spinlock_t lock; + spinlock_t *host_lock; // pointer to appropriate lock + + u8 logdrv_chan[MAX_CHANNELS+NVIRT_CHAN]; /* logical drive are on + what channels. */ + int mega_ch_class; + + u8 sglen; /* f/w supported scatter-gather list length */ + + scb_t int_scb; + Scsi_Cmnd int_scmd; + struct semaphore int_mtx; /* To synchronize the internal + commands */ + wait_queue_head_t int_waitq; /* wait queue for internal + cmds */ + + int has_cluster; /* cluster support on this HBA */ +}adapter_t; + + +struct mega_hbas { + int is_bios_enabled; + adapter_t *hostdata_addr; +}; + + +/* + * For state flag. Do not use LSB(8 bits) which are + * reserved for storing info about channels. 
+ */ +#define IN_ABORT 0x80000000L +#define IN_RESET 0x40000000L +#define BOARD_MEMMAP 0x20000000L +#define BOARD_IOMAP 0x10000000L +#define BOARD_40LD 0x08000000L +#define BOARD_64BIT 0x04000000L + +#define INTR_VALID 0x40 + +#define PCI_CONF_AMISIG 0xa0 +#define PCI_CONF_AMISIG64 0xa4 + + +#define MEGA_DMA_TYPE_NONE 0xFFFF +#define MEGA_BULK_DATA 0x0001 +#define MEGA_SGLIST 0x0002 + +/* + * lockscope definitions, callers can specify the lock scope with this data + * type. LOCK_INT would mean the caller has not acquired the lock before + * making the call and LOCK_EXT would mean otherwise. + */ +typedef enum { LOCK_INT, LOCK_EXT } lockscope_t; + +/* + * Parameters for the io-mapped controllers + */ + +/* I/O Port offsets */ +#define CMD_PORT 0x00 +#define ACK_PORT 0x00 +#define TOGGLE_PORT 0x01 +#define INTR_PORT 0x0a + +#define MBOX_BUSY_PORT 0x00 +#define MBOX_PORT0 0x04 +#define MBOX_PORT1 0x05 +#define MBOX_PORT2 0x06 +#define MBOX_PORT3 0x07 +#define ENABLE_MBOX_REGION 0x0B + +/* I/O Port Values */ +#define ISSUE_BYTE 0x10 +#define ACK_BYTE 0x08 +#define ENABLE_INTR_BYTE 0xc0 +#define DISABLE_INTR_BYTE 0x00 +#define VALID_INTR_BYTE 0x40 +#define MBOX_BUSY_BYTE 0x10 +#define ENABLE_MBOX_BYTE 0x00 + + +/* Setup some port macros here */ +#define issue_command(adapter) \ + outb_p(ISSUE_BYTE, (adapter)->base + CMD_PORT) + +#define irq_state(adapter) inb_p((adapter)->base + INTR_PORT) + +#define set_irq_state(adapter, value) \ + outb_p((value), (adapter)->base + INTR_PORT) + +#define irq_ack(adapter) \ + outb_p(ACK_BYTE, (adapter)->base + ACK_PORT) + +#define irq_enable(adapter) \ + outb_p(ENABLE_INTR_BYTE, (adapter)->base + TOGGLE_PORT) + +#define irq_disable(adapter) \ + outb_p(DISABLE_INTR_BYTE, (adapter)->base + TOGGLE_PORT) + + +/* + * This is our SYSDEP area. All kernel specific detail should be placed here - + * as much as possible + */ + +/* + * End of SYSDEP area + */ + +/* + * ASSERT macro for megaraid. 
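/*
 * Editor's note, not part of the patch: the port macros above encode the
 * io-mapped mailbox handshake -- ISSUE_BYTE written to CMD_PORT starts a
 * command, INTR_PORT reports VALID_INTR_BYTE when the controller raises an
 * interrupt, and ACK_PORT acknowledges it.  A minimal sketch of the
 * acknowledge side, using only the macros defined above; this is not the
 * driver's actual handler and the function name is made up.
 */
static inline int sample_iomapped_intr(adapter_t *adapter)
{
	u8 status = irq_state(adapter);		/* read INTR_PORT */

	if (!(status & VALID_INTR_BYTE))
		return 0;			/* interrupt is not from this HBA */

	set_irq_state(adapter, status);		/* write the value back to clear it */
	irq_ack(adapter);			/* ACK_BYTE -> ACK_PORT */
	return 1;
}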
This should panic but printk should do for now + */ +#ifdef DEBUG +#define ASSERT( expression ) \ + if( !(expression) ) { \ + panic("assertion failed: %s, file: %s, line: %d\n", \ + #expression, __FILE__, __LINE__); \ + } +#else +#define ASSERT(expression) +#endif + +#define MBOX_ABORT_SLEEP 60 +#define MBOX_RESET_SLEEP 30 + +const char *megaraid_info (struct Scsi_Host *); + +static int megaraid_detect(Scsi_Host_Template *); +static void mega_find_card(Scsi_Host_Template *, u16, u16); +static int mega_query_adapter(adapter_t *); +static inline int issue_scb(adapter_t *, scb_t *); +static int mega_setup_mailbox(adapter_t *); + +static int megaraid_queue (Scsi_Cmnd *, void (*)(Scsi_Cmnd *)); +static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *); +static inline scb_t *mega_allocate_scb(adapter_t *, Scsi_Cmnd *); +static void __mega_runpendq(adapter_t *); +static inline void mega_runpendq(adapter_t *); +static int issue_scb_block(adapter_t *, u_char *); + +static void megaraid_isr_memmapped(int, void *, struct pt_regs *); +static inline void megaraid_memmbox_ack_sequence(adapter_t *); +static void megaraid_isr_iomapped(int, void *, struct pt_regs *); +static inline void megaraid_iombox_ack_sequence(adapter_t *); + +static void mega_free_scb(adapter_t *, scb_t *); + +static int megaraid_release (struct Scsi_Host *); +static int megaraid_command (Scsi_Cmnd *); +static int megaraid_abort(Scsi_Cmnd *); +static int megaraid_reset(Scsi_Cmnd *); +static int megaraid_biosparam (Disk *, kdev_t, int *); + +static int mega_build_sglist (adapter_t *adapter, scb_t *scb, + u32 *buffer, u32 *length); +static inline int mega_busywait_mbox (adapter_t *); +static int __mega_busywait_mbox (adapter_t *); +static inline void mega_cmd_done(adapter_t *, u8 [], int, int); +static inline void mega_free_sgl (adapter_t *adapter); +static void mega_8_to_40ld (mraid_inquiry *inquiry, + mega_inquiry3 *enquiry3, mega_product_info *); + +static int megaraid_reboot_notify (struct notifier_block *, + unsigned long, void *); +static int megadev_open (struct inode *, struct file *); +static int megadev_ioctl (struct inode *, struct file *, unsigned int, + unsigned long); +static int mega_m_to_n(void *, nitioctl_t *); +static int mega_n_to_m(void *, megacmd_t *); +static int megadev_close (struct inode *, struct file *); + +static int mega_init_scb (adapter_t *); + +static int mega_is_bios_enabled (adapter_t *); +static void mega_reorder_hosts (void); +static void mega_swap_hosts (struct Scsi_Host *, struct Scsi_Host *); + +#ifdef CONFIG_PROC_FS +static void mega_create_proc_entry(int, struct proc_dir_entry *); +static int proc_read_config(char *, char **, off_t, int, int *, void *); +static int proc_read_stat(char *, char **, off_t, int, int *, void *); +static int proc_read_mbox(char *, char **, off_t, int, int *, void *); +static int proc_rebuild_rate(char *, char **, off_t, int, int *, void *); +static int proc_battery(char *, char **, off_t, int, int *, void *); +static int proc_pdrv_ch0(char *, char **, off_t, int, int *, void *); +static int proc_pdrv_ch1(char *, char **, off_t, int, int *, void *); +static int proc_pdrv_ch2(char *, char **, off_t, int, int *, void *); +static int proc_pdrv_ch3(char *, char **, off_t, int, int *, void *); +static int proc_pdrv(adapter_t *, char *, int); +static int proc_rdrv_10(char *, char **, off_t, int, int *, void *); +static int proc_rdrv_20(char *, char **, off_t, int, int *, void *); +static int proc_rdrv_30(char *, char **, off_t, int, int *, void *); +static int 
proc_rdrv_40(char *, char **, off_t, int, int *, void *); +static int proc_rdrv(adapter_t *, char *, int, int); + +static int mega_adapinq(adapter_t *, dma_addr_t); +static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t); +static inline caddr_t mega_allocate_inquiry(dma_addr_t *, struct pci_dev *); +static inline void mega_free_inquiry(caddr_t, dma_addr_t, struct pci_dev *); +static int mega_print_inquiry(char *, char *); +#endif + +static int mega_support_ext_cdb(adapter_t *); +static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *, + Scsi_Cmnd *, int, int); +static mega_ext_passthru* mega_prepare_extpassthru(adapter_t *, + scb_t *, Scsi_Cmnd *, int, int); +static void mega_enum_raid_scsi(adapter_t *); +static int mega_partsize(Disk *, kdev_t, int *); +static void mega_get_boot_drv(adapter_t *); +static inline int mega_get_ldrv_num(adapter_t *, Scsi_Cmnd *, int); +static int mega_support_random_del(adapter_t *); +static int mega_del_logdrv(adapter_t *, int); +static int mega_do_del_logdrv(adapter_t *, int); +static void mega_get_max_sgl(adapter_t *); +static int mega_internal_command(adapter_t *, lockscope_t, megacmd_t *, + mega_passthru *); +static void mega_internal_done(Scsi_Cmnd *); +static int mega_support_cluster(adapter_t *); +#endif + +/* vi: set ts=8 sw=8 tw=78: */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/pcmcia/nsp_cs.c linux.22-ac2/drivers/scsi/pcmcia/nsp_cs.c --- linux.vanilla/drivers/scsi/pcmcia/nsp_cs.c 2003-08-28 16:45:38.000000000 +0100 +++ linux.22-ac2/drivers/scsi/pcmcia/nsp_cs.c 2003-08-28 22:34:26.000000000 +0100 @@ -168,12 +168,12 @@ #endif nsp_hw_data *data = &nsp_data; - DEBUG(0, __FUNCTION__ "() SCpnt=0x%p target=%d lun=%d buff=0x%p bufflen=%d use_sg=%d\n", - SCpnt, target, SCpnt->lun, SCpnt->request_buffer, SCpnt->request_bufflen, SCpnt->use_sg); + DEBUG(0, "%s() SCpnt=0x%p target=%d lun=%d buff=0x%p bufflen=%d use_sg=%d\n", + __FUNCTION__, SCpnt, target, SCpnt->lun, SCpnt->request_buffer, SCpnt->request_bufflen, SCpnt->use_sg); //DEBUG(0, " before CurrentSC=0x%p\n", data->CurrentSC); if(data->CurrentSC != NULL) { - printk(KERN_DEBUG " " __FUNCTION__ "() CurrentSC!=NULL this can't be happen\n"); + printk(KERN_DEBUG " %s() CurrentSC!=NULL this can't be happen\n", __FUNCTION__); data->CurrentSC = NULL; SCpnt->result = DID_BAD_TARGET << 16; done(SCpnt); @@ -219,7 +219,7 @@ } - //DEBUG(0, __FUNCTION__ "() out\n"); + //DEBUG(0, "%s() out\n", __FUNCTION__); return 0; } @@ -231,7 +231,7 @@ unsigned int base = data->BaseAddress; unsigned char transfer_mode_reg; - //DEBUG(0, __FUNCTION__ "() enabled=%d\n", enabled); + //DEBUG(0, "%s() enabled=%d\n", __FUNCTION__, enabled); if (enabled != FALSE) { transfer_mode_reg = TRANSFER_GO | BRAIND; @@ -256,7 +256,7 @@ SyncOffset: 0 }; - DEBUG(0, __FUNCTION__ "() in base=0x%x\n", base); + DEBUG(0, "%s() in base=0x%x\n", __FUNCTION__, base); data->ScsiClockDiv = CLOCK_40M; data->CurrentSC = NULL; @@ -324,7 +324,7 @@ int wait_count; unsigned char phase, arbit; - //DEBUG(0, __FUNCTION__ "()in\n"); + //DEBUG(0, "%s()in\n", __FUNCTION__); phase = nsp_index_read(base, SCSIBUSMON); if(phase != BUSMON_BUS_FREE) { @@ -406,7 +406,7 @@ int i; - DEBUG(0, __FUNCTION__ "()\n"); + DEBUG(0, "%s()\n", __FUNCTION__); /**!**/ @@ -461,7 +461,7 @@ { unsigned int base = SCpnt->host->io_port; - //DEBUG(0, __FUNCTION__ "() in SCpnt=0x%p, time=%d\n", SCpnt, time); + //DEBUG(0, "%s() in SCpnt=0x%p, time=%d\n", __FUNCTION__, SCpnt, time); data->TimerCount = time; nsp_index_write(base, 
TIMERCOUNT, time); } @@ -475,7 +475,7 @@ unsigned char reg; int count, i = TRUE; - //DEBUG(0, __FUNCTION__ "()\n"); + //DEBUG(0, "%s()\n", __FUNCTION__); count = jiffies + HZ; @@ -487,7 +487,7 @@ } while ((i = time_before(jiffies, count)) && (reg & mask) != 0); if (!i) { - printk(KERN_DEBUG __FUNCTION__ " %s signal off timeut\n", str); + printk(KERN_DEBUG "%s %s signal off timeut\n", __FUNCTION__, str); } return 0; @@ -504,7 +504,7 @@ int wait_count; unsigned char phase, i_src; - //DEBUG(0, __FUNCTION__ "() current_phase=0x%x, mask=0x%x\n", current_phase, mask); + //DEBUG(0, "%s() current_phase=0x%x, mask=0x%x\n", __FUNCTION__, current_phase, mask); wait_count = jiffies + HZ; do { @@ -524,7 +524,7 @@ } } while(time_before(jiffies, wait_count)); - //DEBUG(0, __FUNCTION__ " : " __FUNCTION__ " timeout\n"); + //DEBUG(0, "%s: timeout\n", __FUNCTION__); return -1; } @@ -539,7 +539,7 @@ int ptr; int ret; - //DEBUG(0, __FUNCTION__ "()\n"); + //DEBUG(0, "%s()\n", __FUNCTION__); for (ptr = 0; len > 0; len --, ptr ++) { ret = nsp_expect_signal(SCpnt, phase, BUSMON_REQ); @@ -574,7 +574,7 @@ { unsigned int count; - //DEBUG(0, __FUNCTION__ "()\n"); + //DEBUG(0, "%s()\n", __FUNCTION__); if (SCpnt->SCp.have_data_in != IO_IN) { return 0; @@ -606,7 +606,7 @@ unsigned int base = SCpnt->host->io_port; unsigned char reg; - //DEBUG(0, __FUNCTION__ "()\n"); + //DEBUG(0, "%s()\n", __FUNCTION__); nsp_negate_signal(SCpnt, BUSMON_SEL, "reselect"); @@ -635,7 +635,7 @@ count = (h << 16) | (m << 8) | (l << 0); - //DEBUG(0, __FUNCTION__ "() =0x%x\n", count); + //DEBUG(0, "%s() =0x%x\n", __FUNCTION__, count); return count; } @@ -656,7 +656,7 @@ ocount = data->FifoCount; - DEBUG(0, __FUNCTION__ "() in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d\n", SCpnt, RESID, ocount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual); + DEBUG(0, "%s() in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d\n", __FUNCTION__, SCpnt, RESID, ocount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual); time_out = jiffies + 10 * HZ; @@ -722,7 +722,7 @@ data->FifoCount = ocount; if (!i) { - printk(KERN_DEBUG __FUNCTION__ "() pio read timeout resid=%d this_residual=%d buffers_residual=%d\n", RESID, SCpnt->SCp.this_residual, SCpnt->SCp.buffers_residual); + printk(KERN_DEBUG "%s() pio read timeout resid=%d this_residual=%d buffers_residual=%d\n", __FUNCTION__, RESID, SCpnt->SCp.this_residual, SCpnt->SCp.buffers_residual); } DEBUG(0, " read ocount=0x%x\n", ocount); } @@ -739,7 +739,7 @@ ocount = data->FifoCount; - DEBUG(0, __FUNCTION__ "() in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x\n", data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual, RESID); + DEBUG(0, "%s() in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x\n", __FUNCTION__, data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual, RESID); time_out = jiffies + 10 * HZ; @@ -795,7 +795,7 @@ data->FifoCount = ocount; if (!i) { - printk(KERN_DEBUG __FUNCTION__ "() pio write timeout resid=%d\n", RESID); + printk(KERN_DEBUG "%s() pio write timeout resid=%d\n", __FUNCTION__, RESID); } //DEBUG(0, " write ocount=%d\n", ocount); } @@ -813,7 +813,7 @@ unsigned char lun = SCpnt->lun; sync_data *sync = &(data->Sync[target][lun]); - //DEBUG(0, __FUNCTION__ "() in SCpnt=0x%p\n", SCpnt); + //DEBUG(0, "%s() in 
SCpnt=0x%p\n", __FUNCTION__, SCpnt); /* setup synch transfer registers */ nsp_index_write(base, SYNCREG, sync->SyncRegister); @@ -916,7 +916,7 @@ nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR | IRQCONTROL_FIFO_CLEAR); if (data->CurrentSC == NULL) { - printk(KERN_DEBUG __FUNCTION__ " CurrentSC==NULL irq_status=0x%x phase=0x%x irq_phase=0x%x this can't be happen\n", i_src, phase, irq_phase); + printk(KERN_DEBUG "%s CurrentSC==NULL irq_status=0x%x phase=0x%x irq_phase=0x%x this can't be happen\n", __FUNCTION__, i_src, phase, irq_phase); return; } else { tmpSC = data->CurrentSC; @@ -930,7 +930,7 @@ */ if ((i_src & IRQSTATUS_SCSI) != 0) { if ((irq_phase & SCSI_RESET_IRQ) != 0) { - printk(KERN_DEBUG " " __FUNCTION__ "() bus reset (power off?)\n"); + printk(KERN_DEBUG " %s() bus reset (power off?)\n", __FUNCTION__); *sync_neg = SYNC_NOT_YET; data->CurrentSC = NULL; tmpSC->result = DID_RESET << 16; @@ -1028,7 +1028,7 @@ /* check unexpected bus free state */ if (phase == 0) { - printk(KERN_DEBUG " " __FUNCTION__ " unexpected bus free. i_src=0x%x, phase=0x%x, irq_phase=0x%x\n", i_src, phase, irq_phase); + printk(KERN_DEBUG " %s unexpected bus free. i_src=0x%x, phase=0x%x, irq_phase=0x%x\n", __FUNCTION__, i_src, phase, irq_phase); *sync_neg = SYNC_NOT_YET; data->CurrentSC = NULL; @@ -1163,7 +1163,7 @@ break; } - //DEBUG(0, __FUNCTION__ "() out\n"); + //DEBUG(0, "%s() out\n", __FUNCTION__); return; timer_out: @@ -1183,7 +1183,7 @@ struct Scsi_Host *host; /* registered host structure */ nsp_hw_data *data = &nsp_data; - DEBUG(0, __FUNCTION__ " this_id=%d\n", sht->this_id); + DEBUG(0, "%s this_id=%d\n", __FUNCTION__, sht->this_id); request_region(data->BaseAddress, data->NumAddress, "nsp_cs"); host = scsi_register(sht, 0); @@ -1202,7 +1202,7 @@ host->irq); sht->name = nspinfo; - DEBUG(0, __FUNCTION__ " end\n"); + DEBUG(0, "%s end\n", __FUNCTION__); return 1; /* detect done. 
*/ } @@ -1234,7 +1234,7 @@ /*---------------------------------------------------------------*/ static int nsp_reset(Scsi_Cmnd *SCpnt, unsigned int why) { - DEBUG(0, __FUNCTION__ " SCpnt=0x%p why=%d\n", SCpnt, why); + DEBUG(0, "%s SCpnt=0x%p why=%d\n", __FUNCTION__, SCpnt, why); nsp_eh_bus_reset(SCpnt); @@ -1243,7 +1243,7 @@ static int nsp_abort(Scsi_Cmnd *SCpnt) { - DEBUG(0, __FUNCTION__ " SCpnt=0x%p\n", SCpnt); + DEBUG(0, "%s SCpnt=0x%p\n", __FUNCTION__, SCpnt); nsp_eh_bus_reset(SCpnt); @@ -1257,7 +1257,7 @@ static int nsp_eh_abort(Scsi_Cmnd *SCpnt) { - DEBUG(0, __FUNCTION__ " SCpnt=0x%p\n", SCpnt); + DEBUG(0, "%s SCpnt=0x%p\n", __FUNCTION__, SCpnt); nsp_eh_bus_reset(SCpnt); @@ -1266,7 +1266,7 @@ static int nsp_eh_device_reset(Scsi_Cmnd *SCpnt) { - DEBUG(0, __FUNCTION__ " SCpnt=0x%p\n", SCpnt); + DEBUG(0, "%s SCpnt=0x%p\n", __FUNCTION__, SCpnt); return FAILED; } @@ -1276,7 +1276,7 @@ unsigned int base = SCpnt->host->io_port; int i; - DEBUG(0, __FUNCTION__ "() SCpnt=0x%p base=0x%x\n", SCpnt, base); + DEBUG(0, "%s() SCpnt=0x%p base=0x%x\n", __FUNCTION__, SCpnt, base); nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK); @@ -1296,7 +1296,7 @@ { nsp_hw_data *data = &nsp_data; - DEBUG(0, __FUNCTION__ "\n"); + DEBUG(0, "%s\n", __FUNCTION__); nsphw_init(data); @@ -1331,7 +1331,7 @@ dev_link_t *link; int ret, i; - DEBUG(0, __FUNCTION__ "()\n"); + DEBUG(0, "%s()\n", __FUNCTION__); /* Create new SCSI device */ info = kmalloc(sizeof(*info), GFP_KERNEL); @@ -1398,7 +1398,7 @@ { dev_link_t **linkp; - DEBUG(0, __FUNCTION__ "(0x%p)\n", link); + DEBUG(0, "%s(0x%p)\n", __FUNCTION__, link); /* Locate device structure */ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next) { @@ -1455,7 +1455,7 @@ struct Scsi_Host *host; nsp_hw_data *data = &nsp_data; - DEBUG(0, __FUNCTION__ "() in\n"); + DEBUG(0, "%s() in\n", __FUNCTION__); tuple.DesiredTuple = CISTPL_CONFIG; tuple.Attributes = 0; @@ -1488,7 +1488,7 @@ break; } next_entry: - DEBUG(0, __FUNCTION__ " next\n"); + DEBUG(0, "%s next\n", __FUNCTION__); CS_CHECK(GetNextTuple, handle, &tuple); } @@ -1503,8 +1503,8 @@ data->NumAddress = link->io.NumPorts1; data->IrqNumber = link->irq.AssignedIRQ; - DEBUG(0, __FUNCTION__ " I/O[0x%x+0x%x] IRQ %d\n", - data->BaseAddress, data->NumAddress, data->IrqNumber); + DEBUG(0, "%s I/O[0x%x+0x%x] IRQ %d\n", + __FUNCTION__, data->BaseAddress, data->NumAddress, data->IrqNumber); if(nsphw_init(data) == FALSE) { goto cs_failed; @@ -1592,7 +1592,7 @@ { dev_link_t *link = (dev_link_t *)arg; - DEBUG(0, __FUNCTION__ "(0x%p)\n", link); + DEBUG(0, "%s(0x%p)\n", __FUNCTION__, link); /* * If the device is currently in use, we won't release until it @@ -1646,7 +1646,7 @@ dev_link_t *link = args->client_data; scsi_info_t *info = link->priv; - DEBUG(1, __FUNCTION__ "(0x%06x)\n", event); + DEBUG(1, "%s(0x%06x)\n", __FUNCTION__, event); switch (event) { case CS_EVENT_CARD_REMOVAL: @@ -1695,7 +1695,7 @@ DEBUG(0, " event: unknown\n"); break; } - DEBUG(0, __FUNCTION__ " end\n"); + DEBUG(0, "%s end\n", __FUNCTION__); return 0; } /* nsp_cs_event */ @@ -1706,7 +1706,7 @@ { servinfo_t serv; - DEBUG(0, __FUNCTION__ "() in\n"); + DEBUG(0, "%s() in\n", __FUNCTION__); DEBUG(0, "%s\n", version); CardServices(GetCardServicesInfo, &serv); if (serv.Revision != CS_RELEASE_CODE) { @@ -1716,14 +1716,14 @@ } register_pcmcia_driver(&dev_info, &nsp_cs_attach, &nsp_cs_detach); - DEBUG(0, __FUNCTION__ "() out\n"); + DEBUG(0, "%s() out\n", __FUNCTION__); return 0; } static void __exit nsp_cs_cleanup(void) { - DEBUG(0, __FUNCTION__ "() unloading\n"); + DEBUG(0, "%s() 
unloading\n", __FUNCTION__); unregister_pcmcia_driver(&dev_info); while (dev_list != NULL) { if (dev_list->state & DEV_CONFIG) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/pcmcia/nsp_message.c linux.22-ac2/drivers/scsi/pcmcia/nsp_message.c --- linux.vanilla/drivers/scsi/pcmcia/nsp_message.c 2001-10-11 17:04:57.000000000 +0100 +++ linux.22-ac2/drivers/scsi/pcmcia/nsp_message.c 2003-08-28 22:34:26.000000000 +0100 @@ -64,7 +64,7 @@ DEBUG(0, " msgout loop\n"); do { if (nsp_xfer(SCpnt, data, BUSPHASE_MESSAGE_OUT)) { - printk(KERN_DEBUG " " __FUNCTION__ " msgout: xfer short\n"); + printk(KERN_DEBUG " %s msgout: xfer short\n", __FUNCTION__); } /* catch a next signal */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/sata_via.c linux.22-ac2/drivers/scsi/sata_via.c --- linux.vanilla/drivers/scsi/sata_via.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/scsi/sata_via.c 2003-09-01 13:25:08.000000000 +0100 @@ -0,0 +1,254 @@ +/* + sata_via.c - VIA Serial ATA controllers + + Copyright 2003 Red Hat, Inc. All rights reserved. + Copyright 2003 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. 
+ + */ + +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "sata_via" +#define DRV_VERSION "0.10" + +enum { + via_sata = 0, +}; + +static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static void svia_port_probe(struct ata_port *ap); +static void svia_port_disable(struct ata_port *ap); +static void svia_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio); +static void svia_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma); + +static unsigned int in_module_init = 1; + +static struct pci_device_id svia_pci_tbl[] = { + { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, via_sata }, + + { } /* terminate list */ +}; + +struct pci_driver svia_pci_driver = { + .name = DRV_NAME, + .id_table = svia_pci_tbl, + .probe = svia_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template svia_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .detect = ata_scsi_detect, + .release = ata_scsi_release, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .use_new_eh_code = ATA_SHT_NEW_EH_CODE, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, +}; + +struct ata_host_info svia_sata_ops = { + .port_probe = svia_port_probe, + .port_disable = svia_port_disable, + .set_piomode = svia_set_piomode, + .set_udmamode = svia_set_udmamode, + + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + + .bmdma_start = ata_bmdma_start_pio, +}; + +struct ata_board svia_board_tbl[] = { + /* via_sata */ + { + .sht = &svia_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY + | ATA_FLAG_SRST, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .host_info = &svia_sata_ops, + }, +}; + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, svia_pci_tbl); + +/** + * svia_port_probe - + * @ap: + * + * LOCKING: + * + */ + +static void svia_port_probe(struct ata_port *ap) +{ + /* FIXME */ + + ata_port_probe(ap); +} + +/** + * svia_port_disable - + * @ap: + * + * LOCKING: + * + */ + +static void svia_port_disable(struct ata_port *ap) +{ + ata_port_disable(ap); + + /* FIXME */ +} + +/** + * svia_set_piomode - + * @ap: + * @adev: + * @pio: + * + * LOCKING: + * + */ + +static void svia_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio) +{ + /* FIXME: needed? */ +} + +/** + * svia_set_udmamode - + * @ap: + * @adev: + * @udma: + * + * LOCKING: + * + */ + +static void svia_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma) +{ + /* FIXME: needed? 
*/ +} + +/** + * svia_init_one - + * @pdev: + * @ent: + * + * LOCKING: + * + * RETURNS: + * + */ + +static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_board *boards[1]; + unsigned int n_boards = 1; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* no hotplugging support (FIXME) */ + if (!in_module_init) + return -ENODEV; + + boards[0] = &svia_board_tbl[ent->driver_data]; + + return ata_pci_init_one(pdev, boards, n_boards); +} + +/** + * svia_init - + * + * LOCKING: + * + * RETURNS: + * + */ + +static int __init svia_init(void) +{ + int rc; + + DPRINTK("pci_module_init\n"); + rc = pci_module_init(&svia_pci_driver); + if (rc) + return rc; + + in_module_init = 0; + + DPRINTK("scsi_register_host\n"); + rc = scsi_register_module(MODULE_SCSI_HA, &svia_sht); + if (rc) { + rc = -ENODEV; + goto err_out; + } + + DPRINTK("done\n"); + return 0; + +err_out: + pci_unregister_driver(&svia_pci_driver); + return rc; +} + +/** + * svia_exit - + * + * LOCKING: + * + */ + +static void __exit svia_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &svia_sht); + pci_unregister_driver(&svia_pci_driver); +} + +module_init(svia_init); +module_exit(svia_exit); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/scsi.c linux.22-ac2/drivers/scsi/scsi.c --- linux.vanilla/drivers/scsi/scsi.c 2003-08-28 16:45:38.000000000 +0100 +++ linux.22-ac2/drivers/scsi/scsi.c 2003-07-06 18:18:19.000000000 +0100 @@ -353,8 +353,9 @@ int interruptable) { struct Scsi_Host *host; - Scsi_Cmnd *SCpnt = NULL; + Scsi_Cmnd *SCpnt; Scsi_Device *SDpnt; + struct list_head *lp; unsigned long flags; if (!device) @@ -365,7 +366,6 @@ spin_lock_irqsave(&device_request_lock, flags); while (1 == 1) { - SCpnt = NULL; if (!device->device_blocked) { if (device->single_lun) { /* @@ -405,26 +405,21 @@ * If asked to wait, we need to wait, otherwise * return NULL. */ - SCpnt = NULL; goto busy; } } /* - * Now we can check for a free command block for this device. + * Is there a free command block for this device? */ - for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) { - if (SCpnt->request.rq_status == RQ_INACTIVE) - break; - } + if (!list_empty(&device->sdev_free_q)) + goto found; } + /* - * If we couldn't find a free command block, and we have been + * Couldn't find a free command block, and we have been * asked to wait, then do so. */ - if (SCpnt) { - break; - } - busy: +busy: /* * If we have been asked to wait for a free block, then * wait here. 
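/*
 * Editor's note, not part of the patch: the scsi.c changes above replace the
 * linear scan of device->device_queue for an RQ_INACTIVE command with a
 * per-device free list (Scsi_Device::sdev_free_q, linked through the new
 * Scsi_Cmnd::sc_list field), so allocation becomes a constant-time pop under
 * device_request_lock.  The core of the new allocation path, completed in
 * the hunk just below, paraphrased:
 */
#if 0
	if (!list_empty(&device->sdev_free_q)) {
		struct list_head *lp = device->sdev_free_q.next;

		list_del(lp);					/* pop a free command */
		SCpnt = list_entry(lp, Scsi_Cmnd, sc_list);
		SCpnt->request.rq_status = RQ_SCSI_BUSY;	/* now in use */
	}
	/* the release hunk below pushes it back with list_add() */
#endif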
@@ -476,12 +471,20 @@ return NULL; } } + continue; } else { spin_unlock_irqrestore(&device_request_lock, flags); return NULL; } } +found: + lp = device->sdev_free_q.next; + list_del(lp); + SCpnt = list_entry(lp, Scsi_Cmnd, sc_list); + if (SCpnt->request.rq_status != RQ_INACTIVE) + BUG(); + SCpnt->request.rq_status = RQ_SCSI_BUSY; SCpnt->request.waiting = NULL; /* And no one is waiting for this * to complete */ @@ -527,6 +530,9 @@ SDpnt = SCpnt->device; + /* command is now free - add to list */ + list_add(&SCpnt->sc_list, &SDpnt->sdev_free_q); + SCpnt->request.rq_status = RQ_INACTIVE; SCpnt->state = SCSI_STATE_UNUSED; SCpnt->owner = SCSI_OWNER_NOBODY; @@ -1334,14 +1340,10 @@ */ int scsi_retry_command(Scsi_Cmnd * SCpnt) { - memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, - sizeof(SCpnt->data_cmnd)); - SCpnt->request_buffer = SCpnt->buffer; - SCpnt->request_bufflen = SCpnt->bufflen; - SCpnt->use_sg = SCpnt->old_use_sg; - SCpnt->cmd_len = SCpnt->old_cmd_len; - SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; - SCpnt->underflow = SCpnt->old_underflow; + /* + * Restore the SCSI command state. + */ + scsi_setup_cmd_retry(SCpnt); /* * Zero the sense information from the last time we tried @@ -1449,6 +1451,7 @@ spin_lock_irqsave(&device_request_lock, flags); for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) { SDpnt->device_queue = SCnext = SCpnt->next; + list_del(&SCpnt->sc_list); kfree((char *) SCpnt); } SDpnt->has_cmdblocks = 0; @@ -1485,6 +1488,7 @@ SDpnt->queue_depth = 1; /* live to fight another day */ } SDpnt->device_queue = NULL; + INIT_LIST_HEAD(&SDpnt->sdev_free_q); for (j = 0; j < SDpnt->queue_depth; j++) { SCpnt = (Scsi_Cmnd *) @@ -1514,6 +1518,7 @@ SDpnt->device_queue = SCpnt; SCpnt->state = SCSI_STATE_UNUSED; SCpnt->owner = SCSI_OWNER_NOBODY; + list_add(&SCpnt->sc_list, &SDpnt->sdev_free_q); } if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */ printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n", diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/scsi_debug.c linux.22-ac2/drivers/scsi/scsi_debug.c --- linux.vanilla/drivers/scsi/scsi_debug.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/scsi/scsi_debug.c 2003-07-07 15:53:33.000000000 +0100 @@ -578,7 +578,7 @@ pcontrol = (cmd[2] & 0xc0) >> 6; pcode = cmd[2] & 0x3f; msense_6 = (MODE_SENSE == cmd[0]); - alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[6]); + alloc_len = msense_6 ? 
cmd[4] : ((cmd[7] << 8) | cmd[8]); /* printk(KERN_INFO "msense: dbd=%d pcontrol=%d pcode=%d " "msense_6=%d alloc_len=%d\n", dbd, pcontrol, pcode, " "msense_6, alloc_len); */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/scsi_error.c linux.22-ac2/drivers/scsi/scsi_error.c --- linux.vanilla/drivers/scsi/scsi_error.c 2002-11-29 21:27:18.000000000 +0000 +++ linux.22-ac2/drivers/scsi/scsi_error.c 2003-06-29 16:10:05.000000000 +0100 @@ -385,16 +385,10 @@ */ STATIC int scsi_eh_retry_command(Scsi_Cmnd * SCpnt) { - memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, - sizeof(SCpnt->data_cmnd)); - SCpnt->request_buffer = SCpnt->buffer; - SCpnt->request_bufflen = SCpnt->bufflen; - SCpnt->use_sg = SCpnt->old_use_sg; - SCpnt->cmd_len = SCpnt->old_cmd_len; - SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; - SCpnt->underflow = SCpnt->old_underflow; - - scsi_send_eh_cmnd(SCpnt, SCpnt->timeout_per_command); + do { + scsi_setup_cmd_retry(SCpnt); + scsi_send_eh_cmnd(SCpnt, SCpnt->timeout_per_command); + } while (SCpnt->eh_state == NEEDS_RETRY); /* * Hey, we are done. Let's look to see what happened. @@ -425,12 +419,6 @@ ASSERT_LOCK(&io_request_lock, 0); - memcpy((void *) SCpnt->cmnd, (void *) generic_sense, - sizeof(generic_sense)); - - if (SCpnt->device->scsi_level <= SCSI_2) - SCpnt->cmnd[1] = SCpnt->lun << 5; - scsi_result = (!SCpnt->host->hostt->unchecked_isa_dma) ? &scsi_result0[0] : kmalloc(512, GFP_ATOMIC | GFP_DMA); @@ -438,24 +426,40 @@ printk("cannot allocate scsi_result in scsi_request_sense.\n"); return FAILED; } - /* - * Zero the sense buffer. Some host adapters automatically always request - * sense, so it is not a good idea that SCpnt->request_buffer and - * SCpnt->sense_buffer point to the same address (DB). - * 0 is not a valid sense code. - */ - memset((void *) SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); - memset((void *) scsi_result, 0, 256); saved_result = SCpnt->result; - SCpnt->request_buffer = scsi_result; - SCpnt->request_bufflen = 256; - SCpnt->use_sg = 0; - SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); - SCpnt->sc_data_direction = SCSI_DATA_READ; - SCpnt->underflow = 0; - scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + do { + memcpy((void *) SCpnt->cmnd, (void *) generic_sense, + sizeof(generic_sense)); + + if (SCpnt->device->scsi_level <= SCSI_2) + SCpnt->cmnd[1] = SCpnt->lun << 5; + + /* + * Zero the sense buffer. Some host adapters automatically + * always request sense, so it is not a good idea that + * SCpnt->request_buffer and SCpnt->sense_buffer point to + * the same address (DB). 0 is not a valid sense code. + */ + memset((void *) SCpnt->sense_buffer, 0, + sizeof(SCpnt->sense_buffer)); + memset((void *) scsi_result, 0, 256); + + SCpnt->request_buffer = scsi_result; + SCpnt->request_bufflen = 256; + SCpnt->use_sg = 0; + SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); + SCpnt->sc_data_direction = SCSI_DATA_READ; + SCpnt->underflow = 0; + + scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + /* + * If the SCSI device responded with "logical unit + * is in process of becoming ready", we need to + * retry this command. 
+ */ + } while (SCpnt->eh_state == NEEDS_RETRY); /* Last chance to have valid sense data */ if (!scsi_sense_valid(SCpnt)) @@ -497,26 +501,34 @@ static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; - memcpy((void *) SCpnt->cmnd, (void *) tur_command, - sizeof(tur_command)); + do { + memcpy((void *) SCpnt->cmnd, (void *) tur_command, + sizeof(tur_command)); - if (SCpnt->device->scsi_level <= SCSI_2) - SCpnt->cmnd[1] = SCpnt->lun << 5; + if (SCpnt->device->scsi_level <= SCSI_2) + SCpnt->cmnd[1] = SCpnt->lun << 5; - /* - * Zero the sense buffer. The SCSI spec mandates that any - * untransferred sense data should be interpreted as being zero. - */ - memset((void *) SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); + /* + * Zero the sense buffer. The SCSI spec mandates that any + * untransferred sense data should be interpreted as being zero. + */ + memset((void *) SCpnt->sense_buffer, 0, + sizeof(SCpnt->sense_buffer)); - SCpnt->request_buffer = NULL; - SCpnt->request_bufflen = 0; - SCpnt->use_sg = 0; - SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); - SCpnt->underflow = 0; - SCpnt->sc_data_direction = SCSI_DATA_NONE; + SCpnt->request_buffer = NULL; + SCpnt->request_bufflen = 0; + SCpnt->use_sg = 0; + SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); + SCpnt->underflow = 0; + SCpnt->sc_data_direction = SCSI_DATA_NONE; - scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + /* + * If the SCSI device responded with "logical unit + * is in process of becoming ready", we need to + * retry this command. + */ + } while (SCpnt->eh_state == NEEDS_RETRY); /* * When we eventually call scsi_finish, we really wish to complete @@ -589,7 +601,6 @@ host = SCpnt->host; - retry: /* * We will use a queued command if possible, otherwise we will emulate the * queuing and calling of completion function ourselves. @@ -672,14 +683,13 @@ SCSI_LOG_ERROR_RECOVERY(3, printk("scsi_send_eh_cmnd: scsi_eh_completed_normally %x\n", ret)); switch (ret) { - case SUCCESS: - SCpnt->eh_state = SUCCESS; - break; - case NEEDS_RETRY: - goto retry; - case FAILED: default: - SCpnt->eh_state = FAILED; + ret = FAILED; + /*FALLTHROUGH*/ + case FAILED: + case NEEDS_RETRY: + case SUCCESS: + SCpnt->eh_state = ret; break; } } else { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/scsi.h linux.22-ac2/drivers/scsi/scsi.h --- linux.vanilla/drivers/scsi/scsi.h 2003-08-28 16:45:38.000000000 +0100 +++ linux.22-ac2/drivers/scsi/scsi.h 2003-09-09 22:27:29.000000000 +0100 @@ -465,6 +465,7 @@ int sectors); extern struct Scsi_Device_Template *scsi_get_request_dev(struct request *); extern int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt); +extern void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt); extern int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int); extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, int block_sectors); @@ -558,6 +559,7 @@ int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize new request */ Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */ + struct list_head sdev_free_q; /* list of free cmds */ /* public: */ unsigned int id, lun, channel; @@ -776,6 +778,8 @@ * received on original command * (auto-sense) */ + struct list_head sc_list; /* Inactive cmd list linkage, guarded + * by device_request_lock. 
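/*
 * Editor's note, not part of the patch: scsi_send_eh_cmnd() used to loop
 * internally ("goto retry") when scsi_eh_completed_normally() returned
 * NEEDS_RETRY, re-issuing the command without rebuilding it.  The hunks
 * above record NEEDS_RETRY in SCpnt->eh_state instead, and each caller
 * (retry, request-sense, test-unit-ready) reconstructs the command before
 * looping, in the pattern:
 */
#if 0
	do {
		scsi_setup_cmd_retry(SCpnt);	/* restore saved command state,
						 * added in scsi_lib.c below */
		scsi_send_eh_cmnd(SCpnt, SCpnt->timeout_per_command);
	} while (SCpnt->eh_state == NEEDS_RETRY);
#endif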
*/ unsigned flags; /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/scsi_lib.c linux.22-ac2/drivers/scsi/scsi_lib.c --- linux.vanilla/drivers/scsi/scsi_lib.c 2003-08-28 16:45:38.000000000 +0100 +++ linux.22-ac2/drivers/scsi/scsi_lib.c 2003-07-06 18:18:19.000000000 +0100 @@ -208,6 +208,30 @@ } /* + * Function: scsi_setup_cmd_retry() + * + * Purpose: Restore the command state for a retry + * + * Arguments: SCpnt - command to be restored + * + * Returns: Nothing + * + * Notes: Immediately prior to retrying a command, we need + * to restore certain fields that we saved above. + */ +void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt) +{ + memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, + sizeof(SCpnt->data_cmnd)); + SCpnt->request_buffer = SCpnt->buffer; + SCpnt->request_bufflen = SCpnt->bufflen; + SCpnt->use_sg = SCpnt->old_use_sg; + SCpnt->cmd_len = SCpnt->old_cmd_len; + SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; + SCpnt->underflow = SCpnt->old_underflow; +} + +/* * Function: scsi_queue_next_request() * * Purpose: Handle post-processing of completed commands. @@ -723,7 +747,7 @@ printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ", SCpnt->host->host_no, (int) SCpnt->channel, (int) SCpnt->target, (int) SCpnt->lun); - print_command(SCpnt->cmnd); + print_command(SCpnt->data_cmnd); print_sense("sd", SCpnt); SCpnt = scsi_end_request(SCpnt, 0, block_sectors); return; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/scsi_scan.c linux.22-ac2/drivers/scsi/scsi_scan.c --- linux.vanilla/drivers/scsi/scsi_scan.c 2003-08-28 16:45:38.000000000 +0100 +++ linux.22-ac2/drivers/scsi/scsi_scan.c 2003-07-09 12:51:32.000000000 +0100 @@ -147,7 +147,7 @@ {"EMULEX", "MD21/S2 ESDI", "*", BLIST_SINGLELUN}, {"CANON", "IPUBJD", "*", BLIST_SPARSELUN}, {"nCipher", "Fastness Crypto", "*", BLIST_FORCELUN}, - {"DEC","HSG80","*", BLIST_FORCELUN | BLIST_NOSTARTONADD}, + {"DEC","HSG80","*", BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_NOSTARTONADD}, {"COMPAQ","LOGICAL VOLUME","*", BLIST_FORCELUN}, {"COMPAQ","CR3500","*", BLIST_FORCELUN}, {"NEC", "PD-1 ODX654P", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, @@ -204,6 +204,7 @@ {"HP", "C7200", "*", BLIST_SPARSELUN}, /* Medium Changer */ {"SMSC", "USB 2 HS", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"XYRATEX", "RS", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, + {"SUN", "StorEdge", "*", BLIST_SPARSELUN}, /* * Must be at end of list... @@ -363,6 +364,7 @@ SDpnt->queue_depth = 1; SDpnt->host = shpnt; SDpnt->online = TRUE; + SDpnt->access_count = 1; initialize_merge_fn(SDpnt); @@ -618,6 +620,7 @@ } else { /* assume no peripheral if any other sort of error */ scsi_release_request(SRpnt); + scsi_release_commandblocks(SDpnt); return 0; } } @@ -641,6 +644,7 @@ */ if ((scsi_result[0] >> 5) == 3) { scsi_release_request(SRpnt); + scsi_release_commandblocks(SDpnt); return 0; /* assume no peripheral if any sort of error */ } /* The Toshiba ROM was "gender-changed" here as an inline hack. @@ -731,7 +735,6 @@ */ SDpnt->disconnect = 0; - /* * Set the tagged_queue flag for SCSI-II devices that purport to support * tagged queuing in the INQUIRY data. @@ -809,6 +812,7 @@ SDpnt->host = shpnt; SDpnt->online = TRUE; SDpnt->scsi_level = scsi_level; + SDpnt->access_count = 1; /* * Register the queue for the device. 
All I/O requests will come diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/scsi/sd.h linux.22-ac2/drivers/scsi/sd.h --- linux.vanilla/drivers/scsi/sd.h 2002-08-03 16:08:27.000000000 +0100 +++ linux.22-ac2/drivers/scsi/sd.h 2003-09-09 22:27:29.000000000 +0100 @@ -24,13 +24,13 @@ #endif typedef struct scsi_disk { - unsigned capacity; /* size in blocks */ Scsi_Device *device; - unsigned char ready; /* flag ready for FLOPTICAL */ - unsigned char write_prot; /* flag write_protect for rmvable dev */ - unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */ - unsigned char sector_bit_shift; /* power of 2 sectors per FS block */ + unsigned capacity; /* size in blocks */ + unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */ + unsigned char sector_bit_shift; /* power of 2 sectors per FS block */ unsigned has_part_table:1; /* has partition table */ + unsigned ready:1; /* flag ready for FLOPTICAL */ + unsigned write_prot:1; /* flag write_protect for rmvable dev */ } Scsi_Disk; extern int revalidate_scsidisk(kdev_t dev, int maxusage); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/ac97_plugin_wm97xx.c linux.22-ac2/drivers/sound/ac97_plugin_wm97xx.c --- linux.vanilla/drivers/sound/ac97_plugin_wm97xx.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/sound/ac97_plugin_wm97xx.c 2003-07-31 14:00:07.000000000 +0100 @@ -0,0 +1,1408 @@ +/* + * ac97_plugin_wm97xx.c -- Touch screen driver for Wolfson WM9705 and WM9712 + * AC97 Codecs. + * + * Copyright 2003 Wolfson Microelectronics PLC. + * Author: Liam Girdwood + * liam.girdwood@wolfsonmicro.com or linux@wolfsonmicro.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Notes: + * + * Features: + * - supports WM9705, WM9712 + * - polling mode + * - coordinate polling + * - adjustable rpu/dpp settings + * - adjustable pressure current + * - adjustable sample settle delay + * - 4 and 5 wire touchscreens (5 wire is WM9712 only) + * - pen down detection + * - battery monitor + * - sample AUX adc's + * - power management + * - direct AC97 IO from userspace (#define WM97XX_TS_DEBUG) + * + * TODO: + * - continuous mode + * - adjustable sample rate + * - AUX adc in coordinate / continous modes + * - Official device identifier or misc device ? + * + * Revision history + * 7th May 2003 Initial version. 
+ * 6th June 2003 Added non module support and AC97 registration. + * 18th June 2003 Added AUX adc sampling. + * 23rd June 2003 Did some minimal reformatting, fixed a couple of + * locking bugs and noted a race to fix. + * 24th June 2003 Added power management and fixed race condition. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* WM97xx registers and bits */ +#include /* get_user,copy_to_user */ +#include + +#define TS_NAME "ac97_plugin_wm97xx" +#define TS_MINOR 16 +#define WM_TS_VERSION "0.6" +#define AC97_NUM_REG 64 + + +/* + * Debug + */ + +#define PFX TS_NAME +#define WM97XX_TS_DEBUG 0 + +#ifdef WM97XX_TS_DEBUG +#define dbg(format, arg...) printk(KERN_DEBUG PFX ": " format "\n" , ## arg) +#else +#define dbg(format, arg...) do {} while (0) +#endif +#define err(format, arg...) printk(KERN_ERR PFX ": " format "\n" , ## arg) +#define info(format, arg...) printk(KERN_INFO PFX ": " format "\n" , ## arg) +#define warn(format, arg...) printk(KERN_WARNING PFX ": " format "\n" , ## arg) + +/* + * Module parameters + */ + + +/* + * Set the codec sample mode. + * + * The WM9712 can sample touchscreen data in 3 different operating + * modes. i.e. polling, coordinate and continous. + * + * Polling:- The driver polls the codec and issues 3 seperate commands + * over the AC97 link to read X,Y and pressure. + * + * Coordinate: - The driver polls the codec and only issues 1 command over + * the AC97 link to read X,Y and pressure. This mode has + * strict timing requirements and may drop samples if + * interrupted. However, it is less demanding on the AC97 + * link. Note: this mode requires a larger delay than polling + * mode. + * + * Continuous:- The codec automatically samples X,Y and pressure and then + * sends the data over the AC97 link in slots. This is the + * same method used by the codec when recording audio. + * + * Set mode = 0 for polling, 1 for coordinate and 2 for continuous. + * + */ +MODULE_PARM(mode,"i"); +MODULE_PARM_DESC(mode, "Set WM97XX operation mode"); +static int mode = 0; + +/* + * WM9712 - Set internal pull up for pen detect. + * + * Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive) + * i.e. pull up resistance = 64k Ohms / rpu. + * + * Adjust this value if you are having problems with pen detect not + * detecting any down events. + */ +MODULE_PARM(rpu,"i"); +MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect."); +static int rpu = 0; + +/* + * WM9705 - Pen detect comparator threshold. + * + * 0 to Vmid in 15 steps, 0 = use zero power comparator with Vmid threshold + * i.e. 1 = Vmid/15 threshold + * 15 = Vmid/1 threshold + * + * Adjust this value if you are having problems with pen detect not + * detecting any down events. + */ +MODULE_PARM(pdd,"i"); +MODULE_PARM_DESC(pdd, "Set pen detect comparator threshold"); +static int pdd = 0; + +/* + * Set current used for pressure measurement. + * + * Set pil = 2 to use 400uA + * pil = 1 to use 200uA and + * pil = 0 to disable pressure measurement. + * + * This is used to increase the range of values returned by the adc + * when measureing touchpanel pressure. + */ +MODULE_PARM(pil,"i"); +MODULE_PARM_DESC(pil, "Set current used for pressure measurement."); +static int pil = 0; + +/* + * WM9712 - Set five_wire = 1 to use a 5 wire touchscreen. + * + * NOTE: Five wire mode does not allow for readback of pressure. 
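/*
 * Editor's note, not part of the patch: the rpu parameter documented above
 * selects the WM9712 pen-detect pull-up as 64k Ohms / rpu, the same
 * conversion the driver's /proc output uses later (64000 / prpu).  A worked
 * example with an illustrative helper name:
 */
static inline unsigned int sample_rpu_to_ohms(unsigned int rpu_val)
{
	/* rpu = 1 -> 64000 Ohms (most sensitive), rpu = 63 -> ~1016 Ohms */
	return rpu_val ? 64000 / rpu_val : 0;	/* 0 = parameter left unset */
}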
+ */ +MODULE_PARM(five_wire,"i"); +MODULE_PARM_DESC(five_wire, "Set 5 wire touchscreen."); +static int five_wire = 0; + +/* + * Set adc sample delay. + * + * For accurate touchpanel measurements, some settling time may be + * required between the switch matrix applying a voltage across the + * touchpanel plate and the ADC sampling the signal. + * + * This delay can be set by setting delay = n, where n is the array + * position of the delay in the array delay_table below. + * Long delays > 1ms are supported for completeness, but are not + * recommended. + */ +MODULE_PARM(delay,"i"); +MODULE_PARM_DESC(delay, "Set adc sample delay."); +static int delay = 4; + + +/* +++++++++++++ Lifted from include/linux/h3600_ts.h ++++++++++++++*/ +typedef struct { + unsigned short pressure; // touch pressure + unsigned short x; // calibrated X + unsigned short y; // calibrated Y + unsigned short millisecs; // timestamp of this event +} TS_EVENT; + +typedef struct { + int xscale; + int xtrans; + int yscale; + int ytrans; + int xyswap; +} TS_CAL; + +/* Use 'f' as magic number */ +#define IOC_MAGIC 'f' + +#define TS_GET_RATE _IO(IOC_MAGIC, 8) +#define TS_SET_RATE _IO(IOC_MAGIC, 9) +#define TS_GET_CAL _IOR(IOC_MAGIC, 10, TS_CAL) +#define TS_SET_CAL _IOW(IOC_MAGIC, 11, TS_CAL) + +/* +++++++++++++ Done lifted from include/linux/h3600_ts.h +++++++++*/ + +#define TS_GET_COMP1 _IOR(IOC_MAGIC, 12, short) +#define TS_GET_COMP2 _IOR(IOC_MAGIC, 13, short) +#define TS_GET_BMON _IOR(IOC_MAGIC, 14, short) +#define TS_GET_WIPER _IOR(IOC_MAGIC, 15, short) + +#ifdef WM97XX_TS_DEBUG +/* debug get/set ac97 codec register ioctl's */ +#define TS_GET_AC97_REG _IOR(IOC_MAGIC, 20, short) +#define TS_SET_AC97_REG _IOW(IOC_MAGIC, 21, short) +#define TS_SET_AC97_INDEX _IOW(IOC_MAGIC, 22, short) +#endif + +#define EVENT_BUFSIZE 128 + +typedef struct { + TS_CAL cal; /* Calibration values */ + TS_EVENT event_buf[EVENT_BUFSIZE];/* The event queue */ + int nextIn, nextOut; + int event_count; + int is_wm9712:1; /* are we a WM912 or a WM9705 */ + int is_registered:1; /* Is the driver AC97 registered */ + int line_pgal:5; + int line_pgar:5; + int phone_pga:5; + int mic_pgal:5; + int mic_pgar:5; + int overruns; /* event buffer overruns */ + int adc_errs; /* sample read back errors */ +#ifdef WM97XX_TS_DEBUG + short ac97_index; +#endif + struct fasync_struct *fasync; /* asynch notification */ + struct timer_list acq_timer; /* Timer for triggering acquisitions */ + wait_queue_head_t wait; /* read wait queue */ + spinlock_t lock; + struct ac97_codec *codec; + struct proc_dir_entry *wm97xx_ts_ps; +#ifdef WM97XX_TS_DEBUG + struct proc_dir_entry *wm97xx_debug_ts_ps; +#endif + struct pm_dev * pm; +} wm97xx_ts_t; + +static inline void poll_delay (void); +static int __init wm97xx_ts_init_module(void); +static int wm97xx_poll_read_adc (wm97xx_ts_t* ts, u16 adcsel, u16* sample); +static int wm97xx_coord_read_adc (wm97xx_ts_t* ts, u16* x, u16* y, + u16* pressure); +static inline int pendown (wm97xx_ts_t *ts); +static void wm97xx_acq_timer(unsigned long data); +static int wm97xx_fasync(int fd, struct file *filp, int mode); +static int wm97xx_ioctl(struct inode * inode, struct file *filp, + unsigned int cmd, unsigned long arg); +static unsigned int wm97xx_poll(struct file * filp, poll_table * wait); +static ssize_t wm97xx_read(struct file * filp, char * buf, size_t count, + loff_t * l); +static int wm97xx_open(struct inode * inode, struct file * filp); +static int wm97xx_release(struct inode * inode, struct file * filp); +static void init_wm97xx_phy(void); 
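/*
 * Editor's note, not part of the patch: the TS_EVENT/TS_CAL types and ioctl
 * numbers above define the user-space interface -- read() returns whole
 * TS_EVENT records and TS_GET_CAL/TS_SET_CAL move a TS_CAL blob.  A sketch
 * of user-space usage, assuming "fd" is an open descriptor on the
 * touchscreen device node and the usual unistd/ioctl/stdio headers:
 */
#if 0
	TS_EVENT ev;
	TS_CAL cal;

	if (ioctl(fd, TS_GET_CAL, &cal) == 0)
		;	/* cal.xscale is an 8.8 fixed-point scale, cal.xtrans an offset */

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("x=%u y=%u pressure=%u t=%u\n",
		       ev.x, ev.y, ev.pressure, ev.millisecs);
#endif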
+static int adc_get (wm97xx_ts_t *ts, unsigned short *value, int id); +static int wm97xx_probe(struct ac97_codec *codec, struct ac97_driver *driver); +static void wm97xx_remove(struct ac97_codec *codec, struct ac97_driver *driver); +static void wm97xx_ts_cleanup_module(void); +static int wm97xx_pm_event(struct pm_dev *dev, pm_request_t rqst, void *data); +static void wm97xx_suspend(void); +static void wm97xx_resume(void); +static void wm9712_pga_save(wm97xx_ts_t* ts); +static void wm9712_pga_restore(wm97xx_ts_t* ts); + +/* AC97 registration info */ +static struct ac97_driver wm9705_driver = { + codec_id: 0x574D4C05, + codec_mask: 0xFFFFFFFF, + name: "Wolfson WM9705 Touchscreen/BMON", + probe: wm97xx_probe, + remove: __devexit_p(wm97xx_remove), +}; + +static struct ac97_driver wm9712_driver = { + codec_id: 0x574D4C12, + codec_mask: 0xFFFFFFFF, + name: "Wolfson WM9712 Touchscreen/BMON", + probe: wm97xx_probe, + remove: __devexit_p(wm97xx_remove), +}; + +/* we only support a single touchscreen */ +static wm97xx_ts_t wm97xx_ts; + +/* + * ADC sample delay times in uS + */ +static const int delay_table[16] = { + 21, // 1 AC97 Link frames + 42, // 2 + 84, // 4 + 167, // 8 + 333, // 16 + 667, // 32 + 1000, // 48 + 1333, // 64 + 2000, // 96 + 2667, // 128 + 3333, // 160 + 4000, // 192 + 4667, // 224 + 5333, // 256 + 6000, // 288 + 0 // No delay, switch matrix always on +}; + +/* + * Delay after issuing a POLL command. + * + * The delay is 3 AC97 link frames + the touchpanel settling delay + */ + +static inline void poll_delay(void) +{ + int pdelay = 3 * AC97_LINK_FRAME + delay_table[delay]; + udelay (pdelay); +} + + +/* + * sample the auxillary ADC's + */ + +static int adc_get(wm97xx_ts_t* ts, unsigned short * value, int id) +{ + short adcsel = 0; + + /* first find out our adcsel flag */ + if (ts->is_wm9712) { + switch (id) { + case TS_COMP1: + adcsel = WM9712_ADCSEL_COMP1; + break; + case TS_COMP2: + adcsel = WM9712_ADCSEL_COMP2; + break; + case TS_BMON: + adcsel = WM9712_ADCSEL_BMON; + break; + case TS_WIPER: + adcsel = WM9712_ADCSEL_WIPER; + break; + } + } else { + switch (id) { + case TS_COMP1: + adcsel = WM9705_ADCSEL_PCBEEP; + break; + case TS_COMP2: + adcsel = WM9705_ADCSEL_PHONE; + break; + case TS_BMON: + adcsel = WM9705_ADCSEL_BMON; + break; + case TS_WIPER: + adcsel = WM9705_ADCSEL_AUX; + break; + } + } + + /* now sample the adc */ + if (mode == 1) { + /* coordinate mode - not currently available (TODO) */ + return 0; + } + else + { + /* polling mode */ + if (!wm97xx_poll_read_adc(ts, adcsel, value)) + return 0; + } + + return 1; +} + + +/* + * Read a sample from the adc in polling mode. 
+ */ +static int wm97xx_poll_read_adc (wm97xx_ts_t* ts, u16 adcsel, u16* sample) +{ + u16 dig1; + int timeout = 5 * delay; + + /* set up digitiser */ + dig1 = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER1); + dig1&=0x0fff; + ts->codec->codec_write(ts->codec, AC97_WM97XX_DIGITISER1, dig1 | adcsel | + WM97XX_POLL); + + /* wait 3 AC97 time slots + delay for conversion */ + poll_delay(); + + /* wait for POLL to go low */ + while ((ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER1) & WM97XX_POLL) && timeout) { + udelay(AC97_LINK_FRAME); + timeout--; + } + if (timeout > 0) + *sample = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER_RD); + else { + ts->adc_errs++; + err ("adc sample timeout"); + return 0; + } + + /* check we have correct sample */ + if ((*sample & 0x7000) != adcsel ) { + err ("adc wrong sample, read %x got %x", adcsel, *sample & 0x7000); + return 0; + } + return 1; +} + +/* + * Read a sample from the adc in coordinate mode. + */ +static int wm97xx_coord_read_adc(wm97xx_ts_t* ts, u16* x, u16* y, u16* pressure) +{ + u16 dig1; + int timeout = 5 * delay; + + /* set up digitiser */ + dig1 = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER1); + dig1&=0x0fff; + ts->codec->codec_write(ts->codec, AC97_WM97XX_DIGITISER1, dig1 | WM97XX_ADCSEL_PRES | + WM97XX_POLL); + + /* wait 3 AC97 time slots + delay for conversion */ + poll_delay(); + + /* read X then wait for 1 AC97 link frame + settling delay */ + *x = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER_RD); + udelay (AC97_LINK_FRAME + delay_table[delay]); + + /* read Y */ + *y = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER_RD); + + /* wait for POLL to go low and then read pressure */ + while ((ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER1) & WM97XX_POLL)&& timeout) { + udelay(AC97_LINK_FRAME); + timeout--; + } + if (timeout > 0) + *pressure = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER_RD); + else { + ts->adc_errs++; + err ("adc sample timeout"); + return 0; + } + + /* check we have correct samples */ + if (((*x & 0x7000) == 0x1000) && ((*y & 0x7000) == 0x2000) && + ((*pressure & 0x7000) == 0x3000)) { + return 1; + } else { + ts->adc_errs++; + err ("adc got wrong samples, got x 0x%x y 0x%x pressure 0x%x", *x, *y, *pressure); + return 0; + } +} + +/* + * Is the pen down ? + */ +static inline int pendown (wm97xx_ts_t *ts) +{ + return ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER_RD) & WM97XX_PEN_DOWN; +} + +/* + * X,Y coordinates and pressure aquisition function. + * This function is run by a kernel timer and it's frequency between + * calls is the touchscreen polling rate; + */ + +static void wm97xx_acq_timer(unsigned long data) +{ + wm97xx_ts_t* ts = (wm97xx_ts_t*)data; + unsigned long flags; + long x,y; + TS_EVENT event; + + spin_lock_irqsave(&ts->lock, flags); + + /* are we still registered ? 
*/ + if (!ts->is_registered) { + spin_unlock_irqrestore(&ts->lock, flags); + return; /* we better stop then */ + } + + /* read coordinates if pen is down */ + if (!pendown(ts)) + goto acq_exit; + + if (mode == 1) { + /* coordinate mode */ + if (!wm97xx_coord_read_adc(ts, (u16*)&x, (u16*)&y, &event.pressure)) + goto acq_exit; + } else + { + /* polling mode */ + if (!wm97xx_poll_read_adc(ts, WM97XX_ADCSEL_X, (u16*)&x)) + goto acq_exit; + if (!wm97xx_poll_read_adc(ts, WM97XX_ADCSEL_Y, (u16*)&y)) + goto acq_exit; + + /* only read pressure if we have to */ + if (!five_wire && pil) { + if (!wm97xx_poll_read_adc(ts, WM97XX_ADCSEL_PRES, &event.pressure)) + goto acq_exit; + } + else + event.pressure = 0; + } + /* timestamp this new event. */ + event.millisecs = jiffies; + + /* calibrate and remove unwanted bits from samples */ + event.pressure &= 0x0fff; + + x &= 0x00000fff; + x = ((ts->cal.xscale * x) >> 8) + ts->cal.xtrans; + event.x = (u16)x; + + y &= 0x00000fff; + y = ((ts->cal.yscale * y) >> 8) + ts->cal.ytrans; + event.y = (u16)y; + + /* add this event to the event queue */ + ts->event_buf[ts->nextIn++] = event; + if (ts->nextIn == EVENT_BUFSIZE) + ts->nextIn = 0; + if (ts->event_count < EVENT_BUFSIZE) { + ts->event_count++; + } else { + /* throw out the oldest event */ + if (++ts->nextOut == EVENT_BUFSIZE) { + ts->nextOut = 0; + ts->overruns++; + } + } + + /* async notify */ + if (ts->fasync) + kill_fasync(&ts->fasync, SIGIO, POLL_IN); + /* wake up any read call */ + if (waitqueue_active(&ts->wait)) + wake_up_interruptible(&ts->wait); + + /* schedule next acquire */ +acq_exit: + ts->acq_timer.expires = jiffies + HZ / 100; + add_timer(&ts->acq_timer); + + spin_unlock_irqrestore(&ts->lock, flags); +} + + +/* +++++++++++++ File operations ++++++++++++++*/ + +static int wm97xx_fasync(int fd, struct file *filp, int mode) +{ + wm97xx_ts_t* ts = (wm97xx_ts_t*)filp->private_data; + return fasync_helper(fd, filp, mode, &ts->fasync); +} + +static int wm97xx_ioctl(struct inode * inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + unsigned short adc_value; +#ifdef WM97XX_TS_DEBUG + short data; +#endif + wm97xx_ts_t* ts = (wm97xx_ts_t*)filp->private_data; + + switch(cmd) { + case TS_GET_RATE: /* TODO: what is this? */ + break; + case TS_SET_RATE: /* TODO: what is this? 
*/ + break; + case TS_GET_CAL: + if(copy_to_user((char *)arg, (char *)&ts->cal, sizeof(TS_CAL))) + return -EFAULT; + break; + case TS_SET_CAL: + if(copy_from_user((char *)&ts->cal, (char *)arg, sizeof(TS_CAL))) + return -EFAULT; + break; + case TS_GET_COMP1: + if (adc_get(ts, &adc_value, TS_COMP1)) { + if(copy_to_user((char *)arg, (char *)&adc_value, sizeof(adc_value))) + return -EFAULT; + } + else + return -EIO; + break; + case TS_GET_COMP2: + if (adc_get(ts, &adc_value, TS_COMP2)) { + if(copy_to_user((char *)arg, (char *)&adc_value, sizeof(adc_value))) + return -EFAULT; + } + else + return -EIO; + break; + case TS_GET_BMON: + if (adc_get(ts, &adc_value, TS_BMON)) { + if(copy_to_user((char *)arg, (char *)&adc_value, sizeof(adc_value))) + return -EFAULT; + } + else + return -EIO; + break; + case TS_GET_WIPER: + if (adc_get(ts, &adc_value, TS_WIPER)) { + if(copy_to_user((char *)arg, (char *)&adc_value, sizeof(adc_value))) + return -EFAULT; + } + else + return -EIO; + break; +#ifdef WM97XX_TS_DEBUG + /* debug get/set ac97 codec register ioctl's + * + * This is direct IO to the codec registers - BE CAREFULL + */ + case TS_GET_AC97_REG: /* read from ac97 reg (index) */ + data = ts->codec->codec_read(ts->codec, ts->ac97_index); + if(copy_to_user((char *)arg, (char *)&data, sizeof(data))) + return -EFAULT; + break; + case TS_SET_AC97_REG: /* write to ac97 reg (index) */ + if(copy_from_user((char *)&data, (char *)arg, sizeof(data))) + return -EFAULT; + ts->codec->codec_write(ts->codec, ts->ac97_index, data); + break; + case TS_SET_AC97_INDEX: /* set ac97 reg index */ + if(copy_from_user((char *)&ts->ac97_index, (char *)arg, sizeof(ts->ac97_index))) + return -EFAULT; + break; +#endif + default: + return -EINVAL; + } + + return 0; +} + +static unsigned int wm97xx_poll(struct file * filp, poll_table * wait) +{ + wm97xx_ts_t *ts = (wm97xx_ts_t *)filp->private_data; + poll_wait(filp, &ts->wait, wait); + if (ts->event_count) + return POLLIN | POLLRDNORM; + return 0; +} + +static ssize_t wm97xx_read(struct file *filp, char *buf, size_t count, loff_t *l) +{ + wm97xx_ts_t* ts = (wm97xx_ts_t*)filp->private_data; + unsigned long flags; + TS_EVENT event; + int i; + + /* are we still registered with AC97 layer ? */ + spin_lock_irqsave(&ts->lock, flags); + if (!ts->is_registered) { + spin_unlock_irqrestore(&ts->lock, flags); + return -ENXIO; + } + + if (ts->event_count == 0) { + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + spin_unlock_irqrestore(&ts->lock, flags); + + wait_event_interruptible(ts->wait, ts->event_count != 0); + + /* are we still registered after sleep ? */ + spin_lock_irqsave(&ts->lock, flags); + if (!ts->is_registered) { + spin_unlock_irqrestore(&ts->lock, flags); + return -ENXIO; + } + if (signal_pending(current)) + return -ERESTARTSYS; + } + + for (i = count; i >= sizeof(TS_EVENT); + i -= sizeof(TS_EVENT), buf += sizeof(TS_EVENT)) { + if (ts->event_count == 0) + break; + spin_lock_irqsave(&ts->lock, flags); + event = ts->event_buf[ts->nextOut++]; + if (ts->nextOut == EVENT_BUFSIZE) + ts->nextOut = 0; + if (ts->event_count) + ts->event_count--; + spin_unlock_irqrestore(&ts->lock, flags); + if(copy_to_user(buf, &event, sizeof(TS_EVENT))) + return i != count ? 
count - i : -EFAULT; + } + return count - i; +} + + +static int wm97xx_open(struct inode * inode, struct file * filp) +{ + wm97xx_ts_t* ts; + unsigned long flags; + u16 val; + int minor = MINOR(inode->i_rdev); + + if (minor != TS_MINOR) + return -ENODEV; + + filp->private_data = ts = &wm97xx_ts; + + spin_lock_irqsave(&ts->lock, flags); + + /* are we registered with AC97 layer ? */ + if (!ts->is_registered) { + spin_unlock_irqrestore(&ts->lock, flags); + return -ENXIO; + } + + /* start digitiser */ + val = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER2); + ts->codec->codec_write(ts->codec, AC97_WM97XX_DIGITISER2, + val | WM97XX_PRP_DET_DIG); + + /* flush event queue */ + ts->nextIn = ts->nextOut = ts->event_count = 0; + + /* Set up timer. */ + init_timer(&ts->acq_timer); + ts->acq_timer.function = wm97xx_acq_timer; + ts->acq_timer.data = (unsigned long)ts; + ts->acq_timer.expires = jiffies + HZ / 100; + add_timer(&ts->acq_timer); + + spin_unlock_irqrestore(&ts->lock, flags); + return 0; +} + +static int wm97xx_release(struct inode * inode, struct file * filp) +{ + wm97xx_ts_t* ts = (wm97xx_ts_t*)filp->private_data; + unsigned long flags; + u16 val; + + wm97xx_fasync(-1, filp, 0); + del_timer_sync(&ts->acq_timer); + + spin_lock_irqsave(&ts->lock, flags); + + /* stop digitiser */ + val = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER2); + ts->codec->codec_write(ts->codec, AC97_WM97XX_DIGITISER2, + val & ~WM97XX_PRP_DET_DIG); + + spin_unlock_irqrestore(&ts->lock, flags); + return 0; +} + +static struct file_operations ts_fops = { + owner: THIS_MODULE, + read: wm97xx_read, + poll: wm97xx_poll, + ioctl: wm97xx_ioctl, + fasync: wm97xx_fasync, + open: wm97xx_open, + release: wm97xx_release, +}; + +/* +++++++++++++ End File operations ++++++++++++++*/ + +#ifdef CONFIG_PROC_FS +static int wm97xx_read_proc (char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = 0, prpu; + u16 dig1, dig2, digrd, adcsel, adcsrc, slt, prp, rev; + unsigned long flags; + char srev = ' '; + + wm97xx_ts_t* ts; + + if ((ts = data) == NULL) + return -ENODEV; + + spin_lock_irqsave(&ts->lock, flags); + if (!ts->is_registered) { + spin_unlock_irqrestore(&ts->lock, flags); + len += sprintf (page+len, "No device registered\n"); + return len; + } + + dig1 = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER1); + dig2 = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER2); + digrd = ts->codec->codec_read(ts->codec, AC97_WM97XX_DIGITISER_RD); + rev = (ts->codec->codec_read(ts->codec, AC97_WM9712_REV) & 0x000c) >> 2; + + spin_unlock_irqrestore(&ts->lock, flags); + + adcsel = dig1 & 0x7000; + adcsrc = digrd & 0x7000; + slt = (dig1 & 0x7) + 5; + prp = dig2 & 0xc000; + prpu = dig2 & 0x003f; + + /* driver version */ + len += sprintf (page+len, "Wolfson WM97xx Version %s\n", WM_TS_VERSION); + + /* what we are using */ + len += sprintf (page+len, "Using %s", ts->is_wm9712 ? "WM9712" : "WM9705"); + if (ts->is_wm9712) { + switch (rev) { + case 0x0: + srev = 'A'; + break; + case 0x1: + srev = 'B'; + break; + case 0x2: + srev = 'D'; + break; + case 0x3: + srev = 'E'; + break; + } + len += sprintf (page+len, " silicon rev %c\n",srev); + } else + len += sprintf (page+len, "\n"); + + /* WM97xx settings */ + len += sprintf (page+len, "Settings :\n%s%s%s%s", + dig1 & WM97XX_POLL ? " -sampling adc data(poll)\n" : "", + adcsel == WM97XX_ADCSEL_X ? " -adc set to X coordinate\n" : "", + adcsel == WM97XX_ADCSEL_Y ? " -adc set to Y coordinate\n" : "", + adcsel == WM97XX_ADCSEL_PRES ? 
" -adc set to pressure\n" : ""); + if (ts->is_wm9712) { + len += sprintf (page+len, "%s%s%s%s", + adcsel == WM9712_ADCSEL_COMP1 ? " -adc set to COMP1/AUX1\n" : "", + adcsel == WM9712_ADCSEL_COMP2 ? " -adc set to COMP2/AUX2\n" : "", + adcsel == WM9712_ADCSEL_BMON ? " -adc set to BMON\n" : "", + adcsel == WM9712_ADCSEL_WIPER ? " -adc set to WIPER\n" : ""); + } else { + len += sprintf (page+len, "%s%s%s%s", + adcsel == WM9705_ADCSEL_PCBEEP ? " -adc set to PCBEEP\n" : "", + adcsel == WM9705_ADCSEL_PHONE ? " -adc set to PHONE\n" : "", + adcsel == WM9705_ADCSEL_BMON ? " -adc set to BMON\n" : "", + adcsel == WM9705_ADCSEL_AUX ? " -adc set to AUX\n" : ""); + } + + len += sprintf (page+len, "%s%s%s%s%s%s", + dig1 & WM97XX_COO ? " -coordinate sampling\n" : " -individual sampling\n", + dig1 & WM97XX_CTC ? " -continuous mode\n" : " -polling mode\n", + prp == WM97XX_PRP_DET ? " -pen detect enabled, no wake up\n" : "", + prp == WM97XX_PRP_DETW ? " -pen detect enabled, wake up\n" : "", + prp == WM97XX_PRP_DET_DIG ? " -pen digitiser and pen detect enabled\n" : "", + dig1 & WM97XX_SLEN ? " -read back using slot " : " -read back using AC97\n"); + + if ((dig1 & WM97XX_SLEN) && slt !=12) + len += sprintf(page+len, "%d\n", slt); + len += sprintf (page+len, " -adc sample delay %d uSecs\n", delay_table[(dig1 & 0x00f0) >> 4]); + + if (ts->is_wm9712) { + if (prpu) + len += sprintf (page+len, " -rpu %d Ohms\n", 64000/ prpu); + len += sprintf (page+len, " -pressure current %s uA\n", dig2 & WM9712_PIL ? "400" : "200"); + len += sprintf (page+len, " -using %s wire touchscreen mode", dig2 & WM9712_45W ? "5" : "4"); + } else { + len += sprintf (page+len, " -pressure current %s uA\n", dig2 & WM9705_PIL ? "400" : "200"); + len += sprintf (page+len, " -%s impedance for PHONE and PCBEEP\n", dig2 & WM9705_PHIZ ? "high" : "low"); + } + + /* WM97xx digitiser read */ + len += sprintf(page+len, "\nADC data:\n%s%d\n%s%s\n", + " -adc value (decimal) : ", digrd & 0x0fff, + " -pen ", digrd & 0x8000 ? "Down" : "Up"); + if (ts->is_wm9712) { + len += sprintf (page+len, "%s%s%s%s", + adcsrc == WM9712_ADCSEL_COMP1 ? " -adc value is COMP1/AUX1\n" : "", + adcsrc == WM9712_ADCSEL_COMP2 ? " -adc value is COMP2/AUX2\n" : "", + adcsrc == WM9712_ADCSEL_BMON ? " -adc value is BMON\n" : "", + adcsrc == WM9712_ADCSEL_WIPER ? " -adc value is WIPER\n" : ""); + } else { + len += sprintf (page+len, "%s%s%s%s", + adcsrc == WM9705_ADCSEL_PCBEEP ? " -adc value is PCBEEP\n" : "", + adcsrc == WM9705_ADCSEL_PHONE ? " -adc value is PHONE\n" : "", + adcsrc == WM9705_ADCSEL_BMON ? " -adc value is BMON\n" : "", + adcsrc == WM9705_ADCSEL_AUX ? 
" -adc value is AUX\n" : ""); + } + + /* register dump */ + len += sprintf(page+len, "\nRegisters:\n%s%x\n%s%x\n%s%x\n", + " -digitiser 1 (0x76) : 0x", dig1, + " -digitiser 2 (0x78) : 0x", dig2, + " -digitiser read (0x7a) : 0x", digrd); + + /* errors */ + len += sprintf(page+len, "\nErrors:\n%s%d\n%s%d\n", + " -buffer overruns ", ts->overruns, + " -coordinate errors ", ts->adc_errs); + + return len; +} + +#ifdef WM97XX_TS_DEBUG +/* dump all the AC97 register space */ +static int wm_debug_read_proc (char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = 0, i; + unsigned long flags; + wm97xx_ts_t* ts; + u16 reg[AC97_NUM_REG]; + + if ((ts = data) == NULL) + return -ENODEV; + + spin_lock_irqsave(&ts->lock, flags); + if (!ts->is_registered) { + spin_unlock_irqrestore(&ts->lock, flags); + len += sprintf (page+len, "Not registered\n"); + return len; + } + + for (i=0; i < AC97_NUM_REG; i++) { + reg[i] = ts->codec->codec_read(ts->codec, i * 2); + } + spin_unlock_irqrestore(&ts->lock, flags); + + for (i=0; i < AC97_NUM_REG; i++) { + len += sprintf (page+len, "0x%2.2x : 0x%4.4x\n",i * 2, reg[i]); + } + + return len; +} +#endif + +#endif + +#ifdef CONFIG_PM +/* WM97xx Power Management + * The WM9712 has extra powerdown states that are controlled in + * seperate registers from the AC97 power management. + * We will only power down into the extra WM9712 states and leave + * the AC97 power management to the sound driver. + */ +static int wm97xx_pm_event(struct pm_dev *dev, pm_request_t rqst, void *data) +{ + switch(rqst) { + case PM_SUSPEND: + wm97xx_suspend(); + break; + case PM_RESUME: + wm97xx_resume(); + break; + } + return 0; +} + +/* + * Power down the codec + */ +static void wm97xx_suspend(void) +{ + wm97xx_ts_t* ts = &wm97xx_ts; + u16 reg; + unsigned long flags; + + /* are we registered */ + spin_lock_irqsave(&ts->lock, flags); + if (!ts->is_registered) { + spin_unlock_irqrestore(&ts->lock, flags); + return; + } + + /* wm9705 does not have extra PM */ + if (!ts->is_wm9712) { + spin_unlock_irqrestore(&ts->lock, flags); + return; + } + + /* save and mute the PGA's */ + wm9712_pga_save(ts); + + reg = ts->codec->codec_read(ts->codec, AC97_PHONE_VOL); + ts->codec->codec_write(ts->codec, AC97_PHONE_VOL, reg | 0x001f); + + reg = ts->codec->codec_read(ts->codec, AC97_MIC_VOL); + ts->codec->codec_write(ts->codec, AC97_MIC_VOL, reg | 0x1f1f); + + reg = ts->codec->codec_read(ts->codec, AC97_LINEIN_VOL); + ts->codec->codec_write(ts->codec, AC97_LINEIN_VOL, reg | 0x1f1f); + + /* power down, dont disable the AC link */ + ts->codec->codec_write(ts->codec, AC97_WM9712_POWER, WM9712_PD(14) | WM9712_PD(13) | + WM9712_PD(12) | WM9712_PD(11) | WM9712_PD(10) | + WM9712_PD(9) | WM9712_PD(8) | WM9712_PD(7) | + WM9712_PD(6) | WM9712_PD(5) | WM9712_PD(4) | + WM9712_PD(3) | WM9712_PD(2) | WM9712_PD(1) | + WM9712_PD(0)); + + spin_unlock_irqrestore(&ts->lock, flags); +} + +/* + * Power up the Codec + */ +static void wm97xx_resume(void) +{ + wm97xx_ts_t* ts = &wm97xx_ts; + unsigned long flags; + + /* are we registered */ + spin_lock_irqsave(&ts->lock, flags); + if (!ts->is_registered) { + spin_unlock_irqrestore(&ts->lock, flags); + return; + } + + /* wm9705 does not have extra PM */ + if (!ts->is_wm9712) { + spin_unlock_irqrestore(&ts->lock, flags); + return; + } + + /* power up */ + ts->codec->codec_write(ts->codec, AC97_WM9712_POWER, 0x0); + + /* restore PGA state */ + wm9712_pga_restore(ts); + + spin_unlock_irqrestore(&ts->lock, flags); +} + + +/* save state of wm9712 PGA's */ +static void 
wm9712_pga_save(wm97xx_ts_t* ts) +{ + ts->phone_pga = ts->codec->codec_read(ts->codec, AC97_PHONE_VOL) & 0x001f; + ts->line_pgal = ts->codec->codec_read(ts->codec, AC97_LINEIN_VOL) & 0x1f00; + ts->line_pgar = ts->codec->codec_read(ts->codec, AC97_LINEIN_VOL) & 0x001f; + ts->mic_pgal = ts->codec->codec_read(ts->codec, AC97_MIC_VOL) & 0x1f00; + ts->mic_pgar = ts->codec->codec_read(ts->codec, AC97_MIC_VOL) & 0x001f; +} + +/* restore state of wm9712 PGA's */ +static void wm9712_pga_restore(wm97xx_ts_t* ts) +{ + u16 reg; + + reg = ts->codec->codec_read(ts->codec, AC97_PHONE_VOL); + ts->codec->codec_write(ts->codec, AC97_PHONE_VOL, reg | ts->phone_pga); + + reg = ts->codec->codec_read(ts->codec, AC97_LINEIN_VOL); + ts->codec->codec_write(ts->codec, AC97_LINEIN_VOL, reg | ts->line_pgar | (ts->line_pgal << 8)); + + reg = ts->codec->codec_read(ts->codec, AC97_MIC_VOL); + ts->codec->codec_write(ts->codec, AC97_MIC_VOL, reg | ts->mic_pgar | (ts->mic_pgal << 8)); +} + +#endif + +/* + * set up the physical settings of the device + */ + +static void init_wm97xx_phy(void) +{ + u16 dig1, dig2, aux, vid; + wm97xx_ts_t *ts = &wm97xx_ts; + + /* default values */ + dig1 = WM97XX_DELAY(4) | WM97XX_SLT(6); + if (ts->is_wm9712) + dig2 = WM9712_RPU(1); + else { + dig2 = 0x0; + + /* + * mute VIDEO and AUX as they share X and Y touchscreen + * inputs on the WM9705 + */ + aux = ts->codec->codec_read(ts->codec, AC97_AUX_VOL); + if (!(aux & 0x8000)) { + info("muting AUX mixer as it shares X touchscreen coordinate"); + ts->codec->codec_write(ts->codec, AC97_AUX_VOL, 0x8000 | aux); + } + + vid = ts->codec->codec_read(ts->codec, AC97_VIDEO_VOL); + if (!(vid & 0x8000)) { + info("muting VIDEO mixer as it shares Y touchscreen coordinate"); + ts->codec->codec_write(ts->codec, AC97_VIDEO_VOL, 0x8000 | vid); + } + } + + /* WM9712 rpu */ + if (ts->is_wm9712 && rpu) { + dig2 &= 0xffc0; + dig2 |= WM9712_RPU(rpu); + info("setting pen detect pull-up to %d Ohms",64000 / rpu); + } + + /* touchpanel pressure */ + if (pil == 2) { + if (ts->is_wm9712) + dig2 |= WM9712_PIL; + else + dig2 |= WM9705_PIL; + info("setting pressure measurement current to 400uA."); + } else if (pil) + info ("setting pressure measurement current to 200uA."); + + /* WM9712 five wire */ + if (ts->is_wm9712 && five_wire) { + dig2 |= WM9712_45W; + info("setting 5-wire touchscreen mode."); + } + + /* sample settling delay */ + if (delay!=4) { + if (delay < 0 || delay > 15) { + info ("supplied delay out of range."); + delay = 4; + } + dig1 &= 0xff0f; + dig1 |= WM97XX_DELAY(delay); + info("setting adc sample delay to %d u Secs.", delay_table[delay]); + } + + /* coordinate mode */ + if (mode == 1) { + dig1 |= WM97XX_COO; + info("using coordinate mode"); + } + + /* WM9705 pdd */ + if (pdd && !ts->is_wm9712) { + dig2 |= (pdd & 0x000f); + info("setting pdd to Vmid/%d", 1 - (pdd & 0x000f)); + } + + ts->codec->codec_write(ts->codec, AC97_WM97XX_DIGITISER1, dig1); + ts->codec->codec_write(ts->codec, AC97_WM97XX_DIGITISER2, dig2); +} + + +/* + * Called by the audio codec initialisation to register + * the touchscreen driver. + */ + +static int wm97xx_probe(struct ac97_codec *codec, struct ac97_driver *driver) +{ + unsigned long flags; + u16 id1, id2; + wm97xx_ts_t *ts = &wm97xx_ts; + + spin_lock_irqsave(&ts->lock, flags); + + /* we only support 1 touchscreen at the moment */ + if (ts->is_registered) { + spin_unlock_irqrestore(&ts->lock, flags); + return -1; + } + + /* + * We can only use a WM9705 or WM9712 that has been *first* initialised + * by the AC97 audio driver. 
This is because we have to use the audio + * drivers codec read() and write() functions to sample the touchscreen + * + * If an initialsed WM97xx is found then get the codec read and write + * functions. + */ + + /* test for a WM9712 or a WM9705 */ + id1 = codec->codec_read(codec, AC97_VENDOR_ID1); + id2 = codec->codec_read(codec, AC97_VENDOR_ID2); + if (id1 == WM97XX_ID1 && id2 == WM9712_ID2) { + ts->is_wm9712 = 1; + info("registered a WM9712"); + } else if (id1 == WM97XX_ID1 && id2 == WM9705_ID2) { + ts->is_wm9712 = 0; + info("registered a WM9705"); + } else { + err("could not find a WM97xx codec. Found a 0x%4x:0x%4x instead", + id1, id2); + spin_unlock_irqrestore(&ts->lock, flags); + return -1; + } + + /* set up AC97 codec interface */ + ts->codec = codec; + codec->driver_private = (void*)&ts; + codec->codec_unregister = 0; + + /* set up physical characteristics */ + init_wm97xx_phy(); + + ts->is_registered = 1; + spin_unlock_irqrestore(&ts->lock, flags); + return 0; +} + +/* this is called by the audio driver when ac97_codec is unloaded */ + +static void wm97xx_remove(struct ac97_codec *codec, struct ac97_driver *driver) +{ + unsigned long flags; + u16 dig1, dig2; + wm97xx_ts_t *ts = codec->driver_private; + + spin_lock_irqsave(&ts->lock, flags); + + /* check that are registered */ + if (!ts->is_registered) { + err("double unregister"); + spin_unlock_irqrestore(&ts->lock, flags); + return; + } + + ts->is_registered = 0; + wake_up_interruptible(&ts->wait); /* So we see its gone */ + + /* restore default digitiser values */ + dig1 = WM97XX_DELAY(4) | WM97XX_SLT(6); + if (ts->is_wm9712) + dig2 = WM9712_RPU(1); + else + dig2 = 0x0; + + codec->codec_write(codec, AC97_WM97XX_DIGITISER1, dig1); + codec->codec_write(codec, AC97_WM97XX_DIGITISER2, dig2); + ts->codec = NULL; + + spin_unlock_irqrestore(&ts->lock, flags); +} + +static struct miscdevice wm97xx_misc = { + minor: TS_MINOR, + name: "touchscreen/wm97xx", + fops: &ts_fops, +}; + +static int __init wm97xx_ts_init_module(void) +{ + wm97xx_ts_t* ts = &wm97xx_ts; + int ret; + char proc_str[64]; + + info("Wolfson WM9705/WM9712 Touchscreen Controller"); + info("Version %s liam.girdwood@wolfsonmicro.com", WM_TS_VERSION); + + memset(ts, 0, sizeof(wm97xx_ts_t)); + + /* register our misc device */ + if ((ret = misc_register(&wm97xx_misc)) < 0) { + err("can't register misc device"); + return ret; + } + + init_waitqueue_head(&ts->wait); + spin_lock_init(&ts->lock); + + // initial calibration values + ts->cal.xscale = 256; + ts->cal.xtrans = 0; + ts->cal.yscale = 256; + ts->cal.ytrans = 0; + + /* reset error counters */ + ts->overruns = 0; + ts->adc_errs = 0; + + /* register with the AC97 layer */ + ac97_register_driver(&wm9705_driver); + ac97_register_driver(&wm9712_driver); + +#ifdef CONFIG_PROC_FS + /* register proc interface */ + sprintf(proc_str, "driver/%s", TS_NAME); + if ((ts->wm97xx_ts_ps = create_proc_read_entry (proc_str, 0, NULL, + wm97xx_read_proc, ts)) == 0) + err("could not register proc interface /proc/%s", proc_str); +#ifdef WM97XX_TS_DEBUG + if ((ts->wm97xx_debug_ts_ps = create_proc_read_entry ("driver/ac97_registers", + 0, NULL,wm_debug_read_proc, ts)) == 0) + err("could not register proc interface /proc/driver/ac97_registers"); +#endif +#endif +#ifdef CONFIG_PM + if ((ts->pm = pm_register(PM_UNKNOWN_DEV, PM_SYS_UNKNOWN, wm97xx_pm_event)) == 0) + err("could not register with power management"); +#endif + return 0; +} + +static void wm97xx_ts_cleanup_module(void) +{ + wm97xx_ts_t* ts = &wm97xx_ts; + +#ifdef CONFIG_PM + 
pm_unregister (ts->pm); +#endif + ac97_unregister_driver(&wm9705_driver); + ac97_unregister_driver(&wm9712_driver); + misc_deregister(&wm97xx_misc); +} + +/* Module information */ +MODULE_AUTHOR("Liam Girdwood, liam.girdwood@wolfsonmicro.com, www.wolfsonmicro.com"); +MODULE_DESCRIPTION("WM9705/WM9712 Touch Screen / BMON Driver"); +MODULE_LICENSE("GPL"); + +module_init(wm97xx_ts_init_module); +module_exit(wm97xx_ts_cleanup_module); + +#ifndef MODULE + +static int __init wm97xx_ts_setup(char *options) +{ + char *this_opt = options; + + if (!options || !*options) + return 0; + + /* parse the options and check for out of range values */ + for(this_opt=strtok(options, ","); + this_opt; this_opt=strtok(NULL, ",")) { + if (!strncmp(this_opt, "pil:", 4)) { + this_opt+=4; + pil = simple_strtol(this_opt, NULL, 0); + if (pil < 0 || pil > 2) + pil = 0; + continue; + } + if (!strncmp(this_opt, "rpu:", 4)) { + this_opt+=4; + rpu = simple_strtol(this_opt, NULL, 0); + if (rpu < 0 || rpu > 31) + rpu = 0; + continue; + } + if (!strncmp(this_opt, "pdd:", 4)) { + this_opt+=4; + pdd = simple_strtol(this_opt, NULL, 0); + if (pdd < 0 || pdd > 15) + pdd = 0; + continue; + } + if (!strncmp(this_opt, "delay:", 6)) { + this_opt+=6; + delay = simple_strtol(this_opt, NULL, 0); + if (delay < 0 || delay > 15) + delay = 4; + continue; + } + if (!strncmp(this_opt, "five_wire:", 10)) { + this_opt+=10; + five_wire = simple_strtol(this_opt, NULL, 0); + if (five_wire < 0 || five_wire > 1) + five_wire = 0; + continue; + } + if (!strncmp(this_opt, "mode:", 5)) { + this_opt+=5; + mode = simple_strtol(this_opt, NULL, 0); + if (mode < 0 || mode > 2) + mode = 0; + continue; + } + } + return 1; +} + +__setup("wm97xx_ts=", wm97xx_ts_setup); + +#endif /* MODULE */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/ad1889.c linux.22-ac2/drivers/sound/ad1889.c --- linux.vanilla/drivers/sound/ad1889.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/sound/ad1889.c 2003-08-13 14:43:27.000000000 +0100 @@ -236,16 +236,24 @@ for (i = 0; i < AD_MAX_STATES; i++) { dmabuf = &dev->state[i].dmabuf; - if ((dmabuf->rawbuf = kmalloc(DMA_SIZE, GFP_KERNEL|GFP_DMA)) == NULL) - return NULL; + dmabuf->rawbuf = kmalloc(DMA_SIZE, GFP_KERNEL|GFP_DMA); + if (!dmabuf->rawbuf) + goto err_free_dmabuf; dmabuf->rawbuf_size = DMA_SIZE; dmabuf->dma_handle = 0; dmabuf->rd_ptr = dmabuf->wr_ptr = dmabuf->dma_len = 0UL; dmabuf->ready = 0; dmabuf->rate = 44100; } - +out: return dev; + +err_free_dmabuf: + while (--i >= 0) + kfree(dev->state[i].dmabuf.rawbuf); + kfree(dev); + dev = NULL; + goto out; } static void ad1889_free_dev(ad1889_dev_t *dev) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/ali5455.c linux.22-ac2/drivers/sound/ali5455.c --- linux.vanilla/drivers/sound/ali5455.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/sound/ali5455.c 2003-07-29 17:55:34.000000000 +0100 @@ -1,5 +1,5 @@ /* - * ALI ali5455 and friends ICH driver for Linux + * Driver for ALI 5455 Audio PCI soundcard * LEI HU * * Built from: @@ -23,10 +23,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * - * - * ALi 5455 theory of operation - * - * The chipset provides three DMA channels that talk to an AC97 + * The chipset provides five DMA channels that talk to an AC97 * CODEC (AC97 is a digital/analog mixer standard). At its simplest * you get 48Khz audio with basic volume and mixer controls. 
At the * best you get rate adaption in the codec. We set the card up so @@ -42,6 +39,13 @@ * esd working you need to use esd -r 48000 as it won't probe 48KHz * by default. mpg123 can't handle 48Khz only audio so use xmms. * + * + * Not everyone uses 48KHz. We know of no way to detect this reliably + * and certainly not to get the right data. If your ali audio sounds + * stupid you may need to investigate other speeds. According to Analog + * they tend to use a 14.318MHz clock which gives you a base rate of + * 41194Hz. + * * If you need to force a specific rate set the clocking= option * */ @@ -78,31 +82,45 @@ static int strict_clocking = 0; static unsigned int clocking = 0; -static unsigned int codec_pcmout_share_spdif_locked = 0; -static unsigned int codec_independent_spdif_locked = 0; +#ifdef CONFIG_SOUND_ALI5455_CODECSPDIFOUT_PCMOUTSHARE +static unsigned int codec_pcmout_share_spdif_locked = 48000; +#else + static unsigned int codec_pcmout_share_spdif_locked = 0; +#endif +#ifdef CONFIG_SOUND_ALI5455_CODECSPDIFOUT_CODECINDEPENDENTDMA +static unsigned int codec_independent_spdif_locked = 48000; +#else +static unsigned int codec_independent_spdif_locked = 0; +#endif +#ifdef CONFIG_SOUND_ALI5455_CONTROLLERSPDIFOUT_PCMOUTSHARE +static unsigned int controller_pcmout_share_spdif_locked = 48000; +#else static unsigned int controller_pcmout_share_spdif_locked = 0; +#endif +#ifdef CONFIG_SOUND_ALI5455_CONTROLLERSPDIFOUT_CONTROLLERINDEPENDENTDMA +static unsigned int controller_independent_spdif_locked = 48000; +#else static unsigned int controller_independent_spdif_locked = 0; -static unsigned int globel = 0; - -#define ADC_RUNNING 1 -#define DAC_RUNNING 2 -#define CODEC_SPDIFOUT_RUNNING 8 -#define CONTROLLER_SPDIFOUT_RUNNING 4 - -#define SPDIF_ENABLE_OUTPUT 4 /* bits 0,1 are PCM */ - -#define ALI5455_FMT_16BIT 1 -#define ALI5455_FMT_STEREO 2 -#define ALI5455_FMT_MASK 3 +#endif -#define SPDIF_ON 0x0004 -#define SURR_ON 0x0010 -#define CENTER_LFE_ON 0x0020 -#define VOL_MUTED 0x8000 +#define ADC_RUNNING 1 +#define DAC_RUNNING 2 +#define CONTROLLER_SPDIFOUT_RUNNING 4 +#define CODEC_SPDIFOUT_RUNNING 8 + +#define ALI5455_FMT_16BIT 1 +#define ALI5455_FMT_STEREO 2 +#define ALI5455_FMT_MASK 3 + +#define SPDIF_ON 0x0004 +#define SURR_ON 0x0010 +#define CENTER_LFE_ON 0x0020 +#define VOL_MUTED 0x8000 +#define SPDIF_ENABLE_OUTPUT 0x00000004 -#define ALI_SPDIF_OUT_CH_STATUS 0xbf -/* the 810's array of pointers to data buffers */ +#define ALI_SPDIF_OUT_CH_STATUS 0xbf +/* the ali5455 's array of pointers to data buffers */ struct sg_item { #define BUSADDR_MASK 0xFFFFFFFE @@ -113,20 +131,21 @@ u32 control; }; -/* an instance of the ali channel */ +/* An instance of the ali 5455 channel */ #define SG_LEN 32 struct ali_channel { /* these sg guys should probably be allocated seperately as nocache. Must be 8 byte aligned */ struct sg_item sg[SG_LEN]; /* 32*8 */ - u32 offset; /* 4 */ - u32 port; /* 4 */ + u32 offset; /* 4 */ + u32 port; /* 4 */ u32 used; u32 num; }; /* - * we have 3 seperate dma engines. pcm in, pcm out, and mic. + * we have 5 seperate dma engines. pcm in, pcm out, mc in, + * codec independant DMA, and controller independant DMA * each dma engine has controlling registers. These goofy * names are from the datasheet, but make it easy to write * code while leafing through it. 
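The ENUM_ENGINE macro in the next hunk stamps out one register window per DMA engine by token-pasting the engine prefix and a hex digit. Below is a minimal user-space sketch of the same idiom; only the _CR offset (0x?b) is taken from the hunk itself, while the other member names reuse identifiers that appear elsewhere in the driver (OFF_BDBAR, OFF_CIV, OFF_LVI) with assumed, illustrative offsets rather than datasheet values.

#include <stdio.h>

/*
 * Each DMA engine owns its own register window starting at 0x<DIG>0, so a
 * single macro invocation defines that engine's whole register set.
 */
#define ENUM_ENGINE(PRE, DIG)                                            \
enum {                                                                   \
        PRE##_BDBAR = 0x##DIG##0, /* descriptor list base (assumed) */   \
        PRE##_CIV   = 0x##DIG##4, /* current index value (assumed)  */   \
        PRE##_LVI   = 0x##DIG##5, /* last valid index (assumed)     */   \
        PRE##_CR    = 0x##DIG##b  /* control register                */  \
}

ENUM_ENGINE(PI, 4);             /* PCM in           */
ENUM_ENGINE(PO, 5);             /* PCM out          */
ENUM_ENGINE(CODECSPDIFOUT, 7);  /* codec S/PDIF out */

int main(void)
{
        /* PI_CR expands to 0x4b, PO_BDBAR to 0x50, and so on. */
        printf("PI_CR=0x%x PO_CR=0x%x CODECSPDIFOUT_BDBAR=0x%x\n",
               PI_CR, PO_CR, CODECSPDIFOUT_BDBAR);
        return 0;
}

The pasting works because "0x", the engine digit, and the trailing nibble merge into a single preprocessing token, e.g. 0x ## 4 ## b becomes 0x4b.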
@@ -142,11 +161,11 @@ PRE##_CR = 0x##DIG##b /* Control Register */ \ } -ENUM_ENGINE(OFF, 0); /* Offsets */ -ENUM_ENGINE(PI, 4); /* PCM In */ -ENUM_ENGINE(PO, 5); /* PCM Out */ -ENUM_ENGINE(MC, 6); /* Mic In */ -ENUM_ENGINE(CODECSPDIFOUT, 7); /* CODEC SPDIF OUT */ +ENUM_ENGINE(OFF, 0); /* Offsets */ +ENUM_ENGINE(PI, 4); /* PCM In */ +ENUM_ENGINE(PO, 5); /* PCM Out */ +ENUM_ENGINE(MC, 6); /* Mic In */ +ENUM_ENGINE(CODECSPDIFOUT, 7); /* CODEC SPDIF OUT */ ENUM_ENGINE(CONTROLLERSPDIFIN, A); /* CONTROLLER SPDIF In */ ENUM_ENGINE(CONTROLLERSPDIFOUT, B); /* CONTROLLER SPDIF OUT */ @@ -171,7 +190,7 @@ ALI_SPDIFICS = 0xfc /* spdif interface control/status */ }; -// x-status register(x:pcm in ,pcm out, mic in,) +/* x-status register(x:pcm in ,pcm out, mic in,codec independent DMA.controller independent DMA) */ /* interrupts for a dma engine */ #define DMA_INT_FIFO (1<<4) /* fifo under/over flow */ #define DMA_INT_COMPLETE (1<<3) /* buffer read/write complete and ioc set */ @@ -180,8 +199,7 @@ #define DMA_INT_DCH (1) /* DMA Controller Halted (happens on LVI interrupts) */ //not eqult intel #define DMA_INT_MASK (DMA_INT_FIFO|DMA_INT_COMPLETE|DMA_INT_LVI) -/* interrupts for the whole chip */// by interrupt status register finish - +/* interrupts for the whole chip */ #define INT_SPDIFOUT (1<<23) /* controller spdif out INTERRUPT */ #define INT_SPDIFIN (1<<22) #define INT_CODECSPDIFOUT (1<<19) @@ -193,13 +211,14 @@ #define INT_GPIO (1<<1) #define INT_MASK (INT_SPDIFOUT|INT_CODECSPDIFOUT|INT_MICIN|INT_PCMOUT|INT_PCMIN) -#define DRIVER_VERSION "0.02ac" +#define DRIVER_VERSION "0.03-ac" /* magic numbers to protect our data structures */ #define ALI5455_CARD_MAGIC 0x5072696E /* "Prin" */ #define ALI5455_STATE_MAGIC 0x63657373 /* "cess" */ #define ALI5455_DMA_MASK 0xffffffff /* DMA buffer mask for pci_alloc_consist */ -#define NR_HW_CH 5 //I think 5 channel + +#define NR_HW_CH 5 /* I think 5 channel */ /* maxinum number of AC97 codecs connected, AC97 2.0 defined 4 */ #define NR_AC97 2 @@ -216,7 +235,7 @@ "ALI 5455" }; -static struct pci_device_id ali_pci_tbl[] __initdata = { +static struct pci_device_id ali_pci_tbl[] __devinitdata = { {PCI_VENDOR_ID_ALI, PCI_DEVICE_ID_ALI_5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ALI5455}, {0,} @@ -267,12 +286,12 @@ unsigned fragshift; /* our buffer acts like a circular ring */ - unsigned hwptr; /* where dma last started, updated by update_ptr */ - unsigned swptr; /* where driver last clear/filled, updated by read/write */ - int count; /* bytes to be consumed or been generated by dma machine */ + unsigned hwptr; /* where dma last started, updated by update_ptr */ + unsigned swptr; /* where driver last clear/filled, updated by read/write */ + int count; /* bytes to be consumed or been generated by dma machine */ unsigned total_bytes; /* total bytes dmaed by hardware */ - unsigned error; /* number of over/underruns */ + unsigned error; /* number of over/underruns */ wait_queue_head_t wait; /* put process on wait queue when no more space in buffer */ /* redundant, but makes calculations easier */ @@ -399,7 +418,11 @@ } -//add support codec spdif out +/* + * we use ALC650 which only support 48k sample rate, so we test firstly + * spdifout 's sample rate validity + */ + static int ali_valid_spdif_rate(struct ac97_codec *codec, int rate) { unsigned long id = 0L; @@ -428,16 +451,13 @@ /* ali_set_spdif_output * - * Configure the S/PDIF output transmitter. When we turn on - * S/PDIF, we turn off the analog output. This may not be - * the right thing to do. 
+ * Configure the S/PDIF output transmitter. * * Assumptions: * The DSP sample rate must already be set to a supported * S/PDIF rate (32kHz, 44.1kHz, or 48kHz) or we abort. */ -static void ali_set_spdif_output(struct ali_state *state, int slots, - int rate) +static void ali_set_spdif_output(struct ali_state *state, int slots, int rate) { int vol; int aud_reg; @@ -453,8 +473,7 @@ /* If the volume wasn't muted before we turned on S/PDIF, unmute it */ if (!(state->card->ac97_status & VOL_MUTED)) { aud_reg = ali_ac97_get(codec, AC97_MASTER_VOL_STEREO); - ali_ac97_set(codec, AC97_MASTER_VOL_STEREO, - (aud_reg & ~VOL_MUTED)); + ali_ac97_set(codec, AC97_MASTER_VOL_STEREO, (aud_reg & ~VOL_MUTED)); } state->card->ac97_status &= ~(VOL_MUTED | SPDIF_ON); return; @@ -486,7 +505,7 @@ ali_ac97_set(codec, AC97_SPDIF_CONTROL, aud_reg); aud_reg = ali_ac97_get(codec, AC97_EXTENDED_STATUS); - aud_reg = (aud_reg & AC97_EA_SLOT_MASK) | slots | AC97_EA_SPDIF; + aud_reg = (aud_reg & AC97_EA_SLOT_MASK) | slots | AC97_EA_SPDIF; /* ALC650 don't support VRA */ ali_ac97_set(codec, AC97_EXTENDED_STATUS, aud_reg); aud_reg = ali_ac97_get(codec, AC97_POWER_CONTROL); @@ -508,8 +527,6 @@ aud_reg = ali_ac97_get(codec, 0x6a); ali_ac97_set(codec, 0x6a, (aud_reg & 0xefff)); } - /* Mute the analog output */ - /* Should this only mute the PCM volume??? */ } } @@ -517,13 +534,6 @@ * * Configure the codec's multi-channel DACs * - * The logic is backwards. Setting the bit to 1 turns off the DAC. - * - * What about the ICH? We currently configure it using the - * SNDCTL_DSP_CHANNELS ioctl. If we're turnning on the DAC, - * does that imply that we want the ICH set to support - * these channels? - * * TODO: * vailidate that the codec really supports these DACs * before turning them on. @@ -556,8 +566,7 @@ } /* set playback sample rate */ -static unsigned int ali_set_dac_rate(struct ali_state *state, - unsigned int rate) +static unsigned int ali_set_dac_rate(struct ali_state *state, unsigned int rate) { struct dmabuf *dmabuf = &state->dmabuf; u32 new_rate; @@ -594,8 +603,7 @@ } /* set recording sample rate */ -static unsigned int ali_set_adc_rate(struct ali_state *state, - unsigned int rate) +static unsigned int ali_set_adc_rate(struct ali_state *state, unsigned int rate) { struct dmabuf *dmabuf = &state->dmabuf; u32 new_rate; @@ -632,11 +640,10 @@ } /* set codec independent spdifout sample rate */ -static unsigned int ali_set_codecspdifout_rate(struct ali_state *state, - unsigned int rate) +static unsigned int ali_set_codecspdifout_rate(struct ali_state *state, unsigned int rate) { struct dmabuf *dmabuf = &state->dmabuf; - + if (!(state->card->ac97_features & 0x0001)) { dmabuf->rate = clocking; return clocking; @@ -652,8 +659,7 @@ } /* set controller independent spdif out function sample rate */ -static void ali_set_spdifout_rate(struct ali_state *state, - unsigned int rate) +static void ali_set_spdifout_rate(struct ali_state *state, unsigned int rate) { unsigned char ch_st_sel; unsigned short status_rate; @@ -729,7 +735,9 @@ data = ((civ + 1) * dmabuf->fragsize - (2 * offset)) % dmabuf->dmasize; if (inw(port_picb) == 0) data -= 2048; - + /* It is hardware bug when read port 's PICB ==0 */ + if ( inw(port_picb) == 0 ) + data -= 2048; return data; } @@ -745,6 +753,8 @@ udelay(1); outb(0, card->iobase + PI_CR); + + // wait for the card to acknowledge shutdown while (inb(card->iobase + PI_CR) != 0); // now clear any latent interrupt bits (like the halt bit) @@ -771,10 +781,8 @@ outb((1 << 4) | (1 << 2), state->card->iobase + PI_CR); if 
(state->card->channel[0].used == 1) outl(1, state->card->iobase + ALI_DMACR); // DMA CONTROL REGISTRER - udelay(100); if (state->card->channel[2].used == 1) outl((1 << 2), state->card->iobase + ALI_DMACR); //DMA CONTROL REGISTER - udelay(100); } } @@ -965,7 +973,7 @@ static int prog_dmabuf(struct ali_state *state, unsigned rec) { struct dmabuf *dmabuf = &state->dmabuf; - struct ali_channel *c = NULL; + struct ali_channel *c = NULL ; struct sg_item *sg; unsigned long flags; int ret; @@ -1248,9 +1256,7 @@ } } -static inline int ali_get_free_write_space(struct - ali_state - *state) +static inline int ali_get_free_write_space(struct ali_state *state) { struct dmabuf *dmabuf = &state->dmabuf; int free; @@ -1267,9 +1273,7 @@ return (free); } -static inline int ali_get_available_read_data(struct - ali_state - *state) +static inline int ali_get_available_read_data(struct ali_state *state) { struct dmabuf *dmabuf = &state->dmabuf; int avail; @@ -1288,12 +1292,12 @@ static int drain_dac(struct ali_state *state, int signals_allowed) { - DECLARE_WAITQUEUE(wait, current); struct dmabuf *dmabuf = &state->dmabuf; unsigned long flags; unsigned long tmo; int count; + if (!dmabuf->ready) return 0; if (dmabuf->mapped) { @@ -1302,11 +1306,11 @@ } add_wait_queue(&dmabuf->wait, &wait); for (;;) { - spin_lock_irqsave(&state->card->lock, flags); ali_update_ptr(state); count = dmabuf->count; spin_unlock_irqrestore(&state->card->lock, flags); + if (count <= 0) break; /* @@ -1355,7 +1359,6 @@ static int drain_spdifout(struct ali_state *state, int signals_allowed) { - DECLARE_WAITQUEUE(wait, current); struct dmabuf *dmabuf = &state->dmabuf; unsigned long flags; @@ -1548,7 +1551,6 @@ { struct ali_card *card = (struct ali_card *) dev_id; u32 status; - u16 status2; spin_lock(&card->lock); status = inl(card->iobase + ALI_INTERRUPTSR); @@ -1557,19 +1559,8 @@ return; /* not for us */ } - if (codec_independent_spdif_locked > 0) { - if (globel == 0) { - globel += 1; - status2 = inw(card->iobase + 0x76); - outw(status2 | 0x000c, card->iobase + 0x76); - } else { - if (status & (INT_PCMOUT | INT_PCMIN | INT_MICIN | INT_SPDIFOUT | INT_CODECSPDIFOUT)) - ali_channel_interrupt(card); - } - } else { - if (status & (INT_PCMOUT | INT_PCMIN | INT_MICIN | INT_SPDIFOUT | INT_CODECSPDIFOUT)) - ali_channel_interrupt(card); - } + if (status & (INT_PCMOUT | INT_PCMIN | INT_MICIN | INT_SPDIFOUT | INT_CODECSPDIFOUT)) + ali_channel_interrupt(card); /* clear 'em */ outl(status & INT_MASK, card->iobase + ALI_INTERRUPTSR); @@ -1580,8 +1571,7 @@ waiting to be copied to the user's buffer. It is filled by the dma machine and drained by this loop. */ -static ssize_t ali_read(struct file *file, char *buffer, - size_t count, loff_t * ppos) +static ssize_t ali_read(struct file *file, char *buffer, size_t count, loff_t * ppos) { struct ali_state *state = (struct ali_state *) file->private_data; struct ali_card *card = state ? state->card : 0; @@ -1721,8 +1711,7 @@ /* in this loop, dmabuf.count signifies the amount of data that is waiting to be dma to the soundcard. it is drained by the dma machine and filled by this loop. */ -static ssize_t ali_write(struct file *file, - const char *buffer, size_t count, loff_t * ppos) +static ssize_t ali_write(struct file *file, const char *buffer, size_t count, loff_t * ppos) { struct ali_state *state = (struct ali_state *) file->private_data; struct ali_card *card = state ? 
state->card : 0; @@ -1908,8 +1897,7 @@ } /* No kernel lock - we have our own spinlock */ -static unsigned int ali_poll(struct file *file, struct poll_table_struct - *wait) +static unsigned int ali_poll(struct file *file, struct poll_table_struct *wait) { struct ali_state *state = (struct ali_state *) file->private_data; struct dmabuf *dmabuf = &state->dmabuf; @@ -2014,8 +2002,7 @@ } if (c != NULL) { outb(2, state->card->iobase + c->port + OFF_CR); /* reset DMA machine */ - outl(virt_to_bus(&c->sg[0]), - state->card->iobase + c->port + OFF_BDBAR); + outl(virt_to_bus(&c->sg[0]), state->card->iobase + c->port + OFF_BDBAR); outb(0, state->card->iobase + c->port + OFF_CIV); outb(0, state->card->iobase + c->port + OFF_LVI); } @@ -2990,7 +2977,6 @@ { struct ali_card *card = dev->private_data; int count1 = 100; - unsigned long flags; char val; unsigned short int count; @@ -3660,12 +3646,17 @@ MODULE_PARM(codec_independent_spdif_locked, "i"); MODULE_PARM(controller_pcmout_share_spdif_locked, "i"); MODULE_PARM(controller_independent_spdif_locked, "i"); + #define ALI5455_MODULE_NAME "ali5455" + static struct pci_driver ali_pci_driver = { - name:ALI5455_MODULE_NAME, id_table:ali_pci_tbl, probe:ali_probe, - remove:__devexit_p(ali_remove), + name: ALI5455_MODULE_NAME, + id_table: ali_pci_tbl, + probe: ali_probe, + remove: __devexit_p(ali_remove), #ifdef CONFIG_PM - suspend:ali_pm_suspend, resume:ali_pm_resume, + suspend: ali_pm_suspend, + resume: ali_pm_resume, #endif /* CONFIG_PM */ }; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/btaudio.c linux.22-ac2/drivers/sound/btaudio.c --- linux.vanilla/drivers/sound/btaudio.c 2002-11-29 21:27:19.000000000 +0000 +++ linux.22-ac2/drivers/sound/btaudio.c 2003-06-29 16:10:12.000000000 +0100 @@ -1,7 +1,7 @@ /* - btaudio - bt878 audio dma driver for linux 2.4.x + btaudio - bt878 audio dma driver for linux 2.4 / 2.5 - (c) 2000-2002 Gerd Knorr + (c) 2000-2003 Gerd Knorr This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -19,14 +19,12 @@ */ -#include #include #include #include #include #include #include -#include #include #include #include @@ -37,6 +35,9 @@ #include #include +# define irqreturn_t void +# define IRQ_RETVAL(foobar) +# define strlcpy(dest,src,len) strncpy(dest,src,(len)-1) /* mmio access */ #define btwrite(dat,adr) writel((dat), (bta->mmio+(adr))) @@ -152,9 +153,10 @@ int rate; }; -static struct btaudio *btaudios = NULL; -static unsigned int debug = 0; -static unsigned int irq_debug = 0; +static struct btaudio *btaudios = NULL; +static unsigned int btcount = 0; +static unsigned int debug = 0; +static unsigned int irq_debug = 0; /* -------------------------------------------------------------- */ @@ -330,8 +332,8 @@ if (cmd == SOUND_MIXER_INFO) { mixer_info info; memset(&info,0,sizeof(info)); - strncpy(info.id,"bt878",sizeof(info.id)-1); - strncpy(info.name,"Brooktree Bt878 audio",sizeof(info.name)-1); + strlcpy(info.id,"bt878",sizeof(info.id)); + strlcpy(info.name,"Brooktree Bt878 audio",sizeof(info.name)); info.modify_counter = bta->mixcount; if (copy_to_user((void *)arg, &info, sizeof(info))) return -EFAULT; @@ -340,8 +342,8 @@ if (cmd == SOUND_OLD_MIXER_INFO) { _old_mixer_info info; memset(&info,0,sizeof(info)); - strncpy(info.id,"bt878",sizeof(info.id)-1); - strncpy(info.name,"Brooktree Bt878 audio",sizeof(info.name)-1); + strlcpy(info.id,"bt878",sizeof(info.id)); + strlcpy(info.name,"Brooktree Bt878 
audio",sizeof(info.name)); if (copy_to_user((void *)arg, &info, sizeof(info))) return -EFAULT; return 0; @@ -426,11 +428,11 @@ } static struct file_operations btaudio_mixer_fops = { - owner: THIS_MODULE, - llseek: no_llseek, - open: btaudio_mixer_open, - release: btaudio_mixer_release, - ioctl: btaudio_mixer_ioctl, + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = btaudio_mixer_open, + .release = btaudio_mixer_release, + .ioctl = btaudio_mixer_ioctl, }; /* -------------------------------------------------------------- */ @@ -791,25 +793,25 @@ } static struct file_operations btaudio_digital_dsp_fops = { - owner: THIS_MODULE, - llseek: no_llseek, - open: btaudio_dsp_open_digital, - release: btaudio_dsp_release, - read: btaudio_dsp_read, - write: btaudio_dsp_write, - ioctl: btaudio_dsp_ioctl, - poll: btaudio_dsp_poll, + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = btaudio_dsp_open_digital, + .release = btaudio_dsp_release, + .read = btaudio_dsp_read, + .write = btaudio_dsp_write, + .ioctl = btaudio_dsp_ioctl, + .poll = btaudio_dsp_poll, }; static struct file_operations btaudio_analog_dsp_fops = { - owner: THIS_MODULE, - llseek: no_llseek, - open: btaudio_dsp_open_analog, - release: btaudio_dsp_release, - read: btaudio_dsp_read, - write: btaudio_dsp_write, - ioctl: btaudio_dsp_ioctl, - poll: btaudio_dsp_poll, + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = btaudio_dsp_open_analog, + .release = btaudio_dsp_release, + .read = btaudio_dsp_read, + .write = btaudio_dsp_write, + .ioctl = btaudio_dsp_ioctl, + .poll = btaudio_dsp_poll, }; /* -------------------------------------------------------------- */ @@ -818,18 +820,20 @@ "RISCI", "FBUS", "FTRGT", "FDSR", "PPERR", "RIPERR", "PABORT", "OCERR", "SCERR" }; -static void btaudio_irq(int irq, void *dev_id, struct pt_regs * regs) +static irqreturn_t btaudio_irq(int irq, void *dev_id, struct pt_regs * regs) { int count = 0; u32 stat,astat; struct btaudio *bta = dev_id; + int handled = 0; for (;;) { count++; stat = btread(REG_INT_STAT); astat = stat & btread(REG_INT_MASK); if (!astat) - return; + break; + handled = 1; btwrite(astat,REG_INT_STAT); if (irq_debug) { @@ -865,18 +869,20 @@ btwrite(0, REG_INT_MASK); } } - return; + return IRQ_RETVAL(handled); } /* -------------------------------------------------------------- */ -static unsigned int dsp1 = -1; -static unsigned int dsp2 = -1; -static unsigned int mixer = -1; +#define BTAUDIO_MAX 16 + +static unsigned int dsp1[BTAUDIO_MAX] = { [ 0 ... BTAUDIO_MAX-1 ] = -1 }; +static unsigned int dsp2[BTAUDIO_MAX] = { [ 0 ... BTAUDIO_MAX-1 ] = -1 }; +static unsigned int mixer[BTAUDIO_MAX] = { [ 0 ... BTAUDIO_MAX-1 ] = -1 }; +static int digital[BTAUDIO_MAX] = { [ 0 ... BTAUDIO_MAX-1 ] = 1 }; +static int analog[BTAUDIO_MAX] = { [ 0 ... BTAUDIO_MAX-1 ] = 1 }; +static int rate[BTAUDIO_MAX] = { [ 0 ... 
BTAUDIO_MAX-1 ] = 0 }; static int latency = -1; -static int digital = 1; -static int analog = 1; -static int rate = 0; #define BTA_OSPREY200 1 @@ -899,6 +905,9 @@ unsigned char revision,lat; int rc = -EBUSY; + if (BTAUDIO_MAX == btcount) + return -EBUSY; + if (pci_enable_device(pci_dev)) return -EIO; if (!request_mem_region(pci_resource_start(pci_dev,0), @@ -932,8 +941,8 @@ /* sample rate */ bta->rate = card->rate; - if (rate) - bta->rate = rate; + if (rate[btcount]) + bta->rate = rate[btcount]; init_MUTEX(&bta->lock); init_waitqueue_head(&bta->readq); @@ -955,7 +964,7 @@ /* init hw */ btwrite(0, REG_GPIO_DMA_CTL); btwrite(0, REG_INT_MASK); - btwrite(~0x0UL, REG_INT_STAT); + btwrite(~(u32)0, REG_INT_STAT); pci_set_master(pci_dev); if ((rc = request_irq(bta->irq, btaudio_irq, SA_SHIRQ|SA_INTERRUPT, @@ -966,9 +975,9 @@ } /* register devices */ - if (digital) { + if (digital[btcount]) { rc = bta->dsp_digital = - register_sound_dsp(&btaudio_digital_dsp_fops,dsp1); + register_sound_dsp(&btaudio_digital_dsp_fops,dsp1[btcount]); if (rc < 0) { printk(KERN_WARNING "btaudio: can't register digital dsp (rc=%d)\n",rc); @@ -977,9 +986,9 @@ printk(KERN_INFO "btaudio: registered device dsp%d [digital]\n", bta->dsp_digital >> 4); } - if (analog) { + if (analog[btcount]) { rc = bta->dsp_analog = - register_sound_dsp(&btaudio_analog_dsp_fops,dsp2); + register_sound_dsp(&btaudio_analog_dsp_fops,dsp2[btcount]); if (rc < 0) { printk(KERN_WARNING "btaudio: can't register analog dsp (rc=%d)\n",rc); @@ -987,7 +996,8 @@ } printk(KERN_INFO "btaudio: registered device dsp%d [analog]\n", bta->dsp_analog >> 4); - rc = bta->mixer_dev = register_sound_mixer(&btaudio_mixer_fops,mixer); + rc = bta->mixer_dev = register_sound_mixer(&btaudio_mixer_fops, + mixer[btcount]); if (rc < 0) { printk(KERN_WARNING "btaudio: can't register mixer (rc=%d)\n",rc); @@ -1000,6 +1010,7 @@ /* hook into linked list */ bta->next = btaudios; btaudios = bta; + btcount++; pci_set_drvdata(pci_dev,bta); return 0; @@ -1027,7 +1038,7 @@ /* turn off all DMA / IRQs */ btand(~15, REG_GPIO_DMA_CTL); btwrite(0, REG_INT_MASK); - btwrite(~0x0UL, REG_INT_STAT); + btwrite(~(u32)0, REG_INT_STAT); /* unregister devices */ if (digital) { @@ -1052,6 +1063,7 @@ ; /* if (NULL == walk->next) BUG(); */ walk->next = bta->next; } + btcount--; pci_set_drvdata(pci_dev, NULL); kfree(bta); @@ -1062,31 +1074,31 @@ static struct pci_device_id btaudio_pci_tbl[] __devinitdata = { { - vendor: PCI_VENDOR_ID_BROOKTREE, - device: 0x0878, - subvendor: 0x0070, - subdevice: 0xff01, - driver_data: BTA_OSPREY200, + .vendor = PCI_VENDOR_ID_BROOKTREE, + .device = 0x0878, + .subvendor = 0x0070, + .subdevice = 0xff01, + .driver_data = BTA_OSPREY200, },{ - vendor: PCI_VENDOR_ID_BROOKTREE, - device: 0x0878, - subvendor: PCI_ANY_ID, - subdevice: PCI_ANY_ID, + .vendor = PCI_VENDOR_ID_BROOKTREE, + .device = 0x0878, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, },{ - vendor: PCI_VENDOR_ID_BROOKTREE, - device: 0x0878, - subvendor: PCI_ANY_ID, - subdevice: PCI_ANY_ID, + .vendor = PCI_VENDOR_ID_BROOKTREE, + .device = 0x0879, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, },{ /* --- end of list --- */ } }; static struct pci_driver btaudio_pci_driver = { - name: "btaudio", - id_table: btaudio_pci_tbl, - probe: btaudio_probe, - remove: __devexit_p(btaudio_remove), + .name = "btaudio", + .id_table = btaudio_pci_tbl, + .probe = btaudio_probe, + .remove = __devexit_p(btaudio_remove), }; static int btaudio_init_module(void) @@ -1107,15 +1119,21 @@ module_init(btaudio_init_module); 
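The btaudio conversion above moves the file_operations and PCI tables from the old GNU "field: value" initializer spelling to C99 ".field = value" designated initializers, and turns the scalar insmod options into per-card arrays filled with a GCC ranged initializer. A small user-space sketch of both idioms, using hypothetical stand-in types rather than the driver's own:

#include <stdio.h>

/* Illustrative stand-in for a driver ops table; not btaudio's real type. */
struct toy_ops {
        int (*open)(void);
        int (*release)(void);
};

static int toy_open(void)    { return 1; }
static int toy_release(void) { return 2; }

/* Old GNU spelling:  { open: toy_open, release: toy_release }
 * C99 spelling used by the patch: */
static struct toy_ops ops = {
        .open    = toy_open,
        .release = toy_release,
};

/* GCC ranged designated initializer, as used for the per-card parameter
 * arrays: every element from 0 to MAX-1 starts out as -1 ("not set"). */
#define MAX 16
static int dsp_minor[MAX] = { [0 ... MAX - 1] = -1 };

int main(void)
{
        printf("open()=%d dsp_minor[7]=%d\n", ops.open(), dsp_minor[7]);
        return 0;
}

The ".field =" form is standard C99, while both the "field:" spelling and the "[first ... last]" array range are GCC extensions; the range form is a common kernel idiom for giving every slot of a parameter array the same default.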
module_exit(btaudio_cleanup_module); -MODULE_PARM(dsp1,"i"); -MODULE_PARM(dsp2,"i"); -MODULE_PARM(mixer,"i"); -MODULE_PARM(debug,"i"); -MODULE_PARM(irq_debug,"i"); -MODULE_PARM(digital,"i"); -MODULE_PARM(analog,"i"); -MODULE_PARM(rate,"i"); -MODULE_PARM(latency,"i"); +MODULE_PARM(dsp1, "1-" __stringify(BTAUDIO_MAX) "i"); +MODULE_PARM_DESC(dsp1,"digital dsp nr"); +MODULE_PARM(dsp2, "1-" __stringify(BTAUDIO_MAX) "i"); +MODULE_PARM_DESC(dsp2,"analog dsp nr"); +MODULE_PARM(mixer, "1-" __stringify(BTAUDIO_MAX) "i"); +MODULE_PARM_DESC(mixer,"mixer nr"); +MODULE_PARM(debug, "i"); +MODULE_PARM(irq_debug, "i"); +MODULE_PARM(digital, "1-" __stringify(BTAUDIO_MAX) "i"); +MODULE_PARM_DESC(digital,"register digital dsp device"); +MODULE_PARM(analog, "1-" __stringify(BTAUDIO_MAX) "i"); +MODULE_PARM_DESC(analog,"register analog dsp device (and mixer)"); +MODULE_PARM(rate, "1-" __stringify(BTAUDIO_MAX) "i"); +MODULE_PARM_DESC(rate,"sample rate supported by the hardware"); +MODULE_PARM(latency, "i"); MODULE_PARM_DESC(latency,"pci latency timer"); MODULE_DEVICE_TABLE(pci, btaudio_pci_tbl); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/cmpci.c linux.22-ac2/drivers/sound/cmpci.c --- linux.vanilla/drivers/sound/cmpci.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/sound/cmpci.c 2003-09-01 13:32:58.000000000 +0100 @@ -1,4 +1,3 @@ -/*****************************************************************************/ /* * cmpci.c -- C-Media PCI audio driver. * @@ -8,7 +7,6 @@ * Based on the PCI drivers by Thomas Sailer (sailer@ife.ee.ethz.ch) * * For update, visit: - * http://members.home.net/puresoft/cmedia.html * http://www.cmedia.com.tw * * This program is free software; you can redistribute it and/or modify @@ -82,8 +80,8 @@ * - speaker mixer support * Mon Aug 13 2001 * - optimizations and cleanups - * 03/01/2003 - open_mode fixes from Georg Acher * + * 03/01/2003 - open_mode fixes from Georg Acher */ /*****************************************************************************/ @@ -98,15 +96,16 @@ #include #include #include -#include #include #include #include #include #include #include +#include #include #include +#include #include #include "dm.h" @@ -178,26 +177,143 @@ #define DSP_MIX_TREBLEIDX_R ((unsigned char)(0x45)) #define DSP_MIX_BASSIDX_L ((unsigned char)(0x46)) #define DSP_MIX_BASSIDX_R ((unsigned char)(0x47)) +#define DSP_MIX_EXTENSION ((unsigned char)(0xf0)) +// pseudo register for AUX +#define DSP_MIX_AUXVOL_L ((unsigned char)(0x50)) +#define DSP_MIX_AUXVOL_R ((unsigned char)(0x51)) + +// I/O length +#define CM_EXTENT_CODEC 0x100 +#define CM_EXTENT_MIDI 0x2 +#define CM_EXTENT_SYNTH 0x4 +#define CM_EXTENT_GAME 0x8 + +// Function Control Register 0 (00h) +#define CHADC0 0x01 +#define CHADC1 0x02 +#define PAUSE0 0x04 +#define PAUSE1 0x08 + +// Function Control Register 0+2 (02h) +#define CHEN0 0x01 +#define CHEN1 0x02 +#define RST_CH0 0x04 +#define RST_CH1 0x08 + +// Function Control Register 1 (04h) +#define JYSTK_EN 0x02 +#define UART_EN 0x04 +#define SPDO2DAC 0x40 +#define SPDFLOOP 0x80 + +// Function Control Register 1+1 (05h) +#define SPDF_0 0x01 +#define SPDF_1 0x02 +#define ASFC 0xe0 +#define DSFC 0x1c +#define SPDIF2DAC (SPDF_0 << 8 | SPDO2DAC) + +// Channel Format Register (08h) +#define CM_CFMT_STEREO 0x01 +#define CM_CFMT_16BIT 0x02 +#define CM_CFMT_MASK 0x03 +#define POLVALID 0x20 +#define INVSPDIFI 0x80 + +// Channel Format Register+2 (0ah) +#define SPD24SEL 0x20 + +// Channel Format Register+3 (0bh) +#define CHB3D 0x20 +#define 
CHB3D5C 0x80 + +// Interrupt Hold/Clear Register+2 (0eh) +#define CH0_INT_EN 0x01 +#define CH1_INT_EN 0x02 + +// Interrupt Register (10h) +#define CHINT0 0x01 +#define CHINT1 0x02 +#define CH0BUSY 0x04 +#define CH1BUSY 0x08 + +// Legacy Control/Status Register+1 (15h) +#define EXBASEN 0x10 +#define BASE2LIN 0x20 +#define CENTR2LIN 0x40 +#define CB2LIN (BASE2LIN|CENTR2LIN) +#define CHB3D6C 0x80 + +// Legacy Control/Status Register+2 (16h) +#define DAC2SPDO 0x20 +#define SPDCOPYRHT 0x40 +#define ENSPDOUT 0x80 + +// Legacy Control/Status Register+3 (17h) +#define FMSEL 0x03 +#define VSBSEL 0x0c +#define VMPU 0x60 +#define NXCHG 0x80 + +// Miscellaneous Control Register (18h) +#define REAR2LIN 0x20 +#define MUTECH1 0x40 +#define ENCENTER 0x80 + +// Miscellaneous Control Register+1 (19h) +#define SELSPDIFI2 0x01 +#define SPDF_AC97 0x80 + +// Miscellaneous Control Register+2 (1ah) +#define AC3_EN 0x04 +#define FM_EN 0x08 +#define SPD32SEL 0x20 +#define XCHGDAC 0x40 +#define ENDBDAC 0x80 + +// Miscellaneous Control Register+3 (1bh) +#define SPDIFI48K 0x01 +#define SPDO5V 0x02 +#define N4SPK3D 0x04 +#define RESET 0x40 +#define PWD 0x80 +#define SPDIF48K (SPDIFI48K << 24 | SPDF_AC97 << 8) + +// Mixer1 (24h) +#define CDPLAY 0x01 +#define X3DEN 0x02 +#define REAR2FRONT 0x10 +#define SPK4 0x20 +#define WSMUTE 0x40 + +// Miscellaneous Register (27h) +#define SPDVALID 0x02 +#define CENTR2MIC 0x04 + +#define CM_CFMT_DACSHIFT 0 +#define CM_CFMT_ADCSHIFT 2 +#define CM_FREQ_DACSHIFT 2 +#define CM_FREQ_ADCSHIFT 5 +#define RSTDAC RST_CH0 +#define RSTADC RST_CH1 +#define ENDAC CHEN0 +#define ENADC CHEN1 +#define PAUSEDAC PAUSE0 +#define PAUSEADC PAUSE1 +#define CODEC_CMI_DAC_FRAME1 CODEC_CMI_CH0_FRAME1 +#define CODEC_CMI_DAC_FRAME2 CODEC_CMI_CH0_FRAME2 +#define CODEC_CMI_ADC_FRAME1 CODEC_CMI_CH1_FRAME1 +#define CODEC_CMI_ADC_FRAME2 CODEC_CMI_CH1_FRAME2 +#define DACINT CHINT0 +#define ADCINT CHINT1 +#define DACBUSY CH0BUSY +#define ADCBUSY CH1BUSY +#define ENDACINT CH0_INT_EN +#define ENADCINT CH1_INT_EN -#define CM_CH0_RESET 0x04 -#define CM_CH1_RESET 0x08 -#define CM_EXTENT_CODEC 0x100 -#define CM_EXTENT_MIDI 0x2 -#define CM_EXTENT_SYNTH 0x4 -#define CM_INT_CH0 1 -#define CM_INT_CH1 2 - -#define CM_CFMT_STEREO 0x01 -#define CM_CFMT_16BIT 0x02 -#define CM_CFMT_MASK 0x03 -#define CM_CFMT_DACSHIFT 2 -#define CM_CFMT_ADCSHIFT 0 - +static const unsigned sample_size[] = { 1, 2, 2, 4 }; static const unsigned sample_shift[] = { 0, 1, 1, 2 }; -#define CM_ENABLE_CH1 0x2 -#define CM_ENABLE_CH0 0x1 - /* MIDI buffer sizes **************************/ #define MIDIINBUF 256 @@ -213,11 +329,19 @@ #define NR_DEVICE 3 /* maximum number of devices */ -/*********************************************/ +static unsigned int devindex = 0; + +//*********************************************/ struct cm_state { - unsigned int magic; /* magic */ - struct cm_state *next; /* we keep cm cards in a linked list */ + /* magic */ + unsigned int magic; + + /* list of cmedia devices */ + struct list_head devs; + + /* the corresponding pci_dev structure */ + struct pci_dev *dev; int dev_audio; /* soundcore stuff */ int dev_mixer; @@ -243,7 +367,7 @@ struct dmabuf { void *rawbuf; - unsigned rawphys; + dma_addr_t dmaaddr; unsigned buforder; unsigned numfrag; unsigned fragshift; @@ -261,6 +385,7 @@ unsigned mapped:1; /* OSS stuff */ unsigned ready:1; unsigned endcleared:1; + unsigned enabled:1; unsigned ossfragshift; int ossmaxfrags; unsigned subdivision; @@ -275,7 +400,9 @@ unsigned char ibuf[MIDIINBUF]; unsigned char obuf[MIDIOUTBUF]; } midi; - + + 
struct gameport gameport; + int chip_version; int max_channels; int curr_channels; @@ -312,8 +439,7 @@ #define DO_SPDIF_IN 0x00000200 #define DO_SPDIF_LOOP 0x00000400 -static struct cm_state *devs; -static unsigned long wavetable_mem; +static LIST_HEAD(devs); /* --------------------------------------------------------------------- */ @@ -356,55 +482,55 @@ static void set_dmadac1(struct cm_state *s, unsigned int addr, unsigned int count) { if (addr) - outl(addr, s->iobase + CODEC_CMI_CH0_FRAME1); - outw(count - 1, s->iobase + CODEC_CMI_CH0_FRAME2); - maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~1, 0); + outl(addr, s->iobase + CODEC_CMI_ADC_FRAME1); + outw(count - 1, s->iobase + CODEC_CMI_ADC_FRAME2); + maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~CHADC1, 0); } static void set_dmaadc(struct cm_state *s, unsigned int addr, unsigned int count) { - outl(addr, s->iobase + CODEC_CMI_CH0_FRAME1); - outw(count - 1, s->iobase + CODEC_CMI_CH0_FRAME2); - maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, 1); + outl(addr, s->iobase + CODEC_CMI_ADC_FRAME1); + outw(count - 1, s->iobase + CODEC_CMI_ADC_FRAME2); + maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, CHADC1); } static void set_dmadac(struct cm_state *s, unsigned int addr, unsigned int count) { - outl(addr, s->iobase + CODEC_CMI_CH1_FRAME1); - outw(count - 1, s->iobase + CODEC_CMI_CH1_FRAME2); - maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~2, 0); + outl(addr, s->iobase + CODEC_CMI_DAC_FRAME1); + outw(count - 1, s->iobase + CODEC_CMI_DAC_FRAME2); + maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~CHADC0, 0); if (s->status & DO_DUAL_DAC) set_dmadac1(s, 0, count); } static void set_countadc(struct cm_state *s, unsigned count) { - outw(count - 1, s->iobase + CODEC_CMI_CH0_FRAME2 + 2); + outw(count - 1, s->iobase + CODEC_CMI_ADC_FRAME2 + 2); } static void set_countdac(struct cm_state *s, unsigned count) { - outw(count - 1, s->iobase + CODEC_CMI_CH1_FRAME2 + 2); + outw(count - 1, s->iobase + CODEC_CMI_DAC_FRAME2 + 2); if (s->status & DO_DUAL_DAC) set_countadc(s, count); } -static inline unsigned get_dmadac(struct cm_state *s) +static unsigned get_dmadac(struct cm_state *s) { unsigned int curr_addr; - curr_addr = inw(s->iobase + CODEC_CMI_CH1_FRAME2) + 1; + curr_addr = inw(s->iobase + CODEC_CMI_DAC_FRAME2) + 1; curr_addr <<= sample_shift[(s->fmt >> CM_CFMT_DACSHIFT) & CM_CFMT_MASK]; curr_addr = s->dma_dac.dmasize - curr_addr; return curr_addr; } -static inline unsigned get_dmaadc(struct cm_state *s) +static unsigned get_dmaadc(struct cm_state *s) { unsigned int curr_addr; - curr_addr = inw(s->iobase + CODEC_CMI_CH0_FRAME2) + 1; + curr_addr = inw(s->iobase + CODEC_CMI_ADC_FRAME2) + 1; curr_addr <<= sample_shift[(s->fmt >> CM_CFMT_ADCSHIFT) & CM_CFMT_MASK]; curr_addr = s->dma_adc.dmasize - curr_addr; @@ -413,23 +539,79 @@ static void wrmixer(struct cm_state *s, unsigned char idx, unsigned char data) { + unsigned char regval, pseudo; + + // pseudo register + if (idx == DSP_MIX_AUXVOL_L) { + data >>= 4; + data &= 0x0f; + regval = inb(s->iobase + CODEC_CMI_AUX_VOL) & ~0x0f; + outb(regval | data, s->iobase + CODEC_CMI_AUX_VOL); + return; + } + if (idx == DSP_MIX_AUXVOL_R) { + data &= 0xf0; + regval = inb(s->iobase + CODEC_CMI_AUX_VOL) & ~0xf0; + outb(regval | data, s->iobase + CODEC_CMI_AUX_VOL); + return; + } outb(idx, s->iobase + CODEC_SB16_ADDR); udelay(10); + // pseudo bits + if (idx == DSP_MIX_OUTMIXIDX) { + pseudo = data & ~0x1f; + pseudo >>= 1; + regval = inb(s->iobase + CODEC_CMI_MIXER2) & ~0x30; + outb(regval | pseudo, s->iobase + CODEC_CMI_MIXER2); + } + if (idx == DSP_MIX_ADCMIXIDX_L) { + 
pseudo = data & 0x80; + pseudo >>= 1; + regval = inb(s->iobase + CODEC_CMI_MIXER2) & ~0x40; + outb(regval | pseudo, s->iobase + CODEC_CMI_MIXER2); + } + if (idx == DSP_MIX_ADCMIXIDX_R) { + pseudo = data & 0x80; + regval = inb(s->iobase + CODEC_CMI_MIXER2) & ~0x80; + outb(regval | pseudo, s->iobase + CODEC_CMI_MIXER2); + } outb(data, s->iobase + CODEC_SB16_DATA); udelay(10); } static unsigned char rdmixer(struct cm_state *s, unsigned char idx) { - unsigned char v; - unsigned long flags; + unsigned char v, pseudo; - spin_lock_irqsave(&s->lock, flags); + // pseudo register + if (idx == DSP_MIX_AUXVOL_L) { + v = inb(s->iobase + CODEC_CMI_AUX_VOL) & 0x0f; + v <<= 4; + return v; + } + if (idx == DSP_MIX_AUXVOL_L) { + v = inb(s->iobase + CODEC_CMI_AUX_VOL) & 0xf0; + return v; + } outb(idx, s->iobase + CODEC_SB16_ADDR); udelay(10); v = inb(s->iobase + CODEC_SB16_DATA); udelay(10); - spin_unlock_irqrestore(&s->lock, flags); + // pseudo bits + if (idx == DSP_MIX_OUTMIXIDX) { + pseudo = inb(s->iobase + CODEC_CMI_MIXER2) & 0x30; + pseudo <<= 1; + v |= pseudo; + } + if (idx == DSP_MIX_ADCMIXIDX_L) { + pseudo = inb(s->iobase + CODEC_CMI_MIXER2) & 0x40; + pseudo <<= 1; + v |= pseudo; + } + if (idx == DSP_MIX_ADCMIXIDX_R) { + pseudo = inb(s->iobase + CODEC_CMI_MIXER2) & 0x80; + v |= pseudo; + } return v; } @@ -479,24 +661,88 @@ { 48000, (44100 + 48000) / 2, 48000, 7 } }; +static void set_spdif_copyright(struct cm_state *s, int spdif_copyright) +{ + /* enable SPDIF-in Copyright */ + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 2, ~SPDCOPYRHT, spdif_copyright ? SPDCOPYRHT : 0); +} + +static void set_spdif_loop(struct cm_state *s, int spdif_loop) +{ + /* enable SPDIF loop */ + if (spdif_loop) { + s->status |= DO_SPDIF_LOOP; + /* turn on spdif-in to spdif-out */ + maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, SPDFLOOP); + } else { + s->status &= ~DO_SPDIF_LOOP; + /* turn off spdif-in to spdif-out */ + maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~SPDFLOOP, 0); + } +} + +static void set_spdif_monitor(struct cm_state *s, int channel) +{ + // SPDO2DAC + maskw(s->iobase + CODEC_CMI_FUNCTRL1, ~SPDO2DAC, channel == 2 ? SPDO2DAC : 0); + // CDPLAY + if (s->chip_version >= 39) + maskb(s->iobase + CODEC_CMI_MIXER1, ~CDPLAY, channel ? CDPLAY : 0); +} + +static void set_spdifout_level(struct cm_state *s, int level5v) +{ + /* SPDO5V */ + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 3, ~SPDO5V, level5v ? SPDO5V : 0); +} + +static void set_spdifin_inverse(struct cm_state *s, int spdif_inverse) +{ + if (spdif_inverse) { + /* turn on spdif-in inverse */ + if (s->chip_version >= 39) + maskb(s->iobase + CODEC_CMI_CHFORMAT, ~0, INVSPDIFI); + else + maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, 1); + } else { + /* turn off spdif-ininverse */ + if (s->chip_version >= 39) + maskb(s->iobase + CODEC_CMI_CHFORMAT, ~INVSPDIFI, 0); + else + maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~1, 0); + } +} + +static void set_spdifin_channel2(struct cm_state *s, int channel2) +{ + /* SELSPDIFI2 */ + if (s->chip_version >= 39) + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 1, ~SELSPDIFI2, channel2 ? SELSPDIFI2 : 0); +} + +static void set_spdifin_valid(struct cm_state *s, int valid) +{ + /* SPDVALID */ + maskb(s->iobase + CODEC_CMI_MISC, ~SPDVALID, valid ? SPDVALID : 0); +} + static void set_spdifout_unlocked(struct cm_state *s, unsigned rate) { if (rate == 48000 || rate == 44100) { + // SPDF_0 + maskw(s->iobase + CODEC_CMI_FUNCTRL1, ~0, SPDF_0); // SPDIFI48K SPDF_ACc97 - maskl(s->iobase + CODEC_CMI_MISC_CTRL, ~0x01008000, rate == 48000 ? 
0x01008000 : 0); + maskl(s->iobase + CODEC_CMI_MISC_CTRL, ~SPDIF48K, rate == 48000 ? SPDIF48K : 0); // ENSPDOUT - maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 2, ~0, 0x80); - // SPDF_1 SPD2DAC - maskw(s->iobase + CODEC_CMI_FUNCTRL1, ~0, 0x240); - // CDPLAY - if (s->chip_version >= 39) - maskb(s->iobase + CODEC_CMI_MIXER1, ~0, 1); + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 2, ~0, ENSPDOUT); + // monitor SPDIF out + set_spdif_monitor(s, 2); s->status |= DO_SPDIF_OUT; } else { - maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 2, ~0x80, 0); - maskw(s->iobase + CODEC_CMI_FUNCTRL1, ~0x240, 0); - if (s->chip_version >= 39) - maskb(s->iobase + CODEC_CMI_MIXER1, ~1, 0); + maskw(s->iobase + CODEC_CMI_FUNCTRL1, ~SPDF_0, 0); + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 2, ~ENSPDOUT, 0); + // monitor none + set_spdif_monitor(s, 0); s->status &= ~DO_SPDIF_OUT; } } @@ -510,7 +756,30 @@ spin_unlock_irqrestore(&s->lock, flags); } -/* find parity for bit 4~30 */ +static void set_spdifin_unlocked(struct cm_state *s, unsigned rate) +{ + if (rate == 48000 || rate == 44100) { + // SPDF_1 + maskw(s->iobase + CODEC_CMI_FUNCTRL1, ~0, SPDF_1); + // SPDIFI48K SPDF_AC97 + maskl(s->iobase + CODEC_CMI_MISC_CTRL, ~SPDIF48K, rate == 48000 ? SPDIF48K : 0); + s->status |= DO_SPDIF_IN; + } else { + maskw(s->iobase + CODEC_CMI_FUNCTRL1, ~SPDF_1, 0); + s->status &= ~DO_SPDIF_IN; + } +} + +static void set_spdifin(struct cm_state *s, unsigned rate) +{ + unsigned long flags; + + spin_lock_irqsave(&s->lock, flags); + set_spdifin_unlocked(s,rate); + spin_unlock_irqrestore(&s->lock, flags); +} + +//* find parity for bit 4~30 */ static unsigned parity(unsigned data) { unsigned parity = 0; @@ -531,21 +800,26 @@ /* enable AC3 */ if (rate == 48000 || rate == 44100) { // mute DAC - maskb(s->iobase + CODEC_CMI_MIXER1, ~0, 0x40); - // AC3EN for 037, 0x10 - maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, 0x10); + maskb(s->iobase + CODEC_CMI_MIXER1, ~0, WSMUTE); // AC3EN for 039, 0x04 - maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, 0x04); + if (s->chip_version >= 39) + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, AC3_EN); + // AC3EN for 037, 0x10 + else if (s->chip_version == 37) + maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, 0x10); if (s->capability & CAN_AC3_HW) { - // SPD24SEL for 037, 0x02 // SPD24SEL for 039, 0x20, but cannot be set - maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, 0x02); + if (s->chip_version >= 39) + maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, SPD24SEL); + // SPD24SEL for 037, 0x02 + else if (s->chip_version == 37) + maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, 0x02); s->status |= DO_AC3_HW; if (s->chip_version >= 39) - maskb(s->iobase + CODEC_CMI_MIXER1, ~1, 0); + maskb(s->iobase + CODEC_CMI_MIXER1, ~CDPLAY, 0); } else { // SPD32SEL for 037 & 039, 0x20 - maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, 0x20); + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, SPD32SEL); // set 176K sample rate to fix 033 HW bug if (s->chip_version == 33) { if (rate == 48000) @@ -556,18 +830,16 @@ s->status |= DO_AC3_SW; } } else { - maskb(s->iobase + CODEC_CMI_MIXER1, ~0x40, 0); + maskb(s->iobase + CODEC_CMI_MIXER1, ~WSMUTE, 0); maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0x32, 0); - maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0x24, 0); - maskb(s->iobase + CODEC_CMI_CHFORMAT + 1, ~0x08, 0); + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~(SPD32SEL|AC3_EN), 0); if (s->chip_version == 33) maskb(s->iobase + CODEC_CMI_CHFORMAT + 1, ~0x08, 0); if (s->chip_version >= 39) - maskb(s->iobase + CODEC_CMI_MIXER1, ~0, 1); + maskb(s->iobase + 
CODEC_CMI_MIXER1, ~0, CDPLAY); s->status &= ~DO_AC3; } s->spdif_counter = 0; - } static void set_ac3(struct cm_state *s, unsigned rate) @@ -629,9 +901,9 @@ } } s->rateadc = rate; - freq <<= 2; + freq <<= CM_FREQ_ADCSHIFT; - maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~0x1c, freq); + maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~ASFC, freq); } static void set_adc_rate(struct cm_state *s, unsigned rate) @@ -652,10 +924,10 @@ } } s->rateadc = rate; - freq <<= 2; + freq <<= CM_FREQ_ADCSHIFT; spin_lock_irqsave(&s->lock, flags); - maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~0x1c, freq); + maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~ASFC, freq); spin_unlock_irqrestore(&s->lock, flags); } @@ -677,46 +949,45 @@ } } s->ratedac = rate; - freq <<= 5; + freq <<= CM_FREQ_DACSHIFT; spin_lock_irqsave(&s->lock, flags); - maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~0xe0, freq); - + maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~DSFC, freq); + spin_unlock_irqrestore(&s->lock, flags); if (s->curr_channels <= 2) - set_spdifout_unlocked(s, rate); + set_spdifout(s, rate); if (s->status & DO_DUAL_DAC) - set_adc_rate_unlocked(s, rate); - - spin_unlock_irqrestore(&s->lock, flags); + set_adc_rate(s, rate); } /* --------------------------------------------------------------------- */ static inline void reset_adc(struct cm_state *s) { /* reset bus master */ - outb(s->enable | CM_CH0_RESET, s->iobase + CODEC_CMI_FUNCTRL0 + 2); + outb(s->enable | RSTADC, s->iobase + CODEC_CMI_FUNCTRL0 + 2); udelay(10); - outb(s->enable & ~CM_CH0_RESET, s->iobase + CODEC_CMI_FUNCTRL0 + 2); + outb(s->enable & ~RSTADC, s->iobase + CODEC_CMI_FUNCTRL0 + 2); } static inline void reset_dac(struct cm_state *s) { /* reset bus master */ - outb(s->enable | CM_CH1_RESET, s->iobase + CODEC_CMI_FUNCTRL0 + 2); - outb(s->enable & ~CM_CH1_RESET, s->iobase + CODEC_CMI_FUNCTRL0 + 2); + outb(s->enable | RSTDAC, s->iobase + CODEC_CMI_FUNCTRL0 + 2); + udelay(10); + outb(s->enable & ~RSTDAC, s->iobase + CODEC_CMI_FUNCTRL0 + 2); if (s->status & DO_DUAL_DAC) reset_adc(s); } static inline void pause_adc(struct cm_state *s) { - maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, 4); + maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, PAUSEADC); } static inline void pause_dac(struct cm_state *s) { - maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, 8); + maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, PAUSEDAC); if (s->status & DO_DUAL_DAC) pause_adc(s); } @@ -724,7 +995,7 @@ static inline void disable_adc(struct cm_state *s) { /* disable channel */ - s->enable &= ~CM_ENABLE_CH0; + s->enable &= ~ENADC; outb(s->enable, s->iobase + CODEC_CMI_FUNCTRL0 + 2); reset_adc(s); } @@ -732,7 +1003,7 @@ static inline void disable_dac(struct cm_state *s) { /* disable channel */ - s->enable &= ~CM_ENABLE_CH1; + s->enable &= ~ENDAC; outb(s->enable, s->iobase + CODEC_CMI_FUNCTRL0 + 2); reset_dac(s); if (s->status & DO_DUAL_DAC) @@ -741,22 +1012,22 @@ static inline void enable_adc(struct cm_state *s) { - if (!(s->enable & CM_ENABLE_CH0)) { + if (!(s->enable & ENADC)) { /* enable channel */ - s->enable |= CM_ENABLE_CH0; + s->enable |= ENADC; outb(s->enable, s->iobase + CODEC_CMI_FUNCTRL0 + 2); } - maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~4, 0); + maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~PAUSEADC, 0); } static inline void enable_dac_unlocked(struct cm_state *s) { - if (!(s->enable & CM_ENABLE_CH1)) { + if (!(s->enable & ENDAC)) { /* enable channel */ - s->enable |= CM_ENABLE_CH1; + s->enable |= ENDAC; outb(s->enable, s->iobase + CODEC_CMI_FUNCTRL0 + 2); } - maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~8, 0); + maskb(s->iobase + 
CODEC_CMI_FUNCTRL0, ~PAUSEDAC, 0); if (s->status & DO_DUAL_DAC) enable_adc(s); @@ -773,9 +1044,9 @@ static inline void stop_adc_unlocked(struct cm_state *s) { - if (s->enable & CM_ENABLE_CH0) { + if (s->enable & ENADC) { /* disable interrupt */ - maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~1, 0); + maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~ENADCINT, 0); disable_adc(s); } } @@ -792,9 +1063,9 @@ static inline void stop_dac_unlocked(struct cm_state *s) { - if (s->enable & CM_ENABLE_CH1) { + if (s->enable & ENDAC) { /* disable interrupt */ - maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~2, 0); + maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~ENDACINT, 0); disable_dac(s); } if (s->status & DO_DUAL_DAC) @@ -810,12 +1081,12 @@ spin_unlock_irqrestore(&s->lock, flags); } -static void start_adc_unlocked(struct cm_state *s) +static inline void start_adc_unlocked(struct cm_state *s) { if ((s->dma_adc.mapped || s->dma_adc.count < (signed)(s->dma_adc.dmasize - 2*s->dma_adc.fragsize)) && s->dma_adc.ready) { /* enable interrupt */ - maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~0, 1); + maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~0, ENADCINT); enable_adc(s); } } @@ -833,7 +1104,7 @@ { if ((s->dma_adc.mapped || s->dma_adc.count > 0) && s->dma_adc.ready) { /* enable interrupt */ -// maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~0, 1); + maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~0, ENADCINT); enable_dac_unlocked(s); } } @@ -842,11 +1113,11 @@ { if ((s->dma_dac.mapped || s->dma_dac.count > 0) && s->dma_dac.ready) { /* enable interrupt */ - maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~0, 2); + maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~0, ENDACINT); enable_dac_unlocked(s); } - if (s->status & DO_DUAL_DAC) - start_dac1_unlocked(s); + if (s->status & DO_DUAL_DAC) + start_dac1_unlocked(s); } static void start_dac(struct cm_state *s) @@ -870,13 +1141,13 @@ set_spdifout_unlocked(s, 0); if (s->capability & CAN_MULTI_CH_HW) { // NXCHG - maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~0, 0x80); + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~0, NXCHG); // CHB3D or CHB3D5C - maskb(s->iobase + CODEC_CMI_CHFORMAT + 3, ~0xa0, channels > 4 ? 0x80 : 0x20); + maskb(s->iobase + CODEC_CMI_CHFORMAT + 3, ~(CHB3D5C|CHB3D), channels > 4 ? CHB3D5C : CHB3D); // CHB3D6C - maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~0x80, channels == 6 ? 0x80 : 0); + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~CHB3D6C, channels == 6 ? CHB3D6C : 0); // ENCENTER - maskb(s->iobase + CODEC_CMI_MISC_CTRL, ~0x80, channels == 6 ? 0x80 : 0); + maskb(s->iobase + CODEC_CMI_MISC_CTRL, ~ENCENTER, channels == 6 ? 
ENCENTER : 0); s->status |= DO_MULTI_CH_HW; } else if (s->capability & CAN_DUAL_DAC) { unsigned char fmtm = ~0, fmts = 0; @@ -884,7 +1155,7 @@ // ENDBDAC, turn on double DAC mode // XCHGDAC, CH0 -> back, CH1->front - maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, 0xC0); + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, ENDBDAC|XCHGDAC); s->status |= DO_DUAL_DAC; // prepare secondary buffer @@ -907,20 +1178,21 @@ } + // N4SPK3D, disable 4 speaker mode (analog duplicate) if (s->speakers > 2) - maskb(s->iobase + CODEC_CMI_MISC_CTRL + 3, ~0x04, 0); + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 3, ~N4SPK3D, 0); s->curr_channels = channels; } else { if (s->status & DO_MULTI_CH_HW) { - maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~0x80, 0); - maskb(s->iobase + CODEC_CMI_CHFORMAT + 3, ~0xa0, 0); - maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~0x80, 0); + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~NXCHG, 0); + maskb(s->iobase + CODEC_CMI_CHFORMAT + 3, ~(CHB3D5C|CHB3D), 0); + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~CHB3D6C, 0); } else if (s->status & DO_DUAL_DAC) { - maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0x80, 0); + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~ENDBDAC, 0); } // N4SPK3D, enable 4 speaker mode (analog duplicate) if (s->speakers > 2) - maskb(s->iobase + CODEC_CMI_MISC_CTRL + 3, ~0, 0x04); + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 3, ~0, N4SPK3D); s->status &= ~DO_MULTI_CH; s->curr_channels = s->fmt & (CM_CFMT_STEREO << CM_CFMT_DACSHIFT) ? 2 : 1; } @@ -934,7 +1206,7 @@ #define DMABUF_DEFAULTORDER (16-PAGE_SHIFT) #define DMABUF_MINORDER 1 -static void dealloc_dmabuf(struct dmabuf *db) +static void dealloc_dmabuf(struct cm_state *s, struct dmabuf *db) { struct page *pstart, *pend; @@ -943,7 +1215,7 @@ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++) mem_map_unreserve(pstart); - free_pages((unsigned long)db->rawbuf, db->buforder); + pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr); } db->rawbuf = NULL; db->mapped = db->ready = 0; @@ -976,18 +1248,11 @@ if (!db->rawbuf) { db->ready = db->mapped = 0; for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--) - if ((db->rawbuf = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, order))) + if ((db->rawbuf = pci_alloc_consistent(s->dev, PAGE_SIZE << order, &db->dmaaddr))) break; - if (!db->rawbuf) + if (!db->rawbuf || !db->dmaaddr) return -ENOMEM; db->buforder = order; - db->rawphys = virt_to_bus(db->rawbuf); - if ((db->rawphys ^ (db->rawphys + (PAGE_SIZE << db->buforder) - 1)) & ~0xffff) - printk(KERN_DEBUG "cmpci: DMA buffer crosses 64k boundary: busaddr 0x%lx size %ld\n", - (long) db->rawphys, PAGE_SIZE << db->buforder); - if ((db->rawphys + (PAGE_SIZE << db->buforder) - 1) & ~0xffffff) - printk(KERN_DEBUG "cmpci: DMA buffer beyond 16MB: busaddr 0x%lx size %ld\n", - (long) db->rawphys, PAGE_SIZE << db->buforder); /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1); for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++) @@ -1021,17 +1286,18 @@ spin_lock_irqsave(&s->lock, flags); if (rec) { if (s->status & DO_DUAL_DAC) - set_dmadac1(s, db->rawphys, db->dmasize >> sample_shift[fmt]); + set_dmadac1(s, db->dmaaddr, db->dmasize >> sample_shift[fmt]); else - set_dmaadc(s, db->rawphys, db->dmasize >> sample_shift[fmt]); + set_dmaadc(s, db->dmaaddr, db->dmasize >> sample_shift[fmt]); /* program sample counts 
*/ set_countdac(s, db->fragsamples); } else { - set_dmadac(s, db->rawphys, db->dmasize >> sample_shift[fmt]); + set_dmadac(s, db->dmaaddr, db->dmasize >> sample_shift[fmt]); /* program sample counts */ set_countdac(s, db->fragsamples); } spin_unlock_irqrestore(&s->lock, flags); + db->enabled = 1; db->ready = 1; return 0; } @@ -1084,7 +1350,7 @@ clear_advance(s); s->dma_adc.endcleared = 1; } - if (s->dma_dac.count + (signed)s->dma_dac.fragsize <= (signed)s->dma_dac.dmasize) + if (s->dma_adc.count + (signed)s->dma_adc.fragsize <= (signed)s->dma_adc.dmasize) wake_up(&s->dma_adc.wait); } } else { @@ -1173,10 +1439,10 @@ spin_lock(&s->lock); intstat = inb(s->iobase + CODEC_CMI_INT_HLDCLR + 2); /* acknowledge interrupt */ - if (intsrc & CM_INT_CH0) - mask |= 1; - if (intsrc & CM_INT_CH1) - mask |= 2; + if (intsrc & ADCINT) + mask |= ENADCINT; + if (intsrc & DACINT) + mask |= ENDACINT; outb(intstat & ~mask, s->iobase + CODEC_CMI_INT_HLDCLR + 2); outb(intstat | mask, s->iobase + CODEC_CMI_INT_HLDCLR + 2); cm_update_ptr(s); @@ -1237,7 +1503,8 @@ [SOUND_MIXER_SYNTH] = { DSP_MIX_FMVOLIDX_L, DSP_MIX_FMVOLIDX_R, MT_5MUTE, 0x40, 0x00 }, [SOUND_MIXER_VOLUME] = { DSP_MIX_MASTERVOLIDX_L, DSP_MIX_MASTERVOLIDX_R, MT_5MUTE, 0x00, 0x00 }, [SOUND_MIXER_PCM] = { DSP_MIX_VOICEVOLIDX_L, DSP_MIX_VOICEVOLIDX_R, MT_5MUTE, 0x00, 0x00 }, - [SOUND_MIXER_SPEAKER]= { DSP_MIX_SPKRVOLIDX, DSP_MIX_SPKRVOLIDX, MT_5MUTEMONO, 0x01, 0x01 } + [SOUND_MIXER_LINE1] = { DSP_MIX_AUXVOL_L, DSP_MIX_AUXVOL_R, MT_5MUTE, 0x80, 0x20 }, + [SOUND_MIXER_SPEAKER]= { DSP_MIX_SPKRVOLIDX, DSP_MIX_SPKRVOLIDX, MT_5MUTEMONO, 0x00, 0x01 } }; static const unsigned char volidx[SOUND_MIXER_NRDEVICES] = @@ -1248,15 +1515,32 @@ [SOUND_MIXER_SYNTH] = 4, [SOUND_MIXER_VOLUME] = 5, [SOUND_MIXER_PCM] = 6, - [SOUND_MIXER_SPEAKER]= 7 + [SOUND_MIXER_LINE1] = 7, + [SOUND_MIXER_SPEAKER]= 8 }; +static unsigned mixer_outmask(struct cm_state *s) +{ + unsigned long flags; + int i, j, k; + + spin_lock_irqsave(&s->lock, flags); + j = rdmixer(s, DSP_MIX_OUTMIXIDX); + spin_unlock_irqrestore(&s->lock, flags); + for (k = i = 0; i < SOUND_MIXER_NRDEVICES; i++) + if (j & mixtable[i].play) + k |= 1 << i; + return k; +} + static unsigned mixer_recmask(struct cm_state *s) { + unsigned long flags; int i, j, k; + spin_lock_irqsave(&s->lock, flags); j = rdmixer(s, DSP_MIX_ADCMIXIDX_L); - j &= 0x7f; + spin_unlock_irqrestore(&s->lock, flags); for (k = i = 0; i < SOUND_MIXER_NRDEVICES; i++) if (j & mixtable[i].rec) k |= 1 << i; @@ -1289,15 +1573,17 @@ } if (cmd == OSS_GETVERSION) return put_user(SOUND_VERSION, (int *)arg); - if (_IOC_TYPE(cmd) != 'M' || _IOC_SIZE(cmd) != sizeof(int)) + if (_IOC_TYPE(cmd) != 'M' || _SIOC_SIZE(cmd) != sizeof(int)) return -EINVAL; - if (_IOC_DIR(cmd) == _IOC_READ) { + if (_SIOC_DIR(cmd) == _SIOC_READ) { switch (_IOC_NR(cmd)) { case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */ - return put_user(mixer_recmask(s), (int *)arg); + val = mixer_recmask(s); + return put_user(val, (int *)arg); case SOUND_MIXER_OUTSRC: /* Arg contains a bit for each recording source */ - return put_user(mixer_recmask(s), (int *)arg);//need fix + val = mixer_outmask(s); + return put_user(val, (int *)arg); case SOUND_MIXER_DEVMASK: /* Arg contains a bit for each supported device */ for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++) @@ -1335,7 +1621,7 @@ return put_user(s->mix.vol[volidx[i]-1], (int *)arg); } } - if (_IOC_DIR(cmd) != (_IOC_READ|_IOC_WRITE)) + if (_SIOC_DIR(cmd) != (_SIOC_READ|_SIOC_WRITE)) return -EINVAL; s->mix.modcnt++; switch (_IOC_NR(cmd)) 
{ @@ -1354,7 +1640,7 @@ } spin_lock_irqsave(&s->lock, flags); wrmixer(s, DSP_MIX_ADCMIXIDX_L, j); - wrmixer(s, DSP_MIX_ADCMIXIDX_R, (j & 1) | (j>>1)); + wrmixer(s, DSP_MIX_ADCMIXIDX_R, (j & 1) | (j>>1) | (j & 0x80)); spin_unlock_irqrestore(&s->lock, flags); return 0; @@ -1371,11 +1657,11 @@ j |= mixtable[i].play; } spin_lock_irqsave(&s->lock, flags); - frobindir(s, DSP_MIX_OUTMIXIDX, 0x1f, j); + wrmixer(s, DSP_MIX_OUTMIXIDX, j); spin_unlock_irqrestore(&s->lock, flags); return 0; - default: + default: i = _IOC_NR(cmd); if (i >= SOUND_MIXER_NRDEVICES || !mixtable[i].type) return -EINVAL; @@ -1402,15 +1688,27 @@ rl = (l < 4 ? 0 : (l - 5) / 3) & 31; rr = (rl >> 2) & 7; wrmixer(s, mixtable[i].left, rl<<3); - maskb(s->iobase + CODEC_CMI_MIXER2, ~0x0e, rr<<1); + if (i == SOUND_MIXER_MIC) + maskb(s->iobase + CODEC_CMI_MIXER2, ~0x0e, rr<<1); break; case MT_5MUTEMONO: - r = l; rl = l < 4 ? 0 : (l - 5) / 3; - rr = rl >> 2; wrmixer(s, mixtable[i].left, rl<<3); - maskb(s->iobase + CODEC_CMI_MIXER2, ~0x0e, rr<<1); + l = rdmixer(s, DSP_MIX_OUTMIXIDX) & ~mixtable[i].play; + r = rl ? mixtable[i].play : 0; + wrmixer(s, DSP_MIX_OUTMIXIDX, l | r); + /* for recording */ + if (i == SOUND_MIXER_MIC) { + if (s->chip_version >= 37) { + rr = rl >> 1; + maskb(s->iobase + CODEC_CMI_MIXER2, ~0x0e, (rr&0x07)<<1); + frobindir(s, DSP_MIX_EXTENSION, ~0x01, rr>>3); + } else { + rr = rl >> 2; + maskb(s->iobase + CODEC_CMI_MIXER2, ~0x0e, rr<<1); + } + } break; case MT_5MUTE: @@ -1418,6 +1716,10 @@ rr = r < 4 ? 0 : (r - 5) / 3; wrmixer(s, mixtable[i].left, rl<<3); wrmixer(s, mixtable[i].right, rr<<3); + l = rdmixer(s, DSP_MIX_OUTMIXIDX); + l &= ~mixtable[i].play; + r = (rl|rr) ? mixtable[i].play : 0; + wrmixer(s, DSP_MIX_OUTMIXIDX, l | r); break; case MT_6MUTE: @@ -1447,12 +1749,16 @@ static int cm_open_mixdev(struct inode *inode, struct file *file) { int minor = MINOR(inode->i_rdev); - struct cm_state *s = devs; + struct list_head *list; + struct cm_state *s; - while (s && s->dev_mixer != minor) - s = s->next; - if (!s) - return -ENODEV; + for (list = devs.next; ; list = list->next) { + if (list == &devs) + return -ENODEV; + s = list_entry(list, struct cm_state, devs); + if (s->dev_mixer == minor) + break; + } VALIDATE_STATE(s); file->private_data = s; return 0; @@ -1490,9 +1796,9 @@ if (s->dma_dac.mapped || !s->dma_dac.ready) return 0; - set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&s->dma_dac.wait, &wait); for (;;) { + __set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&s->lock, flags); count = s->dma_dac.count; spin_unlock_irqrestore(&s->lock, flags); @@ -1522,6 +1828,7 @@ static ssize_t cm_read(struct file *file, char *buffer, size_t count, loff_t *ppos) { struct cm_state *s = (struct cm_state *)file->private_data; + DECLARE_WAITQUEUE(wait, current); ssize_t ret; unsigned long flags; unsigned swptr; @@ -1538,37 +1845,50 @@ return -EFAULT; ret = 0; + add_wait_queue(&s->dma_adc.wait, &wait); while (count > 0) { spin_lock_irqsave(&s->lock, flags); swptr = s->dma_adc.swptr; cnt = s->dma_adc.dmasize-swptr; if (s->dma_adc.count < cnt) cnt = s->dma_adc.count; + if (cnt <= 0) + __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&s->lock, flags); if (cnt > count) cnt = count; if (cnt <= 0) { - start_adc(s); - if (file->f_flags & O_NONBLOCK) - return ret ? 
ret : -EAGAIN; - if (!interruptible_sleep_on_timeout(&s->dma_adc.wait, HZ)) { + if (s->dma_adc.enabled) + start_adc(s); + if (file->f_flags & O_NONBLOCK) { + if (!ret) + ret = -EAGAIN; + goto out; + } + if (!schedule_timeout(HZ)) { printk(KERN_DEBUG "cmpci: read: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n", s->dma_adc.dmasize, s->dma_adc.fragsize, s->dma_adc.count, s->dma_adc.hwptr, s->dma_adc.swptr); spin_lock_irqsave(&s->lock, flags); stop_adc_unlocked(s); - set_dmaadc(s, s->dma_adc.rawphys, s->dma_adc.dmasamples); + set_dmaadc(s, s->dma_adc.dmaaddr, s->dma_adc.dmasamples); /* program sample counts */ set_countadc(s, s->dma_adc.fragsamples); s->dma_adc.count = s->dma_adc.hwptr = s->dma_adc.swptr = 0; spin_unlock_irqrestore(&s->lock, flags); } - if (signal_pending(current)) - return ret ? ret : -ERESTARTSYS; + if (signal_pending(current)) { + if (!ret) + ret = -ERESTARTSYS; + goto out; + } continue; } - if (copy_to_user(buffer, s->dma_adc.rawbuf + swptr, cnt)) - return ret ? ret : -EFAULT; + if (copy_to_user(buffer, s->dma_adc.rawbuf + swptr, cnt)) { + if (!ret) + ret = -EFAULT; + goto out; + } swptr = (swptr + cnt) % s->dma_adc.dmasize; spin_lock_irqsave(&s->lock, flags); s->dma_adc.swptr = swptr; @@ -1576,15 +1896,20 @@ count -= cnt; buffer += cnt; ret += cnt; - start_adc_unlocked(s); + if (s->dma_adc.enabled) + start_adc_unlocked(s); spin_unlock_irqrestore(&s->lock, flags); } +out: + remove_wait_queue(&s->dma_adc.wait, &wait); + set_current_state(TASK_RUNNING); return ret; } static ssize_t cm_write(struct file *file, const char *buffer, size_t count, loff_t *ppos) { struct cm_state *s = (struct cm_state *)file->private_data; + DECLARE_WAITQUEUE(wait, current); ssize_t ret; unsigned long flags; unsigned swptr; @@ -1609,6 +1934,7 @@ return -EFAULT; ret = 0; + add_wait_queue(&s->dma_dac.wait, &wait); while (count > 0) { spin_lock_irqsave(&s->lock, flags); if (s->dma_dac.count < 0) { @@ -1629,33 +1955,42 @@ if (s->dma_dac.count + cnt > s->dma_dac.dmasize) cnt = s->dma_dac.dmasize - s->dma_dac.count; } + if (cnt <= 0) + __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&s->lock, flags); if (cnt > count) cnt = count; if ((s->status & DO_DUAL_DAC) && (cnt > count / 2)) cnt = count / 2; if (cnt <= 0) { - start_dac(s); - if (file->f_flags & O_NONBLOCK) - return ret ? ret : -EAGAIN; - if (!interruptible_sleep_on_timeout(&s->dma_dac.wait, HZ)) { + if (s->dma_dac.enabled) + start_dac(s); + if (file->f_flags & O_NONBLOCK) { + if (!ret) + ret = -EAGAIN; + goto out; + } + if (!schedule_timeout(HZ)) { printk(KERN_DEBUG "cmpci: write: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n", s->dma_dac.dmasize, s->dma_dac.fragsize, s->dma_dac.count, s->dma_dac.hwptr, s->dma_dac.swptr); spin_lock_irqsave(&s->lock, flags); stop_dac_unlocked(s); - set_dmadac(s, s->dma_dac.rawphys, s->dma_dac.dmasamples); + set_dmadac(s, s->dma_dac.dmaaddr, s->dma_dac.dmasamples); /* program sample counts */ set_countdac(s, s->dma_dac.fragsamples); s->dma_dac.count = s->dma_dac.hwptr = s->dma_dac.swptr = 0; if (s->status & DO_DUAL_DAC) { - set_dmadac1(s, s->dma_adc.rawphys, s->dma_adc.dmasamples); + set_dmadac1(s, s->dma_adc.dmaaddr, s->dma_adc.dmasamples); s->dma_adc.count = s->dma_adc.hwptr = s->dma_adc.swptr = 0; } spin_unlock_irqrestore(&s->lock, flags); } - if (signal_pending(current)) - return ret ? 
ret : -ERESTARTSYS; + if (signal_pending(current)) { + if (!ret) + ret = -ERESTARTSYS; + goto out; + } continue; } if (s->status & DO_AC3_SW) { @@ -1664,8 +1999,10 @@ // clip exceeded data, caught by 033 and 037 if (swptr + 2 * cnt > s->dma_dac.dmasize) cnt = (s->dma_dac.dmasize - swptr) / 2; - if ((err = trans_ac3(s, s->dma_dac.rawbuf + swptr, buffer, cnt))) - return err; + if ((err = trans_ac3(s, s->dma_dac.rawbuf + swptr, buffer, cnt))) { + ret = err; + goto out; + } swptr = (swptr + 2 * cnt) % s->dma_dac.dmasize; } else if (s->status & DO_DUAL_DAC) { int i, err; @@ -1676,15 +2013,22 @@ dst1 = (unsigned long *) (s->dma_adc.rawbuf + swptr); // copy left/right sample at one time for (i = 0; i <= cnt / 4; i++) { - if ((err = __get_user(*dst0++, src++))) - return err; - if ((err = __get_user(*dst1++, src++))) - return err; + if ((err = __get_user(*dst0++, src++))) { + ret = err; + goto out; + } + if ((err = __get_user(*dst1++, src++))) { + ret = err; + goto out; + } } swptr = (swptr + cnt) % s->dma_dac.dmasize; } else { - if (copy_from_user(s->dma_dac.rawbuf + swptr, buffer, cnt)) - return ret ? ret : -EFAULT; + if (copy_from_user(s->dma_dac.rawbuf + swptr, buffer, cnt)) { + if (!ret) + ret = -EFAULT; + goto out; + } swptr = (swptr + cnt) % s->dma_dac.dmasize; } spin_lock_irqsave(&s->lock, flags); @@ -1702,8 +2046,12 @@ buffer += cnt; ret += cnt; } - start_dac(s); + if (s->dma_dac.enabled) + start_dac(s); } +out: + remove_wait_queue(&s->dma_dac.wait, &wait); + set_current_state(TASK_RUNNING); return ret; } @@ -1714,10 +2062,16 @@ unsigned int mask = 0; VALIDATE_STATE(s); - if (file->f_mode & FMODE_WRITE) + if (file->f_mode & FMODE_WRITE) { + if (!s->dma_dac.ready && prog_dmabuf(s, 0)) + return 0; poll_wait(file, &s->dma_dac.wait, wait); - if (file->f_mode & FMODE_READ) + } + if (file->f_mode & FMODE_READ) { + if (!s->dma_adc.ready && prog_dmabuf(s, 1)) + return 0; poll_wait(file, &s->dma_adc.wait, wait); + } spin_lock_irqsave(&s->lock, flags); cm_update_ptr(s); if (file->f_mode & FMODE_READ) { @@ -1772,6 +2126,16 @@ return ret; } +#define SNDCTL_SPDIF_COPYRIGHT _SIOW('S', 0, int) // set/reset S/PDIF copy protection +#define SNDCTL_SPDIF_LOOP _SIOW('S', 1, int) // set/reset S/PDIF loop +#define SNDCTL_SPDIF_MONITOR _SIOW('S', 2, int) // set S/PDIF monitor +#define SNDCTL_SPDIF_LEVEL _SIOW('S', 3, int) // set/reset S/PDIF out level +#define SNDCTL_SPDIF_INV _SIOW('S', 4, int) // set/reset S/PDIF in inverse +#define SNDCTL_SPDIF_SEL2 _SIOW('S', 5, int) // set S/PDIF in #2 +#define SNDCTL_SPDIF_VALID _SIOW('S', 6, int) // set S/PDIF valid +#define SNDCTL_SPDIFOUT _SIOW('S', 7, int) // set S/PDIF out +#define SNDCTL_SPDIFIN _SIOW('S', 8, int) // set S/PDIF out + static int cm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { struct cm_state *s = (struct cm_state *)file->private_data; @@ -1955,14 +2319,14 @@ val = 0; if (s->status & DO_DUAL_DAC) { if (file->f_mode & FMODE_WRITE && - (s->enable & CM_ENABLE_CH1) && - (s->enable & CM_ENABLE_CH0)) + (s->enable & ENDAC) && + (s->enable & ENADC)) val |= PCM_ENABLE_OUTPUT; return put_user(val, (int *)arg); } - if (file->f_mode & FMODE_READ && s->enable & CM_ENABLE_CH0) + if (file->f_mode & FMODE_READ && s->enable & ENADC) val |= PCM_ENABLE_INPUT; - if (file->f_mode & FMODE_WRITE && s->enable & CM_ENABLE_CH1) + if (file->f_mode & FMODE_WRITE && s->enable & ENDAC) val |= PCM_ENABLE_OUTPUT; return put_user(val, (int *)arg); @@ -1973,9 +2337,12 @@ if (val & PCM_ENABLE_INPUT) { if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 
1))) return ret; + s->dma_adc.enabled = 1; start_adc(s); - } else + } else { + s->dma_adc.enabled = 0; stop_adc(s); + } } if (file->f_mode & FMODE_WRITE) { if (val & PCM_ENABLE_OUTPUT) { @@ -1985,16 +2352,19 @@ if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1))) return ret; } + s->dma_dac.enabled = 1; start_dac(s); - } else + } else { + s->dma_dac.enabled = 0; stop_dac(s); + } } return 0; case SNDCTL_DSP_GETOSPACE: if (!(file->f_mode & FMODE_WRITE)) return -EINVAL; - if (!(s->enable & CM_ENABLE_CH1) && (val = prog_dmabuf(s, 0)) != 0) + if (!(s->enable & ENDAC) && (val = prog_dmabuf(s, 0)) != 0) return val; spin_lock_irqsave(&s->lock, flags); cm_update_ptr(s); @@ -2008,7 +2378,7 @@ case SNDCTL_DSP_GETISPACE: if (!(file->f_mode & FMODE_READ)) return -EINVAL; - if (!(s->enable & CM_ENABLE_CH0) && (val = prog_dmabuf(s, 1)) != 0) + if (!(s->enable & ENADC) && (val = prog_dmabuf(s, 1)) != 0) return val; spin_lock_irqsave(&s->lock, flags); cm_update_ptr(s); @@ -2155,6 +2525,11 @@ if (file->f_mode & FMODE_READ) { stop_adc(s); s->dma_adc.ready = 0; + if (val & DSP_BIND_SPDIF) { + set_spdifin(s, s->rateadc); + if (!(s->status & DO_SPDIF_OUT)) + val &= ~DSP_BIND_SPDIF; + } } if (file->f_mode & FMODE_WRITE) { stop_dac(s); @@ -2194,7 +2569,51 @@ case SNDCTL_DSP_MAPOUTBUF: case SNDCTL_DSP_SETSYNCRO: return -EINVAL; - + case SNDCTL_SPDIF_COPYRIGHT: + if (get_user(val, (int *)arg)) + return -EFAULT; + set_spdif_copyright(s, val); + return 0; + case SNDCTL_SPDIF_LOOP: + if (get_user(val, (int *)arg)) + return -EFAULT; + set_spdif_loop(s, val); + return 0; + case SNDCTL_SPDIF_MONITOR: + if (get_user(val, (int *)arg)) + return -EFAULT; + set_spdif_monitor(s, val); + return 0; + case SNDCTL_SPDIF_LEVEL: + if (get_user(val, (int *)arg)) + return -EFAULT; + set_spdifout_level(s, val); + return 0; + case SNDCTL_SPDIF_INV: + if (get_user(val, (int *)arg)) + return -EFAULT; + set_spdifin_inverse(s, val); + return 0; + case SNDCTL_SPDIF_SEL2: + if (get_user(val, (int *)arg)) + return -EFAULT; + set_spdifin_channel2(s, val); + return 0; + case SNDCTL_SPDIF_VALID: + if (get_user(val, (int *)arg)) + return -EFAULT; + set_spdifin_valid(s, val); + return 0; + case SNDCTL_SPDIFOUT: + if (get_user(val, (int *)arg)) + return -EFAULT; + set_spdifout(s, val ? s->ratedac : 0); + return 0; + case SNDCTL_SPDIFIN: + if (get_user(val, (int *)arg)) + return -EFAULT; + set_spdifin(s, val ? 
s->rateadc : 0); + return 0; } return mixer_ioctl(s, cmd, arg); } @@ -2202,13 +2621,18 @@ static int cm_open(struct inode *inode, struct file *file) { int minor = MINOR(inode->i_rdev); - struct cm_state *s = devs; + DECLARE_WAITQUEUE(wait, current); unsigned char fmtm = ~0, fmts = 0; + struct list_head *list; + struct cm_state *s; - while (s && ((s->dev_audio ^ minor) & ~0xf)) - s = s->next; - if (!s) - return -ENODEV; + for (list = devs.next; ; list = list->next) { + if (list == &devs) + return -ENODEV; + s = list_entry(list, struct cm_state, devs); + if (!((s->dev_audio ^ minor) & ~0xf)) + break; + } VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ @@ -2218,8 +2642,12 @@ up(&s->open_sem); return -EBUSY; } + add_wait_queue(&s->open_wait, &wait); + __set_current_state(TASK_INTERRUPTIBLE); up(&s->open_sem); - interruptible_sleep_on(&s->open_wait); + schedule(); + remove_wait_queue(&s->open_wait, &wait); + set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; down(&s->open_sem); @@ -2229,17 +2657,22 @@ if ((minor & 0xf) == SND_DEV_DSP16) fmts |= CM_CFMT_16BIT << CM_CFMT_ADCSHIFT; s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0; + s->dma_adc.enabled = 1; set_adc_rate(s, 8000); + // spdif-in is turnned off by default + set_spdifin(s, 0); } if (file->f_mode & FMODE_WRITE) { fmtm &= ~((CM_CFMT_STEREO | CM_CFMT_16BIT) << CM_CFMT_DACSHIFT); if ((minor & 0xf) == SND_DEV_DSP16) fmts |= CM_CFMT_16BIT << CM_CFMT_DACSHIFT; s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0; + s->dma_dac.enabled = 1; set_dac_rate(s, 8000); // clear previous multichannel, spdif, ac3 state set_spdifout(s, 0); - if (s->deviceid == PCI_DEVICE_ID_CMEDIA_CM8738) { + if (s->deviceid == PCI_DEVICE_ID_CMEDIA_CM8738 + || s->deviceid == PCI_DEVICE_ID_CMEDIA_CM8738B) { set_ac3(s, 0); set_dac_channels(s, 1); } @@ -2262,9 +2695,9 @@ if (file->f_mode & FMODE_WRITE) { stop_dac(s); - dealloc_dmabuf(&s->dma_dac); + dealloc_dmabuf(s, &s->dma_dac); if (s->status & DO_DUAL_DAC) - dealloc_dmabuf(&s->dma_adc); + dealloc_dmabuf(s, &s->dma_adc); if (s->status & DO_MULTI_CH) set_dac_channels(s, 0); @@ -2275,7 +2708,7 @@ } if (file->f_mode & FMODE_READ) { stop_adc(s); - dealloc_dmabuf(&s->dma_adc); + dealloc_dmabuf(s, &s->dma_adc); } s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); up(&s->open_sem); @@ -2321,6 +2754,8 @@ cnt = MIDIINBUF - ptr; if (s->midi.icnt < cnt) cnt = s->midi.icnt; + if (cnt <= 0) + __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&s->lock, flags); if (cnt > count) cnt = count; @@ -2331,7 +2766,6 @@ ret = -EAGAIN; break; } - __set_current_state(TASK_INTERRUPTIBLE); schedule(); if (signal_pending(current)) { @@ -2386,8 +2820,10 @@ cnt = MIDIOUTBUF - ptr; if (s->midi.ocnt + cnt > MIDIOUTBUF) cnt = MIDIOUTBUF - s->midi.ocnt; - if (cnt <= 0) + if (cnt <= 0) { + __set_current_state(TASK_INTERRUPTIBLE); cm_handle_midi(s); + } spin_unlock_irqrestore(&s->lock, flags); if (cnt > count) cnt = count; @@ -2398,7 +2834,6 @@ ret = -EAGAIN; break; } - __set_current_state(TASK_INTERRUPTIBLE); schedule(); if (signal_pending(current)) { if (!ret) @@ -2457,13 +2892,18 @@ static int cm_midi_open(struct inode *inode, struct file *file) { int minor = MINOR(inode->i_rdev); - struct cm_state *s = devs; + DECLARE_WAITQUEUE(wait, current); unsigned long flags; + struct list_head *list; + struct cm_state *s; - while (s && s->dev_midi != minor) - s = s->next; - if (!s) - return -ENODEV; + for (list = devs.next; 
; list = list->next) { + if (list == &devs) + return -ENODEV; + s = list_entry(list, struct cm_state, devs); + if (s->dev_midi == minor) + break; + } VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ @@ -2473,8 +2913,12 @@ up(&s->open_sem); return -EBUSY; } + add_wait_queue(&s->open_wait, &wait); + __set_current_state(TASK_INTERRUPTIBLE); up(&s->open_sem); - interruptible_sleep_on(&s->open_wait); + schedule(); + remove_wait_queue(&s->open_wait, &wait); + set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; down(&s->open_sem); @@ -2484,7 +2928,7 @@ s->midi.ird = s->midi.iwr = s->midi.icnt = 0; s->midi.ord = s->midi.owr = s->midi.ocnt = 0; /* enable MPU-401 */ - maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, 4); + maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, UART_EN); outb(0xff, s->iomidi+1); /* reset command */ if (!(inb(s->iomidi+1) & 0x80)) inb(s->iomidi); @@ -2507,7 +2951,6 @@ spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); up(&s->open_sem); - MOD_INC_USE_COUNT; return 0; } @@ -2522,9 +2965,9 @@ lock_kernel(); if (file->f_mode & FMODE_WRITE) { - __set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&s->midi.owait, &wait); for (;;) { + __set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&s->lock, flags); count = s->midi.ocnt; spin_unlock_irqrestore(&s->lock, flags); @@ -2532,12 +2975,8 @@ break; if (signal_pending(current)) break; - if (file->f_flags & O_NONBLOCK) { - remove_wait_queue(&s->midi.owait, &wait); - set_current_state(TASK_RUNNING); - unlock_kernel(); - return -EBUSY; - } + if (file->f_flags & O_NONBLOCK) + break; tmo = (count * HZ) / 3100; if (!schedule_timeout(tmo ? : 1) && tmo) printk(KERN_DEBUG "cmpci: midi timed out??\n"); @@ -2554,7 +2993,7 @@ if (!(inb(s->iomidi+1) & 0x80)) inb(s->iomidi); /* disable MPU-401 */ - maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~4, 0); + maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~UART_EN, 0); } spin_unlock_irqrestore(&s->lock, flags); up(&s->open_sem); @@ -2675,12 +3114,17 @@ static int cm_dmfm_open(struct inode *inode, struct file *file) { int minor = MINOR(inode->i_rdev); - struct cm_state *s = devs; + DECLARE_WAITQUEUE(wait, current); + struct list_head *list; + struct cm_state *s; - while (s && s->dev_dmfm != minor) - s = s->next; - if (!s) - return -ENODEV; + for (list = devs.next; ; list = list->next) { + if (list == &devs) + return -ENODEV; + s = list_entry(list, struct cm_state, devs); + if (s->dev_dmfm == minor) + break; + } VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ @@ -2690,8 +3134,12 @@ up(&s->open_sem); return -EBUSY; } + add_wait_queue(&s->open_wait, &wait); + __set_current_state(TASK_INTERRUPTIBLE); up(&s->open_sem); - interruptible_sleep_on(&s->open_wait); + schedule(); + remove_wait_queue(&s->open_wait, &wait); + set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; down(&s->open_sem); @@ -2705,7 +3153,6 @@ outb(1, s->iosynth+3); /* enable OPL3 */ s->open_mode |= FMODE_DMFM; up(&s->open_sem); - MOD_INC_USE_COUNT; return 0; } @@ -2836,11 +3283,17 @@ #else static int use_line_as_bass; #endif +#ifdef CONFIG_SOUND_CMPCI_MIC_BASS +static int use_mic_as_bass = 1; +#else +static int use_mic_as_bass = 0; +#endif #ifdef CONFIG_SOUND_CMPCI_JOYSTICK static int joystick = 1; #else static int joystick; #endif +static int mic_boost = 0; MODULE_PARM(mpuio, "i"); MODULE_PARM(fmio, "i"); MODULE_PARM(spdif_inverse, "i"); @@ -2848,7 +3301,9 @@ 
MODULE_PARM(speakers, "i"); MODULE_PARM(use_line_as_rear, "i"); MODULE_PARM(use_line_as_bass, "i"); +MODULE_PARM(use_mic_as_bass, "i"); MODULE_PARM(joystick, "i"); +MODULE_PARM(mic_boost, "i"); MODULE_PARM_DESC(mpuio, "(0x330, 0x320, 0x310, 0x300) Base of MPU-401, 0 to disable"); MODULE_PARM_DESC(fmio, "(0x388, 0x3C8, 0x3E0) Base of OPL3, 0 to disable"); MODULE_PARM_DESC(spdif_inverse, "(1/0) Invert S/PDIF-in signal"); @@ -2856,24 +3311,15 @@ MODULE_PARM_DESC(speakers, "(2-6) Number of speakers you connect"); MODULE_PARM_DESC(use_line_as_rear, "(1/0) Use line-in jack as rear-out"); MODULE_PARM_DESC(use_line_as_bass, "(1/0) Use line-in jack as bass/center"); +MODULE_PARM_DESC(use_mic_as_bass, "(1/0) Use mic-in jack as bass/center"); MODULE_PARM_DESC(joystick, "(1/0) Enable joystick interface, still need joystick driver"); +MODULE_PARM_DESC(mic_boost, "(1/0) Enable microphone boost"); -static struct pci_device_id cmpci_pci_tbl[] = { - { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0 } -}; -MODULE_DEVICE_TABLE(pci, cmpci_pci_tbl); - -void initialize_chip(struct pci_dev *pcidev) +static int __devinit cm_probe(struct pci_dev *pcidev, const struct pci_device_id *pciid) { struct cm_state *s; mm_segment_t fs; - int i, val; + int i, val, ret; unsigned char reg_mask = 0; struct { unsigned short deviceid; @@ -2886,277 +3332,282 @@ { PCI_DEVICE_ID_CMEDIA_CM8738B, "CM8738B" }, }; char *devicename = "unknown"; - { - if (pci_enable_device(pcidev)) - return; - if (pcidev->irq == 0) - return; - s = kmalloc(sizeof(*s), GFP_KERNEL); - if (!s) { - printk(KERN_WARNING "cmpci: out of memory\n"); - return; - } - /* search device name */ - for (i = 0; i < sizeof(devicetable) / sizeof(devicetable[0]); i++) { - if (devicetable[i].deviceid == pcidev->device) - { - devicename = devicetable[i].devicename; - break; - } - } - memset(s, 0, sizeof(struct cm_state)); - init_waitqueue_head(&s->dma_adc.wait); - init_waitqueue_head(&s->dma_dac.wait); - init_waitqueue_head(&s->open_wait); - init_waitqueue_head(&s->midi.iwait); - init_waitqueue_head(&s->midi.owait); - init_MUTEX(&s->open_sem); - spin_lock_init(&s->lock); - s->magic = CM_MAGIC; - s->iobase = pci_resource_start(pcidev, 0); - s->iosynth = fmio; - s->iomidi = mpuio; - s->status = 0; - /* range check */ - if (speakers < 2) - speakers = 2; - else if (speakers > 6) - speakers = 6; - s->speakers = speakers; - if (s->iobase == 0) - return; - s->irq = pcidev->irq; - - if (!request_region(s->iobase, CM_EXTENT_CODEC, "cmpci")) { - printk(KERN_ERR "cmpci: io ports %#x-%#x in use\n", s->iobase, s->iobase+CM_EXTENT_CODEC-1); - goto err_region5; + if ((ret = pci_enable_device(pcidev))) + return ret; + if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_IO)) + return -ENODEV; + if (pcidev->irq == 0) + return -ENODEV; + i = pci_set_dma_mask(pcidev, 0xffffffff); + if (i) { + printk(KERN_WARNING "cmpci: architecture does not support 32bit PCI busmaster DMA\n"); + return i; + } + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (!s) { + printk(KERN_WARNING "cmpci: out of memory\n"); + return -ENOMEM; + } + /* search device name */ + for (i = 0; i < sizeof(devicetable) / sizeof(devicetable[0]); i++) { + if (devicetable[i].deviceid == pcidev->device) + { + devicename = devicetable[i].devicename; + break; } + } + memset(s, 0, sizeof(struct cm_state)); + 
init_waitqueue_head(&s->dma_adc.wait); + init_waitqueue_head(&s->dma_dac.wait); + init_waitqueue_head(&s->open_wait); + init_waitqueue_head(&s->midi.iwait); + init_waitqueue_head(&s->midi.owait); + init_MUTEX(&s->open_sem); + spin_lock_init(&s->lock); + s->magic = CM_MAGIC; + s->dev = pcidev; + s->iobase = pci_resource_start(pcidev, 0); + s->iosynth = fmio; + s->iomidi = mpuio; + s->gameport.io = 0x200; + s->status = 0; + /* range check */ + if (speakers < 2) + speakers = 2; + else if (speakers > 6) + speakers = 6; + s->speakers = speakers; + if (s->iobase == 0) + return -ENODEV; + s->irq = pcidev->irq; + + if (!request_region(s->iobase, CM_EXTENT_CODEC, "cmpci")) { + printk(KERN_ERR "cmpci: io ports %#x-%#x in use\n", s->iobase, s->iobase+CM_EXTENT_CODEC-1); + ret = -EBUSY; + goto err_region5; + } #ifdef CONFIG_SOUND_CMPCI_MIDI - /* disable MPU-401 */ - maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0x04, 0); - if (s->iomidi) { - if (!request_region(s->iomidi, CM_EXTENT_MIDI, "cmpci Midi")) { - printk(KERN_ERR "cmpci: io ports %#x-%#x in use\n", s->iomidi, s->iomidi+CM_EXTENT_MIDI-1); + /* disable MPU-401 */ + maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0x04, 0); + if (s->iomidi) { + if (!request_region(s->iomidi, CM_EXTENT_MIDI, "cmpci Midi")) { + printk(KERN_ERR "cmpci: io ports %#x-%#x in use\n", s->iomidi, s->iomidi+CM_EXTENT_MIDI-1); + s->iomidi = 0; + } else { + /* set IO based at 0x330 */ + switch (s->iomidi) { + case 0x330: + reg_mask = 0; + break; + case 0x320: + reg_mask = 0x20; + break; + case 0x310: + reg_mask = 0x40; + break; + case 0x300: + reg_mask = 0x60; + break; + default: s->iomidi = 0; - } else { - /* set IO based at 0x330 */ - switch (s->iomidi) { - case 0x330: - reg_mask = 0; - break; - case 0x320: - reg_mask = 0x20; - break; - case 0x310: - reg_mask = 0x40; - break; - case 0x300: - reg_mask = 0x60; - break; - default: - s->iomidi = 0; - break; - } - outb((inb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3) & ~0x60) | reg_mask, s->iobase + CODEC_CMI_LEGACY_CTRL + 3); - /* enable MPU-401 */ - if (s->iomidi) { - maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, 0x04); - } - } + break; } + outb((inb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3) & ~0x60) | reg_mask, s->iobase + CODEC_CMI_LEGACY_CTRL + 3); + /* enable MPU-401 */ + if (s->iomidi) { + maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, 0x04); + } + } + } #endif #ifdef CONFIG_SOUND_CMPCI_FM - /* disable FM */ - maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~8, 0); - if (s->iosynth) { - if (!request_region(s->iosynth, CM_EXTENT_SYNTH, "cmpci FM")) { - printk(KERN_ERR "cmpci: io ports %#x-%#x in use\n", s->iosynth, s->iosynth+CM_EXTENT_SYNTH-1); + /* disable FM */ + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~8, 0); + if (s->iosynth) { + if (!request_region(s->iosynth, CM_EXTENT_SYNTH, "cmpci FM")) { + printk(KERN_ERR "cmpci: io ports %#x-%#x in use\n", s->iosynth, s->iosynth+CM_EXTENT_SYNTH-1); + s->iosynth = 0; + } else { + /* set IO based at 0x388 */ + switch (s->iosynth) { + case 0x388: + reg_mask = 0; + break; + case 0x3C8: + reg_mask = 0x01; + break; + case 0x3E0: + reg_mask = 0x02; + break; + case 0x3E8: + reg_mask = 0x03; + break; + default: s->iosynth = 0; - } else { - /* set IO based at 0x388 */ - switch (s->iosynth) { - case 0x388: - reg_mask = 0; - break; - case 0x3C8: - reg_mask = 0x01; - break; - case 0x3E0: - reg_mask = 0x02; - break; - case 0x3E8: - reg_mask = 0x03; - break; - default: - s->iosynth = 0; - break; - } - maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~0x03, reg_mask); - /* enable FM */ - if (s->iosynth) { - maskb(s->iobase + 
CODEC_CMI_MISC_CTRL + 2, ~0, 8); - } - } + break; } + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~0x03, reg_mask); + /* enable FM */ + if (s->iosynth) { + maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, 8); + } + } + } #endif - /* enable joystick */ - if (joystick) + /* enable joystick */ + if (joystick) { + if (s->gameport.io && !request_region(s->gameport.io, CM_EXTENT_GAME, "cmpci GAME")) { + printk(KERN_ERR "cmpci: gameport io ports in use\n"); + s->gameport.io = 0; + } else maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, 0x02); - else - maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0x02, 0); - /* initialize codec registers */ - outb(0, s->iobase + CODEC_CMI_INT_HLDCLR + 2); /* disable ints */ - outb(0, s->iobase + CODEC_CMI_FUNCTRL0 + 2); /* disable channels */ - /* reset mixer */ - wrmixer(s, DSP_MIX_DATARESETIDX, 0); - - /* request irq */ - if (request_irq(s->irq, cm_interrupt, SA_SHIRQ, "cmpci", s)) { - printk(KERN_ERR "cmpci: irq %u in use\n", s->irq); - goto err_irq; - } - printk(KERN_INFO "cmpci: found %s adapter at io %#06x irq %u\n", - devicename, s->iobase, s->irq); - /* register devices */ - if ((s->dev_audio = register_sound_dsp(&cm_audio_fops, -1)) < 0) - goto err_dev1; - if ((s->dev_mixer = register_sound_mixer(&cm_mixer_fops, -1)) < 0) - goto err_dev2; + } else { + maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0x02, 0); + s->gameport.io = 0; + } + /* initialize codec registers */ + outb(0, s->iobase + CODEC_CMI_INT_HLDCLR + 2); /* disable ints */ + outb(0, s->iobase + CODEC_CMI_FUNCTRL0 + 2); /* disable channels */ + /* reset mixer */ + wrmixer(s, DSP_MIX_DATARESETIDX, 0); + + /* request irq */ + if ((ret = request_irq(s->irq, cm_interrupt, SA_SHIRQ, "cmpci", s))) { + printk(KERN_ERR "cmpci: irq %u in use\n", s->irq); + goto err_irq; + } + printk(KERN_INFO "cmpci: found %s adapter at io %#x irq %u\n", + devicename, s->iobase, s->irq); + /* register devices */ + if ((s->dev_audio = register_sound_dsp(&cm_audio_fops, -1)) < 0) { + ret = s->dev_audio; + goto err_dev1; + } + if ((s->dev_mixer = register_sound_mixer(&cm_mixer_fops, -1)) < 0) { + ret = s->dev_mixer; + goto err_dev2; + } #ifdef CONFIG_SOUND_CMPCI_MIDI - if ((s->dev_midi = register_sound_midi(&cm_midi_fops, -1)) < 0) - goto err_dev3; + if ((s->dev_midi = register_sound_midi(&cm_midi_fops, -1)) < 0) { + ret = s->dev_midi; + goto err_dev3; + } #endif #ifdef CONFIG_SOUND_CMPCI_FM - if ((s->dev_dmfm = register_sound_special(&cm_dmfm_fops, 15 /* ?? 
*/)) < 0) - goto err_dev4; -#endif - pci_set_master(pcidev); /* enable bus mastering */ - /* initialize the chips */ - fs = get_fs(); - set_fs(KERNEL_DS); - /* set mixer output */ - frobindir(s, DSP_MIX_OUTMIXIDX, 0x1f, 0x1f); - /* set mixer input */ - val = SOUND_MASK_LINE|SOUND_MASK_SYNTH|SOUND_MASK_CD|SOUND_MASK_MIC; - mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long)&val); - for (i = 0; i < sizeof(initvol)/sizeof(initvol[0]); i++) { - val = initvol[i].vol; - mixer_ioctl(s, initvol[i].mixch, (unsigned long)&val); - } - /* use channel 0 for record, channel 1 for play */ - maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~2, 1); - s->deviceid = pcidev->device; - - if (pcidev->device == PCI_DEVICE_ID_CMEDIA_CM8738) { - - /* chip version and hw capability check */ - s->chip_version = query_chip(s); - printk(KERN_INFO "cmpci: chip version = 0%d\n", s->chip_version); - - /* seet SPDIF-in inverse before enable SPDIF loop */ - if (spdif_inverse) { - /* turn on spdif-in inverse */ - maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, 1); - printk(KERN_INFO "cmpci: Inverse SPDIF-in\n"); - } else { - /* turn off spdif-ininverse */ - maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~1, 0); - } - - /* enable SPDIF loop */ - if (spdif_loop) { - s->status |= DO_SPDIF_LOOP; - /* turn on spdif-in to spdif-out */ - maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, 0x80); - printk(KERN_INFO "cmpci: Enable SPDIF loop\n"); - } else { - s->status &= ~DO_SPDIF_LOOP; - /* turn off spdif-in to spdif-out */ - maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0x80, 0); - } - if (use_line_as_rear) { - s->capability |= CAN_LINE_AS_REAR; - s->status |= DO_LINE_AS_REAR; - maskb(s->iobase + CODEC_CMI_MIXER1, ~0, 0x20); + if ((s->dev_dmfm = register_sound_special(&cm_dmfm_fops, 15 /* ?? */)) < 0) { + ret = s->dev_dmfm; + goto err_dev4; + } +#endif + pci_set_master(pcidev); /* enable bus mastering */ + /* initialize the chips */ + fs = get_fs(); + set_fs(KERNEL_DS); + /* set mixer output */ + frobindir(s, DSP_MIX_OUTMIXIDX, 0x1f, 0x1f); + /* set mixer input */ + val = SOUND_MASK_LINE|SOUND_MASK_SYNTH|SOUND_MASK_CD|SOUND_MASK_MIC; + mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long)&val); + for (i = 0; i < sizeof(initvol)/sizeof(initvol[0]); i++) { + val = initvol[i].vol; + mixer_ioctl(s, initvol[i].mixch, (unsigned long)&val); + } + set_fs(fs); + /* use channel 1 for record, channel 0 for play */ + maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, CHADC1); + /* turn off VMIC3 - mic boost */ + if (mic_boost) + maskb(s->iobase + CODEC_CMI_MIXER2, ~1, 0); + else + maskb(s->iobase + CODEC_CMI_MIXER2, ~0, 1); + s->deviceid = pcidev->device; + + if (pcidev->device == PCI_DEVICE_ID_CMEDIA_CM8738 + || pcidev->device == PCI_DEVICE_ID_CMEDIA_CM8738B) { + + /* chip version and hw capability check */ + s->chip_version = query_chip(s); + printk(KERN_INFO "cmpci: chip version = 0%d\n", s->chip_version); + + /* seet SPDIF-in inverse before enable SPDIF loop */ + set_spdifin_inverse(s, spdif_inverse); + + /* enable SPDIF loop */ + set_spdif_loop(s, spdif_loop); + + /* use SPDIF in #1 */ + set_spdifin_channel2(s, 0); + + if (use_line_as_rear) { + s->capability |= CAN_LINE_AS_REAR; + s->status |= DO_LINE_AS_REAR; + maskb(s->iobase + CODEC_CMI_MIXER1, ~0, SPK4); + } else + maskb(s->iobase + CODEC_CMI_MIXER1, ~SPK4, 0); + if (s->chip_version >= 39) { + if (use_line_as_bass) { + s->capability |= CAN_LINE_AS_BASS; + s->status |= DO_LINE_AS_BASS; + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~0, CB2LIN); } else - maskb(s->iobase + CODEC_CMI_MIXER1, ~0x20, 0); - if 
(s->chip_version >= 39) { - if (use_line_as_bass) { - s->capability |= CAN_LINE_AS_BASS; - s->status |= DO_LINE_AS_BASS; - maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~0, 0x60); - } else - maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~0x60, 0); - } - } else { - /* 8338 will fall here */ - s->max_channels = 2; + maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~CB2LIN, 0); + if (use_mic_as_bass) { + s->capability |= CAN_MIC_AS_BASS; + s->status |= DO_MIC_AS_BASS; + maskb(s->iobase + CODEC_CMI_MISC, ~0, 0x04); + } else + maskb(s->iobase + CODEC_CMI_MISC, ~0x04, 0); } - /* queue it for later freeing */ - s->next = devs; - devs = s; - return; + } else { + s->chip_version = 0; + /* 8338 will fall here */ + s->max_channels = 2; + } +#ifdef CONFIG_SOUND_CMPCI_JOYSTICK + /* register gameport */ + if (joystick) + gameport_register_port(&s->gameport); +#endif + /* store it in the driver field */ + pci_set_drvdata(pcidev, s); + /* put it into driver list */ + list_add_tail(&s->devs, &devs); + /* increment devindex */ + if (devindex < NR_DEVICE-1) + devindex++; + return 0; #ifdef CONFIG_SOUND_CMPCI_FM - unregister_sound_special(s->dev_dmfm); - err_dev4: + unregister_sound_special(s->dev_dmfm); +err_dev4: #endif #ifdef CONFIG_SOUND_CMPCI_MIDI - unregister_sound_midi(s->dev_midi); - err_dev3: + unregister_sound_midi(s->dev_midi); +err_dev3: +#endif + unregister_sound_mixer(s->dev_mixer); +err_dev2: + unregister_sound_dsp(s->dev_audio); +err_dev1: + printk(KERN_ERR "cmpci: cannot register misc device\n"); + free_irq(s->irq, s); +err_irq: + if (s->gameport.io) { +#ifdef CONFIG_SOUND_CMPCI_JOYSTICK + gameport_unregister_port(&s->gameport); #endif - unregister_sound_mixer(s->dev_mixer); - err_dev2: - unregister_sound_dsp(s->dev_audio); - err_dev1: - printk(KERN_ERR "cmpci: cannot register misc device\n"); - free_irq(s->irq, s); - err_irq: + release_region(s->gameport.io, CM_EXTENT_GAME); + } #ifdef CONFIG_SOUND_CMPCI_FM - if (s->iosynth) release_region(s->iosynth, CM_EXTENT_SYNTH); + if (s->iosynth) release_region(s->iosynth, CM_EXTENT_SYNTH); #endif #ifdef CONFIG_SOUND_CMPCI_MIDI - if (s->iomidi) release_region(s->iomidi, CM_EXTENT_MIDI); -#endif - release_region(s->iobase, CM_EXTENT_CODEC); - err_region5: - kfree(s); - } - if (!devs) { - if (wavetable_mem) - free_pages(wavetable_mem, 20-PAGE_SHIFT); - return; - } - return; -} - -static int __init init_cmpci(void) -{ - struct pci_dev *pcidev = NULL; - int index = 0; - -#ifdef CONFIG_PCI - if (!pci_present()) /* No PCI bus in this machine! 
*/ + if (s->iomidi) release_region(s->iomidi, CM_EXTENT_MIDI); #endif - return -ENODEV; - printk(KERN_INFO "cmpci: version $Revision: 5.64 $ time " __TIME__ " " __DATE__ "\n"); - - while (index < NR_DEVICE && ( - (pcidev = pci_find_device(PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738, pcidev)))) { - initialize_chip(pcidev); - index++; - } - while (index < NR_DEVICE && ( - (pcidev = pci_find_device(PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A, pcidev)))) { - initialize_chip(pcidev); - index++; - } - while (index < NR_DEVICE && ( - (pcidev = pci_find_device(PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B, pcidev)))) { - initialize_chip(pcidev); - index++; - } - return 0; + release_region(s->iobase, CM_EXTENT_CODEC); +err_region5: + kfree(s); + return ret; } /* --------------------------------------------------------------------- */ @@ -3165,42 +3616,77 @@ MODULE_DESCRIPTION("CM8x38 Audio Driver"); MODULE_LICENSE("GPL"); - -static void __exit cleanup_cmpci(void) +static void __devinit cm_remove(struct pci_dev *dev) { - struct cm_state *s; + struct cm_state *s = pci_get_drvdata(dev); - while ((s = devs)) { - devs = devs->next; - outb(0, s->iobase + CODEC_CMI_INT_HLDCLR + 2); /* disable ints */ - synchronize_irq(); - outb(0, s->iobase + CODEC_CMI_FUNCTRL0 + 2); /* disable channels */ - free_irq(s->irq, s); + if (!s) + return; + list_del(&s->devs); + outb(0, s->iobase + CODEC_CMI_INT_HLDCLR + 2); /* disable ints */ + synchronize_irq(); + outb(0, s->iobase + CODEC_CMI_FUNCTRL0 + 2); /* disable channels */ + free_irq(s->irq, s); - /* reset mixer */ - wrmixer(s, DSP_MIX_DATARESETIDX, 0); + /* reset mixer */ + wrmixer(s, DSP_MIX_DATARESETIDX, 0); - release_region(s->iobase, CM_EXTENT_CODEC); + if (s->gameport.io) { +#ifdef CONFIG_SOUND_CMPCI_JOYSTICK + gameport_unregister_port(&s->gameport); +#endif + release_region(s->gameport.io, CM_EXTENT_GAME); + } + release_region(s->iobase, CM_EXTENT_CODEC); #ifdef CONFIG_SOUND_CMPCI_MIDI - if (s->iomidi) release_region(s->iomidi, CM_EXTENT_MIDI); + if (s->iomidi) release_region(s->iomidi, CM_EXTENT_MIDI); #endif #ifdef CONFIG_SOUND_CMPCI_FM - if (s->iosynth) release_region(s->iosynth, CM_EXTENT_SYNTH); + if (s->iosynth) release_region(s->iosynth, CM_EXTENT_SYNTH); #endif - unregister_sound_dsp(s->dev_audio); - unregister_sound_mixer(s->dev_mixer); + unregister_sound_dsp(s->dev_audio); + unregister_sound_mixer(s->dev_mixer); #ifdef CONFIG_SOUND_CMPCI_MIDI - unregister_sound_midi(s->dev_midi); + unregister_sound_midi(s->dev_midi); #endif #ifdef CONFIG_SOUND_CMPCI_FM - unregister_sound_special(s->dev_dmfm); + unregister_sound_special(s->dev_dmfm); #endif - kfree(s); - } - if (wavetable_mem) - free_pages(wavetable_mem, 20-PAGE_SHIFT); + kfree(s); + pci_set_drvdata(dev, NULL); +} + +static struct pci_device_id id_table[] __devinitdata = { + { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, + { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, + { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, + { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, id_table); + +static struct pci_driver cm_driver = { + name: "cmpci", + id_table: id_table, + probe: cm_probe, + remove: cm_remove +}; + +static int __init init_cmpci(void) +{ + if (!pci_present()) /* No PCI bus in this machine! 
*/ + return -ENODEV; + printk(KERN_INFO "cmpci: version $Revision: 6.36 $ time " __TIME__ " " __DATE__ "\n"); + return pci_module_init(&cm_driver); +} + +static void __exit cleanup_cmpci(void) +{ printk(KERN_INFO "cmpci: unloading\n"); + pci_unregister_driver(&cm_driver); } module_init(init_cmpci); module_exit(cleanup_cmpci); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/Config.in linux.22-ac2/drivers/sound/Config.in --- linux.vanilla/drivers/sound/Config.in 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/sound/Config.in 2003-08-28 17:03:59.000000000 +0100 @@ -7,6 +7,12 @@ # Prompt user for primary drivers. dep_tristate ' ALi5455 audio support' CONFIG_SOUND_ALI5455 $CONFIG_SOUND $CONFIG_PCI +if [ "$CONFIG_SOUND_ALI5455" = "y" -o "$CONFIG_SOUND_ALI5455" = "m" ]; then + bool ' Enable Codec SPDIF OUT ( Pcm Out Share )' CONFIG_SOUND_ALI5455_CODECSPDIFOUT_PCMOUTSHARE + bool ' Enable Codec SPDIF OUT ( Codec Independent DMA )' CONFIG_SOUND_ALI5455_CODECSPDIFOUT_CODECINDEPENDENTDMA + bool ' Enable Controller SPDIF OUT ( Pcm Out Share )' CONFIG_SOUND_ALI5455_CONTROLLERSPDIFOUT_PCMOUTSHARE + bool ' Enable Controller SPDIF OUT ( Controller Independent DMA )' CONFIG_SOUND_ALI5455_CONTROLLERSPDIFOUT_CONTROLLERINDEPENDENTDMA +fi dep_tristate ' BT878 audio dma' CONFIG_SOUND_BT878 $CONFIG_SOUND $CONFIG_PCI dep_tristate ' C-Media PCI (CMI8338/8738)' CONFIG_SOUND_CMPCI $CONFIG_SOUND $CONFIG_PCI if [ "$CONFIG_SOUND_CMPCI" = "y" -o "$CONFIG_SOUND_CMPCI" = "m" ]; then @@ -226,6 +232,10 @@ dep_tristate ' TV card (bt848) mixer support' CONFIG_SOUND_TVMIXER $CONFIG_SOUND $CONFIG_I2C +dep_tristate ' AD1980 front/back switch plugin' CONFIG_SOUND_AD1980 $CONFIG_SOUND + +dep_tristate ' Wolfson Touchscreen/BMON plugin' CONFIG_SOUND_WM97XX $CONFIG_SOUND + # A cross directory dependence. The sound modules will need gameport.o compiled in, # but it resides in the drivers/char/joystick directory. This define_tristate takes # care of that. 
--Vojtech diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/i810_audio.c linux.22-ac2/drivers/sound/i810_audio.c --- linux.vanilla/drivers/sound/i810_audio.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/sound/i810_audio.c 2003-08-09 16:07:52.000000000 +0100 @@ -3266,6 +3266,8 @@ free_irq(card->irq, devs); release_region(card->iobase, 64); release_region(card->ac97base, 256); + pci_free_consistent(pci_dev, sizeof(struct i810_channel)*NR_HW_CH, + card->channel, card->chandma); if (card->use_mmio) { iounmap(card->ac97base_mmio); iounmap(card->iobase_mmio); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/Makefile linux.22-ac2/drivers/sound/Makefile --- linux.vanilla/drivers/sound/Makefile 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/sound/Makefile 2003-08-28 17:04:21.000000000 +0100 @@ -81,6 +81,8 @@ obj-$(CONFIG_SOUND_RME96XX) += rme96xx.o obj-$(CONFIG_SOUND_BT878) += btaudio.o obj-$(CONFIG_SOUND_IT8172) += ite8172.o ac97_codec.o +obj-$(CONFIG_SOUND_AD1980) += ac97_plugin_ad1980.o +obj-$(CONFIG_SOUND_WM97XX) += ac97_plugin_wm97xx.o ifeq ($(CONFIG_MIDI_EMU10K1),y) obj-$(CONFIG_SOUND_EMU10K1) += sound.o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/opl3sa2.c linux.22-ac2/drivers/sound/opl3sa2.c --- linux.vanilla/drivers/sound/opl3sa2.c 2002-08-03 16:08:27.000000000 +0100 +++ linux.22-ac2/drivers/sound/opl3sa2.c 2003-08-29 12:33:52.000000000 +0100 @@ -390,6 +390,7 @@ static int opl3sa2_mixer_ioctl(int dev, unsigned int cmd, caddr_t arg) { int cmdf = cmd & 0xff; + int val; opl3sa2_state_t* devc = (opl3sa2_state_t *) mixer_devs[dev]->devc; @@ -416,18 +417,19 @@ arg_to_vol_stereo(*(unsigned int*)arg, &devc->volume_l, &devc->volume_r); opl3sa2_set_volume(devc, devc->volume_l, devc->volume_r); - *(int*)arg = ret_vol_stereo(devc->volume_l, devc->volume_r); - return 0; + val = ret_vol_stereo(devc->volume_l, devc->volume_r); + break; case SOUND_MIXER_MIC: arg_to_vol_mono(*(unsigned int*)arg, &devc->mic); opl3sa2_set_mic(devc, devc->mic); - *(int*)arg = ret_vol_mono(devc->mic); - return 0; - + val = ret_vol_mono(devc->mic); + break; + default: return -EINVAL; } + return put_user(val, (int *)arg); } else { /* @@ -435,36 +437,40 @@ */ switch (cmdf) { case SOUND_MIXER_DEVMASK: - *(int*)arg = (SOUND_MASK_VOLUME | SOUND_MASK_MIC); - return 0; + val = (SOUND_MASK_VOLUME | SOUND_MASK_MIC); + break; case SOUND_MIXER_STEREODEVS: - *(int*)arg = SOUND_MASK_VOLUME; - return 0; + val = SOUND_MASK_VOLUME; + break; case SOUND_MIXER_RECMASK: /* No recording devices */ - return (*(int*)arg = 0); + val = 0; + break; case SOUND_MIXER_CAPS: - *(int*)arg = SOUND_CAP_EXCL_INPUT; + val = SOUND_CAP_EXCL_INPUT; + break; return 0; case SOUND_MIXER_RECSRC: /* No recording source */ - return (*(int*)arg = 0); + val = 0; + break; case SOUND_MIXER_VOLUME: - *(int*)arg = ret_vol_stereo(devc->volume_l, devc->volume_r); - return 0; + val = ret_vol_stereo(devc->volume_l, devc->volume_r); + break; case SOUND_MIXER_MIC: - *(int*)arg = ret_vol_mono(devc->mic); - return 0; + val = ret_vol_mono(devc->mic); + break; default: return -EINVAL; } + return put_user(val, (int *)arg); } } /* opl3sa2_mixer_ioctl end */ @@ -473,6 +479,7 @@ static int opl3sa3_mixer_ioctl(int dev, unsigned int cmd, caddr_t arg) { int cmdf = cmd & 0xff; + int val; opl3sa2_state_t* devc = (opl3sa2_state_t *) mixer_devs[dev]->devc; @@ -497,26 +504,27 @@ arg_to_vol_stereo(*(unsigned int*)arg, &devc->bass_l, 
&devc->bass_r); opl3sa3_set_bass(devc, devc->bass_l, devc->bass_r); - *(int*)arg = ret_vol_stereo(devc->bass_l, devc->bass_r); - return 0; - + val = ret_vol_stereo(devc->bass_l, devc->bass_r); + break; + case SOUND_MIXER_TREBLE: arg_to_vol_stereo(*(unsigned int*)arg, &devc->treble_l, &devc->treble_r); opl3sa3_set_treble(devc, devc->treble_l, devc->treble_r); - *(int*)arg = ret_vol_stereo(devc->treble_l, devc->treble_r); - return 0; + val = ret_vol_stereo(devc->treble_l, devc->treble_r); + break; case SOUND_MIXER_DIGITAL1: arg_to_vol_stereo(*(unsigned int*)arg, &devc->wide_l, &devc->wide_r); opl3sa3_set_wide(devc, devc->wide_l, devc->wide_r); - *(int*)arg = ret_vol_stereo(devc->wide_l, devc->wide_r); - return 0; + val = ret_vol_stereo(devc->wide_l, devc->wide_r); + break; default: return -EINVAL; } + return put_user(val, (int *)arg); } else { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/trident.c linux.22-ac2/drivers/sound/trident.c --- linux.vanilla/drivers/sound/trident.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/sound/trident.c 2003-08-28 22:10:19.000000000 +0100 @@ -3015,6 +3015,7 @@ data = inl(TRID_REG(card, address)); + releasecodecaccess(card); spin_unlock_irqrestore(&card->lock, flags); return ((u16) (data >> 16)); @@ -3156,9 +3157,9 @@ unsigned char ch; char temp; - struct pci_dev *pci_dev = NULL; + struct pci_dev *pci_dev; - pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pci_dev); + pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); if (pci_dev == NULL) return; pci_read_config_byte(pci_dev, 0x61, &temp); @@ -3372,16 +3373,16 @@ static int ali_close_multi_channels(void) { char temp = 0; - struct pci_dev *pci_dev = NULL; + struct pci_dev *pci_dev; - pci_dev = pci_find_device(PCI_VENDOR_ID_AL,PCI_DEVICE_ID_AL_M1533, pci_dev); + pci_dev = pci_find_device(PCI_VENDOR_ID_AL,PCI_DEVICE_ID_AL_M1533, NULL); if (pci_dev == NULL) return -1; pci_read_config_byte(pci_dev, 0x59, &temp); temp &= ~0x80; pci_write_config_byte(pci_dev, 0x59, temp); - pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, pci_dev); + pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL); if (pci_dev == NULL) return -1; @@ -3396,16 +3397,16 @@ { unsigned long dwValue; char temp = 0; - struct pci_dev *pci_dev = NULL; + struct pci_dev *pci_dev; - pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pci_dev); + pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); if (pci_dev == NULL) return -1; pci_read_config_byte(pci_dev, 0x59, &temp); temp |= 0x80; pci_write_config_byte(pci_dev, 0x59, temp); - pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, pci_dev); + pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL); if (pci_dev == NULL) return -1; pci_read_config_byte(pci_dev, (int)0xB8, &temp); @@ -3918,11 +3919,11 @@ static int ali_reset_5451(struct trident_card *card) { - struct pci_dev *pci_dev = NULL; + struct pci_dev *pci_dev; unsigned int dwVal; unsigned short wCount, wReg; - pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pci_dev); + pci_dev = pci_find_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); if (pci_dev == NULL) return -1; @@ -4078,7 +4079,7 @@ u8 revision; int i = 0; u16 temp; - struct pci_dev *pci_dev_m1533 = NULL; + struct pci_dev *pci_dev_m1533; int rc = -ENODEV; u64 dma_mask; @@ -4163,7 +4164,7 @@ /* Add H/W Volume Control By Matt Wu Jul. 
06, 2001 */ card->hwvolctl = 0; - pci_dev_m1533 = pci_find_device(PCI_VENDOR_ID_AL,PCI_DEVICE_ID_AL_M1533, pci_dev_m1533); + pci_dev_m1533 = pci_find_device(PCI_VENDOR_ID_AL,PCI_DEVICE_ID_AL_M1533, NULL); rc = -ENODEV; if (pci_dev_m1533 == NULL) goto out_proc_fs; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/sound/ymfpci.c linux.22-ac2/drivers/sound/ymfpci.c --- linux.vanilla/drivers/sound/ymfpci.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/sound/ymfpci.c 2003-08-28 22:24:51.000000000 +0100 @@ -2474,7 +2474,6 @@ eid = ymfpci_codec_read(codec, AC97_EXTENDED_ID); if (eid==0xFFFF) { printk(KERN_WARNING "ymfpci: no codec attached ?\n"); - goto out_kfree; } unit->ac97_features = eid; @@ -2626,7 +2625,6 @@ out_release_region: release_mem_region(pci_resource_start(pcidev, 0), 0x8000); out_free: - ac97_release_codec(codec->ac97_codec[0]); return -ENODEV; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/tc/zs.c linux.22-ac2/drivers/tc/zs.c --- linux.vanilla/drivers/tc/zs.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/tc/zs.c 2003-08-28 17:05:46.000000000 +0100 @@ -1374,7 +1374,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttyS%02d, count = %d\n", info->line, info->count); #endif - if ((tty->count == 1) && (info->count != 1)) { + if ((atomic_read(&tty->count) == 1) && (info->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. Info->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/at76c503.c linux.22-ac2/drivers/usb/atmel/at76c503.c --- linux.vanilla/drivers/usb/atmel/at76c503.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/at76c503.c 2003-08-15 15:03:11.000000000 +0100 @@ -0,0 +1,5235 @@ +/* -*- linux-c -*- */ +/* $Id: at76c503.c,v 1.35 2003/07/30 06:31:51 jal2 Exp $ + * + * USB at76c503/at76c505 driver + * + * Copyright (c) 2002 - 2003 Oliver Kurth + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * + * History: + * + * 2002_12_31: + * - first ping, ah-hoc mode works, fw version 0.90.2 only + * + * 2003_01_07 0.1: + * - first release + * + * 2003_01_08 0.2: + * - moved rx code into tasklet + * - added locking in keventd handler + * - support big endian in ieee802_11.h + * - external firmware downloader now in this module + * + * 2003_01_10 0.3: + * - now using 8 spaces for tab indentations + * - added tx rate settings (patch from Joerg Albert (jal)) + * - created functions for mib settings + * + * 2003_01_19 0.4: + * - use usbdfu for the internal firmware + * + * 2003_01_27 0.5: + * - implemented WEP. Thanks to jal + * - added frag and rts ioctl calls (jal again) + * - module parameter to give names other than eth + * + * 2003_01_28 0.6: + * - make it compile with kernel < 2.4.20 (there is no owner field + * in struct usb_driver) + * - fixed a small bug for the module param eth_name + * - do not use GFP_DMA, GFP_KERNEL is enough + * - no down() in _tx() because that's in interrupt. Use + * spin_lock_irq() instead + * - should not stop net queue on urb errors + * - cleanup in ioctl(): locked it altogether, this makes it easier + * to maintain + * - tried to implement promisc. 
mode: does not work with this device + * - tried to implement setting mac address: does not + * seem to work with this device + * - now use fw version 0.90.2 #140 (prev. was #93). Does not help... + * + * 2003_01_30 0.7: + * - now works with fw 0.100.2 (solution was: wait for completion + * of commands) + * - setting MAC address now works (thx to a small fix by jal) + * - it turned out that promisc. mode is not possible. The firmware + * does not allow it. I hope that it will be implemented in a future + * version. + * + * 2003_02_13 0.8: + * - scan mode implemented by jal + * - infra structure mode by jal + * - some small cleanups (removed dead code) + * + * history can now be found in the cvs log at http://at76c503a.berlios.de + * + * TODO: + * - monitor mode + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for rtnl_lock() */ + +#include "at76c503.h" +#include "ieee802_11.h" + +/* debug bits */ +#define DBG_PROGRESS 0x000001 /* progress of scan-join-(auth-assoc)-connected */ +#define DBG_BSS_TABLE 0x000002 /* show the bss table after scans */ +#define DBG_IOCTL 0x000004 /* ioctl calls / settings */ +#define DBG_KEVENT 0x000008 /* kevents */ +#define DBG_TX_DATA 0x000010 /* tx header */ +#define DBG_TX_DATA_CONTENT 0x000020 /* tx content */ +#define DBG_TX_MGMT 0x000040 +#define DBG_RX_DATA 0x000080 /* rx data header */ +#define DBG_RX_DATA_CONTENT 0x000100 /* rx data content */ +#define DBG_RX_MGMT 0x000200 /* rx mgmt header except beacon and probe responses */ +#define DBG_RX_BEACON 0x000400 /* rx beacon */ +#define DBG_RX_CTRL 0x000800 /* rx control */ +#define DBG_RX_MGMT_CONTENT 0x001000 /* rx mgmt content */ +#define DBG_RX_FRAGS 0x002000 /* rx data fragment handling */ +#define DBG_DEVSTART 0x004000 /* fw download, device start */ +#define DBG_URB 0x008000 /* rx urb status, ... */ +#define DBG_RX_ATMEL_HDR 0x010000 /* the atmel specific header of each rx packet */ +#define DBG_PROC_ENTRY 0x020000 /* procedure entries and exits */ +#define DBG_PM 0x040000 /* power management settings */ +#define DBG_BSS_MATCH 0x080000 /* show why a certain bss did not match */ +#define DBG_PARAMS 0x100000 /* show the configured parameters */ +#define DBG_WAIT_COMPLETE 0x200000 /* show the wait_completion progress */ +#define DBG_RX_FRAGS_SKB 0x400000 /* show skb header for incoming rx fragments */ +#define DBG_BSS_TABLE_RM 0x800000 /* inform on removal of old bss table entries */ + +#ifdef CONFIG_USB_DEBUG +#define DBG_DEFAULTS (DBG_PROGRESS | DBG_PARAMS | DBG_BSS_TABLE) +#else +#define DBG_DEFAULTS 0 +#endif +static int debug = DBG_DEFAULTS; + +static const u8 zeros[32]; + +/* Use our own dbg macro */ +#undef dbg +#define dbg(bits, format, arg...) \ + do { \ + if (debug & (bits)) \ + printk(KERN_DEBUG __FILE__ ": " format "\n" , ## arg);\ + } while (0) + +/* uncond. debug output */ +#define dbg_uc(format, arg...) \ + printk(KERN_DEBUG __FILE__ ": " format "\n" , ## arg) + +#define assert(x) \ + do {\ + if (!(x)) \ + err(__FILE__ ":%d assertion " #x " failed", __LINE__);\ + } while (0) + +/* how often do we re-try these packets ? 
*/ +#define AUTH_RETRIES 3 +#define ASSOC_RETRIES 3 +#define DISASSOC_RETRIES 3 + +#define NEW_STATE(dev,newstate) \ + do {\ + dbg(DBG_PROGRESS, "%s: state %d -> %d (" #newstate ")",\ + dev->netdev->name, dev->istate, newstate);\ + dev->istate = newstate;\ + } while (0) + +/* the beacon timeout in infra mode when we are connected (in seconds) */ +#define BEACON_TIMEOUT 10 + +/* after how many seconds do we re-scan the channels in infra mode */ +#define RESCAN_TIME 10 + +/* Version Information */ +#define DRIVER_DESC "Generic Atmel at76c503/at76c505 routines" + +/* Module paramaters */ +MODULE_PARM(debug, "i"); +#define DRIVER_AUTHOR \ +"Oliver Kurth , Joerg Albert , Alex " +MODULE_PARM_DESC(debug, "Debugging level"); + +static int rx_copybreak = 200; +MODULE_PARM(rx_copybreak, "i"); +MODULE_PARM_DESC(rx_copybreak, "rx packet copy threshold"); + +static int scan_min_time = 10; +MODULE_PARM(scan_min_time, "i"); +MODULE_PARM_DESC(scan_min_time, "scan min channel time (default: 10)"); + +static int scan_max_time = 120; +MODULE_PARM(scan_max_time, "i"); +MODULE_PARM_DESC(scan_max_time, "scan max channel time (default: 120)"); + +static int scan_mode = SCAN_TYPE_ACTIVE; +MODULE_PARM(scan_mode, "i"); +MODULE_PARM_DESC(scan_mode, "scan mode: 0 active (with ProbeReq, default), 1 passive"); + +static int preamble_type = PREAMBLE_TYPE_LONG; +MODULE_PARM(preamble_type, "i"); +MODULE_PARM_DESC(preamble_type, "preamble type: 0 long (default), 1 short"); + +static int auth_mode = 0; +MODULE_PARM(auth_mode, "i"); +MODULE_PARM_DESC(auth_mode, "authentication mode: 0 open system (default), " + "1 shared secret"); + +static int pm_mode = PM_ACTIVE; +MODULE_PARM(pm_mode, "i"); +MODULE_PARM_DESC(pm_mode, "power management mode: 1 active (def.), 2 powersave, 3 smart save"); + +static int pm_period = 0; +MODULE_PARM(pm_period, "i"); +MODULE_PARM_DESC(pm_period, "period of waking up the device in usec"); + +struct header_struct { + /* 802.3 */ + u8 dest[ETH_ALEN]; + u8 src[ETH_ALEN]; + u16 len; + /* 802.2 */ + u8 dsap; + u8 ssap; + u8 ctrl; + /* SNAP */ + u8 oui[3]; + u16 ethertype; +} __attribute__ ((packed)); + +#define DEF_RTS_THRESHOLD 1536 +#define DEF_FRAG_THRESHOLD 1536 +#define DEF_ESSID "okuwlan" +#define DEF_ESSID_LEN 7 +#define DEF_CHANNEL 10 + +#define MAX_RTS_THRESHOLD 2347 +#define MAX_FRAG_THRESHOLD 2346 +#define MIN_FRAG_THRESHOLD 256 + +/* The frequency of each channel in MHz */ +const long channel_frequency[] = { + 2412, 2417, 2422, 2427, 2432, 2437, 2442, + 2447, 2452, 2457, 2462, 2467, 2472, 2484 +}; +#define NUM_CHANNELS ( sizeof(channel_frequency) / sizeof(channel_frequency[0]) ) + +/* the broadcast address */ +const u8 bc_addr[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff}; + +/* the supported rates of this hardware, bit7 marks a mandantory rate */ +const u8 hw_rates[4] = {0x82,0x84,0x0b,0x16}; + +/* the max padding size for tx in bytes (see calc_padding)*/ +#define MAX_PADDING_SIZE 53 + +/* a ieee820.11 frame header without addr4 */ +struct ieee802_11_mgmt { + u16 frame_ctl; + u16 duration_id; + u8 addr1[ETH_ALEN]; /* destination addr */ + u8 addr2[ETH_ALEN]; /* source addr */ + u8 addr3[ETH_ALEN]; /* BSSID */ + u16 seq_ctl; + u8 data[1508]; + u32 fcs; +} __attribute__ ((packed)); + +/* the size of the ieee802.11 header (excl. 
the at76c503 tx header) */ +#define IEEE802_11_MGMT_HEADER_SIZE offsetof(struct ieee802_11_mgmt, data) + +/* beacon in ieee802_11_mgmt.data */ +struct ieee802_11_beacon_data { + u8 timestamp[8]; // TSFTIMER + u16 beacon_interval; // Kms between TBTTs (Target Beacon Transmission Times) + u16 capability_information; + u8 data[1500]; /* contains: SSID (tag,length,value), + Supported Rates (tlv), channel */ +} __attribute__ ((packed)); + +/* disassoc frame in ieee802_11_mgmt.data */ +struct ieee802_11_disassoc_frame { + u16 reason; +} __attribute__ ((packed)); + +#define DISASSOC_FRAME_SIZE \ + (AT76C503_TX_HDRLEN + IEEE802_11_MGMT_HEADER_SIZE +\ + sizeof(struct ieee802_11_disassoc_frame)) + +/* assoc request in ieee802_11_mgmt.data */ +struct ieee802_11_assoc_req { + u16 capability; + u16 listen_interval; + u8 data[1]; /* variable number of bytes for SSID + and supported rates (tlv coded) */ +}; + +/* the maximum size of an AssocReq packet */ +#define ASSOCREQ_MAX_SIZE \ + (AT76C503_TX_HDRLEN + IEEE802_11_MGMT_HEADER_SIZE +\ + offsetof(struct ieee802_11_assoc_req,data) +\ + 1+1+IW_ESSID_MAX_SIZE + 1+1+4) + +/* reassoc request in ieee802_11_mgmt.data */ +struct ieee802_11_reassoc_req { + u16 capability; + u16 listen_interval; + u8 curr_ap[ETH_ALEN]; /* the bssid of the AP we are + currently associated to */ + u8 data[1]; /* variable number of bytes for SSID + and supported rates (tlv coded) */ +} __attribute__ ((packed)); + +/* the maximum size of an AssocReq packet */ +#define REASSOCREQ_MAX_SIZE \ + (AT76C503_TX_HDRLEN + IEEE802_11_MGMT_HEADER_SIZE +\ + offsetof(struct ieee802_11_reassoc_req,data) +\ + 1+1+IW_ESSID_MAX_SIZE + 1+1+4) + + +/* assoc/reassoc response */ +struct ieee802_11_assoc_resp { + u16 capability; + u16 status; + u16 assoc_id; + u8 data[1]; /* variable number of bytes for + supported rates (tlv coded) */ +} __attribute__ ((packed)); + +/* auth. 
request/response in ieee802_11_mgmt.data */ +struct ieee802_11_auth_frame { + u16 algorithm; + u16 seq_nr; + u16 status; + u8 challenge[0]; +} __attribute__ ((packed)); + +/* for shared secret auth, add the challenge text size */ +#define AUTH_FRAME_SIZE \ + (AT76C503_TX_HDRLEN + IEEE802_11_MGMT_HEADER_SIZE +\ + sizeof(struct ieee802_11_auth_frame)) + +/* deauth frame in ieee802_11_mgmt.data */ +struct ieee802_11_deauth_frame { + u16 reason; +} __attribute__ ((packed)); + +#define DEAUTH_FRAME_SIZE \ + (AT76C503_TX_HDRLEN + IEEE802_11_MGMT_HEADER_SIZE +\ + sizeof(struct ieee802_11_disauth_frame)) + + +#define KEVENT_CTRL_HALT 1 +#define KEVENT_NEW_BSS 2 +#define KEVENT_SET_PROMISC 3 +#define KEVENT_MGMT_TIMEOUT 4 +#define KEVENT_SCAN 5 +#define KEVENT_JOIN 6 +#define KEVENT_STARTIBSS 7 +#define KEVENT_SUBMIT_RX 8 +#define KEVENT_RESTART 9 /* restart the device */ +#define KEVENT_ASSOC_DONE 10 /* execute the power save settings: + listen interval, pm mode, assoc id */ + +static DECLARE_WAIT_QUEUE_HEAD(wait_queue); + +static u8 snapsig[] = {0xaa, 0xaa, 0x03}; +#ifdef COLLAPSE_RFC1042 +/* RFC 1042 encapsulates Ethernet frames in 802.2 SNAP (0xaa, 0xaa, 0x03) with + * a SNAP OID of 0 (0x00, 0x00, 0x00) */ +static u8 rfc1042sig[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; +#endif /* COLLAPSE_RFC1042 */ + +/* local function prototypes */ + +#if IW_MAX_SPY > 0 +static void iwspy_update(struct at76c503 *dev, struct at76c503_rx_buffer *buf); +#endif +static void at76c503_read_bulk_callback (struct urb *urb); +static void at76c503_write_bulk_callback(struct urb *urb); +static void defer_kevent (struct at76c503 *dev, int flag); +static struct bss_info *find_matching_bss(struct at76c503 *dev, + struct bss_info *curr); +static int auth_req(struct at76c503 *dev, struct bss_info *bss, int seq_nr, + u8 *challenge); +static int disassoc_req(struct at76c503 *dev, struct bss_info *bss); +static int assoc_req(struct at76c503 *dev, struct bss_info *bss); +static int reassoc_req(struct at76c503 *dev, struct bss_info *curr, + struct bss_info *new); +static void dump_bss_table(struct at76c503 *dev, int force_output); +static int submit_rx_urb(struct at76c503 *dev); +static int startup_device(struct at76c503 *dev); + +/** + * hex2str - hexdump + * @obuf: output buffer + * @buf: buffer to dump + * @len: length of data to dump + * @delim: delimniter + * + * Hexdump len many bytes from buf into obuf, separated by delim, + * add a trailing \0 into obuf. Caller must ensure obuf is big enough. + */ + +static char *hex2str(char *obuf, u8 *buf, int len, char delim) +{ + static char hex[16]="0123456789ABCDEF"; + char *ret = obuf; + while (len--) { + *obuf++ = hex[*buf>>4]; + *obuf++ = hex[*buf&0xf]; + if (delim != '\0') + *obuf++ = delim; + buf++; + } + if (delim != '\0' && obuf > ret) + obuf--; // remove last inserted delimiter + *obuf = '\0'; + return ret; +} + +/** + * free_bss_list - free a device BSS list + * @dev: device + * + * Clear the BSS list for this device and free up the memory + */ + +static inline void free_bss_list(struct at76c503 *dev) +{ + struct list_head *next, *ptr; + unsigned long flags; + + spin_lock_irqsave(&dev->bss_list_spinlock, flags); + + dev->curr_bss = dev->new_bss = NULL; + + list_for_each_safe(ptr, next, &dev->bss_list) { + list_del(ptr); + kfree(list_entry(ptr, struct bss_info, list)); + } + + spin_unlock_irqrestore(&dev->bss_list_spinlock, flags); +} + +/** + * mac2str - turn MAC address into string + * @mac: MAC address to convert + * + * Returns a static string holding the MAC address. 
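A usage note on the hex2str() helper above: with a delimiter the output buffer needs three characters per input byte (including the trailing '\0'), without one it needs two per byte plus the '\0'. A hypothetical caller, sized accordingly, might look like this:

static void example_dump_bytes(struct at76c503 *dev, u8 *buf, int len)
{
	char obuf[2*32 + 1];	/* room for up to 32 bytes, no delimiter */

	if (len > 32)
		len = 32;
	dbg(DBG_PARAMS, "%s: %s", dev->netdev->name,
	    hex2str(obuf, buf, len, '\0'));
}

For MAC addresses the driver either calls hex2str(obuf, addr, ETH_ALEN, ':') or uses the mac2str() wrapper described next.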
+ * BUGS: not re-entrant but used that way. + */ + +static inline char *mac2str(u8 *mac) +{ + static char str [6*3]; + + sprintf(str, "%02x:%02x:%02x:%02x:%02x:%02x", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + return str; +} + + +/** + * usb_debug_data - debug dump + * @function: caller function name + * @data: data block + * @size: size of data block + * + * Dump a block of data to the kernel system log, formatted up nicely + * and with headers indicating what it is. Used for debugging + */ + +static inline void usb_debug_data (const char *function, const unsigned char *data, int size) +{ + int i; + + if (!debug) + return; + + printk (KERN_DEBUG __FILE__": %s - length = %d, data = ", + function, size); + for (i = 0; i < size; ++i) { + if((i % 8) == 0) + printk ("\n"); + printk ("%.2x ", data[i]); + } + printk ("\n"); +} + +/** + * at76c503_remap - send a remap request + * @dev: USB device to use + * + * Send out a remap request to the device, allowing it one second + * to complete. + */ + +int at76c503_remap(struct usb_device *udev) +{ + int ret; + ret = usb_control_msg(udev, usb_sndctrlpipe(udev,0), + 0x0a, INTERFACE_VENDOR_REQUEST_OUT, + 0, 0, + NULL, 0, HZ); + if (ret < 0) + return ret; + + return 0; +} + +/** + * at76c503_get_op_mode - get the mode of the device + * @dev: USB device to use + * + * Get the current operating mode of the device and return it. Modes + * are positive, errors are returned as negative values. + */ + +static int at76c503_get_op_mode(struct usb_device *udev) +{ + int ret; + u8 op_mode; + + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + 0x33, INTERFACE_VENDOR_REQUEST_IN, + 0x01, 0, + &op_mode, 1, HZ); + if(ret < 0) + return ret; + return op_mode; +} + +/** + * load_ext_firmware_block - load a firmware block + * @udev: USB device + * @i: block number + * @buf: data + * @bsize: size of block + * + * This loads a block of the second part of the firmware into the + * wireless card + */ + +static int load_ext_fw_block(struct usb_device *udev, + int i, unsigned char *buf, int bsize) +{ + return usb_control_msg(udev, usb_sndctrlpipe(udev,0), + 0x0e, DEVICE_VENDOR_REQUEST_OUT, + 0x0802, i, + buf, bsize, HZ); +} + +/** + * get_hw_cfg_rfmd - load RFMD config block + * @udev: USB device + * @buf: data buffer + * @buf_size: buffer size + * + * Read the configuration block from an RFMD radio chip. + */ + +static int get_hw_cfg_rfmd(struct usb_device *udev, + unsigned char *buf, int buf_size) +{ + return usb_control_msg(udev, usb_rcvctrlpipe(udev,0), + 0x33, INTERFACE_VENDOR_REQUEST_IN, + ((0x0a << 8) | 0x02), 0, + buf, buf_size, HZ); +} + +/** + * get_hw_cfg_intersil - load intersil config block + * @udev: USB device + * @buf: data buffer + * @buf_size: buffer size + * + * Read the configuration block from an intersil radio chip. + */ + +static int get_hw_cfg_intersil(struct usb_device *udev, + unsigned char *buf, int buf_size) +{ + return usb_control_msg(udev, usb_rcvctrlpipe(udev,0), + 0x33, INTERFACE_VENDOR_REQUEST_IN, + ((0x09 << 8) | 0x02), 0, + buf, buf_size, HZ); +} + +/** + * get_hw_config - get hardware configuration + * @dev: device to query + * + * Get the hardware configuration for the adapter and place the + * appropriate data in the appropriate fields of 'dev' (the + * GetHWConfig request and interpretation of the result depends + * on the type of board we're dealing with). 
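A hypothetical caller sketch for get_hw_config(): once it succeeds, the MAC address and regulatory domain read from the board are typically propagated into the net_device and the debug log (the function name and the minimal error handling are illustrative only):

static int example_apply_hw_config(struct at76c503 *dev)
{
	int ret = get_hw_config(dev);

	if (ret < 0)
		return ret;
	memcpy(dev->netdev->dev_addr, dev->mac_addr, ETH_ALEN);
	dbg(DBG_DEVSTART, "%s: MAC %s, regulatory domain 0x%02x",
	    dev->netdev->name, mac2str(dev->mac_addr), dev->regulatory_domain);
	return 0;
}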
+ */ + +static int get_hw_config(struct at76c503 *dev) +{ + int ret; + union { + struct hwcfg_intersil i; + struct hwcfg_rfmd r3; + struct hwcfg_r505 r5; + } *hwcfg = kmalloc(sizeof(*hwcfg), GFP_KERNEL); + + if (!hwcfg) + return -ENOMEM; + + switch (dev->board_type) { + case BOARDTYPE_INTERSIL: + ret = get_hw_cfg_intersil(dev->udev, (unsigned char *)&hwcfg->i, sizeof(hwcfg->i)); + if (ret < 0) break; + memcpy(dev->mac_addr, hwcfg->i.mac_addr, ETH_ALEN); + memcpy(dev->cr31_values, hwcfg->i.cr31_values, 14); + memcpy(dev->cr58_values, hwcfg->i.cr58_values, 14); + memcpy(dev->pidvid, hwcfg->i.pidvid, 4); + dev->regulatory_domain = hwcfg->i.regulatory_domain; + break; + case BOARDTYPE_RFMD: + ret = get_hw_cfg_rfmd(dev->udev, (unsigned char *)&hwcfg->r3, sizeof(hwcfg->r3)); + if (ret < 0) break; + memcpy(dev->cr20_values, hwcfg->r3.cr20_values, 14); + memcpy(dev->cr21_values, hwcfg->r3.cr21_values, 14); + memcpy(dev->bb_cr, hwcfg->r3.bb_cr, 14); + memcpy(dev->pidvid, hwcfg->r3.pidvid, 4); + memcpy(dev->mac_addr, hwcfg->r3.mac_addr, ETH_ALEN); + dev->regulatory_domain = hwcfg->r3.regulatory_domain; + memcpy(dev->low_power_values, hwcfg->r3.low_power_values, 14); + memcpy(dev->normal_power_values, hwcfg->r3.normal_power_values, 14); + break; + case BOARDTYPE_R505: + ret = get_hw_cfg_rfmd(dev->udev, (unsigned char *)&hwcfg->r5, sizeof(hwcfg->r5)); + if (ret < 0) break; + memcpy(dev->cr39_values, hwcfg->r5.cr39_values, 14); + memcpy(dev->bb_cr, hwcfg->r5.bb_cr, 14); + memcpy(dev->pidvid, hwcfg->r5.pidvid, 4); + memcpy(dev->mac_addr, hwcfg->r5.mac_addr, ETH_ALEN); + dev->regulatory_domain = hwcfg->r5.regulatory_domain; + memcpy(dev->cr15_values, hwcfg->r5.cr15_values, 14); + break; + default: + err("Bad board type set (%d). Unable to get hardware config.", dev->board_type); + ret = -EINVAL; + } + + kfree(hwcfg); + + if (ret < 0) { + err("Get HW Config failed (%d)", ret); + } + return ret; +} + +/** + * get_reg_domain - regulatory domain + * @code: domain code + * + * Turn a regulatory domain code into a name and also + * list of the channels permitted in this domain + */ + +static struct reg_domain const *get_reg_domain(u16 code) +{ + static struct reg_domain const fd_tab[] = { + {0x10, "FCC (U.S)", 0x7ff}, /* ch 1-11 */ + {0x20, "IC (Canada)", 0x7ff}, /* ch 1-11 */ + {0x30, "ETSI (Europe - (Spain+France)", 0x1fff}, /* ch 1-13 */ + {0x31, "Spain", 0x600}, /* ch 10,11 */ + {0x32, "France", 0x1e00}, /* ch 10-13 */ + {0x40, "MKK (Japan)", 0x2000}, /* ch 14 */ + {0x41, "MKK1 (Japan)", 0x3fff}, /* ch 1-14 */ + {0x50, "Israel", 0x3fc}, /* ch 3-9 */ + }; + static int const tab_len = sizeof(fd_tab) / sizeof(struct reg_domain); + + /* use this if an unknown code comes in */ + static struct reg_domain const unknown = + {0, "", 0xffffffff}; + + int i; + + for(i=0; i < tab_len; i++) + if (code == fd_tab[i].code) + break; + + return (i >= tab_len) ? &unknown : &fd_tab[i]; +} + +/** + * get_mib - get a MIB from the device + * @udev: USB device + * @mib: MIB number + * @buf: buffer to store data + * @buf_size: max size of block + * + * Retrieve a MIB maintained by the USB device. Returns an + * error code if this fails. + */ + +static int get_mib(struct usb_device *udev, + u16 mib, u8 *buf, int buf_size) +{ + return usb_control_msg(udev, usb_rcvctrlpipe(udev,0), + 0x33, INTERFACE_VENDOR_REQUEST_IN, + mib << 8, 0, + buf, buf_size, HZ); +} + +/** + * get_cmd_status - command status + * @udev: USB device + * @cmd: command + * @cmd_status: used to store status + * + * Retrieve the command status from the USB device. 
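To illustrate the get_mib() call above, here is a hypothetical read of the listen interval from the MAC MIB; struct mib_mac and its listen_interval field come from at76c503.h and are also used by dump_mib_mac() further down:

static int example_get_listen_interval(struct at76c503 *dev, u16 *val)
{
	struct mib_mac *mac = kmalloc(sizeof(struct mib_mac), GFP_KERNEL);
	int ret;

	if (mac == NULL)
		return -ENOMEM;
	ret = get_mib(dev->udev, MIB_MAC, (u8 *)mac, sizeof(struct mib_mac));
	if (ret >= 0)
		*val = le16_to_cpu(mac->listen_interval);
	kfree(mac);
	return ret < 0 ? ret : 0;
}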
Returns + * an error code if this fails. + */ + +static int get_cmd_status(struct usb_device *udev, + u8 cmd, u8 *cmd_status) +{ + return usb_control_msg(udev, usb_rcvctrlpipe(udev,0), + 0x22, INTERFACE_VENDOR_REQUEST_IN, + cmd, 0, + cmd_status, 40, HZ); +} + +#define EXT_FW_BLOCK_SIZE 1024 + +/** + * at76c503_download_external_fw - firmware download + * @udev: USB device + * @buf: firmware + * @size: firmware size + * + * Downloads any needed external firmware into the device. If the + * device is already running (previously loaded, or has flash firmware) + * then we don't do anything + */ + +int at76c503_download_external_fw(struct usb_device *udev, u8 *buf, int size) +{ + int i = 0, ret = 0; + u8 op_mode; + u8 *block; + + if (size < 0) + BUG(); + + op_mode = at76c503_get_op_mode(udev); + if (op_mode <= 0) { + err("Internal firmware not loaded (%d)", op_mode); + return -EPROTO; + } + if (op_mode == OPMODE_NETCARD) { + /* don't need firmware downloaded, it's already ready to go */ + return 0; + } + if (op_mode != OPMODE_NOFLASHNETCARD) { + dbg(DBG_DEVSTART, + "Unexpected operating mode (%d)." + "Attempting to download firmware anyway.", op_mode); + } + + block = kmalloc(EXT_FW_BLOCK_SIZE, GFP_KERNEL); + if (block == NULL) + return -ENOMEM; + + dbg(DBG_DEVSTART, "Downloading external firmware..."); + + while(size > 0){ + int bsize = size > EXT_FW_BLOCK_SIZE ? EXT_FW_BLOCK_SIZE : size; + + memcpy(block, buf, bsize); + + dbg(DBG_DEVSTART, "ext fw, size left = %5d, bsize = %4d, i = %2d", size, bsize, i); + + if((ret = load_ext_fw_block(udev, i, block, bsize)) < 0){ + err("load_ext_fw_block failed: %d, i = %d", ret, i); + goto exit; + } + buf += bsize; + size -= bsize; + i++; + } + + /* for fw >= 0.100, the device needs + an extra empty block: */ + if((ret = load_ext_fw_block(udev, i, block, 0)) < 0){ + err("load_ext_fw_block failed: %d, i = %d", ret, i); + goto exit; + } + +exit: + kfree(block); + return ret; +} + +/** + * set_card_command - set up a command + * @udev: USB device + * @cmd: command + * @buf: buffer for command + * @buf_size: size of data buffer for command + * + * Builds a message and sends the command off to the card + * for processing. 
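Commands issued with set_card_command() complete asynchronously in the firmware, so callers normally pair the call with wait_completion() (defined just below) and check the returned status. A hypothetical synchronous wrapper, reusing the parameterless CMD_RADIO command that set_radio() sends later on:

static int example_radio_cmd_sync(struct at76c503 *dev)
{
	int ret = set_card_command(dev->udev, CMD_RADIO, NULL, 0);

	if (ret < 0)
		return ret;
	ret = wait_completion(dev, CMD_RADIO);
	return ret == CMD_STATUS_COMPLETE ? 0 : -EIO;
}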
+ */ + +static int set_card_command(struct usb_device *udev, int cmd, + unsigned char *buf, int buf_size) +{ + int ret; + struct at76c503_command *cmd_buf = kmalloc(sizeof(struct at76c503_command) + buf_size, + GFP_KERNEL); + + if(cmd_buf){ + cmd_buf->cmd = cmd; + cmd_buf->reserved = 0; + cmd_buf->size = cpu_to_le16(buf_size); + if(buf_size > 0) + memcpy(&(cmd_buf[1]), buf, buf_size); + ret = usb_control_msg(udev, usb_sndctrlpipe(udev,0), + 0x0e, DEVICE_VENDOR_REQUEST_OUT, + 0, 0, + cmd_buf, + sizeof(struct at76c503_command) + buf_size, + HZ); + kfree(cmd_buf); + return ret; + } + + return -ENOMEM; +} + +/* TODO: should timeout */ +int wait_completion(struct at76c503 *dev, int cmd) +{ + u8 *cmd_status = kmalloc(40, GFP_KERNEL); + struct net_device *netdev = dev->netdev; + int ret = 0; + unsigned long timeout = jiffies + 30 * HZ; + + if(cmd_status == NULL) + return -ENOMEM; + + do{ + ret = get_cmd_status(dev->udev, cmd, cmd_status); + if(ret < 0){ + err("%s: get_cmd_status failed: %d", netdev->name, ret); + break; + } + + dbg(DBG_WAIT_COMPLETE, "%s: cmd %d,cmd_status[5] = %d", + dev->netdev->name, cmd, cmd_status[5]); + + if(cmd_status[5] == CMD_STATUS_IN_PROGRESS || + cmd_status[5] == CMD_STATUS_IDLE){ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ/10); // 100 ms + } + else break; + } + while(time_before(jiffies, timeout)); + + if (ret >= 0) + /* if get_cmd_status did not fail, return the status + retrieved */ + ret = cmd_status[5]; + kfree(cmd_status); + return ret; +} + +/** + * set_mib - set a mib value + * @dev: Radio device + * @buf: A MIB set buffer command block + * + * Change a MIB and wait until the MIB change has occurred. + * Return an error code if the change failed or was rejected. + */ + +static int set_mib(struct at76c503 *dev, struct set_mib_buffer *buf) +{ + struct usb_device *udev = dev->udev; + int ret; + struct at76c503_command *cmd_buf = kmalloc(sizeof(struct at76c503_command) + buf->size + 4, + GFP_KERNEL); + + if(cmd_buf){ + cmd_buf->cmd = CMD_SET_MIB; + cmd_buf->reserved = 0; + cmd_buf->size = cpu_to_le16(buf->size + 4); + memcpy(&(cmd_buf[1]), buf, buf->size + 4); + ret = usb_control_msg(udev, usb_sndctrlpipe(udev,0), + 0x0e, DEVICE_VENDOR_REQUEST_OUT, + 0, 0, + cmd_buf, + sizeof(struct at76c503_command) + buf->size + 4, + HZ); + if (ret >= 0) + if ((ret=wait_completion(dev, CMD_SET_MIB)) != CMD_STATUS_COMPLETE) { + info("%s: set_mib: wait_completion failed with %d", + dev->netdev->name, ret); + ret = -EINVAL; /* ??? */ + } + kfree(cmd_buf); + return ret; + } + + return -ENOMEM; +} + +/** + * set_radio - control radio + * @dev: device + * @on_off: radio mode + * + * Set the radio operating mode. 
Return < 0 on error, == 0 if no + * command sent, == 1 if cmd sent + */ + +static int set_radio(struct at76c503 *dev, int on_off) +{ + int ret; + + if(dev->radio_on != on_off){ + ret = set_card_command(dev->udev, CMD_RADIO, NULL, 0); + if(ret < 0){ + err("%s: set_card_command(CMD_RADIO) failed: %d", dev->netdev->name, ret); + } else + ret = 1; + dev->radio_on = on_off; + } else + ret = 0; + return ret; +} + + +/* == PROC set_pm_mode == + sets power save modi (PM_ACTIVE/PM_SAVE/PM_SMART_SAVE) */ +static int set_pm_mode(struct at76c503 *dev, u8 mode) __attribute__ ((unused)); +static int set_pm_mode(struct at76c503 *dev, u8 mode) +{ + int ret = 0; + struct set_mib_buffer mib_buf; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC_MGMT; + mib_buf.size = 1; + mib_buf.index = POWER_MGMT_MODE_OFFSET; + + mib_buf.data[0] = mode; + + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (pm_mode) failed: %d", dev->netdev->name, ret); + } + return ret; +} + +/* == PROC set_associd == + sets the assoc id for power save mode */ +static int set_associd(struct at76c503 *dev, u16 id) __attribute__ ((unused)); +static int set_associd(struct at76c503 *dev, u16 id) +{ + int ret = 0; + struct set_mib_buffer mib_buf; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC_MGMT; + mib_buf.size = 2; + mib_buf.index = STATION_ID_OFFSET; + + mib_buf.data[0] = id & 0xff; + mib_buf.data[1] = id >> 8; + + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (associd) failed: %d", dev->netdev->name, ret); + } + return ret; +} + +/* == PROC set_listen_interval == + sets the listen interval for power save mode. + really needed, as we have a similar parameter in the assocreq ??? */ +static int set_listen_interval(struct at76c503 *dev, u16 interval) __attribute__ ((unused)); +static int set_listen_interval(struct at76c503 *dev, u16 interval) +{ + int ret = 0; + struct set_mib_buffer mib_buf; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC; + mib_buf.size = 2; + mib_buf.index = STATION_ID_OFFSET; + + mib_buf.data[0] = interval & 0xff; + mib_buf.data[1] = interval >> 8; + + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (listen_interval) failed: %d", + dev->netdev->name, ret); + } + return ret; +} + +/** + * set_premable - change premable mode + * @dev: USB device + * @type: preamble type + * + * Issue a premable change in the local MIB. + */ + +static int set_preamble(struct at76c503 *dev, u8 type) +{ + int ret = 0; + struct set_mib_buffer mib_buf; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_LOCAL; + mib_buf.size = 1; + mib_buf.index = PREAMBLE_TYPE_OFFSET; + mib_buf.data[0] = type; + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (preamble) failed: %d", dev->netdev->name, ret); + } + return ret; +} + +/** + * set_frag - set fragment threshold + * @dev: Radio device + * @size: size + * + * Set the size of frame at which the hardware should begin + * using fragmentation. Splitting the packet up helps in a + * noisy environment but is overhead otherwise. 
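A hypothetical caller of set_frag() and set_rts() (defined next), as it would be driven from the wireless-extensions ioctl path; the range checks use the MIN_/MAX_ constants defined earlier:

static int example_set_thresholds(struct at76c503 *dev, u16 frag, u16 rts)
{
	if (frag < MIN_FRAG_THRESHOLD || frag > MAX_FRAG_THRESHOLD)
		return -EINVAL;
	if (rts > MAX_RTS_THRESHOLD)
		return -EINVAL;
	if (set_frag(dev, frag) < 0 || set_rts(dev, rts) < 0)
		return -EIO;
	return 0;
}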
+ */ + +static int set_frag(struct at76c503 *dev, u16 size) +{ + int ret = 0; + struct set_mib_buffer mib_buf; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC; + mib_buf.size = 2; + mib_buf.index = FRAGMENTATION_OFFSET; + *(u16*)mib_buf.data = cpu_to_le16(size); + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (frag threshold) failed: %d", dev->netdev->name, ret); + } + return ret; +} + + +/** + * set_rts - set RTS threshold + * @dev: Radio device + * @size: size + * + * Set the packet size at which we should begin using RTS handshakes. + * This adds overhead but increases performance markedly with a large + * number of nodes or hidden transmitters. + */ + +static int set_rts(struct at76c503 *dev, u16 size) +{ + int ret = 0; + struct set_mib_buffer mib_buf; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC; + mib_buf.size = 2; + mib_buf.index = RTS_OFFSET; + *(u16*)mib_buf.data = cpu_to_le16(size); + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (rts) failed: %d", dev->netdev->name, ret); + } + return ret; +} + +/** + * set_autorate_fallback - set rate fallback + * @dev: Radio device + * @onoff; enable/disable + * + * Set the fallback behaviour for speed handling. Right now this + * is configured once and not reconfigurable by the wireless + * extensions + */ + +static int set_autorate_fallback(struct at76c503 *dev, int onoff) +{ + int ret = 0; + struct set_mib_buffer mib_buf; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_LOCAL; + mib_buf.size = 1; + mib_buf.index = TX_AUTORATE_FALLBACK_OFFSET; + mib_buf.data[0] = onoff; + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (autorate fallback) failed: %d", dev->netdev->name, ret); + } + return ret; +} + +/** + * set_mac_address - set the MAC address + * @dev: Radio device + * @addr: 6 byte MAC address + * + * Change the MAC address the firmware is currently using. + */ + +static int set_mac_address(struct at76c503 *dev, void *addr) +{ + struct set_mib_buffer mib_buf; + int ret = 0; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC_ADD; + mib_buf.size = ETH_ALEN; + mib_buf.index = offsetof(struct mib_mac_addr, mac_addr); + memcpy(mib_buf.data, addr, ETH_ALEN); + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (MAC_ADDR, mac_addr) failed: %d", + dev->netdev->name, ret); + } + return ret; +} + +#if 0 +/* implemented to get promisc. mode working, but does not help. + May still be useful for multicast eventually. */ +static int set_group_address(struct at76c503 *dev, u8 *addr, int n) +{ + struct set_mib_buffer mib_buf; + int ret = 0; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC_ADD; + mib_buf.size = ETH_ALEN; + mib_buf.index = offsetof(struct mib_mac_addr, group_addr) + n*ETH_ALEN; + memcpy(mib_buf.data, addr, ETH_ALEN); + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (MIB_MAC_ADD, group_addr) failed: %d", + dev->netdev->name, ret); + } + +#if 1 + /* I do not know anything about the group_addr_status field... 
(oku)*/ + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC_ADD; + mib_buf.size = 1; + mib_buf.index = offsetof(struct mib_mac_addr, group_addr_status) + n; + mib_buf.data[0] = 1; + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (MIB_MAC_ADD, group_addr_status) failed: %d", + dev->netdev->name, ret); + } +#endif + return ret; +} +#endif + +/** + * set_promisc - promiscuous on/off + * @dev: Radio device + * @onoff; enable/disable + * + * Turn promiscuous mode on or off in the MIB. + */ + + +static int set_promisc(struct at76c503 *dev, int onoff) +{ + int ret = 0; + struct set_mib_buffer mib_buf; + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_LOCAL; + mib_buf.size = 1; + mib_buf.index = offsetof(struct mib_local, promiscuous_mode); + mib_buf.data[0] = onoff ? 1 : 0; + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (promiscous_mode) failed: %d", dev->netdev->name, ret); + } + return ret; +} + +static int dump_mib_mac_addr(struct at76c503 *dev) __attribute__ ((unused)); +static int dump_mib_mac_addr(struct at76c503 *dev) +{ + int ret = 0; + struct mib_mac_addr *mac_addr = + kmalloc(sizeof(struct mib_mac_addr), GFP_KERNEL); + char abuf[2*(4*ETH_ALEN)+1] __attribute__ ((unused)); + + if(!mac_addr){ + ret = -ENOMEM; + goto exit; + } + + ret = get_mib(dev->udev, MIB_MAC_ADD, + (u8*)mac_addr, sizeof(struct mib_mac_addr)); + if(ret < 0){ + err("%s: get_mib (MAC_ADDR) failed: %d", dev->netdev->name, ret); + goto err; + } + + dbg_uc("%s: MIB MAC_ADDR: mac_addr %s group_addr %s status %d %d %d %d", + dev->netdev->name, mac2str(mac_addr->mac_addr), + hex2str(abuf, (u8 *)mac_addr->group_addr, (sizeof(abuf)-1)/2,'\0'), + mac_addr->group_addr_status[0], mac_addr->group_addr_status[1], + mac_addr->group_addr_status[2], mac_addr->group_addr_status[3]); + + err: + kfree(mac_addr); + exit: + return ret; +} + +static int dump_mib_mac_wep(struct at76c503 *dev) __attribute__ ((unused)); +static int dump_mib_mac_wep(struct at76c503 *dev) +{ + int ret = 0; + struct mib_mac_wep *mac_wep = kmalloc(sizeof(struct mib_mac_wep), GFP_KERNEL); + char kbuf[2*WEP_KEY_SIZE+1] __attribute__ ((unused)); + + if(!mac_wep){ + ret = -ENOMEM; + goto exit; + } + + ret = get_mib(dev->udev, MIB_MAC_WEP, + (u8*)mac_wep, sizeof(struct mib_mac_wep)); + if(ret < 0){ + err("%s: get_mib (MAC_WEP) failed: %d", dev->netdev->name, ret); + goto err; + } + + dbg_uc("%s: MIB MAC_WEP: priv_invoked %u def_key_id %u key_len %u " + "excl_unencr %u wep_icv_err %u wep_excluded %u encr_level %u key %d: %s", + dev->netdev->name, mac_wep->privacy_invoked, + mac_wep->wep_default_key_id, mac_wep->wep_key_mapping_len, + mac_wep->exclude_unencrypted,le32_to_cpu( mac_wep->wep_icv_error_count), + le32_to_cpu(mac_wep->wep_excluded_count), + mac_wep->encryption_level, mac_wep->wep_default_key_id, + mac_wep->wep_default_key_id < 4 ? + hex2str(kbuf, mac_wep->wep_default_keyvalue[mac_wep->wep_default_key_id], + mac_wep->encryption_level == 2 ? 
13 : 5, '\0') : + ""); + + err: + kfree(mac_wep); + exit: + return ret; +} + +static int dump_mib_mac_mgmt(struct at76c503 *dev) __attribute__ ((unused)); +static int dump_mib_mac_mgmt(struct at76c503 *dev) +{ + int ret = 0; + struct mib_mac_mgmt *mac_mgmt = kmalloc(sizeof(struct mib_mac_mgmt), GFP_KERNEL); + + if(mac_mgmt == NULL) { + ret = -ENOMEM; + goto exit; + } + + ret = get_mib(dev->udev, MIB_MAC_MGMT, + (u8*)mac_mgmt, sizeof(struct mib_mac_mgmt)); + if(ret < 0){ + err("%s: get_mib failed: %d", dev->netdev->name, ret); + goto err; + } + + dbg_uc("%s: MIB MAC_MGMT: station_id x%x pm_mode %d\n", + dev->netdev->name, le16_to_cpu(mac_mgmt->station_id), + mac_mgmt->power_mgmt_mode); +err: + kfree(mac_mgmt); +exit: + return ret; +} + +static int dump_mib_mac(struct at76c503 *dev) __attribute__ ((unused)); +static int dump_mib_mac(struct at76c503 *dev) +{ + int ret = 0; + struct mib_mac *mac = kmalloc(sizeof(struct mib_mac), GFP_KERNEL); + + if(mac == NULL ) { + ret = -ENOMEM; + goto exit; + } + + ret = get_mib(dev->udev, MIB_MAC, + (u8*)mac, sizeof(struct mib_mac)); + if(ret < 0) { + err("%s: get_mib failed: %d", dev->netdev->name, ret); + goto err; + } + + dbg_uc("%s: MIB MAC: listen_int %d", + dev->netdev->name, le16_to_cpu(mac->listen_interval)); +err: + kfree(mac); +exit: + return ret; +} + +/** + * get_current_bssid - get our BSS + * @dev: Radio device + * + * Retrieve the current BSS ident from the management MIB. + * The bssid is updated in the device structure rather than + * returned. + */ + +static int get_current_bssid(struct at76c503 *dev) +{ + int ret = 0; + struct mib_mac_mgmt *mac_mgmt = kmalloc(sizeof(struct mib_mac_mgmt), GFP_KERNEL); + + if(mac_mgmt == NULL) { + ret = -ENOMEM; + goto exit; + } + + ret = get_mib(dev->udev, MIB_MAC_MGMT, + (u8*)mac_mgmt, sizeof(struct mib_mac_mgmt)); + if(ret < 0){ + err("%s: get_mib failed: %d", dev->netdev->name, ret); + goto err; + } + memcpy(dev->bssid, mac_mgmt->current_bssid, ETH_ALEN); + info("using BSSID %s", mac2str(dev->bssid)); +err: + kfree(mac_mgmt); +exit: + return ret; +} + +/** + * get_current_channel - get our channel + * @dev: Radio device + * + * Retrieve the current channel number from the PHY MIB. + * The channel is updated in the device structure rather than + * returned. + */ + +static int get_current_channel(struct at76c503 *dev) +{ + int ret = 0; + struct mib_phy *phy = kmalloc(sizeof(struct mib_phy), GFP_KERNEL); + + if(phy == NULL) { + ret = -ENOMEM; + goto exit; + } + ret = get_mib(dev->udev, MIB_PHY, (u8*)phy, + sizeof(struct mib_phy)); + if(ret < 0){ + err("%s: get_mib(MIB_PHY) failed: %d", dev->netdev->name, ret); + goto err; + } + dev->channel = phy->channel_id; +err: + kfree(phy); +exit: + return ret; +} + +/** + * start_scan - scan wireless channels + * @dev: Radio device + * @use_essid: set if we want to ESSID match + * + * Set the firmware scanning the wireless network looking for + * nodes. The scan itself is asynchronous. + */ + +static int start_scan(struct at76c503 *dev, int use_essid) +{ + struct at76c503_start_scan scan; + + memset(&scan, 0, sizeof(struct at76c503_start_scan)); + memset(scan.bssid, 0xff, ETH_ALEN); + + if (use_essid) { + memcpy(scan.essid, dev->essid, IW_ESSID_MAX_SIZE); + scan.essid_size = dev->essid_size; + } else + scan.essid_size = 0; + + scan.probe_delay = cpu_to_le16(10000); + //jal: why should we start at a certain channel? we do scan the whole range + //allowed by reg domain. 
+ scan.channel = dev->channel; + + /* atmelwlandriver differs between scan type 0 and 1 (active/passive) + For ad-hoc mode, it uses type 0 only.*/ + scan.scan_type = dev->scan_mode; + scan.min_channel_time = cpu_to_le16(dev->scan_min_time); + scan.max_channel_time = cpu_to_le16(dev->scan_max_time); + /* other values are set to 0 for type 0 */ + + return set_card_command(dev->udev, CMD_SCAN, + (unsigned char*)&scan, sizeof(scan)); +} + +/** + * start_ibss - start adhoc IBSS + * @dev: Radio device + * + * Issue an adhoc IBSS start. This command kicks off into the background + */ + +static int start_ibss(struct at76c503 *dev) +{ + struct at76c503_start_bss bss; + + memset(&bss, 0, sizeof(struct at76c503_start_bss)); + memset(bss.bssid, 0xff, ETH_ALEN); + memcpy(bss.essid, dev->essid, IW_ESSID_MAX_SIZE); + bss.essid_size = dev->essid_size; + bss.bss_type = ADHOC_MODE; + bss.channel = dev->channel; + + return set_card_command(dev->udev, CMD_START_IBSS, + (unsigned char*)&bss, sizeof(struct at76c503_start_bss)); +} + +/** + * join_bss - begin a join + * @dev: Radio device + * @ptr: BSS we wish to join + * + * Commence joining a BSS. + */ + +static int join_bss(struct at76c503 *dev, struct bss_info *ptr) +{ + struct at76c503_join join; + + assert(ptr != NULL); + + memset(&join, 0, sizeof(struct at76c503_join)); + memcpy(join.bssid, ptr->bssid, ETH_ALEN); + memcpy(join.essid, ptr->ssid, ptr->ssid_len); + join.essid_size = ptr->ssid_len; + join.bss_type = (dev->iw_mode == IW_MODE_ADHOC ? 1 : 2); + join.channel = ptr->channel; + join.timeout = cpu_to_le16(2000); + + dbg(DBG_PROGRESS, "%s join addr %s ssid %s type %d ch %d timeout %d", + dev->netdev->name, mac2str(join.bssid), + join.essid, join.bss_type, join.channel, le16_to_cpu(join.timeout)); + return set_card_command(dev->udev, CMD_JOIN, + (unsigned char*)&join, + sizeof(struct at76c503_join)); +} + +/** + * restart_timeout - timer callback + * @par: device pointer + * + * The restart timer timed out. 
Take appropriate action (pass the + * mallet) + */ + +static void restart_timeout(unsigned long par) +{ + struct at76c503 *dev = (struct at76c503 *)par; + defer_kevent(dev, KEVENT_RESTART); +} + +/** + * bss_list_timeout - bss timer callback + * + * We need to check the bss_list for old entries + */ + +static void bss_list_timeout(unsigned long par) +{ + struct at76c503 *dev = (struct at76c503 *)par; + unsigned long flags; + struct list_head *lptr, *nptr; + struct bss_info *ptr; + + spin_lock_irqsave(&dev->bss_list_spinlock, flags); + + list_for_each_safe(lptr, nptr, &dev->bss_list) { + + ptr = list_entry(lptr, struct bss_info, list); + + /* FIXME: Do we need a ref count here really so we + can nicely handle BSS joins etc without races */ + if (ptr != dev->curr_bss && ptr != dev->new_bss && + time_after(jiffies, ptr->last_rx+BSS_LIST_TIMEOUT)) { + dbg(DBG_BSS_TABLE_RM, + "%s: bss_list: removing old BSS %s ch %d", + dev->netdev->name, mac2str(ptr->bssid), ptr->channel); + list_del(&ptr->list); + kfree(ptr); + } + } + spin_unlock_irqrestore(&dev->bss_list_spinlock, flags); + /* restart the timer */ + mod_timer(&dev->bss_list_timer, jiffies+BSS_LIST_TIMEOUT); + +} + +/** + * mgmt_timeout - management timeout callback + * @par: device pointer + * + * We got a timeout for a infrastructure mgmt packet + */ + +void mgmt_timeout(unsigned long par) +{ + struct at76c503 *dev = (struct at76c503 *)par; + defer_kevent(dev, KEVENT_MGMT_TIMEOUT); +} + +/** + * handle_mgmt_timeout - management event timed out + * @dev: Radio device + * + * The deferred procedure called from kevent() when a management + * timeout has occurred. What is actually required depends on the + * current mode we are in. It is our job to handle rescanning, + * lot beacons and authentication timeouts. + */ + +void handle_mgmt_timeout(struct at76c503 *dev) +{ + + if (dev->istate != SCANNING) + /* this is normal behaviour in state SCANNING ... */ + dbg(DBG_PROGRESS, "%s: timeout, state %d", dev->netdev->name, + dev->istate); + + switch(dev->istate) { + + case SCANNING: /* we use the mgmt_timer to delay the next scan for some time */ + defer_kevent(dev, KEVENT_SCAN); + break; + + case JOINING: + assert(0); + break; + + case CONNECTED: /* we haven't received the beacon of this BSS for + BEACON_TIMEOUT seconds */ + info("%s: lost beacon bssid %s", + dev->netdev->name, mac2str(dev->curr_bss->bssid)); + /* jal: starting mgmt_timer in adhoc mode is questionable, + but I'll leave it here to track down another lockup problem */ + if (dev->iw_mode != IW_MODE_ADHOC) { + netif_carrier_off(dev->netdev); + netif_stop_queue(dev->netdev); + NEW_STATE(dev,SCANNING); + defer_kevent(dev,KEVENT_SCAN); + } + break; + + case AUTHENTICATING: + if (dev->retries-- >= 0) { + auth_req(dev, dev->curr_bss, 1, NULL); + mod_timer(&dev->mgmt_timer, jiffies+HZ); + } else { + /* try to get next matching BSS */ + NEW_STATE(dev,JOINING); + defer_kevent(dev,KEVENT_JOIN); + } + break; + + case ASSOCIATING: + if (dev->retries-- >= 0) { + assoc_req(dev,dev->curr_bss); + mod_timer(&dev->mgmt_timer, jiffies+HZ); + } else { + /* jal: TODO: we may be authenticated to several + BSS and may try to associate to the next of them here + in the future ... */ + + /* try to get next matching BSS */ + NEW_STATE(dev,JOINING); + defer_kevent(dev,KEVENT_JOIN); + } + break; + + case REASSOCIATING: + if (dev->retries-- >= 0) + reassoc_req(dev, dev->curr_bss, dev->new_bss); + else { + /* we disassociate from the curr_bss and + scan again ... 
*/ + NEW_STATE(dev,DISASSOCIATING); + dev->retries = DISASSOC_RETRIES; + disassoc_req(dev, dev->curr_bss); + } + mod_timer(&dev->mgmt_timer, jiffies+HZ); + break; + + case DISASSOCIATING: + if (dev->retries-- >= 0) { + disassoc_req(dev, dev->curr_bss); + mod_timer(&dev->mgmt_timer,jiffies+HZ); + } else { + /* we scan again ... */ + NEW_STATE(dev,SCANNING); + defer_kevent(dev,KEVENT_SCAN); + } + break; + + case INIT: + break; + + default: + assert(0); + } /* switch (dev->istate) */ + +} + +/** + * calc_padding - padding size + * @wlen: length + * + * calc. the padding from txbuf->wlength (which excludes the USB TX + * header) guess this is needed to compensate a flaw in the AT76C503A + * USB part ... + */ + +static int calc_padding(int wlen) +{ + /* add the USB TX header */ + wlen += AT76C503_TX_HDRLEN; + + wlen = wlen % 64; + + if (wlen < 50) + return 50 - wlen; + + if (wlen >=61) + return 64 + 50 - wlen; + + return 0; +} + +/** + * send_mgmt_bulk - send management bulk frame + * @dev: Radio device + * @txbuf: the buffer + * + * send a management frame on bulk-out. txbuf->wlength must be set + * (in LE format !). Handles all the locking against the main packet + * transmit paths for you. + */ + +static int send_mgmt_bulk(struct at76c503 *dev, struct at76c503_tx_buffer *txbuf) +{ + unsigned long flags; + char obuf[3*64+1] __attribute__ ((unused)); + int ret = 0; + int urb_status; + void *oldbuf = NULL; + + netif_carrier_off(dev->netdev); /* disable running netdev watchdog */ + netif_stop_queue(dev->netdev); /* stop tx data packets */ + + spin_lock_irqsave(&dev->mgmt_spinlock, flags); + + if ((urb_status = dev->write_urb->status) == USB_ST_URB_PENDING) { + oldbuf = dev->next_mgmt_bulk; /* to kfree below */ + dev->next_mgmt_bulk = txbuf; + txbuf = NULL; + } + spin_unlock_irqrestore(&dev->mgmt_spinlock, flags); + + if (oldbuf) { + /* a data/mgmt tx is already pending in the URB - + if this is no error in some situations we must + implement a queue or silently modify the old msg */ + err("%s: %s removed pending mgmt buffer %s", + dev->netdev->name, __FUNCTION__, + hex2str(obuf, (u8 *)dev->next_mgmt_bulk, 64,' ')); + kfree(dev->next_mgmt_bulk); + } + + if (txbuf) { + + txbuf->tx_rate = 0; + txbuf->padding = cpu_to_le16(calc_padding(le16_to_cpu(txbuf->wlength))); + + if (dev->next_mgmt_bulk) { + err("%s: %s URB status %d, but mgmt is pending", + dev->netdev->name, __FUNCTION__, urb_status); + } + + dbg(DBG_TX_MGMT, "%s: tx mgmt: wlen %d tx_rate %d pad %d %s", + dev->netdev->name, le16_to_cpu(txbuf->wlength), + txbuf->tx_rate, le16_to_cpu(txbuf->padding), + hex2str(obuf,txbuf->packet, + min((sizeof(obuf)-1)/2, + (size_t)le16_to_cpu(txbuf->wlength)),'\0')); + + /* txbuf was not consumed above -> send mgmt msg immediately */ + memcpy(dev->bulk_out_buffer, txbuf, + le16_to_cpu(txbuf->wlength) + AT76C503_TX_HDRLEN); + FILL_BULK_URB(dev->write_urb, dev->udev, + usb_sndbulkpipe(dev->udev, + dev->bulk_out_endpointAddr), + dev->bulk_out_buffer, + le16_to_cpu(txbuf->wlength) + + le16_to_cpu(txbuf->padding) + + AT76C503_TX_HDRLEN, + at76c503_write_bulk_callback, dev); + ret = usb_submit_urb(dev->write_urb); + if (ret) { + err("%s: %s error in tx submit urb: %d", + dev->netdev->name, __FUNCTION__, ret); + } + kfree(txbuf); + } /* if (txbuf) */ + return ret; +} + +/** + * disassoc_req - send disassociate + * @dev: Radio device + * @bss: BSS + * + * Send a disassociate to the BSS. 
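The management frames below (disassociate, authenticate, associate, reassociate) all follow the same construction pattern: kmalloc() an at76c503_tx_buffer with room for padding, fill in the 802.11 header and the frame-specific payload, set wlength, and hand the buffer to send_mgmt_bulk(). A deauthentication request is not among the helpers shown here, but built the same way it would look like the sketch below. The IEEE802_11_STYPE_DEAUTH constant is assumed to exist in ieee802_11.h, and the size is computed inline because the DEAUTH_FRAME_SIZE macro above refers to a misspelled struct name (ieee802_11_disauth_frame):

static int example_deauth_req(struct at76c503 *dev, struct bss_info *bss)
{
	struct at76c503_tx_buffer *tx_buffer;
	struct ieee802_11_mgmt *mgmt;
	struct ieee802_11_deauth_frame *req;
	int size = AT76C503_TX_HDRLEN + IEEE802_11_MGMT_HEADER_SIZE +
		sizeof(struct ieee802_11_deauth_frame);

	tx_buffer = kmalloc(size + MAX_PADDING_SIZE, GFP_ATOMIC);
	if (!tx_buffer)
		return -ENOMEM;

	mgmt = (struct ieee802_11_mgmt *)&(tx_buffer->packet);
	req = (struct ieee802_11_deauth_frame *)&(mgmt->data);

	/* 802.11 header, as in disassoc_req() below */
	mgmt->frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT |
				      IEEE802_11_STYPE_DEAUTH);	/* assumed constant */
	mgmt->duration_id = cpu_to_le16(0x8000);
	memcpy(mgmt->addr1, bss->bssid, ETH_ALEN);
	memcpy(mgmt->addr2, dev->netdev->dev_addr, ETH_ALEN);
	memcpy(mgmt->addr3, bss->bssid, ETH_ALEN);
	mgmt->seq_ctl = cpu_to_le16(0);

	req->reason = cpu_to_le16(1);	/* 1 == unspecified reason */

	/* wlength excludes the at76c503 USB tx header */
	tx_buffer->wlength = cpu_to_le16(size - AT76C503_TX_HDRLEN);

	return send_mgmt_bulk(dev, tx_buffer);
}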
+ */ + +static int disassoc_req(struct at76c503 *dev, struct bss_info *bss) +{ + struct at76c503_tx_buffer *tx_buffer; + struct ieee802_11_mgmt *mgmt; + struct ieee802_11_disassoc_frame *req; + + assert(bss != NULL); + if (bss == NULL) + return -EFAULT; + + tx_buffer = kmalloc(DISASSOC_FRAME_SIZE + MAX_PADDING_SIZE, + GFP_ATOMIC); + if (!tx_buffer) + return -ENOMEM; + + mgmt = (struct ieee802_11_mgmt *)&(tx_buffer->packet); + req = (struct ieee802_11_disassoc_frame *)&(mgmt->data); + + /* make wireless header */ + mgmt->frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT|IEEE802_11_STYPE_AUTH); + mgmt->duration_id = cpu_to_le16(0x8000); + memcpy(mgmt->addr1, bss->bssid, ETH_ALEN); + memcpy(mgmt->addr2, dev->netdev->dev_addr, ETH_ALEN); + memcpy(mgmt->addr3, bss->bssid, ETH_ALEN); + mgmt->seq_ctl = cpu_to_le16(0); + + req->reason = 0; + + /* init. at76c503 tx header */ + tx_buffer->wlength = cpu_to_le16(DISASSOC_FRAME_SIZE - + AT76C503_TX_HDRLEN); + + dbg(DBG_TX_MGMT, "%s: DisAssocReq bssid %s", + dev->netdev->name, mac2str(mgmt->addr3)); + + /* either send immediately (if no data tx is pending + or put it in pending list */ + return send_mgmt_bulk(dev, tx_buffer); + +} + +/** + * auth_req - authentication request + * @dev: radio device + * @bss: BSS + * @seq_nr: sequence + * @challenge: challenge string + * + * Challenge is the challenge string (in TLV format) + * we got with seq_nr 2 for shared secret authentication only and + * send in seq_nr 3 WEP encrypted to prove we have the correct WEP + * key; otherwise it is NULL + */ + +static int auth_req(struct at76c503 *dev, struct bss_info *bss, int seq_nr, u8 *challenge) +{ + struct at76c503_tx_buffer *tx_buffer; + struct ieee802_11_mgmt *mgmt; + struct ieee802_11_auth_frame *req; + + int buf_len = (seq_nr != 3 ? AUTH_FRAME_SIZE : + AUTH_FRAME_SIZE + 1 + 1 + challenge[1]); + + assert(bss != NULL); + assert(seq_nr != 3 || challenge != NULL); + + tx_buffer = kmalloc(buf_len + MAX_PADDING_SIZE, GFP_ATOMIC); + if (!tx_buffer) + return -ENOMEM; + + mgmt = (struct ieee802_11_mgmt *)&(tx_buffer->packet); + req = (struct ieee802_11_auth_frame *)&(mgmt->data); + + /* make wireless header */ + /* first auth msg is not encrypted, only the second (seq_nr == 3) */ + mgmt->frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT | IEEE802_11_STYPE_AUTH | + (seq_nr == 3 ? IEEE802_11_FCTL_WEP : 0)); + + mgmt->duration_id = cpu_to_le16(0x8000); + memcpy(mgmt->addr1, bss->bssid, ETH_ALEN); + memcpy(mgmt->addr2, dev->netdev->dev_addr, ETH_ALEN); + memcpy(mgmt->addr3, bss->bssid, ETH_ALEN); + mgmt->seq_ctl = cpu_to_le16(0); + + req->algorithm = cpu_to_le16(dev->auth_mode); + req->seq_nr = cpu_to_le16(seq_nr); + req->status = cpu_to_le16(0); + + if (seq_nr == 3) + memcpy(req->challenge, challenge, 1+1+challenge[1]); + + /* init. at76c503 tx header */ + tx_buffer->wlength = cpu_to_le16(buf_len - AT76C503_TX_HDRLEN); + + dbg(DBG_TX_MGMT, "%s: AuthReq bssid %s alg %d seq_nr %d", + dev->netdev->name, mac2str(mgmt->addr3), + le16_to_cpu(req->algorithm), le16_to_cpu(req->seq_nr)); + if (seq_nr == 3) { + char obuf[18*3] __attribute__ ((unused)); + dbg(DBG_TX_MGMT, "%s: AuthReq challenge: %s ...", + dev->netdev->name, + hex2str(obuf, req->challenge, sizeof(obuf)/3,' ')); + } + + /* either send immediately (if no data tx is pending + or put it in pending list */ + return send_mgmt_bulk(dev, tx_buffer); + +} + +/** + * assoc_req - send disassociate + * @dev: Radio device + * @bss: BSS + * + * Send an associate to the BSS. 
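The SSID and supported-rates fields appended by assoc_req() below (and by reassoc_req() after it) are plain 802.11 information elements, i.e. type/length/value triples. A hypothetical helper makes the encoding explicit:

static u8 *example_add_tlv(u8 *tlv, u8 id, const u8 *val, u8 len)
{
	*tlv++ = id;		/* element ID, e.g. IE_ID_SSID */
	*tlv++ = len;		/* length of the value field */
	memcpy(tlv, val, len);	/* the value itself */
	return tlv + len;	/* position for the next element */
}

With such a helper the two open-coded blocks would reduce to example_add_tlv(tlv, IE_ID_SSID, bss->ssid, bss->ssid_len) followed by example_add_tlv(tlv, IE_ID_SUPPORTED_RATES, hw_rates, sizeof(hw_rates)).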
+ */ + +static int assoc_req(struct at76c503 *dev, struct bss_info *bss) +{ + struct at76c503_tx_buffer *tx_buffer; + struct ieee802_11_mgmt *mgmt; + struct ieee802_11_assoc_req *req; + u8 *tlv; + + assert(bss != NULL); + + tx_buffer = kmalloc(ASSOCREQ_MAX_SIZE + MAX_PADDING_SIZE, + GFP_ATOMIC); + if (!tx_buffer) + return -ENOMEM; + + mgmt = (struct ieee802_11_mgmt *)&(tx_buffer->packet); + req = (struct ieee802_11_assoc_req *)&(mgmt->data); + tlv = req->data; + + /* make wireless header */ + mgmt->frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT|IEEE802_11_STYPE_ASSOC_REQ); + + mgmt->duration_id = cpu_to_le16(0x8000); + memcpy(mgmt->addr1, bss->bssid, ETH_ALEN); + memcpy(mgmt->addr2, dev->netdev->dev_addr, ETH_ALEN); + memcpy(mgmt->addr3, bss->bssid, ETH_ALEN); + mgmt->seq_ctl = cpu_to_le16(0); + + /* we must set the Privacy bit in the capabilites to assure an + Agere-based AP with optional WEP transmits encrypted frames + to us. AP only set the Privacy bit in their capabilities + if WEP is mandatory in the BSS! */ + req->capability = cpu_to_le16(bss->capa | + (dev->wep_enabled ? IEEE802_11_CAPA_PRIVACY : 0) | + (dev->preamble_type == PREAMBLE_TYPE_SHORT ? + IEEE802_11_CAPA_SHORT_PREAMBLE : 0)); + + req->listen_interval = cpu_to_le16(2 * bss->beacon_interval); + + /* write TLV data elements */ + + *tlv++ = IE_ID_SSID; + *tlv++ = bss->ssid_len; + memcpy(tlv, bss->ssid, bss->ssid_len); + tlv += bss->ssid_len; + + *tlv++ = IE_ID_SUPPORTED_RATES; + *tlv++ = sizeof(hw_rates); + memcpy(tlv, hw_rates, sizeof(hw_rates)); + tlv += sizeof(hw_rates); /* tlv points behind the supp_rates field */ + + /* init. at76c503 tx header */ + tx_buffer->wlength = cpu_to_le16(tlv-(u8 *)mgmt); + + { + /* output buffer for ssid and rates */ + char ossid[IW_ESSID_MAX_SIZE+1] __attribute__ ((unused)); + char orates[4*2+1] __attribute__ ((unused)); + int len; + + tlv = req->data; + len = min(sizeof(ossid)-1,(size_t)*(tlv+1)); + memcpy(ossid, tlv+2, len); + ossid[len] = '\0'; + tlv += (1 + 1 + *(tlv+1)); /* points to IE of rates now */ + dbg(DBG_TX_MGMT, "%s: AssocReq bssid %s capa x%04x ssid %s rates %s", + dev->netdev->name, mac2str(mgmt->addr3), + le16_to_cpu(req->capability), ossid, + hex2str(orates,tlv+2,min((sizeof(orates)-1)/2,(size_t)*(tlv+1)), + '\0')); + } + + /* either send immediately (if no data tx is pending + or put it in pending list */ + return send_mgmt_bulk(dev, tx_buffer); + +} + +/** + * reassoc_req - send disassociate + * @dev: Radio device + * @curr_bss: Current BSS + * @new_bss: New BSS + * + * We are currently associated to curr_bss and want to reassoc + * to new_bss. Send a reassociate request + */ + +static int reassoc_req(struct at76c503 *dev, struct bss_info *curr_bss, + struct bss_info *new_bss) +{ + struct at76c503_tx_buffer *tx_buffer; + struct ieee802_11_mgmt *mgmt; + struct ieee802_11_reassoc_req *req; + + u8 *tlv; + + assert(curr_bss != NULL); + assert(new_bss != NULL); + if (curr_bss == NULL || new_bss == NULL) + return -EFAULT; + + tx_buffer = kmalloc(REASSOCREQ_MAX_SIZE + MAX_PADDING_SIZE, + GFP_ATOMIC); + if (!tx_buffer) + return -ENOMEM; + + mgmt = (struct ieee802_11_mgmt *)&(tx_buffer->packet); + req = (struct ieee802_11_reassoc_req *)&(mgmt->data); + tlv = req->data; + + /* make wireless header */ + /* jal: encrypt this packet if wep_enabled is TRUE ??? 
*/ + mgmt->frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT|IEEE802_11_STYPE_REASSOC_REQ); + mgmt->duration_id = cpu_to_le16(0x8000); + memcpy(mgmt->addr1, new_bss->bssid, ETH_ALEN); + memcpy(mgmt->addr2, dev->netdev->dev_addr, ETH_ALEN); + memcpy(mgmt->addr3, new_bss->bssid, ETH_ALEN); + mgmt->seq_ctl = cpu_to_le16(0); + + /* we must set the Privacy bit in the capabilites to assure an + Agere-based AP with optional WEP transmits encrypted frames + to us. AP only set the Privacy bit in their capabilities + if WEP is mandatory in the BSS! */ + req->capability = cpu_to_le16(new_bss->capa | + (dev->wep_enabled ? IEEE802_11_CAPA_PRIVACY : 0) | + (dev->preamble_type == PREAMBLE_TYPE_SHORT ? + IEEE802_11_CAPA_SHORT_PREAMBLE : 0)); + + req->listen_interval = cpu_to_le16(2 * new_bss->beacon_interval); + + memcpy(req->curr_ap, curr_bss->bssid, ETH_ALEN); + + /* write TLV data elements */ + + *tlv++ = IE_ID_SSID; + *tlv++ = new_bss->ssid_len; + memcpy(tlv,new_bss->ssid, new_bss->ssid_len); + tlv += new_bss->ssid_len; + + *tlv++ = IE_ID_SUPPORTED_RATES; + *tlv++ = sizeof(hw_rates); + memcpy(tlv, hw_rates, sizeof(hw_rates)); + tlv += sizeof(hw_rates); /* tlv points behind the supp_rates field */ + + /* init. at76c503 tx header */ + tx_buffer->wlength = cpu_to_le16(tlv-(u8 *)mgmt); + + { + /* output buffer for ssid and rates */ + char ossid[IW_ESSID_MAX_SIZE+1] __attribute__ ((unused)); + char orates[4*2+1] __attribute__ ((unused)); + char ocurr[6*3+1] __attribute__ ((unused)); + tlv = req->data; + memcpy(ossid, tlv+2, min(sizeof(ossid),(size_t)*(tlv+1))); + ossid[sizeof(ossid)-1] = '\0'; + tlv += (1 + 1 + *(tlv+1)); /* points to IE of rates now */ + dbg(DBG_TX_MGMT, "%s: ReAssocReq curr %s new %s capa x%04x ssid %s rates %s", + dev->netdev->name, + hex2str(ocurr, req->curr_ap, ETH_ALEN, ':'), + mac2str(mgmt->addr3), le16_to_cpu(req->capability), ossid, + hex2str(orates,tlv+2,min((sizeof(orates)-1)/2,(size_t)*(tlv+1)), + '\0')); + } + + /* either send immediately (if no data tx is pending + or put it in pending list */ + return send_mgmt_bulk(dev, tx_buffer); + +} /* reassoc_req */ + + +/* shamelessly copied from usbnet.c (oku) */ + +/** + * defer_kevent - queue and event + * @dev: radio device + * @flag: event bit + * + * Queue an event for processing. We don't count events so + * multiple queueings of the same event may cause only one + * processing of the event. Each event type will however be + * processed at least once. + */ + +static void defer_kevent (struct at76c503 *dev, int flag) +{ + set_bit (flag, &dev->kevent_flags); + if (!schedule_task (&dev->kevent)) + dbg(DBG_KEVENT, "%s: kevent %d may have been dropped", + dev->netdev->name, flag); + else + dbg(DBG_KEVENT, "%s: kevent %d scheduled", + dev->netdev->name, flag); +} + +static void kevent(void *data) +{ + struct at76c503 *dev = data; + int ret; + unsigned long flags; + + /* on errors, bits aren't cleared, but no reschedule + is done. So work will be done next time something + else has to be done. This is ugly. TODO! (oku) */ + + dbg(DBG_KEVENT, "%s: kevent entry flags=x%x", dev->netdev->name, + dev->kevent_flags); + + down(&dev->sem); + + if(test_bit(KEVENT_CTRL_HALT, &dev->kevent_flags)){ + /* this never worked... 
but it seems + that it's rarely necessary, if at all (oku) */ + ret = usb_clear_halt(dev->udev, + usb_sndctrlpipe (dev->udev, 0)); + if(ret < 0) + err("usb_clear_halt() failed: %d", ret); + else{ + clear_bit(KEVENT_CTRL_HALT, &dev->kevent_flags); + info("usb_clear_halt() succesful"); + } + } + if(test_bit(KEVENT_NEW_BSS, &dev->kevent_flags)){ + struct net_device *netdev = dev->netdev; + struct mib_mac_mgmt *mac_mgmt = kmalloc(sizeof(struct mib_mac_mgmt), GFP_KERNEL); + struct set_mib_buffer mib_buf; + + if(mac_mgmt == NULL) + { + err("%s: out of memory", netdev->name); + goto new_bss_clean; + } + + ret = get_mib(dev->udev, MIB_MAC_MGMT, (u8*)mac_mgmt, + sizeof(struct mib_mac_mgmt)); + if(ret < 0){ + err("%s: get_mib failed: %d", netdev->name, ret); + } + // usb_debug_data(__FUNCTION__, (unsigned char *)mac_mgmt, sizeof(struct mib_mac_mgmt)); + + else + { + dbg(DBG_PROGRESS, "ibss_change = 0x%2x", mac_mgmt->ibss_change); + memcpy(dev->bssid, mac_mgmt->current_bssid, ETH_ALEN); + dbg(DBG_PROGRESS, "using BSSID %s", mac2str(dev->bssid)); + + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC_MGMT; + mib_buf.size = 1; + mib_buf.index = IBSS_CHANGE_OK_OFFSET; + ret = set_mib(dev, &mib_buf); + if(ret < 0) { + err("%s: set_mib (ibss change ok) failed: %d", netdev->name, ret); + goto new_bss_clean; + } + clear_bit(KEVENT_NEW_BSS, &dev->kevent_flags); +new_bss_clean: + kfree(mac_mgmt); + } + } + if(test_bit(KEVENT_SET_PROMISC, &dev->kevent_flags)){ + info("%s: KEVENT_SET_PROMISC", dev->netdev->name); + + set_promisc(dev, dev->promisc); + clear_bit(KEVENT_SET_PROMISC, &dev->kevent_flags); + } + + if(test_bit(KEVENT_MGMT_TIMEOUT, &dev->kevent_flags)){ + clear_bit(KEVENT_MGMT_TIMEOUT, &dev->kevent_flags); + handle_mgmt_timeout(dev); + } + + /* check this _before_ KEVENT_JOIN, 'cause _JOIN sets _STARTIBSS bit */ + if (test_bit(KEVENT_STARTIBSS, &dev->kevent_flags)) { + struct set_mib_buffer mib_buf; + clear_bit(KEVENT_STARTIBSS, &dev->kevent_flags); + assert(dev->istate == STARTIBSS); + ret = start_ibss(dev); + if(ret < 0){ + err("%s: start_ibss failed: %d", dev->netdev->name, ret); + goto end_startibss; + } + + ret = wait_completion(dev, CMD_START_IBSS); + if (ret != CMD_STATUS_COMPLETE) { + err("%s start_ibss failed to complete,%d", + dev->netdev->name, ret); + goto end_startibss; + } + + ret = get_current_bssid(dev); + if(ret < 0) goto end_startibss; + + ret = get_current_channel(dev); + if(ret < 0) goto end_startibss; + + /* not sure what this is good for ??? */ + memset(&mib_buf, 0, sizeof(struct set_mib_buffer)); + mib_buf.type = MIB_MAC_MGMT; + mib_buf.size = 1; + mib_buf.index = IBSS_CHANGE_OK_OFFSET; + ret = set_mib(dev, &mib_buf); + if(ret < 0){ + err("%s: set_mib (ibss change ok) failed: %d", dev->netdev->name, ret); + goto end_startibss; + } + + netif_start_queue(dev->netdev); + } +end_startibss: + + /* check this _before_ KEVENT_SCAN, 'cause _SCAN sets _JOIN bit */ + if (test_bit(KEVENT_JOIN, &dev->kevent_flags)) { + clear_bit(KEVENT_JOIN, &dev->kevent_flags); + if (dev->istate == INIT) + goto end_join; + assert(dev->istate == JOINING); + + /* dev->curr_bss == NULL signals a new round, + starting with list_entry(dev->bss_list.next, ...) */ + + /* secure the access to dev->curr_bss ! 
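+		   (find_matching_bss() must run with bss_list_spinlock
+		   held, otherwise the bss timeout may free the entry it
+		   returns)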
*/ + spin_lock_irqsave(&dev->bss_list_spinlock, flags); + dev->curr_bss=find_matching_bss(dev, dev->curr_bss); + spin_unlock_irqrestore(&dev->bss_list_spinlock, flags); + + if (dev->curr_bss != NULL) { + if ((ret=join_bss(dev,dev->curr_bss)) < 0) { + err("%s: join_bss failed with %d", + dev->netdev->name, ret); + goto end_join; + } + + ret=wait_completion(dev,CMD_JOIN); + if (ret != CMD_STATUS_COMPLETE) { + if (ret != CMD_STATUS_TIME_OUT) + err("%s join_bss completed with %d", + dev->netdev->name, ret); + else + info("%s join_bss ssid %s timed out", + dev->netdev->name, + mac2str(dev->curr_bss->bssid)); + + /* retry next BSS immediately */ + defer_kevent(dev,KEVENT_JOIN); + goto end_join; + } + + /* here we have joined the (I)BSS */ + if (dev->iw_mode == IW_MODE_ADHOC) { + struct bss_info *bptr = dev->curr_bss; + NEW_STATE(dev,CONNECTED); + /* get ESSID, BSSID and channel for dev->curr_bss */ + dev->essid_size = bptr->ssid_len; + memcpy(dev->essid, bptr->ssid, bptr->ssid_len); + memcpy(dev->bssid, bptr->bssid, ETH_ALEN); + dev->channel = bptr->channel; + + netif_start_queue(dev->netdev); + /* just to be sure */ + del_timer_sync(&dev->mgmt_timer); + } else { + /* send auth req */ + NEW_STATE(dev,AUTHENTICATING); + auth_req(dev, dev->curr_bss, 1, NULL); + mod_timer(&dev->mgmt_timer, jiffies+HZ); + } + goto end_join; + } /* if (dev->curr_bss != NULL) */ + + /* here we haven't found a matching (i)bss ... */ + if (dev->iw_mode == IW_MODE_ADHOC) { + NEW_STATE(dev,STARTIBSS); + defer_kevent(dev,KEVENT_STARTIBSS); + goto end_join; + } + /* haven't found a matching BSS + in infra mode - use timer to try again in 10 seconds */ + NEW_STATE(dev,SCANNING); + mod_timer(&dev->mgmt_timer, jiffies+RESCAN_TIME*HZ); + } /* if (test_bit(KEVENT_JOIN, &dev->kevent_flags)) */ +end_join: + + if (test_bit(KEVENT_SCAN, &dev->kevent_flags)) { + clear_bit(KEVENT_SCAN, &dev->kevent_flags); + if (dev->istate == INIT) + goto end_scan; + assert(dev->istate == SCANNING); + + /* empty the driver's bss list */ + free_bss_list(dev); + + /* scan twice: first run with ProbeReq containing the + empty SSID, the second run with the real SSID. + APs in cloaked mode (e.g. Agere) will answer + in the second run with their real SSID. 
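+		   The first pass uses start_scan(dev, 0), the second
+		   start_scan(dev, 1) with the real SSID; both result
+		   sets are collected in dev->bss_list and dumped via
+		   dump_bss_table().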
*/ + + if ((ret=start_scan(dev, 0)) < 0) { + err("%s: start_scan failed with %d", + dev->netdev->name, ret); + goto end_scan; + } + if ((ret=wait_completion(dev,CMD_SCAN)) != + CMD_STATUS_COMPLETE) { + err("%s start_scan completed with %d", + dev->netdev->name, ret); + goto end_scan; + } + + /* dump the results of the scan with ANY ssid */ + dump_bss_table(dev, 0); + if ((ret=start_scan(dev, 1)) < 0) { + err("%s: 2.start_scan failed with %d", + dev->netdev->name, ret); + goto end_scan; + } + if ((ret=wait_completion(dev,CMD_SCAN)) != + CMD_STATUS_COMPLETE) { + err("%s 2.start_scan completed with %d", + dev->netdev->name, ret); + goto end_scan; + } + + /* dump the results of the scan with real ssid */ + dump_bss_table(dev, 0); + NEW_STATE(dev,JOINING); + assert(dev->curr_bss == NULL); /* done in free_bss_list, + find_bss will start with first bss */ + /* call join_bss immediately after + re-run of all other threads in kevent */ + defer_kevent(dev,KEVENT_JOIN); + } /* if (test_bit(KEVENT_SCAN, &dev->kevent_flags)) */ +end_scan: + + if (test_bit(KEVENT_SUBMIT_RX, &dev->kevent_flags)) { + clear_bit(KEVENT_SUBMIT_RX, &dev->kevent_flags); + submit_rx_urb(dev); + } + + + if (test_bit(KEVENT_RESTART, &dev->kevent_flags)) { + clear_bit(KEVENT_RESTART, &dev->kevent_flags); + assert(dev->istate == INIT); + startup_device(dev); + /* scan again in a second */ + NEW_STATE(dev,SCANNING); + mod_timer(&dev->mgmt_timer, jiffies+HZ); + } + + if (test_bit(KEVENT_ASSOC_DONE, &dev->kevent_flags)) { + clear_bit(KEVENT_ASSOC_DONE, &dev->kevent_flags); + assert(dev->istate == ASSOCIATING || + dev->istate == REASSOCIATING); + + if (dev->iw_mode == IW_MODE_INFRA) { + assert(dev->curr_bss != NULL); + if (dev->curr_bss != NULL && + dev->pm_mode != PM_ACTIVE) { + /* calc the listen interval in units of + beacon intervals of the curr_bss */ + dev->pm_period_beacon = (dev->pm_period_us >> 10) / + dev->curr_bss->beacon_interval; + +#if 0 /* only to check if we need to set the listen interval here + or could do it in the (re)assoc_req parameter */ + dump_mib_mac(dev); +#endif + + if (dev->pm_period_beacon < 2) + dev->pm_period_beacon = 2; + else + if ( dev->pm_period_beacon > 0xffff) + dev->pm_period_beacon = 0xffff; + + dbg(DBG_PM, "%s: pm_mode %d assoc id x%x listen int %d", + dev->netdev->name, dev->pm_mode, + dev->curr_bss->assoc_id, dev->pm_period_beacon); + + set_associd(dev, dev->curr_bss->assoc_id); + set_listen_interval(dev, (u16)dev->pm_period_beacon); + set_pm_mode(dev, dev->pm_mode); +#if 0 + dump_mib_mac(dev); + dump_mib_mac_mgmt(dev); +#endif + } + } + + netif_carrier_on(dev->netdev); + netif_wake_queue(dev->netdev); /* _start_queue ??? 
*/ + NEW_STATE(dev,CONNECTED); + dbg(DBG_PROGRESS, "%s: connected to BSSID %s", + dev->netdev->name, mac2str(dev->curr_bss->bssid)); + } + + up(&dev->sem); + + dbg(DBG_KEVENT, "%s: kevent exit flags=x%x", dev->netdev->name, + dev->kevent_flags); + + return; +} + +/** + * essid_matched - check ESSID matches + * @dev: radio device + * @bss: BSS + * + * Check that the BSS matches the ESSID we have set, or that we + * we are using wildcard BSS + */ + +static int essid_matched(struct at76c503 *dev, struct bss_info *ptr) +{ + /* common criteria for both modi */ + + int retval = (dev->essid_size == 0 /* ANY ssid */ || + (dev->essid_size == ptr->ssid_len && + !memcmp(dev->essid, ptr->ssid, ptr->ssid_len))); + if (!retval) + dbg(DBG_BSS_MATCH, "%s bss table entry %p: essid didn't match", + dev->netdev->name, ptr); + return retval; +} + +/** + * mode_matched - check mode compatibility + * @dev: radio device + * @ptr; BSS info + * + * Check if the BSS we have located is in the same mode (adhoc or + * managed) that we wish to use. + */ + +static inline int mode_matched(struct at76c503 *dev, struct bss_info *ptr) +{ + int retval; + + if (dev->iw_mode == IW_MODE_ADHOC) + retval = ptr->capa & IEEE802_11_CAPA_IBSS; + else + retval = ptr->capa & IEEE802_11_CAPA_ESS; + if (!retval) + dbg(DBG_BSS_MATCH, "%s bss table entry %p: mode didn't match", + dev->netdev->name, ptr); + return retval; +} + +/** + * rates_matches - check rate compatibility + * @dev: radio device + * @ptr: BSS + * + * Check that the radio settings we have match with the BSS. We + * must support a matching speed rate and also require the BSS + * supports short preambles if we are using that feature. + * + * Q: should we turn it off if we can't find matches ? + */ + +static int rates_matched(struct at76c503 *dev, struct bss_info *ptr) +{ + int i; + u8 *rate; + + for(i=0,rate=ptr->rates; i < ptr->rates_len; i++,rate++) + if (*rate & 0x80) { + /* this is a basic rate we have to support + (see IEEE802.11, ch. 7.3.2.2) */ + if (*rate != (0x80|hw_rates[0]) && *rate != (0x80|hw_rates[1]) && + *rate != (0x80|hw_rates[2]) && *rate != (0x80|hw_rates[3])) { + dbg(DBG_BSS_MATCH, + "%s: bss table entry %p: basic rate %02x not supported", + dev->netdev->name, ptr, *rate); + return 0; + } + } + /* if we use short preamble, the bss must support it */ + if (dev->preamble_type == PREAMBLE_TYPE_SHORT && + !(ptr->capa & IEEE802_11_CAPA_SHORT_PREAMBLE)) { + dbg(DBG_BSS_MATCH, "%s: %p does not support short preamble", + dev->netdev->name, ptr); + return 0; + } else + return 1; +} + +/** + * wep_matched - check compatible WEP + * @dev: radio device + * @ptr: BSS pointer + * + * Check if the WEP matches for this BSS. If the BSS wants encryption + * and we are not doing encryption we cannot use this BSS + */ + +static inline int wep_matched(struct at76c503 *dev, struct bss_info *ptr) +{ + if (!dev->wep_enabled && (ptr->capa & IEEE802_11_CAPA_PRIVACY)) { + /* we have disabled WEP, but the BSS signals privacy */ + dbg(DBG_BSS_MATCH, "%s: bss table entry %p: requires encryption", + dev->netdev->name, ptr); + return 0; + } + /* otherwise if the BSS does not signal privacy it may well + accept encrypted packets from us ... 
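+	   (the only combination rejected here is a BSS that requires
+	   privacy while our own WEP is disabled)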
	   */
+	return 1;
+}
+
+/**
+ * dump_bss_table - debug bss table
+ * @dev: radio device
+ * @force_output: if set, dump regardless of the debug flags
+ *
+ * Dump the BSS table of the device if we are debugging the BSS
+ * code, or if force_output is set
+ */
+
+static void dump_bss_table(struct at76c503 *dev, int force_output)
+{
+	struct bss_info *ptr;
+	/* hex dump output buffer for debug */
+	char hexssid[IW_ESSID_MAX_SIZE*2+1] __attribute__ ((unused));
+	char hexrates[BSS_LIST_MAX_RATE_LEN*3+1] __attribute__ ((unused));
+	unsigned long flags;
+	struct list_head *lptr;
+
+	if ((debug & DBG_BSS_TABLE) || (force_output)) {
+		spin_lock_irqsave(&dev->bss_list_spinlock, flags);
+
+		dbg_uc("%s BSS table (curr=%p, new=%p):", dev->netdev->name,
+		       dev->curr_bss, dev->new_bss);
+
+		list_for_each(lptr, &dev->bss_list) {
+			ptr = list_entry(lptr, struct bss_info, list);
+			dbg_uc("0x%p: bssid %s channel %d ssid %s (%s)"
+			       " capa x%04x rates %s rssi %d link %d noise %d",
+			       ptr, mac2str(ptr->bssid),
+			       ptr->channel,
+			       ptr->ssid,
+			       hex2str(hexssid,ptr->ssid,ptr->ssid_len,'\0'),
+			       le16_to_cpu(ptr->capa),
+			       hex2str(hexrates, ptr->rates,
+				       ptr->rates_len, ' '),
+			       ptr->rssi, ptr->link_qual, ptr->noise_level);
+		}
+
+		spin_unlock_irqrestore(&dev->bss_list_spinlock, flags);
+	}
+}
+
+/**
+ * find_matching_bss - find a suitable BSS
+ * @dev: radio device
+ * @last: last BSS tried; NULL signals a new round
+ *
+ * Try to find a matching bss in dev->bss_list, starting after the
+ * entry @last. Returns a pointer to a matching bss in the list or
+ * NULL if none is found. last == NULL signals a new round, starting
+ * with list_entry(dev->bss_list.next, ...). This function must be
+ * called with dev->bss_list_spinlock held, otherwise the timeout
+ * on a bss may remove the newly chosen entry!
+ */
+
+static struct bss_info *find_matching_bss(struct at76c503 *dev,
+					  struct bss_info *last)
+{
+	struct bss_info *ptr = NULL;
+	struct list_head *curr;
+
+	curr = last != NULL ? last->list.next : dev->bss_list.next;
+	while (curr != &dev->bss_list) {
+		ptr = list_entry(curr, struct bss_info, list);
+		if (essid_matched(dev,ptr) &&
+		    mode_matched(dev,ptr) &&
+		    wep_matched(dev,ptr) &&
+		    rates_matched(dev,ptr))
+			break;
+		curr = curr->next;
+	}
+
+	if (curr == &dev->bss_list)
+		ptr = NULL;
+	/* otherwise ptr points to the struct bss_info we have chosen */
+
+	dbg(DBG_BSS_TABLE, "%s %s: returned %p", dev->netdev->name,
+	    __FUNCTION__, ptr);
+	return ptr;
+}
+
+/**
+ * rx_mgmt_assoc - association response received
+ * @dev: radio device
+ * @buf: network buffer
+ *
+ * We received an association response. If we were trying to
+ * associate and the response is ok then we can update our status
+ * and head for connected state.
If we get an associate response + * that doesn't like us we go back to looking for a node to join + */ + +static void rx_mgmt_assoc(struct at76c503 *dev, + struct at76c503_rx_buffer *buf) +{ + struct ieee802_11_mgmt *mgmt = (struct ieee802_11_mgmt *)buf->packet; + struct ieee802_11_assoc_resp *resp = + (struct ieee802_11_assoc_resp *)mgmt->data; + char orates[2*8+1] __attribute__((unused)); + u16 assoc_id = le16_to_cpu(resp->assoc_id); + u16 status = le16_to_cpu(resp->status); + u16 capa = le16_to_cpu(resp->capability); + dbg(DBG_RX_MGMT, "%s: rx AssocResp bssid %s capa x%04x status x%04x " + "assoc_id x%04x rates %s", + dev->netdev->name, mac2str(mgmt->addr3), capa, status, assoc_id, + hex2str(orates, resp->data+2, + min((size_t)*(resp->data+1),(sizeof(orates)-1)/2), '\0')); + + if (dev->istate == ASSOCIATING) { + + assert(dev->curr_bss != NULL); + if (dev->curr_bss == NULL) + return; + + if (status == IEEE802_11_STATUS_SUCCESS) { + struct bss_info *ptr = dev->curr_bss; + ptr->assoc_id = assoc_id & 0x3fff; + /* update iwconfig params */ + memcpy(dev->bssid, ptr->bssid, ETH_ALEN); + memcpy(dev->essid, ptr->ssid, ptr->ssid_len); + dev->essid_size = ptr->ssid_len; + dev->channel = ptr->channel; + defer_kevent(dev,KEVENT_ASSOC_DONE); + } else { + NEW_STATE(dev,JOINING); + defer_kevent(dev,KEVENT_JOIN); + } + del_timer_sync(&dev->mgmt_timer); + } else + info("%s: AssocResp in state %d ignored", + dev->netdev->name, dev->istate); +} + +/** + * rx_mgmt_reassoc - reassociate frame received + * @dev: radio device + * @buf: network buffer + * + * A reassociate frame has arrived. If we were trying to reassociate + * then we go back to connected state and all is good. In any other + * state this frame really doesn't mean anything so its bitbucketed + */ + +static void rx_mgmt_reassoc(struct at76c503 *dev, + struct at76c503_rx_buffer *buf) +{ + struct ieee802_11_mgmt *mgmt = (struct ieee802_11_mgmt *)buf->packet; + struct ieee802_11_assoc_resp *resp = + (struct ieee802_11_assoc_resp *)mgmt->data; + char orates[2*8+1] __attribute__((unused)); + unsigned long flags; + u16 capa = le16_to_cpu(resp->capability); + u16 status = le16_to_cpu(resp->status); + u16 assoc_id = le16_to_cpu(resp->assoc_id); + + dbg(DBG_RX_MGMT, "%s: rx ReAssocResp bssid %s capa x%04x status x%04x " + "assoc_id x%04x rates %s", + dev->netdev->name, mac2str(mgmt->addr3), capa, status, assoc_id, + hex2str(orates, resp->data+2, + min((size_t)*(resp->data+1),(sizeof(orates)-1)/2), '\0')); + + if (dev->istate == REASSOCIATING) { + + assert(dev->new_bss != NULL); + if (dev->new_bss == NULL) + return; + + if (status == IEEE802_11_STATUS_SUCCESS) { + struct bss_info *bptr = dev->new_bss; + bptr->assoc_id = assoc_id; + NEW_STATE(dev,CONNECTED); + + spin_lock_irqsave(&dev->bss_list_spinlock, flags); + dev->curr_bss = dev->new_bss; + dev->new_bss = NULL; + spin_unlock_irqrestore(&dev->bss_list_spinlock, flags); + + /* get ESSID, BSSID and channel for dev->curr_bss */ + dev->essid_size = bptr->ssid_len; + memcpy(dev->essid, bptr->ssid, bptr->ssid_len); + memcpy(dev->bssid, bptr->bssid, ETH_ALEN); + dev->channel = bptr->channel; + dbg(DBG_PROGRESS, "%s: reassociated to BSSID %s", + dev->netdev->name, mac2str(dev->bssid)); + defer_kevent(dev, KEVENT_ASSOC_DONE); + } else { + del_timer_sync(&dev->mgmt_timer); + NEW_STATE(dev,JOINING); + defer_kevent(dev,KEVENT_JOIN); + } + } else + info("%s: ReAssocResp in state %d ignored", + dev->netdev->name, dev->istate); +} + +/** + * rx_mgmt_disassoc - disassociate frame received + * @dev: radio device + * 
@buf: network buffer + * + * We have received a disassociate frame. If it is the BSS we + * are connected to or trying to finish associating with then + * we go back to trying to join + */ + +static void rx_mgmt_disassoc(struct at76c503 *dev, + struct at76c503_rx_buffer *buf) +{ + struct ieee802_11_mgmt *mgmt = (struct ieee802_11_mgmt *)buf->packet; + struct ieee802_11_disassoc_frame *resp = + (struct ieee802_11_disassoc_frame *)mgmt->data; + char obuf[ETH_ALEN*3] __attribute__ ((unused)); + + dbg(DBG_RX_MGMT, "%s: rx DisAssoc bssid %s reason x%04x destination %s", + dev->netdev->name, mac2str(mgmt->addr3), + le16_to_cpu(resp->reason), + hex2str(obuf, mgmt->addr1, ETH_ALEN, ':')); + + if (dev->istate == SCANNING || dev->istate == INIT) + return; + + assert(dev->curr_bss != NULL); + if (dev->curr_bss == NULL) + return; + + if (dev->istate == REASSOCIATING) { + assert(dev->new_bss != NULL); + if (dev->new_bss == NULL) + return; + } + + if (!memcmp(mgmt->addr3, dev->curr_bss->bssid, ETH_ALEN) && + (!memcmp(dev->netdev->dev_addr, mgmt->addr1, ETH_ALEN) || + !memcmp(bc_addr, mgmt->addr1, ETH_ALEN))) { + /* this is a DisAssoc from the BSS we are connected or + trying to connect to, directed to us or broadcasted */ + /* jal: TODO: can the Disassoc also come from the BSS + we've sent a ReAssocReq to (i.e. from dev->new_bss) ? */ + if (dev->istate == DISASSOCIATING || + dev->istate == ASSOCIATING || + dev->istate == REASSOCIATING || + dev->istate == CONNECTED || + dev->istate == JOINING) + { + if (dev->istate == CONNECTED) { + netif_carrier_off(dev->netdev); + netif_stop_queue(dev->netdev); + } + del_timer_sync(&dev->mgmt_timer); + NEW_STATE(dev,JOINING); + defer_kevent(dev,KEVENT_JOIN); + } else + /* ignore DisAssoc in states AUTH, ASSOC */ + info("%s: DisAssoc in state %d ignored", + dev->netdev->name, dev->istate); + } + /* ignore DisAssoc to other STA or from other BSSID */ +} + +/** + * rx_mgmt_auth - auth frame received + * @dev: radio device + * @buf: network buffer + * + * We have received an auth frame. This may be accepting us + * or may be a challenge for the WEP "cryptography". If it + * matches our connection attempt we move to associating state. 
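+ *
+ * For shared-key authentication the full exchange is: seq_nr 1 (our
+ * request), seq_nr 2 (the AP's challenge), seq_nr 3 (our reply with
+ * the WEP-encrypted challenge, see auth_req()), seq_nr 4 (the AP's
+ * result); open-system authentication is finished after seq_nr 2.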
+ */ + +static void rx_mgmt_auth(struct at76c503 *dev, + struct at76c503_rx_buffer *buf) +{ + struct ieee802_11_mgmt *mgmt = (struct ieee802_11_mgmt *)buf->packet; + struct ieee802_11_auth_frame *resp = + (struct ieee802_11_auth_frame *)mgmt->data; + char obuf[18*3] __attribute__ ((unused)); + int seq_nr = le16_to_cpu(resp->seq_nr); + int alg = le16_to_cpu(resp->algorithm); + int status = le16_to_cpu(resp->status); + + dbg(DBG_RX_MGMT, "%s: rx AuthFrame bssid %s alg %d seq_nr %d status %d " + "destination %s", + dev->netdev->name, mac2str(mgmt->addr3), + alg, seq_nr, status, + hex2str(obuf, mgmt->addr1, ETH_ALEN, ':')); + + if (alg == IEEE802_11_AUTH_ALG_SHARED_SECRET && + seq_nr == 2) { + dbg(DBG_RX_MGMT, "%s: AuthFrame challenge %s ...", + dev->netdev->name, + hex2str(obuf, resp->challenge, sizeof(obuf)/3, ' ')); + } + + if (dev->istate != AUTHENTICATING) { + info("%s: ignored AuthFrame in state %d", + dev->netdev->name, dev->istate); + return; + } + if (dev->auth_mode != alg) { + info("%s: ignored AuthFrame for alg %d", + dev->netdev->name, alg); + return; + } + + assert(dev->curr_bss != NULL); + if (dev->curr_bss == NULL) + return; + + if (!memcmp(mgmt->addr3, dev->curr_bss->bssid, ETH_ALEN) && + !memcmp(dev->netdev->dev_addr, mgmt->addr1, ETH_ALEN)) { + /* this is a AuthFrame from the BSS we are connected or + trying to connect to, directed to us */ + if (status != IEEE802_11_STATUS_SUCCESS) { + del_timer_sync(&dev->mgmt_timer); + /* try to join next bss */ + NEW_STATE(dev,JOINING); + defer_kevent(dev,KEVENT_JOIN); + return; + } + + if (dev->auth_mode == IEEE802_11_AUTH_ALG_OPEN_SYSTEM || + seq_nr == 4) { + dev->retries = ASSOC_RETRIES; + NEW_STATE(dev,ASSOCIATING); + assoc_req(dev, dev->curr_bss); + mod_timer(&dev->mgmt_timer,jiffies+HZ); + return; + } + + assert(seq_nr == 2); + auth_req(dev, dev->curr_bss, seq_nr+1, resp->challenge); + mod_timer(&dev->mgmt_timer,jiffies+HZ); + } + /* else: ignore AuthFrames to other receipients */ +} + +/** + * rx_mgmt_deauth - deauth frame received + * @dev: radio device + * @buf: network buffer + * + * We have been deauth'ed by the BSS. If we are scanning then + * its an uninteresting event. 
In other states we go back to + * attempting to join a network + */ + +static void rx_mgmt_deauth(struct at76c503 *dev, + struct at76c503_rx_buffer *buf) +{ + struct ieee802_11_mgmt *mgmt = (struct ieee802_11_mgmt *)buf->packet; + struct ieee802_11_deauth_frame *resp = + (struct ieee802_11_deauth_frame *)mgmt->data; + char obuf[ETH_ALEN*3+1] __attribute__ ((unused)); + + dbg(DBG_RX_MGMT|DBG_PROGRESS, + "%s: rx DeAuth bssid %s reason x%04x destination %s", + dev->netdev->name, mac2str(mgmt->addr3), + le16_to_cpu(resp->reason), + hex2str(obuf, mgmt->addr1, ETH_ALEN, ':')); + + if (dev->istate == DISASSOCIATING || + dev->istate == AUTHENTICATING || + dev->istate == ASSOCIATING || + dev->istate == REASSOCIATING || + dev->istate == CONNECTED) { + + assert(dev->curr_bss != NULL); + if (dev->curr_bss == NULL) + return; + + if (!memcmp(mgmt->addr3, dev->curr_bss->bssid, ETH_ALEN) && + (!memcmp(dev->netdev->dev_addr, mgmt->addr1, ETH_ALEN) || + !memcmp(bc_addr, mgmt->addr1, ETH_ALEN))) { + /* this is a DeAuth from the BSS we are connected or + trying to connect to, directed to us or broadcasted */ + NEW_STATE(dev,JOINING); + defer_kevent(dev,KEVENT_JOIN); + del_timer_sync(&dev->mgmt_timer); + } + /* ignore DeAuth to other STA or from other BSSID */ + } else { + /* ignore DeAuth in states SCANNING */ + info("%s: DeAuth in state %d ignored", + dev->netdev->name, dev->istate); + } +} + +/** + * rx_mgmt_beacon - beacon received + * @dev: radio device + * @buf: buffer holding beacon frame + * + * We have heard a beacon frame. If we are connected we use this + * to know that we can still communicate with the base station. + * In other states we can add the discovered BSS to our node list + */ + +static void rx_mgmt_beacon(struct at76c503 *dev, + struct at76c503_rx_buffer *buf) +{ + struct ieee802_11_mgmt *mgmt = (struct ieee802_11_mgmt *)buf->packet; + struct ieee802_11_beacon_data *bdata = + (struct ieee802_11_beacon_data *)mgmt->data; + struct list_head *lptr; + struct bss_info *match; /* entry matching addr3 with its bssid */ + u8 *tlv_ptr; + int new_entry = 0; + int len; + unsigned long flags; + + spin_lock_irqsave(&dev->bss_list_spinlock, flags); + + if (dev->istate == CONNECTED) { + /* in state CONNECTED we use the mgmt_timer to control + the beacon of the BSS */ + assert(dev->curr_bss != NULL); + if (dev->curr_bss == NULL) + goto rx_mgmt_beacon_end; + if (!memcmp(dev->curr_bss->bssid, mgmt->addr3, ETH_ALEN)) { + mod_timer(&dev->mgmt_timer, jiffies+BEACON_TIMEOUT*HZ); + dev->curr_bss->rssi = buf->rssi; + goto rx_mgmt_beacon_end; + } + } + + /* look if we have this BSS already in the list */ + match = NULL; + + if (!list_empty(&dev->bss_list)) { + list_for_each(lptr, &dev->bss_list) { + struct bss_info *bss_ptr = + list_entry(lptr, struct bss_info, list); + if (!memcmp(bss_ptr->bssid, mgmt->addr3, ETH_ALEN)) { + match = bss_ptr; + break; + } + } + } + + if (match == NULL) { + /* haven't found the bss in the list */ + if ((match = kmalloc(sizeof(struct bss_info), GFP_ATOMIC)) == NULL) { + dbg(DBG_BSS_TABLE, "%s: cannot kmalloc new bss info (%d byte)", + dev->netdev->name, sizeof(struct bss_info)); + goto rx_mgmt_beacon_end; + } + memset(match,0,sizeof(*match)); + new_entry = 1; + /* append new struct into list */ + list_add_tail(&match->list, &dev->bss_list); + } + + /* we either overwrite an existing entry or append a new one + match points to the entry in both cases */ + + match->capa = le16_to_cpu(bdata->capability_information); + + /* while beacon_interval is not (!) 
*/ + match->beacon_interval = le16_to_cpu(bdata->beacon_interval); + + match->rssi = buf->rssi; + match->link_qual = buf->link_quality; + match->noise_level = buf->noise_level; + + memcpy(match->mac,mgmt->addr2,ETH_ALEN); //just for info + memcpy(match->bssid,mgmt->addr3,ETH_ALEN); + + tlv_ptr = bdata->data; + + assert(*tlv_ptr == IE_ID_SSID); + len = min(IW_ESSID_MAX_SIZE,(int)*(tlv_ptr+1)); + if ((new_entry) || (len > 0 && memcmp(tlv_ptr+2,zeros,len))) { + /* we copy only if this is a new entry, + or the incoming SSID is not a cloaked SSID. This will + protect us from overwriting a real SSID read in a + ProbeResponse with a cloaked one from a following beacon. */ + match->ssid_len = len; + memcpy(match->ssid, tlv_ptr+2, len); + } + tlv_ptr += (1+1 + *(tlv_ptr+1)); + + assert(*tlv_ptr == IE_ID_SUPPORTED_RATES); + match->rates_len = min((int)sizeof(match->rates),(int)*(tlv_ptr+1)); + memcpy(match->rates, tlv_ptr+2, match->rates_len); + tlv_ptr += (1+1 + *(tlv_ptr+1)); + + assert(*tlv_ptr == IE_ID_DS_PARAM_SET); + match->channel = *(tlv_ptr+2); + + match->last_rx = jiffies; /* record last rx of beacon */ + +rx_mgmt_beacon_end: + spin_unlock_irqrestore(&dev->bss_list_spinlock, flags); +} /* rx_mgmt_beacon */ + + +/** + * rx_mgmt - received a management frame + * @dev: radio device + * @buf: buffer holding the management frame + * + * Parse the management frame we received and process it. Also + * use the management frames to update link quality information. + */ + +static void rx_mgmt(struct at76c503 *dev, struct at76c503_rx_buffer *buf) +{ + struct ieee802_11_mgmt *mgmt = ( struct ieee802_11_mgmt *)buf->packet; + struct iw_statistics *wstats = &dev->wstats; + u16 lev_dbm; + u16 subtype = le16_to_cpu(mgmt->frame_ctl) & IEEE802_11_FCTL_STYPE; + + /* update wstats */ + if (dev->istate != INIT && dev->istate != SCANNING) { + /* jal: this is a dirty hack needed by Tim in adhoc mode */ + if (dev->iw_mode == IW_MODE_ADHOC || + (dev->curr_bss != NULL && + !memcmp(mgmt->addr3, dev->curr_bss->bssid, ETH_ALEN))) { + /* Data packets always seem to have a 0 link level, so we + only read link quality info from management packets. 
+			   Atmel driver actually averages the present, and previous
+			   values, we just present the raw value at the moment - TJS */
+
+			if (buf->rssi > 1) {
+				lev_dbm = (buf->rssi * 10 / 4);
+				if (lev_dbm > 255)
+					lev_dbm = 255;
+				wstats->qual.qual = buf->link_quality;
+				wstats->qual.level = lev_dbm;
+				wstats->qual.noise = buf->noise_level;
+				wstats->qual.updated = 7;
+			}
+		}
+	}
+
+	if (debug & DBG_RX_MGMT_CONTENT) {
+		char obuf[128*2+1] __attribute__ ((unused));
+		dbg_uc("%s rx mgmt subtype x%x %s",
+		       dev->netdev->name, subtype,
+		       hex2str(obuf, (u8 *)mgmt,
+			       min((sizeof(obuf)-1)/2,
+				   (size_t)le16_to_cpu(buf->wlength)), '\0'));
+	}
+
+	switch (subtype) {
+	case IEEE802_11_STYPE_BEACON:
+	case IEEE802_11_STYPE_PROBE_RESP:
+		rx_mgmt_beacon(dev,buf);
+		break;
+
+	case IEEE802_11_STYPE_ASSOC_RESP:
+		rx_mgmt_assoc(dev,buf);
+		break;
+
+	case IEEE802_11_STYPE_REASSOC_RESP:
+		rx_mgmt_reassoc(dev,buf);
+		break;
+
+	case IEEE802_11_STYPE_DISASSOC:
+		rx_mgmt_disassoc(dev,buf);
+		break;
+
+	case IEEE802_11_STYPE_AUTH:
+		rx_mgmt_auth(dev,buf);
+		break;
+
+	case IEEE802_11_STYPE_DEAUTH:
+		rx_mgmt_deauth(dev,buf);
+		break;
+
+	default:
+		info("%s: mgmt, but not beacon, subtype = %x",
+		     dev->netdev->name, subtype);
+	}
+
+	return;
+}
+
+static void dbg_dumpbuf(const char *tag, const u8 *buf, int size)
+{
+	int i;
+
+	if (!debug) return;
+
+	/* plain hex dump of size bytes, each line prefixed with tag */
+	for (i=0; i<size; i++) {
+		if ((i % 16) == 0)
+			printk("%s%s: ", i ? "\n" : "", tag);
+		printk("%02x ", buf[i]);
+	}
+	printk("\n");
+}
+
+/* LLC/SNAP signatures used below to classify the payload of received
+   data frames; rfc1042sig is the SNAP header followed by the zero OUI
+   used by RFC 1042 encapsulation */
+static u8 snapsig[] = {0xaa, 0xaa, 0x03};
+#ifdef COLLAPSE_RFC1042
+static u8 rfc1042sig[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+#endif /* COLLAPSE_RFC1042 */
+
+/* Convert the 802.11 header of a received data frame into an
+   ethernet-style header and set up skb->mac / skb->protocol */
+static void ieee80211_to_eth(struct sk_buff *skb, int iw_mode)
+{
+	struct ieee802_11_hdr *i802_11_hdr;
+	struct ethhdr *eth_hdr;
+	u8 *src_addr;
+	u8 *dest_addr;
+	unsigned short proto = 0;
+	int build_ethhdr = 1;
+
+	i802_11_hdr = (struct ieee802_11_hdr *)skb->data;
+	skb_pull(skb, sizeof(struct ieee802_11_hdr));
+//	skb_trim(skb, skb->len - 4); /* Trim CRC */
+
+	src_addr = iw_mode == IW_MODE_ADHOC ? i802_11_hdr->addr2
+		: i802_11_hdr->addr3;
+	dest_addr = i802_11_hdr->addr1;
+
+	eth_hdr = (struct ethhdr *)skb->data;
+	if (!memcmp(eth_hdr->h_source, src_addr, ETH_ALEN) &&
+	    !memcmp(eth_hdr->h_dest, dest_addr, ETH_ALEN)) {
+		/* An ethernet frame is encapsulated within the data portion.
+		 * Just use its header instead. */
+		skb_pull(skb, sizeof(struct ethhdr));
+		build_ethhdr = 0;
+	} else if (!memcmp(skb->data, snapsig, sizeof(snapsig))) {
+		/* SNAP frame. */
+#ifdef COLLAPSE_RFC1042
+		if (!memcmp(skb->data, rfc1042sig, sizeof(rfc1042sig))) {
+			/* RFC1042 encapsulated packet. Collapse it to a
+			 * simple Ethernet-II or 802.3 frame */
+			/* NOTE: prism2 doesn't collapse Appletalk frames (why?). */
+			skb_pull(skb, sizeof(rfc1042sig)+2);
+			proto = *(unsigned short *)(skb->data - 2);
+		} else
+#endif /* COLLAPSE_RFC1042 */
+			proto = htons(skb->len);
+	} else {
+#if IEEE_STANDARD
+		/* According to all standards, we should assume the data
+		 * portion contains 802.2 LLC information, so we should give it
+		 * an 802.3 header (which has the same implications) */
+		proto = htons(skb->len);
+#else /* IEEE_STANDARD */
+		/* Unfortunately, it appears no actual 802.11 implementations
+		 * follow any standards specs. They all appear to put a
+		 * 16-bit ethertype after the 802.11 header instead, so we take
+		 * that value and make it into an Ethernet-II packet.
*/ + /* Note that this means we can never support non-SNAP 802.2 + * frames (because we can't tell when we get one) */ + proto = *(unsigned short *)(skb->data); + skb_pull(skb, 2); +#endif /* IEEE_STANDARD */ + } + + eth_hdr = (struct ethhdr *)(skb->data-sizeof(struct ethhdr)); + skb->mac.ethernet = eth_hdr; + if (build_ethhdr) { + /* This needs to be done in this order (eth_hdr->h_dest may + * overlap src_addr) */ + memcpy(eth_hdr->h_source, src_addr, ETH_ALEN); + memcpy(eth_hdr->h_dest, dest_addr, ETH_ALEN); + /* make an 802.3 header (proto = length) */ + eth_hdr->h_proto = proto; + } + + /* TODO: check this max length */ + if (ntohs(eth_hdr->h_proto) >= 1536) { + skb->protocol = eth_hdr->h_proto; + } else if (*(unsigned short *)skb->data == 0xFFFF) { + /* Magic hack for Novell IPX-in-802.3 packets */ + skb->protocol = htons(ETH_P_802_3); + } else { + /* Assume it's an 802.2 packet (it should be, and we have no + * good way to tell if it isn't) */ + skb->protocol = htons(ETH_P_802_2); + } +} + +/* Adjust the skb to trim the hardware header and CRC, and set up skb->mac, + * skb->protocol, etc. + */ +static void ieee80211_fixup(struct sk_buff *skb, int iw_mode) +{ + struct ieee802_11_hdr *i802_11_hdr; + struct ethhdr *eth_hdr; + u8 *src_addr; + u8 *dest_addr; + unsigned short proto = 0; + + i802_11_hdr = (struct ieee802_11_hdr *)skb->data; + skb_pull(skb, sizeof(struct ieee802_11_hdr)); +// skb_trim(skb, skb->len - 4); /* Trim CRC */ + + src_addr = iw_mode == IW_MODE_ADHOC ? i802_11_hdr->addr2 + : i802_11_hdr->addr3; + dest_addr = i802_11_hdr->addr1; + + skb->mac.raw = (unsigned char *)i802_11_hdr; + + eth_hdr = (struct ethhdr *)skb->data; + if (!memcmp(eth_hdr->h_source, src_addr, ETH_ALEN) && + !memcmp(eth_hdr->h_dest, dest_addr, ETH_ALEN)) { + /* There's an ethernet header encapsulated within the data + * portion, count it as part of the hardware header */ + skb_pull(skb, sizeof(struct ethhdr)); + proto = eth_hdr->h_proto; + } else if (!memcmp(skb->data, snapsig, sizeof(snapsig))) { + /* SNAP frame */ +#ifdef COLLAPSE_RFC1042 + if (!memcmp(skb->data, rfc1042sig, sizeof(rfc1042sig))) { + /* RFC1042 encapsulated packet. Treat the SNAP header + * as part of the HW header and note the protocol. */ + /* NOTE: prism2 doesn't collapse Appletalk frames (why?). */ + skb_pull(skb, sizeof(rfc1042sig) + 2); + proto = *(unsigned short *)(skb->data - 2); + } else +#endif /* COLLAPSE_RFC1042 */ + proto = htons(ETH_P_802_2); + } + + /* TODO: check this max length */ + if (ntohs(proto) >= 1536) { + skb->protocol = proto; + } else { +#ifdef IEEE_STANDARD + /* According to all standards, we should assume the data + * portion contains 802.2 LLC information */ + skb->protocol = htons(ETH_P_802_2); +#else /* IEEE_STANDARD */ + /* Unfortunately, it appears no actual 802.11 implementations + * follow any standards specs. They all appear to put a + * 16-bit ethertype after the 802.11 header instead, so we'll + * use that (and consider it part of the hardware header). */ + /* Note that this means we can never support non-SNAP 802.2 + * frames (because we can't tell when we get one) */ + skb->protocol = *(unsigned short *)(skb->data - 2); + skb_pull(skb, 2); +#endif /* IEEE_STANDARD */ + } +} + +/** + * check_for_rx_frags - check for fragmented frame + * @dev: our device + * + * Check for fragmented data in dev->rx_skb. If the packet was no + * fragment or it was the last of a fragment set then an skb containing + * the whole packet is returned for further processing. 
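+ * (rx_data() then converts it via ieee80211_to_eth() or
+ * ieee80211_fixup() and hands it to netif_rx()).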
Otherwise we get + * NULL and are done and the packet is either stored inside the fragment + * buffer or thrown away. The check for rx_copybreak is moved here. + * Every returned skb starts with the ieee802_11 header and contains + * _no_ FCS at the end + */ + +static struct sk_buff *check_for_rx_frags(struct at76c503 *dev) +{ + struct sk_buff *skb = (struct sk_buff *)dev->rx_skb; + struct at76c503_rx_buffer *buf = (struct at76c503_rx_buffer *)skb->data; + struct ieee802_11_hdr *i802_11_hdr = + (struct ieee802_11_hdr *)buf->packet; + /* seq_ctrl, fragment_number, sequence number of new packet */ + u16 sctl = le16_to_cpu(i802_11_hdr->seq_ctl); + u16 fragnr = sctl & 0xf; + u16 seqnr = sctl>>4; + u16 frame_ctl = le16_to_cpu(i802_11_hdr->frame_ctl); + + /* length including the IEEE802.11 header, excl. the trailing FCS, + excl. the struct at76c503_rx_buffer */ + int length = le16_to_cpu(buf->wlength) - dev->rx_data_fcs_len; + + /* where does the data payload start in skb->data ? + This depends on if addr4 is present or not. */ + u8 *data = ((frame_ctl & + (IEEE802_11_FCTL_TODS|IEEE802_11_FCTL_FROMDS)) == + (IEEE802_11_FCTL_TODS|IEEE802_11_FCTL_FROMDS) ? + (u8 *)i802_11_hdr + sizeof(struct ieee802_11_hdr) : + (u8 *)&i802_11_hdr->addr4); + /* length of payload, excl. the trailing FCS */ + int data_len = length - (data - (u8 *)i802_11_hdr); + + int i; + struct rx_data_buf *bptr, *optr; + unsigned long oldest = ~0UL; + char dbuf[2*32+1] __attribute__ ((unused)); + + dbg(DBG_RX_FRAGS, "%s: rx data frame_ctl %04x addr2 %s seq/frag %d/%d " + "length %d data %d: %s ...", + dev->netdev->name, frame_ctl, + mac2str(i802_11_hdr->addr2), + seqnr, fragnr, length, data_len, + hex2str(dbuf, data, sizeof(dbuf)/2, '\0')); + + dbg(DBG_RX_FRAGS_SKB, "%s: incoming skb: head %p data %p " + "tail %p end %p len %d", + dev->netdev->name, skb->head, skb->data, skb->tail, + skb->end, skb->len); + + if (data_len <= 0) { + /* buffers contains no data */ + info("%s: rx skb without data", dev->netdev->name); + return NULL; + } + + if (fragnr == 0 && !(frame_ctl & IEEE802_11_FCTL_MOREFRAGS)) { + /* unfragmented packet received */ + if (length < rx_copybreak && (skb = dev_alloc_skb(length)) != NULL) { + memcpy(skb_put(skb, length), + dev->rx_skb->data + AT76C503_RX_HDRLEN, length); + } else { + skb_pull(skb, AT76C503_RX_HDRLEN); + skb_trim(skb, length); + /* Use a new skb for the next receive */ + dev->rx_skb = NULL; + } + + dbg(DBG_RX_FRAGS, "%s: unfragmented", dev->netdev->name); + + return skb; + } + + /* remove the at76c503_rx_buffer header - we don't need it anymore */ + /* we need the IEEE802.11 header (for the addresses) if this packet + is the first of a chain */ + + assert(length > AT76C503_RX_HDRLEN); + skb_pull(skb, AT76C503_RX_HDRLEN); + /* remove FCS at end */ + skb_trim(skb, length); + + dbg(DBG_RX_FRAGS_SKB, "%s: trimmed skb: head %p data %p tail %p " + "end %p len %d data %p data_len %d", + dev->netdev->name, skb->head, skb->data, skb->tail, + skb->end, skb->len, data, data_len); + + /* look if we've got a chain for the sender address. + afterwards optr points to first free or the oldest entry, + or, if i < NR_RX_DATA_BUF, bptr points to the entry for the + sender address */ + /* determining the oldest entry doesn't cope with jiffies wrapping + but I don't care to delete a young entry at these rare moments ... 
*/ + + for(i=0,bptr=dev->rx_data,optr=NULL; i < NR_RX_DATA_BUF; i++,bptr++) { + if (bptr->skb != NULL) { + if (!memcmp(i802_11_hdr->addr2, bptr->sender,ETH_ALEN)) + break; + else + if (optr == NULL) { + optr = bptr; + oldest = bptr->last_rx; + } else { + if (bptr->last_rx < oldest) + optr = bptr; + } + } else { + optr = bptr; + oldest = 0UL; + } + } + + if (i < NR_RX_DATA_BUF) { + + dbg(DBG_RX_FRAGS, "%s: %d. cacheentry (seq/frag=%d/%d) " + "matched sender addr", + dev->netdev->name, i, bptr->seqnr, bptr->fragnr); + + /* bptr points to an entry for the sender address */ + if (bptr->seqnr == seqnr) { + int left; + /* the fragment has the current sequence number */ + if (((bptr->fragnr+1)&0xf) == fragnr) { + bptr->last_rx = jiffies; + /* the next following fragment number -> + add the data at the end */ + /* is & 0xf necessary above ??? */ + + // for test only ??? + if ((left=skb_tailroom(bptr->skb)) < data_len) { + info("%s: only %d byte free (need %d)", + dev->netdev->name, left, data_len); + } else + memcpy(skb_put(bptr->skb, data_len), + data, data_len); + bptr->fragnr = fragnr; + if (!(frame_ctl & + IEEE802_11_FCTL_MOREFRAGS)) { + /* this was the last fragment - send it */ + skb = bptr->skb; + bptr->skb = NULL; /* free the entry */ + dbg(DBG_RX_FRAGS, "%s: last frag of seq %d", + dev->netdev->name, seqnr); + return skb; + } else + return NULL; + } else { + /* wrong fragment number -> ignore it */ + dbg(DBG_RX_FRAGS, "%s: frag nr does not match: %d+1 != %d", + dev->netdev->name, bptr->fragnr, fragnr); + return NULL; + } + } else { + /* got another sequence number */ + if (fragnr == 0) { + /* it's the start of a new chain - replace the + old one by this */ + /* bptr->sender has the correct value already */ + dbg(DBG_RX_FRAGS, "%s: start of new seq %d, " + "removing old seq %d", dev->netdev->name, + seqnr, bptr->seqnr); + bptr->seqnr = seqnr; + bptr->fragnr = 0; + bptr->last_rx = jiffies; + /* swap bptr->skb and dev->rx_skb */ + skb = bptr->skb; + bptr->skb = dev->rx_skb; + dev->rx_skb = skb; + } else { + /* it from the middle of a new chain -> + delete the old entry and skip the new one */ + dbg(DBG_RX_FRAGS, "%s: middle of new seq %d (%d) " + "removing old seq %d", dev->netdev->name, + seqnr, fragnr, bptr->seqnr); + dev_kfree_skb(bptr->skb); + bptr->skb = NULL; + } + return NULL; + } + } else { + /* if we didn't find a chain for the sender address optr + points either to the first free or the oldest entry */ + + if (fragnr != 0) { + /* this is not the begin of a fragment chain ... 
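+			   and we have no reassembly buffer for this
+			   sender, so the fragment is dropped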
*/ + dbg(DBG_RX_FRAGS, "%s: no chain for non-first fragment (%d)", + dev->netdev->name, fragnr); + return NULL; + } + assert(optr != NULL); + if (optr == NULL) + return NULL; + + if (optr->skb != NULL) { + /* swap the skb's */ + skb = optr->skb; + optr->skb = dev->rx_skb; + dev->rx_skb = skb; + + dbg(DBG_RX_FRAGS, "%s: free old contents: sender %s seq/frag %d/%d", + dev->netdev->name, mac2str(optr->sender), + optr->seqnr, optr->fragnr); + + } else { + /* take the skb from dev->rx_skb */ + optr->skb = dev->rx_skb; + dev->rx_skb = NULL; /* let submit_rx_urb() allocate a new skb */ + + dbg(DBG_RX_FRAGS, "%s: use a free entry", dev->netdev->name); + } + memcpy(optr->sender, i802_11_hdr->addr2, ETH_ALEN); + optr->seqnr = seqnr; + optr->fragnr = 0; + optr->last_rx = jiffies; + + return NULL; + } +} /* check_for_rx_frags */ + +/* rx interrupt: we expect the complete data buffer in dev->rx_skb */ +static void rx_data(struct at76c503 *dev) +{ + struct net_device *netdev = (struct net_device *)dev->netdev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb = dev->rx_skb; + struct at76c503_rx_buffer *buf = (struct at76c503_rx_buffer *)skb->data; + struct ieee802_11_hdr *i802_11_hdr; + int length = le16_to_cpu(buf->wlength); + + if (debug & DBG_RX_DATA) { + dbg_uc("%s received data packet:", netdev->name); + dbg_dumpbuf(" rxhdr", skb->data, AT76C503_RX_HDRLEN); + } + if (debug & DBG_RX_DATA_CONTENT) + dbg_dumpbuf("packet", skb->data + AT76C503_RX_HDRLEN, + length); + + if ((skb=check_for_rx_frags(dev)) == NULL) + return; + + /* if an skb is returned, the at76c503a_rx_header and the FCS is already removed */ + i802_11_hdr = (struct ieee802_11_hdr *)skb->data; + + skb->dev = netdev; + skb->ip_summed = CHECKSUM_NONE; /* TODO: should check CRC */ + + if (i802_11_hdr->addr1[0] & 1) { + if (!memcmp(i802_11_hdr->addr1, netdev->broadcast, ETH_ALEN)) + skb->pkt_type = PACKET_BROADCAST; + else + skb->pkt_type = PACKET_MULTICAST; + } else if (memcmp(i802_11_hdr->addr1, netdev->dev_addr, ETH_ALEN)) { + skb->pkt_type=PACKET_OTHERHOST; + } + + if (netdev->type == ARPHRD_ETHER) { + ieee80211_to_eth(skb, dev->iw_mode); + } else { + ieee80211_fixup(skb, dev->iw_mode); + } + + netdev->last_rx = jiffies; + netif_rx(skb); + stats->rx_packets++; + stats->rx_bytes += length; + + return; +} + +/** + * submit_rx_urb - set up a receive buffer + * @dev: network device + * + * Allocate a network buffer and feed it to the USB layer so + * that we can receive data from the USB adapter when it wants + * to talk to us. 
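+ *
+ * The skb in dev->rx_skb is reused for consecutive URBs whenever the
+ * rx path did not pass it upwards; if usb_submit_urb() fails, the
+ * submit is retried via KEVENT_SUBMIT_RX up to NR_SUBMIT_RX_TRIES
+ * times before we give up.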
+ */ + +static int submit_rx_urb(struct at76c503 *dev) +{ + int ret, size; + struct sk_buff *skb = dev->rx_skb; + + if (skb == NULL) { + skb = dev_alloc_skb(sizeof(struct at76c503_rx_buffer)); + if (skb == NULL) { + err("%s: unable to allocate rx skbuff.", dev->netdev->name); + ret = -ENOMEM; + goto exit; + } + dev->rx_skb = skb; + } else { + skb_push(skb, skb_headroom(skb)); + skb_trim(skb, 0); + } + + size = skb_tailroom(skb); + usb_fill_bulk_urb(dev->read_urb, dev->udev, + usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr), + skb_put(skb, size), size, + at76c503_read_bulk_callback, dev); + ret = usb_submit_urb(dev->read_urb); + if (ret < 0) { + err("%s: rx, usb_submit_urb failed: %d", dev->netdev->name, ret); + } + +exit: + if (ret < 0) { + /* If we can't submit the URB, the adapter becomes completely + * useless, so try again later */ + if (--dev->nr_submit_rx_tries > 0) + defer_kevent(dev, KEVENT_SUBMIT_RX); + else { + err("%s: giving up to submit rx urb after %d failures -" + " please unload the driver and/or power cycle the device", + dev->netdev->name, NR_SUBMIT_RX_TRIES); + } + } else + /* reset counter to initial value */ + dev->nr_submit_rx_tries = NR_SUBMIT_RX_TRIES; + return ret; +} + + +/* we are doing a lot of things here in an interrupt. Need + a bh handler (Watching TV with a TV card is probably + a good test: if you see flickers, we are doing too much. + Currently I do see flickers... even with our tasklet :-( ) + Maybe because the bttv driver and usb-uhci use the same interrupt +*/ + +static void at76c503_read_bulk_callback (struct urb *urb) +{ + struct at76c503 *dev = (struct at76c503 *)urb->context; + + dev->rx_urb = urb; + tasklet_schedule(&dev->tasklet); + + return; +} + +static void rx_tasklet(unsigned long param) +{ + struct at76c503 *dev = (struct at76c503 *)param; + struct urb *urb = dev->rx_urb; + struct net_device *netdev = (struct net_device *)dev->netdev; + struct at76c503_rx_buffer *buf = (struct at76c503_rx_buffer *)dev->rx_skb->data; + struct ieee802_11_hdr *i802_11_hdr = (struct ieee802_11_hdr *)buf->packet; + u16 frame_ctl = le16_to_cpu(i802_11_hdr->frame_ctl); + + if(!urb) return; // paranoid + + if(urb->status != 0){ + if ((urb->status != -ENOENT) && + (urb->status != -ECONNRESET)) { + dbg(DBG_URB,"%s %s: - nonzero read bulk status received: %d", + __FUNCTION__, netdev->name, urb->status); + goto next_urb; + } + return; + } + + /* there is a new bssid around, accept it: */ + if(buf->newbss && dev->iw_mode == IW_MODE_ADHOC){ + dbg(DBG_PROGRESS, "%s: rx newbss", netdev->name); + defer_kevent(dev, KEVENT_NEW_BSS); + } + + if (debug & DBG_RX_ATMEL_HDR) { + char obuf[2*48+1] __attribute__ ((unused)); + dbg_uc("%s: rx frame: rate %d rssi %d noise %d link %d %s", + dev->netdev->name, + buf->rx_rate, buf->rssi, buf->noise_level, + buf->link_quality, + hex2str(obuf,(u8 *)i802_11_hdr,sizeof(obuf)/2,'\0')); + } + + switch (frame_ctl & IEEE802_11_FCTL_FTYPE) { + case IEEE802_11_FTYPE_DATA: + rx_data(dev); + break; + + case IEEE802_11_FTYPE_MGMT: + /* jal: TODO: find out if we can update iwspy also on + other frames than management (might depend on the + radio chip / firmware version !) 
*/ +#if IW_MAX_SPY > 0 + iwspy_update(dev, buf); +#endif + rx_mgmt(dev, buf); + break; + + case IEEE802_11_FTYPE_CTL: + dbg(DBG_RX_CTRL, "%s: ignored ctrl frame: %04x", dev->netdev->name, + frame_ctl); + break; + + default: + info("%s: it's a frame from mars: %2x", dev->netdev->name, + frame_ctl); + } /* switch (frame_ctl & IEEE802_11_FCTL_FTYPE) */ + + next_urb: + submit_rx_urb(dev); + return; +} + +/** + * at76c503_write_bulk_callback - tx complete + * @urb: URB that completed + * + * The USB layer callback for transmit URB's finishing. At this + * point we can issue another transmit either by letting the + * network layer back at it, or if we have a pending management + * URB we send that instead. + */ + +static void at76c503_write_bulk_callback (struct urb *urb) +{ + struct at76c503 *dev = (struct at76c503 *)urb->context; + struct net_device_stats *stats = &dev->stats; + unsigned long flags; + struct at76c503_tx_buffer *mgmt_buf; + int ret; + + if(urb->status != 0){ + if((urb->status != -ENOENT) && + (urb->status != -ECONNRESET)) { + dbg(DBG_URB, "%s - nonzero write bulk status received: %d", + __FUNCTION__, urb->status); + }else + return; /* urb has been unlinked */ + stats->tx_errors++; + }else + stats->tx_packets++; + + spin_lock_irqsave(&dev->mgmt_spinlock, flags); + mgmt_buf = dev->next_mgmt_bulk; + dev->next_mgmt_bulk = NULL; + spin_unlock_irqrestore(&dev->mgmt_spinlock, flags); + + if (mgmt_buf) { + /* we don't copy the padding bytes, but add them + to the length */ + memcpy(dev->bulk_out_buffer, mgmt_buf, + le16_to_cpu(mgmt_buf->wlength) + + offsetof(struct at76c503_tx_buffer,packet)); + FILL_BULK_URB(dev->write_urb, dev->udev, + usb_sndbulkpipe(dev->udev, + dev->bulk_out_endpointAddr), + dev->bulk_out_buffer, + le16_to_cpu(mgmt_buf->wlength) + + le16_to_cpu(mgmt_buf->padding) + + AT76C503_TX_HDRLEN, + at76c503_write_bulk_callback, dev); + ret = usb_submit_urb(dev->write_urb); + if (ret) { + err("%s: %s error in tx submit urb: %d", + dev->netdev->name, __FUNCTION__, ret); + } + kfree(mgmt_buf); + } else + netif_wake_queue(dev->netdev); + +} + +/** + * at76c503_tx - transmit packet + * @skb: buffer to transmit + * @netdev: device transmitting + * + * Transmit a packet out onto the wireless link. We only allow + * a single outstanding transmit at any one time. This tx function + * returns with the queue off and the completion handler wakes + * the transmit path up + */ + +static int at76c503_tx(struct sk_buff *skb, struct net_device *netdev) +{ + struct at76c503 *dev = (struct at76c503 *)(netdev->priv); + struct net_device_stats *stats = &dev->stats; + int ret; + int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; + int wlen = len + 18; + int submit_len; + struct at76c503_tx_buffer *tx_buffer = + (struct at76c503_tx_buffer *)dev->bulk_out_buffer; + struct ieee802_11_hdr *i802_11_hdr = + (struct ieee802_11_hdr *)&(tx_buffer->packet); + + /* we can get rid of memcpy, if we set netdev->hard_header_len + to 8 + sizeof(struct ieee802_11_hdr), because then we have + enough space */ + // dbg(DBG_TX, "skb->data - skb->head = %d", skb->data - skb->head); + + /* 18 = sizeof(ieee802_11_hdr) - 2 * ETH_ALEN */ + /* ssap and dsap stay in the data */ + + memset(&(tx_buffer->packet[18]), 0, ETH_ZLEN); + memcpy(&(tx_buffer->packet[18]), skb->data, len); + + /* make wireless header */ + i802_11_hdr->frame_ctl = + cpu_to_le16(IEEE802_11_FTYPE_DATA | + (dev->wep_enabled ? IEEE802_11_FCTL_WEP : 0) | + (dev->iw_mode == IW_MODE_INFRA ? 
IEEE802_11_FCTL_TODS : 0)); + + if(dev->iw_mode == IW_MODE_ADHOC){ + memcpy(i802_11_hdr->addr1, skb->data, ETH_ALEN); /* destination */ + memcpy(i802_11_hdr->addr2, netdev->dev_addr, ETH_ALEN); /* source */ + memcpy(i802_11_hdr->addr3, dev->bssid, ETH_ALEN); + }else if(dev->iw_mode == IW_MODE_INFRA){ + memcpy(i802_11_hdr->addr1, dev->bssid, ETH_ALEN); + memcpy(i802_11_hdr->addr2, netdev->dev_addr, ETH_ALEN); /* source */ + memcpy(i802_11_hdr->addr3, skb->data, ETH_ALEN); /* destination */ + } + memset(i802_11_hdr->addr4, 0, ETH_ALEN); + + i802_11_hdr->duration_id = cpu_to_le16(0); + i802_11_hdr->seq_ctl = cpu_to_le16(0); + + /* setup 'atmel' header */ + tx_buffer->wlength = cpu_to_le16(wlen); + tx_buffer->tx_rate = dev->txrate; + /* for broadcast destination addresses, the firmware 0.100.x + seems to choose the highest rate set with CMD_STARTUP in + basic_rate_set replacing this value */ + + memset(tx_buffer->reserved, 0, 4); + + tx_buffer->padding = cpu_to_le16(calc_padding(wlen)); + submit_len = wlen + AT76C503_TX_HDRLEN + + le16_to_cpu(tx_buffer->padding); + + { + char hbuf[2*24+1], dbuf[2*16] __attribute__((unused)); + dbg(DBG_TX_DATA, "%s tx wlen x%x pad x%x rate %d hdr %s", + dev->netdev->name, + le16_to_cpu(tx_buffer->wlength), + le16_to_cpu(tx_buffer->padding), tx_buffer->tx_rate, + hex2str(hbuf, (u8 *)i802_11_hdr,sizeof(hbuf)/2,'\0')); + dbg(DBG_TX_DATA_CONTENT, "%s data %s", dev->netdev->name, + hex2str(dbuf, (u8 *)i802_11_hdr+24,sizeof(dbuf)/2,'\0')); + } + + /* send stuff */ + netif_stop_queue(netdev); + netdev->trans_start = jiffies; + + FILL_BULK_URB(dev->write_urb, dev->udev, + usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr), + tx_buffer, submit_len, + at76c503_write_bulk_callback, dev); + ret = usb_submit_urb(dev->write_urb); + if(ret){ + stats->tx_errors++; + err("%s: error in tx submit urb: %d", netdev->name, ret); + goto err; + } + + stats->tx_bytes += len; + + dev_kfree_skb(skb); + return 0; + +err: + return ret; +} + + +/** + * at76c503_tx_timeout - transmit timeout + * @netdev: network device + * + * The network layer thinks we have a transmission timeout. + * Clean up the possibly lost frame and log an error + */ + +static void at76c503_tx_timeout(struct net_device *netdev) +{ + struct at76c503 *dev = (struct at76c503 *)(netdev->priv); + + if (!dev) + return; + warn("%s: tx timeout.", netdev->name); + dev->write_urb->transfer_flags |= USB_ASYNC_UNLINK; + usb_unlink_urb(dev->write_urb); + dev->stats.tx_errors++; +} + +/** + * startup_device - activate interface + * @dev: device + * + * Bring up the device side of the interface. This + * consists of loading data into the USB device, activating + * the radio and then setting the radio parameters + */ + +static int startup_device(struct at76c503 *dev) +{ + struct at76c503_card_config *ccfg = &dev->card_config; + int ret; + + if (debug & DBG_PARAMS) { + char hexssid[IW_ESSID_MAX_SIZE*2+1]; + char ossid[IW_ESSID_MAX_SIZE+1]; + + /* make dev->essid printable */ + assert(dev->essid_size <= IW_ESSID_MAX_SIZE); + memcpy(ossid, dev->essid, dev->essid_size); + ossid[dev->essid_size] = '\0'; + + dbg_uc("%s param: ssid %s (%s) mode %s ch %d wep %s key %d keylen %d", + dev->netdev->name, ossid, + hex2str(hexssid,dev->essid,dev->essid_size,'\0'), + dev->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra", + dev->channel, + dev->wep_enabled ? 
"enabled" : "disabled", + dev->wep_key_id, dev->wep_keys_len[dev->wep_key_id]); + dbg_uc("%s param: preamble %s rts %d frag %d txrate %s excl %d", + dev->netdev->name, + dev->preamble_type == PREAMBLE_TYPE_SHORT ? "short" : "long", + dev->rts_threshold, dev->frag_threshold, + dev->txrate == TX_RATE_1MBIT ? "1MBit" : + dev->txrate == TX_RATE_2MBIT ? "2MBit" : + dev->txrate == TX_RATE_5_5MBIT ? "5.5MBit" : + dev->txrate == TX_RATE_11MBIT ? "11MBit" : + dev->txrate == TX_RATE_AUTO ? "auto" : "", + dev->wep_excl_unencr); + dbg_uc("%s param: pm_mode %d pm_period %d auth_mode %s " + "scan_times %d %d scan_mode %s", + dev->netdev->name, + dev->pm_mode, dev->pm_period_us, + dev->auth_mode == IEEE802_11_AUTH_ALG_OPEN_SYSTEM ? + "open" : "shared_secret", + dev->scan_min_time, dev->scan_max_time, + dev->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive"); + } + + memset(ccfg, 0, sizeof(struct at76c503_card_config)); + ccfg->exclude_unencrypted = dev->wep_excl_unencr; + ccfg->promiscuous_mode = 0; + ccfg->promiscuous_mode = 1; + ccfg->short_retry_limit = 8; + + if (dev->wep_enabled && + dev->wep_keys_len[dev->wep_key_id] > WEP_SMALL_KEY_LEN) + ccfg->encryption_type = 2; + else + ccfg->encryption_type = 1; + + + ccfg->rts_threshold = cpu_to_le16(dev->rts_threshold); + ccfg->fragmentation_threshold = cpu_to_le16(dev->frag_threshold); + + memcpy(ccfg->basic_rate_set, hw_rates, 4); + /* jal: really needed, we do a set_mib for autorate later ??? */ + ccfg->auto_rate_fallback = (dev->txrate == 4 ? 1 : 0); + ccfg->channel = dev->channel; + ccfg->privacy_invoked = dev->wep_enabled; + memcpy(ccfg->current_ssid, dev->essid, IW_ESSID_MAX_SIZE); + ccfg->ssid_len = dev->essid_size; + + ccfg->wep_default_key_id = dev->wep_key_id; + memcpy(ccfg->wep_default_key_value, dev->wep_keys, 4 * WEP_KEY_SIZE); + + ccfg->short_preamble = dev->preamble_type; + ccfg->beacon_period = cpu_to_le16(100); + + ret = set_card_command(dev->udev, CMD_STARTUP, (unsigned char *)&dev->card_config, + sizeof(struct at76c503_card_config)); + if(ret < 0){ + err("%s: set_card_command failed: %d", dev->netdev->name, ret); + return ret; + } + + wait_completion(dev, CMD_STARTUP); + + /* remove BSSID from previous run */ + memset(dev->bssid, 0, ETH_ALEN); + + if (set_radio(dev, 1) == 1) + wait_completion(dev, CMD_RADIO); + + if ((ret=set_preamble(dev, dev->preamble_type)) < 0) + return ret; + if ((ret=set_frag(dev, dev->frag_threshold)) < 0) + return ret; + + if ((ret=set_rts(dev, dev->rts_threshold)) < 0) + return ret; + + if ((ret=set_autorate_fallback(dev, dev->txrate == 4 ? 1 : 0)) < 0) + return ret; + + if ((ret=set_pm_mode(dev, dev->pm_mode)) < 0) + return ret; + + return 0; +} + +/** + * at76c503_open - interface up + * @netdev: network device + * + * Called when the interface enters the "up" state. We + * bring up the device, set the MAC address and then kick off + * receiving and begin a scan for nodes. + */ + +static int at76c503_open(struct net_device *netdev) +{ + struct at76c503 *dev = (struct at76c503 *)(netdev->priv); + int ret = 0; + + dbg(DBG_PROC_ENTRY, "at76c503_open entry"); + + if(down_interruptible(&dev->sem)) + return -EINTR; + + ret = startup_device(dev); + if (ret < 0) + goto err; + + /* if netdev->dev_addr != dev->mac_addr we must + set the mac address in the device ! 
*/ + if (memcmp(netdev->dev_addr, dev->mac_addr, ETH_ALEN)) { + if (set_mac_address(dev,netdev->dev_addr) >= 0) + dbg(DBG_PROGRESS, "%s: set new MAC addr %s", + netdev->name, mac2str(netdev->dev_addr)); + } + + dev->nr_submit_rx_tries = NR_SUBMIT_RX_TRIES; /* init counter */ + + ret = submit_rx_urb(dev); + if(ret < 0){ + err("%s: open: submit_rx_urb failed: %d", netdev->name, ret); + goto err; + } + + NEW_STATE(dev,SCANNING); + defer_kevent(dev,KEVENT_SCAN); + netif_carrier_off(dev->netdev); /* disable running netdev watchdog */ + netif_stop_queue(dev->netdev); /* stop tx data packets */ + + dev->open_count++; + dbg(DBG_PROC_ENTRY, "at76c503_open end"); +err: + up(&dev->sem); + return ret < 0 ? ret : 0; +} + +/** + * at76c503_stop - interface down + * @netdev: network device + * + * The network layer wants us to deactivate the interface. We + * turn the radio off, and free up various USB and timer resources + * that are no longer in use + */ + +static int at76c503_stop(struct net_device *netdev) +{ + struct at76c503 *dev = (struct at76c503 *)(netdev->priv); + unsigned long flags; + + if (down_interruptible(&dev->sem)) + return -EINTR; + + netif_stop_queue(netdev); + + set_radio(dev, 0); + + usb_unlink_urb(dev->read_urb); + usb_unlink_urb(dev->write_urb); + usb_unlink_urb(dev->ctrl_urb); + + del_timer_sync(&dev->mgmt_timer); + + spin_lock_irqsave(&dev->mgmt_spinlock,flags); + if (dev->next_mgmt_bulk) { + kfree(dev->next_mgmt_bulk); + dev->next_mgmt_bulk = NULL; + } + spin_unlock_irqrestore(&dev->mgmt_spinlock,flags); + + /* free the bss_list */ + free_bss_list(dev); + + assert(dev->open_count > 0); + dev->open_count--; + + up(&dev->sem); + + return 0; +} + +/** + * at76c503_get_stats - statistics callback + * @netdev: device being queried + * + * Called by the networking layer when the user requests the statistics. + * We maintain these in software so there is not a lot to do. + */ + +static struct net_device_stats *at76c503_get_stats(struct net_device *netdev) +{ + struct at76c503 *dev = (struct at76c503 *)netdev->priv; + + return &dev->stats; +} + +/** + * at76c503_get_wireless_stats - statistics callback + * @netdev: device being queried + * + * Called by the networking layer when the user requests the + * wireless statistics. We maintain these in software so there is + * not a lot to do. + */ + +static struct iw_statistics *at76c503_get_wireless_stats(struct net_device *netdev) +{ + struct at76c503 *dev = (struct at76c503 *)netdev->priv; + + return &dev->wstats; +} + +/** + * at76c503_set_multicast - multicast callback + * @netdev: network device + * + * Called when the networking layer has upated the multicast + * list or listening mode of the interface. + */ + +static void at76c503_set_multicast(struct net_device *netdev) +{ + struct at76c503 *dev = (struct at76c503 *)netdev->priv; + int promisc; + + promisc = ((netdev->flags & IFF_PROMISC) != 0); + if(promisc != dev->promisc){ + /* grmbl. This gets called in interrupt. */ + dev->promisc = promisc; + defer_kevent(dev, KEVENT_SET_PROMISC); + } +} + +/** + * at76c503_set_mac_address - ioctl handler + * @netdev: network device + * @addr: address block + * + * Called when the user changes the MAC address. We only store the + * new mac address in netdev struct, it will get set when the netdev + * gets opened. 
+ */ + +static +int at76c503_set_mac_address(struct net_device *netdev, void *addr) +{ + struct sockaddr *mac = addr; + memcpy(netdev->dev_addr, mac->sa_data, ETH_ALEN); + return 1; +} + +#if IW_MAX_SPY > 0 +/* == PROC iwspy_update == + check if we spy on the sender address of buf and update statistics */ +static +void iwspy_update(struct at76c503 *dev, struct at76c503_rx_buffer *buf) +{ + int i; + u16 lev_dbm; + struct ieee802_11_hdr *hdr = (struct ieee802_11_hdr *)buf->packet; + + for(i=0; i < dev->iwspy_nr; i++) { + if (!memcmp(hdr->addr2, dev->iwspy_addr[i].sa_data, + ETH_ALEN)) { + dev->iwspy_stats[i].qual = buf->link_quality; + lev_dbm = buf->rssi * 5 / 2; + dev->iwspy_stats[i].level = + (lev_dbm > 255 ? 255 : lev_dbm); + dev->iwspy_stats[i].noise = buf->noise_level; + dev->iwspy_stats[i].updated = 1; + break; + } + } +} /* iwspy_update */ + +/* == PROC ioctl_setspy == */ +static int ioctl_setspy(struct at76c503 *dev, struct iw_point *srq) +{ + int i; + + if (srq == NULL) + return -EFAULT; + + dbg(DBG_IOCTL, "%s: ioctl(SIOCSIWSPY, number %d)", + dev->netdev->name, srq->length); + + if (srq->length > IW_MAX_SPY) + return -E2BIG; + + dev->iwspy_nr = srq->length; + + if (dev->iwspy_nr > 0) { + if (copy_from_user(dev->iwspy_addr, srq->pointer, + sizeof(struct sockaddr) * dev->iwspy_nr)) { + dev->iwspy_nr = 0; + return -EFAULT; + } + memset(dev->iwspy_stats, 0, sizeof(dev->iwspy_stats)); + } + + /* Time to show what we have done... */ + if (debug & DBG_IOCTL) { + dbg_uc("%s: New spy list:", dev->netdev->name); + for (i = 0; i < dev->iwspy_nr; i++) { + dbg_uc("%s: %s", dev->netdev->name, + mac2str(dev->iwspy_addr[i].sa_data)); + } + } + + return 0; +} /* ioctl_setspy */ + + +/* == PROC ioctl_getspy == */ +static int ioctl_getspy(struct at76c503 *dev, struct iw_point *srq) +{ + int i; + + dbg(DBG_IOCTL, "%s: ioctl(SIOCGIWSPY, number %d)", dev->netdev->name, + dev->iwspy_nr); + + srq->length = dev->iwspy_nr; + + if (srq->length > 0 && (srq->pointer)) { + /* Push stuff to user space */ + if(copy_to_user(srq->pointer, dev->iwspy_addr, + sizeof(struct sockaddr) * srq->length)) + return -EFAULT; + if(copy_to_user(srq->pointer + + sizeof(struct sockaddr)*srq->length, + dev->iwspy_stats, + sizeof(struct iw_quality)*srq->length )) + return -EFAULT; + + for(i=0; i < dev->iwspy_nr; i++) + dev->iwspy_stats[i].updated = 0; + } + return 0; +} /* ioctl_getspy */ +#endif /* #if IW_MAX_SPY > 0 */ + + +/** + * ethtool_ioctl - ioctls for ethernet management + * @dev: radiod device + * @useraddr: address of data block + * + * Process the ethernet management interface ioctls. 
The only + * interface we support is reporting the driver information + */ + +static int ethtool_ioctl(struct at76c503 *dev, void *useraddr) +{ + u32 ethcmd; + + if (get_user(ethcmd, (u32 *)useraddr)) + return -EFAULT; + + switch (ethcmd) { + case ETHTOOL_GDRVINFO: + { + struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO }; + strncpy(info.driver, dev->netdev->owner->name, + sizeof(info.driver)-1); + + strncpy(info.version, DRIVER_VERSION, sizeof(info.version)); + info.version[sizeof(info.version)-1] = '\0'; + + snprintf(info.bus_info, sizeof(info.bus_info)-1, + "usb%d:%d", dev->udev->bus->busnum, + dev->udev->devnum); + + snprintf(info.fw_version, sizeof(info.fw_version)-1, + "%d.%d.%d-%d", + dev->fw_version.major, dev->fw_version.minor, + dev->fw_version.patch, dev->fw_version.build); + if (copy_to_user (useraddr, &info, sizeof (info))) + return -EFAULT; + return 0; + } + break; + + default: + break; + } + + return -EOPNOTSUPP; +} + +/** + * at76c503_ioctl - network ioctls + * @netdev: network devices + * @rq: ifreq block passed to the ioctl + * @cmd: ioctl number + * + * Process ioctls issued to the network driver via the socket interface + * Mostly this is ethtool and the wireless ioctl layer + */ + +static int at76c503_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ + struct at76c503 *dev = netdev->priv; + struct iwreq *wrq = (struct iwreq *)rq; + int ret = 0; + int changed = 0; /* set to 1 if we must re-start the device */ + + if (!netif_device_present(netdev)) + return -ENODEV; + + if (down_interruptible(&dev->sem)) + return -EINTR; + + switch (cmd) { + + /* rudimentary ethtool support for hotplug of SuSE 8.3 */ + case SIOCETHTOOL: + ret = ethtool_ioctl(dev,rq->ifr_data); + break; + + case SIOCGIWNAME: + dbg(DBG_IOCTL, "%s: SIOCGIWNAME", netdev->name); + strcpy(wrq->u.name, "IEEE 802.11-DS"); + break; + + case SIOCGIWAP: + dbg(DBG_IOCTL, "%s: SIOCGIWAP", netdev->name); + wrq->u.ap_addr.sa_family = ARPHRD_ETHER; + + memcpy(wrq->u.ap_addr.sa_data, dev->bssid, ETH_ALEN); + + break; + + case SIOCSIWNICKN: + { + struct iw_point *erq = &wrq->u.data; + char nickn[IW_ESSID_MAX_SIZE+1]; + + memset(nickn, 0, sizeof(nickn)); + + if (erq->flags) { + if (erq->length > IW_ESSID_MAX_SIZE){ + ret = -E2BIG; + goto csiwnickn_error; + } + + if (copy_from_user(nickn, erq->pointer, erq->length)){ + ret = -EFAULT; + goto csiwessid_error; + } + + dbg(DBG_IOCTL, "%s: SIOCSIWNICKN %s", netdev->name, + nickn); + strcpy(dev->nickn, nickn); + } + } + csiwnickn_error: + break; + + case SIOCGIWNICKN: + { + struct iw_point *erq = &wrq->u.data; + + erq->length = strlen(dev->nickn); + if(copy_to_user(erq->pointer, dev->nickn, + erq->length)){ + ret = -EFAULT; + } + } + break; + + case SIOCSIWRTS: + { + int rthr = wrq->u.rts.value; + dbg(DBG_IOCTL, "%s: SIOCSIWRTS: value %d disabled %d", + netdev->name, wrq->u.rts.value, wrq->u.rts.disabled); + + if(wrq->u.rts.disabled) + rthr = MAX_RTS_THRESHOLD; + if((rthr < 0) || (rthr > MAX_RTS_THRESHOLD)) { + ret = -EINVAL; + } else { + dev->rts_threshold = rthr; + changed = 1; + } + } + break; + + // Get the current RTS threshold + case SIOCGIWRTS: + dbg(DBG_IOCTL, "%s: SIOCGIWRTS", netdev->name); + + wrq->u.rts.value = dev->rts_threshold; + wrq->u.rts.disabled = (wrq->u.rts.value == MAX_RTS_THRESHOLD); + wrq->u.rts.fixed = 1; + break; + + // Set the desired fragmentation threshold + + case SIOCSIWFRAG: + { + int fthr = wrq->u.frag.value; + dbg(DBG_IOCTL, "%s: SIOCSIWFRAG, value %d, disabled %d", + netdev->name, wrq->u.frag.value, wrq->u.frag.disabled); + + 
if(wrq->u.frag.disabled) + fthr = MAX_FRAG_THRESHOLD; + if((fthr < MIN_FRAG_THRESHOLD) || (fthr > MAX_FRAG_THRESHOLD)){ + ret = -EINVAL; + }else{ + dev->frag_threshold = fthr & ~0x1; // get an even value + changed = 1; + } + } + break; + + // Get the current fragmentation threshold + case SIOCGIWFRAG: + dbg(DBG_IOCTL, "%s: SIOCGIWFRAG", netdev->name); + + wrq->u.frag.value = dev->frag_threshold; + wrq->u.frag.disabled = (wrq->u.frag.value >= MAX_FRAG_THRESHOLD); + wrq->u.frag.fixed = 1; + break; + + case SIOCGIWFREQ: + dbg(DBG_IOCTL, "%s: SIOCGIWFREQ", netdev->name); + wrq->u.freq.m = dev->channel; + wrq->u.freq.e = 0; + wrq->u.freq.i = 0; + break; + + case SIOCSIWFREQ: + /* copied from orinoco.c */ + { + struct iw_freq *frq = &wrq->u.freq; + int chan = -1; + + if((frq->e == 0) && (frq->m <= 1000)){ + /* Setting by channel number */ + chan = frq->m; + }else{ + /* Setting by frequency - search the table */ + int mult = 1; + int i; + + for(i = 0; i < (6 - frq->e); i++) + mult *= 10; + + for(i = 0; i < NUM_CHANNELS; i++) + if(frq->m == (channel_frequency[i] * mult)) + chan = i+1; + } + + if (chan < 1 || !dev->domain ) + /* non-positive channels are invalid + we need a domain info to set the channel */ + ret = -EINVAL; + else + if (!(dev->domain->channel_map & (1 << (chan-1)))) { + info("%s: channel %d not allowed for domain %s", + dev->netdev->name, chan, dev->domain->name); + ret = -EINVAL; + } + + if (ret == 0) { + dev->channel = chan; + dbg(DBG_IOCTL, "%s: SIOCSIWFREQ ch %d", + netdev->name, chan); + changed = 1; + } + } + break; + + case SIOCGIWMODE: + dbg(DBG_IOCTL, "%s: SIOCGIWMODE", netdev->name); + wrq->u.mode = dev->iw_mode; + break; + + case SIOCSIWMODE: + dbg(DBG_IOCTL, "%s: SIOCSIWMODE %d", netdev->name, wrq->u.mode); + if ((wrq->u.mode != IW_MODE_ADHOC) && + (wrq->u.mode != IW_MODE_INFRA)) + ret = -EINVAL; + else { + dev->iw_mode = wrq->u.mode; + changed = 1; + } + break; + + case SIOCGIWESSID: + dbg(DBG_IOCTL, "%s: SIOCGIWESSID", netdev->name); + { + char *essid = NULL; + struct iw_point *erq = &wrq->u.essid; + + if (dev->essid_size) { + /* not the ANY ssid in dev->essid */ + erq->flags = 1; + erq->length = dev->essid_size; + essid = dev->essid; + } else { + /* the ANY ssid was specified */ + if (dev->istate == CONNECTED && + dev->curr_bss != NULL) { + /* report the SSID we have found */ + erq->flags=1; + erq->length = dev->curr_bss->ssid_len; + essid = dev->curr_bss->ssid; + } else { + /* report ANY back */ + erq->flags=0; + erq->length=0; + } + } + + if(erq->pointer){ + if(copy_to_user(erq->pointer, essid, + erq->length)){ + ret = -EFAULT; + } + } + + } + break; + + case SIOCSIWESSID: + { + char essidbuf[IW_ESSID_MAX_SIZE+1]; + struct iw_point *erq = &wrq->u.essid; + + memset(&essidbuf, 0, sizeof(essidbuf)); + + if (erq->flags) { + if (erq->length > IW_ESSID_MAX_SIZE){ + ret = -E2BIG; + goto csiwessid_error; + } + + if (copy_from_user(essidbuf, erq->pointer, erq->length)){ + ret = -EFAULT; + goto csiwessid_error; + } + + assert(erq->length > 0); + /* iwconfig gives len including 0 byte - + 3 hours debugging... grrrr (oku) */ + dev->essid_size = erq->length - 1; + dbg(DBG_IOCTL, "%s: SIOCSIWESSID %d %s", netdev->name, + dev->essid_size, essidbuf); + memcpy(dev->essid, essidbuf, IW_ESSID_MAX_SIZE); + } else + dev->essid_size = 0; /* ANY ssid */ + changed = 1; + } + csiwessid_error: + break; + + case SIOCGIWRATE: + wrq->u.bitrate.value = dev->txrate == TX_RATE_1MBIT ? 1000000 : + dev->txrate == TX_RATE_2MBIT ? 2000000 : + dev->txrate == TX_RATE_5_5MBIT ? 
5500000 : + dev->txrate == TX_RATE_11MBIT ? 11000000 : 11000000; + wrq->u.bitrate.fixed = (dev->txrate != TX_RATE_AUTO); + wrq->u.bitrate.disabled = 0; + break; + + case SIOCSIWRATE: + dbg(DBG_IOCTL, "%s: SIOCSIWRATE %d", netdev->name, + wrq->u.bitrate.value); + changed = 1; + switch (wrq->u.bitrate.value){ + case -1: dev->txrate = 4; break; /* auto rate */ + case 1000000: dev->txrate = 0; break; + case 2000000: dev->txrate = 1; break; + case 5500000: dev->txrate = 2; break; + case 11000000: dev->txrate = 3; break; + default: + ret = -EINVAL; + changed = 0; + } + break; + + case SIOCSIWENCODE: + dbg(DBG_IOCTL, "%s: SIOCSIWENCODE enc.flags %08x " + "pointer %p len %d", netdev->name, wrq->u.encoding.flags, + wrq->u.encoding.pointer, wrq->u.encoding.length); + dbg(DBG_IOCTL, "%s: old wepstate: enabled %d key_id %d " + "excl_unencr %d\n", + dev->netdev->name, dev->wep_enabled, dev->wep_key_id, + dev->wep_excl_unencr); + changed = 1; + { + int index = (wrq->u.encoding.flags & IW_ENCODE_INDEX) - 1; + /* take the old default key if index is invalid */ + if((index < 0) || (index >= NR_WEP_KEYS)) + index = dev->wep_key_id; + if(wrq->u.encoding.pointer){ + int len = wrq->u.encoding.length; + + if(len > WEP_LARGE_KEY_LEN){ + len = WEP_LARGE_KEY_LEN; + } + + memset(dev->wep_keys[index], 0, WEP_KEY_SIZE); + if(copy_from_user(dev->wep_keys[index], + wrq->u.encoding.pointer, len)) { + dev->wep_keys_len[index] = 0; + changed = 0; + ret = -EFAULT; + }else{ + dev->wep_keys_len[index] = + len <= WEP_SMALL_KEY_LEN ? + WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN; + dev->wep_enabled = 1; + } + } + + dev->wep_key_id = index; + dev->wep_enabled = ((wrq->u.encoding.flags & IW_ENCODE_DISABLED) == 0); + if (wrq->u.encoding.flags & IW_ENCODE_RESTRICTED) + dev->wep_excl_unencr = 1; + if (wrq->u.encoding.flags & IW_ENCODE_OPEN) + dev->wep_excl_unencr = 0; + + dbg(DBG_IOCTL, "%s: new wepstate: enabled %d key_id %d key_len %d " + "excl_unencr %d\n", + dev->netdev->name, dev->wep_enabled, dev->wep_key_id, + dev->wep_keys_len[dev->wep_key_id], + dev->wep_excl_unencr); + } + break; + + // Get the WEP keys and mode + case SIOCGIWENCODE: + dbg(DBG_IOCTL, "%s: SIOCGIWENCODE", netdev->name); + { + int index = (wrq->u.encoding.flags & IW_ENCODE_INDEX) - 1; + if ((index < 0) || (index >= NR_WEP_KEYS)) + index = dev->wep_key_id; + + wrq->u.encoding.flags = + (dev->wep_excl_unencr) ? IW_ENCODE_RESTRICTED : IW_ENCODE_OPEN; + if(!dev->wep_enabled) + wrq->u.encoding.flags |= IW_ENCODE_DISABLED; + if(wrq->u.encoding.pointer){ + wrq->u.encoding.length = dev->wep_keys_len[index]; + if (copy_to_user(wrq->u.encoding.pointer, + dev->wep_keys[index], + dev->wep_keys_len[index])) + ret = -EFAULT; + wrq->u.encoding.flags |= (index + 1); + } + } + break; + +#if IW_MAX_SPY > 0 + // Set the spy list + case SIOCSIWSPY: + /* never needs a device restart */ + ret = ioctl_setspy(dev, &wrq->u.data); + break; + + // Get the spy list + case SIOCGIWSPY: + ret = ioctl_getspy(dev, &wrq->u.data); + break; +#endif /* #if IW_MAX_SPY > 0 */ + + case SIOCSIWPOWER: + dbg(DBG_IOCTL, "%s: SIOCSIWPOWER disabled %d flags x%x value x%x", netdev->name, + wrq->u.power.disabled, wrq->u.power.flags, wrq->u.power.value); + if (wrq->u.power.disabled) + dev->pm_mode = PM_ACTIVE; + else { + /* we set the listen_interval based on the period given */ + /* no idea how to handle the timeout of iwconfig ??? 
*/ + if (wrq->u.power.flags & IW_POWER_PERIOD) { + dev->pm_period_us = wrq->u.power.value; + } + dev->pm_mode = PM_SAVE; /* use iw_priv to select SMART_SAVE */ + } + changed = 1; + break; + + case SIOCGIWPOWER: + dbg(DBG_IOCTL, "%s: SIOCGIWPOWER", netdev->name); + wrq->u.power.disabled = dev->pm_mode == PM_ACTIVE; + if ((wrq->u.power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { + wrq->u.power.flags = IW_POWER_TIMEOUT; + wrq->u.power.value = 0; + } else { + unsigned long flags; + u16 beacon_int; /* of the current bss */ + wrq->u.power.flags = IW_POWER_PERIOD; + + spin_lock_irqsave(&dev->bss_list_spinlock, flags); + beacon_int = dev->curr_bss != NULL ? + dev->curr_bss->beacon_interval : 0; + spin_unlock_irqrestore(&dev->bss_list_spinlock, flags); + + if (beacon_int != 0) { + wrq->u.power.value = + (beacon_int * dev->pm_period_beacon) << 10; + } else + wrq->u.power.value = dev->pm_period_us; + } + wrq->u.power.flags |= IW_POWER_ALL_R; /* ??? */ + break; + + case SIOCGIWPRIV: + if (wrq->u.data.pointer) { + const struct iw_priv_args priv[] = { + { PRIV_IOCTL_SET_SHORT_PREAMBLE, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, + "short_preamble" }, /* 0 - long, 1 -short */ + + { PRIV_IOCTL_SET_DEBUG, + /* we must pass the new debug mask as a string, + 'cause iwpriv cannot parse hex numbers + starting with 0x :-( */ + IW_PRIV_TYPE_CHAR | 10, 0, + "set_debug"}, /* set debug value */ + + { PRIV_IOCTL_SET_AUTH_MODE, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, + "auth_mode"}, /* 0 - open , 1 - shared secret */ + + { PRIV_IOCTL_LIST_BSS, + 0, 0, "list_bss"}, /* dump current bss table */ + + { PRIV_IOCTL_SET_POWERSAVE_MODE, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, + "powersave_mode"}, /* 1 - active, 2 - power save, + 3 - smart power save */ + { PRIV_IOCTL_SET_SCAN_TIMES, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, + "scan_times"}, /* min_channel_time, + max_channel_time */ + { PRIV_IOCTL_SET_SCAN_MODE, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, + "scan_mode"}, /* 0 - active, 1 - passive scan */ + }; + + wrq->u.data.length = sizeof(priv) / sizeof(priv[0]); + if (copy_to_user(wrq->u.data.pointer, priv, + sizeof(priv))) + ret = -EFAULT; + } + break; + + case PRIV_IOCTL_SET_SHORT_PREAMBLE: + { + int val = *((int *)wrq->u.name); + dbg(DBG_IOCTL, "%s: PRIV_IOCTL_SET_SHORT_PREAMBLE, %d", + dev->netdev->name, val); + if (val < 0 || val > 2) + //allow value of 2 - in the win98 driver it stands + //for "auto preamble" ...? 
+ ret = -EINVAL; + else { + dev->preamble_type = val; + changed = 1; + } + } + break; + + case PRIV_IOCTL_SET_DEBUG: + { + char *ptr, nbuf[10+1]; + struct iw_point *erq = &wrq->u.data; + u32 val; + + if (erq->length > 0) { + if (copy_from_user(nbuf, erq->pointer, + min((int)sizeof(nbuf),(int)erq->length))) { + ret = -EFAULT; + goto set_debug_end; + } + val = simple_strtol(nbuf, &ptr, 0); + if (ptr == nbuf) + val = DBG_DEFAULTS; + dbg_uc("%s: PRIV_IOCTL_SET_DEBUG input %d: %s -> x%x", + dev->netdev->name, erq->length, nbuf, val); + } else + val = DBG_DEFAULTS; + dbg_uc("%s: PRIV_IOCTL_SET_DEBUG, old 0x%x new 0x%x", + dev->netdev->name, debug, val); + /* jal: some more output to pin down lockups */ + dbg_uc("%s: netif running %d queue_stopped %d carrier_ok %d", + dev->netdev->name, + netif_running(dev->netdev), + netif_queue_stopped(dev->netdev), + netif_carrier_ok(dev->netdev)); + debug = val; + } + set_debug_end: + break; + + case PRIV_IOCTL_SET_AUTH_MODE: + { + int val = *((int *)wrq->u.name); + dbg(DBG_IOCTL, "%s: PRIV_IOCTL_SET_AUTH_MODE, %d (%s)", + dev->netdev->name, val, + val == 0 ? "open system" : val == 1 ? "shared secret" : + ""); + if (val < 0 || val > 1) + ret = -EINVAL; + else { + dev->auth_mode = (val ? IEEE802_11_AUTH_ALG_SHARED_SECRET : + IEEE802_11_AUTH_ALG_OPEN_SYSTEM); + changed = 1; + } + } + break; + + case PRIV_IOCTL_LIST_BSS: + dump_bss_table(dev, 1); + break; + + case PRIV_IOCTL_SET_POWERSAVE_MODE: + { + int val = *((int *)wrq->u.name); + dbg(DBG_IOCTL, "%s: PRIV_IOCTL_SET_POWERSAVE_MODE, %d (%s)", + dev->netdev->name, val, + val == PM_ACTIVE ? "active" : val == PM_SAVE ? "save" : + val == PM_SMART_SAVE ? "smart save" : ""); + if (val < PM_ACTIVE || val > PM_SMART_SAVE) + ret = -EINVAL; + else { + dev->pm_mode = val; + changed = 1; + } + } + break; + + case PRIV_IOCTL_SET_SCAN_TIMES: + { + int mint = *((int *)wrq->u.name); + int maxt = *((int *)wrq->u.name + 1); + dbg(DBG_IOCTL, "%s: PRIV_IOCTL_SET_SCAN_TIMES, %d %d", + dev->netdev->name, mint, maxt); + if (mint <= 0 || maxt <= 0 || mint > maxt) + ret = -EINVAL; + else { + dev->scan_min_time = mint; + dev->scan_max_time = maxt; + changed = 1; + } + } + break; + + case PRIV_IOCTL_SET_SCAN_MODE: + { + int val = *((int *)wrq->u.name); + dbg(DBG_IOCTL, "%s: PRIV_IOCTL_SET_SCAN_MODE, %d", + dev->netdev->name, val); + if (val != SCAN_TYPE_ACTIVE && val != SCAN_TYPE_PASSIVE) + ret = -EINVAL; + else { + dev->scan_mode = val; + changed = 1; + } + } + break; + + default: + dbg(DBG_IOCTL, "%s: ioctl not supported (0x%x)", netdev->name, cmd); + ret = -EOPNOTSUPP; + } + +#if 1 + + /* we only startup the device if it was already opened before. 
*/ + if ((changed) && (dev->open_count > 0)) { + + unsigned long flags; + + assert(ret >= 0); + + dbg(DBG_IOCTL, "%s %s: restarting the device", dev->netdev->name, + __FUNCTION__); + + /* stop any pending tx bulk urb */ + + /* jal: TODO: protect access to dev->istate by a spinlock + (ISR's on other processors may read/write it) */ + if (dev->istate != INIT) { + dev->istate = INIT; + /* stop pending management stuff */ + del_timer_sync(&dev->mgmt_timer); + + spin_lock_irqsave(&dev->mgmt_spinlock,flags); + if (dev->next_mgmt_bulk) { + kfree(dev->next_mgmt_bulk); + dev->next_mgmt_bulk = NULL; + } + spin_unlock_irqrestore(&dev->mgmt_spinlock,flags); + + netif_carrier_off(dev->netdev); + netif_stop_queue(dev->netdev); + } + + /* do the restart after two seconds to catch + following ioctl's (from more params of iwconfig) + in _one_ restart */ + mod_timer(&dev->restart_timer, jiffies+2*HZ); + } +#endif + up(&dev->sem); + return ret; +} + +void at76c503_delete_device(struct at76c503 *dev) +{ + int i; + + if(dev){ + int sem_taken; + if ((sem_taken=down_trylock(&rtnl_sem)) != 0) + info("%s: rtnl_sem already down'ed", __FUNCTION__); + unregister_netdevice(dev->netdev); + if (!sem_taken) + rtnl_unlock(); + + // assuming we used keventd, it must quiesce too + flush_scheduled_tasks (); + + if(dev->bulk_out_buffer != NULL) + kfree(dev->bulk_out_buffer); + if(dev->ctrl_buffer != NULL) + kfree(dev->ctrl_buffer); + + if(dev->write_urb != NULL) + usb_free_urb(dev->write_urb); + if(dev->read_urb != NULL) + usb_free_urb(dev->read_urb); + if(dev->rx_skb != NULL) + kfree_skb(dev->rx_skb); + if(dev->ctrl_buffer != NULL) + usb_free_urb(dev->ctrl_urb); + + del_timer_sync(&dev->bss_list_timer); + free_bss_list(dev); + + for(i=0; i < NR_RX_DATA_BUF; i++) + if (dev->rx_data[i].skb != NULL) { + dev_kfree_skb(dev->rx_data[i].skb); + dev->rx_data[i].skb = NULL; + } + kfree (dev->netdev); /* dev is in net_dev */ + } +} + +static int at76c503_alloc_urbs(struct at76c503 *dev) +{ + struct usb_interface *interface = dev->interface; + struct usb_interface_descriptor *iface_desc = &interface->altsetting[0]; + struct usb_endpoint_descriptor *endpoint; + struct usb_device *udev = dev->udev; + int i, buffer_size; + + for(i = 0; i < iface_desc->bNumEndpoints; i++) { + endpoint = &iface_desc->endpoint[i]; + + if ((endpoint->bEndpointAddress & 0x80) && + ((endpoint->bmAttributes & 3) == 0x02)) { + /* we found a bulk in endpoint */ + + dev->read_urb = usb_alloc_urb(0); + if (!dev->read_urb) { + err("No free urbs available"); + return -1; + } + dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; + } + + if (((endpoint->bEndpointAddress & 0x80) == 0x00) && + ((endpoint->bmAttributes & 3) == 0x02)) { + /* we found a bulk out endpoint */ + dev->write_urb = usb_alloc_urb(0); + if (!dev->write_urb) { + err("no free urbs available"); + return -1; + } + buffer_size = sizeof(struct at76c503_tx_buffer) + + MAX_PADDING_SIZE; + dev->bulk_out_size = buffer_size; + dev->bulk_out_endpointAddr = endpoint->bEndpointAddress; + dev->bulk_out_buffer = kmalloc (buffer_size, GFP_KERNEL); + if (!dev->bulk_out_buffer) { + err("couldn't allocate bulk_out_buffer"); + return -1; + } + FILL_BULK_URB(dev->write_urb, udev, + usb_sndbulkpipe(udev, + endpoint->bEndpointAddress), + dev->bulk_out_buffer, buffer_size, + at76c503_write_bulk_callback, dev); + } + } + + dev->ctrl_urb = usb_alloc_urb(0); + if (!dev->ctrl_urb) { + err("no free urbs available"); + return -1; + } + dev->ctrl_buffer = kmalloc(1024, GFP_KERNEL); + if (!dev->ctrl_buffer) { + err("couldn't 
allocate ctrl_buffer"); + return -1; + } + + return 0; +} + +struct at76c503 *at76c503_new_device(struct usb_device *udev, int board_type, + const char *netdev_name) +{ + struct net_device *netdev; + struct at76c503 *dev = NULL; + struct usb_interface *interface; + int i,ret; + + /* allocate memory for our device state and intialize it */ + netdev = alloc_etherdev(sizeof(struct at76c503)); + if (netdev == NULL) { + err("out of memory"); + goto error; + } + + dev = (struct at76c503 *)netdev->priv; + dev->udev = udev; + dev->netdev = netdev; + + init_MUTEX (&dev->sem); + INIT_TQUEUE (&dev->kevent, kevent, dev); + + dev->open_count = 0; + + init_timer(&dev->restart_timer); + dev->restart_timer.data = (unsigned long)dev; + dev->restart_timer.function = restart_timeout; + + init_timer(&dev->mgmt_timer); + dev->mgmt_timer.data = (unsigned long)dev; + dev->mgmt_timer.function = mgmt_timeout; + dev->mgmt_spinlock = SPIN_LOCK_UNLOCKED; + dev->next_mgmt_bulk = NULL; + dev->istate = INIT; + + /* initialize empty BSS list */ + dev->curr_bss = dev->new_bss = NULL; + INIT_LIST_HEAD(&dev->bss_list); + dev->bss_list_spinlock = SPIN_LOCK_UNLOCKED; + + init_timer(&dev->bss_list_timer); + dev->bss_list_timer.data = (unsigned long)dev; + dev->bss_list_timer.function = bss_list_timeout; + /* we let this timer run the whole time this driver instance lives */ + mod_timer(&dev->bss_list_timer, jiffies+BSS_LIST_TIMEOUT); + +#if IW_MAX_SPY > 0 + dev->iwspy_nr = 0; +#endif + + /* mark all rx data entries as unused */ + for(i=0; i < NR_RX_DATA_BUF; i++) + dev->rx_data[i].skb = NULL; + + dev->tasklet.func = rx_tasklet; + dev->tasklet.data = (unsigned long)dev; + + dev->board_type = board_type; + + dev->pm_mode = pm_mode; + dev->pm_period_us = pm_period; + + /* set up the endpoint information */ + /* check out the endpoints */ + interface = &udev->actconfig->interface[0]; + dev->interface = interface; + + if(at76c503_alloc_urbs(dev) < 0) + goto error; + + /* get firmware version */ + ret = get_mib(dev->udev, MIB_FW_VERSION, (u8*)&dev->fw_version, sizeof(dev->fw_version)); + if((ret < 0) || ((dev->fw_version.major == 0) && + (dev->fw_version.minor == 0) && + (dev->fw_version.patch == 0) && + (dev->fw_version.build == 0))){ + err("getting firmware failed with %d, or version is 0", ret); + err("this probably means that the ext. fw was not loaded correctly"); + goto error; + } + + /* fw 0.84 doesn't send FCS with rx data */ + if (dev->fw_version.major == 0 && dev->fw_version.minor <= 84) + dev->rx_data_fcs_len = 0; + else + dev->rx_data_fcs_len = 4; + + info("$Id: at76c503.c,v 1.35 2003/07/30 06:31:51 jal2 Exp $ compiled %s %s", __DATE__, __TIME__); + info("firmware version %d.%d.%d #%d (fcs_len %d)", + dev->fw_version.major, dev->fw_version.minor, + dev->fw_version.patch, dev->fw_version.build, + dev->rx_data_fcs_len); + + /* MAC address */ + ret = get_hw_config(dev); + if(ret < 0){ + err("could not get MAC address"); + goto error; + } + + dev->domain = get_reg_domain(dev->regulatory_domain); + /* init. 
netdev->dev_addr */ + memcpy(netdev->dev_addr, dev->mac_addr, ETH_ALEN); + info("device's MAC %s, regulatory domain %s (id %d)", + mac2str(dev->mac_addr), dev->domain->name, + dev->regulatory_domain); + + /* initializing */ + dev->channel = DEF_CHANNEL; + dev->iw_mode = IW_MODE_ADHOC; + memset(dev->essid, 0, IW_ESSID_MAX_SIZE); + memcpy(dev->essid, DEF_ESSID, DEF_ESSID_LEN); + dev->essid_size = DEF_ESSID_LEN; + strncpy(dev->nickn, DEF_ESSID, sizeof(dev->nickn)); + dev->rts_threshold = DEF_RTS_THRESHOLD; + dev->frag_threshold = DEF_FRAG_THRESHOLD; + dev->txrate = TX_RATE_AUTO; + dev->preamble_type = preamble_type; + dev->auth_mode = auth_mode ? IEEE802_11_AUTH_ALG_SHARED_SECRET : + IEEE802_11_AUTH_ALG_OPEN_SYSTEM; + dev->scan_min_time = scan_min_time; + dev->scan_max_time = scan_max_time; + dev->scan_mode = scan_mode; + + netdev->flags &= ~IFF_MULTICAST; /* not yet or never */ + netdev->open = at76c503_open; + netdev->stop = at76c503_stop; + netdev->get_stats = at76c503_get_stats; + netdev->get_wireless_stats = at76c503_get_wireless_stats; + netdev->hard_start_xmit = at76c503_tx; + netdev->tx_timeout = at76c503_tx_timeout; + netdev->do_ioctl = at76c503_ioctl; + netdev->set_multicast_list = at76c503_set_multicast; + netdev->set_mac_address = at76c503_set_mac_address; + strcpy(netdev->name, netdev_name); + // netdev->hard_header_len = 8 + sizeof(struct ieee802_11_hdr); + /* +// netdev->hard_header = at76c503_header; +*/ + return dev; + + error: + at76c503_delete_device(dev); + return NULL; + +} + +/** + * at76c503_do_probe - probe helper for atmel modules + * @mod: module which owns the device + * @udev: USB device + * @extfw: firmware + * @extfw_size: firmware size + * @board_type: hardware type + * @netdev_name: name for device file + * + * Perform all the shared probing functionality for the atmel wireless + * USB. We set up the configuration, download the second stage firmware + * and then bring it up as a network device + */ + +struct at76c503 *at76c503_do_probe(struct module *mod, struct usb_device *udev, u8 *extfw, int extfw_size, int board_type, const char *netdev_name) +{ + int ret; + struct at76c503 *dev; + + usb_inc_dev_use(udev); + + if ((ret = usb_get_configuration(udev)) != 0) { + err("get configuration failed: %d", ret); + goto error; + } + + if ((ret = usb_set_configuration(udev, 1)) != 0) { + err("set configuration to 1 failed: %d", ret); + goto error; + } + + if (extfw && extfw_size) { + ret = at76c503_download_external_fw(udev, extfw, extfw_size); + if (ret < 0) { + if (ret != USB_ST_STALL) { + err("Downloading external firmware failed: %d", ret); + goto error; + } else + dbg(DBG_DEVSTART, + "assuming external fw was already downloaded"); + } + } + + dev = at76c503_new_device(udev, board_type, netdev_name); + if (!dev) { + err("at76c503_new_device returned NULL"); + goto error; + } + + dev->netdev->owner = mod; + + /* putting this inside rtnl_lock() - rtnl_unlock() hangs modprobe ...? */ + ret = register_netdev(dev->netdev); + if (ret) { + err("Unable to register netdevice %s (status %d)!", + dev->netdev->name, ret); + at76c503_delete_device(dev); + goto error; + } + info("registered %s", dev->netdev->name); + + return dev; + +error: + usb_dec_dev_use(udev); + return NULL; +} + +/** + * at76c503_usbdfu_post - download completion callback + * + * Called by usbdfu driver after the firmware has been downloaded, + * before the final reset. 
+ * (this is called in a usb probe (khubd) context) + */ + +int at76c503_usbdfu_post(struct usb_device *udev) +{ + int result; + + dbg(DBG_DEVSTART, "Sending remap command..."); + result = at76c503_remap(udev); + if (result < 0) { + err("Remap command failed (%d)", result); + return result; + } + return 0; +} + +/** + * at76c503_init + * + * Module load entry point. Nothing needed for a library + */ + +static int __init at76c503_init(void) +{ + info(DRIVER_DESC " " DRIVER_VERSION); + return 0; +} + +/** + * at76c503_exit - module unload + * + * Module unload point. As we are just a library module we + * have nothing to release, instead our users will have tidied + * up their resources before we can be unloaded. + */ + +static void __exit at76c503_exit(void) +{ + info(DRIVER_DESC " " DRIVER_VERSION " exit"); +} + +module_init (at76c503_init); +module_exit (at76c503_exit); + +EXPORT_SYMBOL(at76c503_do_probe); +EXPORT_SYMBOL(at76c503_download_external_fw); +EXPORT_SYMBOL(at76c503_new_device); +EXPORT_SYMBOL(at76c503_delete_device); +EXPORT_SYMBOL(at76c503_usbdfu_post); +EXPORT_SYMBOL(at76c503_remap); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/at76c503.h linux.22-ac2/drivers/usb/atmel/at76c503.h --- linux.vanilla/drivers/usb/atmel/at76c503.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/at76c503.h 2003-09-01 13:54:30.000000000 +0100 @@ -0,0 +1,579 @@ +/* -*- linux-c -*- */ +/* $Id: at76c503.h,v 1.15 2003/07/11 20:53:32 jal2 Exp $ + * + * USB at76c503 driver + * + * Copyright (c) 2002 - 2003 Oliver Kurth + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + */ + +#ifndef _AT76C503_H +#define _AT76C503_H + +#include +#include +#include + +#include "ieee802_11.h" /* we need some constants here */ + +/* this wasn't even defined in early 2.4.x kernels ... */ +#ifndef SIOCIWFIRSTPRIV +# define SIOCIWFIRSTPRIV SIOCDEVPRIVATE +#endif + +/* WIRELESS_EXT 8 does not provide iw_point ! */ +#if WIRELESS_EXT <= 8 +/* this comes from */ +#undef IW_MAX_SPY +#define IW_MAX_SPY 0 +#endif + +/* our private ioctl's */ +/* set preamble length*/ +#define PRIV_IOCTL_SET_SHORT_PREAMBLE (SIOCIWFIRSTPRIV + 0x0) +/* set debug parameter */ +#define PRIV_IOCTL_SET_DEBUG (SIOCIWFIRSTPRIV + 0x1) +/* set authentication mode: 0 - open, 1 - shared key */ +#define PRIV_IOCTL_SET_AUTH_MODE (SIOCIWFIRSTPRIV + 0x2) +/* dump bss table */ +#define PRIV_IOCTL_LIST_BSS (SIOCIWFIRSTPRIV + 0x3) +/* set power save mode (incl. 
the Atmel proprietary smart save mode */ +#define PRIV_IOCTL_SET_POWERSAVE_MODE (SIOCIWFIRSTPRIV + 0x4) +/* set min and max channel times for scan */ +#define PRIV_IOCTL_SET_SCAN_TIMES (SIOCIWFIRSTPRIV + 0x5) +/* set scan mode */ +#define PRIV_IOCTL_SET_SCAN_MODE (SIOCIWFIRSTPRIV + 0x6) + +#define DEVICE_VENDOR_REQUEST_OUT 0x40 +#define DEVICE_VENDOR_REQUEST_IN 0xc0 +#define INTERFACE_VENDOR_REQUEST_OUT 0x41 +#define INTERFACE_VENDOR_REQUEST_IN 0xc1 +#define CLASS_REQUEST_OUT 0x21 +#define CLASS_REQUEST_IN 0xa1 + +#define CMD_STATUS_IDLE 0x00 +#define CMD_STATUS_COMPLETE 0x01 +#define CMD_STATUS_UNKNOWN 0x02 +#define CMD_STATUS_INVALID_PARAMETER 0x03 +#define CMD_STATUS_FUNCTION_NOT_SUPPORTED 0x04 +#define CMD_STATUS_TIME_OUT 0x07 +#define CMD_STATUS_IN_PROGRESS 0x08 +#define CMD_STATUS_HOST_FAILURE 0xff +#define CMD_STATUS_SCAN_FAILED 0xf0 + +#define OPMODE_NONE 0x00 +#define OPMODE_NETCARD 0x01 +#define OPMODE_CONFIG 0x02 +#define OPMODE_DFU 0x03 +#define OPMODE_NOFLASHNETCARD 0x04 + +#define CMD_SET_MIB 0x01 +#define CMD_GET_MIB 0x02 +#define CMD_SCAN 0x03 +#define CMD_JOIN 0x04 +#define CMD_START_IBSS 0x05 +#define CMD_RADIO 0x06 +#define CMD_STARTUP 0x0B +#define CMD_GETOPMODE 0x33 + +#define MIB_LOCAL 0x01 +#define MIB_MAC_ADD 0x02 +#define MIB_MAC 0x03 +#define MIB_MAC_MGMT 0x05 +#define MIB_MAC_WEP 0x06 +#define MIB_PHY 0x07 +#define MIB_FW_VERSION 0x08 +#define MIB_MDOMAIN 0x09 + +#define ADHOC_MODE 1 +#define INFRASTRUCTURE_MODE 2 + +/* values for struct mib_local, field preamble_type */ +#define PREAMBLE_TYPE_SHORT 1 +#define PREAMBLE_TYPE_LONG 0 + +/* values for tx_rate */ +#define TX_RATE_1MBIT 0 +#define TX_RATE_2MBIT 1 +#define TX_RATE_5_5MBIT 2 +#define TX_RATE_11MBIT 3 +#define TX_RATE_AUTO 4 + +/* power management modi */ +#define PM_ACTIVE 1 +#define PM_SAVE 2 +#define PM_SMART_SAVE 3 + +/* offsets into the MIBs we use to configure the device */ +#define TX_AUTORATE_FALLBACK_OFFSET offsetof(struct mib_local,txautorate_fallback) +#define FRAGMENTATION_OFFSET offsetof(struct mib_mac,frag_threshold) +#define PREAMBLE_TYPE_OFFSET offsetof(struct mib_local,preamble_type) +#define RTS_OFFSET offsetof(struct mib_mac, rts_threshold) + +/* valid only for rfmd and 505 !*/ +#define IBSS_CHANGE_OK_OFFSET offsetof(struct mib_mac_mgmt, ibss_change) +#define IROAMING_OFFSET \ + offsetof(struct mib_mac_mgmt, multi_domain_capability_enabled) +/* the AssocID */ +#define STATION_ID_OFFSET offsetof(struct mib_mac_mgmt, station_id) +#define POWER_MGMT_MODE_OFFSET offsetof(struct mib_mac_mgmt, power_mgmt_mode) +#define LISTEN_INTERVAL_OFFSET offsetof(struct mib_mac, listen_interval) + +#define BOARDTYPE_INTERSIL 0 +#define BOARDTYPE_RFMD 1 +#define BOARDTYPE_R505 2 + +struct hwcfg_r505 { + u8 cr39_values[14]; + u8 reserved1[14]; + u8 bb_cr[14]; + u8 pidvid[4]; + u8 mac_addr[ETH_ALEN]; + u8 regulatory_domain; + u8 reserved2[14]; + u8 cr15_values[14]; + u8 reserved3[3]; +} __attribute__ ((packed)); + +struct hwcfg_rfmd { + u8 cr20_values[14]; + u8 cr21_values[14]; + u8 bb_cr[14]; + u8 pidvid[4]; + u8 mac_addr[ETH_ALEN]; + u8 regulatory_domain; + u8 low_power_values[14]; + u8 normal_power_values[14]; + u8 reserved1[3]; +} __attribute__ ((packed)); + +struct hwcfg_intersil { + u8 mac_addr[ETH_ALEN]; + u8 cr31_values[14]; + u8 cr58_values[14]; + u8 pidvid[4]; + u8 regulatory_domain; + u8 reserved[1]; +} __attribute__ ((packed)); + +#define WEP_KEY_SIZE 13 +#define NR_WEP_KEYS 4 +#define WEP_SMALL_KEY_LEN (40/8) +#define WEP_LARGE_KEY_LEN (104/8) + +struct at76c503_card_config { + u8 
exclude_unencrypted; + u8 promiscuous_mode; + u8 short_retry_limit; + u8 encryption_type; + u16 rts_threshold; + u16 fragmentation_threshold; // 256..2346 + u8 basic_rate_set[4]; + u8 auto_rate_fallback; //0,1 + u8 channel; + u8 privacy_invoked; + u8 wep_default_key_id; // 0..3 + u8 current_ssid[32]; + u8 wep_default_key_value[4][WEP_KEY_SIZE]; + u8 ssid_len; + u8 short_preamble; + u16 beacon_period; +} __attribute__ ((packed)); + +struct at76c503_command { + u8 cmd; + u8 reserved; + u16 size; +} __attribute__ ((packed)); + +/* the length of the Atmel firmware specific rx header before IEEE 802.11 starts */ +#define AT76C503_RX_HDRLEN offsetof(struct at76c503_rx_buffer, packet) + +struct at76c503_rx_buffer { + u16 wlength; + u8 rx_rate; + u8 newbss; + u8 fragmentation; + u8 rssi; + u8 link_quality; + u8 noise_level; + u8 rx_time[4]; + u8 packet[IEEE802_11_MAX_FRAME_LEN]; +} __attribute__ ((packed)); + +/* the length of the Atmel firmware specific tx header before IEEE 802.11 starts */ +#define AT76C503_TX_HDRLEN offsetof(struct at76c503_tx_buffer, packet) + +struct at76c503_tx_buffer { + u16 wlength; + u8 tx_rate; + u8 padding; + u8 reserved[4]; + u8 packet[IEEE802_11_MAX_FRAME_LEN]; +} __attribute__ ((packed)); + +/* defines for scan_type below */ +#define SCAN_TYPE_ACTIVE 0 +#define SCAN_TYPE_PASSIVE 1 + +struct at76c503_start_scan { + u8 bssid[ETH_ALEN]; + u8 essid[32]; + u8 scan_type; + u8 channel; + u16 probe_delay; + u16 min_channel_time; + u16 max_channel_time; + u8 essid_size; + u8 international_scan; +} __attribute__ ((packed)); + +struct at76c503_start_bss { + u8 bssid[ETH_ALEN]; + u8 essid[32]; + u8 bss_type; + u8 channel; + u8 essid_size; + u8 reserved[3]; +} __attribute__ ((packed)); + +struct at76c503_join { + u8 bssid[ETH_ALEN]; + u8 essid[32]; + u8 bss_type; + u8 channel; + u16 timeout; + u8 essid_size; + u8 reserved; +} __attribute__ ((packed)); + +struct set_mib_buffer { + u8 type; + u8 size; + u8 index; + u8 reserved; + u8 data[72]; +} __attribute__ ((packed)); + +struct mib_local { + u16 reserved0; + u8 beacon_enable; + u8 txautorate_fallback; + u8 reserved1; + u8 ssid_size; + u8 promiscuous_mode; + u16 reserved2; + u8 preamble_type; + u16 reserved3; +} __attribute__ ((packed)); + +struct mib_mac_addr { + u8 mac_addr[ETH_ALEN]; + u8 res[2]; /* ??? 
*/ + u8 group_addr[4][ETH_ALEN]; + u8 group_addr_status[4]; +} __attribute__ ((packed)); + +struct mib_mac { + u32 max_tx_msdu_lifetime; + u32 max_rx_lifetime; + u16 frag_threshold; + u16 rts_threshold; + u16 cwmin; + u16 cwmax; + u8 short_retry_time; + u8 long_retry_time; + u8 scan_type; /* active or passive */ + u8 scan_channel; + u16 probe_delay; /* delay before sending a ProbeReq in active scan, RO */ + u16 min_channel_time; + u16 max_channel_time; + u16 listen_interval; + u8 desired_ssid[32]; + u8 desired_bssid[ETH_ALEN]; + u8 desired_bsstype; /* ad-hoc or infrastructure */ + u8 reserved2; +} __attribute__ ((packed)); + +struct mib_mac_mgmt { + u16 beacon_period; + u16 CFP_max_duration; + u16 medium_occupancy_limit; + u16 station_id; /* assoc id */ + u16 ATIM_window; + u8 CFP_mode; + u8 privacy_option_implemented; + u8 DTIM_period; + u8 CFP_period; + u8 current_bssid[ETH_ALEN]; + u8 current_essid[32]; + u8 current_bss_type; + u8 power_mgmt_mode; + /* rfmd and 505 */ + u8 ibss_change; + u8 res; + u8 multi_domain_capability_implemented; + u8 multi_domain_capability_enabled; + u8 country_string[3]; + u8 reserved[3]; +} __attribute__ ((packed)); + +struct mib_mac_wep { + u8 privacy_invoked; /* 0 disable encr., 1 enable encr */ + u8 wep_default_key_id; + u8 wep_key_mapping_len; + u8 exclude_unencrypted; + u32 wep_icv_error_count; + u32 wep_excluded_count; + u8 wep_default_keyvalue[NR_WEP_KEYS][WEP_KEY_SIZE]; + u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */ +} __attribute__ ((packed)); + +struct mib_phy { + u32 ed_threshold; + + u16 slot_time; + u16 sifs_time; + u16 preamble_length; + u16 plcp_header_length; + u16 mpdu_max_length; + u16 cca_mode_supported; + + u8 operation_rate_set[4]; + u8 channel_id; + u8 current_cca_mode; + u8 phy_type; + u8 current_reg_domain; +} __attribute__ ((packed)); + +struct mib_fw_version { + u8 major; + u8 minor; + u8 patch; + u8 build; +} __attribute__ ((packed)); + +struct mib_mdomain { + u8 tx_powerlevel[14]; + u8 channel_list[14]; /* 0 for invalid channels */ +} __attribute__ ((packed)); + +/* states in infrastructure mode */ +enum infra_state { + INIT, + SCANNING, + AUTHENTICATING, + ASSOCIATING, + REASSOCIATING, + DISASSOCIATING, + JOINING, + CONNECTED, + STARTIBSS +}; + +/* a description of a regulatory domain and the allowed channels */ +struct reg_domain { + u16 code; + char const *name; + u32 channel_map; /* if bit N is set, channel (N+1) is allowed */ +}; + +/* how long do we keep a (I)BSS in the bss_list in jiffies + this should be long enough for the user to retrieve the table + (by iwlist ?) after the device started, because all entries from + other channels than the one the device locks on get removed, too */ +#define BSS_LIST_TIMEOUT (120*HZ) + +/* struct to store BSS info found during scan */ +#define BSS_LIST_MAX_RATE_LEN 32 /* 32 rates should be enough ... */ + +struct bss_info{ + struct list_head list; + + u8 mac[ETH_ALEN]; /* real mac address, differs + for ad-hoc from bssid */ + u8 bssid[ETH_ALEN]; /* bssid */ + u8 ssid[IW_ESSID_MAX_SIZE+1]; /* ssid, +1 for trailing \0 + to make it printable */ + u8 ssid_len; /* length of ssid above */ + u8 channel; + u16 capa; /* the capabilities of the BSS (in original endianess - + we only check IEEE802_11 bits in it) */ + u16 beacon_interval; /* the beacon interval in units of TU (1.024 ms) + (in cpu endianess - we must calc. values from it) */ + u8 rates[BSS_LIST_MAX_RATE_LEN]; /* supported rates (list of bytes: + (basic_rate ? 0x80 : 0) + rate/(500 Kbit/s); e.g. 
+ x82,x84,x8b,x96 for basic rates 1,2,5.5,11 MBit/s) */ + u8 rates_len; + + /* quality of received beacon */ + u8 rssi; + u8 link_qual; + u8 noise_level; + + unsigned long last_rx; /* time (jiffies) of last beacon received */ + u16 assoc_id; /* if this is dev->curr_bss this is the assoc id we got + in a successful AssocResponse */ +}; + +/* a rx data buffer to collect rx fragments */ +struct rx_data_buf { + u8 sender[ETH_ALEN]; /* sender address */ + u16 seqnr; /* sequence number */ + u16 fragnr; /* last fragment received */ + unsigned long last_rx; /* jiffies of last rx */ + struct sk_buff *skb; /* == NULL if entry is free */ +}; + +#define NR_RX_DATA_BUF 8 + +/* how often do we try to submit a rx urb until giving up */ +#define NR_SUBMIT_RX_TRIES 8 + +struct at76c503 { + struct usb_device *udev; /* save off the usb device pointer */ + struct net_device *netdev; /* save off the net device pointer */ + struct net_device_stats stats; + struct iw_statistics wstats; + struct usb_interface *interface; /* the interface for this device */ + + unsigned char num_ports; /* the number of ports this device has */ + char num_interrupt_in; /* number of interrupt in endpoints we have */ + char num_bulk_in; /* number of bulk in endpoints we have */ + char num_bulk_out; /* number of bulk out endpoints we have */ + + struct sk_buff * rx_skb; /* skbuff for receiving packets */ + __u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */ + + unsigned char * bulk_out_buffer; /* the buffer to send data */ + int bulk_out_size; /* the size of the send buffer */ + struct urb * write_urb; /* the urb used to send data */ + struct urb * read_urb; + __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */ + + struct tq_struct tqueue; /* task queue for line discipline waking up */ + int open_count; /* number of times this port has been opened */ + struct semaphore sem; /* locks this structure */ + + + u32 kevent_flags; + struct tq_struct kevent; + int nr_submit_rx_tries; /* number of tries to submit an rx urb left */ + struct tasklet_struct tasklet; + struct urb *rx_urb; /* tmp urb pointer for rx_tasklet */ + + unsigned char *ctrl_buffer; + struct urb *ctrl_urb; + + u8 op_mode; + + /* the WEP stuff */ + int wep_excl_unencr; /* 1 if unencrypted packets shall be discarded */ + int wep_enabled; /* 1 if WEP is enabled */ + int wep_key_id; /* key id to be used */ + u8 wep_keys[NR_WEP_KEYS][WEP_KEY_SIZE]; /* the four WEP keys, + 5 or 13 bytes are used */ + u8 wep_keys_len[NR_WEP_KEYS]; /* the length of the above keys */ + + int channel; + int iw_mode; + int curr_ap; + u8 bssid[ETH_ALEN]; + u8 essid[IW_ESSID_MAX_SIZE]; + char nickn[IW_ESSID_MAX_SIZE+1]; /* nickname, only used in the iwconfig i/f */ + int essid_size; + int radio_on; + int promisc; + + int preamble_type; /* 0 - long preamble, 1 - short preamble */ + int auth_mode; /* authentication type: 0 open, 1 shared key */ + int txrate; /* 0,1,2,3 = 1,2,5.5,11 MBit, 4 is auto-fallback */ + int frag_threshold; /* threshold for fragmentation of tx packets */ + int rts_threshold; /* threshold for RTS mechanism */ + + int scan_min_time; /* scan min channel time */ + int scan_max_time; /* scan max channel time */ + int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */ + + /* the list we got from scanning */ + spinlock_t bss_list_spinlock; /* protects bss_list operations and setting + curr_bss and new_bss */ + struct list_head bss_list; /* the list of bss we received beacons from */ + struct timer_list bss_list_timer; /* a timer removing old entries 
from + the bss_list. It must aquire bss_list_spinlock + before and must not remove curr_bss nor + new_bss ! */ + struct bss_info *curr_bss; /* if istate == AUTH, ASSOC, REASSOC, JOIN or CONN + dev->bss[curr_bss] is the currently selected BSS + we operate on */ + struct bss_info *new_bss; /* if istate == REASSOC dev->new_bss + is the new bss we want to reassoc to */ + + /* some data for infrastructure mode only */ + spinlock_t mgmt_spinlock; /* this spinlock protects access to + next_mgmt_bulk and istate */ + struct at76c503_tx_buffer *next_mgmt_bulk; /* pending management msg to + send via bulk out */ + enum infra_state istate; + + struct timer_list restart_timer; /* the timer we use to delay the restart a bit */ + + struct timer_list mgmt_timer; /* the timer we use to repeat auth_req etc. */ + int retries; /* counts backwards while re-trying to send auth/assoc_req's */ + u16 assoc_id; /* the assoc_id for states JOINING, REASSOCIATING, CONNECTED */ + u8 pm_mode ; /* power management mode: ACTIVE, SAVE, SMART_SAVE */ + u32 pm_period_us; /* power manag. period (in us ?) - set by iwconfig */ + u32 pm_period_beacon; /* power manag. period (in beacon intervals + of the curr_bss) */ + int board_type; /* 0 = Intersil, 1 = RFMD, 2 = R505 */ + + struct reg_domain const *domain; /* the description of the regulatory domain */ + + /* iwspy support */ +#if IW_MAX_SPY > 0 + int iwspy_nr; /* nr of valid entries below */ + struct sockaddr iwspy_addr[IW_MAX_SPY]; + struct iw_quality iwspy_stats[IW_MAX_SPY]; +#endif + + /* These fields contain HW config provided by the device (not all of + * these fields are used by all board types) */ + u8 mac_addr[ETH_ALEN]; + u8 bb_cr[14]; + u8 pidvid[4]; + u8 regulatory_domain; + u8 cr15_values[14]; + u8 cr20_values[14]; + u8 cr21_values[14]; + u8 cr31_values[14]; + u8 cr39_values[14]; + u8 cr58_values[14]; + u8 low_power_values[14]; + u8 normal_power_values[14]; + + struct at76c503_card_config card_config; + struct mib_fw_version fw_version; + + int rx_data_fcs_len; /* length of the trailing FCS + (0 for fw <= 0.84.x, 4 otherwise) */ + /* store rx fragments until complete */ + struct rx_data_buf rx_data[NR_RX_DATA_BUF]; +}; + +/* Function prototypes */ + +struct at76c503 *at76c503_do_probe(struct module *mod, struct usb_device *udev, u8 *extfw, int extfw_size, int board_type, const char *netdev_name); +int at76c503_download_external_fw(struct usb_device *udev, u8 *buf, int size); +struct at76c503 *at76c503_new_device(struct usb_device *udev, int board_type, const char *netdev_name); +void at76c503_delete_device(struct at76c503 *dev); +int at76c503_usbdfu_post(struct usb_device *udev); +int at76c503_remap(struct usb_device *udev); + +#define DRIVER_VERSION "v0.11beta4-ac1" + +#endif /* _AT76C503_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/at76c503-i3861.c linux.22-ac2/drivers/usb/atmel/at76c503-i3861.c --- linux.vanilla/drivers/usb/atmel/at76c503-i3861.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/at76c503-i3861.c 2003-08-14 16:25:32.000000000 +0100 @@ -0,0 +1,205 @@ +/* -*- linux-c -*- */ +/* + * at76c503-i3861.c: + * + * Driver for at76c503-based devices based on the Atmel "Fast-Vnet" reference + * design using the Intersil 3861 radio chip + * + * Copyright (c) 2002 - 2003 Oliver Kurth + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of 
+ * the License, or (at your option) any later version. + * + * + * This driver is derived from usb-skeleton.c + * + * This driver contains code specific to Atmel AT76C503 (USB wireless 802.11) + * devices which use the Intersil 3861 radio chip. Almost all of the actual + * driver is handled by the generic at76c503.c module, this file mostly just + * deals with the initial probes and downloading the correct firmware to the + * device before handing it off to at76c503. + * + * History: + * + * 2003_02_15 0.1: (alex) + * - created 3861-specific driver file + * + * 2003_02_18 0.2: (alex) + * - Reduced duplicated code and moved as much as possible into at76c503.c + * - Changed default netdev name to "wlan%d" + */ + +#include +#include +#include + +#include "at76c503.h" +#include "../usbdfu.h" + +/* Include firmware data definition */ + +#include "fw-i3861.h" + +/* Version Information */ + +#define DRIVER_NAME "at76c503-i3861" +#define DRIVER_AUTHOR \ +"Oliver Kurth , Joerg Albert , Alex " +#define DRIVER_DESC "Atmel at76c503 (Intersil 3861) Wireless LAN Driver" + +/* USB Device IDs supported by this driver */ + +#define VENDOR_ID_ATMEL 0x03eb +#define PRODUCT_ID_ATMEL_503I 0x7603 /* Generic AT76C503/3861 device */ + +#define VENDOR_ID_LINKSYS 0x066b +#define PRODUCT_ID_LINKSYS_WUSB11_V21 0x2211 /* Linksys WUSB11 v2.1/v2.6 */ + +#define VENDOR_ID_NETGEAR 0x0864 +#define PRODUCT_ID_NETGEAR_MA101A 0x4100 /* Netgear MA 101 Rev. A */ + +#define VENDOR_ID_TEKRAM 0x0b3b +#define PRODUCT_ID_TEKRAM_U300C 0x1612 /* Tekram U-300C / Allnet ALL0193 */ + +#define VENDOR_ID_HP 0x03f0 +#define PRODUCT_ID_HP_HN210W 0x011c /* HP HN210W PKW-J7801A */ + +#define VENDOR_ID_M4Y750 0x0cde /* Unknown Vendor ID! */ +#define PRODUCT_ID_M4Y750 0x0001 /* Sitecom/Z-Com/Zyxel M4Y-750 */ + +#define VENDOR_ID_DYNALINK 0x069a +#define PRODUCT_ID_DYNALINK_WLL013_I 0x0320 /* Dynalink/Askey WLL013 (intersil) */ + +#define VENDOR_ID_SMC 0x0d5c +#define PRODUCT_ID_SMC2662W_V1 0xa001 /* EZ connect 11Mpbs +Wireless USB Adapter SMC2662W (v1) */ + +#define VENDOR_ID_BENQ 0x4a5 /* BenQ (Acer) */ +#define PRODUCT_ID_BENQ_AWL_300 0x9000 /* AWL-300 */ + +/* this adapter contains flash */ +#define VENDOR_ID_ADDTRON 0x05dd /* Addtron */ +#define PRODUCT_ID_ADDTRON_AWU120 0xff31 /* AWU-120 */ + +#define VENDOR_ID_CONCEPTRONIC 0x0d8e +#define PRODUCT_ID_CONCEPTRONIC_C11U 0x7100 + +static struct usb_device_id dev_table[] = { + { USB_DEVICE(VENDOR_ID_ATMEL, PRODUCT_ID_ATMEL_503I ) }, + { USB_DEVICE(VENDOR_ID_LINKSYS, PRODUCT_ID_LINKSYS_WUSB11_V21) }, + { USB_DEVICE(VENDOR_ID_NETGEAR, PRODUCT_ID_NETGEAR_MA101A ) }, + { USB_DEVICE(VENDOR_ID_TEKRAM, PRODUCT_ID_TEKRAM_U300C ) }, + { USB_DEVICE(VENDOR_ID_HP, PRODUCT_ID_HP_HN210W ) }, + { USB_DEVICE(VENDOR_ID_M4Y750, PRODUCT_ID_M4Y750 ) }, + { USB_DEVICE(VENDOR_ID_DYNALINK, PRODUCT_ID_DYNALINK_WLL013_I ) }, + { USB_DEVICE(VENDOR_ID_SMC, PRODUCT_ID_SMC2662W_V1 ) }, + { USB_DEVICE(VENDOR_ID_BENQ, PRODUCT_ID_BENQ_AWL_300 ) }, + { USB_DEVICE(VENDOR_ID_ADDTRON, PRODUCT_ID_ADDTRON_AWU120 ) }, + { USB_DEVICE(VENDOR_ID_CONCEPTRONIC,PRODUCT_ID_CONCEPTRONIC_C11U) }, + { } +}; + +/* firmware / config variables */ + +static unsigned char fw_internal[] = FW_I3861_INTERNAL; +static unsigned char fw_external[] = FW_I3861_EXTERNAL; + +static int board_type = BOARDTYPE_INTERSIL; + +/*---------------------------------------------------------------------------*/ + +MODULE_DEVICE_TABLE (usb, dev_table); + +/* Module paramaters */ + +static char netdev_name[IFNAMSIZ+1] = "wlan%d"; +MODULE_PARM(netdev_name, "c" 
__MODULE_STRING(IFNAMSIZ)); +MODULE_PARM_DESC(netdev_name, + "network device name (default is wlan%d)"); + +/* local function prototypes */ + +static void *at76c50x_probe(struct usb_device *dev, unsigned int ifnum, + const struct usb_device_id *id); +static void at76c50x_disconnect(struct usb_device *dev, void *ptr); + +/* structure for registering this driver with the usb subsystem */ + +static struct usb_driver module_usb = { + owner: THIS_MODULE, + name: DRIVER_NAME, + probe: at76c50x_probe, + disconnect: at76c50x_disconnect, + id_table: dev_table, +}; + +/* structure for registering this firmware with the usbdfu subsystem */ + +static struct usbdfu_info module_usbdfu = { + name: DRIVER_NAME, + id_table: dev_table, + fw_buf: fw_internal, + fw_buf_len: sizeof(fw_internal), + post_download_hook: at76c503_usbdfu_post, + reset_delay: 2*HZ +}; + +/* Module and USB entry points */ + +static void *at76c50x_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id) +{ + if (usbdfu_in_use(udev, ifnum)) { + /* the device is in DFU mode and usbdfu.c is handling it */ + return NULL; + } + + return at76c503_do_probe(THIS_MODULE, udev, fw_external, sizeof(fw_external), board_type, netdev_name); +} + +static void at76c50x_disconnect(struct usb_device *udev, void *ptr) +{ + info("%s disconnected", ((struct at76c503 *)ptr)->netdev->name); + at76c503_delete_device(ptr); +} + +static int __init mod_init(void) +{ + int result; + + info(DRIVER_DESC " " DRIVER_VERSION); + + /* register with usbdfu so that the firmware will be automatically + * downloaded to the device on detection */ + result = usbdfu_register(&module_usbdfu); + if (result < 0) { + err("usbdfu_register failed (status %d)", result); + return -1; + } + + /* register this driver with the USB subsystem */ + result = usb_register(&module_usb); + if (result < 0) { + err("usb_register failed (status %d)", result); + usbdfu_unregister(&module_usbdfu); + return -1; + } + + return 0; +} + +static void __exit mod_exit(void) +{ + info(DRIVER_DESC " " DRIVER_VERSION " unloading"); + /* deregister this driver with the USB subsystem */ + usbdfu_unregister(&module_usbdfu); + usb_deregister(&module_usb); +} + +module_init (mod_init); +module_exit (mod_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/at76c503-i3863.c linux.22-ac2/drivers/usb/atmel/at76c503-i3863.c --- linux.vanilla/drivers/usb/atmel/at76c503-i3863.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/at76c503-i3863.c 2003-08-14 16:25:54.000000000 +0100 @@ -0,0 +1,167 @@ +/* -*- linux-c -*- */ +/* + * at76c503-i3863.c: + * + * Driver for at76c503-based devices based on the Atmel "Fast-Vnet" reference + * design using the Intersil 3863 radio chip + * + * Copyright (c) 2002 - 2003 Oliver Kurth + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * + * This driver is derived from usb-skeleton.c + * + * This driver contains code specific to Atmel AT76C503 (USB wireless 802.11) + * devices which use the Intersil 3863 radio chip. 
Almost all of the actual + * driver is handled by the generic at76c503.c module, this file mostly just + * deals with the initial probes and downloading the correct firmware to the + * device before handing it off to at76c503. + * + * History: + * + * 2003_02_15 0.1: (alex) + * - created 3863-specific driver file + * + * 2003_02_18 0.2: (alex) + * - Reduced duplicated code and moved as much as possible into at76c503.c + * - Changed default netdev name to "wlan%d" + */ + +#include +#include +#include + +#include "at76c503.h" +#include "../usbdfu.h" + +/* Include firmware data definition */ + +#include "fw-i3863.h" + +/* Version Information */ + +#define DRIVER_NAME "at76c503-i3863" +#define DRIVER_AUTHOR \ +"Oliver Kurth , Joerg Albert , Alex " +#define DRIVER_DESC "Atmel at76c503 (Intersil 3863) Wireless LAN Driver" + +/* USB Device IDs supported by this driver */ + +#define VENDOR_ID_ATMEL 0x03eb +#define PRODUCT_ID_ATMEL_503_I3863 0x7604 /* Generic AT76C503/3863 device */ + +#define VENDOR_ID_SAMSUNG 0x055d +#define PRODUCT_ID_SAMSUNG_SWL2100U 0xa000 /* Samsung SWL-2100U */ + +static struct usb_device_id dev_table[] = { + { USB_DEVICE(VENDOR_ID_ATMEL, PRODUCT_ID_ATMEL_503_I3863 ) }, + { USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG_SWL2100U) }, + { } +}; + +/* firmware / config variables */ + +static unsigned char fw_internal[] = FW_I3863_INTERNAL; +static unsigned char fw_external[] = FW_I3863_EXTERNAL; + +static int board_type = BOARDTYPE_INTERSIL; + +/*---------------------------------------------------------------------------*/ + +MODULE_DEVICE_TABLE (usb, dev_table); + +/* Module paramaters */ + +static char netdev_name[IFNAMSIZ+1] = "wlan%d"; +MODULE_PARM(netdev_name, "c" __MODULE_STRING(IFNAMSIZ)); +MODULE_PARM_DESC(netdev_name, + "network device name (default is wlan%d)"); + +/* local function prototypes */ + +static void *at76c50x_probe(struct usb_device *dev, unsigned int ifnum, + const struct usb_device_id *id); +static void at76c50x_disconnect(struct usb_device *dev, void *ptr); + +/* structure for registering this driver with the usb subsystem */ + +static struct usb_driver module_usb = { + owner: THIS_MODULE, + name: DRIVER_NAME, + probe: at76c50x_probe, + disconnect: at76c50x_disconnect, + id_table: dev_table, +}; + +/* structure for registering this firmware with the usbdfu subsystem */ + +static struct usbdfu_info module_usbdfu = { + name: DRIVER_NAME, + id_table: dev_table, + fw_buf: fw_internal, + fw_buf_len: sizeof(fw_internal), + post_download_hook: at76c503_usbdfu_post, + reset_delay: 2*HZ +}; + +/* Module and USB entry points */ + +static void *at76c50x_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id) +{ + if (usbdfu_in_use(udev, ifnum)) { + /* the device is in DFU mode and usbdfu.c is handling it */ + return NULL; + } + + return at76c503_do_probe(THIS_MODULE, udev, fw_external, sizeof(fw_external), board_type, netdev_name); +} + +static void at76c50x_disconnect(struct usb_device *udev, void *ptr) +{ + info("%s disconnected", ((struct at76c503 *)ptr)->netdev->name); + at76c503_delete_device(ptr); +} + +static int __init mod_init(void) +{ + int result; + + info(DRIVER_DESC " " DRIVER_VERSION); + + /* register with usbdfu so that the firmware will be automatically + * downloaded to the device on detection */ + result = usbdfu_register(&module_usbdfu); + if (result < 0) { + err("usbdfu_register failed (status %d)", result); + return -1; + } + + /* register this driver with the USB subsystem */ + result = usb_register(&module_usb); 
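+ /* If USB registration fails, the usbdfu registration made above must
+  * be dropped as well, so that usbdfu.c is not left holding an entry
+  * for a module that never finished loading. */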
+ if (result < 0) { + err("usb_register failed (status %d)", result); + usbdfu_unregister(&module_usbdfu); + return -1; + } + + return 0; +} + +static void __exit mod_exit(void) +{ + info(DRIVER_DESC " " DRIVER_VERSION " unloading"); + /* unregister this driver with the USB subsystem */ + usbdfu_unregister(&module_usbdfu); + usb_deregister(&module_usb); +} + +module_init (mod_init); +module_exit (mod_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/at76c503-rfmd-acc.c linux.22-ac2/drivers/usb/atmel/at76c503-rfmd-acc.c --- linux.vanilla/drivers/usb/atmel/at76c503-rfmd-acc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/at76c503-rfmd-acc.c 2003-08-14 16:26:28.000000000 +0100 @@ -0,0 +1,165 @@ +/* -*- linux-c -*- */ +/* + * at76c503-rfmd-acc.c: + * + * Driver for at76c503-based devices based on the Atmel "Fast-Vnet" reference + * design using RFMD radio chips. This module uses the firmware for RFMD with + * ACCTON design. + * + * Copyright (c) 2002 - 2003 Oliver Kurth + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * + * This driver is derived from usb-skeleton.c + * + * This driver contains code specific to Atmel AT76C503 (USB wireless 802.11) + * devices which use radio chips from RF Micro Devices (RFMD). Almost + * all of the actual driver is handled by the generic at76c503.c module, this + * file mostly just deals with the initial probes and downloading the correct + * firmware to the device before handing it off to at76c503. 
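+ *
+ * The hand-off relied on here is, roughly, the following two-stage
+ * sequence (an illustrative sketch only -- the exact ordering lives in
+ * usbdfu.c and at76c503.c, and at76c503_download_external_fw() is
+ * assumed to be the routine the generic module uses for the second
+ * stage):
+ *
+ *   usbdfu_register(&module_usbdfu)
+ *     - a matching device found in DFU mode has fw_internal pushed to
+ *       it, at76c503_usbdfu_post() is run, and the device resets after
+ *       reset_delay (2*HZ) and re-enumerates as a normal adapter
+ *   at76c50x_probe()
+ *     - usbdfu_in_use() is now false, so the device is handed to
+ *       at76c503_do_probe(..., fw_external, sizeof(fw_external),
+ *       board_type, netdev_name), which downloads the external image,
+ *       presumably via at76c503_download_external_fw()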
+ * + * History: + * + * 2003_02_11 0.1: (alex) + * - split board-specific code off from at76c503.c + * - reverted to 0.90.2 firmware because 0.100.x is broken for WUSB11 + * + * 2003_02_18 0.2: (alex) + * - Reduced duplicated code and moved as much as possible into at76c503.c + * - Changed default netdev name to "wlan%d" + */ + +#include +#include +#include + +#include "at76c503.h" +#include "../usbdfu.h" + +/* Include firmware data definition */ + +#include "fw-rfmd-acc-1.101.0-84.h" + +/* Version Information */ + +#define DRIVER_NAME "at76c503-rfmd-acc" +#define DRIVER_AUTHOR \ +"Oliver Kurth , Joerg Albert , Alex " +#define DRIVER_DESC "Atmel at76c503 (RFMD-ACC) Wireless LAN Driver" + +/* USB Device IDs supported by this driver */ + +#define VENDOR_ID_SMC 0x083a +#define PRODUCT_ID_SMC_2664W 0x3501 + +static struct usb_device_id dev_table[] = { + { USB_DEVICE(VENDOR_ID_SMC, PRODUCT_ID_SMC_2664W) }, + { } +}; + +/* firmware / config variables */ + +static unsigned char fw_internal[] = FW_503RFMD_ACC_INTERNAL; +static unsigned char fw_external[] = FW_503RFMD_ACC_EXTERNAL; + +static int board_type = BOARDTYPE_RFMD; + +/*---------------------------------------------------------------------------*/ + +MODULE_DEVICE_TABLE (usb, dev_table); + +/* Module paramaters */ + +static char netdev_name[IFNAMSIZ+1] = "wlan%d"; +MODULE_PARM(netdev_name, "c" __MODULE_STRING(IFNAMSIZ)); +MODULE_PARM_DESC(netdev_name, + "network device name (default is wlan%d)"); + +/* local function prototypes */ + +static void *at76c50x_probe(struct usb_device *dev, unsigned int ifnum, + const struct usb_device_id *id); +static void at76c50x_disconnect(struct usb_device *dev, void *ptr); + +/* structure for registering this driver with the usb subsystem */ + +static struct usb_driver module_usb = { + owner: THIS_MODULE, + name: DRIVER_NAME, + probe: at76c50x_probe, + disconnect: at76c50x_disconnect, + id_table: dev_table, +}; + +/* structure for registering this firmware with the usbdfu subsystem */ + +static struct usbdfu_info module_usbdfu = { + name: DRIVER_NAME, + id_table: dev_table, + fw_buf: fw_internal, + fw_buf_len: sizeof(fw_internal), + post_download_hook: at76c503_usbdfu_post, + reset_delay: 2*HZ +}; + +/* Module and USB entry points */ + +static void *at76c50x_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id) +{ + if (usbdfu_in_use(udev, ifnum)) { + /* the device is in DFU mode and usbdfu.c is handling it */ + return NULL; + } + + return at76c503_do_probe(THIS_MODULE, udev, fw_external, sizeof(fw_external), board_type, netdev_name); +} + +static void at76c50x_disconnect(struct usb_device *udev, void *ptr) +{ + info("%s disconnected", ((struct at76c503 *)ptr)->netdev->name); + at76c503_delete_device(ptr); +} + +static int __init mod_init(void) +{ + int result; + + info(DRIVER_DESC " " DRIVER_VERSION); + + /* register with usbdfu so that the firmware will be automatically + * downloaded to the device on detection */ + result = usbdfu_register(&module_usbdfu); + if (result < 0) { + err("usbdfu_register failed (status %d)", result); + return -1; + } + + /* register this driver with the USB subsystem */ + result = usb_register(&module_usb); + if (result < 0) { + err("usb_register failed (status %d)", result); + usbdfu_unregister(&module_usbdfu); + return -1; + } + + return 0; +} + +static void __exit mod_exit(void) +{ + info(DRIVER_DESC " " DRIVER_VERSION " unloading"); + /* deregister this driver with the USB subsystem */ + usbdfu_unregister(&module_usbdfu); + 
usb_deregister(&module_usb); +} + +module_init (mod_init); +module_exit (mod_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/at76c503-rfmd.c linux.22-ac2/drivers/usb/atmel/at76c503-rfmd.c --- linux.vanilla/drivers/usb/atmel/at76c503-rfmd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/at76c503-rfmd.c 2003-08-14 16:26:18.000000000 +0100 @@ -0,0 +1,205 @@ +/* -*- linux-c -*- */ +/* + * at76c503-rfmd.c: + * + * Driver for at76c503-based devices based on the Atmel "Fast-Vnet" reference + * design using RFMD radio chips + * + * Copyright (c) 2002 - 2003 Oliver Kurth + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * + * This driver is derived from usb-skeleton.c + * + * This driver contains code specific to Atmel AT76C503 (USB wireless 802.11) + * devices which use radio chips from RF Micro Devices (RFMD). Almost + * all of the actual driver is handled by the generic at76c503.c module, this + * file mostly just deals with the initial probes and downloading the correct + * firmware to the device before handing it off to at76c503. + * + * History: + * + * 2003_02_11 0.1: (alex) + * - split board-specific code off from at76c503.c + * - reverted to 0.90.2 firmware because 0.100.x is broken for WUSB11 + * + * 2003_02_18 0.2: (alex) + * - Reduced duplicated code and moved as much as possible into at76c503.c + * - Changed default netdev name to "wlan%d" + */ + +#include +#include +#include + +#include "at76c503.h" +#include "../usbdfu.h" + +/* Include firmware data definition */ + +#include "fw-rfmd-1.101.0-84.h" + +/* Version Information */ + +#define DRIVER_NAME "at76c503-rfmd" +#define DRIVER_AUTHOR \ +"Oliver Kurth , Joerg Albert , Alex " +#define DRIVER_DESC "Atmel at76c503 (RFMD) Wireless LAN Driver" + +/* USB Device IDs supported by this driver */ + +#define VENDOR_ID_ATMEL 0x03eb +#define PRODUCT_ID_ATMEL_503R 0x7605 /* Generic AT76C503/RFMD device */ +#define PRODUCT_ID_W_BUDDIE_WN210 0x4102 /* AirVast W-Buddie WN210 */ + +#define VENDOR_ID_BELKIN 0x0d5c +#define PRODUCT_ID_BELKIN_F5D6050 0xa002 /* Belkin F5D6050 / SMC 2662W v2 */ + +#define VENDOR_ID_DYNALINK 0x069a +#define PRODUCT_ID_DYNALINK_WLL013_R 0x0321 /* Dynalink/Askey WLL013 (rfmd) */ + +#define VENDOR_ID_LINKSYS 0x077b +#define PRODUCT_ID_LINKSYS_WUSB11_V26 0x2219 /* Linksys WUSB11 v2.6 */ +#define PRODUCT_ID_NE_NWU11B 0x2227 /* Network Everywhere NWU11B */ + +#define VENDOR_ID_NETGEAR 0x0864 +#define PRODUCT_ID_NETGEAR_MA101B 0x4102 /* Netgear MA 101 Rev. B */ + +#define VENDOR_ID_ACTIONTEC 0x1668 +#define PRODUCT_ID_ACTIONTEC_802UAT1 0x7605 /* Actiontec 802UAT1, HWU01150-01UK */ + +#define VENDOR_ID_DLINK 0x2001 /* D-Link */ +#define PRODUCT_ID_DLINK_DWL120 0x3200 /* DWL-120 rev. 
E */ + +#define VENDOR_ID_DICK_SMITH_ELECTR 0x1371 /* Dick Smith Electronics */ +#define PRODUCT_ID_DSE_XH1153 0x5743 /* XH1153 802.11b USB adapter */ + +#define VENDOR_ID_BENQ 0x4a5 /* BenQ (Acer) */ +#define PRODUCT_ID_BENQ_AWL_400 0x9001 /* BenQ AWL-400 USB stick */ + +#define VENDOR_ID_3COM 0x506 +#define PRODUCT_ID_3COM_3CRSHEW696 0xa01 /* 3COM 3CRSHEW696 */ + + +static struct usb_device_id dev_table[] = { + { USB_DEVICE(VENDOR_ID_ATMEL, PRODUCT_ID_ATMEL_503R ) }, + { USB_DEVICE(VENDOR_ID_BELKIN, PRODUCT_ID_BELKIN_F5D6050 ) }, + { USB_DEVICE(VENDOR_ID_DYNALINK, PRODUCT_ID_DYNALINK_WLL013_R ) }, + { USB_DEVICE(VENDOR_ID_LINKSYS, PRODUCT_ID_LINKSYS_WUSB11_V26) }, + { USB_DEVICE(VENDOR_ID_LINKSYS, PRODUCT_ID_NE_NWU11B ) }, + { USB_DEVICE(VENDOR_ID_NETGEAR, PRODUCT_ID_NETGEAR_MA101B ) }, + { USB_DEVICE(VENDOR_ID_DLINK, PRODUCT_ID_DLINK_DWL120 ) }, + { USB_DEVICE(VENDOR_ID_ACTIONTEC,PRODUCT_ID_ACTIONTEC_802UAT1 ) }, + { USB_DEVICE(VENDOR_ID_ATMEL, PRODUCT_ID_W_BUDDIE_WN210 ) }, + { USB_DEVICE(VENDOR_ID_DICK_SMITH_ELECTR, PRODUCT_ID_DSE_XH1153) }, + { USB_DEVICE(VENDOR_ID_BENQ, PRODUCT_ID_BENQ_AWL_400) }, + { USB_DEVICE(VENDOR_ID_3COM, PRODUCT_ID_3COM_3CRSHEW696) }, + { } +}; + +/* firmware / config variables */ + +static unsigned char fw_internal[] = FW_503RFMD_INTERNAL; +static unsigned char fw_external[] = FW_503RFMD_EXTERNAL; + +static int board_type = BOARDTYPE_RFMD; + +/*---------------------------------------------------------------------------*/ + +MODULE_DEVICE_TABLE (usb, dev_table); + +/* Module paramaters */ + +static char netdev_name[IFNAMSIZ+1] = "wlan%d"; +MODULE_PARM(netdev_name, "c" __MODULE_STRING(IFNAMSIZ)); +MODULE_PARM_DESC(netdev_name, + "network device name (default is wlan%d)"); + +/* local function prototypes */ + +static void *at76c50x_probe(struct usb_device *dev, unsigned int ifnum, + const struct usb_device_id *id); +static void at76c50x_disconnect(struct usb_device *dev, void *ptr); + +/* structure for registering this driver with the usb subsystem */ + +static struct usb_driver module_usb = { + owner: THIS_MODULE, + name: DRIVER_NAME, + probe: at76c50x_probe, + disconnect: at76c50x_disconnect, + id_table: dev_table, +}; + +/* structure for registering this firmware with the usbdfu subsystem */ + +static struct usbdfu_info module_usbdfu = { + name: DRIVER_NAME, + id_table: dev_table, + fw_buf: fw_internal, + fw_buf_len: sizeof(fw_internal), + post_download_hook: at76c503_usbdfu_post, + reset_delay: 2*HZ +}; + +/* Module and USB entry points */ + +static void *at76c50x_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id) +{ + if (usbdfu_in_use(udev, ifnum)) { + /* the device is in DFU mode and usbdfu.c is handling it */ + return NULL; + } + + return at76c503_do_probe(THIS_MODULE, udev, fw_external, sizeof(fw_external), board_type, netdev_name); +} + +static void at76c50x_disconnect(struct usb_device *udev, void *ptr) +{ + info("%s disconnected", ((struct at76c503 *)ptr)->netdev->name); + at76c503_delete_device(ptr); +} + +static int __init mod_init(void) +{ + int result; + + info(DRIVER_DESC " " DRIVER_VERSION); + + /* register with usbdfu so that the firmware will be automatically + * downloaded to the device on detection */ + result = usbdfu_register(&module_usbdfu); + if (result < 0) { + err("usbdfu_register failed (status %d)", result); + return -1; + } + + /* register this driver with the USB subsystem */ + result = usb_register(&module_usb); + if (result < 0) { + err("usb_register failed (status %d)", result); + 
usbdfu_unregister(&module_usbdfu); + return -1; + } + + return 0; +} + +static void __exit mod_exit(void) +{ + info(DRIVER_DESC " " DRIVER_VERSION " unloading"); + /* deregister this driver with the USB subsystem */ + usbdfu_unregister(&module_usbdfu); + usb_deregister(&module_usb); +} + +module_init (mod_init); +module_exit (mod_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/at76c505-rfmd.c linux.22-ac2/drivers/usb/atmel/at76c505-rfmd.c --- linux.vanilla/drivers/usb/atmel/at76c505-rfmd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/at76c505-rfmd.c 2003-08-14 16:26:48.000000000 +0100 @@ -0,0 +1,163 @@ +/* -*- linux-c -*- */ +/* + * at76c505-rfmd.c: + * + * Driver for at76c505-based devices based on the Atmel "Fast-Vnet" reference + * design using RFMD radio chips + * + * Copyright (c) 2002 - 2003 Oliver Kurth + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * + * This driver is derived from usb-skeleton.c + * + * This driver contains code specific to Atmel AT76C505 (USB wireless 802.11) + * devices which use radio chips from RF Micro Devices (RFMD). Almost + * all of the actual driver is handled by the generic at76c503.c module, this + * file mostly just deals with the initial probes and downloading the correct + * firmware to the device before handing it off to at76c503. + * + * History: + * + * 2003_02_15 0.1: (alex) + * - created AT76C505-specific driver file + * + * 2003_02_18 0.2: (alex) + * - Reduced duplicated code and moved as much as possible into at76c503.c + * - Changed default netdev name to "wlan%d" + */ + +#include +#include +#include + +#include "at76c503.h" +#include "../usbdfu.h" + +/* Include firmware data definition */ + +#include "fw-r505.h" + +/* Version Information */ + +#define DRIVER_NAME "at76c505-rfmd" +#define DRIVER_AUTHOR \ +"Oliver Kurth , Joerg Albert , Alex " +#define DRIVER_DESC "Atmel at76c505 (RFMD) Wireless LAN Driver" + +/* USB Device IDs supported by this driver */ + +#define VENDOR_ID_ATMEL 0x03eb +#define PRODUCT_ID_ATMEL_505R 0x7606 /* Generic AT76C505/RFMD device */ + +static struct usb_device_id dev_table[] = { + { USB_DEVICE(VENDOR_ID_ATMEL, PRODUCT_ID_ATMEL_505R ) }, + { } +}; + +/* firmware / config variables */ + +static unsigned char fw_internal[] = FW_505RFMD_INTERNAL; +static unsigned char fw_external[] = FW_505RFMD_EXTERNAL; + +static int board_type = BOARDTYPE_R505; + +/*---------------------------------------------------------------------------*/ + +MODULE_DEVICE_TABLE (usb, dev_table); + +/* Module paramaters */ + +static char netdev_name[IFNAMSIZ+1] = "wlan%d"; +MODULE_PARM(netdev_name, "c" __MODULE_STRING(IFNAMSIZ)); +MODULE_PARM_DESC(netdev_name, + "network device name (default is wlan%d)"); + +/* local function prototypes */ + +static void *at76c50x_probe(struct usb_device *dev, unsigned int ifnum, + const struct usb_device_id *id); +static void at76c50x_disconnect(struct usb_device *dev, void *ptr); + +/* structure for registering this driver with the usb subsystem */ + +static struct usb_driver module_usb = { + owner: THIS_MODULE, + name: DRIVER_NAME, + probe: at76c50x_probe, + disconnect: at76c50x_disconnect, + id_table: dev_table, +}; + +/* 
structure for registering this firmware with the usbdfu subsystem */ + +static struct usbdfu_info module_usbdfu = { + name: DRIVER_NAME, + id_table: dev_table, + fw_buf: fw_internal, + fw_buf_len: sizeof(fw_internal), + post_download_hook: at76c503_usbdfu_post, + reset_delay: 2*HZ +}; + +/* Module and USB entry points */ + +static void *at76c50x_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id) +{ + if (usbdfu_in_use(udev, ifnum)) { + /* the device is in DFU mode and usbdfu.c is handling it */ + return NULL; + } + + return at76c503_do_probe(THIS_MODULE, udev, fw_external, sizeof(fw_external), board_type, netdev_name); +} + +static void at76c50x_disconnect(struct usb_device *udev, void *ptr) +{ + info("%s disconnected", ((struct at76c503 *)ptr)->netdev->name); + at76c503_delete_device(ptr); +} + +static int __init mod_init(void) +{ + int result; + + info(DRIVER_DESC " " DRIVER_VERSION); + + /* register with usbdfu so that the firmware will be automatically + * downloaded to the device on detection */ + result = usbdfu_register(&module_usbdfu); + if (result < 0) { + err("usbdfu_register failed (status %d)", result); + return -1; + } + + /* register this driver with the USB subsystem */ + result = usb_register(&module_usb); + if (result < 0) { + err("usb_register failed (status %d)", result); + usbdfu_unregister(&module_usbdfu); + return -1; + } + + return 0; +} + +static void __exit mod_exit(void) +{ + info(DRIVER_DESC " " DRIVER_VERSION " unloading"); + /* unregister this driver with the USB subsystem */ + usbdfu_unregister(&module_usbdfu); + usb_deregister(&module_usb); +} + +module_init (mod_init); +module_exit (mod_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/CHANGELOG linux.22-ac2/drivers/usb/atmel/CHANGELOG --- linux.vanilla/drivers/usb/atmel/CHANGELOG 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/CHANGELOG 2003-08-13 21:00:21.000000000 +0100 @@ -0,0 +1,45 @@ +$Id: CHANGELOG,v 1.3 2003/07/11 20:53:32 jal2 Exp $ + +- added switch to cope with Intersil firmware 0.84.0 which does + not send FCS in rx data +- new module at76c503-rfmd-acc.o for SMC2664W, which got RMFD radio, + but need a special firmware (Accton OEM products). +- added regulatory domains MKK1 and Israel +- added rudimentary ethtool support for SuSE's future hotplug +- added nickname ioctl for iwconfig +- added test of valid channel in reg. domain +- added big endian patch (by Kevin Cernekee), changed defines in + ieee802_11.h +- supress unsupported iwconfig modes (monitor, master) - patch by + Pavel Roskin + +version 0.10 (2003-06-01) +------------ +- added iwpriv commands to set scan channel times, + scan mode; added module parameters for all iwpriv + commands +- debug output is now controlled by a bit mask, + see DBG_* defines in at76c503.c +- added tx data padding (Stavros advised to do so) +- added rx data fragmentation support +- added power management support +- new iwpriv cmd "list_bss" to dump the current bss table + to syslog +- fixed error when the driver gets reloaded and + the device kept powered for firmware 0.100.x, 1.101.y +- fixed hanging of SMP kernels +- new params set with iwconfig get active immediately, + no need to bring the netdevice down/up anymore +- bss table is no longer limited in size (solves problem + at crowded places, e.g. 
CeBIT), old entries get removed +- added iwspy support +- Tim's "link level patch" +- added iwpriv commands for preamble type +- added shared secret authentication (incl. iwpriv command) +- support hidden SSID, fixed bug with ANY SSID +- Alex' "first phase of rx optimization" patch (email 2003-02-23) +- Alex' deprecation patch (email 2003-02-21) + +version 0.9 (2003-03-21) +----------- +- taken from Oliver's website diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/Config.in linux.22-ac2/drivers/usb/atmel/Config.in --- linux.vanilla/drivers/usb/atmel/Config.in 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/Config.in 2003-08-13 22:26:17.000000000 +0100 @@ -0,0 +1,16 @@ +# +# USB atmel wireless device configuration +# +mainmenu_option next_comment +comment 'USB ATMEL wireless support' + +dep_tristate 'Atmel USB wireless support' CONFIG_USB_ATMEL76C503 $CONFIG_USB $CONFIG_USB_DFU +if [ "$CONFIG_USB_ATMEL76C503" != "n" ]; then + dep_tristate 'Atmel 76c503 I3861 firmware support' CONFIG_USB_ATMEL76C503_I3861 $CONFIG_USB_ATMEL76C503 + dep_tristate 'Atmel 76c503 I3863 firmware support' CONFIG_USB_ATMEL76C503_I3863 $CONFIG_USB_ATMEL76C503 + dep_tristate 'Atmel 76c503 RFMD firmware support' CONFIG_USB_ATMEL76C503_RFMD $CONFIG_USB_ATMEL76C503 + dep_tristate 'Atmel 76c503 Accton RFMD firmware support' CONFIG_USB_ATMEL76C503_RFMD_ACCTON $CONFIG_USB_ATMEL76C503 + dep_tristate 'Atmel 76c505 RFMD firmware support' CONFIG_USB_ATMEL76C505_RFMD $CONFIG_USB_ATMEL76C503 +fi + +endmenu diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/fw-i3861.h linux.22-ac2/drivers/usb/atmel/fw-i3861.h --- linux.vanilla/drivers/usb/atmel/fw-i3861.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/fw-i3861.h 2003-08-13 21:00:21.000000000 +0100 @@ -0,0 +1,1926 @@ +/**************************************************************************** + * The following firmware has been taken (and reformatted slighly) from the * + * Atmel (atmelwlandriver) driver source. * + * * + * Target: AT76C503 with Intersil 3861 radio * + * Version: 0.90.0 #44 * + ****************************************************************************/ + +/*************************************************************************************** + Copyright 2000-2001 ATMEL Corporation. + + This file is part of atmel wireless lan drivers. + Atmel wireless lan drivers is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + Atmel wireless lan drivers is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + You should have received a copy of the GNU General Public License + along with Atmel wireless lan drivers; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +**************************************************************************************/ +/**************************************************************************/ +/* */ +/* Copyright (c) 1999-2000 by Atmel Corporation */ +/* */ +/* This software is copyrighted by and is the sole property of Atmel */ +/* Corporation. 
All rights, title, ownership, or other interests */ +/* in the software remain the property of Atmel Corporation. This */ +/* software may only be used in accordance with the corresponding */ +/* license agreement. Any un-authorized use, duplication, transmission, */ +/* distribution, or disclosure of this software is expressly forbidden. */ +/* */ +/* This Copyright notice may not be removed or modified without prior */ +/* written consent of Atmel Corporation. */ +/* */ +/* Atmel Corporation, Inc. reserves the right to modify this software */ +/* without notice. */ +/* */ +/* Atmel Corporation. */ +/* 2325 Orchard Parkway literature@atmel.com */ +/* San Jose, CA 95131 http://www.atmel.com */ +/* */ +/**************************************************************************/ +/**************************************************************************/ +/* */ +/* Automatically generated FW file for AT76C502A */ +/* */ +/**************************************************************************/ + +#define FW_I3861_INTERNAL { \ +0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F, \ +0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1, \ +0x9F,0xE5,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0E,0x04,0xA0,0xE3,0x00, \ +0x10,0xA0,0xE3,0x81,0x11,0xA0,0xE1,0x00,0x10,0x81,0xE3,0x00,0x10,0x80,0xE5, \ +0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3,0x04,0x10,0x80,0xE5,0x0C,0x10,0x80, \ +0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90,0xE5,0x48,0xD1,0x9F,0xE5,0xCD,0x13, \ +0x00,0xEB,0x44,0x11,0x9F,0xE5,0xD0,0x20,0x9F,0xE5,0xD1,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10,0xC1,0xE3,0x04,0x10,0x41,0xE2, \ +0x01,0xD0,0xA0,0xE1,0x00,0xA0,0xA0,0xE3,0x00,0xB0,0xA0,0xE3,0xB0,0x20,0x9F, \ +0xE5,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10, \ +0xC1,0xE3,0x04,0x10,0x41,0xE2,0x01,0xD0,0xA0,0xE1,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x84,0x30,0x9F,0xE5,0x00,0x10,0x83,0xE5,0x01,0xD0,0xA0,0xE1, \ +0x74,0x00,0x9F,0xE5,0x01,0x00,0x80,0xE3,0x0F,0xE0,0xA0,0xE1,0x10,0xFF,0x2F, \ +0xE1,0x00,0xA0,0x00,0x47,0x64,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x04,0x00, \ +0x80,0xE2,0x6C,0x10,0x9F,0xE5,0x6C,0x30,0x9F,0xE5,0x5C,0x20,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0x00,0x20,0x83,0xE5,0x02,0x00,0x80,0xE0,0x5C,0x10,0x9F,0xE5, \ +0x00,0x20,0xA0,0xE3,0x00,0x20,0x81,0xE5,0x44,0x20,0x9F,0xE5,0x00,0x00,0x82, \ +0xE5,0x1E,0xFF,0x2F,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF, \ +0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFB,0x0E,0x00,0xEA,0x02, \ +0xF0,0xA0,0xFB,0x22,0x48,0x87,0x46,0x14,0x0F,0x00,0xEA,0x02,0xF0,0xF0,0xF9, \ +0x20,0x48,0x87,0x46,0xB5,0x01,0x00,0x00,0x00,0x03,0x00,0x02,0x00,0x02,0x00, \ +0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x84,0x03,0x00,0x02,0xA4,0x03, \ +0x00,0x02,0xA8,0x03,0x00,0x02,0xAC,0x03,0x00,0x02,0xFE,0xFF,0xFF,0xEA,0xFE, \ +0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0,0xE3, \ +0x0E,0xF0,0xA0,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0, \ +0xE3,0x0E,0xF0,0xA0,0xE1,0x00,0x00,0xA0,0xE3,0x0E,0xF0,0xA0,0xE1,0x20,0x00, \ +0x00,0x00,0x04,0x01,0x00,0x00,0x08,0x01,0x00,0x00,0x0C,0x01,0x00,0x00,0x10, \ +0x01,0x00,0x00,0x14,0x01,0x00,0x00,0x18,0x01,0x00,0x00,0x24,0x01,0x00,0x00, \ +0x00,0x60,0x00,0x01,0x20,0x53,0x00,0x01,0xFD,0x3D,0x00,0x00,0xED,0x3E,0x00, \ +0x00,0x00,0xB5,0x03,0xF0,0x7B,0xFD,0x00,0x20,0x00,0xBD,0xF0,0xB5,0x86,0xB0, \ +0x07,0x1C,0x00,0x21,0x04,0x91,0xFF,0x21,0x01,0x22,0x91,0x31,0x01,0x20,0x03, \ 
+0x90,0x01,0x91,0x05,0x92,0x02,0x92,0x3B,0x4A,0x3D,0xA1,0x3B,0x48,0x01,0x23, \ +0x00,0x97,0x03,0xF0,0x51,0xFF,0x00,0x21,0x04,0x91,0xFF,0x21,0x91,0x31,0x01, \ +0x22,0x05,0x92,0xFB,0x1D,0xFF,0x33,0x03,0x22,0x03,0x20,0x8A,0x33,0x00,0x93, \ +0x03,0x90,0x02,0x92,0x01,0x91,0x37,0xA1,0x35,0x4A,0x35,0x48,0x02,0x23,0x03, \ +0xF0,0x3C,0xFF,0x37,0x48,0x38,0xA1,0x03,0xF0,0xAE,0xFF,0x3A,0x48,0x3B,0xA1, \ +0x03,0xF0,0xAA,0xFF,0x3E,0x48,0x3F,0xA1,0x03,0xF0,0xA6,0xFF,0x42,0x48,0x43, \ +0xA1,0x03,0xF0,0xA2,0xFF,0x46,0x48,0x04,0x26,0x06,0x70,0x46,0x48,0x00,0x27, \ +0x46,0x4D,0x07,0x60,0xE8,0x1D,0x75,0x30,0xEF,0x67,0x44,0x4C,0x47,0x60,0xE0, \ +0x1D,0x79,0x30,0xC7,0x61,0x43,0x48,0x07,0x70,0x03,0xF0,0xEF,0xFB,0x42,0x48, \ +0x07,0x70,0x47,0x70,0x41,0x48,0x07,0x60,0x41,0x48,0x07,0x60,0x02,0xF0,0x88, \ +0xFE,0xE8,0x6F,0x07,0x25,0x6D,0x06,0x01,0x28,0x02,0xD1,0x0C,0x20,0xA8,0x61, \ +0x00,0xE0,0xAE,0x61,0x6F,0x61,0x00,0xF0,0x7C,0xF8,0x00,0xF0,0xC0,0xF8,0x00, \ +0xF0,0x49,0xF9,0x38,0x48,0x00,0x7D,0x00,0xF0,0xC8,0xFA,0x60,0x74,0x00,0xF0, \ +0xD9,0xF8,0x36,0x48,0xE1,0x1D,0x00,0x79,0x69,0x31,0x48,0x70,0x34,0x48,0x80, \ +0x78,0x00,0x28,0x01,0xD0,0x02,0xF0,0xDE,0xF8,0x01,0x20,0x00,0xF0,0x83,0xF9, \ +0x02,0xF0,0xCB,0xF8,0x01,0xF0,0x29,0xFD,0xE8,0x69,0x01,0x23,0xDB,0x03,0x18, \ +0x43,0xE8,0x61,0x06,0xB0,0xF0,0xBD,0xE5,0x18,0x00,0x00,0xA8,0x05,0x00,0x02, \ +0x54,0x78,0x20,0x74,0x68,0x72,0x65,0x61,0x64,0x00,0x00,0x00,0x59,0x2B,0x00, \ +0x00,0x38,0x06,0x00,0x02,0x4D,0x67,0x6D,0x20,0x74,0x68,0x72,0x65,0x61,0x64, \ +0x00,0x00,0xC8,0x06,0x00,0x02,0x54,0x78,0x20,0x73,0x74,0x61,0x74,0x75,0x73, \ +0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0xE8,0x06,0x00,0x02,0x4D,0x67,0x6D,0x20, \ +0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x00,0x00, \ +0x00,0x08,0x07,0x00,0x02,0x54,0x58,0x20,0x47,0x4F,0x20,0x73,0x74,0x61,0x74, \ +0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x00,0x28,0x07,0x00,0x02,0x4D, \ +0x4E,0x47,0x20,0x47,0x4F,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C, \ +0x61,0x67,0x73,0x00,0x17,0x02,0x00,0x02,0x40,0x02,0x00,0x02,0xF8,0x00,0x00, \ +0x02,0x04,0x05,0x00,0x02,0x15,0x02,0x00,0x02,0xAC,0x08,0x00,0x02,0x38,0x02, \ +0x00,0x02,0x34,0x02,0x00,0x02,0x18,0x00,0x00,0x02,0xC8,0x00,0x00,0x02,0x14, \ +0x01,0x00,0x02,0x80,0xB4,0x1D,0x48,0x00,0x21,0xC1,0x72,0x00,0x20,0x19,0x27, \ +0x1B,0x4A,0xFF,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x00,0x20,0x43, \ +0x27,0x18,0x4A,0x7F,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x16,0x48, \ +0x14,0x4A,0x16,0x4B,0x01,0x60,0x42,0x60,0x13,0x60,0x42,0x68,0xD7,0x1D,0x15, \ +0x37,0x57,0x60,0x42,0x68,0x08,0x3F,0x97,0x60,0x42,0x68,0x11,0x73,0x42,0x68, \ +0x91,0x73,0x47,0x68,0x03,0x22,0xBA,0x75,0x42,0x68,0x91,0x82,0x42,0x68,0xC1, \ +0x60,0x82,0x60,0x09,0x4A,0x02,0x61,0x13,0x60,0x02,0x69,0xD3,0x1D,0x11,0x33, \ +0x53,0x60,0x02,0x69,0x91,0x81,0x02,0x69,0x11,0x72,0x01,0x69,0x41,0x61,0x80, \ +0xBC,0xF7,0x46,0x58,0x01,0x00,0x02,0x00,0x11,0x00,0x02,0x00,0xDA,0x00,0x02, \ +0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x80,0x00,0xB5,0x00,0xF0,0xA9,0xF8,0x0C, \ +0x48,0x01,0x22,0x81,0x89,0x0B,0x48,0xC1,0x63,0x02,0x72,0x82,0x74,0x82,0x73, \ +0x00,0x21,0x01,0x63,0x01,0x60,0x41,0x72,0xC3,0x1D,0x79,0x33,0x01,0x73,0x19, \ +0x62,0xC2,0x72,0x42,0x60,0x05,0x48,0x03,0x22,0x42,0x70,0x01,0x70,0x41,0x80, \ +0x00,0xBD,0x00,0x00,0xD0,0x00,0x00,0x02,0x04,0x05,0x00,0x02,0x24,0x01,0x00, \ +0x02,0xB0,0xB5,0x1F,0x4C,0x00,0x20,0xE0,0x71,0x1E,0x48,0x00,0xF0,0x4F,0xF8, \ +0x60,0x7A,0x1D,0x4F,0x01,0x28,0x08,0xD1,0x48,0x20,0x78,0x81,0x18,0x20,0x38, \ +0x81,0x38,0x21,0x08,0x20,0x00,0xF0,0xB5,0xF8,0x07,0xE0,0x90,0x20,0x78,0x81, \ 
+0x30,0x20,0x38,0x81,0x80,0x21,0x08,0x20,0x00,0xF0,0xAC,0xF8,0x79,0x89,0x3A, \ +0x89,0x13,0x4D,0x38,0x1C,0xEF,0x1D,0x39,0x37,0x89,0x18,0x39,0x81,0xC1,0x88, \ +0x80,0x88,0x0A,0x18,0x12,0x18,0x89,0x18,0xFF,0x31,0x31,0x31,0xBA,0x80,0xF9, \ +0x80,0x21,0x88,0x48,0x43,0x78,0x81,0xE0,0x79,0x38,0x73,0x38,0x7B,0x78,0x73, \ +0x00,0xF0,0xE8,0xF9,0x01,0xF0,0x72,0xFF,0x39,0x7B,0x07,0x48,0x40,0x5C,0xE9, \ +0x1D,0x69,0x31,0x08,0x72,0xB0,0xBD,0x00,0x00,0x14,0x01,0x00,0x02,0x74,0x00, \ +0x00,0x02,0x18,0x00,0x00,0x02,0xF8,0x00,0x00,0x02,0x6C,0x01,0x00,0x02,0x03, \ +0x49,0x0F,0x20,0x00,0x06,0x81,0x80,0x02,0x49,0x81,0x81,0xF7,0x46,0x00,0x00, \ +0xE8,0xE8,0x00,0x00,0x13,0x13,0x00,0x00,0x02,0x79,0x41,0x79,0x12,0x02,0x11, \ +0x43,0xC2,0x78,0x12,0x04,0x11,0x43,0x82,0x78,0x12,0x06,0x0A,0x43,0x01,0x21, \ +0x89,0x06,0x8A,0x61,0x42,0x78,0x00,0x78,0x00,0x02,0x10,0x43,0xC8,0x61,0xF7, \ +0x46,0x00,0xB5,0x0C,0x49,0x0D,0x48,0x41,0x61,0x23,0x21,0x81,0x61,0x00,0x22, \ +0x01,0x05,0x0A,0x61,0xC2,0x01,0x42,0x60,0x07,0x22,0xC2,0x60,0x08,0x4A,0x82, \ +0x62,0xF2,0x22,0x82,0x60,0x32,0x22,0x4A,0x61,0xCA,0x68,0xC9,0x6B,0x00,0x68, \ +0x00,0x21,0x00,0x20,0x00,0xF0,0x0B,0xF8,0x00,0xBD,0x01,0x24,0x00,0x00,0x40, \ +0x00,0x00,0x04,0xAF,0xFF,0x3F,0x00,0x01,0x20,0x80,0x06,0x40,0x6A,0xF7,0x46, \ +0x90,0xB4,0x0F,0x4A,0x01,0x27,0xD2,0x69,0xBF,0x06,0x00,0x2A,0x15,0xD1,0x0F, \ +0x22,0x12,0x06,0x94,0x88,0x0B,0x4B,0x23,0x40,0x93,0x80,0x94,0x89,0x0A,0x4B, \ +0x23,0x40,0x93,0x81,0xB8,0x62,0x79,0x62,0x90,0x89,0x08,0x4B,0x18,0x43,0x90, \ +0x81,0x90,0x88,0x07,0x4B,0x18,0x43,0x90,0x80,0x90,0xBC,0xF7,0x46,0xB8,0x62, \ +0x79,0x62,0xFA,0xE7,0x84,0x05,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF, \ +0xFF,0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0x01,0x1C,0x06,0x48,0x04,0xD0, \ +0x41,0x68,0xC3,0x01,0x19,0x43,0x41,0x60,0xF7,0x46,0x41,0x68,0x01,0x23,0x5B, \ +0x03,0x99,0x43,0x41,0x60,0xF7,0x46,0x40,0x00,0x00,0x04,0x80,0x00,0x89,0x02, \ +0x01,0x43,0x03,0x48,0x41,0x62,0x41,0x6A,0x49,0x08,0xFC,0xD3,0xF7,0x46,0x00, \ +0x00,0x40,0x00,0x00,0x04,0x80,0x00,0x02,0x23,0x04,0x49,0x18,0x43,0x48,0x62, \ +0x48,0x6A,0x42,0x08,0xFC,0xD3,0x80,0x03,0x00,0x0E,0xF7,0x46,0x00,0x00,0x40, \ +0x00,0x00,0x04,0xF0,0xB5,0x05,0x26,0x04,0x1C,0x15,0x1C,0x0F,0x1C,0x20,0x1C, \ +0x39,0x1C,0xFF,0xF7,0xDD,0xFF,0x20,0x1C,0xFF,0xF7,0xE6,0xFF,0x78,0x40,0x28, \ +0x40,0x01,0x1C,0x01,0xD0,0x01,0x3E,0xF2,0xD5,0x01,0x20,0x00,0x29,0x00,0xD0, \ +0x00,0x20,0xF0,0xBD,0x00,0x05,0x01,0x43,0x0D,0x48,0x01,0x62,0x01,0x6A,0xC9, \ +0x0D,0xFC,0xD3,0x00,0x2A,0x0A,0xD1,0xC1,0x69,0xFF,0x23,0x01,0x33,0x19,0x43, \ +0xC1,0x61,0xC1,0x69,0xFF,0x23,0x01,0x33,0x99,0x43,0xC1,0x61,0xF7,0x46,0xC1, \ +0x69,0x01,0x23,0x5B,0x02,0x19,0x43,0xC1,0x61,0xC1,0x69,0x99,0x43,0xC1,0x61, \ +0xF7,0x46,0x40,0x00,0x00,0x04,0x90,0xB5,0x09,0x4C,0x00,0x27,0x78,0x00,0xC0, \ +0x19,0x02,0x19,0x10,0x78,0x51,0x78,0x92,0x78,0xFF,0xF7,0xBF,0xFF,0x00,0x28, \ +0x00,0xD1,0x90,0xBD,0x01,0x37,0x72,0x2F,0xF1,0xD3,0x01,0x20,0x90,0xBD,0x00, \ +0x00,0x38,0x50,0x00,0x00,0x90,0xB5,0x25,0x4C,0x07,0x1C,0x00,0x22,0x01,0x20, \ +0x61,0x68,0xFF,0xF7,0xC3,0xFF,0x00,0x22,0x00,0x20,0x21,0x68,0xFF,0xF7,0xBE, \ +0xFF,0x00,0x22,0x02,0x20,0xA1,0x68,0xFF,0xF7,0xB9,0xFF,0x00,0x22,0x03,0x20, \ +0xE1,0x68,0xFF,0xF7,0xB4,0xFF,0x1B,0x4C,0x01,0x22,0x01,0x20,0x61,0x68,0xFF, \ +0xF7,0xAE,0xFF,0x01,0x22,0x00,0x20,0x21,0x68,0xFF,0xF7,0xA9,0xFF,0x78,0x1E, \ +0x0D,0x28,0x06,0xD8,0x78,0x00,0x14,0x49,0x01,0x22,0x09,0x5A,0x02,0x20,0xFF, \ +0xF7,0x9F,0xFF,0x5A,0x20,0x01,0x38,0xFD,0xD1,0x11,0x48,0x23,0x21,0x81,0x61, \ +0x0D,0x21,0x01,0x61,0xC1,0x69,0x20,0x23,0x99,0x43,0xC1,0x61,0x1B,0x21,0x01, \ 
+0x39,0xFD,0xD1,0xC1,0x69,0x20,0x23,0x19,0x43,0xC1,0x61,0x5A,0x20,0x01,0x38, \ +0xFD,0xD1,0x08,0x48,0xFF,0x22,0xC0,0x19,0x10,0x38,0xC1,0x7B,0x3E,0x20,0xFF, \ +0xF7,0x6A,0xFF,0x90,0xBD,0x00,0x00,0x90,0x51,0x00,0x00,0xA0,0x51,0x00,0x00, \ +0xA8,0x51,0x00,0x00,0x40,0x00,0x00,0x04,0x4C,0x01,0x00,0x02,0x80,0xB5,0x0F, \ +0x27,0x3F,0x06,0xB9,0x88,0x27,0x4B,0x19,0x40,0xB9,0x80,0xB9,0x89,0x26,0x4B, \ +0x19,0x40,0xB9,0x81,0x26,0x49,0xCA,0x69,0x0B,0x0C,0x1A,0x43,0xCA,0x61,0xCA, \ +0x69,0x92,0x08,0x92,0x00,0xCA,0x61,0x09,0x22,0x01,0x3A,0xFD,0xD1,0xCA,0x69, \ +0x40,0x23,0x9A,0x43,0xCA,0x61,0x09,0x22,0x01,0x3A,0xFD,0xD1,0xCA,0x69,0x40, \ +0x23,0x1A,0x43,0xCA,0x61,0x09,0x22,0x01,0x3A,0xFD,0xD1,0xCA,0x69,0x01,0x23, \ +0x9B,0x02,0x9A,0x43,0xCA,0x61,0x3A,0x88,0x09,0x68,0x3A,0x89,0x17,0x49,0x15, \ +0x4A,0xD2,0x69,0x1A,0x04,0xD3,0x68,0xD2,0x6B,0x08,0x75,0x08,0x7D,0xFF,0xF7, \ +0x73,0xFF,0xFF,0xF7,0x21,0xFE,0xB8,0x89,0x11,0x4B,0x18,0x43,0xB8,0x81,0xB8, \ +0x88,0x10,0x4B,0x18,0x43,0xB8,0x80,0x10,0x48,0x41,0x6B,0x01,0x29,0x05,0xD1, \ +0x00,0x22,0x10,0x21,0x0E,0x48,0x03,0xF0,0xF2,0xFC,0x80,0xBD,0x40,0x6B,0x02, \ +0x28,0xFB,0xD1,0x00,0x22,0x10,0x21,0x0A,0x48,0x03,0xF0,0xE9,0xFC,0x80,0xBD, \ +0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x40,0x00,0x00,0x04,0x80,0x00,0x00, \ +0x04,0x18,0x00,0x00,0x02,0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0x04,0x05, \ +0x00,0x02,0xC8,0x06,0x00,0x02,0xE8,0x06,0x00,0x02,0xB0,0xB5,0x01,0x21,0x89, \ +0x02,0x04,0x1C,0x14,0x48,0x44,0x23,0xC1,0x61,0xC1,0x69,0x19,0x43,0xC1,0x61, \ +0x12,0x49,0x01,0x39,0xFD,0xD1,0xC1,0x69,0x40,0x23,0x99,0x43,0xC1,0x61,0xE1, \ +0x21,0x89,0x00,0x01,0x39,0xFD,0xD1,0x05,0x1C,0xC0,0x69,0x40,0x23,0x18,0x43, \ +0xE8,0x61,0x0A,0x48,0x01,0x38,0xFD,0xD1,0xFF,0xF7,0x0D,0xFF,0x07,0x1C,0x20, \ +0x1C,0xFF,0xF7,0x1F,0xFF,0xE8,0x69,0x01,0x23,0x9B,0x02,0x98,0x43,0xE8,0x61, \ +0x03,0x48,0x01,0x38,0xFD,0xD1,0x38,0x1C,0xB0,0xBD,0x00,0x00,0x40,0x00,0x00, \ +0x04,0x28,0x23,0x00,0x00,0x80,0xB5,0x12,0x48,0x13,0x4F,0x41,0x7A,0x11,0x48, \ +0x00,0x29,0x0D,0xD0,0x01,0x78,0x00,0x29,0x0A,0xDD,0x00,0x78,0x08,0x21,0x01, \ +0x43,0x0A,0x20,0xFF,0xF7,0x9A,0xFE,0x48,0x20,0x78,0x81,0x18,0x20,0x38,0x81, \ +0x07,0xE0,0x01,0x78,0x0A,0x20,0xFF,0xF7,0x91,0xFE,0x90,0x20,0x78,0x81,0x30, \ +0x20,0x38,0x81,0x78,0x89,0x39,0x89,0x40,0x18,0x05,0x49,0x08,0x80,0x01,0xF0, \ +0x68,0xFD,0x80,0xBD,0x00,0x00,0x14,0x01,0x00,0x02,0x45,0x01,0x00,0x02,0x18, \ +0x00,0x00,0x02,0x40,0x01,0x00,0x02,0x00,0xB5,0x00,0x28,0x04,0xD0,0x84,0x21, \ +0x0C,0x20,0xFF,0xF7,0x75,0xFE,0x00,0xBD,0x04,0x21,0x0C,0x20,0xFF,0xF7,0x70, \ +0xFE,0x00,0xBD,0x00,0x00,0xC1,0x0A,0x01,0xD3,0x00,0x20,0xF7,0x46,0xFF,0x22, \ +0x01,0x32,0x02,0x40,0x01,0x21,0x00,0x2A,0x01,0xD0,0x08,0x1C,0xF7,0x46,0x80, \ +0x0A,0x01,0xD3,0x08,0x1C,0xF7,0x46,0x02,0x20,0xF7,0x46,0x80,0xB4,0x00,0x2A, \ +0x0A,0xD9,0x07,0x78,0x0B,0x78,0x01,0x31,0x01,0x30,0x9F,0x42,0x02,0xD0,0x01, \ +0x20,0x80,0xBC,0xF7,0x46,0x01,0x3A,0xF4,0xD1,0x00,0x20,0xF9,0xE7,0x00,0x2A, \ +0x05,0xD9,0x03,0x78,0x01,0x30,0x0B,0x70,0x01,0x31,0x01,0x3A,0xF9,0xD1,0xF7, \ +0x46,0x80,0xB4,0x00,0x22,0x00,0x29,0x03,0x78,0x0C,0xD9,0x07,0x78,0x01,0x30, \ +0xFF,0x2F,0x01,0xD0,0x01,0x22,0x03,0xE0,0x00,0x29,0xF7,0xD8,0x01,0x2A,0x02, \ +0xD1,0x58,0x08,0x00,0xD3,0x00,0x22,0x80,0xBC,0x10,0x1C,0xF7,0x46,0xF8,0xB5, \ +0x0C,0x1C,0x1C,0x49,0x07,0x1C,0x1C,0x4E,0x1D,0x48,0x31,0x60,0xC0,0x6C,0x00, \ +0x25,0xA8,0x42,0x19,0xD9,0x06,0x22,0x38,0x1C,0x31,0x68,0xFF,0xF7,0xC2,0xFF, \ +0x00,0x90,0x00,0x98,0x00,0x28,0x08,0xD1,0x30,0x68,0xC1,0x88,0xA1,0x42,0x01, \ +0xD1,0x01,0x20,0xF8,0xBD,0xC4,0x80,0x00,0x20,0xFB,0xE7,0x30,0x68,0x01,0x35, \ 
+0x08,0x30,0x30,0x60,0x0F,0x48,0xC0,0x6C,0xA8,0x42,0xE5,0xD8,0x0D,0x48,0xC1, \ +0x6C,0x01,0x31,0xC1,0x64,0xC1,0x6C,0x07,0x29,0x03,0xD9,0x07,0x49,0x31,0x60, \ +0x08,0x21,0xC1,0x64,0x39,0x88,0x30,0x68,0x01,0x80,0x79,0x88,0x41,0x80,0xB9, \ +0x88,0x81,0x80,0x30,0x68,0xC4,0x80,0x00,0x20,0xDD,0xE7,0x00,0x00,0x10,0x08, \ +0x00,0x02,0x28,0x01,0x00,0x02,0x04,0x05,0x00,0x02,0x02,0x78,0x11,0x43,0x01, \ +0x70,0xF7,0x46,0x02,0x78,0xC9,0x43,0x11,0x40,0x01,0x70,0xF7,0x46,0x00,0x78, \ +0x08,0x40,0x01,0xD0,0x01,0x20,0xF7,0x46,0x00,0x20,0xF7,0x46,0x05,0x49,0x8A, \ +0x6C,0x12,0x01,0x02,0x70,0x8A,0x6C,0x12,0x01,0x12,0x0A,0x42,0x70,0x88,0x6C, \ +0x01,0x30,0x88,0x64,0xF7,0x46,0x04,0x05,0x00,0x02,0xB0,0xB4,0x00,0x2A,0x16, \ +0xD1,0x0D,0x4A,0x0F,0x06,0x92,0x7A,0x3F,0x0E,0xBA,0x42,0x00,0xDC,0x11,0x1C, \ +0x4F,0x00,0x0B,0x49,0x09,0x4A,0xCD,0x88,0xD4,0x5B,0x64,0x19,0xE4,0x18,0x04, \ +0x70,0xD2,0x5B,0xC9,0x88,0x51,0x18,0xC9,0x18,0x09,0x0A,0x41,0x70,0xB0,0xBC, \ +0xF7,0x46,0x00,0x21,0x01,0x70,0x41,0x70,0xF9,0xE7,0x14,0x01,0x00,0x02,0x2C, \ +0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x06,0x49,0x09,0x78,0x01,0x29,0x07,0xD1, \ +0x05,0x49,0xC9,0x7A,0x01,0x29,0x03,0xD1,0x01,0x78,0x40,0x23,0x19,0x43,0x01, \ +0x70,0xF7,0x46,0x00,0x00,0x30,0x00,0x00,0x02,0x98,0x00,0x00,0x02,0xF0,0xB5, \ +0x29,0x4C,0x07,0x1C,0x00,0x26,0x27,0x70,0xE1,0x1D,0x03,0x31,0x66,0x70,0x66, \ +0x80,0x06,0x22,0x25,0x48,0xFF,0xF7,0x3F,0xFF,0x25,0x4D,0xE1,0x1D,0x09,0x31, \ +0x06,0x22,0xE8,0x1D,0x35,0x30,0xFF,0xF7,0x37,0xFF,0xFF,0x20,0x20,0x71,0x60, \ +0x71,0xA0,0x71,0xE0,0x71,0x20,0x72,0x60,0x72,0x38,0x1C,0x40,0x28,0x1D,0x4F, \ +0x1D,0xD0,0x00,0xF0,0x3E,0xF8,0x00,0xF0,0x46,0xF8,0xE5,0x1D,0x1D,0x35,0x28, \ +0x1C,0x00,0xF0,0x61,0xF8,0x2D,0x18,0x28,0x1C,0x00,0xF0,0x79,0xF8,0x2D,0x18, \ +0x16,0x48,0x80,0x7D,0x02,0x28,0x03,0xD1,0x28,0x1C,0x00,0xF0,0x87,0xF8,0x2D, \ +0x18,0x28,0x1C,0x00,0xF0,0xC3,0xF8,0x28,0x18,0x00,0x1B,0xB8,0x66,0xB8,0x65, \ +0xF0,0xBD,0x26,0x76,0x0F,0x4E,0xE8,0x1D,0x72,0x79,0x15,0x30,0xE1,0x1D,0x13, \ +0x31,0x62,0x76,0xFF,0xF7,0x04,0xFF,0x70,0x79,0x00,0x19,0x1A,0x30,0x00,0xF0, \ +0x59,0xF8,0x70,0x79,0x20,0x30,0x00,0x06,0x00,0x0E,0xB8,0x65,0xF0,0xBD,0x00, \ +0x00,0x48,0x07,0x00,0x02,0x74,0x00,0x00,0x02,0xD0,0x00,0x00,0x02,0x04,0x05, \ +0x00,0x02,0x18,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x03,0x49,0x02,0x48,0x09, \ +0x88,0x01,0x80,0xF7,0x46,0x00,0x00,0x68,0x07,0x00,0x02,0x98,0x00,0x00,0x02, \ +0x0D,0x49,0x0C,0x48,0x8A,0x7A,0x92,0x00,0x02,0x80,0xC9,0x7A,0x00,0x29,0x03, \ +0xD0,0x01,0x88,0x10,0x23,0x19,0x43,0x01,0x80,0x08,0x49,0x49,0x7A,0x01,0x29, \ +0x04,0xD1,0x01,0x88,0x22,0x23,0x19,0x43,0x01,0x80,0xF7,0x46,0x01,0x88,0x02, \ +0x23,0x19,0x43,0x01,0x80,0xF7,0x46,0x6A,0x07,0x00,0x02,0x98,0x00,0x00,0x02, \ +0x14,0x01,0x00,0x02,0x90,0xB4,0x01,0x1C,0x00,0x20,0x0A,0x4A,0x08,0x70,0x53, \ +0x79,0x00,0x2B,0x08,0xD9,0x08,0x4B,0x1F,0x18,0x3F,0x7D,0x0C,0x18,0x01,0x30, \ +0xA7,0x70,0x57,0x79,0x87,0x42,0xF7,0xD8,0x50,0x79,0x48,0x70,0x50,0x79,0x90, \ +0xBC,0x02,0x30,0xF7,0x46,0x00,0x00,0x14,0x01,0x00,0x02,0x98,0x00,0x00,0x02, \ +0x80,0xB4,0x01,0x1C,0x01,0x20,0x08,0x70,0x07,0x4A,0x00,0x20,0x13,0x18,0x1B, \ +0x7C,0x00,0x2B,0x04,0xD0,0x0F,0x18,0x01,0x30,0x04,0x28,0xBB,0x70,0xF6,0xD3, \ +0x48,0x70,0x80,0xBC,0x02,0x30,0xF7,0x46,0x00,0x00,0x18,0x00,0x00,0x02,0x03, \ +0x21,0x01,0x70,0x01,0x22,0x42,0x70,0x01,0x30,0x80,0x18,0x02,0x4A,0x12,0x7D, \ +0x02,0x70,0x08,0x1C,0xF7,0x46,0x00,0x00,0x18,0x00,0x00,0x02,0xB0,0xB4,0x04, \ +0x22,0x02,0x70,0x06,0x23,0x43,0x70,0x81,0x70,0x82,0x1C,0x13,0x4C,0x51,0x1C, \ +0x62,0x7B,0x12,0x4F,0x0A,0x70,0x62,0x88,0x4A,0x70,0x62,0x88,0x12,0x0A,0xFB, \ 
+0x1D,0x69,0x33,0x8A,0x70,0x1D,0x7A,0x00,0x22,0x03,0x31,0x07,0x30,0x00,0x2D, \ +0x0E,0xD0,0x60,0x37,0xFD,0x8A,0x0D,0x70,0xF9,0x8A,0x09,0x12,0x01,0x70,0xF8, \ +0x8A,0x21,0x88,0x40,0x1A,0xF8,0x82,0xF8,0x8A,0x00,0x28,0x03,0xD1,0x1A,0x72, \ +0x01,0xE0,0x0A,0x70,0x02,0x70,0xB0,0xBC,0x08,0x20,0xF7,0x46,0x00,0x00,0x98, \ +0x00,0x00,0x02,0x04,0x05,0x00,0x02,0x06,0x21,0x01,0x70,0x02,0x21,0x41,0x70, \ +0x04,0x49,0x02,0x30,0x0A,0x89,0x02,0x70,0x09,0x89,0x09,0x0A,0x41,0x70,0x04, \ +0x20,0xF7,0x46,0x00,0x00,0x98,0x00,0x00,0x02,0x05,0x22,0x02,0x70,0x04,0x22, \ +0x42,0x70,0x81,0x70,0x04,0x49,0x03,0x30,0x09,0x7B,0x01,0x70,0x00,0x21,0x41, \ +0x70,0x81,0x70,0x06,0x20,0xF7,0x46,0x98,0x00,0x00,0x02,0xF8,0xB5,0x36,0x48, \ +0x00,0x68,0xFF,0xF7,0x09,0xFE,0x07,0x1C,0x34,0x48,0x00,0x68,0x44,0x68,0x20, \ +0x78,0x06,0x07,0x36,0x0F,0x02,0x2F,0x00,0xD1,0xF8,0xBD,0x31,0x4D,0x28,0x79, \ +0x02,0x28,0x0A,0xD1,0xE0,0x1D,0x09,0x30,0x06,0x22,0x2E,0x49,0xFF,0xF7,0x07, \ +0xFE,0x00,0x90,0x00,0x98,0x00,0x28,0x00,0xD1,0xEF,0xE7,0x30,0x06,0x00,0x0E, \ +0x08,0x28,0x44,0xD1,0x29,0x48,0xC0,0x7A,0x05,0x28,0x00,0xD0,0xE6,0xE7,0x28, \ +0x4E,0x00,0x2F,0x14,0xD0,0x28,0x79,0x02,0x28,0x11,0xD1,0xE0,0x1D,0x03,0x30, \ +0x06,0x22,0x31,0x1C,0xFF,0xF7,0xED,0xFD,0x00,0x90,0x00,0x98,0x01,0x28,0x00, \ +0xD1,0xD5,0xE7,0x60,0x78,0x81,0x08,0x00,0xD2,0xD1,0xE7,0x40,0x08,0x00,0xD3, \ +0xCE,0xE7,0x28,0x79,0x01,0x28,0x10,0xD1,0xE0,0x1D,0x09,0x30,0x06,0x22,0x31, \ +0x1C,0xFF,0xF7,0xD8,0xFD,0x00,0x90,0x00,0x98,0x01,0x28,0x00,0xD1,0xC0,0xE7, \ +0x60,0x78,0x81,0x08,0x01,0xD2,0x40,0x08,0x00,0xD3,0xBA,0xE7,0x13,0x48,0x01, \ +0x78,0x00,0x29,0x06,0xD0,0xC0,0x78,0x00,0x28,0x07,0xD0,0x60,0x78,0xC0,0x09, \ +0x04,0xD2,0xAF,0xE7,0x60,0x78,0xC0,0x09,0x00,0xD3,0xAB,0xE7,0x21,0x78,0x38, \ +0x1C,0x00,0xF0,0x72,0xFD,0x04,0xE0,0x00,0x28,0x02,0xD1,0x38,0x1C,0x00,0xF0, \ +0xEA,0xFB,0xA0,0xE7,0x00,0x00,0xA0,0x01,0x00,0x02,0x10,0x00,0x00,0x02,0xC8, \ +0x00,0x00,0x02,0x74,0x00,0x00,0x02,0x74,0x05,0x00,0x02,0xA6,0x00,0x00,0x02, \ +0x30,0x00,0x00,0x02,0x08,0xB5,0x00,0x21,0x00,0x91,0x00,0x28,0x0C,0xD1,0x0B, \ +0x48,0x00,0x68,0x40,0x68,0x81,0x7D,0xC2,0x7D,0x12,0x02,0x11,0x43,0x09,0x04, \ +0x09,0x0C,0x0A,0x30,0xFF,0xF7,0xC1,0xFD,0x00,0x90,0x00,0x98,0x01,0x28,0x03, \ +0xD1,0x04,0x48,0x80,0x79,0x00,0x28,0x01,0xD0,0x00,0xF0,0x05,0xF8,0x08,0xBD, \ +0x10,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x80,0xB5,0x05,0x48,0x00,0x78,0x80, \ +0x09,0x04,0xD3,0x04,0x4F,0x38,0x68,0x02,0xF0,0x4D,0xF8,0x38,0x60,0x80,0xBD, \ +0x00,0x00,0x63,0x01,0x00,0x02,0x10,0x00,0x00,0x02,0xF1,0xB5,0x88,0xB0,0x7D, \ +0x24,0xE4,0x00,0x01,0x21,0x89,0x06,0x88,0x68,0xFC,0x49,0x00,0x0B,0xFC,0x27, \ +0x07,0x40,0x04,0x20,0x38,0x40,0xF8,0x4D,0x07,0x91,0x73,0xD0,0xB8,0x09,0x06, \ +0xD3,0xF7,0x48,0xF8,0x4B,0x81,0x6A,0x19,0x40,0x81,0x62,0x14,0x21,0x05,0xE0, \ +0xF4,0x48,0xF5,0x4B,0x81,0x6A,0x19,0x40,0x81,0x62,0x0E,0x21,0x68,0x46,0x02, \ +0x22,0x01,0xF0,0x55,0xFE,0x01,0x28,0x02,0xD0,0x00,0x20,0x09,0xB0,0xF0,0xBD, \ +0x68,0x20,0xFF,0xF7,0xB1,0xFB,0x06,0x90,0x06,0x98,0x10,0x23,0x18,0x40,0x00, \ +0x09,0x00,0x06,0x06,0x99,0x00,0x0E,0x49,0x09,0x40,0x18,0x06,0x90,0x08,0x98, \ +0x99,0x05,0xCA,0x68,0x10,0x43,0x03,0xE0,0x01,0x21,0x89,0x06,0xC9,0x68,0x08, \ +0x43,0x41,0x09,0x02,0xD2,0x01,0x3C,0x00,0x2C,0xF6,0xDC,0x10,0x23,0x98,0x43, \ +0x06,0x1C,0x00,0x2C,0x21,0xD1,0xDC,0x4C,0x1B,0x02,0xE0,0x69,0x18,0x43,0xE0, \ +0x61,0xE0,0x69,0xDC,0x4B,0x18,0x40,0xE0,0x61,0x06,0x21,0x02,0x20,0xFF,0xF7, \ +0x7A,0xFB,0x04,0x21,0x02,0x20,0xFF,0xF7,0x76,0xFB,0x05,0x21,0x02,0x20,0xFF, \ +0xF7,0x72,0xFB,0x04,0x21,0x02,0x20,0xFF,0xF7,0x6E,0xFB,0x01,0x21,0x89,0x06, \ 
+0xC8,0x68,0xD2,0x4B,0xA0,0x6A,0x18,0x43,0xA0,0x62,0x00,0x20,0xBA,0xE7,0xCB, \ +0x4C,0xE0,0x69,0xA3,0x01,0x18,0x43,0xE0,0x61,0xE0,0x69,0xCB,0x4B,0x18,0x40, \ +0xE0,0x61,0x05,0x21,0x02,0x20,0xFF,0xF7,0x58,0xFB,0x04,0x21,0x02,0x20,0xFF, \ +0xF7,0x54,0xFB,0x01,0x21,0x89,0x06,0xC8,0x68,0xA1,0x6A,0xC4,0x4B,0x30,0x43, \ +0x19,0x43,0x00,0xE0,0x32,0xE0,0xA1,0x62,0x00,0x24,0xC2,0x4E,0xC0,0x0A,0x34, \ +0x60,0x17,0xD3,0xC1,0x48,0x40,0x7A,0x01,0x28,0x07,0xD1,0x06,0x98,0x00,0x28, \ +0x04,0xD9,0x38,0x21,0x08,0x20,0xFF,0xF7,0x3A,0xFB,0x03,0xE0,0x80,0x21,0x08, \ +0x20,0xFF,0xF7,0x35,0xFB,0xB4,0x2F,0x07,0xD0,0xC4,0x2F,0x15,0xD0,0xD4,0x2F, \ +0x01,0xD1,0x00,0xF0,0xD1,0xF9,0x20,0x1C,0x80,0xE7,0x02,0x20,0x70,0x72,0x06, \ +0x98,0x07,0x99,0xB3,0x4F,0x88,0x70,0x68,0x46,0x0A,0x30,0x06,0x22,0x29,0x1C, \ +0xFF,0xF7,0xD3,0xFC,0x00,0xA8,0x40,0x88,0x78,0x80,0xED,0xE7,0x00,0xF0,0x9D, \ +0xFA,0xEA,0xE7,0xAC,0x4E,0x30,0x68,0x00,0x7A,0x00,0x28,0x09,0xD0,0xA2,0x48, \ +0xA2,0x4B,0x81,0x6A,0xA5,0x4E,0x19,0x40,0x81,0x62,0xC4,0x20,0x30,0x60,0x00, \ +0x20,0x5E,0xE7,0x01,0x20,0xFF,0xF7,0xF6,0xFA,0x68,0x20,0xFF,0xF7,0x0F,0xFB, \ +0x06,0x90,0x06,0x98,0x10,0x23,0x18,0x40,0x00,0x09,0x00,0x06,0x06,0x99,0x00, \ +0x0E,0x49,0x09,0x40,0x18,0x06,0x90,0x94,0x48,0xC7,0x6A,0x06,0x98,0x00,0x28, \ +0x1F,0xD0,0x01,0x28,0x1F,0xD0,0x02,0x28,0x1F,0xD0,0x03,0x28,0x08,0xD1,0x0B, \ +0x20,0x78,0x43,0xC7,0x08,0x6A,0x20,0xFF,0xF7,0xF3,0xFA,0x00,0x0A,0x00,0xD3, \ +0x01,0x3F,0x93,0x48,0x80,0x89,0x04,0x30,0xB8,0x42,0x01,0xD3,0x18,0x2F,0x11, \ +0xD8,0x8C,0x4E,0xC3,0x20,0x30,0x60,0x85,0x48,0x8F,0x4B,0x81,0x6A,0x19,0x40, \ +0x81,0x62,0x00,0x20,0x28,0xE7,0xFF,0x08,0xEC,0xE7,0xBF,0x08,0xEA,0xE7,0x0B, \ +0x20,0x78,0x43,0x07,0x09,0xE6,0xE7,0x7E,0x49,0xF8,0x02,0xFF,0x23,0x8A,0x6A, \ +0x18,0x43,0x10,0x40,0x88,0x62,0x82,0x48,0x02,0x22,0x00,0x68,0x18,0x21,0x40, \ +0x68,0x01,0xF0,0x65,0xFD,0x01,0x28,0x01,0xD0,0x00,0x20,0x0E,0xE7,0x01,0x21, \ +0x89,0x06,0x08,0x98,0xCA,0x68,0x10,0x43,0x03,0xE0,0x01,0x21,0x89,0x06,0xC9, \ +0x68,0x08,0x43,0x41,0x09,0x02,0xD2,0x01,0x3C,0x00,0x2C,0xF6,0xDC,0x10,0x23, \ +0x98,0x43,0x06,0x1C,0x72,0x48,0x40,0x7A,0x01,0x28,0x07,0xD1,0x06,0x98,0x00, \ +0x28,0x04,0xD9,0x38,0x21,0x08,0x20,0xFF,0xF7,0x9C,0xFA,0x03,0xE0,0x80,0x21, \ +0x08,0x20,0xFF,0xF7,0x97,0xFA,0x00,0x2C,0x21,0xD1,0x63,0x4F,0xF8,0x69,0xBB, \ +0x01,0x18,0x43,0xF8,0x61,0xF8,0x69,0x63,0x4B,0x18,0x40,0xF8,0x61,0x06,0x21, \ +0x02,0x20,0xFF,0xF7,0x88,0xFA,0x04,0x21,0x02,0x20,0xFF,0xF7,0x84,0xFA,0x05, \ +0x21,0x02,0x20,0xFF,0xF7,0x80,0xFA,0x04,0x21,0x02,0x20,0xFF,0xF7,0x7C,0xFA, \ +0x01,0x21,0x89,0x06,0xC8,0x68,0x59,0x4B,0xB8,0x6A,0x18,0x43,0xB8,0x62,0x00, \ +0x20,0xC8,0xE6,0x5D,0x48,0x06,0x60,0x01,0x20,0x80,0x02,0x30,0x40,0x01,0x24, \ +0x00,0x28,0x01,0xD0,0x53,0x48,0x44,0x72,0x06,0x98,0x07,0x99,0x06,0x22,0x88, \ +0x70,0x29,0x1C,0x52,0x4D,0x28,0x68,0x40,0x68,0x0A,0x30,0xFF,0xF7,0x12,0xFC, \ +0x28,0x68,0x87,0x81,0x06,0x98,0x29,0x68,0x00,0x27,0x88,0x73,0x49,0x48,0x07, \ +0x60,0x29,0x68,0x48,0x68,0x42,0x78,0xD2,0x09,0x11,0xD2,0x89,0x89,0x02,0x22, \ +0x18,0x30,0x18,0x39,0x01,0xF0,0xF3,0xFC,0x00,0x28,0x03,0xD1,0x42,0x49,0x8C, \ +0x73,0x0F,0x73,0xD1,0xE0,0x40,0x49,0x03,0x20,0x08,0x73,0x02,0x20,0x88,0x73, \ +0xCB,0xE0,0x02,0x22,0x04,0x21,0x18,0x30,0x01,0xF0,0xE2,0xFC,0x7D,0x20,0xC0, \ +0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x31,0x43,0x03,0xE0,0x01,0x22,0x92,0x06, \ +0xD2,0x68,0x11,0x43,0x4A,0x09,0x02,0xD2,0x01,0x38,0x00,0x28,0xF6,0xDC,0x10, \ +0x23,0x99,0x43,0x0E,0x1C,0x00,0x28,0x21,0xD1,0x2B,0x4C,0x1B,0x02,0xE0,0x69, \ +0x18,0x43,0xE0,0x61,0xE0,0x69,0x2B,0x4B,0x18,0x40,0xE0,0x61,0x06,0x21,0x02, \ 
+0x20,0xFF,0xF7,0x18,0xFA,0x04,0x21,0x02,0x20,0xFF,0xF7,0x14,0xFA,0x05,0x21, \ +0x02,0x20,0xFF,0xF7,0x10,0xFA,0x04,0x21,0x02,0x20,0xFF,0xF7,0x0C,0xFA,0x01, \ +0x22,0x92,0x06,0xD0,0x68,0x21,0x4B,0xA0,0x6A,0x18,0x43,0xA0,0x62,0x38,0x1C, \ +0x58,0xE6,0x28,0x68,0x40,0x68,0x41,0x7E,0x02,0x7E,0x09,0x02,0x11,0x43,0x82, \ +0x7E,0xC0,0x7E,0x12,0x04,0x11,0x43,0x80,0x09,0x02,0x06,0x20,0x48,0x12,0x0E, \ +0xC3,0x1D,0x39,0x33,0x1B,0x78,0x01,0x2B,0x15,0xD1,0x0D,0x23,0x5A,0x43,0x10, \ +0x18,0x02,0x7B,0x12,0x06,0x11,0x43,0x03,0x22,0x52,0x06,0x11,0x60,0x83,0x7B, \ +0x41,0x7B,0x1B,0x02,0x19,0x43,0xC3,0x7B,0x00,0x7C,0x1B,0x04,0x19,0x43,0x00, \ +0x06,0x08,0x43,0x50,0x60,0x94,0x60,0x4C,0xE0,0x02,0x2B,0x4A,0xD1,0x0D,0x23, \ +0x5A,0x43,0x10,0x18,0x02,0x7B,0x12,0x06,0x1D,0xE0,0xE4,0x07,0x00,0x02,0x74, \ +0x05,0x00,0x02,0x40,0x00,0x00,0x04,0xFF,0xA0,0x00,0x00,0xFF,0x70,0x00,0x00, \ +0xFF,0xEF,0x00,0x00,0x00,0xFF,0x3F,0x00,0x04,0x05,0x00,0x02,0x14,0x01,0x00, \ +0x02,0xE0,0x07,0x00,0x02,0x10,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0xFF,0xC0, \ +0x00,0x00,0x9C,0x01,0x00,0x02,0x30,0x00,0x00,0x02,0x11,0x43,0x03,0x22,0x52, \ +0x06,0x11,0x60,0x83,0x7B,0x41,0x7B,0x1B,0x02,0x19,0x43,0xC3,0x7B,0x1B,0x04, \ +0x19,0x43,0x03,0x7C,0x1B,0x06,0x19,0x43,0x51,0x60,0x83,0x7C,0x41,0x7C,0x1B, \ +0x02,0x19,0x43,0xC3,0x7C,0x1B,0x04,0x19,0x43,0x03,0x7D,0x1B,0x06,0x19,0x43, \ +0x51,0x61,0x83,0x7D,0x41,0x7D,0x1B,0x02,0x19,0x43,0xC3,0x7D,0x00,0x7E,0x1B, \ +0x04,0x19,0x43,0x00,0x06,0x08,0x43,0x90,0x61,0x81,0x20,0x90,0x60,0x28,0x68, \ +0x0E,0x22,0x81,0x89,0x40,0x68,0x18,0x30,0x20,0x39,0x01,0xF0,0x2D,0xFC,0x00, \ +0x28,0x06,0xD1,0x12,0x49,0x03,0x22,0x8C,0x73,0x0F,0x73,0x52,0x06,0x97,0x60, \ +0x08,0xE0,0x28,0x68,0x81,0x89,0x08,0x39,0x81,0x81,0x0C,0x49,0x03,0x20,0x08, \ +0x73,0x02,0x20,0x88,0x73,0x7C,0x20,0xFF,0xF7,0x7C,0xF9,0x29,0x68,0x48,0x74, \ +0x66,0x20,0xFF,0xF7,0x77,0xF9,0x29,0x68,0x88,0x74,0xB0,0x09,0x05,0xD3,0xF0, \ +0x08,0x01,0xD3,0x24,0x20,0xB8,0xE5,0x20,0x20,0xB6,0xE5,0x38,0x1C,0xB4,0xE5, \ +0x00,0x00,0x04,0x05,0x00,0x02,0xF0,0xB5,0x5F,0x4D,0x68,0x68,0x02,0x28,0x69, \ +0xD1,0x01,0x20,0x01,0xF0,0xE0,0xF8,0x01,0x26,0xEC,0x1D,0x79,0x34,0x6E,0x60, \ +0xA7,0x68,0x08,0x23,0x78,0x78,0x59,0x4A,0x98,0x43,0x78,0x70,0x11,0x78,0x58, \ +0x48,0x01,0x29,0x66,0xD1,0x57,0x4B,0x1A,0x78,0x00,0x2A,0x01,0xD0,0x18,0x21, \ +0x00,0xE0,0x1E,0x21,0xE3,0x68,0x54,0x4D,0x5B,0x1A,0x1B,0x04,0x2D,0x68,0x1B, \ +0x0C,0x5B,0x19,0x51,0x4D,0x01,0x32,0x2B,0x60,0x4F,0x4B,0x4D,0x48,0x1A,0x70, \ +0x1A,0x78,0xBD,0x7D,0x12,0x07,0x12,0x0F,0xF0,0x23,0x2B,0x40,0x1A,0x43,0xBA, \ +0x75,0xE2,0x68,0x4C,0x4D,0x51,0x1A,0x4A,0x4A,0x13,0x88,0x59,0x1A,0x11,0x80, \ +0x00,0x89,0x12,0x88,0xC1,0x1F,0x15,0x39,0x91,0x42,0x04,0xDA,0x47,0x49,0x04, \ +0x38,0x08,0x80,0xAE,0x72,0x11,0xE0,0x78,0x78,0x04,0x23,0x41,0x4A,0x98,0x43, \ +0x78,0x70,0x10,0x88,0x41,0x4B,0x3B,0x4A,0x18,0x30,0x00,0x04,0x00,0x0C,0x02, \ +0x21,0x18,0x80,0x11,0x70,0x28,0x28,0x01,0xDA,0x28,0x20,0x18,0x80,0x3B,0x49, \ +0x3C,0x48,0x09,0x88,0xE1,0x60,0x00,0x68,0x80,0x7D,0x00,0xF0,0xAA,0xFF,0x20, \ +0x61,0x38,0x48,0x38,0x49,0x00,0x68,0x80,0x7D,0x08,0x70,0x01,0xF0,0x02,0xF8, \ +0x35,0x49,0x08,0x78,0x03,0x28,0x05,0xD1,0x2A,0x48,0x80,0x6A,0xFF,0xF7,0x72, \ +0xFA,0x03,0xE0,0x3E,0xE0,0x00,0x20,0xFF,0xF7,0x6D,0xFA,0x30,0x1C,0x26,0x4A, \ +0xA8,0x72,0x10,0x78,0x2E,0x4D,0x23,0x4E,0x02,0x28,0x02,0xD1,0x00,0x23,0x10, \ +0xE0,0x31,0xE0,0x28,0x48,0x2B,0x49,0x00,0x68,0x80,0x7D,0x89,0x7A,0x88,0x42, \ +0x00,0xDB,0x08,0x1C,0x28,0x49,0x40,0x00,0x08,0x5A,0xE9,0x88,0x49,0x00,0x40, \ +0x18,0x21,0x69,0x43,0x18,0xB8,0x1C,0x21,0x4F,0x00,0x22,0x39,0x78,0xFF,0xF7, \ 
+0xFA,0xFA,0x22,0x49,0x20,0x69,0x09,0x88,0x20,0x4A,0x40,0x18,0xE9,0x88,0x40, \ +0x18,0x39,0x78,0x49,0x00,0x51,0x5A,0x40,0x18,0x1E,0x49,0x09,0x88,0x41,0x18, \ +0x01,0x20,0x01,0xF0,0x29,0xF8,0x02,0x21,0x71,0x60,0x00,0x21,0x0D,0x48,0x31, \ +0x64,0x80,0x89,0xF0,0x63,0x11,0x48,0x00,0x68,0x41,0x73,0xF0,0xBD,0x00,0x21, \ +0x29,0x64,0x80,0x89,0xE8,0x63,0x0D,0x48,0x00,0x68,0x41,0x73,0x01,0x20,0xFF, \ +0xF7,0x92,0xF8,0x00,0x22,0x10,0x21,0x10,0x48,0x02,0xF0,0x95,0xFE,0xF0,0xBD, \ +0x04,0x05,0x00,0x02,0x61,0x01,0x00,0x02,0xD0,0x00,0x00,0x02,0x62,0x01,0x00, \ +0x02,0x64,0x01,0x00,0x02,0x68,0x01,0x00,0x02,0x74,0x05,0x00,0x02,0x6A,0x01, \ +0x00,0x02,0x04,0x00,0x00,0x02,0x45,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x14, \ +0x01,0x00,0x02,0x2C,0x01,0x00,0x02,0x40,0x01,0x00,0x02,0x42,0x01,0x00,0x02, \ +0xE8,0x06,0x00,0x02,0xB0,0xB5,0x21,0x4F,0x78,0x68,0x03,0x28,0x3C,0xD1,0x20, \ +0x49,0x00,0x20,0x48,0x60,0x08,0x05,0x41,0x6A,0x1E,0x4C,0x05,0x31,0x01,0x62, \ +0x20,0x68,0x1D,0x4D,0x80,0x7D,0x28,0x70,0x00,0xF0,0x75,0xFF,0x28,0x78,0x03, \ +0x28,0x03,0xD1,0xB8,0x6A,0xFF,0xF7,0xE7,0xF9,0x02,0xE0,0x00,0x20,0xFF,0xF7, \ +0xE3,0xF9,0xF9,0x1D,0x69,0x31,0x01,0x20,0x88,0x72,0x20,0x68,0x14,0x49,0x80, \ +0x7D,0x89,0x7A,0x88,0x42,0x00,0xDB,0x08,0x1C,0x12,0x49,0xFA,0x1D,0x09,0x88, \ +0x79,0x32,0x12,0x69,0x05,0x31,0x89,0x18,0x10,0x4A,0xD2,0x88,0x89,0x18,0x0F, \ +0x4A,0x40,0x00,0x10,0x5A,0x08,0x18,0x0E,0x49,0x09,0x88,0x44,0x18,0x01,0x20, \ +0x00,0xF0,0xCC,0xFF,0x01,0x20,0x21,0x1C,0x00,0xF0,0xAE,0xFF,0x02,0x20,0x78, \ +0x60,0xB0,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x00, \ +0x00,0x02,0x45,0x01,0x00,0x02,0x14,0x01,0x00,0x02,0x40,0x01,0x00,0x02,0x18, \ +0x00,0x00,0x02,0x2C,0x01,0x00,0x02,0x42,0x01,0x00,0x02,0x00,0xB5,0x11,0x49, \ +0x09,0x68,0x49,0x68,0x0B,0x78,0x1A,0x07,0x10,0xD1,0x1A,0x11,0x0D,0x2A,0x0D, \ +0xD2,0x01,0xA3,0x9B,0x5C,0x5B,0x00,0x9F,0x44,0x0E,0x0E,0x0E,0x0E,0x11,0x0A, \ +0x09,0x09,0x06,0x09,0x0E,0x0E,0x0E,0x00,0x08,0x1C,0x00,0xF0,0x38,0xF8,0x00, \ +0xBD,0x08,0x1C,0x00,0xF0,0xAC,0xF8,0x00,0xBD,0xFF,0xF7,0x07,0xFC,0x00,0xBD, \ +0x00,0xF0,0x04,0xF8,0x00,0xBD,0x00,0x00,0x10,0x00,0x00,0x02,0x90,0xB5,0x0F, \ +0x4C,0x60,0x7B,0x00,0x28,0x19,0xD0,0x0E,0x4F,0x38,0x68,0x40,0x68,0x42,0x7E, \ +0x18,0x30,0x00,0x2A,0x09,0xD0,0x0B,0x49,0x49,0x79,0x91,0x42,0x0E,0xD1,0x0A, \ +0x49,0x02,0x30,0xFF,0xF7,0x90,0xF9,0x00,0x28,0x08,0xD1,0x38,0x68,0x08,0x49, \ +0x40,0x68,0x0A,0x30,0x06,0x22,0xFF,0xF7,0x97,0xF9,0x01,0x20,0xA0,0x73,0x90, \ +0xBD,0x64,0x05,0x00,0x02,0x10,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0xAC,0x00, \ +0x00,0x02,0x46,0x01,0x00,0x02,0xF0,0xB5,0x34,0x4D,0x07,0x1C,0xEC,0x1D,0x69, \ +0x34,0xE0,0x7A,0x01,0x28,0x19,0xD1,0x38,0x1C,0x00,0xF0,0x2D,0xF9,0x00,0x28, \ +0x14,0xD0,0x2F,0x49,0xCA,0x7C,0x2F,0x49,0x09,0x7D,0x8A,0x42,0x0E,0xD1,0x81, \ +0x42,0x0C,0xD1,0x2D,0x48,0x2D,0x4B,0xC1,0x6B,0x00,0x05,0x40,0x6A,0x08,0x1A, \ +0x98,0x42,0x04,0xD9,0x01,0x20,0xE0,0x70,0xFF,0xF7,0xD5,0xFB,0xF0,0xBD,0xE0, \ +0x7A,0x03,0x28,0x03,0xD1,0x05,0xF0,0x41,0xFF,0x00,0x28,0xF7,0xD0,0xE0,0x7A, \ +0x04,0x28,0x03,0xD1,0x01,0x20,0x05,0xF0,0x65,0xFE,0xF0,0xBD,0xE0,0x7A,0x05, \ +0x28,0x33,0xD1,0x20,0x4E,0xFC,0x1D,0xFD,0x1D,0x30,0x79,0xF7,0x1F,0x1B,0x3F, \ +0x19,0x35,0x09,0x34,0x02,0x28,0x09,0xD1,0xA8,0x78,0x40,0x08,0xE0,0xD3,0x06, \ +0x22,0x20,0x1C,0x39,0x1C,0xFF,0xF7,0x35,0xF9,0x00,0x28,0xD9,0xD1,0x30,0x79, \ +0x01,0x28,0x1C,0xD1,0xA8,0x78,0x80,0x08,0xD3,0xD3,0x06,0x22,0x20,0x1C,0x39, \ +0x1C,0xFF,0xF7,0x28,0xF9,0x00,0x28,0x12,0xD1,0x06,0x20,0x00,0xF0,0x0B,0xFF, \ +0x09,0x4D,0x00,0x20,0xE9,0x1D,0x59,0x31,0x48,0x73,0x28,0x74,0x04,0x20,0x68, \ 
+0x73,0x00,0xF0,0x1B,0xF8,0x00,0xF0,0xBB,0xFE,0x09,0x48,0xC1,0x69,0x01,0x31, \ +0xC1,0x61,0xFF,0xF7,0x8D,0xFB,0xF0,0xBD,0x04,0x05,0x00,0x02,0xD0,0x00,0x00, \ +0x02,0x18,0x00,0x00,0x02,0x40,0x00,0x00,0x04,0x88,0x13,0x00,0x00,0xC8,0x00, \ +0x00,0x02,0x7C,0x01,0x00,0x02,0x00,0xB5,0xFF,0xF7,0x7B,0xFB,0x00,0xBD,0xF0, \ +0xB5,0x84,0xB0,0x01,0x21,0x89,0x06,0x8A,0x6A,0x01,0x92,0x4E,0x6A,0x40,0x49, \ +0x09,0x68,0x03,0x91,0x4A,0x68,0x53,0x7E,0x17,0x7E,0x1B,0x02,0x3B,0x43,0x97, \ +0x7E,0x3F,0x04,0x3B,0x43,0xD7,0x7E,0x3F,0x06,0x3B,0x43,0x1C,0x1C,0x57,0x7F, \ +0x13,0x7F,0x3F,0x02,0x3B,0x43,0x97,0x7F,0xD2,0x7F,0x3F,0x04,0x3B,0x43,0x12, \ +0x06,0x13,0x43,0x8A,0x89,0x89,0x7B,0x18,0x3A,0xD7,0x00,0x1D,0x1C,0x00,0x29, \ +0x22,0xD0,0x01,0x29,0x22,0xD0,0x02,0x29,0x22,0xD0,0x03,0x29,0x0C,0xD1,0x0B, \ +0x20,0x39,0x1C,0x03,0xF0,0x43,0xFA,0x00,0x91,0x79,0x1A,0x0B,0x20,0x03,0xF0, \ +0x3E,0xFA,0x00,0x99,0x00,0x29,0x00,0xD9,0x01,0x30,0x03,0x99,0x4B,0x69,0x01, \ +0x9A,0x19,0x1A,0x00,0x2A,0x02,0xD0,0x83,0x42,0x00,0xD2,0x01,0x3A,0x23,0x48, \ +0x00,0x79,0x02,0x28,0x15,0xD1,0xB3,0x42,0x3C,0xD2,0x1D,0xE0,0x38,0x1C,0xED, \ +0xE7,0x78,0x08,0xEB,0xE7,0x79,0x00,0x02,0x91,0x0B,0x20,0x03,0xF0,0x21,0xFA, \ +0x0F,0x1C,0x02,0x99,0xC9,0x1B,0x0B,0x20,0x03,0xF0,0x1B,0xFA,0x00,0x2F,0xDE, \ +0xD9,0x01,0x30,0xDC,0xE7,0x01,0x28,0x26,0xD1,0x00,0x20,0x15,0x4B,0x95,0x42, \ +0x58,0x73,0x03,0xD8,0x95,0x42,0x1F,0xD1,0x8C,0x42,0x1D,0xD9,0x00,0x20,0x01, \ +0x9B,0xC0,0x43,0x93,0x42,0x01,0xD1,0x72,0x1A,0x01,0xE0,0x42,0x1A,0x92,0x19, \ +0xA7,0x18,0x03,0x1B,0x93,0x42,0x00,0xD2,0x01,0x35,0x40,0x1A,0x98,0x42,0x01, \ +0xD2,0x08,0x1B,0x00,0xE0,0x60,0x1A,0x0A,0x28,0x07,0xD9,0x88,0x18,0x39,0x1C, \ +0x00,0xF0,0x0C,0xF8,0x01,0x21,0x89,0x06,0x8D,0x62,0x4F,0x62,0x04,0xB0,0xF0, \ +0xBD,0x10,0x00,0x00,0x02,0xC8,0x00,0x00,0x02,0x64,0x05,0x00,0x02,0xF0,0xB4, \ +0x11,0x4C,0x00,0x22,0xA7,0x69,0xA2,0x61,0x01,0x22,0x01,0x25,0x2B,0x1C,0x56, \ +0x1E,0xB3,0x40,0x3B,0x40,0x1E,0x1C,0xC1,0x23,0x33,0x40,0x0D,0xD0,0x0B,0x4B, \ +0x96,0x00,0xF3,0x18,0xDE,0x6A,0x86,0x1B,0x00,0x2E,0x06,0xDC,0xDE,0x6A,0x00, \ +0x2E,0x03,0xD0,0xDE,0x6A,0x36,0x1A,0x8E,0x19,0xDE,0x62,0x01,0x32,0x08,0x2A, \ +0xE6,0xD9,0xA7,0x61,0xF0,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00,0x00,0x04,0x40, \ +0x00,0x00,0x04,0x00,0xB5,0x08,0x29,0x01,0xD1,0xFF,0xF7,0x9F,0xFA,0x00,0xBD, \ +0x90,0xB4,0x10,0x4B,0x01,0x1C,0x1B,0x68,0x24,0x20,0x9B,0x89,0x00,0x22,0x1F, \ +0x1F,0x00,0x23,0x24,0x2F,0x0A,0xD9,0x0C,0x5C,0x03,0x2C,0x0A,0xD0,0x0C,0x18, \ +0x64,0x78,0x02,0x34,0x20,0x18,0x00,0x2A,0x04,0xD1,0x87,0x42,0xF4,0xD8,0x18, \ +0x1C,0x90,0xBC,0xF7,0x46,0x08,0x18,0x41,0x78,0x01,0x29,0x01,0xD0,0x18,0x1C, \ +0xF7,0xE7,0x80,0x78,0xF5,0xE7,0x00,0x00,0x10,0x00,0x00,0x02,0xF0,0xB5,0x82, \ +0xB0,0x17,0x4D,0x18,0x4C,0x18,0x4F,0x00,0x26,0x00,0x22,0xD2,0x43,0x00,0x92, \ +0x01,0x22,0x01,0xAB,0x16,0x48,0x16,0x49,0x02,0xF0,0x5D,0xFD,0xAE,0x61,0x01, \ +0x98,0x41,0x0D,0x03,0xD3,0x40,0x20,0x00,0xF0,0x28,0xF8,0x14,0xE0,0x41,0x09, \ +0x03,0xD3,0x50,0x20,0x00,0xF0,0x22,0xF8,0x0E,0xE0,0x40,0x0F,0x05,0xD3,0x80, \ +0x20,0x00,0xF0,0x1C,0xF8,0x08,0xE0,0x00,0xF0,0x8B,0xF8,0x20,0x78,0x40,0x09, \ +0x03,0xD3,0x38,0x68,0x00,0x7B,0x00,0x0A,0xF6,0xD2,0x00,0x22,0x01,0x21,0x06, \ +0x48,0x02,0xF0,0x41,0xFC,0xD3,0xE7,0x84,0x05,0x00,0x02,0x63,0x01,0x00,0x02, \ +0x04,0x00,0x00,0x02,0x08,0x07,0x00,0x02,0x10,0x10,0x10,0x10,0x28,0x07,0x00, \ +0x02,0xF8,0xB5,0x07,0x1C,0xFF,0xF7,0xA0,0xF8,0x00,0x25,0x01,0x26,0x80,0x2F, \ +0x2C,0x4C,0x11,0xD1,0x02,0x20,0xE1,0x1F,0x69,0x39,0x48,0x73,0x03,0xF0,0x5F, \ +0xF9,0x01,0x1C,0xC8,0x20,0x03,0xF0,0x95,0xF9,0xC8,0x00,0x40,0x18,0x80,0x00, \ 
+0x80,0x08,0x11,0xD0,0x01,0x38,0xFD,0xD1,0x0E,0xE0,0x40,0x2F,0x04,0xD1,0x22, \ +0x48,0x86,0x63,0x22,0x48,0x45,0x80,0x07,0xE0,0x50,0x2F,0x05,0xD1,0x20,0x49, \ +0x21,0x48,0x06,0x22,0xFE,0xF7,0xC7,0xFF,0x26,0x70,0x1F,0x48,0xFF,0xF7,0x33, \ +0xF8,0x01,0x26,0xA6,0x72,0x1F,0x4C,0x50,0x2F,0x1D,0x48,0x08,0xD0,0x17,0x4E, \ +0xB1,0x6D,0xE1,0x60,0xB1,0x6D,0xC0,0x79,0x00,0xF0,0xAE,0xFC,0x20,0x61,0x0D, \ +0xE0,0x12,0x4E,0xB1,0x6E,0xE1,0x60,0xB1,0x6E,0xC0,0x79,0x00,0xF0,0xA5,0xFC, \ +0x16,0x49,0x20,0x61,0x09,0x88,0x20,0x69,0x40,0x18,0x0D,0x49,0x48,0x80,0x0C, \ +0x48,0x0B,0x4E,0xA0,0x60,0x00,0xF0,0x71,0xF9,0x00,0xF0,0xE4,0xF9,0x00,0x90, \ +0x80,0x2F,0x07,0xD1,0x00,0x98,0x00,0x28,0x07,0xD1,0x01,0x20,0xF1,0x1D,0x59, \ +0x31,0x48,0x73,0x02,0xE0,0x40,0x2F,0x00,0xD1,0xB5,0x63,0xF8,0xBD,0x74,0x05, \ +0x00,0x02,0x04,0x05,0x00,0x02,0x48,0x07,0x00,0x02,0x4C,0x07,0x00,0x02,0x46, \ +0x01,0x00,0x02,0x5E,0x07,0x00,0x02,0x14,0x01,0x00,0x02,0x84,0x05,0x00,0x02, \ +0x40,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x96,0x4C,0x00,0x25,0x20,0x68,0x47, \ +0x68,0x39,0x79,0x49,0x08,0x00,0xD3,0x01,0x25,0x93,0x49,0xC9,0x78,0x00,0x29, \ +0x0C,0xD0,0x39,0x78,0x08,0x29,0x09,0xD1,0x91,0x4A,0x11,0x78,0x00,0x29,0x05, \ +0xD0,0x81,0x7D,0x53,0x78,0x99,0x42,0x01,0xDD,0x51,0x78,0x81,0x75,0x20,0x68, \ +0x80,0x7B,0x00,0x28,0x0D,0xD1,0xF8,0x1D,0x0F,0x30,0xFE,0xF7,0xCB,0xFF,0x38, \ +0x78,0x00,0x28,0x06,0xD1,0x20,0x68,0x80,0x8A,0x64,0x28,0x02,0xDD,0x78,0x1C, \ +0xFE,0xF7,0xF3,0xFF,0x81,0x4B,0x2A,0x1C,0x18,0x68,0x00,0x23,0x81,0x7D,0xB8, \ +0x1C,0x06,0x1C,0xFE,0xF7,0xC6,0xFF,0x7F,0x4B,0x80,0x4C,0x18,0x78,0x00,0x28, \ +0x50,0xD1,0x7A,0x4B,0x01,0x21,0x18,0x68,0x00,0x2D,0x60,0x61,0xA1,0x61,0x41, \ +0x68,0xA1,0x60,0x3B,0xD1,0x7A,0x49,0x82,0x8A,0x09,0x89,0x8A,0x42,0x36,0xDB, \ +0x79,0x4A,0x12,0x78,0x00,0x2A,0x32,0xD1,0x74,0x4B,0x01,0x22,0x1A,0x70,0x76, \ +0x4B,0x00,0x22,0x1A,0x70,0xFA,0x1D,0x75,0x4B,0x17,0x32,0x1A,0x60,0x82,0x8A, \ +0x6B,0x4B,0x1E,0x3A,0x73,0x4B,0x04,0x39,0x1A,0x80,0x73,0x4A,0x09,0x04,0x09, \ +0x0C,0x11,0x80,0xE1,0x60,0x80,0x7D,0x00,0xF0,0x11,0xFC,0x65,0x4B,0x20,0x61, \ +0x18,0x68,0x81,0x7D,0x64,0x48,0x80,0x7A,0x81,0x42,0x00,0xDA,0x08,0x1C,0x6B, \ +0x4A,0x40,0x00,0x10,0x5A,0x6B,0x4A,0xD2,0x88,0x52,0x00,0x80,0x18,0x22,0x69, \ +0x83,0x18,0x30,0x1C,0x2A,0x1C,0xFE,0xF7,0x81,0xFF,0x04,0x21,0x78,0x1C,0xFE, \ +0xF7,0x5F,0xFF,0x0B,0xE0,0x00,0x21,0x5E,0x4A,0x1E,0x1C,0x11,0x70,0x81,0x8A, \ +0x80,0x7D,0x00,0xF0,0xEE,0xFB,0x20,0x61,0x30,0x68,0x80,0x8A,0xE0,0x60,0x78, \ +0x1C,0x40,0x21,0x06,0x1C,0xFE,0xF7,0x56,0xFF,0x00,0x28,0x0A,0xD0,0x4E,0x4B, \ +0x18,0x68,0x81,0x8A,0x08,0x31,0xE1,0x60,0x81,0x8A,0x80,0x7D,0x08,0x31,0x00, \ +0xF0,0xD9,0xFB,0x20,0x61,0x28,0x1C,0x00,0xF0,0x31,0xFB,0x02,0x24,0x00,0x28, \ +0x09,0xD0,0x52,0x48,0xE2,0x1E,0x44,0x63,0x00,0x92,0x01,0x22,0x11,0x21,0x50, \ +0x48,0x01,0xAB,0x02,0xF0,0x0E,0xFC,0x4D,0x48,0x00,0x25,0x45,0x63,0x3F,0x48, \ +0x00,0x68,0x41,0x7B,0x00,0x29,0x32,0xD1,0x39,0x78,0x08,0x29,0x22,0xD1,0x3C, \ +0x49,0xC9,0x78,0x00,0x29,0x1E,0xD0,0x3B,0x49,0x0A,0x78,0x01,0x2A,0x09,0xD0, \ +0x02,0x2A,0x18,0xD1,0x4B,0x78,0x03,0x22,0x03,0x2B,0x06,0xDA,0x4B,0x78,0x01, \ +0x33,0x4B,0x70,0x03,0xE0,0x0C,0x70,0x4D,0x80,0x0D,0xE0,0x4A,0x70,0x4B,0x88, \ +0x01,0x33,0x1B,0x04,0x1B,0x0C,0x4B,0x80,0x03,0x2B,0x05,0xDB,0x4B,0x78,0x03, \ +0x2B,0x02,0xD1,0x0D,0x70,0x4A,0x70,0x4D,0x80,0x2C,0x4B,0x30,0x4A,0x35,0x49, \ +0x1D,0x70,0x15,0x70,0x0D,0x64,0x2B,0x4E,0x4D,0x64,0xB2,0x89,0xCA,0x63,0x10, \ +0x21,0x01,0x73,0x44,0xE0,0x00,0xF0,0x05,0xFC,0x22,0x4C,0x20,0x68,0x41,0x7B, \ +0x04,0x29,0x06,0xD0,0x81,0x7B,0x01,0x31,0x81,0x73,0x08,0x21,0x30,0x1C,0xFE, \ 
+0xF7,0xE7,0xFE,0x20,0x68,0x20,0x4E,0x81,0x7B,0x32,0x7C,0x91,0x42,0x07,0xDA, \ +0x41,0x7B,0x08,0x29,0x04,0xD0,0x45,0x73,0x21,0x68,0x82,0x20,0x08,0x73,0x28, \ +0xE0,0x40,0x7B,0x08,0x28,0x02,0xD1,0x01,0x20,0x00,0xF0,0x4B,0xFC,0x13,0x48, \ +0xC0,0x78,0x00,0x28,0x12,0xD0,0x38,0x78,0x08,0x28,0x0F,0xD1,0x10,0x48,0x01, \ +0x78,0x02,0x29,0x00,0xD1,0x45,0x80,0x41,0x78,0x00,0x29,0x05,0xDD,0x41,0x78, \ +0x04,0x29,0x02,0xDA,0x41,0x78,0x01,0x39,0x41,0x70,0x01,0x21,0x01,0x70,0x09, \ +0x4B,0x0D,0x4A,0x12,0x48,0x1D,0x70,0x15,0x70,0x05,0x64,0x45,0x64,0xB1,0x89, \ +0xC1,0x63,0x20,0x68,0x10,0x21,0x01,0x73,0x02,0xB0,0xF0,0xBD,0x04,0x00,0x00, \ +0x02,0x14,0x01,0x00,0x02,0x24,0x01,0x00,0x02,0x61,0x01,0x00,0x02,0x84,0x05, \ +0x00,0x02,0xD0,0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x62,0x01,0x00,0x02,0x64, \ +0x01,0x00,0x02,0x68,0x01,0x00,0x02,0x6A,0x01,0x00,0x02,0x2C,0x01,0x00,0x02, \ +0x18,0x00,0x00,0x02,0x04,0x05,0x00,0x02,0xE8,0x06,0x00,0x02,0x00,0xB5,0x09, \ +0x48,0x03,0x21,0xC1,0x72,0x41,0x7A,0x00,0x29,0x0A,0xD1,0x01,0x7A,0x00,0x29, \ +0x07,0xD0,0x81,0x7C,0x01,0x29,0x04,0xD1,0x80,0x7B,0x01,0x28,0x01,0xD1,0x00, \ +0xF0,0xAA,0xF9,0x00,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0xF0,0xB5,0x25,0x4F, \ +0x0A,0x20,0x79,0x7B,0x04,0x25,0x24,0x4C,0x24,0x4E,0x02,0x29,0x1C,0xD1,0x38, \ +0x68,0x00,0x28,0x09,0xD1,0x02,0xF0,0x8A,0xFF,0x01,0x1C,0x21,0x48,0x80,0x89, \ +0x02,0xF0,0xBF,0xFF,0x30,0x88,0x09,0x18,0x08,0xE0,0x02,0xF0,0x80,0xFF,0x01, \ +0x1C,0x1C,0x48,0x80,0x89,0x02,0xF0,0xB5,0xFF,0x20,0x88,0x09,0x18,0x00,0x20, \ +0x78,0x61,0x08,0x20,0x00,0xF0,0xB4,0xFB,0xBD,0x74,0xF0,0xBD,0x39,0x68,0x00, \ +0x29,0x10,0xD1,0x79,0x6D,0x32,0x88,0x8B,0x00,0x59,0x18,0x89,0x00,0x89,0x18, \ +0x09,0x1A,0x08,0x20,0x00,0xF0,0xA5,0xFB,0xFE,0xF7,0xF3,0xFB,0x31,0x88,0x0A, \ +0x39,0x40,0x18,0x78,0x61,0x0F,0xE0,0x79,0x6D,0x22,0x88,0x8B,0x00,0x59,0x18, \ +0x89,0x00,0x89,0x18,0x09,0x1A,0x08,0x20,0x00,0xF0,0x94,0xFB,0xFE,0xF7,0xE2, \ +0xFB,0x21,0x88,0x0A,0x39,0x40,0x18,0x78,0x61,0xBD,0x74,0xF0,0xBD,0x04,0x05, \ +0x00,0x02,0x3E,0x01,0x00,0x02,0x3C,0x01,0x00,0x02,0xD0,0x00,0x00,0x02,0x80, \ +0xB5,0x01,0x0A,0x07,0x1C,0x0E,0x20,0xFE,0xF7,0x0C,0xFC,0x10,0x20,0x39,0x1C, \ +0xFE,0xF7,0x08,0xFC,0x80,0xBD,0xB0,0xB5,0x82,0xB0,0x14,0x4D,0x01,0x20,0x68, \ +0x63,0x13,0x4F,0x14,0x48,0x00,0x24,0xBC,0x82,0x38,0x82,0xBC,0x80,0x1E,0x20, \ +0x38,0x80,0x02,0x20,0xB8,0x82,0xC2,0x1E,0x00,0x92,0x01,0x22,0x5A,0x21,0x0E, \ +0x48,0x01,0xAB,0x02,0xF0,0xE2,0xFA,0x6C,0x63,0x3C,0x83,0xBC,0x82,0x01,0x98, \ +0xC1,0x09,0x02,0xD3,0x44,0x20,0x02,0xB0,0xB0,0xBD,0x81,0x08,0x05,0xD3,0x00, \ +0x09,0x01,0xD3,0x82,0x20,0xF7,0xE7,0x20,0x1C,0xF5,0xE7,0x42,0x20,0xF3,0xE7, \ +0x00,0x00,0x04,0x05,0x00,0x02,0x20,0x00,0x20,0x0F,0xA0,0x8C,0x00,0x00,0xC8, \ +0x06,0x00,0x02,0xF0,0xB5,0x70,0x4D,0x20,0x23,0xE8,0x69,0x98,0x43,0xE8,0x61, \ +0x1B,0x20,0x01,0x38,0xFD,0xD1,0xE8,0x69,0x20,0x23,0x18,0x43,0x6B,0x4E,0xE8, \ +0x61,0xF4,0x1D,0x69,0x34,0x60,0x7A,0xF7,0x1D,0x79,0x37,0x00,0x28,0x0D,0xD0, \ +0x78,0x68,0xFF,0xF7,0xAA,0xFF,0x39,0x68,0xF0,0x6F,0x00,0x22,0x00,0xF0,0xE1, \ +0xF8,0x00,0x21,0x61,0x72,0x01,0x20,0xFE,0xF7,0xA2,0xFB,0xF0,0xBD,0xB0,0x7A, \ +0x00,0x28,0x19,0xD0,0x5F,0x48,0x60,0x49,0x00,0x68,0x80,0x7D,0x89,0x7A,0x88, \ +0x42,0x00,0xDB,0x08,0x1C,0x5D,0x49,0x40,0x00,0x08,0x5A,0xFF,0xF7,0x8F,0xFF, \ +0x00,0x22,0x10,0x21,0x5B,0x48,0x00,0xF0,0xC6,0xF8,0x01,0x21,0xA1,0x72,0x02, \ +0x20,0xF0,0x72,0x00,0x20,0xFE,0xF7,0x85,0xFB,0xF0,0xBD,0xA0,0x7A,0x00,0x28, \ +0xDE,0xD0,0x38,0x69,0xFF,0xF7,0x7C,0xFF,0xBE,0x68,0x70,0x78,0xC0,0x09,0x4C, \ +0xD3,0x4C,0x48,0x01,0x7B,0x00,0x29,0x01,0xD0,0x00,0x21,0x01,0x73,0x03,0x20, \ 
+0x40,0x06,0x80,0x68,0x40,0x08,0xFA,0xD2,0x06,0x21,0x02,0x20,0xFE,0xF7,0x7B, \ +0xFB,0x04,0x21,0x02,0x20,0xFE,0xF7,0x77,0xFB,0x05,0x21,0x02,0x20,0xFE,0xF7, \ +0x73,0xFB,0x04,0x21,0x02,0x20,0xFE,0xF7,0x6F,0xFB,0x01,0x20,0x80,0x06,0xC0, \ +0x68,0x00,0xF0,0x34,0xFF,0x68,0x68,0xC0,0x0B,0xFC,0xD2,0x40,0x49,0x01,0x20, \ +0x80,0x06,0x41,0x63,0x3F,0x49,0x81,0x63,0x01,0x0B,0x69,0x60,0x69,0x68,0xC9, \ +0x0B,0xFC,0xD2,0x3C,0x49,0x09,0x78,0x02,0x29,0x04,0xD1,0x81,0x21,0x03,0x22, \ +0x52,0x06,0x91,0x60,0x03,0xE0,0x01,0x21,0x03,0x22,0x52,0x06,0x91,0x60,0xB9, \ +0x68,0x18,0x31,0x81,0x63,0xF9,0x68,0x01,0x23,0x1B,0x03,0x20,0x39,0x19,0x43, \ +0x41,0x63,0x32,0x48,0x17,0x23,0x00,0x78,0x9B,0x02,0x18,0x43,0x68,0x60,0x00, \ +0x21,0xA1,0x72,0x37,0xE0,0x2F,0x49,0x08,0x78,0x00,0x28,0x2C,0xD0,0x2E,0x48, \ +0x00,0x78,0x00,0x28,0x01,0xD0,0x18,0x20,0x00,0xE0,0x1E,0x20,0x6A,0x68,0xD2, \ +0x0B,0xFC,0xD2,0x01,0x23,0x5B,0x03,0x03,0x43,0x01,0x22,0x92,0x06,0x53,0x63, \ +0xBB,0x68,0x93,0x63,0x13,0x0B,0x6B,0x60,0x6F,0x68,0xFB,0x0B,0xFC,0xD2,0x23, \ +0x4B,0x1B,0x88,0x18,0x1A,0x01,0x23,0x1B,0x03,0x18,0x43,0x50,0x63,0x21,0x48, \ +0x11,0x23,0x00,0x68,0x9B,0x02,0x90,0x63,0x1A,0x48,0x00,0x78,0x18,0x43,0x68, \ +0x60,0x08,0x78,0x02,0x28,0x09,0xD1,0x00,0x21,0xA1,0x72,0x06,0xE0,0x00,0x21, \ +0xA1,0x72,0xF9,0x68,0xB8,0x68,0x00,0x22,0x00,0xF0,0x2F,0xF8,0x0A,0x48,0x00, \ +0x21,0x41,0x73,0x21,0x70,0x30,0x79,0x40,0x08,0x03,0xD2,0x0F,0x49,0x08,0x78, \ +0x01,0x28,0x03,0xD1,0x01,0x20,0xFE,0xF7,0xE7,0xFA,0xF0,0xBD,0x00,0x20,0xFE, \ +0xF7,0xE3,0xFA,0xF0,0xBD,0x40,0x00,0x00,0x04,0x04,0x05,0x00,0x02,0x04,0x00, \ +0x00,0x02,0x14,0x01,0x00,0x02,0x34,0x01,0x00,0x02,0xF8,0x07,0x00,0x02,0x1C, \ +0x20,0x00,0x00,0x90,0x08,0x00,0x02,0x70,0x00,0x00,0x02,0x70,0x01,0x00,0x02, \ +0x61,0x01,0x00,0x02,0x62,0x01,0x00,0x02,0x6A,0x01,0x00,0x02,0x64,0x01,0x00, \ +0x02,0x90,0xB4,0x08,0x4F,0x7C,0x68,0xE3,0x0B,0xFC,0xD2,0x0A,0x43,0x01,0x21, \ +0x89,0x06,0x4A,0x63,0x88,0x63,0x04,0x48,0x11,0x23,0x00,0x78,0x9B,0x02,0x18, \ +0x43,0x78,0x60,0x90,0xBC,0xF7,0x46,0x40,0x00,0x00,0x04,0x70,0x01,0x00,0x02, \ +0xF0,0xB5,0x56,0x4D,0x00,0x20,0x6A,0x7A,0x00,0x2A,0x19,0xD1,0x2A,0x7A,0x00, \ +0x2A,0x16,0xD0,0xAA,0x7C,0x01,0x2A,0x13,0xD1,0xAA,0x7B,0x01,0x2A,0x10,0xD1, \ +0x6A,0x7B,0xEF,0x1D,0x69,0x37,0x01,0x24,0x04,0x2A,0x0B,0xD1,0x28,0x74,0x68, \ +0x73,0xB8,0x72,0x06,0x1C,0x00,0x22,0x40,0x21,0x4A,0x48,0xEC,0x72,0x02,0xF0, \ +0x99,0xF8,0x30,0x1C,0xF0,0xBD,0x0F,0x26,0x36,0x06,0xB2,0x88,0x46,0x4B,0x1A, \ +0x40,0xB2,0x80,0xB2,0x89,0x45,0x4B,0x1A,0x40,0xB2,0x81,0x6A,0x7B,0x02,0x23, \ +0x13,0x40,0x43,0x4A,0x05,0xD1,0x3B,0x78,0x00,0x2B,0x02,0xD1,0xAB,0x6B,0x00, \ +0x2B,0x05,0xD0,0xD1,0x79,0x40,0x4B,0x19,0x70,0x78,0x72,0xBC,0x72,0x1F,0xE0, \ +0xAB,0x7A,0x00,0x2B,0x0D,0xD0,0x3D,0x4B,0x19,0x68,0x89,0x7D,0x93,0x7A,0x99, \ +0x42,0x02,0xDA,0x39,0x4B,0x19,0x70,0x01,0xE0,0x37,0x49,0x0B,0x70,0x78,0x72, \ +0xBC,0x72,0x0E,0xE0,0xEB,0x1D,0x79,0x33,0x9B,0x69,0x01,0x2B,0x02,0xD1,0x33, \ +0x4B,0x1B,0x68,0x19,0x61,0x78,0x72,0x31,0x4B,0xBC,0x72,0x18,0x68,0x2F,0x49, \ +0x80,0x7D,0x08,0x70,0x2C,0x4A,0x50,0x7A,0x01,0x28,0x08,0xD1,0x2B,0x49,0x08, \ +0x78,0x00,0x28,0x04,0xDD,0x38,0x21,0x08,0x20,0xFE,0xF7,0x58,0xFA,0x03,0xE0, \ +0x80,0x21,0x08,0x20,0xFE,0xF7,0x53,0xFA,0x00,0xF0,0x5D,0xF9,0xA8,0x7A,0x00, \ +0x28,0x03,0xD0,0x00,0x20,0xFE,0xF7,0xCF,0xFB,0x0A,0xE0,0x20,0x49,0x08,0x78, \ +0x03,0x28,0x03,0xD1,0xA8,0x6A,0xFE,0xF7,0xC7,0xFB,0x02,0xE0,0x00,0x20,0xFE, \ +0xF7,0xC3,0xFB,0x01,0x21,0x89,0x06,0x48,0x6A,0x02,0x22,0xEA,0x72,0xCA,0x0A, \ +0x1A,0x4B,0x0A,0x30,0x5A,0x60,0x08,0x62,0x6A,0x7B,0x92,0x08,0x02,0xD2,0x3A, \ 
+0x78,0x00,0x2A,0x11,0xD0,0x17,0x4F,0x17,0x4D,0x3F,0x88,0x8B,0x6A,0x49,0x6A, \ +0xC0,0x19,0x0F,0x4F,0x12,0x4A,0xFF,0x79,0xBF,0x00,0xEF,0x59,0xC0,0x19,0x10, \ +0x60,0x88,0x42,0x00,0xD2,0x01,0x33,0x53,0x60,0x10,0x1D,0xB0,0x89,0x0F,0x4B, \ +0x18,0x43,0xB0,0x81,0xB0,0x88,0x0E,0x4B,0x18,0x43,0xB0,0x80,0x20,0x1C,0xF0, \ +0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0xC8,0x06,0x00,0x02,0x17,0x17,0xFF,0xFF, \ +0xEC,0xEC,0xFF,0xFF,0x14,0x01,0x00,0x02,0x45,0x01,0x00,0x02,0x04,0x00,0x00, \ +0x02,0x40,0x00,0x00,0x04,0x60,0x07,0x00,0x02,0x40,0x01,0x00,0x02,0x50,0x08, \ +0x00,0x02,0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0xF8,0xB5,0x46,0x49,0x06, \ +0x1C,0x08,0x68,0x45,0x49,0x82,0x8A,0x45,0x68,0x49,0x89,0x00,0x27,0x44,0x4C, \ +0x8A,0x42,0x53,0xDD,0x00,0x2E,0x51,0xD1,0x42,0x4E,0xB4,0x21,0x31,0x70,0x42, \ +0x49,0x80,0x7D,0x89,0x7A,0x88,0x42,0x00,0xDB,0x08,0x1C,0x40,0x49,0x40,0x4A, \ +0xC9,0x88,0x4B,0x00,0x59,0x18,0x40,0x00,0x10,0x5A,0x40,0x00,0x08,0x18,0x3D, \ +0x49,0x06,0x22,0x09,0x88,0x40,0x18,0x3C,0x49,0x09,0x69,0x40,0x18,0x70,0x80, \ +0x28,0x1D,0x31,0x1D,0xFE,0xF7,0x80,0xFB,0xE8,0x1D,0x03,0x30,0x06,0x22,0xF1, \ +0x1D,0x03,0x31,0xFE,0xF7,0x79,0xFB,0x01,0x20,0x35,0x49,0x00,0x25,0x88,0x72, \ +0xA5,0x72,0xFF,0xF7,0x42,0xFD,0xFF,0xF7,0xB5,0xFD,0x00,0x90,0x00,0x98,0x00, \ +0x28,0x1A,0xD1,0x26,0x4F,0x2A,0x49,0x38,0x68,0x80,0x7D,0x89,0x7A,0x88,0x42, \ +0x00,0xDB,0x08,0x1C,0x28,0x4A,0x27,0x49,0x40,0x00,0x10,0x5A,0xC9,0x88,0x40, \ +0x18,0x29,0x49,0x09,0x88,0x41,0x18,0x01,0x20,0x00,0xF0,0x16,0xF9,0x25,0x49, \ +0x03,0x20,0x48,0x60,0x38,0x68,0x01,0x27,0x45,0x73,0x2F,0xE0,0x19,0x49,0x04, \ +0x20,0x09,0x68,0x48,0x73,0x2A,0xE0,0x01,0x20,0xA0,0x72,0x1E,0x4D,0xFF,0xF7, \ +0x17,0xFD,0xFF,0xF7,0x8A,0xFD,0x00,0x90,0x00,0x98,0x00,0x28,0x1B,0xD1,0x11, \ +0x49,0x00,0x2E,0x0A,0x68,0x50,0x73,0x1A,0xD1,0x08,0x68,0x12,0x49,0x80,0x7D, \ +0x89,0x7A,0x88,0x42,0x00,0xDB,0x08,0x1C,0x10,0x49,0x10,0x4A,0x40,0x00,0x10, \ +0x5A,0xC9,0x88,0x08,0x18,0x12,0x49,0x09,0x88,0x41,0x18,0x02,0x20,0x68,0x60, \ +0x01,0x20,0x00,0xF0,0xE5,0xF8,0x01,0x27,0x03,0xE0,0x03,0x49,0x04,0x20,0x09, \ +0x68,0x48,0x73,0x38,0x06,0x00,0x0E,0xF8,0xBD,0x04,0x00,0x00,0x02,0xD0,0x00, \ +0x00,0x02,0x74,0x05,0x00,0x02,0xF8,0x07,0x00,0x02,0x14,0x01,0x00,0x02,0x18, \ +0x00,0x00,0x02,0x2C,0x01,0x00,0x02,0x40,0x01,0x00,0x02,0x84,0x05,0x00,0x02, \ +0x04,0x05,0x00,0x02,0x42,0x01,0x00,0x02,0x90,0xB5,0x04,0x31,0xCF,0x00,0x01, \ +0x28,0x17,0xD0,0x02,0x28,0x17,0xD0,0x03,0x28,0x27,0xD1,0x0B,0x20,0x39,0x1C, \ +0x02,0xF0,0x61,0xFC,0x0C,0x1C,0x79,0x1A,0x0B,0x20,0x02,0xF0,0x5C,0xFC,0x07, \ +0x1C,0x00,0x21,0x00,0x2C,0x0F,0x48,0x18,0xD9,0x01,0x37,0x04,0x2C,0x13,0xD2, \ +0x01,0x21,0x81,0x62,0x13,0xE0,0x7F,0x08,0x11,0xE0,0x79,0x00,0x0B,0x20,0x0F, \ +0x1C,0x02,0xF0,0x4A,0xFC,0x0C,0x1C,0x79,0x1A,0x0B,0x20,0x02,0xF0,0x45,0xFC, \ +0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37,0x02,0xE0,0x81,0x62,0x00,0xE0,0x81, \ +0x62,0x38,0x1C,0x90,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0x10,0x48,0x01,0x88, \ +0x10,0x48,0xCA,0x1D,0x69,0x32,0x02,0x80,0xCA,0x1D,0x31,0x32,0x42,0x80,0xCA, \ +0x1D,0x0E,0x32,0x82,0x80,0x0B,0x31,0xC1,0x80,0x0B,0x48,0xA0,0x21,0x01,0x80, \ +0x50,0x21,0x41,0x80,0x1E,0x21,0x81,0x80,0x0F,0x21,0xC1,0x80,0x08,0x48,0xC0, \ +0x21,0x01,0x60,0x60,0x21,0x41,0x60,0x23,0x21,0x81,0x60,0x12,0x21,0xC1,0x60, \ +0xF7,0x46,0x00,0x00,0x40,0x01,0x00,0x02,0x2C,0x01,0x00,0x02,0x34,0x01,0x00, \ +0x02,0x50,0x08,0x00,0x02,0x00,0xB5,0x08,0x49,0x08,0x48,0x0A,0x78,0x03,0x78, \ +0x9A,0x42,0x08,0xD0,0x09,0x78,0x01,0x70,0x00,0x78,0x05,0x49,0x08,0x5C,0x05, \ +0x49,0x08,0x70,0xFE,0xF7,0x3A,0xFA,0x00,0xBD,0x00,0x00,0x45,0x01,0x00,0x02, \ 
+0x44,0x01,0x00,0x02,0x6C,0x01,0x00,0x02,0x70,0x01,0x00,0x02,0x07,0x48,0x01, \ +0x6C,0x01,0x31,0x01,0x64,0xC1,0x6B,0x49,0x00,0x01,0x31,0xC1,0x63,0x04,0x49, \ +0xC2,0x6B,0xC9,0x89,0x8A,0x42,0x00,0xD9,0xC1,0x63,0xF7,0x46,0x00,0x00,0x04, \ +0x05,0x00,0x02,0xD0,0x00,0x00,0x02,0x80,0xB5,0x02,0xF0,0xF5,0xFB,0x04,0x4F, \ +0x01,0x1C,0xF8,0x6B,0x02,0xF0,0xD8,0xFB,0x79,0x65,0x02,0x20,0xF8,0x62,0x80, \ +0xBD,0x04,0x05,0x00,0x02,0xB0,0xB5,0x01,0x20,0x80,0x06,0x81,0x6A,0x44,0x6A, \ +0x11,0x48,0x00,0x88,0x87,0x02,0x00,0x29,0x13,0xD9,0x38,0x1C,0x02,0xF0,0xC5, \ +0xFB,0x0D,0x1C,0x00,0x21,0xC9,0x43,0x38,0x1C,0x02,0xF0,0xBF,0xFB,0x48,0x1C, \ +0x45,0x43,0x38,0x1C,0x21,0x1C,0x02,0xF0,0xB9,0xFB,0x69,0x18,0x38,0x1C,0x02, \ +0xF0,0xB5,0xFB,0x03,0xE0,0x38,0x1C,0x21,0x1C,0x02,0xF0,0xB0,0xFB,0x79,0x1A, \ +0x06,0x20,0x00,0xF0,0x04,0xF8,0xB0,0xBD,0x00,0x00,0x98,0x00,0x00,0x02,0x90, \ +0xB5,0x0C,0x1C,0x07,0x1C,0x00,0xF0,0x15,0xF8,0x01,0x20,0x80,0x06,0x40,0x6A, \ +0x06,0x4B,0x20,0x18,0xB9,0x00,0xC9,0x18,0xC8,0x62,0x01,0x21,0x78,0x1E,0x81, \ +0x40,0x03,0x48,0x82,0x69,0x11,0x43,0x81,0x61,0x90,0xBD,0x00,0x00,0x40,0x00, \ +0x00,0x04,0x80,0x00,0x00,0x04,0x80,0xB4,0x47,0x1E,0x01,0x20,0x04,0x49,0xB8, \ +0x40,0x8A,0x69,0xC0,0x43,0x10,0x40,0x88,0x61,0x80,0xBC,0xF7,0x46,0x00,0x00, \ +0x80,0x00,0x00,0x04,0xF0,0xB5,0x84,0xB0,0x0F,0x20,0x00,0x06,0x00,0x88,0xBF, \ +0x4D,0x00,0x27,0xEC,0x1D,0x79,0x34,0x03,0x90,0xE0,0x69,0x00,0x28,0x28,0xD0, \ +0x01,0x28,0x1B,0xD1,0x03,0x98,0xBA,0x4B,0x18,0x40,0x73,0xD0,0x18,0x05,0xC0, \ +0x68,0x00,0x90,0x00,0x98,0x80,0x08,0x01,0xD3,0x04,0xF0,0x6B,0xFD,0x00,0x98, \ +0x80,0x09,0x69,0xD3,0x00,0x98,0x04,0x21,0x01,0x40,0xB3,0x48,0x03,0xD0,0x41, \ +0x68,0x01,0x31,0x41,0x60,0x79,0xE0,0x01,0x68,0x01,0x31,0x01,0x60,0x75,0xE0, \ +0x02,0x28,0x73,0xD0,0x03,0x28,0x06,0xD1,0x03,0x98,0xAC,0x4B,0x18,0x40,0x53, \ +0xD0,0x04,0xF0,0x65,0xFE,0xF5,0xE0,0x03,0x98,0xAA,0x4B,0x18,0x40,0x0E,0xD0, \ +0xA9,0x48,0x00,0x68,0x02,0x90,0x02,0x98,0x80,0x08,0x02,0xD2,0x02,0x98,0x40, \ +0x08,0x05,0xD3,0x02,0x98,0xC0,0x08,0x01,0xD3,0x02,0x27,0x00,0xE0,0x01,0x27, \ +0x03,0x98,0x9F,0x4B,0xA2,0x4E,0x18,0x40,0x53,0xD0,0xD8,0x04,0xC1,0x6B,0x01, \ +0x91,0x01,0x99,0x89,0x09,0x12,0xD3,0xFF,0x21,0xF5,0x31,0x02,0x69,0x92,0x08, \ +0x03,0xD3,0x0A,0x1C,0x01,0x39,0x00,0x2A,0xF8,0xD8,0x68,0x7A,0x00,0x28,0x01, \ +0xD0,0x00,0xF0,0x4C,0xFA,0xFF,0xF7,0x2E,0xFC,0x01,0x27,0x01,0x20,0x68,0x66, \ +0x01,0x98,0x12,0x23,0x18,0x40,0x36,0xD0,0x00,0x20,0x68,0x66,0x92,0x48,0x40, \ +0x7A,0x01,0x28,0x03,0xD1,0x38,0x21,0x08,0x20,0xFD,0xF7,0xF1,0xFF,0x68,0x7A, \ +0x00,0x28,0x05,0xD0,0x01,0x20,0xFD,0xF7,0xDB,0xFF,0x00,0x20,0x68,0x72,0x70, \ +0x72,0x01,0x98,0x80,0x08,0x02,0xD3,0x00,0x2F,0x00,0xD1,0x02,0x27,0xE8,0x7A, \ +0x02,0x28,0x01,0xE0,0xA3,0xE0,0xA2,0xE0,0x17,0xD1,0x01,0x98,0x40,0x09,0x02, \ +0xD3,0x01,0x20,0xFD,0xF7,0xC6,0xFF,0x01,0x98,0x80,0x08,0x01,0xD3,0xFF,0xF7, \ +0x0B,0xFF,0x00,0x20,0xA8,0x72,0x68,0x73,0x30,0x70,0x01,0x20,0xE8,0x72,0x01, \ +0x99,0x7B,0x48,0x00,0x22,0x01,0xF0,0xBE,0xFD,0x00,0xE0,0x89,0xE0,0x03,0x98, \ +0x71,0x4B,0x18,0x40,0x73,0xD0,0x18,0x05,0xC0,0x68,0x00,0x90,0x00,0x98,0x40, \ +0x09,0x24,0xD3,0x28,0x7B,0x03,0x28,0x21,0xD1,0x04,0x20,0x28,0x73,0x72,0x48, \ +0x00,0x68,0x41,0x68,0x49,0x78,0xC9,0x09,0x19,0xD3,0x81,0x7B,0x03,0x29,0x16, \ +0xD1,0x80,0x89,0x64,0x28,0x13,0xDA,0x68,0x48,0xC1,0x69,0x83,0x01,0x19,0x43, \ +0xC1,0x61,0xC1,0x69,0x6A,0x4B,0x19,0x40,0xC1,0x61,0x05,0x21,0x02,0x20,0xFD, \ +0xF7,0x9C,0xFF,0x04,0x21,0x02,0x20,0xFD,0xF7,0x98,0xFF,0x01,0x20,0x80,0x06, \ +0xC0,0x68,0x00,0x98,0x80,0x08,0x0E,0xD3,0x01,0x20,0xA8,0x73,0x00,0x20,0x28, \ 
+0x73,0xB0,0x72,0x68,0x72,0x70,0x72,0x20,0x62,0x28,0x63,0x00,0x98,0xFE,0xF7, \ +0xB6,0xFB,0x00,0x99,0x08,0x43,0x00,0x90,0x00,0x98,0x80,0x09,0x0F,0xD3,0x01, \ +0x20,0xA8,0x73,0x28,0x63,0x00,0x99,0x02,0x27,0xC9,0x08,0x01,0xD3,0x20,0x62, \ +0x01,0xE0,0x00,0x20,0x20,0x62,0x4E,0x48,0x53,0x4B,0x81,0x6A,0x19,0x43,0x81, \ +0x62,0x29,0x7B,0x28,0x1C,0x04,0x29,0x28,0xD1,0x01,0x6B,0x00,0x29,0x1C,0xD0, \ +0x41,0x6E,0x00,0x29,0x02,0xD1,0x00,0x2F,0x00,0xD1,0x02,0x27,0x00,0x21,0x01, \ +0x73,0x01,0x63,0x22,0x6A,0x01,0x2A,0x12,0xD1,0x02,0x68,0x00,0x2A,0x0F,0xD1, \ +0x44,0x48,0x02,0x68,0x51,0x72,0x46,0x49,0x0A,0x68,0x4A,0x60,0x01,0x21,0x89, \ +0x06,0xC9,0x6A,0x00,0x68,0x41,0x61,0xFE,0xF7,0xD1,0xFA,0x00,0xE0,0x07,0xE0, \ +0x06,0xE0,0x41,0x72,0x71,0x72,0xC1,0x21,0x01,0x60,0x3A,0x48,0x00,0x68,0x41, \ +0x72,0x01,0x2F,0x02,0xD1,0x00,0xF0,0x43,0xF9,0x03,0xE0,0x02,0x2F,0x01,0xD1, \ +0x00,0xF0,0x6C,0xF9,0x03,0x98,0x38,0x4B,0x18,0x40,0x52,0xD0,0x0D,0x25,0x2D, \ +0x06,0x2F,0x89,0x40,0x20,0x35,0x4E,0x38,0x40,0x08,0xD0,0x30,0x7A,0x00,0x28, \ +0xFC,0xD1,0x33,0x48,0x00,0x7B,0x40,0x08,0x01,0xD3,0x00,0xF0,0x54,0xFC,0x78, \ +0x0A,0x17,0xD3,0xF8,0x43,0xFF,0x23,0x01,0x33,0x18,0x43,0x28,0x81,0x28,0x7B, \ +0x00,0x09,0xFC,0xD2,0x30,0x7A,0x00,0x28,0xFC,0xD1,0x00,0xF0,0x1D,0xFC,0x2A, \ +0x48,0x01,0x68,0x02,0x29,0x02,0xD0,0x01,0x21,0x01,0x60,0x01,0xE0,0x00,0x21, \ +0x01,0x60,0x00,0xF0,0xC2,0xFB,0x25,0x49,0x08,0x68,0x01,0x28,0x1F,0xD1,0xB8, \ +0x08,0x1D,0xD3,0x23,0x4B,0x00,0x22,0x18,0x7A,0x1A,0x72,0xFF,0x43,0x02,0x23, \ +0x3B,0x43,0x2B,0x81,0x09,0x68,0x01,0x29,0x12,0xD1,0x40,0x08,0x10,0xD3,0x28, \ +0x78,0x20,0x23,0x18,0x43,0x07,0x21,0x49,0x06,0x28,0x70,0x8A,0x61,0x00,0x20, \ +0x7D,0x22,0x12,0x01,0x01,0x30,0x90,0x42,0xFC,0xD3,0xFF,0x20,0x48,0x61,0xFF, \ +0xE7,0xFE,0xE7,0xE0,0x69,0x00,0x28,0x01,0xD0,0x04,0xB0,0xF0,0xBD,0xFC,0xE7, \ +0x04,0x05,0x00,0x02,0x40,0x40,0x00,0x00,0xA0,0x02,0x00,0x02,0x80,0x80,0x00, \ +0x00,0x20,0x20,0x00,0x00,0x40,0x00,0x00,0x04,0x74,0x05,0x00,0x02,0x14,0x01, \ +0x00,0x02,0xC8,0x06,0x00,0x02,0x10,0x00,0x00,0x02,0xFF,0xEF,0x00,0x00,0x00, \ +0xFF,0x3F,0x00,0x9C,0x01,0x00,0x02,0x08,0x08,0x00,0x00,0x20,0x00,0x00,0x0D, \ +0xD0,0x03,0x00,0x0D,0x44,0x02,0x00,0x02,0x78,0x01,0x00,0x02,0xE0,0x03,0x00, \ +0x0D,0xF0,0xB5,0x0F,0x20,0x00,0x06,0x06,0x89,0x52,0x48,0x53,0x4F,0x30,0x40, \ +0x73,0xD0,0x52,0x48,0xC4,0x69,0x60,0x08,0x28,0xD3,0xFD,0x1F,0x69,0x3D,0x69, \ +0x68,0x50,0x48,0x02,0x29,0x03,0xD1,0x00,0x68,0x01,0x21,0x41,0x73,0x05,0xE0, \ +0x69,0x68,0x03,0x29,0x02,0xD1,0x00,0x68,0x02,0x21,0x41,0x73,0x01,0x20,0xE8, \ +0x72,0x00,0x21,0xB9,0x72,0xA9,0x72,0x68,0x60,0xFD,0xF7,0x88,0xFE,0x68,0x6B, \ +0x01,0x28,0x05,0xD1,0x00,0x22,0x10,0x21,0x44,0x48,0x01,0xF0,0x88,0xFC,0x07, \ +0xE0,0x68,0x6B,0x02,0x28,0x04,0xD1,0x00,0x22,0x10,0x21,0x40,0x48,0x01,0xF0, \ +0x7F,0xFC,0xE0,0x09,0x09,0xD3,0x3F,0x48,0x81,0x7C,0x05,0x29,0x05,0xD1,0x01, \ +0x21,0x81,0x74,0x00,0x21,0xC1,0x65,0xFF,0xF7,0x15,0xFA,0x20,0x0A,0x0C,0xD3, \ +0x39,0x48,0x81,0x7C,0x04,0x29,0x08,0xD1,0x01,0x21,0x81,0x74,0x00,0x21,0x41, \ +0x65,0xC0,0x7A,0x03,0x28,0x01,0xD1,0xFF,0xF7,0xAC,0xFB,0x04,0x20,0x20,0x40, \ +0x32,0x4D,0x0D,0xD0,0xF8,0x7A,0x01,0x28,0x0A,0xD1,0x04,0x20,0xFF,0xF7,0xEE, \ +0xFD,0x02,0x21,0xF9,0x72,0x00,0x21,0x29,0x73,0x01,0x21,0x03,0x20,0x00,0xF0, \ +0x14,0xFA,0x20,0x09,0x05,0xD3,0xF8,0x7A,0x01,0x28,0x02,0xD1,0x28,0x73,0x04, \ +0xF0,0x90,0xFE,0x10,0x25,0x25,0x40,0x08,0xD0,0xF8,0x7A,0x03,0x28,0x05,0xD1, \ +0x00,0x21,0xF9,0x72,0x07,0x21,0x04,0x20,0x00,0xF0,0x00,0xFA,0x00,0x2D,0x09, \ +0xD0,0xF8,0x7A,0x04,0x28,0x06,0xD1,0x00,0xE0,0x12,0xE0,0x00,0x21,0xF9,0x72, \ 
+0x07,0x21,0x00,0xF0,0xF4,0xF9,0xA0,0x09,0x0B,0xD3,0x1A,0x48,0x80,0x78,0x00, \ +0x28,0x07,0xD0,0x79,0x78,0x16,0x48,0x04,0x29,0x03,0xD0,0x01,0x21,0x01,0x74, \ +0xFF,0xF7,0x74,0xFD,0xFF,0x20,0x02,0x30,0x30,0x40,0x14,0xD0,0x01,0x20,0x10, \ +0x4D,0x00,0x24,0xE8,0x72,0xBC,0x72,0xAC,0x72,0x68,0x60,0xFD,0xF7,0x0F,0xFE, \ +0x0F,0x48,0x00,0x22,0x04,0x83,0x84,0x82,0x0E,0x48,0x81,0x68,0x01,0x31,0x81, \ +0x60,0x10,0x21,0x05,0x48,0x01,0xF0,0x0B,0xFC,0xF0,0xBD,0x10,0x10,0x00,0x00, \ +0x74,0x05,0x00,0x02,0x80,0x00,0x00,0x04,0x04,0x00,0x00,0x02,0xC8,0x06,0x00, \ +0x02,0xE8,0x06,0x00,0x02,0x04,0x05,0x00,0x02,0x64,0x05,0x00,0x02,0x14,0x01, \ +0x00,0x02,0x20,0x00,0x20,0x0F,0x9C,0x01,0x00,0x02,0x80,0xB5,0x15,0x4F,0x00, \ +0x20,0x38,0x72,0xB9,0x7C,0x02,0x20,0x01,0x29,0x1C,0xD0,0x04,0x29,0x19,0xD1, \ +0xB8,0x74,0x08,0x20,0xFF,0xF7,0x7A,0xFD,0xFD,0xF7,0xAE,0xFD,0x79,0x69,0x41, \ +0x1A,0x00,0x29,0x0F,0xDD,0x14,0x20,0x02,0xF0,0x51,0xF9,0xF9,0x6B,0x81,0x42, \ +0x02,0xD3,0x79,0x6D,0x81,0x42,0x09,0xD2,0x02,0xF0,0x0F,0xF9,0x01,0x1C,0xF8, \ +0x6B,0x02,0xF0,0xF3,0xF8,0x79,0x65,0x80,0xBD,0xB8,0x74,0x80,0xBD,0x79,0x6D, \ +0x08,0x1A,0x78,0x65,0x80,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0x00,0xB5,0x09, \ +0x48,0x01,0x21,0x01,0x72,0x81,0x7B,0x01,0x29,0x0A,0xD1,0x01,0x7B,0x00,0x29, \ +0x07,0xD1,0x41,0x7A,0x00,0x29,0x04,0xD1,0x80,0x7C,0x05,0x28,0x01,0xD0,0xFF, \ +0xF7,0x56,0xF9,0x00,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0xF0,0xB5,0x28,0x4D, \ +0x28,0x48,0xEC,0x1D,0x69,0x34,0xA1,0x78,0x80,0x7A,0x27,0x4F,0x81,0x42,0x02, \ +0xDA,0xA0,0x78,0x38,0x70,0x00,0xE0,0x38,0x70,0xFF,0xF7,0xB4,0xFC,0x38,0x78, \ +0x03,0x28,0x03,0xD1,0x01,0x20,0xFD,0xF7,0x26,0xFF,0x02,0xE0,0x00,0x20,0xFD, \ +0xF7,0x22,0xFF,0x6A,0x7A,0x28,0x1C,0x00,0x21,0x01,0x2A,0x1C,0x4E,0x1B,0xD0, \ +0x02,0x2A,0x1E,0xD1,0x1B,0x4B,0xC4,0x22,0x1A,0x70,0x1B,0x4A,0xD3,0x88,0x3A, \ +0x78,0x52,0x00,0xB2,0x5A,0x9D,0x18,0x17,0x4A,0x52,0x88,0x95,0x42,0x07,0xDA, \ +0x3D,0x78,0x6D,0x00,0x75,0x5B,0x5B,0x19,0x13,0x4D,0xD2,0x1A,0x6A,0x80,0x01, \ +0xE0,0x11,0x4D,0x69,0x80,0x10,0x4D,0x69,0x70,0x04,0xE0,0x0E,0x4B,0xD4,0x22, \ +0x59,0x80,0x1A,0x70,0x59,0x70,0x0A,0x21,0xC2,0x1D,0x79,0x32,0x11,0x60,0x39, \ +0x78,0x0B,0x4B,0x49,0x00,0x71,0x5A,0x1B,0x88,0xC9,0x1A,0x07,0x4B,0x51,0x60, \ +0xC3,0x67,0x01,0x20,0x60,0x72,0xF0,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0x14, \ +0x01,0x00,0x02,0x45,0x01,0x00,0x02,0x2C,0x01,0x00,0x02,0xE0,0x07,0x00,0x02, \ +0x18,0x00,0x00,0x02,0x40,0x01,0x00,0x02,0xB0,0xB4,0x0A,0x4B,0x00,0x24,0x99, \ +0x42,0x01,0xD8,0x00,0x29,0x02,0xD1,0x20,0x1C,0xB0,0xBC,0xF7,0x46,0x01,0x27, \ +0xBF,0x06,0x3D,0x69,0xAB,0x08,0x01,0xD3,0x20,0x1C,0xF6,0xE7,0x03,0xC7,0x08, \ +0x3F,0x3A,0x61,0x01,0x20,0xF1,0xE7,0x0E,0x06,0x00,0x00,0xF0,0xB5,0x82,0xB0, \ +0x25,0x4C,0x24,0x4D,0xE7,0x1D,0x09,0x37,0x00,0x22,0x00,0x92,0x01,0x22,0x01, \ +0x21,0x28,0x1C,0x01,0xAB,0x01,0xF0,0x24,0xFC,0x01,0x98,0x40,0x08,0x03,0xD3, \ +0x00,0x20,0x1E,0x4E,0x01,0x90,0xF0,0x73,0x1D,0x4E,0xF0,0x7B,0x00,0x28,0x2D, \ +0xD1,0xF8,0x78,0x00,0x28,0x01,0xD0,0x00,0xF0,0x38,0xF8,0x20,0x7B,0x00,0x28, \ +0x02,0xD0,0x00,0xF0,0x91,0xF8,0x22,0xE0,0xA0,0x7B,0x00,0x28,0x02,0xD0,0x00, \ +0xF0,0x79,0xF8,0x1C,0xE0,0x30,0x7C,0x00,0x28,0x02,0xD0,0x00,0xF0,0x63,0xF8, \ +0x16,0xE0,0xF8,0x7A,0x05,0x28,0x13,0xD1,0x0F,0x48,0x00,0x78,0x40,0x09,0x07, \ +0xD3,0x0E,0x4E,0x30,0x68,0x00,0x7B,0xC0,0x09,0x02,0xD3,0x00,0xF0,0x2A,0xF8, \ +0x07,0xE0,0x0A,0x4E,0x30,0x68,0x01,0x7B,0x10,0x29,0x02,0xD1,0x00,0xF0,0x96, \ +0xF9,0x30,0x60,0x00,0xF0,0xBD,0xFC,0x00,0xF0,0x7B,0xF8,0xB9,0xE7,0x28,0x07, \ +0x00,0x02,0x64,0x05,0x00,0x02,0x04,0x05,0x00,0x02,0x63,0x01,0x00,0x02,0x04, \ 
+0x00,0x00,0x02,0x00,0xB5,0x06,0x48,0x00,0x21,0xC2,0x1D,0x69,0x32,0xD1,0x70, \ +0x01,0x21,0xC1,0x73,0x00,0x22,0x09,0x05,0x02,0x48,0x01,0xF0,0xD5,0xFA,0x00, \ +0xBD,0x04,0x05,0x00,0x02,0x08,0x07,0x00,0x02,0x90,0xB5,0x10,0x4C,0x01,0x20, \ +0x10,0x4F,0xE0,0x73,0x38,0x68,0x00,0xF0,0xC0,0xFE,0x38,0x68,0x40,0x68,0x01, \ +0x78,0x08,0x29,0x0A,0xD1,0xE1,0x1D,0x69,0x31,0x49,0x78,0x01,0x29,0x05,0xD1, \ +0xC1,0x1D,0x09,0x31,0x06,0x22,0x08,0x48,0xFD,0xF7,0x74,0xFE,0x39,0x68,0x80, \ +0x20,0x08,0x73,0x00,0x22,0x01,0x21,0x09,0x03,0x04,0x48,0x01,0xF0,0xAF,0xFA, \ +0x90,0xBD,0x04,0x05,0x00,0x02,0x04,0x00,0x00,0x02,0x0C,0x01,0x00,0x02,0x08, \ +0x07,0x00,0x02,0x00,0xB5,0x05,0x48,0x00,0x21,0x01,0x74,0x01,0x21,0xC1,0x73, \ +0x00,0x22,0x09,0x07,0x02,0x48,0x01,0xF0,0x9B,0xFA,0x00,0xBD,0x04,0x05,0x00, \ +0x02,0x08,0x07,0x00,0x02,0x00,0xB5,0x06,0x48,0x00,0x21,0xC2,0x1D,0x59,0x32, \ +0x91,0x73,0x01,0x21,0xC1,0x73,0x00,0x22,0x10,0x21,0x02,0x48,0x01,0xF0,0x89, \ +0xFA,0x00,0xBD,0x04,0x05,0x00,0x02,0x08,0x07,0x00,0x02,0x80,0xB5,0x06,0x49, \ +0x00,0x20,0x06,0x4F,0x08,0x73,0xF8,0x7C,0xFD,0xF7,0x40,0xFD,0xF8,0x8A,0x81, \ +0x02,0x04,0x20,0xFF,0xF7,0xF1,0xFB,0x80,0xBD,0x64,0x05,0x00,0x02,0xD0,0x00, \ +0x00,0x02,0x00,0xB5,0x18,0x48,0x01,0x78,0x00,0x29,0x13,0xD0,0x41,0x78,0x00, \ +0x29,0x10,0xD1,0x01,0x78,0x0D,0x29,0x20,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00, \ +0x9F,0x44,0x00,0x1C,0x1C,0x07,0x1C,0x0A,0x0D,0x13,0x1C,0x1C,0x1C,0x1C,0x10, \ +0x16,0x19,0x00,0x03,0xF0,0x95,0xF9,0x00,0xBD,0x03,0xF0,0xE4,0xF9,0x00,0xBD, \ +0x03,0xF0,0x5B,0xFA,0x00,0xBD,0x03,0xF0,0x9C,0xFF,0x00,0xBD,0x03,0xF0,0xC7, \ +0xFA,0x00,0xBD,0x03,0xF0,0x66,0xF9,0x00,0xBD,0x03,0xF0,0xFB,0xFB,0x00,0xBD, \ +0x00,0x78,0x02,0x21,0x00,0xF0,0x04,0xF8,0x00,0xBD,0x00,0x00,0xAC,0x08,0x00, \ +0x02,0x04,0x4A,0x10,0x60,0x04,0x48,0x01,0x60,0x04,0x49,0x00,0x20,0x08,0x70, \ +0x48,0x70,0xF7,0x46,0x00,0x00,0x34,0x02,0x00,0x02,0x38,0x02,0x00,0x02,0xAC, \ +0x08,0x00,0x02,0xF0,0xB5,0x3B,0x48,0x87,0x68,0xFD,0xF7,0xF3,0xFB,0x02,0x02, \ +0x39,0x4D,0x12,0x0A,0x39,0x49,0x2A,0x60,0x4B,0x78,0x39,0x48,0x03,0x70,0xCB, \ +0x1D,0x39,0x33,0x1B,0x78,0x03,0x24,0x64,0x06,0x01,0x2B,0x17,0xD1,0x06,0x78, \ +0x0D,0x23,0x73,0x43,0x5B,0x18,0x1B,0x7B,0x1B,0x06,0x1A,0x43,0x22,0x60,0x02, \ +0x78,0x0D,0x23,0x5A,0x43,0x51,0x18,0x8B,0x7B,0x4A,0x7B,0x1B,0x02,0x1A,0x43, \ +0xCB,0x7B,0x09,0x7C,0x1B,0x04,0x1A,0x43,0x09,0x06,0x11,0x43,0x61,0x60,0x36, \ +0xE0,0x02,0x2B,0x34,0xD1,0x06,0x78,0x0D,0x23,0x73,0x43,0x5B,0x18,0x1B,0x7B, \ +0x1B,0x06,0x1A,0x43,0x22,0x60,0x02,0x78,0x0D,0x23,0x5A,0x43,0x52,0x18,0x96, \ +0x7B,0x53,0x7B,0x36,0x02,0x33,0x43,0xD6,0x7B,0x12,0x7C,0x36,0x04,0x33,0x43, \ +0x12,0x06,0x1A,0x43,0x62,0x60,0x02,0x78,0x0D,0x23,0x5A,0x43,0x52,0x18,0x96, \ +0x7C,0x53,0x7C,0x36,0x02,0x33,0x43,0xD6,0x7C,0x12,0x7D,0x36,0x04,0x33,0x43, \ +0x12,0x06,0x1A,0x43,0x62,0x61,0x02,0x78,0x0D,0x23,0x5A,0x43,0x51,0x18,0x8B, \ +0x7D,0x4A,0x7D,0x1B,0x02,0x1A,0x43,0xCB,0x7D,0x09,0x7E,0x1B,0x04,0x1A,0x43, \ +0x09,0x06,0x11,0x43,0xA1,0x61,0x00,0x78,0x29,0x68,0x0D,0x4A,0x80,0x07,0x01, \ +0x43,0x29,0x60,0x00,0x20,0x3B,0x5C,0x13,0x54,0x01,0x30,0x18,0x28,0xFA,0xD3, \ +0x11,0x76,0x08,0x0A,0x50,0x76,0x08,0x0C,0x90,0x76,0x08,0x0E,0xD0,0x76,0xF0, \ +0xBD,0x00,0x00,0x84,0x05,0x00,0x02,0x5C,0x01,0x00,0x02,0x30,0x00,0x00,0x02, \ +0x60,0x01,0x00,0x02,0x90,0x08,0x00,0x02,0x80,0xB4,0x10,0x4A,0x11,0x68,0x01, \ +0x31,0x1E,0x29,0x00,0xD1,0x00,0x21,0x0E,0x4F,0x0E,0x4B,0x4F,0x43,0xFB,0x18, \ +0x1F,0x7B,0x00,0x2F,0x11,0xD1,0x11,0x60,0x0C,0x49,0x03,0x22,0x19,0x60,0xD9, \ +0x1D,0x15,0x31,0x59,0x60,0x08,0x39,0x99,0x60,0x00,0x21,0x19,0x73,0x99,0x73, \ 
+0x9A,0x75,0x99,0x82,0x03,0x60,0x40,0x21,0x01,0x73,0x18,0x1C,0x80,0xBC,0xF7, \ +0x46,0x00,0x00,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0x11,0x00,0x02,0x00,0x00, \ +0x00,0x80,0x80,0xB4,0x0E,0x4A,0x11,0x68,0x01,0x31,0x14,0x29,0x00,0xD1,0x00, \ +0x21,0x0C,0x4F,0x0C,0x4B,0x4F,0x43,0xFB,0x18,0x1F,0x7A,0x00,0x2F,0x0D,0xD1, \ +0x11,0x60,0x0A,0x49,0x19,0x60,0xD9,0x1D,0x11,0x31,0x59,0x60,0x01,0x21,0x99, \ +0x81,0x00,0x21,0x19,0x72,0x03,0x60,0x80,0x21,0x01,0x72,0x18,0x1C,0x80,0xBC, \ +0xF7,0x46,0x0C,0x00,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0xDA,0x00,0x02,0x00, \ +0x00,0x00,0x80,0x01,0x1C,0x00,0x68,0x02,0x08,0x01,0xD3,0x08,0x1C,0xF7,0x46, \ +0x00,0x22,0x0A,0x73,0xF7,0x46,0x01,0x68,0x09,0x08,0x02,0xD3,0x40,0x21,0x01, \ +0x72,0xF7,0x46,0x04,0x4A,0x01,0x68,0x12,0x68,0x00,0x2A,0xF9,0xD1,0x02,0x72, \ +0x08,0x1C,0xF7,0x46,0x00,0x00,0x44,0x02,0x00,0x02,0x0B,0x49,0x01,0x20,0x48, \ +0x63,0x00,0x20,0x08,0x64,0xC8,0x63,0x88,0x66,0x48,0x66,0x48,0x65,0xCA,0x1D, \ +0x59,0x32,0x88,0x65,0x10,0x73,0xC8,0x65,0x50,0x73,0xCA,0x1D,0x39,0x32,0x10, \ +0x82,0x50,0x82,0xC8,0x64,0x7C,0x31,0x48,0x62,0xF7,0x46,0x00,0x00,0xA8,0x01, \ +0x00,0x02,0x00,0xB5,0x07,0x21,0x49,0x06,0xC8,0x69,0x40,0x23,0x18,0x43,0xC8, \ +0x61,0x14,0x48,0x01,0x38,0xFD,0xD1,0xC8,0x69,0x20,0x23,0x18,0x43,0xC8,0x61, \ +0xC8,0x69,0x1B,0x01,0x18,0x43,0xC8,0x61,0x00,0x20,0xFF,0x22,0x91,0x32,0x01, \ +0x30,0x90,0x42,0xFC,0xD3,0xC8,0x69,0x0C,0x4B,0x18,0x40,0xC8,0x61,0x00,0x20, \ +0x7D,0x21,0x49,0x01,0x01,0x30,0x88,0x42,0xFC,0xD3,0xFF,0xF7,0xC2,0xFF,0xFD, \ +0xF7,0x8C,0xFA,0x00,0xF0,0x0E,0xF8,0x05,0x49,0x0D,0x20,0x00,0x06,0x01,0x81, \ +0xFF,0x21,0x41,0x31,0x81,0x80,0x00,0xBD,0xD0,0xDD,0x06,0x00,0xFF,0xFD,0x00, \ +0x00,0xFF,0x0F,0x00,0x00,0x90,0xB4,0x0E,0x48,0x00,0x21,0x01,0x70,0x0D,0x48, \ +0x80,0x27,0x07,0x73,0x01,0x23,0x03,0x72,0x82,0x22,0x02,0x71,0x07,0x22,0x02, \ +0x70,0x0A,0x48,0x05,0x24,0x04,0x73,0x86,0x24,0x04,0x72,0x02,0x71,0x08,0x48, \ +0x24,0x22,0x02,0x71,0x07,0x72,0x03,0x73,0x06,0x48,0x01,0x71,0x01,0x73,0x90, \ +0xBC,0xF7,0x46,0x00,0x00,0x10,0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D,0xA0,0x03, \ +0x00,0x0D,0xC0,0x03,0x00,0x0D,0xE0,0x03,0x00,0x0D,0xF0,0xB5,0x92,0x48,0x08, \ +0x22,0x01,0x7B,0x91,0x4C,0x0A,0x40,0x00,0x25,0x01,0x27,0x00,0x2A,0x02,0xD0, \ +0x05,0x73,0x27,0x71,0xF0,0xBD,0x04,0x22,0x0A,0x40,0x8D,0x4E,0x66,0xD0,0x8D, \ +0x49,0x09,0x7B,0x0A,0x29,0x22,0xD1,0x8C,0x4A,0x00,0x21,0x15,0x7B,0x0B,0x1C, \ +0x01,0x31,0x08,0x29,0xF5,0x54,0xF9,0xD1,0x86,0x4E,0xF2,0x78,0xB1,0x78,0x73, \ +0x79,0x12,0x02,0x0A,0x43,0x31,0x79,0x1B,0x02,0xF5,0x79,0x19,0x43,0xB3,0x79, \ +0x2D,0x02,0x1D,0x43,0x33,0x78,0x76,0x78,0x1B,0x02,0x1E,0x43,0x80,0x4B,0x9E, \ +0x42,0x09,0xD1,0x80,0x48,0x43,0x6B,0x10,0x1C,0x2A,0x1C,0x03,0xF0,0x19,0xFD, \ +0x47,0xE0,0x05,0x73,0x27,0x71,0xF0,0xBD,0x7C,0x4B,0x9E,0x42,0x06,0xD1,0x79, \ +0x48,0x42,0x6B,0x08,0x1C,0x29,0x1C,0x00,0xF0,0xCA,0xFA,0x3A,0xE0,0x78,0x4B, \ +0x9E,0x42,0x06,0xD1,0x70,0x4E,0x28,0x1C,0xB2,0x78,0xF1,0x78,0x00,0xF0,0xA0, \ +0xFB,0x30,0xE0,0x05,0x2E,0x04,0xD1,0x10,0x1C,0x2A,0x1C,0x00,0xF0,0x3D,0xFB, \ +0x29,0xE0,0x09,0x2E,0x04,0xD1,0x10,0x1C,0x2A,0x1C,0x00,0xF0,0x5A,0xFB,0x22, \ +0xE0,0x6D,0x4B,0x9E,0x42,0x06,0xD1,0x68,0x48,0x43,0x6B,0x10,0x1C,0x2A,0x1C, \ +0x03,0xF0,0x02,0xFD,0x18,0xE0,0x69,0x4B,0x9E,0x42,0x06,0xD1,0x63,0x48,0x42, \ +0x6B,0x08,0x1C,0x29,0x1C,0x00,0xF0,0xDA,0xFB,0x0E,0xE0,0x65,0x4B,0xDB,0x69, \ +0x00,0x2B,0x03,0xD1,0x20,0x21,0x01,0x73,0x07,0xE0,0x08,0xE0,0x2B,0x1C,0x0D, \ +0x1C,0x11,0x1C,0x30,0x1C,0x2A,0x1C,0x03,0xF0,0x38,0xFD,0x27,0x71,0xF0,0xBD, \ +0xCB,0x07,0xDB,0x0F,0xE0,0x22,0x00,0x2B,0x66,0xD0,0x51,0x4E,0x31,0x78,0x73, \ 
+0x78,0x09,0x02,0x19,0x43,0x05,0x29,0x07,0xD1,0x4D,0x48,0x81,0x78,0x50,0x48, \ +0x40,0x6B,0x00,0xF0,0x84,0xFB,0x27,0x71,0xF0,0xBD,0x4F,0x4B,0x99,0x42,0x08, \ +0xD1,0x80,0x21,0x01,0x73,0x47,0x48,0x81,0x78,0xC0,0x78,0x00,0xF0,0x26,0xFA, \ +0x27,0x71,0xF0,0xBD,0x46,0x4B,0x99,0x42,0x07,0xD1,0x20,0x21,0x01,0x73,0x41, \ +0x48,0x27,0x71,0x00,0x79,0x03,0xF0,0xE0,0xFC,0xF0,0xBD,0x43,0x4B,0x47,0x4E, \ +0x99,0x42,0x22,0xD1,0x20,0x21,0x01,0x73,0x3B,0x48,0x27,0x71,0x81,0x78,0x02, \ +0x29,0x03,0xD1,0xC1,0x78,0x08,0x29,0x00,0xD1,0x4F,0xE7,0x81,0x78,0x01,0x29, \ +0x0F,0xD1,0xF0,0x7B,0x02,0x28,0x02,0xD0,0x01,0xF0,0x24,0xFF,0xF0,0xBD,0x3D, \ +0x48,0x3D,0x49,0x05,0x70,0x0F,0x20,0x00,0x06,0x81,0x80,0x38,0x4B,0x85,0x81, \ +0x5F,0x62,0xF0,0xBD,0xC1,0x78,0x80,0x78,0x03,0xF0,0xFB,0xFB,0xF0,0xBD,0x32, \ +0x4B,0x99,0x42,0x0B,0xD1,0x29,0x4E,0xB1,0x78,0x01,0x29,0x01,0xD1,0x02,0x73, \ +0x03,0xE0,0x80,0x21,0x01,0x73,0x03,0xF0,0x2D,0xFC,0x27,0x71,0xF0,0xBD,0x09, \ +0x29,0x09,0xD1,0x20,0x21,0x01,0x73,0x25,0x48,0x27,0x71,0x40,0x6B,0x03,0x28, \ +0xCE,0xD1,0x77,0x73,0xF0,0xBD,0x25,0xE0,0x24,0x4B,0x99,0x42,0x02,0xD1,0x02, \ +0x73,0x27,0x71,0xF0,0xBD,0x27,0x4B,0x99,0x42,0x0E,0xD0,0x26,0x4B,0x99,0x42, \ +0x0B,0xD0,0x81,0x23,0x1B,0x02,0x99,0x42,0x07,0xD0,0x41,0x23,0x5B,0x02,0x99, \ +0x42,0x03,0xD0,0x01,0x23,0xDB,0x03,0x99,0x42,0x02,0xD1,0x02,0x73,0x27,0x71, \ +0xF0,0xBD,0xFF,0x23,0x0C,0x33,0x99,0x42,0x02,0xD0,0x1C,0x4B,0x99,0x42,0xA9, \ +0xD1,0x20,0x21,0x01,0x73,0x27,0x71,0xF0,0xBD,0x89,0x08,0xA3,0xD3,0x31,0x78, \ +0x73,0x78,0x09,0x02,0x19,0x43,0x0C,0x4B,0x99,0x42,0x03,0xD1,0xB0,0x78,0x00, \ +0xF0,0x3B,0xFA,0x00,0xE0,0x02,0x73,0x27,0x71,0xF0,0xBD,0x00,0x00,0x70,0x03, \ +0x00,0x0D,0xD0,0x03,0x00,0x0D,0xEC,0x01,0x00,0x02,0xF0,0x02,0x00,0x0D,0x30, \ +0x03,0x00,0x0D,0x01,0x02,0x00,0x00,0xA8,0x01,0x00,0x02,0x0E,0x40,0x00,0x00, \ +0x06,0x80,0x00,0x00,0x22,0xC1,0x00,0x00,0x33,0xC1,0x00,0x00,0x24,0x02,0x00, \ +0x02,0x08,0x02,0x00,0x02,0x63,0x01,0x00,0x02,0x08,0x08,0x00,0x00,0x08,0x80, \ +0x00,0x00,0x0A,0x81,0x00,0x00,0x03,0x02,0x00,0x00,0xF0,0xB5,0x55,0x4D,0x28, \ +0x79,0x80,0x08,0x4D,0xD3,0x54,0x48,0x54,0x4A,0x00,0x79,0x54,0x4B,0x50,0x63, \ +0x0F,0x20,0x00,0x06,0x81,0x88,0x19,0x40,0x81,0x80,0x81,0x89,0x51,0x4B,0x19, \ +0x40,0x81,0x81,0x51,0x49,0x04,0x23,0x0B,0x71,0x00,0x26,0x0E,0x71,0x81,0x89, \ +0x4F,0x4B,0x19,0x43,0x81,0x81,0x81,0x88,0x4E,0x4B,0x19,0x43,0xD7,0x1F,0x75, \ +0x3F,0x81,0x80,0x78,0x6E,0x40,0x28,0x06,0xD2,0x78,0x6E,0x00,0x28,0x03,0xD0, \ +0x78,0x6E,0x02,0x30,0x10,0x63,0x09,0xE0,0x50,0x6B,0x40,0x28,0x04,0xD2,0x79, \ +0x6E,0x00,0x29,0x01,0xD1,0x10,0x63,0x01,0xE0,0x40,0x20,0x10,0x63,0xB8,0x6E, \ +0x11,0x6B,0x40,0x18,0x19,0x23,0x9B,0x01,0x98,0x42,0x06,0xD9,0xBE,0x66,0x7E, \ +0x66,0x01,0x20,0xD0,0x62,0xD0,0x6B,0x01,0x30,0xD0,0x63,0x3B,0x4C,0x20,0x68, \ +0x80,0x68,0xBB,0x6E,0xC0,0x18,0xCD,0x22,0x00,0xF0,0xA2,0xFA,0x31,0x48,0x2E, \ +0x71,0xC0,0x6A,0x01,0x28,0x02,0xD1,0x2F,0x4A,0xD6,0x62,0xF0,0xBD,0xB8,0x6E, \ +0x00,0x28,0x3A,0xD1,0x20,0x68,0x32,0x4B,0x81,0x8A,0xC2,0x7D,0x08,0x31,0x89, \ +0x18,0x79,0x66,0x79,0x6E,0x99,0x42,0x02,0xD8,0x79,0x6E,0x00,0x29,0x06,0xD1, \ +0xBE,0x66,0x25,0x4A,0x7E,0x66,0x10,0x6C,0x01,0x30,0x10,0x64,0xF0,0xBD,0x81, \ +0x7D,0x03,0x29,0x01,0xDD,0x03,0x21,0x81,0x75,0x20,0x68,0x41,0x68,0x08,0x78, \ +0x08,0x28,0x0C,0xD0,0x00,0x28,0x0A,0xD0,0x20,0x28,0x08,0xD0,0xB0,0x28,0x06, \ +0xD0,0xBE,0x66,0x19,0x4A,0x7E,0x66,0x50,0x6C,0x01,0x30,0x50,0x64,0xF0,0xBD, \ +0xC8,0x1D,0x03,0x30,0x06,0x22,0x1D,0x49,0xFD,0xF7,0xA4,0xFA,0x13,0x4A,0x01, \ +0x28,0x90,0x62,0x05,0xD1,0xBE,0x66,0x7E,0x66,0x90,0x6C,0x01,0x30,0x90,0x64, \ 
+0xF0,0xBD,0x78,0x6E,0x40,0x28,0x06,0xD9,0x78,0x6E,0x40,0x38,0x78,0x66,0xB8, \ +0x6E,0x40,0x30,0xB8,0x66,0xF0,0xBD,0xB8,0x6E,0x79,0x6E,0x40,0x18,0xB8,0x66, \ +0x7E,0x66,0xBE,0x66,0x20,0x68,0xFF,0xF7,0x2F,0xFD,0x05,0x4A,0x20,0x60,0x90, \ +0x6B,0x01,0x30,0x90,0x63,0xF0,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0xF0,0x02, \ +0x00,0x0D,0x24,0x02,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x60, \ +0x02,0x00,0x0D,0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0x08,0x00,0x00,0x02, \ +0x32,0x06,0x00,0x00,0x74,0x00,0x00,0x02,0x90,0xB5,0x18,0x4A,0x10,0x7A,0x40, \ +0x08,0x16,0xD3,0x17,0x4F,0x00,0x20,0x10,0x72,0xFB,0x6D,0xF9,0x1D,0x59,0x31, \ +0x01,0x2B,0x17,0xD1,0xCB,0x1D,0x15,0x33,0xF8,0x65,0x1B,0x6A,0x00,0x2B,0x01, \ +0xD1,0x10,0x23,0x13,0x72,0xBA,0x6D,0x01,0x24,0x00,0x2A,0x03,0xD0,0x00,0xF0, \ +0x1D,0xF8,0xFC,0x65,0x90,0xBD,0x0A,0x7B,0x01,0x2A,0x02,0xD1,0x08,0x73,0xFC, \ +0x65,0x90,0xBD,0xF8,0x65,0x90,0xBD,0x78,0x65,0xB8,0x65,0x08,0x73,0xF8,0x65, \ +0x06,0x4F,0x38,0x68,0x01,0x7A,0x10,0x29,0xED,0xD1,0xFF,0xF7,0x3B,0xFD,0x38, \ +0x60,0x90,0xBD,0x60,0x03,0x00,0x0D,0xA8,0x01,0x00,0x02,0x14,0x00,0x00,0x02, \ +0xB0,0xB4,0x0F,0x4A,0x90,0x6D,0x40,0x28,0x01,0xD3,0x40,0x20,0x00,0xE0,0x90, \ +0x6D,0x00,0x21,0x00,0x28,0x53,0x6D,0x0A,0xDD,0x0A,0x4C,0x0B,0x4F,0x25,0x6A, \ +0x00,0x2D,0x05,0xD1,0x1D,0x78,0x01,0x33,0x01,0x31,0x81,0x42,0x3D,0x72,0xF6, \ +0xDB,0x91,0x6D,0x09,0x1A,0x91,0x65,0x51,0x6D,0x08,0x18,0x50,0x65,0xB0,0xBC, \ +0xF7,0x46,0x00,0x00,0xA8,0x01,0x00,0x02,0x24,0x02,0x00,0x02,0x20,0x03,0x00, \ +0x0D,0xF0,0xB5,0x13,0x4F,0x00,0x26,0x78,0x65,0xB9,0x65,0xFC,0x1D,0x59,0x34, \ +0xFE,0x65,0x26,0x73,0xB8,0x6D,0x80,0x06,0x80,0x0E,0x01,0x25,0x00,0x28,0x00, \ +0xD1,0x25,0x73,0xFF,0xF7,0xC8,0xFF,0x0B,0x48,0x00,0x6A,0x00,0x28,0x02,0xD1, \ +0x0A,0x49,0x10,0x20,0x08,0x72,0xB8,0x6D,0x00,0x28,0x03,0xD0,0xFF,0xF7,0xBC, \ +0xFF,0xFD,0x65,0xF0,0xBD,0x20,0x7B,0x00,0x28,0x02,0xD0,0x26,0x73,0xFD,0x65, \ +0xF0,0xBD,0xFE,0x65,0xF0,0xBD,0xA8,0x01,0x00,0x02,0x24,0x02,0x00,0x02,0x60, \ +0x03,0x00,0x0D,0x90,0xB5,0x14,0x4F,0x78,0x7B,0x00,0x28,0x23,0xD0,0xFC,0x1D, \ +0x15,0x34,0x20,0x6A,0x01,0x28,0x04,0xD1,0x00,0x20,0x20,0x62,0xF8,0x7B,0x00, \ +0xF0,0xCF,0xF9,0x60,0x6A,0x01,0x28,0x02,0xD1,0xF8,0x7B,0x00,0xF0,0xC9,0xF9, \ +0xF8,0x7B,0x02,0x28,0x10,0xD0,0xFF,0xF7,0x98,0xFE,0x08,0x49,0x08,0x68,0x02, \ +0x7A,0x12,0x0A,0x07,0xD3,0x10,0x22,0x02,0x72,0x08,0x68,0x81,0x89,0x0C,0x30, \ +0x0C,0x31,0xFF,0xF7,0xAB,0xFF,0xFF,0xF7,0x4B,0xFF,0x90,0xBD,0x08,0x02,0x00, \ +0x02,0x14,0x00,0x00,0x02,0x90,0xB4,0x1E,0x4A,0x1E,0x4C,0x91,0x6B,0xD3,0x6B, \ +0x8B,0x42,0x19,0xD1,0x20,0x7B,0x40,0x23,0x03,0x40,0xE0,0x20,0x00,0x2B,0x11, \ +0xD1,0x49,0x07,0x02,0xD0,0x20,0x73,0x90,0xBC,0xF7,0x46,0xD1,0x1D,0x59,0x31, \ +0x8A,0x7B,0x01,0x2A,0x02,0xD1,0xD0,0x20,0x20,0x73,0xF5,0xE7,0x89,0x7B,0x00, \ +0x29,0xF2,0xD1,0x20,0x73,0xF0,0xE7,0x20,0x73,0xEE,0xE7,0x8B,0x42,0xEC,0xD2, \ +0xC9,0x1A,0x08,0x29,0x00,0xD9,0x08,0x21,0x01,0x28,0x01,0xD1,0x0C,0x4F,0x02, \ +0xE0,0x02,0x28,0x00,0xD1,0x0B,0x4F,0x00,0x29,0x08,0xD0,0x0A,0x48,0xD3,0x6B, \ +0xFB,0x5C,0x03,0x73,0xD3,0x6B,0x01,0x33,0xD3,0x63,0x01,0x39,0xF7,0xD1,0x20, \ +0x7B,0x10,0x23,0x18,0x43,0x20,0x73,0xD1,0xE7,0xA8,0x01,0x00,0x02,0x70,0x03, \ +0x00,0x0D,0xA8,0x01,0x00,0x02,0xBA,0x01,0x00,0x02,0x30,0x03,0x00,0x0D,0xF0, \ +0xB5,0x04,0x1C,0x1D,0x48,0x0F,0x1C,0x86,0x78,0xC5,0x78,0x20,0x21,0x03,0x2A, \ +0x1B,0x48,0x01,0xD0,0x01,0x73,0xF0,0xBD,0x02,0x2E,0x05,0xD1,0x01,0x2D,0x01, \ +0xD3,0x09,0x2D,0x01,0xD9,0x01,0x73,0xF0,0xBD,0x00,0x2F,0x07,0xD1,0x00,0xF0, \ +0x8A,0xF9,0x08,0x2D,0x05,0xD1,0x13,0x49,0x01,0x20,0xC8,0x61,0x01,0xE0,0x00, \ 
+0x21,0x01,0x73,0x02,0x2E,0x0E,0xD1,0x00,0x2C,0x14,0xD1,0x08,0x2D,0x03,0xD1, \ +0x0E,0x48,0x0D,0x49,0xC8,0x60,0x0E,0xE0,0x0D,0x48,0x0B,0x49,0xC8,0x60,0x0D, \ +0x49,0x00,0x20,0x08,0x70,0x07,0xE0,0x01,0x2E,0xD7,0xD0,0x0B,0x48,0x06,0x49, \ +0xC8,0x60,0x00,0x20,0x48,0x61,0x08,0x61,0x09,0x48,0x00,0x21,0x47,0x67,0x81, \ +0x67,0xF0,0xBD,0xEC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x24,0x02,0x00,0x02, \ +0x00,0x60,0x00,0x01,0x60,0x08,0x00,0x02,0x63,0x01,0x00,0x02,0xB0,0x08,0x00, \ +0x02,0xA8,0x01,0x00,0x02,0x90,0xB5,0x16,0x49,0x16,0x4B,0x01,0x28,0x05,0xD1, \ +0x18,0x7B,0x60,0x31,0xC8,0x73,0x00,0xF0,0x4A,0xF9,0x90,0xBD,0x88,0x6F,0x4A, \ +0x6F,0x90,0x42,0xFA,0xD2,0x48,0x6F,0x8A,0x6F,0x80,0x1A,0x08,0x28,0x00,0xD9, \ +0x08,0x20,0x00,0x28,0x0A,0xD0,0x0D,0x4A,0x1C,0x7B,0xD7,0x68,0x3C,0x70,0x01, \ +0x37,0xD7,0x60,0x8F,0x6F,0x01,0x37,0x8F,0x67,0x01,0x38,0xF5,0xD1,0x88,0x6F, \ +0x49,0x6F,0x88,0x42,0x02,0xD1,0x00,0xF0,0x2C,0xF9,0x90,0xBD,0x05,0x49,0x00, \ +0x20,0x08,0x73,0x90,0xBD,0x00,0x00,0xA8,0x01,0x00,0x02,0x30,0x03,0x00,0x0D, \ +0x24,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x00,0xB5,0x7F,0x28,0x07,0xD8,0x00, \ +0x29,0x05,0xD1,0x00,0x2A,0x03,0xD1,0x0C,0x4A,0x51,0x6B,0x03,0x29,0x03,0xD1, \ +0x0B,0x49,0x20,0x20,0x08,0x73,0x00,0xBD,0x01,0x29,0x04,0xD1,0x00,0x28,0x08, \ +0xD0,0x02,0x20,0x50,0x63,0x05,0xE0,0x02,0x29,0x03,0xD1,0x00,0x28,0x01,0xD1, \ +0x01,0x20,0x50,0x63,0x00,0xF0,0x00,0xF9,0x00,0xBD,0x00,0x00,0xA8,0x01,0x00, \ +0x02,0x70,0x03,0x00,0x0D,0x80,0xB5,0x00,0x29,0x09,0xD1,0x00,0x2A,0x07,0xD1, \ +0x00,0x28,0x01,0xD0,0x01,0x28,0x03,0xD1,0x14,0x49,0x4A,0x6B,0x01,0x2A,0x03, \ +0xD1,0x13,0x49,0x20,0x20,0x08,0x73,0x80,0xBD,0x12,0x4B,0x02,0x2A,0x09,0xD1, \ +0x00,0x28,0x12,0xD0,0x03,0x22,0x4A,0x63,0x1F,0x7B,0x1A,0x1C,0x02,0x23,0x3B, \ +0x43,0x13,0x73,0x0A,0xE0,0x03,0x2A,0x08,0xD1,0x00,0x28,0x06,0xD1,0x02,0x22, \ +0x4A,0x63,0x1F,0x7B,0x1A,0x1C,0xFD,0x23,0x3B,0x40,0x13,0x73,0x08,0x64,0x00, \ +0x20,0x40,0x31,0x08,0x82,0x48,0x82,0x00,0xF0,0xCA,0xF8,0x80,0xBD,0x00,0x00, \ +0xA8,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0xE0,0x03,0x00,0x0D,0xF0,0xB5,0x12, \ +0x4D,0x12,0x4C,0x01,0x29,0x02,0xD1,0x12,0x23,0xA3,0x63,0x03,0xE0,0x20,0x23, \ +0x02,0x29,0x0A,0xD1,0xA3,0x63,0xE3,0x1D,0x59,0x33,0x00,0x27,0x9F,0x73,0xA6, \ +0x6B,0xB0,0x42,0x04,0xD8,0x9F,0x73,0xA0,0x63,0x07,0xE0,0x2B,0x73,0xF0,0xBD, \ +0x70,0x07,0x01,0xD0,0x9F,0x73,0x01,0xE0,0x01,0x20,0x98,0x73,0x80,0x20,0xE7, \ +0x63,0x28,0x73,0x08,0x1C,0x11,0x1C,0xFF,0xF7,0xB3,0xFE,0xF0,0xBD,0x70,0x03, \ +0x00,0x0D,0xA8,0x01,0x00,0x02,0x80,0xB4,0x0C,0x4F,0x0C,0x4A,0x01,0x28,0x06, \ +0xD1,0x80,0x20,0x10,0x72,0x38,0x7B,0xFE,0x23,0x18,0x40,0x38,0x73,0x08,0xE0, \ +0x02,0x28,0x06,0xD1,0x80,0x20,0x08,0x43,0x10,0x72,0x38,0x7B,0x01,0x23,0x18, \ +0x43,0x38,0x73,0x04,0x49,0x20,0x20,0x08,0x73,0x80,0xBC,0xF7,0x46,0xE0,0x03, \ +0x00,0x0D,0xC0,0x03,0x00,0x0D,0x70,0x03,0x00,0x0D,0x0D,0x23,0x1B,0x06,0x99, \ +0x83,0x05,0x49,0x0A,0x70,0x05,0x4A,0x10,0x60,0x02,0x20,0x08,0x72,0x08,0x7A, \ +0x00,0x28,0xFC,0xD1,0xF7,0x46,0x00,0x00,0x20,0x00,0x00,0x0D,0x40,0x00,0x00, \ +0x0D,0xB0,0xB5,0x11,0x4F,0x03,0x2A,0xBB,0x78,0xFC,0x78,0x10,0x4F,0x02,0xD0, \ +0x20,0x20,0x38,0x73,0xB0,0xBD,0x0E,0x4A,0x00,0x25,0xD5,0x67,0xD5,0x1D,0x75, \ +0x35,0x69,0x60,0x01,0x2B,0x0B,0xD1,0x80,0x20,0x38,0x73,0xD0,0x1D,0x59,0x30, \ +0xC0,0x7B,0x09,0x49,0x10,0x23,0x08,0x73,0x38,0x7B,0x18,0x43,0x38,0x73,0xB0, \ +0xBD,0x02,0x1C,0x18,0x1C,0x21,0x1C,0x02,0xF0,0xFF,0xFF,0xB0,0xBD,0xEC,0x01, \ +0x00,0x02,0x70,0x03,0x00,0x0D,0xA8,0x01,0x00,0x02,0x30,0x03,0x00,0x0D,0x80, \ +0xB5,0x02,0x1C,0x00,0x20,0x02,0x2A,0x14,0x49,0x08,0xD0,0x08,0x72,0x14,0x49, \ 
+0x20,0x22,0x0A,0x70,0x08,0x70,0x13,0x4A,0x86,0x21,0x11,0x72,0x06,0xE0,0x0A, \ +0x7A,0x52,0x09,0x03,0xD2,0x10,0x22,0x0A,0x72,0x0F,0x49,0x48,0x62,0x0F,0x49, \ +0x10,0x4F,0x48,0x65,0xCA,0x1D,0x59,0x32,0x88,0x65,0x10,0x73,0xC8,0x65,0x08, \ +0x66,0x38,0x68,0x01,0x7A,0x10,0x29,0x02,0xD1,0xFF,0xF7,0xDC,0xFA,0x38,0x60, \ +0x38,0x68,0x01,0x7A,0x40,0x29,0x02,0xD1,0xFF,0xF7,0xD5,0xFA,0x38,0x60,0x80, \ +0xBD,0x60,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0x24,0x02, \ +0x00,0x02,0xA8,0x01,0x00,0x02,0x14,0x00,0x00,0x02,0x05,0x48,0x06,0x49,0x02, \ +0x78,0x0A,0x67,0x0A,0x6F,0x03,0x78,0x9A,0x42,0xFB,0xD0,0x03,0x49,0x60,0x20, \ +0x08,0x73,0xF7,0x46,0xF0,0x03,0x00,0x0D,0xA8,0x01,0x00,0x02,0x70,0x03,0x00, \ +0x0D,0x80,0xB5,0x86,0xB0,0x42,0x68,0x11,0x78,0x08,0x29,0x01,0xD0,0x06,0xB0, \ +0x80,0xBD,0x91,0x7F,0xD3,0x7F,0x09,0x02,0x19,0x43,0x15,0x4B,0x09,0x04,0x1F, \ +0x88,0x09,0x0C,0xB9,0x42,0x02,0xD0,0x5B,0x88,0x8B,0x42,0x06,0xD1,0xD1,0x1D, \ +0x11,0x31,0x06,0x22,0x10,0x48,0xFC,0xF7,0xA9,0xFF,0x19,0xE0,0x03,0x23,0x5B, \ +0x02,0x99,0x42,0x06,0xDD,0xD1,0x1D,0x11,0x31,0x06,0x22,0x0C,0x48,0xFC,0xF7, \ +0x9E,0xFF,0x0E,0xE0,0xD7,0x1D,0x01,0x37,0x47,0x60,0x18,0x31,0x81,0x82,0x69, \ +0x46,0x10,0x1C,0x18,0x22,0xFC,0xF7,0x93,0xFF,0x68,0x46,0x18,0x22,0x39,0x1C, \ +0xFC,0xF7,0x8E,0xFF,0xCE,0xE7,0x00,0x00,0x80,0x02,0x00,0x02,0x7A,0x02,0x00, \ +0x02,0x74,0x02,0x00,0x02,0xB0,0xB5,0x82,0xB0,0x68,0x46,0x08,0x22,0x3D,0x49, \ +0x01,0xF0,0x5B,0xF9,0x00,0xF0,0x85,0xF8,0x3C,0x4F,0x06,0x22,0xFF,0x21,0x38, \ +0x1C,0x01,0x31,0x00,0xF0,0xE5,0xF8,0xFF,0x21,0x11,0x31,0xB8,0x1D,0x0E,0x22, \ +0x05,0x1C,0x00,0xF0,0xDE,0xF8,0x36,0x4C,0x12,0x22,0x03,0x21,0x20,0x1C,0x00, \ +0xF0,0xD8,0xF8,0xF8,0x1D,0x0D,0x30,0x0E,0x22,0xFF,0x21,0x21,0x31,0x00,0xF0, \ +0xD1,0xF8,0xF8,0x1D,0x1F,0x30,0x01,0x22,0xFF,0x21,0x31,0x31,0x00,0xF0,0xCA, \ +0xF8,0xF8,0x1D,0x21,0x30,0x07,0x22,0xFF,0x21,0xF1,0x31,0x00,0xF0,0xC3,0xF8, \ +0x00,0xF0,0x72,0xF8,0xE0,0x1D,0x01,0x30,0x04,0x22,0xF9,0x1D,0x1B,0x31,0xFC, \ +0xF7,0x4B,0xFF,0x38,0x78,0x40,0x08,0x0B,0xD2,0x68,0x46,0x06,0x22,0x39,0x1C, \ +0xFC,0xF7,0x33,0xFF,0x00,0x28,0x04,0xD0,0x06,0x22,0x38,0x1C,0x1F,0x49,0xFC, \ +0xF7,0x3C,0xFF,0x28,0x1C,0x0E,0x22,0x1D,0x49,0xFC,0xF7,0x37,0xFF,0xF8,0x1D, \ +0x19,0x30,0x81,0x79,0x10,0x29,0x0B,0xD0,0x20,0x29,0x09,0xD0,0x31,0x29,0x07, \ +0xD0,0x30,0x29,0x05,0xD0,0x32,0x29,0x03,0xD0,0x40,0x29,0x01,0xD0,0x41,0x29, \ +0x01,0xD1,0x14,0x4A,0xD1,0x75,0x02,0x7A,0x14,0x49,0x55,0x2A,0x14,0xD1,0x42, \ +0x7A,0x53,0x2A,0x11,0xD1,0x82,0x7A,0x42,0x2A,0x0E,0xD1,0xC2,0x7A,0x53,0x2A, \ +0x0B,0xD1,0x02,0x7B,0x55,0x2A,0x08,0xD1,0x42,0x7B,0x53,0x2A,0x05,0xD1,0x80, \ +0x7B,0x50,0x28,0x02,0xD1,0x01,0x20,0x08,0x60,0x01,0xE0,0x00,0x20,0x08,0x60, \ +0x02,0xB0,0xB0,0xBD,0x00,0x00,0xC8,0x51,0x00,0x00,0x60,0x08,0x00,0x02,0xA8, \ +0x01,0x00,0x02,0x74,0x00,0x00,0x02,0x4C,0x01,0x00,0x02,0x18,0x00,0x00,0x02, \ +0x74,0x01,0x00,0x02,0x80,0x21,0xF3,0x20,0x00,0x05,0x01,0x60,0x00,0x21,0x01, \ +0x60,0x01,0x21,0x41,0x60,0x01,0x60,0x07,0x21,0x49,0x06,0xCA,0x69,0x01,0x23, \ +0x5B,0x03,0x1A,0x43,0xCA,0x61,0x02,0x49,0x01,0x63,0x01,0x69,0x80,0x68,0xF7, \ +0x46,0x00,0x00,0x01,0x0C,0x00,0x02,0x07,0x20,0x40,0x06,0xC1,0x69,0x02,0x4B, \ +0x19,0x40,0xC1,0x61,0xF7,0x46,0x00,0x00,0xFF,0xDF,0x00,0x00,0xF0,0xB4,0x00, \ +0x27,0xF3,0x24,0x24,0x05,0x00,0x28,0x08,0xD9,0x13,0x4D,0xEB,0x5D,0xE3,0x60, \ +0x26,0x69,0xB3,0x08,0xFC,0xD3,0x01,0x37,0x87,0x42,0xF7,0xD3,0xFF,0x23,0xE3, \ +0x60,0xA0,0x68,0x27,0x1C,0x38,0x69,0x40,0x08,0xFC,0xD3,0xB8,0x68,0x00,0x20, \ +0x00,0x2A,0x0D,0xD9,0x1C,0x1C,0x3D,0x69,0xAB,0x08,0xFC,0xD3,0xFC,0x60,0x3B, \ 
+0x69,0x5B,0x08,0xFC,0xD3,0xBB,0x68,0x01,0x30,0x0B,0x70,0x01,0x31,0x90,0x42, \ +0xF2,0xD3,0x12,0x20,0x01,0x38,0xFD,0xD1,0xF0,0xBC,0xF7,0x46,0x00,0x00,0x98, \ +0x02,0x00,0x02,0xF3,0x20,0x00,0x05,0x81,0x68,0x05,0x21,0xC1,0x60,0x01,0x69, \ +0x89,0x08,0xFC,0xD3,0xFF,0x21,0xC1,0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0x81, \ +0x68,0x01,0x69,0x49,0x08,0xFC,0xD3,0x80,0x68,0x00,0x06,0x00,0x0E,0xF7,0x46, \ +0x90,0xB5,0x04,0x1C,0x48,0x09,0x08,0x23,0x18,0x40,0x17,0x1C,0x03,0x22,0x02, \ +0x43,0x08,0x48,0x02,0x70,0x41,0x70,0xFF,0xF7,0xDE,0xFF,0x40,0x08,0xFB,0xD2, \ +0x12,0x20,0x01,0x38,0xFD,0xD1,0x02,0x20,0x21,0x1C,0x3A,0x1C,0xFF,0xF7,0xA4, \ +0xFF,0x90,0xBD,0x00,0x00,0x98,0x02,0x00,0x02,0xF0,0xB4,0x13,0x4A,0x00,0x27, \ +0xD7,0x65,0x17,0x66,0x17,0x67,0x57,0x67,0x20,0x20,0x90,0x67,0x10,0x48,0x07, \ +0x70,0x41,0x1C,0x01,0x20,0x04,0x02,0x00,0x25,0x03,0x1C,0x46,0x08,0x05,0xD2, \ +0x5B,0x08,0x01,0x35,0x2D,0x06,0x2D,0x0E,0x5E,0x08,0xF9,0xD3,0x0D,0x70,0x01, \ +0x31,0x01,0x30,0xA0,0x42,0xF0,0xD3,0x07,0x49,0x00,0x20,0x80,0xC1,0x01,0x30, \ +0x20,0x28,0xFB,0xD3,0x57,0x66,0x97,0x66,0xD7,0x67,0xF0,0xBC,0xF7,0x46,0xA8, \ +0x02,0x00,0x02,0x3C,0x09,0x00,0x02,0x3C,0x0A,0x00,0x02,0x90,0xB5,0x0A,0x4F, \ +0x0A,0x4C,0x38,0x68,0x63,0x1C,0x98,0x42,0x04,0xD0,0x3C,0x60,0xFC,0xF7,0x00, \ +0xFA,0x00,0xF0,0xDC,0xFB,0x06,0x48,0x3C,0x60,0x00,0x68,0xFC,0xF7,0x76,0xFA, \ +0x00,0x20,0x38,0x60,0x00,0xF0,0xE3,0xFB,0x90,0xBD,0x14,0x03,0x00,0x02,0xF0, \ +0xF0,0xF0,0xF0,0x84,0x03,0x00,0x02,0x80,0xB5,0x05,0x48,0x05,0x4F,0x38,0x60, \ +0xFC,0xF7,0xE8,0xF9,0x00,0xF0,0xC4,0xFB,0x03,0x48,0x38,0x60,0x80,0xBD,0x00, \ +0x00,0xF0,0xF0,0xF0,0xF0,0x14,0x03,0x00,0x02,0xF1,0xF0,0xF0,0xF0,0x0F,0x00, \ +0x2D,0xE9,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xCC,0x30,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2, \ +0x00,0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D, \ +0xE9,0xB0,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20, \ +0x83,0xE5,0x9C,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04, \ +0x00,0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9, \ +0x84,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x10,0xD0,0x8D,0xE2,0x78,0x00,0x9F, \ +0xE5,0x10,0xFF,0x2F,0xE1,0x0F,0x00,0x2D,0xE9,0x64,0x30,0x9F,0xE5,0x00,0x20, \ +0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2,0x00, \ +0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D,0xE9, \ +0x4C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20,0x83, \ +0xE5,0x34,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04,0x00, \ +0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9,0x20, \ +0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2, \ +0x01,0x40,0x2D,0xE9,0x0C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x14,0x03,0x00, \ +0x02,0x04,0x03,0x00,0x02,0x1D,0x01,0x00,0x00,0x29,0x01,0x00,0x00,0x00,0xA0, \ +0x00,0x47,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xE4,0x31,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3, \ +0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD, \ +0xE8,0x0E,0xF0,0xB0,0xE1,0xC0,0x11,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00, \ +0x50,0xE3,0x27,0x00,0x00,0x0A,0xC0,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00, \ +0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA8,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5, \ +0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F, \ +0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08,0x40,0xBD,0xE8,0x0E,0x10, \ 
+0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x04,0x10,0x2D,0xE5,0xF0, \ +0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD2,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1, \ +0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0,0x21,0xE1,0x0F,0x00,0x2D, \ +0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x44,0x11,0x9F,0xE5,0x00,0x00, \ +0x91,0xE5,0x08,0xD0,0x80,0xE5,0x44,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00, \ +0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80,0xE5,0x00,0x20,0xA0,0xE3, \ +0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00,0x81,0xE5,0x70,0x01,0x00, \ +0xEA,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x6D,0x01,0x00,0xEA,0x00,0xA0, \ +0x00,0x47,0xD1,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xF4,0x30,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3, \ +0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD, \ +0xE8,0x0E,0xF0,0xB0,0xE1,0x00,0x10,0x9D,0xE5,0x1F,0x20,0xA0,0xE3,0x02,0x10, \ +0x01,0xE0,0x12,0x00,0x51,0xE3,0x0B,0x00,0x00,0x0A,0xBC,0x10,0x9F,0xE5,0x00, \ +0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x27,0x00,0x00,0x0A,0xBC,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA4,0x30,0x9F, \ +0xE5,0x00,0x20,0x93,0xE5,0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40, \ +0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08, \ +0x40,0xBD,0xE8,0x0E,0x10,0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1, \ +0x04,0x10,0x2D,0xE5,0xF0,0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD1,0x20,0xA0, \ +0xE3,0x02,0xF0,0x21,0xE1,0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0, \ +0x21,0xE1,0x0F,0x00,0x2D,0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x40, \ +0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x08,0xD0,0x80,0xE5,0x40,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80, \ +0xE5,0x00,0x20,0xA0,0xE3,0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00, \ +0x81,0xE5,0x2F,0x01,0x00,0xEA,0x18,0xD0,0x8D,0xE2,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x2B,0x01,0x00,0xEA,0x14,0x03,0x00,0x02,0x04,0x03,0x00,0x02, \ +0x00,0x03,0x00,0x02,0x08,0x03,0x00,0x02,0x8C,0x03,0x00,0x02,0x24,0x03,0x00, \ +0x02,0x90,0xB5,0x86,0xB0,0x00,0x24,0x13,0x4F,0x13,0x4A,0x3C,0x60,0x7C,0x60, \ +0xBC,0x60,0x00,0x21,0x10,0x1C,0xBC,0x61,0x10,0xC0,0x01,0x31,0x20,0x29,0xFB, \ +0xD3,0xD0,0x1D,0x79,0x30,0xFA,0x60,0x38,0x61,0x7A,0x61,0x78,0x6A,0x00,0x22, \ +0x00,0x21,0x05,0x92,0x02,0x1C,0x04,0x91,0x03,0x90,0xF8,0x69,0x39,0x6A,0x02, \ +0x92,0x00,0x90,0x01,0x91,0x08,0xA1,0x07,0x48,0x05,0x4A,0x0C,0x4B,0x00,0xF0, \ +0x18,0xF8,0xBC,0x62,0xFC,0x62,0x06,0xB0,0x90,0xBD,0x88,0x03,0x00,0x02,0xBC, \ +0x0A,0x00,0x02,0x09,0x45,0x00,0x00,0x3C,0x0B,0x00,0x02,0x53,0x79,0x73,0x74, \ +0x65,0x6D,0x20,0x54,0x69,0x6D,0x65,0x72,0x20,0x54,0x68,0x72,0x65,0x61,0x64, \ +0x00,0x4D,0x49,0x54,0x41,0xFF,0xB5,0x07,0x1C,0x0D,0x98,0x0E,0x9C,0x0A,0xAE, \ +0x4C,0xCE,0x09,0x9D,0xB9,0x62,0x02,0x99,0x79,0x64,0x03,0x99,0xB9,0x64,0x00, \ +0x21,0xB9,0x60,0x7A,0x61,0xAA,0x18,0x01,0x3A,0xFD,0x60,0x3A,0x61,0xDA,0x06, \ +0xD2,0x0E,0xF3,0x06,0xB8,0x61,0xFA,0x62,0xDB,0x0E,0xFB,0x63,0xF8,0x61,0x79, \ +0x60,0x03,0x20,0x38,0x63,0x79,0x63,0x25,0x48,0xB9,0x63,0x78,0x65,0x01,0x20, \ +0x90,0x40,0xBF,0x65,0x39,0x65,0x64,0x37,0x00,0x22,0x00,0x23,0x0E,0xC7,0x0C, \ +0xC7,0x78,0x3F,0x38,0x64,0x38,0x1C,0x1E,0x49,0x00,0xF0,0xFB,0xFA,0xC0,0x20, \ +0x00,0xF0,0x32,0xFB,0x1C,0x49,0x1D,0x4D,0x39,0x60,0x2A,0x68,0xF9,0x1D,0x79, \ +0x31,0x00,0x2A,0x0A,0xD0,0xD3,0x1D,0x79,0x33,0xDA,0x68,0xDF,0x60,0xD3,0x1D, \ +0x79,0x33,0x9F,0x60,0xCA,0x60,0x2A,0x68,0x8A,0x60,0x02,0xE0,0x2F,0x60,0x8F, \ 
+0x60,0xCF,0x60,0x13,0x49,0x14,0x4D,0x0A,0x68,0x01,0x32,0x0A,0x60,0x29,0x68, \ +0x01,0x31,0x29,0x60,0x00,0xF0,0x12,0xFB,0x00,0x2C,0x07,0xD0,0x38,0x1C,0x00, \ +0xF0,0x1D,0xFB,0x00,0x28,0x0A,0xD0,0x00,0xF0,0x83,0xFB,0x07,0xE0,0xC0,0x20, \ +0x00,0xF0,0x05,0xFB,0x29,0x68,0x01,0x39,0x29,0x60,0x00,0xF0,0x00,0xFB,0x00, \ +0x20,0x04,0xB0,0xF0,0xBD,0x00,0x00,0x65,0x46,0x00,0x00,0xA5,0x46,0x00,0x00, \ +0x44,0x52,0x48,0x54,0x0C,0x03,0x00,0x02,0x10,0x03,0x00,0x02,0x24,0x03,0x00, \ +0x02,0x90,0xB5,0x41,0x60,0x07,0x1C,0x08,0x30,0x00,0x21,0x00,0x24,0x12,0xC0, \ +0x12,0xC0,0xC0,0x20,0x00,0xF0,0xE5,0xFA,0x0C,0x49,0x0C,0x4B,0x39,0x60,0x19, \ +0x68,0x00,0x29,0x06,0xD0,0xCA,0x69,0xCF,0x61,0x97,0x61,0xFA,0x61,0x19,0x68, \ +0xB9,0x61,0x02,0xE0,0x1F,0x60,0xBF,0x61,0xFF,0x61,0x06,0x49,0x0A,0x68,0x01, \ +0x32,0x0A,0x60,0x00,0xF0,0xCF,0xFA,0x20,0x1C,0x90,0xBD,0x00,0x00,0x4E,0x44, \ +0x56,0x44,0xB8,0x03,0x00,0x02,0xBC,0x03,0x00,0x02,0xF0,0xB5,0x85,0xB0,0x07, \ +0x1C,0xC0,0x20,0x0C,0x1C,0x15,0x1C,0x00,0xF0,0xBE,0xFA,0xA9,0x08,0x03,0xD3, \ +0xB9,0x68,0x21,0x40,0xB9,0x60,0x3D,0xE0,0xB9,0x68,0x21,0x43,0xB9,0x60,0x3C, \ +0x69,0x0A,0x1C,0x00,0x2C,0x63,0xD0,0x7B,0x69,0x6D,0x49,0x01,0x2B,0x35,0xD1, \ +0xE5,0x1D,0x79,0x35,0x2E,0x68,0xB3,0x08,0x05,0xD3,0xA3,0x6F,0x16,0x1C,0x1E, \ +0x40,0x9E,0x42,0x28,0xD1,0x02,0xE0,0xA3,0x6F,0x13,0x40,0x24,0xD0,0xE3,0x6F, \ +0x1A,0x60,0x2A,0x68,0x52,0x08,0x03,0xD3,0xBA,0x68,0xA3,0x6F,0x9A,0x43,0xBA, \ +0x60,0x00,0x26,0x3E,0x61,0x7E,0x61,0xA6,0x66,0x0A,0x68,0x01,0x32,0x0A,0x60, \ +0x00,0xF0,0x8D,0xFA,0x60,0x6E,0x00,0x28,0x04,0xD0,0xE0,0x1D,0x45,0x30,0x00, \ +0xF0,0x34,0xFB,0x00,0xE0,0xE6,0x64,0x6E,0x60,0x20,0x1C,0x00,0xF0,0x90,0xFA, \ +0x00,0x28,0x01,0xD0,0x00,0xF0,0xF6,0xFA,0x30,0x1C,0x9B,0xE0,0x00,0xF0,0x78, \ +0xFA,0x97,0xE0,0x03,0x94,0x00,0x25,0x3D,0x61,0x01,0x92,0x4F,0x4A,0x02,0x93, \ +0x11,0x68,0x00,0x26,0x01,0x31,0x11,0x60,0x00,0xF0,0x6B,0xFA,0xC0,0x20,0x00, \ +0xF0,0x68,0xFA,0xF9,0x68,0x00,0x29,0x0A,0xD0,0x00,0x21,0xF9,0x60,0x03,0x9C, \ +0x7B,0x69,0x02,0x93,0xB9,0x68,0x01,0x9A,0x11,0x43,0x01,0x91,0x00,0x2C,0x46, \ +0xD0,0xE3,0x1D,0x79,0x33,0x04,0x93,0x19,0x68,0x00,0x91,0x89,0x08,0x06,0xD3, \ +0xA1,0x6F,0x01,0x9A,0x0A,0x40,0x8A,0x42,0x07,0xD1,0x04,0xE0,0x6F,0xE0,0xA1, \ +0x6F,0x01,0x9A,0x11,0x40,0x01,0xD0,0x00,0x21,0x00,0xE0,0x07,0x21,0x22,0x6F, \ +0x00,0x29,0x28,0xD1,0x01,0x99,0xE3,0x6F,0x19,0x60,0x00,0x99,0x49,0x08,0x03, \ +0xD3,0xB9,0x68,0xA3,0x6F,0x99,0x43,0xB9,0x60,0x21,0x6F,0xA1,0x42,0x02,0xD1, \ +0x00,0x21,0x03,0x91,0x08,0xE0,0x03,0x9B,0xA3,0x42,0x00,0xD1,0x03,0x91,0x63, \ +0x6F,0x4B,0x67,0x21,0x6F,0x63,0x6F,0x19,0x67,0x79,0x69,0x01,0x39,0x79,0x61, \ +0x00,0x21,0xA1,0x66,0x04,0x9B,0x00,0x2D,0x59,0x60,0x03,0xD1,0x25,0x1C,0x26, \ +0x1C,0x21,0x67,0x02,0xE0,0x34,0x67,0x21,0x67,0x26,0x1C,0x02,0x9B,0x14,0x1C, \ +0x01,0x3B,0x02,0x93,0x00,0x2B,0xA5,0xD1,0x03,0x99,0x39,0x61,0x00,0xF0,0x0F, \ +0xFA,0x00,0x2D,0x18,0xD0,0x1C,0x4E,0x00,0x24,0x68,0x6E,0x2F,0x6F,0x00,0x28, \ +0x04,0xD0,0xE8,0x1D,0x45,0x30,0x00,0xF0,0xB1,0xFA,0x00,0xE0,0xEC,0x64,0xC0, \ +0x20,0x00,0xF0,0xFE,0xF9,0x31,0x68,0x01,0x31,0x31,0x60,0x00,0xF0,0xF9,0xF9, \ +0x28,0x1C,0x00,0xF0,0x06,0xFA,0x3D,0x1C,0xE8,0xD1,0xC0,0x20,0x00,0xF0,0xF1, \ +0xF9,0x0E,0x49,0x0A,0x68,0x01,0x3A,0x0A,0x60,0x00,0xF0,0xEB,0xF9,0x0C,0x48, \ +0x0C,0x49,0x00,0x68,0x09,0x68,0x88,0x42,0x05,0xD0,0x0B,0x48,0x00,0x68,0x00, \ +0x28,0x01,0xD1,0x00,0xF0,0x59,0xFA,0x00,0x20,0x05,0xB0,0xF0,0xBD,0x79,0x69, \ +0x00,0x29,0x00,0xD1,0x5D,0xE7,0xF9,0x68,0x01,0x31,0xF9,0x60,0x59,0xE7,0x24, \ +0x03,0x00,0x02,0x04,0x03,0x00,0x02,0x08,0x03,0x00,0x02,0x14,0x03,0x00,0x02, \ 
+0xFF,0xB5,0x07,0x1C,0xC0,0x20,0x0D,0x1C,0x09,0x9E,0x00,0xF0,0xC5,0xF9,0x02, \ +0x9A,0x91,0x08,0x04,0xD3,0xB9,0x68,0x29,0x40,0xA9,0x42,0x0E,0xD1,0x02,0xE0, \ +0xB9,0x68,0x29,0x40,0x0A,0xD0,0xB9,0x68,0x03,0x9B,0x00,0x24,0x19,0x60,0x02, \ +0x9A,0x51,0x08,0x3D,0xD3,0xB9,0x68,0xA9,0x43,0xB9,0x60,0x39,0xE0,0x07,0x24, \ +0x00,0x2E,0x36,0xD0,0x1D,0x49,0x0C,0x68,0x1D,0x49,0xA1,0x66,0xA5,0x67,0xE5, \ +0x1D,0x02,0x9A,0x79,0x35,0x2A,0x60,0x03,0x9B,0xE3,0x67,0xE7,0x66,0x39,0x69, \ +0x00,0x29,0x09,0xD0,0x21,0x67,0x39,0x69,0x49,0x6F,0x61,0x67,0x39,0x69,0x49, \ +0x6F,0x0C,0x67,0x39,0x69,0x4C,0x67,0x02,0xE0,0x3C,0x61,0x24,0x67,0x64,0x67, \ +0x79,0x69,0x01,0x31,0x79,0x61,0x07,0x21,0x21,0x63,0x01,0x21,0xA1,0x63,0x0E, \ +0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xE6,0x64,0x00,0xF0,0x82,0xF9,0x01,0x23, \ +0xDE,0x42,0x03,0xD0,0xE0,0x1D,0x45,0x30,0x00,0xF0,0x97,0xFA,0x20,0x1C,0x00, \ +0xF0,0xCE,0xFA,0x68,0x68,0x04,0xB0,0xF0,0xBD,0x00,0xF0,0x73,0xF9,0x20,0x1C, \ +0xF9,0xE7,0x00,0x00,0x04,0x03,0x00,0x02,0xF9,0x48,0x00,0x00,0x24,0x03,0x00, \ +0x02,0x00,0xB5,0xFF,0xF7,0xE7,0xFB,0xFF,0xF7,0xC1,0xFD,0x00,0xF0,0x9F,0xFB, \ +0x00,0xF0,0xA5,0xFB,0x00,0xF0,0x05,0xFA,0x00,0xF0,0xA9,0xFB,0x00,0xF0,0xAF, \ +0xFB,0x00,0xBD,0x00,0xA0,0x00,0x47,0x13,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1, \ +0x48,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0xFC,0xFF,0xFF, \ +0x0A,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x34,0x10,0x9F,0xE5,0x00,0x00, \ +0x81,0xE5,0x04,0x20,0x90,0xE5,0x18,0x30,0x90,0xE5,0x01,0x20,0x82,0xE2,0x04, \ +0x20,0x80,0xE5,0x20,0x20,0x9F,0xE5,0x08,0xD0,0x90,0xE5,0x00,0x30,0x82,0xE5, \ +0x03,0x00,0xBD,0xE8,0x00,0x00,0x50,0xE3,0x01,0xF0,0x6F,0xE1,0xF0,0x80,0xFD, \ +0x08,0xFF,0xDF,0xFD,0xE8,0x08,0x03,0x00,0x02,0x04,0x03,0x00,0x02,0x8C,0x03, \ +0x00,0x02,0xF0,0xB5,0x82,0xB0,0x4E,0x4B,0x98,0x42,0x01,0xD0,0x02,0xB0,0xF0, \ +0xBD,0x00,0x20,0x00,0x90,0x00,0x26,0xC0,0x20,0x00,0xF0,0x19,0xF9,0x4A,0x4D, \ +0x29,0x68,0x09,0x68,0x01,0x91,0x00,0x29,0x01,0xD0,0x01,0xAA,0x8A,0x61,0x29, \ +0x68,0x46,0x4C,0x0E,0x60,0x29,0x68,0x04,0x31,0x29,0x60,0x22,0x68,0x91,0x42, \ +0x02,0xD1,0x43,0x49,0x09,0x68,0x29,0x60,0x43,0x49,0x0E,0x60,0x00,0xF0,0x01, \ +0xF9,0xC0,0x20,0x00,0xF0,0xFE,0xF8,0x01,0x99,0x00,0x29,0x5C,0xD0,0x01,0x9C, \ +0x21,0x69,0xA1,0x42,0x01,0xD1,0x00,0x21,0x08,0xE0,0x62,0x69,0x4A,0x61,0x21, \ +0x69,0x62,0x69,0x11,0x61,0x22,0x69,0x01,0xA9,0x91,0x61,0x21,0x69,0x01,0x91, \ +0x21,0x68,0x20,0x29,0x03,0xD9,0x20,0x39,0x21,0x60,0x00,0x25,0x04,0xE0,0x27, \ +0x1D,0xA2,0xCF,0x21,0x60,0x00,0x29,0x03,0xD0,0x69,0x46,0xA1,0x61,0x24,0x61, \ +0x00,0xE0,0xA6,0x61,0x00,0xF0,0xD8,0xF8,0x00,0x2D,0x02,0xD0,0x38,0x1C,0x00, \ +0xF0,0xEE,0xFB,0xC0,0x20,0x00,0xF0,0xD0,0xF8,0xA2,0x69,0x69,0x46,0x8A,0x42, \ +0x25,0xD1,0x21,0x68,0x20,0x29,0x01,0xD9,0x1F,0x21,0x00,0xE0,0x01,0x39,0x20, \ +0x4D,0x89,0x00,0x2A,0x68,0x89,0x18,0x1F,0x4A,0x12,0x68,0x91,0x42,0x07,0xD3, \ +0x89,0x1A,0x00,0xD5,0x03,0x31,0x1D,0x4A,0x89,0x10,0x12,0x68,0x89,0x00,0x89, \ +0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x22,0x61,0x0A,0x68,0x52,0x69,0x62,0x61, \ +0x14,0x61,0x0A,0x68,0x54,0x61,0xA1,0x61,0x03,0xE0,0x24,0x61,0xA1,0x61,0x64, \ +0x61,0x0C,0x60,0x00,0xF0,0xA4,0xF8,0xC0,0x20,0x00,0xF0,0xA1,0xF8,0x01,0x99, \ +0x00,0x29,0xA2,0xD1,0x0F,0x49,0x09,0x68,0x00,0x29,0x10,0xD1,0x0E,0x4C,0x03, \ +0x21,0x22,0x68,0x11,0x63,0x22,0x68,0x01,0x21,0x91,0x63,0x0C,0x4A,0x11,0x68, \ +0x01,0x31,0x11,0x60,0x00,0xF0,0x8D,0xF8,0x20,0x68,0x00,0xF0,0xE0,0xF9,0x6C, \ +0xE7,0x00,0xF0,0x87,0xF8,0x69,0xE7,0x4D,0x49,0x54,0x41,0x9C,0x03,0x00,0x02, \ +0x98,0x03,0x00,0x02,0x94,0x03,0x00,0x02,0xA0,0x03,0x00,0x02,0x04,0x03,0x00, \ 
+0x02,0x24,0x03,0x00,0x02,0x90,0xB5,0x07,0x1C,0x00,0x6B,0x04,0x28,0x0C,0xD1, \ +0xC0,0x20,0x00,0xF0,0x70,0xF8,0x0A,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0x00, \ +0xF0,0x6A,0xF8,0x38,0x1C,0x00,0xF0,0x77,0xF8,0x90,0xBD,0xC0,0x20,0x00,0xF0, \ +0x63,0xF8,0xBC,0x6E,0x00,0xF0,0x60,0xF8,0x00,0x2C,0xF6,0xD0,0x38,0x1C,0x00, \ +0xF0,0x75,0xFB,0x90,0xBD,0x24,0x03,0x00,0x02,0x80,0xB5,0x0C,0x4F,0x39,0x68, \ +0x88,0x6C,0x49,0x6C,0x00,0xF0,0x68,0xFB,0xC0,0x20,0x00,0xF0,0x4E,0xF8,0x3A, \ +0x68,0x01,0x21,0x11,0x63,0x3A,0x68,0x91,0x63,0x06,0x49,0x0A,0x68,0x01,0x32, \ +0x0A,0x60,0x00,0xF0,0x43,0xF8,0x38,0x68,0x00,0xF0,0x96,0xF9,0x80,0xBD,0x00, \ +0x00,0x04,0x03,0x00,0x02,0x24,0x03,0x00,0x02,0x00,0xA3,0x18,0x47,0x10,0x20, \ +0x90,0xE5,0x03,0x20,0xC2,0xE3,0x48,0x20,0x42,0xE2,0x01,0x30,0xA0,0xE3,0x00, \ +0x30,0x82,0xE5,0x33,0x30,0xA0,0xE3,0x04,0x30,0x82,0xE5,0x00,0x30,0xA0,0xE3, \ +0x08,0x30,0x82,0xE5,0x0C,0x30,0x82,0xE5,0x10,0x30,0x82,0xE5,0x14,0x30,0x82, \ +0xE5,0x18,0x30,0x82,0xE5,0x1C,0x30,0x82,0xE5,0x20,0x30,0x82,0xE5,0x24,0x30, \ +0x82,0xE5,0x28,0x30,0x82,0xE5,0x2C,0x30,0x82,0xE5,0x0C,0x30,0x90,0xE5,0x30, \ +0x30,0x82,0xE5,0x00,0x30,0xA0,0xE3,0x34,0x30,0x82,0xE5,0x38,0x30,0x82,0xE5, \ +0x3C,0x30,0x82,0xE5,0x40,0x10,0x82,0xE5,0x44,0x30,0x82,0xE5,0x08,0x20,0x80, \ +0xE5,0x1E,0xFF,0x2F,0xE1,0x00,0xA3,0x18,0x47,0x00,0x30,0x0F,0xE1,0x3F,0x20, \ +0xA0,0xE3,0x02,0x10,0x03,0xE0,0x00,0x10,0x81,0xE1,0x01,0xF0,0x21,0xE1,0x02, \ +0x00,0xC3,0xE1,0x1E,0xFF,0x2F,0xE1,0xF0,0xB5,0x00,0x24,0x07,0x1C,0xC0,0x20, \ +0xFF,0xF7,0xEA,0xFF,0x29,0x49,0x2A,0x4D,0x0A,0x68,0x01,0x3A,0x0A,0x60,0xBA, \ +0x6B,0x00,0x21,0x00,0x2A,0x06,0xD0,0x3A,0x6B,0x01,0x2A,0x39,0xD0,0x02,0x2A, \ +0x37,0xD0,0xB9,0x63,0x07,0xE0,0x3A,0x6B,0x00,0x2A,0x32,0xD0,0x7A,0x6B,0x00, \ +0x2A,0x03,0xD0,0x79,0x63,0x03,0x21,0x39,0x63,0x2B,0xE0,0x39,0x63,0xF9,0x6A, \ +0x1D,0x4B,0x8E,0x00,0x9A,0x59,0x00,0x2A,0x05,0xD0,0x51,0x6A,0x0F,0x62,0x57, \ +0x62,0x79,0x62,0x3A,0x62,0x1E,0xE0,0x9F,0x51,0x3F,0x62,0x7F,0x62,0x17,0x4A, \ +0x3B,0x6C,0x16,0x68,0x33,0x43,0x13,0x60,0x2A,0x68,0x15,0x4B,0x00,0x2A,0x02, \ +0xD1,0x2F,0x60,0x19,0x60,0x0F,0xE0,0x1E,0x68,0xB1,0x42,0x0C,0xD2,0x19,0x60, \ +0xD3,0x6B,0x8B,0x42,0x08,0xD9,0xD1,0x6A,0x8B,0x42,0x04,0xD0,0x0E,0x49,0x12, \ +0x6C,0x0B,0x68,0x1A,0x43,0x0A,0x60,0x2F,0x60,0xFF,0xF7,0xA2,0xFF,0x0B,0x48, \ +0x00,0x68,0x29,0x68,0x88,0x42,0x04,0xD0,0x0A,0x48,0x00,0x68,0x00,0x28,0x00, \ +0xD1,0x01,0x24,0x20,0x1C,0xF0,0xBD,0x24,0x03,0x00,0x02,0x08,0x03,0x00,0x02, \ +0x3C,0x0A,0x00,0x02,0x18,0x03,0x00,0x02,0x20,0x03,0x00,0x02,0x1C,0x03,0x00, \ +0x02,0x04,0x03,0x00,0x02,0x14,0x03,0x00,0x02,0x00,0xA0,0x00,0x47,0x00,0x00, \ +0xA0,0xE3,0x00,0x10,0x0F,0xE1,0x20,0x10,0x81,0xE3,0xF3,0x40,0x2D,0xE9,0xD3, \ +0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x2C,0x30,0x9F,0xE5,0x00,0x00,0x93,0xE5, \ +0x28,0x20,0x9F,0xE5,0x00,0x10,0x92,0xE5,0x08,0xD0,0x80,0xE5,0x00,0x40,0xA0, \ +0xE3,0x00,0x00,0x51,0xE3,0x02,0x00,0x00,0x0A,0x1C,0x10,0x90,0xE5,0x00,0x40, \ +0x82,0xE5,0x18,0x10,0x80,0xE5,0x00,0x40,0x83,0xE5,0x02,0xFF,0xFF,0xEA,0x04, \ +0x03,0x00,0x02,0x8C,0x03,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41,0x60, \ +0xF7,0x46,0x00,0x00,0xB8,0x03,0x00,0x02,0x90,0xB5,0x07,0x1C,0xC0,0x20,0xFF, \ +0xF7,0x4D,0xFF,0xB9,0x69,0x00,0x24,0x00,0x29,0x16,0xD0,0x3A,0x69,0xBA,0x42, \ +0x04,0xD1,0x0A,0x68,0xBA,0x42,0x0F,0xD1,0x0C,0x60,0x0D,0xE0,0x79,0x69,0x51, \ +0x61,0x39,0x69,0x7A,0x69,0x11,0x61,0xB9,0x69,0x0A,0x68,0xBA,0x42,0x04,0xD1, \ +0x3A,0x69,0x91,0x61,0x39,0x69,0xBA,0x69,0x11,0x60,0xBC,0x61,0xFF,0xF7,0x30, \ +0xFF,0x20,0x1C,0x90,0xBD,0xB0,0xB5,0x07,0x1C,0xC4,0x6E,0xC0,0x20,0xFF,0xF7, \ 
+0x28,0xFF,0xB9,0x6E,0x00,0x29,0x38,0xD0,0x00,0x2C,0x36,0xD0,0x21,0x68,0x1C, \ +0x4B,0x99,0x42,0x32,0xD1,0x00,0x25,0xBD,0x66,0x39,0x6F,0xB9,0x42,0x01,0xD1, \ +0x25,0x61,0x06,0xE0,0x21,0x61,0x79,0x6F,0x3A,0x6F,0x51,0x67,0x39,0x6F,0x7A, \ +0x6F,0x11,0x67,0x61,0x69,0x01,0x39,0x61,0x61,0x39,0x6B,0x07,0x29,0x10,0xD1, \ +0xFA,0x1D,0x79,0x32,0x51,0x60,0x10,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xFF, \ +0xF7,0x02,0xFF,0x38,0x1C,0xFF,0xF7,0x0F,0xFF,0x00,0x28,0x04,0xD0,0xFF,0xF7, \ +0x75,0xFF,0x01,0xE0,0xFF,0xF7,0xF8,0xFE,0x78,0x6E,0x00,0x28,0x04,0xD0,0xF8, \ +0x1D,0x45,0x30,0xFF,0xF7,0x9F,0xFF,0x00,0xE0,0xFD,0x64,0xC0,0x20,0xFF,0xF7, \ +0xEC,0xFE,0xFF,0xF7,0xEA,0xFE,0xB0,0xBD,0x00,0x00,0x4E,0x44,0x56,0x44,0x24, \ +0x03,0x00,0x02,0x80,0xB5,0x07,0x1C,0xC0,0x20,0xFF,0xF7,0xDF,0xFE,0x39,0x68, \ +0x00,0x29,0x27,0xD0,0xBA,0x69,0x00,0x2A,0x24,0xD1,0x20,0x29,0x01,0xD9,0x1F, \ +0x21,0x00,0xE0,0x01,0x39,0x11,0x4A,0x89,0x00,0x12,0x68,0x89,0x18,0x10,0x4A, \ +0x12,0x68,0x91,0x42,0x07,0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x0E,0x4A,0x89, \ +0x10,0x12,0x68,0x89,0x00,0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x3A,0x61, \ +0x0A,0x68,0x52,0x69,0x7A,0x61,0x17,0x61,0x0A,0x68,0x57,0x61,0xB9,0x61,0x03, \ +0xE0,0x3F,0x61,0xB9,0x61,0x7F,0x61,0x0F,0x60,0xFF,0xF7,0xB2,0xFE,0x00,0x20, \ +0x80,0xBD,0x9C,0x03,0x00,0x02,0x98,0x03,0x00,0x02,0x94,0x03,0x00,0x02,0xF0, \ +0xB5,0x05,0x1C,0xC0,0x20,0xFF,0xF7,0xA5,0xFE,0x67,0x49,0x67,0x4C,0x0A,0x68, \ +0x67,0x4F,0x01,0x3A,0x0A,0x60,0xAA,0x6B,0x00,0x2A,0x74,0xD0,0x00,0x26,0xAE, \ +0x63,0xEA,0x6A,0x2B,0x6A,0xAB,0x42,0x26,0xD0,0x69,0x6A,0x59,0x62,0x29,0x6A, \ +0x6B,0x6A,0x19,0x62,0x91,0x00,0x5F,0x4A,0x53,0x58,0xAB,0x42,0x11,0xD1,0x2B, \ +0x6A,0x53,0x50,0x5D,0x49,0x0B,0x68,0x00,0x2B,0x02,0xD0,0x2E,0x6C,0xB3,0x43, \ +0x0B,0x60,0x5B,0x49,0x0B,0x68,0x9D,0x42,0x04,0xD1,0x5A,0x4B,0x1B,0x68,0x9B, \ +0x00,0xD2,0x58,0x0A,0x60,0xFF,0xF7,0x78,0xFE,0x55,0x49,0x38,0x68,0x09,0x68, \ +0x88,0x42,0x60,0xD0,0x20,0x68,0x00,0x28,0x5E,0xD1,0x95,0xE0,0x00,0x26,0x4E, \ +0x4B,0x92,0x00,0x9E,0x50,0x2A,0x6C,0xD3,0x43,0x50,0x4A,0x16,0x68,0x33,0x40, \ +0x13,0x60,0x4B,0x4A,0x12,0x68,0x00,0x2A,0x03,0xD0,0x2E,0x6C,0xB2,0x43,0x48, \ +0x4E,0x32,0x60,0x1A,0x06,0x12,0x0E,0x02,0xD0,0x49,0x4B,0x9A,0x5C,0x14,0xE0, \ +0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x46,0x4B,0x9A,0x5C,0x08,0x32,0x0C, \ +0xE0,0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x42,0x4B,0x9A,0x5C,0x10,0x32, \ +0x04,0xE0,0x1A,0x0A,0x29,0xD0,0x3F,0x4B,0x9A,0x5C,0x18,0x32,0x3B,0x4B,0x1A, \ +0x60,0x39,0x4A,0x12,0x68,0x95,0x42,0x4D,0xD1,0x38,0x4E,0x35,0x4B,0x32,0x68, \ +0x36,0x4D,0x92,0x00,0x9A,0x58,0x2A,0x60,0x33,0x4A,0x12,0x68,0x00,0x2A,0x42, \ +0xD0,0x0E,0x1C,0x09,0x68,0x01,0x31,0x31,0x60,0xFF,0xF7,0x2B,0xFE,0xC0,0x20, \ +0xFF,0xF7,0x28,0xFE,0x00,0xE0,0x42,0xE0,0x31,0x68,0x01,0x39,0x31,0x60,0x2A, \ +0x49,0x0A,0x68,0x11,0x06,0x09,0x0E,0x0D,0xD0,0x2B,0x4B,0x59,0x5C,0x1E,0xE0, \ +0x28,0x4B,0x20,0x21,0x19,0x60,0x25,0x49,0x00,0x26,0x0E,0x60,0xFF,0xF7,0x13, \ +0xFE,0x38,0xE0,0x39,0xE0,0x38,0xE0,0x13,0x0A,0x19,0x06,0x09,0x0E,0x03,0xD0, \ +0x22,0x4B,0x59,0x5C,0x08,0x31,0x0B,0xE0,0x1B,0x0A,0x19,0x06,0x09,0x0E,0x03, \ +0xD0,0x1E,0x4B,0x59,0x5C,0x10,0x31,0x03,0xE0,0x19,0x0A,0x1C,0x4B,0x59,0x5C, \ +0x18,0x31,0x15,0x4B,0x89,0x00,0x59,0x58,0x17,0x4E,0xCB,0x6B,0x36,0x68,0xB3, \ +0x42,0x05,0xD8,0x29,0x60,0x09,0x6C,0x11,0x4B,0xC9,0x43,0x11,0x40,0x19,0x60, \ +0xFF,0xF7,0xEC,0xFD,0x0F,0x4A,0x38,0x68,0x11,0x68,0x88,0x42,0x0F,0xD0,0x20, \ +0x68,0x00,0x28,0x0C,0xD1,0x09,0xE0,0xFF,0xF7,0xE1,0xFD,0x0A,0x49,0x38,0x68, \ +0x09,0x68,0x88,0x42,0x04,0xD0,0x20,0x68,0x00,0x28,0x01,0xD1,0xFF,0xF7,0x51, \ 
+0xFE,0xF0,0xBD,0x24,0x03,0x00,0x02,0x14,0x03,0x00,0x02,0x04,0x03,0x00,0x02, \ +0x3C,0x0A,0x00,0x02,0x1C,0x03,0x00,0x02,0x08,0x03,0x00,0x02,0x20,0x03,0x00, \ +0x02,0x18,0x03,0x00,0x02,0x3C,0x09,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60, \ +0x41,0x60,0xF7,0x46,0x00,0x00,0xC0,0x03,0x00,0x02,0x02,0x48,0x00,0x21,0x01, \ +0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xC8,0x03,0x00,0x02,0x02,0x48,0x00,0x21, \ +0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xD0,0x03,0x00,0x02,0x02,0x48,0x00, \ +0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xD8,0x03,0x00,0x02,0x4B,0x08, \ +0x02,0x1C,0x02,0xD1,0x00,0xF0,0xBD,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9,0x00, \ +0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91,0x42, \ +0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0xF7,0x46,0x00, \ +0x00,0xB0,0xB5,0x0D,0x48,0x42,0x6E,0x03,0x6E,0x97,0x00,0xC1,0x1F,0x75,0x39, \ +0x9C,0x00,0x0C,0x59,0xCD,0x59,0x2C,0x19,0xCC,0x51,0x59,0x1E,0x36,0x23,0x00, \ +0x29,0x01,0x66,0x03,0xDA,0x51,0x1E,0x41,0x66,0x03,0x66,0x03,0xE0,0x51,0x1E, \ +0x41,0x66,0x00,0xD5,0x43,0x66,0x60,0x00,0x40,0x08,0xB0,0xBD,0x5C,0x04,0x00, \ +0x02,0x80,0xB5,0x09,0x49,0x17,0x22,0x0A,0x66,0x36,0x22,0x4A,0x66,0x07,0x4A, \ +0x00,0x21,0x03,0x0C,0x1F,0x18,0x8B,0x00,0xD7,0x50,0x05,0x4B,0x01,0x31,0x58, \ +0x43,0x05,0x4B,0xC0,0x18,0x37,0x29,0xF4,0xDB,0x80,0xBD,0x5C,0x04,0x00,0x02, \ +0xE0,0x03,0x00,0x02,0xCD,0x0D,0x01,0x00,0xE1,0x19,0xD6,0x66,0xCB,0x17,0x59, \ +0x40,0xC9,0x1A,0xC2,0x17,0x50,0x40,0x80,0x1A,0x0C,0xB4,0x4B,0x08,0x02,0x1C, \ +0x02,0xD1,0x00,0xF0,0x64,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9,0x00,0x23,0x91, \ +0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91,0x42,0xF9,0xD3, \ +0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0x0C,0xBC,0x5A,0x40,0x50, \ +0x40,0x80,0x1A,0x59,0x40,0xC9,0x1A,0x70,0x47,0x43,0x1A,0x93,0x42,0x30,0xD3, \ +0x84,0x46,0x8B,0x07,0x07,0xD0,0x52,0x1E,0x29,0xD3,0x0B,0x78,0x03,0x70,0x40, \ +0x1C,0x49,0x1C,0x8B,0x07,0xF7,0xD1,0x83,0x07,0x17,0xD1,0x10,0x3A,0x05,0xD3, \ +0xB0,0xB4,0xB8,0xC9,0xB8,0xC0,0x10,0x3A,0xFB,0xD2,0xB0,0xBC,0x0C,0x32,0x0F, \ +0xD3,0x08,0xC9,0x08,0xC0,0x12,0x1F,0xFB,0xD2,0x0A,0xE0,0x08,0xC9,0x03,0x70, \ +0x1B,0x0A,0x43,0x70,0x1B,0x0A,0x83,0x70,0x1B,0x0A,0xC3,0x70,0x00,0x1D,0x12, \ +0x1F,0xF4,0xD2,0xD2,0x1C,0x05,0xD3,0x0B,0x78,0x03,0x70,0x49,0x1C,0x40,0x1C, \ +0x52,0x1E,0xF9,0xD2,0x60,0x46,0xF7,0x46,0x03,0x1C,0x0B,0x43,0x13,0x43,0x9B, \ +0x07,0x04,0xD1,0x12,0x1F,0x8B,0x58,0x83,0x50,0xFB,0xD1,0xF7,0x46,0x52,0x1E, \ +0x8B,0x5C,0x83,0x54,0xFB,0xD1,0xF7,0x46,0x00,0x00,0x00,0x47,0x08,0x47,0x10, \ +0x47,0x18,0x47,0x20,0x47,0x28,0x47,0x30,0x47,0x38,0x47,0x78,0x47,0x00,0x00, \ +0x2C,0xC0,0x9F,0xE5,0xFF,0x7F,0x8C,0xE8,0x3C,0xC0,0x8C,0xE2,0x0C,0x00,0x8F, \ +0xE2,0x3C,0x10,0x4C,0xE2,0x04,0xE0,0x4E,0xE2,0x00,0xE0,0x8C,0xE5,0xE8,0xEC, \ +0xFF,0xEA,0x20,0x00,0x00,0x80,0x44,0x69,0x76,0x69,0x64,0x65,0x20,0x62,0x79, \ +0x20,0x7A,0x65,0x72,0x6F,0x00,0x00,0xC4,0x04,0x00,0x02,0x78,0x47,0x00,0x00, \ +0x01,0xE0,0x8E,0xE3,0x04,0x00,0xA0,0xE3,0x00,0x00,0x90,0xE5,0xFF,0x04,0x00, \ +0xE2,0xEA,0x04,0x50,0xE3,0x01,0x00,0xA0,0x03,0x00,0x00,0xA0,0x13,0x1E,0xFF, \ +0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78, \ +0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x05,0x0B,0x40,0xE2,0x1B,0x0B,0x50,0xE3, \ +0x01,0x00,0xA0,0x33,0x00,0x00,0xA0,0x23,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00, \ +0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0, \ +0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x00, \ +0x00,0xA0,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3, \ 
+0x00,0x00,0x8F,0xE2,0x1E,0xFF,0x2F,0xE1,0x55,0x6E,0x6B,0x6E,0x6F,0x77,0x6E, \ +0x20,0x45,0x72,0x72,0x6F,0x72,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x42,0x72, \ +0x61,0x6E,0x63,0x68,0x20,0x54,0x68,0x72,0x6F,0x75,0x67,0x68,0x20,0x5A,0x65, \ +0x72,0x6F,0x00,0x01,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65, \ +0x64,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E,0x00,0x00, \ +0x00,0x02,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65,0x64,0x20, \ +0x53,0x57,0x49,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E, \ +0x00,0x00,0x00,0x03,0x00,0x02,0x00,0x50,0x72,0x65,0x66,0x65,0x74,0x63,0x68, \ +0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x04,0x00,0x02,0x00,0x44,0x61,0x74, \ +0x61,0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x05,0x00,0x02,0x00,0x41,0x64, \ +0x64,0x72,0x65,0x73,0x73,0x20,0x45,0x78,0x63,0x65,0x70,0x74,0x69,0x6F,0x6E, \ +0x00,0x00,0x00,0x06,0x00,0x02,0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65, \ +0x64,0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x07,0x00,0x02, \ +0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65,0x64,0x20,0x46,0x61,0x73,0x74, \ +0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x00,0x00,0x00,0x60, \ +0x4E,0x00,0x00,0x78,0x4E,0x00,0x00,0x94,0x4E,0x00,0x00,0xB4,0x4E,0x00,0x00, \ +0xC8,0x4E,0x00,0x00,0xD8,0x4E,0x00,0x00,0xF0,0x4E,0x00,0x00,0x08,0x4F,0x00, \ +0x00,0x28,0x20,0x4F,0xE2,0x00,0x01,0x92,0xE7,0x80,0xEC,0xFF,0xEA,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x40,0x2D,0xE9,0x50,0x00,0x9F,0xE5,0x50,0x10,0x9F,0xE5,0x01, \ +0x20,0xA0,0xE1,0x4C,0x40,0x9F,0xE5,0x04,0x20,0x82,0xE0,0x05,0x00,0x00,0xEB, \ +0x44,0x20,0x9F,0xE5,0x44,0x00,0x9F,0xE5,0x00,0x10,0xA0,0xE1,0x02,0x10,0x81, \ +0xE0,0x05,0x00,0x00,0xEB,0x00,0x80,0xBD,0xE8,0x02,0x00,0x51,0xE1,0x04,0x30, \ +0x90,0x34,0x04,0x30,0x81,0x34,0xFB,0xFF,0xFF,0x3A,0x0E,0xF0,0xA0,0xE1,0x00, \ +0x20,0xA0,0xE3,0x01,0x00,0x50,0xE1,0x04,0x20,0x80,0x34,0xFB,0xFF,0xFF,0x3A, \ +0x0E,0xF0,0xA0,0xE1,0xD0,0x51,0x00,0x00,0x00,0x00,0x00,0x02,0x04,0x05,0x00, \ +0x00,0xC8,0x06,0x00,0x00,0x04,0x05,0x00,0x02,0x78,0x47,0x00,0x00,0xD3,0x00, \ +0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3,0x04, \ +0x10,0x80,0xE5,0x0C,0x10,0x80,0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90,0xE5, \ +0x00,0x00,0xA0,0xE3,0x10,0xFF,0x2F,0xE1,0x00,0x00,0xA0,0xE1,0x00,0x00,0xA0, \ +0xE1,0x16,0x9B,0xFF,0x02,0x04,0xFF,0x04,0x48,0xFF,0x06,0x48,0xFF,0x08,0x80, \ +0xFF,0x0A,0x03,0xFF,0x0C,0x04,0xFF,0x0E,0x00,0xFF,0x10,0x00,0xFF,0x12,0xA2, \ +0xFF,0x14,0xB4,0xFF,0x16,0x1B,0xFF,0x18,0x00,0xFF,0x1A,0x00,0xFF,0x1C,0x00, \ +0xFF,0x1E,0x5C,0xFF,0x20,0x82,0xFF,0x22,0x20,0xFF,0x24,0xC4,0xFF,0x26,0x17, \ +0xFF,0x28,0x0A,0xFF,0x2A,0x0F,0xFF,0x2C,0x20,0xFF,0x2E,0x0C,0xFF,0x30,0x2D, \ +0xFF,0x32,0x20,0xFF,0x34,0x90,0xFF,0x36,0x18,0xFF,0x38,0x76,0xFF,0x3A,0x0A, \ +0xFF,0x3C,0x24,0xFF,0x3E,0xEF,0xFF,0x40,0x00,0xFF,0x42,0x00,0xFF,0x44,0x00, \ +0xFF,0x46,0x0C,0xFF,0x48,0x26,0xFF,0x4A,0x5B,0xFF,0x4C,0x7F,0xFF,0x4E,0x29, \ +0xFF,0x50,0x0F,0xFF,0x52,0x20,0xFF,0x54,0x20,0xFF,0x56,0x10,0xFF,0x58,0x10, \ +0xFF,0x5A,0x10,0xFF,0x5C,0x10,0xFF,0x5E,0x1E,0xFF,0x60,0x1E,0xFF,0x62,0x08, \ +0xFF,0x2C,0x20,0xFF,0x2E,0x0C,0xFF,0x2C,0x21,0xFF,0x2E,0x10,0xFF,0x2C,0x22, \ +0xFF,0x2E,0x14,0xFF,0x2C,0x23,0xFF,0x2E,0x18,0xFF,0x2C,0x24,0xFF,0x2E,0x1C, \ 
+0xFF,0x2C,0x25,0xFF,0x2E,0x20,0xFF,0x2C,0x26,0xFF,0x2E,0x24,0xFF,0x2C,0x27, \ +0xFF,0x2E,0x28,0xFF,0x2C,0x28,0xFF,0x2E,0x2E,0xFF,0x2C,0x29,0xFF,0x2E,0x34, \ +0xFF,0x2C,0x2A,0xFF,0x2E,0x38,0xFF,0x2C,0x2B,0xFF,0x2E,0x3C,0xFF,0x2C,0x2C, \ +0xFF,0x2E,0x3F,0xFF,0x2C,0x2D,0xFF,0x2E,0x43,0xFF,0x2C,0x2E,0xFF,0x2E,0x46, \ +0xFF,0x2C,0x2F,0xFF,0x2E,0x48,0xFF,0x2C,0x30,0xFF,0x2E,0x46,0xFF,0x2C,0x31, \ +0xFF,0x2E,0x50,0xFF,0x2C,0x32,0xFF,0x2E,0x55,0xFF,0x2C,0x33,0xFF,0x2E,0x5A, \ +0xFF,0x2C,0x34,0xFF,0x2E,0x63,0xFF,0x2C,0x35,0xFF,0x2E,0x6D,0xFF,0x2C,0x36, \ +0xFF,0x2E,0x76,0xFF,0x2C,0x37,0xFF,0x2E,0x7F,0xFF,0x2C,0x38,0xFF,0x2E,0x7F, \ +0xFF,0x2C,0x39,0xFF,0x2E,0x7F,0xFF,0x2C,0x3A,0xFF,0x2E,0x7F,0xFF,0x2C,0x3B, \ +0xFF,0x2E,0x7F,0xFF,0x2C,0x3C,0xFF,0x2E,0x7F,0xFF,0x2C,0x3D,0xFF,0x2E,0x7F, \ +0xFF,0x2C,0x3E,0xFF,0x2E,0x7F,0xFF,0x2C,0x3F,0xFF,0x2E,0x7F,0xFF,0x00,0x00, \ +0x58,0x00,0x00,0x00,0x85,0x21,0x00,0x00,0x18,0x17,0x00,0x00,0x0A,0x08,0x00, \ +0x00,0x58,0x00,0x00,0x00,0x9D,0x21,0x00,0x00,0x90,0x1F,0xAC,0x1F,0xB6,0x1F, \ +0x00,0x20,0x0A,0x20,0x14,0x20,0x1E,0x20,0x28,0x20,0x32,0x20,0x3C,0x20,0x86, \ +0x20,0x90,0x20,0x9A,0x20,0xA4,0x20,0xBC,0x20,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x14,0x00,0x0A,0x00,0x90,0x00,0x30,0x00,0x08,0x06,0x07,0x00,0x82, \ +0x84,0x8B,0x96,0x09,0x04,0x02,0x41,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11, \ +0x11,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00, \ +0x00,0x04,0xAC,0x6C,0x32,0x70,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x64,0x00,0x30,0x75,0x64,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x04,0x03,0x00,0x04,0xAC,0x6C,0x32,0x70,0x55,0x4E,0x48,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x01, \ +0x00,0x00,0x00,0xFA,0x00,0x00,0x00,0xFA,0x00,0x00,0x2A,0x09,0x2A,0x09,0x08, \ +0x00,0x40,0x00,0x08,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x41,0x54,0x4D,0x45,0x4C,0x5F,0x41,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x01,0x00,0x05,0x00,0x00,0x00,0x00, \ +0x08,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x5A,0x00,0x2C,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0, \ +0xC0,0xC0,0xC0,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x06,0x0C,0x18,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x07,0xFF,0x07,0xFF,0x1F, \ +0x00,0x06,0x00,0x1E,0x00,0x20,0xFF,0x3F,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x01,0x10,0x01,0xFE,0x01,0x00, \ +0x08,0xEB,0x03,0x03,0x76,0x00,0x01,0x00,0x00,0x00,0x01,0x09,0x02,0x20,0x00, \ 
+0x01,0x01,0x00,0x80,0xFA,0x09,0x04,0x00,0x00,0x02,0xFF,0x00,0xFF,0x00,0x07, \ +0x05,0x85,0x02,0x40,0x00,0x00,0x07,0x05,0x02,0x02,0x40,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0xAA,0xAA,0x03,0x00,0x00,0x00,0xAA,0xAA,0x03,0x00,0x00,0xF8,0x37, \ +0x81,0xF3,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x43,0x6F,0x70,0x79,0x72,0x69, \ +0x67,0x68,0x74,0x20,0x28,0x63,0x29,0x20,0x31,0x39,0x39,0x36,0x2D,0x32,0x30, \ +0x30,0x30,0x20,0x45,0x78,0x70,0x72,0x65,0x73,0x73,0x20,0x4C,0x6F,0x67,0x69, \ +0x63,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2A,0x20,0x54,0x68,0x72,0x65,0x61,0x64, \ +0x58,0x20,0x54,0x48,0x55,0x4D,0x42,0x2D,0x46,0x2F,0x41,0x52,0x4D,0x20,0x56, \ +0x65,0x72,0x73,0x69,0x6F,0x6E,0x20,0x47,0x33,0x2E,0x30,0x66,0x2E,0x33,0x2E, \ +0x30,0x62,0x20,0x2A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xF0,0xF0, \ +0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x47,0x2D,0x47,0x42,0x2D,0x47,0x4C,0x2D,0x4D,0x2D,0x44,0x2D,0x44, \ +0x4C,0x2D,0x4B,0x4D,0x4C,0x2D,0x43,0x4D,0x52,0x2D,0x48,0x4D,0x52,0x2D,0x4D, \ +0x4C,0x32,0x2D,0x47,0x5A,0x2D,0x4B,0x48,0x32,0x2D,0x43,0x4D,0x2D,0x52,0x50, \ +0x2D,0x54,0x43,0x2D,0x4E,0x48,0x2D,0x54,0x44,0x2D,0x41,0x50,0x2D,0x48,0x41, \ +0x2D,0x47,0x46,0x2D,0x44,0x44,0x2D,0x41,0x54,0x2D,0x4D,0x46,0x2D,0x4D,0x53, \ +0x2D,0x44,0x57,0x2D,0x55,0x53,0x41,0x2D,0x43,0x41,0x2D,0x53,0x44,0x2D,0x53, \ +0x44,0x53,0x55,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x85,0x8E,0xD7,0x66,0x09, \ +0x8C,0xD3,0xD5,0xF5,0xD8,0x09,0x0A,0xFB,0x87,0x1F,0xBF,0x67,0xF7,0x8D,0xCB, \ +0x69,0x07,0xF7,0xBD,0x34,0x12,0x3D,0x50,0xC8,0x84,0x4F,0x7F,0xA3,0x02,0xDE, \ +0x61,0xAE,0x8D,0x40,0xA7,0xE8,0xBD,0x24,0x7A,0xEA,0xA2,0x15,0x51,0x57,0x2E, \ +0xE6,0xBB,0xFF,0x7F,0xD5,0xF6,0x7A,0x83,0x2A,0x63,0x77,0x1D,0x86,0x13,0x7C, \ +0x2E,0x9F,0xE1,0x05,0x57,0x5F,0x69,0x2E,0x6B,0x93,0x87,0x6E,0x9A,0xA1,0x50, \ +0x94,0x0E,0x8B,0x72,0xAE,0x55,0xCC,0xC5,0xB1,0x8A,0x0A,0xB1,0xD7,0x72,0x6F, \ +0x85,0x17,0x5C,0x22,0xD0,0xA3,0xFD,0xC4,0x51,0x61,0x98,0xED,0x89,0x9F,0x82, \ +0xDB,0xF1,0x9D,0xC5,0xFB,0xBC,0x89,0xC1,0xEE,0x83,0x59,0xB1,0x59,0x63,0x30, \ 
+0x5C,0x50,0xCC,0xC9,0x5A,0xBC,0x9C,0xF9,0x30,0xE2,0x2F,0x42,0x5E,0xF6,0x39, \ +0xD2,0x7B,0x15,0x75,0xFB,0x58,0xC1,0x40,0x3E,0x9A,0xEB,0x27,0xD9,0xA2,0x82, \ +0xC5,0xC2,0xD6,0x69,0x05,0xB3,0x30,0x8E,0xED,0xD2,0xDD,0x83,0x10,0x41,0xA4, \ +0x1D,0x1F,0x15,0xE2,0x60,0x56,0xC5,0x2F,0xF3,0x04,0x99,0xEF,0x8E,0xE1,0x08, \ +0x32,0x59,0x4A,0x4C,0xED,0x7B,0x5B,0x40,0xFC,0x02,0x81,0xD9,0x41,0x53,0x51, \ +0xFA,0x3D,0xFF,0xAC,0xB5,0x6C,0x09,0x6D,0x1D,0xCC,0xB3,0x2B,0xFF,0x15,0x3D, \ +0x25,0x17,0x00,0x00,0x00,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00} + +#define FW_I3861_EXTERNAL { \ +0x00,0xB5,0x0D,0x49,0x00,0x20,0x08,0x70,0x0C,0x48,0x81,0x79,0xC0,0x79,0x00, \ +0x02,0x08,0x43,0x68,0x28,0x04,0xD0,0x03,0x21,0x0B,0x20,0xFC,0xF7,0x97,0xFE, \ +0x00,0xBD,0x08,0x21,0x0B,0x20,0xFC,0xF7,0x92,0xFE,0x00,0xF0,0x16,0xFA,0x01, \ +0x21,0x0B,0x20,0xFC,0xF7,0x8C,0xFE,0x00,0xBD,0x00,0x00,0x63,0x01,0x00,0x02, \ +0xAC,0x08,0x00,0x02,0x00,0xB5,0x21,0x48,0x01,0x78,0x08,0x29,0x34,0xD2,0x02, \ +0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x30,0x04,0x08,0x0C,0x30,0x10, \ +0x14,0x18,0x81,0x78,0x1B,0x4A,0x89,0x18,0x12,0xE0,0x81,0x78,0x1A,0x4A,0x89, \ +0x18,0x0E,0xE0,0x81,0x78,0x19,0x4A,0x89,0x18,0x0A,0xE0,0x81,0x78,0x18,0x4A, \ +0x89,0x18,0x06,0xE0,0x81,0x78,0x17,0x4A,0x89,0x18,0x02,0xE0,0x81,0x78,0x16, \ +0x4A,0x89,0x18,0x00,0x29,0x17,0xD0,0x43,0x78,0x00,0x22,0x00,0x2B,0x07,0xD9, \ +0x83,0x18,0x1B,0x79,0x01,0x32,0x0B,0x70,0x43,0x78,0x01,0x31,0x93,0x42,0xF7, \ +0xD8,0xFA,0xF7,0xCF,0xF9,0x01,0x21,0x01,0x20,0xFC,0xF7,0x4D,0xFE,0x00,0xBD, \ +0x04,0x21,0x01,0x20,0xFC,0xF7,0x48,0xFE,0x00,0xBD,0x03,0x21,0x01,0x20,0xFC, \ +0xF7,0x43,0xFE,0x00,0xBD,0xB4,0x08,0x00,0x02,0x14,0x01,0x00,0x02,0x74,0x00, \ +0x00,0x02,0xD0,0x00,0x00,0x02,0x98,0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x18, \ +0x00,0x00,0x02,0xF0,0xB5,0x34,0x4E,0x01,0x25,0xF0,0x1D,0x69,0x30,0x33,0x4A, \ +0xC5,0x72,0xD2,0x7D,0x2F,0x4F,0x31,0x2A,0x23,0xD0,0x07,0xDC,0x10,0x2A,0x1C, \ +0xD0,0x20,0x2A,0x1C,0xD0,0x30,0x2A,0x08,0xD1,0x02,0x21,0x06,0xE0,0x32,0x2A, \ +0x1A,0xD0,0x40,0x2A,0x1A,0xD0,0x41,0x2A,0x00,0xD1,0x06,0x21,0x49,0x00,0x28, \ +0x4A,0xFC,0x1D,0x19,0x34,0x51,0x5A,0xE2,0x79,0x01,0x3A,0x2B,0x1C,0x93,0x40, \ +0x19,0x40,0x0E,0xD1,0x03,0x21,0x03,0x20,0xFC,0xF7,0x0A,0xFE,0xF0,0xBD,0x00, \ +0x21,0xED,0xE7,0x01,0x21,0xEB,0xE7,0x03,0x21,0xE9,0xE7,0x04,0x21,0xE7,0xE7, \ +0x05,0x21,0xE5,0xE7,0x04,0x21,0x41,0x70,0x06,0x22,0x38,0x1C,0x1B,0x49,0xFA, \ +0xF7,0xF2,0xFB,0x20,0x22,0xB8,0x1D,0x19,0x49,0xFA,0xF7,0xED,0xFB,0xA0,0x7B, \ +0x18,0x49,0x60,0x36,0x48,0x71,0x00,0x20,0x70,0x73,0xA0,0x79,0x16,0x49,0x20, \ +0x23,0x88,0x74,0xE0,0x79,0xC8,0x74,0x38,0x8D,0x88,0x82,0x78,0x8D,0xC8,0x82, \ +0xB8,0x8D,0x08,0x83,0x12,0x48,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78,0x10, \ +0x23,0x19,0x43,0x01,0x70,0x0D,0x49,0x08,0x8B,0x81,0x02,0x03,0x20,0xFC,0xF7, \ +0x8B,0xF9,0x08,0x21,0x03,0x20,0x35,0x73,0xFC,0xF7,0xCE,0xFD,0xF0,0xBD,0x00, \ +0x00,0xB4,0x08,0x00,0x02,0x04,0x05,0x00,0x02,0x18,0x00,0x00,0x02,0x7C,0x01, \ +0x00,0x02,0x0C,0x01,0x00,0x02,0xEC,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0xD0, \ +0x00,0x00,0x02,0x63,0x01,0x00,0x02,0xF0,0xB5,0x33,0x49,0x31,0x4F,0xC9,0x7D, \ +0x31,0x29,0x23,0xD0,0x07,0xDC,0x10,0x29,0x1C,0xD0,0x20,0x29,0x1C,0xD0,0x30, \ 
+0x29,0x08,0xD1,0x02,0x20,0x06,0xE0,0x32,0x29,0x1A,0xD0,0x40,0x29,0x1A,0xD0, \ +0x41,0x29,0x00,0xD1,0x06,0x20,0x40,0x00,0x29,0x49,0xFC,0x1D,0x19,0x34,0x08, \ +0x5A,0xE1,0x79,0x4A,0x1E,0x01,0x21,0x91,0x40,0x08,0x40,0x0E,0xD1,0x03,0x21, \ +0x04,0x20,0xFC,0xF7,0x95,0xFD,0xF0,0xBD,0x00,0x20,0xED,0xE7,0x01,0x20,0xEB, \ +0xE7,0x03,0x20,0xE9,0xE7,0x04,0x20,0xE7,0xE7,0x05,0x20,0xE5,0xE7,0x1D,0x4D, \ +0xA0,0x79,0xE9,0x1D,0x39,0x31,0x88,0x70,0xE9,0x1D,0x38,0x1C,0x06,0x22,0x35, \ +0x31,0xFA,0xF7,0x79,0xFB,0x20,0x22,0xB8,0x1D,0xE9,0x1D,0x15,0x31,0xFA,0xF7, \ +0x73,0xFB,0xA0,0x7A,0x15,0x4E,0x00,0x25,0x70,0x71,0x15,0x48,0x45,0x73,0xA1, \ +0x79,0x10,0x30,0x02,0x29,0x02,0xD1,0x03,0x21,0xC1,0x72,0x03,0xE0,0x01,0x29, \ +0x0F,0xD1,0x04,0x21,0xC1,0x72,0x08,0x21,0x04,0x20,0xFC,0xF7,0x64,0xFD,0xE0, \ +0x79,0xFA,0xF7,0x63,0xFA,0x38,0x8D,0x81,0x02,0x05,0x20,0xFC,0xF7,0x14,0xF9, \ +0xB5,0x70,0xF0,0xBD,0x03,0x21,0x04,0x20,0xFC,0xF7,0x56,0xFD,0xF0,0xBD,0x00, \ +0x00,0xB4,0x08,0x00,0x02,0x18,0x00,0x00,0x02,0x7C,0x01,0x00,0x02,0xD0,0x00, \ +0x00,0x02,0x14,0x01,0x00,0x02,0x64,0x05,0x00,0x02,0xF0,0xB5,0x33,0x4D,0x10, \ +0x23,0x29,0x78,0x30,0x4C,0x99,0x43,0x29,0x70,0x29,0x78,0x20,0x23,0x99,0x43, \ +0x29,0x70,0x2F,0x49,0xC9,0x7D,0x31,0x29,0x24,0xD0,0x07,0xDC,0x10,0x29,0x1D, \ +0xD0,0x20,0x29,0x1D,0xD0,0x30,0x29,0x08,0xD1,0x02,0x20,0x06,0xE0,0x32,0x29, \ +0x1B,0xD0,0x40,0x29,0x1B,0xD0,0x41,0x29,0x00,0xD1,0x06,0x20,0x40,0x00,0x25, \ +0x49,0xE7,0x1D,0x19,0x37,0x09,0x5A,0xF8,0x79,0x01,0x26,0x42,0x1E,0x33,0x1C, \ +0x93,0x40,0x19,0x40,0x0E,0xD1,0x03,0x21,0x05,0x20,0xFC,0xF7,0x19,0xFD,0xF0, \ +0xBD,0x00,0x20,0xEC,0xE7,0x01,0x20,0xEA,0xE7,0x03,0x20,0xE8,0xE7,0x04,0x20, \ +0xE6,0xE7,0x05,0x20,0xE4,0xE7,0xFA,0xF7,0x0E,0xFA,0x39,0x7A,0x18,0x48,0x41, \ +0x71,0xB9,0x79,0x01,0x29,0x1D,0xD1,0x16,0x49,0x07,0x1C,0xCA,0x1D,0x39,0x32, \ +0x96,0x70,0x42,0x79,0xA0,0x1D,0x1C,0x31,0x04,0x1C,0xFA,0xF7,0xF5,0xFA,0x7A, \ +0x79,0x11,0x49,0x20,0x1C,0xFA,0xF7,0xF0,0xFA,0x10,0x49,0x04,0x20,0xC8,0x72, \ +0x00,0xF0,0x1F,0xF8,0x28,0x78,0x20,0x23,0x18,0x43,0x28,0x70,0x28,0x78,0x10, \ +0x23,0x18,0x43,0x28,0x70,0xF0,0xBD,0x03,0x21,0x05,0x20,0xFC,0xF7,0xE4,0xFC, \ +0xF0,0xBD,0x00,0x00,0xB4,0x08,0x00,0x02,0x63,0x01,0x00,0x02,0x18,0x00,0x00, \ +0x02,0x7C,0x01,0x00,0x02,0x14,0x01,0x00,0x02,0xD0,0x00,0x00,0x02,0xAC,0x00, \ +0x00,0x02,0x74,0x05,0x00,0x02,0xF0,0xB5,0xFA,0xF7,0xD7,0xF8,0x23,0x4F,0x02, \ +0x21,0xB9,0x73,0x00,0x21,0xF9,0x73,0x38,0x74,0x01,0x0A,0x79,0x74,0x01,0x0C, \ +0x00,0x0E,0xB9,0x74,0xF8,0x74,0x1E,0x4E,0xF8,0x1D,0x07,0x30,0x06,0x22,0xF1, \ +0x1D,0x35,0x31,0xFA,0xF7,0xB5,0xFA,0x1B,0x4D,0x01,0x24,0xF8,0x1D,0x29,0x30, \ +0x6C,0x70,0x04,0x71,0x19,0x48,0xF9,0x1D,0x42,0x79,0xF0,0x1D,0x15,0x30,0x0D, \ +0x31,0xFA,0xF7,0xA7,0xFA,0x16,0x4F,0x82,0x20,0x38,0x74,0x84,0x20,0x78,0x74, \ +0x8B,0x20,0xB8,0x74,0x96,0x20,0xF8,0x74,0x12,0x48,0x20,0x23,0x01,0x78,0x19, \ +0x43,0x01,0x70,0x01,0x78,0x10,0x23,0x19,0x43,0x01,0x70,0x0C,0x48,0x84,0x70, \ +0xFC,0xF7,0x25,0xF8,0x0D,0x48,0x04,0x21,0x44,0x73,0x05,0x20,0xE8,0x72,0xF8, \ +0x1D,0x09,0x30,0x00,0xF0,0x6E,0xFF,0x01,0x21,0x05,0x20,0xFC,0xF7,0x8A,0xFC, \ +0xF0,0xBD,0x00,0x00,0x98,0x00,0x00,0x02,0xD0,0x00,0x00,0x02,0x74,0x05,0x00, \ +0x02,0x14,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x63,0x01,0x00,0x02,0x64,0x05, \ +0x00,0x02,0xF0,0xB5,0x2E,0x4F,0x2E,0x4E,0xFC,0x1D,0x59,0x34,0xF8,0x1D,0xF1, \ +0x1D,0x0D,0x31,0x09,0x30,0x05,0x1C,0x22,0x79,0xFA,0xF7,0x67,0xFA,0x22,0x79, \ +0x29,0x49,0x28,0x1C,0xFA,0xF7,0x62,0xFA,0x20,0x79,0x28,0x49,0x48,0x71,0xB9, \ +0x7B,0x27,0x48,0x00,0x29,0x03,0xD1,0x01,0x70,0xF1,0x72,0x41,0x70,0x08,0xE0, \ 
+0x01,0x21,0x01,0x70,0xF1,0x72,0xF9,0x7B,0xC2,0x1D,0x39,0x32,0x41,0x70,0xF9, \ +0x78,0x11,0x70,0x00,0x25,0x0D,0x20,0x68,0x43,0x1E,0x49,0x0D,0x22,0x41,0x18, \ +0xC0,0x19,0x30,0x30,0x0C,0x31,0xFA,0xF7,0x43,0xFA,0x01,0x35,0x04,0x2D,0xF2, \ +0xD3,0xE0,0x88,0x30,0x80,0x60,0x79,0x00,0x28,0x03,0xD0,0x15,0x48,0x01,0x21, \ +0x41,0x72,0x02,0xE0,0x13,0x48,0x00,0x21,0x41,0x72,0x78,0x7B,0x0E,0x28,0x03, \ +0xDC,0x01,0x28,0x01,0xDB,0x11,0x49,0x08,0x75,0xB8,0x78,0x10,0x49,0x08,0x74, \ +0x38,0x7B,0x01,0x28,0x02,0xD1,0x0B,0x4A,0xD0,0x70,0x02,0xE0,0x09,0x4A,0x00, \ +0x20,0xD0,0x70,0xF8,0x88,0x08,0x81,0xB8,0x88,0x48,0x81,0x38,0x78,0x06,0x49, \ +0xC8,0x70,0xF9,0xF7,0x9C,0xFF,0xF0,0xBD,0x00,0x00,0xB4,0x08,0x00,0x02,0x98, \ +0x00,0x00,0x02,0xEC,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x30,0x00,0x00,0x02, \ +0x18,0x00,0x00,0x02,0xD0,0x00,0x00,0x02,0x80,0xB5,0x17,0x4A,0x17,0x49,0x0A, \ +0x60,0x17,0x49,0x0F,0x68,0x0A,0x2F,0x17,0xD2,0x01,0xA3,0xDB,0x5D,0x5B,0x00, \ +0x9F,0x44,0x13,0x04,0x07,0x0A,0x0D,0x0F,0x13,0x13,0x13,0x12,0xFF,0x20,0x01, \ +0x30,0x0B,0xE0,0xFF,0x20,0x11,0x30,0x08,0xE0,0xFF,0x20,0x21,0x30,0x05,0xE0, \ +0x0B,0x20,0x03,0xE0,0xFF,0x20,0x31,0x30,0x00,0xE0,0x00,0x20,0x01,0x23,0x8B, \ +0x60,0xC9,0x68,0x00,0xF0,0x6C,0xF8,0x04,0x21,0x0C,0x20,0xFC,0xF7,0xE5,0xFB, \ +0x0F,0x20,0x00,0x06,0x81,0x88,0x04,0x4B,0x19,0x43,0x81,0x80,0x80,0xBD,0x60, \ +0x08,0x00,0x02,0x70,0x02,0x00,0x02,0x84,0x02,0x00,0x02,0x08,0x08,0x00,0x00, \ +0x80,0xB4,0xF3,0x22,0x12,0x05,0x93,0x68,0x06,0x23,0xD3,0x60,0x17,0x69,0xBB, \ +0x08,0xFC,0xD3,0x2D,0x23,0x01,0x3B,0xFD,0xD1,0x93,0x68,0x47,0x09,0x08,0x23, \ +0x1F,0x40,0x02,0x23,0x3B,0x43,0xD3,0x60,0x17,0x69,0xBB,0x08,0xFC,0xD3,0x00, \ +0x06,0x00,0x0E,0xD0,0x60,0x10,0x69,0x80,0x08,0xFC,0xD3,0x90,0x68,0x08,0x06, \ +0x00,0x0E,0xD0,0x60,0x10,0x69,0x80,0x08,0xFC,0xD3,0x90,0x68,0x80,0xBC,0xF7, \ +0x46,0x07,0x20,0x40,0x06,0xC1,0x69,0x01,0x23,0x5B,0x03,0x19,0x43,0xC1,0x61, \ +0xF7,0x46,0xF3,0x20,0x00,0x05,0x41,0x68,0x0F,0x23,0x1B,0x04,0x99,0x43,0x41, \ +0x60,0x41,0x68,0x03,0x23,0x1B,0x04,0x19,0x43,0x41,0x60,0xF7,0x46,0xF0,0xB5, \ +0x14,0x1C,0x0D,0x1C,0x07,0x1C,0xFD,0xF7,0x97,0xFA,0x00,0x26,0x00,0x2F,0x10, \ +0xD9,0xFD,0xF7,0xE4,0xFA,0x40,0x08,0xFB,0xD2,0xB4,0x20,0x01,0x38,0xFD,0xD1, \ +0xA9,0x5D,0xA0,0x19,0xFF,0xF7,0xB1,0xFF,0xB4,0x20,0x01,0x38,0xFD,0xD1,0x01, \ +0x36,0xBE,0x42,0xEE,0xD3,0xFD,0xF7,0x99,0xFA,0x00,0x20,0xF0,0xBD,0xF8,0xB5, \ +0x02,0x1C,0x31,0x4B,0x08,0x1C,0x19,0x68,0x2E,0x4F,0x00,0x29,0x59,0xD0,0x99, \ +0x68,0x01,0x29,0x56,0xD1,0x00,0x24,0x0F,0x21,0x09,0x06,0x8C,0x80,0x8C,0x81, \ +0x0C,0x88,0x09,0x89,0x19,0x68,0x27,0x4B,0xDD,0x1D,0xDE,0x1D,0x9C,0x1D,0x22, \ +0x33,0x1F,0x36,0x0D,0x35,0x09,0x29,0x00,0x93,0x1D,0xD1,0x0B,0x22,0x04,0x20, \ +0x00,0x99,0xFF,0xF7,0xC0,0xFF,0xFF,0x22,0x06,0x20,0x39,0x1C,0x01,0x32,0xFF, \ +0xF7,0xBA,0xFF,0xFF,0x22,0x0E,0x20,0x21,0x1C,0x11,0x32,0xFF,0xF7,0xB4,0xFF, \ +0xFF,0x22,0x0E,0x20,0x29,0x1C,0x21,0x32,0xFF,0xF7,0xAE,0xFF,0xFF,0x22,0x01, \ +0x20,0x31,0x1C,0x31,0x32,0xFF,0xF7,0xA8,0xFF,0x02,0xE0,0x39,0x1C,0xFF,0xF7, \ +0xA4,0xFF,0xFD,0xF7,0x3F,0xFA,0x06,0x22,0xFF,0x21,0x38,0x1C,0x01,0x31,0xFD, \ +0xF7,0xA0,0xFA,0x04,0x22,0x0B,0x21,0x00,0x98,0xFD,0xF7,0x9B,0xFA,0x0E,0x22, \ +0xFF,0x21,0x20,0x1C,0x11,0x31,0xFD,0xF7,0x95,0xFA,0x0E,0x22,0xFF,0x21,0x28, \ +0x1C,0x21,0x31,0xFD,0xF7,0x8F,0xFA,0x01,0x22,0xFF,0x21,0x30,0x1C,0x31,0x31, \ +0xFD,0xF7,0x89,0xFA,0xFD,0xF7,0x38,0xFA,0x02,0x4B,0x00,0x24,0x1C,0x60,0xF8, \ +0xBD,0x60,0x08,0x00,0x02,0x84,0x02,0x00,0x02,0x80,0xB4,0x0C,0x4F,0x00,0x28, \ +0x05,0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20, \ 
+0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x06,0x49, \ +0x00,0x68,0x10,0x23,0x08,0x73,0x38,0x7B,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00, \ +0x00,0x70,0x03,0x00,0x0D,0xE8,0x01,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4, \ +0x0B,0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x03, \ +0x2B,0x03,0xD0,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73, \ +0x04,0x49,0x00,0x20,0x08,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF3, \ +0xE7,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0C,0x4F,0x00,0x28, \ +0x05,0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20, \ +0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x00,0x21, \ +0x01,0x73,0x01,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00, \ +0x00,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x00,0xB5,0x00,0x28,0x05,0xD1, \ +0x00,0x29,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x03,0xD0,0x06,0x49,0x20, \ +0x20,0x08,0x73,0x00,0xBD,0xFD,0xF7,0xD1,0xF8,0x04,0x49,0x00,0x20,0x08,0x80, \ +0x03,0x49,0x08,0x80,0x00,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0xF8,0x01,0x00, \ +0x02,0xFA,0x01,0x00,0x02,0xB0,0xB4,0x20,0x25,0x00,0x28,0x18,0x4C,0x03,0xD1, \ +0x02,0x2A,0x01,0xD1,0x01,0x2B,0x02,0xD1,0x25,0x73,0xB0,0xBC,0xF7,0x46,0x08, \ +0x06,0x00,0x0E,0x02,0x2B,0x05,0xD1,0x00,0x28,0x01,0xD0,0x25,0x73,0xF5,0xE7, \ +0x00,0x27,0x10,0xE0,0x03,0x2B,0x0E,0xD1,0x00,0x28,0x08,0xD0,0x02,0x28,0x08, \ +0xD0,0x80,0x28,0x04,0xD0,0x85,0x28,0x11,0xD1,0x0A,0x48,0x07,0x88,0x03,0xE0, \ +0x00,0x27,0x01,0xE0,0x09,0x48,0x07,0x88,0x80,0x20,0x20,0x73,0x08,0x48,0x00, \ +0x21,0x07,0x73,0x01,0x73,0x20,0x7B,0x10,0x23,0x18,0x43,0x20,0x73,0xD7,0xE7, \ +0x25,0x73,0xD5,0xE7,0x70,0x03,0x00,0x0D,0xFA,0x01,0x00,0x02,0xF8,0x01,0x00, \ +0x02,0x30,0x03,0x00,0x0D,0x90,0xB5,0x20,0x27,0x00,0x28,0x0C,0x4C,0x03,0xD1, \ +0x00,0x2A,0x01,0xD1,0x03,0x2B,0x01,0xD0,0x27,0x73,0x90,0xBD,0x09,0x06,0x09, \ +0x0E,0x01,0x20,0x02,0x29,0x04,0xD0,0x85,0x29,0x07,0xD1,0x05,0x49,0x08,0x80, \ +0x01,0xE0,0x05,0x49,0x08,0x80,0xFD,0xF7,0x6F,0xF8,0x90,0xBD,0x27,0x73,0x90, \ +0xBD,0x70,0x03,0x00,0x0D,0xFA,0x01,0x00,0x02,0xF8,0x01,0x00,0x02,0x80,0xB4, \ +0x0D,0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01, \ +0x2B,0x03,0xD1,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73, \ +0x06,0x48,0x01,0x68,0x06,0x48,0x01,0x73,0x00,0x21,0x01,0x73,0x38,0x7B,0x10, \ +0x23,0x18,0x43,0x38,0x73,0xF0,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0xF4,0x01, \ +0x00,0x02,0x30,0x03,0x00,0x0D,0x90,0xB5,0x17,0x1C,0x02,0x28,0x22,0x4C,0x04, \ +0xD1,0x09,0x29,0x37,0xD1,0x21,0x48,0x20,0x60,0x34,0xE0,0x03,0x28,0x07,0xD1, \ +0x74,0x20,0xF9,0xF7,0x63,0xFE,0x1E,0x49,0x88,0x70,0x88,0x1C,0x20,0x60,0x2A, \ +0xE0,0x04,0x28,0x02,0xD1,0x1B,0x48,0x20,0x60,0x25,0xE0,0x05,0x28,0x02,0xD1, \ +0x1A,0x48,0x20,0x60,0x20,0xE0,0x00,0x28,0x1E,0xD1,0x09,0x29,0x1A,0xD2,0x01, \ +0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x16,0x04,0x07,0x0A,0x16,0x0D,0x10,0x13, \ +0x16,0x00,0x13,0x48,0x20,0x60,0x10,0xE0,0x12,0x48,0x20,0x60,0x0D,0xE0,0x12, \ +0x48,0x20,0x60,0x0A,0xE0,0x11,0x48,0x20,0x60,0x07,0xE0,0x11,0x48,0x20,0x60, \ +0x04,0xE0,0x10,0x48,0x20,0x60,0x01,0xE0,0x10,0x48,0x20,0x60,0x20,0x68,0x0F, \ +0x49,0xC0,0x19,0x20,0x60,0x80,0x20,0x08,0x73,0x00,0xF0,0x5B,0xF8,0x90,0xBD, \ +0x2C,0x02,0x00,0x02,0x60,0x08,0x00,0x02,0xAC,0x08,0x00,0x02,0xA0,0x02,0x00, \ +0x02,0x38,0x09,0x00,0x02,0x14,0x01,0x00,0x02,0x74,0x00,0x00,0x02,0xD0,0x00, \ +0x00,0x02,0x98,0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x20, \ +0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB4,0x00,0x22,0x02,0x28,0x17,0x4B, \ 
+0x10,0xD1,0x17,0x48,0x87,0x79,0xC0,0x79,0x00,0x02,0x07,0x43,0x08,0x29,0x07, \ +0xD0,0x14,0x48,0xC7,0x60,0x0C,0x27,0x1F,0x70,0x5A,0x70,0x9A,0x70,0x01,0x60, \ +0x82,0x60,0x80,0xBC,0xF7,0x46,0x06,0x28,0x0E,0xD1,0x0F,0x48,0x00,0x68,0x01, \ +0x28,0xF7,0xD1,0xFF,0x20,0x0D,0x21,0x09,0x06,0x43,0x30,0x88,0x80,0x0C,0x49, \ +0x01,0x20,0x08,0x71,0x0B,0x49,0x08,0x60,0xEC,0xE7,0x18,0x79,0x18,0x70,0x5A, \ +0x70,0x9A,0x70,0x18,0x78,0x0A,0x28,0xE5,0xD1,0x07,0x48,0x02,0x70,0xE2,0xE7, \ +0x00,0x00,0xAC,0x08,0x00,0x02,0xEC,0x01,0x00,0x02,0x84,0x02,0x00,0x02,0x74, \ +0x01,0x00,0x02,0xE0,0x03,0x00,0x0D,0x78,0x01,0x00,0x02,0x63,0x01,0x00,0x02, \ +0xB0,0xB4,0x1B,0x4A,0x1B,0x48,0x11,0x68,0x07,0x68,0x1B,0x4B,0xB9,0x42,0x12, \ +0xD1,0x1A,0x7B,0x19,0x1C,0xD2,0x09,0x09,0xD2,0x00,0x68,0x40,0x07,0x03,0xD0, \ +0xE0,0x20,0x08,0x73,0xB0,0xBC,0xF7,0x46,0xD0,0x20,0x08,0x73,0xFA,0xE7,0x08, \ +0x7B,0x20,0x23,0x18,0x43,0x08,0x73,0xF5,0xE7,0x00,0x68,0x11,0x68,0x40,0x1A, \ +0x08,0x28,0x03,0xD9,0x08,0x20,0x0E,0x4F,0x0E,0x49,0x02,0xE0,0x00,0x28,0xFA, \ +0xD1,0x09,0xE0,0x0D,0x68,0x2C,0x78,0x01,0x35,0x0D,0x60,0x3C,0x73,0x14,0x68, \ +0x01,0x34,0x14,0x60,0x01,0x38,0xF5,0xD1,0x19,0x7B,0x18,0x1C,0x10,0x23,0x19, \ +0x43,0x01,0x73,0xD9,0xE7,0x00,0x00,0x24,0x02,0x00,0x02,0x28,0x02,0x00,0x02, \ +0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x2C,0x02,0x00,0x02,0x90,0xB5,0x20, \ +0x27,0x00,0x28,0x09,0x4C,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x01,0xD0, \ +0x27,0x73,0x90,0xBD,0x08,0x06,0x00,0x0E,0x01,0xD0,0x80,0x28,0x01,0xD1,0x27, \ +0x73,0x90,0xBD,0xFC,0xF7,0x4C,0xFF,0x90,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D, \ +0x0D,0x48,0x01,0x2B,0x02,0xD1,0x20,0x21,0x01,0x73,0xF7,0x46,0x80,0x21,0x01, \ +0x73,0x0A,0x49,0x01,0x22,0x0A,0x73,0x00,0x22,0x0A,0x73,0x02,0x23,0x0B,0x73, \ +0x0A,0x73,0x07,0x4A,0x10,0x23,0x12,0x68,0x0A,0x73,0x06,0x4A,0x12,0x68,0x0A, \ +0x73,0x01,0x7B,0x19,0x43,0x01,0x73,0xF7,0x46,0x00,0x00,0x70,0x03,0x00,0x0D, \ +0x30,0x03,0x00,0x0D,0x34,0x02,0x00,0x02,0x38,0x02,0x00,0x02,0x00,0x21,0x02, \ +0x28,0x10,0xD1,0x08,0x1C,0x0B,0x49,0x04,0x22,0x08,0x71,0x0B,0x49,0x0A,0x70, \ +0x08,0x70,0x0A,0x4A,0x82,0x21,0x11,0x71,0x0A,0x49,0x08,0x60,0x0A,0x49,0x08, \ +0x60,0x0A,0x49,0x08,0x80,0xF7,0x46,0x85,0x28,0xFC,0xD1,0x08,0x4A,0x01,0x20, \ +0x10,0x60,0x08,0x48,0x01,0x80,0xF7,0x46,0x70,0x03,0x00,0x0D,0xC0,0x03,0x00, \ +0x0D,0xB0,0x03,0x00,0x0D,0x10,0x02,0x00,0x02,0x0C,0x02,0x00,0x02,0xF8,0x01, \ +0x00,0x02,0x44,0x02,0x00,0x02,0xFA,0x01,0x00,0x02,0x90,0xB5,0x0F,0x1C,0x19, \ +0x1C,0x29,0x4B,0x14,0x1C,0x27,0x4A,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C, \ +0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xB0,0xFD,0x90,0xBD,0x24,0x4B,0x98,0x42,0x06, \ +0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xC6,0xFD,0x90,0xBD, \ +0x81,0x23,0x1B,0x02,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21, \ +0x1C,0xFF,0xF7,0xD7,0xFD,0x90,0xBD,0xFF,0x23,0x0C,0x33,0x98,0x42,0x06,0xD1, \ +0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xEA,0xFD,0x90,0xBD,0x41, \ +0x23,0x5B,0x02,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C, \ +0xFF,0xF7,0xFB,0xFD,0x90,0xBD,0x0F,0x4B,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A, \ +0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x2D,0xFE,0x90,0xBD,0x01,0x23,0xDB,0x03, \ +0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x44, \ +0xFE,0x90,0xBD,0x06,0x49,0x20,0x20,0x08,0x73,0x90,0xBD,0x00,0x00,0xDC,0x01, \ +0x00,0x02,0x08,0x80,0x00,0x00,0x0A,0x81,0x00,0x00,0x03,0x02,0x00,0x00,0x70, \ +0x03,0x00,0x0D,0x80,0xB5,0x00,0x20,0x1C,0x49,0x0F,0x27,0x3F,0x06,0x08,0x70, \ +0xB8,0x80,0x39,0x88,0xB8,0x81,0x38,0x89,0x19,0x48,0xC0,0x69,0x01,0x20,0x80, \ 
+0x06,0xC1,0x68,0xC0,0x6B,0x18,0x49,0x17,0x48,0x00,0x68,0x02,0x20,0xC8,0x61, \ +0x17,0x48,0x01,0x7A,0x0C,0x30,0x08,0x29,0x19,0xD2,0x01,0xA3,0x5B,0x5C,0x5B, \ +0x00,0x9F,0x44,0x15,0x03,0x06,0x09,0x0C,0x15,0x0F,0x12,0x00,0xF0,0x96,0xF8, \ +0x80,0xBD,0x00,0xF0,0x11,0xF9,0x80,0xBD,0x00,0xF0,0x1E,0xF8,0x80,0xBD,0x00, \ +0xF0,0x6D,0xF9,0x80,0xBD,0x00,0xF0,0xE2,0xF9,0x80,0xBD,0x00,0xF0,0x2F,0xFA, \ +0x80,0xBD,0x02,0x21,0x0A,0x20,0xFC,0xF7,0x3E,0xF8,0x06,0x48,0xB8,0x80,0x80, \ +0xBD,0x00,0x00,0x63,0x01,0x00,0x02,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04, \ +0x84,0x05,0x00,0x02,0xAC,0x08,0x00,0x02,0x08,0x08,0x00,0x00,0xB0,0xB5,0x82, \ +0xB0,0x69,0x46,0x08,0x22,0xF9,0xF7,0x22,0xFE,0x00,0xA8,0x40,0x78,0x32,0x4F, \ +0x80,0x08,0x80,0x00,0x0F,0x24,0x24,0x06,0x00,0x28,0x06,0xD0,0x03,0x21,0x0A, \ +0x20,0xFC,0xF7,0x1B,0xF8,0xA7,0x80,0x02,0xB0,0xB0,0xBD,0x00,0xA8,0x00,0x78, \ +0x0E,0x28,0x03,0xDC,0x00,0xA8,0x00,0x78,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A, \ +0x20,0xFC,0xF7,0x0C,0xF8,0xA7,0x80,0xEF,0xE7,0x00,0xA8,0xC0,0x78,0x03,0x28, \ +0x05,0xDD,0x03,0x21,0x0A,0x20,0xFC,0xF7,0x02,0xF8,0xA7,0x80,0xE5,0xE7,0x08, \ +0x21,0x0A,0x20,0xFB,0xF7,0xFC,0xFF,0xA7,0x80,0x00,0x27,0x1D,0x4C,0x05,0x25, \ +0x00,0xA8,0x00,0x78,0xF9,0xF7,0x5E,0xFD,0x00,0x28,0x00,0xD1,0x65,0x70,0xF9, \ +0xF7,0xD6,0xFB,0x00,0xA9,0x89,0x78,0x3E,0x20,0xF9,0xF7,0x30,0xFC,0x00,0xA9, \ +0xC9,0x78,0x0A,0x20,0xF9,0xF7,0x2B,0xFC,0x00,0xA8,0x40,0x78,0x01,0x28,0x04, \ +0xD1,0x3C,0x21,0x12,0x20,0xF9,0xF7,0x23,0xFC,0x03,0xE0,0x38,0x21,0x12,0x20, \ +0xF9,0xF7,0x1E,0xFC,0x01,0xA8,0x00,0x78,0x01,0x28,0x04,0xD1,0x1B,0x21,0x16, \ +0x20,0xF9,0xF7,0x16,0xFC,0x03,0xE0,0x3B,0x21,0x16,0x20,0xF9,0xF7,0x11,0xFC, \ +0x00,0xF0,0x1B,0xF9,0x01,0x37,0x02,0x2F,0xCD,0xD3,0x01,0x21,0x0A,0x20,0xFB, \ +0xF7,0xC2,0xFF,0xA6,0xE7,0x00,0x00,0x08,0x08,0x00,0x00,0xAC,0x08,0x00,0x02, \ +0xB0,0xB5,0x82,0xB0,0x69,0x46,0x08,0x22,0xF9,0xF7,0xB0,0xFD,0x00,0xA8,0x40, \ +0x78,0x38,0x4C,0x80,0x08,0x80,0x00,0x0F,0x27,0x3F,0x06,0x00,0x28,0x06,0xD0, \ +0x03,0x21,0x0A,0x20,0xFB,0xF7,0xA9,0xFF,0xBC,0x80,0x02,0xB0,0xB0,0xBD,0x00, \ +0xA8,0x00,0x78,0x0E,0x28,0x03,0xDC,0x00,0xA8,0x00,0x78,0x00,0x28,0x05,0xD1, \ +0x03,0x21,0x0A,0x20,0xFB,0xF7,0x9A,0xFF,0xBC,0x80,0xEF,0xE7,0x00,0xA8,0xC0, \ +0x78,0x03,0x28,0x05,0xDD,0x03,0x21,0x0A,0x20,0xFB,0xF7,0x90,0xFF,0xBC,0x80, \ +0xE5,0xE7,0x08,0x21,0x0A,0x20,0xFB,0xF7,0x8A,0xFF,0xBC,0x80,0x00,0x27,0x23, \ +0x4C,0x05,0x25,0x00,0xA8,0x00,0x78,0xF9,0xF7,0xEC,0xFC,0x00,0x28,0x00,0xD1, \ +0x65,0x70,0xF9,0xF7,0x64,0xFB,0x00,0xA9,0x89,0x78,0x3E,0x20,0xF9,0xF7,0xBE, \ +0xFB,0x00,0xA9,0xC9,0x78,0x0A,0x20,0xF9,0xF7,0xB9,0xFB,0x00,0xA8,0x40,0x78, \ +0x01,0x28,0x04,0xD1,0xA4,0x21,0x12,0x20,0xF9,0xF7,0xB1,0xFB,0x03,0xE0,0xA0, \ +0x21,0x12,0x20,0xF9,0xF7,0xAC,0xFB,0x01,0xA8,0x00,0x78,0x01,0x28,0x04,0xD1, \ +0x1B,0x21,0x16,0x20,0xF9,0xF7,0xA4,0xFB,0x03,0xE0,0x3B,0x21,0x16,0x20,0xF9, \ +0xF7,0x9F,0xFB,0x00,0x21,0x40,0x20,0xF9,0xF7,0x9B,0xFB,0x00,0xF0,0xA5,0xF8, \ +0x01,0x37,0x02,0x2F,0xC9,0xD3,0xE1,0x20,0x80,0x00,0x01,0x38,0xFD,0xD1,0x74, \ +0x20,0xF9,0xF7,0x9B,0xFB,0xA0,0x70,0x01,0x21,0x0A,0x20,0xFB,0xF7,0x44,0xFF, \ +0x9A,0xE7,0x00,0x00,0x08,0x08,0x00,0x00,0xAC,0x08,0x00,0x02,0x98,0xB5,0x69, \ +0x46,0x04,0x22,0xF9,0xF7,0x33,0xFD,0x00,0xA8,0x40,0x78,0x27,0x4C,0x80,0x08, \ +0x80,0x00,0x0F,0x27,0x3F,0x06,0x00,0x28,0x05,0xD0,0x03,0x21,0x0A,0x20,0xFB, \ +0xF7,0x2C,0xFF,0xBC,0x80,0x98,0xBD,0x00,0xA8,0x00,0x78,0x0E,0x28,0x03,0xDC, \ +0x00,0xA8,0x00,0x78,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0x1E, \ +0xFF,0xBC,0x80,0xF0,0xE7,0x08,0x21,0x0A,0x20,0xFB,0xF7,0x18,0xFF,0x18,0x49, \ 
+0x00,0x20,0x08,0x70,0x18,0x49,0x08,0x60,0x48,0x60,0xBC,0x80,0xF9,0xF7,0x45, \ +0xFB,0x00,0xA8,0x00,0x78,0xF9,0xF7,0x75,0xFC,0x00,0x28,0x02,0xD1,0x13,0x49, \ +0x05,0x20,0x48,0x70,0xF9,0xF7,0xEB,0xFA,0x11,0x48,0x01,0x21,0x81,0x73,0xBA, \ +0x88,0x10,0x4B,0x1A,0x43,0xBA,0x80,0x80,0x30,0xC1,0x61,0x00,0xA8,0x40,0x78, \ +0x01,0x28,0x04,0xD1,0xB7,0x21,0x14,0x20,0xF9,0xF7,0x39,0xFB,0x03,0xE0,0xB5, \ +0x21,0x14,0x20,0xF9,0xF7,0x34,0xFB,0x01,0x21,0x0A,0x20,0xFB,0xF7,0xEA,0xFE, \ +0xBD,0xE7,0x00,0x00,0x08,0x08,0x00,0x00,0x63,0x01,0x00,0x02,0xA0,0x02,0x00, \ +0x02,0xAC,0x08,0x00,0x02,0x04,0x05,0x00,0x02,0x40,0x40,0x00,0x00,0x90,0xB5, \ +0x82,0xB0,0x0F,0x24,0x24,0x06,0x12,0x4F,0x69,0x46,0x08,0x22,0xA7,0x80,0xF9, \ +0xF7,0xCC,0xFC,0x00,0xA8,0x00,0x78,0x0E,0x28,0x03,0xDC,0x00,0xA8,0x00,0x78, \ +0x00,0x28,0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0xC6,0xFE,0xA7,0x80,0x02, \ +0xB0,0x90,0xBD,0x00,0xA8,0x00,0x78,0xF9,0xF7,0x29,0xFC,0x00,0x28,0x02,0xD1, \ +0x06,0x49,0x05,0x20,0x48,0x70,0xF9,0xF7,0x9F,0xFA,0x01,0x21,0x0A,0x20,0xFB, \ +0xF7,0xB4,0xFE,0xED,0xE7,0x00,0x00,0x08,0x08,0x00,0x00,0xAC,0x08,0x00,0x02, \ +0x08,0x48,0xC1,0x69,0x03,0x0C,0x19,0x43,0xC1,0x61,0xC1,0x69,0x04,0x23,0x19, \ +0x43,0xC1,0x61,0xC1,0x69,0x01,0x23,0x19,0x43,0xC1,0x61,0xC1,0x69,0x08,0x23, \ +0x19,0x43,0xC1,0x61,0xF7,0x46,0x40,0x00,0x00,0x04,0x98,0xB5,0x19,0x4C,0xA0, \ +0x6A,0x00,0x06,0x00,0x0E,0xA0,0x62,0xE7,0x6A,0x68,0x20,0xF9,0xF7,0xE2,0xFA, \ +0x00,0x90,0x00,0x98,0x10,0x23,0x18,0x40,0x00,0x09,0x00,0x06,0x00,0x99,0x00, \ +0x0E,0x49,0x09,0x40,0x18,0x00,0x90,0x00,0x98,0x00,0x28,0x13,0xD0,0x01,0x28, \ +0x13,0xD0,0x02,0x28,0x13,0xD0,0x03,0x28,0x08,0xD1,0x0B,0x20,0x78,0x43,0xC7, \ +0x08,0x6A,0x20,0xF9,0xF7,0xC8,0xFA,0x00,0x0A,0x00,0xD3,0x01,0x3F,0xA1,0x6A, \ +0xF8,0x02,0x08,0x43,0xA0,0x62,0x98,0xBD,0xFF,0x08,0xF8,0xE7,0xBF,0x08,0xF6, \ +0xE7,0x0B,0x20,0x78,0x43,0x07,0x09,0xF2,0xE7,0x40,0x00,0x00,0x04,0x90,0xB5, \ +0x82,0xB0,0x69,0x46,0x08,0x22,0xF9,0xF7,0x58,0xFC,0x00,0xA8,0x00,0x78,0x0F, \ +0x27,0x3F,0x06,0x0E,0x28,0x21,0x4C,0x03,0xDC,0x00,0xA8,0x00,0x78,0x00,0x28, \ +0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0x4F,0xFE,0xBC,0x80,0x02,0xB0,0x90, \ +0xBD,0xBC,0x80,0x00,0xA8,0x00,0x78,0xF9,0xF7,0xB1,0xFB,0xF9,0xF7,0x2C,0xFA, \ +0x00,0xA9,0x89,0x78,0x3E,0x20,0xF9,0xF7,0x86,0xFA,0x00,0xA9,0xC9,0x78,0x0A, \ +0x20,0xF9,0xF7,0x81,0xFA,0x00,0xA8,0x40,0x78,0x01,0x28,0x04,0xD1,0x2C,0x21, \ +0x12,0x20,0xF9,0xF7,0x79,0xFA,0x03,0xE0,0x28,0x21,0x12,0x20,0xF9,0xF7,0x74, \ +0xFA,0x01,0xA8,0x00,0x78,0x01,0x28,0x04,0xD1,0x1B,0x21,0x16,0x20,0xF9,0xF7, \ +0x6C,0xFA,0x03,0xE0,0x3B,0x21,0x16,0x20,0xF9,0xF7,0x67,0xFA,0x0C,0x21,0x40, \ +0x20,0xF9,0xF7,0x63,0xFA,0xFF,0xF7,0x6D,0xFF,0x01,0x21,0x0A,0x20,0xFB,0xF7, \ +0x17,0xFE,0xC7,0xE7,0x08,0x08,0x00,0x00,0xB0,0xB5,0x83,0xB0,0x69,0x46,0x0C, \ +0x22,0xF9,0xF7,0x08,0xFC,0x02,0xA8,0x00,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28, \ +0x35,0x4C,0x03,0xDC,0x02,0xA8,0x00,0x78,0x00,0x28,0x06,0xD1,0x03,0x21,0x0A, \ +0x20,0xFB,0xF7,0xFF,0xFD,0xAC,0x80,0x03,0xB0,0xB0,0xBD,0x02,0xA8,0x00,0x78, \ +0x2E,0x4F,0xB8,0x70,0x00,0xA8,0x40,0x78,0x78,0x70,0x00,0xA8,0x00,0x78,0x38, \ +0x70,0x00,0xA8,0x40,0x88,0xB8,0x60,0x01,0x98,0xF8,0x60,0x00,0x20,0xF8,0x70, \ +0x78,0x60,0x38,0x61,0xAC,0x80,0xF9,0xF7,0x1C,0xFA,0x02,0xA8,0x00,0x78,0xF9, \ +0xF7,0x4C,0xFB,0xF9,0xF7,0xC7,0xF9,0x02,0xA9,0x89,0x78,0x3E,0x20,0xF9,0xF7, \ +0x21,0xFA,0x02,0xA8,0x40,0x78,0x01,0x28,0x04,0xD1,0x24,0x21,0x12,0x20,0xF9, \ +0xF7,0x19,0xFA,0x03,0xE0,0x20,0x21,0x12,0x20,0xF9,0xF7,0x14,0xFA,0x02,0xA8, \ +0xC0,0x78,0x01,0x28,0x04,0xD1,0x1B,0x21,0x16,0x20,0xF9,0xF7,0x0C,0xFA,0x03, \ 
+0xE0,0x3B,0x21,0x16,0x20,0xF9,0xF7,0x07,0xFA,0x00,0xA9,0x09,0x78,0x01,0x98, \ +0x00,0xF0,0x28,0xF8,0x01,0x98,0x00,0xF0,0x61,0xF8,0x78,0x60,0x79,0x78,0x0A, \ +0x20,0xF9,0xF7,0xFA,0xF9,0xF8,0x78,0xF9,0xF7,0x7B,0xFB,0x78,0x68,0xFA,0xF7, \ +0xE2,0xFD,0x09,0x49,0x03,0x20,0xC8,0x61,0x09,0x48,0x09,0x4B,0xA8,0x80,0x01, \ +0x20,0x80,0x06,0x41,0x6A,0xC9,0x18,0x01,0x62,0x01,0x21,0x0A,0x20,0xFB,0xF7, \ +0x9F,0xFD,0x9F,0xE7,0x08,0x08,0x00,0x00,0x28,0x09,0x00,0x02,0x84,0x05,0x00, \ +0x02,0x88,0x88,0x00,0x00,0x10,0x27,0x00,0x00,0x00,0x22,0x00,0x28,0x04,0x4B, \ +0x06,0xD9,0x09,0x06,0x09,0x0E,0x19,0x70,0x01,0x33,0x01,0x32,0x82,0x42,0xFA, \ +0xD3,0xF7,0x46,0x00,0x72,0x01,0x02,0x81,0xB0,0x01,0x20,0x80,0x06,0xC1,0x6B, \ +0x00,0xAB,0x19,0x80,0x00,0xA9,0x09,0x88,0x20,0x22,0x0A,0x40,0x0D,0x49,0x0B, \ +0xD0,0x0D,0x4A,0x0E,0x4B,0x82,0x63,0xCA,0x68,0x42,0x63,0x4A,0x78,0x9A,0x5C, \ +0x11,0x23,0x9B,0x02,0x1A,0x43,0x0A,0x4B,0x5A,0x60,0x00,0xAA,0x12,0x88,0x92, \ +0x08,0x06,0xD3,0x0A,0x69,0x01,0x32,0x0A,0x61,0x89,0x68,0x42,0x6A,0x89,0x18, \ +0x01,0x62,0x01,0xB0,0xF7,0x46,0x00,0x00,0x28,0x09,0x00,0x02,0x00,0x72,0x01, \ +0x02,0x6C,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0xF0,0xB5,0x04,0x30,0xC7,0x00, \ +0x19,0x4C,0x00,0x26,0xE6,0x70,0x60,0x78,0x01,0x28,0x15,0xD0,0x02,0x28,0x15, \ +0xD0,0x03,0x28,0x25,0xD1,0x0B,0x20,0x39,0x1C,0xFD,0xF7,0xA7,0xFC,0x0D,0x1C, \ +0x79,0x1A,0x0B,0x20,0xFD,0xF7,0xA2,0xFC,0x07,0x1C,0x00,0x2D,0x18,0xD9,0x01, \ +0x37,0x04,0x2D,0x13,0xD2,0x01,0x20,0xE0,0x70,0x13,0xE0,0x7F,0x08,0x11,0xE0, \ +0x79,0x00,0x0B,0x20,0x0F,0x1C,0xFD,0xF7,0x92,0xFC,0x0C,0x1C,0x79,0x1A,0x0B, \ +0x20,0xFD,0xF7,0x8D,0xFC,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37,0x02,0xE0, \ +0xE6,0x70,0x00,0xE0,0xE6,0x70,0x38,0x04,0x00,0x0C,0xF0,0xBD,0x28,0x09,0x00, \ +0x02,0xB0,0xB4,0x00,0x22,0x00,0x29,0x06,0xDD,0x85,0x5C,0x2B,0x0A,0x00,0xD3, \ +0x14,0x1C,0x01,0x32,0x8A,0x42,0xF8,0xDB,0x00,0x20,0x00,0x29,0x08,0xDD,0x08, \ +0x4A,0x13,0x18,0x1B,0x7C,0x00,0x2B,0x00,0xD0,0x07,0x1C,0x01,0x30,0x88,0x42, \ +0xF7,0xDB,0x05,0x48,0xBC,0x42,0x02,0xDD,0x87,0x72,0xB0,0xBC,0xF7,0x46,0x84, \ +0x72,0xFB,0xE7,0x00,0x00,0x18,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0xF0,0xB5, \ +0x85,0xB0,0x5F,0x48,0x00,0x25,0x00,0x68,0x46,0x68,0x80,0x89,0x29,0x28,0x02, \ +0xDA,0x00,0x20,0x05,0xB0,0xF0,0xBD,0xF0,0x1D,0x09,0x30,0x5A,0x49,0x03,0x90, \ +0x06,0x22,0x04,0x91,0xF9,0xF7,0xD2,0xFA,0x00,0x28,0x01,0xD0,0x00,0x20,0xF1, \ +0xE7,0x20,0x20,0xF1,0x1D,0x19,0x31,0x30,0x5C,0x49,0x78,0x09,0x02,0x08,0x43, \ +0x04,0x04,0x24,0x0C,0x14,0x2C,0x03,0xDB,0x7D,0x23,0xDB,0x00,0x9C,0x42,0x01, \ +0xDD,0x00,0x20,0xE0,0xE7,0x22,0x20,0x30,0x5C,0x80,0x08,0x01,0xD2,0x00,0x20, \ +0xDA,0xE7,0x49,0x48,0x24,0x27,0x01,0x68,0x89,0x89,0x04,0x39,0x24,0x29,0x50, \ +0xDD,0x47,0x49,0x02,0x91,0xF0,0x5D,0x00,0x28,0x09,0xD0,0x01,0x28,0x20,0xD0, \ +0x03,0x28,0x45,0xD1,0xF0,0x19,0x41,0x78,0x01,0x29,0x33,0xD0,0x00,0x20,0xC4, \ +0xE7,0xF5,0x19,0x68,0x78,0x00,0x28,0x05,0xD0,0x3F,0x49,0x49,0x79,0x81,0x42, \ +0x01,0xD0,0x00,0x20,0xBA,0xE7,0x3C,0x49,0xA8,0x1C,0x4A,0x79,0x02,0x99,0xF9, \ +0xF7,0x91,0xFA,0x00,0x28,0x01,0xD0,0x00,0x20,0xB0,0xE7,0x68,0x78,0x01,0x25, \ +0xC0,0x19,0x87,0x1C,0x20,0xE0,0xF0,0x19,0x01,0x90,0x41,0x78,0x05,0x29,0x00, \ +0xDB,0x04,0x21,0x00,0x20,0x00,0x29,0x07,0xDD,0x3A,0x18,0x92,0x19,0x93,0x78, \ +0x6A,0x46,0x13,0x54,0x01,0x30,0x88,0x42,0xF7,0xDB,0x68,0x46,0xFF,0xF7,0x6A, \ +0xFF,0x01,0x98,0x40,0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0,0x29,0x49,0x80,0x78, \ +0x09,0x7D,0x81,0x42,0x01,0xD0,0x00,0x20,0x8B,0xE7,0x03,0x37,0x21,0x48,0x00, \ +0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xB2,0xDC,0x00,0x2D,0x01,0xD1,0x00,0x20, \ 
+0x80,0xE7,0x05,0x20,0xFB,0xF7,0x42,0xF8,0x1F,0x48,0x20,0x23,0x01,0x78,0x1F, \ +0x4F,0x19,0x43,0x01,0x70,0x01,0x78,0x10,0x23,0x19,0x43,0x01,0x70,0xF9,0x1D, \ +0x06,0x22,0x07,0x31,0x03,0x98,0xF9,0xF7,0x5A,0xFA,0x06,0x22,0x03,0x98,0x04, \ +0x99,0xF9,0xF7,0x55,0xFA,0x13,0x4D,0xF9,0x1D,0x6A,0x79,0x02,0x98,0x0D,0x31, \ +0xF9,0xF7,0x4E,0xFA,0x13,0x4E,0x01,0x20,0xF9,0x1D,0x29,0x31,0x70,0x70,0x08, \ +0x71,0x3C,0x80,0xA8,0x70,0x00,0x21,0x00,0x20,0xF9,0xF7,0x54,0xF8,0xFA,0xF7, \ +0xD4,0xFF,0x0D,0x49,0x01,0x20,0x48,0x73,0x05,0x20,0xF0,0x72,0x01,0x21,0x04, \ +0x20,0xFB,0xF7,0x3D,0xFC,0x01,0x20,0x48,0xE7,0x00,0x00,0x10,0x00,0x00,0x02, \ +0x0C,0x01,0x00,0x02,0xEC,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x18,0x00,0x00, \ +0x02,0x63,0x01,0x00,0x02,0x98,0x00,0x00,0x02,0x74,0x05,0x00,0x02,0x64,0x05, \ +0x00,0x02,0xF0,0xB5,0x84,0xB0,0x4D,0x4F,0x4E,0x49,0x38,0x68,0x04,0x26,0x45, \ +0x68,0x00,0x24,0x06,0x22,0xE8,0x1D,0x09,0x30,0x03,0x91,0xF9,0xF7,0x04,0xFA, \ +0x00,0x28,0x02,0xD0,0x00,0x20,0x04,0xB0,0xF0,0xBD,0x39,0x68,0x38,0x1C,0x89, \ +0x89,0x29,0x29,0x01,0xDA,0x00,0x20,0xF6,0xE7,0x20,0x22,0xEB,0x1D,0x19,0x33, \ +0xAA,0x5C,0x5B,0x78,0x1B,0x02,0x1A,0x43,0x12,0x04,0x12,0x0C,0x00,0x92,0x14, \ +0x2A,0x04,0xDB,0x7D,0x23,0x00,0x9A,0xDB,0x00,0x9A,0x42,0x01,0xDD,0x00,0x20, \ +0xE3,0xE7,0x22,0x22,0xAA,0x5C,0x52,0x08,0x01,0xD2,0x00,0x20,0xDD,0xE7,0x24, \ +0x27,0x04,0x39,0x24,0x29,0x2F,0xDD,0xE8,0x5D,0x00,0x28,0x1B,0xD0,0x01,0x28, \ +0x28,0xD1,0xE8,0x19,0x02,0x90,0x40,0x78,0x05,0x28,0x00,0xDA,0x06,0x1C,0x00, \ +0x20,0x00,0x2E,0x07,0xDD,0x39,0x18,0x49,0x19,0x8A,0x78,0x01,0xA9,0x0A,0x54, \ +0x01,0x30,0xB0,0x42,0xF7,0xDB,0x01,0xA8,0x31,0x1C,0xFF,0xF7,0xB7,0xFE,0x02, \ +0x98,0x40,0x78,0xC0,0x19,0x87,0x1C,0x08,0xE0,0xE8,0x19,0x40,0x78,0x20,0x28, \ +0x01,0xD9,0x00,0x24,0x00,0xE0,0x01,0x24,0xC0,0x19,0x87,0x1C,0x1F,0x48,0x00, \ +0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xD1,0xDC,0x00,0x2C,0x01,0xD1,0x00,0x20, \ +0xA7,0xE7,0x1C,0x4F,0x1C,0x4C,0xF9,0x1D,0x07,0x31,0x06,0x22,0x03,0x98,0xF9, \ +0xF7,0xB1,0xF9,0xE0,0x1D,0x15,0x30,0x20,0x22,0xF9,0x1D,0x0D,0x31,0xF9,0xF7, \ +0xAA,0xF9,0xE0,0x1D,0x39,0x30,0x81,0x78,0xF8,0x1D,0x29,0x30,0x01,0x71,0x02, \ +0x79,0x13,0x48,0xC1,0x1D,0x69,0x31,0x4A,0x70,0x00,0x9A,0x20,0x23,0x3A,0x80, \ +0x10,0x4A,0x17,0x78,0x3B,0x43,0x13,0x70,0x17,0x78,0x10,0x23,0x3B,0x43,0x13, \ +0x70,0x00,0x22,0x60,0x30,0x42,0x73,0x0C,0x48,0x82,0x70,0x05,0x20,0xC8,0x72, \ +0xFA,0xF7,0x63,0xFF,0x01,0x21,0x04,0x20,0xFB,0xF7,0x8D,0xFB,0x01,0x20,0x73, \ +0xE7,0x00,0x00,0x10,0x00,0x00,0x02,0x0C,0x01,0x00,0x02,0x98,0x00,0x00,0x02, \ +0xD0,0x00,0x00,0x02,0x04,0x05,0x00,0x02,0x63,0x01,0x00,0x02,0x14,0x01,0x00, \ +0x02,0x80,0xB4,0x19,0x49,0xC9,0x7D,0x31,0x29,0x19,0xD0,0x07,0xDC,0x10,0x29, \ +0x12,0xD0,0x20,0x29,0x12,0xD0,0x30,0x29,0x08,0xD1,0x02,0x20,0x06,0xE0,0x32, \ +0x29,0x10,0xD0,0x40,0x29,0x10,0xD0,0x41,0x29,0x00,0xD1,0x06,0x20,0x10,0x49, \ +0x40,0x00,0x09,0x5A,0x01,0x20,0x0F,0x4A,0x09,0xE0,0x00,0x20,0xF7,0xE7,0x01, \ +0x20,0xF5,0xE7,0x03,0x20,0xF3,0xE7,0x04,0x20,0xF1,0xE7,0x05,0x20,0xEF,0xE7, \ +0xD3,0x7C,0x01,0x33,0xD3,0x74,0xD3,0x7C,0x0E,0x2B,0x00,0xDD,0xD0,0x74,0xD3, \ +0x7C,0x5F,0x1E,0x03,0x1C,0xBB,0x40,0x0B,0x40,0xF2,0xD0,0x80,0xBC,0xF7,0x46, \ +0x18,0x00,0x00,0x02,0x7C,0x01,0x00,0x02,0xD0,0x00,0x00,0x02} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/fw-i3863.h linux.22-ac2/drivers/usb/atmel/fw-i3863.h --- linux.vanilla/drivers/usb/atmel/fw-i3863.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/fw-i3863.h 2003-08-13 21:00:21.000000000 +0100 @@ -0,0 +1,1918 @@ 
+/**************************************************************************** + * The following firmware has been taken (and reformatted slightly) from the * + * Atmel (atmelwlandriver) driver source. * + * * + * Target: AT76C503 with Intersil 3863 radio * + * Version: 0.90.0 #44 * + ****************************************************************************/ + +/*************************************************************************************** + Copyright 2000-2001 ATMEL Corporation. + + This file is part of atmel wireless lan drivers. + Atmel wireless lan drivers is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + Atmel wireless lan drivers is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + You should have received a copy of the GNU General Public License + along with Atmel wireless lan drivers; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +**************************************************************************************/ +/**************************************************************************/ +/* */ +/* Copyright (c) 1999-2000 by Atmel Corporation */ +/* */ +/* This software is copyrighted by and is the sole property of Atmel */ +/* Corporation. All rights, title, ownership, or other interests */ +/* in the software remain the property of Atmel Corporation. This */ +/* software may only be used in accordance with the corresponding */ +/* license agreement. Any un-authorized use, duplication, transmission, */ +/* distribution, or disclosure of this software is expressly forbidden. */ +/* */ +/* This Copyright notice may not be removed or modified without prior */ +/* written consent of Atmel Corporation. */ +/* */ +/* Atmel Corporation, Inc. reserves the right to modify this software */ +/* without notice. */ +/* */ +/* Atmel Corporation. 
*/ +/* 2325 Orchard Parkway literature@atmel.com */ +/* San Jose, CA 95131 http://www.atmel.com */ +/* */ +/**************************************************************************/ +/**************************************************************************/ +/* */ +/* Automatically generated FW file for AT76C502A */ +/* */ +/**************************************************************************/ + +#define FW_I3863_INTERNAL { \ +0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F, \ +0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1, \ +0x9F,0xE5,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0E,0x04,0xA0,0xE3,0x00, \ +0x10,0xA0,0xE3,0x81,0x11,0xA0,0xE1,0x00,0x10,0x81,0xE3,0x00,0x10,0x80,0xE5, \ +0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3,0x04,0x10,0x80,0xE5,0x0C,0x10,0x80, \ +0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90,0xE5,0x48,0xD1,0x9F,0xE5,0xA8,0x13, \ +0x00,0xEB,0x44,0x11,0x9F,0xE5,0xD0,0x20,0x9F,0xE5,0xD1,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10,0xC1,0xE3,0x04,0x10,0x41,0xE2, \ +0x01,0xD0,0xA0,0xE1,0x00,0xA0,0xA0,0xE3,0x00,0xB0,0xA0,0xE3,0xB0,0x20,0x9F, \ +0xE5,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10, \ +0xC1,0xE3,0x04,0x10,0x41,0xE2,0x01,0xD0,0xA0,0xE1,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x84,0x30,0x9F,0xE5,0x00,0x10,0x83,0xE5,0x01,0xD0,0xA0,0xE1, \ +0x74,0x00,0x9F,0xE5,0x01,0x00,0x80,0xE3,0x0F,0xE0,0xA0,0xE1,0x10,0xFF,0x2F, \ +0xE1,0x00,0xA0,0x00,0x47,0x64,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x04,0x00, \ +0x80,0xE2,0x6C,0x10,0x9F,0xE5,0x6C,0x30,0x9F,0xE5,0x5C,0x20,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0x00,0x20,0x83,0xE5,0x02,0x00,0x80,0xE0,0x5C,0x10,0x9F,0xE5, \ +0x00,0x20,0xA0,0xE3,0x00,0x20,0x81,0xE5,0x44,0x20,0x9F,0xE5,0x00,0x00,0x82, \ +0xE5,0x1E,0xFF,0x2F,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF, \ +0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xD6,0x0E,0x00,0xEA,0x02, \ +0xF0,0x56,0xFB,0x22,0x48,0x87,0x46,0xEF,0x0E,0x00,0xEA,0x02,0xF0,0xB0,0xF9, \ +0x20,0x48,0x87,0x46,0xB5,0x01,0x00,0x00,0x00,0x03,0x00,0x02,0x00,0x02,0x00, \ +0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x84,0x03,0x00,0x02,0xA4,0x03, \ +0x00,0x02,0xA8,0x03,0x00,0x02,0xAC,0x03,0x00,0x02,0xFE,0xFF,0xFF,0xEA,0xFE, \ +0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0,0xE3, \ +0x0E,0xF0,0xA0,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0, \ +0xE3,0x0E,0xF0,0xA0,0xE1,0x00,0x00,0xA0,0xE3,0x0E,0xF0,0xA0,0xE1,0x20,0x00, \ +0x00,0x00,0x04,0x01,0x00,0x00,0x08,0x01,0x00,0x00,0x0C,0x01,0x00,0x00,0x10, \ +0x01,0x00,0x00,0x14,0x01,0x00,0x00,0x18,0x01,0x00,0x00,0x24,0x01,0x00,0x00, \ +0x00,0x60,0x00,0x01,0x20,0x53,0x00,0x01,0x69,0x3D,0x00,0x00,0x59,0x3E,0x00, \ +0x00,0x00,0xB5,0x03,0xF0,0x31,0xFD,0x00,0x20,0x00,0xBD,0xF0,0xB5,0x86,0xB0, \ +0x07,0x1C,0x00,0x21,0x04,0x91,0xFF,0x21,0x01,0x22,0x91,0x31,0x01,0x20,0x03, \ +0x90,0x01,0x91,0x05,0x92,0x02,0x92,0x3B,0x4A,0x3D,0xA1,0x3B,0x48,0x01,0x23, \ +0x00,0x97,0x03,0xF0,0x07,0xFF,0x00,0x21,0x04,0x91,0xFF,0x21,0x91,0x31,0x01, \ +0x22,0x05,0x92,0xFB,0x1D,0xFF,0x33,0x03,0x22,0x03,0x20,0x8A,0x33,0x00,0x93, \ +0x03,0x90,0x02,0x92,0x01,0x91,0x37,0xA1,0x35,0x4A,0x35,0x48,0x02,0x23,0x03, \ +0xF0,0xF2,0xFE,0x37,0x48,0x38,0xA1,0x03,0xF0,0x64,0xFF,0x3A,0x48,0x3B,0xA1, \ +0x03,0xF0,0x60,0xFF,0x3E,0x48,0x3F,0xA1,0x03,0xF0,0x5C,0xFF,0x42,0x48,0x43, \ +0xA1,0x03,0xF0,0x58,0xFF,0x46,0x48,0x04,0x26,0x06,0x70,0x46,0x48,0x00,0x27, \ +0x46,0x4D,0x07,0x60,0xE8,0x1D,0x75,0x30,0xEF,0x67,0x44,0x4C,0x47,0x60,0xE0, \ 
+0x1D,0x79,0x30,0xC7,0x61,0x43,0x48,0x07,0x70,0x03,0xF0,0xA5,0xFB,0x42,0x48, \ +0x07,0x70,0x47,0x70,0x41,0x48,0x07,0x60,0x41,0x48,0x07,0x60,0x02,0xF0,0x3E, \ +0xFE,0xE8,0x6F,0x07,0x25,0x6D,0x06,0x01,0x28,0x02,0xD1,0x0C,0x20,0xA8,0x61, \ +0x00,0xE0,0xAE,0x61,0x6F,0x61,0x00,0xF0,0x7C,0xF8,0x00,0xF0,0xC0,0xF8,0x00, \ +0xF0,0x3F,0xF9,0x38,0x48,0x00,0x7D,0x00,0xF0,0xBE,0xFA,0x60,0x74,0x00,0xF0, \ +0xD9,0xF8,0x36,0x48,0xE1,0x1D,0x00,0x79,0x69,0x31,0x48,0x70,0x34,0x48,0x80, \ +0x78,0x00,0x28,0x01,0xD0,0x02,0xF0,0x9E,0xF8,0x01,0x20,0x00,0xF0,0x79,0xF9, \ +0x02,0xF0,0x8B,0xF8,0x01,0xF0,0xFD,0xFC,0xE8,0x69,0x01,0x23,0xDB,0x03,0x18, \ +0x43,0xE8,0x61,0x06,0xB0,0xF0,0xBD,0x8D,0x18,0x00,0x00,0xA8,0x05,0x00,0x02, \ +0x54,0x78,0x20,0x74,0x68,0x72,0x65,0x61,0x64,0x00,0x00,0x00,0xC5,0x2A,0x00, \ +0x00,0x38,0x06,0x00,0x02,0x4D,0x67,0x6D,0x20,0x74,0x68,0x72,0x65,0x61,0x64, \ +0x00,0x00,0xC8,0x06,0x00,0x02,0x54,0x78,0x20,0x73,0x74,0x61,0x74,0x75,0x73, \ +0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0xE8,0x06,0x00,0x02,0x4D,0x67,0x6D,0x20, \ +0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x00,0x00, \ +0x00,0x08,0x07,0x00,0x02,0x54,0x58,0x20,0x47,0x4F,0x20,0x73,0x74,0x61,0x74, \ +0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x00,0x28,0x07,0x00,0x02,0x4D, \ +0x4E,0x47,0x20,0x47,0x4F,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C, \ +0x61,0x67,0x73,0x00,0x17,0x02,0x00,0x02,0x40,0x02,0x00,0x02,0xF8,0x00,0x00, \ +0x02,0x04,0x05,0x00,0x02,0x15,0x02,0x00,0x02,0xAC,0x08,0x00,0x02,0x38,0x02, \ +0x00,0x02,0x34,0x02,0x00,0x02,0x18,0x00,0x00,0x02,0xC8,0x00,0x00,0x02,0x14, \ +0x01,0x00,0x02,0x80,0xB4,0x1D,0x48,0x00,0x21,0xC1,0x72,0x00,0x20,0x19,0x27, \ +0x1B,0x4A,0xFF,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x00,0x20,0x43, \ +0x27,0x18,0x4A,0x7F,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x16,0x48, \ +0x14,0x4A,0x16,0x4B,0x01,0x60,0x42,0x60,0x13,0x60,0x42,0x68,0xD7,0x1D,0x15, \ +0x37,0x57,0x60,0x42,0x68,0x08,0x3F,0x97,0x60,0x42,0x68,0x11,0x73,0x42,0x68, \ +0x91,0x73,0x47,0x68,0x03,0x22,0xBA,0x75,0x42,0x68,0x91,0x82,0x42,0x68,0xC1, \ +0x60,0x82,0x60,0x09,0x4A,0x02,0x61,0x13,0x60,0x02,0x69,0xD3,0x1D,0x11,0x33, \ +0x53,0x60,0x02,0x69,0x91,0x81,0x02,0x69,0x11,0x72,0x01,0x69,0x41,0x61,0x80, \ +0xBC,0xF7,0x46,0x58,0x01,0x00,0x02,0x00,0x11,0x00,0x02,0x00,0xDA,0x00,0x02, \ +0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x80,0x00,0xB5,0x00,0xF0,0x9F,0xF8,0x0C, \ +0x48,0x01,0x22,0x81,0x89,0x0B,0x48,0xC1,0x63,0x02,0x72,0x82,0x74,0x82,0x73, \ +0x00,0x21,0x01,0x63,0x01,0x60,0x41,0x72,0xC3,0x1D,0x79,0x33,0x01,0x73,0x19, \ +0x62,0xC2,0x72,0x42,0x60,0x05,0x48,0x03,0x22,0x42,0x70,0x01,0x70,0x41,0x80, \ +0x00,0xBD,0x00,0x00,0xD0,0x00,0x00,0x02,0x04,0x05,0x00,0x02,0x24,0x01,0x00, \ +0x02,0xB0,0xB5,0x1A,0x4D,0x00,0x20,0xE8,0x71,0x19,0x48,0x00,0xF0,0x45,0xF8, \ +0x69,0x7A,0x18,0x48,0x01,0x29,0x04,0xD1,0x48,0x21,0x41,0x81,0x18,0x21,0x01, \ +0x81,0x03,0xE0,0x90,0x21,0x41,0x81,0x30,0x21,0x01,0x81,0x41,0x89,0x02,0x89, \ +0x12,0x4C,0xE7,0x1D,0x39,0x37,0x89,0x18,0x39,0x81,0xC1,0x88,0x80,0x88,0x0A, \ +0x18,0x12,0x18,0x89,0x18,0xFF,0x31,0x31,0x31,0xBA,0x80,0xF9,0x80,0x29,0x88, \ +0x48,0x43,0x78,0x81,0xE8,0x79,0x38,0x73,0x38,0x7B,0x78,0x73,0x00,0xF0,0xE7, \ +0xF9,0x01,0xF0,0x3B,0xFF,0x39,0x7B,0x06,0x48,0x40,0x5C,0xE1,0x1D,0x69,0x31, \ +0x08,0x72,0xB0,0xBD,0x14,0x01,0x00,0x02,0x74,0x00,0x00,0x02,0x18,0x00,0x00, \ +0x02,0xF8,0x00,0x00,0x02,0x6C,0x01,0x00,0x02,0x03,0x49,0x0F,0x20,0x00,0x06, \ +0x81,0x80,0x02,0x49,0x81,0x81,0xF7,0x46,0x00,0x00,0xE8,0xE8,0x00,0x00,0x13, \ +0x13,0x00,0x00,0x02,0x79,0x41,0x79,0x12,0x02,0x11,0x43,0xC2,0x78,0x12,0x04, \ 
+0x11,0x43,0x82,0x78,0x12,0x06,0x0A,0x43,0x01,0x21,0x89,0x06,0x8A,0x61,0x42, \ +0x78,0x00,0x78,0x00,0x02,0x10,0x43,0xC8,0x61,0xF7,0x46,0x00,0xB5,0x0C,0x49, \ +0x0D,0x48,0x41,0x61,0x23,0x21,0x81,0x61,0x00,0x22,0x01,0x05,0x0A,0x61,0xC2, \ +0x01,0x42,0x60,0x07,0x22,0xC2,0x60,0x08,0x4A,0x82,0x62,0xF2,0x22,0x82,0x60, \ +0x32,0x22,0x4A,0x61,0xCA,0x68,0xC9,0x6B,0x00,0x68,0x00,0x21,0x00,0x20,0x00, \ +0xF0,0x0B,0xF8,0x00,0xBD,0x01,0x24,0x00,0x00,0x40,0x00,0x00,0x04,0xAF,0xFF, \ +0x3F,0x00,0x01,0x20,0x80,0x06,0x40,0x6A,0xF7,0x46,0x90,0xB4,0x0F,0x4A,0x01, \ +0x27,0xD2,0x69,0xBF,0x06,0x00,0x2A,0x15,0xD1,0x0F,0x22,0x12,0x06,0x94,0x88, \ +0x0B,0x4B,0x23,0x40,0x93,0x80,0x94,0x89,0x0A,0x4B,0x23,0x40,0x93,0x81,0xB8, \ +0x62,0x79,0x62,0x90,0x89,0x08,0x4B,0x18,0x43,0x90,0x81,0x90,0x88,0x07,0x4B, \ +0x18,0x43,0x90,0x80,0x90,0xBC,0xF7,0x46,0xB8,0x62,0x79,0x62,0xFA,0xE7,0x84, \ +0x05,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x13,0x13,0x00,0x00, \ +0xE8,0xE8,0x00,0x00,0x01,0x1C,0x06,0x48,0x04,0xD0,0x41,0x68,0xC3,0x01,0x19, \ +0x43,0x41,0x60,0xF7,0x46,0x41,0x68,0x01,0x23,0x5B,0x03,0x99,0x43,0x41,0x60, \ +0xF7,0x46,0x40,0x00,0x00,0x04,0x80,0x00,0x89,0x02,0x01,0x43,0x03,0x48,0x41, \ +0x62,0x41,0x6A,0x49,0x08,0xFC,0xD3,0xF7,0x46,0x00,0x00,0x40,0x00,0x00,0x04, \ +0x80,0x00,0x02,0x23,0x04,0x49,0x18,0x43,0x48,0x62,0x48,0x6A,0x42,0x08,0xFC, \ +0xD3,0x80,0x03,0x00,0x0E,0xF7,0x46,0x00,0x00,0x40,0x00,0x00,0x04,0xF0,0xB5, \ +0x05,0x26,0x04,0x1C,0x15,0x1C,0x0F,0x1C,0x20,0x1C,0x39,0x1C,0xFF,0xF7,0xDD, \ +0xFF,0x20,0x1C,0xFF,0xF7,0xE6,0xFF,0x78,0x40,0x28,0x40,0x01,0x1C,0x01,0xD0, \ +0x01,0x3E,0xF2,0xD5,0x01,0x20,0x00,0x29,0x00,0xD0,0x00,0x20,0xF0,0xBD,0x00, \ +0x05,0x01,0x43,0x0D,0x48,0x01,0x62,0x01,0x6A,0xC9,0x0D,0xFC,0xD3,0x00,0x2A, \ +0x0A,0xD1,0xC1,0x69,0xFF,0x23,0x01,0x33,0x19,0x43,0xC1,0x61,0xC1,0x69,0xFF, \ +0x23,0x01,0x33,0x99,0x43,0xC1,0x61,0xF7,0x46,0xC1,0x69,0x01,0x23,0x5B,0x02, \ +0x19,0x43,0xC1,0x61,0xC1,0x69,0x99,0x43,0xC1,0x61,0xF7,0x46,0x40,0x00,0x00, \ +0x04,0x90,0xB5,0x09,0x4C,0x00,0x27,0x78,0x00,0xC0,0x19,0x02,0x19,0x10,0x78, \ +0x51,0x78,0x92,0x78,0xFF,0xF7,0xBF,0xFF,0x00,0x28,0x00,0xD1,0x90,0xBD,0x01, \ +0x37,0x70,0x2F,0xF1,0xD3,0x01,0x20,0x90,0xBD,0x00,0x00,0xA4,0x4F,0x00,0x00, \ +0x90,0xB5,0x25,0x4C,0x07,0x1C,0x00,0x22,0x01,0x20,0x61,0x68,0xFF,0xF7,0xC3, \ +0xFF,0x00,0x22,0x00,0x20,0x21,0x68,0xFF,0xF7,0xBE,0xFF,0x00,0x22,0x02,0x20, \ +0xA1,0x68,0xFF,0xF7,0xB9,0xFF,0x00,0x22,0x03,0x20,0xE1,0x68,0xFF,0xF7,0xB4, \ +0xFF,0x1B,0x4C,0x01,0x22,0x01,0x20,0x61,0x68,0xFF,0xF7,0xAE,0xFF,0x01,0x22, \ +0x00,0x20,0x21,0x68,0xFF,0xF7,0xA9,0xFF,0x78,0x1E,0x0D,0x28,0x06,0xD8,0x78, \ +0x00,0x14,0x49,0x01,0x22,0x09,0x5A,0x02,0x20,0xFF,0xF7,0x9F,0xFF,0x5A,0x20, \ +0x01,0x38,0xFD,0xD1,0x11,0x48,0x23,0x21,0x81,0x61,0x0D,0x21,0x01,0x61,0xC1, \ +0x69,0x20,0x23,0x99,0x43,0xC1,0x61,0x1B,0x21,0x01,0x39,0xFD,0xD1,0xC1,0x69, \ +0x20,0x23,0x19,0x43,0xC1,0x61,0x5A,0x20,0x01,0x38,0xFD,0xD1,0x08,0x48,0xFF, \ +0x22,0xC0,0x19,0x10,0x38,0xC1,0x7B,0x3E,0x20,0xFF,0xF7,0x6A,0xFF,0x90,0xBD, \ +0x00,0x00,0xF4,0x50,0x00,0x00,0x04,0x51,0x00,0x00,0x0C,0x51,0x00,0x00,0x40, \ +0x00,0x00,0x04,0x4C,0x01,0x00,0x02,0x80,0xB5,0x0F,0x27,0x3F,0x06,0xB9,0x88, \ +0x27,0x4B,0x19,0x40,0xB9,0x80,0xB9,0x89,0x26,0x4B,0x19,0x40,0xB9,0x81,0x26, \ +0x49,0xCA,0x69,0x0B,0x0C,0x1A,0x43,0xCA,0x61,0xCA,0x69,0x92,0x08,0x92,0x00, \ +0xCA,0x61,0x09,0x22,0x01,0x3A,0xFD,0xD1,0xCA,0x69,0x40,0x23,0x9A,0x43,0xCA, \ +0x61,0x09,0x22,0x01,0x3A,0xFD,0xD1,0xCA,0x69,0x40,0x23,0x1A,0x43,0xCA,0x61, \ +0x09,0x22,0x01,0x3A,0xFD,0xD1,0xCA,0x69,0x01,0x23,0x9B,0x02,0x9A,0x43,0xCA, \ 
+0x61,0x3A,0x88,0x09,0x68,0x3A,0x89,0x17,0x49,0x15,0x4A,0xD2,0x69,0x1A,0x04, \ +0xD3,0x68,0xD2,0x6B,0x08,0x75,0x08,0x7D,0xFF,0xF7,0x73,0xFF,0xFF,0xF7,0x2B, \ +0xFE,0xB8,0x89,0x11,0x4B,0x18,0x43,0xB8,0x81,0xB8,0x88,0x10,0x4B,0x18,0x43, \ +0xB8,0x80,0x10,0x48,0x41,0x6B,0x01,0x29,0x05,0xD1,0x00,0x22,0x10,0x21,0x0E, \ +0x48,0x03,0xF0,0xB2,0xFC,0x80,0xBD,0x40,0x6B,0x02,0x28,0xFB,0xD1,0x00,0x22, \ +0x10,0x21,0x0A,0x48,0x03,0xF0,0xA9,0xFC,0x80,0xBD,0x17,0x17,0xFF,0xFF,0xEC, \ +0xEC,0xFF,0xFF,0x40,0x00,0x00,0x04,0x80,0x00,0x00,0x04,0x18,0x00,0x00,0x02, \ +0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0x04,0x05,0x00,0x02,0xC8,0x06,0x00, \ +0x02,0xE8,0x06,0x00,0x02,0xB0,0xB5,0x01,0x21,0x89,0x02,0x04,0x1C,0x14,0x48, \ +0x44,0x23,0xC1,0x61,0xC1,0x69,0x19,0x43,0xC1,0x61,0x12,0x49,0x01,0x39,0xFD, \ +0xD1,0xC1,0x69,0x40,0x23,0x99,0x43,0xC1,0x61,0xE1,0x21,0x89,0x00,0x01,0x39, \ +0xFD,0xD1,0x05,0x1C,0xC0,0x69,0x40,0x23,0x18,0x43,0xE8,0x61,0x0A,0x48,0x01, \ +0x38,0xFD,0xD1,0xFF,0xF7,0x0D,0xFF,0x07,0x1C,0x20,0x1C,0xFF,0xF7,0x1F,0xFF, \ +0xE8,0x69,0x01,0x23,0x9B,0x02,0x98,0x43,0xE8,0x61,0x03,0x48,0x01,0x38,0xFD, \ +0xD1,0x38,0x1C,0xB0,0xBD,0x00,0x00,0x40,0x00,0x00,0x04,0x28,0x23,0x00,0x00, \ +0x80,0xB5,0x12,0x48,0x13,0x4F,0x41,0x7A,0x11,0x48,0x00,0x29,0x0D,0xD0,0x01, \ +0x78,0x00,0x29,0x0A,0xDD,0x00,0x78,0x08,0x21,0x01,0x43,0x0A,0x20,0xFF,0xF7, \ +0x9A,0xFE,0x48,0x20,0x78,0x81,0x18,0x20,0x38,0x81,0x07,0xE0,0x01,0x78,0x0A, \ +0x20,0xFF,0xF7,0x91,0xFE,0x90,0x20,0x78,0x81,0x30,0x20,0x38,0x81,0x78,0x89, \ +0x39,0x89,0x40,0x18,0x05,0x49,0x08,0x80,0x01,0xF0,0x32,0xFD,0x80,0xBD,0x00, \ +0x00,0x14,0x01,0x00,0x02,0x45,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x40,0x01, \ +0x00,0x02,0x00,0xB5,0x00,0x28,0x04,0xD0,0x84,0x21,0x0C,0x20,0xFF,0xF7,0x75, \ +0xFE,0x00,0xBD,0x04,0x21,0x0C,0x20,0xFF,0xF7,0x70,0xFE,0x00,0xBD,0x00,0x00, \ +0xC1,0x0A,0x01,0xD3,0x00,0x20,0xF7,0x46,0xFF,0x22,0x01,0x32,0x02,0x40,0x01, \ +0x21,0x00,0x2A,0x01,0xD0,0x08,0x1C,0xF7,0x46,0x80,0x0A,0x01,0xD3,0x08,0x1C, \ +0xF7,0x46,0x02,0x20,0xF7,0x46,0x80,0xB4,0x00,0x2A,0x0A,0xD9,0x07,0x78,0x0B, \ +0x78,0x01,0x31,0x01,0x30,0x9F,0x42,0x02,0xD0,0x01,0x20,0x80,0xBC,0xF7,0x46, \ +0x01,0x3A,0xF4,0xD1,0x00,0x20,0xF9,0xE7,0x00,0x2A,0x05,0xD9,0x03,0x78,0x01, \ +0x30,0x0B,0x70,0x01,0x31,0x01,0x3A,0xF9,0xD1,0xF7,0x46,0x80,0xB4,0x00,0x22, \ +0x00,0x29,0x03,0x78,0x0C,0xD9,0x07,0x78,0x01,0x30,0xFF,0x2F,0x01,0xD0,0x01, \ +0x22,0x03,0xE0,0x00,0x29,0xF7,0xD8,0x01,0x2A,0x02,0xD1,0x58,0x08,0x00,0xD3, \ +0x00,0x22,0x80,0xBC,0x10,0x1C,0xF7,0x46,0xF8,0xB5,0x0C,0x1C,0x1C,0x49,0x07, \ +0x1C,0x1C,0x4E,0x1D,0x48,0x31,0x60,0xC0,0x6C,0x00,0x25,0xA8,0x42,0x19,0xD9, \ +0x06,0x22,0x38,0x1C,0x31,0x68,0xFF,0xF7,0xC2,0xFF,0x00,0x90,0x00,0x98,0x00, \ +0x28,0x08,0xD1,0x30,0x68,0xC1,0x88,0xA1,0x42,0x01,0xD1,0x01,0x20,0xF8,0xBD, \ +0xC4,0x80,0x00,0x20,0xFB,0xE7,0x30,0x68,0x01,0x35,0x08,0x30,0x30,0x60,0x0F, \ +0x48,0xC0,0x6C,0xA8,0x42,0xE5,0xD8,0x0D,0x48,0xC1,0x6C,0x01,0x31,0xC1,0x64, \ +0xC1,0x6C,0x07,0x29,0x03,0xD9,0x07,0x49,0x31,0x60,0x08,0x21,0xC1,0x64,0x39, \ +0x88,0x30,0x68,0x01,0x80,0x79,0x88,0x41,0x80,0xB9,0x88,0x81,0x80,0x30,0x68, \ +0xC4,0x80,0x00,0x20,0xDD,0xE7,0x00,0x00,0x10,0x08,0x00,0x02,0x28,0x01,0x00, \ +0x02,0x04,0x05,0x00,0x02,0x02,0x78,0x11,0x43,0x01,0x70,0xF7,0x46,0x02,0x78, \ +0xC9,0x43,0x11,0x40,0x01,0x70,0xF7,0x46,0x00,0x78,0x08,0x40,0x01,0xD0,0x01, \ +0x20,0xF7,0x46,0x00,0x20,0xF7,0x46,0x05,0x49,0x8A,0x6C,0x12,0x01,0x02,0x70, \ +0x8A,0x6C,0x12,0x01,0x12,0x0A,0x42,0x70,0x88,0x6C,0x01,0x30,0x88,0x64,0xF7, \ +0x46,0x04,0x05,0x00,0x02,0xB0,0xB4,0x00,0x2A,0x16,0xD1,0x0D,0x4A,0x0F,0x06, \ 
+0x92,0x7A,0x3F,0x0E,0xBA,0x42,0x00,0xDC,0x11,0x1C,0x4F,0x00,0x0B,0x49,0x09, \ +0x4A,0xCD,0x88,0xD4,0x5B,0x64,0x19,0xE4,0x18,0x04,0x70,0xD2,0x5B,0xC9,0x88, \ +0x51,0x18,0xC9,0x18,0x09,0x0A,0x41,0x70,0xB0,0xBC,0xF7,0x46,0x00,0x21,0x01, \ +0x70,0x41,0x70,0xF9,0xE7,0x14,0x01,0x00,0x02,0x2C,0x01,0x00,0x02,0x18,0x00, \ +0x00,0x02,0x06,0x49,0x09,0x78,0x01,0x29,0x07,0xD1,0x05,0x49,0xC9,0x7A,0x01, \ +0x29,0x03,0xD1,0x01,0x78,0x40,0x23,0x19,0x43,0x01,0x70,0xF7,0x46,0x00,0x00, \ +0x30,0x00,0x00,0x02,0x98,0x00,0x00,0x02,0xF0,0xB5,0x29,0x4C,0x07,0x1C,0x00, \ +0x26,0x27,0x70,0xE1,0x1D,0x03,0x31,0x66,0x70,0x66,0x80,0x06,0x22,0x25,0x48, \ +0xFF,0xF7,0x3F,0xFF,0x25,0x4D,0xE1,0x1D,0x09,0x31,0x06,0x22,0xE8,0x1D,0x35, \ +0x30,0xFF,0xF7,0x37,0xFF,0xFF,0x20,0x20,0x71,0x60,0x71,0xA0,0x71,0xE0,0x71, \ +0x20,0x72,0x60,0x72,0x38,0x1C,0x40,0x28,0x1D,0x4F,0x1D,0xD0,0x00,0xF0,0x3E, \ +0xF8,0x00,0xF0,0x46,0xF8,0xE5,0x1D,0x1D,0x35,0x28,0x1C,0x00,0xF0,0x61,0xF8, \ +0x2D,0x18,0x28,0x1C,0x00,0xF0,0x79,0xF8,0x2D,0x18,0x16,0x48,0x80,0x7D,0x02, \ +0x28,0x03,0xD1,0x28,0x1C,0x00,0xF0,0x87,0xF8,0x2D,0x18,0x28,0x1C,0x00,0xF0, \ +0xC3,0xF8,0x28,0x18,0x00,0x1B,0xB8,0x66,0xB8,0x65,0xF0,0xBD,0x26,0x76,0x0F, \ +0x4E,0xE8,0x1D,0x72,0x79,0x15,0x30,0xE1,0x1D,0x13,0x31,0x62,0x76,0xFF,0xF7, \ +0x04,0xFF,0x70,0x79,0x00,0x19,0x1A,0x30,0x00,0xF0,0x59,0xF8,0x70,0x79,0x20, \ +0x30,0x00,0x06,0x00,0x0E,0xB8,0x65,0xF0,0xBD,0x00,0x00,0x48,0x07,0x00,0x02, \ +0x74,0x00,0x00,0x02,0xD0,0x00,0x00,0x02,0x04,0x05,0x00,0x02,0x18,0x00,0x00, \ +0x02,0x14,0x01,0x00,0x02,0x03,0x49,0x02,0x48,0x09,0x88,0x01,0x80,0xF7,0x46, \ +0x00,0x00,0x68,0x07,0x00,0x02,0x98,0x00,0x00,0x02,0x0D,0x49,0x0C,0x48,0x8A, \ +0x7A,0x92,0x00,0x02,0x80,0xC9,0x7A,0x00,0x29,0x03,0xD0,0x01,0x88,0x10,0x23, \ +0x19,0x43,0x01,0x80,0x08,0x49,0x49,0x7A,0x01,0x29,0x04,0xD1,0x01,0x88,0x22, \ +0x23,0x19,0x43,0x01,0x80,0xF7,0x46,0x01,0x88,0x02,0x23,0x19,0x43,0x01,0x80, \ +0xF7,0x46,0x6A,0x07,0x00,0x02,0x98,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x90, \ +0xB4,0x01,0x1C,0x00,0x20,0x0A,0x4A,0x08,0x70,0x53,0x79,0x00,0x2B,0x08,0xD9, \ +0x08,0x4B,0x1F,0x18,0x3F,0x7D,0x0C,0x18,0x01,0x30,0xA7,0x70,0x57,0x79,0x87, \ +0x42,0xF7,0xD8,0x50,0x79,0x48,0x70,0x50,0x79,0x90,0xBC,0x02,0x30,0xF7,0x46, \ +0x00,0x00,0x14,0x01,0x00,0x02,0x98,0x00,0x00,0x02,0x80,0xB4,0x01,0x1C,0x01, \ +0x20,0x08,0x70,0x07,0x4A,0x00,0x20,0x13,0x18,0x1B,0x7C,0x00,0x2B,0x04,0xD0, \ +0x0F,0x18,0x01,0x30,0x04,0x28,0xBB,0x70,0xF6,0xD3,0x48,0x70,0x80,0xBC,0x02, \ +0x30,0xF7,0x46,0x00,0x00,0x18,0x00,0x00,0x02,0x03,0x21,0x01,0x70,0x01,0x22, \ +0x42,0x70,0x01,0x30,0x80,0x18,0x02,0x4A,0x12,0x7D,0x02,0x70,0x08,0x1C,0xF7, \ +0x46,0x00,0x00,0x18,0x00,0x00,0x02,0xB0,0xB4,0x04,0x22,0x02,0x70,0x06,0x23, \ +0x43,0x70,0x81,0x70,0x82,0x1C,0x13,0x4C,0x51,0x1C,0x62,0x7B,0x12,0x4F,0x0A, \ +0x70,0x62,0x88,0x4A,0x70,0x62,0x88,0x12,0x0A,0xFB,0x1D,0x69,0x33,0x8A,0x70, \ +0x1D,0x7A,0x00,0x22,0x03,0x31,0x07,0x30,0x00,0x2D,0x0E,0xD0,0x60,0x37,0xFD, \ +0x8A,0x0D,0x70,0xF9,0x8A,0x09,0x12,0x01,0x70,0xF8,0x8A,0x21,0x88,0x40,0x1A, \ +0xF8,0x82,0xF8,0x8A,0x00,0x28,0x03,0xD1,0x1A,0x72,0x01,0xE0,0x0A,0x70,0x02, \ +0x70,0xB0,0xBC,0x08,0x20,0xF7,0x46,0x00,0x00,0x98,0x00,0x00,0x02,0x04,0x05, \ +0x00,0x02,0x06,0x21,0x01,0x70,0x02,0x21,0x41,0x70,0x04,0x49,0x02,0x30,0x0A, \ +0x89,0x02,0x70,0x09,0x89,0x09,0x0A,0x41,0x70,0x04,0x20,0xF7,0x46,0x00,0x00, \ +0x98,0x00,0x00,0x02,0x05,0x22,0x02,0x70,0x04,0x22,0x42,0x70,0x81,0x70,0x04, \ +0x49,0x03,0x30,0x09,0x7B,0x01,0x70,0x00,0x21,0x41,0x70,0x81,0x70,0x06,0x20, \ +0xF7,0x46,0x98,0x00,0x00,0x02,0xF8,0xB5,0x36,0x48,0x00,0x68,0xFF,0xF7,0x09, \ 
+0xFE,0x07,0x1C,0x34,0x48,0x00,0x68,0x44,0x68,0x20,0x78,0x06,0x07,0x36,0x0F, \ +0x02,0x2F,0x00,0xD1,0xF8,0xBD,0x31,0x4D,0x28,0x79,0x02,0x28,0x0A,0xD1,0xE0, \ +0x1D,0x09,0x30,0x06,0x22,0x2E,0x49,0xFF,0xF7,0x07,0xFE,0x00,0x90,0x00,0x98, \ +0x00,0x28,0x00,0xD1,0xEF,0xE7,0x30,0x06,0x00,0x0E,0x08,0x28,0x44,0xD1,0x29, \ +0x48,0xC0,0x7A,0x05,0x28,0x00,0xD0,0xE6,0xE7,0x28,0x4E,0x00,0x2F,0x14,0xD0, \ +0x28,0x79,0x02,0x28,0x11,0xD1,0xE0,0x1D,0x03,0x30,0x06,0x22,0x31,0x1C,0xFF, \ +0xF7,0xED,0xFD,0x00,0x90,0x00,0x98,0x01,0x28,0x00,0xD1,0xD5,0xE7,0x60,0x78, \ +0x81,0x08,0x00,0xD2,0xD1,0xE7,0x40,0x08,0x00,0xD3,0xCE,0xE7,0x28,0x79,0x01, \ +0x28,0x10,0xD1,0xE0,0x1D,0x09,0x30,0x06,0x22,0x31,0x1C,0xFF,0xF7,0xD8,0xFD, \ +0x00,0x90,0x00,0x98,0x01,0x28,0x00,0xD1,0xC0,0xE7,0x60,0x78,0x81,0x08,0x01, \ +0xD2,0x40,0x08,0x00,0xD3,0xBA,0xE7,0x13,0x48,0x01,0x78,0x00,0x29,0x06,0xD0, \ +0xC0,0x78,0x00,0x28,0x07,0xD0,0x60,0x78,0xC0,0x09,0x04,0xD2,0xAF,0xE7,0x60, \ +0x78,0xC0,0x09,0x00,0xD3,0xAB,0xE7,0x21,0x78,0x38,0x1C,0x00,0xF0,0x50,0xFD, \ +0x04,0xE0,0x00,0x28,0x02,0xD1,0x38,0x1C,0x00,0xF0,0xC8,0xFB,0xA0,0xE7,0x00, \ +0x00,0xA0,0x01,0x00,0x02,0x10,0x00,0x00,0x02,0xC8,0x00,0x00,0x02,0x74,0x00, \ +0x00,0x02,0x74,0x05,0x00,0x02,0xA6,0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x08, \ +0xB5,0x00,0x21,0x00,0x91,0x00,0x28,0x0C,0xD1,0x0B,0x48,0x00,0x68,0x40,0x68, \ +0x81,0x7D,0xC2,0x7D,0x12,0x02,0x11,0x43,0x09,0x04,0x09,0x0C,0x0A,0x30,0xFF, \ +0xF7,0xC1,0xFD,0x00,0x90,0x00,0x98,0x01,0x28,0x03,0xD1,0x04,0x48,0x80,0x79, \ +0x00,0x28,0x01,0xD0,0x00,0xF0,0x05,0xF8,0x08,0xBD,0x10,0x00,0x00,0x02,0x14, \ +0x01,0x00,0x02,0x80,0xB5,0x05,0x48,0x00,0x78,0x80,0x09,0x04,0xD3,0x04,0x4F, \ +0x38,0x68,0x02,0xF0,0x0D,0xF8,0x38,0x60,0x80,0xBD,0x00,0x00,0x63,0x01,0x00, \ +0x02,0x10,0x00,0x00,0x02,0xF0,0xB5,0x88,0xB0,0x7D,0x25,0xED,0x00,0x01,0x21, \ +0x89,0x06,0x04,0x1C,0x88,0x68,0xFC,0x49,0x00,0x0B,0xFC,0x27,0x07,0x40,0x04, \ +0x20,0x38,0x40,0xF8,0x4E,0x07,0x91,0x73,0xD0,0xB8,0x09,0x06,0xD3,0xF8,0x48, \ +0xF8,0x4B,0x81,0x6A,0x19,0x40,0x81,0x62,0x14,0x21,0x05,0xE0,0xF4,0x48,0xF6, \ +0x4B,0x81,0x6A,0x19,0x40,0x81,0x62,0x0E,0x21,0x68,0x46,0x02,0x22,0x01,0xF0, \ +0x14,0xFE,0x01,0x28,0x02,0xD0,0x00,0x20,0x08,0xB0,0xF0,0xBD,0x68,0x20,0xFF, \ +0xF7,0xB0,0xFB,0x06,0x90,0x06,0x98,0x10,0x23,0x18,0x40,0x00,0x09,0x00,0x06, \ +0x06,0x99,0x00,0x0E,0x49,0x09,0x40,0x18,0x06,0x90,0x99,0x05,0xC8,0x68,0x20, \ +0x43,0x03,0xE0,0x01,0x21,0x89,0x06,0xC9,0x68,0x08,0x43,0x41,0x09,0x02,0xD2, \ +0x01,0x3D,0x00,0x2D,0xF6,0xDC,0x10,0x23,0x98,0x43,0x04,0x1C,0x00,0x2D,0x21, \ +0xD1,0xDD,0x4D,0x1B,0x02,0xE8,0x69,0x18,0x43,0xE8,0x61,0xE8,0x69,0xDD,0x4B, \ +0x18,0x40,0xE8,0x61,0x06,0x21,0x02,0x20,0xFF,0xF7,0x7A,0xFB,0x04,0x21,0x02, \ +0x20,0xFF,0xF7,0x76,0xFB,0x05,0x21,0x02,0x20,0xFF,0xF7,0x72,0xFB,0x04,0x21, \ +0x02,0x20,0xFF,0xF7,0x6E,0xFB,0x01,0x21,0x89,0x06,0xC8,0x68,0xD3,0x4B,0xA8, \ +0x6A,0x18,0x43,0xA8,0x62,0x00,0x20,0xBB,0xE7,0xCC,0x4D,0xE8,0x69,0xAB,0x01, \ +0x18,0x43,0xE8,0x61,0xE8,0x69,0xCC,0x4B,0x18,0x40,0xE8,0x61,0x05,0x21,0x02, \ +0x20,0xFF,0xF7,0x58,0xFB,0x04,0x21,0x02,0x20,0xFF,0xF7,0x54,0xFB,0x01,0x21, \ +0x89,0x06,0xC8,0x68,0xA9,0x6A,0xC5,0x4B,0x20,0x43,0x19,0x43,0xA9,0x62,0x00, \ +0xE0,0x21,0xE0,0x00,0x24,0xC3,0x49,0xC0,0x0A,0x0C,0x60,0x07,0xD3,0xB4,0x2F, \ +0x07,0xD0,0xC4,0x2F,0x15,0xD0,0xD4,0x2F,0x01,0xD1,0x00,0xF0,0xBF,0xF9,0x20, \ +0x1C,0x91,0xE7,0x02,0x20,0x48,0x72,0x06,0x98,0x07,0x99,0xBB,0x4F,0x88,0x70, \ +0x68,0x46,0x0A,0x30,0x06,0x22,0x31,0x1C,0xFF,0xF7,0xE3,0xFC,0x00,0xA8,0x40, \ +0x88,0x78,0x80,0xED,0xE7,0x00,0xF0,0x8B,0xFA,0xEA,0xE7,0xB4,0x48,0x00,0x68, \ 
+0x00,0x7A,0x00,0x28,0x09,0xD0,0xAB,0x48,0xAB,0x4B,0x81,0x6A,0x19,0x40,0x81, \ +0x62,0xAD,0x49,0xC4,0x20,0x08,0x60,0x00,0x20,0x6F,0xE7,0x01,0x20,0xFF,0xF7, \ +0x06,0xFB,0x68,0x20,0xFF,0xF7,0x1F,0xFB,0x06,0x90,0x06,0x98,0x10,0x23,0x18, \ +0x40,0x00,0x09,0x00,0x06,0x06,0x99,0x00,0x0E,0x49,0x09,0x40,0x18,0x06,0x90, \ +0x9D,0x48,0xC7,0x6A,0x06,0x98,0x00,0x28,0x1F,0xD0,0x01,0x28,0x1F,0xD0,0x02, \ +0x28,0x1F,0xD0,0x03,0x28,0x08,0xD1,0x0B,0x20,0x78,0x43,0xC7,0x08,0x6A,0x20, \ +0xFF,0xF7,0x03,0xFB,0x00,0x0A,0x00,0xD3,0x01,0x3F,0x9B,0x48,0x80,0x89,0x04, \ +0x30,0xB8,0x42,0x01,0xD3,0x18,0x2F,0x11,0xD8,0x95,0x49,0xC3,0x20,0x08,0x60, \ +0x8E,0x48,0x97,0x4B,0x81,0x6A,0x19,0x40,0x81,0x62,0x00,0x20,0x39,0xE7,0xFF, \ +0x08,0xEC,0xE7,0xBF,0x08,0xEA,0xE7,0x0B,0x20,0x78,0x43,0x07,0x09,0xE6,0xE7, \ +0x87,0x49,0xF8,0x02,0xFF,0x23,0x8A,0x6A,0x18,0x43,0x10,0x40,0x88,0x62,0x8A, \ +0x48,0x02,0x22,0x00,0x68,0x18,0x21,0x40,0x68,0x01,0xF0,0x35,0xFD,0x01,0x28, \ +0x01,0xD0,0x00,0x20,0x1F,0xE7,0x01,0x21,0x89,0x06,0xC8,0x68,0x20,0x43,0x03, \ +0xE0,0x01,0x21,0x89,0x06,0xC9,0x68,0x08,0x43,0x41,0x09,0x02,0xD2,0x01,0x3D, \ +0x00,0x2D,0xF6,0xDC,0x10,0x23,0x98,0x43,0x04,0x1C,0x00,0x2D,0x21,0xD1,0x74, \ +0x4F,0x1B,0x02,0xF8,0x69,0x18,0x43,0xF8,0x61,0xF8,0x69,0x74,0x4B,0x18,0x40, \ +0xF8,0x61,0x06,0x21,0x02,0x20,0xFF,0xF7,0xA9,0xFA,0x04,0x21,0x02,0x20,0xFF, \ +0xF7,0xA5,0xFA,0x05,0x21,0x02,0x20,0xFF,0xF7,0xA1,0xFA,0x04,0x21,0x02,0x20, \ +0xFF,0xF7,0x9D,0xFA,0x01,0x21,0x89,0x06,0xC8,0x68,0x6A,0x4B,0xB8,0x6A,0x18, \ +0x43,0xB8,0x62,0x00,0x20,0xEA,0xE6,0x6D,0x48,0x04,0x60,0x01,0x20,0x80,0x02, \ +0x20,0x40,0x01,0x25,0x00,0x28,0x01,0xD0,0x64,0x48,0x45,0x72,0x06,0x98,0x07, \ +0x99,0x06,0x22,0x88,0x70,0x31,0x1C,0x63,0x4E,0x30,0x68,0x40,0x68,0x0A,0x30, \ +0xFF,0xF7,0x33,0xFC,0x30,0x68,0x87,0x81,0x06,0x98,0x31,0x68,0x00,0x27,0x88, \ +0x73,0x5B,0x48,0x07,0x60,0x31,0x68,0x48,0x68,0x42,0x78,0xD2,0x09,0x11,0xD2, \ +0x89,0x89,0x02,0x22,0x18,0x30,0x18,0x39,0x01,0xF0,0xD4,0xFC,0x00,0x28,0x03, \ +0xD1,0x53,0x48,0x85,0x73,0x07,0x73,0xD0,0xE0,0x51,0x49,0x03,0x20,0x08,0x73, \ +0x02,0x20,0x88,0x73,0xCA,0xE0,0x02,0x22,0x04,0x21,0x18,0x30,0x01,0xF0,0xC3, \ +0xFC,0x7D,0x20,0xC0,0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x21,0x43,0x03,0xE0, \ +0x01,0x22,0x92,0x06,0xD2,0x68,0x11,0x43,0x4A,0x09,0x02,0xD2,0x01,0x38,0x00, \ +0x28,0xF6,0xDC,0x10,0x23,0x99,0x43,0x0C,0x1C,0x00,0x28,0x21,0xD1,0x3C,0x4C, \ +0x1B,0x02,0xE0,0x69,0x18,0x43,0xE0,0x61,0xE0,0x69,0x3C,0x4B,0x18,0x40,0xE0, \ +0x61,0x06,0x21,0x02,0x20,0xFF,0xF7,0x39,0xFA,0x04,0x21,0x02,0x20,0xFF,0xF7, \ +0x35,0xFA,0x05,0x21,0x02,0x20,0xFF,0xF7,0x31,0xFA,0x04,0x21,0x02,0x20,0xFF, \ +0xF7,0x2D,0xFA,0x01,0x22,0x92,0x06,0xD0,0x68,0x32,0x4B,0xA0,0x6A,0x18,0x43, \ +0xA0,0x62,0x38,0x1C,0x7A,0xE6,0x30,0x68,0x40,0x68,0x41,0x7E,0x02,0x7E,0x09, \ +0x02,0x11,0x43,0x82,0x7E,0xC0,0x7E,0x12,0x04,0x11,0x43,0x80,0x09,0x02,0x06, \ +0x30,0x48,0x12,0x0E,0xC3,0x1D,0x39,0x33,0x1B,0x78,0x01,0x2B,0x15,0xD1,0x0D, \ +0x23,0x5A,0x43,0x10,0x18,0x02,0x7B,0x12,0x06,0x11,0x43,0x03,0x22,0x52,0x06, \ +0x11,0x60,0x83,0x7B,0x41,0x7B,0x1B,0x02,0x19,0x43,0xC3,0x7B,0x00,0x7C,0x1B, \ +0x04,0x19,0x43,0x00,0x06,0x08,0x43,0x50,0x60,0x95,0x60,0x4B,0xE0,0x02,0x2B, \ +0x49,0xD1,0x0D,0x23,0x5A,0x43,0x10,0x18,0x02,0x7B,0x12,0x06,0x11,0x43,0x03, \ +0x22,0x52,0x06,0x11,0x60,0x83,0x7B,0x41,0x7B,0x1B,0x02,0x19,0x43,0xC3,0x7B, \ +0x1B,0x04,0x19,0x43,0x03,0x7C,0x1B,0x06,0x19,0x43,0x51,0x60,0x83,0x7C,0x41, \ +0x7C,0x1B,0x02,0x19,0x43,0xC3,0x7C,0x1B,0x04,0x19,0x43,0x03,0x7D,0x1B,0x06, \ +0x19,0x43,0x51,0x61,0x83,0x7D,0x41,0x7D,0x1B,0x02,0x19,0x43,0xC3,0x7D,0x00, \ 
+0x7E,0x1B,0x04,0x19,0x43,0x1C,0xE0,0x00,0x00,0xE4,0x07,0x00,0x02,0x74,0x05, \ +0x00,0x02,0x40,0x00,0x00,0x04,0xFF,0xA0,0x00,0x00,0xFF,0x70,0x00,0x00,0xFF, \ +0xEF,0x00,0x00,0x00,0xFF,0x3F,0x00,0x04,0x05,0x00,0x02,0xE0,0x07,0x00,0x02, \ +0x10,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0xFF,0xC0,0x00,0x00,0x9C,0x01,0x00, \ +0x02,0x30,0x00,0x00,0x02,0x00,0x06,0x08,0x43,0x90,0x61,0x81,0x20,0x90,0x60, \ +0x30,0x68,0x0E,0x22,0x81,0x89,0x40,0x68,0x18,0x30,0x20,0x39,0x01,0xF0,0x0F, \ +0xFC,0x00,0x28,0x06,0xD1,0x12,0x48,0x03,0x22,0x85,0x73,0x07,0x73,0x52,0x06, \ +0x97,0x60,0x08,0xE0,0x30,0x68,0x81,0x89,0x08,0x39,0x81,0x81,0x0C,0x49,0x03, \ +0x20,0x08,0x73,0x02,0x20,0x88,0x73,0x7C,0x20,0xFF,0xF7,0x9E,0xF9,0x31,0x68, \ +0x48,0x74,0x66,0x20,0xFF,0xF7,0x99,0xF9,0x31,0x68,0x88,0x74,0xA0,0x09,0x05, \ +0xD3,0xE0,0x08,0x01,0xD3,0x24,0x20,0xDB,0xE5,0x20,0x20,0xD9,0xE5,0x38,0x1C, \ +0xD7,0xE5,0x00,0x00,0x04,0x05,0x00,0x02,0xF0,0xB5,0x5F,0x4D,0x68,0x68,0x02, \ +0x28,0x69,0xD1,0x01,0x20,0x01,0xF0,0xCC,0xF8,0x01,0x26,0xEC,0x1D,0x79,0x34, \ +0x6E,0x60,0xA7,0x68,0x08,0x23,0x78,0x78,0x59,0x4A,0x98,0x43,0x78,0x70,0x11, \ +0x78,0x58,0x48,0x01,0x29,0x66,0xD1,0x57,0x4B,0x1A,0x78,0x00,0x2A,0x01,0xD0, \ +0x18,0x21,0x00,0xE0,0x1E,0x21,0xE3,0x68,0x54,0x4D,0x5B,0x1A,0x1B,0x04,0x2D, \ +0x68,0x1B,0x0C,0x5B,0x19,0x51,0x4D,0x01,0x32,0x2B,0x60,0x4F,0x4B,0x4D,0x48, \ +0x1A,0x70,0x1A,0x78,0xBD,0x7D,0x12,0x07,0x12,0x0F,0xF0,0x23,0x2B,0x40,0x1A, \ +0x43,0xBA,0x75,0xE2,0x68,0x4C,0x4D,0x51,0x1A,0x4A,0x4A,0x13,0x88,0x59,0x1A, \ +0x11,0x80,0x00,0x89,0x12,0x88,0xC1,0x1F,0x15,0x39,0x91,0x42,0x04,0xDA,0x47, \ +0x49,0x04,0x38,0x08,0x80,0xAE,0x72,0x11,0xE0,0x78,0x78,0x04,0x23,0x41,0x4A, \ +0x98,0x43,0x78,0x70,0x10,0x88,0x41,0x4B,0x3B,0x4A,0x18,0x30,0x00,0x04,0x00, \ +0x0C,0x02,0x21,0x18,0x80,0x11,0x70,0x28,0x28,0x01,0xDA,0x28,0x20,0x18,0x80, \ +0x3B,0x49,0x3C,0x48,0x09,0x88,0xE1,0x60,0x00,0x68,0x80,0x7D,0x00,0xF0,0x96, \ +0xFF,0x20,0x61,0x38,0x48,0x38,0x49,0x00,0x68,0x80,0x7D,0x08,0x70,0x00,0xF0, \ +0xEE,0xFF,0x35,0x49,0x08,0x78,0x03,0x28,0x05,0xD1,0x2A,0x48,0x80,0x6A,0xFF, \ +0xF7,0x94,0xFA,0x03,0xE0,0x3E,0xE0,0x00,0x20,0xFF,0xF7,0x8F,0xFA,0x30,0x1C, \ +0x26,0x4A,0xA8,0x72,0x10,0x78,0x2E,0x4D,0x23,0x4E,0x02,0x28,0x02,0xD1,0x00, \ +0x23,0x10,0xE0,0x31,0xE0,0x28,0x48,0x2B,0x49,0x00,0x68,0x80,0x7D,0x89,0x7A, \ +0x88,0x42,0x00,0xDB,0x08,0x1C,0x28,0x49,0x40,0x00,0x08,0x5A,0xE9,0x88,0x49, \ +0x00,0x40,0x18,0x21,0x69,0x43,0x18,0xB8,0x1C,0x21,0x4F,0x00,0x22,0x39,0x78, \ +0xFF,0xF7,0x1C,0xFB,0x22,0x49,0x20,0x69,0x09,0x88,0x20,0x4A,0x40,0x18,0xE9, \ +0x88,0x40,0x18,0x39,0x78,0x49,0x00,0x51,0x5A,0x40,0x18,0x1E,0x49,0x09,0x88, \ +0x41,0x18,0x01,0x20,0x01,0xF0,0x15,0xF8,0x02,0x21,0x71,0x60,0x00,0x21,0x0D, \ +0x48,0x31,0x64,0x80,0x89,0xF0,0x63,0x11,0x48,0x00,0x68,0x41,0x73,0xF0,0xBD, \ +0x00,0x21,0x29,0x64,0x80,0x89,0xE8,0x63,0x0D,0x48,0x00,0x68,0x41,0x73,0x01, \ +0x20,0xFF,0xF7,0xB4,0xF8,0x00,0x22,0x10,0x21,0x10,0x48,0x02,0xF0,0x77,0xFE, \ +0xF0,0xBD,0x04,0x05,0x00,0x02,0x61,0x01,0x00,0x02,0xD0,0x00,0x00,0x02,0x62, \ +0x01,0x00,0x02,0x64,0x01,0x00,0x02,0x68,0x01,0x00,0x02,0x74,0x05,0x00,0x02, \ +0x6A,0x01,0x00,0x02,0x04,0x00,0x00,0x02,0x45,0x01,0x00,0x02,0x18,0x00,0x00, \ +0x02,0x14,0x01,0x00,0x02,0x2C,0x01,0x00,0x02,0x40,0x01,0x00,0x02,0x42,0x01, \ +0x00,0x02,0xE8,0x06,0x00,0x02,0xB0,0xB5,0x21,0x4F,0x78,0x68,0x03,0x28,0x3C, \ +0xD1,0x20,0x49,0x00,0x20,0x48,0x60,0x08,0x05,0x41,0x6A,0x1E,0x4C,0x05,0x31, \ +0x01,0x62,0x20,0x68,0x1D,0x4D,0x80,0x7D,0x28,0x70,0x00,0xF0,0x61,0xFF,0x28, \ +0x78,0x03,0x28,0x03,0xD1,0xB8,0x6A,0xFF,0xF7,0x09,0xFA,0x02,0xE0,0x00,0x20, \ 
+0xFF,0xF7,0x05,0xFA,0xF9,0x1D,0x69,0x31,0x01,0x20,0x88,0x72,0x20,0x68,0x14, \ +0x49,0x80,0x7D,0x89,0x7A,0x88,0x42,0x00,0xDB,0x08,0x1C,0x12,0x49,0xFA,0x1D, \ +0x09,0x88,0x79,0x32,0x12,0x69,0x05,0x31,0x89,0x18,0x10,0x4A,0xD2,0x88,0x89, \ +0x18,0x0F,0x4A,0x40,0x00,0x10,0x5A,0x08,0x18,0x0E,0x49,0x09,0x88,0x44,0x18, \ +0x01,0x20,0x00,0xF0,0xB8,0xFF,0x01,0x20,0x21,0x1C,0x00,0xF0,0x9A,0xFF,0x02, \ +0x20,0x78,0x60,0xB0,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0x40,0x00,0x00,0x04, \ +0x04,0x00,0x00,0x02,0x45,0x01,0x00,0x02,0x14,0x01,0x00,0x02,0x40,0x01,0x00, \ +0x02,0x18,0x00,0x00,0x02,0x2C,0x01,0x00,0x02,0x42,0x01,0x00,0x02,0x00,0xB5, \ +0x11,0x49,0x09,0x68,0x49,0x68,0x0B,0x78,0x1A,0x07,0x10,0xD1,0x1A,0x11,0x0D, \ +0x2A,0x0D,0xD2,0x01,0xA3,0x9B,0x5C,0x5B,0x00,0x9F,0x44,0x0E,0x0E,0x0E,0x0E, \ +0x11,0x0A,0x09,0x09,0x06,0x09,0x0E,0x0E,0x0E,0x00,0x08,0x1C,0x00,0xF0,0x38, \ +0xF8,0x00,0xBD,0x08,0x1C,0x00,0xF0,0xAC,0xF8,0x00,0xBD,0xFF,0xF7,0x29,0xFC, \ +0x00,0xBD,0x00,0xF0,0x04,0xF8,0x00,0xBD,0x00,0x00,0x10,0x00,0x00,0x02,0x90, \ +0xB5,0x0F,0x4C,0x60,0x7B,0x00,0x28,0x19,0xD0,0x0E,0x4F,0x38,0x68,0x40,0x68, \ +0x42,0x7E,0x18,0x30,0x00,0x2A,0x09,0xD0,0x0B,0x49,0x49,0x79,0x91,0x42,0x0E, \ +0xD1,0x0A,0x49,0x02,0x30,0xFF,0xF7,0xB2,0xF9,0x00,0x28,0x08,0xD1,0x38,0x68, \ +0x08,0x49,0x40,0x68,0x0A,0x30,0x06,0x22,0xFF,0xF7,0xB9,0xF9,0x01,0x20,0xA0, \ +0x73,0x90,0xBD,0x64,0x05,0x00,0x02,0x10,0x00,0x00,0x02,0x14,0x01,0x00,0x02, \ +0xAC,0x00,0x00,0x02,0x46,0x01,0x00,0x02,0xF0,0xB5,0x34,0x4D,0x07,0x1C,0xEC, \ +0x1D,0x69,0x34,0xE0,0x7A,0x01,0x28,0x19,0xD1,0x38,0x1C,0x00,0xF0,0x2D,0xF9, \ +0x00,0x28,0x14,0xD0,0x2F,0x49,0xCA,0x7C,0x2F,0x49,0x09,0x7D,0x8A,0x42,0x0E, \ +0xD1,0x81,0x42,0x0C,0xD1,0x2D,0x48,0x2D,0x4B,0xC1,0x6B,0x00,0x05,0x40,0x6A, \ +0x08,0x1A,0x98,0x42,0x04,0xD9,0x01,0x20,0xE0,0x70,0xFF,0xF7,0xF7,0xFB,0xF0, \ +0xBD,0xE0,0x7A,0x03,0x28,0x03,0xD1,0x05,0xF0,0x7D,0xFF,0x00,0x28,0xF7,0xD0, \ +0xE0,0x7A,0x04,0x28,0x03,0xD1,0x01,0x20,0x05,0xF0,0xA1,0xFE,0xF0,0xBD,0xE0, \ +0x7A,0x05,0x28,0x33,0xD1,0x20,0x4E,0xFC,0x1D,0xFD,0x1D,0x30,0x79,0xF7,0x1F, \ +0x1B,0x3F,0x19,0x35,0x09,0x34,0x02,0x28,0x09,0xD1,0xA8,0x78,0x40,0x08,0xE0, \ +0xD3,0x06,0x22,0x20,0x1C,0x39,0x1C,0xFF,0xF7,0x57,0xF9,0x00,0x28,0xD9,0xD1, \ +0x30,0x79,0x01,0x28,0x1C,0xD1,0xA8,0x78,0x80,0x08,0xD3,0xD3,0x06,0x22,0x20, \ +0x1C,0x39,0x1C,0xFF,0xF7,0x4A,0xF9,0x00,0x28,0x12,0xD1,0x06,0x20,0x00,0xF0, \ +0xF7,0xFE,0x09,0x4D,0x00,0x20,0xE9,0x1D,0x59,0x31,0x48,0x73,0x28,0x74,0x04, \ +0x20,0x68,0x73,0x00,0xF0,0x1B,0xF8,0x00,0xF0,0xA7,0xFE,0x09,0x48,0xC1,0x69, \ +0x01,0x31,0xC1,0x61,0xFF,0xF7,0xAF,0xFB,0xF0,0xBD,0x04,0x05,0x00,0x02,0xD0, \ +0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x40,0x00,0x00,0x04,0x88,0x13,0x00,0x00, \ +0xC8,0x00,0x00,0x02,0x7C,0x01,0x00,0x02,0x00,0xB5,0xFF,0xF7,0x9D,0xFB,0x00, \ +0xBD,0xF0,0xB5,0x84,0xB0,0x01,0x21,0x89,0x06,0x8A,0x6A,0x01,0x92,0x4E,0x6A, \ +0x40,0x49,0x09,0x68,0x03,0x91,0x4A,0x68,0x53,0x7E,0x17,0x7E,0x1B,0x02,0x3B, \ +0x43,0x97,0x7E,0x3F,0x04,0x3B,0x43,0xD7,0x7E,0x3F,0x06,0x3B,0x43,0x1C,0x1C, \ +0x57,0x7F,0x13,0x7F,0x3F,0x02,0x3B,0x43,0x97,0x7F,0xD2,0x7F,0x3F,0x04,0x3B, \ +0x43,0x12,0x06,0x13,0x43,0x8A,0x89,0x89,0x7B,0x18,0x3A,0xD7,0x00,0x1D,0x1C, \ +0x00,0x29,0x22,0xD0,0x01,0x29,0x22,0xD0,0x02,0x29,0x22,0xD0,0x03,0x29,0x0C, \ +0xD1,0x0B,0x20,0x39,0x1C,0x03,0xF0,0x25,0xFA,0x00,0x91,0x79,0x1A,0x0B,0x20, \ +0x03,0xF0,0x20,0xFA,0x00,0x99,0x00,0x29,0x00,0xD9,0x01,0x30,0x03,0x99,0x4B, \ +0x69,0x01,0x9A,0x19,0x1A,0x00,0x2A,0x02,0xD0,0x83,0x42,0x00,0xD2,0x01,0x3A, \ +0x23,0x48,0x00,0x79,0x02,0x28,0x15,0xD1,0xB3,0x42,0x3C,0xD2,0x1D,0xE0,0x38, \ 
+0x1C,0xED,0xE7,0x78,0x08,0xEB,0xE7,0x79,0x00,0x02,0x91,0x0B,0x20,0x03,0xF0, \ +0x03,0xFA,0x0F,0x1C,0x02,0x99,0xC9,0x1B,0x0B,0x20,0x03,0xF0,0xFD,0xF9,0x00, \ +0x2F,0xDE,0xD9,0x01,0x30,0xDC,0xE7,0x01,0x28,0x26,0xD1,0x00,0x20,0x15,0x4B, \ +0x95,0x42,0x58,0x73,0x03,0xD8,0x95,0x42,0x1F,0xD1,0x8C,0x42,0x1D,0xD9,0x00, \ +0x20,0x01,0x9B,0xC0,0x43,0x93,0x42,0x01,0xD1,0x72,0x1A,0x01,0xE0,0x42,0x1A, \ +0x92,0x19,0xA7,0x18,0x03,0x1B,0x93,0x42,0x00,0xD2,0x01,0x35,0x40,0x1A,0x98, \ +0x42,0x01,0xD2,0x08,0x1B,0x00,0xE0,0x60,0x1A,0x0A,0x28,0x07,0xD9,0x88,0x18, \ +0x39,0x1C,0x00,0xF0,0x0C,0xF8,0x01,0x21,0x89,0x06,0x8D,0x62,0x4F,0x62,0x04, \ +0xB0,0xF0,0xBD,0x10,0x00,0x00,0x02,0xC8,0x00,0x00,0x02,0x64,0x05,0x00,0x02, \ +0xF0,0xB4,0x11,0x4C,0x00,0x22,0xA7,0x69,0xA2,0x61,0x01,0x22,0x01,0x25,0x2B, \ +0x1C,0x56,0x1E,0xB3,0x40,0x3B,0x40,0x1E,0x1C,0xC1,0x23,0x33,0x40,0x0D,0xD0, \ +0x0B,0x4B,0x96,0x00,0xF3,0x18,0xDE,0x6A,0x86,0x1B,0x00,0x2E,0x06,0xDC,0xDE, \ +0x6A,0x00,0x2E,0x03,0xD0,0xDE,0x6A,0x36,0x1A,0x8E,0x19,0xDE,0x62,0x01,0x32, \ +0x08,0x2A,0xE6,0xD9,0xA7,0x61,0xF0,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00,0x00, \ +0x04,0x40,0x00,0x00,0x04,0x00,0xB5,0x08,0x29,0x01,0xD1,0xFF,0xF7,0xC1,0xFA, \ +0x00,0xBD,0x90,0xB4,0x10,0x4B,0x01,0x1C,0x1B,0x68,0x24,0x20,0x9B,0x89,0x00, \ +0x22,0x1F,0x1F,0x00,0x23,0x24,0x2F,0x0A,0xD9,0x0C,0x5C,0x03,0x2C,0x0A,0xD0, \ +0x0C,0x18,0x64,0x78,0x02,0x34,0x20,0x18,0x00,0x2A,0x04,0xD1,0x87,0x42,0xF4, \ +0xD8,0x18,0x1C,0x90,0xBC,0xF7,0x46,0x08,0x18,0x41,0x78,0x01,0x29,0x01,0xD0, \ +0x18,0x1C,0xF7,0xE7,0x80,0x78,0xF5,0xE7,0x00,0x00,0x10,0x00,0x00,0x02,0xF0, \ +0xB5,0x82,0xB0,0x17,0x4D,0x18,0x4C,0x18,0x4F,0x00,0x26,0x00,0x22,0xD2,0x43, \ +0x00,0x92,0x01,0x22,0x01,0xAB,0x16,0x48,0x16,0x49,0x02,0xF0,0x3F,0xFD,0xAE, \ +0x61,0x01,0x98,0x41,0x0D,0x03,0xD3,0x40,0x20,0x00,0xF0,0x28,0xF8,0x14,0xE0, \ +0x41,0x09,0x03,0xD3,0x50,0x20,0x00,0xF0,0x22,0xF8,0x0E,0xE0,0x40,0x0F,0x05, \ +0xD3,0x80,0x20,0x00,0xF0,0x1C,0xF8,0x08,0xE0,0x00,0xF0,0x8B,0xF8,0x20,0x78, \ +0x40,0x09,0x03,0xD3,0x38,0x68,0x00,0x7B,0x00,0x0A,0xF6,0xD2,0x00,0x22,0x01, \ +0x21,0x06,0x48,0x02,0xF0,0x23,0xFC,0xD3,0xE7,0x84,0x05,0x00,0x02,0x63,0x01, \ +0x00,0x02,0x04,0x00,0x00,0x02,0x08,0x07,0x00,0x02,0x10,0x10,0x10,0x10,0x28, \ +0x07,0x00,0x02,0xF8,0xB5,0x07,0x1C,0xFF,0xF7,0xC2,0xF8,0x00,0x25,0x01,0x26, \ +0x80,0x2F,0x2C,0x4C,0x11,0xD1,0x02,0x20,0xE1,0x1F,0x69,0x39,0x48,0x73,0x03, \ +0xF0,0x41,0xF9,0x01,0x1C,0xC8,0x20,0x03,0xF0,0x77,0xF9,0xC8,0x00,0x40,0x18, \ +0x80,0x00,0x80,0x08,0x11,0xD0,0x01,0x38,0xFD,0xD1,0x0E,0xE0,0x40,0x2F,0x04, \ +0xD1,0x22,0x48,0x86,0x63,0x22,0x48,0x45,0x80,0x07,0xE0,0x50,0x2F,0x05,0xD1, \ +0x20,0x49,0x21,0x48,0x06,0x22,0xFE,0xF7,0xE9,0xFF,0x26,0x70,0x1F,0x48,0xFF, \ +0xF7,0x55,0xF8,0x01,0x26,0xA6,0x72,0x1F,0x4C,0x50,0x2F,0x1D,0x48,0x08,0xD0, \ +0x17,0x4E,0xB1,0x6D,0xE1,0x60,0xB1,0x6D,0xC0,0x79,0x00,0xF0,0x9A,0xFC,0x20, \ +0x61,0x0D,0xE0,0x12,0x4E,0xB1,0x6E,0xE1,0x60,0xB1,0x6E,0xC0,0x79,0x00,0xF0, \ +0x91,0xFC,0x16,0x49,0x20,0x61,0x09,0x88,0x20,0x69,0x40,0x18,0x0D,0x49,0x48, \ +0x80,0x0C,0x48,0x0B,0x4E,0xA0,0x60,0x00,0xF0,0x71,0xF9,0x00,0xF0,0xE4,0xF9, \ +0x00,0x90,0x80,0x2F,0x07,0xD1,0x00,0x98,0x00,0x28,0x07,0xD1,0x01,0x20,0xF1, \ +0x1D,0x59,0x31,0x48,0x73,0x02,0xE0,0x40,0x2F,0x00,0xD1,0xB5,0x63,0xF8,0xBD, \ +0x74,0x05,0x00,0x02,0x04,0x05,0x00,0x02,0x48,0x07,0x00,0x02,0x4C,0x07,0x00, \ +0x02,0x46,0x01,0x00,0x02,0x5E,0x07,0x00,0x02,0x14,0x01,0x00,0x02,0x84,0x05, \ +0x00,0x02,0x40,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x96,0x4C,0x00,0x25,0x20, \ +0x68,0x47,0x68,0x39,0x79,0x49,0x08,0x00,0xD3,0x01,0x25,0x93,0x49,0xC9,0x78, \ 
+0x00,0x29,0x0C,0xD0,0x39,0x78,0x08,0x29,0x09,0xD1,0x91,0x4A,0x11,0x78,0x00, \ +0x29,0x05,0xD0,0x81,0x7D,0x53,0x78,0x99,0x42,0x01,0xDD,0x51,0x78,0x81,0x75, \ +0x20,0x68,0x80,0x7B,0x00,0x28,0x0D,0xD1,0xF8,0x1D,0x0F,0x30,0xFE,0xF7,0xED, \ +0xFF,0x38,0x78,0x00,0x28,0x06,0xD1,0x20,0x68,0x80,0x8A,0x64,0x28,0x02,0xDD, \ +0x78,0x1C,0xFF,0xF7,0x15,0xF8,0x81,0x4B,0x2A,0x1C,0x18,0x68,0x00,0x23,0x81, \ +0x7D,0xB8,0x1C,0x06,0x1C,0xFE,0xF7,0xE8,0xFF,0x7F,0x4B,0x80,0x4C,0x18,0x78, \ +0x00,0x28,0x50,0xD1,0x7A,0x4B,0x01,0x21,0x18,0x68,0x00,0x2D,0x60,0x61,0xA1, \ +0x61,0x41,0x68,0xA1,0x60,0x3B,0xD1,0x7A,0x49,0x82,0x8A,0x09,0x89,0x8A,0x42, \ +0x36,0xDB,0x79,0x4A,0x12,0x78,0x00,0x2A,0x32,0xD1,0x74,0x4B,0x01,0x22,0x1A, \ +0x70,0x76,0x4B,0x00,0x22,0x1A,0x70,0xFA,0x1D,0x75,0x4B,0x17,0x32,0x1A,0x60, \ +0x82,0x8A,0x6B,0x4B,0x1E,0x3A,0x73,0x4B,0x04,0x39,0x1A,0x80,0x73,0x4A,0x09, \ +0x04,0x09,0x0C,0x11,0x80,0xE1,0x60,0x80,0x7D,0x00,0xF0,0xFD,0xFB,0x65,0x4B, \ +0x20,0x61,0x18,0x68,0x81,0x7D,0x64,0x48,0x80,0x7A,0x81,0x42,0x00,0xDA,0x08, \ +0x1C,0x6B,0x4A,0x40,0x00,0x10,0x5A,0x6B,0x4A,0xD2,0x88,0x52,0x00,0x80,0x18, \ +0x22,0x69,0x83,0x18,0x30,0x1C,0x2A,0x1C,0xFE,0xF7,0xA3,0xFF,0x04,0x21,0x78, \ +0x1C,0xFE,0xF7,0x81,0xFF,0x0B,0xE0,0x00,0x21,0x5E,0x4A,0x1E,0x1C,0x11,0x70, \ +0x81,0x8A,0x80,0x7D,0x00,0xF0,0xDA,0xFB,0x20,0x61,0x30,0x68,0x80,0x8A,0xE0, \ +0x60,0x78,0x1C,0x40,0x21,0x06,0x1C,0xFE,0xF7,0x78,0xFF,0x00,0x28,0x0A,0xD0, \ +0x4E,0x4B,0x18,0x68,0x81,0x8A,0x08,0x31,0xE1,0x60,0x81,0x8A,0x80,0x7D,0x08, \ +0x31,0x00,0xF0,0xC5,0xFB,0x20,0x61,0x28,0x1C,0x00,0xF0,0x1D,0xFB,0x02,0x24, \ +0x00,0x28,0x09,0xD0,0x52,0x48,0xE2,0x1E,0x44,0x63,0x00,0x92,0x01,0x22,0x11, \ +0x21,0x50,0x48,0x01,0xAB,0x02,0xF0,0xF0,0xFB,0x4D,0x48,0x00,0x25,0x45,0x63, \ +0x3F,0x48,0x00,0x68,0x41,0x7B,0x00,0x29,0x32,0xD1,0x39,0x78,0x08,0x29,0x22, \ +0xD1,0x3C,0x49,0xC9,0x78,0x00,0x29,0x1E,0xD0,0x3B,0x49,0x0A,0x78,0x01,0x2A, \ +0x09,0xD0,0x02,0x2A,0x18,0xD1,0x4B,0x78,0x03,0x22,0x03,0x2B,0x06,0xDA,0x4B, \ +0x78,0x01,0x33,0x4B,0x70,0x03,0xE0,0x0C,0x70,0x4D,0x80,0x0D,0xE0,0x4A,0x70, \ +0x4B,0x88,0x01,0x33,0x1B,0x04,0x1B,0x0C,0x4B,0x80,0x03,0x2B,0x05,0xDB,0x4B, \ +0x78,0x03,0x2B,0x02,0xD1,0x0D,0x70,0x4A,0x70,0x4D,0x80,0x2C,0x4B,0x30,0x4A, \ +0x35,0x49,0x1D,0x70,0x15,0x70,0x0D,0x64,0x2B,0x4E,0x4D,0x64,0xB2,0x89,0xCA, \ +0x63,0x10,0x21,0x01,0x73,0x44,0xE0,0x00,0xF0,0xF1,0xFB,0x22,0x4C,0x20,0x68, \ +0x41,0x7B,0x04,0x29,0x06,0xD0,0x81,0x7B,0x01,0x31,0x81,0x73,0x08,0x21,0x30, \ +0x1C,0xFE,0xF7,0x09,0xFF,0x20,0x68,0x20,0x4E,0x81,0x7B,0x32,0x7C,0x91,0x42, \ +0x07,0xDA,0x41,0x7B,0x08,0x29,0x04,0xD0,0x45,0x73,0x21,0x68,0x82,0x20,0x08, \ +0x73,0x28,0xE0,0x40,0x7B,0x08,0x28,0x02,0xD1,0x01,0x20,0x00,0xF0,0x37,0xFC, \ +0x13,0x48,0xC0,0x78,0x00,0x28,0x12,0xD0,0x38,0x78,0x08,0x28,0x0F,0xD1,0x10, \ +0x48,0x01,0x78,0x02,0x29,0x00,0xD1,0x45,0x80,0x41,0x78,0x00,0x29,0x05,0xDD, \ +0x41,0x78,0x04,0x29,0x02,0xDA,0x41,0x78,0x01,0x39,0x41,0x70,0x01,0x21,0x01, \ +0x70,0x09,0x4B,0x0D,0x4A,0x12,0x48,0x1D,0x70,0x15,0x70,0x05,0x64,0x45,0x64, \ +0xB1,0x89,0xC1,0x63,0x20,0x68,0x10,0x21,0x01,0x73,0x02,0xB0,0xF0,0xBD,0x04, \ +0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x24,0x01,0x00,0x02,0x61,0x01,0x00,0x02, \ +0x84,0x05,0x00,0x02,0xD0,0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x62,0x01,0x00, \ +0x02,0x64,0x01,0x00,0x02,0x68,0x01,0x00,0x02,0x6A,0x01,0x00,0x02,0x2C,0x01, \ +0x00,0x02,0x18,0x00,0x00,0x02,0x04,0x05,0x00,0x02,0xE8,0x06,0x00,0x02,0x00, \ +0xB5,0x09,0x48,0x03,0x21,0xC1,0x72,0x41,0x7A,0x00,0x29,0x0A,0xD1,0x01,0x7A, \ +0x00,0x29,0x07,0xD0,0x81,0x7C,0x01,0x29,0x04,0xD1,0x80,0x7B,0x01,0x28,0x01, \ 
+0xD1,0x00,0xF0,0xAA,0xF9,0x00,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0xF0,0xB5, \ +0x25,0x4F,0x0A,0x20,0x79,0x7B,0x04,0x25,0x24,0x4C,0x24,0x4E,0x02,0x29,0x1C, \ +0xD1,0x38,0x68,0x00,0x28,0x09,0xD1,0x02,0xF0,0x6C,0xFF,0x01,0x1C,0x21,0x48, \ +0x80,0x89,0x02,0xF0,0xA1,0xFF,0x30,0x88,0x09,0x18,0x08,0xE0,0x02,0xF0,0x62, \ +0xFF,0x01,0x1C,0x1C,0x48,0x80,0x89,0x02,0xF0,0x97,0xFF,0x20,0x88,0x09,0x18, \ +0x00,0x20,0x78,0x61,0x08,0x20,0x00,0xF0,0xA0,0xFB,0xBD,0x74,0xF0,0xBD,0x39, \ +0x68,0x00,0x29,0x10,0xD1,0x79,0x6D,0x32,0x88,0x8B,0x00,0x59,0x18,0x89,0x00, \ +0x89,0x18,0x09,0x1A,0x08,0x20,0x00,0xF0,0x91,0xFB,0xFE,0xF7,0x15,0xFC,0x31, \ +0x88,0x0A,0x39,0x40,0x18,0x78,0x61,0x0F,0xE0,0x79,0x6D,0x22,0x88,0x8B,0x00, \ +0x59,0x18,0x89,0x00,0x89,0x18,0x09,0x1A,0x08,0x20,0x00,0xF0,0x80,0xFB,0xFE, \ +0xF7,0x04,0xFC,0x21,0x88,0x0A,0x39,0x40,0x18,0x78,0x61,0xBD,0x74,0xF0,0xBD, \ +0x04,0x05,0x00,0x02,0x3E,0x01,0x00,0x02,0x3C,0x01,0x00,0x02,0xD0,0x00,0x00, \ +0x02,0x80,0xB5,0x01,0x0A,0x07,0x1C,0x0E,0x20,0xFE,0xF7,0x2E,0xFC,0x10,0x20, \ +0x39,0x1C,0xFE,0xF7,0x2A,0xFC,0x80,0xBD,0xB0,0xB5,0x82,0xB0,0x14,0x4D,0x01, \ +0x20,0x68,0x63,0x13,0x4F,0x14,0x48,0x00,0x24,0xBC,0x82,0x38,0x82,0xBC,0x80, \ +0x1E,0x20,0x38,0x80,0x02,0x20,0xB8,0x82,0xC2,0x1E,0x00,0x92,0x01,0x22,0x5A, \ +0x21,0x0E,0x48,0x01,0xAB,0x02,0xF0,0xC4,0xFA,0x6C,0x63,0x3C,0x83,0xBC,0x82, \ +0x01,0x98,0xC1,0x09,0x02,0xD3,0x44,0x20,0x02,0xB0,0xB0,0xBD,0x81,0x08,0x05, \ +0xD3,0x00,0x09,0x01,0xD3,0x82,0x20,0xF7,0xE7,0x20,0x1C,0xF5,0xE7,0x42,0x20, \ +0xF3,0xE7,0x00,0x00,0x04,0x05,0x00,0x02,0x20,0x00,0x20,0x0F,0xA0,0x8C,0x00, \ +0x00,0xC8,0x06,0x00,0x02,0xF0,0xB5,0x70,0x4D,0x20,0x23,0xE8,0x69,0x98,0x43, \ +0xE8,0x61,0x1B,0x20,0x01,0x38,0xFD,0xD1,0xE8,0x69,0x20,0x23,0x18,0x43,0x6B, \ +0x4E,0xE8,0x61,0xF4,0x1D,0x69,0x34,0x60,0x7A,0xF7,0x1D,0x79,0x37,0x00,0x28, \ +0x0D,0xD0,0x78,0x68,0xFF,0xF7,0xAA,0xFF,0x39,0x68,0xF0,0x6F,0x00,0x22,0x00, \ +0xF0,0xE1,0xF8,0x00,0x21,0x61,0x72,0x01,0x20,0xFE,0xF7,0xC4,0xFB,0xF0,0xBD, \ +0xB0,0x7A,0x00,0x28,0x19,0xD0,0x5F,0x48,0x60,0x49,0x00,0x68,0x80,0x7D,0x89, \ +0x7A,0x88,0x42,0x00,0xDB,0x08,0x1C,0x5D,0x49,0x40,0x00,0x08,0x5A,0xFF,0xF7, \ +0x8F,0xFF,0x00,0x22,0x10,0x21,0x5B,0x48,0x00,0xF0,0xC6,0xF8,0x01,0x21,0xA1, \ +0x72,0x02,0x20,0xF0,0x72,0x00,0x20,0xFE,0xF7,0xA7,0xFB,0xF0,0xBD,0xA0,0x7A, \ +0x00,0x28,0xDE,0xD0,0x38,0x69,0xFF,0xF7,0x7C,0xFF,0xBE,0x68,0x70,0x78,0xC0, \ +0x09,0x4C,0xD3,0x4C,0x48,0x01,0x7B,0x00,0x29,0x01,0xD0,0x00,0x21,0x01,0x73, \ +0x03,0x20,0x40,0x06,0x80,0x68,0x40,0x08,0xFA,0xD2,0x06,0x21,0x02,0x20,0xFE, \ +0xF7,0x9D,0xFB,0x04,0x21,0x02,0x20,0xFE,0xF7,0x99,0xFB,0x05,0x21,0x02,0x20, \ +0xFE,0xF7,0x95,0xFB,0x04,0x21,0x02,0x20,0xFE,0xF7,0x91,0xFB,0x01,0x20,0x80, \ +0x06,0xC0,0x68,0x00,0xF0,0x16,0xFF,0x68,0x68,0xC0,0x0B,0xFC,0xD2,0x40,0x49, \ +0x01,0x20,0x80,0x06,0x41,0x63,0x3F,0x49,0x81,0x63,0x01,0x0B,0x69,0x60,0x69, \ +0x68,0xC9,0x0B,0xFC,0xD2,0x3C,0x49,0x09,0x78,0x02,0x29,0x04,0xD1,0x81,0x21, \ +0x03,0x22,0x52,0x06,0x91,0x60,0x03,0xE0,0x01,0x21,0x03,0x22,0x52,0x06,0x91, \ +0x60,0xB9,0x68,0x18,0x31,0x81,0x63,0xF9,0x68,0x01,0x23,0x1B,0x03,0x20,0x39, \ +0x19,0x43,0x41,0x63,0x32,0x48,0x17,0x23,0x00,0x78,0x9B,0x02,0x18,0x43,0x68, \ +0x60,0x00,0x21,0xA1,0x72,0x37,0xE0,0x2F,0x49,0x08,0x78,0x00,0x28,0x2C,0xD0, \ +0x2E,0x48,0x00,0x78,0x00,0x28,0x01,0xD0,0x18,0x20,0x00,0xE0,0x1E,0x20,0x6A, \ +0x68,0xD2,0x0B,0xFC,0xD2,0x01,0x23,0x5B,0x03,0x03,0x43,0x01,0x22,0x92,0x06, \ +0x53,0x63,0xBB,0x68,0x93,0x63,0x13,0x0B,0x6B,0x60,0x6F,0x68,0xFB,0x0B,0xFC, \ +0xD2,0x23,0x4B,0x1B,0x88,0x18,0x1A,0x01,0x23,0x1B,0x03,0x18,0x43,0x50,0x63, \ 
+0x21,0x48,0x11,0x23,0x00,0x68,0x9B,0x02,0x90,0x63,0x1A,0x48,0x00,0x78,0x18, \ +0x43,0x68,0x60,0x08,0x78,0x02,0x28,0x09,0xD1,0x00,0x21,0xA1,0x72,0x06,0xE0, \ +0x00,0x21,0xA1,0x72,0xF9,0x68,0xB8,0x68,0x00,0x22,0x00,0xF0,0x2F,0xF8,0x0A, \ +0x48,0x00,0x21,0x41,0x73,0x21,0x70,0x30,0x79,0x40,0x08,0x03,0xD2,0x0F,0x49, \ +0x08,0x78,0x01,0x28,0x03,0xD1,0x01,0x20,0xFE,0xF7,0x09,0xFB,0xF0,0xBD,0x00, \ +0x20,0xFE,0xF7,0x05,0xFB,0xF0,0xBD,0x40,0x00,0x00,0x04,0x04,0x05,0x00,0x02, \ +0x04,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x34,0x01,0x00,0x02,0xF8,0x07,0x00, \ +0x02,0x1C,0x20,0x00,0x00,0x90,0x08,0x00,0x02,0x70,0x00,0x00,0x02,0x70,0x01, \ +0x00,0x02,0x61,0x01,0x00,0x02,0x62,0x01,0x00,0x02,0x6A,0x01,0x00,0x02,0x64, \ +0x01,0x00,0x02,0x90,0xB4,0x08,0x4F,0x7C,0x68,0xE3,0x0B,0xFC,0xD2,0x0A,0x43, \ +0x01,0x21,0x89,0x06,0x4A,0x63,0x88,0x63,0x04,0x48,0x11,0x23,0x00,0x78,0x9B, \ +0x02,0x18,0x43,0x78,0x60,0x90,0xBC,0xF7,0x46,0x40,0x00,0x00,0x04,0x70,0x01, \ +0x00,0x02,0xF0,0xB5,0x4C,0x4D,0x00,0x20,0x6A,0x7A,0x00,0x2A,0x19,0xD1,0x2A, \ +0x7A,0x00,0x2A,0x16,0xD0,0xAA,0x7C,0x01,0x2A,0x13,0xD1,0xAA,0x7B,0x01,0x2A, \ +0x10,0xD1,0x6A,0x7B,0xEF,0x1D,0x69,0x37,0x01,0x24,0x04,0x2A,0x0B,0xD1,0x28, \ +0x74,0x68,0x73,0xB8,0x72,0x06,0x1C,0x00,0x22,0x40,0x21,0x40,0x48,0xEC,0x72, \ +0x02,0xF0,0x7B,0xF8,0x30,0x1C,0xF0,0xBD,0x0F,0x26,0x36,0x06,0xB2,0x88,0x3C, \ +0x4B,0x1A,0x40,0xB2,0x80,0xB2,0x89,0x3B,0x4B,0x1A,0x40,0xB2,0x81,0x6A,0x7B, \ +0x92,0x08,0x05,0xD2,0x3A,0x78,0x00,0x2A,0x02,0xD1,0xAA,0x6B,0x00,0x2A,0x06, \ +0xD0,0x36,0x4A,0xD1,0x79,0x36,0x4A,0x11,0x70,0x78,0x72,0xBC,0x72,0x1E,0xE0, \ +0xAB,0x7A,0x34,0x4A,0x00,0x2B,0x0D,0xD0,0x11,0x68,0x30,0x4A,0x89,0x7D,0x92, \ +0x7A,0x91,0x42,0x02,0xDA,0x2F,0x4A,0x11,0x70,0x01,0xE0,0x2D,0x49,0x0A,0x70, \ +0x78,0x72,0xBC,0x72,0x0C,0xE0,0xEB,0x1D,0x79,0x33,0x9B,0x69,0x01,0x2B,0x01, \ +0xD1,0x13,0x68,0x19,0x61,0x78,0x72,0xBC,0x72,0x10,0x68,0x26,0x49,0x80,0x7D, \ +0x08,0x70,0x00,0xF0,0x5C,0xF9,0xA8,0x7A,0x00,0x28,0x03,0xD0,0x00,0x20,0xFE, \ +0xF7,0x04,0xFC,0x0A,0xE0,0x20,0x49,0x08,0x78,0x03,0x28,0x03,0xD1,0xA8,0x6A, \ +0xFE,0xF7,0xFC,0xFB,0x02,0xE0,0x00,0x20,0xFE,0xF7,0xF8,0xFB,0x01,0x21,0x89, \ +0x06,0x48,0x6A,0x02,0x22,0xEA,0x72,0xCA,0x0A,0x19,0x4B,0x0A,0x30,0x5A,0x60, \ +0x08,0x62,0x6A,0x7B,0x92,0x08,0x02,0xD2,0x3A,0x78,0x00,0x2A,0x11,0xD0,0x16, \ +0x4F,0x17,0x4D,0x3F,0x88,0x8B,0x6A,0x49,0x6A,0xC0,0x19,0x0E,0x4F,0x12,0x4A, \ +0xFF,0x79,0xBF,0x00,0xEF,0x59,0xC0,0x19,0x10,0x60,0x88,0x42,0x00,0xD2,0x01, \ +0x33,0x53,0x60,0x10,0x1D,0xB0,0x89,0x0F,0x4B,0x18,0x43,0xB0,0x81,0xB0,0x88, \ +0x0E,0x4B,0x18,0x43,0xB0,0x80,0x20,0x1C,0xF0,0xBD,0x04,0x05,0x00,0x02,0xC8, \ +0x06,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x14,0x01,0x00,0x02, \ +0x45,0x01,0x00,0x02,0x04,0x00,0x00,0x02,0x40,0x00,0x00,0x04,0x60,0x07,0x00, \ +0x02,0x40,0x01,0x00,0x02,0x50,0x08,0x00,0x02,0x13,0x13,0x00,0x00,0xE8,0xE8, \ +0x00,0x00,0xF8,0xB5,0x46,0x49,0x06,0x1C,0x08,0x68,0x45,0x49,0x82,0x8A,0x45, \ +0x68,0x49,0x89,0x00,0x27,0x44,0x4C,0x8A,0x42,0x53,0xDD,0x00,0x2E,0x51,0xD1, \ +0x42,0x4E,0xB4,0x21,0x31,0x70,0x42,0x49,0x80,0x7D,0x89,0x7A,0x88,0x42,0x00, \ +0xDB,0x08,0x1C,0x40,0x49,0x40,0x4A,0xC9,0x88,0x4B,0x00,0x59,0x18,0x40,0x00, \ +0x10,0x5A,0x40,0x00,0x08,0x18,0x3D,0x49,0x06,0x22,0x09,0x88,0x40,0x18,0x3C, \ +0x49,0x09,0x69,0x40,0x18,0x70,0x80,0x28,0x1D,0x31,0x1D,0xFE,0xF7,0xB6,0xFB, \ +0xE8,0x1D,0x03,0x30,0x06,0x22,0xF1,0x1D,0x03,0x31,0xFE,0xF7,0xAF,0xFB,0x01, \ +0x20,0x35,0x49,0x00,0x25,0x88,0x72,0xA5,0x72,0xFF,0xF7,0x56,0xFD,0xFF,0xF7, \ +0xC9,0xFD,0x00,0x90,0x00,0x98,0x00,0x28,0x1A,0xD1,0x26,0x4F,0x2A,0x49,0x38, \ 
+0x68,0x80,0x7D,0x89,0x7A,0x88,0x42,0x00,0xDB,0x08,0x1C,0x28,0x4A,0x27,0x49, \ +0x40,0x00,0x10,0x5A,0xC9,0x88,0x40,0x18,0x29,0x49,0x09,0x88,0x41,0x18,0x01, \ +0x20,0x00,0xF0,0x16,0xF9,0x25,0x49,0x03,0x20,0x48,0x60,0x38,0x68,0x01,0x27, \ +0x45,0x73,0x2F,0xE0,0x19,0x49,0x04,0x20,0x09,0x68,0x48,0x73,0x2A,0xE0,0x01, \ +0x20,0xA0,0x72,0x1E,0x4D,0xFF,0xF7,0x2B,0xFD,0xFF,0xF7,0x9E,0xFD,0x00,0x90, \ +0x00,0x98,0x00,0x28,0x1B,0xD1,0x11,0x49,0x00,0x2E,0x0A,0x68,0x50,0x73,0x1A, \ +0xD1,0x08,0x68,0x12,0x49,0x80,0x7D,0x89,0x7A,0x88,0x42,0x00,0xDB,0x08,0x1C, \ +0x10,0x49,0x10,0x4A,0x40,0x00,0x10,0x5A,0xC9,0x88,0x08,0x18,0x12,0x49,0x09, \ +0x88,0x41,0x18,0x02,0x20,0x68,0x60,0x01,0x20,0x00,0xF0,0xE5,0xF8,0x01,0x27, \ +0x03,0xE0,0x03,0x49,0x04,0x20,0x09,0x68,0x48,0x73,0x38,0x06,0x00,0x0E,0xF8, \ +0xBD,0x04,0x00,0x00,0x02,0xD0,0x00,0x00,0x02,0x74,0x05,0x00,0x02,0xF8,0x07, \ +0x00,0x02,0x14,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x2C,0x01,0x00,0x02,0x40, \ +0x01,0x00,0x02,0x84,0x05,0x00,0x02,0x04,0x05,0x00,0x02,0x42,0x01,0x00,0x02, \ +0x90,0xB5,0x04,0x31,0xCF,0x00,0x01,0x28,0x17,0xD0,0x02,0x28,0x17,0xD0,0x03, \ +0x28,0x27,0xD1,0x0B,0x20,0x39,0x1C,0x02,0xF0,0x57,0xFC,0x0C,0x1C,0x79,0x1A, \ +0x0B,0x20,0x02,0xF0,0x52,0xFC,0x07,0x1C,0x00,0x21,0x00,0x2C,0x0F,0x48,0x18, \ +0xD9,0x01,0x37,0x04,0x2C,0x13,0xD2,0x01,0x21,0x81,0x62,0x13,0xE0,0x7F,0x08, \ +0x11,0xE0,0x79,0x00,0x0B,0x20,0x0F,0x1C,0x02,0xF0,0x40,0xFC,0x0C,0x1C,0x79, \ +0x1A,0x0B,0x20,0x02,0xF0,0x3B,0xFC,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37, \ +0x02,0xE0,0x81,0x62,0x00,0xE0,0x81,0x62,0x38,0x1C,0x90,0xBD,0x00,0x00,0x04, \ +0x05,0x00,0x02,0x10,0x48,0x01,0x88,0x10,0x48,0xCA,0x1D,0x69,0x32,0x02,0x80, \ +0xCA,0x1D,0x31,0x32,0x42,0x80,0xCA,0x1D,0x0E,0x32,0x82,0x80,0x0B,0x31,0xC1, \ +0x80,0x0B,0x48,0xA0,0x21,0x01,0x80,0x50,0x21,0x41,0x80,0x1E,0x21,0x81,0x80, \ +0x0F,0x21,0xC1,0x80,0x08,0x48,0xC0,0x21,0x01,0x60,0x60,0x21,0x41,0x60,0x23, \ +0x21,0x81,0x60,0x12,0x21,0xC1,0x60,0xF7,0x46,0x00,0x00,0x40,0x01,0x00,0x02, \ +0x2C,0x01,0x00,0x02,0x34,0x01,0x00,0x02,0x50,0x08,0x00,0x02,0x00,0xB5,0x08, \ +0x49,0x08,0x48,0x0A,0x78,0x03,0x78,0x9A,0x42,0x08,0xD0,0x09,0x78,0x01,0x70, \ +0x00,0x78,0x05,0x49,0x08,0x5C,0x05,0x49,0x08,0x70,0xFE,0xF7,0x70,0xFA,0x00, \ +0xBD,0x00,0x00,0x45,0x01,0x00,0x02,0x44,0x01,0x00,0x02,0x6C,0x01,0x00,0x02, \ +0x70,0x01,0x00,0x02,0x07,0x48,0x01,0x6C,0x01,0x31,0x01,0x64,0xC1,0x6B,0x49, \ +0x00,0x01,0x31,0xC1,0x63,0x04,0x49,0xC2,0x6B,0xC9,0x89,0x8A,0x42,0x00,0xD9, \ +0xC1,0x63,0xF7,0x46,0x00,0x00,0x04,0x05,0x00,0x02,0xD0,0x00,0x00,0x02,0x80, \ +0xB5,0x02,0xF0,0xEB,0xFB,0x04,0x4F,0x01,0x1C,0xF8,0x6B,0x02,0xF0,0xCE,0xFB, \ +0x79,0x65,0x02,0x20,0xF8,0x62,0x80,0xBD,0x04,0x05,0x00,0x02,0xB0,0xB5,0x01, \ +0x20,0x80,0x06,0x81,0x6A,0x44,0x6A,0x11,0x48,0x00,0x88,0x87,0x02,0x00,0x29, \ +0x13,0xD9,0x38,0x1C,0x02,0xF0,0xBB,0xFB,0x0D,0x1C,0x00,0x21,0xC9,0x43,0x38, \ +0x1C,0x02,0xF0,0xB5,0xFB,0x48,0x1C,0x45,0x43,0x38,0x1C,0x21,0x1C,0x02,0xF0, \ +0xAF,0xFB,0x69,0x18,0x38,0x1C,0x02,0xF0,0xAB,0xFB,0x03,0xE0,0x38,0x1C,0x21, \ +0x1C,0x02,0xF0,0xA6,0xFB,0x79,0x1A,0x06,0x20,0x00,0xF0,0x04,0xF8,0xB0,0xBD, \ +0x00,0x00,0x98,0x00,0x00,0x02,0x90,0xB5,0x0C,0x1C,0x07,0x1C,0x00,0xF0,0x15, \ +0xF8,0x01,0x20,0x80,0x06,0x40,0x6A,0x06,0x4B,0x20,0x18,0xB9,0x00,0xC9,0x18, \ +0xC8,0x62,0x01,0x21,0x78,0x1E,0x81,0x40,0x03,0x48,0x82,0x69,0x11,0x43,0x81, \ +0x61,0x90,0xBD,0x00,0x00,0x40,0x00,0x00,0x04,0x80,0x00,0x00,0x04,0x80,0xB4, \ +0x47,0x1E,0x01,0x20,0x04,0x49,0xB8,0x40,0x8A,0x69,0xC0,0x43,0x10,0x40,0x88, \ +0x61,0x80,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00,0x00,0x04,0xF0,0xB5,0x84,0xB0, \ 
+0x0F,0x20,0x00,0x06,0x00,0x88,0xBB,0x4D,0x00,0x27,0xEC,0x1D,0x79,0x34,0x03, \ +0x90,0xE0,0x69,0x00,0x28,0x28,0xD0,0x01,0x28,0x1B,0xD1,0x03,0x98,0xB6,0x4B, \ +0x18,0x40,0x73,0xD0,0x18,0x05,0xC0,0x68,0x00,0x90,0x00,0x98,0x80,0x08,0x01, \ +0xD3,0x04,0xF0,0xB3,0xFD,0x00,0x98,0x80,0x09,0x69,0xD3,0x00,0x98,0x04,0x21, \ +0x01,0x40,0xAF,0x48,0x03,0xD0,0x41,0x68,0x01,0x31,0x41,0x60,0x79,0xE0,0x01, \ +0x68,0x01,0x31,0x01,0x60,0x75,0xE0,0x02,0x28,0x73,0xD0,0x03,0x28,0x06,0xD1, \ +0x03,0x98,0xA8,0x4B,0x18,0x40,0x53,0xD0,0x04,0xF0,0xB5,0xFE,0xED,0xE0,0x03, \ +0x98,0xA6,0x4B,0x18,0x40,0x0E,0xD0,0xA5,0x48,0x00,0x68,0x02,0x90,0x02,0x98, \ +0x80,0x08,0x02,0xD2,0x02,0x98,0x40,0x08,0x05,0xD3,0x02,0x98,0xC0,0x08,0x01, \ +0xD3,0x02,0x27,0x00,0xE0,0x01,0x27,0x03,0x98,0x9B,0x4B,0x9E,0x4E,0x18,0x40, \ +0x49,0xD0,0xD8,0x04,0xC1,0x6B,0x01,0x91,0x01,0x99,0x89,0x09,0x12,0xD3,0xFF, \ +0x21,0xF5,0x31,0x02,0x69,0x92,0x08,0x03,0xD3,0x0A,0x1C,0x01,0x39,0x00,0x2A, \ +0xF8,0xD8,0x68,0x7A,0x00,0x28,0x01,0xD0,0x00,0xF0,0x42,0xFA,0xFF,0xF7,0x42, \ +0xFC,0x01,0x27,0x01,0x20,0x68,0x66,0x01,0x98,0x12,0x23,0x18,0x40,0x2C,0xD0, \ +0x00,0x20,0x68,0x66,0x68,0x7A,0x00,0x28,0x05,0xD0,0x01,0x20,0xFE,0xF7,0x19, \ +0xF8,0x00,0x20,0x68,0x72,0x70,0x72,0x01,0x98,0x80,0x08,0x02,0xD3,0x00,0x2F, \ +0x00,0xD1,0x02,0x27,0xE8,0x7A,0x02,0x28,0x18,0xD1,0x01,0x98,0x40,0x09,0x02, \ +0xD3,0x01,0x20,0xFE,0xF7,0x07,0xF8,0x01,0x98,0x01,0xE0,0x9B,0xE0,0x9A,0xE0, \ +0x80,0x08,0x01,0xD3,0xFF,0xF7,0x13,0xFF,0x00,0x20,0xA8,0x72,0x68,0x73,0x30, \ +0x70,0x01,0x20,0xE8,0x72,0x01,0x99,0x7A,0x48,0x00,0x22,0x01,0xF0,0xBC,0xFD, \ +0x03,0x98,0x72,0x4B,0x18,0x40,0x75,0xD0,0x18,0x05,0xC0,0x68,0x00,0x90,0x00, \ +0x98,0x00,0xE0,0x81,0xE0,0x40,0x09,0x24,0xD3,0x28,0x7B,0x03,0x28,0x21,0xD1, \ +0x04,0x20,0x28,0x73,0x71,0x48,0x00,0x68,0x41,0x68,0x49,0x78,0xC9,0x09,0x19, \ +0xD3,0x81,0x7B,0x03,0x29,0x16,0xD1,0x80,0x89,0x64,0x28,0x13,0xDA,0x68,0x48, \ +0xC1,0x69,0x83,0x01,0x19,0x43,0xC1,0x61,0xC1,0x69,0x69,0x4B,0x19,0x40,0xC1, \ +0x61,0x05,0x21,0x02,0x20,0xFD,0xF7,0xDA,0xFF,0x04,0x21,0x02,0x20,0xFD,0xF7, \ +0xD6,0xFF,0x01,0x20,0x80,0x06,0xC0,0x68,0x00,0x98,0x80,0x08,0x0E,0xD3,0x01, \ +0x20,0xA8,0x73,0x00,0x20,0x28,0x73,0xB0,0x72,0x68,0x72,0x70,0x72,0x20,0x62, \ +0x28,0x63,0x00,0x98,0xFE,0xF7,0xF4,0xFB,0x00,0x99,0x08,0x43,0x00,0x90,0x00, \ +0x98,0x80,0x09,0x0F,0xD3,0x01,0x20,0xA8,0x73,0x28,0x63,0x00,0x99,0x02,0x27, \ +0xC9,0x08,0x01,0xD3,0x20,0x62,0x01,0xE0,0x00,0x20,0x20,0x62,0x4E,0x48,0x52, \ +0x4B,0x81,0x6A,0x19,0x43,0x81,0x62,0x29,0x7B,0x28,0x1C,0x04,0x29,0x28,0xD1, \ +0x01,0x6B,0x00,0x29,0x1C,0xD0,0x41,0x6E,0x00,0x29,0x02,0xD1,0x00,0x2F,0x00, \ +0xD1,0x02,0x27,0x00,0x21,0x01,0x73,0x01,0x63,0x22,0x6A,0x01,0x2A,0x12,0xD1, \ +0x02,0x68,0x00,0x2A,0x0F,0xD1,0x43,0x48,0x02,0x68,0x51,0x72,0x45,0x49,0x0A, \ +0x68,0x4A,0x60,0x01,0x21,0x89,0x06,0xC9,0x6A,0x00,0x68,0x41,0x61,0xFE,0xF7, \ +0x0F,0xFB,0x00,0xE0,0x07,0xE0,0x06,0xE0,0x41,0x72,0x71,0x72,0xC1,0x21,0x01, \ +0x60,0x39,0x48,0x00,0x68,0x41,0x72,0x01,0x2F,0x02,0xD1,0x00,0xF0,0x41,0xF9, \ +0x03,0xE0,0x02,0x2F,0x01,0xD1,0x00,0xF0,0x6A,0xF9,0x03,0x98,0x37,0x4B,0x18, \ +0x40,0x52,0xD0,0x0D,0x25,0x2D,0x06,0x2F,0x89,0x40,0x20,0x34,0x4E,0x38,0x40, \ +0x08,0xD0,0x30,0x7A,0x00,0x28,0xFC,0xD1,0x32,0x48,0x00,0x7B,0x40,0x08,0x01, \ +0xD3,0x00,0xF0,0x52,0xFC,0x78,0x0A,0x17,0xD3,0xF8,0x43,0xFF,0x23,0x01,0x33, \ +0x18,0x43,0x28,0x81,0x28,0x7B,0x00,0x09,0xFC,0xD2,0x30,0x7A,0x00,0x28,0xFC, \ +0xD1,0x00,0xF0,0x1B,0xFC,0x29,0x48,0x01,0x68,0x02,0x29,0x02,0xD0,0x01,0x21, \ +0x01,0x60,0x01,0xE0,0x00,0x21,0x01,0x60,0x00,0xF0,0xC0,0xFB,0x24,0x49,0x08, \ 
+0x68,0x01,0x28,0x1F,0xD1,0xB8,0x08,0x1D,0xD3,0x22,0x4B,0x00,0x22,0x18,0x7A, \ +0x1A,0x72,0xFF,0x43,0x02,0x23,0x3B,0x43,0x2B,0x81,0x09,0x68,0x01,0x29,0x12, \ +0xD1,0x40,0x08,0x10,0xD3,0x28,0x78,0x20,0x23,0x18,0x43,0x07,0x21,0x49,0x06, \ +0x28,0x70,0x8A,0x61,0x00,0x20,0x7D,0x22,0x12,0x01,0x01,0x30,0x90,0x42,0xFC, \ +0xD3,0xFF,0x20,0x48,0x61,0xFF,0xE7,0xFE,0xE7,0xE0,0x69,0x00,0x28,0x01,0xD0, \ +0x04,0xB0,0xF0,0xBD,0xFC,0xE7,0x04,0x05,0x00,0x02,0x40,0x40,0x00,0x00,0xA0, \ +0x02,0x00,0x02,0x80,0x80,0x00,0x00,0x20,0x20,0x00,0x00,0x40,0x00,0x00,0x04, \ +0x74,0x05,0x00,0x02,0xC8,0x06,0x00,0x02,0x10,0x00,0x00,0x02,0xFF,0xEF,0x00, \ +0x00,0x00,0xFF,0x3F,0x00,0x9C,0x01,0x00,0x02,0x08,0x08,0x00,0x00,0x20,0x00, \ +0x00,0x0D,0xD0,0x03,0x00,0x0D,0x44,0x02,0x00,0x02,0x78,0x01,0x00,0x02,0xE0, \ +0x03,0x00,0x0D,0xF0,0xB5,0x0F,0x20,0x00,0x06,0x06,0x89,0x52,0x48,0x53,0x4F, \ +0x30,0x40,0x73,0xD0,0x52,0x48,0xC4,0x69,0x60,0x08,0x28,0xD3,0xFD,0x1F,0x69, \ +0x3D,0x69,0x68,0x50,0x48,0x02,0x29,0x03,0xD1,0x00,0x68,0x01,0x21,0x41,0x73, \ +0x05,0xE0,0x69,0x68,0x03,0x29,0x02,0xD1,0x00,0x68,0x02,0x21,0x41,0x73,0x01, \ +0x20,0xE8,0x72,0x00,0x21,0xB9,0x72,0xA9,0x72,0x68,0x60,0xFD,0xF7,0xC8,0xFE, \ +0x68,0x6B,0x01,0x28,0x05,0xD1,0x00,0x22,0x10,0x21,0x44,0x48,0x01,0xF0,0x88, \ +0xFC,0x07,0xE0,0x68,0x6B,0x02,0x28,0x04,0xD1,0x00,0x22,0x10,0x21,0x40,0x48, \ +0x01,0xF0,0x7F,0xFC,0xE0,0x09,0x09,0xD3,0x3F,0x48,0x81,0x7C,0x05,0x29,0x05, \ +0xD1,0x01,0x21,0x81,0x74,0x00,0x21,0xC1,0x65,0xFF,0xF7,0x33,0xFA,0x20,0x0A, \ +0x0C,0xD3,0x39,0x48,0x81,0x7C,0x04,0x29,0x08,0xD1,0x01,0x21,0x81,0x74,0x00, \ +0x21,0x41,0x65,0xC0,0x7A,0x03,0x28,0x01,0xD1,0xFF,0xF7,0xCA,0xFB,0x04,0x20, \ +0x20,0x40,0x32,0x4D,0x0D,0xD0,0xF8,0x7A,0x01,0x28,0x0A,0xD1,0x04,0x20,0xFF, \ +0xF7,0xF8,0xFD,0x02,0x21,0xF9,0x72,0x00,0x21,0x29,0x73,0x01,0x21,0x03,0x20, \ +0x00,0xF0,0x14,0xFA,0x20,0x09,0x05,0xD3,0xF8,0x7A,0x01,0x28,0x02,0xD1,0x28, \ +0x73,0x04,0xF0,0xEA,0xFE,0x10,0x25,0x25,0x40,0x08,0xD0,0xF8,0x7A,0x03,0x28, \ +0x05,0xD1,0x00,0x21,0xF9,0x72,0x07,0x21,0x04,0x20,0x00,0xF0,0x00,0xFA,0x00, \ +0x2D,0x09,0xD0,0xF8,0x7A,0x04,0x28,0x06,0xD1,0x00,0xE0,0x12,0xE0,0x00,0x21, \ +0xF9,0x72,0x07,0x21,0x00,0xF0,0xF4,0xF9,0xA0,0x09,0x0B,0xD3,0x1A,0x48,0x80, \ +0x78,0x00,0x28,0x07,0xD0,0x79,0x78,0x16,0x48,0x04,0x29,0x03,0xD0,0x01,0x21, \ +0x01,0x74,0xFF,0xF7,0x7E,0xFD,0xFF,0x20,0x02,0x30,0x30,0x40,0x14,0xD0,0x01, \ +0x20,0x10,0x4D,0x00,0x24,0xE8,0x72,0xBC,0x72,0xAC,0x72,0x68,0x60,0xFD,0xF7, \ +0x4F,0xFE,0x0F,0x48,0x00,0x22,0x04,0x83,0x84,0x82,0x0E,0x48,0x81,0x68,0x01, \ +0x31,0x81,0x60,0x10,0x21,0x05,0x48,0x01,0xF0,0x0B,0xFC,0xF0,0xBD,0x10,0x10, \ +0x00,0x00,0x74,0x05,0x00,0x02,0x80,0x00,0x00,0x04,0x04,0x00,0x00,0x02,0xC8, \ +0x06,0x00,0x02,0xE8,0x06,0x00,0x02,0x04,0x05,0x00,0x02,0x64,0x05,0x00,0x02, \ +0x14,0x01,0x00,0x02,0x20,0x00,0x20,0x0F,0x9C,0x01,0x00,0x02,0x80,0xB5,0x15, \ +0x4F,0x00,0x20,0x38,0x72,0xB9,0x7C,0x02,0x20,0x01,0x29,0x1C,0xD0,0x04,0x29, \ +0x19,0xD1,0xB8,0x74,0x08,0x20,0xFF,0xF7,0x84,0xFD,0xFD,0xF7,0xEE,0xFD,0x79, \ +0x69,0x41,0x1A,0x00,0x29,0x0F,0xDD,0x14,0x20,0x02,0xF0,0x51,0xF9,0xF9,0x6B, \ +0x81,0x42,0x02,0xD3,0x79,0x6D,0x81,0x42,0x09,0xD2,0x02,0xF0,0x0F,0xF9,0x01, \ +0x1C,0xF8,0x6B,0x02,0xF0,0xF3,0xF8,0x79,0x65,0x80,0xBD,0xB8,0x74,0x80,0xBD, \ +0x79,0x6D,0x08,0x1A,0x78,0x65,0x80,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0x00, \ +0xB5,0x09,0x48,0x01,0x21,0x01,0x72,0x81,0x7B,0x01,0x29,0x0A,0xD1,0x01,0x7B, \ +0x00,0x29,0x07,0xD1,0x41,0x7A,0x00,0x29,0x04,0xD1,0x80,0x7C,0x05,0x28,0x01, \ +0xD0,0xFF,0xF7,0x74,0xF9,0x00,0xBD,0x00,0x00,0x04,0x05,0x00,0x02,0xF0,0xB5, \ 
+0x28,0x4D,0x28,0x48,0xEC,0x1D,0x69,0x34,0xA1,0x78,0x80,0x7A,0x27,0x4F,0x81, \ +0x42,0x02,0xDA,0xA0,0x78,0x38,0x70,0x00,0xE0,0x38,0x70,0xFF,0xF7,0xBE,0xFC, \ +0x38,0x78,0x03,0x28,0x03,0xD1,0x01,0x20,0xFD,0xF7,0x66,0xFF,0x02,0xE0,0x00, \ +0x20,0xFD,0xF7,0x62,0xFF,0x6A,0x7A,0x28,0x1C,0x00,0x21,0x01,0x2A,0x1C,0x4E, \ +0x1B,0xD0,0x02,0x2A,0x1E,0xD1,0x1B,0x4B,0xC4,0x22,0x1A,0x70,0x1B,0x4A,0xD3, \ +0x88,0x3A,0x78,0x52,0x00,0xB2,0x5A,0x9D,0x18,0x17,0x4A,0x52,0x88,0x95,0x42, \ +0x07,0xDA,0x3D,0x78,0x6D,0x00,0x75,0x5B,0x5B,0x19,0x13,0x4D,0xD2,0x1A,0x6A, \ +0x80,0x01,0xE0,0x11,0x4D,0x69,0x80,0x10,0x4D,0x69,0x70,0x04,0xE0,0x0E,0x4B, \ +0xD4,0x22,0x59,0x80,0x1A,0x70,0x59,0x70,0x0A,0x21,0xC2,0x1D,0x79,0x32,0x11, \ +0x60,0x39,0x78,0x0B,0x4B,0x49,0x00,0x71,0x5A,0x1B,0x88,0xC9,0x1A,0x07,0x4B, \ +0x51,0x60,0xC3,0x67,0x01,0x20,0x60,0x72,0xF0,0xBD,0x00,0x00,0x04,0x05,0x00, \ +0x02,0x14,0x01,0x00,0x02,0x45,0x01,0x00,0x02,0x2C,0x01,0x00,0x02,0xE0,0x07, \ +0x00,0x02,0x18,0x00,0x00,0x02,0x40,0x01,0x00,0x02,0xB0,0xB4,0x0A,0x4B,0x00, \ +0x24,0x99,0x42,0x01,0xD8,0x00,0x29,0x02,0xD1,0x20,0x1C,0xB0,0xBC,0xF7,0x46, \ +0x01,0x27,0xBF,0x06,0x3D,0x69,0xAB,0x08,0x01,0xD3,0x20,0x1C,0xF6,0xE7,0x03, \ +0xC7,0x08,0x3F,0x3A,0x61,0x01,0x20,0xF1,0xE7,0x0E,0x06,0x00,0x00,0xF0,0xB5, \ +0x82,0xB0,0x25,0x4C,0x24,0x4D,0xE7,0x1D,0x09,0x37,0x00,0x22,0x00,0x92,0x01, \ +0x22,0x01,0x21,0x28,0x1C,0x01,0xAB,0x01,0xF0,0x24,0xFC,0x01,0x98,0x40,0x08, \ +0x03,0xD3,0x00,0x20,0x1E,0x4E,0x01,0x90,0xF0,0x73,0x1D,0x4E,0xF0,0x7B,0x00, \ +0x28,0x2D,0xD1,0xF8,0x78,0x00,0x28,0x01,0xD0,0x00,0xF0,0x38,0xF8,0x20,0x7B, \ +0x00,0x28,0x02,0xD0,0x00,0xF0,0x91,0xF8,0x22,0xE0,0xA0,0x7B,0x00,0x28,0x02, \ +0xD0,0x00,0xF0,0x79,0xF8,0x1C,0xE0,0x30,0x7C,0x00,0x28,0x02,0xD0,0x00,0xF0, \ +0x63,0xF8,0x16,0xE0,0xF8,0x7A,0x05,0x28,0x13,0xD1,0x0F,0x48,0x00,0x78,0x40, \ +0x09,0x07,0xD3,0x0E,0x4E,0x30,0x68,0x00,0x7B,0xC0,0x09,0x02,0xD3,0x00,0xF0, \ +0x2A,0xF8,0x07,0xE0,0x0A,0x4E,0x30,0x68,0x01,0x7B,0x10,0x29,0x02,0xD1,0x00, \ +0xF0,0x96,0xF9,0x30,0x60,0x00,0xF0,0xBD,0xFC,0x00,0xF0,0x7B,0xF8,0xB9,0xE7, \ +0x28,0x07,0x00,0x02,0x64,0x05,0x00,0x02,0x04,0x05,0x00,0x02,0x63,0x01,0x00, \ +0x02,0x04,0x00,0x00,0x02,0x00,0xB5,0x06,0x48,0x00,0x21,0xC2,0x1D,0x69,0x32, \ +0xD1,0x70,0x01,0x21,0xC1,0x73,0x00,0x22,0x09,0x05,0x02,0x48,0x01,0xF0,0xD5, \ +0xFA,0x00,0xBD,0x04,0x05,0x00,0x02,0x08,0x07,0x00,0x02,0x90,0xB5,0x10,0x4C, \ +0x01,0x20,0x10,0x4F,0xE0,0x73,0x38,0x68,0x00,0xF0,0xC0,0xFE,0x38,0x68,0x40, \ +0x68,0x01,0x78,0x08,0x29,0x0A,0xD1,0xE1,0x1D,0x69,0x31,0x49,0x78,0x01,0x29, \ +0x05,0xD1,0xC1,0x1D,0x09,0x31,0x06,0x22,0x08,0x48,0xFD,0xF7,0xB4,0xFE,0x39, \ +0x68,0x80,0x20,0x08,0x73,0x00,0x22,0x01,0x21,0x09,0x03,0x04,0x48,0x01,0xF0, \ +0xAF,0xFA,0x90,0xBD,0x04,0x05,0x00,0x02,0x04,0x00,0x00,0x02,0x0C,0x01,0x00, \ +0x02,0x08,0x07,0x00,0x02,0x00,0xB5,0x05,0x48,0x00,0x21,0x01,0x74,0x01,0x21, \ +0xC1,0x73,0x00,0x22,0x09,0x07,0x02,0x48,0x01,0xF0,0x9B,0xFA,0x00,0xBD,0x04, \ +0x05,0x00,0x02,0x08,0x07,0x00,0x02,0x00,0xB5,0x06,0x48,0x00,0x21,0xC2,0x1D, \ +0x59,0x32,0x91,0x73,0x01,0x21,0xC1,0x73,0x00,0x22,0x10,0x21,0x02,0x48,0x01, \ +0xF0,0x89,0xFA,0x00,0xBD,0x04,0x05,0x00,0x02,0x08,0x07,0x00,0x02,0x80,0xB5, \ +0x06,0x49,0x00,0x20,0x06,0x4F,0x08,0x73,0xF8,0x7C,0xFD,0xF7,0x80,0xFD,0xF8, \ +0x8A,0x81,0x02,0x04,0x20,0xFF,0xF7,0xFB,0xFB,0x80,0xBD,0x64,0x05,0x00,0x02, \ +0xD0,0x00,0x00,0x02,0x00,0xB5,0x18,0x48,0x01,0x78,0x00,0x29,0x13,0xD0,0x41, \ +0x78,0x00,0x29,0x10,0xD1,0x01,0x78,0x0D,0x29,0x20,0xD2,0x02,0xA3,0x5B,0x5C, \ +0x5B,0x00,0x9F,0x44,0x00,0x1C,0x1C,0x07,0x1C,0x0A,0x0D,0x13,0x1C,0x1C,0x1C, \ 
+0x1C,0x10,0x16,0x19,0x00,0x03,0xF0,0xDF,0xF9,0x00,0xBD,0x03,0xF0,0x2E,0xFA, \ +0x00,0xBD,0x03,0xF0,0xA5,0xFA,0x00,0xBD,0x03,0xF0,0xE6,0xFF,0x00,0xBD,0x03, \ +0xF0,0x11,0xFB,0x00,0xBD,0x03,0xF0,0xB0,0xF9,0x00,0xBD,0x03,0xF0,0x45,0xFC, \ +0x00,0xBD,0x00,0x78,0x02,0x21,0x00,0xF0,0x04,0xF8,0x00,0xBD,0x00,0x00,0xAC, \ +0x08,0x00,0x02,0x04,0x4A,0x10,0x60,0x04,0x48,0x01,0x60,0x04,0x49,0x00,0x20, \ +0x08,0x70,0x48,0x70,0xF7,0x46,0x00,0x00,0x34,0x02,0x00,0x02,0x38,0x02,0x00, \ +0x02,0xAC,0x08,0x00,0x02,0xF0,0xB5,0x3B,0x48,0x87,0x68,0xFD,0xF7,0x33,0xFC, \ +0x02,0x02,0x39,0x4D,0x12,0x0A,0x39,0x49,0x2A,0x60,0x4B,0x78,0x39,0x48,0x03, \ +0x70,0xCB,0x1D,0x39,0x33,0x1B,0x78,0x03,0x24,0x64,0x06,0x01,0x2B,0x17,0xD1, \ +0x06,0x78,0x0D,0x23,0x73,0x43,0x5B,0x18,0x1B,0x7B,0x1B,0x06,0x1A,0x43,0x22, \ +0x60,0x02,0x78,0x0D,0x23,0x5A,0x43,0x51,0x18,0x8B,0x7B,0x4A,0x7B,0x1B,0x02, \ +0x1A,0x43,0xCB,0x7B,0x09,0x7C,0x1B,0x04,0x1A,0x43,0x09,0x06,0x11,0x43,0x61, \ +0x60,0x36,0xE0,0x02,0x2B,0x34,0xD1,0x06,0x78,0x0D,0x23,0x73,0x43,0x5B,0x18, \ +0x1B,0x7B,0x1B,0x06,0x1A,0x43,0x22,0x60,0x02,0x78,0x0D,0x23,0x5A,0x43,0x52, \ +0x18,0x96,0x7B,0x53,0x7B,0x36,0x02,0x33,0x43,0xD6,0x7B,0x12,0x7C,0x36,0x04, \ +0x33,0x43,0x12,0x06,0x1A,0x43,0x62,0x60,0x02,0x78,0x0D,0x23,0x5A,0x43,0x52, \ +0x18,0x96,0x7C,0x53,0x7C,0x36,0x02,0x33,0x43,0xD6,0x7C,0x12,0x7D,0x36,0x04, \ +0x33,0x43,0x12,0x06,0x1A,0x43,0x62,0x61,0x02,0x78,0x0D,0x23,0x5A,0x43,0x51, \ +0x18,0x8B,0x7D,0x4A,0x7D,0x1B,0x02,0x1A,0x43,0xCB,0x7D,0x09,0x7E,0x1B,0x04, \ +0x1A,0x43,0x09,0x06,0x11,0x43,0xA1,0x61,0x00,0x78,0x29,0x68,0x0D,0x4A,0x80, \ +0x07,0x01,0x43,0x29,0x60,0x00,0x20,0x3B,0x5C,0x13,0x54,0x01,0x30,0x18,0x28, \ +0xFA,0xD3,0x11,0x76,0x08,0x0A,0x50,0x76,0x08,0x0C,0x90,0x76,0x08,0x0E,0xD0, \ +0x76,0xF0,0xBD,0x00,0x00,0x84,0x05,0x00,0x02,0x5C,0x01,0x00,0x02,0x30,0x00, \ +0x00,0x02,0x60,0x01,0x00,0x02,0x90,0x08,0x00,0x02,0x80,0xB4,0x10,0x4A,0x11, \ +0x68,0x01,0x31,0x1E,0x29,0x00,0xD1,0x00,0x21,0x0E,0x4F,0x0E,0x4B,0x4F,0x43, \ +0xFB,0x18,0x1F,0x7B,0x00,0x2F,0x11,0xD1,0x11,0x60,0x0C,0x49,0x03,0x22,0x19, \ +0x60,0xD9,0x1D,0x15,0x31,0x59,0x60,0x08,0x39,0x99,0x60,0x00,0x21,0x19,0x73, \ +0x99,0x73,0x9A,0x75,0x99,0x82,0x03,0x60,0x40,0x21,0x01,0x73,0x18,0x1C,0x80, \ +0xBC,0xF7,0x46,0x00,0x00,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0x11,0x00,0x02, \ +0x00,0x00,0x00,0x80,0x80,0xB4,0x0E,0x4A,0x11,0x68,0x01,0x31,0x14,0x29,0x00, \ +0xD1,0x00,0x21,0x0C,0x4F,0x0C,0x4B,0x4F,0x43,0xFB,0x18,0x1F,0x7A,0x00,0x2F, \ +0x0D,0xD1,0x11,0x60,0x0A,0x49,0x19,0x60,0xD9,0x1D,0x11,0x31,0x59,0x60,0x01, \ +0x21,0x99,0x81,0x00,0x21,0x19,0x72,0x03,0x60,0x80,0x21,0x01,0x72,0x18,0x1C, \ +0x80,0xBC,0xF7,0x46,0x0C,0x00,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0xDA,0x00, \ +0x02,0x00,0x00,0x00,0x80,0x01,0x1C,0x00,0x68,0x02,0x08,0x01,0xD3,0x08,0x1C, \ +0xF7,0x46,0x00,0x22,0x0A,0x73,0xF7,0x46,0x01,0x68,0x09,0x08,0x02,0xD3,0x40, \ +0x21,0x01,0x72,0xF7,0x46,0x04,0x4A,0x01,0x68,0x12,0x68,0x00,0x2A,0xF9,0xD1, \ +0x02,0x72,0x08,0x1C,0xF7,0x46,0x00,0x00,0x44,0x02,0x00,0x02,0x0B,0x49,0x01, \ +0x20,0x48,0x63,0x00,0x20,0x08,0x64,0xC8,0x63,0x88,0x66,0x48,0x66,0x48,0x65, \ +0xCA,0x1D,0x59,0x32,0x88,0x65,0x10,0x73,0xC8,0x65,0x50,0x73,0xCA,0x1D,0x39, \ +0x32,0x10,0x82,0x50,0x82,0xC8,0x64,0x7C,0x31,0x48,0x62,0xF7,0x46,0x00,0x00, \ +0xA8,0x01,0x00,0x02,0x00,0xB5,0x07,0x21,0x49,0x06,0xC8,0x69,0x40,0x23,0x18, \ +0x43,0xC8,0x61,0x14,0x48,0x01,0x38,0xFD,0xD1,0xC8,0x69,0x20,0x23,0x18,0x43, \ +0xC8,0x61,0xC8,0x69,0x1B,0x01,0x18,0x43,0xC8,0x61,0x00,0x20,0xFF,0x22,0x91, \ +0x32,0x01,0x30,0x90,0x42,0xFC,0xD3,0xC8,0x69,0x0C,0x4B,0x18,0x40,0xC8,0x61, \ 
+0x00,0x20,0x7D,0x21,0x49,0x01,0x01,0x30,0x88,0x42,0xFC,0xD3,0xFF,0xF7,0xC2, \ +0xFF,0xFD,0xF7,0xCC,0xFA,0x00,0xF0,0x0E,0xF8,0x05,0x49,0x0D,0x20,0x00,0x06, \ +0x01,0x81,0xFF,0x21,0x41,0x31,0x81,0x80,0x00,0xBD,0xD0,0xDD,0x06,0x00,0xFF, \ +0xFD,0x00,0x00,0xFF,0x0F,0x00,0x00,0x90,0xB4,0x0E,0x48,0x00,0x21,0x01,0x70, \ +0x0D,0x48,0x80,0x27,0x07,0x73,0x01,0x23,0x03,0x72,0x82,0x22,0x02,0x71,0x07, \ +0x22,0x02,0x70,0x0A,0x48,0x05,0x24,0x04,0x73,0x86,0x24,0x04,0x72,0x02,0x71, \ +0x08,0x48,0x24,0x22,0x02,0x71,0x07,0x72,0x03,0x73,0x06,0x48,0x01,0x71,0x01, \ +0x73,0x90,0xBC,0xF7,0x46,0x00,0x00,0x10,0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D, \ +0xA0,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xE0,0x03,0x00,0x0D,0xF0,0xB5,0x92, \ +0x48,0x08,0x22,0x01,0x7B,0x91,0x4C,0x0A,0x40,0x00,0x25,0x01,0x27,0x00,0x2A, \ +0x02,0xD0,0x05,0x73,0x27,0x71,0xF0,0xBD,0x04,0x22,0x0A,0x40,0x8D,0x4E,0x66, \ +0xD0,0x8D,0x49,0x09,0x7B,0x0A,0x29,0x22,0xD1,0x8C,0x4A,0x00,0x21,0x15,0x7B, \ +0x0B,0x1C,0x01,0x31,0x08,0x29,0xF5,0x54,0xF9,0xD1,0x86,0x4E,0xF2,0x78,0xB1, \ +0x78,0x73,0x79,0x12,0x02,0x0A,0x43,0x31,0x79,0x1B,0x02,0xF5,0x79,0x19,0x43, \ +0xB3,0x79,0x2D,0x02,0x1D,0x43,0x33,0x78,0x76,0x78,0x1B,0x02,0x1E,0x43,0x80, \ +0x4B,0x9E,0x42,0x09,0xD1,0x80,0x48,0x43,0x6B,0x10,0x1C,0x2A,0x1C,0x03,0xF0, \ +0x63,0xFD,0x47,0xE0,0x05,0x73,0x27,0x71,0xF0,0xBD,0x7C,0x4B,0x9E,0x42,0x06, \ +0xD1,0x79,0x48,0x42,0x6B,0x08,0x1C,0x29,0x1C,0x00,0xF0,0xCA,0xFA,0x3A,0xE0, \ +0x78,0x4B,0x9E,0x42,0x06,0xD1,0x70,0x4E,0x28,0x1C,0xB2,0x78,0xF1,0x78,0x00, \ +0xF0,0xA0,0xFB,0x30,0xE0,0x05,0x2E,0x04,0xD1,0x10,0x1C,0x2A,0x1C,0x00,0xF0, \ +0x3D,0xFB,0x29,0xE0,0x09,0x2E,0x04,0xD1,0x10,0x1C,0x2A,0x1C,0x00,0xF0,0x5A, \ +0xFB,0x22,0xE0,0x6D,0x4B,0x9E,0x42,0x06,0xD1,0x68,0x48,0x43,0x6B,0x10,0x1C, \ +0x2A,0x1C,0x03,0xF0,0x4C,0xFD,0x18,0xE0,0x69,0x4B,0x9E,0x42,0x06,0xD1,0x63, \ +0x48,0x42,0x6B,0x08,0x1C,0x29,0x1C,0x00,0xF0,0xDA,0xFB,0x0E,0xE0,0x65,0x4B, \ +0xDB,0x69,0x00,0x2B,0x03,0xD1,0x20,0x21,0x01,0x73,0x07,0xE0,0x08,0xE0,0x2B, \ +0x1C,0x0D,0x1C,0x11,0x1C,0x30,0x1C,0x2A,0x1C,0x03,0xF0,0x82,0xFD,0x27,0x71, \ +0xF0,0xBD,0xCB,0x07,0xDB,0x0F,0xE0,0x22,0x00,0x2B,0x66,0xD0,0x51,0x4E,0x31, \ +0x78,0x73,0x78,0x09,0x02,0x19,0x43,0x05,0x29,0x07,0xD1,0x4D,0x48,0x81,0x78, \ +0x50,0x48,0x40,0x6B,0x00,0xF0,0x84,0xFB,0x27,0x71,0xF0,0xBD,0x4F,0x4B,0x99, \ +0x42,0x08,0xD1,0x80,0x21,0x01,0x73,0x47,0x48,0x81,0x78,0xC0,0x78,0x00,0xF0, \ +0x26,0xFA,0x27,0x71,0xF0,0xBD,0x46,0x4B,0x99,0x42,0x07,0xD1,0x20,0x21,0x01, \ +0x73,0x41,0x48,0x27,0x71,0x00,0x79,0x03,0xF0,0x2A,0xFD,0xF0,0xBD,0x43,0x4B, \ +0x47,0x4E,0x99,0x42,0x22,0xD1,0x20,0x21,0x01,0x73,0x3B,0x48,0x27,0x71,0x81, \ +0x78,0x02,0x29,0x03,0xD1,0xC1,0x78,0x08,0x29,0x00,0xD1,0x4F,0xE7,0x81,0x78, \ +0x01,0x29,0x0F,0xD1,0xF0,0x7B,0x02,0x28,0x02,0xD0,0x01,0xF0,0x24,0xFF,0xF0, \ +0xBD,0x3D,0x48,0x3D,0x49,0x05,0x70,0x0F,0x20,0x00,0x06,0x81,0x80,0x38,0x4B, \ +0x85,0x81,0x5F,0x62,0xF0,0xBD,0xC1,0x78,0x80,0x78,0x03,0xF0,0x45,0xFC,0xF0, \ +0xBD,0x32,0x4B,0x99,0x42,0x0B,0xD1,0x29,0x4E,0xB1,0x78,0x01,0x29,0x01,0xD1, \ +0x02,0x73,0x03,0xE0,0x80,0x21,0x01,0x73,0x03,0xF0,0x77,0xFC,0x27,0x71,0xF0, \ +0xBD,0x09,0x29,0x09,0xD1,0x20,0x21,0x01,0x73,0x25,0x48,0x27,0x71,0x40,0x6B, \ +0x03,0x28,0xCE,0xD1,0x77,0x73,0xF0,0xBD,0x25,0xE0,0x24,0x4B,0x99,0x42,0x02, \ +0xD1,0x02,0x73,0x27,0x71,0xF0,0xBD,0x27,0x4B,0x99,0x42,0x0E,0xD0,0x26,0x4B, \ +0x99,0x42,0x0B,0xD0,0x81,0x23,0x1B,0x02,0x99,0x42,0x07,0xD0,0x41,0x23,0x5B, \ +0x02,0x99,0x42,0x03,0xD0,0x01,0x23,0xDB,0x03,0x99,0x42,0x02,0xD1,0x02,0x73, \ +0x27,0x71,0xF0,0xBD,0xFF,0x23,0x0C,0x33,0x99,0x42,0x02,0xD0,0x1C,0x4B,0x99, \ 
+0x42,0xA9,0xD1,0x20,0x21,0x01,0x73,0x27,0x71,0xF0,0xBD,0x89,0x08,0xA3,0xD3, \ +0x31,0x78,0x73,0x78,0x09,0x02,0x19,0x43,0x0C,0x4B,0x99,0x42,0x03,0xD1,0xB0, \ +0x78,0x00,0xF0,0x3B,0xFA,0x00,0xE0,0x02,0x73,0x27,0x71,0xF0,0xBD,0x00,0x00, \ +0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0xEC,0x01,0x00,0x02,0xF0,0x02,0x00, \ +0x0D,0x30,0x03,0x00,0x0D,0x01,0x02,0x00,0x00,0xA8,0x01,0x00,0x02,0x0E,0x40, \ +0x00,0x00,0x06,0x80,0x00,0x00,0x22,0xC1,0x00,0x00,0x33,0xC1,0x00,0x00,0x24, \ +0x02,0x00,0x02,0x08,0x02,0x00,0x02,0x63,0x01,0x00,0x02,0x08,0x08,0x00,0x00, \ +0x08,0x80,0x00,0x00,0x0A,0x81,0x00,0x00,0x03,0x02,0x00,0x00,0xF0,0xB5,0x55, \ +0x4D,0x28,0x79,0x80,0x08,0x4D,0xD3,0x54,0x48,0x54,0x4A,0x00,0x79,0x54,0x4B, \ +0x50,0x63,0x0F,0x20,0x00,0x06,0x81,0x88,0x19,0x40,0x81,0x80,0x81,0x89,0x51, \ +0x4B,0x19,0x40,0x81,0x81,0x51,0x49,0x04,0x23,0x0B,0x71,0x00,0x26,0x0E,0x71, \ +0x81,0x89,0x4F,0x4B,0x19,0x43,0x81,0x81,0x81,0x88,0x4E,0x4B,0x19,0x43,0xD7, \ +0x1F,0x75,0x3F,0x81,0x80,0x78,0x6E,0x40,0x28,0x06,0xD2,0x78,0x6E,0x00,0x28, \ +0x03,0xD0,0x78,0x6E,0x02,0x30,0x10,0x63,0x09,0xE0,0x50,0x6B,0x40,0x28,0x04, \ +0xD2,0x79,0x6E,0x00,0x29,0x01,0xD1,0x10,0x63,0x01,0xE0,0x40,0x20,0x10,0x63, \ +0xB8,0x6E,0x11,0x6B,0x40,0x18,0x19,0x23,0x9B,0x01,0x98,0x42,0x06,0xD9,0xBE, \ +0x66,0x7E,0x66,0x01,0x20,0xD0,0x62,0xD0,0x6B,0x01,0x30,0xD0,0x63,0x3B,0x4C, \ +0x20,0x68,0x80,0x68,0xBB,0x6E,0xC0,0x18,0xCD,0x22,0x00,0xF0,0xA2,0xFA,0x31, \ +0x48,0x2E,0x71,0xC0,0x6A,0x01,0x28,0x02,0xD1,0x2F,0x4A,0xD6,0x62,0xF0,0xBD, \ +0xB8,0x6E,0x00,0x28,0x3A,0xD1,0x20,0x68,0x32,0x4B,0x81,0x8A,0xC2,0x7D,0x08, \ +0x31,0x89,0x18,0x79,0x66,0x79,0x6E,0x99,0x42,0x02,0xD8,0x79,0x6E,0x00,0x29, \ +0x06,0xD1,0xBE,0x66,0x25,0x4A,0x7E,0x66,0x10,0x6C,0x01,0x30,0x10,0x64,0xF0, \ +0xBD,0x81,0x7D,0x03,0x29,0x01,0xDD,0x03,0x21,0x81,0x75,0x20,0x68,0x41,0x68, \ +0x08,0x78,0x08,0x28,0x0C,0xD0,0x00,0x28,0x0A,0xD0,0x20,0x28,0x08,0xD0,0xB0, \ +0x28,0x06,0xD0,0xBE,0x66,0x19,0x4A,0x7E,0x66,0x50,0x6C,0x01,0x30,0x50,0x64, \ +0xF0,0xBD,0xC8,0x1D,0x03,0x30,0x06,0x22,0x1D,0x49,0xFD,0xF7,0xE4,0xFA,0x13, \ +0x4A,0x01,0x28,0x90,0x62,0x05,0xD1,0xBE,0x66,0x7E,0x66,0x90,0x6C,0x01,0x30, \ +0x90,0x64,0xF0,0xBD,0x78,0x6E,0x40,0x28,0x06,0xD9,0x78,0x6E,0x40,0x38,0x78, \ +0x66,0xB8,0x6E,0x40,0x30,0xB8,0x66,0xF0,0xBD,0xB8,0x6E,0x79,0x6E,0x40,0x18, \ +0xB8,0x66,0x7E,0x66,0xBE,0x66,0x20,0x68,0xFF,0xF7,0x2F,0xFD,0x05,0x4A,0x20, \ +0x60,0x90,0x6B,0x01,0x30,0x90,0x63,0xF0,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D, \ +0xF0,0x02,0x00,0x0D,0x24,0x02,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF, \ +0xFF,0x60,0x02,0x00,0x0D,0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0x08,0x00, \ +0x00,0x02,0x32,0x06,0x00,0x00,0x74,0x00,0x00,0x02,0x90,0xB5,0x18,0x4A,0x10, \ +0x7A,0x40,0x08,0x16,0xD3,0x17,0x4F,0x00,0x20,0x10,0x72,0xFB,0x6D,0xF9,0x1D, \ +0x59,0x31,0x01,0x2B,0x17,0xD1,0xCB,0x1D,0x15,0x33,0xF8,0x65,0x1B,0x6A,0x00, \ +0x2B,0x01,0xD1,0x10,0x23,0x13,0x72,0xBA,0x6D,0x01,0x24,0x00,0x2A,0x03,0xD0, \ +0x00,0xF0,0x1D,0xF8,0xFC,0x65,0x90,0xBD,0x0A,0x7B,0x01,0x2A,0x02,0xD1,0x08, \ +0x73,0xFC,0x65,0x90,0xBD,0xF8,0x65,0x90,0xBD,0x78,0x65,0xB8,0x65,0x08,0x73, \ +0xF8,0x65,0x06,0x4F,0x38,0x68,0x01,0x7A,0x10,0x29,0xED,0xD1,0xFF,0xF7,0x3B, \ +0xFD,0x38,0x60,0x90,0xBD,0x60,0x03,0x00,0x0D,0xA8,0x01,0x00,0x02,0x14,0x00, \ +0x00,0x02,0xB0,0xB4,0x0F,0x4A,0x90,0x6D,0x40,0x28,0x01,0xD3,0x40,0x20,0x00, \ +0xE0,0x90,0x6D,0x00,0x21,0x00,0x28,0x53,0x6D,0x0A,0xDD,0x0A,0x4C,0x0B,0x4F, \ +0x25,0x6A,0x00,0x2D,0x05,0xD1,0x1D,0x78,0x01,0x33,0x01,0x31,0x81,0x42,0x3D, \ +0x72,0xF6,0xDB,0x91,0x6D,0x09,0x1A,0x91,0x65,0x51,0x6D,0x08,0x18,0x50,0x65, \ 
+0xB0,0xBC,0xF7,0x46,0x00,0x00,0xA8,0x01,0x00,0x02,0x24,0x02,0x00,0x02,0x20, \ +0x03,0x00,0x0D,0xF0,0xB5,0x13,0x4F,0x00,0x26,0x78,0x65,0xB9,0x65,0xFC,0x1D, \ +0x59,0x34,0xFE,0x65,0x26,0x73,0xB8,0x6D,0x80,0x06,0x80,0x0E,0x01,0x25,0x00, \ +0x28,0x00,0xD1,0x25,0x73,0xFF,0xF7,0xC8,0xFF,0x0B,0x48,0x00,0x6A,0x00,0x28, \ +0x02,0xD1,0x0A,0x49,0x10,0x20,0x08,0x72,0xB8,0x6D,0x00,0x28,0x03,0xD0,0xFF, \ +0xF7,0xBC,0xFF,0xFD,0x65,0xF0,0xBD,0x20,0x7B,0x00,0x28,0x02,0xD0,0x26,0x73, \ +0xFD,0x65,0xF0,0xBD,0xFE,0x65,0xF0,0xBD,0xA8,0x01,0x00,0x02,0x24,0x02,0x00, \ +0x02,0x60,0x03,0x00,0x0D,0x90,0xB5,0x14,0x4F,0x78,0x7B,0x00,0x28,0x23,0xD0, \ +0xFC,0x1D,0x15,0x34,0x20,0x6A,0x01,0x28,0x04,0xD1,0x00,0x20,0x20,0x62,0xF8, \ +0x7B,0x00,0xF0,0xCF,0xF9,0x60,0x6A,0x01,0x28,0x02,0xD1,0xF8,0x7B,0x00,0xF0, \ +0xC9,0xF9,0xF8,0x7B,0x02,0x28,0x10,0xD0,0xFF,0xF7,0x98,0xFE,0x08,0x49,0x08, \ +0x68,0x02,0x7A,0x12,0x0A,0x07,0xD3,0x10,0x22,0x02,0x72,0x08,0x68,0x81,0x89, \ +0x0C,0x30,0x0C,0x31,0xFF,0xF7,0xAB,0xFF,0xFF,0xF7,0x4B,0xFF,0x90,0xBD,0x08, \ +0x02,0x00,0x02,0x14,0x00,0x00,0x02,0x90,0xB4,0x1E,0x4A,0x1E,0x4C,0x91,0x6B, \ +0xD3,0x6B,0x8B,0x42,0x19,0xD1,0x20,0x7B,0x40,0x23,0x03,0x40,0xE0,0x20,0x00, \ +0x2B,0x11,0xD1,0x49,0x07,0x02,0xD0,0x20,0x73,0x90,0xBC,0xF7,0x46,0xD1,0x1D, \ +0x59,0x31,0x8A,0x7B,0x01,0x2A,0x02,0xD1,0xD0,0x20,0x20,0x73,0xF5,0xE7,0x89, \ +0x7B,0x00,0x29,0xF2,0xD1,0x20,0x73,0xF0,0xE7,0x20,0x73,0xEE,0xE7,0x8B,0x42, \ +0xEC,0xD2,0xC9,0x1A,0x08,0x29,0x00,0xD9,0x08,0x21,0x01,0x28,0x01,0xD1,0x0C, \ +0x4F,0x02,0xE0,0x02,0x28,0x00,0xD1,0x0B,0x4F,0x00,0x29,0x08,0xD0,0x0A,0x48, \ +0xD3,0x6B,0xFB,0x5C,0x03,0x73,0xD3,0x6B,0x01,0x33,0xD3,0x63,0x01,0x39,0xF7, \ +0xD1,0x20,0x7B,0x10,0x23,0x18,0x43,0x20,0x73,0xD1,0xE7,0xA8,0x01,0x00,0x02, \ +0x70,0x03,0x00,0x0D,0xA8,0x01,0x00,0x02,0xBA,0x01,0x00,0x02,0x30,0x03,0x00, \ +0x0D,0xF0,0xB5,0x04,0x1C,0x1D,0x48,0x0F,0x1C,0x86,0x78,0xC5,0x78,0x20,0x21, \ +0x03,0x2A,0x1B,0x48,0x01,0xD0,0x01,0x73,0xF0,0xBD,0x02,0x2E,0x05,0xD1,0x01, \ +0x2D,0x01,0xD3,0x09,0x2D,0x01,0xD9,0x01,0x73,0xF0,0xBD,0x00,0x2F,0x07,0xD1, \ +0x00,0xF0,0x8A,0xF9,0x08,0x2D,0x05,0xD1,0x13,0x49,0x01,0x20,0xC8,0x61,0x01, \ +0xE0,0x00,0x21,0x01,0x73,0x02,0x2E,0x0E,0xD1,0x00,0x2C,0x14,0xD1,0x08,0x2D, \ +0x03,0xD1,0x0E,0x48,0x0D,0x49,0xC8,0x60,0x0E,0xE0,0x0D,0x48,0x0B,0x49,0xC8, \ +0x60,0x0D,0x49,0x00,0x20,0x08,0x70,0x07,0xE0,0x01,0x2E,0xD7,0xD0,0x0B,0x48, \ +0x06,0x49,0xC8,0x60,0x00,0x20,0x48,0x61,0x08,0x61,0x09,0x48,0x00,0x21,0x47, \ +0x67,0x81,0x67,0xF0,0xBD,0xEC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x24,0x02, \ +0x00,0x02,0x00,0x60,0x00,0x01,0x60,0x08,0x00,0x02,0x63,0x01,0x00,0x02,0xB0, \ +0x08,0x00,0x02,0xA8,0x01,0x00,0x02,0x90,0xB5,0x16,0x49,0x16,0x4B,0x01,0x28, \ +0x05,0xD1,0x18,0x7B,0x60,0x31,0xC8,0x73,0x00,0xF0,0x4A,0xF9,0x90,0xBD,0x88, \ +0x6F,0x4A,0x6F,0x90,0x42,0xFA,0xD2,0x48,0x6F,0x8A,0x6F,0x80,0x1A,0x08,0x28, \ +0x00,0xD9,0x08,0x20,0x00,0x28,0x0A,0xD0,0x0D,0x4A,0x1C,0x7B,0xD7,0x68,0x3C, \ +0x70,0x01,0x37,0xD7,0x60,0x8F,0x6F,0x01,0x37,0x8F,0x67,0x01,0x38,0xF5,0xD1, \ +0x88,0x6F,0x49,0x6F,0x88,0x42,0x02,0xD1,0x00,0xF0,0x2C,0xF9,0x90,0xBD,0x05, \ +0x49,0x00,0x20,0x08,0x73,0x90,0xBD,0x00,0x00,0xA8,0x01,0x00,0x02,0x30,0x03, \ +0x00,0x0D,0x24,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x00,0xB5,0x7F,0x28,0x07, \ +0xD8,0x00,0x29,0x05,0xD1,0x00,0x2A,0x03,0xD1,0x0C,0x4A,0x51,0x6B,0x03,0x29, \ +0x03,0xD1,0x0B,0x49,0x20,0x20,0x08,0x73,0x00,0xBD,0x01,0x29,0x04,0xD1,0x00, \ +0x28,0x08,0xD0,0x02,0x20,0x50,0x63,0x05,0xE0,0x02,0x29,0x03,0xD1,0x00,0x28, \ +0x01,0xD1,0x01,0x20,0x50,0x63,0x00,0xF0,0x00,0xF9,0x00,0xBD,0x00,0x00,0xA8, \ 
+0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB5,0x00,0x29,0x09,0xD1,0x00,0x2A, \ +0x07,0xD1,0x00,0x28,0x01,0xD0,0x01,0x28,0x03,0xD1,0x14,0x49,0x4A,0x6B,0x01, \ +0x2A,0x03,0xD1,0x13,0x49,0x20,0x20,0x08,0x73,0x80,0xBD,0x12,0x4B,0x02,0x2A, \ +0x09,0xD1,0x00,0x28,0x12,0xD0,0x03,0x22,0x4A,0x63,0x1F,0x7B,0x1A,0x1C,0x02, \ +0x23,0x3B,0x43,0x13,0x73,0x0A,0xE0,0x03,0x2A,0x08,0xD1,0x00,0x28,0x06,0xD1, \ +0x02,0x22,0x4A,0x63,0x1F,0x7B,0x1A,0x1C,0xFD,0x23,0x3B,0x40,0x13,0x73,0x08, \ +0x64,0x00,0x20,0x40,0x31,0x08,0x82,0x48,0x82,0x00,0xF0,0xCA,0xF8,0x80,0xBD, \ +0x00,0x00,0xA8,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0xE0,0x03,0x00,0x0D,0xF0, \ +0xB5,0x12,0x4D,0x12,0x4C,0x01,0x29,0x02,0xD1,0x12,0x23,0xA3,0x63,0x03,0xE0, \ +0x20,0x23,0x02,0x29,0x0A,0xD1,0xA3,0x63,0xE3,0x1D,0x59,0x33,0x00,0x27,0x9F, \ +0x73,0xA6,0x6B,0xB0,0x42,0x04,0xD8,0x9F,0x73,0xA0,0x63,0x07,0xE0,0x2B,0x73, \ +0xF0,0xBD,0x70,0x07,0x01,0xD0,0x9F,0x73,0x01,0xE0,0x01,0x20,0x98,0x73,0x80, \ +0x20,0xE7,0x63,0x28,0x73,0x08,0x1C,0x11,0x1C,0xFF,0xF7,0xB3,0xFE,0xF0,0xBD, \ +0x70,0x03,0x00,0x0D,0xA8,0x01,0x00,0x02,0x80,0xB4,0x0C,0x4F,0x0C,0x4A,0x01, \ +0x28,0x06,0xD1,0x80,0x20,0x10,0x72,0x38,0x7B,0xFE,0x23,0x18,0x40,0x38,0x73, \ +0x08,0xE0,0x02,0x28,0x06,0xD1,0x80,0x20,0x08,0x43,0x10,0x72,0x38,0x7B,0x01, \ +0x23,0x18,0x43,0x38,0x73,0x04,0x49,0x20,0x20,0x08,0x73,0x80,0xBC,0xF7,0x46, \ +0xE0,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0x70,0x03,0x00,0x0D,0x0D,0x23,0x1B, \ +0x06,0x99,0x83,0x05,0x49,0x0A,0x70,0x05,0x4A,0x10,0x60,0x02,0x20,0x08,0x72, \ +0x08,0x7A,0x00,0x28,0xFC,0xD1,0xF7,0x46,0x00,0x00,0x20,0x00,0x00,0x0D,0x40, \ +0x00,0x00,0x0D,0xB0,0xB5,0x11,0x4F,0x03,0x2A,0xBB,0x78,0xFC,0x78,0x10,0x4F, \ +0x02,0xD0,0x20,0x20,0x38,0x73,0xB0,0xBD,0x0E,0x4A,0x00,0x25,0xD5,0x67,0xD5, \ +0x1D,0x75,0x35,0x69,0x60,0x01,0x2B,0x0B,0xD1,0x80,0x20,0x38,0x73,0xD0,0x1D, \ +0x59,0x30,0xC0,0x7B,0x09,0x49,0x10,0x23,0x08,0x73,0x38,0x7B,0x18,0x43,0x38, \ +0x73,0xB0,0xBD,0x02,0x1C,0x18,0x1C,0x21,0x1C,0x03,0xF0,0x49,0xF8,0xB0,0xBD, \ +0xEC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0xA8,0x01,0x00,0x02,0x30,0x03,0x00, \ +0x0D,0x80,0xB5,0x02,0x1C,0x00,0x20,0x02,0x2A,0x14,0x49,0x08,0xD0,0x08,0x72, \ +0x14,0x49,0x20,0x22,0x0A,0x70,0x08,0x70,0x13,0x4A,0x86,0x21,0x11,0x72,0x06, \ +0xE0,0x0A,0x7A,0x52,0x09,0x03,0xD2,0x10,0x22,0x0A,0x72,0x0F,0x49,0x48,0x62, \ +0x0F,0x49,0x10,0x4F,0x48,0x65,0xCA,0x1D,0x59,0x32,0x88,0x65,0x10,0x73,0xC8, \ +0x65,0x08,0x66,0x38,0x68,0x01,0x7A,0x10,0x29,0x02,0xD1,0xFF,0xF7,0xDC,0xFA, \ +0x38,0x60,0x38,0x68,0x01,0x7A,0x40,0x29,0x02,0xD1,0xFF,0xF7,0xD5,0xFA,0x38, \ +0x60,0x80,0xBD,0x60,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D, \ +0x24,0x02,0x00,0x02,0xA8,0x01,0x00,0x02,0x14,0x00,0x00,0x02,0x05,0x48,0x06, \ +0x49,0x02,0x78,0x0A,0x67,0x0A,0x6F,0x03,0x78,0x9A,0x42,0xFB,0xD0,0x03,0x49, \ +0x60,0x20,0x08,0x73,0xF7,0x46,0xF0,0x03,0x00,0x0D,0xA8,0x01,0x00,0x02,0x70, \ +0x03,0x00,0x0D,0x80,0xB5,0x86,0xB0,0x42,0x68,0x11,0x78,0x08,0x29,0x01,0xD0, \ +0x06,0xB0,0x80,0xBD,0x91,0x7F,0xD3,0x7F,0x09,0x02,0x19,0x43,0x15,0x4B,0x09, \ +0x04,0x1F,0x88,0x09,0x0C,0xB9,0x42,0x02,0xD0,0x5B,0x88,0x8B,0x42,0x06,0xD1, \ +0xD1,0x1D,0x11,0x31,0x06,0x22,0x10,0x48,0xFC,0xF7,0xE9,0xFF,0x19,0xE0,0x03, \ +0x23,0x5B,0x02,0x99,0x42,0x06,0xDD,0xD1,0x1D,0x11,0x31,0x06,0x22,0x0C,0x48, \ +0xFC,0xF7,0xDE,0xFF,0x0E,0xE0,0xD7,0x1D,0x01,0x37,0x47,0x60,0x18,0x31,0x81, \ +0x82,0x69,0x46,0x10,0x1C,0x18,0x22,0xFC,0xF7,0xD3,0xFF,0x68,0x46,0x18,0x22, \ +0x39,0x1C,0xFC,0xF7,0xCE,0xFF,0xCE,0xE7,0x00,0x00,0x80,0x02,0x00,0x02,0x7A, \ +0x02,0x00,0x02,0x74,0x02,0x00,0x02,0xB0,0xB5,0x82,0xB0,0x68,0x46,0x08,0x22, \ 
+0x3D,0x49,0x01,0xF0,0x5B,0xF9,0x00,0xF0,0x85,0xF8,0x3C,0x4F,0x06,0x22,0xFF, \ +0x21,0x38,0x1C,0x01,0x31,0x00,0xF0,0xE5,0xF8,0xFF,0x21,0x11,0x31,0xB8,0x1D, \ +0x0E,0x22,0x05,0x1C,0x00,0xF0,0xDE,0xF8,0x36,0x4C,0x12,0x22,0x03,0x21,0x20, \ +0x1C,0x00,0xF0,0xD8,0xF8,0xF8,0x1D,0x0D,0x30,0x0E,0x22,0xFF,0x21,0x21,0x31, \ +0x00,0xF0,0xD1,0xF8,0xF8,0x1D,0x1F,0x30,0x01,0x22,0xFF,0x21,0x31,0x31,0x00, \ +0xF0,0xCA,0xF8,0xF8,0x1D,0x21,0x30,0x07,0x22,0xFF,0x21,0xF1,0x31,0x00,0xF0, \ +0xC3,0xF8,0x00,0xF0,0x72,0xF8,0xE0,0x1D,0x01,0x30,0x04,0x22,0xF9,0x1D,0x1B, \ +0x31,0xFC,0xF7,0x8B,0xFF,0x38,0x78,0x40,0x08,0x0B,0xD2,0x68,0x46,0x06,0x22, \ +0x39,0x1C,0xFC,0xF7,0x73,0xFF,0x00,0x28,0x04,0xD0,0x06,0x22,0x38,0x1C,0x1F, \ +0x49,0xFC,0xF7,0x7C,0xFF,0x28,0x1C,0x0E,0x22,0x1D,0x49,0xFC,0xF7,0x77,0xFF, \ +0xF8,0x1D,0x19,0x30,0x81,0x79,0x10,0x29,0x0B,0xD0,0x20,0x29,0x09,0xD0,0x31, \ +0x29,0x07,0xD0,0x30,0x29,0x05,0xD0,0x32,0x29,0x03,0xD0,0x40,0x29,0x01,0xD0, \ +0x41,0x29,0x01,0xD1,0x14,0x4A,0xD1,0x75,0x02,0x7A,0x14,0x49,0x55,0x2A,0x14, \ +0xD1,0x42,0x7A,0x53,0x2A,0x11,0xD1,0x82,0x7A,0x42,0x2A,0x0E,0xD1,0xC2,0x7A, \ +0x53,0x2A,0x0B,0xD1,0x02,0x7B,0x55,0x2A,0x08,0xD1,0x42,0x7B,0x53,0x2A,0x05, \ +0xD1,0x80,0x7B,0x50,0x28,0x02,0xD1,0x01,0x20,0x08,0x60,0x01,0xE0,0x00,0x20, \ +0x08,0x60,0x02,0xB0,0xB0,0xBD,0x00,0x00,0x2C,0x51,0x00,0x00,0x60,0x08,0x00, \ +0x02,0xA8,0x01,0x00,0x02,0x74,0x00,0x00,0x02,0x4C,0x01,0x00,0x02,0x18,0x00, \ +0x00,0x02,0x74,0x01,0x00,0x02,0x80,0x21,0xF3,0x20,0x00,0x05,0x01,0x60,0x00, \ +0x21,0x01,0x60,0x01,0x21,0x41,0x60,0x01,0x60,0x07,0x21,0x49,0x06,0xCA,0x69, \ +0x01,0x23,0x5B,0x03,0x1A,0x43,0xCA,0x61,0x02,0x49,0x01,0x63,0x01,0x69,0x80, \ +0x68,0xF7,0x46,0x00,0x00,0x01,0x0C,0x00,0x02,0x07,0x20,0x40,0x06,0xC1,0x69, \ +0x02,0x4B,0x19,0x40,0xC1,0x61,0xF7,0x46,0x00,0x00,0xFF,0xDF,0x00,0x00,0xF0, \ +0xB4,0x00,0x27,0xF3,0x24,0x24,0x05,0x00,0x28,0x08,0xD9,0x13,0x4D,0xEB,0x5D, \ +0xE3,0x60,0x26,0x69,0xB3,0x08,0xFC,0xD3,0x01,0x37,0x87,0x42,0xF7,0xD3,0xFF, \ +0x23,0xE3,0x60,0xA0,0x68,0x27,0x1C,0x38,0x69,0x40,0x08,0xFC,0xD3,0xB8,0x68, \ +0x00,0x20,0x00,0x2A,0x0D,0xD9,0x1C,0x1C,0x3D,0x69,0xAB,0x08,0xFC,0xD3,0xFC, \ +0x60,0x3B,0x69,0x5B,0x08,0xFC,0xD3,0xBB,0x68,0x01,0x30,0x0B,0x70,0x01,0x31, \ +0x90,0x42,0xF2,0xD3,0x12,0x20,0x01,0x38,0xFD,0xD1,0xF0,0xBC,0xF7,0x46,0x00, \ +0x00,0x98,0x02,0x00,0x02,0xF3,0x20,0x00,0x05,0x81,0x68,0x05,0x21,0xC1,0x60, \ +0x01,0x69,0x89,0x08,0xFC,0xD3,0xFF,0x21,0xC1,0x60,0x01,0x69,0x89,0x08,0xFC, \ +0xD3,0x81,0x68,0x01,0x69,0x49,0x08,0xFC,0xD3,0x80,0x68,0x00,0x06,0x00,0x0E, \ +0xF7,0x46,0x90,0xB5,0x04,0x1C,0x48,0x09,0x08,0x23,0x18,0x40,0x17,0x1C,0x03, \ +0x22,0x02,0x43,0x08,0x48,0x02,0x70,0x41,0x70,0xFF,0xF7,0xDE,0xFF,0x40,0x08, \ +0xFB,0xD2,0x12,0x20,0x01,0x38,0xFD,0xD1,0x02,0x20,0x21,0x1C,0x3A,0x1C,0xFF, \ +0xF7,0xA4,0xFF,0x90,0xBD,0x00,0x00,0x98,0x02,0x00,0x02,0xF0,0xB4,0x13,0x4A, \ +0x00,0x27,0xD7,0x65,0x17,0x66,0x17,0x67,0x57,0x67,0x20,0x20,0x90,0x67,0x10, \ +0x48,0x07,0x70,0x41,0x1C,0x01,0x20,0x04,0x02,0x00,0x25,0x03,0x1C,0x46,0x08, \ +0x05,0xD2,0x5B,0x08,0x01,0x35,0x2D,0x06,0x2D,0x0E,0x5E,0x08,0xF9,0xD3,0x0D, \ +0x70,0x01,0x31,0x01,0x30,0xA0,0x42,0xF0,0xD3,0x07,0x49,0x00,0x20,0x80,0xC1, \ +0x01,0x30,0x20,0x28,0xFB,0xD3,0x57,0x66,0x97,0x66,0xD7,0x67,0xF0,0xBC,0xF7, \ +0x46,0xA8,0x02,0x00,0x02,0x3C,0x09,0x00,0x02,0x3C,0x0A,0x00,0x02,0x90,0xB5, \ +0x0A,0x4F,0x0A,0x4C,0x38,0x68,0x63,0x1C,0x98,0x42,0x04,0xD0,0x3C,0x60,0xFC, \ +0xF7,0x4A,0xFA,0x00,0xF0,0xDC,0xFB,0x06,0x48,0x3C,0x60,0x00,0x68,0xFC,0xF7, \ +0xC0,0xFA,0x00,0x20,0x38,0x60,0x00,0xF0,0xE3,0xFB,0x90,0xBD,0x14,0x03,0x00, \ 
+0x02,0xF0,0xF0,0xF0,0xF0,0x84,0x03,0x00,0x02,0x80,0xB5,0x05,0x48,0x05,0x4F, \ +0x38,0x60,0xFC,0xF7,0x32,0xFA,0x00,0xF0,0xC4,0xFB,0x03,0x48,0x38,0x60,0x80, \ +0xBD,0x00,0x00,0xF0,0xF0,0xF0,0xF0,0x14,0x03,0x00,0x02,0xF1,0xF0,0xF0,0xF0, \ +0x0F,0x00,0x2D,0xE9,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xCC,0x30,0x9F, \ +0xE5,0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20, \ +0x82,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01, \ +0x40,0x2D,0xE9,0xB0,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2, \ +0x00,0x20,0x83,0xE5,0x9C,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50, \ +0xE3,0x04,0x00,0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40, \ +0x2D,0xE9,0x84,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x10,0xD0,0x8D,0xE2,0x78, \ +0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x0F,0x00,0x2D,0xE9,0x64,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82, \ +0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40, \ +0x2D,0xE9,0x4C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00, \ +0x20,0x83,0xE5,0x34,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3, \ +0x04,0x00,0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D, \ +0xE9,0x20,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x00,0x00,0x4F,0xE1,0x04,0xE0, \ +0x4E,0xE2,0x01,0x40,0x2D,0xE9,0x0C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x14, \ +0x03,0x00,0x02,0x04,0x03,0x00,0x02,0x1D,0x01,0x00,0x00,0x29,0x01,0x00,0x00, \ +0x00,0xA0,0x00,0x47,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xE4,0x31,0x9F, \ +0xE5,0x00,0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00, \ +0x52,0xE3,0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F, \ +0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0xC0,0x11,0x9F,0xE5,0x00,0x00,0x91,0xE5, \ +0x00,0x00,0x50,0xE3,0x27,0x00,0x00,0x0A,0xC0,0x31,0x9F,0xE5,0x00,0x20,0x93, \ +0xE5,0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA8,0x31,0x9F,0xE5,0x00,0x20, \ +0x93,0xE5,0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40,0xBD,0xE8,0x00, \ +0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08,0x40,0xBD,0xE8, \ +0x0E,0x10,0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x04,0x10,0x2D, \ +0xE5,0xF0,0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD2,0x20,0xA0,0xE3,0x02,0xF0, \ +0x21,0xE1,0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0,0x21,0xE1,0x0F, \ +0x00,0x2D,0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x44,0x11,0x9F,0xE5, \ +0x00,0x00,0x91,0xE5,0x08,0xD0,0x80,0xE5,0x44,0x31,0x9F,0xE5,0x00,0x20,0x93, \ +0xE5,0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80,0xE5,0x00,0x20, \ +0xA0,0xE3,0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00,0x81,0xE5,0x70, \ +0x01,0x00,0xEA,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x6D,0x01,0x00,0xEA, \ +0x00,0xA0,0x00,0x47,0xD1,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xF4,0x30,0x9F, \ +0xE5,0x00,0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00, \ +0x52,0xE3,0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F, \ +0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x00,0x10,0x9D,0xE5,0x1F,0x20,0xA0,0xE3, \ +0x02,0x10,0x01,0xE0,0x12,0x00,0x51,0xE3,0x0B,0x00,0x00,0x0A,0xBC,0x10,0x9F, \ +0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x27,0x00,0x00,0x0A,0xBC,0x30, \ +0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA4, \ +0x30,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A, \ +0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0, \ +0xE1,0x08,0x40,0xBD,0xE8,0x0E,0x10,0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0, \ 
+0x21,0xE1,0x04,0x10,0x2D,0xE5,0xF0,0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD1, \ +0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3, \ +0x05,0xF0,0x21,0xE1,0x0F,0x00,0x2D,0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D, \ +0xE9,0x40,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x08,0xD0,0x80,0xE5,0x40,0x30, \ +0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18, \ +0x20,0x80,0xE5,0x00,0x20,0xA0,0xE3,0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3, \ +0x00,0x00,0x81,0xE5,0x2F,0x01,0x00,0xEA,0x18,0xD0,0x8D,0xE2,0xD3,0x00,0xA0, \ +0xE3,0x00,0xF0,0x21,0xE1,0x2B,0x01,0x00,0xEA,0x14,0x03,0x00,0x02,0x04,0x03, \ +0x00,0x02,0x00,0x03,0x00,0x02,0x08,0x03,0x00,0x02,0x8C,0x03,0x00,0x02,0x24, \ +0x03,0x00,0x02,0x90,0xB5,0x86,0xB0,0x00,0x24,0x13,0x4F,0x13,0x4A,0x3C,0x60, \ +0x7C,0x60,0xBC,0x60,0x00,0x21,0x10,0x1C,0xBC,0x61,0x10,0xC0,0x01,0x31,0x20, \ +0x29,0xFB,0xD3,0xD0,0x1D,0x79,0x30,0xFA,0x60,0x38,0x61,0x7A,0x61,0x78,0x6A, \ +0x00,0x22,0x00,0x21,0x05,0x92,0x02,0x1C,0x04,0x91,0x03,0x90,0xF8,0x69,0x39, \ +0x6A,0x02,0x92,0x00,0x90,0x01,0x91,0x08,0xA1,0x07,0x48,0x05,0x4A,0x0C,0x4B, \ +0x00,0xF0,0x18,0xF8,0xBC,0x62,0xFC,0x62,0x06,0xB0,0x90,0xBD,0x88,0x03,0x00, \ +0x02,0xBC,0x0A,0x00,0x02,0x75,0x44,0x00,0x00,0x3C,0x0B,0x00,0x02,0x53,0x79, \ +0x73,0x74,0x65,0x6D,0x20,0x54,0x69,0x6D,0x65,0x72,0x20,0x54,0x68,0x72,0x65, \ +0x61,0x64,0x00,0x4D,0x49,0x54,0x41,0xFF,0xB5,0x07,0x1C,0x0D,0x98,0x0E,0x9C, \ +0x0A,0xAE,0x4C,0xCE,0x09,0x9D,0xB9,0x62,0x02,0x99,0x79,0x64,0x03,0x99,0xB9, \ +0x64,0x00,0x21,0xB9,0x60,0x7A,0x61,0xAA,0x18,0x01,0x3A,0xFD,0x60,0x3A,0x61, \ +0xDA,0x06,0xD2,0x0E,0xF3,0x06,0xB8,0x61,0xFA,0x62,0xDB,0x0E,0xFB,0x63,0xF8, \ +0x61,0x79,0x60,0x03,0x20,0x38,0x63,0x79,0x63,0x25,0x48,0xB9,0x63,0x78,0x65, \ +0x01,0x20,0x90,0x40,0xBF,0x65,0x39,0x65,0x64,0x37,0x00,0x22,0x00,0x23,0x0E, \ +0xC7,0x0C,0xC7,0x78,0x3F,0x38,0x64,0x38,0x1C,0x1E,0x49,0x00,0xF0,0xFB,0xFA, \ +0xC0,0x20,0x00,0xF0,0x32,0xFB,0x1C,0x49,0x1D,0x4D,0x39,0x60,0x2A,0x68,0xF9, \ +0x1D,0x79,0x31,0x00,0x2A,0x0A,0xD0,0xD3,0x1D,0x79,0x33,0xDA,0x68,0xDF,0x60, \ +0xD3,0x1D,0x79,0x33,0x9F,0x60,0xCA,0x60,0x2A,0x68,0x8A,0x60,0x02,0xE0,0x2F, \ +0x60,0x8F,0x60,0xCF,0x60,0x13,0x49,0x14,0x4D,0x0A,0x68,0x01,0x32,0x0A,0x60, \ +0x29,0x68,0x01,0x31,0x29,0x60,0x00,0xF0,0x12,0xFB,0x00,0x2C,0x07,0xD0,0x38, \ +0x1C,0x00,0xF0,0x1D,0xFB,0x00,0x28,0x0A,0xD0,0x00,0xF0,0x83,0xFB,0x07,0xE0, \ +0xC0,0x20,0x00,0xF0,0x05,0xFB,0x29,0x68,0x01,0x39,0x29,0x60,0x00,0xF0,0x00, \ +0xFB,0x00,0x20,0x04,0xB0,0xF0,0xBD,0x00,0x00,0xD1,0x45,0x00,0x00,0x11,0x46, \ +0x00,0x00,0x44,0x52,0x48,0x54,0x0C,0x03,0x00,0x02,0x10,0x03,0x00,0x02,0x24, \ +0x03,0x00,0x02,0x90,0xB5,0x41,0x60,0x07,0x1C,0x08,0x30,0x00,0x21,0x00,0x24, \ +0x12,0xC0,0x12,0xC0,0xC0,0x20,0x00,0xF0,0xE5,0xFA,0x0C,0x49,0x0C,0x4B,0x39, \ +0x60,0x19,0x68,0x00,0x29,0x06,0xD0,0xCA,0x69,0xCF,0x61,0x97,0x61,0xFA,0x61, \ +0x19,0x68,0xB9,0x61,0x02,0xE0,0x1F,0x60,0xBF,0x61,0xFF,0x61,0x06,0x49,0x0A, \ +0x68,0x01,0x32,0x0A,0x60,0x00,0xF0,0xCF,0xFA,0x20,0x1C,0x90,0xBD,0x00,0x00, \ +0x4E,0x44,0x56,0x44,0xB8,0x03,0x00,0x02,0xBC,0x03,0x00,0x02,0xF0,0xB5,0x85, \ +0xB0,0x07,0x1C,0xC0,0x20,0x0C,0x1C,0x15,0x1C,0x00,0xF0,0xBE,0xFA,0xA9,0x08, \ +0x03,0xD3,0xB9,0x68,0x21,0x40,0xB9,0x60,0x3D,0xE0,0xB9,0x68,0x21,0x43,0xB9, \ +0x60,0x3C,0x69,0x0A,0x1C,0x00,0x2C,0x63,0xD0,0x7B,0x69,0x6D,0x49,0x01,0x2B, \ +0x35,0xD1,0xE5,0x1D,0x79,0x35,0x2E,0x68,0xB3,0x08,0x05,0xD3,0xA3,0x6F,0x16, \ +0x1C,0x1E,0x40,0x9E,0x42,0x28,0xD1,0x02,0xE0,0xA3,0x6F,0x13,0x40,0x24,0xD0, \ +0xE3,0x6F,0x1A,0x60,0x2A,0x68,0x52,0x08,0x03,0xD3,0xBA,0x68,0xA3,0x6F,0x9A, \ 
+0x43,0xBA,0x60,0x00,0x26,0x3E,0x61,0x7E,0x61,0xA6,0x66,0x0A,0x68,0x01,0x32, \ +0x0A,0x60,0x00,0xF0,0x8D,0xFA,0x60,0x6E,0x00,0x28,0x04,0xD0,0xE0,0x1D,0x45, \ +0x30,0x00,0xF0,0x34,0xFB,0x00,0xE0,0xE6,0x64,0x6E,0x60,0x20,0x1C,0x00,0xF0, \ +0x90,0xFA,0x00,0x28,0x01,0xD0,0x00,0xF0,0xF6,0xFA,0x30,0x1C,0x9B,0xE0,0x00, \ +0xF0,0x78,0xFA,0x97,0xE0,0x03,0x94,0x00,0x25,0x3D,0x61,0x01,0x92,0x4F,0x4A, \ +0x02,0x93,0x11,0x68,0x00,0x26,0x01,0x31,0x11,0x60,0x00,0xF0,0x6B,0xFA,0xC0, \ +0x20,0x00,0xF0,0x68,0xFA,0xF9,0x68,0x00,0x29,0x0A,0xD0,0x00,0x21,0xF9,0x60, \ +0x03,0x9C,0x7B,0x69,0x02,0x93,0xB9,0x68,0x01,0x9A,0x11,0x43,0x01,0x91,0x00, \ +0x2C,0x46,0xD0,0xE3,0x1D,0x79,0x33,0x04,0x93,0x19,0x68,0x00,0x91,0x89,0x08, \ +0x06,0xD3,0xA1,0x6F,0x01,0x9A,0x0A,0x40,0x8A,0x42,0x07,0xD1,0x04,0xE0,0x6F, \ +0xE0,0xA1,0x6F,0x01,0x9A,0x11,0x40,0x01,0xD0,0x00,0x21,0x00,0xE0,0x07,0x21, \ +0x22,0x6F,0x00,0x29,0x28,0xD1,0x01,0x99,0xE3,0x6F,0x19,0x60,0x00,0x99,0x49, \ +0x08,0x03,0xD3,0xB9,0x68,0xA3,0x6F,0x99,0x43,0xB9,0x60,0x21,0x6F,0xA1,0x42, \ +0x02,0xD1,0x00,0x21,0x03,0x91,0x08,0xE0,0x03,0x9B,0xA3,0x42,0x00,0xD1,0x03, \ +0x91,0x63,0x6F,0x4B,0x67,0x21,0x6F,0x63,0x6F,0x19,0x67,0x79,0x69,0x01,0x39, \ +0x79,0x61,0x00,0x21,0xA1,0x66,0x04,0x9B,0x00,0x2D,0x59,0x60,0x03,0xD1,0x25, \ +0x1C,0x26,0x1C,0x21,0x67,0x02,0xE0,0x34,0x67,0x21,0x67,0x26,0x1C,0x02,0x9B, \ +0x14,0x1C,0x01,0x3B,0x02,0x93,0x00,0x2B,0xA5,0xD1,0x03,0x99,0x39,0x61,0x00, \ +0xF0,0x0F,0xFA,0x00,0x2D,0x18,0xD0,0x1C,0x4E,0x00,0x24,0x68,0x6E,0x2F,0x6F, \ +0x00,0x28,0x04,0xD0,0xE8,0x1D,0x45,0x30,0x00,0xF0,0xB1,0xFA,0x00,0xE0,0xEC, \ +0x64,0xC0,0x20,0x00,0xF0,0xFE,0xF9,0x31,0x68,0x01,0x31,0x31,0x60,0x00,0xF0, \ +0xF9,0xF9,0x28,0x1C,0x00,0xF0,0x06,0xFA,0x3D,0x1C,0xE8,0xD1,0xC0,0x20,0x00, \ +0xF0,0xF1,0xF9,0x0E,0x49,0x0A,0x68,0x01,0x3A,0x0A,0x60,0x00,0xF0,0xEB,0xF9, \ +0x0C,0x48,0x0C,0x49,0x00,0x68,0x09,0x68,0x88,0x42,0x05,0xD0,0x0B,0x48,0x00, \ +0x68,0x00,0x28,0x01,0xD1,0x00,0xF0,0x59,0xFA,0x00,0x20,0x05,0xB0,0xF0,0xBD, \ +0x79,0x69,0x00,0x29,0x00,0xD1,0x5D,0xE7,0xF9,0x68,0x01,0x31,0xF9,0x60,0x59, \ +0xE7,0x24,0x03,0x00,0x02,0x04,0x03,0x00,0x02,0x08,0x03,0x00,0x02,0x14,0x03, \ +0x00,0x02,0xFF,0xB5,0x07,0x1C,0xC0,0x20,0x0D,0x1C,0x09,0x9E,0x00,0xF0,0xC5, \ +0xF9,0x02,0x9A,0x91,0x08,0x04,0xD3,0xB9,0x68,0x29,0x40,0xA9,0x42,0x0E,0xD1, \ +0x02,0xE0,0xB9,0x68,0x29,0x40,0x0A,0xD0,0xB9,0x68,0x03,0x9B,0x00,0x24,0x19, \ +0x60,0x02,0x9A,0x51,0x08,0x3D,0xD3,0xB9,0x68,0xA9,0x43,0xB9,0x60,0x39,0xE0, \ +0x07,0x24,0x00,0x2E,0x36,0xD0,0x1D,0x49,0x0C,0x68,0x1D,0x49,0xA1,0x66,0xA5, \ +0x67,0xE5,0x1D,0x02,0x9A,0x79,0x35,0x2A,0x60,0x03,0x9B,0xE3,0x67,0xE7,0x66, \ +0x39,0x69,0x00,0x29,0x09,0xD0,0x21,0x67,0x39,0x69,0x49,0x6F,0x61,0x67,0x39, \ +0x69,0x49,0x6F,0x0C,0x67,0x39,0x69,0x4C,0x67,0x02,0xE0,0x3C,0x61,0x24,0x67, \ +0x64,0x67,0x79,0x69,0x01,0x31,0x79,0x61,0x07,0x21,0x21,0x63,0x01,0x21,0xA1, \ +0x63,0x0E,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xE6,0x64,0x00,0xF0,0x82,0xF9, \ +0x01,0x23,0xDE,0x42,0x03,0xD0,0xE0,0x1D,0x45,0x30,0x00,0xF0,0x97,0xFA,0x20, \ +0x1C,0x00,0xF0,0xCE,0xFA,0x68,0x68,0x04,0xB0,0xF0,0xBD,0x00,0xF0,0x73,0xF9, \ +0x20,0x1C,0xF9,0xE7,0x00,0x00,0x04,0x03,0x00,0x02,0x65,0x48,0x00,0x00,0x24, \ +0x03,0x00,0x02,0x00,0xB5,0xFF,0xF7,0xE7,0xFB,0xFF,0xF7,0xC1,0xFD,0x00,0xF0, \ +0x9F,0xFB,0x00,0xF0,0xA5,0xFB,0x00,0xF0,0x05,0xFA,0x00,0xF0,0xA9,0xFB,0x00, \ +0xF0,0xAF,0xFB,0x00,0xBD,0x00,0xA0,0x00,0x47,0x13,0x00,0xA0,0xE3,0x00,0xF0, \ +0x21,0xE1,0x48,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0xFC, \ +0xFF,0xFF,0x0A,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x34,0x10,0x9F,0xE5, \ 
+0x00,0x00,0x81,0xE5,0x04,0x20,0x90,0xE5,0x18,0x30,0x90,0xE5,0x01,0x20,0x82, \ +0xE2,0x04,0x20,0x80,0xE5,0x20,0x20,0x9F,0xE5,0x08,0xD0,0x90,0xE5,0x00,0x30, \ +0x82,0xE5,0x03,0x00,0xBD,0xE8,0x00,0x00,0x50,0xE3,0x01,0xF0,0x6F,0xE1,0xF0, \ +0x80,0xFD,0x08,0xFF,0xDF,0xFD,0xE8,0x08,0x03,0x00,0x02,0x04,0x03,0x00,0x02, \ +0x8C,0x03,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x4E,0x4B,0x98,0x42,0x01,0xD0,0x02, \ +0xB0,0xF0,0xBD,0x00,0x20,0x00,0x90,0x00,0x26,0xC0,0x20,0x00,0xF0,0x19,0xF9, \ +0x4A,0x4D,0x29,0x68,0x09,0x68,0x01,0x91,0x00,0x29,0x01,0xD0,0x01,0xAA,0x8A, \ +0x61,0x29,0x68,0x46,0x4C,0x0E,0x60,0x29,0x68,0x04,0x31,0x29,0x60,0x22,0x68, \ +0x91,0x42,0x02,0xD1,0x43,0x49,0x09,0x68,0x29,0x60,0x43,0x49,0x0E,0x60,0x00, \ +0xF0,0x01,0xF9,0xC0,0x20,0x00,0xF0,0xFE,0xF8,0x01,0x99,0x00,0x29,0x5C,0xD0, \ +0x01,0x9C,0x21,0x69,0xA1,0x42,0x01,0xD1,0x00,0x21,0x08,0xE0,0x62,0x69,0x4A, \ +0x61,0x21,0x69,0x62,0x69,0x11,0x61,0x22,0x69,0x01,0xA9,0x91,0x61,0x21,0x69, \ +0x01,0x91,0x21,0x68,0x20,0x29,0x03,0xD9,0x20,0x39,0x21,0x60,0x00,0x25,0x04, \ +0xE0,0x27,0x1D,0xA2,0xCF,0x21,0x60,0x00,0x29,0x03,0xD0,0x69,0x46,0xA1,0x61, \ +0x24,0x61,0x00,0xE0,0xA6,0x61,0x00,0xF0,0xD8,0xF8,0x00,0x2D,0x02,0xD0,0x38, \ +0x1C,0x00,0xF0,0xEE,0xFB,0xC0,0x20,0x00,0xF0,0xD0,0xF8,0xA2,0x69,0x69,0x46, \ +0x8A,0x42,0x25,0xD1,0x21,0x68,0x20,0x29,0x01,0xD9,0x1F,0x21,0x00,0xE0,0x01, \ +0x39,0x20,0x4D,0x89,0x00,0x2A,0x68,0x89,0x18,0x1F,0x4A,0x12,0x68,0x91,0x42, \ +0x07,0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x1D,0x4A,0x89,0x10,0x12,0x68,0x89, \ +0x00,0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x22,0x61,0x0A,0x68,0x52,0x69, \ +0x62,0x61,0x14,0x61,0x0A,0x68,0x54,0x61,0xA1,0x61,0x03,0xE0,0x24,0x61,0xA1, \ +0x61,0x64,0x61,0x0C,0x60,0x00,0xF0,0xA4,0xF8,0xC0,0x20,0x00,0xF0,0xA1,0xF8, \ +0x01,0x99,0x00,0x29,0xA2,0xD1,0x0F,0x49,0x09,0x68,0x00,0x29,0x10,0xD1,0x0E, \ +0x4C,0x03,0x21,0x22,0x68,0x11,0x63,0x22,0x68,0x01,0x21,0x91,0x63,0x0C,0x4A, \ +0x11,0x68,0x01,0x31,0x11,0x60,0x00,0xF0,0x8D,0xF8,0x20,0x68,0x00,0xF0,0xE0, \ +0xF9,0x6C,0xE7,0x00,0xF0,0x87,0xF8,0x69,0xE7,0x4D,0x49,0x54,0x41,0x9C,0x03, \ +0x00,0x02,0x98,0x03,0x00,0x02,0x94,0x03,0x00,0x02,0xA0,0x03,0x00,0x02,0x04, \ +0x03,0x00,0x02,0x24,0x03,0x00,0x02,0x90,0xB5,0x07,0x1C,0x00,0x6B,0x04,0x28, \ +0x0C,0xD1,0xC0,0x20,0x00,0xF0,0x70,0xF8,0x0A,0x49,0x0A,0x68,0x01,0x32,0x0A, \ +0x60,0x00,0xF0,0x6A,0xF8,0x38,0x1C,0x00,0xF0,0x77,0xF8,0x90,0xBD,0xC0,0x20, \ +0x00,0xF0,0x63,0xF8,0xBC,0x6E,0x00,0xF0,0x60,0xF8,0x00,0x2C,0xF6,0xD0,0x38, \ +0x1C,0x00,0xF0,0x75,0xFB,0x90,0xBD,0x24,0x03,0x00,0x02,0x80,0xB5,0x0C,0x4F, \ +0x39,0x68,0x88,0x6C,0x49,0x6C,0x00,0xF0,0x68,0xFB,0xC0,0x20,0x00,0xF0,0x4E, \ +0xF8,0x3A,0x68,0x01,0x21,0x11,0x63,0x3A,0x68,0x91,0x63,0x06,0x49,0x0A,0x68, \ +0x01,0x32,0x0A,0x60,0x00,0xF0,0x43,0xF8,0x38,0x68,0x00,0xF0,0x96,0xF9,0x80, \ +0xBD,0x00,0x00,0x04,0x03,0x00,0x02,0x24,0x03,0x00,0x02,0x00,0xA3,0x18,0x47, \ +0x10,0x20,0x90,0xE5,0x03,0x20,0xC2,0xE3,0x48,0x20,0x42,0xE2,0x01,0x30,0xA0, \ +0xE3,0x00,0x30,0x82,0xE5,0x33,0x30,0xA0,0xE3,0x04,0x30,0x82,0xE5,0x00,0x30, \ +0xA0,0xE3,0x08,0x30,0x82,0xE5,0x0C,0x30,0x82,0xE5,0x10,0x30,0x82,0xE5,0x14, \ +0x30,0x82,0xE5,0x18,0x30,0x82,0xE5,0x1C,0x30,0x82,0xE5,0x20,0x30,0x82,0xE5, \ +0x24,0x30,0x82,0xE5,0x28,0x30,0x82,0xE5,0x2C,0x30,0x82,0xE5,0x0C,0x30,0x90, \ +0xE5,0x30,0x30,0x82,0xE5,0x00,0x30,0xA0,0xE3,0x34,0x30,0x82,0xE5,0x38,0x30, \ +0x82,0xE5,0x3C,0x30,0x82,0xE5,0x40,0x10,0x82,0xE5,0x44,0x30,0x82,0xE5,0x08, \ +0x20,0x80,0xE5,0x1E,0xFF,0x2F,0xE1,0x00,0xA3,0x18,0x47,0x00,0x30,0x0F,0xE1, \ +0x3F,0x20,0xA0,0xE3,0x02,0x10,0x03,0xE0,0x00,0x10,0x81,0xE1,0x01,0xF0,0x21, \ 
+0xE1,0x02,0x00,0xC3,0xE1,0x1E,0xFF,0x2F,0xE1,0xF0,0xB5,0x00,0x24,0x07,0x1C, \ +0xC0,0x20,0xFF,0xF7,0xEA,0xFF,0x29,0x49,0x2A,0x4D,0x0A,0x68,0x01,0x3A,0x0A, \ +0x60,0xBA,0x6B,0x00,0x21,0x00,0x2A,0x06,0xD0,0x3A,0x6B,0x01,0x2A,0x39,0xD0, \ +0x02,0x2A,0x37,0xD0,0xB9,0x63,0x07,0xE0,0x3A,0x6B,0x00,0x2A,0x32,0xD0,0x7A, \ +0x6B,0x00,0x2A,0x03,0xD0,0x79,0x63,0x03,0x21,0x39,0x63,0x2B,0xE0,0x39,0x63, \ +0xF9,0x6A,0x1D,0x4B,0x8E,0x00,0x9A,0x59,0x00,0x2A,0x05,0xD0,0x51,0x6A,0x0F, \ +0x62,0x57,0x62,0x79,0x62,0x3A,0x62,0x1E,0xE0,0x9F,0x51,0x3F,0x62,0x7F,0x62, \ +0x17,0x4A,0x3B,0x6C,0x16,0x68,0x33,0x43,0x13,0x60,0x2A,0x68,0x15,0x4B,0x00, \ +0x2A,0x02,0xD1,0x2F,0x60,0x19,0x60,0x0F,0xE0,0x1E,0x68,0xB1,0x42,0x0C,0xD2, \ +0x19,0x60,0xD3,0x6B,0x8B,0x42,0x08,0xD9,0xD1,0x6A,0x8B,0x42,0x04,0xD0,0x0E, \ +0x49,0x12,0x6C,0x0B,0x68,0x1A,0x43,0x0A,0x60,0x2F,0x60,0xFF,0xF7,0xA2,0xFF, \ +0x0B,0x48,0x00,0x68,0x29,0x68,0x88,0x42,0x04,0xD0,0x0A,0x48,0x00,0x68,0x00, \ +0x28,0x00,0xD1,0x01,0x24,0x20,0x1C,0xF0,0xBD,0x24,0x03,0x00,0x02,0x08,0x03, \ +0x00,0x02,0x3C,0x0A,0x00,0x02,0x18,0x03,0x00,0x02,0x20,0x03,0x00,0x02,0x1C, \ +0x03,0x00,0x02,0x04,0x03,0x00,0x02,0x14,0x03,0x00,0x02,0x00,0xA0,0x00,0x47, \ +0x00,0x00,0xA0,0xE3,0x00,0x10,0x0F,0xE1,0x20,0x10,0x81,0xE3,0xF3,0x40,0x2D, \ +0xE9,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x2C,0x30,0x9F,0xE5,0x00,0x00, \ +0x93,0xE5,0x28,0x20,0x9F,0xE5,0x00,0x10,0x92,0xE5,0x08,0xD0,0x80,0xE5,0x00, \ +0x40,0xA0,0xE3,0x00,0x00,0x51,0xE3,0x02,0x00,0x00,0x0A,0x1C,0x10,0x90,0xE5, \ +0x00,0x40,0x82,0xE5,0x18,0x10,0x80,0xE5,0x00,0x40,0x83,0xE5,0x02,0xFF,0xFF, \ +0xEA,0x04,0x03,0x00,0x02,0x8C,0x03,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60, \ +0x41,0x60,0xF7,0x46,0x00,0x00,0xB8,0x03,0x00,0x02,0x90,0xB5,0x07,0x1C,0xC0, \ +0x20,0xFF,0xF7,0x4D,0xFF,0xB9,0x69,0x00,0x24,0x00,0x29,0x16,0xD0,0x3A,0x69, \ +0xBA,0x42,0x04,0xD1,0x0A,0x68,0xBA,0x42,0x0F,0xD1,0x0C,0x60,0x0D,0xE0,0x79, \ +0x69,0x51,0x61,0x39,0x69,0x7A,0x69,0x11,0x61,0xB9,0x69,0x0A,0x68,0xBA,0x42, \ +0x04,0xD1,0x3A,0x69,0x91,0x61,0x39,0x69,0xBA,0x69,0x11,0x60,0xBC,0x61,0xFF, \ +0xF7,0x30,0xFF,0x20,0x1C,0x90,0xBD,0xB0,0xB5,0x07,0x1C,0xC4,0x6E,0xC0,0x20, \ +0xFF,0xF7,0x28,0xFF,0xB9,0x6E,0x00,0x29,0x38,0xD0,0x00,0x2C,0x36,0xD0,0x21, \ +0x68,0x1C,0x4B,0x99,0x42,0x32,0xD1,0x00,0x25,0xBD,0x66,0x39,0x6F,0xB9,0x42, \ +0x01,0xD1,0x25,0x61,0x06,0xE0,0x21,0x61,0x79,0x6F,0x3A,0x6F,0x51,0x67,0x39, \ +0x6F,0x7A,0x6F,0x11,0x67,0x61,0x69,0x01,0x39,0x61,0x61,0x39,0x6B,0x07,0x29, \ +0x10,0xD1,0xFA,0x1D,0x79,0x32,0x51,0x60,0x10,0x49,0x0A,0x68,0x01,0x32,0x0A, \ +0x60,0xFF,0xF7,0x02,0xFF,0x38,0x1C,0xFF,0xF7,0x0F,0xFF,0x00,0x28,0x04,0xD0, \ +0xFF,0xF7,0x75,0xFF,0x01,0xE0,0xFF,0xF7,0xF8,0xFE,0x78,0x6E,0x00,0x28,0x04, \ +0xD0,0xF8,0x1D,0x45,0x30,0xFF,0xF7,0x9F,0xFF,0x00,0xE0,0xFD,0x64,0xC0,0x20, \ +0xFF,0xF7,0xEC,0xFE,0xFF,0xF7,0xEA,0xFE,0xB0,0xBD,0x00,0x00,0x4E,0x44,0x56, \ +0x44,0x24,0x03,0x00,0x02,0x80,0xB5,0x07,0x1C,0xC0,0x20,0xFF,0xF7,0xDF,0xFE, \ +0x39,0x68,0x00,0x29,0x27,0xD0,0xBA,0x69,0x00,0x2A,0x24,0xD1,0x20,0x29,0x01, \ +0xD9,0x1F,0x21,0x00,0xE0,0x01,0x39,0x11,0x4A,0x89,0x00,0x12,0x68,0x89,0x18, \ +0x10,0x4A,0x12,0x68,0x91,0x42,0x07,0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x0E, \ +0x4A,0x89,0x10,0x12,0x68,0x89,0x00,0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0, \ +0x3A,0x61,0x0A,0x68,0x52,0x69,0x7A,0x61,0x17,0x61,0x0A,0x68,0x57,0x61,0xB9, \ +0x61,0x03,0xE0,0x3F,0x61,0xB9,0x61,0x7F,0x61,0x0F,0x60,0xFF,0xF7,0xB2,0xFE, \ +0x00,0x20,0x80,0xBD,0x9C,0x03,0x00,0x02,0x98,0x03,0x00,0x02,0x94,0x03,0x00, \ +0x02,0xF0,0xB5,0x05,0x1C,0xC0,0x20,0xFF,0xF7,0xA5,0xFE,0x67,0x49,0x67,0x4C, \ 
+0x0A,0x68,0x67,0x4F,0x01,0x3A,0x0A,0x60,0xAA,0x6B,0x00,0x2A,0x74,0xD0,0x00, \ +0x26,0xAE,0x63,0xEA,0x6A,0x2B,0x6A,0xAB,0x42,0x26,0xD0,0x69,0x6A,0x59,0x62, \ +0x29,0x6A,0x6B,0x6A,0x19,0x62,0x91,0x00,0x5F,0x4A,0x53,0x58,0xAB,0x42,0x11, \ +0xD1,0x2B,0x6A,0x53,0x50,0x5D,0x49,0x0B,0x68,0x00,0x2B,0x02,0xD0,0x2E,0x6C, \ +0xB3,0x43,0x0B,0x60,0x5B,0x49,0x0B,0x68,0x9D,0x42,0x04,0xD1,0x5A,0x4B,0x1B, \ +0x68,0x9B,0x00,0xD2,0x58,0x0A,0x60,0xFF,0xF7,0x78,0xFE,0x55,0x49,0x38,0x68, \ +0x09,0x68,0x88,0x42,0x60,0xD0,0x20,0x68,0x00,0x28,0x5E,0xD1,0x95,0xE0,0x00, \ +0x26,0x4E,0x4B,0x92,0x00,0x9E,0x50,0x2A,0x6C,0xD3,0x43,0x50,0x4A,0x16,0x68, \ +0x33,0x40,0x13,0x60,0x4B,0x4A,0x12,0x68,0x00,0x2A,0x03,0xD0,0x2E,0x6C,0xB2, \ +0x43,0x48,0x4E,0x32,0x60,0x1A,0x06,0x12,0x0E,0x02,0xD0,0x49,0x4B,0x9A,0x5C, \ +0x14,0xE0,0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x46,0x4B,0x9A,0x5C,0x08, \ +0x32,0x0C,0xE0,0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x42,0x4B,0x9A,0x5C, \ +0x10,0x32,0x04,0xE0,0x1A,0x0A,0x29,0xD0,0x3F,0x4B,0x9A,0x5C,0x18,0x32,0x3B, \ +0x4B,0x1A,0x60,0x39,0x4A,0x12,0x68,0x95,0x42,0x4D,0xD1,0x38,0x4E,0x35,0x4B, \ +0x32,0x68,0x36,0x4D,0x92,0x00,0x9A,0x58,0x2A,0x60,0x33,0x4A,0x12,0x68,0x00, \ +0x2A,0x42,0xD0,0x0E,0x1C,0x09,0x68,0x01,0x31,0x31,0x60,0xFF,0xF7,0x2B,0xFE, \ +0xC0,0x20,0xFF,0xF7,0x28,0xFE,0x00,0xE0,0x42,0xE0,0x31,0x68,0x01,0x39,0x31, \ +0x60,0x2A,0x49,0x0A,0x68,0x11,0x06,0x09,0x0E,0x0D,0xD0,0x2B,0x4B,0x59,0x5C, \ +0x1E,0xE0,0x28,0x4B,0x20,0x21,0x19,0x60,0x25,0x49,0x00,0x26,0x0E,0x60,0xFF, \ +0xF7,0x13,0xFE,0x38,0xE0,0x39,0xE0,0x38,0xE0,0x13,0x0A,0x19,0x06,0x09,0x0E, \ +0x03,0xD0,0x22,0x4B,0x59,0x5C,0x08,0x31,0x0B,0xE0,0x1B,0x0A,0x19,0x06,0x09, \ +0x0E,0x03,0xD0,0x1E,0x4B,0x59,0x5C,0x10,0x31,0x03,0xE0,0x19,0x0A,0x1C,0x4B, \ +0x59,0x5C,0x18,0x31,0x15,0x4B,0x89,0x00,0x59,0x58,0x17,0x4E,0xCB,0x6B,0x36, \ +0x68,0xB3,0x42,0x05,0xD8,0x29,0x60,0x09,0x6C,0x11,0x4B,0xC9,0x43,0x11,0x40, \ +0x19,0x60,0xFF,0xF7,0xEC,0xFD,0x0F,0x4A,0x38,0x68,0x11,0x68,0x88,0x42,0x0F, \ +0xD0,0x20,0x68,0x00,0x28,0x0C,0xD1,0x09,0xE0,0xFF,0xF7,0xE1,0xFD,0x0A,0x49, \ +0x38,0x68,0x09,0x68,0x88,0x42,0x04,0xD0,0x20,0x68,0x00,0x28,0x01,0xD1,0xFF, \ +0xF7,0x51,0xFE,0xF0,0xBD,0x24,0x03,0x00,0x02,0x14,0x03,0x00,0x02,0x04,0x03, \ +0x00,0x02,0x3C,0x0A,0x00,0x02,0x1C,0x03,0x00,0x02,0x08,0x03,0x00,0x02,0x20, \ +0x03,0x00,0x02,0x18,0x03,0x00,0x02,0x3C,0x09,0x00,0x02,0x02,0x48,0x00,0x21, \ +0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xC0,0x03,0x00,0x02,0x02,0x48,0x00, \ +0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xC8,0x03,0x00,0x02,0x02,0x48, \ +0x00,0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xD0,0x03,0x00,0x02,0x02, \ +0x48,0x00,0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xD8,0x03,0x00,0x02, \ +0x4B,0x08,0x02,0x1C,0x02,0xD1,0x00,0xF0,0xBD,0xF8,0x52,0x00,0x9A,0x42,0xFC, \ +0xD9,0x00,0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08, \ +0x91,0x42,0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0xF7, \ +0x46,0x00,0x00,0xB0,0xB5,0x0D,0x48,0x42,0x6E,0x03,0x6E,0x97,0x00,0xC1,0x1F, \ +0x75,0x39,0x9C,0x00,0x0C,0x59,0xCD,0x59,0x2C,0x19,0xCC,0x51,0x59,0x1E,0x36, \ +0x23,0x00,0x29,0x01,0x66,0x03,0xDA,0x51,0x1E,0x41,0x66,0x03,0x66,0x03,0xE0, \ +0x51,0x1E,0x41,0x66,0x00,0xD5,0x43,0x66,0x60,0x00,0x40,0x08,0xB0,0xBD,0x5C, \ +0x04,0x00,0x02,0x80,0xB5,0x09,0x49,0x17,0x22,0x0A,0x66,0x36,0x22,0x4A,0x66, \ +0x07,0x4A,0x00,0x21,0x03,0x0C,0x1F,0x18,0x8B,0x00,0xD7,0x50,0x05,0x4B,0x01, \ +0x31,0x58,0x43,0x05,0x4B,0xC0,0x18,0x37,0x29,0xF4,0xDB,0x80,0xBD,0x5C,0x04, \ +0x00,0x02,0xE0,0x03,0x00,0x02,0xCD,0x0D,0x01,0x00,0xE1,0x19,0xD6,0x66,0xCB, \ 
+0x17,0x59,0x40,0xC9,0x1A,0xC2,0x17,0x50,0x40,0x80,0x1A,0x0C,0xB4,0x4B,0x08, \ +0x02,0x1C,0x02,0xD1,0x00,0xF0,0x64,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9,0x00, \ +0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91,0x42, \ +0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0x0C,0xBC,0x5A, \ +0x40,0x50,0x40,0x80,0x1A,0x59,0x40,0xC9,0x1A,0x70,0x47,0x43,0x1A,0x93,0x42, \ +0x30,0xD3,0x84,0x46,0x8B,0x07,0x07,0xD0,0x52,0x1E,0x29,0xD3,0x0B,0x78,0x03, \ +0x70,0x40,0x1C,0x49,0x1C,0x8B,0x07,0xF7,0xD1,0x83,0x07,0x17,0xD1,0x10,0x3A, \ +0x05,0xD3,0xB0,0xB4,0xB8,0xC9,0xB8,0xC0,0x10,0x3A,0xFB,0xD2,0xB0,0xBC,0x0C, \ +0x32,0x0F,0xD3,0x08,0xC9,0x08,0xC0,0x12,0x1F,0xFB,0xD2,0x0A,0xE0,0x08,0xC9, \ +0x03,0x70,0x1B,0x0A,0x43,0x70,0x1B,0x0A,0x83,0x70,0x1B,0x0A,0xC3,0x70,0x00, \ +0x1D,0x12,0x1F,0xF4,0xD2,0xD2,0x1C,0x05,0xD3,0x0B,0x78,0x03,0x70,0x49,0x1C, \ +0x40,0x1C,0x52,0x1E,0xF9,0xD2,0x60,0x46,0xF7,0x46,0x03,0x1C,0x0B,0x43,0x13, \ +0x43,0x9B,0x07,0x04,0xD1,0x12,0x1F,0x8B,0x58,0x83,0x50,0xFB,0xD1,0xF7,0x46, \ +0x52,0x1E,0x8B,0x5C,0x83,0x54,0xFB,0xD1,0xF7,0x46,0x00,0x00,0x00,0x47,0x08, \ +0x47,0x10,0x47,0x18,0x47,0x20,0x47,0x28,0x47,0x30,0x47,0x38,0x47,0x78,0x47, \ +0x00,0x00,0x2C,0xC0,0x9F,0xE5,0xFF,0x7F,0x8C,0xE8,0x3C,0xC0,0x8C,0xE2,0x0C, \ +0x00,0x8F,0xE2,0x3C,0x10,0x4C,0xE2,0x04,0xE0,0x4E,0xE2,0x00,0xE0,0x8C,0xE5, \ +0x0D,0xED,0xFF,0xEA,0x20,0x00,0x00,0x80,0x44,0x69,0x76,0x69,0x64,0x65,0x20, \ +0x62,0x79,0x20,0x7A,0x65,0x72,0x6F,0x00,0x00,0xC4,0x04,0x00,0x02,0x78,0x47, \ +0x00,0x00,0x01,0xE0,0x8E,0xE3,0x04,0x00,0xA0,0xE3,0x00,0x00,0x90,0xE5,0xFF, \ +0x04,0x00,0xE2,0xEA,0x04,0x50,0xE3,0x01,0x00,0xA0,0x03,0x00,0x00,0xA0,0x13, \ +0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F, \ +0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x05,0x0B,0x40,0xE2,0x1B,0x0B, \ +0x50,0xE3,0x01,0x00,0xA0,0x33,0x00,0x00,0xA0,0x23,0x1E,0xFF,0x2F,0xE1,0x78, \ +0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00, \ +0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E, \ +0xE3,0x00,0x00,0xA0,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0, \ +0x8E,0xE3,0x00,0x00,0x8F,0xE2,0x1E,0xFF,0x2F,0xE1,0x55,0x6E,0x6B,0x6E,0x6F, \ +0x77,0x6E,0x20,0x45,0x72,0x72,0x6F,0x72,0x00,0x00,0x00,0x00,0x00,0x02,0x00, \ +0x42,0x72,0x61,0x6E,0x63,0x68,0x20,0x54,0x68,0x72,0x6F,0x75,0x67,0x68,0x20, \ +0x5A,0x65,0x72,0x6F,0x00,0x01,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69, \ +0x6E,0x65,0x64,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E, \ +0x00,0x00,0x00,0x02,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65, \ +0x64,0x20,0x53,0x57,0x49,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69, \ +0x6F,0x6E,0x00,0x00,0x00,0x03,0x00,0x02,0x00,0x50,0x72,0x65,0x66,0x65,0x74, \ +0x63,0x68,0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x04,0x00,0x02,0x00,0x44, \ +0x61,0x74,0x61,0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x05,0x00,0x02,0x00, \ +0x41,0x64,0x64,0x72,0x65,0x73,0x73,0x20,0x45,0x78,0x63,0x65,0x70,0x74,0x69, \ +0x6F,0x6E,0x00,0x00,0x00,0x06,0x00,0x02,0x00,0x55,0x6E,0x68,0x61,0x6E,0x64, \ +0x6C,0x65,0x64,0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x07, \ +0x00,0x02,0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65,0x64,0x20,0x46,0x61, \ +0x73,0x74,0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x00,0x00, \ +0x00,0xCC,0x4D,0x00,0x00,0xE4,0x4D,0x00,0x00,0x00,0x4E,0x00,0x00,0x20,0x4E, \ +0x00,0x00,0x34,0x4E,0x00,0x00,0x44,0x4E,0x00,0x00,0x5C,0x4E,0x00,0x00,0x74, \ +0x4E,0x00,0x00,0x28,0x20,0x4F,0xE2,0x00,0x01,0x92,0xE7,0xA5,0xEC,0xFF,0xEA, \ 
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x40,0x2D,0xE9,0x50,0x00,0x9F,0xE5,0x50,0x10,0x9F, \ +0xE5,0x01,0x20,0xA0,0xE1,0x4C,0x40,0x9F,0xE5,0x04,0x20,0x82,0xE0,0x05,0x00, \ +0x00,0xEB,0x44,0x20,0x9F,0xE5,0x44,0x00,0x9F,0xE5,0x00,0x10,0xA0,0xE1,0x02, \ +0x10,0x81,0xE0,0x05,0x00,0x00,0xEB,0x00,0x80,0xBD,0xE8,0x02,0x00,0x51,0xE1, \ +0x04,0x30,0x90,0x34,0x04,0x30,0x81,0x34,0xFB,0xFF,0xFF,0x3A,0x0E,0xF0,0xA0, \ +0xE1,0x00,0x20,0xA0,0xE3,0x01,0x00,0x50,0xE1,0x04,0x20,0x80,0x34,0xFB,0xFF, \ +0xFF,0x3A,0x0E,0xF0,0xA0,0xE1,0x34,0x51,0x00,0x00,0x00,0x00,0x00,0x02,0x04, \ +0x05,0x00,0x00,0xC8,0x06,0x00,0x00,0x04,0x05,0x00,0x02,0x78,0x47,0x00,0x00, \ +0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0, \ +0xE3,0x04,0x10,0x80,0xE5,0x0C,0x10,0x80,0xE5,0x00,0x10,0x90,0xE5,0x08,0x10, \ +0x90,0xE5,0x00,0x00,0xA0,0xE3,0x10,0xFF,0x2F,0xE1,0x00,0x00,0xA0,0xE1,0x00, \ +0x00,0xA0,0xE1,0x04,0x01,0xFF,0x02,0x04,0xFF,0x06,0x38,0xFF,0x08,0x80,0xFF, \ +0x0A,0x03,0xFF,0x0C,0x04,0xFF,0x0E,0x00,0xFF,0x10,0x00,0xFF,0x12,0xA2,0xFF, \ +0x14,0xC0,0xFF,0x16,0x0B,0xFF,0x18,0x00,0xFF,0x1A,0x00,0xFF,0x1C,0x00,0xFF, \ +0x1E,0x5C,0xFF,0x20,0x82,0xFF,0x22,0x1E,0xFF,0x24,0xC7,0xFF,0x26,0x17,0xFF, \ +0x28,0x6A,0xFF,0x2A,0x12,0xFF,0x2C,0x00,0xFF,0x2E,0x0C,0xFF,0x2C,0x01,0xFF, \ +0x2E,0x10,0xFF,0x2C,0x02,0xFF,0x2E,0x14,0xFF,0x2C,0x03,0xFF,0x2E,0x18,0xFF, \ +0x2C,0x04,0xFF,0x2E,0x1C,0xFF,0x2C,0x05,0xFF,0x2E,0x20,0xFF,0x2C,0x06,0xFF, \ +0x2E,0x24,0xFF,0x2C,0x07,0xFF,0x2E,0x28,0xFF,0x2C,0x08,0xFF,0x2E,0x2E,0xFF, \ +0x2C,0x09,0xFF,0x2E,0x34,0xFF,0x2C,0x0A,0xFF,0x2E,0x38,0xFF,0x2C,0x0B,0xFF, \ +0x2E,0x3C,0xFF,0x2C,0x0C,0xFF,0x2E,0x3F,0xFF,0x2C,0x0D,0xFF,0x2E,0x43,0xFF, \ +0x2C,0x0E,0xFF,0x2E,0x46,0xFF,0x2C,0x0F,0xFF,0x2E,0x48,0xFF,0x2C,0x10,0xFF, \ +0x2E,0x4B,0xFF,0x2C,0x11,0xFF,0x2E,0x50,0xFF,0x2C,0x12,0xFF,0x2E,0x55,0xFF, \ +0x2C,0x13,0xFF,0x2E,0x5A,0xFF,0x2C,0x14,0xFF,0x2E,0x63,0xFF,0x2C,0x15,0xFF, \ +0x2E,0x6D,0xFF,0x2C,0x16,0xFF,0x2E,0x76,0xFF,0x2C,0x17,0xFF,0x2E,0x7F,0xFF, \ +0x2C,0x18,0xFF,0x2E,0x7F,0xFF,0x2C,0x19,0xFF,0x2E,0x7F,0xFF,0x2C,0x1A,0xFF, \ +0x2E,0x7F,0xFF,0x2C,0x1B,0xFF,0x2E,0x7F,0xFF,0x2C,0x1C,0xFF,0x2E,0x7F,0xFF, \ +0x2C,0x1D,0xFF,0x2E,0x7F,0xFF,0x2C,0x1E,0xFF,0x2E,0x7F,0xFF,0x2C,0x1F,0xFF, \ +0x2E,0x7F,0xFF,0x30,0x2D,0xFF,0x32,0x20,0xFF,0x34,0x82,0xFF,0x36,0x18,0xFF, \ +0x38,0x79,0xFF,0x3A,0xCA,0xFF,0x3C,0x24,0xFF,0x3E,0xF0,0xFF,0x40,0x00,0xFF, \ +0x42,0x00,0xFF,0x44,0x00,0xFF,0x46,0x7F,0xFF,0x48,0x8B,0xFF,0x4A,0x0F,0xFF, \ +0x4C,0x06,0xFF,0x4E,0x0A,0xFF,0x50,0x0F,0xFF,0x52,0x20,0xFF,0x54,0x20,0xFF, \ +0x56,0x10,0xFF,0x58,0x10,0xFF,0x5A,0x20,0xFF,0x5C,0xEE,0xFF,0x5E,0x1E,0xFF, \ +0x60,0x26,0xFF,0x62,0x5B,0xFF,0x04,0x00,0xFF,0x58,0x00,0x00,0x00,0x85,0x21, \ +0x00,0x00,0x18,0x17,0x00,0x00,0x0A,0x08,0x00,0x00,0x58,0x00,0x00,0x00,0x9D, \ +0x21,0x00,0x00,0x90,0x1F,0xAC,0x1F,0xB6,0x1F,0x00,0x20,0x0A,0x20,0x14,0x20, \ +0x1E,0x20,0x28,0x20,0x32,0x20,0x3C,0x20,0x86,0x20,0x90,0x20,0x9A,0x20,0xA4, \ +0x20,0xBC,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x0A,0x00, \ +0x90,0x00,0x30,0x00,0x08,0x06,0x07,0x00,0x82,0x84,0x8B,0x96,0x09,0x04,0x02, \ 
+0x41,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x11,0x11, \ +0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x04,0xAC,0x6C,0x32,0x70, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x64,0x00,0x30,0x75,0x64,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x03,0x00, \ +0x04,0xAC,0x6C,0x32,0x70,0x55,0x4E,0x48,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x00,0x00,0x00,0xFA,0x00,0x00, \ +0x00,0xFA,0x00,0x00,0x2A,0x09,0x2A,0x09,0x08,0x00,0x40,0x00,0x08,0x08,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x41,0x54,0x4D,0x45,0x4C,0x5F, \ +0x41,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF, \ +0xFF,0xFF,0x01,0x00,0x05,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x01, \ +0x00,0x00,0x5A,0x00,0x2C,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x03,0x06,0x0C,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0xFF,0x07,0xFF,0x07,0xFF,0x1F,0x00,0x06,0x00,0x1E,0x00,0x20, \ +0xFF,0x3F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x12,0x01,0x10,0x01,0xFE,0x01,0x00,0x08,0xEB,0x03,0x04,0x76,0x00, \ +0x01,0x00,0x00,0x00,0x01,0x09,0x02,0x20,0x00,0x01,0x01,0x00,0x80,0xFA,0x09, \ +0x04,0x00,0x00,0x02,0xFF,0x00,0xFF,0x00,0x07,0x05,0x85,0x02,0x40,0x00,0x00, \ +0x07,0x05,0x02,0x02,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xAA,0xAA,0x03,0x00, \ +0x00,0x00,0xAA,0xAA,0x03,0x00,0x00,0xF8,0x37,0x81,0xF3,0x80,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x43,0x6F,0x70,0x79,0x72,0x69,0x67,0x68,0x74,0x20,0x28,0x63, \ +0x29,0x20,0x31,0x39,0x39,0x36,0x2D,0x32,0x30,0x30,0x30,0x20,0x45,0x78,0x70, \ +0x72,0x65,0x73,0x73,0x20,0x4C,0x6F,0x67,0x69,0x63,0x20,0x49,0x6E,0x63,0x2E, \ 
+0x20,0x2A,0x20,0x54,0x68,0x72,0x65,0x61,0x64,0x58,0x20,0x54,0x48,0x55,0x4D, \ +0x42,0x2D,0x46,0x2F,0x41,0x52,0x4D,0x20,0x56,0x65,0x72,0x73,0x69,0x6F,0x6E, \ +0x20,0x47,0x33,0x2E,0x30,0x66,0x2E,0x33,0x2E,0x30,0x62,0x20,0x2A,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xF0,0xF0,0xF0,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x47,0x2D,0x47,0x42, \ +0x2D,0x47,0x4C,0x2D,0x4D,0x2D,0x44,0x2D,0x44,0x4C,0x2D,0x4B,0x4D,0x4C,0x2D, \ +0x43,0x4D,0x52,0x2D,0x48,0x4D,0x52,0x2D,0x4D,0x4C,0x32,0x2D,0x47,0x5A,0x2D, \ +0x4B,0x48,0x32,0x2D,0x43,0x4D,0x2D,0x52,0x50,0x2D,0x54,0x43,0x2D,0x4E,0x48, \ +0x2D,0x54,0x44,0x2D,0x41,0x50,0x2D,0x48,0x41,0x2D,0x47,0x46,0x2D,0x44,0x44, \ +0x2D,0x41,0x54,0x2D,0x4D,0x46,0x2D,0x4D,0x53,0x2D,0x44,0x57,0x2D,0x55,0x53, \ +0x41,0x2D,0x43,0x41,0x2D,0x53,0x44,0x2D,0x53,0x44,0x53,0x55,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x01,0x00,0x00,0x00,0x85,0x8E,0xD7,0x66,0x09,0x8C,0xD3,0xD5,0xF5,0xD8,0x09, \ +0x0A,0xFB,0x87,0x1F,0xBF,0x67,0xF7,0x8D,0xCB,0x69,0x07,0xF7,0xBD,0x34,0x12, \ +0x3D,0x50,0xC8,0x84,0x4F,0x7F,0xA3,0x02,0xDE,0x61,0xAE,0x8D,0x40,0xA7,0xE8, \ +0xBD,0x24,0x7A,0xEA,0xA2,0x15,0x51,0x57,0x2E,0xE6,0xBB,0xFF,0x7F,0xD5,0xF6, \ +0x7A,0x83,0x2A,0x63,0x77,0x1D,0x86,0x13,0x7C,0x2E,0x9F,0xE1,0x05,0x57,0x5F, \ +0x69,0x2E,0x6B,0x93,0x87,0x6E,0x9A,0xA1,0x50,0x94,0x0E,0x8B,0x72,0xAE,0x55, \ +0xCC,0xC5,0xB1,0x8A,0x0A,0xB1,0xD7,0x72,0x6F,0x85,0x17,0x5C,0x22,0xD0,0xA3, \ +0xFD,0xC4,0x51,0x61,0x98,0xED,0x89,0x9F,0x82,0xDB,0xF1,0x9D,0xC5,0xFB,0xBC, \ +0x89,0xC1,0xEE,0x83,0x59,0xB1,0x59,0x63,0x30,0x5C,0x50,0xCC,0xC9,0x5A,0xBC, \ +0x9C,0xF9,0x30,0xE2,0x2F,0x42,0x5E,0xF6,0x39,0xD2,0x7B,0x15,0x75,0xFB,0x58, \ +0xC1,0x40,0x3E,0x9A,0xEB,0x27,0xD9,0xA2,0x82,0xC5,0xC2,0xD6,0x69,0x05,0xB3, \ +0x30,0x8E,0xED,0xD2,0xDD,0x83,0x10,0x41,0xA4,0x1D,0x1F,0x15,0xE2,0x60,0x56, \ +0xC5,0x2F,0xF3,0x04,0x99,0xEF,0x8E,0xE1,0x08,0x32,0x59,0x4A,0x4C,0xED,0x7B, \ +0x5B,0x40,0xFC,0x02,0x81,0xD9,0x41,0x53,0x51,0xFA,0x3D,0xFF,0xAC,0xB5,0x6C, \ +0x09,0x6D,0x1D,0xCC,0xB3,0x2B,0xFF,0x15,0x3D,0x25,0x17,0x00,0x00,0x00,0x36, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00} + +#define FW_I3863_EXTERNAL { \ +0x00,0xB5,0x0D,0x49,0x00,0x20,0x08,0x70,0x0C,0x48,0x81,0x79,0xC0,0x79,0x00, \ +0x02,0x08,0x43,0x68,0x28,0x04,0xD0,0x03,0x21,0x0B,0x20,0xFC,0xF7,0x4D,0xFE, \ +0x00,0xBD,0x08,0x21,0x0B,0x20,0xFC,0xF7,0x48,0xFE,0x00,0xF0,0x16,0xFA,0x01, \ +0x21,0x0B,0x20,0xFC,0xF7,0x42,0xFE,0x00,0xBD,0x00,0x00,0x63,0x01,0x00,0x02, \ +0xAC,0x08,0x00,0x02,0x00,0xB5,0x21,0x48,0x01,0x78,0x08,0x29,0x34,0xD2,0x02, \ +0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x30,0x04,0x08,0x0C,0x30,0x10, \ +0x14,0x18,0x81,0x78,0x1B,0x4A,0x89,0x18,0x12,0xE0,0x81,0x78,0x1A,0x4A,0x89, \ 
+0x18,0x0E,0xE0,0x81,0x78,0x19,0x4A,0x89,0x18,0x0A,0xE0,0x81,0x78,0x18,0x4A, \ +0x89,0x18,0x06,0xE0,0x81,0x78,0x17,0x4A,0x89,0x18,0x02,0xE0,0x81,0x78,0x16, \ +0x4A,0x89,0x18,0x00,0x29,0x17,0xD0,0x43,0x78,0x00,0x22,0x00,0x2B,0x07,0xD9, \ +0x83,0x18,0x1B,0x79,0x01,0x32,0x0B,0x70,0x43,0x78,0x01,0x31,0x93,0x42,0xF7, \ +0xD8,0xFA,0xF7,0xCF,0xF9,0x01,0x21,0x01,0x20,0xFC,0xF7,0x03,0xFE,0x00,0xBD, \ +0x04,0x21,0x01,0x20,0xFC,0xF7,0xFE,0xFD,0x00,0xBD,0x03,0x21,0x01,0x20,0xFC, \ +0xF7,0xF9,0xFD,0x00,0xBD,0xB4,0x08,0x00,0x02,0x14,0x01,0x00,0x02,0x74,0x00, \ +0x00,0x02,0xD0,0x00,0x00,0x02,0x98,0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x18, \ +0x00,0x00,0x02,0xF0,0xB5,0x34,0x4E,0x01,0x25,0xF0,0x1D,0x69,0x30,0x33,0x4A, \ +0xC5,0x72,0xD2,0x7D,0x2F,0x4F,0x31,0x2A,0x23,0xD0,0x07,0xDC,0x10,0x2A,0x1C, \ +0xD0,0x20,0x2A,0x1C,0xD0,0x30,0x2A,0x08,0xD1,0x02,0x21,0x06,0xE0,0x32,0x2A, \ +0x1A,0xD0,0x40,0x2A,0x1A,0xD0,0x41,0x2A,0x00,0xD1,0x06,0x21,0x49,0x00,0x28, \ +0x4A,0xFC,0x1D,0x19,0x34,0x51,0x5A,0xE2,0x79,0x01,0x3A,0x2B,0x1C,0x93,0x40, \ +0x19,0x40,0x0E,0xD1,0x03,0x21,0x03,0x20,0xFC,0xF7,0xC0,0xFD,0xF0,0xBD,0x00, \ +0x21,0xED,0xE7,0x01,0x21,0xEB,0xE7,0x03,0x21,0xE9,0xE7,0x04,0x21,0xE7,0xE7, \ +0x05,0x21,0xE5,0xE7,0x04,0x21,0x41,0x70,0x06,0x22,0x38,0x1C,0x1B,0x49,0xFA, \ +0xF7,0xE8,0xFB,0x20,0x22,0xB8,0x1D,0x19,0x49,0xFA,0xF7,0xE3,0xFB,0xA0,0x7B, \ +0x18,0x49,0x60,0x36,0x48,0x71,0x00,0x20,0x70,0x73,0xA0,0x79,0x16,0x49,0x20, \ +0x23,0x88,0x74,0xE0,0x79,0xC8,0x74,0x38,0x8D,0x88,0x82,0x78,0x8D,0xC8,0x82, \ +0xB8,0x8D,0x08,0x83,0x12,0x48,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78,0x10, \ +0x23,0x19,0x43,0x01,0x70,0x0D,0x49,0x08,0x8B,0x81,0x02,0x03,0x20,0xFC,0xF7, \ +0x4B,0xF9,0x08,0x21,0x03,0x20,0x35,0x73,0xFC,0xF7,0x84,0xFD,0xF0,0xBD,0x00, \ +0x00,0xB4,0x08,0x00,0x02,0x04,0x05,0x00,0x02,0x18,0x00,0x00,0x02,0x7C,0x01, \ +0x00,0x02,0x0C,0x01,0x00,0x02,0xEC,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0xD0, \ +0x00,0x00,0x02,0x63,0x01,0x00,0x02,0xF0,0xB5,0x33,0x49,0x31,0x4F,0xC9,0x7D, \ +0x31,0x29,0x23,0xD0,0x07,0xDC,0x10,0x29,0x1C,0xD0,0x20,0x29,0x1C,0xD0,0x30, \ +0x29,0x08,0xD1,0x02,0x20,0x06,0xE0,0x32,0x29,0x1A,0xD0,0x40,0x29,0x1A,0xD0, \ +0x41,0x29,0x00,0xD1,0x06,0x20,0x40,0x00,0x29,0x49,0xFC,0x1D,0x19,0x34,0x08, \ +0x5A,0xE1,0x79,0x4A,0x1E,0x01,0x21,0x91,0x40,0x08,0x40,0x0E,0xD1,0x03,0x21, \ +0x04,0x20,0xFC,0xF7,0x4B,0xFD,0xF0,0xBD,0x00,0x20,0xED,0xE7,0x01,0x20,0xEB, \ +0xE7,0x03,0x20,0xE9,0xE7,0x04,0x20,0xE7,0xE7,0x05,0x20,0xE5,0xE7,0x1D,0x4D, \ +0xA0,0x79,0xE9,0x1D,0x39,0x31,0x88,0x70,0xE9,0x1D,0x38,0x1C,0x06,0x22,0x35, \ +0x31,0xFA,0xF7,0x6F,0xFB,0x20,0x22,0xB8,0x1D,0xE9,0x1D,0x15,0x31,0xFA,0xF7, \ +0x69,0xFB,0xA0,0x7A,0x15,0x4E,0x00,0x25,0x70,0x71,0x15,0x48,0x45,0x73,0xA1, \ +0x79,0x10,0x30,0x02,0x29,0x02,0xD1,0x03,0x21,0xC1,0x72,0x03,0xE0,0x01,0x29, \ +0x0F,0xD1,0x04,0x21,0xC1,0x72,0x08,0x21,0x04,0x20,0xFC,0xF7,0x1A,0xFD,0xE0, \ +0x79,0xFA,0xF7,0x59,0xFA,0x38,0x8D,0x81,0x02,0x05,0x20,0xFC,0xF7,0xD4,0xF8, \ +0xB5,0x70,0xF0,0xBD,0x03,0x21,0x04,0x20,0xFC,0xF7,0x0C,0xFD,0xF0,0xBD,0x00, \ +0x00,0xB4,0x08,0x00,0x02,0x18,0x00,0x00,0x02,0x7C,0x01,0x00,0x02,0xD0,0x00, \ +0x00,0x02,0x14,0x01,0x00,0x02,0x64,0x05,0x00,0x02,0xF0,0xB5,0x33,0x4D,0x10, \ +0x23,0x29,0x78,0x30,0x4C,0x99,0x43,0x29,0x70,0x29,0x78,0x20,0x23,0x99,0x43, \ +0x29,0x70,0x2F,0x49,0xC9,0x7D,0x31,0x29,0x24,0xD0,0x07,0xDC,0x10,0x29,0x1D, \ +0xD0,0x20,0x29,0x1D,0xD0,0x30,0x29,0x08,0xD1,0x02,0x20,0x06,0xE0,0x32,0x29, \ +0x1B,0xD0,0x40,0x29,0x1B,0xD0,0x41,0x29,0x00,0xD1,0x06,0x20,0x40,0x00,0x25, \ +0x49,0xE7,0x1D,0x19,0x37,0x09,0x5A,0xF8,0x79,0x01,0x26,0x42,0x1E,0x33,0x1C, \ 
+0x93,0x40,0x19,0x40,0x0E,0xD1,0x03,0x21,0x05,0x20,0xFC,0xF7,0xCF,0xFC,0xF0, \ +0xBD,0x00,0x20,0xEC,0xE7,0x01,0x20,0xEA,0xE7,0x03,0x20,0xE8,0xE7,0x04,0x20, \ +0xE6,0xE7,0x05,0x20,0xE4,0xE7,0xFA,0xF7,0x04,0xFA,0x39,0x7A,0x18,0x48,0x41, \ +0x71,0xB9,0x79,0x01,0x29,0x1D,0xD1,0x16,0x49,0x07,0x1C,0xCA,0x1D,0x39,0x32, \ +0x96,0x70,0x42,0x79,0xA0,0x1D,0x1C,0x31,0x04,0x1C,0xFA,0xF7,0xEB,0xFA,0x7A, \ +0x79,0x11,0x49,0x20,0x1C,0xFA,0xF7,0xE6,0xFA,0x10,0x49,0x04,0x20,0xC8,0x72, \ +0x00,0xF0,0x1F,0xF8,0x28,0x78,0x20,0x23,0x18,0x43,0x28,0x70,0x28,0x78,0x10, \ +0x23,0x18,0x43,0x28,0x70,0xF0,0xBD,0x03,0x21,0x05,0x20,0xFC,0xF7,0x9A,0xFC, \ +0xF0,0xBD,0x00,0x00,0xB4,0x08,0x00,0x02,0x63,0x01,0x00,0x02,0x18,0x00,0x00, \ +0x02,0x7C,0x01,0x00,0x02,0x14,0x01,0x00,0x02,0xD0,0x00,0x00,0x02,0xAC,0x00, \ +0x00,0x02,0x74,0x05,0x00,0x02,0xF0,0xB5,0xFA,0xF7,0xCD,0xF8,0x23,0x4F,0x02, \ +0x21,0xB9,0x73,0x00,0x21,0xF9,0x73,0x38,0x74,0x01,0x0A,0x79,0x74,0x01,0x0C, \ +0x00,0x0E,0xB9,0x74,0xF8,0x74,0x1E,0x4E,0xF8,0x1D,0x07,0x30,0x06,0x22,0xF1, \ +0x1D,0x35,0x31,0xFA,0xF7,0xAB,0xFA,0x1B,0x4D,0x01,0x24,0xF8,0x1D,0x29,0x30, \ +0x6C,0x70,0x04,0x71,0x19,0x48,0xF9,0x1D,0x42,0x79,0xF0,0x1D,0x15,0x30,0x0D, \ +0x31,0xFA,0xF7,0x9D,0xFA,0x16,0x4F,0x82,0x20,0x38,0x74,0x84,0x20,0x78,0x74, \ +0x8B,0x20,0xB8,0x74,0x96,0x20,0xF8,0x74,0x12,0x48,0x20,0x23,0x01,0x78,0x19, \ +0x43,0x01,0x70,0x01,0x78,0x10,0x23,0x19,0x43,0x01,0x70,0x0C,0x48,0x84,0x70, \ +0xFB,0xF7,0xE5,0xFF,0x0D,0x48,0x04,0x21,0x44,0x73,0x05,0x20,0xE8,0x72,0xF8, \ +0x1D,0x09,0x30,0x00,0xF0,0x7E,0xFF,0x01,0x21,0x05,0x20,0xFC,0xF7,0x40,0xFC, \ +0xF0,0xBD,0x00,0x00,0x98,0x00,0x00,0x02,0xD0,0x00,0x00,0x02,0x74,0x05,0x00, \ +0x02,0x14,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x63,0x01,0x00,0x02,0x64,0x05, \ +0x00,0x02,0xF0,0xB5,0x2E,0x4F,0x2E,0x4E,0xFC,0x1D,0x59,0x34,0xF8,0x1D,0xF1, \ +0x1D,0x0D,0x31,0x09,0x30,0x05,0x1C,0x22,0x79,0xFA,0xF7,0x5D,0xFA,0x22,0x79, \ +0x29,0x49,0x28,0x1C,0xFA,0xF7,0x58,0xFA,0x20,0x79,0x28,0x49,0x48,0x71,0xB9, \ +0x7B,0x27,0x48,0x00,0x29,0x03,0xD1,0x01,0x70,0xF1,0x72,0x41,0x70,0x08,0xE0, \ +0x01,0x21,0x01,0x70,0xF1,0x72,0xF9,0x7B,0xC2,0x1D,0x39,0x32,0x41,0x70,0xF9, \ +0x78,0x11,0x70,0x00,0x25,0x0D,0x20,0x68,0x43,0x1E,0x49,0x0D,0x22,0x41,0x18, \ +0xC0,0x19,0x30,0x30,0x0C,0x31,0xFA,0xF7,0x39,0xFA,0x01,0x35,0x04,0x2D,0xF2, \ +0xD3,0xE0,0x88,0x30,0x80,0x60,0x79,0x00,0x28,0x03,0xD0,0x15,0x48,0x01,0x21, \ +0x41,0x72,0x02,0xE0,0x13,0x48,0x00,0x21,0x41,0x72,0x78,0x7B,0x0E,0x28,0x03, \ +0xDC,0x01,0x28,0x01,0xDB,0x11,0x49,0x08,0x75,0xB8,0x78,0x10,0x49,0x08,0x74, \ +0x38,0x7B,0x01,0x28,0x02,0xD1,0x0B,0x4A,0xD0,0x70,0x02,0xE0,0x09,0x4A,0x00, \ +0x20,0xD0,0x70,0xF8,0x88,0x08,0x81,0xB8,0x88,0x48,0x81,0x38,0x78,0x06,0x49, \ +0xC8,0x70,0xF9,0xF7,0x9C,0xFF,0xF0,0xBD,0x00,0x00,0xB4,0x08,0x00,0x02,0x98, \ +0x00,0x00,0x02,0xEC,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x30,0x00,0x00,0x02, \ +0x18,0x00,0x00,0x02,0xD0,0x00,0x00,0x02,0x80,0xB5,0x17,0x4A,0x17,0x49,0x0A, \ +0x60,0x17,0x49,0x0F,0x68,0x0A,0x2F,0x17,0xD2,0x01,0xA3,0xDB,0x5D,0x5B,0x00, \ +0x9F,0x44,0x13,0x04,0x07,0x0A,0x0D,0x0F,0x13,0x13,0x13,0x12,0xFF,0x20,0x01, \ +0x30,0x0B,0xE0,0xFF,0x20,0x11,0x30,0x08,0xE0,0xFF,0x20,0x21,0x30,0x05,0xE0, \ +0x0B,0x20,0x03,0xE0,0xFF,0x20,0x31,0x30,0x00,0xE0,0x00,0x20,0x01,0x23,0x8B, \ +0x60,0xC9,0x68,0x00,0xF0,0x6C,0xF8,0x04,0x21,0x0C,0x20,0xFC,0xF7,0x9B,0xFB, \ +0x0F,0x20,0x00,0x06,0x81,0x88,0x04,0x4B,0x19,0x43,0x81,0x80,0x80,0xBD,0x60, \ +0x08,0x00,0x02,0x70,0x02,0x00,0x02,0x84,0x02,0x00,0x02,0x08,0x08,0x00,0x00, \ +0x80,0xB4,0xF3,0x22,0x12,0x05,0x93,0x68,0x06,0x23,0xD3,0x60,0x17,0x69,0xBB, \ 
+0x08,0xFC,0xD3,0x2D,0x23,0x01,0x3B,0xFD,0xD1,0x93,0x68,0x47,0x09,0x08,0x23, \ +0x1F,0x40,0x02,0x23,0x3B,0x43,0xD3,0x60,0x17,0x69,0xBB,0x08,0xFC,0xD3,0x00, \ +0x06,0x00,0x0E,0xD0,0x60,0x10,0x69,0x80,0x08,0xFC,0xD3,0x90,0x68,0x08,0x06, \ +0x00,0x0E,0xD0,0x60,0x10,0x69,0x80,0x08,0xFC,0xD3,0x90,0x68,0x80,0xBC,0xF7, \ +0x46,0x07,0x20,0x40,0x06,0xC1,0x69,0x01,0x23,0x5B,0x03,0x19,0x43,0xC1,0x61, \ +0xF7,0x46,0xF3,0x20,0x00,0x05,0x41,0x68,0x0F,0x23,0x1B,0x04,0x99,0x43,0x41, \ +0x60,0x41,0x68,0x03,0x23,0x1B,0x04,0x19,0x43,0x41,0x60,0xF7,0x46,0xF0,0xB5, \ +0x14,0x1C,0x0D,0x1C,0x07,0x1C,0xFD,0xF7,0x4D,0xFA,0x00,0x26,0x00,0x2F,0x10, \ +0xD9,0xFD,0xF7,0x9A,0xFA,0x40,0x08,0xFB,0xD2,0xB4,0x20,0x01,0x38,0xFD,0xD1, \ +0xA9,0x5D,0xA0,0x19,0xFF,0xF7,0xB1,0xFF,0xB4,0x20,0x01,0x38,0xFD,0xD1,0x01, \ +0x36,0xBE,0x42,0xEE,0xD3,0xFD,0xF7,0x4F,0xFA,0x00,0x20,0xF0,0xBD,0xF8,0xB5, \ +0x02,0x1C,0x31,0x4B,0x08,0x1C,0x19,0x68,0x2E,0x4F,0x00,0x29,0x59,0xD0,0x99, \ +0x68,0x01,0x29,0x56,0xD1,0x00,0x24,0x0F,0x21,0x09,0x06,0x8C,0x80,0x8C,0x81, \ +0x0C,0x88,0x09,0x89,0x19,0x68,0x27,0x4B,0xDD,0x1D,0xDE,0x1D,0x9C,0x1D,0x22, \ +0x33,0x1F,0x36,0x0D,0x35,0x09,0x29,0x00,0x93,0x1D,0xD1,0x0B,0x22,0x04,0x20, \ +0x00,0x99,0xFF,0xF7,0xC0,0xFF,0xFF,0x22,0x06,0x20,0x39,0x1C,0x01,0x32,0xFF, \ +0xF7,0xBA,0xFF,0xFF,0x22,0x0E,0x20,0x21,0x1C,0x11,0x32,0xFF,0xF7,0xB4,0xFF, \ +0xFF,0x22,0x0E,0x20,0x29,0x1C,0x21,0x32,0xFF,0xF7,0xAE,0xFF,0xFF,0x22,0x01, \ +0x20,0x31,0x1C,0x31,0x32,0xFF,0xF7,0xA8,0xFF,0x02,0xE0,0x39,0x1C,0xFF,0xF7, \ +0xA4,0xFF,0xFD,0xF7,0xF5,0xF9,0x06,0x22,0xFF,0x21,0x38,0x1C,0x01,0x31,0xFD, \ +0xF7,0x56,0xFA,0x04,0x22,0x0B,0x21,0x00,0x98,0xFD,0xF7,0x51,0xFA,0x0E,0x22, \ +0xFF,0x21,0x20,0x1C,0x11,0x31,0xFD,0xF7,0x4B,0xFA,0x0E,0x22,0xFF,0x21,0x28, \ +0x1C,0x21,0x31,0xFD,0xF7,0x45,0xFA,0x01,0x22,0xFF,0x21,0x30,0x1C,0x31,0x31, \ +0xFD,0xF7,0x3F,0xFA,0xFD,0xF7,0xEE,0xF9,0x02,0x4B,0x00,0x24,0x1C,0x60,0xF8, \ +0xBD,0x60,0x08,0x00,0x02,0x84,0x02,0x00,0x02,0x80,0xB4,0x0C,0x4F,0x00,0x28, \ +0x05,0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20, \ +0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x06,0x49, \ +0x00,0x68,0x10,0x23,0x08,0x73,0x38,0x7B,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00, \ +0x00,0x70,0x03,0x00,0x0D,0xE8,0x01,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4, \ +0x0B,0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x03, \ +0x2B,0x03,0xD0,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73, \ +0x04,0x49,0x00,0x20,0x08,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF3, \ +0xE7,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0C,0x4F,0x00,0x28, \ +0x05,0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20, \ +0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x00,0x21, \ +0x01,0x73,0x01,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00, \ +0x00,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x00,0xB5,0x00,0x28,0x05,0xD1, \ +0x00,0x29,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x03,0xD0,0x06,0x49,0x20, \ +0x20,0x08,0x73,0x00,0xBD,0xFD,0xF7,0x87,0xF8,0x04,0x49,0x00,0x20,0x08,0x80, \ +0x03,0x49,0x08,0x80,0x00,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0xF8,0x01,0x00, \ +0x02,0xFA,0x01,0x00,0x02,0xB0,0xB4,0x20,0x25,0x00,0x28,0x18,0x4C,0x03,0xD1, \ +0x02,0x2A,0x01,0xD1,0x01,0x2B,0x02,0xD1,0x25,0x73,0xB0,0xBC,0xF7,0x46,0x08, \ +0x06,0x00,0x0E,0x02,0x2B,0x05,0xD1,0x00,0x28,0x01,0xD0,0x25,0x73,0xF5,0xE7, \ +0x00,0x27,0x10,0xE0,0x03,0x2B,0x0E,0xD1,0x00,0x28,0x08,0xD0,0x02,0x28,0x08, \ +0xD0,0x80,0x28,0x04,0xD0,0x85,0x28,0x11,0xD1,0x0A,0x48,0x07,0x88,0x03,0xE0, \ 
+0x00,0x27,0x01,0xE0,0x09,0x48,0x07,0x88,0x80,0x20,0x20,0x73,0x08,0x48,0x00, \ +0x21,0x07,0x73,0x01,0x73,0x20,0x7B,0x10,0x23,0x18,0x43,0x20,0x73,0xD7,0xE7, \ +0x25,0x73,0xD5,0xE7,0x70,0x03,0x00,0x0D,0xFA,0x01,0x00,0x02,0xF8,0x01,0x00, \ +0x02,0x30,0x03,0x00,0x0D,0x90,0xB5,0x20,0x27,0x00,0x28,0x0C,0x4C,0x03,0xD1, \ +0x00,0x2A,0x01,0xD1,0x03,0x2B,0x01,0xD0,0x27,0x73,0x90,0xBD,0x09,0x06,0x09, \ +0x0E,0x01,0x20,0x02,0x29,0x04,0xD0,0x85,0x29,0x07,0xD1,0x05,0x49,0x08,0x80, \ +0x01,0xE0,0x05,0x49,0x08,0x80,0xFD,0xF7,0x25,0xF8,0x90,0xBD,0x27,0x73,0x90, \ +0xBD,0x70,0x03,0x00,0x0D,0xFA,0x01,0x00,0x02,0xF8,0x01,0x00,0x02,0x80,0xB4, \ +0x0D,0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01, \ +0x2B,0x03,0xD1,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73, \ +0x06,0x48,0x01,0x68,0x06,0x48,0x01,0x73,0x00,0x21,0x01,0x73,0x38,0x7B,0x10, \ +0x23,0x18,0x43,0x38,0x73,0xF0,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0xF4,0x01, \ +0x00,0x02,0x30,0x03,0x00,0x0D,0x90,0xB5,0x17,0x1C,0x02,0x28,0x22,0x4C,0x04, \ +0xD1,0x09,0x29,0x37,0xD1,0x21,0x48,0x20,0x60,0x34,0xE0,0x03,0x28,0x07,0xD1, \ +0x74,0x20,0xF9,0xF7,0x59,0xFE,0x1E,0x49,0x88,0x70,0x88,0x1C,0x20,0x60,0x2A, \ +0xE0,0x04,0x28,0x02,0xD1,0x1B,0x48,0x20,0x60,0x25,0xE0,0x05,0x28,0x02,0xD1, \ +0x1A,0x48,0x20,0x60,0x20,0xE0,0x00,0x28,0x1E,0xD1,0x09,0x29,0x1A,0xD2,0x01, \ +0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x16,0x04,0x07,0x0A,0x16,0x0D,0x10,0x13, \ +0x16,0x00,0x13,0x48,0x20,0x60,0x10,0xE0,0x12,0x48,0x20,0x60,0x0D,0xE0,0x12, \ +0x48,0x20,0x60,0x0A,0xE0,0x11,0x48,0x20,0x60,0x07,0xE0,0x11,0x48,0x20,0x60, \ +0x04,0xE0,0x10,0x48,0x20,0x60,0x01,0xE0,0x10,0x48,0x20,0x60,0x20,0x68,0x0F, \ +0x49,0xC0,0x19,0x20,0x60,0x80,0x20,0x08,0x73,0x00,0xF0,0x5B,0xF8,0x90,0xBD, \ +0x2C,0x02,0x00,0x02,0x60,0x08,0x00,0x02,0xAC,0x08,0x00,0x02,0xA0,0x02,0x00, \ +0x02,0x38,0x09,0x00,0x02,0x14,0x01,0x00,0x02,0x74,0x00,0x00,0x02,0xD0,0x00, \ +0x00,0x02,0x98,0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x20, \ +0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB4,0x00,0x22,0x02,0x28,0x17,0x4B, \ +0x10,0xD1,0x17,0x48,0x87,0x79,0xC0,0x79,0x00,0x02,0x07,0x43,0x08,0x29,0x07, \ +0xD0,0x14,0x48,0xC7,0x60,0x0C,0x27,0x1F,0x70,0x5A,0x70,0x9A,0x70,0x01,0x60, \ +0x82,0x60,0x80,0xBC,0xF7,0x46,0x06,0x28,0x0E,0xD1,0x0F,0x48,0x00,0x68,0x01, \ +0x28,0xF7,0xD1,0xFF,0x20,0x0D,0x21,0x09,0x06,0x43,0x30,0x88,0x80,0x0C,0x49, \ +0x01,0x20,0x08,0x71,0x0B,0x49,0x08,0x60,0xEC,0xE7,0x18,0x79,0x18,0x70,0x5A, \ +0x70,0x9A,0x70,0x18,0x78,0x0A,0x28,0xE5,0xD1,0x07,0x48,0x02,0x70,0xE2,0xE7, \ +0x00,0x00,0xAC,0x08,0x00,0x02,0xEC,0x01,0x00,0x02,0x84,0x02,0x00,0x02,0x74, \ +0x01,0x00,0x02,0xE0,0x03,0x00,0x0D,0x78,0x01,0x00,0x02,0x63,0x01,0x00,0x02, \ +0xB0,0xB4,0x1B,0x4A,0x1B,0x48,0x11,0x68,0x07,0x68,0x1B,0x4B,0xB9,0x42,0x12, \ +0xD1,0x1A,0x7B,0x19,0x1C,0xD2,0x09,0x09,0xD2,0x00,0x68,0x40,0x07,0x03,0xD0, \ +0xE0,0x20,0x08,0x73,0xB0,0xBC,0xF7,0x46,0xD0,0x20,0x08,0x73,0xFA,0xE7,0x08, \ +0x7B,0x20,0x23,0x18,0x43,0x08,0x73,0xF5,0xE7,0x00,0x68,0x11,0x68,0x40,0x1A, \ +0x08,0x28,0x03,0xD9,0x08,0x20,0x0E,0x4F,0x0E,0x49,0x02,0xE0,0x00,0x28,0xFA, \ +0xD1,0x09,0xE0,0x0D,0x68,0x2C,0x78,0x01,0x35,0x0D,0x60,0x3C,0x73,0x14,0x68, \ +0x01,0x34,0x14,0x60,0x01,0x38,0xF5,0xD1,0x19,0x7B,0x18,0x1C,0x10,0x23,0x19, \ +0x43,0x01,0x73,0xD9,0xE7,0x00,0x00,0x24,0x02,0x00,0x02,0x28,0x02,0x00,0x02, \ +0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x2C,0x02,0x00,0x02,0x90,0xB5,0x20, \ +0x27,0x00,0x28,0x09,0x4C,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x01,0xD0, \ +0x27,0x73,0x90,0xBD,0x08,0x06,0x00,0x0E,0x01,0xD0,0x80,0x28,0x01,0xD1,0x27, \ 
+0x73,0x90,0xBD,0xFC,0xF7,0x02,0xFF,0x90,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D, \ +0x0D,0x48,0x01,0x2B,0x02,0xD1,0x20,0x21,0x01,0x73,0xF7,0x46,0x80,0x21,0x01, \ +0x73,0x0A,0x49,0x01,0x22,0x0A,0x73,0x00,0x22,0x0A,0x73,0x02,0x23,0x0B,0x73, \ +0x0A,0x73,0x07,0x4A,0x10,0x23,0x12,0x68,0x0A,0x73,0x06,0x4A,0x12,0x68,0x0A, \ +0x73,0x01,0x7B,0x19,0x43,0x01,0x73,0xF7,0x46,0x00,0x00,0x70,0x03,0x00,0x0D, \ +0x30,0x03,0x00,0x0D,0x34,0x02,0x00,0x02,0x38,0x02,0x00,0x02,0x00,0x21,0x02, \ +0x28,0x10,0xD1,0x08,0x1C,0x0B,0x49,0x04,0x22,0x08,0x71,0x0B,0x49,0x0A,0x70, \ +0x08,0x70,0x0A,0x4A,0x82,0x21,0x11,0x71,0x0A,0x49,0x08,0x60,0x0A,0x49,0x08, \ +0x60,0x0A,0x49,0x08,0x80,0xF7,0x46,0x85,0x28,0xFC,0xD1,0x08,0x4A,0x01,0x20, \ +0x10,0x60,0x08,0x48,0x01,0x80,0xF7,0x46,0x70,0x03,0x00,0x0D,0xC0,0x03,0x00, \ +0x0D,0xB0,0x03,0x00,0x0D,0x10,0x02,0x00,0x02,0x0C,0x02,0x00,0x02,0xF8,0x01, \ +0x00,0x02,0x44,0x02,0x00,0x02,0xFA,0x01,0x00,0x02,0x90,0xB5,0x0F,0x1C,0x19, \ +0x1C,0x29,0x4B,0x14,0x1C,0x27,0x4A,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C, \ +0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xB0,0xFD,0x90,0xBD,0x24,0x4B,0x98,0x42,0x06, \ +0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xC6,0xFD,0x90,0xBD, \ +0x81,0x23,0x1B,0x02,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21, \ +0x1C,0xFF,0xF7,0xD7,0xFD,0x90,0xBD,0xFF,0x23,0x0C,0x33,0x98,0x42,0x06,0xD1, \ +0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xEA,0xFD,0x90,0xBD,0x41, \ +0x23,0x5B,0x02,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C, \ +0xFF,0xF7,0xFB,0xFD,0x90,0xBD,0x0F,0x4B,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A, \ +0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x2D,0xFE,0x90,0xBD,0x01,0x23,0xDB,0x03, \ +0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x44, \ +0xFE,0x90,0xBD,0x06,0x49,0x20,0x20,0x08,0x73,0x90,0xBD,0x00,0x00,0xDC,0x01, \ +0x00,0x02,0x08,0x80,0x00,0x00,0x0A,0x81,0x00,0x00,0x03,0x02,0x00,0x00,0x70, \ +0x03,0x00,0x0D,0x80,0xB5,0x00,0x20,0x1C,0x49,0x0F,0x27,0x3F,0x06,0x08,0x70, \ +0xB8,0x80,0x39,0x88,0xB8,0x81,0x38,0x89,0x19,0x48,0xC0,0x69,0x01,0x20,0x80, \ +0x06,0xC1,0x68,0xC0,0x6B,0x18,0x49,0x17,0x48,0x00,0x68,0x02,0x20,0xC8,0x61, \ +0x17,0x48,0x01,0x7A,0x0C,0x30,0x08,0x29,0x19,0xD2,0x01,0xA3,0x5B,0x5C,0x5B, \ +0x00,0x9F,0x44,0x15,0x03,0x06,0x09,0x0C,0x15,0x0F,0x12,0x00,0xF0,0x9A,0xF8, \ +0x80,0xBD,0x00,0xF0,0x19,0xF9,0x80,0xBD,0x00,0xF0,0x1E,0xF8,0x80,0xBD,0x00, \ +0xF0,0x75,0xF9,0x80,0xBD,0x00,0xF0,0xEA,0xF9,0x80,0xBD,0x00,0xF0,0x3B,0xFA, \ +0x80,0xBD,0x02,0x21,0x0A,0x20,0xFB,0xF7,0xF4,0xFF,0x06,0x48,0xB8,0x80,0x80, \ +0xBD,0x00,0x00,0x63,0x01,0x00,0x02,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04, \ +0x84,0x05,0x00,0x02,0xAC,0x08,0x00,0x02,0x08,0x08,0x00,0x00,0xB0,0xB5,0x82, \ +0xB0,0x69,0x46,0x08,0x22,0xF9,0xF7,0x18,0xFE,0x00,0xA8,0x40,0x78,0x34,0x4C, \ +0x80,0x08,0x80,0x00,0x0F,0x27,0x3F,0x06,0x00,0x28,0x06,0xD0,0x03,0x21,0x0A, \ +0x20,0xFB,0xF7,0xD1,0xFF,0xBC,0x80,0x02,0xB0,0xB0,0xBD,0x00,0xA8,0x00,0x78, \ +0x0E,0x28,0x03,0xDC,0x00,0xA8,0x00,0x78,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A, \ +0x20,0xFB,0xF7,0xC2,0xFF,0xBC,0x80,0xEF,0xE7,0x00,0xA8,0xC0,0x78,0x03,0x28, \ +0x05,0xDD,0x03,0x21,0x0A,0x20,0xFB,0xF7,0xB8,0xFF,0xBC,0x80,0xE5,0xE7,0x08, \ +0x21,0x0A,0x20,0xFB,0xF7,0xB2,0xFF,0xBC,0x80,0x00,0x27,0x1F,0x4D,0x05,0x24, \ +0x00,0xA8,0x00,0x78,0xF9,0xF7,0x54,0xFD,0x00,0x28,0x00,0xD1,0x6C,0x70,0xF9, \ +0xF7,0xCC,0xFB,0x00,0xA9,0x89,0x78,0x3E,0x20,0xF9,0xF7,0x26,0xFC,0x00,0xA9, \ +0xC9,0x78,0x0A,0x20,0xF9,0xF7,0x21,0xFC,0x00,0xA8,0x40,0x78,0x01,0x28,0x04, \ +0xD1,0x3C,0x21,0x12,0x20,0xF9,0xF7,0x19,0xFC,0x03,0xE0,0x38,0x21,0x12,0x20, \ 
+0xF9,0xF7,0x14,0xFC,0x01,0xA8,0x00,0x78,0x01,0x28,0x05,0xD1,0x00,0xA9,0xC9, \ +0x78,0x0A,0x20,0xF9,0xF7,0x0B,0xFC,0x06,0xE0,0x00,0xA8,0xC0,0x78,0x10,0x21, \ +0x01,0x43,0x0A,0x20,0xF9,0xF7,0x03,0xFC,0x00,0xF0,0x1F,0xF9,0x01,0x37,0x02, \ +0x2F,0xC9,0xD3,0x01,0x21,0x0A,0x20,0xFB,0xF7,0x74,0xFF,0xA2,0xE7,0x00,0x00, \ +0x08,0x08,0x00,0x00,0xAC,0x08,0x00,0x02,0xB0,0xB5,0x82,0xB0,0x69,0x46,0x08, \ +0x22,0xF9,0xF7,0xA2,0xFD,0x00,0xA8,0x40,0x78,0x3A,0x4C,0x80,0x08,0x80,0x00, \ +0x0F,0x27,0x3F,0x06,0x00,0x28,0x06,0xD0,0x03,0x21,0x0A,0x20,0xFB,0xF7,0x5B, \ +0xFF,0xBC,0x80,0x02,0xB0,0xB0,0xBD,0x00,0xA8,0x00,0x78,0x0E,0x28,0x03,0xDC, \ +0x00,0xA8,0x00,0x78,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0x4C, \ +0xFF,0xBC,0x80,0xEF,0xE7,0x00,0xA8,0xC0,0x78,0x03,0x28,0x05,0xDD,0x03,0x21, \ +0x0A,0x20,0xFB,0xF7,0x42,0xFF,0xBC,0x80,0xE5,0xE7,0x08,0x21,0x0A,0x20,0xFB, \ +0xF7,0x3C,0xFF,0xBC,0x80,0x00,0x27,0x25,0x4C,0x05,0x25,0x00,0xA8,0x00,0x78, \ +0xF9,0xF7,0xDE,0xFC,0x00,0x28,0x00,0xD1,0x65,0x70,0xF9,0xF7,0x56,0xFB,0x00, \ +0xA9,0x89,0x78,0x3E,0x20,0xF9,0xF7,0xB0,0xFB,0x00,0xA9,0xC9,0x78,0x0A,0x20, \ +0xF9,0xF7,0xAB,0xFB,0x00,0xA8,0x40,0x78,0x01,0x28,0x04,0xD1,0xA4,0x21,0x12, \ +0x20,0xF9,0xF7,0xA3,0xFB,0x03,0xE0,0xA0,0x21,0x12,0x20,0xF9,0xF7,0x9E,0xFB, \ +0x01,0xA8,0x00,0x78,0x01,0x28,0x05,0xD1,0x00,0xA9,0xC9,0x78,0x0A,0x20,0xF9, \ +0xF7,0x95,0xFB,0x06,0xE0,0x00,0xA8,0xC0,0x78,0x10,0x21,0x01,0x43,0x0A,0x20, \ +0xF9,0xF7,0x8D,0xFB,0x00,0x21,0x40,0x20,0xF9,0xF7,0x89,0xFB,0x00,0xF0,0xA5, \ +0xF8,0x01,0x37,0x02,0x2F,0xC5,0xD3,0xE1,0x20,0x80,0x00,0x01,0x38,0xFD,0xD1, \ +0x74,0x20,0xF9,0xF7,0x89,0xFB,0xA0,0x70,0x01,0x21,0x0A,0x20,0xFB,0xF7,0xF2, \ +0xFE,0x96,0xE7,0x00,0x00,0x08,0x08,0x00,0x00,0xAC,0x08,0x00,0x02,0x98,0xB5, \ +0x69,0x46,0x04,0x22,0xF9,0xF7,0x21,0xFD,0x00,0xA8,0x40,0x78,0x27,0x4C,0x80, \ +0x08,0x80,0x00,0x0F,0x27,0x3F,0x06,0x00,0x28,0x05,0xD0,0x03,0x21,0x0A,0x20, \ +0xFB,0xF7,0xDA,0xFE,0xBC,0x80,0x98,0xBD,0x00,0xA8,0x00,0x78,0x0E,0x28,0x03, \ +0xDC,0x00,0xA8,0x00,0x78,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7, \ +0xCC,0xFE,0xBC,0x80,0xF0,0xE7,0x08,0x21,0x0A,0x20,0xFB,0xF7,0xC6,0xFE,0x18, \ +0x49,0x00,0x20,0x08,0x70,0x18,0x49,0x08,0x60,0x48,0x60,0xBC,0x80,0xF9,0xF7, \ +0x33,0xFB,0x00,0xA8,0x00,0x78,0xF9,0xF7,0x63,0xFC,0x00,0x28,0x02,0xD1,0x13, \ +0x49,0x05,0x20,0x48,0x70,0xF9,0xF7,0xD9,0xFA,0x11,0x48,0x01,0x21,0x81,0x73, \ +0xBA,0x88,0x10,0x4B,0x1A,0x43,0xBA,0x80,0x80,0x30,0xC1,0x61,0x00,0xA8,0x40, \ +0x78,0x01,0x28,0x04,0xD1,0xC3,0x21,0x14,0x20,0xF9,0xF7,0x27,0xFB,0x03,0xE0, \ +0xC1,0x21,0x14,0x20,0xF9,0xF7,0x22,0xFB,0x01,0x21,0x0A,0x20,0xFB,0xF7,0x98, \ +0xFE,0xBD,0xE7,0x00,0x00,0x08,0x08,0x00,0x00,0x63,0x01,0x00,0x02,0xA0,0x02, \ +0x00,0x02,0xAC,0x08,0x00,0x02,0x04,0x05,0x00,0x02,0x40,0x40,0x00,0x00,0x90, \ +0xB5,0x82,0xB0,0x0F,0x24,0x24,0x06,0x12,0x4F,0x69,0x46,0x08,0x22,0xA7,0x80, \ +0xF9,0xF7,0xBA,0xFC,0x00,0xA8,0x00,0x78,0x0E,0x28,0x03,0xDC,0x00,0xA8,0x00, \ +0x78,0x00,0x28,0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0x74,0xFE,0xA7,0x80, \ +0x02,0xB0,0x90,0xBD,0x00,0xA8,0x00,0x78,0xF9,0xF7,0x17,0xFC,0x00,0x28,0x02, \ +0xD1,0x06,0x49,0x05,0x20,0x48,0x70,0xF9,0xF7,0x8D,0xFA,0x01,0x21,0x0A,0x20, \ +0xFB,0xF7,0x62,0xFE,0xED,0xE7,0x00,0x00,0x08,0x08,0x00,0x00,0xAC,0x08,0x00, \ +0x02,0x08,0x48,0xC1,0x69,0x03,0x0C,0x19,0x43,0xC1,0x61,0xC1,0x69,0x04,0x23, \ +0x19,0x43,0xC1,0x61,0xC1,0x69,0x01,0x23,0x19,0x43,0xC1,0x61,0xC1,0x69,0x08, \ +0x23,0x19,0x43,0xC1,0x61,0xF7,0x46,0x40,0x00,0x00,0x04,0x98,0xB5,0x19,0x4C, \ +0xA0,0x6A,0x00,0x06,0x00,0x0E,0xA0,0x62,0xE7,0x6A,0x68,0x20,0xF9,0xF7,0xD0, \ 
+0xFA,0x00,0x90,0x00,0x98,0x10,0x23,0x18,0x40,0x00,0x09,0x00,0x06,0x00,0x99, \ +0x00,0x0E,0x49,0x09,0x40,0x18,0x00,0x90,0x00,0x98,0x00,0x28,0x13,0xD0,0x01, \ +0x28,0x13,0xD0,0x02,0x28,0x13,0xD0,0x03,0x28,0x08,0xD1,0x0B,0x20,0x78,0x43, \ +0xC7,0x08,0x6A,0x20,0xF9,0xF7,0xB6,0xFA,0x00,0x0A,0x00,0xD3,0x01,0x3F,0xA1, \ +0x6A,0xF8,0x02,0x08,0x43,0xA0,0x62,0x98,0xBD,0xFF,0x08,0xF8,0xE7,0xBF,0x08, \ +0xF6,0xE7,0x0B,0x20,0x78,0x43,0x07,0x09,0xF2,0xE7,0x40,0x00,0x00,0x04,0x90, \ +0xB5,0x82,0xB0,0x69,0x46,0x08,0x22,0xF9,0xF7,0x46,0xFC,0x00,0xA8,0x00,0x78, \ +0x0F,0x27,0x3F,0x06,0x0E,0x28,0x23,0x4C,0x03,0xDC,0x00,0xA8,0x00,0x78,0x00, \ +0x28,0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0xFD,0xFD,0xBC,0x80,0x02,0xB0, \ +0x90,0xBD,0xBC,0x80,0x00,0xA8,0x00,0x78,0xF9,0xF7,0x9F,0xFB,0xF9,0xF7,0x1A, \ +0xFA,0x00,0xA9,0x89,0x78,0x3E,0x20,0xF9,0xF7,0x74,0xFA,0x00,0xA9,0xC9,0x78, \ +0x0A,0x20,0xF9,0xF7,0x6F,0xFA,0x00,0xA8,0x40,0x78,0x01,0x28,0x04,0xD1,0x2C, \ +0x21,0x12,0x20,0xF9,0xF7,0x67,0xFA,0x03,0xE0,0x28,0x21,0x12,0x20,0xF9,0xF7, \ +0x62,0xFA,0x01,0xA8,0x00,0x78,0x01,0x28,0x05,0xD1,0x00,0xA9,0xC9,0x78,0x0A, \ +0x20,0xF9,0xF7,0x59,0xFA,0x06,0xE0,0x00,0xA8,0xC0,0x78,0x10,0x21,0x01,0x43, \ +0x0A,0x20,0xF9,0xF7,0x51,0xFA,0x0C,0x21,0x40,0x20,0xF9,0xF7,0x4D,0xFA,0xFF, \ +0xF7,0x69,0xFF,0x01,0x21,0x0A,0x20,0xFB,0xF7,0xC1,0xFD,0xC3,0xE7,0x08,0x08, \ +0x00,0x00,0xB0,0xB5,0x83,0xB0,0x69,0x46,0x0C,0x22,0xF9,0xF7,0xF2,0xFB,0x02, \ +0xA8,0x00,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x37,0x4C,0x03,0xDC,0x02,0xA8, \ +0x00,0x78,0x00,0x28,0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0xA9,0xFD,0xAC, \ +0x80,0x03,0xB0,0xB0,0xBD,0x02,0xA8,0x00,0x78,0x30,0x4F,0xB8,0x70,0x00,0xA8, \ +0x40,0x78,0x78,0x70,0x00,0xA8,0x00,0x78,0x38,0x70,0x00,0xA8,0x40,0x88,0xB8, \ +0x60,0x01,0x98,0xF8,0x60,0x00,0x20,0xF8,0x70,0x78,0x60,0x38,0x61,0xAC,0x80, \ +0xF9,0xF7,0x06,0xFA,0x02,0xA8,0x00,0x78,0xF9,0xF7,0x36,0xFB,0xF9,0xF7,0xB1, \ +0xF9,0x02,0xA9,0x89,0x78,0x3E,0x20,0xF9,0xF7,0x0B,0xFA,0x02,0xA8,0x40,0x78, \ +0x01,0x28,0x04,0xD1,0x24,0x21,0x12,0x20,0xF9,0xF7,0x03,0xFA,0x03,0xE0,0x20, \ +0x21,0x12,0x20,0xF9,0xF7,0xFE,0xF9,0x02,0xA8,0xC0,0x78,0x01,0x28,0x05,0xD1, \ +0x00,0xA9,0x49,0x78,0x0A,0x20,0xF9,0xF7,0xF5,0xF9,0x06,0xE0,0x00,0xA8,0x40, \ +0x78,0x10,0x21,0x01,0x43,0x0A,0x20,0xF9,0xF7,0xED,0xF9,0x00,0xA9,0x09,0x78, \ +0x01,0x98,0x00,0xF0,0x28,0xF8,0x01,0x98,0x00,0xF0,0x61,0xF8,0x78,0x60,0x79, \ +0x78,0x0A,0x20,0xF9,0xF7,0xE0,0xF9,0xF8,0x78,0xF9,0xF7,0x61,0xFB,0x78,0x68, \ +0xFA,0xF7,0xA6,0xFD,0x09,0x49,0x03,0x20,0xC8,0x61,0x09,0x48,0x09,0x4B,0xA8, \ +0x80,0x01,0x20,0x80,0x06,0x41,0x6A,0xC9,0x18,0x01,0x62,0x01,0x21,0x0A,0x20, \ +0xFB,0xF7,0x45,0xFD,0x9B,0xE7,0x08,0x08,0x00,0x00,0x28,0x09,0x00,0x02,0x84, \ +0x05,0x00,0x02,0x88,0x88,0x00,0x00,0x10,0x27,0x00,0x00,0x00,0x22,0x00,0x28, \ +0x04,0x4B,0x06,0xD9,0x09,0x06,0x09,0x0E,0x19,0x70,0x01,0x33,0x01,0x32,0x82, \ +0x42,0xFA,0xD3,0xF7,0x46,0x00,0x72,0x01,0x02,0x81,0xB0,0x01,0x20,0x80,0x06, \ +0xC1,0x6B,0x00,0xAB,0x19,0x80,0x00,0xA9,0x09,0x88,0x20,0x22,0x0A,0x40,0x0D, \ +0x49,0x0B,0xD0,0x0D,0x4A,0x0E,0x4B,0x82,0x63,0xCA,0x68,0x42,0x63,0x4A,0x78, \ +0x9A,0x5C,0x11,0x23,0x9B,0x02,0x1A,0x43,0x0A,0x4B,0x5A,0x60,0x00,0xAA,0x12, \ +0x88,0x92,0x08,0x06,0xD3,0x0A,0x69,0x01,0x32,0x0A,0x61,0x89,0x68,0x42,0x6A, \ +0x89,0x18,0x01,0x62,0x01,0xB0,0xF7,0x46,0x00,0x00,0x28,0x09,0x00,0x02,0x00, \ +0x72,0x01,0x02,0x6C,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0xF0,0xB5,0x04,0x30, \ +0xC7,0x00,0x19,0x4C,0x00,0x26,0xE6,0x70,0x60,0x78,0x01,0x28,0x15,0xD0,0x02, \ +0x28,0x15,0xD0,0x03,0x28,0x25,0xD1,0x0B,0x20,0x39,0x1C,0xFD,0xF7,0x4D,0xFC, \ 
+0x0D,0x1C,0x79,0x1A,0x0B,0x20,0xFD,0xF7,0x48,0xFC,0x07,0x1C,0x00,0x2D,0x18, \ +0xD9,0x01,0x37,0x04,0x2D,0x13,0xD2,0x01,0x20,0xE0,0x70,0x13,0xE0,0x7F,0x08, \ +0x11,0xE0,0x79,0x00,0x0B,0x20,0x0F,0x1C,0xFD,0xF7,0x38,0xFC,0x0C,0x1C,0x79, \ +0x1A,0x0B,0x20,0xFD,0xF7,0x33,0xFC,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37, \ +0x02,0xE0,0xE6,0x70,0x00,0xE0,0xE6,0x70,0x38,0x04,0x00,0x0C,0xF0,0xBD,0x28, \ +0x09,0x00,0x02,0xB0,0xB4,0x00,0x22,0x00,0x29,0x06,0xDD,0x85,0x5C,0x2B,0x0A, \ +0x00,0xD3,0x14,0x1C,0x01,0x32,0x8A,0x42,0xF8,0xDB,0x00,0x20,0x00,0x29,0x08, \ +0xDD,0x08,0x4A,0x13,0x18,0x1B,0x7C,0x00,0x2B,0x00,0xD0,0x07,0x1C,0x01,0x30, \ +0x88,0x42,0xF7,0xDB,0x05,0x48,0xBC,0x42,0x02,0xDD,0x87,0x72,0xB0,0xBC,0xF7, \ +0x46,0x84,0x72,0xFB,0xE7,0x00,0x00,0x18,0x00,0x00,0x02,0x14,0x01,0x00,0x02, \ +0xF0,0xB5,0x85,0xB0,0x5F,0x48,0x00,0x25,0x00,0x68,0x46,0x68,0x80,0x89,0x29, \ +0x28,0x02,0xDA,0x00,0x20,0x05,0xB0,0xF0,0xBD,0xF0,0x1D,0x09,0x30,0x5A,0x49, \ +0x03,0x90,0x06,0x22,0x04,0x91,0xF9,0xF7,0xB8,0xFA,0x00,0x28,0x01,0xD0,0x00, \ +0x20,0xF1,0xE7,0x20,0x20,0xF1,0x1D,0x19,0x31,0x30,0x5C,0x49,0x78,0x09,0x02, \ +0x08,0x43,0x04,0x04,0x24,0x0C,0x14,0x2C,0x03,0xDB,0x7D,0x23,0xDB,0x00,0x9C, \ +0x42,0x01,0xDD,0x00,0x20,0xE0,0xE7,0x22,0x20,0x30,0x5C,0x80,0x08,0x01,0xD2, \ +0x00,0x20,0xDA,0xE7,0x49,0x48,0x24,0x27,0x01,0x68,0x89,0x89,0x04,0x39,0x24, \ +0x29,0x50,0xDD,0x47,0x49,0x02,0x91,0xF0,0x5D,0x00,0x28,0x09,0xD0,0x01,0x28, \ +0x20,0xD0,0x03,0x28,0x45,0xD1,0xF0,0x19,0x41,0x78,0x01,0x29,0x33,0xD0,0x00, \ +0x20,0xC4,0xE7,0xF5,0x19,0x68,0x78,0x00,0x28,0x05,0xD0,0x3F,0x49,0x49,0x79, \ +0x81,0x42,0x01,0xD0,0x00,0x20,0xBA,0xE7,0x3C,0x49,0xA8,0x1C,0x4A,0x79,0x02, \ +0x99,0xF9,0xF7,0x77,0xFA,0x00,0x28,0x01,0xD0,0x00,0x20,0xB0,0xE7,0x68,0x78, \ +0x01,0x25,0xC0,0x19,0x87,0x1C,0x20,0xE0,0xF0,0x19,0x01,0x90,0x41,0x78,0x05, \ +0x29,0x00,0xDB,0x04,0x21,0x00,0x20,0x00,0x29,0x07,0xDD,0x3A,0x18,0x92,0x19, \ +0x93,0x78,0x6A,0x46,0x13,0x54,0x01,0x30,0x88,0x42,0xF7,0xDB,0x68,0x46,0xFF, \ +0xF7,0x6A,0xFF,0x01,0x98,0x40,0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0,0x29,0x49, \ +0x80,0x78,0x09,0x7D,0x81,0x42,0x01,0xD0,0x00,0x20,0x8B,0xE7,0x03,0x37,0x21, \ +0x48,0x00,0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xB2,0xDC,0x00,0x2D,0x01,0xD1, \ +0x00,0x20,0x80,0xE7,0x05,0x20,0xFA,0xF7,0xF2,0xFF,0x1F,0x48,0x20,0x23,0x01, \ +0x78,0x1F,0x4F,0x19,0x43,0x01,0x70,0x01,0x78,0x10,0x23,0x19,0x43,0x01,0x70, \ +0xF9,0x1D,0x06,0x22,0x07,0x31,0x03,0x98,0xF9,0xF7,0x40,0xFA,0x06,0x22,0x03, \ +0x98,0x04,0x99,0xF9,0xF7,0x3B,0xFA,0x13,0x4D,0xF9,0x1D,0x6A,0x79,0x02,0x98, \ +0x0D,0x31,0xF9,0xF7,0x34,0xFA,0x13,0x4E,0x01,0x20,0xF9,0x1D,0x29,0x31,0x70, \ +0x70,0x08,0x71,0x3C,0x80,0xA8,0x70,0x00,0x21,0x00,0x20,0xF9,0xF7,0x3A,0xF8, \ +0xFA,0xF7,0x84,0xFF,0x0D,0x49,0x01,0x20,0x48,0x73,0x05,0x20,0xF0,0x72,0x01, \ +0x21,0x04,0x20,0xFB,0xF7,0xE3,0xFB,0x01,0x20,0x48,0xE7,0x00,0x00,0x10,0x00, \ +0x00,0x02,0x0C,0x01,0x00,0x02,0xEC,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x18, \ +0x00,0x00,0x02,0x63,0x01,0x00,0x02,0x98,0x00,0x00,0x02,0x74,0x05,0x00,0x02, \ +0x64,0x05,0x00,0x02,0xF0,0xB5,0x84,0xB0,0x4D,0x4F,0x4E,0x49,0x38,0x68,0x04, \ +0x26,0x45,0x68,0x00,0x24,0x06,0x22,0xE8,0x1D,0x09,0x30,0x03,0x91,0xF9,0xF7, \ +0xEA,0xF9,0x00,0x28,0x02,0xD0,0x00,0x20,0x04,0xB0,0xF0,0xBD,0x39,0x68,0x38, \ +0x1C,0x89,0x89,0x29,0x29,0x01,0xDA,0x00,0x20,0xF6,0xE7,0x20,0x22,0xEB,0x1D, \ +0x19,0x33,0xAA,0x5C,0x5B,0x78,0x1B,0x02,0x1A,0x43,0x12,0x04,0x12,0x0C,0x00, \ +0x92,0x14,0x2A,0x04,0xDB,0x7D,0x23,0x00,0x9A,0xDB,0x00,0x9A,0x42,0x01,0xDD, \ +0x00,0x20,0xE3,0xE7,0x22,0x22,0xAA,0x5C,0x52,0x08,0x01,0xD2,0x00,0x20,0xDD, \ 
+0xE7,0x24,0x27,0x04,0x39,0x24,0x29,0x2F,0xDD,0xE8,0x5D,0x00,0x28,0x1B,0xD0, \ +0x01,0x28,0x28,0xD1,0xE8,0x19,0x02,0x90,0x40,0x78,0x05,0x28,0x00,0xDA,0x06, \ +0x1C,0x00,0x20,0x00,0x2E,0x07,0xDD,0x39,0x18,0x49,0x19,0x8A,0x78,0x01,0xA9, \ +0x0A,0x54,0x01,0x30,0xB0,0x42,0xF7,0xDB,0x01,0xA8,0x31,0x1C,0xFF,0xF7,0xB7, \ +0xFE,0x02,0x98,0x40,0x78,0xC0,0x19,0x87,0x1C,0x08,0xE0,0xE8,0x19,0x40,0x78, \ +0x20,0x28,0x01,0xD9,0x00,0x24,0x00,0xE0,0x01,0x24,0xC0,0x19,0x87,0x1C,0x1F, \ +0x48,0x00,0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xD1,0xDC,0x00,0x2C,0x01,0xD1, \ +0x00,0x20,0xA7,0xE7,0x1C,0x4F,0x1C,0x4C,0xF9,0x1D,0x07,0x31,0x06,0x22,0x03, \ +0x98,0xF9,0xF7,0x97,0xF9,0xE0,0x1D,0x15,0x30,0x20,0x22,0xF9,0x1D,0x0D,0x31, \ +0xF9,0xF7,0x90,0xF9,0xE0,0x1D,0x39,0x30,0x81,0x78,0xF8,0x1D,0x29,0x30,0x01, \ +0x71,0x02,0x79,0x13,0x48,0xC1,0x1D,0x69,0x31,0x4A,0x70,0x00,0x9A,0x20,0x23, \ +0x3A,0x80,0x10,0x4A,0x17,0x78,0x3B,0x43,0x13,0x70,0x17,0x78,0x10,0x23,0x3B, \ +0x43,0x13,0x70,0x00,0x22,0x60,0x30,0x42,0x73,0x0C,0x48,0x82,0x70,0x05,0x20, \ +0xC8,0x72,0xFA,0xF7,0x13,0xFF,0x01,0x21,0x04,0x20,0xFB,0xF7,0x33,0xFB,0x01, \ +0x20,0x73,0xE7,0x00,0x00,0x10,0x00,0x00,0x02,0x0C,0x01,0x00,0x02,0x98,0x00, \ +0x00,0x02,0xD0,0x00,0x00,0x02,0x04,0x05,0x00,0x02,0x63,0x01,0x00,0x02,0x14, \ +0x01,0x00,0x02,0x80,0xB4,0x19,0x49,0xC9,0x7D,0x31,0x29,0x19,0xD0,0x07,0xDC, \ +0x10,0x29,0x12,0xD0,0x20,0x29,0x12,0xD0,0x30,0x29,0x08,0xD1,0x02,0x20,0x06, \ +0xE0,0x32,0x29,0x10,0xD0,0x40,0x29,0x10,0xD0,0x41,0x29,0x00,0xD1,0x06,0x20, \ +0x10,0x49,0x40,0x00,0x09,0x5A,0x01,0x20,0x0F,0x4A,0x09,0xE0,0x00,0x20,0xF7, \ +0xE7,0x01,0x20,0xF5,0xE7,0x03,0x20,0xF3,0xE7,0x04,0x20,0xF1,0xE7,0x05,0x20, \ +0xEF,0xE7,0xD3,0x7C,0x01,0x33,0xD3,0x74,0xD3,0x7C,0x0E,0x2B,0x00,0xDD,0xD0, \ +0x74,0xD3,0x7C,0x5F,0x1E,0x03,0x1C,0xBB,0x40,0x0B,0x40,0xF2,0xD0,0x80,0xBC, \ +0xF7,0x46,0x18,0x00,0x00,0x02,0x7C,0x01,0x00,0x02,0xD0,0x00,0x00,0x02} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/fw-r505.h linux.22-ac2/drivers/usb/atmel/fw-r505.h --- linux.vanilla/drivers/usb/atmel/fw-r505.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/fw-r505.h 2003-08-13 21:00:21.000000000 +0100 @@ -0,0 +1,2402 @@ +/**************************************************************************** + * The following firmware has been taken (and reformatted slighly) from the * + * Atmel (atmelwlandriver) driver source. * + * * + * Target: AT76C505 with RFMD radio * + * Version: 0.91.0 #4 * + ****************************************************************************/ + +/**************************************************************************/ +/* */ +/* Copyright (c) 1999-2000 by Atmel Corporation */ +/* */ +/* This software is copyrighted by and is the sole property of Atmel */ +/* Corporation. All rights, title, ownership, or other interests */ +/* in the software remain the property of Atmel Corporation. This */ +/* software may only be used in accordance with the corresponding */ +/* license agreement. Any un-authorized use, duplication, transmission, */ +/* distribution, or disclosure of this software is expressly forbidden. */ +/* */ +/* This Copyright notice may not be removed or modified without prior */ +/* written consent of Atmel Corporation. */ +/* */ +/* Atmel Corporation, Inc. reserves the right to modify this software */ +/* without notice. */ +/* */ +/* Atmel Corporation. 
*/ +/* 2325 Orchard Parkway literature@atmel.com */ +/* San Jose, CA 95131 http://www.atmel.com */ +/* */ +/**************************************************************************/ +/**************************************************************************/ +/* */ +/* Automatically generated FW file for AT76C502A */ +/* */ +/**************************************************************************/ + +#define FW_505RFMD_INTERNAL { \ +0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F, \ +0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1, \ +0x9F,0xE5,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0E,0x04,0xA0,0xE3,0x00, \ +0x10,0xA0,0xE3,0x81,0x11,0xA0,0xE1,0x00,0x10,0x81,0xE3,0x00,0x10,0x80,0xE5, \ +0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3,0x04,0x10,0x80,0xE5,0x0C,0x10,0x80, \ +0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90,0xE5,0x48,0xD1,0x9F,0xE5,0x65,0x12, \ +0x00,0xEB,0x44,0x11,0x9F,0xE5,0xD0,0x20,0x9F,0xE5,0xD1,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10,0xC1,0xE3,0x04,0x10,0x41,0xE2, \ +0x01,0xD0,0xA0,0xE1,0x00,0xA0,0xA0,0xE3,0x00,0xB0,0xA0,0xE3,0xB0,0x20,0x9F, \ +0xE5,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10, \ +0xC1,0xE3,0x04,0x10,0x41,0xE2,0x01,0xD0,0xA0,0xE1,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x84,0x30,0x9F,0xE5,0x00,0x10,0x83,0xE5,0x01,0xD0,0xA0,0xE1, \ +0x74,0x00,0x9F,0xE5,0x01,0x00,0x80,0xE3,0x0F,0xE0,0xA0,0xE1,0x10,0xFF,0x2F, \ +0xE1,0x00,0xA0,0x00,0x47,0x64,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x04,0x00, \ +0x80,0xE2,0x6C,0x10,0x9F,0xE5,0x6C,0x30,0x9F,0xE5,0x5C,0x20,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0x00,0x20,0x83,0xE5,0x02,0x00,0x80,0xE0,0x5C,0x10,0x9F,0xE5, \ +0x00,0x20,0xA0,0xE3,0x00,0x20,0x81,0xE5,0x44,0x20,0x9F,0xE5,0x00,0x00,0x82, \ +0xE5,0x1E,0xFF,0x2F,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF, \ +0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x84,0x0D,0x00,0xEA,0x01, \ +0xF0,0x3E,0xFF,0x22,0x48,0x87,0x46,0x9D,0x0D,0x00,0xEA,0x01,0xF0,0x7A,0xFD, \ +0x20,0x48,0x87,0x46,0xB5,0x01,0x00,0x00,0xA8,0x03,0x00,0x02,0x00,0x01,0x00, \ +0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x2C,0x04,0x00,0x02,0x4C,0x04, \ +0x00,0x02,0x50,0x04,0x00,0x02,0x54,0x04,0x00,0x02,0xFE,0xFF,0xFF,0xEA,0xFE, \ +0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0,0xE3, \ +0x0E,0xF0,0xA0,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0, \ +0xE3,0x0E,0xF0,0xA0,0xE1,0x00,0x00,0xA0,0xE3,0x0E,0xF0,0xA0,0xE1,0x20,0x00, \ +0x00,0x00,0x04,0x01,0x00,0x00,0x08,0x01,0x00,0x00,0x0C,0x01,0x00,0x00,0x10, \ +0x01,0x00,0x00,0x14,0x01,0x00,0x00,0x18,0x01,0x00,0x00,0x24,0x01,0x00,0x00, \ +0x00,0x60,0x00,0x01,0xE0,0x59,0x00,0x01,0x21,0x38,0x00,0x00,0x11,0x39,0x00, \ +0x00,0x00,0xB5,0x03,0xF0,0x8D,0xFA,0x00,0x20,0x00,0xBD,0x80,0xB5,0x86,0xB0, \ +0x07,0x1C,0x00,0x21,0x04,0x91,0xFF,0x21,0x01,0x22,0x91,0x31,0x01,0x20,0x03, \ +0x90,0x01,0x91,0x05,0x92,0x02,0x92,0x17,0x4A,0x19,0xA1,0x17,0x48,0x01,0x23, \ +0x00,0x97,0x03,0xF0,0x63,0xFC,0x00,0x21,0x04,0x91,0xFF,0x21,0x91,0x31,0x01, \ +0x22,0x05,0x92,0xFB,0x1D,0xFF,0x33,0x03,0x22,0x03,0x20,0x8A,0x33,0x00,0x93, \ +0x03,0x90,0x02,0x92,0x01,0x91,0x13,0xA1,0x11,0x4A,0x11,0x48,0x02,0x23,0x03, \ +0xF0,0x4E,0xFC,0x13,0x48,0x14,0xA1,0x03,0xF0,0xC0,0xFC,0x16,0x48,0x17,0xA1, \ +0x03,0xF0,0xBC,0xFC,0x1A,0x48,0x1B,0xA1,0x03,0xF0,0xB8,0xFC,0x1E,0x48,0x1F, \ +0xA1,0x03,0xF0,0xB4,0xFC,0x03,0xF0,0x9A,0xF8,0x06,0xB0,0x80,0xBD,0xF9,0x10, \ +0x00,0x00,0xAC,0x05,0x00,0x02,0x54,0x78,0x20,0x74,0x68,0x72,0x65,0x61,0x64, \ 
+0x00,0x00,0x00,0xD9,0x22,0x00,0x00,0x3C,0x06,0x00,0x02,0x4D,0x67,0x6D,0x20, \ +0x74,0x68,0x72,0x65,0x61,0x64,0x00,0x00,0xCC,0x06,0x00,0x02,0x54,0x78,0x20, \ +0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0xEC,0x06, \ +0x00,0x02,0x4D,0x67,0x6D,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C, \ +0x61,0x67,0x73,0x00,0x00,0x00,0x00,0x0C,0x07,0x00,0x02,0x54,0x58,0x20,0x47, \ +0x4F,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00, \ +0x00,0x2C,0x07,0x00,0x02,0x50,0x73,0x50,0x6F,0x6C,0x6C,0x20,0x73,0x74,0x61, \ +0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x43,0x01,0x18,0x18,0x80, \ +0x08,0x01,0xD0,0x01,0x38,0xFD,0xD1,0xF7,0x46,0x00,0x00,0x03,0x49,0x0F,0x20, \ +0x00,0x06,0x81,0x80,0x02,0x49,0x81,0x81,0xF7,0x46,0x00,0x00,0xE8,0xE8,0x00, \ +0x00,0x13,0x13,0x00,0x00,0x01,0x20,0x80,0x06,0x40,0x6A,0xF7,0x46,0x01,0x1C, \ +0x06,0x48,0x04,0xD0,0x41,0x68,0xC3,0x01,0x19,0x43,0x41,0x60,0xF7,0x46,0x41, \ +0x68,0x01,0x23,0x5B,0x03,0x99,0x43,0x41,0x60,0xF7,0x46,0x40,0x00,0x00,0x04, \ +0x00,0xB5,0x13,0x4A,0x13,0x49,0x10,0x78,0x08,0x61,0x13,0x48,0x13,0x49,0x43, \ +0x7A,0x13,0x48,0x00,0x2B,0x0B,0xD0,0x12,0x78,0x00,0x2A,0x08,0xDD,0x0A,0x68, \ +0x01,0x23,0x1A,0x43,0x0A,0x60,0x48,0x21,0x41,0x81,0x18,0x21,0x01,0x81,0x07, \ +0xE0,0x0A,0x68,0xFE,0x23,0x1A,0x40,0x0A,0x60,0x90,0x21,0x41,0x81,0x30,0x21, \ +0x01,0x81,0x41,0x89,0x00,0x89,0x08,0x18,0x07,0x49,0x08,0x80,0x01,0xF0,0x8A, \ +0xFB,0x00,0xBD,0x00,0x00,0x92,0x01,0x00,0x02,0x40,0x00,0x00,0x05,0x04,0x01, \ +0x00,0x02,0x80,0x00,0x00,0x05,0x00,0x00,0x00,0x02,0xA8,0x01,0x00,0x02,0x01, \ +0x1C,0x06,0x48,0x04,0xD0,0x41,0x69,0x80,0x23,0x19,0x43,0x41,0x61,0xF7,0x46, \ +0x41,0x69,0x49,0x06,0x49,0x0E,0x41,0x61,0xF7,0x46,0x00,0x00,0x40,0x00,0x00, \ +0x05,0x00,0xB5,0x00,0x20,0xFF,0xF7,0xA8,0xFF,0x05,0x48,0x00,0x68,0xC1,0x43, \ +0x05,0x48,0xC2,0x69,0x11,0x40,0xC1,0x61,0x04,0x49,0x01,0x20,0x08,0x70,0x00, \ +0xBD,0x00,0x00,0xAC,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0xA9,0x02,0x00,0x02, \ +0xC1,0x0A,0x01,0xD3,0x00,0x20,0xF7,0x46,0xFF,0x22,0x01,0x32,0x02,0x40,0x01, \ +0x21,0x00,0x2A,0x01,0xD0,0x08,0x1C,0xF7,0x46,0x80,0x0A,0x01,0xD3,0x08,0x1C, \ +0xF7,0x46,0x02,0x20,0xF7,0x46,0xF0,0xB5,0x0F,0x1C,0x19,0x49,0x04,0x1C,0x19, \ +0x4E,0x1A,0x48,0x31,0x60,0x05,0x6C,0x00,0x2D,0x16,0xD0,0x06,0x22,0x20,0x1C, \ +0x31,0x68,0x04,0xF0,0x17,0xF9,0x00,0x28,0x08,0xD1,0x30,0x68,0xC1,0x88,0xB9, \ +0x42,0x01,0xD1,0x01,0x20,0xF0,0xBD,0xC7,0x80,0x00,0x20,0xF0,0xBD,0x30,0x68, \ +0x08,0x30,0x30,0x60,0x28,0x1C,0x01,0x3D,0x00,0x28,0xE8,0xD1,0x0C,0x48,0x01, \ +0x6C,0x01,0x31,0x01,0x64,0x01,0x6C,0x07,0x29,0x03,0xD9,0x06,0x49,0x31,0x60, \ +0x08,0x21,0x01,0x64,0x06,0x22,0x21,0x1C,0x30,0x68,0x04,0xF0,0x13,0xF9,0x30, \ +0x68,0xC7,0x80,0x00,0x20,0xF0,0xBD,0x00,0x00,0x68,0x07,0x00,0x02,0x40,0x01, \ +0x00,0x02,0x18,0x09,0x00,0x02,0x05,0x49,0x0A,0x68,0x12,0x01,0x02,0x70,0x0A, \ +0x68,0x12,0x01,0x12,0x0A,0x42,0x70,0x08,0x68,0x01,0x30,0x08,0x60,0xF7,0x46, \ +0x44,0x01,0x00,0x02,0x00,0x2A,0x0C,0xD1,0x08,0x4A,0x92,0x7A,0x8A,0x42,0x00, \ +0xD8,0x11,0x1C,0x07,0x4A,0x49,0x00,0x51,0x5A,0x06,0x4A,0xD2,0x88,0x89,0x18, \ +0xC9,0x18,0x00,0xE0,0x00,0x21,0x01,0x70,0x09,0x0A,0x41,0x70,0xF7,0x46,0x04, \ +0x01,0x00,0x02,0xAC,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xF0,0xB5,0x49,0x48, \ +0x00,0x68,0xFF,0xF7,0x85,0xFF,0x04,0x06,0x47,0x48,0x24,0x0E,0x00,0x68,0x47, \ +0x68,0x38,0x78,0x06,0x07,0x36,0x0F,0x02,0x2C,0x73,0xD0,0x44,0x4D,0x28,0x79, \ +0x02,0x28,0x07,0xD1,0xF8,0x1D,0x09,0x30,0x06,0x22,0x41,0x49,0x04,0xF0,0xA9, \ +0xF8,0x00,0x28,0x67,0xD0,0x30,0x06,0x00,0x0E,0x08,0x28,0x64,0xD1,0x3E,0x48, \ 
+0x80,0x79,0x05,0x28,0x61,0xD1,0x28,0x79,0x3C,0x4E,0x02,0x28,0x0C,0xD1,0xF8, \ +0x1D,0x03,0x30,0x06,0x22,0x31,0x1C,0x04,0xF0,0x95,0xF8,0x00,0x28,0x55,0xD1, \ +0x78,0x78,0x81,0x08,0x5A,0xD3,0x40,0x08,0x58,0xD2,0x28,0x79,0x01,0x28,0x0C, \ +0xD1,0xF8,0x1D,0x09,0x30,0x06,0x22,0x31,0x1C,0x04,0xF0,0x85,0xF8,0x00,0x28, \ +0x45,0xD1,0x78,0x78,0x81,0x08,0x4A,0xD2,0x40,0x08,0x48,0xD2,0x38,0x78,0x08, \ +0x28,0x30,0xD1,0x2C,0x48,0x01,0x78,0x00,0x29,0x11,0xD0,0xC0,0x78,0x00,0x28, \ +0x11,0xD0,0x78,0x78,0xC0,0x09,0x0E,0xD2,0xB9,0x7F,0xF8,0x1D,0x09,0x30,0x88, \ +0x29,0x02,0xD1,0xC0,0x7B,0x8E,0x28,0x06,0xD0,0xB8,0x7D,0x00,0x07,0x26,0xD0, \ +0x02,0xE0,0x78,0x78,0xC0,0x09,0x2C,0xD2,0x28,0x79,0x02,0x28,0x14,0xD1,0x78, \ +0x78,0x04,0x21,0x01,0x40,0x20,0x23,0x18,0x40,0x22,0x1C,0x02,0xF0,0xE9,0xFF, \ +0x00,0x2C,0x0A,0xD1,0x1A,0x48,0x41,0x68,0x04,0x29,0x06,0xD1,0x01,0x26,0x46, \ +0x60,0x01,0x20,0x01,0xF0,0x36,0xFB,0x16,0x48,0x06,0x70,0x68,0x79,0x03,0x28, \ +0x0D,0xD1,0x19,0x21,0xC9,0x02,0x02,0x20,0x01,0xF0,0x12,0xFB,0x12,0x48,0x81, \ +0x78,0x02,0xE0,0x08,0xE0,0x08,0xE0,0x06,0xE0,0x08,0x23,0x99,0x43,0x81,0x70, \ +0x39,0x78,0x20,0x1C,0x00,0xF0,0xB7,0xFC,0xF0,0xBD,0x00,0x28,0xFC,0xD1,0x20, \ +0x1C,0x00,0xF0,0x83,0xFB,0xF0,0xBD,0xE0,0x01,0x00,0x02,0x48,0x01,0x00,0x02, \ +0xB0,0x00,0x00,0x02,0x5C,0x00,0x00,0x02,0x68,0x09,0x00,0x02,0x8E,0x00,0x00, \ +0x02,0x18,0x00,0x00,0x02,0x18,0x09,0x00,0x02,0xD4,0x01,0x00,0x02,0x88,0x09, \ +0x00,0x02,0x08,0xB5,0x00,0x21,0x00,0x91,0x00,0x28,0x0C,0xD1,0x09,0x48,0x00, \ +0x68,0x40,0x68,0x81,0x7D,0xC2,0x7D,0x12,0x02,0x11,0x43,0x09,0x04,0x09,0x0C, \ +0x0A,0x30,0xFF,0xF7,0xE3,0xFE,0x00,0x90,0x00,0x98,0x01,0x28,0x01,0xD0,0x00, \ +0xF0,0x03,0xF8,0x08,0xBD,0x48,0x01,0x00,0x02,0x80,0xB5,0x05,0x48,0x00,0x78, \ +0x80,0x09,0x04,0xD3,0x04,0x4F,0x38,0x68,0x01,0xF0,0xE5,0xFF,0x38,0x60,0x80, \ +0xBD,0x00,0x00,0x93,0x01,0x00,0x02,0x48,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0, \ +0x00,0x25,0x7D,0x26,0x36,0x01,0x01,0x21,0x89,0x06,0x88,0x68,0x00,0x0B,0xFC, \ +0x27,0x07,0x40,0xF7,0x48,0x41,0x6B,0x82,0x6B,0x00,0x6B,0x09,0x02,0x11,0x43, \ +0x00,0x90,0xB8,0x08,0x0C,0x1C,0xF3,0x49,0x08,0x5C,0x00,0x28,0x07,0xD0,0x00, \ +0x20,0xFF,0xF7,0x33,0xFE,0x01,0x21,0x89,0x06,0xC8,0x68,0x02,0xB0,0xF0,0xBD, \ +0xEE,0x49,0x04,0x20,0x38,0x40,0x01,0x91,0x69,0xD0,0xB8,0x09,0x01,0xD3,0x14, \ +0x21,0x00,0xE0,0x0E,0x21,0xEA,0x4C,0x02,0x22,0x20,0x1C,0x01,0xF0,0xF3,0xFD, \ +0x00,0x28,0x03,0xD1,0x01,0x21,0x89,0x06,0xC8,0x68,0xE8,0xE7,0x01,0x21,0x89, \ +0x06,0xC8,0x68,0x28,0x43,0x01,0xE0,0xCA,0x68,0x10,0x43,0x42,0x09,0x03,0xD2, \ +0x32,0x1C,0x01,0x3E,0x00,0x2A,0xF7,0xD1,0x10,0x23,0x98,0x43,0x05,0x1C,0x00, \ +0x2E,0x01,0xDC,0x28,0x1C,0xD4,0xE7,0x00,0x98,0xDB,0x4E,0x40,0x06,0x40,0x0E, \ +0x70,0x75,0xA9,0x09,0x06,0xD3,0xE9,0x08,0x04,0xD2,0x01,0x20,0xFF,0xF7,0xF9, \ +0xFD,0x28,0x1C,0xC6,0xE7,0x00,0x23,0xE9,0x0A,0x33,0x60,0x1C,0xD3,0x70,0x75, \ +0xB4,0x2F,0x06,0xD0,0xC4,0x2F,0x14,0xD0,0xD4,0x2F,0x20,0xD1,0x00,0xF0,0xC2, \ +0xF9,0x1D,0xE0,0xCF,0x48,0x40,0x68,0x80,0x0B,0x19,0xD3,0x02,0x20,0xE1,0x1D, \ +0x03,0x31,0xB0,0x72,0x01,0x98,0x06,0x22,0xCB,0x4F,0x03,0xF0,0x9C,0xFF,0x60, \ +0x88,0x78,0x80,0x0D,0xE0,0x00,0xF0,0xBD,0xFA,0x0A,0xE0,0xA4,0x2F,0x15,0xD0, \ +0xB4,0x2F,0x13,0xD0,0xC4,0x2F,0x01,0xD0,0xD4,0x2F,0x02,0xD1,0x60,0x88,0xC3, \ +0x49,0x08,0x80,0x70,0x68,0x04,0x28,0x06,0xD1,0x00,0xF0,0x94,0xF9,0x00,0x22, \ +0x10,0x21,0xBF,0x48,0x03,0xF0,0x41,0xFA,0x28,0x1C,0x8E,0xE7,0x11,0xE0,0x72, \ +0x7D,0xBD,0x48,0xBD,0x4B,0x52,0x00,0x9A,0x5A,0xC1,0x88,0xB8,0x4B,0x8A,0x18, \ +0x1A,0x80,0xB4,0x2F,0xE7,0xD1,0x80,0x88,0x40,0x00,0x08,0x18,0x19,0x88,0x40, \ 
+0x18,0x18,0x80,0xE0,0xE7,0xB6,0x4E,0x30,0x68,0x00,0x7A,0x00,0x28,0x06,0xD0, \ +0xAD,0x49,0xC4,0x20,0x08,0x60,0x01,0x21,0x89,0x06,0xC8,0x68,0x6F,0xE7,0x01, \ +0x20,0xFF,0xF7,0x9D,0xFD,0x80,0x2F,0x01,0xD0,0x50,0x2F,0x13,0xD1,0xA2,0x48, \ +0x32,0x68,0x00,0x6A,0x00,0x23,0x50,0x74,0xAB,0x48,0x01,0x22,0x82,0x62,0xAB, \ +0x4A,0x31,0x1C,0x52,0x68,0x83,0x62,0x08,0x68,0x41,0x7C,0x91,0x42,0x02,0xD0, \ +0x18,0x31,0x41,0x74,0x00,0xE0,0x43,0x74,0xA1,0x48,0x80,0x89,0x04,0x30,0xA0, \ +0x42,0x01,0xD3,0x18,0x2C,0x06,0xD8,0x98,0x49,0xC3,0x20,0x08,0x60,0x01,0x21, \ +0x89,0x06,0xC8,0x68,0x46,0xE7,0x30,0x68,0x02,0x22,0x18,0x21,0x40,0x68,0x01, \ +0xF0,0x45,0xFD,0x00,0x28,0x03,0xD1,0x01,0x21,0x89,0x06,0xC8,0x68,0x3A,0xE7, \ +0x7D,0x20,0xC0,0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x29,0x43,0x03,0xE0,0x01, \ +0x22,0x92,0x06,0xD2,0x68,0x11,0x43,0x4A,0x09,0x03,0xD2,0x02,0x1C,0x01,0x38, \ +0x00,0x2A,0xF5,0xD1,0x10,0x23,0x99,0x43,0x0D,0x1C,0x00,0x28,0x01,0xDC,0x28, \ +0x1C,0x22,0xE7,0x8A,0x48,0x85,0x49,0x00,0x68,0x00,0x22,0x46,0x68,0x0A,0x80, \ +0x2A,0x0A,0x52,0x07,0x08,0xD1,0x70,0x88,0x01,0x22,0x08,0x80,0x7D,0x49,0x0A, \ +0x73,0x00,0x22,0xCA,0x72,0x28,0x1C,0x10,0xE7,0x84,0x49,0x0D,0x60,0xE9,0x0A, \ +0x13,0xD3,0x78,0x49,0x01,0x22,0x8A,0x72,0x71,0x78,0xC9,0x08,0x03,0xD3,0x71, \ +0x88,0x76,0x4A,0x51,0x80,0x02,0xE0,0x75,0x49,0x00,0x22,0x4A,0x80,0x40,0x68, \ +0xC1,0x1D,0x03,0x31,0x06,0x22,0x01,0x98,0x03,0xF0,0xE9,0xFE,0x00,0x98,0x6D, \ +0x49,0x40,0x06,0x40,0x0E,0x48,0x75,0x72,0x48,0x02,0x68,0x94,0x81,0x4A,0x7D, \ +0x03,0x68,0x00,0x24,0x9A,0x73,0x0C,0x60,0x31,0x78,0x48,0x29,0x03,0xD1,0x71, \ +0x78,0x40,0x23,0x99,0x43,0x71,0x70,0x71,0x78,0xC9,0x09,0x2E,0xD2,0x00,0x68, \ +0x02,0x22,0x81,0x89,0x40,0x68,0x18,0x30,0x18,0x39,0x01,0xF0,0xDE,0xFC,0x00, \ +0x28,0x05,0xD1,0x01,0x22,0x5D,0x49,0x28,0x1C,0x0A,0x73,0xCC,0x72,0xD1,0xE6, \ +0x5A,0x49,0x03,0x20,0xC8,0x72,0x02,0x20,0x08,0x73,0x80,0x2F,0x15,0xD1,0x62, \ +0x48,0xC2,0x1D,0x29,0x32,0x12,0x79,0x01,0x2A,0x0F,0xD1,0x0F,0x1C,0x50,0x31, \ +0x89,0x79,0x05,0x29,0x0A,0xD1,0xC1,0x1D,0x07,0x31,0xF0,0x1D,0x06,0x22,0x09, \ +0x30,0x03,0xF0,0x8B,0xFE,0x00,0x28,0x01,0xD1,0x01,0x22,0xBA,0x76,0x28,0x1C, \ +0xB2,0xE6,0x57,0x4E,0x31,0x78,0x00,0x29,0x05,0xD1,0x01,0x22,0x48,0x49,0x28, \ +0x1C,0x0A,0x73,0xCC,0x72,0xA8,0xE6,0x07,0x1C,0x00,0x68,0x02,0x22,0x40,0x68, \ +0x04,0x21,0x18,0x30,0x01,0xF0,0xA5,0xFC,0x00,0x28,0x01,0xD1,0x28,0x1C,0x9C, \ +0xE6,0x7D,0x20,0xC0,0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x29,0x43,0x01,0xE0, \ +0xD3,0x68,0x19,0x43,0x4B,0x09,0x03,0xD2,0x03,0x1C,0x01,0x38,0x00,0x2B,0xF7, \ +0xD1,0x10,0x23,0x99,0x43,0x0D,0x1C,0x00,0x28,0x01,0xDC,0x28,0x1C,0x86,0xE6, \ +0x38,0x68,0x40,0x68,0xC1,0x1D,0x11,0x31,0x40,0x7E,0x0A,0x78,0x00,0x02,0x10, \ +0x43,0x8A,0x78,0xC9,0x78,0x12,0x04,0x10,0x43,0x89,0x09,0x09,0x06,0x09,0x0E, \ +0x0D,0x23,0x59,0x43,0x89,0x19,0x0B,0x7B,0x1B,0x06,0x18,0x43,0x32,0x1C,0x03, \ +0x26,0x76,0x06,0x30,0x60,0x8B,0x7B,0x48,0x7B,0x1B,0x02,0x18,0x43,0xCB,0x7B, \ +0x1B,0x04,0x18,0x43,0x0B,0x7C,0x1B,0x06,0x18,0x43,0x70,0x60,0xD0,0x1D,0x39, \ +0x30,0x00,0x78,0x01,0x28,0x02,0xD1,0x01,0x22,0xB2,0x60,0x19,0xE0,0x02,0x28, \ +0x17,0xD1,0x8A,0x7C,0x48,0x7C,0x12,0x02,0x10,0x43,0xCA,0x7C,0x12,0x04,0x10, \ +0x43,0x0A,0x7D,0x12,0x06,0x10,0x43,0x70,0x61,0x8A,0x7D,0x48,0x7D,0x12,0x02, \ +0x10,0x43,0xCA,0x7D,0x09,0x7E,0x12,0x04,0x10,0x43,0x09,0x06,0x08,0x43,0xB0, \ +0x61,0x81,0x20,0xB0,0x60,0x38,0x68,0x0E,0x22,0x81,0x89,0x40,0x68,0x18,0x30, \ +0x20,0x39,0x01,0xF0,0x3D,0xFC,0x00,0x28,0x06,0xD1,0x01,0x22,0x0C,0x49,0x28, \ +0x1C,0x0A,0x73,0xCC,0x72,0xB4,0x60,0x2F,0xE6,0x38,0x68,0x81,0x89,0x08,0x39, \ 
+0x81,0x81,0x07,0x49,0x03,0x20,0xC8,0x72,0x02,0x20,0x08,0x73,0x28,0x1C,0x24, \ +0xE6,0x00,0x00,0x40,0x00,0x00,0x05,0x4C,0x01,0x00,0x02,0x04,0x09,0x00,0x02, \ +0xA8,0x07,0x00,0x02,0x18,0x09,0x00,0x02,0x40,0x00,0x00,0x04,0x00,0x09,0x00, \ +0x02,0xDA,0x01,0x00,0x02,0x2C,0x07,0x00,0x02,0x00,0x00,0x00,0x02,0xAC,0x01, \ +0x00,0x02,0x48,0x01,0x00,0x02,0x80,0x00,0x00,0x05,0x00,0x01,0x00,0x05,0xDC, \ +0x01,0x00,0x02,0x80,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x00,0xB5,0x03,0x49, \ +0x01,0x20,0x48,0x60,0x01,0xF0,0xB8,0xF8,0x00,0xBD,0x00,0x00,0x18,0x09,0x00, \ +0x02,0xF0,0xB5,0x3D,0x4F,0x01,0x24,0x78,0x68,0x04,0x28,0x0C,0xD1,0x01,0x20, \ +0x01,0xF0,0xAB,0xF8,0x7C,0x60,0x01,0x20,0xFF,0xF7,0x1B,0xFC,0x00,0x22,0x01, \ +0x21,0x37,0x48,0x03,0xF0,0x96,0xF8,0xF0,0xBD,0x78,0x68,0x02,0x28,0xFB,0xD1, \ +0x01,0x20,0x01,0xF0,0x9B,0xF8,0x7C,0x60,0x78,0x6E,0x08,0x23,0x41,0x78,0x32, \ +0x4C,0x99,0x43,0x41,0x70,0x2F,0x49,0x89,0x89,0xB9,0x87,0x22,0x78,0x2F,0x49, \ +0x01,0x2A,0x45,0xD1,0x2F,0x4B,0x1A,0x78,0x00,0x2A,0x01,0xD0,0x18,0x25,0x00, \ +0xE0,0x1E,0x25,0x2C,0x4E,0x36,0x88,0x75,0x1B,0x2C,0x4E,0x36,0x68,0xAD,0x19, \ +0x2A,0x4E,0x01,0x32,0x35,0x60,0x1A,0x70,0x1A,0x78,0x86,0x7D,0x12,0x07,0x12, \ +0x0F,0x1D,0x1C,0xF0,0x23,0x33,0x40,0x1A,0x43,0x82,0x75,0x42,0x78,0xD2,0x09, \ +0x03,0xD3,0x22,0x4A,0x13,0x68,0x08,0x3B,0x13,0x60,0x21,0x4B,0x2A,0x78,0x1B, \ +0x88,0x9A,0x42,0x0F,0xD1,0x20,0x4A,0x1C,0x4E,0x12,0x88,0x04,0x23,0x32,0x80, \ +0x42,0x78,0x9A,0x43,0x42,0x70,0x02,0x20,0x20,0x70,0x08,0x68,0x80,0x7D,0x31, \ +0x88,0x00,0xF0,0x56,0xFF,0xF8,0x66,0x15,0x4E,0x30,0x88,0xB8,0x66,0x20,0x78, \ +0x02,0x28,0x04,0xD0,0x01,0x21,0x01,0x20,0x00,0xF0,0x2D,0xF8,0xF0,0xBD,0x01, \ +0x21,0x00,0x20,0x00,0xF0,0x28,0xF8,0xF0,0xBD,0x09,0x68,0x00,0x20,0x48,0x73, \ +0x0F,0x49,0x09,0x68,0x48,0x70,0x01,0x20,0xFF,0xF7,0xB2,0xFB,0x00,0x22,0x10, \ +0x21,0x0C,0x48,0x03,0xF0,0x2D,0xF8,0xF0,0xBD,0x18,0x09,0x00,0x02,0x2C,0x07, \ +0x00,0x02,0xC0,0x00,0x00,0x02,0x94,0x01,0x00,0x02,0xBC,0x01,0x00,0x02,0x95, \ +0x01,0x00,0x02,0x96,0x01,0x00,0x02,0x9C,0x01,0x00,0x02,0x9A,0x01,0x00,0x02, \ +0x98,0x01,0x00,0x02,0xC8,0x01,0x00,0x02,0xEC,0x06,0x00,0x02,0xF0,0xB5,0x30, \ +0x4D,0x04,0x1C,0x28,0x68,0x0F,0x1C,0x80,0x7D,0x2E,0x49,0x08,0x70,0x00,0xF0, \ +0x74,0xFF,0x2C,0x49,0x08,0x78,0x03,0x28,0x04,0xD1,0x2B,0x48,0x40,0x6B,0xFF, \ +0xF7,0xC6,0xFB,0x02,0xE0,0x00,0x20,0xFF,0xF7,0xC2,0xFB,0x28,0x68,0x85,0x7D, \ +0x27,0x48,0x80,0x7A,0x85,0x42,0x00,0xDB,0x05,0x1C,0x23,0x48,0x00,0x78,0x01, \ +0xF0,0x5F,0xFB,0x24,0x4A,0x24,0x49,0x10,0x60,0xCB,0x88,0x69,0x00,0x23,0x4D, \ +0x69,0x5A,0x59,0x18,0x00,0x2C,0x12,0xD0,0x00,0x2F,0x11,0xD0,0x21,0x4C,0x21, \ +0x4D,0x24,0x88,0x2D,0x78,0x5B,0x18,0x1B,0x18,0x01,0x3C,0xAC,0x42,0x03,0xD1, \ +0x1E,0x4C,0x24,0x68,0x1E,0x19,0x04,0xE0,0x15,0x4C,0xE4,0x6E,0xE6,0x18,0x00, \ +0xE0,0x00,0x26,0x13,0x4C,0x14,0x4A,0xE3,0x6E,0x18,0x18,0x10,0x60,0x18,0x4A, \ +0x12,0x88,0x10,0x18,0x45,0x18,0x00,0x2F,0x07,0xD0,0x60,0x6E,0x0C,0x49,0x02, \ +0x30,0x33,0x1C,0x00,0x22,0x09,0x78,0xFF,0xF7,0x08,0xFC,0x01,0x20,0x29,0x1C, \ +0x00,0xF0,0xB0,0xFF,0x02,0x20,0x60,0x60,0x01,0x20,0x0F,0x49,0xE0,0x75,0x09, \ +0x88,0xE0,0x6E,0x06,0x4A,0x40,0x18,0x10,0x60,0xF0,0xBD,0x00,0x00,0xBC,0x01, \ +0x00,0x02,0x92,0x01,0x00,0x02,0x18,0x09,0x00,0x02,0x04,0x01,0x00,0x02,0x8C, \ +0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xAC,0x01,0x00,0x02,0x9A,0x01,0x00,0x02, \ +0x95,0x01,0x00,0x02,0xA0,0x01,0x00,0x02,0xAA,0x01,0x00,0x02,0xA8,0x01,0x00, \ +0x02,0x00,0xB5,0x06,0x48,0x40,0x68,0x03,0x28,0x06,0xD1,0x01,0x20,0x00,0xF0, \ +0x9E,0xFF,0x00,0x21,0x01,0x20,0xFF,0xF7,0x7A,0xFF,0x00,0xBD,0x00,0x00,0x18, \ 
+0x09,0x00,0x02,0x00,0xB5,0x11,0x49,0x09,0x68,0x49,0x68,0x0A,0x78,0x13,0x07, \ +0x10,0xD1,0x12,0x11,0x0D,0x2A,0x0D,0xD2,0x01,0xA3,0x9B,0x5C,0x5B,0x00,0x9F, \ +0x44,0x09,0x0A,0x09,0x0A,0x0D,0x06,0x09,0x09,0x06,0x09,0x0A,0x0A,0x0A,0x00, \ +0x08,0x1C,0x00,0xF0,0x10,0xF8,0x00,0xBD,0xFF,0xF7,0x77,0xFC,0x00,0xBD,0x04, \ +0x48,0x40,0x78,0x00,0x28,0xF7,0xD0,0x07,0xF0,0x60,0xFF,0x00,0xBD,0x00,0x00, \ +0x48,0x01,0x00,0x02,0x68,0x09,0x00,0x02,0xB0,0xB5,0x28,0x4C,0x07,0x1C,0xA0, \ +0x79,0x01,0x28,0x02,0xD1,0x38,0x1C,0x07,0xF0,0x43,0xFF,0x38,0x78,0x50,0x28, \ +0x0E,0xD0,0xA0,0x79,0x03,0x28,0x03,0xD1,0x07,0xF0,0x21,0xFB,0x00,0x28,0x3D, \ +0xD0,0xA0,0x79,0x04,0x28,0x04,0xD1,0x01,0x20,0x07,0xF0,0x17,0xFA,0x00,0x28, \ +0x35,0xD0,0xA0,0x79,0x05,0x28,0x30,0xD1,0x1A,0x4D,0xF8,0x1D,0x09,0x30,0x06, \ +0x22,0xE9,0x1D,0x07,0x31,0x03,0xF0,0x5E,0xFC,0x17,0x4C,0x00,0x28,0x20,0xD1, \ +0x20,0x79,0x20,0x37,0x02,0x28,0x0A,0xD1,0xB8,0x78,0x40,0x08,0x20,0xD3,0x06, \ +0x20,0x00,0xF0,0x3B,0xFF,0x00,0x20,0x00,0xF0,0x22,0xF8,0x00,0xF0,0xD4,0xF8, \ +0x20,0x79,0x01,0x28,0x13,0xD1,0xB8,0x78,0x80,0x08,0x12,0xD3,0x00,0x20,0x00, \ +0xF0,0x17,0xF8,0x00,0xF0,0xED,0xFE,0x29,0x88,0x89,0x02,0x09,0x1A,0x06,0x20, \ +0x00,0xF0,0x0B,0xFF,0x04,0xE0,0x20,0x79,0x01,0x28,0x01,0xD1,0x07,0xF0,0x93, \ +0xFF,0xFF,0xF7,0x33,0xFC,0xB0,0xBD,0x68,0x09,0x00,0x02,0x80,0x00,0x00,0x02, \ +0xB0,0x00,0x00,0x02,0xF1,0xB5,0x83,0xB0,0x3E,0x49,0x00,0x25,0x0B,0x68,0x02, \ +0x93,0x59,0x68,0x4A,0x7E,0x0F,0x7E,0x12,0x02,0x3A,0x43,0x8F,0x7E,0x3F,0x04, \ +0x3A,0x43,0xCF,0x7E,0x3F,0x06,0x3A,0x43,0x16,0x1C,0x4F,0x7F,0x0A,0x7F,0x3F, \ +0x02,0x3A,0x43,0x8F,0x7F,0xC9,0x7F,0x3F,0x04,0x3A,0x43,0x09,0x06,0x0A,0x43, \ +0x99,0x89,0x18,0x39,0xCC,0x00,0x99,0x7B,0x17,0x1C,0x00,0x29,0x26,0xD0,0x01, \ +0x29,0x26,0xD0,0x02,0x29,0x26,0xD0,0x03,0x29,0x0C,0xD1,0x0B,0x20,0x21,0x1C, \ +0x03,0xF0,0x61,0xFC,0x00,0x91,0x61,0x1A,0x0B,0x20,0x03,0xF0,0x5C,0xFC,0x00, \ +0x99,0x00,0x29,0x00,0xD9,0x01,0x30,0x01,0x24,0xA4,0x06,0xA2,0x6A,0x61,0x6A, \ +0x02,0x9B,0x30,0x18,0x5B,0x69,0xCB,0x1A,0xC0,0x18,0xB0,0x42,0x00,0xD2,0x01, \ +0x37,0x06,0x1C,0x1F,0x48,0x03,0x79,0x00,0x20,0x02,0x2B,0x14,0xD1,0x01,0x25, \ +0x1F,0xE0,0x20,0x1C,0xE9,0xE7,0x60,0x08,0xE7,0xE7,0x61,0x00,0x01,0x91,0x0B, \ +0x20,0x03,0xF0,0x3B,0xFC,0x0C,0x1C,0x01,0x99,0x09,0x1B,0x0B,0x20,0x03,0xF0, \ +0x35,0xFC,0x00,0x2C,0xDA,0xD9,0x01,0x30,0xD8,0xE7,0x01,0x2B,0x0A,0xD1,0x12, \ +0x4B,0x97,0x42,0x58,0x70,0x01,0xD9,0x01,0x25,0x04,0xE0,0x97,0x42,0x02,0xD1, \ +0x8E,0x42,0x00,0xD9,0x01,0x25,0x03,0x9A,0x00,0x2A,0x03,0xD0,0x00,0x2D,0x03, \ +0xD1,0x04,0xB0,0xF0,0xBD,0x00,0x2D,0x09,0xD0,0x70,0x1A,0x00,0xF0,0x10,0xF8, \ +0x01,0x23,0xDE,0x42,0x01,0xD1,0x00,0x26,0x01,0x37,0xA7,0x62,0x66,0x62,0x01, \ +0x20,0xEF,0xE7,0x00,0x00,0x48,0x01,0x00,0x02,0xB0,0x00,0x00,0x02,0x68,0x09, \ +0x00,0x02,0x90,0xB4,0x10,0x4A,0x00,0x21,0x97,0x69,0x91,0x61,0x01,0x21,0x0E, \ +0x4B,0x8C,0x00,0xE3,0x18,0xDC,0x6A,0x01,0x31,0x24,0x18,0xDC,0x62,0x08,0x29, \ +0xF6,0xD9,0x0B,0x49,0x0B,0x6B,0x1B,0x18,0x0B,0x63,0x0B,0x6B,0x5B,0x00,0x5B, \ +0x08,0x0B,0x63,0xCB,0x6A,0x18,0x18,0xC8,0x62,0xC8,0x6A,0x40,0x00,0x40,0x08, \ +0xC8,0x62,0x97,0x61,0x90,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00,0x00,0x04,0x40, \ +0x00,0x00,0x04,0x18,0x09,0x00,0x02,0x00,0xB5,0x08,0x29,0x01,0xD1,0xFF,0xF7, \ +0x5F,0xFB,0x00,0xBD,0xB0,0xB5,0x00,0x25,0x00,0x24,0x05,0x20,0x00,0x06,0xC1, \ +0x68,0x11,0x29,0x01,0xD1,0x19,0x21,0xC1,0x60,0x1C,0x48,0x02,0x68,0x24,0x20, \ +0x51,0x68,0x92,0x89,0x04,0x3A,0x24,0x2A,0x27,0xD9,0x0F,0x5C,0x06,0x2F,0x1D, \ +0xD2,0x02,0xA3,0xDB,0x5D,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x03,0x03,0x19,0x03, \ 
+0x08,0x21,0x0B,0x18,0x5B,0x78,0x02,0x33,0x18,0x18,0x0E,0xE0,0x0B,0x18,0x5F, \ +0x79,0x1C,0x79,0x3F,0x02,0x27,0x43,0x3D,0x1C,0xDF,0x79,0x9C,0x79,0x5B,0x78, \ +0x3F,0x02,0x27,0x43,0x3C,0x04,0x24,0x0C,0x02,0x33,0x18,0x18,0x82,0x42,0xDE, \ +0xD8,0x00,0x2C,0x04,0xD9,0xAC,0x42,0x02,0xD2,0xA0,0x02,0x00,0xF0,0x0E,0xF8, \ +0xB0,0xBD,0x05,0x4A,0x52,0x79,0x01,0x2A,0xF3,0xDD,0x08,0x18,0x00,0xF0,0x24, \ +0xF8,0xEF,0xE7,0x00,0x00,0x48,0x01,0x00,0x02,0xB0,0x00,0x00,0x02,0x0B,0x49, \ +0x09,0x68,0x49,0x69,0x08,0x18,0x0A,0x49,0x4A,0x7A,0x05,0x2A,0x02,0xD1,0x8A, \ +0x6B,0x82,0x42,0x0A,0xD2,0x05,0x22,0x4A,0x72,0x02,0x1C,0x06,0x48,0x80,0x23, \ +0xC2,0x60,0x82,0x69,0x1A,0x43,0x82,0x61,0xC0,0x68,0x88,0x63,0xF7,0x46,0x00, \ +0x00,0x48,0x01,0x00,0x02,0x18,0x09,0x00,0x02,0x80,0x00,0x00,0x04,0xF0,0xB5, \ +0x02,0x79,0x35,0x4C,0x87,0x78,0xFE,0x21,0x11,0x40,0xE5,0x88,0x03,0x23,0x9B, \ +0x03,0x9D,0x43,0x2B,0x1C,0x00,0x29,0x03,0xD0,0xCD,0x00,0x01,0x3D,0x9D,0x42, \ +0x05,0xD2,0x45,0x78,0x6D,0x18,0xED,0x00,0x18,0x3D,0x9D,0x42,0x01,0xD8,0x00, \ +0x25,0x08,0xE0,0xC9,0x00,0x59,0x1A,0x89,0x09,0x5E,0x07,0x76,0x0F,0x41,0x18, \ +0x49,0x79,0xF1,0x40,0x0D,0x1C,0x00,0x23,0x26,0x49,0x52,0x08,0x8B,0x70,0x05, \ +0xD3,0x00,0x2F,0x03,0xD1,0x8A,0x78,0x02,0x23,0x1A,0x43,0x8A,0x70,0xEA,0x07, \ +0xD2,0x0F,0x03,0xD0,0x8D,0x78,0x04,0x23,0x2B,0x43,0x8B,0x70,0xE3,0x1D,0x29, \ +0x33,0x5B,0x79,0x01,0x25,0x02,0x2B,0x1E,0xD1,0x8E,0x78,0x08,0x23,0x33,0x43, \ +0x8B,0x70,0x00,0x2A,0x03,0xD0,0x0A,0x78,0x00,0x2A,0x00,0xD1,0x4D,0x70,0x00, \ +0x2F,0x00,0xD1,0xC7,0x78,0x15,0x48,0x40,0x8B,0xB8,0x42,0x00,0xD8,0x07,0x1C, \ +0x00,0xF0,0x70,0xFD,0x21,0x88,0x4F,0x43,0xB9,0x02,0x08,0x1A,0x7D,0x23,0xDB, \ +0x00,0xC1,0x1A,0x06,0x20,0x00,0xF0,0x8A,0xFD,0xF0,0xBD,0x88,0x78,0xC0,0x08, \ +0x00,0xD3,0x8D,0x71,0x88,0x78,0x40,0x08,0x80,0x07,0x07,0xD1,0x09,0x48,0x80, \ +0x69,0x80,0x08,0x03,0xD2,0x88,0x78,0x08,0x23,0x18,0x43,0x88,0x70,0x88,0x78, \ +0x04,0x23,0x98,0x43,0x88,0x70,0xF0,0xBD,0x80,0x00,0x00,0x02,0x88,0x09,0x00, \ +0x02,0xC0,0x00,0x00,0x02,0x80,0x00,0x00,0x04,0xF0,0xB5,0x82,0xB0,0x36,0x48, \ +0x34,0x4E,0xC5,0x1D,0x09,0x35,0x33,0x4C,0xC7,0x1D,0x69,0x37,0x00,0x22,0xD2, \ +0x43,0x00,0x92,0x01,0x22,0x01,0xAB,0x31,0x48,0x32,0x49,0x02,0xF0,0x62,0xFE, \ +0x01,0x98,0x41,0x0A,0x0C,0xD3,0x80,0x20,0x38,0x71,0x00,0x20,0x78,0x71,0x38, \ +0x79,0x00,0x0A,0x4C,0xD3,0x07,0xF0,0xDE,0xFE,0x38,0x79,0x00,0x0A,0xFA,0xD2, \ +0x46,0xE0,0x41,0x08,0x0F,0xD3,0x30,0x1C,0xFF,0xF7,0x8B,0xF9,0x27,0x48,0xC1, \ +0x6B,0x09,0x78,0x40,0x29,0x3C,0xD0,0x07,0xF0,0xC0,0xFB,0x23,0x48,0xC0,0x6B, \ +0x00,0x78,0x40,0x28,0xF8,0xD1,0x34,0xE0,0x41,0x0D,0x03,0xD3,0x40,0x20,0x07, \ +0xF0,0x83,0xFA,0x2E,0xE0,0x41,0x09,0x03,0xD3,0x50,0x20,0x07,0xF0,0x7D,0xFA, \ +0x28,0xE0,0x40,0x0F,0x03,0xD3,0x80,0x20,0x07,0xF0,0x77,0xFA,0x22,0xE0,0x00, \ +0x21,0x79,0x22,0x52,0x05,0x17,0x48,0x91,0x82,0x10,0x82,0x91,0x80,0x64,0x20, \ +0x10,0x80,0x02,0x20,0x90,0x82,0x12,0x48,0x21,0x70,0x01,0x6B,0x09,0x7B,0x09, \ +0x0A,0x06,0xD3,0x00,0xF0,0x21,0xF8,0x0E,0x48,0x00,0x6B,0x00,0x7B,0x00,0x0A, \ +0xF8,0xD2,0xC0,0x20,0x02,0xF0,0x75,0xFE,0x00,0x21,0x79,0x22,0x52,0x05,0x91, \ +0x82,0x11,0x83,0x21,0x70,0x02,0xF0,0x6D,0xFE,0x00,0x20,0xA8,0x73,0x9C,0xE7, \ +0xEE,0x07,0x00,0x02,0xCC,0x01,0x00,0x02,0x18,0x09,0x00,0x02,0x0C,0x07,0x00, \ +0x02,0x11,0x11,0x10,0x10,0x8C,0x01,0x00,0x02,0xE8,0x80,0x00,0x00,0xF0,0xB5, \ +0x83,0xB0,0x87,0x4D,0x00,0x24,0x28,0x6B,0x47,0x68,0x39,0x79,0x49,0x08,0x01, \ +0xD3,0x01,0x26,0x00,0xE0,0x00,0x26,0x82,0x4D,0x29,0x7A,0x00,0x29,0x73,0xD1, \ +0x81,0x4A,0xD1,0x78,0x00,0x29,0x0C,0xD0,0x39,0x78,0x08,0x29,0x09,0xD1,0x7F, \ 
+0x4A,0x91,0x78,0x00,0x29,0x05,0xD0,0x81,0x7D,0xD3,0x78,0x99,0x42,0x01,0xDD, \ +0xD1,0x78,0x81,0x75,0x78,0x4B,0x7A,0x49,0x1D,0x6B,0x00,0x22,0x68,0x68,0x00, \ +0x2E,0x48,0x66,0x5A,0x72,0x65,0xD1,0x77,0x48,0xA9,0x8A,0x00,0x89,0x04,0x38, \ +0x81,0x42,0x60,0xDD,0x70,0x4A,0x01,0x21,0x11,0x72,0xF9,0x1D,0x17,0x31,0x11, \ +0x61,0x6D,0x49,0x04,0x04,0x24,0x0C,0x4C,0x81,0xA8,0x8A,0x01,0x1B,0xE0,0x1F, \ +0x11,0x38,0x02,0x90,0x03,0xF0,0x5B,0xFA,0x68,0x49,0x01,0x30,0xC8,0x81,0xA8, \ +0x8A,0x01,0x1B,0x02,0x98,0x03,0xF0,0x53,0xFA,0xC8,0x1D,0x63,0x49,0x11,0x30, \ +0x88,0x81,0x88,0x89,0x18,0x28,0x04,0xD1,0x60,0x4B,0x9C,0x81,0xD8,0x89,0x01, \ +0x38,0xD8,0x81,0x78,0x78,0xC0,0x09,0x06,0xD3,0x5C,0x4B,0xE0,0x1D,0x01,0x30, \ +0x58,0x81,0x98,0x89,0x08,0x30,0x98,0x81,0x59,0x4C,0xA8,0x7D,0xA1,0x89,0x00, \ +0xF0,0xA6,0xFB,0x60,0x61,0x20,0x6B,0x80,0x7D,0x61,0x89,0x00,0xF0,0xA0,0xFB, \ +0x56,0x49,0x54,0x4A,0xC8,0x66,0x60,0x89,0x88,0x66,0x20,0x6B,0x80,0x7D,0x92, \ +0x7A,0x90,0x42,0x00,0xDA,0x02,0x1C,0x53,0x4B,0x52,0x00,0x9A,0x5A,0x52,0x4B, \ +0xDB,0x88,0x5B,0x00,0xD2,0x18,0xE3,0x89,0x65,0x7A,0x01,0x3B,0xAB,0x42,0x04, \ +0xD1,0x61,0x69,0x54,0x18,0x00,0xE0,0x25,0xE0,0x01,0xE0,0xC9,0x6E,0x8C,0x18, \ +0x00,0xF0,0xE2,0xFF,0x04,0x19,0x78,0x78,0x04,0x23,0x18,0x43,0x78,0x70,0x12, \ +0xE0,0xFF,0xE7,0x40,0x48,0x42,0x49,0x02,0x72,0xA8,0x8A,0x88,0x66,0x78,0x78, \ +0xC0,0x09,0x02,0xD3,0x88,0x6E,0x08,0x30,0x88,0x66,0x3D,0x49,0x89,0x6E,0xA8, \ +0x7D,0x00,0xF0,0x69,0xFB,0x3B,0x49,0xC8,0x66,0x37,0x48,0x32,0x1C,0x00,0x6B, \ +0x81,0x7D,0xB8,0x1C,0x23,0x1C,0xFF,0xF7,0x9B,0xF8,0x30,0x1C,0x00,0xF0,0xD8, \ +0xFA,0x00,0x28,0x0A,0xD0,0x02,0x20,0x33,0x49,0xC2,0x1E,0x48,0x74,0x00,0x92, \ +0x01,0x22,0x11,0x21,0x34,0x48,0x01,0xAB,0x02,0xF0,0x3F,0xFD,0x2E,0x48,0x00, \ +0x24,0x2A,0x4D,0x44,0x74,0x28,0x6B,0x41,0x7B,0x00,0x29,0x0C,0xD1,0x38,0x1C, \ +0x00,0xF0,0x70,0xF8,0x27,0x4A,0x54,0x70,0x10,0x78,0x01,0x30,0x10,0x70,0x00, \ +0xF0,0xBB,0xFB,0x00,0xF0,0x55,0xF8,0x3E,0xE0,0xE9,0x1D,0x39,0x31,0x0A,0x78, \ +0x01,0x2A,0x05,0xD1,0x08,0x22,0x42,0x73,0x0C,0x70,0x00,0xF0,0x4A,0xF8,0x33, \ +0xE0,0x40,0x7B,0x04,0x28,0x1F,0xD0,0x00,0xF0,0x98,0xFB,0x28,0x6B,0x81,0x7B, \ +0x01,0x31,0x81,0x73,0x78,0x78,0x08,0x23,0x18,0x43,0x78,0x70,0x38,0x78,0x08, \ +0x28,0x12,0xD1,0x14,0x48,0xC0,0x78,0x00,0x28,0x0E,0xD0,0x13,0x4A,0x18,0x4B, \ +0x50,0x78,0x01,0x30,0x00,0x06,0x00,0x0E,0x50,0x70,0x29,0x6B,0x89,0x7D,0x59, \ +0x5C,0x88,0x42,0x00,0xDD,0x14,0x70,0x00,0xF0,0x8A,0xFB,0x28,0x6B,0x0D,0x4A, \ +0x81,0x7B,0x12,0x7C,0x91,0x42,0x04,0xDA,0x44,0x73,0x29,0x6B,0x82,0x20,0x08, \ +0x73,0x05,0xE0,0x01,0x21,0x38,0x1C,0x00,0xF0,0x29,0xF8,0x00,0xF0,0x15,0xF8, \ +0x03,0xB0,0xF0,0xBD,0x00,0x00,0x8C,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0xC0, \ +0x01,0x00,0x02,0x18,0x09,0x00,0x02,0xC0,0x00,0x00,0x02,0xAC,0x01,0x00,0x02, \ +0x00,0x00,0x00,0x02,0xEC,0x06,0x00,0x02,0xCE,0x01,0x00,0x02,0x05,0x48,0x00, \ +0x21,0x01,0x72,0x41,0x72,0x04,0x49,0x05,0x4A,0x89,0x89,0x91,0x87,0x00,0x6B, \ +0x10,0x21,0x01,0x73,0xF7,0x46,0x8C,0x01,0x00,0x02,0xC0,0x00,0x00,0x02,0x18, \ +0x09,0x00,0x02,0x80,0xB4,0x09,0x4A,0x01,0x27,0x53,0x79,0x08,0x4A,0x03,0x2B, \ +0x02,0xD1,0xD7,0x70,0x80,0xBC,0xF7,0x46,0x40,0x78,0x40,0x09,0xFA,0xD3,0x00, \ +0x29,0x02,0xD1,0x00,0x20,0xD0,0x70,0xF5,0xE7,0xD7,0x70,0xF3,0xE7,0xB0,0x00, \ +0x00,0x02,0x88,0x09,0x00,0x02,0x90,0xB5,0xC0,0x20,0x02,0xF0,0x0A,0xFD,0x0A, \ +0x4C,0x03,0x21,0xA1,0x73,0x02,0xF0,0x05,0xFD,0x60,0x7F,0x01,0x28,0x0C,0xD0, \ +0xC0,0x20,0x02,0xF0,0xFF,0xFC,0x07,0x1C,0xA0,0x7B,0x03,0x28,0x02,0xD1,0x00, \ +0x20,0x00,0xF0,0x6E,0xF9,0x38,0x1C,0x02,0xF0,0xF5,0xFC,0x90,0xBD,0x18,0x09, \ 
+0x00,0x02,0x90,0xB5,0xFE,0xF7,0x0F,0xFF,0x1E,0x4F,0xF9,0x6A,0x40,0x1A,0x41, \ +0x00,0x78,0x7F,0x49,0x08,0x01,0x28,0x01,0xD1,0xB8,0x6A,0x00,0xE0,0x78,0x6A, \ +0x3B,0x68,0x19,0x4A,0x00,0x2B,0x1C,0xD1,0x84,0x00,0x13,0x8B,0x24,0x18,0xA4, \ +0x00,0xE2,0x18,0x51,0x1A,0x8A,0x42,0x00,0xD2,0x11,0x1C,0x00,0x28,0x0F,0xD1, \ +0x01,0x20,0x78,0x72,0xB8,0x7B,0x03,0x28,0x05,0xD1,0x0D,0x29,0x04,0xD9,0xC8, \ +0x1F,0x01,0x38,0x00,0xF0,0x40,0xF9,0x90,0xBD,0x00,0x20,0x00,0xF0,0x3C,0xF9, \ +0x90,0xBD,0x3B,0x62,0x09,0xE0,0x83,0x00,0x52,0x8B,0x18,0x18,0x80,0x00,0x80, \ +0x18,0x41,0x1A,0x88,0x42,0x00,0xD2,0x01,0x1C,0x3A,0x62,0x08,0x20,0x00,0xF0, \ +0x4C,0xFB,0x04,0x20,0x78,0x72,0x90,0xBD,0x00,0x00,0x18,0x09,0x00,0x02,0x8C, \ +0x01,0x00,0x02,0x02,0x49,0x02,0x0A,0x8A,0x61,0xC8,0x61,0xF7,0x46,0x00,0x00, \ +0x40,0x00,0x00,0x05,0xB0,0xB5,0x82,0xB0,0x11,0x4D,0x01,0x20,0x68,0x74,0x11, \ +0x4F,0x11,0x48,0x00,0x24,0xBC,0x82,0x38,0x82,0xBC,0x80,0x1E,0x20,0x38,0x80, \ +0x02,0x20,0xB8,0x82,0xC2,0x1E,0x00,0x92,0x01,0x22,0x1A,0x21,0x0C,0x48,0x01, \ +0xAB,0x02,0xF0,0x2D,0xFC,0x6C,0x74,0x3C,0x83,0xBC,0x82,0x01,0x98,0x81,0x08, \ +0x06,0xD3,0x00,0x09,0x02,0xD3,0x82,0x20,0x02,0xB0,0xB0,0xBD,0x20,0x1C,0xFB, \ +0xE7,0x42,0x20,0xF9,0xE7,0x18,0x09,0x00,0x02,0x20,0x00,0x20,0x0F,0xE8,0x80, \ +0x00,0x00,0xCC,0x06,0x00,0x02,0xF0,0xB5,0xFF,0x20,0x01,0x24,0xA4,0x06,0xF5, \ +0x30,0x21,0x69,0x89,0x08,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD8, \ +0x60,0x4E,0x00,0x27,0xB0,0x7D,0x00,0x28,0x0F,0xD0,0x30,0x6E,0xFF,0xF7,0xB6, \ +0xFF,0x01,0x23,0x9B,0x03,0x03,0x22,0xF1,0x6D,0xB0,0x6D,0x12,0x03,0x00,0xF0, \ +0xC2,0xF8,0xB7,0x75,0x01,0x20,0xFE,0xF7,0x7C,0xFE,0xF0,0xBD,0xF0,0x7B,0x56, \ +0x4D,0x00,0x28,0x18,0xD0,0x28,0x6B,0x55,0x49,0x80,0x7D,0x89,0x7A,0x88,0x42, \ +0x00,0xDB,0x08,0x1C,0x53,0x49,0x40,0x00,0x08,0x5A,0xFF,0xF7,0x99,0xFF,0x01, \ +0x23,0x9B,0x03,0x03,0x22,0x12,0x03,0x10,0x21,0x4F,0x48,0x00,0xF0,0xA5,0xF8, \ +0x01,0x20,0xFE,0xF7,0x60,0xFE,0xF7,0x73,0xF0,0xBD,0xF0,0x7D,0x00,0x28,0xDE, \ +0xD0,0x46,0x4E,0xF0,0x6E,0xFF,0xF7,0x85,0xFF,0x76,0x6E,0x70,0x78,0xC0,0x09, \ +0x43,0xD3,0x42,0x4A,0xD0,0x7A,0x00,0x28,0x00,0xD0,0xD7,0x72,0xE0,0x68,0x00, \ +0xF0,0x47,0xFF,0x01,0x23,0x9B,0x03,0x9A,0x08,0x1C,0x21,0x41,0x48,0x00,0xF0, \ +0x86,0xF8,0xFF,0x20,0x40,0x49,0xF5,0x30,0x4A,0x68,0xD2,0x0B,0x03,0xD3,0x02, \ +0x1C,0x01,0x38,0x00,0x2A,0xF8,0xD1,0x3C,0x48,0x03,0x21,0x00,0x78,0x49,0x06, \ +0x02,0x28,0x02,0xD1,0x81,0x20,0x88,0x60,0x01,0xE0,0x01,0x20,0x88,0x60,0x2A, \ +0x7A,0x00,0x2A,0x0F,0xD0,0x68,0x7A,0x00,0x28,0x03,0xD0,0x00,0x28,0x01,0xDD, \ +0x28,0x69,0x01,0xE0,0x28,0x69,0x06,0x38,0x69,0x89,0x20,0x39,0x02,0x2A,0x08, \ +0xD1,0x28,0x4A,0xD7,0x75,0x05,0xE0,0x26,0x4A,0x50,0x6E,0x91,0x6E,0x18,0x30, \ +0x20,0x39,0xD7,0x75,0x0B,0x23,0xDB,0x02,0x01,0x22,0x52,0x03,0x00,0xF0,0x52, \ +0xF8,0x2E,0xE0,0x28,0x7A,0x00,0x28,0x20,0xD0,0x68,0x7A,0x00,0x28,0x01,0xD0, \ +0x18,0x24,0x00,0xE0,0x1E,0x24,0x01,0x23,0x9B,0x03,0x1A,0x48,0x9A,0x08,0x21, \ +0x1C,0x40,0x6E,0x00,0xF0,0x40,0xF8,0xFF,0x20,0x1D,0x49,0xF5,0x30,0x4A,0x68, \ +0xD2,0x0B,0x03,0xD3,0x02,0x1C,0x01,0x38,0x00,0x2A,0xF8,0xD1,0x68,0x89,0x01, \ +0x1B,0x01,0x23,0x9B,0x03,0x5A,0x08,0x28,0x69,0x00,0xF0,0x2E,0xF8,0x08,0xE0, \ +0x0E,0x48,0x01,0x23,0x9B,0x03,0x03,0x22,0x81,0x6E,0x40,0x6E,0x12,0x03,0x00, \ +0xF0,0x24,0xF8,0x09,0x48,0xC7,0x75,0x08,0x48,0x47,0x77,0x50,0x30,0x07,0x71, \ +0x30,0x79,0x40,0x08,0x02,0xD2,0x28,0x7A,0x01,0x28,0x03,0xD1,0x01,0x20,0xFE, \ +0xF7,0xD3,0xFD,0xF0,0xBD,0x00,0x20,0xFE,0xF7,0xCF,0xFD,0xF0,0xBD,0x18,0x09, \ +0x00,0x02,0x8C,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0xB4,0x01,0x00,0x02,0xC0, \ 
+0x07,0x00,0x02,0x4C,0x07,0x00,0x02,0x40,0x00,0x00,0x04,0x58,0x00,0x00,0x02, \ +0xB0,0xB4,0x06,0x4C,0x1F,0x1C,0x65,0x68,0xEB,0x0B,0x04,0xD2,0x0A,0x43,0x21, \ +0x05,0x4A,0x63,0x88,0x63,0x67,0x60,0xB0,0xBC,0xF7,0x46,0x00,0x00,0x40,0x00, \ +0x00,0x04,0xF0,0xB5,0x52,0x49,0x07,0x1C,0x8A,0x7A,0x00,0x20,0x00,0x2A,0x61, \ +0xD1,0x0A,0x7A,0x00,0x2A,0x6B,0xD0,0x4A,0x7A,0x01,0x2A,0x5B,0xD1,0x0A,0x7B, \ +0x01,0x2A,0x58,0xD1,0xCA,0x7A,0x00,0x2A,0x55,0xD1,0xCE,0x1D,0x49,0x36,0xF1, \ +0x78,0xF5,0x1F,0x39,0x3D,0x00,0x29,0x0F,0xD1,0x45,0x49,0xCA,0x1D,0x69,0x32, \ +0x12,0x78,0x00,0x2A,0x09,0xD1,0x6A,0x7B,0x01,0x2A,0x06,0xD0,0x32,0x79,0x00, \ +0x2A,0x03,0xD1,0x0C,0x1C,0x89,0x7C,0x00,0x29,0x09,0xD0,0x3E,0x4A,0x3F,0x4B, \ +0xD1,0x79,0x99,0x71,0x3B,0x49,0x88,0x75,0x01,0x20,0xC8,0x75,0xCC,0x6E,0x21, \ +0xE0,0x21,0x7F,0x00,0x29,0x12,0xD0,0x39,0x4B,0x37,0x4A,0x19,0x6B,0x89,0x7D, \ +0x92,0x7A,0x91,0x42,0x01,0xDA,0x99,0x71,0x00,0xE0,0x9A,0x71,0xA0,0x75,0x01, \ +0x21,0xE1,0x73,0xE0,0x75,0x98,0x79,0x32,0x49,0x40,0x00,0x0C,0x5A,0x0B,0xE0, \ +0xE2,0x7E,0x21,0x1C,0x00,0x2A,0x27,0xD0,0xCC,0x6E,0x88,0x75,0x01,0x20,0x2C, \ +0x4B,0xC8,0x75,0x18,0x6B,0x80,0x7D,0x98,0x71,0x00,0xF0,0x40,0xF9,0x26,0x4B, \ +0xD8,0x7B,0x00,0x28,0x03,0xD0,0x00,0x20,0xFE,0xF7,0x93,0xFD,0x0C,0xE0,0x24, \ +0x4B,0x98,0x79,0x03,0x28,0x05,0xD1,0x20,0x4B,0x58,0x6B,0xFE,0xF7,0x8A,0xFD, \ +0x03,0xE0,0x3A,0xE0,0x00,0x20,0xFE,0xF7,0x85,0xFD,0x01,0x21,0x89,0x06,0x00, \ +0x2F,0x05,0xD0,0x05,0x2F,0x03,0xD9,0x48,0x6A,0x38,0x18,0x02,0xE0,0x2D,0xE0, \ +0x48,0x6A,0x0A,0x30,0x16,0x4B,0x02,0x22,0x9A,0x73,0x18,0x4B,0x92,0x03,0x5A, \ +0x60,0x08,0x62,0x6A,0x7B,0x01,0x2A,0x02,0xD0,0x32,0x79,0x00,0x2A,0x15,0xD0, \ +0x10,0x4A,0x8D,0x6A,0x4E,0x6A,0xD1,0x79,0x13,0x4A,0x12,0x4F,0x89,0x00,0x51, \ +0x58,0x0D,0x4B,0x08,0x18,0x38,0x60,0x98,0x79,0x00,0xF0,0x06,0xFD,0x39,0x68, \ +0x40,0x18,0x38,0x60,0xB0,0x42,0x00,0xD2,0x01,0x35,0x7D,0x60,0x38,0x1D,0x06, \ +0x4F,0x3C,0x60,0xB8,0x79,0x00,0xF0,0xF9,0xFC,0x39,0x68,0x40,0x18,0x38,0x60, \ +0x01,0x20,0xF0,0xBD,0x18,0x09,0x00,0x02,0x04,0x01,0x00,0x02,0x8C,0x01,0x00, \ +0x02,0xB4,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0xF0,0x07,0x00,0x02,0xF0,0x08, \ +0x00,0x02,0xF8,0xB5,0x38,0x49,0x04,0x1C,0x08,0x6B,0x37,0x4A,0x85,0x7D,0x46, \ +0x68,0x92,0x7A,0x00,0x27,0x95,0x42,0x00,0xDB,0x15,0x1C,0x34,0x49,0x80,0x8A, \ +0x49,0x89,0x88,0x42,0x2E,0xDD,0x00,0x2C,0x2C,0xD1,0x2F,0x49,0x48,0x7A,0x00, \ +0x28,0x28,0xD1,0x30,0x49,0xB4,0x20,0x08,0x70,0x2F,0x48,0x30,0x4A,0xC0,0x88, \ +0x41,0x00,0x09,0x18,0x68,0x00,0x10,0x5A,0x40,0x00,0x08,0x18,0x2D,0x49,0xC9, \ +0x6E,0x40,0x18,0x28,0x49,0x48,0x80,0x28,0x1C,0x00,0xF0,0xBB,0xFC,0x26,0x49, \ +0x49,0x88,0x40,0x18,0x24,0x49,0x48,0x80,0x31,0x1D,0x06,0x22,0x26,0x48,0x02, \ +0xF0,0x85,0xFE,0xF1,0x1D,0x03,0x31,0x06,0x22,0x24,0x48,0x02,0xF0,0x7F,0xFE, \ +0x01,0x20,0x20,0x49,0x01,0x26,0x08,0x77,0x03,0xE0,0x01,0x20,0x1E,0x49,0x00, \ +0x26,0xC8,0x76,0xFF,0xF7,0x82,0xFD,0xFF,0xF7,0xE8,0xFD,0x00,0x90,0x00,0x98, \ +0x00,0x28,0x1E,0xD1,0x12,0x49,0x00,0x2E,0x0A,0x6B,0x50,0x73,0x01,0xD1,0x00, \ +0x2C,0x01,0xD0,0x01,0x2E,0x19,0xD1,0x13,0x4A,0x68,0x00,0x10,0x5A,0x10,0x4A, \ +0xD2,0x88,0xC9,0x8B,0x80,0x18,0x41,0x18,0x01,0x20,0x00,0xF0,0x0C,0xF9,0x01, \ +0x2E,0x03,0xD1,0x0D,0x49,0x03,0x20,0x48,0x60,0x02,0xE0,0x0B,0x49,0x02,0x20, \ +0x48,0x60,0x01,0x27,0x03,0xE0,0x03,0x49,0x04,0x20,0x09,0x6B,0x48,0x73,0x38, \ +0x1C,0xF8,0xBD,0x00,0x00,0x8C,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0xC0,0x00, \ +0x00,0x02,0xC0,0x07,0x00,0x02,0x00,0x00,0x00,0x02,0xAC,0x01,0x00,0x02,0x18, \ +0x09,0x00,0x02,0xC4,0x07,0x00,0x02,0xCA,0x07,0x00,0x02,0xF0,0xB5,0x04,0x31, \ 
+0xCF,0x00,0x00,0x26,0x17,0x4C,0x01,0x28,0x66,0x63,0x15,0xD0,0x02,0x28,0x15, \ +0xD0,0x03,0x28,0x25,0xD1,0x0B,0x20,0x39,0x1C,0x02,0xF0,0x6A,0xFE,0x0D,0x1C, \ +0x79,0x1A,0x0B,0x20,0x02,0xF0,0x65,0xFE,0x07,0x1C,0x00,0x2D,0x18,0xD9,0x01, \ +0x37,0x04,0x2D,0x13,0xD2,0x01,0x20,0x60,0x63,0x13,0xE0,0x7F,0x08,0x11,0xE0, \ +0x79,0x00,0x0B,0x20,0x0F,0x1C,0x02,0xF0,0x55,0xFE,0x0C,0x1C,0x79,0x1A,0x0B, \ +0x20,0x02,0xF0,0x50,0xFE,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37,0x02,0xE0, \ +0x66,0x63,0x00,0xE0,0x66,0x63,0x38,0x1C,0xF0,0xBD,0x18,0x09,0x00,0x02,0xFF, \ +0x21,0x10,0x48,0x31,0x31,0x01,0x80,0x0F,0x49,0x89,0x8B,0xCA,0x1D,0x31,0x32, \ +0x42,0x80,0xCA,0x1D,0x0E,0x32,0x82,0x80,0x0B,0x31,0xC1,0x80,0x0B,0x48,0xA0, \ +0x21,0x01,0x80,0x50,0x21,0x41,0x80,0x1E,0x21,0x81,0x80,0x0F,0x21,0xC1,0x80, \ +0x08,0x48,0xC0,0x21,0x01,0x60,0x60,0x21,0x41,0x60,0x23,0x21,0x81,0x60,0x12, \ +0x21,0xC1,0x60,0xF7,0x46,0x00,0x00,0xAC,0x01,0x00,0x02,0x8C,0x01,0x00,0x02, \ +0xB4,0x01,0x00,0x02,0xF0,0x08,0x00,0x02,0x00,0xB5,0x05,0x48,0x81,0x79,0x42, \ +0x79,0x91,0x42,0x03,0xD0,0x81,0x79,0x41,0x71,0xFE,0xF7,0x1C,0xFC,0x00,0xBD, \ +0x00,0x00,0x8C,0x01,0x00,0x02,0x05,0x48,0x81,0x8F,0x49,0x00,0x01,0x31,0x81, \ +0x87,0x04,0x49,0x82,0x8F,0xC9,0x89,0x8A,0x42,0x00,0xDD,0x81,0x87,0xF7,0x46, \ +0x18,0x09,0x00,0x02,0xC0,0x00,0x00,0x02,0x1A,0x49,0x19,0x48,0x09,0x6B,0x1A, \ +0x4B,0x89,0x7D,0x42,0x78,0x5B,0x5C,0x00,0x21,0x9A,0x42,0x15,0xDD,0x41,0x70, \ +0x01,0x70,0xC2,0x78,0x01,0x21,0x00,0x2A,0x0D,0xDD,0xC2,0x78,0x04,0x2A,0x0A, \ +0xDA,0xC2,0x78,0x01,0x3A,0xC2,0x70,0xC2,0x78,0x00,0x2A,0x04,0xD1,0x10,0x4A, \ +0x52,0x7A,0x01,0x2A,0x00,0xD1,0xC1,0x70,0x81,0x70,0xF7,0x46,0x82,0x78,0x00, \ +0x2A,0xFB,0xD0,0x02,0x78,0x02,0x2A,0xF8,0xDD,0x41,0x70,0x01,0x70,0xC2,0x78, \ +0x01,0x32,0x12,0x06,0x12,0x0E,0xC2,0x70,0x03,0x2A,0xEF,0xDD,0x81,0x70,0x03, \ +0x21,0xC1,0x70,0xF7,0x46,0x00,0x00,0xC0,0x01,0x00,0x02,0x8C,0x01,0x00,0x02, \ +0xCE,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0x00,0xB5,0x02,0xF0,0xF9,0xFD,0x02, \ +0x49,0x8A,0x8F,0x10,0x40,0x48,0x62,0x00,0xBD,0x18,0x09,0x00,0x02,0xB0,0xB5, \ +0x01,0x20,0x80,0x06,0x85,0x6A,0x41,0x6A,0x0E,0x48,0x00,0x88,0x84,0x02,0x20, \ +0x1C,0x02,0xF0,0xAB,0xFD,0x0F,0x1C,0x00,0x2D,0x10,0xD9,0x20,0x1C,0x29,0x1C, \ +0x02,0xF0,0xA4,0xFD,0x0D,0x1C,0x00,0x21,0xC9,0x43,0x20,0x1C,0x02,0xF0,0x9E, \ +0xFD,0x48,0x1C,0x45,0x43,0xE9,0x19,0x20,0x1C,0x02,0xF0,0x98,0xFD,0x0F,0x1C, \ +0x38,0x1C,0xB0,0xBD,0x00,0x00,0x80,0x00,0x00,0x02,0x90,0xB5,0x0C,0x1C,0x07, \ +0x1C,0x00,0xF0,0x15,0xF8,0x01,0x20,0x80,0x06,0x40,0x6A,0x06,0x4B,0x20,0x18, \ +0xB9,0x00,0xC9,0x18,0xC8,0x62,0x01,0x21,0x78,0x1E,0x81,0x40,0x03,0x48,0x82, \ +0x69,0x11,0x43,0x81,0x61,0x90,0xBD,0x00,0x00,0x40,0x00,0x00,0x04,0x80,0x00, \ +0x00,0x04,0x80,0xB4,0x47,0x1E,0x01,0x20,0x04,0x49,0xB8,0x40,0x8A,0x69,0xC0, \ +0x43,0x10,0x40,0x88,0x61,0x80,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00,0x00,0x04, \ +0xF0,0xB5,0x84,0xB0,0x0F,0x20,0x00,0x06,0x00,0x88,0xCE,0x4C,0x00,0x27,0x03, \ +0x90,0xE0,0x7C,0x00,0x28,0x04,0xD0,0x03,0x98,0x06,0xF0,0xCA,0xF9,0x00,0x28, \ +0x73,0xD1,0x03,0x98,0xC9,0x4B,0x18,0x40,0x15,0xD0,0xC8,0x48,0x00,0x68,0x02, \ +0x90,0x02,0x98,0x80,0x08,0x02,0xD2,0x02,0x98,0x40,0x08,0x0C,0xD3,0x01,0x20, \ +0x80,0x06,0x00,0x6B,0x02,0x99,0x40,0x00,0x40,0x08,0xC9,0x08,0x02,0xD3,0xE0, \ +0x62,0x02,0x27,0x01,0xE0,0x20,0x63,0x01,0x27,0x03,0x98,0xBE,0x4B,0xBA,0x49, \ +0x18,0x40,0xCD,0x1D,0xCE,0x1D,0x49,0x36,0x09,0x35,0x00,0x28,0x59,0xD0,0xD8, \ +0x04,0xC1,0x6B,0x01,0x91,0x01,0x99,0x89,0x09,0x0D,0xD3,0x00,0x6A,0x40,0x00, \ +0x40,0x08,0x20,0x63,0xA0,0x7A,0x00,0x28,0x01,0xD0,0x00,0xF0,0xA9,0xFA,0xFF, \ 
+0xF7,0x85,0xFC,0x01,0x27,0x01,0x22,0x62,0x73,0x01,0x98,0x12,0x23,0x18,0x40, \ +0x41,0xD0,0x00,0x20,0x60,0x73,0xA0,0x7A,0x00,0x28,0x18,0xD0,0x01,0x98,0x80, \ +0x08,0x0E,0xD3,0x20,0x6B,0x21,0x6E,0x40,0x18,0xE0,0x62,0xA8,0x48,0x00,0x78, \ +0x00,0xF0,0xF6,0xFA,0xE1,0x6A,0x40,0x18,0xE0,0x62,0xE0,0x6A,0x40,0x00,0x40, \ +0x08,0xE0,0x62,0x01,0x20,0xFE,0xF7,0x00,0xFB,0x00,0x20,0xA0,0x72,0xA0,0x75, \ +0x0B,0xE0,0xFF,0xF7,0x3E,0xFF,0x01,0x98,0x80,0x08,0x06,0xD3,0x9E,0x49,0x20, \ +0x6B,0x09,0x68,0x40,0x18,0x40,0x00,0x40,0x08,0xE0,0x62,0x00,0x2F,0x00,0xD1, \ +0x02,0x27,0x01,0x98,0x40,0x09,0x02,0xD3,0x01,0x20,0xFE,0xF7,0xE7,0xFA,0xA0, \ +0x7B,0x02,0x28,0x0B,0xD1,0x00,0x20,0x00,0xE0,0x13,0xE1,0x68,0x73,0x30,0x71, \ +0x00,0xF0,0x0F,0xFA,0x01,0x99,0x91,0x48,0x00,0x22,0x01,0xF0,0x58,0xFF,0x03, \ +0x98,0x90,0x4B,0x18,0x40,0x73,0xD0,0x19,0x05,0xC8,0x68,0x00,0x90,0x00,0x98, \ +0x40,0x09,0x14,0xD3,0xE0,0x7A,0x03,0x28,0x11,0xD1,0x04,0x20,0xE0,0x72,0x00, \ +0x98,0xC9,0x68,0x08,0x43,0x88,0x49,0x00,0x90,0x08,0x68,0x40,0x68,0x40,0x78, \ +0xC0,0x09,0x05,0xD3,0x00,0x98,0x40,0x08,0x02,0xD2,0x84,0x49,0x00,0x20,0x48, \ +0x71,0x00,0x98,0x80,0x08,0x3C,0xD3,0xB0,0x79,0x01,0x28,0x0E,0xD1,0xE0,0x1D, \ +0x69,0x30,0x81,0x7A,0x01,0x29,0x09,0xD1,0x02,0x21,0x81,0x72,0x7D,0x48,0x01, \ +0x8B,0xC0,0x8A,0x08,0x1A,0x81,0x02,0x04,0x20,0xFF,0xF7,0x16,0xFF,0x60,0x7A, \ +0x06,0x28,0x04,0xD1,0x02,0x21,0x61,0x72,0x08,0x20,0xFF,0xF7,0x28,0xFF,0x00, \ +0x20,0x74,0x49,0x01,0x22,0xC8,0x80,0x22,0x73,0xE0,0x72,0xA0,0x72,0xA0,0x75, \ +0x20,0x74,0x08,0x71,0x4A,0x71,0xFE,0xF7,0x41,0xFC,0x00,0x99,0x08,0x43,0x00, \ +0x90,0x60,0x68,0x04,0x28,0x0F,0xD1,0x01,0x20,0xFF,0xF7,0x12,0xFF,0x20,0x7B, \ +0x01,0x28,0x09,0xD1,0xE0,0x7A,0x00,0x28,0x06,0xD1,0xFE,0xF7,0x4C,0xFE,0x00, \ +0x22,0x10,0x21,0x66,0x48,0x01,0xF0,0xF9,0xFE,0x00,0x98,0x80,0x09,0x68,0xD3, \ +0x01,0x20,0x20,0x73,0x20,0x74,0x5F,0x49,0x87,0x06,0xF8,0x6A,0x09,0x68,0x48, \ +0x61,0xF8,0x6A,0x40,0x00,0x40,0x08,0xE0,0x62,0x60,0x7A,0x05,0x28,0x0C,0xD1, \ +0xA0,0x6B,0x79,0x6A,0x88,0x42,0x08,0xD2,0x02,0x21,0x61,0x72,0x00,0xE0,0x88, \ +0xE0,0x59,0x48,0x80,0x23,0x81,0x69,0x99,0x43,0x81,0x61,0x00,0x98,0xC0,0x08, \ +0x0E,0xD3,0x52,0x48,0x01,0x21,0x01,0x71,0xC1,0x88,0x00,0x29,0x11,0xDD,0xC1, \ +0x88,0x01,0x23,0xDB,0x03,0x99,0x42,0x0C,0xDA,0xC0,0x88,0xFF,0xF7,0xBB,0xF8, \ +0x08,0xE0,0x4A,0x49,0x00,0x20,0x08,0x71,0xA0,0x72,0xC1,0x20,0x20,0x60,0x01, \ +0x20,0xFE,0xF7,0x3F,0xFA,0x3F,0x48,0x00,0x68,0x02,0x90,0x02,0x98,0xC0,0x08, \ +0x01,0xD3,0x02,0x27,0x04,0xE0,0x38,0x6B,0x40,0x00,0x40,0x08,0x20,0x63,0x01, \ +0x27,0xA0,0x7E,0x00,0x28,0x23,0xD0,0x3E,0x48,0x00,0x79,0x00,0x28,0x1F,0xD0, \ +0x20,0x7C,0x00,0x28,0x1C,0xD0,0x20,0x68,0x00,0x28,0x19,0xD1,0x06,0x20,0xFF, \ +0xF7,0xAE,0xFE,0x00,0x20,0xE8,0x73,0xA0,0x76,0x70,0x70,0x69,0x7B,0x01,0x29, \ +0x02,0xD0,0x69,0x7B,0x03,0x29,0x0C,0xD1,0x69,0x7B,0x01,0x29,0x08,0xD1,0xA1, \ +0x73,0x68,0x73,0x00,0x22,0x10,0x21,0x2C,0x48,0x01,0xF0,0x8E,0xFE,0x01,0xE0, \ +0x02,0xE0,0x68,0x73,0x00,0x20,0xA0,0x76,0xE0,0x7A,0x04,0x28,0x2F,0xD1,0x20, \ +0x7C,0x00,0x28,0x2C,0xD0,0x60,0x7B,0x00,0x28,0x02,0xD1,0x00,0x2F,0x00,0xD1, \ +0x02,0x27,0x00,0x20,0xE0,0x72,0x24,0x4D,0x20,0x74,0x29,0x79,0x01,0x29,0x15, \ +0xD1,0x21,0x68,0x00,0x29,0x12,0xD1,0x69,0x79,0x00,0x29,0x0F,0xD0,0x1D,0x49, \ +0x09,0x68,0x48,0x72,0xA9,0x68,0xE9,0x60,0x28,0x70,0xFE,0xF7,0xC8,0xFA,0x28, \ +0x78,0x01,0x28,0x04,0xD1,0x00,0x22,0x01,0x21,0x1A,0x48,0x01,0xF0,0x60,0xFE, \ +0x60,0x68,0x04,0x28,0x06,0xD1,0xFE,0xF7,0xA9,0xFD,0x00,0x22,0x10,0x21,0x15, \ +0x48,0x01,0xF0,0x56,0xFE,0x01,0x2F,0x02,0xD1,0x00,0xF0,0x10,0xF9,0x03,0xE0, \ 
+0x02,0x2F,0x01,0xD1,0x00,0xF0,0x3B,0xF9,0x03,0x98,0x10,0x4B,0x18,0x40,0x01, \ +0xD0,0x01,0xF0,0xB9,0xF8,0x04,0xB0,0xF0,0xBD,0x00,0x00,0x18,0x09,0x00,0x02, \ +0x20,0x20,0x00,0x00,0x40,0x00,0x00,0x04,0x80,0x80,0x00,0x00,0x92,0x01,0x00, \ +0x02,0x8C,0x01,0x00,0x02,0xCC,0x06,0x00,0x02,0x40,0x40,0x00,0x00,0x48,0x01, \ +0x00,0x02,0xD4,0x01,0x00,0x02,0xC0,0x00,0x00,0x02,0x2C,0x07,0x00,0x02,0x80, \ +0x00,0x00,0x04,0x08,0x08,0x00,0x00,0xF0,0xB5,0x0F,0x20,0x00,0x06,0x05,0x89, \ +0x5F,0x48,0x60,0x4E,0x28,0x40,0x01,0x24,0x00,0x28,0x66,0xD0,0x5E,0x49,0xCF, \ +0x69,0x78,0x08,0x3E,0xD3,0x88,0x69,0x40,0x08,0x3B,0xD3,0x88,0x69,0xA0,0x43, \ +0x88,0x61,0x5A,0x48,0x41,0x68,0x04,0x29,0x0A,0xD1,0xFE,0xF7,0x5C,0xFD,0x01, \ +0x20,0xFE,0xF7,0x8B,0xF9,0x00,0x22,0x10,0x21,0x56,0x48,0x01,0xF0,0x06,0xFE, \ +0x29,0xE0,0x42,0x68,0x54,0x49,0x02,0x2A,0x05,0xD1,0x09,0x68,0x4C,0x73,0x53, \ +0x49,0x09,0x68,0x4C,0x70,0x05,0xE0,0x42,0x68,0x03,0x2A,0x02,0xD1,0x09,0x68, \ +0x02,0x22,0x4A,0x73,0x4B,0x48,0x44,0x60,0x00,0xF0,0xA3,0xF8,0x01,0x20,0xFE, \ +0xF7,0x6E,0xF9,0x47,0x48,0x40,0x7C,0x01,0x28,0x05,0xD1,0x00,0x22,0x10,0x21, \ +0x30,0x1C,0x01,0xF0,0xE5,0xFD,0x08,0xE0,0x42,0x48,0x40,0x7C,0x02,0x28,0x04, \ +0xD1,0x00,0x22,0x10,0x21,0x43,0x48,0x01,0xF0,0xDB,0xFD,0x3D,0x48,0x80,0x69, \ +0x00,0x0A,0x32,0xD3,0x38,0x0A,0x30,0xD3,0x3A,0x48,0x80,0x23,0x81,0x69,0x99, \ +0x43,0x81,0x61,0x38,0x48,0x41,0x7A,0x05,0x29,0x02,0xD0,0x41,0x7A,0x06,0x29, \ +0x15,0xD1,0x00,0x21,0x81,0x63,0x01,0x7A,0x01,0x29,0x0D,0xD1,0x44,0x72,0x81, \ +0x7A,0x00,0x29,0x1B,0xD1,0x01,0x7B,0x01,0x29,0x18,0xD1,0xC0,0x7A,0x00,0x28, \ +0x15,0xD1,0xFF,0xF7,0x22,0xFA,0x12,0xE0,0x3A,0xE0,0x02,0x22,0x42,0x72,0x0E, \ +0xE0,0x44,0x72,0x41,0x7F,0x01,0x29,0x02,0xD1,0x00,0x21,0x81,0x62,0x01,0xE0, \ +0x00,0x21,0x41,0x62,0x80,0x7B,0x03,0x28,0x02,0xD1,0x00,0x20,0xFF,0xF7,0x76, \ +0xFB,0x38,0x09,0x05,0xD3,0x20,0x48,0x80,0x69,0x00,0x09,0x01,0xD3,0x04,0xF0, \ +0xA4,0xFB,0xB8,0x08,0x16,0xD3,0x1C,0x48,0x81,0x69,0x89,0x08,0x12,0xD3,0x81, \ +0x69,0x02,0x23,0x99,0x43,0x81,0x61,0x19,0x48,0xC1,0x1D,0x49,0x31,0x89,0x79, \ +0x05,0x29,0x08,0xD1,0x1B,0x49,0x49,0x79,0x03,0x29,0x04,0xD1,0x70,0x30,0x81, \ +0x78,0x08,0x23,0x19,0x43,0x81,0x70,0xB8,0x09,0x05,0xD3,0x10,0x48,0x80,0x69, \ +0x80,0x09,0x01,0xD3,0x07,0xF0,0x21,0xFC,0x14,0x48,0x28,0x40,0x06,0xD0,0x13, \ +0x48,0x00,0x21,0x04,0x70,0x79,0x20,0x40,0x05,0x01,0x83,0x81,0x82,0xFF,0x20, \ +0x02,0x30,0x28,0x40,0x06,0xD0,0x07,0xF0,0x57,0xFC,0x00,0x22,0x10,0x21,0x30, \ +0x1C,0x01,0xF0,0x66,0xFD,0xF0,0xBD,0x00,0x00,0x10,0x10,0x00,0x00,0xCC,0x06, \ +0x00,0x02,0x80,0x00,0x00,0x04,0x18,0x09,0x00,0x02,0x2C,0x07,0x00,0x02,0xBC, \ +0x01,0x00,0x02,0xC8,0x01,0x00,0x02,0xEC,0x06,0x00,0x02,0xB0,0x00,0x00,0x02, \ +0x02,0x02,0x00,0x00,0xCC,0x01,0x00,0x02,0x04,0x48,0x01,0x21,0x81,0x73,0x00, \ +0x21,0xC1,0x75,0xC1,0x73,0xC1,0x76,0x01,0x77,0xF7,0x46,0x00,0x00,0x18,0x09, \ +0x00,0x02,0x80,0xB5,0x16,0x4F,0x00,0x20,0x38,0x72,0x79,0x7A,0x02,0x20,0x01, \ +0x29,0x1C,0xD0,0x04,0x29,0x19,0xD1,0x78,0x72,0x08,0x20,0xFF,0xF7,0x40,0xFD, \ +0x38,0x6B,0xF9,0x6A,0x40,0x1A,0x40,0x00,0x39,0x6A,0x40,0x08,0x81,0x42,0x0D, \ +0xD2,0x39,0x6A,0x41,0x1A,0x14,0x20,0x02,0xF0,0xAB,0xFA,0x79,0x7F,0x01,0x29, \ +0x08,0xD1,0xB9,0x6A,0x81,0x42,0x02,0xD3,0xB9,0x6A,0x08,0x1A,0xB8,0x62,0x80, \ +0xBD,0x78,0x72,0x80,0xBD,0x79,0x6A,0x81,0x42,0xF9,0xD3,0x79,0x6A,0x08,0x1A, \ +0x78,0x62,0x80,0xBD,0x18,0x09,0x00,0x02,0x00,0xB5,0x0A,0x48,0x01,0x21,0x01, \ +0x72,0x01,0x7B,0x01,0x29,0x0D,0xD1,0xC1,0x7A,0x00,0x29,0x0A,0xD1,0x81,0x7A, \ +0x00,0x29,0x07,0xD1,0x41,0x7A,0x06,0x29,0x04,0xD0,0x40,0x7A,0x05,0x28,0x01, \ 
+0xD0,0xFF,0xF7,0x67,0xF9,0x00,0xBD,0x18,0x09,0x00,0x02,0xB0,0xB5,0x20,0x4F, \ +0x20,0x48,0x79,0x7D,0x80,0x7A,0x20,0x4C,0x81,0x42,0x02,0xDA,0x78,0x7D,0x20, \ +0x70,0x00,0xE0,0x20,0x70,0xFF,0xF7,0x56,0xFC,0x20,0x78,0x03,0x28,0x03,0xD1, \ +0x01,0x20,0xFE,0xF7,0xAA,0xF8,0x02,0xE0,0x00,0x20,0xFE,0xF7,0xA6,0xF8,0xB8, \ +0x7A,0x17,0x4D,0x02,0x28,0x02,0xD1,0xC4,0x20,0x28,0x70,0x04,0xE0,0xB8,0x7A, \ +0x01,0x28,0x01,0xD1,0xD4,0x20,0x28,0x70,0x00,0x20,0x68,0x70,0x69,0x88,0x11, \ +0x48,0x00,0x29,0x07,0xD0,0x23,0x78,0x10,0x4A,0x5B,0x00,0xC3,0x5A,0xD2,0x88, \ +0xD2,0x18,0x89,0x1A,0x69,0x80,0x0A,0x21,0xF9,0x65,0x21,0x78,0x49,0x00,0x40, \ +0x5A,0x38,0x66,0x20,0x78,0x00,0xF0,0x2B,0xF8,0x39,0x6E,0x08,0x1A,0x38,0x66, \ +0xBD,0x65,0x01,0x20,0xB8,0x75,0xB0,0xBD,0x18,0x09,0x00,0x02,0x04,0x01,0x00, \ +0x02,0x92,0x01,0x00,0x02,0x00,0x09,0x00,0x02,0xAC,0x01,0x00,0x02,0x00,0x00, \ +0x00,0x02,0xB0,0xB4,0x0A,0x4B,0x00,0x24,0x99,0x42,0x01,0xD8,0x00,0x29,0x02, \ +0xD1,0x20,0x1C,0xB0,0xBC,0xF7,0x46,0x01,0x27,0xBF,0x06,0x3D,0x69,0xAB,0x08, \ +0x01,0xD3,0x20,0x1C,0xF6,0xE7,0x03,0xC7,0x08,0x3F,0x3A,0x61,0x01,0x20,0xF1, \ +0xE7,0x0E,0x06,0x00,0x00,0x00,0x28,0x01,0xD1,0xC0,0x20,0xF7,0x46,0x01,0x48, \ +0x00,0x88,0xF7,0x46,0x00,0x00,0xA8,0x01,0x00,0x02,0xF8,0xB5,0x3D,0x48,0x00, \ +0x90,0x3D,0x48,0xC4,0x1D,0x49,0x34,0xC7,0x1D,0x09,0x37,0x3B,0x4D,0x3C,0x4E, \ +0x00,0xF0,0xDC,0xFC,0x00,0xF0,0xEA,0xF8,0x30,0x78,0x00,0x28,0x04,0xD1,0xA0, \ +0x79,0x05,0x28,0x01,0xD0,0x06,0xF0,0x84,0xFE,0x28,0x78,0x00,0x28,0xF0,0xD0, \ +0xB8,0x7B,0x00,0x28,0xED,0xD1,0x33,0x48,0x01,0x78,0x01,0x29,0x03,0xD1,0x00, \ +0x21,0x01,0x70,0x04,0xF0,0x38,0xFB,0x30,0x48,0x00,0x78,0x02,0x28,0x46,0xD0, \ +0x2F,0x48,0x00,0x78,0x02,0x28,0x01,0xD1,0x06,0xF0,0xA4,0xFE,0x06,0xF0,0xB2, \ +0xFF,0x28,0x4E,0x05,0x1C,0x30,0x78,0x01,0x28,0x09,0xD1,0x03,0x03,0x9D,0x42, \ +0x03,0xD1,0x28,0x49,0x10,0x20,0x09,0x68,0x08,0x73,0x00,0x21,0xB9,0x73,0xC8, \ +0xE7,0x00,0x2D,0x23,0xD0,0x01,0x23,0x1B,0x03,0x9D,0x42,0x08,0xD0,0x23,0x48, \ +0x80,0x21,0x02,0x68,0x11,0x70,0x02,0x68,0x00,0x21,0x51,0x70,0x00,0x68,0x81, \ +0x70,0xA0,0x79,0x05,0x28,0x0D,0xD1,0x00,0x98,0x40,0x79,0x01,0x28,0x09,0xDD, \ +0xC0,0x20,0x01,0xF0,0x8E,0xFD,0x06,0x1C,0x28,0x1C,0x06,0xF0,0xA2,0xFE,0x30, \ +0x1C,0x01,0xF0,0x87,0xFD,0x29,0x1C,0x00,0x22,0x16,0x48,0x01,0xF0,0x26,0xFC, \ +0xA2,0xE7,0x00,0x98,0x40,0x79,0x01,0x28,0x9E,0xDD,0xA0,0x79,0x05,0x28,0x9B, \ +0xD1,0x00,0xF0,0x22,0xF8,0x98,0xE7,0x06,0xF0,0x71,0xFF,0x01,0x23,0x1B,0x03, \ +0x98,0x42,0x03,0xD1,0x0A,0x49,0x10,0x20,0x09,0x68,0x08,0x73,0x00,0x21,0xB9, \ +0x73,0x8B,0xE7,0xB0,0x00,0x00,0x02,0x18,0x09,0x00,0x02,0x56,0x02,0x00,0x02, \ +0xA9,0x02,0x00,0x02,0xCD,0x01,0x00,0x02,0x4B,0x02,0x00,0x02,0x3C,0x01,0x00, \ +0x02,0xBC,0x01,0x00,0x02,0xC8,0x01,0x00,0x02,0x0C,0x07,0x00,0x02,0x80,0xB5, \ +0xC0,0x20,0x01,0xF0,0x52,0xFD,0x07,0x1C,0x0D,0x48,0x81,0x78,0x49,0x08,0x89, \ +0x07,0x11,0xD1,0x81,0x78,0x09,0x09,0x0E,0xD3,0x0A,0x49,0x09,0x68,0x09,0x7B, \ +0x09,0x0A,0x09,0xD2,0xC1,0x78,0x00,0x29,0x04,0xD0,0x00,0x21,0xC1,0x70,0x01, \ +0x21,0x81,0x71,0x01,0xE0,0x06,0xF0,0x80,0xFE,0x38,0x1C,0x01,0xF0,0x37,0xFD, \ +0x80,0xBD,0x88,0x09,0x00,0x02,0xBC,0x01,0x00,0x02,0xB0,0xB5,0x1C,0x4C,0x01, \ +0x20,0x1C,0x4D,0xA0,0x77,0x28,0x68,0x00,0xF0,0xA0,0xFE,0x29,0x68,0x00,0x20, \ +0x4F,0x68,0x88,0x73,0x18,0x49,0x8A,0x78,0x00,0x2A,0x00,0xD1,0x48,0x70,0x38, \ +0x78,0x08,0x28,0x19,0xD1,0x20,0x7D,0x01,0x28,0x06,0xD1,0x06,0x22,0xF8,0x1D, \ +0x09,0x30,0x12,0x49,0x02,0xF0,0xF9,0xF8,0x0F,0xE0,0x20,0x7D,0x02,0x28,0x0C, \ +0xD1,0x10,0x48,0x40,0x79,0x02,0x28,0x08,0xD1,0xE0,0x1D,0x49,0x30,0x80,0x79, \ 
+0x05,0x28,0x03,0xD1,0x78,0x78,0x10,0x23,0x18,0x43,0x78,0x70,0xF8,0x1D,0x0F, \ +0x30,0xFD,0xF7,0xDC,0xFF,0x38,0x1C,0x06,0xF0,0xC5,0xFB,0x29,0x68,0x80,0x20, \ +0x08,0x73,0x40,0x01,0xB0,0xBD,0x18,0x09,0x00,0x02,0xBC,0x01,0x00,0x02,0xC0, \ +0x01,0x00,0x02,0xFC,0x00,0x00,0x02,0xB0,0x00,0x00,0x02,0x00,0xB5,0x05,0x48, \ +0x01,0x78,0x00,0x29,0x04,0xD0,0x40,0x78,0x00,0x28,0x01,0xD1,0x04,0xF0,0x2E, \ +0xFA,0x00,0xBD,0x00,0x00,0x98,0x09,0x00,0x02,0xF0,0xB5,0x2E,0x48,0x47,0x6E, \ +0xFD,0xF7,0xFB,0xFE,0x01,0x02,0x2C,0x4C,0x09,0x0A,0x2C,0x48,0x21,0x60,0x43, \ +0x78,0x2C,0x4A,0x13,0x70,0x15,0x78,0x0D,0x23,0x6B,0x43,0x1B,0x18,0x1B,0x7B, \ +0x1B,0x06,0x0B,0x43,0x03,0x21,0x49,0x06,0x0B,0x60,0x15,0x78,0x0D,0x23,0x6B, \ +0x43,0x1B,0x18,0x9E,0x7B,0x5D,0x7B,0x36,0x02,0x35,0x43,0xDE,0x7B,0x1B,0x7C, \ +0x36,0x04,0x35,0x43,0x1B,0x06,0x2B,0x43,0x4B,0x60,0xC3,0x1D,0x39,0x33,0x1B, \ +0x78,0x02,0x2B,0x1D,0xD1,0x15,0x78,0x0D,0x23,0x6B,0x43,0x1B,0x18,0x9E,0x7C, \ +0x5D,0x7C,0x36,0x02,0x35,0x43,0xDE,0x7C,0x1B,0x7D,0x36,0x04,0x35,0x43,0x1B, \ +0x06,0x2B,0x43,0x4B,0x61,0x15,0x78,0x0D,0x23,0x6B,0x43,0x18,0x18,0x85,0x7D, \ +0x43,0x7D,0x2D,0x02,0x2B,0x43,0xC5,0x7D,0x00,0x7E,0x2D,0x04,0x2B,0x43,0x00, \ +0x06,0x18,0x43,0x88,0x61,0x10,0x78,0x21,0x68,0x0D,0x4A,0x80,0x07,0x01,0x43, \ +0x21,0x60,0x00,0x20,0x3B,0x5C,0x13,0x54,0x01,0x30,0x18,0x28,0xFA,0xD3,0x11, \ +0x76,0x08,0x0A,0x50,0x76,0x08,0x0C,0x90,0x76,0x08,0x0E,0xD0,0x76,0xF0,0xBD, \ +0x00,0x00,0x18,0x09,0x00,0x02,0xE4,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x90, \ +0x01,0x00,0x02,0x4C,0x07,0x00,0x02,0x80,0xB4,0x11,0x4A,0x11,0x88,0x01,0x31, \ +0x09,0x04,0x09,0x0C,0x1E,0x29,0x00,0xD1,0x00,0x21,0x0E,0x4F,0x0E,0x4B,0x4F, \ +0x43,0xFB,0x18,0x1F,0x7B,0x00,0x2F,0x11,0xD1,0x11,0x80,0x0C,0x49,0x03,0x22, \ +0x19,0x60,0xD9,0x1D,0x15,0x31,0x59,0x60,0x08,0x39,0x99,0x60,0x00,0x21,0x19, \ +0x73,0x99,0x73,0x9A,0x75,0x99,0x82,0x03,0x60,0x40,0x21,0x01,0x73,0x18,0x1C, \ +0x80,0xBC,0xF7,0x46,0xE8,0x01,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0x11,0x00, \ +0x02,0x00,0x00,0x00,0x80,0x80,0xB4,0x13,0x4A,0x51,0x88,0x01,0x31,0x09,0x04, \ +0x09,0x0C,0x14,0x29,0x00,0xD1,0x00,0x21,0x10,0x4F,0x10,0x4B,0x4F,0x43,0xFB, \ +0x18,0x1F,0x7A,0x00,0x2F,0x15,0xD1,0x51,0x80,0x0E,0x49,0x01,0x22,0x19,0x60, \ +0xD9,0x1D,0x11,0x31,0x59,0x60,0x9A,0x81,0x00,0x21,0x19,0x72,0x0A,0x4F,0xD9, \ +0x73,0xBF,0x79,0x01,0x2F,0x01,0xD1,0xC2,0x73,0x00,0xE0,0xC1,0x73,0x80,0x21, \ +0x03,0x60,0x01,0x72,0x18,0x1C,0x80,0xBC,0xF7,0x46,0xE8,0x01,0x00,0x02,0xA4, \ +0x06,0x00,0x00,0x00,0xDA,0x00,0x02,0x00,0x00,0x00,0x80,0xB0,0x00,0x00,0x02, \ +0x01,0x1C,0x00,0x68,0x02,0x08,0x01,0xD3,0x08,0x1C,0xF7,0x46,0x00,0x22,0x0A, \ +0x73,0xF7,0x46,0x01,0x68,0x09,0x08,0x02,0xD3,0x40,0x21,0x01,0x72,0xF7,0x46, \ +0x04,0x4A,0x01,0x68,0x12,0x78,0x00,0x2A,0xF9,0xD1,0x02,0x72,0x08,0x1C,0xF7, \ +0x46,0x00,0x00,0x43,0x02,0x00,0x02,0x0A,0x49,0x01,0x20,0x48,0x63,0x00,0x20, \ +0xCA,0x1D,0x39,0x32,0x88,0x63,0x50,0x83,0x10,0x83,0x08,0x65,0xCB,0x1D,0x49, \ +0x33,0x90,0x82,0x18,0x73,0x98,0x71,0x58,0x73,0x90,0x81,0xD0,0x81,0x60,0x31, \ +0xC8,0x72,0xF7,0x46,0x00,0x00,0xEC,0x01,0x00,0x02,0x80,0xB5,0x07,0x27,0x7F, \ +0x06,0xF8,0x69,0x20,0x23,0x18,0x43,0xF8,0x61,0x17,0x48,0xFD,0xF7,0xEA,0xFD, \ +0xF8,0x69,0x40,0x23,0x18,0x43,0xF8,0x61,0x13,0x48,0xFD,0xF7,0xE3,0xFD,0xF8, \ +0x69,0x01,0x23,0x5B,0x02,0x18,0x43,0xF8,0x61,0x00,0x20,0xFF,0x21,0x91,0x31, \ +0x01,0x30,0x88,0x42,0xFC,0xD3,0xF8,0x69,0x01,0x23,0x5B,0x02,0x98,0x43,0xF8, \ +0x61,0x00,0x20,0x7D,0x21,0x49,0x01,0x01,0x30,0x88,0x42,0xFC,0xD3,0xFF,0xF7, \ +0xBF,0xFF,0xFD,0xF7,0xD1,0xFD,0x00,0xF0,0x0D,0xF8,0x05,0x49,0x0D,0x20,0x00, \ 
+0x06,0x01,0x81,0xFF,0x21,0x41,0x31,0x81,0x80,0x80,0xBD,0x00,0x00,0x50,0xC3, \ +0x00,0x00,0xFF,0x0F,0x00,0x00,0x80,0xB4,0x0E,0x48,0x00,0x21,0x01,0x70,0x0D, \ +0x48,0x80,0x23,0x03,0x73,0x86,0x22,0x02,0x72,0x82,0x22,0x02,0x71,0x07,0x22, \ +0x02,0x70,0x0A,0x48,0x05,0x27,0x07,0x73,0x06,0x27,0x07,0x72,0x02,0x71,0x08, \ +0x48,0x01,0x22,0x07,0x71,0x03,0x72,0x02,0x73,0x06,0x48,0x01,0x71,0x01,0x73, \ +0x80,0xBC,0xF7,0x46,0x00,0x00,0x10,0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D,0xA0, \ +0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xE0,0x03,0x00,0x0D,0x90,0xB5,0x20,0x49, \ +0x20,0x4C,0x00,0x20,0x0B,0x7B,0x02,0x1C,0x01,0x30,0x08,0x28,0xA3,0x54,0xF9, \ +0xD1,0xE1,0x78,0xA0,0x78,0xE2,0x79,0x09,0x02,0x08,0x43,0x07,0x1C,0x61,0x79, \ +0x20,0x79,0x09,0x02,0x01,0x43,0xA0,0x79,0x12,0x02,0x02,0x43,0x20,0x78,0x63, \ +0x78,0x00,0x02,0x18,0x43,0x05,0x28,0x03,0xD1,0x38,0x1C,0x00,0xF0,0x62,0xFB, \ +0x1C,0xE0,0x09,0x28,0x03,0xD1,0x38,0x1C,0x00,0xF0,0x80,0xFB,0x16,0xE0,0x0F, \ +0x4B,0x98,0x42,0x04,0xD1,0xE1,0x78,0x10,0x1C,0x00,0xF0,0xB0,0xFB,0x0E,0xE0, \ +0x0C,0x4B,0x9B,0x7A,0x00,0x2B,0x05,0xD1,0x13,0x1C,0x0A,0x1C,0x39,0x1C,0x00, \ +0xF0,0x14,0xF8,0x04,0xE0,0x13,0x1C,0x0A,0x1C,0x39,0x1C,0x04,0xF0,0xD0,0xFD, \ +0x05,0x49,0x01,0x20,0x08,0x71,0x90,0xBD,0x30,0x03,0x00,0x0D,0x28,0x02,0x00, \ +0x02,0x06,0x80,0x00,0x00,0x4C,0x02,0x00,0x02,0xD0,0x03,0x00,0x0D,0x80,0xB5, \ +0x0F,0x1C,0x11,0x1C,0x1A,0x1C,0x08,0x4B,0x98,0x42,0x03,0xD1,0x38,0x1C,0x00, \ +0xF0,0x12,0xF8,0x80,0xBD,0x06,0x4B,0x98,0x42,0x03,0xD1,0x38,0x1C,0x00,0xF0, \ +0x2B,0xF8,0x80,0xBD,0x03,0x49,0x20,0x20,0x08,0x73,0x80,0xBD,0x33,0xC1,0x00, \ +0x00,0x0E,0x40,0x00,0x00,0x70,0x03,0x00,0x0D,0x0B,0x49,0x0C,0x48,0x4A,0x6B, \ +0x03,0x2A,0x03,0xD1,0x0B,0x4A,0x92,0x78,0x01,0x2A,0x02,0xD0,0x20,0x21,0x01, \ +0x73,0xF7,0x46,0x80,0x22,0x02,0x73,0x50,0x31,0xC9,0x7B,0x06,0x4A,0x10,0x23, \ +0x11,0x73,0x01,0x7B,0x19,0x43,0x01,0x73,0xF7,0x46,0x00,0x00,0xEC,0x01,0x00, \ +0x02,0x70,0x03,0x00,0x0D,0x28,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0xF0,0xB5, \ +0x13,0x4D,0x17,0x1C,0xA8,0x78,0x12,0x4A,0x02,0x28,0x06,0xD1,0xE8,0x78,0x08, \ +0x28,0x03,0xD1,0x10,0x48,0x43,0x6B,0x03,0x2B,0x02,0xD0,0x20,0x20,0x10,0x73, \ +0xF0,0xBD,0x00,0x26,0x00,0x2F,0x0D,0x4C,0x04,0xD1,0x00,0xF0,0xEF,0xFB,0x01, \ +0x20,0xA0,0x72,0x04,0xE0,0x00,0x29,0x01,0xD1,0x09,0x49,0x41,0x66,0x16,0x73, \ +0x27,0x80,0x29,0x1C,0x08,0x22,0x66,0x80,0x07,0x48,0x01,0xF0,0xBC,0xFE,0x26, \ +0x73,0xF0,0xBD,0x28,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0xEC,0x01,0x00,0x02, \ +0x4C,0x02,0x00,0x02,0x00,0x60,0x00,0x01,0x30,0x02,0x00,0x02,0x90,0xB5,0x17, \ +0x49,0x08,0x78,0x4A,0x78,0x00,0x02,0x10,0x43,0x05,0x28,0x15,0x4A,0x04,0xD1, \ +0x89,0x78,0x50,0x6B,0x00,0xF0,0x45,0xFB,0x90,0xBD,0x13,0x4B,0x01,0x27,0x98, \ +0x42,0x11,0x4C,0x04,0xD1,0xC8,0x78,0x00,0xF0,0x14,0xFA,0x27,0x71,0x90,0xBD, \ +0x09,0x28,0x0A,0xD1,0x0E,0x49,0x20,0x20,0x08,0x73,0x27,0x71,0x50,0x6B,0x03, \ +0x28,0xEB,0xD1,0xD0,0x1D,0x49,0x30,0x47,0x73,0x90,0xBD,0xD1,0x1D,0x59,0x31, \ +0x89,0x7A,0x00,0x29,0x02,0xD1,0x00,0xF0,0x0E,0xF8,0x90,0xBD,0x04,0xF0,0x63, \ +0xFD,0x90,0xBD,0x28,0x02,0x00,0x02,0xEC,0x01,0x00,0x02,0xD0,0x03,0x00,0x0D, \ +0x06,0x80,0x00,0x00,0x70,0x03,0x00,0x0D,0x08,0x4B,0x07,0x49,0x98,0x42,0x02, \ +0xD1,0xE0,0x20,0x08,0x73,0x04,0xE0,0x06,0x4B,0x98,0x42,0x01,0xD1,0x20,0x20, \ +0x08,0x73,0x04,0x49,0x01,0x20,0x08,0x71,0xF7,0x46,0x70,0x03,0x00,0x0D,0x33, \ +0xC1,0x00,0x00,0x0E,0x40,0x00,0x00,0xD0,0x03,0x00,0x0D,0x80,0xB5,0x11,0x48, \ +0x11,0x4B,0x01,0x78,0x42,0x78,0x09,0x02,0x11,0x43,0x0F,0x1C,0x9F,0x42,0x03, \ +0xD1,0x80,0x78,0x00,0xF0,0x19,0xFA,0x05,0xE0,0x0C,0x49,0xE0,0x20,0x08,0x73, \ 
+0x0C,0x49,0x01,0x20,0x08,0x71,0x0B,0x4B,0x9F,0x42,0x0B,0xD1,0x0B,0x48,0x01, \ +0x7B,0x02,0x29,0x07,0xD1,0x03,0x21,0x01,0x73,0x09,0x48,0x00,0x22,0xC1,0x78, \ +0x80,0x78,0x04,0xF0,0x43,0xFA,0x80,0xBD,0x28,0x02,0x00,0x02,0x0E,0x40,0x00, \ +0x00,0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x22,0xC1,0x00,0x00,0x4C,0x02, \ +0x00,0x02,0x30,0x02,0x00,0x02,0x00,0xB5,0x0C,0x49,0x08,0x7B,0x02,0x09,0x05, \ +0xD3,0x00,0x20,0x08,0x73,0x0A,0x49,0x01,0x20,0x08,0x71,0x00,0xBD,0xC1,0x08, \ +0x02,0xD3,0xFF,0xF7,0xB1,0xFE,0x00,0xBD,0x41,0x08,0x02,0xD3,0xFF,0xF7,0x68, \ +0xFF,0x00,0xBD,0x80,0x08,0xF2,0xD3,0xFF,0xF7,0xB5,0xFF,0x00,0xBD,0x70,0x03, \ +0x00,0x0D,0xD0,0x03,0x00,0x0D,0xF0,0xB5,0x3E,0x48,0x00,0x79,0x80,0x08,0x3A, \ +0xD3,0x3D,0x4E,0x70,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0xFB,0xFD,0x3B,0x48, \ +0x00,0x79,0x02,0x38,0x05,0x04,0x2D,0x0C,0xC0,0x20,0x01,0xF0,0x15,0xFA,0x38, \ +0x49,0x04,0x22,0x0A,0x71,0x00,0x22,0x0A,0x71,0x01,0xF0,0x0E,0xFA,0x35,0x48, \ +0xC7,0x1D,0x39,0x37,0x79,0x8B,0xC4,0x1D,0x59,0x34,0x49,0x19,0x19,0x23,0x9B, \ +0x01,0x99,0x42,0x04,0xD9,0x00,0x21,0x79,0x83,0x39,0x83,0x01,0x21,0xA1,0x73, \ +0x2D,0x48,0xCD,0x22,0x40,0x6F,0x80,0x68,0x79,0x8B,0x40,0x18,0x29,0x1C,0x00, \ +0xF0,0xA3,0xFA,0x25,0x48,0x00,0x25,0x05,0x71,0x70,0x79,0x01,0x28,0x01,0xDD, \ +0x00,0xF0,0xB7,0xFD,0xA0,0x7B,0x24,0x4E,0x01,0x28,0x01,0xD1,0xA5,0x73,0xF0, \ +0xBD,0x78,0x8B,0x00,0x28,0x27,0xD1,0x70,0x6F,0x81,0x8A,0xC0,0x7D,0x08,0x31, \ +0x08,0x18,0x38,0x83,0x70,0x6F,0x81,0x7D,0x44,0x68,0x03,0x29,0x01,0xDD,0x03, \ +0x21,0x81,0x75,0x38,0x8B,0x1A,0x4B,0x98,0x42,0x13,0xD8,0x38,0x8B,0x00,0x28, \ +0x10,0xD0,0xE0,0x1D,0x03,0x30,0x06,0x22,0x17,0x49,0x01,0xF0,0x90,0xFD,0x00, \ +0x28,0x08,0xD1,0x20,0x78,0x08,0x28,0x08,0xD0,0x00,0x28,0x06,0xD0,0x20,0x28, \ +0x04,0xD0,0xB0,0x28,0x02,0xD0,0x7D,0x83,0x3D,0x83,0xF0,0xBD,0x38,0x8B,0x40, \ +0x28,0x06,0xDD,0x38,0x8B,0x40,0x38,0x38,0x83,0x78,0x8B,0x40,0x30,0x78,0x83, \ +0xF0,0xBD,0x3D,0x83,0x7D,0x83,0x70,0x6F,0xFF,0xF7,0x35,0xFD,0x70,0x67,0xF0, \ +0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0xB0,0x00,0x00,0x02,0xF0,0x02,0x00,0x0D, \ +0x60,0x02,0x00,0x0D,0xEC,0x01,0x00,0x02,0x32,0x06,0x00,0x00,0x5C,0x00,0x00, \ +0x02,0x90,0xB5,0x1C,0x4F,0x38,0x7A,0x40,0x08,0x1D,0xD3,0x1B,0x48,0x40,0x79, \ +0x01,0x28,0x01,0xDD,0x00,0xF0,0x6F,0xFD,0x00,0x20,0x18,0x4B,0x38,0x72,0x3A, \ +0x1C,0xDF,0x1D,0x49,0x37,0xBC,0x79,0xD9,0x1D,0x39,0x31,0x01,0x2C,0x15,0xD1, \ +0xB8,0x71,0xFB,0x79,0x00,0x2B,0x01,0xD1,0x10,0x23,0x13,0x72,0x89,0x8A,0x01, \ +0x24,0x00,0x29,0x03,0xD0,0x00,0xF0,0x1E,0xF8,0xBC,0x71,0x90,0xBD,0x39,0x7B, \ +0x01,0x29,0x02,0xD1,0x38,0x73,0xBC,0x71,0x90,0xBD,0xB8,0x71,0x90,0xBD,0x18, \ +0x65,0x88,0x82,0x38,0x73,0xB8,0x71,0x18,0x6F,0x01,0x7A,0x10,0x29,0xEE,0xD1, \ +0x1F,0x1C,0xFF,0xF7,0x54,0xFD,0x38,0x67,0x90,0xBD,0x00,0x00,0x70,0x03,0x00, \ +0x0D,0xB0,0x00,0x00,0x02,0xEC,0x01,0x00,0x02,0xF0,0xB4,0x10,0x49,0xCA,0x1D, \ +0x39,0x32,0x90,0x8A,0x40,0x28,0x01,0xDB,0x40,0x20,0x00,0xE0,0x90,0x8A,0x00, \ +0x23,0x00,0x28,0x0F,0x6D,0x0B,0xDD,0xCC,0x1D,0x0A,0x4D,0x49,0x34,0xE6,0x79, \ +0x00,0x2E,0x05,0xD1,0x3E,0x78,0x01,0x37,0x01,0x33,0x83,0x42,0x2E,0x72,0xF6, \ +0xDB,0x93,0x8A,0x1B,0x1A,0x93,0x82,0x0A,0x6D,0x10,0x18,0x08,0x65,0xF0,0xBC, \ +0xF7,0x46,0xEC,0x01,0x00,0x02,0x30,0x03,0x00,0x0D,0xF0,0xB5,0x04,0x1C,0x17, \ +0x48,0x0F,0x1C,0x40,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x0B,0xFD,0x15,0x48, \ +0x00,0x25,0xC6,0x1D,0x39,0x36,0x04,0x65,0xB7,0x82,0xC7,0x1D,0x49,0x37,0xBD, \ +0x71,0x3D,0x73,0xB0,0x8A,0x80,0x06,0x80,0x0E,0x01,0x24,0x00,0x28,0x00,0xD1, \ +0x3C,0x73,0xFF,0xF7,0xBE,0xFF,0xF8,0x79,0x00,0x28,0x02,0xD1,0x0B,0x49,0x10, \ 
+0x20,0x08,0x72,0xB0,0x8A,0x00,0x28,0x03,0xD0,0xFF,0xF7,0xB3,0xFF,0xBC,0x71, \ +0xF0,0xBD,0x38,0x7B,0x00,0x28,0x02,0xD0,0x3D,0x73,0xBC,0x71,0xF0,0xBD,0xBD, \ +0x71,0xF0,0xBD,0x00,0x00,0xB0,0x00,0x00,0x02,0xEC,0x01,0x00,0x02,0x70,0x03, \ +0x00,0x0D,0xB0,0xB5,0x29,0x4D,0xEF,0x1D,0x49,0x37,0x78,0x7B,0x00,0x28,0x39, \ +0xD0,0x27,0x48,0x00,0x78,0x01,0x28,0x35,0xD1,0x26,0x48,0x00,0x24,0x01,0x78, \ +0x01,0x29,0x02,0xD1,0x04,0x70,0x06,0xF0,0x6D,0xFA,0xF8,0x79,0x01,0x28,0x03, \ +0xD1,0xFC,0x71,0xF8,0x7B,0x00,0xF0,0xA4,0xF9,0xE8,0x1D,0x59,0x30,0xC0,0x7A, \ +0x01,0x28,0x02,0xD1,0xF8,0x7B,0x00,0xF0,0x9C,0xF9,0xF8,0x7B,0x02,0x28,0x1C, \ +0xD0,0xFF,0xF7,0xAF,0xFE,0x28,0x6F,0x80,0x23,0x01,0x7A,0x17,0x4F,0x19,0x40, \ +0x0C,0xD0,0x10,0x21,0x01,0x72,0x28,0x6F,0x81,0x89,0x0C,0x30,0x0C,0x31,0xFF, \ +0xF7,0x93,0xFF,0x78,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x8E,0xFC,0xFF,0xF7, \ +0x26,0xFF,0x78,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x87,0xFC,0xB0,0xBD,0x0C, \ +0x48,0x00,0x78,0x00,0x28,0xFA,0xD0,0x28,0x6F,0x01,0x7A,0x10,0x29,0x05,0xD0, \ +0x01,0x7A,0x80,0x29,0x02,0xD0,0x01,0x7A,0x40,0x29,0xF0,0xD1,0xFF,0xF7,0x9B, \ +0xFC,0x28,0x67,0xB0,0xBD,0xEC,0x01,0x00,0x02,0xD7,0x01,0x00,0x02,0xD5,0x01, \ +0x00,0x02,0xB0,0x00,0x00,0x02,0xD6,0x01,0x00,0x02,0xB0,0xB4,0x21,0x4F,0x80, \ +0x21,0x21,0x4A,0x39,0x73,0xD1,0x1D,0x59,0x31,0x4C,0x88,0x0D,0x88,0xAC,0x42, \ +0x17,0xD1,0x38,0x7B,0x40,0x23,0x03,0x40,0xE0,0x20,0x00,0x2B,0x0F,0xD1,0x09, \ +0x88,0x49,0x07,0x02,0xD0,0x38,0x73,0xB0,0xBC,0xF7,0x46,0xD1,0x1D,0x49,0x31, \ +0x89,0x7B,0x01,0x29,0x02,0xD1,0xD0,0x20,0x38,0x73,0xF5,0xE7,0x38,0x73,0xF3, \ +0xE7,0x38,0x73,0xF1,0xE7,0x4A,0x88,0x0C,0x88,0xA2,0x42,0xED,0xDA,0x0A,0x88, \ +0x4C,0x88,0x12,0x1B,0x08,0x2A,0x00,0xD9,0x08,0x22,0x01,0x28,0x01,0xD1,0x0C, \ +0x4B,0x02,0xE0,0x02,0x28,0x00,0xD1,0x0B,0x4B,0x00,0x2A,0x08,0xD0,0x0A,0x48, \ +0x4C,0x88,0x4D,0x88,0x01,0x34,0x4C,0x80,0x5C,0x5D,0x01,0x3A,0x04,0x73,0xF7, \ +0xD1,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xD0,0xE7,0x70,0x03,0x00,0x0D, \ +0xEC,0x01,0x00,0x02,0xEC,0x01,0x00,0x02,0xFE,0x01,0x00,0x02,0x30,0x03,0x00, \ +0x0D,0xF0,0xB5,0x00,0x24,0x01,0x28,0x1F,0x4D,0x1F,0x4E,0x20,0x4F,0x0A,0xD1, \ +0xFD,0xF7,0xC1,0xFA,0x30,0x7B,0x1E,0x49,0xC8,0x73,0x00,0xF0,0x4C,0xF9,0x3C, \ +0x73,0x01,0x20,0x28,0x71,0xF0,0xBD,0x79,0x88,0x3B,0x88,0x1A,0x4A,0x99,0x42, \ +0x1D,0xDA,0x39,0x88,0x7B,0x88,0xC9,0x1A,0x08,0x29,0x00,0xD9,0x08,0x21,0x00, \ +0x29,0x0A,0xD0,0x12,0x4E,0x53,0x6E,0x36,0x7B,0x1E,0x70,0x01,0x33,0x53,0x66, \ +0x7B,0x88,0x01,0x33,0x7B,0x80,0x01,0x39,0xF4,0xD1,0x7A,0x88,0x3B,0x88,0x0F, \ +0x49,0x9A,0x42,0x0D,0xD1,0x02,0x28,0x06,0xD1,0x00,0xF0,0x27,0xF9,0x3C,0x73, \ +0x08,0xE0,0x01,0x20,0x28,0x71,0xF0,0xBD,0x60,0x20,0x08,0x73,0x01,0x20,0x38, \ +0x73,0x00,0xE0,0x0C,0x73,0x01,0x20,0x28,0x71,0xF0,0xBD,0x00,0x00,0xD0,0x03, \ +0x00,0x0D,0x30,0x03,0x00,0x0D,0x4C,0x02,0x00,0x02,0x3C,0x02,0x00,0x02,0xEC, \ +0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x00,0xB5,0x7F,0x28,0x07,0xD8,0x00,0x29, \ +0x05,0xD1,0x00,0x2A,0x03,0xD1,0x0C,0x4A,0x51,0x6B,0x03,0x29,0x03,0xD1,0x0B, \ +0x49,0x20,0x20,0x08,0x73,0x00,0xBD,0x01,0x29,0x04,0xD1,0x00,0x28,0x08,0xD0, \ +0x02,0x20,0x50,0x63,0x05,0xE0,0x02,0x29,0x03,0xD1,0x00,0x28,0x01,0xD1,0x01, \ +0x20,0x50,0x63,0x00,0xF0,0xEE,0xF8,0x00,0xBD,0x00,0x00,0xEC,0x01,0x00,0x02, \ +0x70,0x03,0x00,0x0D,0x80,0xB5,0x00,0x29,0x09,0xD1,0x00,0x2A,0x07,0xD1,0x00, \ +0x28,0x01,0xD0,0x01,0x28,0x03,0xD1,0x14,0x49,0x4A,0x6B,0x01,0x2A,0x03,0xD1, \ +0x13,0x49,0x20,0x20,0x08,0x73,0x80,0xBD,0x12,0x4B,0x02,0x2A,0x09,0xD1,0x00, \ +0x28,0x12,0xD0,0x03,0x22,0x4A,0x63,0x1F,0x7B,0x1A,0x1C,0x02,0x23,0x3B,0x43, \ 
+0x13,0x73,0x0A,0xE0,0x03,0x2A,0x08,0xD1,0x00,0x28,0x06,0xD1,0x02,0x22,0x4A, \ +0x63,0x1F,0x7B,0x1A,0x1C,0xFD,0x23,0x3B,0x40,0x13,0x73,0x88,0x63,0x00,0x20, \ +0x40,0x31,0x88,0x81,0xC8,0x81,0x00,0xF0,0xB8,0xF8,0x80,0xBD,0x00,0x00,0xEC, \ +0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0xE0,0x03,0x00,0x0D,0x90,0xB5,0x15,0x4F, \ +0xFA,0x1D,0x59,0x32,0x01,0x29,0x02,0xD1,0x12,0x23,0x13,0x80,0x03,0xE0,0x20, \ +0x23,0x02,0x29,0x09,0xD1,0x13,0x80,0x00,0x23,0x50,0x37,0xBB,0x73,0x14,0x88, \ +0xA0,0x42,0x05,0xD8,0xBB,0x73,0x10,0x80,0x0F,0xE0,0x0B,0x48,0x03,0x73,0x90, \ +0xBD,0x14,0x88,0xA0,0x42,0x09,0xD9,0x10,0x88,0x40,0x07,0x01,0xD0,0xBB,0x73, \ +0x04,0xE0,0x10,0x88,0x40,0x07,0x01,0xD1,0x01,0x20,0xB8,0x73,0x53,0x80,0x08, \ +0x1C,0xFF,0xF7,0xDE,0xFE,0x90,0xBD,0x00,0x00,0xEC,0x01,0x00,0x02,0x70,0x03, \ +0x00,0x0D,0x80,0xB4,0x0E,0x4F,0x0E,0x4A,0x01,0x28,0x06,0xD1,0x80,0x20,0x10, \ +0x72,0x38,0x7B,0xFE,0x23,0x18,0x40,0x38,0x73,0x08,0xE0,0x02,0x28,0x06,0xD1, \ +0x80,0x20,0x08,0x43,0x10,0x72,0x38,0x7B,0x01,0x23,0x18,0x43,0x38,0x73,0x06, \ +0x49,0x20,0x20,0x08,0x73,0x05,0x49,0x01,0x20,0x08,0x71,0x80,0xBC,0xF7,0x46, \ +0x00,0x00,0xE0,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0x70,0x03,0x00,0x0D,0xD0, \ +0x03,0x00,0x0D,0x0D,0x23,0x1B,0x06,0x99,0x83,0x05,0x49,0x0A,0x70,0x05,0x4A, \ +0x10,0x60,0x02,0x20,0x08,0x72,0x08,0x7A,0x00,0x28,0xFC,0xD1,0xF7,0x46,0x00, \ +0x00,0x20,0x00,0x00,0x0D,0x40,0x00,0x00,0x0D,0x90,0xB5,0x1B,0x4C,0x07,0x1C, \ +0x60,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x16,0xFB,0x00,0x21,0x02,0x2F,0x17, \ +0x48,0x18,0x4A,0x0F,0xD0,0x43,0x7B,0x02,0x2B,0x03,0xD1,0x41,0x73,0x04,0xF0, \ +0xA9,0xF8,0x1A,0xE0,0x11,0x72,0x14,0x48,0x20,0x22,0x02,0x70,0x01,0x70,0x13, \ +0x49,0x86,0x20,0x08,0x72,0x11,0xE0,0x12,0x4B,0x9B,0x7B,0x00,0x2B,0x0D,0xD1, \ +0x17,0x7A,0x7B,0x09,0x0A,0xD2,0x10,0x23,0x13,0x72,0xC1,0x72,0x0E,0x4A,0x01, \ +0x20,0x10,0x70,0x0F,0x20,0x00,0x06,0x81,0x81,0x0C,0x49,0x81,0x80,0x60,0x79, \ +0x01,0x28,0x01,0xDD,0x00,0xF0,0xD6,0xFA,0x04,0xF0,0xDE,0xF8,0x90,0xBD,0x00, \ +0x00,0xB0,0x00,0x00,0x02,0x4C,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0xC0,0x03, \ +0x00,0x0D,0xB0,0x03,0x00,0x0D,0x28,0x09,0x00,0x02,0xD6,0x01,0x00,0x02,0x08, \ +0x08,0x00,0x00,0x04,0x48,0x01,0x78,0x02,0x78,0x91,0x42,0xFC,0xD0,0x03,0x49, \ +0x60,0x20,0x08,0x73,0xF7,0x46,0x00,0x00,0xF0,0x03,0x00,0x0D,0x70,0x03,0x00, \ +0x0D,0xF0,0xB5,0x29,0x4E,0x30,0x78,0x00,0x28,0x01,0xD1,0x00,0xF0,0x69,0xFA, \ +0x0D,0x24,0x24,0x06,0x27,0x89,0x40,0x20,0x25,0x4D,0x38,0x40,0x08,0xD0,0x28, \ +0x7A,0x00,0x28,0xFC,0xD1,0x23,0x48,0x00,0x7B,0x40,0x08,0x01,0xD3,0xFF,0xF7, \ +0x8D,0xFC,0x78,0x0A,0x1F,0xD3,0xF8,0x43,0xFF,0x23,0x01,0x33,0x18,0x43,0x20, \ +0x81,0xFD,0xF7,0x42,0xF9,0x20,0x7B,0x00,0x09,0xFC,0xD2,0x28,0x7A,0x00,0x28, \ +0xFC,0xD1,0xFF,0xF7,0x14,0xFB,0x18,0x49,0xC8,0x1D,0x49,0x30,0xC2,0x79,0x02, \ +0x2A,0x06,0xD0,0x01,0x22,0xC2,0x71,0xC8,0x1D,0x00,0x23,0x59,0x30,0x43,0x73, \ +0x01,0xE0,0x00,0x23,0xC3,0x71,0xFF,0xF7,0xB1,0xFA,0x11,0x49,0x08,0x78,0x01, \ +0x28,0x10,0xD1,0xB8,0x08,0x0E,0xD3,0x0F,0x4A,0x00,0x23,0x10,0x7A,0x13,0x72, \ +0xFA,0x43,0x02,0x23,0x1A,0x43,0x22,0x81,0x09,0x78,0x01,0x29,0x03,0xD1,0x00, \ +0x04,0x00,0x0C,0x03,0xF0,0xF2,0xFF,0x30,0x78,0x00,0x28,0x01,0xD1,0x00,0xF0, \ +0x41,0xFA,0xF0,0xBD,0x3D,0x01,0x00,0x02,0x20,0x00,0x00,0x0D,0xD0,0x03,0x00, \ +0x0D,0xEC,0x01,0x00,0x02,0x37,0x01,0x00,0x02,0xE0,0x03,0x00,0x0D,0x90,0xB5, \ +0x41,0x68,0x0A,0x78,0x08,0x2A,0x12,0xD1,0x8A,0x7F,0xCB,0x7F,0x12,0x02,0x1A, \ +0x43,0x15,0x4B,0x12,0x04,0x1F,0x88,0x12,0x0C,0xBA,0x42,0x02,0xD0,0x5B,0x88, \ +0x93,0x42,0x06,0xD1,0xC8,0x1D,0x11,0x30,0x06,0x22,0x10,0x49,0x01,0xF0,0x56, \ 
+0xFA,0x90,0xBD,0x03,0x23,0x5B,0x02,0x9A,0x42,0x06,0xDD,0xC8,0x1D,0x11,0x30, \ +0x06,0x22,0x0B,0x49,0x01,0xF0,0x4B,0xFA,0x90,0xBD,0xCF,0x1D,0x01,0x37,0x47, \ +0x60,0x18,0x32,0x82,0x82,0x08,0x4C,0x18,0x22,0x20,0x1C,0x01,0xF0,0x40,0xFA, \ +0x18,0x22,0x38,0x1C,0x21,0x1C,0x01,0xF0,0x3B,0xFA,0x90,0xBD,0x8C,0x02,0x00, \ +0x02,0x86,0x02,0x00,0x02,0x80,0x02,0x00,0x02,0x14,0x0A,0x00,0x02,0xB0,0xB5, \ +0x00,0xF0,0x0F,0xF9,0x4F,0x4F,0xFF,0x21,0xF8,0x1D,0x27,0x30,0x01,0x31,0x06, \ +0x22,0x04,0x1C,0x00,0xF0,0x6B,0xF9,0x4B,0x4D,0x12,0x22,0x03,0x21,0x28,0x1C, \ +0x00,0xF0,0x65,0xF9,0x0E,0x22,0xFF,0x21,0x38,0x1C,0x41,0x31,0x00,0xF0,0x5F, \ +0xF9,0xF8,0x1D,0x3C,0x30,0x0E,0x22,0xFF,0x21,0x71,0x31,0x00,0xF0,0x58,0xF9, \ +0xF8,0x1D,0x15,0x30,0x0E,0x22,0xFF,0x21,0x11,0x31,0x00,0xF0,0x51,0xF9,0xF8, \ +0x1D,0x2D,0x30,0x01,0x22,0xFF,0x21,0x31,0x31,0x00,0xF0,0x4A,0xF9,0xF8,0x1D, \ +0x4A,0x30,0x07,0x22,0xFF,0x21,0x81,0x31,0x00,0xF0,0x43,0xF9,0xF8,0x1D,0x51, \ +0x30,0x02,0x22,0xFF,0x21,0x89,0x31,0x00,0xF0,0x3C,0xF9,0x00,0xF0,0xF3,0xF8, \ +0xF8,0x1D,0x23,0x30,0x04,0x22,0xE9,0x1D,0x01,0x31,0x01,0xF0,0xEE,0xF9,0xF8, \ +0x1D,0x19,0x30,0x80,0x7B,0xC0,0x07,0xC0,0x0F,0x00,0x25,0x00,0x28,0x10,0xD1, \ +0x2C,0x4A,0x15,0x54,0x01,0x30,0x06,0x28,0xFB,0xD3,0x10,0x1C,0x06,0x22,0x21, \ +0x1C,0x01,0xF0,0xBE,0xF9,0x00,0x28,0x04,0xD0,0x21,0x1C,0x06,0x22,0x26,0x48, \ +0x01,0xF0,0xD5,0xF9,0xF8,0x1D,0x29,0x30,0x00,0x79,0x10,0x28,0x0D,0xD0,0x20, \ +0x28,0x0B,0xD0,0x31,0x28,0x09,0xD0,0x30,0x28,0x07,0xD0,0x32,0x28,0x05,0xD0, \ +0x40,0x28,0x03,0xD0,0x41,0x28,0x01,0xD0,0x50,0x28,0x01,0xD1,0x1C,0x49,0xC8, \ +0x75,0xF8,0x1D,0x49,0x30,0x43,0x78,0x01,0x22,0x1A,0x49,0x55,0x2B,0x13,0xD1, \ +0x83,0x78,0x53,0x2B,0x10,0xD1,0xC3,0x78,0x42,0x2B,0x0D,0xD1,0x03,0x79,0x53, \ +0x2B,0x0A,0xD1,0x43,0x79,0x55,0x2B,0x07,0xD1,0x83,0x79,0x53,0x2B,0x04,0xD1, \ +0xC3,0x79,0x50,0x2B,0x01,0xD1,0x0A,0x70,0x00,0xE0,0x0D,0x70,0x03,0x7A,0x0F, \ +0x49,0x32,0x2B,0x04,0xD1,0x40,0x7A,0x32,0x28,0x01,0xD1,0xCA,0x62,0x00,0xE0, \ +0xCD,0x62,0xFF,0x20,0x01,0x30,0xC8,0x61,0x01,0x20,0x40,0x02,0x08,0x62,0x03, \ +0x20,0xC8,0x60,0xB0,0xBD,0x00,0x00,0x2C,0x0A,0x00,0x02,0xEC,0x01,0x00,0x02, \ +0x00,0x72,0x01,0x02,0x5C,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x36,0x01,0x00, \ +0x02,0x90,0x02,0x00,0x02,0xF0,0xB5,0x2A,0x49,0x04,0x20,0x08,0x70,0x29,0x48, \ +0x00,0x25,0x05,0x70,0x29,0x48,0x02,0x24,0x04,0x70,0x20,0x20,0x07,0x27,0x7F, \ +0x06,0xB8,0x61,0x7D,0x61,0xB8,0x69,0x04,0x23,0x18,0x43,0xB8,0x61,0xF8,0x69, \ +0xFB,0x0B,0x98,0x43,0xF8,0x61,0xF8,0x69,0x10,0x23,0x98,0x43,0xF8,0x61,0xFF, \ +0xF7,0x33,0xFF,0x1F,0x49,0xC8,0x6A,0x01,0x28,0x03,0xD1,0x2C,0x20,0x48,0x62, \ +0x88,0x62,0x02,0xE0,0x58,0x20,0x48,0x62,0x88,0x62,0x01,0x22,0x1A,0x48,0x4A, \ +0x76,0xC6,0x69,0x03,0x0C,0x33,0x43,0xC3,0x61,0x18,0x4E,0x81,0x23,0xF3,0x60, \ +0x05,0x23,0x1B,0x06,0xDD,0x62,0x1D,0x63,0xCD,0x68,0xBB,0x69,0xAA,0x40,0xD2, \ +0x43,0x1A,0x40,0xBA,0x61,0xC2,0x69,0x04,0x23,0x9A,0x43,0xC2,0x61,0xCA,0x69, \ +0xC3,0x69,0xD2,0x43,0x1A,0x40,0xC2,0x61,0x0A,0x6A,0xC3,0x69,0x1A,0x43,0xC2, \ +0x61,0xC2,0x69,0x03,0x0C,0x9A,0x43,0xC2,0x61,0x0C,0x76,0xFF,0xF7,0x66,0xF9, \ +0xF8,0x69,0x01,0x23,0xDB,0x03,0x18,0x43,0xF8,0x61,0xF0,0xBD,0x4B,0x02,0x00, \ +0x02,0x56,0x02,0x00,0x02,0x3B,0x01,0x00,0x02,0x90,0x02,0x00,0x02,0x40,0x00, \ +0x00,0x04,0x00,0x01,0x00,0x05,0x80,0x21,0xF3,0x20,0x00,0x05,0x01,0x60,0x00, \ +0x21,0x01,0x60,0x01,0x21,0x41,0x60,0x01,0x60,0x07,0x21,0x49,0x06,0xCA,0x69, \ +0x01,0x23,0x5B,0x03,0x1A,0x43,0xCA,0x61,0x04,0x49,0x01,0x63,0x04,0x49,0x41, \ +0x63,0x81,0x63,0xC1,0x63,0x01,0x69,0x80,0x68,0xF7,0x46,0x00,0x00,0x01,0x0C, \ 
+0x00,0x02,0x01,0x02,0x00,0x02,0x07,0x20,0x40,0x06,0xC1,0x69,0x01,0x23,0x5B, \ +0x03,0x99,0x43,0xC1,0x61,0xF7,0x46,0xF0,0xB5,0x0F,0x1C,0x00,0x21,0xF3,0x24, \ +0x24,0x05,0x00,0x28,0x08,0xD9,0x10,0x4D,0x6B,0x5C,0xE3,0x60,0x26,0x69,0xB3, \ +0x08,0xFC,0xD3,0x01,0x31,0x81,0x42,0xF7,0xD3,0xFF,0x20,0xE0,0x60,0xA1,0x68, \ +0x21,0x1C,0x0B,0x69,0x5B,0x08,0xFC,0xD3,0x8B,0x68,0x0C,0x69,0xA3,0x08,0xFC, \ +0xD3,0xC8,0x60,0x0B,0x69,0x5B,0x08,0xFC,0xD3,0x8B,0x68,0x3B,0x70,0x01,0x37, \ +0x01,0x3A,0xF3,0xD1,0x02,0x20,0xFC,0xF7,0xFB,0xFE,0xF0,0xBD,0xA0,0x02,0x00, \ +0x02,0xF3,0x20,0x00,0x05,0x81,0x68,0x05,0x21,0xC1,0x60,0x01,0x69,0x89,0x08, \ +0xFC,0xD3,0xFF,0x21,0xC1,0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0x81,0x68,0x01, \ +0x69,0x49,0x08,0xFC,0xD3,0x80,0x68,0x00,0x06,0x00,0x0E,0xF7,0x46,0x90,0xB5, \ +0x04,0x1C,0x48,0x09,0x08,0x23,0x18,0x40,0x17,0x1C,0x03,0x22,0x02,0x43,0x08, \ +0x48,0x02,0x70,0x41,0x70,0xFF,0xF7,0xDE,0xFF,0x40,0x08,0xFB,0xD2,0x02,0x20, \ +0xFC,0xF7,0xD1,0xFE,0x02,0x20,0x21,0x1C,0x3A,0x1C,0xFF,0xF7,0xAA,0xFF,0x90, \ +0xBD,0x00,0x00,0xA0,0x02,0x00,0x02,0x07,0x20,0x40,0x06,0xC1,0x69,0x01,0x23, \ +0x5B,0x03,0x19,0x43,0xC1,0x61,0xF7,0x46,0xF3,0x20,0x00,0x05,0x41,0x68,0x0F, \ +0x23,0x1B,0x04,0x99,0x43,0x41,0x60,0x41,0x68,0x19,0x43,0x41,0x60,0xF7,0x46, \ +0x00,0x00,0x80,0xB4,0x14,0x4B,0x5B,0x79,0x01,0x2B,0x0E,0xDD,0x17,0x1C,0x12, \ +0x4A,0x14,0xD1,0x02,0x2B,0x09,0xD1,0x00,0x29,0x07,0xD1,0x00,0x28,0x07,0xD1, \ +0x90,0x78,0x4B,0x1F,0x18,0x40,0x90,0x70,0x00,0x20,0x50,0x70,0x80,0xBC,0xF7, \ +0x46,0x90,0x78,0x04,0x23,0x18,0x43,0x90,0x70,0x01,0x20,0x50,0x70,0xF6,0xE7, \ +0x00,0x28,0x04,0xD1,0x90,0x78,0x02,0x23,0x98,0x43,0x90,0x70,0xEF,0xE7,0x90, \ +0x78,0x02,0x23,0x18,0x43,0x90,0x70,0xEA,0xE7,0x00,0x00,0xB0,0x00,0x00,0x02, \ +0x88,0x09,0x00,0x02,0x90,0xB5,0x07,0x21,0x49,0x06,0xCA,0x69,0x52,0x09,0x15, \ +0xD3,0xCA,0x69,0x10,0x23,0x9A,0x43,0xCA,0x61,0x01,0x28,0x01,0xD1,0x08,0x49, \ +0x08,0x70,0x08,0x4C,0x67,0x68,0xFC,0xF7,0x88,0xFE,0x39,0x1A,0x49,0x01,0x09, \ +0x18,0x06,0x4A,0x61,0x60,0x51,0x6B,0x09,0x1A,0x49,0x01,0x08,0x18,0x50,0x63, \ +0x90,0xBD,0x00,0x00,0x3D,0x01,0x00,0x02,0x80,0x00,0x00,0x04,0x40,0x00,0x00, \ +0x04,0x90,0xB5,0x0C,0x48,0x80,0x78,0x01,0x28,0x13,0xD1,0x0B,0x4F,0x7C,0x68, \ +0xFC,0xF7,0x6D,0xFE,0x21,0x1A,0x49,0x09,0x09,0x18,0x79,0x60,0x08,0x49,0x4A, \ +0x6B,0x12,0x1A,0x52,0x09,0x10,0x18,0x48,0x63,0x07,0x20,0x40,0x06,0xC1,0x69, \ +0x10,0x23,0x19,0x43,0xC1,0x61,0x90,0xBD,0x88,0x09,0x00,0x02,0x80,0x00,0x00, \ +0x04,0x40,0x00,0x00,0x04,0x80,0xB5,0xC0,0x20,0x00,0xF0,0x32,0xFC,0x07,0x1C, \ +0x06,0x48,0x01,0x78,0x00,0x29,0x03,0xD0,0x00,0x21,0x01,0x70,0xFF,0xF7,0xD3, \ +0xFF,0x38,0x1C,0x00,0xF0,0x26,0xFC,0x80,0xBD,0x00,0x00,0x3D,0x01,0x00,0x02, \ +0x80,0xB5,0xC0,0x20,0x00,0xF0,0x1E,0xFC,0x07,0x1C,0x01,0x20,0xFF,0xF7,0xA0, \ +0xFF,0x38,0x1C,0x00,0xF0,0x17,0xFC,0x80,0xBD,0xF0,0xB4,0x13,0x4A,0x00,0x27, \ +0xD7,0x65,0x17,0x66,0x17,0x67,0x57,0x67,0x20,0x20,0x90,0x67,0x10,0x48,0x07, \ +0x70,0x41,0x1C,0x01,0x20,0x04,0x02,0x00,0x25,0x03,0x1C,0x46,0x08,0x05,0xD2, \ +0x5B,0x08,0x01,0x35,0x2D,0x06,0x2D,0x0E,0x5E,0x08,0xF9,0xD3,0x0D,0x70,0x01, \ +0x31,0x01,0x30,0xA0,0x42,0xF0,0xD3,0x07,0x49,0x00,0x20,0x80,0xC1,0x01,0x30, \ +0x20,0x28,0xFB,0xD3,0x57,0x66,0x97,0x66,0xD7,0x67,0xF0,0xBC,0xF7,0x46,0x50, \ +0x03,0x00,0x02,0xB8,0x0B,0x00,0x02,0xB8,0x0C,0x00,0x02,0x90,0xB5,0x0A,0x4F, \ +0x0A,0x4C,0x38,0x68,0x63,0x1C,0x98,0x42,0x04,0xD0,0x3C,0x60,0xFC,0xF7,0xEE, \ +0xFC,0x00,0xF0,0xEC,0xFB,0x06,0x48,0x3C,0x60,0x00,0x68,0xFC,0xF7,0x64,0xFD, \ +0x00,0x20,0x38,0x60,0x00,0xF0,0xF3,0xFB,0x90,0xBD,0xBC,0x03,0x00,0x02,0xF0, \ 
+0xF0,0xF0,0xF0,0x2C,0x04,0x00,0x02,0x80,0xB5,0x05,0x48,0x05,0x4F,0x38,0x60, \ +0xFC,0xF7,0xD6,0xFC,0x00,0xF0,0xD4,0xFB,0x03,0x48,0x38,0x60,0x80,0xBD,0x00, \ +0x00,0xF0,0xF0,0xF0,0xF0,0xBC,0x03,0x00,0x02,0xF1,0xF0,0xF0,0xF0,0x0F,0x00, \ +0x2D,0xE9,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xCC,0x30,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2, \ +0x00,0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D, \ +0xE9,0xB0,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20, \ +0x83,0xE5,0x9C,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04, \ +0x00,0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9, \ +0x84,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x10,0xD0,0x8D,0xE2,0x78,0x00,0x9F, \ +0xE5,0x10,0xFF,0x2F,0xE1,0x0F,0x00,0x2D,0xE9,0x64,0x30,0x9F,0xE5,0x00,0x20, \ +0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2,0x00, \ +0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D,0xE9, \ +0x4C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20,0x83, \ +0xE5,0x34,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04,0x00, \ +0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9,0x20, \ +0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2, \ +0x01,0x40,0x2D,0xE9,0x0C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0xBC,0x03,0x00, \ +0x02,0xAC,0x03,0x00,0x02,0x1D,0x01,0x00,0x00,0x29,0x01,0x00,0x00,0x00,0xA0, \ +0x00,0x47,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xE4,0x31,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3, \ +0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD, \ +0xE8,0x0E,0xF0,0xB0,0xE1,0xC0,0x11,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00, \ +0x50,0xE3,0x27,0x00,0x00,0x0A,0xC0,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00, \ +0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA8,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5, \ +0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F, \ +0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08,0x40,0xBD,0xE8,0x0E,0x10, \ +0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x04,0x10,0x2D,0xE5,0xF0, \ +0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD2,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1, \ +0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0,0x21,0xE1,0x0F,0x00,0x2D, \ +0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x44,0x11,0x9F,0xE5,0x00,0x00, \ +0x91,0xE5,0x08,0xD0,0x80,0xE5,0x44,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00, \ +0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80,0xE5,0x00,0x20,0xA0,0xE3, \ +0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00,0x81,0xE5,0x78,0x01,0x00, \ +0xEA,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x75,0x01,0x00,0xEA,0x00,0xA0, \ +0x00,0x47,0xD1,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xF4,0x30,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3, \ +0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD, \ +0xE8,0x0E,0xF0,0xB0,0xE1,0x00,0x10,0x9D,0xE5,0x1F,0x20,0xA0,0xE3,0x02,0x10, \ +0x01,0xE0,0x12,0x00,0x51,0xE3,0x0B,0x00,0x00,0x0A,0xBC,0x10,0x9F,0xE5,0x00, \ +0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x27,0x00,0x00,0x0A,0xBC,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA4,0x30,0x9F, \ +0xE5,0x00,0x20,0x93,0xE5,0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40, \ +0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08, \ +0x40,0xBD,0xE8,0x0E,0x10,0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1, \ 
+0x04,0x10,0x2D,0xE5,0xF0,0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD1,0x20,0xA0, \ +0xE3,0x02,0xF0,0x21,0xE1,0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0, \ +0x21,0xE1,0x0F,0x00,0x2D,0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x40, \ +0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x08,0xD0,0x80,0xE5,0x40,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80, \ +0xE5,0x00,0x20,0xA0,0xE3,0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00, \ +0x81,0xE5,0x37,0x01,0x00,0xEA,0x18,0xD0,0x8D,0xE2,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x33,0x01,0x00,0xEA,0xBC,0x03,0x00,0x02,0xAC,0x03,0x00,0x02, \ +0xA8,0x03,0x00,0x02,0xB0,0x03,0x00,0x02,0x34,0x04,0x00,0x02,0xCC,0x03,0x00, \ +0x02,0x90,0xB5,0x86,0xB0,0x00,0x24,0x13,0x4F,0x13,0x4A,0x3C,0x60,0x7C,0x60, \ +0xBC,0x60,0x00,0x21,0x10,0x1C,0xBC,0x61,0x10,0xC0,0x01,0x31,0x20,0x29,0xFB, \ +0xD3,0xD0,0x1D,0x79,0x30,0xFA,0x60,0x38,0x61,0x7A,0x61,0x78,0x6A,0x00,0x22, \ +0x00,0x21,0x05,0x92,0x02,0x1C,0x04,0x91,0x03,0x90,0xF8,0x69,0x39,0x6A,0x02, \ +0x92,0x00,0x90,0x01,0x91,0x08,0xA1,0x07,0x48,0x05,0x4A,0x0C,0x4B,0x00,0xF0, \ +0x18,0xF8,0xBC,0x62,0xFC,0x62,0x06,0xB0,0x90,0xBD,0x30,0x04,0x00,0x02,0x38, \ +0x0D,0x00,0x02,0x4D,0x3F,0x00,0x00,0xB8,0x0D,0x00,0x02,0x53,0x79,0x73,0x74, \ +0x65,0x6D,0x20,0x54,0x69,0x6D,0x65,0x72,0x20,0x54,0x68,0x72,0x65,0x61,0x64, \ +0x00,0x4D,0x49,0x54,0x41,0xFF,0xB5,0x07,0x1C,0x0D,0x98,0x0E,0x9C,0x0A,0xAE, \ +0x4C,0xCE,0x09,0x9D,0xB9,0x62,0x02,0x99,0x79,0x64,0x03,0x99,0xB9,0x64,0x00, \ +0x21,0xB9,0x60,0x7A,0x61,0xAA,0x18,0x01,0x3A,0xFD,0x60,0x3A,0x61,0xDA,0x06, \ +0xD2,0x0E,0xF3,0x06,0xB8,0x61,0xFA,0x62,0xDB,0x0E,0xFB,0x63,0xF8,0x61,0x79, \ +0x60,0x03,0x20,0x38,0x63,0x79,0x63,0x25,0x48,0xB9,0x63,0x78,0x65,0x01,0x20, \ +0x90,0x40,0xBF,0x65,0x39,0x65,0x64,0x37,0x00,0x22,0x00,0x23,0x0E,0xC7,0x0C, \ +0xC7,0x78,0x3F,0x38,0x64,0x38,0x1C,0x1E,0x49,0x00,0xF0,0x0B,0xFB,0xC0,0x20, \ +0x00,0xF0,0xC8,0xF9,0x1C,0x49,0x1D,0x4D,0x39,0x60,0x2A,0x68,0xF9,0x1D,0x79, \ +0x31,0x00,0x2A,0x0A,0xD0,0xD3,0x1D,0x79,0x33,0xDA,0x68,0xDF,0x60,0xD3,0x1D, \ +0x79,0x33,0x9F,0x60,0xCA,0x60,0x2A,0x68,0x8A,0x60,0x02,0xE0,0x2F,0x60,0x8F, \ +0x60,0xCF,0x60,0x13,0x49,0x14,0x4D,0x0A,0x68,0x01,0x32,0x0A,0x60,0x29,0x68, \ +0x01,0x31,0x29,0x60,0x00,0xF0,0xA8,0xF9,0x00,0x2C,0x07,0xD0,0x38,0x1C,0x00, \ +0xF0,0x1D,0xFB,0x00,0x28,0x0A,0xD0,0x00,0xF0,0x83,0xFB,0x07,0xE0,0xC0,0x20, \ +0x00,0xF0,0x9B,0xF9,0x29,0x68,0x01,0x39,0x29,0x60,0x00,0xF0,0x96,0xF9,0x00, \ +0x20,0x04,0xB0,0xF0,0xBD,0x00,0x00,0xA9,0x40,0x00,0x00,0xE9,0x40,0x00,0x00, \ +0x44,0x52,0x48,0x54,0xB4,0x03,0x00,0x02,0xB8,0x03,0x00,0x02,0xCC,0x03,0x00, \ +0x02,0x90,0xB5,0x41,0x60,0x07,0x1C,0x08,0x30,0x00,0x21,0x00,0x24,0x12,0xC0, \ +0x12,0xC0,0xC0,0x20,0x00,0xF0,0x7B,0xF9,0x0C,0x49,0x0C,0x4B,0x39,0x60,0x19, \ +0x68,0x00,0x29,0x06,0xD0,0xCA,0x69,0xCF,0x61,0x97,0x61,0xFA,0x61,0x19,0x68, \ +0xB9,0x61,0x02,0xE0,0x1F,0x60,0xBF,0x61,0xFF,0x61,0x06,0x49,0x0A,0x68,0x01, \ +0x32,0x0A,0x60,0x00,0xF0,0x65,0xF9,0x20,0x1C,0x90,0xBD,0x00,0x00,0x4E,0x44, \ +0x56,0x44,0x60,0x04,0x00,0x02,0x64,0x04,0x00,0x02,0xF0,0xB5,0x85,0xB0,0x07, \ +0x1C,0xC0,0x20,0x0C,0x1C,0x15,0x1C,0x00,0xF0,0x54,0xF9,0xA9,0x08,0x03,0xD3, \ +0xB9,0x68,0x21,0x40,0xB9,0x60,0x3D,0xE0,0xB9,0x68,0x21,0x43,0xB9,0x60,0x3C, \ +0x69,0x0A,0x1C,0x00,0x2C,0x63,0xD0,0x7B,0x69,0x6D,0x49,0x01,0x2B,0x35,0xD1, \ +0xE5,0x1D,0x79,0x35,0x2E,0x68,0xB3,0x08,0x05,0xD3,0xA3,0x6F,0x16,0x1C,0x1E, \ +0x40,0x9E,0x42,0x28,0xD1,0x02,0xE0,0xA3,0x6F,0x13,0x40,0x24,0xD0,0xE3,0x6F, \ +0x1A,0x60,0x2A,0x68,0x52,0x08,0x03,0xD3,0xBA,0x68,0xA3,0x6F,0x9A,0x43,0xBA, \ 
+0x60,0x00,0x26,0x3E,0x61,0x7E,0x61,0xA6,0x66,0x0A,0x68,0x01,0x32,0x0A,0x60, \ +0x00,0xF0,0x23,0xF9,0x60,0x6E,0x00,0x28,0x04,0xD0,0xE0,0x1D,0x45,0x30,0x00, \ +0xF0,0x34,0xFB,0x00,0xE0,0xE6,0x64,0x6E,0x60,0x20,0x1C,0x00,0xF0,0x90,0xFA, \ +0x00,0x28,0x01,0xD0,0x00,0xF0,0xF6,0xFA,0x30,0x1C,0x9B,0xE0,0x00,0xF0,0x0E, \ +0xF9,0x97,0xE0,0x03,0x94,0x00,0x25,0x3D,0x61,0x01,0x92,0x4F,0x4A,0x02,0x93, \ +0x11,0x68,0x00,0x26,0x01,0x31,0x11,0x60,0x00,0xF0,0x01,0xF9,0xC0,0x20,0x00, \ +0xF0,0xFE,0xF8,0xF9,0x68,0x00,0x29,0x0A,0xD0,0x00,0x21,0xF9,0x60,0x03,0x9C, \ +0x7B,0x69,0x02,0x93,0xB9,0x68,0x01,0x9A,0x11,0x43,0x01,0x91,0x00,0x2C,0x46, \ +0xD0,0xE3,0x1D,0x79,0x33,0x04,0x93,0x19,0x68,0x00,0x91,0x89,0x08,0x06,0xD3, \ +0xA1,0x6F,0x01,0x9A,0x0A,0x40,0x8A,0x42,0x07,0xD1,0x04,0xE0,0x6F,0xE0,0xA1, \ +0x6F,0x01,0x9A,0x11,0x40,0x01,0xD0,0x00,0x21,0x00,0xE0,0x07,0x21,0x22,0x6F, \ +0x00,0x29,0x28,0xD1,0x01,0x99,0xE3,0x6F,0x19,0x60,0x00,0x99,0x49,0x08,0x03, \ +0xD3,0xB9,0x68,0xA3,0x6F,0x99,0x43,0xB9,0x60,0x21,0x6F,0xA1,0x42,0x02,0xD1, \ +0x00,0x21,0x03,0x91,0x08,0xE0,0x03,0x9B,0xA3,0x42,0x00,0xD1,0x03,0x91,0x63, \ +0x6F,0x4B,0x67,0x21,0x6F,0x63,0x6F,0x19,0x67,0x79,0x69,0x01,0x39,0x79,0x61, \ +0x00,0x21,0xA1,0x66,0x04,0x9B,0x00,0x2D,0x59,0x60,0x03,0xD1,0x25,0x1C,0x26, \ +0x1C,0x21,0x67,0x02,0xE0,0x34,0x67,0x21,0x67,0x26,0x1C,0x02,0x9B,0x14,0x1C, \ +0x01,0x3B,0x02,0x93,0x00,0x2B,0xA5,0xD1,0x03,0x99,0x39,0x61,0x00,0xF0,0xA5, \ +0xF8,0x00,0x2D,0x18,0xD0,0x1C,0x4E,0x00,0x24,0x68,0x6E,0x2F,0x6F,0x00,0x28, \ +0x04,0xD0,0xE8,0x1D,0x45,0x30,0x00,0xF0,0xB1,0xFA,0x00,0xE0,0xEC,0x64,0xC0, \ +0x20,0x00,0xF0,0x94,0xF8,0x31,0x68,0x01,0x31,0x31,0x60,0x00,0xF0,0x8F,0xF8, \ +0x28,0x1C,0x00,0xF0,0x06,0xFA,0x3D,0x1C,0xE8,0xD1,0xC0,0x20,0x00,0xF0,0x87, \ +0xF8,0x0E,0x49,0x0A,0x68,0x01,0x3A,0x0A,0x60,0x00,0xF0,0x81,0xF8,0x0C,0x48, \ +0x0C,0x49,0x00,0x68,0x09,0x68,0x88,0x42,0x05,0xD0,0x0B,0x48,0x00,0x68,0x00, \ +0x28,0x01,0xD1,0x00,0xF0,0x59,0xFA,0x00,0x20,0x05,0xB0,0xF0,0xBD,0x79,0x69, \ +0x00,0x29,0x00,0xD1,0x5D,0xE7,0xF9,0x68,0x01,0x31,0xF9,0x60,0x59,0xE7,0xCC, \ +0x03,0x00,0x02,0xAC,0x03,0x00,0x02,0xB0,0x03,0x00,0x02,0xBC,0x03,0x00,0x02, \ +0xFF,0xB5,0x07,0x1C,0xC0,0x20,0x0D,0x1C,0x09,0x9E,0x00,0xF0,0x5B,0xF8,0x02, \ +0x9A,0x91,0x08,0x04,0xD3,0xB9,0x68,0x29,0x40,0xA9,0x42,0x0E,0xD1,0x02,0xE0, \ +0xB9,0x68,0x29,0x40,0x0A,0xD0,0xB9,0x68,0x03,0x9B,0x00,0x24,0x19,0x60,0x02, \ +0x9A,0x51,0x08,0x3D,0xD3,0xB9,0x68,0xA9,0x43,0xB9,0x60,0x39,0xE0,0x07,0x24, \ +0x00,0x2E,0x36,0xD0,0x1D,0x49,0x0C,0x68,0x1D,0x49,0xA1,0x66,0xA5,0x67,0xE5, \ +0x1D,0x02,0x9A,0x79,0x35,0x2A,0x60,0x03,0x9B,0xE3,0x67,0xE7,0x66,0x39,0x69, \ +0x00,0x29,0x09,0xD0,0x21,0x67,0x39,0x69,0x49,0x6F,0x61,0x67,0x39,0x69,0x49, \ +0x6F,0x0C,0x67,0x39,0x69,0x4C,0x67,0x02,0xE0,0x3C,0x61,0x24,0x67,0x64,0x67, \ +0x79,0x69,0x01,0x31,0x79,0x61,0x07,0x21,0x21,0x63,0x01,0x21,0xA1,0x63,0x0E, \ +0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xE6,0x64,0x00,0xF0,0x18,0xF8,0x01,0x23, \ +0xDE,0x42,0x03,0xD0,0xE0,0x1D,0x45,0x30,0x00,0xF0,0x97,0xFA,0x20,0x1C,0x00, \ +0xF0,0xCE,0xFA,0x68,0x68,0x04,0xB0,0xF0,0xBD,0x00,0xF0,0x09,0xF8,0x20,0x1C, \ +0xF9,0xE7,0x00,0x00,0xAC,0x03,0x00,0x02,0x1D,0x43,0x00,0x00,0xCC,0x03,0x00, \ +0x02,0x00,0xA3,0x18,0x47,0x00,0x30,0x0F,0xE1,0x3F,0x20,0xA0,0xE3,0x02,0x10, \ +0x03,0xE0,0x00,0x10,0x81,0xE1,0x01,0xF0,0x21,0xE1,0x02,0x00,0xC3,0xE1,0x1E, \ +0xFF,0x2F,0xE1,0x00,0xB5,0xFF,0xF7,0xD7,0xFB,0xFF,0xF7,0xB1,0xFD,0x00,0xF0, \ +0x8F,0xFB,0x00,0xF0,0x95,0xFB,0x00,0xF0,0xF5,0xF9,0x00,0xF0,0x99,0xFB,0x00, \ +0xF0,0x9F,0xFB,0x00,0xBD,0x00,0xA0,0x00,0x47,0x13,0x00,0xA0,0xE3,0x00,0xF0, \ 
+0x21,0xE1,0x48,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0xFC, \ +0xFF,0xFF,0x0A,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x34,0x10,0x9F,0xE5, \ +0x00,0x00,0x81,0xE5,0x04,0x20,0x90,0xE5,0x18,0x30,0x90,0xE5,0x01,0x20,0x82, \ +0xE2,0x04,0x20,0x80,0xE5,0x20,0x20,0x9F,0xE5,0x08,0xD0,0x90,0xE5,0x00,0x30, \ +0x82,0xE5,0x03,0x00,0xBD,0xE8,0x00,0x00,0x50,0xE3,0x01,0xF0,0x6F,0xE1,0xF0, \ +0x80,0xFD,0x08,0xFF,0xDF,0xFD,0xE8,0xB0,0x03,0x00,0x02,0xAC,0x03,0x00,0x02, \ +0x34,0x04,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x4E,0x4B,0x98,0x42,0x01,0xD0,0x02, \ +0xB0,0xF0,0xBD,0x00,0x20,0x00,0x90,0x00,0x26,0xC0,0x20,0xFF,0xF7,0x9F,0xFF, \ +0x4A,0x4D,0x29,0x68,0x09,0x68,0x01,0x91,0x00,0x29,0x01,0xD0,0x01,0xAA,0x8A, \ +0x61,0x29,0x68,0x46,0x4C,0x0E,0x60,0x29,0x68,0x04,0x31,0x29,0x60,0x22,0x68, \ +0x91,0x42,0x02,0xD1,0x43,0x49,0x09,0x68,0x29,0x60,0x43,0x49,0x0E,0x60,0xFF, \ +0xF7,0x87,0xFF,0xC0,0x20,0xFF,0xF7,0x84,0xFF,0x01,0x99,0x00,0x29,0x5C,0xD0, \ +0x01,0x9C,0x21,0x69,0xA1,0x42,0x01,0xD1,0x00,0x21,0x08,0xE0,0x62,0x69,0x4A, \ +0x61,0x21,0x69,0x62,0x69,0x11,0x61,0x22,0x69,0x01,0xA9,0x91,0x61,0x21,0x69, \ +0x01,0x91,0x21,0x68,0x20,0x29,0x03,0xD9,0x20,0x39,0x21,0x60,0x00,0x25,0x04, \ +0xE0,0x27,0x1D,0xA2,0xCF,0x21,0x60,0x00,0x29,0x03,0xD0,0x69,0x46,0xA1,0x61, \ +0x24,0x61,0x00,0xE0,0xA6,0x61,0xFF,0xF7,0x5E,0xFF,0x00,0x2D,0x02,0xD0,0x38, \ +0x1C,0x00,0xF0,0xFC,0xFB,0xC0,0x20,0xFF,0xF7,0x56,0xFF,0xA2,0x69,0x69,0x46, \ +0x8A,0x42,0x25,0xD1,0x21,0x68,0x20,0x29,0x01,0xD9,0x1F,0x21,0x00,0xE0,0x01, \ +0x39,0x20,0x4D,0x89,0x00,0x2A,0x68,0x89,0x18,0x1F,0x4A,0x12,0x68,0x91,0x42, \ +0x07,0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x1D,0x4A,0x89,0x10,0x12,0x68,0x89, \ +0x00,0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x22,0x61,0x0A,0x68,0x52,0x69, \ +0x62,0x61,0x14,0x61,0x0A,0x68,0x54,0x61,0xA1,0x61,0x03,0xE0,0x24,0x61,0xA1, \ +0x61,0x64,0x61,0x0C,0x60,0xFF,0xF7,0x2A,0xFF,0xC0,0x20,0xFF,0xF7,0x27,0xFF, \ +0x01,0x99,0x00,0x29,0xA2,0xD1,0x0F,0x49,0x09,0x68,0x00,0x29,0x10,0xD1,0x0E, \ +0x4C,0x03,0x21,0x22,0x68,0x11,0x63,0x22,0x68,0x01,0x21,0x91,0x63,0x0C,0x4A, \ +0x11,0x68,0x01,0x31,0x11,0x60,0xFF,0xF7,0x13,0xFF,0x20,0x68,0x00,0xF0,0xD0, \ +0xF9,0x6C,0xE7,0xFF,0xF7,0x0D,0xFF,0x69,0xE7,0x4D,0x49,0x54,0x41,0x44,0x04, \ +0x00,0x02,0x40,0x04,0x00,0x02,0x3C,0x04,0x00,0x02,0x48,0x04,0x00,0x02,0xAC, \ +0x03,0x00,0x02,0xCC,0x03,0x00,0x02,0x90,0xB5,0x07,0x1C,0x00,0x6B,0x04,0x28, \ +0x0C,0xD1,0xC0,0x20,0xFF,0xF7,0xF6,0xFE,0x0A,0x49,0x0A,0x68,0x01,0x32,0x0A, \ +0x60,0xFF,0xF7,0xF0,0xFE,0x38,0x1C,0x00,0xF0,0x67,0xF8,0x90,0xBD,0xC0,0x20, \ +0xFF,0xF7,0xE9,0xFE,0xBC,0x6E,0xFF,0xF7,0xE6,0xFE,0x00,0x2C,0xF6,0xD0,0x38, \ +0x1C,0x00,0xF0,0x83,0xFB,0x90,0xBD,0xCC,0x03,0x00,0x02,0x80,0xB5,0x0C,0x4F, \ +0x39,0x68,0x88,0x6C,0x49,0x6C,0x00,0xF0,0x76,0xFB,0xC0,0x20,0xFF,0xF7,0xD4, \ +0xFE,0x3A,0x68,0x01,0x21,0x11,0x63,0x3A,0x68,0x91,0x63,0x06,0x49,0x0A,0x68, \ +0x01,0x32,0x0A,0x60,0xFF,0xF7,0xC9,0xFE,0x38,0x68,0x00,0xF0,0x86,0xF9,0x80, \ +0xBD,0x00,0x00,0xAC,0x03,0x00,0x02,0xCC,0x03,0x00,0x02,0x00,0xA3,0x18,0x47, \ +0x10,0x20,0x90,0xE5,0x03,0x20,0xC2,0xE3,0x48,0x20,0x42,0xE2,0x01,0x30,0xA0, \ +0xE3,0x00,0x30,0x82,0xE5,0x33,0x30,0xA0,0xE3,0x04,0x30,0x82,0xE5,0x00,0x30, \ +0xA0,0xE3,0x08,0x30,0x82,0xE5,0x0C,0x30,0x82,0xE5,0x10,0x30,0x82,0xE5,0x14, \ +0x30,0x82,0xE5,0x18,0x30,0x82,0xE5,0x1C,0x30,0x82,0xE5,0x20,0x30,0x82,0xE5, \ +0x24,0x30,0x82,0xE5,0x28,0x30,0x82,0xE5,0x2C,0x30,0x82,0xE5,0x0C,0x30,0x90, \ +0xE5,0x30,0x30,0x82,0xE5,0x00,0x30,0xA0,0xE3,0x34,0x30,0x82,0xE5,0x38,0x30, \ +0x82,0xE5,0x3C,0x30,0x82,0xE5,0x40,0x10,0x82,0xE5,0x44,0x30,0x82,0xE5,0x08, \ 
+0x20,0x80,0xE5,0x1E,0xFF,0x2F,0xE1,0xF0,0xB5,0x00,0x24,0x07,0x1C,0xC0,0x20, \ +0xFF,0xF7,0x80,0xFE,0x29,0x49,0x2A,0x4D,0x0A,0x68,0x01,0x3A,0x0A,0x60,0xBA, \ +0x6B,0x00,0x21,0x00,0x2A,0x06,0xD0,0x3A,0x6B,0x01,0x2A,0x39,0xD0,0x02,0x2A, \ +0x37,0xD0,0xB9,0x63,0x07,0xE0,0x3A,0x6B,0x00,0x2A,0x32,0xD0,0x7A,0x6B,0x00, \ +0x2A,0x03,0xD0,0x79,0x63,0x03,0x21,0x39,0x63,0x2B,0xE0,0x39,0x63,0xF9,0x6A, \ +0x1D,0x4B,0x8E,0x00,0x9A,0x59,0x00,0x2A,0x05,0xD0,0x51,0x6A,0x0F,0x62,0x57, \ +0x62,0x79,0x62,0x3A,0x62,0x1E,0xE0,0x9F,0x51,0x3F,0x62,0x7F,0x62,0x17,0x4A, \ +0x3B,0x6C,0x16,0x68,0x33,0x43,0x13,0x60,0x2A,0x68,0x15,0x4B,0x00,0x2A,0x02, \ +0xD1,0x2F,0x60,0x19,0x60,0x0F,0xE0,0x1E,0x68,0xB1,0x42,0x0C,0xD2,0x19,0x60, \ +0xD3,0x6B,0x8B,0x42,0x08,0xD9,0xD1,0x6A,0x8B,0x42,0x04,0xD0,0x0E,0x49,0x12, \ +0x6C,0x0B,0x68,0x1A,0x43,0x0A,0x60,0x2F,0x60,0xFF,0xF7,0x38,0xFE,0x0B,0x48, \ +0x00,0x68,0x29,0x68,0x88,0x42,0x04,0xD0,0x0A,0x48,0x00,0x68,0x00,0x28,0x00, \ +0xD1,0x01,0x24,0x20,0x1C,0xF0,0xBD,0xCC,0x03,0x00,0x02,0xB0,0x03,0x00,0x02, \ +0xB8,0x0C,0x00,0x02,0xC0,0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0xC4,0x03,0x00, \ +0x02,0xAC,0x03,0x00,0x02,0xBC,0x03,0x00,0x02,0x00,0xA0,0x00,0x47,0x00,0x00, \ +0xA0,0xE3,0x00,0x10,0x0F,0xE1,0x20,0x10,0x81,0xE3,0xF3,0x40,0x2D,0xE9,0xD3, \ +0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x2C,0x30,0x9F,0xE5,0x00,0x00,0x93,0xE5, \ +0x28,0x20,0x9F,0xE5,0x00,0x10,0x92,0xE5,0x08,0xD0,0x80,0xE5,0x00,0x40,0xA0, \ +0xE3,0x00,0x00,0x51,0xE3,0x02,0x00,0x00,0x0A,0x1C,0x10,0x90,0xE5,0x00,0x40, \ +0x82,0xE5,0x18,0x10,0x80,0xE5,0x00,0x40,0x83,0xE5,0x0A,0xFF,0xFF,0xEA,0xAC, \ +0x03,0x00,0x02,0x34,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41,0x60, \ +0xF7,0x46,0x00,0x00,0x60,0x04,0x00,0x02,0x90,0xB5,0x07,0x1C,0xC0,0x20,0xFF, \ +0xF7,0xE3,0xFD,0xB9,0x69,0x00,0x24,0x00,0x29,0x16,0xD0,0x3A,0x69,0xBA,0x42, \ +0x04,0xD1,0x0A,0x68,0xBA,0x42,0x0F,0xD1,0x0C,0x60,0x0D,0xE0,0x79,0x69,0x51, \ +0x61,0x39,0x69,0x7A,0x69,0x11,0x61,0xB9,0x69,0x0A,0x68,0xBA,0x42,0x04,0xD1, \ +0x3A,0x69,0x91,0x61,0x39,0x69,0xBA,0x69,0x11,0x60,0xBC,0x61,0xFF,0xF7,0xC6, \ +0xFD,0x20,0x1C,0x90,0xBD,0xB0,0xB5,0x07,0x1C,0xC4,0x6E,0xC0,0x20,0xFF,0xF7, \ +0xBE,0xFD,0xB9,0x6E,0x00,0x29,0x38,0xD0,0x00,0x2C,0x36,0xD0,0x21,0x68,0x1C, \ +0x4B,0x99,0x42,0x32,0xD1,0x00,0x25,0xBD,0x66,0x39,0x6F,0xB9,0x42,0x01,0xD1, \ +0x25,0x61,0x06,0xE0,0x21,0x61,0x79,0x6F,0x3A,0x6F,0x51,0x67,0x39,0x6F,0x7A, \ +0x6F,0x11,0x67,0x61,0x69,0x01,0x39,0x61,0x61,0x39,0x6B,0x07,0x29,0x10,0xD1, \ +0xFA,0x1D,0x79,0x32,0x51,0x60,0x10,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xFF, \ +0xF7,0x98,0xFD,0x38,0x1C,0xFF,0xF7,0x0F,0xFF,0x00,0x28,0x04,0xD0,0xFF,0xF7, \ +0x75,0xFF,0x01,0xE0,0xFF,0xF7,0x8E,0xFD,0x78,0x6E,0x00,0x28,0x04,0xD0,0xF8, \ +0x1D,0x45,0x30,0xFF,0xF7,0x9F,0xFF,0x00,0xE0,0xFD,0x64,0xC0,0x20,0xFF,0xF7, \ +0x82,0xFD,0xFF,0xF7,0x80,0xFD,0xB0,0xBD,0x00,0x00,0x4E,0x44,0x56,0x44,0xCC, \ +0x03,0x00,0x02,0x80,0xB5,0x07,0x1C,0xC0,0x20,0xFF,0xF7,0x75,0xFD,0x39,0x68, \ +0x00,0x29,0x27,0xD0,0xBA,0x69,0x00,0x2A,0x24,0xD1,0x20,0x29,0x01,0xD9,0x1F, \ +0x21,0x00,0xE0,0x01,0x39,0x11,0x4A,0x89,0x00,0x12,0x68,0x89,0x18,0x10,0x4A, \ +0x12,0x68,0x91,0x42,0x07,0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x0E,0x4A,0x89, \ +0x10,0x12,0x68,0x89,0x00,0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x3A,0x61, \ +0x0A,0x68,0x52,0x69,0x7A,0x61,0x17,0x61,0x0A,0x68,0x57,0x61,0xB9,0x61,0x03, \ +0xE0,0x3F,0x61,0xB9,0x61,0x7F,0x61,0x0F,0x60,0xFF,0xF7,0x48,0xFD,0x00,0x20, \ +0x80,0xBD,0x44,0x04,0x00,0x02,0x40,0x04,0x00,0x02,0x3C,0x04,0x00,0x02,0xF0, \ +0xB5,0x05,0x1C,0xC0,0x20,0xFF,0xF7,0x3B,0xFD,0x67,0x49,0x67,0x4C,0x0A,0x68, \ 
+0x67,0x4F,0x01,0x3A,0x0A,0x60,0xAA,0x6B,0x00,0x2A,0x74,0xD0,0x00,0x26,0xAE, \ +0x63,0xEA,0x6A,0x2B,0x6A,0xAB,0x42,0x26,0xD0,0x69,0x6A,0x59,0x62,0x29,0x6A, \ +0x6B,0x6A,0x19,0x62,0x91,0x00,0x5F,0x4A,0x53,0x58,0xAB,0x42,0x11,0xD1,0x2B, \ +0x6A,0x53,0x50,0x5D,0x49,0x0B,0x68,0x00,0x2B,0x02,0xD0,0x2E,0x6C,0xB3,0x43, \ +0x0B,0x60,0x5B,0x49,0x0B,0x68,0x9D,0x42,0x04,0xD1,0x5A,0x4B,0x1B,0x68,0x9B, \ +0x00,0xD2,0x58,0x0A,0x60,0xFF,0xF7,0x0E,0xFD,0x55,0x49,0x38,0x68,0x09,0x68, \ +0x88,0x42,0x60,0xD0,0x20,0x68,0x00,0x28,0x5E,0xD1,0x95,0xE0,0x00,0x26,0x4E, \ +0x4B,0x92,0x00,0x9E,0x50,0x2A,0x6C,0xD3,0x43,0x50,0x4A,0x16,0x68,0x33,0x40, \ +0x13,0x60,0x4B,0x4A,0x12,0x68,0x00,0x2A,0x03,0xD0,0x2E,0x6C,0xB2,0x43,0x48, \ +0x4E,0x32,0x60,0x1A,0x06,0x12,0x0E,0x02,0xD0,0x49,0x4B,0x9A,0x5C,0x14,0xE0, \ +0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x46,0x4B,0x9A,0x5C,0x08,0x32,0x0C, \ +0xE0,0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x42,0x4B,0x9A,0x5C,0x10,0x32, \ +0x04,0xE0,0x1A,0x0A,0x29,0xD0,0x3F,0x4B,0x9A,0x5C,0x18,0x32,0x3B,0x4B,0x1A, \ +0x60,0x39,0x4A,0x12,0x68,0x95,0x42,0x4D,0xD1,0x38,0x4E,0x35,0x4B,0x32,0x68, \ +0x36,0x4D,0x92,0x00,0x9A,0x58,0x2A,0x60,0x33,0x4A,0x12,0x68,0x00,0x2A,0x42, \ +0xD0,0x0E,0x1C,0x09,0x68,0x01,0x31,0x31,0x60,0xFF,0xF7,0xC1,0xFC,0xC0,0x20, \ +0xFF,0xF7,0xBE,0xFC,0x00,0xE0,0x42,0xE0,0x31,0x68,0x01,0x39,0x31,0x60,0x2A, \ +0x49,0x0A,0x68,0x11,0x06,0x09,0x0E,0x0D,0xD0,0x2B,0x4B,0x59,0x5C,0x1E,0xE0, \ +0x28,0x4B,0x20,0x21,0x19,0x60,0x25,0x49,0x00,0x26,0x0E,0x60,0xFF,0xF7,0xA9, \ +0xFC,0x38,0xE0,0x39,0xE0,0x38,0xE0,0x13,0x0A,0x19,0x06,0x09,0x0E,0x03,0xD0, \ +0x22,0x4B,0x59,0x5C,0x08,0x31,0x0B,0xE0,0x1B,0x0A,0x19,0x06,0x09,0x0E,0x03, \ +0xD0,0x1E,0x4B,0x59,0x5C,0x10,0x31,0x03,0xE0,0x19,0x0A,0x1C,0x4B,0x59,0x5C, \ +0x18,0x31,0x15,0x4B,0x89,0x00,0x59,0x58,0x17,0x4E,0xCB,0x6B,0x36,0x68,0xB3, \ +0x42,0x05,0xD8,0x29,0x60,0x09,0x6C,0x11,0x4B,0xC9,0x43,0x11,0x40,0x19,0x60, \ +0xFF,0xF7,0x82,0xFC,0x0F,0x4A,0x38,0x68,0x11,0x68,0x88,0x42,0x0F,0xD0,0x20, \ +0x68,0x00,0x28,0x0C,0xD1,0x09,0xE0,0xFF,0xF7,0x77,0xFC,0x0A,0x49,0x38,0x68, \ +0x09,0x68,0x88,0x42,0x04,0xD0,0x20,0x68,0x00,0x28,0x01,0xD1,0xFF,0xF7,0x51, \ +0xFE,0xF0,0xBD,0xCC,0x03,0x00,0x02,0xBC,0x03,0x00,0x02,0xAC,0x03,0x00,0x02, \ +0xB8,0x0C,0x00,0x02,0xC4,0x03,0x00,0x02,0xB0,0x03,0x00,0x02,0xC8,0x03,0x00, \ +0x02,0xC0,0x03,0x00,0x02,0xB8,0x0B,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60, \ +0x41,0x60,0xF7,0x46,0x00,0x00,0x68,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01, \ +0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x70,0x04,0x00,0x02,0x02,0x48,0x00,0x21, \ +0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x78,0x04,0x00,0x02,0x02,0x48,0x00, \ +0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x80,0x04,0x00,0x02,0xBC,0x46, \ +0x03,0x1C,0x08,0x43,0x80,0x07,0x13,0xD1,0x12,0x1F,0x05,0xD3,0x01,0xCB,0x80, \ +0xC9,0xC0,0x1B,0x04,0xD1,0x12,0x1F,0xF9,0xD2,0xD2,0x1C,0x0C,0xD3,0x02,0xE0, \ +0x1B,0x1F,0x09,0x1F,0xD2,0x1C,0x18,0x78,0x0F,0x78,0xC0,0x1B,0x04,0xD1,0x5B, \ +0x1C,0x49,0x1C,0x52,0x1E,0xF7,0xD2,0x00,0x20,0x67,0x46,0xF7,0x46,0x43,0x1A, \ +0x93,0x42,0x30,0xD3,0x84,0x46,0x8B,0x07,0x07,0xD0,0x52,0x1E,0x29,0xD3,0x0B, \ +0x78,0x03,0x70,0x40,0x1C,0x49,0x1C,0x8B,0x07,0xF7,0xD1,0x83,0x07,0x17,0xD1, \ +0x10,0x3A,0x05,0xD3,0xB0,0xB4,0xB8,0xC9,0xB8,0xC0,0x10,0x3A,0xFB,0xD2,0xB0, \ +0xBC,0x0C,0x32,0x0F,0xD3,0x08,0xC9,0x08,0xC0,0x12,0x1F,0xFB,0xD2,0x0A,0xE0, \ +0x08,0xC9,0x03,0x70,0x1B,0x0A,0x43,0x70,0x1B,0x0A,0x83,0x70,0x1B,0x0A,0xC3, \ +0x70,0x00,0x1D,0x12,0x1F,0xF4,0xD2,0xD2,0x1C,0x05,0xD3,0x0B,0x78,0x03,0x70, \ +0x49,0x1C,0x40,0x1C,0x52,0x1E,0xF9,0xD2,0x60,0x46,0xF7,0x46,0x03,0x1C,0x0B, \ 
+0x43,0x13,0x43,0x9B,0x07,0x04,0xD1,0x12,0x1F,0x8B,0x58,0x83,0x50,0xFB,0xD1, \ +0xF7,0x46,0x52,0x1E,0x8B,0x5C,0x83,0x54,0xFB,0xD1,0xF7,0x46,0x00,0x00,0x4B, \ +0x08,0x02,0x1C,0x02,0xD1,0x00,0xF0,0x79,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9, \ +0x00,0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91, \ +0x42,0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0xF7,0x46, \ +0x00,0x00,0xCB,0x17,0x59,0x40,0xC9,0x1A,0xC2,0x17,0x50,0x40,0x80,0x1A,0x0C, \ +0xB4,0x4B,0x08,0x02,0x1C,0x02,0xD1,0x00,0xF0,0x5A,0xF8,0x52,0x00,0x9A,0x42, \ +0xFC,0xD9,0x00,0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52, \ +0x08,0x91,0x42,0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C, \ +0x0C,0xBC,0x5A,0x40,0x50,0x40,0x80,0x1A,0x59,0x40,0xC9,0x1A,0x70,0x47,0xB0, \ +0xB5,0x0D,0x48,0x42,0x6E,0x03,0x6E,0x97,0x00,0xC1,0x1F,0x75,0x39,0x9C,0x00, \ +0x0C,0x59,0xCD,0x59,0x2C,0x19,0xCC,0x51,0x59,0x1E,0x36,0x23,0x00,0x29,0x01, \ +0x66,0x03,0xDA,0x51,0x1E,0x41,0x66,0x03,0x66,0x03,0xE0,0x51,0x1E,0x41,0x66, \ +0x00,0xD5,0x43,0x66,0x60,0x00,0x40,0x08,0xB0,0xBD,0x04,0x05,0x00,0x02,0x80, \ +0xB5,0x09,0x49,0x17,0x22,0x0A,0x66,0x36,0x22,0x4A,0x66,0x07,0x4A,0x00,0x21, \ +0x03,0x0C,0x1F,0x18,0x8B,0x00,0xD7,0x50,0x05,0x4B,0x01,0x31,0x58,0x43,0x05, \ +0x4B,0xC0,0x18,0x37,0x29,0xF4,0xDB,0x80,0xBD,0x04,0x05,0x00,0x02,0x88,0x04, \ +0x00,0x02,0xCD,0x0D,0x01,0x00,0xE1,0x19,0xD6,0x66,0x00,0x47,0x08,0x47,0x10, \ +0x47,0x18,0x47,0x20,0x47,0x28,0x47,0x30,0x47,0x38,0x47,0x78,0x47,0x00,0x00, \ +0x2C,0xC0,0x9F,0xE5,0xFF,0x7F,0x8C,0xE8,0x3C,0xC0,0x8C,0xE2,0x0C,0x00,0x8F, \ +0xE2,0x3C,0x10,0x4C,0xE2,0x04,0xE0,0x4E,0xE2,0x00,0xE0,0x8C,0xE5,0x50,0xEE, \ +0xFF,0xEA,0x20,0x00,0x00,0x80,0x44,0x69,0x76,0x69,0x64,0x65,0x20,0x62,0x79, \ +0x20,0x7A,0x65,0x72,0x6F,0x00,0x00,0x6C,0x05,0x00,0x02,0x78,0x47,0x00,0x00, \ +0x01,0xE0,0x8E,0xE3,0x04,0x00,0xA0,0xE3,0x00,0x00,0x90,0xE5,0xFF,0x04,0x00, \ +0xE2,0xEA,0x04,0x50,0xE3,0x01,0x00,0xA0,0x03,0x00,0x00,0xA0,0x13,0x1E,0xFF, \ +0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78, \ +0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x05,0x0B,0x40,0xE2,0x1B,0x0B,0x50,0xE3, \ +0x01,0x00,0xA0,0x33,0x00,0x00,0xA0,0x23,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00, \ +0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0, \ +0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x00, \ +0x00,0xA0,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3, \ +0x00,0x00,0x8F,0xE2,0x1E,0xFF,0x2F,0xE1,0x55,0x6E,0x6B,0x6E,0x6F,0x77,0x6E, \ +0x20,0x45,0x72,0x72,0x6F,0x72,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x42,0x72, \ +0x61,0x6E,0x63,0x68,0x20,0x54,0x68,0x72,0x6F,0x75,0x67,0x68,0x20,0x5A,0x65, \ +0x72,0x6F,0x00,0x01,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65, \ +0x64,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E,0x00,0x00, \ +0x00,0x02,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65,0x64,0x20, \ +0x53,0x57,0x49,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E, \ +0x00,0x00,0x00,0x03,0x00,0x02,0x00,0x50,0x72,0x65,0x66,0x65,0x74,0x63,0x68, \ +0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x04,0x00,0x02,0x00,0x44,0x61,0x74, \ +0x61,0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x05,0x00,0x02,0x00,0x41,0x64, \ +0x64,0x72,0x65,0x73,0x73,0x20,0x45,0x78,0x63,0x65,0x70,0x74,0x69,0x6F,0x6E, \ +0x00,0x00,0x00,0x06,0x00,0x02,0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65, \ +0x64,0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x07,0x00,0x02, \ +0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65,0x64,0x20,0x46,0x61,0x73,0x74, \ 
+0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x00,0x00,0x00,0xC0, \ +0x48,0x00,0x00,0xD8,0x48,0x00,0x00,0xF4,0x48,0x00,0x00,0x14,0x49,0x00,0x00, \ +0x28,0x49,0x00,0x00,0x38,0x49,0x00,0x00,0x50,0x49,0x00,0x00,0x68,0x49,0x00, \ +0x00,0x28,0x20,0x4F,0xE2,0x00,0x01,0x92,0xE7,0xE8,0xED,0xFF,0xEA,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x40,0x2D,0xE9,0x50,0x00,0x9F,0xE5,0x50,0x10,0x9F,0xE5,0x01, \ +0x20,0xA0,0xE1,0x4C,0x40,0x9F,0xE5,0x04,0x20,0x82,0xE0,0x05,0x00,0x00,0xEB, \ +0x44,0x20,0x9F,0xE5,0x44,0x00,0x9F,0xE5,0x00,0x10,0xA0,0xE1,0x02,0x10,0x81, \ +0xE0,0x05,0x00,0x00,0xEB,0x00,0x80,0xBD,0xE8,0x02,0x00,0x51,0xE1,0x04,0x30, \ +0x90,0x34,0x04,0x30,0x81,0x34,0xFB,0xFF,0xFF,0x3A,0x0E,0xF0,0xA0,0xE1,0x00, \ +0x20,0xA0,0xE3,0x01,0x00,0x50,0xE1,0x04,0x20,0x80,0x34,0xFB,0xFF,0xFF,0x3A, \ +0x0E,0xF0,0xA0,0xE1,0x98,0x4A,0x00,0x00,0x00,0x00,0x00,0x02,0xAC,0x05,0x00, \ +0x00,0x9C,0x08,0x00,0x00,0xAC,0x05,0x00,0x02,0x78,0x47,0x00,0x00,0xD3,0x00, \ +0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3,0x04, \ +0x10,0x80,0xE5,0x0C,0x10,0x80,0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90,0xE5, \ +0x00,0x00,0xA0,0xE3,0x10,0xFF,0x2F,0xE1,0x00,0x00,0xA0,0xE1,0x00,0x00,0xA0, \ +0xE1,0x00,0x00,0x00,0x00,0x14,0x00,0x0A,0x00,0x90,0x00,0x30,0x00,0x08,0x06, \ +0x07,0x00,0x82,0x84,0x8B,0x96,0x09,0x04,0x02,0x41,0x00,0x00,0x00,0x01,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11, \ +0x11,0x11,0x11,0x11,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01, \ +0x00,0x00,0x00,0x00,0x04,0xAC,0x6C,0x32,0x70,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x64,0x00,0x30,0x75,0x64,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x03,0x00,0x04,0xAC,0x6C,0x32,0x70,0x55, \ +0x4E,0x48,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x01,0x01,0x00,0x00,0x00,0x00,0x45,0x55,0x00,0x00,0x00,0x00,0x00,0xFA, \ +0x00,0x00,0x00,0xFA,0x00,0x00,0x2A,0x09,0x2A,0x09,0x1F,0x00,0xFF,0x00,0x08, \ +0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x41,0x54,0x4D,0x45, \ +0x4C,0x5F,0x41,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF, \ +0xFF,0xFF,0xFF,0xFF,0x01,0x00,0x05,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00, \ +0x00,0x01,0x01,0x00,0x5B,0x00,0x04,0x1E,0x1E,0x1E,0x1E,0x00,0x00,0x28,0x28, \ +0x28,0x00,0x00,0x32,0x3C,0x46,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01, \ +0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x01,0x01,0x00, \ +0x01,0x01,0x01,0x00,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x00, \ +0x01,0x01,0x00,0x00,0x01,0x01,0x00,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01, \ 
+0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC4,0x01,0x00,0x02,0x00,0x00,0x00,0x07, \ +0x03,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x01, \ +0x10,0x01,0xFE,0x01,0x00,0x08,0xEB,0x03,0x06,0x76,0x00,0x01,0x00,0x00,0x00, \ +0x01,0x09,0x02,0x20,0x00,0x01,0x01,0x00,0x80,0xFA,0x09,0x04,0x00,0x00,0x02, \ +0xFF,0x00,0xFF,0x00,0x07,0x05,0x81,0x02,0x40,0x00,0x00,0x07,0x05,0x02,0x02, \ +0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x07, \ +0xFF,0x07,0xFF,0x1F,0x00,0x06,0x00,0x1E,0x00,0x20,0xFF,0x3F,0xFC,0x01,0x01, \ +0x01,0x01,0x0A,0x0A,0x0E,0x01,0x03,0x00,0x00,0x00,0x00,0xAA,0xAA,0x03,0x00, \ +0x00,0x00,0xAA,0xAA,0x03,0x00,0x00,0xF8,0x37,0x81,0xF3,0x80,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0xD8,0x05, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04, \ +0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00, \ +0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00, \ +0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0xF6,0x07, \ +0x00,0x00,0xFB,0x07,0x00,0x00,0x00,0x08,0x00,0x00,0x05,0x08,0x00,0x00,0x0A, \ +0x08,0x00,0x00,0x0F,0x08,0x00,0x00,0x14,0x08,0x00,0x00,0x19,0x08,0x00,0x00, \ +0x1E,0x08,0x00,0x00,0x23,0x08,0x00,0x00,0x28,0x08,0x00,0x00,0x2D,0x08,0x00, \ +0x00,0x32,0x08,0x00,0x00,0x3E,0x08,0x00,0x00,0x43,0x6F,0x70,0x79,0x72,0x69, \ +0x67,0x68,0x74,0x20,0x28,0x63,0x29,0x20,0x31,0x39,0x39,0x36,0x2D,0x32,0x30, \ +0x30,0x30,0x20,0x45,0x78,0x70,0x72,0x65,0x73,0x73,0x20,0x4C,0x6F,0x67,0x69, \ +0x63,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2A,0x20,0x54,0x68,0x72,0x65,0x61,0x64, \ +0x58,0x20,0x54,0x48,0x55,0x4D,0x42,0x2D,0x46,0x2F,0x41,0x52,0x4D,0x20,0x56, \ +0x65,0x72,0x73,0x69,0x6F,0x6E,0x20,0x47,0x33,0x2E,0x30,0x66,0x2E,0x33,0x2E, \ +0x30,0x62,0x20,0x2A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xF0,0xF0, \ +0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x47,0x2D,0x47,0x42,0x2D,0x47,0x4C,0x2D,0x4D,0x2D,0x44,0x2D,0x44, \ +0x4C,0x2D,0x4B,0x4D,0x4C,0x2D,0x43,0x4D,0x52,0x2D,0x48,0x4D,0x52,0x2D,0x4D, \ +0x4C,0x32,0x2D,0x47,0x5A,0x2D,0x4B,0x48,0x32,0x2D,0x43,0x4D,0x2D,0x52,0x50, \ +0x2D,0x54,0x43,0x2D,0x4E,0x48,0x2D,0x54,0x44,0x2D,0x41,0x50,0x2D,0x48,0x41, \ +0x2D,0x47,0x46,0x2D,0x44,0x44,0x2D,0x41,0x54,0x2D,0x4D,0x46,0x2D,0x4D,0x53, \ +0x2D,0x44,0x57,0x2D,0x55,0x53,0x41,0x2D,0x43,0x41,0x2D,0x53,0x44,0x2D,0x53, \ 
+0x44,0x53,0x55,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x85,0x8E,0xD7,0x66,0x09, \ +0x8C,0xD3,0xD5,0xF5,0xD8,0x09,0x0A,0xFB,0x87,0x1F,0xBF,0x67,0xF7,0x8D,0xCB, \ +0x69,0x07,0xF7,0xBD,0x34,0x12,0x3D,0x50,0xC8,0x84,0x4F,0x7F,0xA3,0x02,0xDE, \ +0x61,0xAE,0x8D,0x40,0xA7,0xE8,0xBD,0x24,0x7A,0xEA,0xA2,0x15,0x51,0x57,0x2E, \ +0xE6,0xBB,0xFF,0x7F,0xD5,0xF6,0x7A,0x83,0x2A,0x63,0x77,0x1D,0x86,0x13,0x7C, \ +0x2E,0x9F,0xE1,0x05,0x57,0x5F,0x69,0x2E,0x6B,0x93,0x87,0x6E,0x9A,0xA1,0x50, \ +0x94,0x0E,0x8B,0x72,0xAE,0x55,0xCC,0xC5,0xB1,0x8A,0x0A,0xB1,0xD7,0x72,0x6F, \ +0x85,0x17,0x5C,0x22,0xD0,0xA3,0xFD,0xC4,0x51,0x61,0x98,0xED,0x89,0x9F,0x82, \ +0xDB,0xF1,0x9D,0xC5,0xFB,0xBC,0x89,0xC1,0xEE,0x83,0x59,0xB1,0x59,0x63,0x30, \ +0x5C,0x50,0xCC,0xC9,0x5A,0xBC,0x9C,0xF9,0x30,0xE2,0x2F,0x42,0x5E,0xF6,0x39, \ +0xD2,0x7B,0x15,0x75,0xFB,0x58,0xC1,0x40,0x3E,0x9A,0xEB,0x27,0xD9,0xA2,0x82, \ +0xC5,0xC2,0xD6,0x69,0x05,0xB3,0x30,0x8E,0xED,0xD2,0xDD,0x83,0x10,0x41,0xA4, \ +0x1D,0x1F,0x15,0xE2,0x60,0x56,0xC5,0x2F,0xF3,0x04,0x99,0xEF,0x8E,0xE1,0x08, \ +0x32,0x59,0x4A,0x4C,0xED,0x7B,0x5B,0x40,0xFC,0x02,0x81,0xD9,0x41,0x53,0x51, \ +0xFA,0x3D,0xFF,0xAC,0xB5,0x6C,0x09,0x6D,0x1D,0xCC,0xB3,0x2B,0xFF,0x15,0x3D, \ +0x25,0x17,0x00,0x00,0x00,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00} + +#define FW_505RFMD_EXTERNAL { \ +0x80,0xB5,0x10,0x49,0x00,0x20,0x08,0x70,0x0F,0x48,0x81,0x79,0xC0,0x79,0x00, \ +0x02,0x08,0x43,0x68,0x28,0x04,0xD0,0x03,0x21,0x0B,0x20,0x00,0xF0,0xE7,0xFC, \ +0x80,0xBD,0x08,0x21,0x0B,0x20,0x00,0xF0,0xE2,0xFC,0xC0,0x20,0xFD,0xF7,0x3B, \ +0xFF,0x07,0x1C,0x00,0xF0,0xA8,0xFA,0x38,0x1C,0xFD,0xF7,0x35,0xFF,0x01,0x21, \ +0x0B,0x20,0x00,0xF0,0xD5,0xFC,0x80,0xBD,0x93,0x01,0x00,0x02,0x98,0x09,0x00, \ +0x02,0xB0,0xB5,0x27,0x4C,0x20,0x78,0x0A,0x28,0x40,0xD2,0x02,0xA3,0x1B,0x5C, \ +0x5B,0x00,0x9F,0x44,0x00,0x1C,0x3C,0x05,0x09,0x0D,0x3C,0x11,0x15,0x19,0x3C, \ +0x1D,0xA0,0x78,0x20,0x49,0x45,0x18,0x16,0xE0,0xA0,0x78,0x1F,0x49,0x45,0x18, \ +0x12,0xE0,0xA0,0x78,0x1E,0x49,0x45,0x18,0x0E,0xE0,0xA0,0x78,0x1D,0x49,0x45, \ +0x18,0x0A,0xE0,0xA0,0x78,0x1C,0x49,0x45,0x18,0x06,0xE0,0xA0,0x78,0x1B,0x49, \ +0x45,0x18,0x02,0xE0,0xA0,0x78,0x1A,0x49,0x45,0x18,0x00,0x2D,0x1E,0xD0,0xC0, \ +0x20,0xFD,0xF7,0xFD,0xFE,0x61,0x78,0x07,0x1C,0x00,0x20,0x00,0x29,0x07,0xD9, \ +0x21,0x18,0x09,0x79,0x01,0x30,0x29,0x70,0x61,0x78,0x01,0x35,0x81,0x42,0xF7, \ +0xD8,0x02,0xF0,0x74,0xFE,0x38,0x1C,0xFD,0xF7,0xEB,0xFE,0x01,0x21,0x01,0x20, \ +0x00,0xF0,0x8B,0xFC,0xB0,0xBD,0x04,0x21,0x01,0x20,0x00,0xF0,0x86,0xFC,0xB0, \ +0xBD,0x03,0x21,0x01,0x20,0x00,0xF0,0x81,0xFC,0xB0,0xBD,0xA0,0x09,0x00,0x02, \ +0x04,0x01,0x00,0x02,0x5C,0x00,0x00,0x02,0xC0,0x00,0x00,0x02,0x80,0x00,0x00, \ +0x02,0x18,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0xF0,0xB5, \ 
+0x82,0xB0,0x5A,0x49,0x0E,0x20,0x08,0x83,0x5A,0x4A,0x60,0x39,0x57,0x4C,0x01, \ +0x92,0x50,0x7A,0xCD,0x1D,0xCF,0x1D,0xE6,0x1D,0x19,0x36,0x69,0x37,0x49,0x35, \ +0x00,0x28,0x2F,0xD0,0xF0,0x7B,0x54,0x49,0x00,0x28,0x18,0xD0,0xF0,0x79,0x01, \ +0x28,0x01,0xDB,0x0E,0x28,0x05,0xDD,0x03,0x21,0x03,0x20,0x00,0xF0,0x51,0xFC, \ +0x02,0xB0,0xF0,0xBD,0x00,0x20,0x00,0x22,0x0B,0x18,0x9A,0x73,0x0A,0x54,0x01, \ +0x30,0x00,0x04,0x00,0x0C,0x0E,0x28,0xF7,0xDB,0xFA,0x71,0x01,0x21,0xE9,0x71, \ +0x29,0xE0,0xF0,0x79,0x41,0x18,0x49,0x7B,0x00,0x29,0x0A,0xD1,0x02,0xF0,0xD8, \ +0xF9,0x00,0x06,0x00,0x0E,0xF0,0x71,0x04,0xD1,0x03,0x21,0x03,0x20,0x00,0xF0, \ +0x32,0xFC,0xDF,0xE7,0x00,0x22,0xEA,0x71,0x16,0xE0,0xF8,0x7A,0x3D,0x49,0x40, \ +0x00,0x08,0x5A,0xF1,0x79,0x00,0x91,0x4A,0x1E,0x01,0x21,0x91,0x40,0x08,0x40, \ +0x0B,0xD1,0x00,0x98,0x02,0xF0,0xBE,0xF9,0xF0,0x71,0xF0,0x79,0x00,0x28,0x04, \ +0xD1,0x03,0x21,0x03,0x20,0x00,0xF0,0x18,0xFC,0xC5,0xE7,0xC0,0x20,0xFD,0xF7, \ +0x70,0xFE,0x06,0x1C,0x04,0x20,0xFB,0xF7,0x1C,0xFD,0x2F,0x49,0x00,0x20,0x2F, \ +0x4A,0x0B,0x18,0x12,0x5C,0x01,0x30,0x00,0x04,0x00,0x0C,0x04,0x28,0x1A,0x74, \ +0xF6,0xDB,0x2C,0x48,0x2C,0x4A,0x00,0x88,0x10,0x80,0xC8,0x1D,0x09,0x30,0x00, \ +0x22,0x04,0x21,0x01,0xF0,0x21,0xFF,0x01,0x21,0xA9,0x71,0x28,0x48,0x04,0x21, \ +0x01,0x75,0x00,0x21,0xB9,0x72,0x06,0x22,0x21,0x1C,0x25,0x48,0xFE,0xF7,0x2F, \ +0xFA,0xA1,0x1D,0x20,0x22,0x24,0x48,0xFE,0xF7,0x2A,0xFA,0xE0,0x1D,0x19,0x30, \ +0x81,0x7B,0x22,0x4A,0x51,0x71,0x00,0x21,0x69,0x70,0x01,0x9A,0x20,0x23,0x91, \ +0x71,0x81,0x79,0x1F,0x4A,0x91,0x74,0xC0,0x79,0xD0,0x74,0x20,0x8D,0x90,0x82, \ +0x60,0x8D,0xD0,0x82,0xA0,0x8D,0x10,0x83,0x1B,0x48,0x01,0x78,0x19,0x43,0x01, \ +0x70,0x01,0x21,0xF9,0x70,0x01,0x9A,0x51,0x71,0xB8,0x78,0x01,0x28,0x03,0xD1, \ +0x00,0x21,0x00,0x20,0x03,0xF0,0x22,0xFA,0x00,0x20,0xB8,0x70,0x30,0x1C,0xFD, \ +0xF7,0x1D,0xFE,0x01,0x20,0x28,0x70,0x08,0x21,0x03,0x20,0x00,0xF0,0xBB,0xFB, \ +0x68,0xE7,0xA0,0x09,0x00,0x02,0x78,0x09,0x00,0x02,0xB0,0x00,0x00,0x02,0x14, \ +0x01,0x00,0x02,0x64,0x02,0x00,0x02,0x00,0x00,0x00,0x02,0x7C,0x02,0x00,0x02, \ +0x38,0x01,0x00,0x02,0x80,0x00,0x00,0x02,0x18,0x09,0x00,0x02,0xFC,0x00,0x00, \ +0x02,0xDC,0x00,0x00,0x02,0x04,0x01,0x00,0x02,0xC0,0x00,0x00,0x02,0x93,0x01, \ +0x00,0x02,0xF0,0xB5,0x82,0xB0,0x42,0x49,0x41,0x4E,0x01,0x91,0x48,0x7A,0xF4, \ +0x1D,0x19,0x34,0x00,0x28,0x40,0x4F,0x13,0xD0,0xF8,0x79,0x00,0x28,0x05,0xD1, \ +0x03,0x21,0x04,0x20,0x00,0xF0,0x8A,0xFB,0x02,0xB0,0xF0,0xBD,0xE0,0x79,0x3B, \ +0x49,0x40,0x18,0x40,0x7B,0x00,0x28,0x13,0xD1,0x03,0x21,0x04,0x20,0x00,0xF0, \ +0x7E,0xFB,0xF2,0xE7,0xF8,0x7A,0x36,0x49,0x40,0x00,0x08,0x5A,0xE1,0x79,0x01, \ +0x22,0x01,0x39,0x8A,0x40,0x10,0x40,0x04,0xD1,0x03,0x21,0x04,0x20,0x00,0xF0, \ +0x6F,0xFB,0xE3,0xE7,0xC0,0x20,0xFD,0xF7,0xC7,0xFD,0x00,0x90,0xA0,0x79,0x2E, \ +0x4D,0x02,0x28,0x02,0xD1,0x03,0x20,0xA8,0x71,0x03,0xE0,0x01,0x28,0x41,0xD1, \ +0x04,0x20,0xA8,0x71,0x04,0x20,0xFB,0xF7,0x68,0xFC,0x28,0x49,0x00,0x20,0x88, \ +0x70,0xA0,0x79,0x27,0x49,0x06,0x22,0x88,0x70,0x08,0x1F,0x31,0x1C,0xFE,0xF7, \ +0x91,0xF9,0xB1,0x1D,0x20,0x22,0x24,0x48,0xFE,0xF7,0x8C,0xF9,0xA0,0x7A,0x20, \ +0x49,0x48,0x71,0x00,0x20,0x68,0x70,0x01,0x99,0x88,0x71,0x08,0x21,0x04,0x20, \ +0x00,0xF0,0x41,0xFB,0x01,0x20,0xF8,0x70,0x01,0x99,0x48,0x71,0xB8,0x78,0x01, \ +0x28,0x03,0xD1,0x00,0x21,0x00,0x20,0x03,0xF0,0x92,0xF9,0x00,0x20,0xB8,0x70, \ +0x17,0x48,0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78,0x10,0x23,0x99, \ +0x43,0x01,0x70,0x00,0x98,0xFD,0xF7,0x84,0xFD,0x30,0x8D,0x81,0x02,0x04,0x20, \ +0xFB,0xF7,0x15,0xFC,0xE0,0x79,0x03,0xF0,0xF8,0xF8,0x94,0xE7,0x03,0x21,0x04, \ 
+0x20,0x00,0xF0,0x1B,0xFB,0x00,0x98,0xFD,0xF7,0x74,0xFD,0x8C,0xE7,0x00,0x00, \ +0xA0,0x09,0x00,0x02,0xB0,0x00,0x00,0x02,0x88,0x09,0x00,0x02,0x14,0x01,0x00, \ +0x02,0x64,0x02,0x00,0x02,0x68,0x09,0x00,0x02,0x04,0x01,0x00,0x02,0x00,0x01, \ +0x00,0x02,0xDC,0x00,0x00,0x02,0x93,0x01,0x00,0x02,0xF0,0xB5,0x25,0x48,0x10, \ +0x23,0x01,0x78,0x22,0x4C,0x99,0x43,0x01,0x70,0x01,0x78,0x20,0x23,0x99,0x43, \ +0x01,0x70,0x21,0x48,0x21,0x49,0xC0,0x7A,0x40,0x00,0x09,0x5A,0xE7,0x18,0xF8, \ +0x79,0x01,0x25,0x42,0x1E,0x2B,0x1C,0x93,0x40,0x19,0x40,0x04,0xD1,0x03,0x21, \ +0x05,0x20,0x00,0xF0,0xE6,0xFA,0xF0,0xBD,0xB9,0x79,0x01,0x29,0x04,0xD0,0x03, \ +0x21,0x05,0x20,0x00,0xF0,0xDE,0xFA,0xF0,0xBD,0x03,0xF0,0xB3,0xF8,0xC0,0x20, \ +0xFD,0xF7,0x34,0xFD,0x06,0x1C,0x38,0x7A,0x12,0x4F,0x78,0x71,0x12,0x48,0xC1, \ +0x1D,0x39,0x31,0x8D,0x70,0xA1,0x1D,0x1C,0x30,0x0C,0x1C,0x7A,0x79,0xFE,0xF7, \ +0x0A,0xF9,0x7A,0x79,0x0E,0x4F,0x21,0x1C,0xF8,0x1D,0x0D,0x30,0xFE,0xF7,0x03, \ +0xF9,0x00,0x20,0xF9,0x1D,0x29,0x31,0x88,0x71,0x00,0xF0,0x13,0xF8,0x30,0x1C, \ +0xFD,0xF7,0x16,0xFD,0xF0,0xBD,0x00,0x00,0xA0,0x09,0x00,0x02,0x93,0x01,0x00, \ +0x02,0x88,0x09,0x00,0x02,0x64,0x02,0x00,0x02,0x04,0x01,0x00,0x02,0xC0,0x00, \ +0x00,0x02,0x80,0x00,0x00,0x02,0xF0,0xB5,0xF9,0xF7,0x23,0xFF,0xFE,0xF7,0x83, \ +0xF9,0xF9,0xF7,0x1F,0xFF,0x2D,0x4F,0x02,0x21,0xB9,0x73,0x00,0x21,0xF9,0x73, \ +0x38,0x74,0x01,0x0A,0x79,0x74,0x01,0x0C,0x00,0x0E,0xB9,0x74,0x28,0x4E,0xF8, \ +0x74,0xF9,0x1D,0x07,0x31,0xF0,0x1D,0x06,0x22,0x35,0x30,0xFE,0xF7,0xCF,0xF8, \ +0x25,0x4C,0x01,0x25,0xF8,0x1D,0x29,0x30,0x25,0x75,0x05,0x71,0x23,0x48,0xF1, \ +0x1D,0x42,0x79,0xF8,0x1D,0x0D,0x30,0x15,0x31,0xFE,0xF7,0xC1,0xF8,0x20,0x48, \ +0x20,0x4A,0x00,0x21,0x53,0x5C,0x46,0x18,0x01,0x31,0x04,0x29,0x33,0x74,0xF9, \ +0xD3,0x1D,0x49,0x00,0x22,0x09,0x88,0x39,0x80,0x04,0x21,0x10,0x30,0x01,0xF0, \ +0x96,0xFD,0x1A,0x48,0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78,0x10, \ +0x23,0x19,0x43,0x01,0x70,0x12,0x48,0x85,0x70,0xFB,0xF7,0x31,0xFB,0x39,0x88, \ +0x89,0x02,0x09,0x1A,0x06,0x20,0xFB,0xF7,0x4F,0xFB,0xE0,0x1D,0x49,0x30,0x45, \ +0x70,0x05,0x21,0x81,0x71,0x0F,0x48,0x29,0x1C,0x00,0x68,0x81,0x40,0x07,0x20, \ +0x40,0x06,0x82,0x69,0x11,0x43,0x81,0x61,0x0C,0x48,0x01,0x21,0x05,0x70,0x05, \ +0x20,0x00,0xF0,0x49,0xFA,0xF0,0xBD,0x80,0x00,0x00,0x02,0xC0,0x00,0x00,0x02, \ +0x18,0x09,0x00,0x02,0x04,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0x7C,0x02,0x00, \ +0x02,0x38,0x01,0x00,0x02,0x93,0x01,0x00,0x02,0x9C,0x02,0x00,0x02,0x3A,0x01, \ +0x00,0x02,0xF0,0xB5,0x53,0x4F,0x53,0x4E,0xFC,0x1D,0xF9,0x1D,0x09,0x31,0x59, \ +0x34,0x0D,0x1C,0xF0,0x1D,0x0D,0x30,0x22,0x79,0xFE,0xF7,0x67,0xF8,0x22,0x79, \ +0x29,0x1C,0x4E,0x48,0xFE,0xF7,0x62,0xF8,0x20,0x79,0x4D,0x49,0x4D,0x4A,0x48, \ +0x71,0xB8,0x7B,0x00,0x28,0x03,0xD1,0x10,0x70,0xF0,0x72,0x50,0x70,0x08,0xE0, \ +0x01,0x20,0x10,0x70,0xF0,0x72,0xF8,0x7B,0xD1,0x1D,0x39,0x31,0x50,0x70,0xF8, \ +0x78,0x08,0x70,0x00,0x25,0x0D,0x20,0x68,0x43,0xC1,0x19,0x42,0x4A,0x30,0x31, \ +0x80,0x18,0x0D,0x22,0x0C,0x30,0xFE,0xF7,0x43,0xF8,0x01,0x35,0x04,0x2D,0xF2, \ +0xD3,0x60,0x79,0x00,0x28,0x03,0xD0,0x3B,0x49,0x01,0x20,0x48,0x72,0x02,0xE0, \ +0x39,0x49,0x00,0x20,0x48,0x72,0x78,0x7B,0x39,0x49,0x0E,0x28,0x02,0xDC,0x01, \ +0x28,0x00,0xDB,0x08,0x75,0xB8,0x78,0x36,0x4A,0x10,0x74,0x38,0x7B,0x01,0x28, \ +0x02,0xD1,0x31,0x4B,0xD8,0x70,0x02,0xE0,0x2F,0x4B,0x00,0x20,0xD8,0x70,0xF8, \ +0x88,0x10,0x81,0xB8,0x88,0x50,0x81,0x38,0x78,0x2C,0x4A,0xD0,0x70,0xE0,0x88, \ +0x2E,0x4A,0x30,0x80,0x00,0x20,0x3B,0x18,0x1C,0x7A,0x0D,0x18,0x2C,0x74,0x1B, \ +0x7A,0x13,0x54,0x01,0x30,0x04,0x28,0xF6,0xD3,0x30,0x88,0x28,0x4A,0x10,0x80, \ 
+0xC8,0x1D,0x09,0x30,0x0F,0x1C,0x00,0x22,0x04,0x21,0x01,0xF0,0xEB,0xFC,0x00, \ +0xF0,0xF3,0xF8,0x24,0x4C,0x24,0x49,0xE0,0x1D,0x69,0x30,0xC0,0x7A,0x08,0x5C, \ +0x38,0x75,0x22,0x4F,0x38,0x78,0x02,0x28,0x28,0xD1,0x02,0xF0,0xDE,0xFB,0x03, \ +0xF0,0x33,0xF9,0x16,0x49,0x88,0x78,0x00,0x28,0x07,0xD0,0xFB,0xF7,0x7C,0xFA, \ +0x31,0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0xFB,0xF7,0x9A,0xFA,0x01,0x20,0x00, \ +0xF0,0xC1,0xF9,0x02,0xF0,0x85,0xFB,0x01,0x20,0xF9,0xF7,0x20,0xFE,0x01,0x20, \ +0x80,0x06,0x80,0x69,0xFE,0xF7,0x79,0xF8,0xFB,0xF7,0x5D,0xFA,0xFA,0xF7,0x01, \ +0xFF,0xFE,0xF7,0x55,0xF8,0x80,0x06,0x80,0x0E,0xA0,0x62,0x01,0x20,0x38,0x70, \ +0xF0,0xBD,0x02,0xF0,0x6F,0xFB,0xF0,0xBD,0xA0,0x09,0x00,0x02,0x80,0x00,0x00, \ +0x02,0xDC,0x00,0x00,0x02,0x04,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x00,0x00, \ +0x00,0x02,0xC0,0x00,0x00,0x02,0x7C,0x02,0x00,0x02,0x38,0x01,0x00,0x02,0x18, \ +0x09,0x00,0x02,0x74,0x02,0x00,0x02,0x3B,0x01,0x00,0x02,0x80,0xB5,0x1A,0x49, \ +0x18,0x4A,0x0F,0x68,0x0B,0x2F,0x1D,0xD2,0x01,0xA3,0xDB,0x5D,0x5B,0x00,0x9F, \ +0x44,0x19,0x05,0x0A,0x19,0x0D,0x0F,0x12,0x19,0x19,0x15,0x18,0x00,0x06,0x23, \ +0xFF,0x20,0x01,0x30,0x8B,0x60,0x0E,0xE0,0xFF,0x20,0x41,0x30,0x0B,0xE0,0x0B, \ +0x20,0x09,0xE0,0xFF,0x20,0x31,0x30,0x06,0xE0,0xFF,0x20,0x11,0x30,0x03,0xE0, \ +0xFF,0x20,0x71,0x30,0x00,0xE0,0x00,0x20,0x01,0x23,0x4B,0x60,0x89,0x68,0x00, \ +0xF0,0xD6,0xF9,0x04,0x21,0x0C,0x20,0x00,0xF0,0x47,0xF9,0x0F,0x20,0x00,0x06, \ +0x81,0x88,0x03,0x4B,0x19,0x43,0x81,0x80,0x80,0xBD,0x2C,0x0A,0x00,0x02,0x90, \ +0x02,0x00,0x02,0x08,0x08,0x00,0x00,0xB0,0xB5,0x0D,0x4D,0x00,0x24,0xE8,0x1D, \ +0x49,0x30,0x0C,0x4F,0x04,0x70,0xF8,0x7C,0x02,0xF0,0x08,0xFF,0xE8,0x1D,0x69, \ +0x30,0x84,0x72,0x38,0x8B,0x81,0x02,0x04,0x20,0xFB,0xF7,0x1A,0xFA,0xB8,0x7C, \ +0x00,0x28,0x03,0xD1,0x01,0x20,0xA8,0x77,0x00,0x05,0xB0,0xBD,0x20,0x1C,0xB0, \ +0xBD,0x00,0x00,0x18,0x09,0x00,0x02,0xC0,0x00,0x00,0x02,0x04,0x48,0x00,0x21, \ +0xC2,0x1D,0x49,0x32,0x91,0x70,0x01,0x21,0x81,0x77,0x10,0x20,0xF7,0x46,0x00, \ +0x00,0x18,0x09,0x00,0x02,0x03,0x48,0x00,0x21,0xC1,0x73,0x01,0x21,0x81,0x73, \ +0x08,0x07,0xF7,0x46,0x00,0x00,0x28,0x09,0x00,0x02,0x04,0x48,0x00,0x21,0xC2, \ +0x1D,0x49,0x32,0x51,0x71,0x01,0x21,0x81,0x77,0x08,0x05,0xF7,0x46,0x00,0x00, \ +0x18,0x09,0x00,0x02,0xB0,0xB5,0x04,0x20,0xFB,0xF7,0x00,0xFA,0x0F,0x48,0xC7, \ +0x1D,0x49,0x37,0xB9,0x79,0x01,0x29,0x16,0xD1,0x03,0x21,0x70,0x30,0x81,0x72, \ +0x00,0x25,0x0B,0x4C,0x7D,0x71,0xE0,0x7C,0x01,0xF0,0x83,0xFE,0x00,0x28,0x07, \ +0xD1,0x3D,0x70,0x02,0x20,0xB8,0x71,0x01,0x21,0x03,0x20,0x00,0xF0,0xDC,0xF8, \ +0xB0,0xBD,0x01,0x21,0x39,0x70,0xE0,0x74,0xB0,0xBD,0x02,0xF0,0x43,0xFA,0xB0, \ +0xBD,0x18,0x09,0x00,0x02,0xC0,0x00,0x00,0x02,0x12,0x49,0xC9,0x7D,0x32,0x29, \ +0x1A,0xD0,0x09,0xDC,0x10,0x29,0x11,0xD0,0x20,0x29,0x11,0xD0,0x30,0x29,0x11, \ +0xD0,0x31,0x29,0x08,0xD1,0x03,0x20,0x06,0xE0,0x40,0x29,0x0F,0xD0,0x41,0x29, \ +0x0F,0xD0,0x50,0x29,0x00,0xD1,0x07,0x20,0x08,0x49,0xC8,0x72,0xF7,0x46,0x00, \ +0x20,0xFA,0xE7,0x01,0x20,0xF8,0xE7,0x02,0x20,0xF6,0xE7,0x04,0x20,0xF4,0xE7, \ +0x05,0x20,0xF2,0xE7,0x06,0x20,0xF0,0xE7,0x00,0x00,0x00,0x00,0x00,0x02,0x88, \ +0x09,0x00,0x02,0xF0,0xB5,0x1F,0x4D,0x01,0x24,0x28,0x78,0x01,0x28,0x32,0xD1, \ +0x1D,0x4C,0x1E,0x49,0xE0,0x7A,0x1E,0x4E,0x08,0x5C,0x30,0x75,0xC0,0x20,0xFD, \ +0xF7,0xF3,0xFA,0x07,0x1C,0x1B,0x48,0x01,0x78,0x02,0x29,0x07,0xD1,0x01,0x21, \ +0x01,0x70,0x30,0x7D,0x01,0x22,0x02,0xF0,0x30,0xFD,0x20,0x73,0x0E,0xE0,0x07, \ +0x20,0x40,0x06,0xC1,0x69,0x10,0x23,0x99,0x43,0xC1,0x61,0x13,0x48,0x01,0x21, \ +0x41,0x71,0x00,0x21,0x00,0x20,0x02,0xF0,0xD9,0xFE,0x00,0x20,0xA0,0x70,0x20, \ 
+0x7B,0x01,0x28,0x01,0xD1,0x00,0x20,0x28,0x70,0x20,0x7B,0x01,0x21,0x00,0x28, \ +0x00,0xD1,0x05,0x21,0x38,0x1C,0x0C,0x1C,0xFD,0xF7,0xC9,0xFA,0x21,0x06,0x09, \ +0x0E,0x06,0x20,0x00,0xF0,0x68,0xF8,0xF0,0xBD,0x00,0x00,0xA9,0x02,0x00,0x02, \ +0x88,0x09,0x00,0x02,0x74,0x02,0x00,0x02,0x00,0x00,0x00,0x02,0xA8,0x02,0x00, \ +0x02,0xB0,0x00,0x00,0x02,0x00,0xB5,0x12,0x48,0x01,0x78,0x0D,0x29,0x1A,0xD2, \ +0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x16,0x07,0x16,0x07,0x07, \ +0x07,0x0B,0x0E,0x16,0x16,0x07,0x07,0x07,0x00,0x0B,0x49,0x01,0x20,0x08,0x70, \ +0x00,0xBD,0xFF,0xF7,0x9D,0xFF,0x00,0xBD,0x08,0x49,0x02,0x20,0x08,0x70,0x08, \ +0x21,0x07,0x20,0x00,0xF0,0x3B,0xF8,0x00,0xBD,0x00,0x78,0x02,0x21,0x00,0xF0, \ +0x36,0xF8,0x00,0xBD,0x00,0x00,0x98,0x09,0x00,0x02,0xCD,0x01,0x00,0x02,0x3C, \ +0x01,0x00,0x02,0x00,0xB5,0x15,0x48,0x01,0x78,0x0D,0x29,0x20,0xD2,0x02,0xA3, \ +0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x1C,0x07,0x1C,0x0A,0x0D,0x13,0x1C, \ +0x1C,0x1C,0x1C,0x10,0x16,0x19,0x00,0xFF,0xF7,0x4B,0xFB,0x00,0xBD,0xFF,0xF7, \ +0xA8,0xFB,0x00,0xBD,0xFF,0xF7,0x79,0xFC,0x00,0xBD,0x00,0xF0,0x8A,0xFD,0x00, \ +0xBD,0xFF,0xF7,0x0D,0xFD,0x00,0xBD,0xFF,0xF7,0x16,0xFB,0x00,0xBD,0xFF,0xF7, \ +0x95,0xFE,0x00,0xBD,0x00,0x78,0x02,0x21,0x00,0xF0,0x04,0xF8,0x00,0xBD,0x00, \ +0x00,0x98,0x09,0x00,0x02,0x04,0x4A,0x10,0x70,0x04,0x48,0x01,0x70,0x04,0x48, \ +0x00,0x21,0x01,0x70,0x41,0x70,0xF7,0x46,0x00,0x00,0x54,0x02,0x00,0x02,0x55, \ +0x02,0x00,0x02,0x98,0x09,0x00,0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x69,0x32, \ +0x51,0x70,0x01,0x21,0x81,0x77,0x08,0x02,0xF7,0x46,0x00,0x00,0x18,0x09,0x00, \ +0x02,0x80,0xB5,0x0F,0x4F,0x01,0x28,0x03,0xD1,0xF9,0xF7,0x5A,0xFC,0xF8,0x62, \ +0x38,0x63,0x0C,0x48,0x01,0x21,0x80,0x89,0x0C,0x4A,0xB8,0x87,0x39,0x72,0x79, \ +0x72,0x39,0x73,0x00,0x20,0x38,0x74,0x38,0x60,0xB8,0x72,0xF8,0x72,0x10,0x70, \ +0xB9,0x73,0x79,0x60,0x06,0x49,0xCA,0x7A,0x06,0x49,0xCA,0x70,0x88,0x70,0x08, \ +0x70,0x80,0xBD,0x00,0x00,0x18,0x09,0x00,0x02,0xC0,0x00,0x00,0x02,0xD8,0x01, \ +0x00,0x02,0x04,0x01,0x00,0x02,0xC0,0x01,0x00,0x02,0xB0,0xB5,0xF3,0x25,0x2D, \ +0x05,0x07,0x1C,0xA8,0x68,0x06,0x20,0xE8,0x60,0x0C,0x1C,0x28,0x69,0x80,0x08, \ +0xFC,0xD3,0x0A,0x20,0xF9,0xF7,0x14,0xFC,0xA8,0x68,0x78,0x09,0x08,0x23,0x18, \ +0x40,0x02,0x23,0x18,0x43,0xE8,0x60,0x28,0x69,0x80,0x08,0xFC,0xD3,0x38,0x06, \ +0x00,0x0E,0xE8,0x60,0x28,0x69,0x80,0x08,0xFC,0xD3,0xA8,0x68,0x20,0x06,0x00, \ +0x0E,0xE8,0x60,0x28,0x69,0x80,0x08,0xFC,0xD3,0xA8,0x68,0xB0,0xBD,0xF0,0xB5, \ +0x14,0x1C,0x0D,0x1C,0x07,0x1C,0xFC,0xF7,0xAD,0xFC,0x00,0x26,0x00,0x2F,0x10, \ +0xD9,0xFC,0xF7,0xF8,0xFC,0x40,0x08,0xFB,0xD2,0x28,0x20,0xF9,0xF7,0xEB,0xFB, \ +0xA9,0x5D,0xA0,0x19,0xFF,0xF7,0xC5,0xFF,0x28,0x20,0xF9,0xF7,0xE4,0xFB,0x01, \ +0x36,0xBE,0x42,0xEE,0xD3,0xFC,0xF7,0xB5,0xFC,0x00,0x20,0xF0,0xBD,0xF0,0xB5, \ +0x82,0xB0,0x02,0x1C,0x39,0x4B,0x08,0x1C,0x19,0x68,0x37,0x4F,0x00,0x29,0x68, \ +0xD0,0x59,0x68,0x01,0x29,0x65,0xD1,0x00,0x24,0x0F,0x21,0x09,0x06,0x8C,0x80, \ +0x8C,0x81,0x0C,0x88,0xFE,0x1D,0x2D,0x36,0xF5,0x1D,0x09,0x89,0x08,0x35,0xEC, \ +0x1F,0x20,0x3C,0x19,0x68,0xE3,0x1D,0x0B,0x33,0x01,0x93,0x04,0x3B,0x00,0x93, \ +0x0A,0x29,0x23,0xD1,0x0B,0x22,0x04,0x20,0x00,0x99,0xFF,0xF7,0xBC,0xFF,0xFF, \ +0x22,0x06,0x20,0x01,0x32,0x01,0x99,0xFF,0xF7,0xB6,0xFF,0xFF,0x22,0x0E,0x20, \ +0x39,0x1C,0x41,0x32,0xFF,0xF7,0xB0,0xFF,0xFF,0x22,0x0E,0x20,0x21,0x1C,0x11, \ +0x32,0xFF,0xF7,0xAA,0xFF,0xFF,0x22,0x0E,0x20,0x29,0x1C,0x71,0x32,0xFF,0xF7, \ +0xA4,0xFF,0xFF,0x22,0x01,0x20,0x31,0x1C,0x31,0x32,0xFF,0xF7,0x9E,0xFF,0x02, \ +0xE0,0x39,0x1C,0xFF,0xF7,0x9A,0xFF,0xFC,0xF7,0x4B,0xFC,0x06,0x22,0xFF,0x21, \ 
+0x01,0x31,0x01,0x98,0xFC,0xF7,0xAA,0xFC,0x04,0x22,0x0B,0x21,0x00,0x98,0xFC, \ +0xF7,0xA5,0xFC,0x0E,0x22,0xFF,0x21,0x38,0x1C,0x41,0x31,0xFC,0xF7,0x9F,0xFC, \ +0x0E,0x22,0xFF,0x21,0x28,0x1C,0x71,0x31,0xFC,0xF7,0x99,0xFC,0x0E,0x22,0xFF, \ +0x21,0x20,0x1C,0x11,0x31,0xFC,0xF7,0x93,0xFC,0x01,0x22,0xFF,0x21,0x30,0x1C, \ +0x31,0x31,0xFC,0xF7,0x8D,0xFC,0xFC,0xF7,0x44,0xFC,0x03,0x4B,0x00,0x24,0x1C, \ +0x60,0x02,0xB0,0xF0,0xBD,0x00,0x00,0x2C,0x0A,0x00,0x02,0x90,0x02,0x00,0x02, \ +0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01, \ +0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20, \ +0x38,0x73,0x05,0x48,0x06,0x49,0x00,0x68,0x10,0x23,0x08,0x73,0x38,0x7B,0x18, \ +0x43,0x38,0x73,0xF2,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x24,0x02,0x00,0x02, \ +0x30,0x03,0x00,0x0D,0x80,0xB4,0x0B,0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03, \ +0xD1,0x01,0x2A,0x01,0xD1,0x03,0x2B,0x03,0xD0,0x20,0x20,0x38,0x73,0x80,0xBC, \ +0xF7,0x46,0x80,0x20,0x38,0x73,0x04,0x49,0x00,0x20,0x08,0x73,0x38,0x7B,0x10, \ +0x23,0x18,0x43,0x38,0x73,0xF3,0xE7,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D, \ +0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01, \ +0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20, \ +0x38,0x73,0x05,0x48,0x00,0x21,0x01,0x73,0x01,0x73,0x38,0x7B,0x10,0x23,0x18, \ +0x43,0x38,0x73,0xF2,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D, \ +0x00,0xB5,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03, \ +0x2B,0x03,0xD0,0x06,0x49,0x20,0x20,0x08,0x73,0x00,0xBD,0xFC,0xF7,0xF7,0xF9, \ +0x04,0x49,0x00,0x20,0x08,0x80,0x03,0x49,0x08,0x80,0x00,0xBD,0x00,0x00,0x70, \ +0x03,0x00,0x0D,0x38,0x02,0x00,0x02,0x3A,0x02,0x00,0x02,0xB0,0xB4,0x20,0x25, \ +0x00,0x28,0x18,0x4C,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B,0x02,0xD1,0x25, \ +0x73,0xB0,0xBC,0xF7,0x46,0x08,0x06,0x00,0x0E,0x02,0x2B,0x05,0xD1,0x00,0x28, \ +0x01,0xD0,0x25,0x73,0xF5,0xE7,0x00,0x27,0x10,0xE0,0x03,0x2B,0x0E,0xD1,0x00, \ +0x28,0x08,0xD0,0x02,0x28,0x08,0xD0,0x80,0x28,0x04,0xD0,0x81,0x28,0x11,0xD1, \ +0x0A,0x48,0x07,0x88,0x03,0xE0,0x00,0x27,0x01,0xE0,0x09,0x48,0x07,0x88,0x80, \ +0x20,0x20,0x73,0x08,0x48,0x00,0x21,0x07,0x73,0x01,0x73,0x20,0x7B,0x10,0x23, \ +0x18,0x43,0x20,0x73,0xD7,0xE7,0x25,0x73,0xD5,0xE7,0x70,0x03,0x00,0x0D,0x3A, \ +0x02,0x00,0x02,0x38,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x90,0xB5,0x20,0x27, \ +0x00,0x28,0x0C,0x4C,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x01,0xD0,0x27, \ +0x73,0x90,0xBD,0x09,0x06,0x09,0x0E,0x01,0x20,0x02,0x29,0x04,0xD0,0x81,0x29, \ +0x07,0xD1,0x05,0x49,0x08,0x80,0x01,0xE0,0x05,0x49,0x08,0x80,0xFC,0xF7,0x95, \ +0xF9,0x90,0xBD,0x27,0x73,0x90,0xBD,0x70,0x03,0x00,0x0D,0x3A,0x02,0x00,0x02, \ +0x38,0x02,0x00,0x02,0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03, \ +0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20,0x38,0x73,0x80,0xBC, \ +0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x00,0x21,0x01,0x73,0x01,0x73,0x38, \ +0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D, \ +0x30,0x03,0x00,0x0D,0x90,0xB5,0x17,0x1C,0x02,0x28,0x03,0xD1,0x0A,0x29,0x2F, \ +0xD1,0x1A,0x4A,0x2D,0xE0,0x04,0x28,0x01,0xD1,0x19,0x4A,0x29,0xE0,0x19,0x4C, \ +0x05,0x28,0x02,0xD1,0xE2,0x1D,0x09,0x32,0x23,0xE0,0x03,0x28,0x04,0xD1,0x01, \ +0xF0,0xA2,0xF8,0xE2,0x1D,0x11,0x32,0x1C,0xE0,0x00,0x28,0x1A,0xD1,0x0A,0x29, \ +0x17,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x13,0x05,0x07, \ +0x09,0x13,0x0B,0x0D,0x0F,0x13,0x11,0x0D,0x4A,0x0C,0xE0,0x0D,0x4A,0x0A,0xE0, \ +0x0D,0x4A,0x08,0xE0,0x0D,0x4A,0x06,0xE0,0x0D,0x4A,0x04,0xE0,0x0D,0x4A,0x02, \ 
+0xE0,0x0D,0x4A,0x00,0xE0,0x0D,0x4A,0x0D,0x49,0xD0,0x19,0x08,0x60,0x00,0xF0, \ +0x5B,0xF8,0x90,0xBD,0x2C,0x0A,0x00,0x02,0xC8,0x02,0x00,0x02,0x88,0x0A,0x00, \ +0x02,0x04,0x01,0x00,0x02,0x5C,0x00,0x00,0x02,0xC0,0x00,0x00,0x02,0x80,0x00, \ +0x00,0x02,0x18,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x10, \ +0x01,0x00,0x02,0x50,0x02,0x00,0x02,0x80,0xB4,0x17,0x1C,0x00,0x22,0x01,0x2F, \ +0x17,0x4B,0x23,0xD1,0x02,0x28,0x10,0xD1,0x16,0x48,0x87,0x79,0xC0,0x79,0x00, \ +0x02,0x07,0x43,0x08,0x29,0x07,0xD0,0x14,0x48,0x87,0x60,0x0C,0x27,0x1F,0x70, \ +0x5A,0x70,0x9A,0x70,0x01,0x60,0x42,0x60,0x80,0xBC,0xF7,0x46,0x06,0x28,0xFB, \ +0xD1,0x0F,0x48,0x00,0x78,0x01,0x28,0xF7,0xD1,0xFF,0x20,0x0D,0x21,0x09,0x06, \ +0x43,0x30,0x88,0x80,0x0B,0x49,0x01,0x20,0x08,0x71,0x0B,0x49,0x08,0x70,0xEC, \ +0xE7,0x18,0x79,0x18,0x70,0x5A,0x70,0x9A,0x70,0x18,0x78,0x0A,0x28,0xE5,0xD1, \ +0x07,0x48,0x02,0x70,0xE2,0xE7,0x98,0x09,0x00,0x02,0x28,0x02,0x00,0x02,0x90, \ +0x02,0x00,0x02,0x36,0x01,0x00,0x02,0xE0,0x03,0x00,0x0D,0x37,0x01,0x00,0x02, \ +0x93,0x01,0x00,0x02,0x90,0xB4,0x1A,0x4A,0x80,0x20,0x10,0x73,0x19,0x49,0x1A, \ +0x48,0x0B,0x88,0x07,0x88,0xBB,0x42,0x11,0xD1,0x11,0x7B,0xC9,0x09,0x09,0xD2, \ +0x00,0x88,0x40,0x07,0x03,0xD0,0xE0,0x20,0x10,0x73,0x90,0xBC,0xF7,0x46,0xD0, \ +0x20,0x10,0x73,0xFA,0xE7,0x10,0x7B,0x20,0x23,0x18,0x43,0x10,0x73,0xF5,0xE7, \ +0x00,0x88,0x0B,0x88,0xC0,0x1A,0x08,0x28,0x00,0xD9,0x08,0x20,0x0B,0x88,0x1B, \ +0x18,0x0B,0x80,0x00,0x28,0x08,0xD0,0x0A,0x4B,0x0A,0x49,0x0F,0x68,0x3C,0x78, \ +0x01,0x37,0x0F,0x60,0x1C,0x73,0x01,0x38,0xF8,0xD1,0x10,0x7B,0x10,0x23,0x18, \ +0x43,0x10,0x73,0xDC,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x4E,0x02,0x00,0x02, \ +0x4C,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x50,0x02,0x00,0x02,0x90,0xB5,0x20, \ +0x24,0x00,0x28,0x0B,0x4F,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x01,0xD0, \ +0x3C,0x73,0x90,0xBD,0x08,0x06,0x00,0x0E,0x01,0xD0,0x80,0x28,0x01,0xD1,0x3C, \ +0x73,0x90,0xBD,0x04,0x48,0x00,0x79,0x00,0xF0,0x3A,0xF8,0x60,0x20,0x38,0x73, \ +0x90,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0x28,0x02,0x00,0x02,0xB0,0xB4,0x13, \ +0x48,0x01,0x2B,0x03,0xD1,0x20,0x21,0x01,0x73,0xB0,0xBC,0xF7,0x46,0x10,0x49, \ +0x00,0x23,0x0D,0x78,0x02,0x22,0x0F,0x4C,0x10,0x4F,0x01,0x2D,0x02,0xD0,0x0D, \ +0x78,0x02,0x2D,0x02,0xD1,0x0A,0x70,0x3B,0x70,0x23,0x70,0x80,0x21,0x01,0x73, \ +0x0B,0x49,0x01,0x25,0x0D,0x73,0x0B,0x73,0x0A,0x73,0x0B,0x73,0x3A,0x78,0x10, \ +0x23,0x0A,0x73,0x22,0x78,0x0A,0x73,0x01,0x7B,0x19,0x43,0x01,0x73,0xDE,0xE7, \ +0x00,0x00,0x70,0x03,0x00,0x0D,0x58,0x02,0x00,0x02,0x55,0x02,0x00,0x02,0x54, \ +0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4,0x01,0x22,0x00,0x23,0x02,0x28, \ +0x10,0x49,0x12,0xD1,0x18,0x1C,0x10,0x4B,0x04,0x27,0x18,0x71,0x0F,0x4B,0x1F, \ +0x70,0x18,0x70,0x0F,0x4F,0x82,0x23,0x3B,0x71,0x0E,0x4B,0x18,0x80,0x0E,0x4B, \ +0x18,0x80,0x0E,0x4B,0x18,0x80,0x0A,0x70,0x80,0xBC,0xF7,0x46,0x81,0x28,0xFB, \ +0xD1,0x0C,0x48,0x03,0x80,0x0C,0x48,0x02,0x70,0x08,0x78,0x01,0x28,0xF4,0xD1, \ +0x02,0x20,0x08,0x70,0xF1,0xE7,0x00,0x00,0x59,0x02,0x00,0x02,0x70,0x03,0x00, \ +0x0D,0xC0,0x03,0x00,0x0D,0xB0,0x03,0x00,0x0D,0x46,0x02,0x00,0x02,0x44,0x02, \ +0x00,0x02,0x38,0x02,0x00,0x02,0x3A,0x02,0x00,0x02,0x43,0x02,0x00,0x02,0x90, \ +0xB5,0x0F,0x1C,0x19,0x1C,0x29,0x4B,0x14,0x1C,0x27,0x4A,0x98,0x42,0x06,0xD1, \ +0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xA2,0xFD,0x90,0xBD,0x24, \ +0x4B,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7, \ +0xB8,0xFD,0x90,0xBD,0x81,0x23,0x1B,0x02,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A, \ +0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xC9,0xFD,0x90,0xBD,0xFF,0x23,0x0C,0x33, \ 
+0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xDC, \ +0xFD,0x90,0xBD,0x41,0x23,0x5B,0x02,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C, \ +0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xED,0xFD,0x90,0xBD,0x0F,0x4B,0x98,0x42,0x06, \ +0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x1F,0xFE,0x90,0xBD, \ +0x01,0x23,0xDB,0x03,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21, \ +0x1C,0xFF,0xF7,0x36,0xFE,0x90,0xBD,0x06,0x49,0x20,0x20,0x08,0x73,0x90,0xBD, \ +0x00,0x00,0x20,0x02,0x00,0x02,0x08,0x80,0x00,0x00,0x0A,0x81,0x00,0x00,0x03, \ +0x02,0x00,0x00,0x70,0x03,0x00,0x0D,0x10,0x49,0x09,0x78,0x01,0x29,0x1C,0xD1, \ +0x40,0x08,0x1A,0xD3,0x0D,0x20,0x00,0x06,0x01,0x78,0x20,0x23,0x19,0x43,0x01, \ +0x70,0x0B,0x48,0x01,0x21,0x00,0x68,0x81,0x40,0xC8,0x43,0x07,0x21,0x49,0x06, \ +0x8A,0x69,0x10,0x40,0x88,0x61,0x00,0x20,0x7D,0x22,0x12,0x01,0x88,0x61,0x01, \ +0x30,0x90,0x42,0xFC,0xD3,0xFF,0x20,0x48,0x61,0xFF,0xE7,0xFE,0xE7,0xF7,0x46, \ +0x36,0x01,0x00,0x02,0x9C,0x02,0x00,0x02,0xF0,0xB5,0xC0,0x20,0xFC,0xF7,0x80, \ +0xFE,0x22,0x4C,0x23,0x4F,0x21,0x7A,0x23,0x4A,0x39,0x70,0x11,0x79,0x79,0x70, \ +0x21,0x7B,0xF9,0x70,0x11,0x7B,0xB9,0x70,0x0D,0x21,0x09,0x06,0x8B,0x88,0x07, \ +0x25,0x6D,0x06,0xBB,0x80,0xEE,0x69,0x01,0x23,0x5B,0x02,0x33,0x43,0xEB,0x61, \ +0x00,0x23,0x01,0x33,0x32,0x2B,0xFC,0xD3,0xEE,0x69,0x01,0x23,0x5B,0x02,0x9E, \ +0x43,0xEE,0x61,0x00,0x23,0x01,0x33,0x64,0x2B,0xFC,0xD3,0x14,0x4D,0x00,0x23, \ +0x2B,0x70,0x13,0x4B,0x80,0x25,0x1D,0x73,0x86,0x25,0x1D,0x72,0x82,0x25,0x1D, \ +0x71,0x07,0x25,0x1D,0x70,0x10,0x4B,0x05,0x26,0x1E,0x73,0x06,0x26,0x1E,0x72, \ +0x1D,0x71,0x26,0x71,0x3B,0x78,0x23,0x72,0xFB,0x78,0x23,0x73,0x7B,0x78,0x13, \ +0x71,0xBB,0x78,0x13,0x73,0x09,0x4A,0x0A,0x81,0xBA,0x88,0x8A,0x80,0xFC,0xF7, \ +0x3C,0xFE,0xF0,0xBD,0x00,0x00,0xC0,0x03,0x00,0x0D,0xC0,0x02,0x00,0x02,0xE0, \ +0x03,0x00,0x0D,0x10,0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D, \ +0xFF,0x0F,0x00,0x00,0x80,0xB5,0x0C,0x49,0x00,0x20,0x08,0x60,0x0B,0x49,0x0E, \ +0x4F,0x08,0x80,0x0B,0x49,0x08,0x70,0x0B,0x49,0x08,0x70,0x38,0x68,0x01,0x7A, \ +0x10,0x29,0x02,0xD1,0xFB,0xF7,0x0A,0xFA,0x38,0x60,0x38,0x68,0x01,0x7A,0x40, \ +0x29,0x02,0xD1,0xFB,0xF7,0x03,0xFA,0x38,0x60,0x80,0xBD,0x3C,0x02,0x00,0x02, \ +0x40,0x02,0x00,0x02,0x48,0x02,0x00,0x02,0x42,0x02,0x00,0x02,0x5C,0x02,0x00, \ +0x02,0xF0,0xB5,0x23,0x4E,0x04,0x1C,0x0F,0x1C,0x13,0x1C,0x20,0x22,0xB5,0x78, \ +0xF1,0x78,0x03,0x2B,0x20,0x48,0x01,0xD0,0x02,0x73,0xF0,0xBD,0x02,0x2D,0x09, \ +0xD1,0x01,0x29,0x01,0xD3,0x0A,0x29,0x01,0xD9,0x02,0x73,0xF0,0xBD,0x08,0x29, \ +0x01,0xD1,0x02,0x73,0xF0,0xBD,0x00,0x2F,0x09,0xD1,0xFB,0xF7,0xF3,0xFE,0x06, \ +0x2D,0x07,0xD1,0xF9,0xF7,0x5F,0xF8,0x15,0x48,0x00,0x21,0x01,0x70,0x01,0xE0, \ +0x00,0x21,0x01,0x73,0x13,0x48,0x02,0x2D,0x07,0xD1,0x00,0x2C,0x0E,0xD1,0x11, \ +0x49,0x01,0x60,0x11,0x48,0x00,0x21,0x01,0x70,0x08,0xE0,0x01,0x2D,0xD7,0xD0, \ +0x0F,0x49,0x01,0x60,0x0F,0x48,0x00,0x21,0x01,0x70,0x0F,0x48,0x01,0x70,0x0F, \ +0x48,0x31,0x1C,0x07,0x80,0x0E,0x48,0x00,0x27,0x07,0x80,0x0E,0x48,0x08,0x22, \ +0xFD,0xF7,0xA7,0xF9,0x03,0x48,0x07,0x70,0xF0,0xBD,0x28,0x02,0x00,0x02,0x70, \ +0x03,0x00,0x0D,0x58,0x02,0x00,0x02,0x50,0x02,0x00,0x02,0x2C,0x0A,0x00,0x02, \ +0x93,0x01,0x00,0x02,0x9C,0x09,0x00,0x02,0x55,0x02,0x00,0x02,0x54,0x02,0x00, \ +0x02,0x4C,0x02,0x00,0x02,0x4E,0x02,0x00,0x02,0x30,0x02,0x00,0x02,0xB0,0xB5, \ +0x11,0x4F,0x14,0x1C,0xBB,0x78,0xFF,0x78,0x03,0x2C,0x0F,0x4A,0x02,0xD0,0x20, \ +0x20,0x10,0x73,0xB0,0xBD,0x0E,0x4D,0x00,0x24,0x2C,0x80,0x0D,0x4C,0x01,0x2B, \ +0x21,0x80,0x0A,0xD1,0x80,0x20,0x10,0x73,0x0B,0x48,0x0C,0x49,0x00,0x78,0x10, \ 
+0x23,0x08,0x73,0x10,0x7B,0x18,0x43,0x10,0x73,0xB0,0xBD,0x02,0x1C,0x18,0x1C, \ +0x39,0x1C,0xFF,0xF7,0x20,0xFD,0xB0,0xBD,0x00,0x00,0x28,0x02,0x00,0x02,0x70, \ +0x03,0x00,0x0D,0x4E,0x02,0x00,0x02,0x4C,0x02,0x00,0x02,0x4B,0x02,0x00,0x02, \ +0x30,0x03,0x00,0x0D,0xB0,0xB5,0x0F,0x1C,0x18,0x4D,0x19,0x1C,0x14,0x1C,0xA8, \ +0x42,0x02,0xD0,0x17,0x4B,0x00,0x22,0x1A,0x70,0x16,0x4A,0xA8,0x42,0x06,0xD1, \ +0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xD5,0xFD,0xB0,0xBD,0x12, \ +0x4B,0x98,0x42,0x04,0xD1,0x12,0x68,0x20,0x1C,0xFF,0xF7,0x55,0xFF,0xB0,0xBD, \ +0x0F,0x4B,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF, \ +0xF7,0xE1,0xFD,0xB0,0xBD,0x0B,0x4B,0x98,0x42,0x04,0xD1,0x12,0x68,0x20,0x1C, \ +0xFF,0xF7,0xA3,0xFF,0xB0,0xBD,0x0B,0x1C,0x39,0x1C,0x22,0x1C,0xFF,0xF7,0x3D, \ +0xFE,0xB0,0xBD,0x01,0x02,0x00,0x00,0x59,0x02,0x00,0x02,0x20,0x02,0x00,0x02, \ +0x0E,0x40,0x00,0x00,0x22,0xC1,0x00,0x00,0x33,0xC1,0x00,0x00,0xF0,0xB5,0x22, \ +0x4B,0xE0,0x25,0x01,0x27,0x98,0x42,0x1D,0x49,0x1D,0x4C,0x1E,0x4A,0x08,0xD1, \ +0x90,0x78,0x01,0x28,0x01,0xD1,0x0D,0x73,0x01,0xE0,0xFF,0xF7,0x58,0xFD,0x27, \ +0x71,0xF0,0xBD,0x1A,0x4B,0x20,0x26,0x98,0x42,0x21,0xD1,0x0E,0x73,0x19,0x48, \ +0x27,0x71,0x00,0x78,0x00,0x28,0xF4,0xD1,0x90,0x78,0x02,0x28,0x02,0xD1,0xD0, \ +0x78,0x08,0x28,0xEE,0xD0,0x90,0x78,0x01,0x28,0x0C,0xD1,0x13,0x49,0x00,0x20, \ +0x08,0x70,0x12,0x48,0x00,0x78,0x02,0x28,0x02,0xD1,0x11,0x48,0x07,0x70,0xF0, \ +0xBD,0x11,0x48,0x07,0x70,0xF0,0xBD,0xD1,0x78,0x90,0x78,0x01,0x22,0xFF,0xF7, \ +0xEF,0xFC,0xF0,0xBD,0x10,0x78,0x00,0x0A,0x01,0xD2,0x0E,0x73,0x00,0xE0,0x0D, \ +0x73,0x27,0x71,0xF0,0xBD,0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x28,0x02, \ +0x00,0x02,0x33,0xC1,0x00,0x00,0x0E,0x40,0x00,0x00,0x58,0x02,0x00,0x02,0x93, \ +0x01,0x00,0x02,0x4B,0x02,0x00,0x02,0x57,0x02,0x00,0x02,0xD5,0x01,0x00,0x02, \ +0x80,0xB5,0x00,0x20,0x1E,0x49,0x0F,0x27,0x3F,0x06,0x08,0x70,0xB8,0x80,0x39, \ +0x88,0xB8,0x81,0x1C,0x4A,0x39,0x89,0xD1,0x69,0xD1,0x04,0xCB,0x68,0xC9,0x6B, \ +0x1A,0x49,0x09,0x68,0x90,0x61,0x19,0x49,0x02,0x20,0xC8,0x74,0x19,0x48,0x01, \ +0x7A,0x0C,0x30,0x09,0x29,0x1D,0xD2,0x01,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44, \ +0x19,0x04,0x07,0x19,0x0A,0x0D,0x10,0x13,0x16,0x00,0x00,0xF0,0x5B,0xFC,0x80, \ +0xBD,0x00,0xF0,0x9C,0xF9,0x80,0xBD,0x00,0xF0,0x61,0xFA,0x80,0xBD,0x00,0xF0, \ +0x1E,0xF8,0x80,0xBD,0x00,0xF0,0xDB,0xF8,0x80,0xBD,0x00,0xF0,0xCE,0xFA,0x80, \ +0xBD,0x00,0xF0,0x11,0xFD,0x80,0xBD,0x02,0x21,0x0A,0x20,0xFF,0xF7,0x4C,0xFA, \ +0x06,0x48,0xB8,0x80,0x80,0xBD,0x00,0x00,0x93,0x01,0x00,0x02,0x80,0x00,0x00, \ +0x04,0x40,0x00,0x00,0x04,0x18,0x09,0x00,0x02,0x98,0x09,0x00,0x02,0x08,0x08, \ +0x00,0x00,0xF0,0xB5,0x54,0x4E,0x06,0x23,0x04,0x1C,0x37,0x1C,0x26,0xCC,0x26, \ +0xC7,0x01,0x3B,0xFB,0xD1,0xC1,0x1D,0x50,0x4D,0x01,0x31,0x40,0x22,0x28,0x1C, \ +0xFD,0xF7,0x6C,0xF8,0x30,0x78,0x0F,0x24,0x24,0x06,0x0E,0x28,0x4C,0x4F,0x01, \ +0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFF,0xF7,0x20,0xFA,0xA7,0x80, \ +0xF0,0xBD,0xE8,0x1D,0x19,0x30,0x01,0x78,0xFB,0x23,0x19,0x40,0x01,0x70,0xB1, \ +0x78,0x01,0x29,0x04,0xD1,0x01,0x78,0xF7,0x23,0x19,0x40,0x01,0x70,0x0A,0xE0, \ +0x02,0x29,0x04,0xD1,0x01,0x78,0x08,0x23,0x19,0x43,0x01,0x70,0x03,0xE0,0x01, \ +0x78,0x04,0x23,0x19,0x43,0x01,0x70,0xFF,0x20,0x3B,0x4D,0xF5,0x30,0x69,0x68, \ +0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49, \ +0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7, \ +0x59,0xFE,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x02, \ +0xF0,0x68,0xF9,0x2F,0x49,0x00,0x20,0xC8,0x60,0x05,0x21,0x09,0x06,0xC8,0x62, \ 
+0x08,0x63,0xF0,0x78,0x01,0x28,0x05,0xD1,0x01,0x22,0x00,0x21,0x30,0x78,0x01, \ +0xF0,0x7E,0xFE,0x04,0xE0,0x00,0x22,0x00,0x21,0x30,0x78,0x01,0xF0,0x78,0xFE, \ +0x00,0x20,0x25,0x49,0x00,0x26,0x48,0x63,0x01,0x30,0x17,0x28,0x8E,0x63,0xFA, \ +0xD3,0x00,0x20,0xF8,0xF7,0x49,0xFE,0x0A,0x20,0xF8,0xF7,0x2E,0xFE,0x01,0x20, \ +0x80,0x06,0x46,0x61,0xC0,0x68,0x1D,0x48,0x68,0x61,0x68,0x68,0xC0,0x0B,0x05, \ +0xD3,0x06,0x21,0x0A,0x20,0xFF,0xF7,0xB9,0xF9,0xA7,0x80,0xF0,0xBD,0x00,0x22, \ +0xFF,0x21,0x7D,0x20,0xC0,0x00,0xA7,0x80,0x00,0xF0,0x00,0xFB,0x05,0x21,0x09, \ +0x06,0xCE,0x61,0x01,0x21,0x13,0x48,0x89,0x06,0x88,0x63,0x13,0x48,0x13,0x4A, \ +0x48,0x63,0xA6,0x80,0x04,0x20,0xD0,0x74,0xA8,0x60,0x00,0x03,0x68,0x60,0x0A, \ +0x4A,0x6B,0x20,0xD0,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0D,0x48,0x01,0x21, \ +0xA0,0x80,0x0A,0x20,0xFF,0xF7,0x94,0xF9,0xF0,0xBD,0x00,0x00,0xE0,0x0A,0x00, \ +0x02,0x78,0x0B,0x00,0x02,0x08,0x08,0x00,0x00,0x40,0x00,0x00,0x04,0x00,0x01, \ +0x00,0x05,0x80,0x00,0x00,0x05,0x04,0x21,0x00,0x00,0x00,0x72,0x01,0x02,0x64, \ +0x10,0x00,0x00,0x18,0x09,0x00,0x02,0x88,0x88,0x00,0x00,0xF0,0xB5,0x51,0x4E, \ +0x06,0x23,0x04,0x1C,0x37,0x1C,0x26,0xCC,0x26,0xC7,0x01,0x3B,0xFB,0xD1,0xC1, \ +0x1D,0x4D,0x4D,0x01,0x31,0x40,0x22,0x28,0x1C,0xFC,0xF7,0xAC,0xFF,0x30,0x78, \ +0x0F,0x24,0x24,0x06,0x0E,0x28,0x49,0x4F,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03, \ +0x21,0x0A,0x20,0xFF,0xF7,0x60,0xF9,0xA7,0x80,0xF0,0xBD,0xE8,0x1D,0x19,0x30, \ +0x01,0x78,0xFB,0x23,0x19,0x40,0x01,0x70,0xB1,0x78,0x01,0x29,0x04,0xD1,0x01, \ +0x78,0xF7,0x23,0x19,0x40,0x01,0x70,0x0A,0xE0,0x02,0x29,0x04,0xD1,0x01,0x78, \ +0x08,0x23,0x19,0x43,0x01,0x70,0x03,0xE0,0x01,0x78,0x04,0x23,0x19,0x43,0x01, \ +0x70,0xFF,0x20,0x38,0x4D,0xF5,0x30,0x69,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C, \ +0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18, \ +0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x99,0xFD,0x07,0x21,0x49,0x06, \ +0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x02,0xF0,0xA8,0xF8,0x2C,0x49,0x00, \ +0x20,0xC8,0x60,0x05,0x21,0x09,0x06,0xC8,0x62,0x08,0x63,0xF0,0x78,0x01,0x28, \ +0x05,0xD1,0x01,0x22,0x00,0x21,0x30,0x78,0x01,0xF0,0xBE,0xFD,0x04,0xE0,0x00, \ +0x22,0x00,0x21,0x30,0x78,0x01,0xF0,0xB8,0xFD,0x00,0x20,0xF8,0xF7,0x91,0xFD, \ +0x0A,0x20,0xF8,0xF7,0x76,0xFD,0x00,0x26,0x01,0x20,0x80,0x06,0x46,0x61,0xC0, \ +0x68,0x1D,0x48,0x68,0x61,0x68,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20, \ +0xFF,0xF7,0x00,0xF9,0xA7,0x80,0xF0,0xBD,0x00,0x22,0x55,0x21,0x7D,0x20,0xC0, \ +0x00,0xA7,0x80,0x00,0xF0,0x47,0xFA,0x05,0x20,0x00,0x06,0xC6,0x61,0x31,0x1C, \ +0x01,0x22,0x12,0x48,0x92,0x06,0x90,0x63,0x12,0x48,0x50,0x63,0xA1,0x80,0x11, \ +0x49,0x04,0x20,0xC8,0x74,0xA8,0x60,0x00,0x03,0x68,0x60,0x0A,0x49,0x6B,0x20, \ +0xC8,0x60,0x50,0x6A,0x01,0x21,0x0A,0x30,0x10,0x62,0x0C,0x48,0xA0,0x80,0x0A, \ +0x20,0xFF,0xF7,0xDA,0xF8,0xF0,0xBD,0x00,0x00,0xE0,0x0A,0x00,0x02,0x78,0x0B, \ +0x00,0x02,0x08,0x08,0x00,0x00,0x40,0x00,0x00,0x04,0x00,0x01,0x00,0x05,0x04, \ +0x21,0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x18,0x09,0x00,0x02, \ +0x88,0x88,0x00,0x00,0xF0,0xB5,0x58,0x4D,0x06,0x23,0x04,0x1C,0x2F,0x1C,0x46, \ +0xCC,0x46,0xC7,0x01,0x3B,0xFB,0xD1,0xC1,0x1D,0x01,0x31,0x40,0x22,0x53,0x48, \ +0xFC,0xF7,0xF5,0xFE,0xA8,0x78,0x52,0x4C,0x80,0x08,0x80,0x00,0x0F,0x27,0x3F, \ +0x06,0x00,0x28,0x05,0xD0,0x03,0x21,0x0A,0x20,0xFF,0xF7,0xA9,0xF8,0xBC,0x80, \ +0xF0,0xBD,0x28,0x78,0x0E,0x28,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A, \ +0x20,0xFF,0xF7,0x9E,0xF8,0xBC,0x80,0xF0,0xBD,0x08,0x21,0x0A,0x20,0xFF,0xF7, \ +0x98,0xF8,0x45,0x48,0x00,0x26,0x06,0x70,0x45,0x48,0x06,0x60,0x46,0x60,0x00, \ 
+0x20,0xF8,0xF7,0x0F,0xFD,0xA9,0x78,0x42,0x48,0x01,0x29,0x08,0xD1,0xC1,0x7A, \ +0xF3,0x23,0x19,0x40,0xC1,0x72,0xC1,0x7A,0x04,0x23,0x19,0x43,0xC1,0x72,0x12, \ +0xE0,0x02,0x29,0x08,0xD1,0xC1,0x7A,0xF3,0x23,0x19,0x40,0xC1,0x72,0xC1,0x7A, \ +0x08,0x23,0x19,0x43,0xC1,0x72,0x07,0xE0,0xC1,0x7A,0xF3,0x23,0x19,0x40,0xC1, \ +0x72,0xC1,0x7A,0x0C,0x23,0x19,0x43,0xC1,0x72,0xFF,0x20,0xF5,0x30,0x33,0x49, \ +0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF7,0xD1,0x07, \ +0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30, \ +0xF8,0xF7,0xC3,0xFC,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8, \ +0x61,0x01,0xF0,0xD2,0xFF,0x05,0x20,0x00,0x06,0xC6,0x62,0x06,0x63,0xE8,0x78, \ +0x01,0x28,0x07,0xD1,0x01,0x22,0x00,0x21,0x28,0x78,0x01,0xF0,0xEB,0xFC,0x05, \ +0x06,0x2D,0x0E,0x06,0xE0,0x00,0x22,0x00,0x21,0x28,0x78,0x01,0xF0,0xE3,0xFC, \ +0x05,0x06,0x2D,0x0E,0x1C,0x49,0x6B,0x20,0xC8,0x60,0x1B,0x48,0xF8,0xF7,0x9F, \ +0xFC,0x00,0x2D,0x05,0xD1,0x05,0x21,0x0A,0x20,0xFF,0xF7,0x31,0xF8,0xBC,0x80, \ +0xF0,0xBD,0x00,0x20,0xF8,0xF7,0xAC,0xFC,0x12,0x48,0x41,0x68,0xC9,0x0B,0x05, \ +0xD3,0x06,0x21,0x0A,0x20,0xFF,0xF7,0x24,0xF8,0xBC,0x80,0xF0,0xBD,0x86,0x60, \ +0x20,0x20,0x41,0x05,0x48,0x61,0x0E,0x48,0x01,0x21,0x01,0x73,0xC1,0x74,0xB8, \ +0x88,0x0D,0x4B,0x18,0x43,0xB8,0x80,0x0A,0x20,0xFF,0xF7,0x13,0xF8,0xF0,0xBD, \ +0xE0,0x0A,0x00,0x02,0x78,0x0B,0x00,0x02,0x08,0x08,0x00,0x00,0x93,0x01,0x00, \ +0x02,0xC8,0x02,0x00,0x02,0x98,0x0B,0x00,0x02,0x40,0x00,0x00,0x04,0x00,0x01, \ +0x00,0x05,0x40,0x9C,0x00,0x00,0x18,0x09,0x00,0x02,0x48,0x48,0x00,0x00,0xF0, \ +0xB5,0x35,0x4D,0xC0,0xC8,0x29,0x1C,0xC0,0xC1,0x28,0x78,0x0F,0x26,0x36,0x06, \ +0x0E,0x28,0x32,0x4F,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFE, \ +0xF7,0xEB,0xFF,0xB7,0x80,0xF0,0xBD,0xFF,0x20,0x2D,0x4C,0xF5,0x30,0x61,0x68, \ +0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49, \ +0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7, \ +0x3D,0xFC,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x01, \ +0xF0,0x4C,0xFF,0x00,0x20,0xB0,0x80,0xE8,0x78,0x01,0x28,0x07,0xD1,0x01,0x22, \ +0x00,0x21,0x28,0x78,0x01,0xF0,0x67,0xFC,0x05,0x06,0x2D,0x0E,0x06,0xE0,0x00, \ +0x22,0x00,0x21,0x28,0x78,0x01,0xF0,0x5F,0xFC,0x05,0x06,0x2D,0x0E,0x17,0x49, \ +0x6B,0x20,0xC8,0x60,0x16,0x48,0xF8,0xF7,0x1B,0xFC,0x00,0x2D,0x05,0xD1,0x05, \ +0x21,0x0A,0x20,0xFE,0xF7,0xAD,0xFF,0xB7,0x80,0xF0,0xBD,0x14,0x20,0xF8,0xF7, \ +0x10,0xFC,0x00,0x20,0xF8,0xF7,0x25,0xFC,0x60,0x68,0xC0,0x0B,0x05,0xD3,0x06, \ +0x21,0x0A,0x20,0xFE,0xF7,0x9E,0xFF,0xB7,0x80,0xF0,0xBD,0x00,0x20,0x01,0x21, \ +0x89,0x06,0xA0,0x60,0x48,0x61,0x01,0x21,0x0A,0x20,0xB7,0x80,0xFE,0xF7,0x92, \ +0xFF,0xF0,0xBD,0x00,0x00,0xE0,0x0A,0x00,0x02,0x08,0x08,0x00,0x00,0x40,0x00, \ +0x00,0x04,0x00,0x01,0x00,0x05,0x40,0x9C,0x00,0x00,0xF0,0xB5,0x5F,0x4D,0x06, \ +0x23,0x04,0x1C,0x2F,0x1C,0x46,0xCC,0x46,0xC7,0x01,0x3B,0xFB,0xD1,0x44,0xCC, \ +0x44,0xC7,0x5B,0x4E,0xC1,0x1D,0x09,0x31,0x40,0x22,0x30,0x1C,0xFC,0xF7,0xB4, \ +0xFD,0x28,0x7A,0x0F,0x24,0x24,0x06,0x0E,0x28,0x56,0x4F,0x01,0xDC,0x00,0x28, \ +0x05,0xD1,0x03,0x21,0x0A,0x20,0xFE,0xF7,0x68,0xFF,0xA7,0x80,0xF0,0xBD,0x52, \ +0x49,0x88,0x70,0x68,0x78,0x48,0x70,0x28,0x78,0x08,0x70,0x68,0x88,0x88,0x60, \ +0x68,0x68,0xC8,0x60,0x00,0x20,0xC8,0x70,0x48,0x60,0x08,0x61,0xE8,0x68,0x48, \ +0x61,0x00,0x20,0xF8,0xF7,0xD3,0xFB,0xF0,0x1D,0x19,0x30,0x01,0x78,0xFB,0x23, \ +0x19,0x40,0x01,0x70,0x69,0x7A,0x01,0x29,0x04,0xD1,0x01,0x78,0xF7,0x23,0x19, \ +0x40,0x01,0x70,0x0A,0xE0,0x02,0x29,0x04,0xD1,0x01,0x78,0x08,0x23,0x19,0x43, \ 
+0x01,0x70,0x03,0xE0,0x01,0x78,0x04,0x23,0x19,0x43,0x01,0x70,0x39,0x4D,0x68, \ +0x78,0x30,0x75,0xE8,0x7A,0xF0,0x73,0xFF,0x20,0x3A,0x4E,0xF5,0x30,0x71,0x68, \ +0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x25,0x6D, \ +0x06,0xE8,0x69,0x80,0x23,0x18,0x43,0xE8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7, \ +0x89,0xFB,0xE8,0x69,0x80,0x23,0x98,0x43,0xE8,0x61,0x01,0xF0,0x9A,0xFE,0x00, \ +0x25,0x05,0x20,0x00,0x06,0xC5,0x62,0x05,0x63,0x27,0x48,0x81,0x7A,0x01,0x29, \ +0x05,0xD1,0x01,0x22,0x00,0x21,0x00,0x7A,0x01,0xF0,0xB1,0xFB,0x04,0xE0,0x00, \ +0x22,0x00,0x21,0x00,0x7A,0x01,0xF0,0xAB,0xFB,0x25,0x49,0x6B,0x20,0xC8,0x60, \ +0x24,0x48,0xF8,0xF7,0x69,0xFB,0x00,0x20,0xF8,0xF7,0x7E,0xFB,0x0A,0x20,0xF8, \ +0xF7,0x63,0xFB,0x01,0x20,0x80,0x06,0x45,0x61,0xC0,0x68,0x1C,0x49,0x1E,0x48, \ +0x48,0x61,0x48,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFE,0xF7,0xED, \ +0xFE,0xA7,0x80,0xF0,0xBD,0x15,0x4E,0xF0,0x68,0x00,0xF0,0xA7,0xF8,0x70,0x60, \ +0xF0,0x78,0xF8,0xF7,0xA7,0xFB,0x70,0x68,0xF9,0xF7,0x8E,0xFC,0xA7,0x80,0x31, \ +0x78,0xF0,0x68,0x00,0x22,0x00,0xF0,0x2A,0xF8,0x11,0x49,0xA5,0x80,0x03,0x20, \ +0xC8,0x74,0x0B,0x49,0x22,0x20,0x88,0x60,0x08,0x05,0x41,0x6A,0x0E,0x4B,0xC9, \ +0x18,0x01,0x62,0x0D,0x48,0x01,0x21,0xA0,0x80,0x0A,0x20,0xFE,0xF7,0xC8,0xFE, \ +0xF0,0xBD,0x00,0x00,0x28,0x0B,0x00,0x02,0x78,0x0B,0x00,0x02,0x08,0x08,0x00, \ +0x00,0x88,0x0A,0x00,0x02,0x40,0x00,0x00,0x04,0x00,0x01,0x00,0x05,0x40,0x9C, \ +0x00,0x00,0x04,0x21,0x00,0x00,0x18,0x09,0x00,0x02,0x10,0x27,0x00,0x00,0x88, \ +0x88,0x00,0x00,0xF0,0xB5,0x07,0x1C,0x00,0x2A,0x0B,0xD1,0x00,0x20,0x00,0x2F, \ +0x14,0x4A,0x06,0xD9,0x09,0x06,0x09,0x0E,0x11,0x70,0x01,0x32,0x01,0x30,0xB8, \ +0x42,0xFA,0xD3,0xF0,0xBD,0xF8,0xF7,0x1A,0xFB,0xFC,0xF7,0x7A,0xFD,0xFC,0xF7, \ +0x5A,0xFD,0xBC,0x08,0x26,0x1C,0x0B,0x4D,0x04,0xD0,0xFC,0xF7,0x54,0xFD,0x01, \ +0xC5,0x01,0x3C,0xFA,0xD1,0xB0,0x00,0x3F,0x1A,0xFC,0xF7,0x4D,0xFD,0x69,0x1C, \ +0x03,0x2F,0x28,0x70,0x02,0xD1,0x00,0x0C,0x08,0x70,0xF0,0xBD,0x02,0x2F,0xE2, \ +0xD1,0x00,0x0A,0x08,0x70,0xF0,0xBD,0x00,0x00,0x00,0x72,0x01,0x02,0x98,0xB4, \ +0x01,0x20,0x80,0x06,0xC1,0x6B,0x00,0xAB,0x19,0x80,0x17,0x49,0x18,0x4F,0xCA, \ +0x7C,0x01,0x0B,0x17,0x4C,0x03,0x2A,0x1B,0xD1,0x00,0xAA,0x12,0x88,0x20,0x23, \ +0x13,0x40,0x15,0x4A,0x06,0xD0,0x84,0x63,0xD4,0x68,0x03,0x23,0x1B,0x03,0x23, \ +0x43,0x43,0x63,0x79,0x60,0x00,0xA9,0x09,0x88,0x89,0x08,0x14,0xD3,0x11,0x69, \ +0x01,0x31,0x11,0x61,0x53,0x69,0x99,0x42,0x0E,0xD2,0x91,0x68,0x42,0x6A,0x89, \ +0x18,0x01,0x62,0x09,0xE0,0x04,0x2A,0x07,0xD1,0x00,0xAA,0x12,0x88,0xD2,0x08, \ +0x03,0xD3,0x84,0x63,0x64,0x22,0x42,0x63,0x79,0x60,0x98,0xBC,0xF7,0x46,0x00, \ +0x00,0x18,0x09,0x00,0x02,0x40,0x00,0x00,0x04,0x00,0x72,0x01,0x02,0x88,0x0A, \ +0x00,0x02,0xF0,0xB5,0x04,0x30,0xC7,0x00,0x19,0x4C,0x00,0x26,0xE6,0x70,0x60, \ +0x78,0x01,0x28,0x15,0xD0,0x02,0x28,0x15,0xD0,0x03,0x28,0x25,0xD1,0x0B,0x20, \ +0x39,0x1C,0xFC,0xF7,0xB3,0xFC,0x0D,0x1C,0x79,0x1A,0x0B,0x20,0xFC,0xF7,0xAE, \ +0xFC,0x07,0x1C,0x00,0x2D,0x18,0xD9,0x01,0x37,0x04,0x2D,0x13,0xD2,0x01,0x20, \ +0xE0,0x70,0x13,0xE0,0x7F,0x08,0x11,0xE0,0x79,0x00,0x0B,0x20,0x0F,0x1C,0xFC, \ +0xF7,0x9E,0xFC,0x0C,0x1C,0x79,0x1A,0x0B,0x20,0xFC,0xF7,0x99,0xFC,0x07,0x1C, \ +0x00,0x2C,0x04,0xD9,0x01,0x37,0x02,0xE0,0xE6,0x70,0x00,0xE0,0xE6,0x70,0x38, \ +0x04,0x00,0x0C,0xF0,0xBD,0x88,0x0A,0x00,0x02,0xF0,0xB5,0x50,0x4E,0x06,0x23, \ +0x04,0x1C,0x37,0x1C,0x26,0xCC,0x26,0xC7,0x01,0x3B,0xFB,0xD1,0xC1,0x1D,0x4C, \ +0x4D,0x01,0x31,0x40,0x22,0x28,0x1C,0xFC,0xF7,0x38,0xFC,0x30,0x78,0x0F,0x24, \ +0x24,0x06,0x0E,0x28,0x48,0x4F,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A, \ 
+0x20,0xFE,0xF7,0xEC,0xFD,0xA7,0x80,0xF0,0xBD,0xE8,0x1D,0x19,0x30,0x01,0x78, \ +0xFB,0x23,0x19,0x40,0x01,0x70,0xB1,0x78,0x01,0x29,0x04,0xD1,0x01,0x78,0xF7, \ +0x23,0x19,0x40,0x01,0x70,0x0A,0xE0,0x02,0x29,0x04,0xD1,0x01,0x78,0x08,0x23, \ +0x19,0x43,0x01,0x70,0x03,0xE0,0x01,0x78,0x04,0x23,0x19,0x43,0x01,0x70,0x70, \ +0x78,0x28,0x75,0x30,0x79,0xE8,0x73,0xFF,0x20,0x35,0x4D,0xF5,0x30,0x69,0x68, \ +0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49, \ +0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7, \ +0x21,0xFA,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x01, \ +0xF0,0x30,0xFD,0x29,0x48,0x00,0x21,0xC1,0x60,0x05,0x20,0x00,0x06,0xC1,0x62, \ +0x01,0x63,0xF0,0x78,0x01,0x28,0x04,0xD1,0x30,0x78,0x01,0x22,0x01,0xF0,0x47, \ +0xFA,0x04,0xE0,0x00,0x22,0x00,0x21,0x30,0x78,0x01,0xF0,0x41,0xFA,0x00,0x20, \ +0xF8,0xF7,0x1A,0xFA,0x0A,0x20,0xF8,0xF7,0xFF,0xF9,0x00,0x26,0x01,0x20,0x80, \ +0x06,0x46,0x61,0xC0,0x68,0x1A,0x48,0x68,0x61,0x68,0x68,0xC0,0x0B,0x05,0xD3, \ +0x06,0x21,0x0A,0x20,0xFE,0xF7,0x89,0xFD,0xA7,0x80,0xF0,0xBD,0x01,0x22,0x55, \ +0x21,0x7D,0x20,0xC0,0x00,0xA7,0x80,0xFF,0xF7,0xD0,0xFE,0x12,0x48,0x01,0x21, \ +0x89,0x06,0x88,0x63,0x11,0x48,0x12,0x4A,0x48,0x63,0xA6,0x80,0x04,0x20,0xA8, \ +0x60,0xD0,0x74,0x00,0x03,0x68,0x60,0x0A,0x4A,0x6B,0x20,0xD0,0x60,0x48,0x6A, \ +0x0A,0x30,0x08,0x62,0x0C,0x48,0x01,0x21,0xA0,0x80,0x0A,0x20,0xFE,0xF7,0x67, \ +0xFD,0xF0,0xBD,0xE0,0x0A,0x00,0x02,0x78,0x0B,0x00,0x02,0x08,0x08,0x00,0x00, \ +0x40,0x00,0x00,0x04,0x00,0x01,0x00,0x05,0x04,0x21,0x00,0x00,0x00,0x72,0x01, \ +0x02,0x64,0x10,0x00,0x00,0x18,0x09,0x00,0x02,0x88,0x88,0x00,0x00,0x80,0xB4, \ +0x05,0x21,0x09,0x06,0x06,0x4B,0x00,0x20,0x1F,0x18,0x01,0x30,0x0A,0x68,0x00, \ +0x06,0x00,0x0E,0x04,0x31,0x3F,0x28,0x3A,0x76,0xF6,0xDB,0x80,0xBC,0xF7,0x46, \ +0x88,0x0A,0x00,0x02,0x80,0xB5,0x01,0x1C,0x14,0x48,0x40,0x22,0xFC,0xF7,0x7A, \ +0xFB,0xFF,0x20,0x13,0x49,0xF5,0x30,0x4A,0x68,0xD2,0x0B,0x03,0xD3,0x02,0x1C, \ +0x01,0x38,0x00,0x2A,0xF8,0xD1,0x07,0x27,0x7F,0x06,0xF8,0x69,0x80,0x23,0x18, \ +0x43,0xF8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x8E,0xF9,0xF8,0x69,0x80,0x23, \ +0x98,0x43,0xF8,0x61,0x01,0xF0,0x9F,0xFC,0x01,0xF0,0x84,0xFA,0x06,0x48,0x0F, \ +0x21,0x09,0x06,0x88,0x80,0x01,0x21,0x0A,0x20,0xFE,0xF7,0x16,0xFD,0x80,0xBD, \ +0x00,0x00,0x78,0x0B,0x00,0x02,0x40,0x00,0x00,0x04,0x08,0x08,0x00,0x00,0x80, \ +0xB5,0x15,0x49,0x01,0x27,0xC9,0x7C,0x01,0x29,0x13,0xD1,0x13,0x4B,0x18,0x40, \ +0x0E,0xD0,0x88,0x06,0xC0,0x68,0x81,0x09,0x0A,0xD3,0x04,0x21,0x01,0x40,0x10, \ +0x48,0x03,0xD0,0x41,0x68,0x01,0x31,0x41,0x60,0x02,0xE0,0x01,0x68,0x01,0x31, \ +0x01,0x60,0x38,0x1C,0x80,0xBD,0x02,0x29,0x01,0xD1,0x38,0x1C,0x80,0xBD,0x03, \ +0x29,0x01,0xD0,0x04,0x29,0x06,0xD1,0x07,0x4B,0x18,0x40,0x01,0xD0,0xFF,0xF7, \ +0x69,0xFE,0x38,0x1C,0x80,0xBD,0x00,0x20,0x80,0xBD,0x00,0x00,0x18,0x09,0x00, \ +0x02,0x40,0x40,0x00,0x00,0xC8,0x02,0x00,0x02,0x80,0x80,0x00,0x00,0xF7,0xB5, \ +0x84,0xB0,0x00,0x20,0x00,0x24,0x00,0x26,0x00,0x27,0x00,0x25,0x03,0x90,0x02, \ +0x90,0x01,0x90,0x68,0x46,0x04,0x22,0x33,0x49,0xFC,0xF7,0x0B,0xFB,0x05,0x99, \ +0x00,0x20,0x00,0x29,0x1B,0xDD,0x04,0x99,0x80,0x23,0x09,0x5C,0x0A,0x1C,0x9A, \ +0x43,0x16,0x2A,0x02,0xD1,0x00,0xAB,0xD9,0x70,0x0D,0xE0,0x0B,0x2A,0x02,0xD1, \ +0x00,0xAB,0x99,0x70,0x08,0xE0,0x04,0x2A,0x02,0xD1,0x00,0xAB,0x59,0x70,0x03, \ +0xE0,0x02,0x2A,0x01,0xD1,0x00,0xAB,0x19,0x70,0x05,0x99,0x01,0x30,0x88,0x42, \ +0xE3,0xDB,0x00,0x20,0x69,0x46,0x09,0x5C,0x00,0x29,0x0B,0xD0,0x09,0x0A,0x04, \ +0xD3,0x00,0x2E,0x00,0xD1,0x07,0x1C,0x01,0x26,0x04,0x1C,0x02,0x90,0x00,0x2D, \ 
+0x01,0xD1,0x01,0x25,0x01,0x90,0x01,0x30,0x04,0x28,0xEC,0xDB,0x00,0x2D,0x02, \ +0xD1,0x00,0x20,0x07,0xB0,0xF0,0xBD,0x00,0x2E,0x01,0xD1,0x01,0x9C,0x27,0x1C, \ +0x14,0x49,0x00,0x20,0x0A,0x18,0x12,0x7C,0x00,0x2A,0x00,0xD0,0x03,0x90,0x01, \ +0x30,0x04,0x28,0xF7,0xDB,0x03,0x9A,0x10,0x48,0x94,0x42,0x02,0xDD,0x03,0x9A, \ +0x82,0x72,0x00,0xE0,0x84,0x72,0x02,0x9A,0xC2,0x72,0xC7,0x71,0x00,0x22,0x6B, \ +0x46,0x9B,0x5C,0x8C,0x18,0x01,0x32,0x04,0x2A,0x23,0x74,0xF8,0xDB,0xC9,0x19, \ +0x0A,0x7C,0x80,0x23,0x1A,0x43,0x0A,0x74,0xC0,0x7A,0x05,0x49,0xC8,0x70,0x01, \ +0x20,0xD2,0xE7,0x00,0x00,0xEC,0x99,0x00,0x00,0x00,0x00,0x00,0x02,0x04,0x01, \ +0x00,0x02,0xC0,0x01,0x00,0x02,0xF0,0xB4,0x44,0x78,0x00,0x26,0x05,0x2C,0x01, \ +0xD8,0x00,0x2C,0x02,0xD1,0x30,0x1C,0xF0,0xBC,0xF7,0x46,0x00,0x22,0x00,0x27, \ +0x00,0x2C,0x17,0xD9,0xC3,0x19,0x9D,0x78,0x6B,0x06,0x5B,0x0E,0x02,0x2B,0x08, \ +0xD0,0x04,0x2B,0x06,0xD0,0x0B,0x2B,0x04,0xD0,0x16,0x2B,0x02,0xD0,0x2C,0x2B, \ +0x0B,0xD1,0x04,0xE0,0x2C,0x2B,0x02,0xD0,0x13,0x1C,0xCD,0x54,0x01,0x32,0x01, \ +0x37,0xA7,0x42,0xE8,0xD3,0x03,0xE0,0x00,0x2B,0x01,0xD1,0x30,0x1C,0xDD,0xE7, \ +0x10,0x1C,0xDB,0xE7,0xF1,0xB5,0x85,0xB0,0x00,0x20,0x01,0x90,0x68,0x46,0x04, \ +0x22,0x71,0x49,0xFC,0xF7,0x69,0xFA,0x71,0x4E,0x04,0x24,0x30,0x68,0x45,0x68, \ +0x80,0x89,0x2F,0x28,0x02,0xDA,0x00,0x20,0x06,0xB0,0xF0,0xBD,0x05,0x98,0x6C, \ +0x49,0x01,0x28,0x04,0x91,0x09,0xD1,0x06,0x22,0xE8,0x1D,0x09,0x30,0x04,0x99, \ +0xFC,0xF7,0x36,0xFA,0x00,0x28,0x01,0xD0,0x00,0x20,0xEE,0xE7,0x20,0x20,0xE9, \ +0x1D,0x19,0x31,0x28,0x5C,0x49,0x78,0x09,0x02,0x08,0x43,0x00,0x04,0x00,0x0C, \ +0x02,0x90,0x14,0x28,0x04,0xDB,0x7D,0x23,0x02,0x98,0xDB,0x00,0x98,0x42,0x01, \ +0xDD,0x00,0x20,0xDB,0xE7,0x22,0x20,0x28,0x5C,0x80,0x08,0x01,0xD2,0x00,0x20, \ +0xD5,0xE7,0x30,0x68,0x24,0x27,0x80,0x89,0x04,0x38,0x24,0x28,0x45,0xDD,0x57, \ +0x49,0x03,0x91,0xE8,0x5D,0x00,0x28,0x09,0xD0,0x01,0x28,0x20,0xD0,0x03,0x28, \ +0x39,0xD1,0xE8,0x19,0x41,0x78,0x01,0x29,0x27,0xD0,0x00,0x20,0xC0,0xE7,0xEE, \ +0x19,0x70,0x78,0x00,0x28,0x00,0xD1,0xBB,0xE7,0x4E,0x49,0x4A,0x79,0x82,0x42, \ +0x01,0xD0,0x00,0x20,0xB5,0xE7,0x03,0x99,0xB0,0x1C,0xFC,0xF7,0xF5,0xF9,0x00, \ +0x28,0x01,0xD0,0x00,0x20,0xAD,0xE7,0x70,0x78,0xC0,0x19,0x87,0x1C,0x01,0x20, \ +0x01,0x90,0x14,0xE0,0xE8,0x19,0x69,0x46,0x06,0x1C,0xFF,0xF7,0x66,0xFF,0x04, \ +0x1C,0x01,0xD1,0x00,0x20,0x9E,0xE7,0x70,0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0, \ +0x3E,0x49,0x80,0x78,0x09,0x7D,0x88,0x42,0x01,0xD0,0x00,0x20,0x93,0xE7,0x03, \ +0x37,0x36,0x4E,0x30,0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xBE,0xDC,0x01,0x98, \ +0x00,0x28,0x01,0xD1,0x00,0x20,0x87,0xE7,0x68,0x46,0x01,0x22,0x21,0x1C,0xFF, \ +0xF7,0xCA,0xFE,0x00,0x28,0x00,0xD1,0x7F,0xE7,0x04,0x20,0xF9,0xF7,0xAA,0xFC, \ +0x30,0x48,0x20,0x23,0x01,0x78,0x30,0x4F,0x19,0x43,0x01,0x70,0x01,0x78,0x10, \ +0x23,0x19,0x43,0x01,0x70,0xE9,0x18,0x0C,0x1C,0xF8,0x1D,0x06,0x22,0x07,0x30, \ +0xFC,0xF7,0xCD,0xF9,0x06,0x22,0x21,0x1C,0x04,0x98,0xFC,0xF7,0xC8,0xF9,0x23, \ +0x4C,0xF8,0x1D,0x62,0x79,0x03,0x99,0x0D,0x30,0xFC,0xF7,0xC1,0xF9,0x24,0x48, \ +0x01,0x25,0xFE,0x1D,0x29,0x36,0x05,0x75,0x35,0x71,0x02,0x98,0x38,0x80,0xA5, \ +0x70,0x05,0x98,0x01,0x28,0x08,0xD1,0x00,0x21,0x00,0x20,0x01,0xF0,0x11,0xFB, \ +0x15,0x49,0x00,0x20,0x09,0x68,0x48,0x61,0x07,0xE0,0xF9,0xF7,0x38,0xFC,0x39, \ +0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0xF9,0xF7,0x56,0xFC,0x16,0x49,0x00,0x20, \ +0x48,0x70,0x05,0x20,0x88,0x71,0x05,0x98,0x01,0x28,0x04,0xD1,0x01,0x21,0x04, \ +0x20,0xFE,0xF7,0x58,0xFB,0x00,0xE0,0xB5,0x71,0x10,0x48,0x29,0x1C,0x00,0x68, \ +0x81,0x40,0x07,0x20,0x40,0x06,0x82,0x69,0x11,0x43,0x81,0x61,0x0D,0x48,0x05, \ 
+0x70,0x28,0x1C,0x27,0xE7,0x00,0x00,0xF0,0x99,0x00,0x00,0x48,0x01,0x00,0x02, \ +0xFC,0x00,0x00,0x02,0xDC,0x00,0x00,0x02,0x04,0x01,0x00,0x02,0x00,0x00,0x00, \ +0x02,0x93,0x01,0x00,0x02,0x80,0x00,0x00,0x02,0x18,0x09,0x00,0x02,0x68,0x09, \ +0x00,0x02,0x9C,0x02,0x00,0x02,0x3A,0x01,0x00,0x02,0xF0,0xB5,0x84,0xB0,0x5A, \ +0x49,0x04,0x22,0x01,0xA8,0xFC,0xF7,0x69,0xF9,0x59,0x4F,0x59,0x49,0x38,0x68, \ +0x00,0x25,0x46,0x68,0x06,0x22,0xF0,0x1D,0x09,0x30,0x03,0x91,0xFC,0xF7,0x40, \ +0xF9,0x00,0x28,0x02,0xD0,0x00,0x20,0x04,0xB0,0xF0,0xBD,0x39,0x68,0x38,0x1C, \ +0x89,0x89,0x2F,0x29,0x01,0xDA,0x00,0x20,0xF6,0xE7,0x20,0x22,0xF3,0x1D,0x19, \ +0x33,0xB2,0x5C,0x5B,0x78,0x1B,0x02,0x1A,0x43,0x12,0x04,0x12,0x0C,0x00,0x92, \ +0x14,0x2A,0x04,0xDB,0x7D,0x23,0x00,0x9A,0xDB,0x00,0x9A,0x42,0x01,0xDD,0x00, \ +0x20,0xE3,0xE7,0x22,0x22,0xB2,0x5C,0x52,0x08,0x01,0xD2,0x00,0x20,0xDD,0xE7, \ +0x24,0x27,0x04,0x39,0x24,0x29,0x34,0xDD,0xF0,0x5D,0x00,0x28,0x09,0xD0,0x01, \ +0x28,0x11,0xD0,0x03,0x28,0x2B,0xD1,0xF0,0x19,0x41,0x78,0x01,0x29,0x19,0xD0, \ +0x00,0x20,0xCC,0xE7,0xF0,0x19,0x40,0x78,0x20,0x28,0x01,0xD9,0x00,0x25,0x00, \ +0xE0,0x01,0x25,0xC0,0x19,0x87,0x1C,0x15,0xE0,0xF0,0x19,0x02,0x90,0x01,0xA9, \ +0xFF,0xF7,0x7B,0xFE,0x04,0x1C,0x01,0xD1,0x00,0x20,0xB9,0xE7,0x02,0x98,0x40, \ +0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0,0x2E,0x49,0x80,0x78,0x09,0x7D,0x88,0x42, \ +0x01,0xD0,0x00,0x20,0xAD,0xE7,0x03,0x37,0x28,0x48,0x00,0x68,0x80,0x89,0x04, \ +0x38,0xB8,0x42,0xCC,0xDC,0x00,0x2D,0x01,0xD1,0x00,0x20,0xA2,0xE7,0x01,0x22, \ +0x21,0x1C,0x01,0xA8,0xFF,0xF7,0xDF,0xFD,0x00,0x28,0x00,0xD1,0x9A,0xE7,0x23, \ +0x4C,0x06,0x22,0xE0,0x1D,0x07,0x30,0x20,0x4F,0x03,0x99,0xFC,0xF7,0xEE,0xF8, \ +0xE0,0x1D,0x0D,0x30,0x20,0x22,0xF9,0x1D,0x15,0x31,0xFC,0xF7,0xE7,0xF8,0xF8, \ +0x1D,0x39,0x30,0x81,0x78,0xE0,0x1D,0x29,0x30,0x01,0x71,0x01,0x79,0x19,0x48, \ +0x20,0x23,0x01,0x75,0x00,0x9A,0x18,0x49,0x22,0x80,0x0A,0x78,0x1A,0x43,0x0A, \ +0x70,0x0A,0x78,0x10,0x23,0x1A,0x43,0x0A,0x70,0x00,0x21,0x14,0x4A,0x50,0x30, \ +0x41,0x70,0x91,0x70,0x05,0x21,0x81,0x71,0x04,0x20,0xF9,0xF7,0x95,0xFB,0x01, \ +0x21,0x04,0x20,0xFE,0xF7,0x85,0xFA,0x0F,0x48,0x01,0x68,0x01,0x20,0x02,0x1C, \ +0x8A,0x40,0x07,0x21,0x49,0x06,0x8B,0x69,0x1A,0x43,0x8A,0x61,0x0B,0x49,0x08, \ +0x70,0x5C,0xE7,0xF4,0x99,0x00,0x00,0x48,0x01,0x00,0x02,0xFC,0x00,0x00,0x02, \ +0x00,0x00,0x00,0x02,0xC0,0x00,0x00,0x02,0x80,0x00,0x00,0x02,0x18,0x09,0x00, \ +0x02,0x93,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0x9C,0x02,0x00,0x02,0x3A,0x01, \ +0x00,0x02,0xF0,0xB4,0x1D,0x4A,0x1D,0x4B,0xD1,0x1D,0x69,0x31,0xC9,0x7A,0x49, \ +0x00,0x5F,0x5A,0xD1,0x1D,0x59,0x31,0x0B,0x8B,0x01,0x3B,0x1B,0x04,0x1B,0x14, \ +0x0B,0x83,0x00,0x2B,0x26,0xDD,0x17,0x4B,0x01,0x25,0x5C,0x7A,0x50,0x32,0xD3, \ +0x79,0x00,0x2B,0x04,0xD1,0x05,0x30,0x0E,0x28,0x05,0xD9,0x0E,0x38,0x03,0xE0, \ +0x01,0x30,0x0E,0x28,0x00,0xD9,0x01,0x20,0x00,0x2C,0x05,0xD1,0x2B,0x1C,0x46, \ +0x1E,0xB3,0x40,0x3B,0x40,0x10,0xD1,0x07,0xE0,0xD3,0x79,0x00,0x2B,0x0C,0xD1, \ +0x0A,0x4B,0x1B,0x18,0x5B,0x7B,0x00,0x2B,0x07,0xD1,0x0B,0x8B,0x01,0x3B,0x1B, \ +0x04,0x1B,0x14,0x0B,0x83,0x00,0x2B,0xDC,0xDC,0x00,0x20,0xF0,0xBC,0xF7,0x46, \ +0x00,0x00,0x18,0x09,0x00,0x02,0x64,0x02,0x00,0x02,0xB0,0x00,0x00,0x02,0x14, \ +0x01,0x00,0x02,0xF0,0xB5,0x29,0x4C,0x07,0x1C,0x00,0x26,0x27,0x70,0xE0,0x1D, \ +0x03,0x30,0x66,0x70,0x66,0x80,0x06,0x22,0x25,0x49,0xFC,0xF7,0x51,0xF8,0x25, \ +0x4D,0xE0,0x1D,0x09,0x30,0x06,0x22,0xE9,0x1D,0x35,0x31,0xFC,0xF7,0x49,0xF8, \ +0xFF,0x20,0x20,0x71,0x60,0x71,0xA0,0x71,0xE0,0x71,0x20,0x72,0x60,0x72,0x38, \ +0x1C,0x40,0x28,0x1D,0x4F,0x1D,0xD0,0x00,0xF0,0xE8,0xF8,0x00,0xF0,0xF0,0xF8, \ 
+0xE5,0x1D,0x1D,0x35,0x28,0x1C,0x00,0xF0,0x0B,0xF9,0x2D,0x18,0x28,0x1C,0x00, \ +0xF0,0x23,0xF9,0x2D,0x18,0x16,0x48,0x80,0x7D,0x02,0x28,0x03,0xD1,0x28,0x1C, \ +0x00,0xF0,0x33,0xF9,0x2D,0x18,0x28,0x1C,0x00,0xF0,0x3D,0xF9,0x28,0x18,0x00, \ +0x1B,0xF8,0x64,0xB8,0x64,0xF0,0xBD,0x26,0x76,0x0F,0x4E,0xE0,0x1D,0x72,0x79, \ +0x13,0x30,0xE9,0x1D,0x15,0x31,0x62,0x76,0xFC,0xF7,0x16,0xF8,0x70,0x79,0x00, \ +0x19,0x1A,0x30,0x00,0xF0,0x03,0xF9,0x70,0x79,0x20,0x30,0x00,0x06,0x00,0x0E, \ +0xB8,0x64,0xF0,0xBD,0x00,0x00,0xD8,0x07,0x00,0x02,0x5C,0x00,0x00,0x02,0xC0, \ +0x00,0x00,0x02,0x18,0x09,0x00,0x02,0x00,0x00,0x00,0x02,0x04,0x01,0x00,0x02, \ +0xF8,0xB5,0x07,0x1C,0xFF,0xF7,0x9C,0xFF,0x00,0x26,0x80,0x2F,0x47,0x4D,0x0E, \ +0xD1,0xC0,0x20,0xFB,0xF7,0x0F,0xFC,0x04,0x1C,0x45,0x48,0x41,0x7B,0x03,0x29, \ +0x03,0xD0,0x20,0x1C,0xFB,0xF7,0x07,0xFC,0xF8,0xBD,0x01,0x21,0x41,0x73,0x10, \ +0xE0,0x40,0x2F,0x05,0xD1,0x40,0x48,0x01,0x21,0x81,0x74,0x3F,0x48,0x46,0x80, \ +0x08,0xE0,0x50,0x2F,0x06,0xD1,0x3E,0x48,0x3E,0x49,0x06,0x22,0xFB,0xF7,0xD8, \ +0xFF,0x01,0x21,0x29,0x71,0x3C,0x48,0xF7,0xF7,0xCB,0xFE,0x50,0x2F,0x02,0xD1, \ +0x36,0x48,0xC0,0x6C,0x01,0xE0,0x34,0x48,0x80,0x6C,0x33,0x49,0x88,0x66,0x37, \ +0x48,0x89,0x6E,0xC0,0x79,0xF9,0xF7,0x8F,0xF9,0x30,0x49,0x50,0x2F,0xC8,0x66, \ +0x0C,0xD1,0x2E,0x48,0x2E,0x49,0xC0,0x6E,0x48,0x80,0x31,0x48,0xC0,0x79,0xF9, \ +0xF7,0xE5,0xFD,0x2B,0x49,0x49,0x88,0x40,0x18,0x29,0x49,0x48,0x80,0x28,0x48, \ +0x27,0x49,0x80,0x2F,0x48,0x66,0x16,0xD1,0xFC,0xF7,0x2D,0xF8,0x2A,0x49,0x89, \ +0x89,0x49,0x00,0x01,0x31,0x08,0x40,0x21,0x49,0x88,0x62,0x27,0x48,0x00,0x88, \ +0x08,0x62,0x89,0x6A,0x8B,0x00,0x59,0x18,0x89,0x00,0x09,0x18,0x08,0x20,0xF9, \ +0xF7,0x4D,0xFA,0x20,0x1C,0xFB,0xF7,0xB4,0xFB,0xF8,0xF7,0xA4,0xFE,0xF8,0xF7, \ +0x0A,0xFF,0x00,0x90,0x80,0x2F,0x05,0xD1,0x00,0x98,0x00,0x28,0x23,0xD1,0x01, \ +0x21,0x69,0x70,0x20,0xE0,0x40,0x2F,0x1E,0xD1,0x12,0x4C,0xC0,0x20,0xA6,0x74, \ +0xFB,0xF7,0xA0,0xFB,0x07,0x1C,0xA8,0x79,0x01,0x28,0x12,0xD1,0x00,0x98,0x00, \ +0x28,0x0D,0xD1,0xE0,0x1D,0x69,0x30,0x81,0x7A,0x00,0x29,0x0A,0xD1,0x01,0x21, \ +0x81,0x72,0x0E,0x49,0xC8,0x8A,0x81,0x02,0x04,0x20,0xF9,0xF7,0x22,0xFA,0x01, \ +0xE0,0x01,0x21,0x69,0x71,0x38,0x1C,0xFB,0xF7,0x86,0xFB,0x7D,0xE7,0x00,0x00, \ +0x68,0x09,0x00,0x02,0x28,0x09,0x00,0x02,0x18,0x09,0x00,0x02,0xD8,0x07,0x00, \ +0x02,0xDC,0x07,0x00,0x02,0x30,0x01,0x00,0x02,0xEE,0x07,0x00,0x02,0x04,0x01, \ +0x00,0x02,0xC0,0x00,0x00,0x02,0xA4,0x01,0x00,0x02,0x03,0x49,0x02,0x48,0x09, \ +0x88,0x01,0x80,0xF7,0x46,0x00,0x00,0xF8,0x07,0x00,0x02,0x80,0x00,0x00,0x02, \ +0x0D,0x49,0x0C,0x48,0x8A,0x7A,0x92,0x00,0x02,0x80,0xC9,0x7A,0x00,0x29,0x03, \ +0xD0,0x01,0x88,0x10,0x23,0x19,0x43,0x01,0x80,0x08,0x49,0x49,0x7A,0x01,0x29, \ +0x04,0xD1,0x01,0x88,0x22,0x23,0x19,0x43,0x01,0x80,0xF7,0x46,0x01,0x88,0x02, \ +0x23,0x19,0x43,0x01,0x80,0xF7,0x46,0xFA,0x07,0x00,0x02,0x80,0x00,0x00,0x02, \ +0x04,0x01,0x00,0x02,0x90,0xB4,0x01,0x1C,0x00,0x20,0x0A,0x4A,0x08,0x70,0x53, \ +0x79,0x00,0x2B,0x08,0xD9,0x08,0x4B,0x1F,0x18,0x3F,0x7D,0x0C,0x18,0x01,0x30, \ +0xA7,0x70,0x57,0x79,0x87,0x42,0xF7,0xD8,0x50,0x79,0x48,0x70,0x50,0x79,0x90, \ +0xBC,0x02,0x30,0xF7,0x46,0x00,0x00,0x04,0x01,0x00,0x02,0x80,0x00,0x00,0x02, \ +0x90,0xB4,0x01,0x1C,0x01,0x20,0x08,0x70,0x00,0x20,0x08,0x4B,0x00,0x22,0x9F, \ +0x18,0x3F,0x7C,0x00,0x2F,0x02,0xD0,0x0C,0x18,0xA7,0x70,0x01,0x30,0x01,0x32, \ +0x04,0x2A,0xF5,0xD3,0x48,0x70,0x90,0xBC,0x02,0x30,0xF7,0x46,0x00,0x00,0x00, \ +0x00,0x00,0x02,0x03,0x21,0x01,0x70,0x01,0x22,0x42,0x70,0x01,0x30,0x80,0x18, \ +0x02,0x4A,0x12,0x7D,0x02,0x70,0x08,0x1C,0xF7,0x46,0x00,0x00,0x00,0x00,0x00, \ 
+0x02,0x06,0x21,0x01,0x70,0x02,0x21,0x41,0x70,0x04,0x49,0x02,0x30,0x0A,0x89, \ +0x02,0x70,0x09,0x89,0x09,0x0A,0x41,0x70,0x04,0x20,0xF7,0x46,0x00,0x00,0x80, \ +0x00,0x00,0x02,0x0A,0x21,0x01,0x70,0x02,0x21,0x41,0x70,0x00,0x21,0x81,0x70, \ +0x02,0x30,0x41,0x1C,0x07,0x20,0x08,0x70,0x04,0x20,0xF7,0x46,0xF0,0xB5,0x83, \ +0xB0,0x51,0x48,0x52,0x4D,0x48,0x21,0x01,0x70,0x01,0x26,0xEC,0x1D,0x29,0x34, \ +0x46,0x70,0x62,0x79,0x11,0x21,0x4E,0x4F,0x02,0x2A,0x01,0xD1,0x41,0x70,0x05, \ +0xE0,0x03,0x2A,0x03,0xD1,0xBA,0x78,0x08,0x2A,0x00,0xD1,0x41,0x70,0x4A,0x49, \ +0x09,0x68,0x89,0x78,0x00,0x29,0x03,0xD0,0x41,0x78,0x08,0x23,0x19,0x43,0x41, \ +0x70,0x46,0x49,0x00,0x23,0x00,0x22,0x46,0x48,0xC9,0x79,0xF7,0xF7,0xAB,0xFD, \ +0x45,0x48,0x45,0x49,0x06,0x22,0xFB,0xF7,0xA0,0xFE,0xE9,0x1D,0x07,0x31,0x0D, \ +0x1C,0x06,0x22,0x42,0x48,0xFB,0xF7,0x99,0xFE,0x29,0x1C,0x06,0x22,0x41,0x48, \ +0xFB,0xF7,0x94,0xFE,0x40,0x4D,0x18,0x20,0xA8,0x66,0x39,0x48,0x18,0x21,0xC0, \ +0x79,0xF9,0xF7,0x56,0xF8,0xE8,0x66,0x32,0x48,0xEE,0x1D,0x68,0x66,0x01,0x20, \ +0x49,0x36,0xF0,0x70,0xF8,0xF7,0x91,0xFD,0xF8,0xF7,0xF7,0xFD,0x02,0x90,0x00, \ +0x20,0xF0,0x70,0x02,0x98,0x00,0x28,0x01,0xD0,0x03,0xB0,0xF0,0xBD,0x02,0x26, \ +0x2C,0x48,0x6E,0x60,0xC0,0x79,0x32,0x49,0x40,0x00,0x08,0x5A,0x31,0x49,0xC9, \ +0x88,0x40,0x18,0x31,0x49,0x09,0x88,0x41,0x18,0x01,0x20,0xF9,0xF7,0x1B,0xF9, \ +0x00,0x22,0xD2,0x43,0x6E,0x74,0x00,0x92,0x01,0x22,0x10,0x21,0x01,0xAB,0x2B, \ +0x48,0xFB,0xF7,0x19,0xFA,0x00,0x20,0x1E,0x49,0x68,0x74,0x0A,0x68,0x53,0x78, \ +0x00,0x2B,0x22,0xD0,0x93,0x78,0x01,0x33,0x1B,0x06,0x1B,0x0E,0x93,0x70,0x04, \ +0x2B,0x02,0xDA,0x09,0x68,0x48,0x70,0xD2,0xE7,0x60,0x79,0x01,0x28,0x1F,0xDD, \ +0x02,0x28,0x03,0xD1,0xBA,0x78,0x08,0x23,0x9A,0x43,0xBA,0x70,0x03,0x28,0x17, \ +0xD1,0x0E,0x48,0x40,0x78,0x40,0x09,0x06,0xD3,0x01,0x20,0xF8,0x70,0xB8,0x78, \ +0x08,0x23,0x98,0x43,0xB8,0x70,0x0C,0xE0,0x01,0x20,0xB8,0x71,0x09,0xE0,0x60, \ +0x79,0x03,0x28,0x06,0xD1,0x05,0x4A,0x01,0x20,0x52,0x78,0x52,0x09,0x00,0xD3, \ +0x00,0x20,0xF8,0x70,0x09,0x68,0x40,0x20,0x08,0x70,0xAB,0xE7,0x00,0x00,0xD8, \ +0x07,0x00,0x02,0x80,0x00,0x00,0x02,0x88,0x09,0x00,0x02,0xC8,0x01,0x00,0x02, \ +0x04,0x01,0x00,0x02,0xDA,0x07,0x00,0x02,0xE2,0x07,0x00,0x02,0x5C,0x00,0x00, \ +0x02,0xE8,0x07,0x00,0x02,0xDC,0x07,0x00,0x02,0x18,0x09,0x00,0x02,0xAC,0x01, \ +0x00,0x02,0x00,0x00,0x00,0x02,0xAA,0x01,0x00,0x02,0xEC,0x06,0x00,0x02,0xF8, \ +0xB4,0x00,0x26,0x82,0x1C,0x06,0x29,0x01,0xD3,0x48,0x08,0x02,0xD3,0x00,0x20, \ +0xF8,0xBC,0xF7,0x46,0x00,0x24,0x03,0x23,0x00,0x25,0xCF,0x1E,0x17,0xD0,0x01, \ +0x39,0xD0,0x5C,0x99,0x42,0x02,0xD1,0x00,0x28,0x0F,0xD1,0x0C,0xE0,0x0E,0x28, \ +0x0C,0xD8,0x01,0x28,0x0A,0xD3,0xA8,0x42,0x08,0xD3,0xD5,0x18,0x6D,0x78,0x03, \ +0x33,0x03,0x34,0x2D,0x18,0xA7,0x42,0xEC,0xD8,0x01,0x2E,0x01,0xD1,0x00,0x20, \ +0xE0,0xE7,0x1B,0x48,0xC0,0x79,0x01,0x28,0x00,0xD1,0xDB,0xE7,0x19,0x48,0xC1, \ +0x1D,0x29,0x31,0x49,0x7A,0x00,0x29,0x01,0xD1,0x01,0x20,0xD3,0xE7,0x91,0x78, \ +0x3A,0x30,0x00,0x23,0x81,0x70,0x51,0x78,0x41,0x70,0x11,0x78,0x01,0x70,0x03, \ +0x21,0x00,0x2F,0x1B,0xD9,0x50,0x5C,0x00,0x28,0x18,0xD0,0x0F,0x4D,0x01,0x26, \ +0x2C,0x18,0x66,0x73,0x54,0x18,0x00,0x94,0x64,0x78,0x24,0x18,0xA0,0x42,0x0A, \ +0xD2,0x0A,0x4D,0x01,0x26,0x2D,0x18,0x6E,0x73,0x00,0x9E,0x10,0x3D,0xB6,0x78, \ +0x01,0x30,0xA0,0x42,0xEE,0x73,0xF4,0xD3,0x03,0x31,0x03,0x33,0x9F,0x42,0xE3, \ +0xD8,0x01,0x20,0xAA,0xE7,0x00,0x00,0x88,0x09,0x00,0x02,0x80,0x00,0x00,0x02, \ +0x14,0x01,0x00,0x02,0xF1,0xB5,0x81,0xB0,0x22,0x4F,0x01,0x9E,0x3F,0x68,0x00, \ +0x24,0xBF,0x89,0x00,0x21,0x24,0x20,0x3D,0x1F,0x00,0x95,0x24,0x2D,0x39,0xD9, \ 
+0x1E,0x4F,0x7F,0x7A,0x35,0x5C,0x03,0x2D,0x08,0xD0,0x07,0x2D,0x0D,0xD1,0x35, \ +0x18,0x6D,0x78,0x01,0x24,0x03,0x1C,0x02,0x35,0x28,0x18,0x0A,0xE0,0x35,0x18, \ +0x6D,0x78,0x01,0x21,0x02,0x1C,0x02,0x35,0x28,0x18,0x05,0xE0,0x35,0x18,0x6D, \ +0x78,0x02,0x35,0x28,0x18,0x00,0x29,0x01,0xD0,0x00,0x2F,0x02,0xD0,0x00,0x9D, \ +0x85,0x42,0xE1,0xD8,0x00,0x29,0x17,0xD0,0xB0,0x18,0x40,0x78,0x01,0x28,0x01, \ +0xD0,0x02,0xB0,0xF0,0xBD,0x01,0x2F,0x0F,0xD1,0x00,0x2C,0x0D,0xD0,0x01,0x98, \ +0xC0,0x18,0x41,0x78,0xFF,0xF7,0x5E,0xFF,0x00,0x28,0x00,0xD1,0xF1,0xE7,0x05, \ +0x48,0xC1,0x79,0x00,0x29,0x01,0xD1,0x01,0x21,0xC1,0x71,0xEA,0xE7,0x48,0x01, \ +0x00,0x02,0xB0,0x00,0x00,0x02,0x88,0x09,0x00,0x02,0x00,0xB5,0x05,0x49,0x89, \ +0x7C,0x01,0x29,0x04,0xD1,0x01,0x78,0x80,0x29,0x01,0xD1,0xFF,0xF7,0xA8,0xFF, \ +0x00,0xBD,0x00,0x00,0xC0,0x00,0x00,0x02,0x90,0xB5,0x10,0x4C,0x60,0x78,0x00, \ +0x28,0x1A,0xD0,0x0F,0x4F,0x38,0x68,0x40,0x68,0x42,0x7E,0x18,0x30,0x00,0x2A, \ +0x09,0xD0,0x0C,0x49,0x49,0x79,0x91,0x42,0x0F,0xD1,0x0B,0x49,0x02,0x30,0xFB, \ +0xF7,0x16,0xFD,0x00,0x28,0x09,0xD1,0x38,0x68,0x40,0x68,0xC1,0x1D,0x03,0x31, \ +0x06,0x22,0x07,0x48,0xFB,0xF7,0x2A,0xFD,0x01,0x20,0xA0,0x70,0x90,0xBD,0x00, \ +0x00,0x68,0x09,0x00,0x02,0x48,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0x94,0x00, \ +0x00,0x02,0x30,0x01,0x00,0x02,0xB0,0xB4,0x03,0x78,0x00,0x27,0x20,0x49,0x20, \ +0x4A,0x08,0x2B,0x37,0xD1,0xD3,0x78,0x00,0x2B,0x04,0xD0,0xD0,0x7A,0x09,0x68, \ +0x88,0x75,0xB0,0xBC,0xF7,0x46,0x00,0x79,0x40,0x08,0x03,0xD3,0x90,0x7A,0x09, \ +0x68,0x88,0x75,0xF6,0xE7,0x0B,0x68,0x99,0x7D,0xD2,0x7A,0x91,0x42,0x01,0xDD, \ +0x9A,0x75,0xEF,0xE7,0x15,0x4C,0x08,0x19,0x00,0x7C,0x00,0x28,0xEA,0xD1,0x08, \ +0x1C,0x01,0x29,0x0A,0xD3,0x01,0x38,0x25,0x18,0x2D,0x7C,0x00,0x2D,0x03,0xD1, \ +0x01,0x28,0xF8,0xD2,0x00,0x2F,0x01,0xD0,0x98,0x75,0xDC,0xE7,0x8A,0x42,0x06, \ +0xD9,0x01,0x31,0x60,0x18,0x00,0x7C,0x00,0x28,0x03,0xD1,0x8A,0x42,0xF8,0xD8, \ +0x00,0x2F,0x01,0xD0,0x99,0x75,0xCF,0xE7,0x9A,0x75,0xCD,0xE7,0xD0,0x79,0x09, \ +0x68,0x88,0x75,0xC9,0xE7,0x00,0x00,0xBC,0x01,0x00,0x02,0x04,0x01,0x00,0x02, \ +0x00,0x00,0x00,0x02,0x00,0xB5,0x07,0x48,0x81,0x79,0x03,0x29,0x02,0xD0,0x81, \ +0x79,0x04,0x29,0x05,0xD1,0x00,0x21,0x81,0x71,0x07,0x21,0x04,0x20,0xFD,0xF7, \ +0x84,0xFE,0x00,0xBD,0x00,0x00,0x68,0x09,0x00,0x02,0x90,0xB5,0x27,0x48,0x27, \ +0x49,0x00,0x68,0x47,0x68,0x22,0x20,0x38,0x5C,0x10,0x23,0x18,0x40,0x03,0xD0, \ +0x08,0x78,0x00,0x28,0x41,0xD0,0x02,0xE0,0x08,0x78,0x00,0x28,0x3D,0xD1,0x24, \ +0x20,0x38,0x5C,0x00,0x28,0x39,0xD1,0xF8,0x1D,0x1D,0x30,0x44,0x78,0x1D,0x49, \ +0x00,0x2C,0x02,0xD0,0x4A,0x79,0xA2,0x42,0x30,0xD1,0x4A,0x79,0x1B,0x49,0x02, \ +0x30,0xFB,0xF7,0x7F,0xFC,0x00,0x28,0x29,0xD1,0x38,0x19,0x20,0x30,0xC0,0x79, \ +0x00,0x19,0x28,0x30,0x39,0x5C,0x03,0x29,0x21,0xD1,0x38,0x18,0x14,0x49,0x80, \ +0x78,0x09,0x7D,0x88,0x42,0x1B,0xD1,0x13,0x48,0x40,0x7A,0x00,0x28,0x05,0xD0, \ +0x12,0x48,0x08,0x18,0x40,0x7B,0x00,0x28,0x12,0xD0,0x09,0xE0,0x10,0x48,0x10, \ +0x4A,0xC0,0x7A,0x40,0x00,0x10,0x5A,0x01,0x22,0x01,0x39,0x8A,0x40,0x10,0x40, \ +0x07,0xD0,0x01,0x20,0xF8,0xF7,0x2C,0xF8,0x00,0x28,0x02,0xD0,0x02,0x20,0xFF, \ +0xF7,0xFF,0xF9,0x90,0xBD,0x48,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x04,0x01, \ +0x00,0x02,0xDC,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0xB0,0x00,0x00,0x02,0x14, \ +0x01,0x00,0x02,0x88,0x09,0x00,0x02,0x64,0x02,0x00,0x02,0x80,0xB5,0xFD,0xF7, \ +0x4B,0xFD,0x18,0x48,0x00,0xF0,0x84,0xFD,0x17,0x4B,0x18,0x48,0x59,0x7A,0x01, \ +0x29,0x04,0xD1,0x48,0x21,0x41,0x81,0x18,0x21,0x01,0x81,0x03,0xE0,0x90,0x21, \ +0x41,0x81,0x30,0x21,0x01,0x81,0x41,0x89,0x02,0x89,0x12,0x4F,0x89,0x18,0x10, \ 
+0x4A,0x11,0x80,0xC2,0x88,0x80,0x88,0x11,0x18,0x09,0x18,0x39,0x80,0x51,0x18, \ +0xFF,0x31,0x0E,0x4A,0x31,0x31,0x11,0x80,0x19,0x88,0x48,0x43,0x0C,0x49,0x08, \ +0x80,0xD9,0x79,0x0C,0x48,0x01,0x70,0x00,0x78,0x0B,0x49,0x08,0x70,0xF7,0xF7, \ +0x7E,0xFA,0xF8,0xF7,0x2C,0xFE,0x80,0xBD,0x00,0x00,0x5C,0x00,0x00,0x02,0x04, \ +0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xA8,0x01,0x00,0x02,0xA4,0x01,0x00,0x02, \ +0xA6,0x01,0x00,0x02,0xAA,0x01,0x00,0x02,0x91,0x01,0x00,0x02,0x92,0x01,0x00, \ +0x02,0x80,0xB4,0x21,0x48,0x00,0x21,0x01,0x70,0x00,0x20,0x19,0x27,0x1F,0x4A, \ +0xFF,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x00,0x20,0x43,0x27,0x1C, \ +0x4A,0x7F,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x1A,0x48,0x18,0x4A, \ +0x01,0x80,0x1A,0x48,0x1A,0x4B,0x02,0x60,0x13,0x60,0x02,0x68,0xD7,0x1D,0x15, \ +0x37,0x57,0x60,0x02,0x68,0x08,0x3F,0x97,0x60,0x02,0x68,0x11,0x73,0x02,0x68, \ +0x91,0x73,0x07,0x68,0x03,0x22,0xBA,0x75,0x02,0x68,0x91,0x82,0x00,0x68,0x11, \ +0x4A,0x10,0x60,0x11,0x48,0x0C,0x4A,0x01,0x80,0x11,0x48,0x02,0x60,0x13,0x60, \ +0x02,0x68,0xD3,0x1D,0x11,0x33,0x53,0x60,0x02,0x68,0x91,0x81,0x02,0x68,0x11, \ +0x72,0x00,0x68,0x0C,0x49,0x08,0x60,0x0C,0x49,0x01,0x20,0x08,0x70,0x80,0xBC, \ +0xF7,0x46,0x93,0x01,0x00,0x02,0x00,0x11,0x00,0x02,0x00,0xDA,0x00,0x02,0xE8, \ +0x01,0x00,0x02,0xBC,0x01,0x00,0x02,0x00,0x00,0x00,0x80,0x60,0x02,0x00,0x02, \ +0xEA,0x01,0x00,0x02,0x48,0x01,0x00,0x02,0x5C,0x02,0x00,0x02,0xD7,0x01,0x00, \ +0x02,0xF0,0xB5,0x82,0xB0,0x39,0x4E,0xF7,0x1D,0x69,0x37,0xB8,0x78,0x04,0x23, \ +0x18,0x40,0x40,0x24,0x00,0x25,0x00,0x28,0x03,0xD1,0x7D,0x71,0x3C,0x71,0x02, \ +0xB0,0xF0,0xBD,0x33,0x49,0xA4,0x20,0x08,0x70,0x10,0x20,0x48,0x70,0x32,0x48, \ +0x03,0x23,0xC0,0x88,0x9B,0x03,0x18,0x43,0x48,0x80,0xC8,0x1D,0x03,0x30,0x06, \ +0x22,0x2E,0x49,0xFB,0xF7,0x9D,0xFB,0x2E,0x49,0x2E,0x48,0x06,0x22,0xFB,0xF7, \ +0x98,0xFB,0x10,0x20,0x2D,0x49,0xB0,0x66,0xC8,0x79,0x10,0x21,0xF8,0xF7,0x5B, \ +0xFD,0xF0,0x66,0x24,0x48,0x70,0x66,0x01,0x20,0x38,0x70,0xF8,0xF7,0x98,0xFA, \ +0xF8,0xF7,0xFE,0xFA,0x3D,0x70,0x82,0x25,0x00,0x28,0x2E,0xD1,0x23,0x49,0x24, \ +0x48,0xC9,0x79,0x24,0x4A,0xC0,0x88,0x49,0x00,0x51,0x5A,0x40,0x18,0x22,0x49, \ +0x09,0x88,0x41,0x18,0x01,0x20,0x38,0x71,0x04,0x20,0x70,0x60,0x01,0x20,0xF8, \ +0xF7,0x24,0xFE,0x00,0x22,0xD2,0x43,0x00,0x92,0x01,0x22,0x11,0x21,0x01,0xAB, \ +0x1B,0x48,0xFA,0xF7,0x23,0xFF,0x01,0x98,0x41,0x08,0x01,0xD3,0x3C,0x71,0x1A, \ +0xE0,0x40,0x09,0x18,0xD3,0x78,0x79,0x17,0x49,0x01,0x30,0x00,0x06,0x00,0x0E, \ +0x78,0x71,0x09,0x7C,0x88,0x42,0x01,0xDA,0x3D,0x71,0x0D,0xE0,0x3C,0x71,0x0B, \ +0xE0,0x78,0x79,0x10,0x49,0x01,0x30,0x00,0x06,0x00,0x0E,0x78,0x71,0x09,0x7C, \ +0x88,0x42,0x01,0xDA,0x3D,0x71,0x00,0xE0,0x3C,0x71,0x97,0xE7,0x18,0x09,0x00, \ +0x02,0xD8,0x07,0x00,0x02,0x80,0x00,0x00,0x02,0x5C,0x00,0x00,0x02,0x8E,0x00, \ +0x00,0x02,0xDC,0x07,0x00,0x02,0x04,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xAC, \ +0x01,0x00,0x02,0xAA,0x01,0x00,0x02,0x2C,0x07,0x00,0x02,0xC0,0x00,0x00,0x02, \ +0x90,0xB5,0xC0,0x20,0xFA,0xF7,0x48,0xFF,0x07,0x1C,0x14,0x48,0x01,0x68,0x01, \ +0x31,0x01,0x60,0x13,0x48,0xFB,0xF7,0x68,0xFB,0x00,0x29,0x1B,0xD1,0x11,0x49, \ +0x07,0x22,0x0C,0x78,0x52,0x06,0x01,0x20,0x00,0x2C,0x0F,0x4B,0x07,0xD1,0x1C, \ +0x68,0x03,0x1C,0xA3,0x40,0x94,0x69,0x23,0x43,0x93,0x61,0x08,0x70,0x0B,0xE0, \ +0x0B,0x4C,0xA4,0x79,0x05,0x2C,0x07,0xD0,0x1C,0x68,0x93,0x69,0xA0,0x40,0xC0, \ +0x43,0x18,0x40,0x90,0x61,0x00,0x20,0x08,0x70,0x38,0x1C,0xFA,0xF7,0x1F,0xFF, \ +0x90,0xBD,0xD0,0x02,0x00,0x02,0x20,0x4E,0x00,0x00,0x3A,0x01,0x00,0x02,0x9C, \ +0x02,0x00,0x02,0x68,0x09,0x00,0x02,0x90,0xB5,0xC0,0x20,0xFA,0xF7,0x10,0xFF, \ 
+0x07,0x1C,0x0F,0x48,0x81,0x7A,0x00,0x29,0x15,0xD1,0x01,0x7B,0x01,0x29,0x12, \ +0xD1,0xC1,0x7A,0x00,0x29,0x0F,0xD1,0x00,0x24,0x0A,0x49,0x50,0x30,0x0C,0x70, \ +0x44,0x70,0x00,0xF0,0x64,0xFB,0x08,0x48,0x01,0x21,0x84,0x61,0x07,0x20,0xFD, \ +0xF7,0x9C,0xFC,0x06,0x49,0x01,0x20,0x08,0x70,0x38,0x1C,0xFA,0xF7,0xF2,0xFE, \ +0x90,0xBD,0x00,0x00,0x18,0x09,0x00,0x02,0x3C,0x01,0x00,0x02,0x80,0x00,0x00, \ +0x04,0xA9,0x02,0x00,0x02,0x90,0xB5,0x14,0x4C,0x14,0x4F,0x61,0x79,0x03,0x29, \ +0x0C,0xD1,0x01,0x23,0x1B,0x03,0x98,0x42,0x08,0xD1,0x19,0x21,0xC9,0x02,0x02, \ +0x20,0xF8,0xF7,0x6F,0xFD,0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0xB8,0x78, \ +0x01,0x28,0x0D,0xD1,0x01,0x21,0x00,0xF0,0xCF,0xFA,0x60,0x79,0x02,0x28,0x08, \ +0xD1,0xB8,0x78,0x08,0x23,0x18,0x43,0xB8,0x70,0xB8,0x78,0x40,0x08,0x40,0x00, \ +0xB8,0x70,0x90,0xBD,0xB8,0x78,0x40,0x08,0x40,0x00,0xB8,0x70,0x90,0xBD,0xB0, \ +0x00,0x00,0x02,0x88,0x09,0x00,0x02,0x80,0xB5,0x19,0x48,0x81,0x7A,0x00,0x29, \ +0x1D,0xD1,0x01,0x7B,0x01,0x29,0x1A,0xD1,0xC0,0x7A,0x00,0x28,0x17,0xD1,0x15, \ +0x4F,0xF8,0x1D,0x29,0x30,0x40,0x79,0x03,0x28,0x15,0xD1,0xF8,0xF7,0x19,0xFD, \ +0x39,0x88,0x11,0x4F,0x89,0x02,0x08,0x1A,0x7D,0x23,0xDB,0x00,0x98,0x42,0x08, \ +0xD9,0xC1,0x1A,0x06,0x20,0xF8,0xF7,0x31,0xFD,0x00,0xF0,0xFF,0xFA,0x01,0x20, \ +0xB8,0x70,0x80,0xBD,0x00,0x20,0xB8,0x70,0x80,0xBD,0x01,0x20,0x80,0x06,0x08, \ +0x49,0x40,0x6A,0x7D,0x23,0xDB,0x00,0x49,0x68,0xC0,0x18,0x88,0x42,0xF1,0xD2, \ +0x00,0xF0,0xED,0xFA,0x80,0xBD,0x18,0x09,0x00,0x02,0x80,0x00,0x00,0x02,0x88, \ +0x09,0x00,0x02,0x80,0x00,0x00,0x04,0xF0,0xB5,0xC0,0x20,0xFA,0xF7,0x7A,0xFE, \ +0x05,0x1C,0x29,0x48,0x00,0x27,0x07,0x70,0x07,0x20,0x40,0x06,0xC1,0x69,0x10, \ +0x23,0x99,0x43,0xC1,0x61,0x25,0x48,0x26,0x4A,0xC1,0x69,0x03,0x0C,0x19,0x43, \ +0xC1,0x61,0x81,0x21,0xD1,0x60,0x05,0x21,0x09,0x06,0xCF,0x62,0x0F,0x63,0xC1, \ +0x69,0x7B,0x1F,0x19,0x40,0xC1,0x61,0x1F,0x49,0xC2,0x69,0x09,0x68,0xC9,0x43, \ +0x11,0x40,0x0F,0x22,0x12,0x06,0xC1,0x61,0x11,0x89,0x1C,0x49,0xCB,0x69,0x03, \ +0x05,0x00,0x68,0xDC,0x68,0x10,0x88,0xDB,0x6B,0x04,0x26,0x19,0x48,0x01,0x24, \ +0x07,0x70,0x18,0x48,0x8F,0x61,0x06,0x70,0x18,0x48,0x04,0x70,0x18,0x48,0x07, \ +0x70,0x18,0x48,0x07,0x70,0x18,0x48,0xC7,0x74,0x18,0x48,0x07,0x70,0xF9,0xF7, \ +0xEF,0xFF,0x15,0x48,0x16,0x49,0x06,0x75,0x50,0x30,0x84,0x71,0x47,0x70,0x02, \ +0x20,0x08,0x70,0x14,0x49,0x0C,0x70,0x14,0x49,0x4C,0x71,0x14,0x49,0x8F,0x70, \ +0x14,0x49,0x08,0x70,0xF7,0xF7,0x3F,0xF8,0x28,0x1C,0xFA,0xF7,0x28,0xFE,0xF0, \ +0xBD,0x00,0x00,0xD6,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0x00,0x01,0x00,0x05, \ +0xAC,0x02,0x00,0x02,0x80,0x00,0x00,0x04,0x3C,0x01,0x00,0x02,0x4B,0x02,0x00, \ +0x02,0x56,0x02,0x00,0x02,0x36,0x01,0x00,0x02,0x37,0x01,0x00,0x02,0x18,0x09, \ +0x00,0x02,0x49,0x02,0x00,0x02,0xA8,0x02,0x00,0x02,0xA9,0x02,0x00,0x02,0xB0, \ +0x00,0x00,0x02,0x88,0x09,0x00,0x02,0x3B,0x01,0x00,0x02,0x90,0xB5,0x22,0x49, \ +0x00,0x27,0xC8,0x1D,0x49,0x30,0x82,0x79,0x01,0x2A,0x00,0xD0,0x47,0x71,0xCA, \ +0x1D,0x69,0x32,0x93,0x79,0x1D,0x49,0x00,0x2B,0x03,0xD0,0x97,0x71,0x01,0x20, \ +0x88,0x73,0x90,0xBD,0x52,0x78,0x00,0x2A,0x02,0xD0,0xFD,0xF7,0xA0,0xFB,0x90, \ +0xBD,0x02,0x78,0x00,0x2A,0x03,0xD0,0x47,0x71,0xFD,0xF7,0x4F,0xFA,0x90,0xBD, \ +0x42,0x79,0x00,0x2A,0x02,0xD0,0xFD,0xF7,0x7F,0xFA,0x90,0xBD,0x82,0x78,0x00, \ +0x2A,0x02,0xD0,0xFD,0xF7,0x63,0xFA,0x90,0xBD,0xC9,0x7B,0x00,0x29,0x02,0xD0, \ +0xFD,0xF7,0x69,0xFA,0x90,0xBD,0x80,0x79,0x05,0x28,0x0D,0xD1,0x0A,0x4C,0x20, \ +0x68,0x01,0x7B,0xC9,0x09,0x02,0xD3,0xF9,0xF7,0x96,0xF8,0x90,0xBD,0x01,0x7B, \ +0x10,0x29,0x02,0xD1,0xF9,0xF7,0xA8,0xF9,0x20,0x60,0x38,0x1C,0x90,0xBD,0x00, \ 
+0x00,0x18,0x09,0x00,0x02,0x28,0x09,0x00,0x02,0xBC,0x01,0x00,0x02,0xF0,0xB5, \ +0x14,0x1C,0x0D,0x1C,0x07,0x1C,0x01,0x28,0x01,0xD3,0x0E,0x2F,0x01,0xD9,0x00, \ +0x20,0xF0,0xBD,0x29,0x4E,0xF0,0x69,0x33,0x0C,0x18,0x43,0xF0,0x61,0x28,0x48, \ +0xF1,0x69,0x00,0x68,0x08,0x43,0xF0,0x61,0x26,0x48,0xF1,0x69,0x00,0x68,0x08, \ +0x43,0xF0,0x61,0xF0,0x69,0x04,0x23,0x18,0x43,0xF0,0x61,0x23,0x48,0xF6,0xF7, \ +0xA4,0xFF,0xF0,0x69,0x40,0x23,0x18,0x43,0xF0,0x61,0x64,0x20,0xF6,0xF7,0x9D, \ +0xFF,0x01,0x2D,0x0F,0xD1,0x00,0xF0,0x45,0xFA,0x1D,0x48,0xC1,0x19,0xC8,0x1F, \ +0x09,0x38,0xC2,0x7B,0xFF,0x2A,0x06,0xD0,0x1A,0x48,0xC3,0x1D,0x19,0x33,0xDA, \ +0x71,0x40,0x31,0x89,0x78,0xC1,0x73,0x00,0xF0,0x87,0xF8,0x07,0x20,0x40,0x06, \ +0x0E,0x2F,0x0F,0xD1,0x00,0x2C,0x0D,0xD0,0x14,0x4B,0x14,0x4A,0x00,0x21,0x51, \ +0x63,0x5C,0x5C,0x01,0x31,0x18,0x29,0x94,0x63,0xF9,0xD3,0x81,0x69,0x49,0x08, \ +0x49,0x00,0x81,0x61,0x03,0xE0,0x81,0x69,0x01,0x23,0x19,0x43,0x81,0x61,0x38, \ +0x1C,0x00,0xF0,0x18,0xF8,0xF0,0x69,0x01,0x23,0x9B,0x02,0x98,0x43,0xF0,0x61, \ +0x01,0x20,0xF0,0xBD,0x00,0x00,0x40,0x00,0x00,0x04,0xAC,0x02,0x00,0x02,0xB0, \ +0x02,0x00,0x02,0xDC,0x05,0x00,0x00,0x2C,0x0A,0x00,0x02,0x78,0x0B,0x00,0x02, \ +0x10,0x9A,0x00,0x00,0x80,0x00,0x00,0x05,0x90,0xB5,0x16,0x4C,0x07,0x1C,0x02, \ +0x20,0x61,0x68,0x00,0xF0,0x31,0xF8,0x00,0x20,0x21,0x68,0x00,0xF0,0x2D,0xF8, \ +0x12,0x48,0xBF,0x00,0x38,0x18,0x40,0x38,0xC1,0x6B,0x01,0x20,0x00,0xF0,0x25, \ +0xF8,0x05,0x20,0xA1,0x68,0x00,0xF0,0x21,0xF8,0x0D,0x48,0x01,0x68,0x08,0x20, \ +0x00,0xF0,0x1C,0xF8,0x0B,0x48,0x01,0x68,0x07,0x20,0x00,0xF0,0x17,0xF8,0x0A, \ +0x48,0x38,0x18,0x40,0x38,0xC1,0x6B,0x04,0x20,0x00,0xF0,0x10,0xF8,0xFF,0x20, \ +0xF5,0x30,0xF6,0xF7,0x2A,0xFF,0x90,0xBD,0x00,0x00,0xD4,0x02,0x00,0x02,0xE0, \ +0x02,0x00,0x02,0xB4,0x02,0x00,0x02,0xB8,0x02,0x00,0x02,0x18,0x03,0x00,0x02, \ +0x90,0xB4,0x0B,0x4A,0x13,0x68,0xDF,0x43,0x0A,0x4B,0xDC,0x69,0x27,0x40,0xDF, \ +0x61,0x07,0x05,0x89,0x00,0x39,0x43,0x80,0x08,0x08,0x43,0x18,0x62,0x18,0x1C, \ +0x01,0x6A,0xC9,0x0D,0xFC,0xD3,0x11,0x68,0xC2,0x69,0x11,0x43,0xC1,0x61,0x90, \ +0xBC,0xF7,0x46,0xB0,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x80,0xB4,0x31,0x48, \ +0x31,0x4B,0xC0,0x7C,0x00,0x28,0x01,0xD1,0x6B,0x20,0xD8,0x60,0x2F,0x48,0x05, \ +0x21,0x02,0x78,0x09,0x06,0x0A,0x60,0x42,0x78,0x4A,0x60,0x82,0x78,0x8A,0x60, \ +0xC2,0x78,0xCA,0x60,0x02,0x79,0x0A,0x61,0xC2,0x79,0xCA,0x61,0x42,0x7A,0x4A, \ +0x62,0x82,0x7A,0x8A,0x62,0xC2,0x7A,0xCA,0x62,0x02,0x7B,0x0A,0x63,0x42,0x7B, \ +0x4A,0x63,0x82,0x7B,0x8A,0x63,0xC2,0x7B,0xCA,0x63,0x02,0x7C,0x21,0x49,0x0A, \ +0x60,0x42,0x7C,0x4A,0x60,0x82,0x7C,0x8A,0x60,0xC2,0x7C,0xCA,0x60,0x02,0x7D, \ +0x0A,0x61,0x42,0x7D,0x4A,0x61,0x82,0x7D,0x8A,0x61,0xC2,0x7D,0xCA,0x61,0xC2, \ +0x1D,0x19,0x32,0x17,0x78,0x19,0x49,0x30,0x30,0x0F,0x60,0x57,0x78,0x4F,0x60, \ +0x97,0x78,0x8F,0x60,0xD7,0x78,0xCF,0x60,0x17,0x79,0x0F,0x61,0x57,0x79,0x4F, \ +0x61,0x97,0x79,0x8F,0x61,0xD7,0x79,0xCF,0x61,0x57,0x7A,0x4F,0x62,0x97,0x7A, \ +0x8F,0x62,0xD7,0x7A,0xCF,0x62,0x17,0x7B,0x0F,0x63,0xD2,0x7B,0xCA,0x63,0x40, \ +0x7B,0x0C,0x4A,0x50,0x63,0x06,0x20,0x18,0x60,0x0B,0x4A,0x00,0x20,0x48,0x63, \ +0x13,0x5C,0x01,0x30,0x18,0x28,0x8B,0x63,0xF9,0xD3,0x80,0xBC,0xF7,0x46,0x00, \ +0x00,0x18,0x09,0x00,0x02,0x00,0x01,0x00,0x05,0x78,0x0B,0x00,0x02,0x40,0x00, \ +0x00,0x05,0x80,0x00,0x00,0x05,0xC0,0x00,0x00,0x05,0xF8,0x99,0x00,0x00,0xF0, \ +0xB5,0x35,0x4D,0x07,0x1C,0xE8,0x7A,0x03,0x28,0xFC,0xD0,0xC0,0x20,0xFA,0xF7, \ +0x7B,0xFC,0x04,0x1C,0x01,0x20,0x80,0x06,0xC1,0x68,0xC0,0x6B,0x2F,0x48,0x00, \ +0x68,0x0F,0x20,0x00,0x06,0x01,0x88,0x00,0x89,0x2D,0x48,0xC0,0x69,0x2D,0x48, \ 
+0xC2,0x19,0xD0,0x1F,0x09,0x38,0xC3,0x7B,0x2C,0x48,0xC1,0x1D,0x19,0x31,0xFF, \ +0x2B,0x03,0xD0,0xCB,0x71,0x40,0x32,0x92,0x78,0xC2,0x73,0xCA,0x79,0x28,0x49, \ +0xCA,0x61,0xC0,0x7B,0x05,0x22,0x12,0x06,0xD0,0x63,0x07,0x22,0x52,0x06,0x0E, \ +0x2F,0x0C,0xD1,0x24,0x4B,0x00,0x20,0x48,0x63,0x1E,0x5C,0x01,0x30,0x18,0x28, \ +0x8E,0x63,0xF9,0xD3,0x90,0x69,0x40,0x08,0x40,0x00,0x90,0x61,0x0B,0xE0,0x1E, \ +0x4B,0x00,0x20,0x48,0x63,0x1E,0x5C,0x01,0x30,0x18,0x28,0x8E,0x63,0xF9,0xD3, \ +0x90,0x69,0x01,0x23,0x18,0x43,0x90,0x61,0x19,0x48,0x07,0x75,0x00,0x7D,0xFF, \ +0xF7,0xED,0xFE,0x01,0x20,0xFD,0xF7,0xF4,0xF9,0x06,0x20,0x68,0x72,0x08,0x20, \ +0x15,0x49,0xF8,0xF7,0xC4,0xFA,0x20,0x1C,0xFA,0xF7,0x2B,0xFC,0x68,0x7C,0x01, \ +0x28,0x05,0xD1,0x00,0x22,0x10,0x21,0x10,0x48,0xFA,0xF7,0xC7,0xFA,0xF0,0xBD, \ +0x68,0x7C,0x02,0x28,0xFB,0xD1,0x00,0x22,0x10,0x21,0x0D,0x48,0xFA,0xF7,0xBE, \ +0xFA,0xF0,0xBD,0x00,0x00,0x18,0x09,0x00,0x02,0x40,0x00,0x00,0x04,0x80,0x00, \ +0x00,0x04,0x2C,0x0A,0x00,0x02,0x78,0x0B,0x00,0x02,0x80,0x00,0x00,0x05,0x10, \ +0x9A,0x00,0x00,0xF8,0x99,0x00,0x00,0x00,0x00,0x00,0x02,0x88,0x13,0x00,0x00, \ +0xCC,0x06,0x00,0x02,0xEC,0x06,0x00,0x02,0xF0,0xB5,0x0F,0x1C,0x07,0x21,0x49, \ +0x06,0xCA,0x69,0x52,0x09,0x03,0xD3,0xCA,0x69,0x10,0x23,0x9A,0x43,0xCA,0x61, \ +0x27,0x4D,0x01,0x28,0x0C,0xD1,0x26,0x4C,0x66,0x68,0xF6,0xF7,0x0E,0xFE,0x31, \ +0x1A,0x49,0x01,0x09,0x18,0x61,0x60,0x69,0x6B,0x09,0x1A,0x49,0x01,0x08,0x18, \ +0x68,0x63,0x21,0x4C,0x01,0x2F,0x21,0xD1,0xE8,0x69,0xBB,0x02,0x18,0x43,0xE8, \ +0x61,0xE8,0x69,0x04,0x23,0x18,0x43,0xE8,0x61,0x1C,0x49,0x6B,0x20,0xC8,0x60, \ +0x1C,0x49,0x05,0x20,0xCA,0x7A,0x00,0x06,0xC2,0x62,0x09,0x7B,0x01,0x63,0xF6, \ +0xF7,0x02,0xFE,0xE8,0x69,0xFF,0x23,0x01,0x33,0x18,0x43,0xE8,0x61,0xE8,0x69, \ +0x01,0x23,0x9B,0x02,0x98,0x43,0xE8,0x61,0x32,0x20,0xF6,0xF7,0xCD,0xFD,0x08, \ +0xE0,0x11,0x48,0x01,0x22,0x00,0x21,0x00,0x7D,0xFF,0xF7,0x02,0xFE,0xE1,0x1D, \ +0x69,0x31,0x08,0x73,0x01,0x20,0x80,0x06,0xC0,0x68,0x01,0x20,0xFD,0xF7,0x71, \ +0xF9,0x01,0x20,0x20,0x72,0x06,0x20,0x4B,0x21,0xC9,0x00,0x60,0x72,0x08,0x20, \ +0xF8,0xF7,0x3E,0xFA,0xF0,0xBD,0x00,0x00,0x40,0x00,0x00,0x04,0x80,0x00,0x00, \ +0x04,0x18,0x09,0x00,0x02,0x00,0x01,0x00,0x05,0x78,0x0B,0x00,0x02,0x00,0x00, \ +0x00,0x02,0xB0,0xB5,0x23,0x48,0x23,0x4F,0x00,0x68,0xF9,0x69,0xC0,0x43,0x08, \ +0x40,0xF8,0x61,0x0A,0x20,0xF6,0xF7,0x9B,0xFD,0xF8,0x69,0x01,0x23,0x9B,0x02, \ +0x18,0x43,0xF8,0x61,0x11,0x21,0x05,0x20,0x00,0x06,0xC1,0x60,0x1B,0x4A,0x81, \ +0x21,0xD1,0x60,0x00,0x21,0xC1,0x62,0x01,0x63,0xF8,0x69,0x4B,0x1F,0x18,0x40, \ +0xF8,0x61,0x01,0x20,0xF8,0xF7,0x29,0xFA,0x08,0x20,0xF8,0xF7,0x26,0xFA,0x01, \ +0x20,0x80,0x06,0xC1,0x68,0xC0,0x6B,0x12,0x4C,0x38,0x68,0x0F,0x20,0x00,0x06, \ +0x00,0x88,0x01,0x25,0x65,0x72,0xF8,0xF7,0xD1,0xF9,0xE0,0x1D,0x69,0x30,0x85, \ +0x70,0x0D,0x4D,0x6C,0x68,0xF6,0xF7,0x82,0xFD,0x21,0x1A,0x49,0x09,0x09,0x18, \ +0x69,0x60,0x79,0x6B,0x09,0x1A,0x49,0x09,0x08,0x18,0x78,0x63,0x07,0x20,0x40, \ +0x06,0xC1,0x69,0x10,0x23,0x19,0x43,0xC1,0x61,0xB0,0xBD,0xAC,0x02,0x00,0x02, \ +0x40,0x00,0x00,0x04,0x00,0x01,0x00,0x05,0x18,0x09,0x00,0x02,0x80,0x00,0x00, \ +0x04,0x80,0xB4,0x28,0x48,0x08,0x21,0x01,0x70,0x2B,0x21,0x41,0x70,0x07,0x21, \ +0x81,0x70,0x19,0x21,0xC1,0x70,0x00,0x21,0x01,0x71,0x09,0x22,0x23,0x4B,0xC2, \ +0x71,0x1A,0x7F,0x42,0x72,0x5F,0x7F,0x87,0x72,0x13,0x27,0xC7,0x72,0x0C,0x27, \ +0x07,0x73,0x05,0x27,0x47,0x73,0xFF,0x27,0x87,0x73,0x9F,0x7F,0xC7,0x73,0x0A, \ +0x27,0x07,0x74,0x14,0x27,0x47,0x74,0x37,0x27,0x87,0x74,0x6E,0x27,0xC7,0x74, \ +0x03,0x27,0x07,0x75,0x41,0x75,0x81,0x75,0xC1,0x75,0xDA,0x7F,0xC1,0x1D,0x19, \ 
+0x31,0x0A,0x70,0xAA,0x22,0x4A,0x70,0xF8,0x22,0x8A,0x70,0x1A,0x22,0xCA,0x70, \ +0x1E,0x22,0x0A,0x71,0x24,0x22,0x4A,0x71,0x41,0x22,0x8A,0x71,0xDA,0x1D,0x19, \ +0x32,0x13,0x78,0xCB,0x71,0x53,0x78,0x4B,0x72,0x93,0x78,0x8B,0x72,0xD3,0x78, \ +0xCB,0x72,0x13,0x79,0x0B,0x73,0x52,0x79,0xCA,0x73,0xC2,0x1D,0x2F,0x21,0x29, \ +0x32,0x51,0x73,0x00,0x7D,0x04,0x49,0x08,0x70,0x80,0xBC,0xF7,0x46,0x00,0x00, \ +0x78,0x0B,0x00,0x02,0x2C,0x0A,0x00,0x02,0x91,0x01,0x00,0x02,0x02,0x79,0x41, \ +0x79,0x12,0x02,0x11,0x43,0xC2,0x78,0x12,0x04,0x11,0x43,0x82,0x78,0x12,0x06, \ +0x0A,0x43,0x01,0x21,0x89,0x06,0x8A,0x61,0x42,0x78,0x00,0x78,0x00,0x02,0x10, \ +0x43,0xC8,0x61,0xF7,0x46,0x00,0xB5,0x0C,0x49,0x0D,0x48,0x41,0x61,0x20,0x21, \ +0x81,0x61,0x00,0x22,0x01,0x05,0x0A,0x61,0xC2,0x01,0x42,0x60,0x05,0x22,0xC2, \ +0x60,0x08,0x4A,0x82,0x62,0xF2,0x22,0x82,0x60,0x32,0x22,0x4A,0x61,0xCA,0x68, \ +0xC9,0x6B,0x00,0x68,0x00,0x21,0x00,0x20,0x00,0xF0,0x07,0xF8,0x00,0xBD,0x04, \ +0x84,0x00,0x00,0x40,0x00,0x00,0x04,0x81,0xFF,0x00,0x00,0x02,0x1C,0x01,0x20, \ +0x80,0x06,0x82,0x62,0x41,0x62,0xF7,0x46,0x80,0xB5,0x1E,0x48,0x20,0x23,0x81, \ +0x69,0x1E,0x4F,0x99,0x43,0x81,0x61,0x1C,0x48,0x81,0x78,0x1D,0x48,0x00,0x29, \ +0x0F,0xD0,0x01,0x7D,0x04,0x29,0x0C,0xD0,0x01,0x21,0xC1,0x77,0x03,0x21,0x41, \ +0x77,0xF8,0xF7,0x13,0xF9,0x39,0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0xF8,0xF7, \ +0x31,0xF9,0x80,0xBD,0xF9,0x1D,0x29,0x31,0x0A,0x79,0x02,0x2A,0xF9,0xD1,0xC2, \ +0x1D,0x49,0x32,0x92,0x79,0x05,0x2A,0xF4,0xD1,0x49,0x79,0x01,0x29,0xF1,0xDD, \ +0xC7,0x1D,0x69,0x37,0xB8,0x78,0x01,0x28,0x05,0xD1,0x01,0x21,0x00,0x20,0xFF, \ +0xF7,0x84,0xFE,0x00,0x20,0xB8,0x70,0xB8,0x78,0x40,0x08,0x40,0x00,0xB8,0x70, \ +0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0x80,0xBD,0x00,0x00,0x80,0x00,0x00, \ +0x04,0x04,0x01,0x00,0x02,0x80,0x00,0x00,0x02,0x18,0x09,0x00,0x02,0x80,0xB5, \ +0xF8,0xF7,0xC3,0xFB,0x06,0x48,0x01,0x21,0x41,0x60,0x00,0x27,0x47,0x77,0x01, \ +0x20,0xF6,0xF7,0x89,0xFC,0x03,0x48,0x07,0x83,0x87,0x82,0x80,0xBD,0x00,0x00, \ +0x18,0x09,0x00,0x02,0x20,0x00,0x20,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x01,0x01,0xFF,0xFF,0x02,0x02,0xFC,0xFC, \ +0x15,0x47,0x70,0x70,0x47,0x15,0xFC,0xFC,0x02,0x02,0xFF,0xFF,0x01,0x01,0x00, \ +0xFF,0xFD,0xFD,0x02,0x08,0x04,0xF5,0xEE,0x04,0x33,0x5C,0x5C,0x33,0x04,0xEE, \ +0xF5,0x04,0x08,0x02,0xFD,0xFD,0xFF,0x00} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/fw-rfmd-0.100.4-16.h linux.22-ac2/drivers/usb/atmel/fw-rfmd-0.100.4-16.h --- linux.vanilla/drivers/usb/atmel/fw-rfmd-0.100.4-16.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/fw-rfmd-0.100.4-16.h 2003-08-13 21:00:21.000000000 +0100 @@ -0,0 +1,2449 @@ +/**************************************************************************** + * The following firmware has been taken (and reformatted slighly) from the * + * Atmel (atmelwlandriver) driver source. * + * * + * Target: AT76C503 with RFMD radio * + * Version: 0.100.4 #16 * + ****************************************************************************/ + +/**************************************************************************/ +/* */ +/* Copyright (c) 1999-2000 by Atmel Corporation */ +/* */ +/* This software is copyrighted by and is the sole property of Atmel */ +/* Corporation. All rights, title, ownership, or other interests */ +/* in the software remain the property of Atmel Corporation. This */ +/* software may only be used in accordance with the corresponding */ +/* license agreement. 
Any un-authorized use, duplication, transmission, */ +/* distribution, or disclosure of this software is expressly forbidden. */ +/* */ +/* This Copyright notice may not be removed or modified without prior */ +/* written consent of Atmel Corporation. */ +/* */ +/* Atmel Corporation, Inc. reserves the right to modify this software */ +/* without notice. */ +/* */ +/* Atmel Corporation. */ +/* 2325 Orchard Parkway literature@atmel.com */ +/* San Jose, CA 95131 http://www.atmel.com */ +/* */ +/**************************************************************************/ +/**************************************************************************/ +/* */ +/* Automatically generated FW file for AT76C502A */ +/* */ +/**************************************************************************/ + +#define FW_503RFMD_INTERNAL { \ +0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F, \ +0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1, \ +0x9F,0xE5,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0E,0x04,0xA0,0xE3,0x00, \ +0x10,0xA0,0xE3,0x81,0x11,0xA0,0xE1,0x00,0x10,0x81,0xE3,0x00,0x10,0x80,0xE5, \ +0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3,0x04,0x10,0x80,0xE5,0x0C,0x10,0x80, \ +0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90,0xE5,0x48,0xD1,0x9F,0xE5,0x46,0x13, \ +0x00,0xEB,0x44,0x11,0x9F,0xE5,0xD0,0x20,0x9F,0xE5,0xD1,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10,0xC1,0xE3,0x04,0x10,0x41,0xE2, \ +0x01,0xD0,0xA0,0xE1,0x00,0xA0,0xA0,0xE3,0x00,0xB0,0xA0,0xE3,0xB0,0x20,0x9F, \ +0xE5,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10, \ +0xC1,0xE3,0x04,0x10,0x41,0xE2,0x01,0xD0,0xA0,0xE1,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x84,0x30,0x9F,0xE5,0x00,0x10,0x83,0xE5,0x01,0xD0,0xA0,0xE1, \ +0x74,0x00,0x9F,0xE5,0x01,0x00,0x80,0xE3,0x0F,0xE0,0xA0,0xE1,0x10,0xFF,0x2F, \ +0xE1,0x00,0xA0,0x00,0x47,0x64,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x04,0x00, \ +0x80,0xE2,0x6C,0x10,0x9F,0xE5,0x6C,0x30,0x9F,0xE5,0x5C,0x20,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0x00,0x20,0x83,0xE5,0x02,0x00,0x80,0xE0,0x5C,0x10,0x9F,0xE5, \ +0x00,0x20,0xA0,0xE3,0x00,0x20,0x81,0xE5,0x44,0x20,0x9F,0xE5,0x00,0x00,0x82, \ +0xE5,0x1E,0xFF,0x2F,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF, \ +0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x65,0x0E,0x00,0xEA,0x02, \ +0xF0,0x92,0xF8,0x22,0x48,0x87,0x46,0x7E,0x0E,0x00,0xEA,0x01,0xF0,0x9E,0xFE, \ +0x20,0x48,0x87,0x46,0xB5,0x01,0x00,0x00,0xB4,0x03,0x00,0x02,0x00,0x01,0x00, \ +0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x38,0x04,0x00,0x02,0x58,0x04, \ +0x00,0x02,0x5C,0x04,0x00,0x02,0x60,0x04,0x00,0x02,0xFE,0xFF,0xFF,0xEA,0xFE, \ +0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0,0xE3, \ +0x0E,0xF0,0xA0,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0, \ +0xE3,0x0E,0xF0,0xA0,0xE1,0x00,0x00,0xA0,0xE3,0x0E,0xF0,0xA0,0xE1,0x20,0x00, \ +0x00,0x00,0x04,0x01,0x00,0x00,0x08,0x01,0x00,0x00,0x0C,0x01,0x00,0x00,0x10, \ +0x01,0x00,0x00,0x14,0x01,0x00,0x00,0x18,0x01,0x00,0x00,0x24,0x01,0x00,0x00, \ +0x00,0x60,0x00,0x01,0xE0,0x59,0x00,0x01,0xA5,0x3B,0x00,0x00,0x95,0x3C,0x00, \ +0x00,0x00,0xB5,0x03,0xF0,0x4F,0xFC,0x00,0x20,0x00,0xBD,0x80,0xB5,0x86,0xB0, \ +0x07,0x1C,0x00,0x21,0x04,0x91,0xFF,0x21,0x01,0x22,0x91,0x31,0x01,0x20,0x03, \ +0x90,0x01,0x91,0x05,0x92,0x02,0x92,0x17,0x4A,0x19,0xA1,0x17,0x48,0x01,0x23, \ +0x00,0x97,0x03,0xF0,0x25,0xFE,0x00,0x21,0x04,0x91,0xFF,0x21,0x91,0x31,0x01, \ +0x22,0x05,0x92,0xFB,0x1D,0xFF,0x33,0x03,0x22,0x03,0x20,0x8A,0x33,0x00,0x93, \ 
+0x03,0x90,0x02,0x92,0x01,0x91,0x13,0xA1,0x11,0x4A,0x11,0x48,0x02,0x23,0x03, \ +0xF0,0x10,0xFE,0x13,0x48,0x14,0xA1,0x03,0xF0,0x82,0xFE,0x16,0x48,0x17,0xA1, \ +0x03,0xF0,0x7E,0xFE,0x1A,0x48,0x1B,0xA1,0x03,0xF0,0x7A,0xFE,0x1E,0x48,0x1F, \ +0xA1,0x03,0xF0,0x76,0xFE,0x03,0xF0,0x5A,0xFA,0x06,0xB0,0x80,0xBD,0x05,0x13, \ +0x00,0x00,0xB8,0x05,0x00,0x02,0x54,0x78,0x20,0x74,0x68,0x72,0x65,0x61,0x64, \ +0x00,0x00,0x00,0x91,0x25,0x00,0x00,0x48,0x06,0x00,0x02,0x4D,0x67,0x6D,0x20, \ +0x74,0x68,0x72,0x65,0x61,0x64,0x00,0x00,0xD8,0x06,0x00,0x02,0x54,0x78,0x20, \ +0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0xF8,0x06, \ +0x00,0x02,0x4D,0x67,0x6D,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C, \ +0x61,0x67,0x73,0x00,0x00,0x00,0x00,0x18,0x07,0x00,0x02,0x54,0x58,0x20,0x47, \ +0x4F,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00, \ +0x00,0x38,0x07,0x00,0x02,0x50,0x73,0x50,0x6F,0x6C,0x6C,0x20,0x73,0x74,0x61, \ +0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0xC3,0x00,0x18,0x18,0x80, \ +0x00,0x80,0x08,0x01,0xD0,0x01,0x38,0xFD,0xD1,0xF7,0x46,0x03,0x49,0x0F,0x20, \ +0x00,0x06,0x81,0x80,0x02,0x49,0x81,0x81,0xF7,0x46,0x00,0x00,0xE8,0xE8,0x00, \ +0x00,0x13,0x13,0x00,0x00,0x01,0x20,0x80,0x06,0x40,0x6A,0xF7,0x46,0x01,0x1C, \ +0x06,0x48,0x04,0xD0,0x41,0x68,0xC3,0x01,0x19,0x43,0x41,0x60,0xF7,0x46,0x41, \ +0x68,0x01,0x23,0x5B,0x03,0x99,0x43,0x41,0x60,0xF7,0x46,0x40,0x00,0x00,0x04, \ +0x80,0xB5,0x13,0x49,0x15,0x4F,0x08,0x78,0x42,0x01,0x12,0x48,0x42,0x70,0x12, \ +0x4A,0x52,0x7A,0x00,0x2A,0x0B,0xD0,0x09,0x78,0x00,0x29,0x08,0xDD,0x41,0x78, \ +0x10,0x23,0x19,0x43,0x41,0x70,0x48,0x21,0x79,0x81,0x18,0x21,0x39,0x81,0x03, \ +0xE0,0x90,0x21,0x79,0x81,0x30,0x21,0x39,0x81,0x41,0x78,0x01,0x20,0x00,0xF0, \ +0x5B,0xF8,0x78,0x89,0x39,0x89,0x40,0x18,0x06,0x49,0x08,0x80,0x01,0xF0,0xA8, \ +0xFC,0x80,0xBD,0x00,0x00,0x93,0x01,0x00,0x02,0xCC,0x07,0x00,0x02,0x04,0x01, \ +0x00,0x02,0x00,0x00,0x00,0x02,0xAC,0x01,0x00,0x02,0x01,0x1C,0x06,0x48,0x04, \ +0xD0,0x41,0x7C,0x01,0x23,0x19,0x43,0x41,0x74,0xF7,0x46,0x41,0x7C,0xFE,0x23, \ +0x19,0x40,0x41,0x74,0xF7,0x46,0x00,0x00,0xCC,0x07,0x00,0x02,0xF0,0xB4,0x07, \ +0x24,0x64,0x06,0xA2,0x69,0x04,0x23,0x9A,0x43,0xA2,0x61,0xF3,0x22,0x12,0x05, \ +0x93,0x68,0x40,0x23,0xD3,0x60,0x17,0x69,0xBB,0x08,0xFC,0xD3,0x13,0x69,0x5B, \ +0x08,0xFC,0xD3,0x93,0x68,0x80,0x23,0x03,0x43,0xD3,0x60,0x17,0x69,0xBB,0x08, \ +0xFC,0xD3,0x13,0x69,0x5B,0x08,0xFC,0xD3,0x17,0x1C,0x92,0x68,0x00,0x22,0x00, \ +0x29,0x0D,0xD9,0x0A,0x4D,0x83,0x18,0xEB,0x5C,0xFB,0x60,0x3E,0x69,0xB3,0x08, \ +0xFC,0xD3,0x3B,0x69,0x5B,0x08,0xFC,0xD3,0x01,0x32,0x8A,0x42,0xBB,0x68,0xF2, \ +0xD3,0xA0,0x69,0x04,0x23,0x18,0x43,0xA0,0x61,0xF0,0xBC,0xF7,0x46,0x00,0x00, \ +0xCC,0x07,0x00,0x02,0x90,0xB4,0x07,0x27,0x7F,0x06,0xBA,0x69,0x04,0x23,0x9A, \ +0x43,0xBA,0x61,0xF3,0x22,0x12,0x05,0x93,0x68,0x40,0x23,0xD3,0x60,0x14,0x69, \ +0xA3,0x08,0xFC,0xD3,0x13,0x69,0x5B,0x08,0xFC,0xD3,0x93,0x68,0xD0,0x60,0x10, \ +0x69,0x80,0x08,0xFC,0xD3,0x10,0x1C,0x02,0x69,0x52,0x08,0xFC,0xD3,0x82,0x68, \ +0xC1,0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0x01,0x69,0x49,0x08,0xFC,0xD3,0x80, \ +0x68,0x04,0x23,0xB8,0x69,0x18,0x43,0xB8,0x61,0x90,0xBC,0xF7,0x46,0x80,0xB4, \ +0x07,0x22,0x52,0x06,0x91,0x69,0x04,0x23,0x99,0x43,0x91,0x61,0xF3,0x21,0x09, \ +0x05,0x8B,0x68,0x40,0x23,0xCB,0x60,0x0F,0x69,0xBB,0x08,0xFC,0xD3,0x0B,0x69, \ +0x5B,0x08,0xFC,0xD3,0x8B,0x68,0xC8,0x60,0x08,0x69,0x80,0x08,0xFC,0xD3,0x08, \ +0x69,0x40,0x08,0xFC,0xD3,0x97,0x69,0x04,0x23,0x3B,0x43,0x88,0x68,0x93,0x61, \ +0x97,0x69,0x04,0x23,0x9F,0x43,0x97,0x61,0x41,0x20,0xC8,0x60,0x08,0x69,0x80, \ 
+0x08,0xFC,0xD3,0x08,0x1C,0x01,0x69,0x49,0x08,0xFC,0xD3,0x81,0x68,0xFF,0x21, \ +0xC1,0x60,0x01,0x69,0x49,0x08,0xFC,0xD3,0x91,0x69,0x04,0x23,0x19,0x43,0x80, \ +0x68,0x91,0x61,0x80,0xBC,0xF7,0x46,0x80,0xB5,0x0B,0x4F,0xF8,0x69,0xBB,0x01, \ +0x18,0x43,0xF8,0x61,0x00,0x20,0xFF,0xF7,0x0A,0xFF,0x08,0x48,0xF9,0x69,0x00, \ +0x68,0xC0,0x43,0x08,0x40,0xF8,0x61,0x07,0x20,0x40,0x06,0x81,0x69,0x01,0x23, \ +0x19,0x43,0x81,0x61,0x03,0x49,0x01,0x20,0x08,0x70,0x80,0xBD,0x40,0x00,0x00, \ +0x04,0xB4,0x02,0x00,0x02,0xB3,0x02,0x00,0x02,0xC1,0x0A,0x01,0xD3,0x00,0x20, \ +0xF7,0x46,0xFF,0x22,0x01,0x32,0x02,0x40,0x01,0x21,0x00,0x2A,0x01,0xD0,0x08, \ +0x1C,0xF7,0x46,0x80,0x0A,0x01,0xD3,0x08,0x1C,0xF7,0x46,0x02,0x20,0xF7,0x46, \ +0xF0,0xB5,0x0F,0x1C,0x19,0x49,0x04,0x1C,0x19,0x4E,0x1A,0x48,0x31,0x60,0x05, \ +0x6C,0x00,0x2D,0x16,0xD0,0x06,0x22,0x20,0x1C,0x31,0x68,0x04,0xF0,0x37,0xFA, \ +0x00,0x28,0x08,0xD1,0x30,0x68,0xC1,0x88,0xB9,0x42,0x01,0xD1,0x01,0x20,0xF0, \ +0xBD,0xC7,0x80,0x00,0x20,0xF0,0xBD,0x30,0x68,0x08,0x30,0x30,0x60,0x28,0x1C, \ +0x01,0x3D,0x00,0x28,0xE8,0xD1,0x0C,0x48,0x01,0x6C,0x01,0x31,0x01,0x64,0x01, \ +0x6C,0x07,0x29,0x03,0xD9,0x06,0x49,0x31,0x60,0x08,0x21,0x01,0x64,0x06,0x22, \ +0x21,0x1C,0x30,0x68,0x04,0xF0,0x33,0xFA,0x30,0x68,0xC7,0x80,0x00,0x20,0xF0, \ +0xBD,0x00,0x00,0x74,0x07,0x00,0x02,0x40,0x01,0x00,0x02,0x44,0x09,0x00,0x02, \ +0x05,0x49,0x0A,0x68,0x12,0x01,0x02,0x70,0x0A,0x68,0x12,0x01,0x12,0x0A,0x42, \ +0x70,0x08,0x68,0x01,0x30,0x08,0x60,0xF7,0x46,0x44,0x01,0x00,0x02,0x00,0x2A, \ +0x0C,0xD1,0x08,0x4A,0x92,0x7A,0x8A,0x42,0x00,0xD8,0x11,0x1C,0x07,0x4A,0x49, \ +0x00,0x51,0x5A,0x06,0x4A,0xD2,0x88,0x89,0x18,0xC9,0x18,0x00,0xE0,0x00,0x21, \ +0x01,0x70,0x09,0x0A,0x41,0x70,0xF7,0x46,0x04,0x01,0x00,0x02,0xB0,0x01,0x00, \ +0x02,0x00,0x00,0x00,0x02,0xF0,0xB5,0x49,0x48,0x00,0x68,0xFF,0xF7,0x85,0xFF, \ +0x04,0x06,0x47,0x48,0x24,0x0E,0x00,0x68,0x47,0x68,0x38,0x78,0x06,0x07,0x36, \ +0x0F,0x02,0x2C,0x73,0xD0,0x44,0x4D,0x28,0x79,0x02,0x28,0x07,0xD1,0xF8,0x1D, \ +0x09,0x30,0x06,0x22,0x41,0x49,0x04,0xF0,0xC9,0xF9,0x00,0x28,0x67,0xD0,0x30, \ +0x06,0x00,0x0E,0x08,0x28,0x64,0xD1,0x3E,0x48,0x80,0x79,0x05,0x28,0x61,0xD1, \ +0x28,0x79,0x3C,0x4E,0x02,0x28,0x0C,0xD1,0xF8,0x1D,0x03,0x30,0x06,0x22,0x31, \ +0x1C,0x04,0xF0,0xB5,0xF9,0x00,0x28,0x55,0xD1,0x78,0x78,0x81,0x08,0x5A,0xD3, \ +0x40,0x08,0x58,0xD2,0x28,0x79,0x01,0x28,0x0C,0xD1,0xF8,0x1D,0x09,0x30,0x06, \ +0x22,0x31,0x1C,0x04,0xF0,0xA5,0xF9,0x00,0x28,0x45,0xD1,0x78,0x78,0x81,0x08, \ +0x4A,0xD2,0x40,0x08,0x48,0xD2,0x38,0x78,0x08,0x28,0x30,0xD1,0x2C,0x48,0x01, \ +0x78,0x00,0x29,0x11,0xD0,0xC0,0x78,0x00,0x28,0x11,0xD0,0x78,0x78,0xC0,0x09, \ +0x0E,0xD2,0xB9,0x7F,0xF8,0x1D,0x09,0x30,0x88,0x29,0x02,0xD1,0xC0,0x7B,0x8E, \ +0x28,0x06,0xD0,0xB8,0x7D,0x00,0x07,0x26,0xD0,0x02,0xE0,0x78,0x78,0xC0,0x09, \ +0x2C,0xD2,0x28,0x79,0x02,0x28,0x14,0xD1,0x78,0x78,0x04,0x21,0x01,0x40,0x20, \ +0x23,0x18,0x40,0x22,0x1C,0x03,0xF0,0x09,0xF9,0x00,0x2C,0x0A,0xD1,0x1A,0x48, \ +0x41,0x68,0x04,0x29,0x06,0xD1,0x01,0x26,0x46,0x60,0x01,0x20,0x01,0xF0,0xB8, \ +0xFB,0x16,0x48,0x06,0x70,0x68,0x79,0x03,0x28,0x0D,0xD1,0x19,0x21,0xC9,0x02, \ +0x02,0x20,0x01,0xF0,0x94,0xFB,0x12,0x48,0x81,0x78,0x02,0xE0,0x08,0xE0,0x08, \ +0xE0,0x06,0xE0,0x08,0x23,0x99,0x43,0x81,0x70,0x39,0x78,0x20,0x1C,0x00,0xF0, \ +0x21,0xFD,0xF0,0xBD,0x00,0x28,0xFC,0xD1,0x20,0x1C,0x00,0xF0,0xED,0xFB,0xF0, \ +0xBD,0xE8,0x01,0x00,0x02,0x48,0x01,0x00,0x02,0xB0,0x00,0x00,0x02,0x5C,0x00, \ +0x00,0x02,0x94,0x09,0x00,0x02,0x8E,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x44, \ +0x09,0x00,0x02,0xDC,0x01,0x00,0x02,0xB4,0x09,0x00,0x02,0x08,0xB5,0x00,0x21, \ 
+0x00,0x91,0x00,0x28,0x0C,0xD1,0x09,0x48,0x00,0x68,0x40,0x68,0x81,0x7D,0xC2, \ +0x7D,0x12,0x02,0x11,0x43,0x09,0x04,0x09,0x0C,0x0A,0x30,0xFF,0xF7,0xE3,0xFE, \ +0x00,0x90,0x00,0x98,0x01,0x28,0x01,0xD0,0x00,0xF0,0x03,0xF8,0x08,0xBD,0x48, \ +0x01,0x00,0x02,0x80,0xB5,0x05,0x48,0x00,0x78,0x80,0x09,0x04,0xD3,0x04,0x4F, \ +0x38,0x68,0x02,0xF0,0x9F,0xF8,0x38,0x60,0x80,0xBD,0x00,0x00,0x94,0x01,0x00, \ +0x02,0x48,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x00,0x25,0x7D,0x26,0x36,0x01, \ +0x01,0x21,0x89,0x06,0x88,0x68,0x00,0x0B,0xFC,0x24,0x04,0x40,0xFA,0x48,0xC7, \ +0x6A,0x00,0x2F,0x0F,0xD1,0x00,0x20,0xFF,0xF7,0x9A,0xFD,0xF6,0x48,0xC1,0x69, \ +0x83,0x01,0x19,0x43,0xC1,0x61,0xC1,0x69,0xF4,0x4B,0x19,0x40,0xC1,0x61,0x01, \ +0x05,0xC8,0x68,0x02,0xB0,0xF0,0xBD,0xF2,0x49,0xA0,0x08,0x08,0x5C,0x00,0x28, \ +0x06,0xD0,0x00,0x20,0xFF,0xF7,0x85,0xFD,0x01,0x21,0x89,0x06,0xC8,0x68,0xF1, \ +0xE7,0xED,0x49,0x04,0x20,0x20,0x40,0x01,0x91,0x61,0xD0,0x04,0x20,0xFF,0xF7, \ +0x2D,0xFE,0xEA,0x49,0x08,0x71,0xA0,0x09,0x01,0xD3,0x14,0x21,0x00,0xE0,0x0E, \ +0x21,0xE7,0x48,0x02,0x22,0x01,0xF0,0x9E,0xFE,0x00,0x28,0x03,0xD1,0x01,0x21, \ +0x89,0x06,0xC8,0x68,0xD8,0xE7,0x01,0x21,0x89,0x06,0xC8,0x68,0x28,0x43,0x01, \ +0xE0,0xCA,0x68,0x10,0x43,0x42,0x09,0x03,0xD2,0x32,0x1C,0x01,0x3E,0x00,0x2A, \ +0xF7,0xD1,0x10,0x23,0x98,0x43,0x05,0x1C,0x00,0x2E,0x01,0xDC,0x28,0x1C,0xC4, \ +0xE7,0xD7,0x49,0x08,0x79,0x0A,0x28,0x09,0xD0,0x14,0x28,0x0B,0xD0,0x37,0x28, \ +0x0D,0xD0,0x6E,0x28,0x0F,0xD1,0xD4,0x4A,0x03,0x20,0x50,0x75,0x14,0xE0,0xD2, \ +0x4A,0x00,0x20,0x50,0x75,0x10,0xE0,0xD0,0x4A,0x01,0x21,0x51,0x75,0x0C,0xE0, \ +0xCE,0x4A,0x02,0x20,0x50,0x75,0x08,0xE0,0x14,0x2F,0x03,0xD2,0xCB,0x4A,0x03, \ +0x20,0x50,0x75,0x02,0xE0,0xC9,0x4A,0x02,0x20,0x50,0x75,0xA8,0x09,0x06,0xD3, \ +0xE8,0x08,0x04,0xD2,0x01,0x20,0xFF,0xF7,0x2A,0xFD,0x28,0x1C,0x98,0xE7,0xC3, \ +0x4F,0x00,0x20,0x38,0x60,0xE8,0x0A,0x1D,0xD3,0xB4,0x2C,0x07,0xD0,0xC4,0x2C, \ +0x16,0xD0,0xD4,0x2C,0x23,0xD1,0x00,0xF0,0xFF,0xF9,0x20,0xE0,0x3D,0xE0,0xB6, \ +0x48,0x40,0x68,0x80,0x0B,0x1B,0xD3,0xB9,0x4C,0x02,0x20,0xE1,0x1D,0x03,0x31, \ +0xB8,0x72,0x01,0x98,0x06,0x22,0xB7,0x4E,0x04,0xF0,0x8D,0xF8,0x60,0x88,0x70, \ +0x80,0x0E,0xE0,0x00,0xF0,0xF8,0xFA,0x0B,0xE0,0xA4,0x2C,0x15,0xD0,0xB4,0x2C, \ +0x13,0xD0,0xC4,0x2C,0x01,0xD0,0xD4,0x2C,0x03,0xD1,0xAD,0x48,0xAF,0x49,0x40, \ +0x88,0x08,0x80,0x78,0x68,0x04,0x28,0x06,0xD1,0x00,0xF0,0xCE,0xF9,0x00,0x22, \ +0x10,0x21,0xAB,0x48,0x03,0xF0,0x31,0xFB,0x28,0x1C,0x5D,0xE7,0x7A,0x7D,0xA9, \ +0x48,0xAA,0x4B,0x52,0x00,0x9A,0x5A,0xC1,0x88,0xA5,0x4B,0x8A,0x18,0x1A,0x80, \ +0xB4,0x2C,0xE8,0xD1,0x80,0x88,0x40,0x00,0x08,0x18,0x19,0x88,0x40,0x18,0x18, \ +0x80,0xE1,0xE7,0xA3,0x49,0x08,0x68,0x00,0x7A,0x00,0x28,0x06,0xD0,0xC4,0x20, \ +0x9A,0x4A,0x01,0x21,0x89,0x06,0x10,0x60,0xC8,0x68,0x3F,0xE7,0x01,0x20,0xFF, \ +0xF7,0xCC,0xFC,0x29,0x2F,0x0D,0xD2,0x07,0x20,0xFF,0xF7,0x7B,0xFD,0x91,0x49, \ +0xC8,0x71,0x0B,0x21,0x79,0x43,0xCF,0x08,0x03,0x21,0x00,0x91,0x00,0x0A,0x1D, \ +0xD3,0x01,0x3F,0x1B,0xE0,0x04,0x20,0xFF,0xF7,0x6D,0xFD,0x00,0x06,0x00,0x0E, \ +0x89,0x4E,0x0A,0x28,0x30,0x71,0x1F,0xD0,0x14,0x28,0x21,0xD0,0x37,0x28,0x23, \ +0xD0,0x6E,0x28,0x03,0xD1,0x07,0x20,0xFF,0xF7,0x5E,0xFD,0xF0,0x71,0x0B,0x20, \ +0x78,0x43,0xC7,0x08,0x03,0x21,0x00,0x91,0xF0,0x79,0x00,0x0A,0x00,0xD3,0x01, \ +0x3F,0x80,0x2C,0x01,0xD0,0x50,0x2C,0x16,0xD1,0x03,0x20,0xFF,0xF7,0x4D,0xFD, \ +0x82,0x49,0x80,0x06,0x09,0x68,0x80,0x0E,0x48,0x74,0x11,0xE0,0xFF,0x08,0x00, \ +0x21,0x00,0x91,0xEE,0xE7,0xBF,0x08,0x01,0x21,0x00,0x91,0xEA,0xE7,0x0B,0x20, \ +0x78,0x43,0x07,0x09,0x02,0x21,0x00,0x91,0xE4,0xE7,0x78,0x49,0x00,0x20,0x09, \ 
+0x68,0x48,0x74,0x74,0x48,0x80,0x89,0x04,0x30,0xB8,0x42,0x01,0xD3,0x18,0x2F, \ +0x0E,0xD8,0x6C,0x4A,0xC3,0x20,0x10,0x60,0x65,0x48,0xC1,0x69,0x83,0x01,0x19, \ +0x43,0xC1,0x61,0xC1,0x69,0x63,0x4B,0x19,0x40,0xC1,0x61,0x01,0x05,0xC8,0x68, \ +0xDB,0xE6,0x6B,0x4E,0x02,0x22,0x30,0x68,0x18,0x21,0x40,0x68,0x01,0xF0,0x94, \ +0xFD,0x00,0x28,0x03,0xD1,0x01,0x21,0x89,0x06,0xC8,0x68,0xCE,0xE6,0x7D,0x20, \ +0xC0,0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x29,0x43,0x03,0xE0,0x01,0x22,0x92, \ +0x06,0xD2,0x68,0x11,0x43,0x4A,0x09,0x03,0xD2,0x02,0x1C,0x01,0x38,0x00,0x2A, \ +0xF5,0xD1,0x10,0x23,0x99,0x43,0x0D,0x1C,0x00,0x28,0x01,0xDC,0x28,0x1C,0xB6, \ +0xE6,0x58,0x48,0x54,0x49,0x00,0x68,0x00,0x22,0x46,0x68,0x0A,0x80,0x2A,0x0A, \ +0x52,0x07,0x08,0xD1,0x70,0x88,0x4D,0x4A,0x00,0x27,0x08,0x80,0x01,0x21,0x11, \ +0x73,0xD7,0x72,0x28,0x1C,0xA4,0xE6,0x50,0x49,0x0D,0x60,0xE9,0x0A,0x13,0xD3, \ +0x47,0x4A,0x01,0x21,0x91,0x72,0x71,0x78,0xC9,0x08,0x03,0xD3,0x71,0x88,0x45, \ +0x4A,0x51,0x80,0x02,0xE0,0x43,0x49,0x00,0x22,0x4A,0x80,0x40,0x68,0xC1,0x1D, \ +0x03,0x31,0x06,0x22,0x01,0x98,0x03,0xF0,0x9E,0xFF,0x00,0x98,0x3D,0x49,0x48, \ +0x75,0x42,0x48,0x02,0x68,0x97,0x81,0x4A,0x7D,0x03,0x68,0x00,0x27,0x9A,0x73, \ +0x0F,0x60,0x31,0x78,0x48,0x29,0x03,0xD1,0x71,0x78,0x40,0x23,0x99,0x43,0x71, \ +0x70,0x71,0x78,0xC9,0x09,0x2E,0xD2,0x00,0x68,0x02,0x22,0x81,0x89,0x40,0x68, \ +0x18,0x30,0x18,0x39,0x01,0xF0,0x2F,0xFD,0x00,0x28,0x05,0xD1,0x2E,0x48,0x01, \ +0x21,0x01,0x73,0xC7,0x72,0x28,0x1C,0x67,0xE6,0x2B,0x4F,0x03,0x20,0xF8,0x72, \ +0x02,0x20,0x38,0x73,0x80,0x2C,0x15,0xD1,0x2F,0x48,0xC1,0x1D,0x29,0x31,0x09, \ +0x79,0x01,0x29,0x0F,0xD1,0xF9,0x1D,0x49,0x31,0x89,0x79,0x05,0x29,0x0A,0xD1, \ +0xC1,0x1D,0x07,0x31,0xF0,0x1D,0x06,0x22,0x09,0x30,0x03,0xF0,0x42,0xFF,0x00, \ +0x28,0x01,0xD1,0x01,0x21,0xB9,0x76,0x28,0x1C,0x48,0xE6,0x24,0x4E,0x31,0x78, \ +0x00,0x29,0x05,0xD1,0x19,0x48,0x01,0x21,0x01,0x73,0xC7,0x72,0x28,0x1C,0x3E, \ +0xE6,0x04,0x1C,0x00,0x68,0x02,0x22,0x40,0x68,0x04,0x21,0x18,0x30,0x01,0xF0, \ +0xF6,0xFC,0x00,0x28,0x01,0xD1,0x28,0x1C,0x32,0xE6,0x7D,0x20,0xC0,0x00,0x01, \ +0x22,0x92,0x06,0xD1,0x68,0x29,0x43,0x01,0xE0,0xD3,0x68,0x19,0x43,0x4B,0x09, \ +0x03,0xD2,0x03,0x1C,0x01,0x38,0x00,0x2B,0xF7,0xD1,0x10,0x23,0x99,0x43,0x0D, \ +0x1C,0x00,0x28,0x23,0xDC,0x20,0xE0,0x00,0x00,0x40,0x00,0x00,0x04,0xFF,0xEF, \ +0x00,0x00,0x4C,0x01,0x00,0x02,0x30,0x09,0x00,0x02,0xCC,0x07,0x00,0x02,0xB4, \ +0x07,0x00,0x02,0x44,0x09,0x00,0x02,0x2C,0x09,0x00,0x02,0xE2,0x01,0x00,0x02, \ +0x38,0x07,0x00,0x02,0x00,0x00,0x00,0x02,0xB0,0x01,0x00,0x02,0x48,0x01,0x00, \ +0x02,0xE4,0x01,0x00,0x02,0x80,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x28,0x1C, \ +0xFA,0xE5,0x20,0x68,0x40,0x68,0xC1,0x1D,0x11,0x31,0x40,0x7E,0x0A,0x78,0x00, \ +0x02,0x10,0x43,0x8A,0x78,0xC9,0x78,0x12,0x04,0x10,0x43,0x89,0x09,0x09,0x06, \ +0x09,0x0E,0x0D,0x23,0x59,0x43,0x89,0x19,0x0B,0x7B,0x1B,0x06,0x18,0x43,0x32, \ +0x1C,0x03,0x26,0x76,0x06,0x30,0x60,0x8B,0x7B,0x48,0x7B,0x1B,0x02,0x18,0x43, \ +0xCB,0x7B,0x1B,0x04,0x18,0x43,0x0B,0x7C,0x1B,0x06,0x18,0x43,0x70,0x60,0xD0, \ +0x1D,0x39,0x30,0x00,0x78,0x01,0x28,0x02,0xD1,0x01,0x21,0xB1,0x60,0x19,0xE0, \ +0x02,0x28,0x17,0xD1,0x8A,0x7C,0x48,0x7C,0x12,0x02,0x10,0x43,0xCA,0x7C,0x12, \ +0x04,0x10,0x43,0x0A,0x7D,0x12,0x06,0x10,0x43,0x70,0x61,0x8A,0x7D,0x48,0x7D, \ +0x12,0x02,0x10,0x43,0xCA,0x7D,0x09,0x7E,0x12,0x04,0x10,0x43,0x09,0x06,0x08, \ +0x43,0xB0,0x61,0x81,0x20,0xB0,0x60,0x20,0x68,0x0E,0x22,0x81,0x89,0x40,0x68, \ +0x18,0x30,0x20,0x39,0x01,0xF0,0x6C,0xFC,0x00,0x28,0x06,0xD1,0x08,0x48,0x01, \ +0x21,0x01,0x73,0xC7,0x72,0xB7,0x60,0x28,0x1C,0xA3,0xE5,0x20,0x68,0x81,0x89, \ 
+0x08,0x39,0x81,0x81,0x03,0x49,0x03,0x20,0xC8,0x72,0x02,0x20,0x08,0x73,0x28, \ +0x1C,0x98,0xE5,0x44,0x09,0x00,0x02,0x00,0xB5,0x03,0x49,0x01,0x20,0x48,0x60, \ +0x01,0xF0,0xD0,0xF8,0x00,0xBD,0x00,0x00,0x44,0x09,0x00,0x02,0xF0,0xB5,0x3D, \ +0x4F,0x01,0x24,0x78,0x68,0x04,0x28,0x0C,0xD1,0x01,0x20,0x01,0xF0,0xC3,0xF8, \ +0x7C,0x60,0x01,0x20,0xFF,0xF7,0x0F,0xFB,0x00,0x22,0x01,0x21,0x37,0x48,0x03, \ +0xF0,0x4C,0xF9,0xF0,0xBD,0x78,0x68,0x02,0x28,0xFB,0xD1,0x01,0x20,0x01,0xF0, \ +0xB3,0xF8,0x7C,0x60,0x78,0x6E,0x08,0x23,0x41,0x78,0x32,0x4C,0x99,0x43,0x41, \ +0x70,0x2F,0x49,0x89,0x89,0xB9,0x87,0x22,0x78,0x2F,0x49,0x01,0x2A,0x45,0xD1, \ +0x2F,0x4B,0x1A,0x78,0x00,0x2A,0x01,0xD0,0x18,0x25,0x00,0xE0,0x1E,0x25,0x2C, \ +0x4E,0x36,0x88,0x75,0x1B,0x2C,0x4E,0x36,0x68,0xAD,0x19,0x2A,0x4E,0x01,0x32, \ +0x35,0x60,0x1A,0x70,0x1A,0x78,0x86,0x7D,0x12,0x07,0x12,0x0F,0x1D,0x1C,0xF0, \ +0x23,0x33,0x40,0x1A,0x43,0x82,0x75,0x42,0x78,0xD2,0x09,0x03,0xD3,0x22,0x4A, \ +0x13,0x68,0x08,0x3B,0x13,0x60,0x21,0x4B,0x2A,0x78,0x1B,0x88,0x9A,0x42,0x0F, \ +0xD1,0x20,0x4A,0x1C,0x4E,0x12,0x88,0x04,0x23,0x32,0x80,0x42,0x78,0x9A,0x43, \ +0x42,0x70,0x02,0x20,0x20,0x70,0x08,0x68,0x80,0x7D,0x31,0x88,0x00,0xF0,0x68, \ +0xFF,0xF8,0x66,0x15,0x4E,0x30,0x88,0xB8,0x66,0x20,0x78,0x02,0x28,0x04,0xD0, \ +0x01,0x21,0x01,0x20,0x00,0xF0,0x2D,0xF8,0xF0,0xBD,0x01,0x21,0x00,0x20,0x00, \ +0xF0,0x28,0xF8,0xF0,0xBD,0x09,0x68,0x00,0x20,0x48,0x73,0x0F,0x49,0x09,0x68, \ +0x48,0x70,0x01,0x20,0xFF,0xF7,0xA6,0xFA,0x00,0x22,0x10,0x21,0x0C,0x48,0x03, \ +0xF0,0xE3,0xF8,0xF0,0xBD,0x44,0x09,0x00,0x02,0x38,0x07,0x00,0x02,0xC0,0x00, \ +0x00,0x02,0x95,0x01,0x00,0x02,0xC4,0x01,0x00,0x02,0x96,0x01,0x00,0x02,0x98, \ +0x01,0x00,0x02,0xA0,0x01,0x00,0x02,0x9C,0x01,0x00,0x02,0x9A,0x01,0x00,0x02, \ +0xD0,0x01,0x00,0x02,0xF8,0x06,0x00,0x02,0xF0,0xB5,0x30,0x4D,0x04,0x1C,0x28, \ +0x68,0x0F,0x1C,0x80,0x7D,0x2E,0x49,0x08,0x70,0x00,0xF0,0x86,0xFF,0x2C,0x49, \ +0x08,0x78,0x03,0x28,0x04,0xD1,0x2B,0x48,0x40,0x6B,0xFF,0xF7,0xB8,0xFA,0x02, \ +0xE0,0x00,0x20,0xFF,0xF7,0xB4,0xFA,0x28,0x68,0x85,0x7D,0x27,0x48,0x80,0x7A, \ +0x85,0x42,0x00,0xDB,0x05,0x1C,0x23,0x48,0x00,0x78,0x01,0xF0,0xAF,0xFB,0x24, \ +0x4A,0x24,0x49,0x10,0x60,0xCB,0x88,0x69,0x00,0x23,0x4D,0x69,0x5A,0x59,0x18, \ +0x00,0x2C,0x12,0xD0,0x00,0x2F,0x11,0xD0,0x21,0x4C,0x21,0x4D,0x24,0x88,0x2D, \ +0x78,0x5B,0x18,0x1B,0x18,0x01,0x3C,0xAC,0x42,0x03,0xD1,0x1E,0x4C,0x24,0x68, \ +0x1E,0x19,0x04,0xE0,0x15,0x4C,0xE4,0x6E,0xE6,0x18,0x00,0xE0,0x00,0x26,0x13, \ +0x4C,0x14,0x4A,0xE3,0x6E,0x18,0x18,0x10,0x60,0x18,0x4A,0x12,0x88,0x10,0x18, \ +0x45,0x18,0x00,0x2F,0x07,0xD0,0x60,0x6E,0x0C,0x49,0x02,0x30,0x33,0x1C,0x00, \ +0x22,0x09,0x78,0xFF,0xF7,0x9E,0xFB,0x01,0x20,0x29,0x1C,0x00,0xF0,0xC8,0xFF, \ +0x02,0x20,0x60,0x60,0x01,0x20,0x0F,0x49,0xE0,0x75,0x09,0x88,0xE0,0x6E,0x06, \ +0x4A,0x40,0x18,0x10,0x60,0xF0,0xBD,0x00,0x00,0xC4,0x01,0x00,0x02,0x93,0x01, \ +0x00,0x02,0x44,0x09,0x00,0x02,0x04,0x01,0x00,0x02,0x8C,0x01,0x00,0x02,0x00, \ +0x00,0x00,0x02,0xB0,0x01,0x00,0x02,0x9C,0x01,0x00,0x02,0x96,0x01,0x00,0x02, \ +0xA4,0x01,0x00,0x02,0xAE,0x01,0x00,0x02,0xAC,0x01,0x00,0x02,0x00,0xB5,0x06, \ +0x48,0x40,0x68,0x03,0x28,0x06,0xD1,0x01,0x20,0x00,0xF0,0xB6,0xFF,0x00,0x21, \ +0x01,0x20,0xFF,0xF7,0x7A,0xFF,0x00,0xBD,0x00,0x00,0x44,0x09,0x00,0x02,0x00, \ +0xB5,0x11,0x49,0x09,0x68,0x49,0x68,0x0A,0x78,0x13,0x07,0x10,0xD1,0x12,0x11, \ +0x0D,0x2A,0x0D,0xD2,0x01,0xA3,0x9B,0x5C,0x5B,0x00,0x9F,0x44,0x09,0x0A,0x09, \ +0x0A,0x0D,0x06,0x09,0x09,0x06,0x09,0x0A,0x0A,0x0A,0x00,0x08,0x1C,0x00,0xF0, \ +0x10,0xF8,0x00,0xBD,0xFF,0xF7,0x0D,0xFC,0x00,0xBD,0x04,0x48,0x40,0x78,0x00, \ 
+0x28,0xF7,0xD0,0x07,0xF0,0x12,0xFE,0x00,0xBD,0x00,0x00,0x48,0x01,0x00,0x02, \ +0x94,0x09,0x00,0x02,0xB0,0xB5,0x28,0x4C,0x07,0x1C,0xA0,0x79,0x01,0x28,0x02, \ +0xD1,0x38,0x1C,0x07,0xF0,0xF5,0xFD,0x38,0x78,0x50,0x28,0x0E,0xD0,0xA0,0x79, \ +0x03,0x28,0x03,0xD1,0x07,0xF0,0xD1,0xF9,0x00,0x28,0x3D,0xD0,0xA0,0x79,0x04, \ +0x28,0x04,0xD1,0x01,0x20,0x07,0xF0,0xC7,0xF8,0x00,0x28,0x35,0xD0,0xA0,0x79, \ +0x05,0x28,0x30,0xD1,0x1A,0x4D,0xF8,0x1D,0x09,0x30,0x06,0x22,0xE9,0x1D,0x07, \ +0x31,0x03,0xF0,0x14,0xFD,0x17,0x4C,0x00,0x28,0x20,0xD1,0x20,0x79,0x20,0x37, \ +0x02,0x28,0x0A,0xD1,0xB8,0x78,0x40,0x08,0x20,0xD3,0x06,0x20,0x00,0xF0,0x53, \ +0xFF,0x00,0x20,0x00,0xF0,0x22,0xF8,0x00,0xF0,0xD4,0xF8,0x20,0x79,0x01,0x28, \ +0x13,0xD1,0xB8,0x78,0x80,0x08,0x12,0xD3,0x00,0x20,0x00,0xF0,0x17,0xF8,0x00, \ +0xF0,0x05,0xFF,0x29,0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0x00,0xF0,0x23,0xFF, \ +0x04,0xE0,0x20,0x79,0x01,0x28,0x01,0xD1,0x07,0xF0,0x45,0xFE,0xFF,0xF7,0xC9, \ +0xFB,0xB0,0xBD,0x94,0x09,0x00,0x02,0x80,0x00,0x00,0x02,0xB0,0x00,0x00,0x02, \ +0xF1,0xB5,0x83,0xB0,0x3E,0x49,0x00,0x25,0x0B,0x68,0x02,0x93,0x59,0x68,0x4A, \ +0x7E,0x0F,0x7E,0x12,0x02,0x3A,0x43,0x8F,0x7E,0x3F,0x04,0x3A,0x43,0xCF,0x7E, \ +0x3F,0x06,0x3A,0x43,0x16,0x1C,0x4F,0x7F,0x0A,0x7F,0x3F,0x02,0x3A,0x43,0x8F, \ +0x7F,0xC9,0x7F,0x3F,0x04,0x3A,0x43,0x09,0x06,0x0A,0x43,0x99,0x89,0x18,0x39, \ +0xCC,0x00,0x99,0x7B,0x17,0x1C,0x00,0x29,0x26,0xD0,0x01,0x29,0x26,0xD0,0x02, \ +0x29,0x26,0xD0,0x03,0x29,0x0C,0xD1,0x0B,0x20,0x21,0x1C,0x03,0xF0,0x17,0xFD, \ +0x00,0x91,0x61,0x1A,0x0B,0x20,0x03,0xF0,0x12,0xFD,0x00,0x99,0x00,0x29,0x00, \ +0xD9,0x01,0x30,0x01,0x24,0xA4,0x06,0xA2,0x6A,0x61,0x6A,0x02,0x9B,0x30,0x18, \ +0x5B,0x69,0xCB,0x1A,0xC0,0x18,0xB0,0x42,0x00,0xD2,0x01,0x37,0x06,0x1C,0x1F, \ +0x48,0x03,0x79,0x00,0x20,0x02,0x2B,0x14,0xD1,0x01,0x25,0x1F,0xE0,0x20,0x1C, \ +0xE9,0xE7,0x60,0x08,0xE7,0xE7,0x61,0x00,0x01,0x91,0x0B,0x20,0x03,0xF0,0xF1, \ +0xFC,0x0C,0x1C,0x01,0x99,0x09,0x1B,0x0B,0x20,0x03,0xF0,0xEB,0xFC,0x00,0x2C, \ +0xDA,0xD9,0x01,0x30,0xD8,0xE7,0x01,0x2B,0x0A,0xD1,0x12,0x4B,0x97,0x42,0x58, \ +0x70,0x01,0xD9,0x01,0x25,0x04,0xE0,0x97,0x42,0x02,0xD1,0x8E,0x42,0x00,0xD9, \ +0x01,0x25,0x03,0x9A,0x00,0x2A,0x03,0xD0,0x00,0x2D,0x03,0xD1,0x04,0xB0,0xF0, \ +0xBD,0x00,0x2D,0x09,0xD0,0x70,0x1A,0x00,0xF0,0x10,0xF8,0x01,0x23,0xDE,0x42, \ +0x01,0xD1,0x00,0x26,0x01,0x37,0xA7,0x62,0x66,0x62,0x01,0x20,0xEF,0xE7,0x00, \ +0x00,0x48,0x01,0x00,0x02,0xB0,0x00,0x00,0x02,0x94,0x09,0x00,0x02,0x90,0xB4, \ +0x10,0x4A,0x00,0x21,0x97,0x69,0x91,0x61,0x01,0x21,0x0E,0x4B,0x8C,0x00,0xE3, \ +0x18,0xDC,0x6A,0x01,0x31,0x24,0x18,0xDC,0x62,0x08,0x29,0xF6,0xD9,0x0B,0x49, \ +0x0B,0x6B,0x1B,0x18,0x0B,0x63,0x0B,0x6B,0x5B,0x00,0x5B,0x08,0x0B,0x63,0xCB, \ +0x6A,0x18,0x18,0xC8,0x62,0xC8,0x6A,0x40,0x00,0x40,0x08,0xC8,0x62,0x97,0x61, \ +0x90,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04,0x44, \ +0x09,0x00,0x02,0x00,0xB5,0x08,0x29,0x01,0xD1,0xFF,0xF7,0xF5,0xFA,0x00,0xBD, \ +0xB0,0xB5,0x1D,0x48,0x00,0x25,0x02,0x68,0x24,0x20,0x51,0x68,0x92,0x89,0x00, \ +0x24,0x04,0x3A,0x24,0x2A,0x26,0xD9,0x0F,0x5C,0x06,0x2F,0x1C,0xD2,0x01,0xA3, \ +0xDB,0x5D,0x5B,0x00,0x9F,0x44,0x02,0x02,0x18,0x02,0x07,0x20,0x0B,0x18,0x5B, \ +0x78,0x02,0x33,0x18,0x18,0x0E,0xE0,0x0B,0x18,0x5F,0x79,0x1C,0x79,0x3F,0x02, \ +0x27,0x43,0x3D,0x1C,0xDF,0x79,0x9C,0x79,0x5B,0x78,0x3F,0x02,0x27,0x43,0x3C, \ +0x04,0x24,0x0C,0x02,0x33,0x18,0x18,0x82,0x42,0xDF,0xD8,0x00,0x2C,0x04,0xD9, \ +0xAC,0x42,0x02,0xD2,0xA0,0x02,0x00,0xF0,0x0E,0xF8,0xB0,0xBD,0x05,0x4A,0x52, \ +0x79,0x01,0x2A,0xF3,0xDD,0x08,0x18,0x00,0xF0,0x24,0xF8,0xEF,0xE7,0x00,0x00, \ 
+0x48,0x01,0x00,0x02,0xB0,0x00,0x00,0x02,0x0B,0x49,0x09,0x68,0x49,0x69,0x08, \ +0x18,0x0A,0x49,0x4A,0x7A,0x05,0x2A,0x02,0xD1,0x8A,0x6B,0x82,0x42,0x0A,0xD2, \ +0x05,0x22,0x4A,0x72,0x02,0x1C,0x06,0x48,0x80,0x23,0xC2,0x60,0x82,0x69,0x1A, \ +0x43,0x82,0x61,0xC0,0x68,0x88,0x63,0xF7,0x46,0x00,0x00,0x48,0x01,0x00,0x02, \ +0x44,0x09,0x00,0x02,0x80,0x00,0x00,0x04,0xF0,0xB5,0x02,0x79,0x35,0x4C,0x87, \ +0x78,0xFE,0x21,0x11,0x40,0xE5,0x88,0x03,0x23,0x9B,0x03,0x9D,0x43,0x2B,0x1C, \ +0x00,0x29,0x03,0xD0,0xCD,0x00,0x01,0x3D,0x9D,0x42,0x05,0xD2,0x45,0x78,0x6D, \ +0x18,0xED,0x00,0x18,0x3D,0x9D,0x42,0x01,0xD8,0x00,0x25,0x08,0xE0,0xC9,0x00, \ +0x59,0x1A,0x89,0x09,0x5E,0x07,0x76,0x0F,0x41,0x18,0x49,0x79,0xF1,0x40,0x0D, \ +0x1C,0x00,0x23,0x26,0x49,0x52,0x08,0x8B,0x70,0x05,0xD3,0x00,0x2F,0x03,0xD1, \ +0x8A,0x78,0x02,0x23,0x1A,0x43,0x8A,0x70,0xEA,0x07,0xD2,0x0F,0x03,0xD0,0x8D, \ +0x78,0x04,0x23,0x2B,0x43,0x8B,0x70,0xE3,0x1D,0x29,0x33,0x5B,0x79,0x01,0x25, \ +0x02,0x2B,0x1D,0xD1,0x8E,0x78,0x08,0x23,0x33,0x43,0x8B,0x70,0x00,0x2A,0x03, \ +0xD0,0x0A,0x78,0x00,0x2A,0x00,0xD1,0x4D,0x70,0x00,0x2F,0x00,0xD1,0xC7,0x78, \ +0x15,0x48,0x40,0x8B,0xB8,0x42,0x00,0xD8,0x07,0x1C,0x00,0xF0,0x90,0xFD,0x21, \ +0x88,0x12,0x4B,0x4F,0x43,0xB9,0x02,0x08,0x1A,0xC1,0x18,0x06,0x20,0x00,0xF0, \ +0xAB,0xFD,0xF0,0xBD,0x88,0x78,0xC0,0x08,0x00,0xD3,0x8D,0x71,0x88,0x78,0x40, \ +0x08,0x80,0x07,0x07,0xD1,0x0A,0x48,0x80,0x69,0x80,0x08,0x03,0xD2,0x88,0x78, \ +0x08,0x23,0x18,0x43,0x88,0x70,0x88,0x78,0x04,0x23,0x98,0x43,0x88,0x70,0xF0, \ +0xBD,0x00,0x00,0x80,0x00,0x00,0x02,0xB4,0x09,0x00,0x02,0xC0,0x00,0x00,0x02, \ +0x48,0xF4,0xFF,0xFF,0x80,0x00,0x00,0x04,0xF0,0xB5,0x82,0xB0,0x36,0x48,0x34, \ +0x4E,0xC5,0x1D,0x09,0x35,0x33,0x4C,0xC7,0x1D,0x69,0x37,0x00,0x22,0xD2,0x43, \ +0x00,0x92,0x01,0x22,0x01,0xAB,0x31,0x48,0x32,0x49,0x02,0xF0,0x1E,0xFF,0x01, \ +0x98,0x41,0x0A,0x0C,0xD3,0x80,0x20,0x38,0x71,0x00,0x20,0x78,0x71,0x38,0x79, \ +0x00,0x0A,0x4C,0xD3,0x07,0xF0,0x9E,0xFD,0x38,0x79,0x00,0x0A,0xFA,0xD2,0x46, \ +0xE0,0x41,0x08,0x0F,0xD3,0x30,0x1C,0xFF,0xF7,0x27,0xF9,0x27,0x48,0x41,0x6C, \ +0x09,0x78,0x40,0x29,0x3C,0xD0,0x07,0xF0,0x78,0xFA,0x23,0x48,0x40,0x6C,0x00, \ +0x78,0x40,0x28,0xF8,0xD1,0x34,0xE0,0x41,0x0D,0x03,0xD3,0x40,0x20,0x07,0xF0, \ +0x3B,0xF9,0x2E,0xE0,0x41,0x09,0x03,0xD3,0x50,0x20,0x07,0xF0,0x35,0xF9,0x28, \ +0xE0,0x40,0x0F,0x03,0xD3,0x80,0x20,0x07,0xF0,0x2F,0xF9,0x22,0xE0,0x00,0x21, \ +0x79,0x22,0x52,0x05,0x17,0x48,0x91,0x82,0x10,0x82,0x91,0x80,0x64,0x20,0x10, \ +0x80,0x02,0x20,0x90,0x82,0x12,0x48,0x21,0x72,0x81,0x6B,0x09,0x7B,0x09,0x0A, \ +0x06,0xD3,0x00,0xF0,0x21,0xF8,0x0E,0x48,0x80,0x6B,0x00,0x7B,0x00,0x0A,0xF8, \ +0xD2,0xC0,0x20,0x02,0xF0,0x31,0xFF,0x00,0x21,0x79,0x22,0x52,0x05,0x91,0x82, \ +0x11,0x83,0x21,0x72,0x02,0xF0,0x29,0xFF,0x00,0x20,0xA8,0x73,0x9C,0xE7,0x1A, \ +0x08,0x00,0x02,0xCC,0x01,0x00,0x02,0x44,0x09,0x00,0x02,0x18,0x07,0x00,0x02, \ +0x11,0x11,0x10,0x10,0x8C,0x01,0x00,0x02,0xA0,0x8C,0x00,0x00,0xF0,0xB5,0x83, \ +0xB0,0x87,0x4D,0x00,0x24,0xA8,0x6B,0x47,0x68,0x39,0x79,0x49,0x08,0x01,0xD3, \ +0x01,0x26,0x00,0xE0,0x00,0x26,0x82,0x4D,0x69,0x7A,0x00,0x29,0x73,0xD1,0x81, \ +0x4A,0xD1,0x78,0x00,0x29,0x0C,0xD0,0x39,0x78,0x08,0x29,0x09,0xD1,0x7F,0x4A, \ +0x91,0x78,0x00,0x29,0x05,0xD0,0x81,0x7D,0xD3,0x78,0x99,0x42,0x01,0xDD,0xD1, \ +0x78,0x81,0x75,0x78,0x4B,0x7A,0x49,0x9D,0x6B,0x00,0x22,0x68,0x68,0x00,0x2E, \ +0x48,0x66,0x9A,0x72,0x65,0xD1,0x77,0x48,0xA9,0x8A,0x00,0x89,0x04,0x38,0x81, \ +0x42,0x60,0xDD,0x70,0x4A,0x01,0x21,0x51,0x72,0xF9,0x1D,0x17,0x31,0x51,0x61, \ +0x6D,0x49,0x04,0x04,0x24,0x0C,0x8C,0x81,0xA8,0x8A,0x01,0x1B,0xE0,0x1F,0x11, \ 
+0x38,0x02,0x90,0x03,0xF0,0x17,0xFB,0x68,0x49,0x01,0x30,0x08,0x82,0xA8,0x8A, \ +0x01,0x1B,0x02,0x98,0x03,0xF0,0x0F,0xFB,0xC8,0x1D,0x63,0x49,0x11,0x30,0xC8, \ +0x81,0xC8,0x89,0x18,0x28,0x04,0xD1,0x60,0x4B,0xDC,0x81,0x18,0x8A,0x01,0x38, \ +0x18,0x82,0x78,0x78,0xC0,0x09,0x06,0xD3,0x5C,0x4B,0xE0,0x1D,0x01,0x30,0x98, \ +0x81,0xD8,0x89,0x08,0x30,0xD8,0x81,0x59,0x4C,0xA8,0x7D,0xE1,0x89,0x00,0xF0, \ +0xBE,0xFB,0xA0,0x61,0xA0,0x6B,0x80,0x7D,0xA1,0x89,0x00,0xF0,0xB8,0xFB,0x56, \ +0x49,0x54,0x4A,0xC8,0x66,0xA0,0x89,0x88,0x66,0xA0,0x6B,0x80,0x7D,0x92,0x7A, \ +0x90,0x42,0x00,0xDA,0x02,0x1C,0x53,0x4B,0x52,0x00,0x9A,0x5A,0x52,0x4B,0xDB, \ +0x88,0x5B,0x00,0xD2,0x18,0x23,0x8A,0xA5,0x7A,0x01,0x3B,0xAB,0x42,0x04,0xD1, \ +0xA1,0x69,0x54,0x18,0x00,0xE0,0x25,0xE0,0x01,0xE0,0xC9,0x6E,0x8C,0x18,0x01, \ +0xF0,0x38,0xF8,0x04,0x19,0x78,0x78,0x04,0x23,0x18,0x43,0x78,0x70,0x12,0xE0, \ +0xFF,0xE7,0x40,0x48,0x42,0x49,0x42,0x72,0xA8,0x8A,0x88,0x66,0x78,0x78,0xC0, \ +0x09,0x02,0xD3,0x88,0x6E,0x08,0x30,0x88,0x66,0x3D,0x49,0x89,0x6E,0xA8,0x7D, \ +0x00,0xF0,0x81,0xFB,0x3B,0x49,0xC8,0x66,0x37,0x48,0x32,0x1C,0x80,0x6B,0x81, \ +0x7D,0xB8,0x1C,0x23,0x1C,0xFF,0xF7,0x37,0xF8,0x30,0x1C,0x00,0xF0,0xF0,0xFA, \ +0x00,0x28,0x0A,0xD0,0x02,0x20,0x33,0x49,0xC2,0x1E,0x48,0x74,0x00,0x92,0x01, \ +0x22,0x11,0x21,0x34,0x48,0x01,0xAB,0x02,0xF0,0xFB,0xFD,0x2E,0x48,0x00,0x24, \ +0x2A,0x4D,0x44,0x74,0xA8,0x6B,0x41,0x7B,0x00,0x29,0x0C,0xD1,0x38,0x1C,0x00, \ +0xF0,0x70,0xF8,0x27,0x4A,0x54,0x70,0x10,0x78,0x01,0x30,0x10,0x70,0x00,0xF0, \ +0xD9,0xFB,0x00,0xF0,0x55,0xF8,0x3E,0xE0,0xE9,0x1D,0x39,0x31,0x0A,0x7A,0x01, \ +0x2A,0x05,0xD1,0x08,0x22,0x42,0x73,0x0C,0x72,0x00,0xF0,0x4A,0xF8,0x33,0xE0, \ +0x40,0x7B,0x04,0x28,0x1F,0xD0,0x00,0xF0,0xB6,0xFB,0xA8,0x6B,0x81,0x7B,0x01, \ +0x31,0x81,0x73,0x78,0x78,0x08,0x23,0x18,0x43,0x78,0x70,0x38,0x78,0x08,0x28, \ +0x12,0xD1,0x14,0x48,0xC0,0x78,0x00,0x28,0x0E,0xD0,0x13,0x4A,0x18,0x4B,0x50, \ +0x78,0x01,0x30,0x00,0x06,0x00,0x0E,0x50,0x70,0xA9,0x6B,0x89,0x7D,0x59,0x5C, \ +0x88,0x42,0x00,0xDD,0x14,0x70,0x00,0xF0,0xA8,0xFB,0xA8,0x6B,0x0D,0x4A,0x81, \ +0x7B,0x12,0x7C,0x91,0x42,0x04,0xDA,0x44,0x73,0xA9,0x6B,0x82,0x20,0x08,0x73, \ +0x05,0xE0,0x01,0x21,0x38,0x1C,0x00,0xF0,0x29,0xF8,0x00,0xF0,0x15,0xF8,0x03, \ +0xB0,0xF0,0xBD,0x00,0x00,0x8C,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0xC8,0x01, \ +0x00,0x02,0x44,0x09,0x00,0x02,0xC0,0x00,0x00,0x02,0xB0,0x01,0x00,0x02,0x00, \ +0x00,0x00,0x02,0xF8,0x06,0x00,0x02,0xD6,0x01,0x00,0x02,0x05,0x48,0x00,0x21, \ +0x41,0x72,0x81,0x72,0x04,0x49,0x05,0x4A,0x89,0x89,0x91,0x87,0x80,0x6B,0x10, \ +0x21,0x01,0x73,0xF7,0x46,0x8C,0x01,0x00,0x02,0xC0,0x00,0x00,0x02,0x44,0x09, \ +0x00,0x02,0x80,0xB4,0x09,0x4A,0x01,0x27,0x53,0x79,0x08,0x4A,0x03,0x2B,0x02, \ +0xD1,0xD7,0x70,0x80,0xBC,0xF7,0x46,0x40,0x78,0x40,0x09,0xFA,0xD3,0x00,0x29, \ +0x02,0xD1,0x00,0x20,0xD0,0x70,0xF5,0xE7,0xD7,0x70,0xF3,0xE7,0xB0,0x00,0x00, \ +0x02,0xB4,0x09,0x00,0x02,0x90,0xB5,0xC0,0x20,0x02,0xF0,0xC6,0xFD,0x0A,0x4C, \ +0x03,0x21,0xA1,0x73,0x02,0xF0,0xC1,0xFD,0x60,0x7F,0x01,0x28,0x0C,0xD0,0xC0, \ +0x20,0x02,0xF0,0xBB,0xFD,0x07,0x1C,0xA0,0x7B,0x03,0x28,0x02,0xD1,0x00,0x20, \ +0x00,0xF0,0x86,0xF9,0x38,0x1C,0x02,0xF0,0xB1,0xFD,0x90,0xBD,0x44,0x09,0x00, \ +0x02,0x90,0xB5,0xFE,0xF7,0x09,0xFE,0x1E,0x4F,0xF9,0x6A,0x40,0x1A,0x41,0x00, \ +0x78,0x7F,0x49,0x08,0x01,0x28,0x01,0xD1,0xB8,0x6A,0x00,0xE0,0x78,0x6A,0x3B, \ +0x68,0x19,0x4A,0x00,0x2B,0x1C,0xD1,0x84,0x00,0x93,0x8B,0x24,0x18,0xA4,0x00, \ +0xE2,0x18,0x51,0x1A,0x8A,0x42,0x00,0xD2,0x11,0x1C,0x00,0x28,0x0F,0xD1,0x01, \ +0x20,0x78,0x72,0xB8,0x7B,0x03,0x28,0x05,0xD1,0x0D,0x29,0x04,0xD9,0xC8,0x1F, \ 
+0x01,0x38,0x00,0xF0,0x58,0xF9,0x90,0xBD,0x00,0x20,0x00,0xF0,0x54,0xF9,0x90, \ +0xBD,0x3B,0x62,0x09,0xE0,0x83,0x00,0xD2,0x8B,0x18,0x18,0x80,0x00,0x80,0x18, \ +0x41,0x1A,0x88,0x42,0x00,0xD2,0x01,0x1C,0x3A,0x62,0x08,0x20,0x00,0xF0,0x6A, \ +0xFB,0x04,0x20,0x78,0x72,0x90,0xBD,0x00,0x00,0x44,0x09,0x00,0x02,0x8C,0x01, \ +0x00,0x02,0x00,0xB5,0x04,0x49,0x02,0x0A,0x8A,0x74,0xC8,0x74,0x03,0x21,0x11, \ +0x20,0xFE,0xF7,0x15,0xFE,0x00,0xBD,0xCC,0x07,0x00,0x02,0xB0,0xB5,0x82,0xB0, \ +0x11,0x4D,0x01,0x20,0x68,0x74,0x11,0x4F,0x11,0x48,0x00,0x24,0xBC,0x82,0x38, \ +0x82,0xBC,0x80,0x1E,0x20,0x38,0x80,0x02,0x20,0xB8,0x82,0xC2,0x1E,0x00,0x92, \ +0x01,0x22,0x1A,0x21,0x0C,0x48,0x01,0xAB,0x02,0xF0,0xE5,0xFC,0x6C,0x74,0x3C, \ +0x83,0xBC,0x82,0x01,0x98,0x81,0x08,0x06,0xD3,0x00,0x09,0x02,0xD3,0x82,0x20, \ +0x02,0xB0,0xB0,0xBD,0x20,0x1C,0xFB,0xE7,0x42,0x20,0xF9,0xE7,0x44,0x09,0x00, \ +0x02,0x20,0x00,0x20,0x0F,0xA0,0x8C,0x00,0x00,0xD8,0x06,0x00,0x02,0xF0,0xB5, \ +0xFF,0x20,0x01,0x25,0xAD,0x06,0xF5,0x30,0x29,0x69,0x89,0x08,0x03,0xD3,0x01, \ +0x1C,0x01,0x38,0x00,0x29,0xF8,0xD8,0x6A,0x4E,0x00,0x27,0xB0,0x7D,0x6A,0x4C, \ +0x00,0x28,0x11,0xD0,0x30,0x6E,0xFF,0xF7,0xB1,0xFF,0x60,0x79,0x11,0x23,0x9B, \ +0x02,0x03,0x43,0x03,0x22,0xF1,0x6D,0xB0,0x6D,0x12,0x03,0x00,0xF0,0xD3,0xF8, \ +0xB7,0x75,0x01,0x20,0xFE,0xF7,0x6F,0xFD,0xF0,0xBD,0xF0,0x7B,0x00,0x28,0x1A, \ +0xD0,0xA0,0x6B,0x5E,0x49,0x80,0x7D,0x89,0x7A,0x88,0x42,0x00,0xDB,0x08,0x1C, \ +0x5C,0x49,0x40,0x00,0x08,0x5A,0xFF,0xF7,0x93,0xFF,0x60,0x79,0x11,0x23,0x9B, \ +0x02,0x03,0x43,0x03,0x22,0x12,0x03,0x10,0x21,0x57,0x48,0x00,0xF0,0xB5,0xF8, \ +0x01,0x20,0xFE,0xF7,0x52,0xFD,0xF7,0x73,0xF0,0xBD,0xF0,0x7D,0x00,0x28,0xDD, \ +0xD0,0x4E,0x4E,0xF0,0x6E,0xFF,0xF7,0x7D,0xFF,0x76,0x6E,0x70,0x78,0xC0,0x09, \ +0x4E,0xD3,0x4A,0x4A,0xD0,0x7A,0x00,0x28,0x00,0xD0,0xD7,0x72,0x07,0x20,0x40, \ +0x06,0x81,0x69,0x08,0x23,0x19,0x43,0x81,0x61,0x81,0x69,0x99,0x43,0x81,0x61, \ +0xE8,0x68,0x00,0xF0,0x8C,0xFF,0x01,0x23,0x9B,0x03,0x9A,0x08,0x1C,0x21,0x44, \ +0x48,0x00,0xF0,0x8D,0xF8,0xFF,0x20,0x43,0x49,0xF5,0x30,0x4A,0x68,0xD2,0x0B, \ +0x03,0xD3,0x02,0x1C,0x01,0x38,0x00,0x2A,0xF8,0xD1,0x40,0x48,0x03,0x21,0x00, \ +0x78,0x49,0x06,0x02,0x28,0x02,0xD1,0x81,0x20,0x88,0x60,0x01,0xE0,0x01,0x20, \ +0x88,0x60,0x62,0x7A,0x00,0x2A,0x0F,0xD0,0xA0,0x7A,0x00,0x28,0x03,0xD0,0x00, \ +0x28,0x01,0xDD,0x60,0x69,0x01,0xE0,0x60,0x69,0x06,0x38,0xA1,0x89,0x20,0x39, \ +0x02,0x2A,0x08,0xD1,0x2B,0x4A,0xD7,0x75,0x05,0xE0,0x2A,0x4A,0x50,0x6E,0x91, \ +0x6E,0x18,0x30,0x20,0x39,0xD7,0x75,0x62,0x79,0x17,0x23,0x9B,0x02,0x13,0x43, \ +0x01,0x22,0x52,0x03,0x00,0xF0,0x57,0xF8,0x33,0xE0,0x60,0x7A,0x00,0x28,0x23, \ +0xD0,0xA0,0x7A,0x00,0x28,0x01,0xD0,0x18,0x25,0x00,0xE0,0x1E,0x25,0x01,0x23, \ +0x9B,0x03,0x1D,0x48,0x9A,0x08,0x29,0x1C,0x40,0x6E,0x00,0xF0,0x45,0xF8,0xFF, \ +0x20,0x1F,0x49,0xF5,0x30,0x4A,0x68,0xD2,0x0B,0x03,0xD3,0x02,0x1C,0x01,0x38, \ +0x00,0x2A,0xF8,0xD1,0x60,0x79,0x11,0x23,0x9B,0x02,0x03,0x43,0xA0,0x89,0x41, \ +0x1B,0x01,0x22,0x52,0x03,0x60,0x69,0x00,0xF0,0x30,0xF8,0x0A,0xE0,0x60,0x79, \ +0x11,0x23,0x9B,0x02,0x03,0x43,0x0D,0x48,0x03,0x22,0x81,0x6E,0x40,0x6E,0x12, \ +0x03,0x00,0xF0,0x24,0xF8,0x09,0x48,0xC7,0x75,0x08,0x48,0x47,0x77,0x50,0x30, \ +0x07,0x71,0x30,0x79,0x40,0x08,0x02,0xD2,0x60,0x7A,0x01,0x28,0x03,0xD1,0x01, \ +0x20,0xFE,0xF7,0xB5,0xFC,0xF0,0xBD,0x00,0x20,0xFE,0xF7,0xB1,0xFC,0xF0,0xBD, \ +0x44,0x09,0x00,0x02,0x8C,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0xB8,0x01,0x00, \ +0x02,0xEC,0x07,0x00,0x02,0x58,0x07,0x00,0x02,0x40,0x00,0x00,0x04,0x58,0x00, \ +0x00,0x02,0xB0,0xB4,0x06,0x4C,0x1F,0x1C,0x65,0x68,0xEB,0x0B,0x04,0xD2,0x0A, \ 
+0x43,0x21,0x05,0x4A,0x63,0x88,0x63,0x67,0x60,0xB0,0xBC,0xF7,0x46,0x00,0x00, \ +0x40,0x00,0x00,0x04,0xF0,0xB5,0x52,0x49,0x07,0x1C,0x8A,0x7A,0x00,0x20,0x00, \ +0x2A,0x61,0xD1,0x0A,0x7A,0x00,0x2A,0x6B,0xD0,0x4A,0x7A,0x01,0x2A,0x5B,0xD1, \ +0x0A,0x7B,0x01,0x2A,0x58,0xD1,0xCA,0x7A,0x00,0x2A,0x55,0xD1,0xCE,0x1D,0x49, \ +0x36,0xF1,0x78,0xF5,0x1F,0x39,0x3D,0x00,0x29,0x0F,0xD1,0x45,0x49,0xCA,0x1D, \ +0x69,0x32,0x12,0x78,0x00,0x2A,0x09,0xD1,0x6A,0x7B,0x01,0x2A,0x06,0xD0,0x32, \ +0x79,0x00,0x2A,0x03,0xD1,0x0C,0x1C,0x89,0x7C,0x00,0x29,0x09,0xD0,0x3E,0x4A, \ +0x3F,0x4B,0xD1,0x79,0xD9,0x71,0x3B,0x49,0x88,0x75,0x01,0x20,0xC8,0x75,0xCC, \ +0x6E,0x21,0xE0,0x21,0x7F,0x00,0x29,0x12,0xD0,0x39,0x4B,0x37,0x4A,0x99,0x6B, \ +0x89,0x7D,0x92,0x7A,0x91,0x42,0x01,0xDA,0xD9,0x71,0x00,0xE0,0xDA,0x71,0xA0, \ +0x75,0x01,0x21,0xE1,0x73,0xE0,0x75,0xD8,0x79,0x32,0x49,0x40,0x00,0x0C,0x5A, \ +0x0B,0xE0,0xE2,0x7E,0x21,0x1C,0x00,0x2A,0x27,0xD0,0xCC,0x6E,0x88,0x75,0x01, \ +0x20,0x2C,0x4B,0xC8,0x75,0x98,0x6B,0x80,0x7D,0xD8,0x71,0x00,0xF0,0x40,0xF9, \ +0x26,0x4B,0xD8,0x7B,0x00,0x28,0x03,0xD0,0x00,0x20,0xFE,0xF7,0x73,0xFC,0x0C, \ +0xE0,0x24,0x4B,0xD8,0x79,0x03,0x28,0x05,0xD1,0x20,0x4B,0x58,0x6B,0xFE,0xF7, \ +0x6A,0xFC,0x03,0xE0,0x3A,0xE0,0x00,0x20,0xFE,0xF7,0x65,0xFC,0x01,0x21,0x89, \ +0x06,0x00,0x2F,0x05,0xD0,0x05,0x2F,0x03,0xD9,0x48,0x6A,0x38,0x18,0x02,0xE0, \ +0x2D,0xE0,0x48,0x6A,0x0A,0x30,0x16,0x4B,0x02,0x22,0x9A,0x73,0x18,0x4B,0x92, \ +0x03,0x5A,0x60,0x08,0x62,0x6A,0x7B,0x01,0x2A,0x02,0xD0,0x32,0x79,0x00,0x2A, \ +0x15,0xD0,0x10,0x4A,0x8D,0x6A,0x4E,0x6A,0xD1,0x79,0x13,0x4A,0x12,0x4F,0x89, \ +0x00,0x51,0x58,0x0D,0x4B,0x08,0x18,0x38,0x60,0xD8,0x79,0x00,0xF0,0x44,0xFD, \ +0x39,0x68,0x40,0x18,0x38,0x60,0xB0,0x42,0x00,0xD2,0x01,0x35,0x7D,0x60,0x38, \ +0x1D,0x06,0x4F,0x3C,0x60,0xF8,0x79,0x00,0xF0,0x37,0xFD,0x39,0x68,0x40,0x18, \ +0x38,0x60,0x01,0x20,0xF0,0xBD,0x44,0x09,0x00,0x02,0x04,0x01,0x00,0x02,0x8C, \ +0x01,0x00,0x02,0xB8,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0x1C,0x08,0x00,0x02, \ +0x1C,0x09,0x00,0x02,0xF8,0xB5,0x38,0x49,0x04,0x1C,0x88,0x6B,0x37,0x4A,0x85, \ +0x7D,0x46,0x68,0x92,0x7A,0x00,0x27,0x95,0x42,0x00,0xDB,0x15,0x1C,0x34,0x49, \ +0x80,0x8A,0x49,0x89,0x88,0x42,0x2E,0xDD,0x00,0x2C,0x2C,0xD1,0x2F,0x49,0x88, \ +0x7A,0x00,0x28,0x28,0xD1,0x30,0x49,0xB4,0x20,0x08,0x70,0x2F,0x48,0x30,0x4A, \ +0xC0,0x88,0x41,0x00,0x09,0x18,0x68,0x00,0x10,0x5A,0x40,0x00,0x08,0x18,0x2D, \ +0x49,0xC9,0x6E,0x40,0x18,0x28,0x49,0x48,0x80,0x28,0x1C,0x00,0xF0,0xF9,0xFC, \ +0x26,0x49,0x49,0x88,0x40,0x18,0x24,0x49,0x48,0x80,0x31,0x1D,0x06,0x22,0x26, \ +0x48,0x02,0xF0,0x29,0xFF,0xF1,0x1D,0x03,0x31,0x06,0x22,0x24,0x48,0x02,0xF0, \ +0x23,0xFF,0x01,0x20,0x20,0x49,0x01,0x26,0x08,0x77,0x03,0xE0,0x01,0x20,0x1E, \ +0x49,0x00,0x26,0xC8,0x76,0xFF,0xF7,0x6A,0xFD,0xFF,0xF7,0xD4,0xFD,0x00,0x90, \ +0x00,0x98,0x00,0x28,0x1E,0xD1,0x12,0x49,0x00,0x2E,0x8A,0x6B,0x50,0x73,0x01, \ +0xD1,0x00,0x2C,0x01,0xD0,0x01,0x2E,0x19,0xD1,0x13,0x4A,0x68,0x00,0x10,0x5A, \ +0x10,0x4A,0xD2,0x88,0x49,0x8C,0x80,0x18,0x41,0x18,0x01,0x20,0x00,0xF0,0x12, \ +0xF9,0x01,0x2E,0x03,0xD1,0x0D,0x49,0x03,0x20,0x48,0x60,0x02,0xE0,0x0B,0x49, \ +0x02,0x20,0x48,0x60,0x01,0x27,0x03,0xE0,0x03,0x49,0x04,0x20,0x89,0x6B,0x48, \ +0x73,0x38,0x1C,0xF8,0xBD,0x00,0x00,0x8C,0x01,0x00,0x02,0x04,0x01,0x00,0x02, \ +0xC0,0x00,0x00,0x02,0xEC,0x07,0x00,0x02,0x00,0x00,0x00,0x02,0xB0,0x01,0x00, \ +0x02,0x44,0x09,0x00,0x02,0xF0,0x07,0x00,0x02,0xF6,0x07,0x00,0x02,0x90,0xB5, \ +0x04,0x31,0xCF,0x00,0x01,0x28,0x17,0xD0,0x02,0x28,0x17,0xD0,0x03,0x28,0x27, \ +0xD1,0x0B,0x20,0x39,0x1C,0x02,0xF0,0x11,0xFF,0x0C,0x1C,0x79,0x1A,0x0B,0x20, \ 
+0x02,0xF0,0x0C,0xFF,0x07,0x1C,0x00,0x21,0x00,0x2C,0x0F,0x48,0x18,0xD9,0x01, \ +0x37,0x04,0x2C,0x13,0xD2,0x01,0x21,0x41,0x63,0x13,0xE0,0x7F,0x08,0x11,0xE0, \ +0x79,0x00,0x0B,0x20,0x0F,0x1C,0x02,0xF0,0xFA,0xFE,0x0C,0x1C,0x79,0x1A,0x0B, \ +0x20,0x02,0xF0,0xF5,0xFE,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37,0x02,0xE0, \ +0x41,0x63,0x00,0xE0,0x41,0x63,0x38,0x1C,0x90,0xBD,0x00,0x00,0x44,0x09,0x00, \ +0x02,0xFF,0x21,0x10,0x48,0x31,0x31,0x01,0x80,0x0F,0x49,0x09,0x8C,0xCA,0x1D, \ +0x31,0x32,0x42,0x80,0xCA,0x1D,0x0E,0x32,0x82,0x80,0x0B,0x31,0xC1,0x80,0x0B, \ +0x48,0xA0,0x21,0x01,0x80,0x50,0x21,0x41,0x80,0x1E,0x21,0x81,0x80,0x0F,0x21, \ +0xC1,0x80,0x08,0x48,0xC0,0x21,0x01,0x60,0x60,0x21,0x41,0x60,0x23,0x21,0x81, \ +0x60,0x12,0x21,0xC1,0x60,0xF7,0x46,0x00,0x00,0xB0,0x01,0x00,0x02,0x8C,0x01, \ +0x00,0x02,0xB8,0x01,0x00,0x02,0x1C,0x09,0x00,0x02,0x00,0xB5,0x07,0x48,0xC1, \ +0x79,0x82,0x79,0x91,0x42,0x07,0xD0,0xC1,0x79,0x81,0x71,0x82,0x79,0x04,0x49, \ +0x89,0x5C,0x41,0x71,0xFE,0xF7,0xFA,0xFA,0x00,0xBD,0x00,0x00,0x8C,0x01,0x00, \ +0x02,0xC0,0x01,0x00,0x02,0x05,0x48,0x81,0x8F,0x49,0x00,0x01,0x31,0x81,0x87, \ +0x04,0x49,0x82,0x8F,0xC9,0x89,0x8A,0x42,0x00,0xDD,0x81,0x87,0xF7,0x46,0x44, \ +0x09,0x00,0x02,0xC0,0x00,0x00,0x02,0x1A,0x49,0x19,0x48,0x89,0x6B,0x1A,0x4B, \ +0x89,0x7D,0x42,0x78,0x5B,0x5C,0x00,0x21,0x9A,0x42,0x15,0xDD,0x41,0x70,0x01, \ +0x70,0xC2,0x78,0x01,0x21,0x00,0x2A,0x0D,0xDD,0xC2,0x78,0x04,0x2A,0x0A,0xDA, \ +0xC2,0x78,0x01,0x3A,0xC2,0x70,0xC2,0x78,0x00,0x2A,0x04,0xD1,0x10,0x4A,0x52, \ +0x7A,0x01,0x2A,0x00,0xD1,0xC1,0x70,0x81,0x70,0xF7,0x46,0x82,0x78,0x00,0x2A, \ +0xFB,0xD0,0x02,0x78,0x02,0x2A,0xF8,0xDD,0x41,0x70,0x01,0x70,0xC2,0x78,0x01, \ +0x32,0x12,0x06,0x12,0x0E,0xC2,0x70,0x03,0x2A,0xEF,0xDD,0x81,0x70,0x03,0x21, \ +0xC1,0x70,0xF7,0x46,0x00,0x00,0xC8,0x01,0x00,0x02,0x8C,0x01,0x00,0x02,0xD6, \ +0x01,0x00,0x02,0x04,0x01,0x00,0x02,0x00,0xB5,0x02,0xF0,0x97,0xFE,0x02,0x49, \ +0x8A,0x8F,0x10,0x40,0x48,0x62,0x00,0xBD,0x44,0x09,0x00,0x02,0xB0,0xB5,0x01, \ +0x20,0x80,0x06,0x85,0x6A,0x41,0x6A,0x0E,0x48,0x00,0x88,0x84,0x02,0x20,0x1C, \ +0x02,0xF0,0x49,0xFE,0x0F,0x1C,0x00,0x2D,0x10,0xD9,0x20,0x1C,0x29,0x1C,0x02, \ +0xF0,0x42,0xFE,0x0D,0x1C,0x00,0x21,0xC9,0x43,0x20,0x1C,0x02,0xF0,0x3C,0xFE, \ +0x48,0x1C,0x45,0x43,0xE9,0x19,0x20,0x1C,0x02,0xF0,0x36,0xFE,0x0F,0x1C,0x38, \ +0x1C,0xB0,0xBD,0x00,0x00,0x80,0x00,0x00,0x02,0x90,0xB5,0x0C,0x1C,0x07,0x1C, \ +0x00,0xF0,0x15,0xF8,0x01,0x20,0x80,0x06,0x40,0x6A,0x06,0x4B,0x20,0x18,0xB9, \ +0x00,0xC9,0x18,0xC8,0x62,0x01,0x21,0x78,0x1E,0x81,0x40,0x03,0x48,0x82,0x69, \ +0x11,0x43,0x81,0x61,0x90,0xBD,0x00,0x00,0x40,0x00,0x00,0x04,0x80,0x00,0x00, \ +0x04,0x80,0xB4,0x47,0x1E,0x01,0x20,0x04,0x49,0xB8,0x40,0x8A,0x69,0xC0,0x43, \ +0x10,0x40,0x88,0x61,0x80,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00,0x00,0x04,0xF0, \ +0xB5,0x84,0xB0,0x0F,0x20,0x00,0x06,0x00,0x88,0xE5,0x4C,0x00,0x27,0x03,0x90, \ +0xE0,0x7C,0x00,0x28,0x04,0xD0,0x03,0x98,0x06,0xF0,0x12,0xF8,0x00,0x28,0x60, \ +0xD1,0x03,0x98,0xE0,0x4B,0x18,0x40,0x1C,0xD0,0xDF,0x48,0x00,0x68,0x02,0x90, \ +0x02,0x98,0x80,0x08,0x02,0xD2,0x02,0x98,0x40,0x08,0x13,0xD3,0x01,0x20,0x80, \ +0x06,0x00,0x6B,0x02,0x99,0x40,0x00,0x40,0x08,0xC9,0x08,0x05,0xD3,0xE0,0x62, \ +0x02,0x27,0x07,0x20,0xFF,0xF7,0xC8,0xFF,0x05,0xE0,0x20,0x63,0x01,0x27,0xFA, \ +0x21,0x07,0x20,0xFF,0xF7,0xA7,0xFF,0x03,0x98,0xD1,0x4B,0xCE,0x49,0x18,0x40, \ +0xCD,0x1D,0xCE,0x1D,0x49,0x36,0x09,0x35,0x00,0x28,0x58,0xD0,0xD8,0x04,0xC1, \ +0x6B,0x01,0x91,0x01,0x99,0x89,0x09,0x0D,0xD3,0x00,0x6A,0x40,0x00,0x40,0x08, \ +0x20,0x63,0xA0,0x7A,0x00,0x28,0x01,0xD0,0x00,0xF0,0xDA,0xFA,0xFF,0xF7,0x64, \ 
+0xFC,0x01,0x27,0x01,0x22,0x62,0x73,0x01,0x98,0x12,0x23,0x18,0x40,0x40,0xD0, \ +0x00,0x20,0x60,0x73,0xA0,0x7A,0x00,0x28,0x19,0xD0,0x01,0x98,0x80,0x08,0x0E, \ +0xD3,0x20,0x6B,0x21,0x6E,0x40,0x18,0xE0,0x62,0xBC,0x48,0x00,0x78,0x00,0xF0, \ +0x27,0xFB,0xE1,0x6A,0x40,0x18,0xE0,0x62,0xE0,0x6A,0x40,0x00,0x40,0x08,0xE0, \ +0x62,0x01,0x20,0xFE,0xF7,0xD5,0xF9,0x00,0x20,0xA0,0x72,0xA0,0x75,0x0C,0xE0, \ +0x55,0xE1,0xFF,0xF7,0x36,0xFF,0x01,0x98,0x80,0x08,0x06,0xD3,0xB1,0x49,0x20, \ +0x6B,0x09,0x68,0x40,0x18,0x40,0x00,0x40,0x08,0xE0,0x62,0x00,0x2F,0x00,0xD1, \ +0x02,0x27,0x01,0x98,0x40,0x09,0x02,0xD3,0x01,0x20,0xFE,0xF7,0xBB,0xF9,0xA0, \ +0x7B,0x02,0x28,0x09,0xD1,0x00,0x20,0x68,0x73,0x30,0x71,0x00,0xF0,0x41,0xFA, \ +0x01,0x99,0xA5,0x48,0x00,0x22,0x01,0xF0,0xF0,0xFF,0x03,0x98,0xA4,0x4B,0x18, \ +0x40,0x73,0xD0,0x19,0x05,0xC8,0x68,0x00,0x90,0x00,0x98,0x40,0x09,0x14,0xD3, \ +0xE0,0x7A,0x03,0x28,0x11,0xD1,0x04,0x20,0xE0,0x72,0x00,0x98,0xC9,0x68,0x08, \ +0x43,0x9C,0x49,0x00,0x90,0x08,0x68,0x40,0x68,0x40,0x78,0xC0,0x09,0x05,0xD3, \ +0x00,0x98,0x40,0x08,0x02,0xD2,0x98,0x49,0x00,0x20,0x48,0x71,0x00,0x98,0x80, \ +0x08,0x3F,0xD3,0x07,0x20,0xFF,0xF7,0x39,0xFF,0xB0,0x79,0x01,0x28,0x0E,0xD1, \ +0xE0,0x1D,0x69,0x30,0x81,0x7A,0x01,0x29,0x09,0xD1,0x02,0x21,0x81,0x72,0x90, \ +0x48,0x01,0x8B,0xC0,0x8A,0x08,0x1A,0x81,0x02,0x04,0x20,0xFF,0xF7,0x0D,0xFF, \ +0x60,0x7A,0x06,0x28,0x04,0xD1,0x02,0x21,0x61,0x72,0x08,0x20,0xFF,0xF7,0x1F, \ +0xFF,0x00,0x20,0x86,0x49,0x01,0x22,0xC8,0x80,0x22,0x73,0xE0,0x72,0xA0,0x72, \ +0xA0,0x75,0x20,0x74,0x08,0x71,0x4A,0x71,0xFE,0xF7,0xB6,0xFB,0x00,0x99,0x08, \ +0x43,0x00,0x90,0x60,0x68,0x04,0x28,0x0F,0xD1,0x01,0x20,0xFF,0xF7,0x09,0xFF, \ +0x20,0x7B,0x01,0x28,0x09,0xD1,0xE0,0x7A,0x00,0x28,0x06,0xD1,0xFE,0xF7,0x2B, \ +0xFE,0x00,0x22,0x10,0x21,0x79,0x48,0x01,0xF0,0x8E,0xFF,0x00,0x98,0x80,0x09, \ +0x60,0xD3,0x01,0x20,0x20,0x73,0x20,0x74,0x71,0x49,0x87,0x06,0xF8,0x6A,0x09, \ +0x68,0x48,0x61,0xF8,0x6A,0x40,0x00,0x40,0x08,0xE0,0x62,0x60,0x7A,0x05,0x28, \ +0x0C,0xD1,0xA0,0x6B,0x79,0x6A,0x88,0x42,0x00,0xE0,0xB1,0xE0,0x06,0xD2,0x02, \ +0x21,0x6C,0x48,0x61,0x72,0x81,0x69,0x80,0x23,0x99,0x43,0x81,0x61,0x00,0x98, \ +0xC0,0x08,0x0E,0xD3,0x64,0x48,0x01,0x21,0x01,0x71,0xC1,0x88,0x00,0x29,0x33, \ +0xDD,0xC1,0x88,0x01,0x23,0xDB,0x03,0x99,0x42,0x2E,0xDA,0xC0,0x88,0xFF,0xF7, \ +0x92,0xF8,0x2A,0xE0,0x5D,0x49,0x00,0x20,0x08,0x71,0xA0,0x72,0xC1,0x20,0x20, \ +0x60,0x01,0x20,0xFE,0xF7,0x12,0xF9,0x20,0x7E,0x01,0x28,0x14,0xD1,0x61,0x7E, \ +0x00,0x29,0x00,0xD0,0x00,0x20,0x60,0x76,0x58,0x48,0xC1,0x78,0x89,0x06,0x89, \ +0x0E,0xC1,0x70,0x61,0x7E,0x01,0x29,0x03,0xD1,0xC1,0x78,0x40,0x23,0x19,0x43, \ +0xC1,0x70,0xC1,0x78,0x03,0x20,0xFE,0xF7,0x84,0xF9,0x60,0x68,0x04,0x28,0x06, \ +0xD1,0xFE,0xF7,0xCF,0xFD,0x00,0x22,0x10,0x21,0x4B,0x48,0x01,0xF0,0x32,0xFF, \ +0x40,0x48,0x00,0x68,0x02,0x90,0x02,0x98,0xC0,0x08,0x02,0xD3,0x02,0x27,0x09, \ +0xE0,0x30,0xE0,0x38,0x6B,0x40,0x00,0x40,0x08,0x20,0x63,0x01,0x27,0xFA,0x21, \ +0x07,0x20,0xFF,0xF7,0x74,0xFE,0xA0,0x7E,0x00,0x28,0x22,0xD0,0x3D,0x48,0x00, \ +0x79,0x00,0x28,0x1E,0xD0,0x20,0x7C,0x00,0x28,0x1B,0xD0,0x20,0x68,0x00,0x28, \ +0x18,0xD1,0x06,0x20,0xFF,0xF7,0x7E,0xFE,0x00,0x20,0xE8,0x73,0xA0,0x76,0x70, \ +0x70,0x69,0x7B,0x01,0x29,0x02,0xD0,0x69,0x7B,0x03,0x29,0x0B,0xD1,0x69,0x7B, \ +0x01,0x29,0x07,0xD1,0xA1,0x73,0x68,0x73,0x00,0x22,0x10,0x21,0x2B,0x48,0x01, \ +0xF0,0xFC,0xFE,0x00,0xE0,0x68,0x73,0x00,0x20,0xA0,0x76,0xE0,0x7A,0x04,0x28, \ +0x2F,0xD1,0x20,0x7C,0x00,0x28,0x2C,0xD0,0x60,0x7B,0x00,0x28,0x02,0xD1,0x00, \ +0x2F,0x00,0xD1,0x02,0x27,0x00,0x20,0xE0,0x72,0x23,0x4D,0x20,0x74,0x29,0x79, \ 
+0x01,0x29,0x15,0xD1,0x21,0x68,0x00,0x29,0x12,0xD1,0x69,0x79,0x00,0x29,0x0F, \ +0xD0,0x1D,0x49,0x09,0x68,0x48,0x72,0xA9,0x68,0xE9,0x60,0x28,0x70,0xFE,0xF7, \ +0x17,0xFA,0x28,0x78,0x01,0x28,0x04,0xD1,0x00,0x22,0x01,0x21,0x19,0x48,0x01, \ +0xF0,0xCF,0xFE,0x60,0x68,0x04,0x28,0x06,0xD1,0xFE,0xF7,0x62,0xFD,0x00,0x22, \ +0x10,0x21,0x14,0x48,0x01,0xF0,0xC5,0xFE,0x01,0x2F,0x02,0xD1,0x00,0xF0,0x19, \ +0xF9,0x03,0xE0,0x02,0x2F,0x01,0xD1,0x00,0xF0,0x44,0xF9,0x03,0x98,0x11,0x4B, \ +0x18,0x40,0x01,0xD0,0x01,0xF0,0xE0,0xF8,0x04,0xB0,0xF0,0xBD,0x44,0x09,0x00, \ +0x02,0x20,0x20,0x00,0x00,0x40,0x00,0x00,0x04,0x80,0x80,0x00,0x00,0x93,0x01, \ +0x00,0x02,0x8C,0x01,0x00,0x02,0xD8,0x06,0x00,0x02,0x40,0x40,0x00,0x00,0x48, \ +0x01,0x00,0x02,0xDC,0x01,0x00,0x02,0xC0,0x00,0x00,0x02,0x38,0x07,0x00,0x02, \ +0x80,0x00,0x00,0x04,0xCC,0x07,0x00,0x02,0x08,0x08,0x00,0x00,0xF0,0xB5,0x0F, \ +0x20,0x00,0x06,0x05,0x89,0x63,0x48,0x64,0x4E,0x28,0x40,0x01,0x24,0x00,0x28, \ +0x6E,0xD0,0x62,0x49,0xCF,0x69,0x78,0x08,0x3E,0xD3,0x88,0x69,0x40,0x08,0x3B, \ +0xD3,0x88,0x69,0xA0,0x43,0x88,0x61,0x5E,0x48,0x41,0x68,0x04,0x29,0x0A,0xD1, \ +0xFE,0xF7,0x14,0xFD,0x01,0x20,0xFE,0xF7,0x37,0xF8,0x00,0x22,0x10,0x21,0x5A, \ +0x48,0x01,0xF0,0x74,0xFE,0x29,0xE0,0x42,0x68,0x58,0x49,0x02,0x2A,0x05,0xD1, \ +0x09,0x68,0x4C,0x73,0x57,0x49,0x09,0x68,0x4C,0x70,0x05,0xE0,0x42,0x68,0x03, \ +0x2A,0x02,0xD1,0x09,0x68,0x02,0x22,0x4A,0x73,0x4F,0x48,0x44,0x60,0x00,0xF0, \ +0xAB,0xF8,0x01,0x20,0xFE,0xF7,0x1A,0xF8,0x4B,0x48,0x40,0x7C,0x01,0x28,0x05, \ +0xD1,0x00,0x22,0x10,0x21,0x30,0x1C,0x01,0xF0,0x53,0xFE,0x08,0xE0,0x46,0x48, \ +0x40,0x7C,0x02,0x28,0x04,0xD1,0x00,0x22,0x10,0x21,0x47,0x48,0x01,0xF0,0x49, \ +0xFE,0x41,0x48,0x80,0x69,0xC0,0x09,0x03,0xD3,0xF8,0x09,0x01,0xD3,0x07,0xF0, \ +0x0F,0xFB,0x3D,0x48,0x80,0x69,0x00,0x0A,0x32,0xD3,0x38,0x0A,0x30,0xD3,0x3A, \ +0x48,0x80,0x23,0x81,0x69,0x99,0x43,0x81,0x61,0x38,0x48,0x41,0x7A,0x05,0x29, \ +0x02,0xD0,0x41,0x7A,0x06,0x29,0x15,0xD1,0x00,0x21,0x81,0x63,0x01,0x7A,0x01, \ +0x29,0x0D,0xD1,0x44,0x72,0x81,0x7A,0x00,0x29,0x1B,0xD1,0x01,0x7B,0x01,0x29, \ +0x18,0xD1,0xC0,0x7A,0x00,0x28,0x15,0xD1,0xFF,0xF7,0xCC,0xF9,0x12,0xE0,0x3A, \ +0xE0,0x02,0x22,0x42,0x72,0x0E,0xE0,0x44,0x72,0x41,0x7F,0x01,0x29,0x02,0xD1, \ +0x00,0x21,0x81,0x62,0x01,0xE0,0x00,0x21,0x41,0x62,0x80,0x7B,0x03,0x28,0x02, \ +0xD1,0x00,0x20,0xFF,0xF7,0x38,0xFB,0x38,0x09,0x05,0xD3,0x20,0x48,0x80,0x69, \ +0x00,0x09,0x01,0xD3,0x04,0xF0,0x4E,0xFA,0xB8,0x08,0x16,0xD3,0x1C,0x48,0x81, \ +0x69,0x89,0x08,0x12,0xD3,0x81,0x69,0x02,0x23,0x99,0x43,0x81,0x61,0x19,0x48, \ +0xC1,0x1D,0x49,0x31,0x89,0x79,0x05,0x29,0x08,0xD1,0x1B,0x49,0x49,0x79,0x03, \ +0x29,0x04,0xD1,0x70,0x30,0x81,0x78,0x08,0x23,0x19,0x43,0x81,0x70,0xB8,0x09, \ +0x05,0xD3,0x10,0x48,0x80,0x69,0x80,0x09,0x01,0xD3,0x07,0xF0,0x57,0xFA,0x14, \ +0x48,0x28,0x40,0x06,0xD0,0x13,0x48,0x00,0x21,0x04,0x70,0x79,0x20,0x40,0x05, \ +0x01,0x83,0x81,0x82,0xFF,0x20,0x02,0x30,0x28,0x40,0x06,0xD0,0x07,0xF0,0x8B, \ +0xFA,0x00,0x22,0x10,0x21,0x30,0x1C,0x01,0xF0,0xCC,0xFD,0xF0,0xBD,0x00,0x00, \ +0x10,0x10,0x00,0x00,0xD8,0x06,0x00,0x02,0x80,0x00,0x00,0x04,0x44,0x09,0x00, \ +0x02,0x38,0x07,0x00,0x02,0xC4,0x01,0x00,0x02,0xD0,0x01,0x00,0x02,0xF8,0x06, \ +0x00,0x02,0xB0,0x00,0x00,0x02,0x02,0x02,0x00,0x00,0xD4,0x01,0x00,0x02,0x04, \ +0x48,0x01,0x21,0x81,0x73,0x00,0x21,0xC1,0x75,0xC1,0x73,0xC1,0x76,0x01,0x77, \ +0xF7,0x46,0x00,0x00,0x44,0x09,0x00,0x02,0x80,0xB5,0x16,0x4F,0x00,0x20,0x38, \ +0x72,0x79,0x7A,0x02,0x20,0x01,0x29,0x1C,0xD0,0x04,0x29,0x19,0xD1,0x78,0x72, \ +0x08,0x20,0xFF,0xF7,0x08,0xFD,0x38,0x6B,0xF9,0x6A,0x40,0x1A,0x40,0x00,0x39, \ 
+0x6A,0x40,0x08,0x81,0x42,0x0D,0xD2,0x39,0x6A,0x41,0x1A,0x14,0x20,0x02,0xF0, \ +0x11,0xFB,0x79,0x7F,0x01,0x29,0x08,0xD1,0xB9,0x6A,0x81,0x42,0x02,0xD3,0xB9, \ +0x6A,0x08,0x1A,0xB8,0x62,0x80,0xBD,0x78,0x72,0x80,0xBD,0x79,0x6A,0x81,0x42, \ +0xF9,0xD3,0x79,0x6A,0x08,0x1A,0x78,0x62,0x80,0xBD,0x44,0x09,0x00,0x02,0x00, \ +0xB5,0x0A,0x48,0x01,0x21,0x01,0x72,0x01,0x7B,0x01,0x29,0x0D,0xD1,0xC1,0x7A, \ +0x00,0x29,0x0A,0xD1,0x81,0x7A,0x00,0x29,0x07,0xD1,0x41,0x7A,0x06,0x29,0x04, \ +0xD0,0x40,0x7A,0x05,0x28,0x01,0xD0,0xFF,0xF7,0x11,0xF9,0x00,0xBD,0x44,0x09, \ +0x00,0x02,0xB0,0xB5,0x20,0x4F,0x20,0x48,0x79,0x7D,0x80,0x7A,0x20,0x4C,0x81, \ +0x42,0x02,0xDA,0x78,0x7D,0x20,0x70,0x00,0xE0,0x20,0x70,0xFF,0xF7,0x18,0xFC, \ +0x20,0x78,0x03,0x28,0x03,0xD1,0x01,0x20,0xFD,0xF7,0x4C,0xFF,0x02,0xE0,0x00, \ +0x20,0xFD,0xF7,0x48,0xFF,0xB8,0x7A,0x17,0x4D,0x02,0x28,0x02,0xD1,0xC4,0x20, \ +0x28,0x70,0x04,0xE0,0xB8,0x7A,0x01,0x28,0x01,0xD1,0xD4,0x20,0x28,0x70,0x00, \ +0x20,0x68,0x70,0x69,0x88,0x11,0x48,0x00,0x29,0x07,0xD0,0x23,0x78,0x10,0x4A, \ +0x5B,0x00,0xC3,0x5A,0xD2,0x88,0xD2,0x18,0x89,0x1A,0x69,0x80,0x0A,0x21,0xF9, \ +0x65,0x21,0x78,0x49,0x00,0x40,0x5A,0x38,0x66,0x20,0x78,0x00,0xF0,0x2B,0xF8, \ +0x39,0x6E,0x08,0x1A,0x38,0x66,0xBD,0x65,0x01,0x20,0xB8,0x75,0xB0,0xBD,0x44, \ +0x09,0x00,0x02,0x04,0x01,0x00,0x02,0x93,0x01,0x00,0x02,0x2C,0x09,0x00,0x02, \ +0xB0,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xB0,0xB4,0x0A,0x4B,0x00,0x24,0x99, \ +0x42,0x01,0xD8,0x00,0x29,0x02,0xD1,0x20,0x1C,0xB0,0xBC,0xF7,0x46,0x01,0x27, \ +0xBF,0x06,0x3D,0x69,0xAB,0x08,0x01,0xD3,0x20,0x1C,0xF6,0xE7,0x03,0xC7,0x08, \ +0x3F,0x3A,0x61,0x01,0x20,0xF1,0xE7,0x0E,0x06,0x00,0x00,0x00,0x28,0x01,0xD1, \ +0xC0,0x20,0xF7,0x46,0x01,0x48,0x00,0x88,0xF7,0x46,0x00,0x00,0xAC,0x01,0x00, \ +0x02,0xF8,0xB5,0x3D,0x48,0x00,0x90,0x3D,0x48,0xC4,0x1D,0x49,0x34,0xC7,0x1D, \ +0x09,0x37,0x3B,0x4D,0x3C,0x4E,0x00,0xF0,0xFA,0xFC,0x00,0xF0,0xEA,0xF8,0x30, \ +0x78,0x00,0x28,0x04,0xD1,0xA0,0x79,0x05,0x28,0x01,0xD0,0x06,0xF0,0xEE,0xFC, \ +0x28,0x78,0x00,0x28,0xF0,0xD0,0xB8,0x7B,0x00,0x28,0xED,0xD1,0x33,0x48,0x01, \ +0x78,0x01,0x29,0x03,0xD1,0x00,0x21,0x01,0x70,0x04,0xF0,0xE0,0xF9,0x30,0x48, \ +0x00,0x78,0x02,0x28,0x46,0xD0,0x2F,0x48,0x00,0x78,0x02,0x28,0x01,0xD1,0x06, \ +0xF0,0x0C,0xFD,0x06,0xF0,0x2E,0xFE,0x28,0x4E,0x05,0x1C,0x30,0x78,0x01,0x28, \ +0x09,0xD1,0x03,0x03,0x9D,0x42,0x03,0xD1,0x28,0x49,0x10,0x20,0x09,0x68,0x08, \ +0x73,0x00,0x21,0xB9,0x73,0xC8,0xE7,0x00,0x2D,0x23,0xD0,0x01,0x23,0x1B,0x03, \ +0x9D,0x42,0x08,0xD0,0x23,0x48,0x80,0x21,0x02,0x68,0x11,0x70,0x02,0x68,0x00, \ +0x21,0x51,0x70,0x00,0x68,0x81,0x70,0xA0,0x79,0x05,0x28,0x0D,0xD1,0x00,0x98, \ +0x40,0x79,0x01,0x28,0x09,0xDD,0xC0,0x20,0x01,0xF0,0xF4,0xFD,0x06,0x1C,0x28, \ +0x1C,0x06,0xF0,0x0A,0xFD,0x30,0x1C,0x01,0xF0,0xED,0xFD,0x29,0x1C,0x00,0x22, \ +0x16,0x48,0x01,0xF0,0x8C,0xFC,0xA2,0xE7,0x00,0x98,0x40,0x79,0x01,0x28,0x9E, \ +0xDD,0xA0,0x79,0x05,0x28,0x9B,0xD1,0x00,0xF0,0x22,0xF8,0x98,0xE7,0x06,0xF0, \ +0xED,0xFD,0x01,0x23,0x1B,0x03,0x98,0x42,0x03,0xD1,0x0A,0x49,0x10,0x20,0x09, \ +0x68,0x08,0x73,0x00,0x21,0xB9,0x73,0x8B,0xE7,0xB0,0x00,0x00,0x02,0x44,0x09, \ +0x00,0x02,0x5E,0x02,0x00,0x02,0xB3,0x02,0x00,0x02,0xD5,0x01,0x00,0x02,0x53, \ +0x02,0x00,0x02,0x3C,0x01,0x00,0x02,0xC4,0x01,0x00,0x02,0xD0,0x01,0x00,0x02, \ +0x18,0x07,0x00,0x02,0x80,0xB5,0xC0,0x20,0x01,0xF0,0xB8,0xFD,0x07,0x1C,0x0D, \ +0x48,0x81,0x78,0x49,0x08,0x89,0x07,0x11,0xD1,0x81,0x78,0x09,0x09,0x0E,0xD3, \ +0x0A,0x49,0x09,0x68,0x09,0x7B,0x09,0x0A,0x09,0xD2,0xC1,0x78,0x00,0x29,0x04, \ +0xD0,0x00,0x21,0xC1,0x70,0x01,0x21,0x81,0x71,0x01,0xE0,0x06,0xF0,0xE8,0xFC, \ 
+0x38,0x1C,0x01,0xF0,0x9D,0xFD,0x80,0xBD,0xB4,0x09,0x00,0x02,0xC4,0x01,0x00, \ +0x02,0xB0,0xB5,0x1C,0x4C,0x01,0x20,0x1C,0x4D,0xA0,0x77,0x28,0x68,0x00,0xF0, \ +0xBE,0xFE,0x29,0x68,0x00,0x20,0x4F,0x68,0x88,0x73,0x18,0x49,0x8A,0x78,0x00, \ +0x2A,0x00,0xD1,0x48,0x70,0x38,0x78,0x08,0x28,0x19,0xD1,0x20,0x7D,0x01,0x28, \ +0x06,0xD1,0x06,0x22,0xF8,0x1D,0x09,0x30,0x12,0x49,0x02,0xF0,0x5F,0xF9,0x0F, \ +0xE0,0x20,0x7D,0x02,0x28,0x0C,0xD1,0x10,0x48,0x40,0x79,0x02,0x28,0x08,0xD1, \ +0xE0,0x1D,0x49,0x30,0x80,0x79,0x05,0x28,0x03,0xD1,0x78,0x78,0x10,0x23,0x18, \ +0x43,0x78,0x70,0xF8,0x1D,0x0F,0x30,0xFD,0xF7,0x22,0xFF,0x38,0x1C,0x06,0xF0, \ +0x27,0xFA,0x29,0x68,0x80,0x20,0x08,0x73,0x40,0x01,0xB0,0xBD,0x44,0x09,0x00, \ +0x02,0xC4,0x01,0x00,0x02,0xC8,0x01,0x00,0x02,0xFC,0x00,0x00,0x02,0xB0,0x00, \ +0x00,0x02,0x00,0xB5,0x05,0x48,0x01,0x78,0x00,0x29,0x04,0xD0,0x40,0x78,0x00, \ +0x28,0x01,0xD1,0x04,0xF0,0xD6,0xF8,0x00,0xBD,0x00,0x00,0xC4,0x09,0x00,0x02, \ +0xF0,0xB5,0x2E,0x48,0x47,0x6E,0xFD,0xF7,0x9F,0xFD,0x01,0x02,0x2C,0x4C,0x09, \ +0x0A,0x2C,0x48,0x21,0x60,0x43,0x78,0x2C,0x4A,0x13,0x70,0x15,0x78,0x0D,0x23, \ +0x6B,0x43,0x1B,0x18,0x1B,0x7B,0x1B,0x06,0x0B,0x43,0x03,0x21,0x49,0x06,0x0B, \ +0x60,0x15,0x78,0x0D,0x23,0x6B,0x43,0x1B,0x18,0x9E,0x7B,0x5D,0x7B,0x36,0x02, \ +0x35,0x43,0xDE,0x7B,0x1B,0x7C,0x36,0x04,0x35,0x43,0x1B,0x06,0x2B,0x43,0x4B, \ +0x60,0xC3,0x1D,0x39,0x33,0x1B,0x78,0x02,0x2B,0x1D,0xD1,0x15,0x78,0x0D,0x23, \ +0x6B,0x43,0x1B,0x18,0x9E,0x7C,0x5D,0x7C,0x36,0x02,0x35,0x43,0xDE,0x7C,0x1B, \ +0x7D,0x36,0x04,0x35,0x43,0x1B,0x06,0x2B,0x43,0x4B,0x61,0x15,0x78,0x0D,0x23, \ +0x6B,0x43,0x18,0x18,0x85,0x7D,0x43,0x7D,0x2D,0x02,0x2B,0x43,0xC5,0x7D,0x00, \ +0x7E,0x2D,0x04,0x2B,0x43,0x00,0x06,0x18,0x43,0x88,0x61,0x10,0x78,0x21,0x68, \ +0x0D,0x4A,0x80,0x07,0x01,0x43,0x21,0x60,0x00,0x20,0x3B,0x5C,0x13,0x54,0x01, \ +0x30,0x18,0x28,0xFA,0xD3,0x11,0x76,0x08,0x0A,0x50,0x76,0x08,0x0C,0x90,0x76, \ +0x08,0x0E,0xD0,0x76,0xF0,0xBD,0x00,0x00,0x44,0x09,0x00,0x02,0xEC,0x01,0x00, \ +0x02,0x18,0x00,0x00,0x02,0x90,0x01,0x00,0x02,0x58,0x07,0x00,0x02,0x80,0xB4, \ +0x11,0x4A,0x11,0x88,0x01,0x31,0x09,0x04,0x09,0x0C,0x1E,0x29,0x00,0xD1,0x00, \ +0x21,0x0E,0x4F,0x0E,0x4B,0x4F,0x43,0xFB,0x18,0x1F,0x7B,0x00,0x2F,0x11,0xD1, \ +0x11,0x80,0x0C,0x49,0x03,0x22,0x19,0x60,0xD9,0x1D,0x15,0x31,0x59,0x60,0x08, \ +0x39,0x99,0x60,0x00,0x21,0x19,0x73,0x99,0x73,0x9A,0x75,0x99,0x82,0x03,0x60, \ +0x40,0x21,0x01,0x73,0x18,0x1C,0x80,0xBC,0xF7,0x46,0xF0,0x01,0x00,0x02,0xA4, \ +0x06,0x00,0x00,0x00,0x11,0x00,0x02,0x00,0x00,0x00,0x80,0x80,0xB4,0x13,0x4A, \ +0x51,0x88,0x01,0x31,0x09,0x04,0x09,0x0C,0x14,0x29,0x00,0xD1,0x00,0x21,0x10, \ +0x4F,0x10,0x4B,0x4F,0x43,0xFB,0x18,0x1F,0x7A,0x00,0x2F,0x15,0xD1,0x51,0x80, \ +0x0E,0x49,0x01,0x22,0x19,0x60,0xD9,0x1D,0x11,0x31,0x59,0x60,0x9A,0x81,0x00, \ +0x21,0x19,0x72,0x0A,0x4F,0xD9,0x73,0xBF,0x79,0x01,0x2F,0x01,0xD1,0xC2,0x73, \ +0x00,0xE0,0xC1,0x73,0x80,0x21,0x03,0x60,0x01,0x72,0x18,0x1C,0x80,0xBC,0xF7, \ +0x46,0xF0,0x01,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0xDA,0x00,0x02,0x00,0x00, \ +0x00,0x80,0xB0,0x00,0x00,0x02,0x01,0x1C,0x00,0x68,0x02,0x08,0x01,0xD3,0x08, \ +0x1C,0xF7,0x46,0x00,0x22,0x0A,0x73,0xF7,0x46,0x01,0x68,0x09,0x08,0x02,0xD3, \ +0x40,0x21,0x01,0x72,0xF7,0x46,0x04,0x4A,0x01,0x68,0x12,0x78,0x00,0x2A,0xF9, \ +0xD1,0x02,0x72,0x08,0x1C,0xF7,0x46,0x00,0x00,0x4B,0x02,0x00,0x02,0x0A,0x49, \ +0x01,0x20,0x48,0x63,0x00,0x20,0xCA,0x1D,0x39,0x32,0x88,0x63,0x50,0x83,0x10, \ +0x83,0x08,0x65,0xCB,0x1D,0x49,0x33,0x90,0x82,0x18,0x73,0x98,0x71,0x58,0x73, \ +0x90,0x81,0xD0,0x81,0x60,0x31,0xC8,0x72,0xF7,0x46,0x00,0x00,0xF4,0x01,0x00, \ 
+0x02,0x80,0xB5,0x07,0x27,0x7F,0x06,0xF8,0x69,0x40,0x23,0x18,0x43,0xF8,0x61, \ +0x14,0x48,0xFD,0xF7,0x8E,0xFC,0xF8,0x69,0x20,0x23,0x18,0x43,0xF8,0x61,0xF8, \ +0x69,0x1B,0x01,0x18,0x43,0xF8,0x61,0x00,0x20,0xFF,0x21,0x91,0x31,0x01,0x30, \ +0x88,0x42,0xFC,0xD3,0xF8,0x69,0x0C,0x4B,0x18,0x40,0xF8,0x61,0x00,0x20,0x7D, \ +0x21,0x49,0x01,0x01,0x30,0x88,0x42,0xFC,0xD3,0xFF,0xF7,0xC4,0xFF,0xFD,0xF7, \ +0x7A,0xFC,0x00,0xF0,0x0E,0xF8,0x05,0x49,0x0D,0x20,0x00,0x06,0x01,0x81,0xFF, \ +0x21,0x41,0x31,0x81,0x80,0x80,0xBD,0x50,0xC3,0x00,0x00,0xFF,0xFD,0x00,0x00, \ +0xFF,0x0F,0x00,0x00,0x90,0xB4,0x0E,0x48,0x00,0x21,0x01,0x70,0x0D,0x48,0x80, \ +0x27,0x07,0x73,0x01,0x23,0x03,0x72,0x82,0x22,0x02,0x71,0x07,0x22,0x02,0x70, \ +0x0A,0x48,0x05,0x24,0x04,0x73,0x86,0x24,0x04,0x72,0x02,0x71,0x08,0x48,0x24, \ +0x22,0x02,0x71,0x07,0x72,0x03,0x73,0x06,0x48,0x01,0x71,0x01,0x73,0x90,0xBC, \ +0xF7,0x46,0x00,0x00,0x10,0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D,0xA0,0x03,0x00, \ +0x0D,0xC0,0x03,0x00,0x0D,0xE0,0x03,0x00,0x0D,0xF0,0xB5,0x25,0x48,0x01,0x27, \ +0x00,0x7B,0x24,0x4C,0x0A,0x28,0x1F,0xD1,0x24,0x49,0x24,0x4E,0x00,0x20,0x0B, \ +0x7B,0x02,0x1C,0x01,0x30,0x08,0x28,0xB3,0x54,0xF9,0xD1,0xF1,0x78,0xB0,0x78, \ +0xF2,0x79,0x09,0x02,0x08,0x43,0x05,0x1C,0x71,0x79,0x30,0x79,0x09,0x02,0x01, \ +0x43,0xB0,0x79,0x12,0x02,0x02,0x43,0x30,0x78,0x73,0x78,0x00,0x02,0x18,0x43, \ +0x05,0x28,0x08,0xD1,0x28,0x1C,0x00,0xF0,0x7E,0xFB,0x21,0xE0,0x16,0x49,0x00, \ +0x20,0x08,0x73,0x27,0x71,0xF0,0xBD,0x09,0x28,0x03,0xD1,0x28,0x1C,0x00,0xF0, \ +0x97,0xFB,0x16,0xE0,0x11,0x4B,0x98,0x42,0x04,0xD1,0xF1,0x78,0x10,0x1C,0x00, \ +0xF0,0xC7,0xFB,0x0E,0xE0,0x0E,0x4B,0x9B,0x7A,0x00,0x2B,0x05,0xD1,0x13,0x1C, \ +0x0A,0x1C,0x29,0x1C,0x00,0xF0,0x17,0xF8,0x04,0xE0,0x13,0x1C,0x0A,0x1C,0x29, \ +0x1C,0x04,0xF0,0x89,0xFC,0x27,0x71,0xF0,0xBD,0x00,0x00,0xF0,0x02,0x00,0x0D, \ +0xD0,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x30,0x02,0x00,0x02,0x70,0x03,0x00, \ +0x0D,0x06,0x80,0x00,0x00,0x54,0x02,0x00,0x02,0x80,0xB5,0x0F,0x1C,0x11,0x1C, \ +0x1A,0x1C,0x08,0x4B,0x98,0x42,0x03,0xD1,0x38,0x1C,0x00,0xF0,0x12,0xF8,0x80, \ +0xBD,0x06,0x4B,0x98,0x42,0x03,0xD1,0x38,0x1C,0x00,0xF0,0x2B,0xF8,0x80,0xBD, \ +0x03,0x49,0x20,0x20,0x08,0x73,0x80,0xBD,0x33,0xC1,0x00,0x00,0x0E,0x40,0x00, \ +0x00,0x70,0x03,0x00,0x0D,0x0B,0x49,0x0C,0x48,0x4A,0x6B,0x03,0x2A,0x03,0xD1, \ +0x0B,0x4A,0x92,0x78,0x01,0x2A,0x02,0xD0,0x20,0x21,0x01,0x73,0xF7,0x46,0x80, \ +0x22,0x02,0x73,0x50,0x31,0xC9,0x7B,0x06,0x4A,0x10,0x23,0x11,0x73,0x01,0x7B, \ +0x19,0x43,0x01,0x73,0xF7,0x46,0x00,0x00,0xF4,0x01,0x00,0x02,0x70,0x03,0x00, \ +0x0D,0x30,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0xF0,0xB5,0x13,0x4D,0x17,0x1C, \ +0xA8,0x78,0x12,0x4A,0x02,0x28,0x06,0xD1,0xE8,0x78,0x08,0x28,0x03,0xD1,0x10, \ +0x48,0x43,0x6B,0x03,0x2B,0x02,0xD0,0x20,0x20,0x10,0x73,0xF0,0xBD,0x00,0x26, \ +0x00,0x2F,0x0D,0x4C,0x04,0xD1,0x00,0xF0,0x03,0xFC,0x01,0x20,0xA0,0x72,0x04, \ +0xE0,0x00,0x29,0x01,0xD1,0x09,0x49,0x41,0x66,0x16,0x73,0x27,0x80,0x29,0x1C, \ +0x08,0x22,0x66,0x80,0x07,0x48,0x01,0xF0,0x18,0xFF,0x26,0x73,0xF0,0xBD,0x30, \ +0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0xF4,0x01,0x00,0x02,0x54,0x02,0x00,0x02, \ +0x00,0x60,0x00,0x01,0x38,0x02,0x00,0x02,0x90,0xB5,0x17,0x49,0x08,0x78,0x4A, \ +0x78,0x00,0x02,0x10,0x43,0x05,0x28,0x15,0x4A,0x04,0xD1,0x89,0x78,0x50,0x6B, \ +0x00,0xF0,0x59,0xFB,0x90,0xBD,0x13,0x4B,0x01,0x27,0x98,0x42,0x11,0x4C,0x04, \ +0xD1,0xC8,0x78,0x00,0xF0,0x28,0xFA,0x27,0x71,0x90,0xBD,0x09,0x28,0x0A,0xD1, \ +0x0E,0x49,0x20,0x20,0x08,0x73,0x27,0x71,0x50,0x6B,0x03,0x28,0xEB,0xD1,0xD0, \ +0x1D,0x49,0x30,0x47,0x73,0x90,0xBD,0xD1,0x1D,0x59,0x31,0x89,0x7A,0x00,0x29, \ 
+0x02,0xD1,0x00,0xF0,0x0E,0xF8,0x90,0xBD,0x04,0xF0,0x19,0xFC,0x90,0xBD,0x30, \ +0x02,0x00,0x02,0xF4,0x01,0x00,0x02,0xD0,0x03,0x00,0x0D,0x06,0x80,0x00,0x00, \ +0x70,0x03,0x00,0x0D,0x08,0x4B,0x07,0x49,0x98,0x42,0x02,0xD1,0xE0,0x20,0x08, \ +0x73,0x04,0xE0,0x06,0x4B,0x98,0x42,0x01,0xD1,0x20,0x20,0x08,0x73,0x04,0x49, \ +0x01,0x20,0x08,0x71,0xF7,0x46,0x70,0x03,0x00,0x0D,0x33,0xC1,0x00,0x00,0x0E, \ +0x40,0x00,0x00,0xD0,0x03,0x00,0x0D,0x80,0xB5,0x11,0x48,0x11,0x4B,0x01,0x78, \ +0x42,0x78,0x09,0x02,0x11,0x43,0x0F,0x1C,0x9F,0x42,0x03,0xD1,0x80,0x78,0x00, \ +0xF0,0x2D,0xFA,0x05,0xE0,0x0C,0x49,0xE0,0x20,0x08,0x73,0x0C,0x49,0x01,0x20, \ +0x08,0x71,0x0B,0x4B,0x9F,0x42,0x0B,0xD1,0x0B,0x48,0x01,0x7B,0x02,0x29,0x07, \ +0xD1,0x03,0x21,0x01,0x73,0x09,0x48,0x00,0x22,0xC1,0x78,0x80,0x78,0x04,0xF0, \ +0xF5,0xF8,0x80,0xBD,0x30,0x02,0x00,0x02,0x0E,0x40,0x00,0x00,0x70,0x03,0x00, \ +0x0D,0xD0,0x03,0x00,0x0D,0x22,0xC1,0x00,0x00,0x54,0x02,0x00,0x02,0x38,0x02, \ +0x00,0x02,0x00,0xB5,0x0C,0x49,0x08,0x7B,0x02,0x09,0x05,0xD3,0x00,0x20,0x08, \ +0x73,0x0A,0x49,0x01,0x20,0x08,0x71,0x00,0xBD,0xC1,0x08,0x02,0xD3,0xFF,0xF7, \ +0xA3,0xFE,0x00,0xBD,0x41,0x08,0x02,0xD3,0xFF,0xF7,0x68,0xFF,0x00,0xBD,0x80, \ +0x08,0xF2,0xD3,0xFF,0xF7,0xB5,0xFF,0x00,0xBD,0x70,0x03,0x00,0x0D,0xD0,0x03, \ +0x00,0x0D,0xF0,0xB5,0x47,0x4E,0x30,0x79,0x80,0x08,0x4A,0xD3,0x46,0x4D,0x68, \ +0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x57,0xFE,0x44,0x48,0x04,0x79,0xC0,0x20, \ +0x01,0xF0,0x74,0xFA,0x01,0x1C,0x42,0x48,0x04,0x22,0x02,0x71,0x00,0x22,0x02, \ +0x71,0x08,0x1C,0x01,0xF0,0x6B,0xFA,0x3F,0x48,0xC7,0x1D,0x39,0x37,0x39,0x8B, \ +0x40,0x29,0x07,0xDA,0x39,0x8B,0x00,0x29,0x04,0xD0,0x39,0x8B,0x02,0x31,0x09, \ +0x04,0x09,0x0C,0x07,0xE0,0x40,0x2C,0x04,0xDA,0x39,0x8B,0x00,0x29,0x01,0xD1, \ +0x21,0x1C,0x00,0xE0,0x40,0x21,0x7A,0x8B,0x34,0x4C,0x52,0x18,0x19,0x23,0x9B, \ +0x01,0x9A,0x42,0x04,0xD9,0x00,0x22,0x7A,0x83,0x3A,0x83,0x01,0x22,0xA2,0x73, \ +0x2E,0x48,0x40,0x6F,0x80,0x68,0x7A,0x8B,0x80,0x18,0xCD,0x22,0x00,0xF0,0xA7, \ +0xFA,0x00,0x20,0x30,0x71,0x68,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x04,0xFE, \ +0xA1,0x7B,0x26,0x48,0x01,0x29,0x02,0xD1,0x00,0x20,0xA0,0x73,0xF0,0xBD,0x04, \ +0x1C,0x78,0x8B,0x00,0x28,0x28,0xD1,0x60,0x6F,0x81,0x8A,0xC0,0x7D,0x08,0x31, \ +0x08,0x18,0x38,0x83,0x60,0x6F,0x81,0x7D,0x45,0x68,0x03,0x29,0x01,0xDD,0x03, \ +0x21,0x81,0x75,0x38,0x8B,0x1C,0x4B,0x98,0x42,0x13,0xD8,0x38,0x8B,0x00,0x28, \ +0x10,0xD0,0xE8,0x1D,0x03,0x30,0x06,0x22,0x18,0x49,0x01,0xF0,0xDB,0xFD,0x00, \ +0x28,0x08,0xD1,0x28,0x78,0x08,0x28,0x09,0xD0,0x00,0x28,0x07,0xD0,0x20,0x28, \ +0x05,0xD0,0xB0,0x28,0x03,0xD0,0x00,0x20,0x78,0x83,0x38,0x83,0xF0,0xBD,0x38, \ +0x8B,0x40,0x28,0x06,0xDD,0x38,0x8B,0x40,0x38,0x38,0x83,0x78,0x8B,0x40,0x30, \ +0x78,0x83,0xF0,0xBD,0x00,0x20,0x38,0x83,0x78,0x83,0x60,0x6F,0xFF,0xF7,0x18, \ +0xFD,0x60,0x67,0xF0,0xBD,0x70,0x03,0x00,0x0D,0xB0,0x00,0x00,0x02,0xF0,0x02, \ +0x00,0x0D,0x60,0x02,0x00,0x0D,0xF4,0x01,0x00,0x02,0x54,0x02,0x00,0x02,0x32, \ +0x06,0x00,0x00,0x5C,0x00,0x00,0x02,0x90,0xB5,0x1C,0x4F,0x38,0x7A,0x40,0x08, \ +0x1D,0xD3,0x1B,0x48,0x40,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0xB7,0xFD,0x00, \ +0x20,0x18,0x4B,0x38,0x72,0x3A,0x1C,0xDF,0x1D,0x49,0x37,0xBC,0x79,0xD9,0x1D, \ +0x39,0x31,0x01,0x2C,0x15,0xD1,0xB8,0x71,0xFB,0x79,0x00,0x2B,0x01,0xD1,0x10, \ +0x23,0x13,0x72,0x89,0x8A,0x01,0x24,0x00,0x29,0x03,0xD0,0x00,0xF0,0x1E,0xF8, \ +0xBC,0x71,0x90,0xBD,0x39,0x7B,0x01,0x29,0x02,0xD1,0x38,0x73,0xBC,0x71,0x90, \ +0xBD,0xB8,0x71,0x90,0xBD,0x18,0x65,0x88,0x82,0x38,0x73,0xB8,0x71,0x18,0x6F, \ +0x01,0x7A,0x10,0x29,0xEE,0xD1,0x1F,0x1C,0xFF,0xF7,0x36,0xFD,0x38,0x67,0x90, \ 
+0xBD,0x00,0x00,0x60,0x03,0x00,0x0D,0xB0,0x00,0x00,0x02,0xF4,0x01,0x00,0x02, \ +0xF0,0xB4,0x10,0x49,0xCA,0x1D,0x39,0x32,0x90,0x8A,0x40,0x28,0x01,0xDB,0x40, \ +0x20,0x00,0xE0,0x90,0x8A,0x00,0x23,0x00,0x28,0x0F,0x6D,0x0B,0xDD,0xCC,0x1D, \ +0x0A,0x4D,0x49,0x34,0xE6,0x79,0x00,0x2E,0x05,0xD1,0x3E,0x78,0x01,0x37,0x01, \ +0x33,0x83,0x42,0x2E,0x72,0xF6,0xDB,0x93,0x8A,0x1B,0x1A,0x93,0x82,0x0A,0x6D, \ +0x10,0x18,0x08,0x65,0xF0,0xBC,0xF7,0x46,0xF4,0x01,0x00,0x02,0x20,0x03,0x00, \ +0x0D,0xF0,0xB5,0x04,0x1C,0x17,0x48,0x0F,0x1C,0x40,0x79,0x01,0x28,0x01,0xDD, \ +0x00,0xF0,0x53,0xFD,0x15,0x48,0x00,0x25,0xC6,0x1D,0x39,0x36,0x04,0x65,0xB7, \ +0x82,0xC7,0x1D,0x49,0x37,0xBD,0x71,0x3D,0x73,0xB0,0x8A,0x80,0x06,0x80,0x0E, \ +0x01,0x24,0x00,0x28,0x00,0xD1,0x3C,0x73,0xFF,0xF7,0xBE,0xFF,0xF8,0x79,0x00, \ +0x28,0x02,0xD1,0x0B,0x49,0x10,0x20,0x08,0x72,0xB0,0x8A,0x00,0x28,0x03,0xD0, \ +0xFF,0xF7,0xB3,0xFF,0xBC,0x71,0xF0,0xBD,0x38,0x7B,0x00,0x28,0x02,0xD0,0x3D, \ +0x73,0xBC,0x71,0xF0,0xBD,0xBD,0x71,0xF0,0xBD,0x00,0x00,0xB0,0x00,0x00,0x02, \ +0xF4,0x01,0x00,0x02,0x60,0x03,0x00,0x0D,0xB0,0xB5,0x29,0x4D,0xEF,0x1D,0x49, \ +0x37,0x78,0x7B,0x00,0x28,0x39,0xD0,0x27,0x48,0x00,0x78,0x01,0x28,0x35,0xD1, \ +0x26,0x48,0x00,0x24,0x01,0x78,0x01,0x29,0x02,0xD1,0x04,0x70,0x06,0xF0,0xB7, \ +0xF8,0xF8,0x79,0x01,0x28,0x03,0xD1,0xFC,0x71,0xF8,0x7B,0x00,0xF0,0xA4,0xF9, \ +0xE8,0x1D,0x59,0x30,0xC0,0x7A,0x01,0x28,0x02,0xD1,0xF8,0x7B,0x00,0xF0,0x9C, \ +0xF9,0xF8,0x7B,0x02,0x28,0x1C,0xD0,0xFF,0xF7,0x9B,0xFE,0x28,0x6F,0x80,0x23, \ +0x01,0x7A,0x17,0x4F,0x19,0x40,0x0C,0xD0,0x10,0x21,0x01,0x72,0x28,0x6F,0x81, \ +0x89,0x0C,0x30,0x0C,0x31,0xFF,0xF7,0x93,0xFF,0x78,0x79,0x01,0x28,0x01,0xDD, \ +0x00,0xF0,0xD6,0xFC,0xFF,0xF7,0x26,0xFF,0x78,0x79,0x01,0x28,0x01,0xDD,0x00, \ +0xF0,0xCF,0xFC,0xB0,0xBD,0x0C,0x48,0x00,0x78,0x00,0x28,0xFA,0xD0,0x28,0x6F, \ +0x01,0x7A,0x10,0x29,0x05,0xD0,0x01,0x7A,0x80,0x29,0x02,0xD0,0x01,0x7A,0x40, \ +0x29,0xF0,0xD1,0xFF,0xF7,0x7D,0xFC,0x28,0x67,0xB0,0xBD,0xF4,0x01,0x00,0x02, \ +0xDF,0x01,0x00,0x02,0xDD,0x01,0x00,0x02,0xB0,0x00,0x00,0x02,0xDE,0x01,0x00, \ +0x02,0xB0,0xB4,0x21,0x4F,0x80,0x21,0x21,0x4A,0x39,0x73,0xD1,0x1D,0x59,0x31, \ +0x4C,0x88,0x0D,0x88,0xAC,0x42,0x17,0xD1,0x38,0x7B,0x40,0x23,0x03,0x40,0xE0, \ +0x20,0x00,0x2B,0x0F,0xD1,0x09,0x88,0x49,0x07,0x02,0xD0,0x38,0x73,0xB0,0xBC, \ +0xF7,0x46,0xD1,0x1D,0x49,0x31,0x89,0x7B,0x01,0x29,0x02,0xD1,0xD0,0x20,0x38, \ +0x73,0xF5,0xE7,0x38,0x73,0xF3,0xE7,0x38,0x73,0xF1,0xE7,0x4A,0x88,0x0C,0x88, \ +0xA2,0x42,0xED,0xDA,0x0A,0x88,0x4C,0x88,0x12,0x1B,0x08,0x2A,0x00,0xD9,0x08, \ +0x22,0x01,0x28,0x01,0xD1,0x0C,0x4B,0x02,0xE0,0x02,0x28,0x00,0xD1,0x0B,0x4B, \ +0x00,0x2A,0x08,0xD0,0x0A,0x48,0x4C,0x88,0x4D,0x88,0x01,0x34,0x4C,0x80,0x5C, \ +0x5D,0x01,0x3A,0x04,0x73,0xF7,0xD1,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73, \ +0xD0,0xE7,0x70,0x03,0x00,0x0D,0xF4,0x01,0x00,0x02,0xF4,0x01,0x00,0x02,0x06, \ +0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0xF0,0xB5,0x00,0x24,0x01,0x28,0x1F,0x4D, \ +0x1F,0x4E,0x20,0x4F,0x0A,0xD1,0xFD,0xF7,0xE0,0xF9,0x30,0x7B,0x1E,0x49,0xC8, \ +0x73,0x00,0xF0,0x4C,0xF9,0x3C,0x73,0x01,0x20,0x28,0x71,0xF0,0xBD,0x79,0x88, \ +0x3B,0x88,0x1A,0x4A,0x99,0x42,0x1D,0xDA,0x39,0x88,0x7B,0x88,0xC9,0x1A,0x08, \ +0x29,0x00,0xD9,0x08,0x21,0x00,0x29,0x0A,0xD0,0x12,0x4E,0x53,0x6E,0x36,0x7B, \ +0x1E,0x70,0x01,0x33,0x53,0x66,0x7B,0x88,0x01,0x33,0x7B,0x80,0x01,0x39,0xF4, \ +0xD1,0x7A,0x88,0x3B,0x88,0x0F,0x49,0x9A,0x42,0x0D,0xD1,0x02,0x28,0x06,0xD1, \ +0x00,0xF0,0x27,0xF9,0x3C,0x73,0x08,0xE0,0x01,0x20,0x28,0x71,0xF0,0xBD,0x60, \ +0x20,0x08,0x73,0x01,0x20,0x38,0x73,0x00,0xE0,0x0C,0x73,0x01,0x20,0x28,0x71, \ 
+0xF0,0xBD,0x00,0x00,0xD0,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x54,0x02,0x00, \ +0x02,0x44,0x02,0x00,0x02,0xF4,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x00,0xB5, \ +0x7F,0x28,0x07,0xD8,0x00,0x29,0x05,0xD1,0x00,0x2A,0x03,0xD1,0x0C,0x4A,0x51, \ +0x6B,0x03,0x29,0x03,0xD1,0x0B,0x49,0x20,0x20,0x08,0x73,0x00,0xBD,0x01,0x29, \ +0x04,0xD1,0x00,0x28,0x08,0xD0,0x02,0x20,0x50,0x63,0x05,0xE0,0x02,0x29,0x03, \ +0xD1,0x00,0x28,0x01,0xD1,0x01,0x20,0x50,0x63,0x00,0xF0,0xEE,0xF8,0x00,0xBD, \ +0x00,0x00,0xF4,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB5,0x00,0x29,0x09, \ +0xD1,0x00,0x2A,0x07,0xD1,0x00,0x28,0x01,0xD0,0x01,0x28,0x03,0xD1,0x14,0x49, \ +0x4A,0x6B,0x01,0x2A,0x03,0xD1,0x13,0x49,0x20,0x20,0x08,0x73,0x80,0xBD,0x12, \ +0x4B,0x02,0x2A,0x09,0xD1,0x00,0x28,0x12,0xD0,0x03,0x22,0x4A,0x63,0x1F,0x7B, \ +0x1A,0x1C,0x02,0x23,0x3B,0x43,0x13,0x73,0x0A,0xE0,0x03,0x2A,0x08,0xD1,0x00, \ +0x28,0x06,0xD1,0x02,0x22,0x4A,0x63,0x1F,0x7B,0x1A,0x1C,0xFD,0x23,0x3B,0x40, \ +0x13,0x73,0x88,0x63,0x00,0x20,0x40,0x31,0x88,0x81,0xC8,0x81,0x00,0xF0,0xB8, \ +0xF8,0x80,0xBD,0x00,0x00,0xF4,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0xE0,0x03, \ +0x00,0x0D,0x90,0xB5,0x15,0x4F,0xFA,0x1D,0x59,0x32,0x01,0x29,0x02,0xD1,0x12, \ +0x23,0x13,0x80,0x03,0xE0,0x20,0x23,0x02,0x29,0x09,0xD1,0x13,0x80,0x00,0x23, \ +0x50,0x37,0xBB,0x73,0x14,0x88,0xA0,0x42,0x05,0xD8,0xBB,0x73,0x10,0x80,0x0F, \ +0xE0,0x0B,0x48,0x03,0x73,0x90,0xBD,0x14,0x88,0xA0,0x42,0x09,0xD9,0x10,0x88, \ +0x40,0x07,0x01,0xD0,0xBB,0x73,0x04,0xE0,0x10,0x88,0x40,0x07,0x01,0xD1,0x01, \ +0x20,0xB8,0x73,0x53,0x80,0x08,0x1C,0xFF,0xF7,0xDE,0xFE,0x90,0xBD,0x00,0x00, \ +0xF4,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB4,0x0E,0x4F,0x0E,0x4A,0x01, \ +0x28,0x06,0xD1,0x80,0x20,0x10,0x72,0x38,0x7B,0xFE,0x23,0x18,0x40,0x38,0x73, \ +0x08,0xE0,0x02,0x28,0x06,0xD1,0x80,0x20,0x08,0x43,0x10,0x72,0x38,0x7B,0x01, \ +0x23,0x18,0x43,0x38,0x73,0x06,0x49,0x20,0x20,0x08,0x73,0x05,0x49,0x01,0x20, \ +0x08,0x71,0x80,0xBC,0xF7,0x46,0x00,0x00,0xE0,0x03,0x00,0x0D,0xC0,0x03,0x00, \ +0x0D,0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x0D,0x23,0x1B,0x06,0x99,0x83, \ +0x05,0x49,0x0A,0x70,0x05,0x4A,0x10,0x60,0x02,0x20,0x08,0x72,0x08,0x7A,0x00, \ +0x28,0xFC,0xD1,0xF7,0x46,0x00,0x00,0x20,0x00,0x00,0x0D,0x40,0x00,0x00,0x0D, \ +0x90,0xB5,0x1B,0x4C,0x07,0x1C,0x60,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x5E, \ +0xFB,0x00,0x21,0x02,0x2F,0x17,0x48,0x18,0x4A,0x0F,0xD0,0x43,0x7B,0x02,0x2B, \ +0x03,0xD1,0x41,0x73,0x03,0xF0,0x49,0xFF,0x1A,0xE0,0x11,0x72,0x14,0x48,0x20, \ +0x22,0x02,0x70,0x01,0x70,0x13,0x49,0x86,0x20,0x08,0x72,0x11,0xE0,0x12,0x4B, \ +0x9B,0x7B,0x00,0x2B,0x0D,0xD1,0x17,0x7A,0x7B,0x09,0x0A,0xD2,0x10,0x23,0x13, \ +0x72,0xC1,0x72,0x0E,0x4A,0x01,0x20,0x10,0x70,0x0F,0x20,0x00,0x06,0x81,0x81, \ +0x0C,0x49,0x81,0x80,0x60,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x1E,0xFB,0x03, \ +0xF0,0x80,0xFF,0x90,0xBD,0x00,0x00,0xB0,0x00,0x00,0x02,0x54,0x02,0x00,0x02, \ +0x60,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0x54,0x09,0x00, \ +0x02,0xDE,0x01,0x00,0x02,0x08,0x08,0x00,0x00,0x04,0x48,0x01,0x78,0x02,0x78, \ +0x91,0x42,0xFC,0xD0,0x03,0x49,0x60,0x20,0x08,0x73,0xF7,0x46,0x00,0x00,0xF0, \ +0x03,0x00,0x0D,0x70,0x03,0x00,0x0D,0xF0,0xB5,0x29,0x4E,0x30,0x78,0x00,0x28, \ +0x01,0xD1,0x00,0xF0,0xB1,0xFA,0x0D,0x24,0x24,0x06,0x27,0x89,0x40,0x20,0x25, \ +0x4D,0x38,0x40,0x08,0xD0,0x28,0x7A,0x00,0x28,0xFC,0xD1,0x23,0x48,0x00,0x7B, \ +0x40,0x08,0x01,0xD3,0xFF,0xF7,0x79,0xFC,0x78,0x0A,0x1F,0xD3,0xF8,0x43,0xFF, \ +0x23,0x01,0x33,0x18,0x43,0x20,0x81,0xFD,0xF7,0x61,0xF8,0x20,0x7B,0x00,0x09, \ +0xFC,0xD2,0x28,0x7A,0x00,0x28,0xFC,0xD1,0xFF,0xF7,0xF2,0xFA,0x18,0x49,0xC8, \ 
+0x1D,0x49,0x30,0xC2,0x79,0x02,0x2A,0x06,0xD0,0x01,0x22,0xC2,0x71,0xC8,0x1D, \ +0x00,0x23,0x59,0x30,0x43,0x73,0x01,0xE0,0x00,0x23,0xC3,0x71,0xFF,0xF7,0x93, \ +0xFA,0x11,0x49,0x08,0x78,0x01,0x28,0x10,0xD1,0xB8,0x08,0x0E,0xD3,0x0F,0x4A, \ +0x00,0x23,0x10,0x7A,0x13,0x72,0xFA,0x43,0x02,0x23,0x1A,0x43,0x22,0x81,0x09, \ +0x78,0x01,0x29,0x03,0xD1,0x00,0x04,0x00,0x0C,0x03,0xF0,0x90,0xFE,0x30,0x78, \ +0x00,0x28,0x01,0xD1,0x00,0xF0,0x89,0xFA,0xF0,0xBD,0x3D,0x01,0x00,0x02,0x20, \ +0x00,0x00,0x0D,0xD0,0x03,0x00,0x0D,0xF4,0x01,0x00,0x02,0x37,0x01,0x00,0x02, \ +0xE0,0x03,0x00,0x0D,0x90,0xB5,0x41,0x68,0x0A,0x78,0x08,0x2A,0x12,0xD1,0x8A, \ +0x7F,0xCB,0x7F,0x12,0x02,0x1A,0x43,0x15,0x4B,0x12,0x04,0x1F,0x88,0x12,0x0C, \ +0xBA,0x42,0x02,0xD0,0x5B,0x88,0x93,0x42,0x06,0xD1,0xC8,0x1D,0x11,0x30,0x06, \ +0x22,0x10,0x49,0x01,0xF0,0x9E,0xFA,0x90,0xBD,0x03,0x23,0x5B,0x02,0x9A,0x42, \ +0x06,0xDD,0xC8,0x1D,0x11,0x30,0x06,0x22,0x0B,0x49,0x01,0xF0,0x93,0xFA,0x90, \ +0xBD,0xCF,0x1D,0x01,0x37,0x47,0x60,0x18,0x32,0x82,0x82,0x08,0x4C,0x18,0x22, \ +0x20,0x1C,0x01,0xF0,0x88,0xFA,0x18,0x22,0x38,0x1C,0x21,0x1C,0x01,0xF0,0x83, \ +0xFA,0x90,0xBD,0x94,0x02,0x00,0x02,0x8E,0x02,0x00,0x02,0x88,0x02,0x00,0x02, \ +0x40,0x0A,0x00,0x02,0xF0,0xB5,0x00,0xF0,0x55,0xF9,0x71,0x4E,0xFF,0x21,0xF0, \ +0x1D,0x27,0x30,0x01,0x31,0x06,0x22,0x05,0x1C,0x00,0xF0,0xB3,0xF9,0x6D,0x4F, \ +0x12,0x22,0x03,0x21,0x38,0x1C,0x00,0xF0,0xAD,0xF9,0x0E,0x22,0xFF,0x21,0x30, \ +0x1C,0x41,0x31,0x00,0xF0,0xA7,0xF9,0xF0,0x1D,0x07,0x30,0x0E,0x22,0xFF,0x21, \ +0x51,0x31,0x00,0xF0,0xA0,0xF9,0xF0,0x1D,0x2E,0x30,0x0E,0x22,0xFF,0x21,0x61, \ +0x31,0x00,0xF0,0x99,0xF9,0xF0,0x1D,0x3C,0x30,0x0E,0x22,0xFF,0x21,0x71,0x31, \ +0x00,0xF0,0x92,0xF9,0xF0,0x1D,0x15,0x30,0x0E,0x22,0xFF,0x21,0x11,0x31,0x00, \ +0xF0,0x8B,0xF9,0xF0,0x1D,0x2D,0x30,0x01,0x22,0xFF,0x21,0x31,0x31,0x00,0xF0, \ +0x84,0xF9,0xF0,0x1D,0x4A,0x30,0x07,0x22,0xFF,0x21,0x81,0x31,0x00,0xF0,0x7D, \ +0xF9,0xF0,0x1D,0x51,0x30,0x03,0x22,0xFF,0x21,0x89,0x31,0x00,0xF0,0x76,0xF9, \ +0xF0,0x1D,0x55,0x30,0x04,0x22,0xFF,0x21,0x8D,0x31,0x00,0xF0,0x6F,0xF9,0x00, \ +0xF0,0x24,0xF9,0xF0,0x1D,0x23,0x30,0x04,0x22,0xF9,0x1D,0x01,0x31,0x01,0xF0, \ +0x21,0xFA,0xF4,0x1D,0x19,0x34,0xA0,0x7B,0xC0,0x07,0xC0,0x0F,0x00,0x27,0x00, \ +0x28,0x10,0xD1,0x44,0x4A,0x17,0x54,0x01,0x30,0x06,0x28,0xFB,0xD3,0x10,0x1C, \ +0x06,0x22,0x29,0x1C,0x01,0xF0,0xF1,0xF9,0x00,0x28,0x04,0xD0,0x29,0x1C,0x06, \ +0x22,0x3E,0x48,0x01,0xF0,0x08,0xFA,0xF0,0x1D,0x29,0x30,0x00,0x79,0x10,0x28, \ +0x0D,0xD0,0x20,0x28,0x0B,0xD0,0x31,0x28,0x09,0xD0,0x30,0x28,0x07,0xD0,0x32, \ +0x28,0x05,0xD0,0x40,0x28,0x03,0xD0,0x41,0x28,0x01,0xD0,0x50,0x28,0x01,0xD1, \ +0x34,0x49,0xC8,0x75,0xF0,0x1D,0x49,0x30,0x43,0x78,0x01,0x22,0x32,0x49,0x55, \ +0x2B,0x13,0xD1,0x83,0x78,0x53,0x2B,0x10,0xD1,0xC3,0x78,0x42,0x2B,0x0D,0xD1, \ +0x03,0x79,0x53,0x2B,0x0A,0xD1,0x43,0x79,0x55,0x2B,0x07,0xD1,0x83,0x79,0x53, \ +0x2B,0x04,0xD1,0xC3,0x79,0x50,0x2B,0x01,0xD1,0x0A,0x70,0x00,0xE0,0x0F,0x70, \ +0xFF,0x25,0x01,0x23,0x06,0x7A,0x5B,0x02,0x01,0x35,0x53,0x2E,0x24,0x49,0x08, \ +0xD1,0x46,0x7A,0x45,0x2E,0x05,0xD1,0x86,0x7A,0x4C,0x2E,0x02,0xD1,0xCD,0x61, \ +0x0B,0x62,0x01,0xE0,0xCB,0x61,0x0D,0x62,0x05,0x7B,0x1E,0x4B,0x48,0x2D,0x12, \ +0xD1,0x45,0x7B,0x57,0x2D,0x0F,0xD1,0x80,0x7B,0x31,0x28,0x02,0xD1,0x03,0x20, \ +0x98,0x70,0x0C,0xE0,0x32,0x28,0x02,0xD1,0x04,0x20,0x98,0x70,0x07,0xE0,0x33, \ +0x28,0x02,0xD1,0x05,0x20,0x98,0x70,0x12,0xE0,0x98,0x78,0x05,0x28,0x0F,0xDA, \ +0x40,0x20,0xC8,0x60,0x4F,0x76,0x98,0x78,0x02,0x28,0x01,0xD1,0x0F,0x76,0x02, \ +0xE0,0x0A,0x76,0x04,0x28,0x03,0xDA,0x20,0x79,0x00,0x28,0x00,0xD0,0x27,0x71, \ 
+0xF0,0xBD,0x20,0x20,0xC8,0x60,0x4A,0x76,0x0A,0x76,0xF0,0xBD,0x00,0x00,0x58, \ +0x0A,0x00,0x02,0xF4,0x01,0x00,0x02,0x00,0x72,0x01,0x02,0x5C,0x00,0x00,0x02, \ +0x00,0x00,0x00,0x02,0x36,0x01,0x00,0x02,0x98,0x02,0x00,0x02,0x10,0x01,0x00, \ +0x02,0xB0,0xB5,0x2A,0x48,0x04,0x25,0x05,0x70,0x29,0x49,0x00,0x20,0x08,0x70, \ +0x29,0x49,0x02,0x24,0x0C,0x70,0x06,0x21,0x07,0x27,0x7F,0x06,0xB9,0x61,0x78, \ +0x61,0xF8,0x69,0xFB,0x0B,0x98,0x43,0xF8,0x61,0xF8,0x69,0x10,0x23,0x98,0x43, \ +0xF8,0x61,0xFF,0xF7,0xF1,0xFE,0x21,0x48,0x00,0x78,0x00,0x28,0x00,0xD1,0xBD, \ +0x61,0x1F,0x48,0x01,0x21,0xC1,0x76,0x1F,0x49,0xCA,0x69,0x0B,0x0C,0x1A,0x43, \ +0xCA,0x61,0xCA,0x69,0x1B,0x23,0x9A,0x43,0xCA,0x61,0xCA,0x69,0x04,0x23,0x9A, \ +0x43,0xCA,0x61,0xC2,0x68,0xCB,0x69,0xD2,0x43,0x1A,0x40,0xCA,0x61,0xC2,0x69, \ +0xCB,0x69,0xD2,0x43,0x1A,0x40,0xCA,0x61,0x02,0x6A,0xCB,0x69,0x1A,0x43,0xCA, \ +0x61,0xCA,0x69,0x0B,0x0C,0x9A,0x43,0xCA,0x61,0xB9,0x69,0x01,0x23,0x19,0x43, \ +0xB9,0x61,0x84,0x76,0x00,0xF0,0x39,0xF8,0x00,0xF0,0xA3,0xF8,0x0A,0x20,0xFC, \ +0xF7,0x9E,0xFD,0x00,0xF0,0x96,0xF8,0xFF,0xF7,0x02,0xF9,0xF8,0x69,0x01,0x23, \ +0xDB,0x03,0x18,0x43,0xF8,0x61,0xB0,0xBD,0x53,0x02,0x00,0x02,0x5E,0x02,0x00, \ +0x02,0x3B,0x01,0x00,0x02,0x36,0x01,0x00,0x02,0x98,0x02,0x00,0x02,0x40,0x00, \ +0x00,0x04,0x80,0x21,0xF3,0x20,0x00,0x05,0x01,0x60,0x00,0x21,0x01,0x60,0x01, \ +0x21,0x41,0x60,0x01,0x60,0x07,0x21,0x49,0x06,0xCA,0x69,0x01,0x23,0x5B,0x03, \ +0x1A,0x43,0xCA,0x61,0x04,0x49,0x01,0x63,0x04,0x49,0x41,0x63,0x81,0x63,0xC1, \ +0x63,0x01,0x69,0x80,0x68,0xF7,0x46,0x00,0x00,0x01,0x0C,0x00,0x02,0x01,0x02, \ +0x00,0x02,0x07,0x20,0x40,0x06,0xC1,0x69,0x02,0x4B,0x19,0x40,0xC1,0x61,0xF7, \ +0x46,0x00,0x00,0xFF,0xDF,0x00,0x00,0xF0,0xB5,0x0F,0x1C,0x00,0x21,0xF3,0x24, \ +0x24,0x05,0x00,0x28,0x08,0xD9,0x10,0x4D,0x6B,0x5C,0xE3,0x60,0x26,0x69,0xB3, \ +0x08,0xFC,0xD3,0x01,0x31,0x81,0x42,0xF7,0xD3,0xFF,0x20,0xE0,0x60,0xA1,0x68, \ +0x21,0x1C,0x0B,0x69,0x5B,0x08,0xFC,0xD3,0x8B,0x68,0x0C,0x69,0xA3,0x08,0xFC, \ +0xD3,0xC8,0x60,0x0B,0x69,0x5B,0x08,0xFC,0xD3,0x8B,0x68,0x3B,0x70,0x01,0x37, \ +0x01,0x3A,0xF3,0xD1,0x02,0x20,0xFC,0xF7,0x39,0xFD,0xF0,0xBD,0xA8,0x02,0x00, \ +0x02,0xF3,0x20,0x00,0x05,0x81,0x68,0x05,0x21,0xC1,0x60,0x01,0x69,0x89,0x08, \ +0xFC,0xD3,0xFF,0x21,0xC1,0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0x81,0x68,0x01, \ +0x69,0x49,0x08,0xFC,0xD3,0x80,0x68,0x00,0x06,0x00,0x0E,0xF7,0x46,0x90,0xB5, \ +0x04,0x1C,0x48,0x09,0x08,0x23,0x18,0x40,0x17,0x1C,0x03,0x22,0x02,0x43,0x08, \ +0x48,0x02,0x70,0x41,0x70,0xFF,0xF7,0xDE,0xFF,0x40,0x08,0xFB,0xD2,0x02,0x20, \ +0xFC,0xF7,0x0F,0xFD,0x02,0x20,0x21,0x1C,0x3A,0x1C,0xFF,0xF7,0xAA,0xFF,0x90, \ +0xBD,0x00,0x00,0xA8,0x02,0x00,0x02,0x07,0x20,0x40,0x06,0xC1,0x69,0x01,0x23, \ +0x5B,0x03,0x19,0x43,0xC1,0x61,0xF7,0x46,0xF3,0x20,0x00,0x05,0x41,0x68,0x0F, \ +0x23,0x1B,0x04,0x99,0x43,0x41,0x60,0x41,0x68,0x19,0x43,0x41,0x60,0xF7,0x46, \ +0x00,0x00,0x80,0xB4,0x14,0x4B,0x5B,0x79,0x01,0x2B,0x0E,0xDD,0x17,0x1C,0x12, \ +0x4A,0x14,0xD1,0x02,0x2B,0x09,0xD1,0x00,0x29,0x07,0xD1,0x00,0x28,0x07,0xD1, \ +0x90,0x78,0x4B,0x1F,0x18,0x40,0x90,0x70,0x00,0x20,0x50,0x70,0x80,0xBC,0xF7, \ +0x46,0x90,0x78,0x04,0x23,0x18,0x43,0x90,0x70,0x01,0x20,0x50,0x70,0xF6,0xE7, \ +0x00,0x28,0x04,0xD1,0x90,0x78,0x02,0x23,0x98,0x43,0x90,0x70,0xEF,0xE7,0x90, \ +0x78,0x02,0x23,0x18,0x43,0x90,0x70,0xEA,0xE7,0x00,0x00,0xB0,0x00,0x00,0x02, \ +0xB4,0x09,0x00,0x02,0x90,0xB5,0x07,0x21,0x49,0x06,0xCA,0x69,0x52,0x09,0x15, \ +0xD3,0xCA,0x69,0x10,0x23,0x9A,0x43,0xCA,0x61,0x01,0x28,0x01,0xD1,0x08,0x49, \ +0x08,0x70,0x08,0x4C,0x67,0x68,0xFC,0xF7,0xC6,0xFC,0x39,0x1A,0x49,0x01,0x09, \ 
+0x18,0x06,0x4A,0x61,0x60,0x51,0x6B,0x09,0x1A,0x49,0x01,0x08,0x18,0x50,0x63, \ +0x90,0xBD,0x00,0x00,0x3D,0x01,0x00,0x02,0x80,0x00,0x00,0x04,0x40,0x00,0x00, \ +0x04,0x90,0xB5,0x0C,0x48,0x80,0x78,0x01,0x28,0x13,0xD1,0x0B,0x4F,0x7C,0x68, \ +0xFC,0xF7,0xAB,0xFC,0x21,0x1A,0x49,0x09,0x09,0x18,0x79,0x60,0x08,0x49,0x4A, \ +0x6B,0x12,0x1A,0x52,0x09,0x10,0x18,0x48,0x63,0x07,0x20,0x40,0x06,0xC1,0x69, \ +0x10,0x23,0x19,0x43,0xC1,0x61,0x90,0xBD,0xB4,0x09,0x00,0x02,0x80,0x00,0x00, \ +0x04,0x40,0x00,0x00,0x04,0x80,0xB5,0xC0,0x20,0x00,0xF0,0x32,0xFC,0x07,0x1C, \ +0x06,0x48,0x01,0x78,0x00,0x29,0x03,0xD0,0x00,0x21,0x01,0x70,0xFF,0xF7,0xD3, \ +0xFF,0x38,0x1C,0x00,0xF0,0x26,0xFC,0x80,0xBD,0x00,0x00,0x3D,0x01,0x00,0x02, \ +0x80,0xB5,0xC0,0x20,0x00,0xF0,0x1E,0xFC,0x07,0x1C,0x01,0x20,0xFF,0xF7,0xA0, \ +0xFF,0x38,0x1C,0x00,0xF0,0x17,0xFC,0x80,0xBD,0xF0,0xB4,0x13,0x4A,0x00,0x27, \ +0xD7,0x65,0x17,0x66,0x17,0x67,0x57,0x67,0x20,0x20,0x90,0x67,0x10,0x48,0x07, \ +0x70,0x41,0x1C,0x01,0x20,0x04,0x02,0x00,0x25,0x03,0x1C,0x46,0x08,0x05,0xD2, \ +0x5B,0x08,0x01,0x35,0x2D,0x06,0x2D,0x0E,0x5E,0x08,0xF9,0xD3,0x0D,0x70,0x01, \ +0x31,0x01,0x30,0xA0,0x42,0xF0,0xD3,0x07,0x49,0x00,0x20,0x80,0xC1,0x01,0x30, \ +0x20,0x28,0xFB,0xD3,0x57,0x66,0x97,0x66,0xD7,0x67,0xF0,0xBC,0xF7,0x46,0x5C, \ +0x03,0x00,0x02,0xE0,0x0A,0x00,0x02,0xE0,0x0B,0x00,0x02,0x90,0xB5,0x0A,0x4F, \ +0x0A,0x4C,0x38,0x68,0x63,0x1C,0x98,0x42,0x04,0xD0,0x3C,0x60,0xFC,0xF7,0x2C, \ +0xFB,0x00,0xF0,0xEC,0xFB,0x06,0x48,0x3C,0x60,0x00,0x68,0xFC,0xF7,0xA2,0xFB, \ +0x00,0x20,0x38,0x60,0x00,0xF0,0xF3,0xFB,0x90,0xBD,0xC8,0x03,0x00,0x02,0xF0, \ +0xF0,0xF0,0xF0,0x38,0x04,0x00,0x02,0x80,0xB5,0x05,0x48,0x05,0x4F,0x38,0x60, \ +0xFC,0xF7,0x14,0xFB,0x00,0xF0,0xD4,0xFB,0x03,0x48,0x38,0x60,0x80,0xBD,0x00, \ +0x00,0xF0,0xF0,0xF0,0xF0,0xC8,0x03,0x00,0x02,0xF1,0xF0,0xF0,0xF0,0x0F,0x00, \ +0x2D,0xE9,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xCC,0x30,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2, \ +0x00,0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D, \ +0xE9,0xB0,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20, \ +0x83,0xE5,0x9C,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04, \ +0x00,0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9, \ +0x84,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x10,0xD0,0x8D,0xE2,0x78,0x00,0x9F, \ +0xE5,0x10,0xFF,0x2F,0xE1,0x0F,0x00,0x2D,0xE9,0x64,0x30,0x9F,0xE5,0x00,0x20, \ +0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2,0x00, \ +0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D,0xE9, \ +0x4C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20,0x83, \ +0xE5,0x34,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04,0x00, \ +0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9,0x20, \ +0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2, \ +0x01,0x40,0x2D,0xE9,0x0C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0xC8,0x03,0x00, \ +0x02,0xB8,0x03,0x00,0x02,0x1D,0x01,0x00,0x00,0x29,0x01,0x00,0x00,0x00,0xA0, \ +0x00,0x47,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xE4,0x31,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3, \ +0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD, \ +0xE8,0x0E,0xF0,0xB0,0xE1,0xC0,0x11,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00, \ +0x50,0xE3,0x27,0x00,0x00,0x0A,0xC0,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00, \ +0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA8,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5, \ 
+0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F, \ +0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08,0x40,0xBD,0xE8,0x0E,0x10, \ +0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x04,0x10,0x2D,0xE5,0xF0, \ +0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD2,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1, \ +0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0,0x21,0xE1,0x0F,0x00,0x2D, \ +0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x44,0x11,0x9F,0xE5,0x00,0x00, \ +0x91,0xE5,0x08,0xD0,0x80,0xE5,0x44,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00, \ +0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80,0xE5,0x00,0x20,0xA0,0xE3, \ +0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00,0x81,0xE5,0x78,0x01,0x00, \ +0xEA,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x75,0x01,0x00,0xEA,0x00,0xA0, \ +0x00,0x47,0xD1,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xF4,0x30,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3, \ +0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD, \ +0xE8,0x0E,0xF0,0xB0,0xE1,0x00,0x10,0x9D,0xE5,0x1F,0x20,0xA0,0xE3,0x02,0x10, \ +0x01,0xE0,0x12,0x00,0x51,0xE3,0x0B,0x00,0x00,0x0A,0xBC,0x10,0x9F,0xE5,0x00, \ +0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x27,0x00,0x00,0x0A,0xBC,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA4,0x30,0x9F, \ +0xE5,0x00,0x20,0x93,0xE5,0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40, \ +0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08, \ +0x40,0xBD,0xE8,0x0E,0x10,0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1, \ +0x04,0x10,0x2D,0xE5,0xF0,0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD1,0x20,0xA0, \ +0xE3,0x02,0xF0,0x21,0xE1,0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0, \ +0x21,0xE1,0x0F,0x00,0x2D,0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x40, \ +0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x08,0xD0,0x80,0xE5,0x40,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80, \ +0xE5,0x00,0x20,0xA0,0xE3,0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00, \ +0x81,0xE5,0x37,0x01,0x00,0xEA,0x18,0xD0,0x8D,0xE2,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x33,0x01,0x00,0xEA,0xC8,0x03,0x00,0x02,0xB8,0x03,0x00,0x02, \ +0xB4,0x03,0x00,0x02,0xBC,0x03,0x00,0x02,0x40,0x04,0x00,0x02,0xD8,0x03,0x00, \ +0x02,0x90,0xB5,0x86,0xB0,0x00,0x24,0x13,0x4F,0x13,0x4A,0x3C,0x60,0x7C,0x60, \ +0xBC,0x60,0x00,0x21,0x10,0x1C,0xBC,0x61,0x10,0xC0,0x01,0x31,0x20,0x29,0xFB, \ +0xD3,0xD0,0x1D,0x79,0x30,0xFA,0x60,0x38,0x61,0x7A,0x61,0x78,0x6A,0x00,0x22, \ +0x00,0x21,0x05,0x92,0x02,0x1C,0x04,0x91,0x03,0x90,0xF8,0x69,0x39,0x6A,0x02, \ +0x92,0x00,0x90,0x01,0x91,0x08,0xA1,0x07,0x48,0x05,0x4A,0x0C,0x4B,0x00,0xF0, \ +0x18,0xF8,0xBC,0x62,0xFC,0x62,0x06,0xB0,0x90,0xBD,0x3C,0x04,0x00,0x02,0x60, \ +0x0C,0x00,0x02,0xD1,0x42,0x00,0x00,0xE0,0x0C,0x00,0x02,0x53,0x79,0x73,0x74, \ +0x65,0x6D,0x20,0x54,0x69,0x6D,0x65,0x72,0x20,0x54,0x68,0x72,0x65,0x61,0x64, \ +0x00,0x4D,0x49,0x54,0x41,0xFF,0xB5,0x07,0x1C,0x0D,0x98,0x0E,0x9C,0x0A,0xAE, \ +0x4C,0xCE,0x09,0x9D,0xB9,0x62,0x02,0x99,0x79,0x64,0x03,0x99,0xB9,0x64,0x00, \ +0x21,0xB9,0x60,0x7A,0x61,0xAA,0x18,0x01,0x3A,0xFD,0x60,0x3A,0x61,0xDA,0x06, \ +0xD2,0x0E,0xF3,0x06,0xB8,0x61,0xFA,0x62,0xDB,0x0E,0xFB,0x63,0xF8,0x61,0x79, \ +0x60,0x03,0x20,0x38,0x63,0x79,0x63,0x25,0x48,0xB9,0x63,0x78,0x65,0x01,0x20, \ +0x90,0x40,0xBF,0x65,0x39,0x65,0x64,0x37,0x00,0x22,0x00,0x23,0x0E,0xC7,0x0C, \ +0xC7,0x78,0x3F,0x38,0x64,0x38,0x1C,0x1E,0x49,0x00,0xF0,0x0B,0xFB,0xC0,0x20, \ +0x00,0xF0,0xC8,0xF9,0x1C,0x49,0x1D,0x4D,0x39,0x60,0x2A,0x68,0xF9,0x1D,0x79, \ 
+0x31,0x00,0x2A,0x0A,0xD0,0xD3,0x1D,0x79,0x33,0xDA,0x68,0xDF,0x60,0xD3,0x1D, \ +0x79,0x33,0x9F,0x60,0xCA,0x60,0x2A,0x68,0x8A,0x60,0x02,0xE0,0x2F,0x60,0x8F, \ +0x60,0xCF,0x60,0x13,0x49,0x14,0x4D,0x0A,0x68,0x01,0x32,0x0A,0x60,0x29,0x68, \ +0x01,0x31,0x29,0x60,0x00,0xF0,0xA8,0xF9,0x00,0x2C,0x07,0xD0,0x38,0x1C,0x00, \ +0xF0,0x1D,0xFB,0x00,0x28,0x0A,0xD0,0x00,0xF0,0x83,0xFB,0x07,0xE0,0xC0,0x20, \ +0x00,0xF0,0x9B,0xF9,0x29,0x68,0x01,0x39,0x29,0x60,0x00,0xF0,0x96,0xF9,0x00, \ +0x20,0x04,0xB0,0xF0,0xBD,0x00,0x00,0x2D,0x44,0x00,0x00,0x6D,0x44,0x00,0x00, \ +0x44,0x52,0x48,0x54,0xC0,0x03,0x00,0x02,0xC4,0x03,0x00,0x02,0xD8,0x03,0x00, \ +0x02,0x90,0xB5,0x41,0x60,0x07,0x1C,0x08,0x30,0x00,0x21,0x00,0x24,0x12,0xC0, \ +0x12,0xC0,0xC0,0x20,0x00,0xF0,0x7B,0xF9,0x0C,0x49,0x0C,0x4B,0x39,0x60,0x19, \ +0x68,0x00,0x29,0x06,0xD0,0xCA,0x69,0xCF,0x61,0x97,0x61,0xFA,0x61,0x19,0x68, \ +0xB9,0x61,0x02,0xE0,0x1F,0x60,0xBF,0x61,0xFF,0x61,0x06,0x49,0x0A,0x68,0x01, \ +0x32,0x0A,0x60,0x00,0xF0,0x65,0xF9,0x20,0x1C,0x90,0xBD,0x00,0x00,0x4E,0x44, \ +0x56,0x44,0x6C,0x04,0x00,0x02,0x70,0x04,0x00,0x02,0xF0,0xB5,0x85,0xB0,0x07, \ +0x1C,0xC0,0x20,0x0C,0x1C,0x15,0x1C,0x00,0xF0,0x54,0xF9,0xA9,0x08,0x03,0xD3, \ +0xB9,0x68,0x21,0x40,0xB9,0x60,0x3D,0xE0,0xB9,0x68,0x21,0x43,0xB9,0x60,0x3C, \ +0x69,0x0A,0x1C,0x00,0x2C,0x63,0xD0,0x7B,0x69,0x6D,0x49,0x01,0x2B,0x35,0xD1, \ +0xE5,0x1D,0x79,0x35,0x2E,0x68,0xB3,0x08,0x05,0xD3,0xA3,0x6F,0x16,0x1C,0x1E, \ +0x40,0x9E,0x42,0x28,0xD1,0x02,0xE0,0xA3,0x6F,0x13,0x40,0x24,0xD0,0xE3,0x6F, \ +0x1A,0x60,0x2A,0x68,0x52,0x08,0x03,0xD3,0xBA,0x68,0xA3,0x6F,0x9A,0x43,0xBA, \ +0x60,0x00,0x26,0x3E,0x61,0x7E,0x61,0xA6,0x66,0x0A,0x68,0x01,0x32,0x0A,0x60, \ +0x00,0xF0,0x23,0xF9,0x60,0x6E,0x00,0x28,0x04,0xD0,0xE0,0x1D,0x45,0x30,0x00, \ +0xF0,0x34,0xFB,0x00,0xE0,0xE6,0x64,0x6E,0x60,0x20,0x1C,0x00,0xF0,0x90,0xFA, \ +0x00,0x28,0x01,0xD0,0x00,0xF0,0xF6,0xFA,0x30,0x1C,0x9B,0xE0,0x00,0xF0,0x0E, \ +0xF9,0x97,0xE0,0x03,0x94,0x00,0x25,0x3D,0x61,0x01,0x92,0x4F,0x4A,0x02,0x93, \ +0x11,0x68,0x00,0x26,0x01,0x31,0x11,0x60,0x00,0xF0,0x01,0xF9,0xC0,0x20,0x00, \ +0xF0,0xFE,0xF8,0xF9,0x68,0x00,0x29,0x0A,0xD0,0x00,0x21,0xF9,0x60,0x03,0x9C, \ +0x7B,0x69,0x02,0x93,0xB9,0x68,0x01,0x9A,0x11,0x43,0x01,0x91,0x00,0x2C,0x46, \ +0xD0,0xE3,0x1D,0x79,0x33,0x04,0x93,0x19,0x68,0x00,0x91,0x89,0x08,0x06,0xD3, \ +0xA1,0x6F,0x01,0x9A,0x0A,0x40,0x8A,0x42,0x07,0xD1,0x04,0xE0,0x6F,0xE0,0xA1, \ +0x6F,0x01,0x9A,0x11,0x40,0x01,0xD0,0x00,0x21,0x00,0xE0,0x07,0x21,0x22,0x6F, \ +0x00,0x29,0x28,0xD1,0x01,0x99,0xE3,0x6F,0x19,0x60,0x00,0x99,0x49,0x08,0x03, \ +0xD3,0xB9,0x68,0xA3,0x6F,0x99,0x43,0xB9,0x60,0x21,0x6F,0xA1,0x42,0x02,0xD1, \ +0x00,0x21,0x03,0x91,0x08,0xE0,0x03,0x9B,0xA3,0x42,0x00,0xD1,0x03,0x91,0x63, \ +0x6F,0x4B,0x67,0x21,0x6F,0x63,0x6F,0x19,0x67,0x79,0x69,0x01,0x39,0x79,0x61, \ +0x00,0x21,0xA1,0x66,0x04,0x9B,0x00,0x2D,0x59,0x60,0x03,0xD1,0x25,0x1C,0x26, \ +0x1C,0x21,0x67,0x02,0xE0,0x34,0x67,0x21,0x67,0x26,0x1C,0x02,0x9B,0x14,0x1C, \ +0x01,0x3B,0x02,0x93,0x00,0x2B,0xA5,0xD1,0x03,0x99,0x39,0x61,0x00,0xF0,0xA5, \ +0xF8,0x00,0x2D,0x18,0xD0,0x1C,0x4E,0x00,0x24,0x68,0x6E,0x2F,0x6F,0x00,0x28, \ +0x04,0xD0,0xE8,0x1D,0x45,0x30,0x00,0xF0,0xB1,0xFA,0x00,0xE0,0xEC,0x64,0xC0, \ +0x20,0x00,0xF0,0x94,0xF8,0x31,0x68,0x01,0x31,0x31,0x60,0x00,0xF0,0x8F,0xF8, \ +0x28,0x1C,0x00,0xF0,0x06,0xFA,0x3D,0x1C,0xE8,0xD1,0xC0,0x20,0x00,0xF0,0x87, \ +0xF8,0x0E,0x49,0x0A,0x68,0x01,0x3A,0x0A,0x60,0x00,0xF0,0x81,0xF8,0x0C,0x48, \ +0x0C,0x49,0x00,0x68,0x09,0x68,0x88,0x42,0x05,0xD0,0x0B,0x48,0x00,0x68,0x00, \ +0x28,0x01,0xD1,0x00,0xF0,0x59,0xFA,0x00,0x20,0x05,0xB0,0xF0,0xBD,0x79,0x69, \ 
+0x00,0x29,0x00,0xD1,0x5D,0xE7,0xF9,0x68,0x01,0x31,0xF9,0x60,0x59,0xE7,0xD8, \ +0x03,0x00,0x02,0xB8,0x03,0x00,0x02,0xBC,0x03,0x00,0x02,0xC8,0x03,0x00,0x02, \ +0xFF,0xB5,0x07,0x1C,0xC0,0x20,0x0D,0x1C,0x09,0x9E,0x00,0xF0,0x5B,0xF8,0x02, \ +0x9A,0x91,0x08,0x04,0xD3,0xB9,0x68,0x29,0x40,0xA9,0x42,0x0E,0xD1,0x02,0xE0, \ +0xB9,0x68,0x29,0x40,0x0A,0xD0,0xB9,0x68,0x03,0x9B,0x00,0x24,0x19,0x60,0x02, \ +0x9A,0x51,0x08,0x3D,0xD3,0xB9,0x68,0xA9,0x43,0xB9,0x60,0x39,0xE0,0x07,0x24, \ +0x00,0x2E,0x36,0xD0,0x1D,0x49,0x0C,0x68,0x1D,0x49,0xA1,0x66,0xA5,0x67,0xE5, \ +0x1D,0x02,0x9A,0x79,0x35,0x2A,0x60,0x03,0x9B,0xE3,0x67,0xE7,0x66,0x39,0x69, \ +0x00,0x29,0x09,0xD0,0x21,0x67,0x39,0x69,0x49,0x6F,0x61,0x67,0x39,0x69,0x49, \ +0x6F,0x0C,0x67,0x39,0x69,0x4C,0x67,0x02,0xE0,0x3C,0x61,0x24,0x67,0x64,0x67, \ +0x79,0x69,0x01,0x31,0x79,0x61,0x07,0x21,0x21,0x63,0x01,0x21,0xA1,0x63,0x0E, \ +0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xE6,0x64,0x00,0xF0,0x18,0xF8,0x01,0x23, \ +0xDE,0x42,0x03,0xD0,0xE0,0x1D,0x45,0x30,0x00,0xF0,0x97,0xFA,0x20,0x1C,0x00, \ +0xF0,0xCE,0xFA,0x68,0x68,0x04,0xB0,0xF0,0xBD,0x00,0xF0,0x09,0xF8,0x20,0x1C, \ +0xF9,0xE7,0x00,0x00,0xB8,0x03,0x00,0x02,0xA1,0x46,0x00,0x00,0xD8,0x03,0x00, \ +0x02,0x00,0xA3,0x18,0x47,0x00,0x30,0x0F,0xE1,0x3F,0x20,0xA0,0xE3,0x02,0x10, \ +0x03,0xE0,0x00,0x10,0x81,0xE1,0x01,0xF0,0x21,0xE1,0x02,0x00,0xC3,0xE1,0x1E, \ +0xFF,0x2F,0xE1,0x00,0xB5,0xFF,0xF7,0xD7,0xFB,0xFF,0xF7,0xB1,0xFD,0x00,0xF0, \ +0x8F,0xFB,0x00,0xF0,0x95,0xFB,0x00,0xF0,0xF5,0xF9,0x00,0xF0,0x99,0xFB,0x00, \ +0xF0,0x9F,0xFB,0x00,0xBD,0x00,0xA0,0x00,0x47,0x13,0x00,0xA0,0xE3,0x00,0xF0, \ +0x21,0xE1,0x48,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0xFC, \ +0xFF,0xFF,0x0A,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x34,0x10,0x9F,0xE5, \ +0x00,0x00,0x81,0xE5,0x04,0x20,0x90,0xE5,0x18,0x30,0x90,0xE5,0x01,0x20,0x82, \ +0xE2,0x04,0x20,0x80,0xE5,0x20,0x20,0x9F,0xE5,0x08,0xD0,0x90,0xE5,0x00,0x30, \ +0x82,0xE5,0x03,0x00,0xBD,0xE8,0x00,0x00,0x50,0xE3,0x01,0xF0,0x6F,0xE1,0xF0, \ +0x80,0xFD,0x08,0xFF,0xDF,0xFD,0xE8,0xBC,0x03,0x00,0x02,0xB8,0x03,0x00,0x02, \ +0x40,0x04,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x4E,0x4B,0x98,0x42,0x01,0xD0,0x02, \ +0xB0,0xF0,0xBD,0x00,0x20,0x00,0x90,0x00,0x26,0xC0,0x20,0xFF,0xF7,0x9F,0xFF, \ +0x4A,0x4D,0x29,0x68,0x09,0x68,0x01,0x91,0x00,0x29,0x01,0xD0,0x01,0xAA,0x8A, \ +0x61,0x29,0x68,0x46,0x4C,0x0E,0x60,0x29,0x68,0x04,0x31,0x29,0x60,0x22,0x68, \ +0x91,0x42,0x02,0xD1,0x43,0x49,0x09,0x68,0x29,0x60,0x43,0x49,0x0E,0x60,0xFF, \ +0xF7,0x87,0xFF,0xC0,0x20,0xFF,0xF7,0x84,0xFF,0x01,0x99,0x00,0x29,0x5C,0xD0, \ +0x01,0x9C,0x21,0x69,0xA1,0x42,0x01,0xD1,0x00,0x21,0x08,0xE0,0x62,0x69,0x4A, \ +0x61,0x21,0x69,0x62,0x69,0x11,0x61,0x22,0x69,0x01,0xA9,0x91,0x61,0x21,0x69, \ +0x01,0x91,0x21,0x68,0x20,0x29,0x03,0xD9,0x20,0x39,0x21,0x60,0x00,0x25,0x04, \ +0xE0,0x27,0x1D,0xA2,0xCF,0x21,0x60,0x00,0x29,0x03,0xD0,0x69,0x46,0xA1,0x61, \ +0x24,0x61,0x00,0xE0,0xA6,0x61,0xFF,0xF7,0x5E,0xFF,0x00,0x2D,0x02,0xD0,0x38, \ +0x1C,0x00,0xF0,0xFC,0xFB,0xC0,0x20,0xFF,0xF7,0x56,0xFF,0xA2,0x69,0x69,0x46, \ +0x8A,0x42,0x25,0xD1,0x21,0x68,0x20,0x29,0x01,0xD9,0x1F,0x21,0x00,0xE0,0x01, \ +0x39,0x20,0x4D,0x89,0x00,0x2A,0x68,0x89,0x18,0x1F,0x4A,0x12,0x68,0x91,0x42, \ +0x07,0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x1D,0x4A,0x89,0x10,0x12,0x68,0x89, \ +0x00,0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x22,0x61,0x0A,0x68,0x52,0x69, \ +0x62,0x61,0x14,0x61,0x0A,0x68,0x54,0x61,0xA1,0x61,0x03,0xE0,0x24,0x61,0xA1, \ +0x61,0x64,0x61,0x0C,0x60,0xFF,0xF7,0x2A,0xFF,0xC0,0x20,0xFF,0xF7,0x27,0xFF, \ +0x01,0x99,0x00,0x29,0xA2,0xD1,0x0F,0x49,0x09,0x68,0x00,0x29,0x10,0xD1,0x0E, \ 
+0x4C,0x03,0x21,0x22,0x68,0x11,0x63,0x22,0x68,0x01,0x21,0x91,0x63,0x0C,0x4A, \ +0x11,0x68,0x01,0x31,0x11,0x60,0xFF,0xF7,0x13,0xFF,0x20,0x68,0x00,0xF0,0xD0, \ +0xF9,0x6C,0xE7,0xFF,0xF7,0x0D,0xFF,0x69,0xE7,0x4D,0x49,0x54,0x41,0x50,0x04, \ +0x00,0x02,0x4C,0x04,0x00,0x02,0x48,0x04,0x00,0x02,0x54,0x04,0x00,0x02,0xB8, \ +0x03,0x00,0x02,0xD8,0x03,0x00,0x02,0x90,0xB5,0x07,0x1C,0x00,0x6B,0x04,0x28, \ +0x0C,0xD1,0xC0,0x20,0xFF,0xF7,0xF6,0xFE,0x0A,0x49,0x0A,0x68,0x01,0x32,0x0A, \ +0x60,0xFF,0xF7,0xF0,0xFE,0x38,0x1C,0x00,0xF0,0x67,0xF8,0x90,0xBD,0xC0,0x20, \ +0xFF,0xF7,0xE9,0xFE,0xBC,0x6E,0xFF,0xF7,0xE6,0xFE,0x00,0x2C,0xF6,0xD0,0x38, \ +0x1C,0x00,0xF0,0x83,0xFB,0x90,0xBD,0xD8,0x03,0x00,0x02,0x80,0xB5,0x0C,0x4F, \ +0x39,0x68,0x88,0x6C,0x49,0x6C,0x00,0xF0,0x76,0xFB,0xC0,0x20,0xFF,0xF7,0xD4, \ +0xFE,0x3A,0x68,0x01,0x21,0x11,0x63,0x3A,0x68,0x91,0x63,0x06,0x49,0x0A,0x68, \ +0x01,0x32,0x0A,0x60,0xFF,0xF7,0xC9,0xFE,0x38,0x68,0x00,0xF0,0x86,0xF9,0x80, \ +0xBD,0x00,0x00,0xB8,0x03,0x00,0x02,0xD8,0x03,0x00,0x02,0x00,0xA3,0x18,0x47, \ +0x10,0x20,0x90,0xE5,0x03,0x20,0xC2,0xE3,0x48,0x20,0x42,0xE2,0x01,0x30,0xA0, \ +0xE3,0x00,0x30,0x82,0xE5,0x33,0x30,0xA0,0xE3,0x04,0x30,0x82,0xE5,0x00,0x30, \ +0xA0,0xE3,0x08,0x30,0x82,0xE5,0x0C,0x30,0x82,0xE5,0x10,0x30,0x82,0xE5,0x14, \ +0x30,0x82,0xE5,0x18,0x30,0x82,0xE5,0x1C,0x30,0x82,0xE5,0x20,0x30,0x82,0xE5, \ +0x24,0x30,0x82,0xE5,0x28,0x30,0x82,0xE5,0x2C,0x30,0x82,0xE5,0x0C,0x30,0x90, \ +0xE5,0x30,0x30,0x82,0xE5,0x00,0x30,0xA0,0xE3,0x34,0x30,0x82,0xE5,0x38,0x30, \ +0x82,0xE5,0x3C,0x30,0x82,0xE5,0x40,0x10,0x82,0xE5,0x44,0x30,0x82,0xE5,0x08, \ +0x20,0x80,0xE5,0x1E,0xFF,0x2F,0xE1,0xF0,0xB5,0x00,0x24,0x07,0x1C,0xC0,0x20, \ +0xFF,0xF7,0x80,0xFE,0x29,0x49,0x2A,0x4D,0x0A,0x68,0x01,0x3A,0x0A,0x60,0xBA, \ +0x6B,0x00,0x21,0x00,0x2A,0x06,0xD0,0x3A,0x6B,0x01,0x2A,0x39,0xD0,0x02,0x2A, \ +0x37,0xD0,0xB9,0x63,0x07,0xE0,0x3A,0x6B,0x00,0x2A,0x32,0xD0,0x7A,0x6B,0x00, \ +0x2A,0x03,0xD0,0x79,0x63,0x03,0x21,0x39,0x63,0x2B,0xE0,0x39,0x63,0xF9,0x6A, \ +0x1D,0x4B,0x8E,0x00,0x9A,0x59,0x00,0x2A,0x05,0xD0,0x51,0x6A,0x0F,0x62,0x57, \ +0x62,0x79,0x62,0x3A,0x62,0x1E,0xE0,0x9F,0x51,0x3F,0x62,0x7F,0x62,0x17,0x4A, \ +0x3B,0x6C,0x16,0x68,0x33,0x43,0x13,0x60,0x2A,0x68,0x15,0x4B,0x00,0x2A,0x02, \ +0xD1,0x2F,0x60,0x19,0x60,0x0F,0xE0,0x1E,0x68,0xB1,0x42,0x0C,0xD2,0x19,0x60, \ +0xD3,0x6B,0x8B,0x42,0x08,0xD9,0xD1,0x6A,0x8B,0x42,0x04,0xD0,0x0E,0x49,0x12, \ +0x6C,0x0B,0x68,0x1A,0x43,0x0A,0x60,0x2F,0x60,0xFF,0xF7,0x38,0xFE,0x0B,0x48, \ +0x00,0x68,0x29,0x68,0x88,0x42,0x04,0xD0,0x0A,0x48,0x00,0x68,0x00,0x28,0x00, \ +0xD1,0x01,0x24,0x20,0x1C,0xF0,0xBD,0xD8,0x03,0x00,0x02,0xBC,0x03,0x00,0x02, \ +0xE0,0x0B,0x00,0x02,0xCC,0x03,0x00,0x02,0xD4,0x03,0x00,0x02,0xD0,0x03,0x00, \ +0x02,0xB8,0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0x00,0xA0,0x00,0x47,0x00,0x00, \ +0xA0,0xE3,0x00,0x10,0x0F,0xE1,0x20,0x10,0x81,0xE3,0xF3,0x40,0x2D,0xE9,0xD3, \ +0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x2C,0x30,0x9F,0xE5,0x00,0x00,0x93,0xE5, \ +0x28,0x20,0x9F,0xE5,0x00,0x10,0x92,0xE5,0x08,0xD0,0x80,0xE5,0x00,0x40,0xA0, \ +0xE3,0x00,0x00,0x51,0xE3,0x02,0x00,0x00,0x0A,0x1C,0x10,0x90,0xE5,0x00,0x40, \ +0x82,0xE5,0x18,0x10,0x80,0xE5,0x00,0x40,0x83,0xE5,0x0A,0xFF,0xFF,0xEA,0xB8, \ +0x03,0x00,0x02,0x40,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41,0x60, \ +0xF7,0x46,0x00,0x00,0x6C,0x04,0x00,0x02,0x90,0xB5,0x07,0x1C,0xC0,0x20,0xFF, \ +0xF7,0xE3,0xFD,0xB9,0x69,0x00,0x24,0x00,0x29,0x16,0xD0,0x3A,0x69,0xBA,0x42, \ +0x04,0xD1,0x0A,0x68,0xBA,0x42,0x0F,0xD1,0x0C,0x60,0x0D,0xE0,0x79,0x69,0x51, \ +0x61,0x39,0x69,0x7A,0x69,0x11,0x61,0xB9,0x69,0x0A,0x68,0xBA,0x42,0x04,0xD1, \ 
+0x3A,0x69,0x91,0x61,0x39,0x69,0xBA,0x69,0x11,0x60,0xBC,0x61,0xFF,0xF7,0xC6, \ +0xFD,0x20,0x1C,0x90,0xBD,0xB0,0xB5,0x07,0x1C,0xC4,0x6E,0xC0,0x20,0xFF,0xF7, \ +0xBE,0xFD,0xB9,0x6E,0x00,0x29,0x38,0xD0,0x00,0x2C,0x36,0xD0,0x21,0x68,0x1C, \ +0x4B,0x99,0x42,0x32,0xD1,0x00,0x25,0xBD,0x66,0x39,0x6F,0xB9,0x42,0x01,0xD1, \ +0x25,0x61,0x06,0xE0,0x21,0x61,0x79,0x6F,0x3A,0x6F,0x51,0x67,0x39,0x6F,0x7A, \ +0x6F,0x11,0x67,0x61,0x69,0x01,0x39,0x61,0x61,0x39,0x6B,0x07,0x29,0x10,0xD1, \ +0xFA,0x1D,0x79,0x32,0x51,0x60,0x10,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xFF, \ +0xF7,0x98,0xFD,0x38,0x1C,0xFF,0xF7,0x0F,0xFF,0x00,0x28,0x04,0xD0,0xFF,0xF7, \ +0x75,0xFF,0x01,0xE0,0xFF,0xF7,0x8E,0xFD,0x78,0x6E,0x00,0x28,0x04,0xD0,0xF8, \ +0x1D,0x45,0x30,0xFF,0xF7,0x9F,0xFF,0x00,0xE0,0xFD,0x64,0xC0,0x20,0xFF,0xF7, \ +0x82,0xFD,0xFF,0xF7,0x80,0xFD,0xB0,0xBD,0x00,0x00,0x4E,0x44,0x56,0x44,0xD8, \ +0x03,0x00,0x02,0x80,0xB5,0x07,0x1C,0xC0,0x20,0xFF,0xF7,0x75,0xFD,0x39,0x68, \ +0x00,0x29,0x27,0xD0,0xBA,0x69,0x00,0x2A,0x24,0xD1,0x20,0x29,0x01,0xD9,0x1F, \ +0x21,0x00,0xE0,0x01,0x39,0x11,0x4A,0x89,0x00,0x12,0x68,0x89,0x18,0x10,0x4A, \ +0x12,0x68,0x91,0x42,0x07,0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x0E,0x4A,0x89, \ +0x10,0x12,0x68,0x89,0x00,0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x3A,0x61, \ +0x0A,0x68,0x52,0x69,0x7A,0x61,0x17,0x61,0x0A,0x68,0x57,0x61,0xB9,0x61,0x03, \ +0xE0,0x3F,0x61,0xB9,0x61,0x7F,0x61,0x0F,0x60,0xFF,0xF7,0x48,0xFD,0x00,0x20, \ +0x80,0xBD,0x50,0x04,0x00,0x02,0x4C,0x04,0x00,0x02,0x48,0x04,0x00,0x02,0xF0, \ +0xB5,0x05,0x1C,0xC0,0x20,0xFF,0xF7,0x3B,0xFD,0x67,0x49,0x67,0x4C,0x0A,0x68, \ +0x67,0x4F,0x01,0x3A,0x0A,0x60,0xAA,0x6B,0x00,0x2A,0x74,0xD0,0x00,0x26,0xAE, \ +0x63,0xEA,0x6A,0x2B,0x6A,0xAB,0x42,0x26,0xD0,0x69,0x6A,0x59,0x62,0x29,0x6A, \ +0x6B,0x6A,0x19,0x62,0x91,0x00,0x5F,0x4A,0x53,0x58,0xAB,0x42,0x11,0xD1,0x2B, \ +0x6A,0x53,0x50,0x5D,0x49,0x0B,0x68,0x00,0x2B,0x02,0xD0,0x2E,0x6C,0xB3,0x43, \ +0x0B,0x60,0x5B,0x49,0x0B,0x68,0x9D,0x42,0x04,0xD1,0x5A,0x4B,0x1B,0x68,0x9B, \ +0x00,0xD2,0x58,0x0A,0x60,0xFF,0xF7,0x0E,0xFD,0x55,0x49,0x38,0x68,0x09,0x68, \ +0x88,0x42,0x60,0xD0,0x20,0x68,0x00,0x28,0x5E,0xD1,0x95,0xE0,0x00,0x26,0x4E, \ +0x4B,0x92,0x00,0x9E,0x50,0x2A,0x6C,0xD3,0x43,0x50,0x4A,0x16,0x68,0x33,0x40, \ +0x13,0x60,0x4B,0x4A,0x12,0x68,0x00,0x2A,0x03,0xD0,0x2E,0x6C,0xB2,0x43,0x48, \ +0x4E,0x32,0x60,0x1A,0x06,0x12,0x0E,0x02,0xD0,0x49,0x4B,0x9A,0x5C,0x14,0xE0, \ +0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x46,0x4B,0x9A,0x5C,0x08,0x32,0x0C, \ +0xE0,0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x42,0x4B,0x9A,0x5C,0x10,0x32, \ +0x04,0xE0,0x1A,0x0A,0x29,0xD0,0x3F,0x4B,0x9A,0x5C,0x18,0x32,0x3B,0x4B,0x1A, \ +0x60,0x39,0x4A,0x12,0x68,0x95,0x42,0x4D,0xD1,0x38,0x4E,0x35,0x4B,0x32,0x68, \ +0x36,0x4D,0x92,0x00,0x9A,0x58,0x2A,0x60,0x33,0x4A,0x12,0x68,0x00,0x2A,0x42, \ +0xD0,0x0E,0x1C,0x09,0x68,0x01,0x31,0x31,0x60,0xFF,0xF7,0xC1,0xFC,0xC0,0x20, \ +0xFF,0xF7,0xBE,0xFC,0x00,0xE0,0x42,0xE0,0x31,0x68,0x01,0x39,0x31,0x60,0x2A, \ +0x49,0x0A,0x68,0x11,0x06,0x09,0x0E,0x0D,0xD0,0x2B,0x4B,0x59,0x5C,0x1E,0xE0, \ +0x28,0x4B,0x20,0x21,0x19,0x60,0x25,0x49,0x00,0x26,0x0E,0x60,0xFF,0xF7,0xA9, \ +0xFC,0x38,0xE0,0x39,0xE0,0x38,0xE0,0x13,0x0A,0x19,0x06,0x09,0x0E,0x03,0xD0, \ +0x22,0x4B,0x59,0x5C,0x08,0x31,0x0B,0xE0,0x1B,0x0A,0x19,0x06,0x09,0x0E,0x03, \ +0xD0,0x1E,0x4B,0x59,0x5C,0x10,0x31,0x03,0xE0,0x19,0x0A,0x1C,0x4B,0x59,0x5C, \ +0x18,0x31,0x15,0x4B,0x89,0x00,0x59,0x58,0x17,0x4E,0xCB,0x6B,0x36,0x68,0xB3, \ +0x42,0x05,0xD8,0x29,0x60,0x09,0x6C,0x11,0x4B,0xC9,0x43,0x11,0x40,0x19,0x60, \ +0xFF,0xF7,0x82,0xFC,0x0F,0x4A,0x38,0x68,0x11,0x68,0x88,0x42,0x0F,0xD0,0x20, \ 
+0x68,0x00,0x28,0x0C,0xD1,0x09,0xE0,0xFF,0xF7,0x77,0xFC,0x0A,0x49,0x38,0x68, \ +0x09,0x68,0x88,0x42,0x04,0xD0,0x20,0x68,0x00,0x28,0x01,0xD1,0xFF,0xF7,0x51, \ +0xFE,0xF0,0xBD,0xD8,0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0xB8,0x03,0x00,0x02, \ +0xE0,0x0B,0x00,0x02,0xD0,0x03,0x00,0x02,0xBC,0x03,0x00,0x02,0xD4,0x03,0x00, \ +0x02,0xCC,0x03,0x00,0x02,0xE0,0x0A,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60, \ +0x41,0x60,0xF7,0x46,0x00,0x00,0x74,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01, \ +0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x7C,0x04,0x00,0x02,0x02,0x48,0x00,0x21, \ +0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x84,0x04,0x00,0x02,0x02,0x48,0x00, \ +0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x8C,0x04,0x00,0x02,0xBC,0x46, \ +0x03,0x1C,0x08,0x43,0x80,0x07,0x13,0xD1,0x12,0x1F,0x05,0xD3,0x01,0xCB,0x80, \ +0xC9,0xC0,0x1B,0x04,0xD1,0x12,0x1F,0xF9,0xD2,0xD2,0x1C,0x0C,0xD3,0x02,0xE0, \ +0x1B,0x1F,0x09,0x1F,0xD2,0x1C,0x18,0x78,0x0F,0x78,0xC0,0x1B,0x04,0xD1,0x5B, \ +0x1C,0x49,0x1C,0x52,0x1E,0xF7,0xD2,0x00,0x20,0x67,0x46,0xF7,0x46,0x43,0x1A, \ +0x93,0x42,0x30,0xD3,0x84,0x46,0x8B,0x07,0x07,0xD0,0x52,0x1E,0x29,0xD3,0x0B, \ +0x78,0x03,0x70,0x40,0x1C,0x49,0x1C,0x8B,0x07,0xF7,0xD1,0x83,0x07,0x17,0xD1, \ +0x10,0x3A,0x05,0xD3,0xB0,0xB4,0xB8,0xC9,0xB8,0xC0,0x10,0x3A,0xFB,0xD2,0xB0, \ +0xBC,0x0C,0x32,0x0F,0xD3,0x08,0xC9,0x08,0xC0,0x12,0x1F,0xFB,0xD2,0x0A,0xE0, \ +0x08,0xC9,0x03,0x70,0x1B,0x0A,0x43,0x70,0x1B,0x0A,0x83,0x70,0x1B,0x0A,0xC3, \ +0x70,0x00,0x1D,0x12,0x1F,0xF4,0xD2,0xD2,0x1C,0x05,0xD3,0x0B,0x78,0x03,0x70, \ +0x49,0x1C,0x40,0x1C,0x52,0x1E,0xF9,0xD2,0x60,0x46,0xF7,0x46,0x03,0x1C,0x0B, \ +0x43,0x13,0x43,0x9B,0x07,0x04,0xD1,0x12,0x1F,0x8B,0x58,0x83,0x50,0xFB,0xD1, \ +0xF7,0x46,0x52,0x1E,0x8B,0x5C,0x83,0x54,0xFB,0xD1,0xF7,0x46,0x00,0x00,0x4B, \ +0x08,0x02,0x1C,0x02,0xD1,0x00,0xF0,0x79,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9, \ +0x00,0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91, \ +0x42,0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0xF7,0x46, \ +0x00,0x00,0xCB,0x17,0x59,0x40,0xC9,0x1A,0xC2,0x17,0x50,0x40,0x80,0x1A,0x0C, \ +0xB4,0x4B,0x08,0x02,0x1C,0x02,0xD1,0x00,0xF0,0x5A,0xF8,0x52,0x00,0x9A,0x42, \ +0xFC,0xD9,0x00,0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52, \ +0x08,0x91,0x42,0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C, \ +0x0C,0xBC,0x5A,0x40,0x50,0x40,0x80,0x1A,0x59,0x40,0xC9,0x1A,0x70,0x47,0xB0, \ +0xB5,0x0D,0x48,0x42,0x6E,0x03,0x6E,0x97,0x00,0xC1,0x1F,0x75,0x39,0x9C,0x00, \ +0x0C,0x59,0xCD,0x59,0x2C,0x19,0xCC,0x51,0x59,0x1E,0x36,0x23,0x00,0x29,0x01, \ +0x66,0x03,0xDA,0x51,0x1E,0x41,0x66,0x03,0x66,0x03,0xE0,0x51,0x1E,0x41,0x66, \ +0x00,0xD5,0x43,0x66,0x60,0x00,0x40,0x08,0xB0,0xBD,0x10,0x05,0x00,0x02,0x80, \ +0xB5,0x09,0x49,0x17,0x22,0x0A,0x66,0x36,0x22,0x4A,0x66,0x07,0x4A,0x00,0x21, \ +0x03,0x0C,0x1F,0x18,0x8B,0x00,0xD7,0x50,0x05,0x4B,0x01,0x31,0x58,0x43,0x05, \ +0x4B,0xC0,0x18,0x37,0x29,0xF4,0xDB,0x80,0xBD,0x10,0x05,0x00,0x02,0x94,0x04, \ +0x00,0x02,0xCD,0x0D,0x01,0x00,0xE1,0x19,0xD6,0x66,0x00,0x47,0x08,0x47,0x10, \ +0x47,0x18,0x47,0x20,0x47,0x28,0x47,0x30,0x47,0x38,0x47,0x78,0x47,0x00,0x00, \ +0x2C,0xC0,0x9F,0xE5,0xFF,0x7F,0x8C,0xE8,0x3C,0xC0,0x8C,0xE2,0x0C,0x00,0x8F, \ +0xE2,0x3C,0x10,0x4C,0xE2,0x04,0xE0,0x4E,0xE2,0x00,0xE0,0x8C,0xE5,0x6F,0xED, \ +0xFF,0xEA,0x20,0x00,0x00,0x80,0x44,0x69,0x76,0x69,0x64,0x65,0x20,0x62,0x79, \ +0x20,0x7A,0x65,0x72,0x6F,0x00,0x00,0x78,0x05,0x00,0x02,0x78,0x47,0x00,0x00, \ +0x01,0xE0,0x8E,0xE3,0x04,0x00,0xA0,0xE3,0x00,0x00,0x90,0xE5,0xFF,0x04,0x00, \ +0xE2,0xEA,0x04,0x50,0xE3,0x01,0x00,0xA0,0x03,0x00,0x00,0xA0,0x13,0x1E,0xFF, \ 
+0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78, \ +0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x05,0x0B,0x40,0xE2,0x1B,0x0B,0x50,0xE3, \ +0x01,0x00,0xA0,0x33,0x00,0x00,0xA0,0x23,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00, \ +0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0, \ +0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x00, \ +0x00,0xA0,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3, \ +0x00,0x00,0x8F,0xE2,0x1E,0xFF,0x2F,0xE1,0x55,0x6E,0x6B,0x6E,0x6F,0x77,0x6E, \ +0x20,0x45,0x72,0x72,0x6F,0x72,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x42,0x72, \ +0x61,0x6E,0x63,0x68,0x20,0x54,0x68,0x72,0x6F,0x75,0x67,0x68,0x20,0x5A,0x65, \ +0x72,0x6F,0x00,0x01,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65, \ +0x64,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E,0x00,0x00, \ +0x00,0x02,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65,0x64,0x20, \ +0x53,0x57,0x49,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E, \ +0x00,0x00,0x00,0x03,0x00,0x02,0x00,0x50,0x72,0x65,0x66,0x65,0x74,0x63,0x68, \ +0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x04,0x00,0x02,0x00,0x44,0x61,0x74, \ +0x61,0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x05,0x00,0x02,0x00,0x41,0x64, \ +0x64,0x72,0x65,0x73,0x73,0x20,0x45,0x78,0x63,0x65,0x70,0x74,0x69,0x6F,0x6E, \ +0x00,0x00,0x00,0x06,0x00,0x02,0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65, \ +0x64,0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x07,0x00,0x02, \ +0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65,0x64,0x20,0x46,0x61,0x73,0x74, \ +0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x00,0x00,0x00,0x44, \ +0x4C,0x00,0x00,0x5C,0x4C,0x00,0x00,0x78,0x4C,0x00,0x00,0x98,0x4C,0x00,0x00, \ +0xAC,0x4C,0x00,0x00,0xBC,0x4C,0x00,0x00,0xD4,0x4C,0x00,0x00,0xEC,0x4C,0x00, \ +0x00,0x28,0x20,0x4F,0xE2,0x00,0x01,0x92,0xE7,0x07,0xED,0xFF,0xEA,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x40,0x2D,0xE9,0x50,0x00,0x9F,0xE5,0x50,0x10,0x9F,0xE5,0x01, \ +0x20,0xA0,0xE1,0x4C,0x40,0x9F,0xE5,0x04,0x20,0x82,0xE0,0x05,0x00,0x00,0xEB, \ +0x44,0x20,0x9F,0xE5,0x44,0x00,0x9F,0xE5,0x00,0x10,0xA0,0xE1,0x02,0x10,0x81, \ +0xE0,0x05,0x00,0x00,0xEB,0x00,0x80,0xBD,0xE8,0x02,0x00,0x51,0xE1,0x04,0x30, \ +0x90,0x34,0x04,0x30,0x81,0x34,0xFB,0xFF,0xFF,0x3A,0x0E,0xF0,0xA0,0xE1,0x00, \ +0x20,0xA0,0xE3,0x01,0x00,0x50,0xE1,0x04,0x20,0x80,0x34,0xFB,0xFF,0xFF,0x3A, \ +0x0E,0xF0,0xA0,0xE1,0xE8,0x4D,0x00,0x00,0x00,0x00,0x00,0x02,0xB8,0x05,0x00, \ +0x00,0xB8,0x07,0x00,0x00,0xB8,0x05,0x00,0x02,0x00,0x00,0x00,0x00,0x14,0x00, \ +0x0A,0x00,0x90,0x00,0x30,0x00,0x08,0x06,0x07,0x00,0x82,0x84,0x8B,0x96,0x09, \ +0x04,0x02,0x41,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x04,0xAC,0x6C, \ +0x32,0x70,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x64,0x00,0x30,0x75,0x64,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04, \ 
+0x03,0x00,0x04,0xAC,0x6C,0x32,0x70,0x55,0x4E,0x48,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x00,0x00,0x00,0x00, \ +0x45,0x55,0x00,0x00,0x00,0x00,0x00,0xFA,0x00,0x00,0x00,0xFA,0x00,0x00,0x2A, \ +0x09,0x2A,0x09,0x1F,0x00,0xFF,0x00,0x08,0x08,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x02,0x00,0x41,0x54,0x4D,0x45,0x4C,0x5F,0x41,0x50,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x01,0x00,0x05, \ +0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x01,0x01,0x00,0x64,0x02,0x10, \ +0x1E,0x1E,0x1E,0x1E,0x00,0x00,0x28,0x28,0x28,0x00,0x00,0x32,0x3C,0x46,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x01, \ +0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00, \ +0x01,0x00,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x00,0x01, \ +0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x00,0x01,0x01,0x00,0x00,0x01,0x01,0x00, \ +0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x04,0x08,0x10,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xCC,0x01,0x00,0x02,0x00,0x00,0x00, \ +0x07,0x03,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12, \ +0x01,0x10,0x01,0xFE,0x01,0x00,0x08,0xEB,0x03,0x05,0x76,0x00,0x01,0x00,0x00, \ +0x00,0x01,0x09,0x02,0x20,0x00,0x01,0x01,0x00,0x80,0xFA,0x09,0x04,0x00,0x00, \ +0x02,0xFF,0x00,0xFF,0x00,0x07,0x05,0x85,0x02,0x40,0x00,0x00,0x07,0x05,0x02, \ +0x02,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF, \ +0x07,0xFF,0x07,0xFF,0x1F,0x00,0x06,0x00,0x1E,0x00,0x20,0xFF,0x3F,0xFC,0x01, \ +0x01,0x01,0x01,0x0A,0x0A,0x0E,0x01,0x03,0x00,0x00,0x00,0x00,0xAA,0xAA,0x03, \ +0x00,0x00,0x00,0xAA,0xAA,0x03,0x00,0x00,0xF8,0x37,0x81,0xF3,0x80,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x58, \ +0x00,0x00,0x00,0xD8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00, \ +0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00, \ +0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04, \ +0x00,0x00,0x00,0xF6,0x07,0x00,0x00,0xFB,0x07,0x00,0x00,0x00,0x08,0x00,0x00, \ 
+0x05,0x08,0x00,0x00,0x0A,0x08,0x00,0x00,0x0F,0x08,0x00,0x00,0x14,0x08,0x00, \ +0x00,0x19,0x08,0x00,0x00,0x1E,0x08,0x00,0x00,0x23,0x08,0x00,0x00,0x28,0x08, \ +0x00,0x00,0x2D,0x08,0x00,0x00,0x32,0x08,0x00,0x00,0x3E,0x08,0x00,0x00,0x43, \ +0x6F,0x70,0x79,0x72,0x69,0x67,0x68,0x74,0x20,0x28,0x63,0x29,0x20,0x31,0x39, \ +0x39,0x36,0x2D,0x32,0x30,0x30,0x30,0x20,0x45,0x78,0x70,0x72,0x65,0x73,0x73, \ +0x20,0x4C,0x6F,0x67,0x69,0x63,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2A,0x20,0x54, \ +0x68,0x72,0x65,0x61,0x64,0x58,0x20,0x54,0x48,0x55,0x4D,0x42,0x2D,0x46,0x2F, \ +0x41,0x52,0x4D,0x20,0x56,0x65,0x72,0x73,0x69,0x6F,0x6E,0x20,0x47,0x33,0x2E, \ +0x30,0x66,0x2E,0x33,0x2E,0x30,0x62,0x20,0x2A,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0xF0,0xF0,0xF0,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x47,0x2D,0x47,0x42,0x2D,0x47,0x4C,0x2D, \ +0x4D,0x2D,0x44,0x2D,0x44,0x4C,0x2D,0x4B,0x4D,0x4C,0x2D,0x43,0x4D,0x52,0x2D, \ +0x48,0x4D,0x52,0x2D,0x4D,0x4C,0x32,0x2D,0x47,0x5A,0x2D,0x4B,0x48,0x32,0x2D, \ +0x43,0x4D,0x2D,0x52,0x50,0x2D,0x54,0x43,0x2D,0x4E,0x48,0x2D,0x54,0x44,0x2D, \ +0x41,0x50,0x2D,0x48,0x41,0x2D,0x47,0x46,0x2D,0x44,0x44,0x2D,0x41,0x54,0x2D, \ +0x4D,0x46,0x2D,0x4D,0x53,0x2D,0x44,0x57,0x2D,0x55,0x53,0x41,0x2D,0x43,0x41, \ +0x2D,0x53,0x44,0x2D,0x53,0x44,0x53,0x55,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00, \ +0x85,0x8E,0xD7,0x66,0x09,0x8C,0xD3,0xD5,0xF5,0xD8,0x09,0x0A,0xFB,0x87,0x1F, \ +0xBF,0x67,0xF7,0x8D,0xCB,0x69,0x07,0xF7,0xBD,0x34,0x12,0x3D,0x50,0xC8,0x84, \ +0x4F,0x7F,0xA3,0x02,0xDE,0x61,0xAE,0x8D,0x40,0xA7,0xE8,0xBD,0x24,0x7A,0xEA, \ +0xA2,0x15,0x51,0x57,0x2E,0xE6,0xBB,0xFF,0x7F,0xD5,0xF6,0x7A,0x83,0x2A,0x63, \ +0x77,0x1D,0x86,0x13,0x7C,0x2E,0x9F,0xE1,0x05,0x57,0x5F,0x69,0x2E,0x6B,0x93, \ +0x87,0x6E,0x9A,0xA1,0x50,0x94,0x0E,0x8B,0x72,0xAE,0x55,0xCC,0xC5,0xB1,0x8A, \ +0x0A,0xB1,0xD7,0x72,0x6F,0x85,0x17,0x5C,0x22,0xD0,0xA3,0xFD,0xC4,0x51,0x61, \ +0x98,0xED,0x89,0x9F,0x82,0xDB,0xF1,0x9D,0xC5,0xFB,0xBC,0x89,0xC1,0xEE,0x83, \ +0x59,0xB1,0x59,0x63,0x30,0x5C,0x50,0xCC,0xC9,0x5A,0xBC,0x9C,0xF9,0x30,0xE2, \ +0x2F,0x42,0x5E,0xF6,0x39,0xD2,0x7B,0x15,0x75,0xFB,0x58,0xC1,0x40,0x3E,0x9A, \ +0xEB,0x27,0xD9,0xA2,0x82,0xC5,0xC2,0xD6,0x69,0x05,0xB3,0x30,0x8E,0xED,0xD2, \ +0xDD,0x83,0x10,0x41,0xA4,0x1D,0x1F,0x15,0xE2,0x60,0x56,0xC5,0x2F,0xF3,0x04, \ +0x99,0xEF,0x8E,0xE1,0x08,0x32,0x59,0x4A,0x4C,0xED,0x7B,0x5B,0x40,0xFC,0x02, \ +0x81,0xD9,0x41,0x53,0x51,0xFA,0x3D,0xFF,0xAC,0xB5,0x6C,0x09,0x6D,0x1D,0xCC, \ +0xB3,0x2B,0xFF,0x15,0x3D,0x25,0x17,0x00,0x00,0x00,0x36,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00 \ +}; + +#define FW_503RFMD_EXTERNAL { \ +0x80,0xB5,0x10,0x49,0x00,0x20,0x08,0x70,0x0F,0x48,0x81,0x79,0xC0,0x79,0x00, \ 
+0x02,0x08,0x43,0x68,0x28,0x04,0xD0,0x03,0x21,0x0B,0x20,0x00,0xF0,0xEB,0xFC, \ +0x80,0xBD,0x08,0x21,0x0B,0x20,0x00,0xF0,0xE6,0xFC,0xC0,0x20,0xFE,0xF7,0xFD, \ +0xF8,0x07,0x1C,0x00,0xF0,0xA6,0xFA,0x38,0x1C,0xFE,0xF7,0xF7,0xF8,0x01,0x21, \ +0x0B,0x20,0x00,0xF0,0xD9,0xFC,0x80,0xBD,0x94,0x01,0x00,0x02,0xC4,0x09,0x00, \ +0x02,0xB0,0xB5,0x27,0x4C,0x20,0x78,0x0A,0x28,0x40,0xD2,0x02,0xA3,0x1B,0x5C, \ +0x5B,0x00,0x9F,0x44,0x00,0x1C,0x3C,0x05,0x09,0x0D,0x3C,0x11,0x15,0x19,0x3C, \ +0x1D,0xA0,0x78,0x20,0x49,0x45,0x18,0x16,0xE0,0xA0,0x78,0x1F,0x49,0x45,0x18, \ +0x12,0xE0,0xA0,0x78,0x1E,0x49,0x45,0x18,0x0E,0xE0,0xA0,0x78,0x1D,0x49,0x45, \ +0x18,0x0A,0xE0,0xA0,0x78,0x1C,0x49,0x45,0x18,0x06,0xE0,0xA0,0x78,0x1B,0x49, \ +0x45,0x18,0x02,0xE0,0xA0,0x78,0x1A,0x49,0x45,0x18,0x00,0x2D,0x1E,0xD0,0xC0, \ +0x20,0xFE,0xF7,0xBF,0xF8,0x61,0x78,0x07,0x1C,0x00,0x20,0x00,0x29,0x07,0xD9, \ +0x21,0x18,0x09,0x79,0x01,0x30,0x29,0x70,0x61,0x78,0x01,0x35,0x81,0x42,0xF7, \ +0xD8,0x02,0xF0,0x32,0xFE,0x38,0x1C,0xFE,0xF7,0xAD,0xF8,0x01,0x21,0x01,0x20, \ +0x00,0xF0,0x8F,0xFC,0xB0,0xBD,0x04,0x21,0x01,0x20,0x00,0xF0,0x8A,0xFC,0xB0, \ +0xBD,0x03,0x21,0x01,0x20,0x00,0xF0,0x85,0xFC,0xB0,0xBD,0xCC,0x09,0x00,0x02, \ +0x04,0x01,0x00,0x02,0x5C,0x00,0x00,0x02,0xC0,0x00,0x00,0x02,0x80,0x00,0x00, \ +0x02,0x18,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0xF0,0xB5, \ +0x82,0xB0,0x5A,0x49,0x0E,0x20,0x08,0x83,0x5A,0x4A,0x60,0x39,0x57,0x4C,0x01, \ +0x92,0x50,0x7A,0xCD,0x1D,0xCF,0x1D,0xE6,0x1D,0x19,0x36,0x69,0x37,0x49,0x35, \ +0x00,0x28,0x2F,0xD0,0xF0,0x7B,0x54,0x49,0x00,0x28,0x18,0xD0,0xF0,0x79,0x01, \ +0x28,0x01,0xDB,0x0E,0x28,0x05,0xDD,0x03,0x21,0x03,0x20,0x00,0xF0,0x55,0xFC, \ +0x02,0xB0,0xF0,0xBD,0x00,0x20,0x00,0x22,0x0B,0x18,0x9A,0x73,0x0A,0x54,0x01, \ +0x30,0x00,0x04,0x00,0x0C,0x0E,0x28,0xF7,0xDB,0xFA,0x71,0x01,0x21,0xE9,0x71, \ +0x29,0xE0,0xF0,0x79,0x41,0x18,0x49,0x7B,0x00,0x29,0x0A,0xD1,0x02,0xF0,0x96, \ +0xF9,0x00,0x06,0x00,0x0E,0xF0,0x71,0x04,0xD1,0x03,0x21,0x03,0x20,0x00,0xF0, \ +0x36,0xFC,0xDF,0xE7,0x00,0x22,0xEA,0x71,0x16,0xE0,0xF8,0x7A,0x3D,0x49,0x40, \ +0x00,0x08,0x5A,0xF1,0x79,0x00,0x91,0x4A,0x1E,0x01,0x21,0x91,0x40,0x08,0x40, \ +0x0B,0xD1,0x00,0x98,0x02,0xF0,0x7C,0xF9,0xF0,0x71,0xF0,0x79,0x00,0x28,0x04, \ +0xD1,0x03,0x21,0x03,0x20,0x00,0xF0,0x1C,0xFC,0xC5,0xE7,0xC0,0x20,0xFE,0xF7, \ +0x32,0xF8,0x06,0x1C,0x04,0x20,0xFB,0xF7,0x40,0xFE,0x2F,0x49,0x00,0x20,0x2F, \ +0x4A,0x0B,0x18,0x12,0x5C,0x01,0x30,0x00,0x04,0x00,0x0C,0x04,0x28,0x1A,0x74, \ +0xF6,0xDB,0x2C,0x48,0x2C,0x4A,0x00,0x88,0x00,0x23,0x10,0x80,0xC8,0x1D,0x09, \ +0x30,0x0E,0x22,0x04,0x21,0x01,0xF0,0x8C,0xFE,0x01,0x21,0xA9,0x71,0x27,0x48, \ +0x04,0x21,0x01,0x75,0x00,0x21,0xB9,0x72,0x06,0x22,0x21,0x1C,0x25,0x48,0xFE, \ +0xF7,0xF0,0xFB,0xA1,0x1D,0x20,0x22,0x23,0x48,0xFE,0xF7,0xEB,0xFB,0xE0,0x1D, \ +0x19,0x30,0x81,0x7B,0x21,0x4A,0x51,0x71,0x00,0x21,0x69,0x70,0x01,0x9A,0x20, \ +0x23,0x91,0x71,0x81,0x79,0x1E,0x4A,0x91,0x74,0xC0,0x79,0xD0,0x74,0x20,0x8D, \ +0x90,0x82,0x60,0x8D,0xD0,0x82,0xA0,0x8D,0x10,0x83,0x1A,0x48,0x01,0x78,0x19, \ +0x43,0x01,0x70,0x01,0x21,0xF9,0x70,0x01,0x9A,0x51,0x71,0xB8,0x78,0x01,0x28, \ +0x02,0xD1,0x00,0x20,0x03,0xF0,0xC0,0xF9,0x00,0x20,0xB8,0x70,0x30,0x1C,0xFD, \ +0xF7,0xDF,0xFF,0x01,0x20,0x28,0x70,0x08,0x21,0x03,0x20,0x00,0xF0,0xBF,0xFB, \ +0x68,0xE7,0xCC,0x09,0x00,0x02,0xA4,0x09,0x00,0x02,0xB0,0x00,0x00,0x02,0x14, \ +0x01,0x00,0x02,0x6C,0x02,0x00,0x02,0x00,0x00,0x00,0x02,0x84,0x02,0x00,0x02, \ +0x38,0x01,0x00,0x02,0x80,0x00,0x00,0x02,0x44,0x09,0x00,0x02,0xFC,0x00,0x00, \ +0x02,0xDC,0x00,0x00,0x02,0x04,0x01,0x00,0x02,0xC0,0x00,0x00,0x02,0x94,0x01, \ 
+0x00,0x02,0xF0,0xB5,0x82,0xB0,0x41,0x49,0x40,0x4E,0x01,0x91,0x48,0x7A,0xF4, \ +0x1D,0x19,0x34,0x00,0x28,0x3F,0x4F,0x13,0xD0,0xF8,0x79,0x00,0x28,0x05,0xD1, \ +0x03,0x21,0x04,0x20,0x00,0xF0,0x8E,0xFB,0x02,0xB0,0xF0,0xBD,0xE0,0x79,0x3A, \ +0x49,0x40,0x18,0x40,0x7B,0x00,0x28,0x13,0xD1,0x03,0x21,0x04,0x20,0x00,0xF0, \ +0x82,0xFB,0xF2,0xE7,0xF8,0x7A,0x35,0x49,0x40,0x00,0x08,0x5A,0xE1,0x79,0x01, \ +0x22,0x01,0x39,0x8A,0x40,0x10,0x40,0x04,0xD1,0x03,0x21,0x04,0x20,0x00,0xF0, \ +0x73,0xFB,0xE3,0xE7,0xC0,0x20,0xFD,0xF7,0x89,0xFF,0x00,0x90,0xA0,0x79,0x2D, \ +0x4D,0x02,0x28,0x02,0xD1,0x03,0x20,0xA8,0x71,0x03,0xE0,0x01,0x28,0x40,0xD1, \ +0x04,0x20,0xA8,0x71,0x04,0x20,0xFB,0xF7,0x8C,0xFD,0x27,0x49,0x00,0x20,0x88, \ +0x70,0xA0,0x79,0x26,0x49,0x06,0x22,0x88,0x70,0x08,0x1F,0x31,0x1C,0xFE,0xF7, \ +0x53,0xFB,0xB1,0x1D,0x20,0x22,0x23,0x48,0xFE,0xF7,0x4E,0xFB,0xA0,0x7A,0x1F, \ +0x49,0x48,0x71,0x00,0x20,0x68,0x70,0x01,0x99,0x88,0x71,0x08,0x21,0x04,0x20, \ +0x00,0xF0,0x45,0xFB,0x01,0x20,0xF8,0x70,0x01,0x99,0x48,0x71,0xB8,0x78,0x01, \ +0x28,0x02,0xD1,0x00,0x20,0x03,0xF0,0x31,0xF9,0x00,0x20,0xB8,0x70,0x17,0x48, \ +0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78,0x10,0x23,0x99,0x43,0x01, \ +0x70,0x00,0x98,0xFD,0xF7,0x47,0xFF,0x30,0x8D,0x81,0x02,0x04,0x20,0xFB,0xF7, \ +0x3A,0xFD,0xE0,0x79,0x03,0xF0,0x93,0xF8,0x95,0xE7,0x03,0x21,0x04,0x20,0x00, \ +0xF0,0x20,0xFB,0x00,0x98,0xFD,0xF7,0x37,0xFF,0x8D,0xE7,0xCC,0x09,0x00,0x02, \ +0xB0,0x00,0x00,0x02,0xB4,0x09,0x00,0x02,0x14,0x01,0x00,0x02,0x6C,0x02,0x00, \ +0x02,0x94,0x09,0x00,0x02,0x04,0x01,0x00,0x02,0x00,0x01,0x00,0x02,0xDC,0x00, \ +0x00,0x02,0x94,0x01,0x00,0x02,0xF0,0xB5,0x25,0x48,0x10,0x23,0x01,0x78,0x22, \ +0x4C,0x99,0x43,0x01,0x70,0x01,0x78,0x20,0x23,0x99,0x43,0x01,0x70,0x21,0x48, \ +0x21,0x49,0xC0,0x7A,0x40,0x00,0x09,0x5A,0xE7,0x18,0xF8,0x79,0x01,0x25,0x42, \ +0x1E,0x2B,0x1C,0x93,0x40,0x19,0x40,0x04,0xD1,0x03,0x21,0x05,0x20,0x00,0xF0, \ +0xEC,0xFA,0xF0,0xBD,0xB9,0x79,0x01,0x29,0x04,0xD0,0x03,0x21,0x05,0x20,0x00, \ +0xF0,0xE4,0xFA,0xF0,0xBD,0x03,0xF0,0x4F,0xF8,0xC0,0x20,0xFD,0xF7,0xF8,0xFE, \ +0x06,0x1C,0x38,0x7A,0x12,0x4F,0x78,0x71,0x12,0x48,0xC1,0x1D,0x39,0x31,0x8D, \ +0x70,0xA1,0x1D,0x1C,0x30,0x0C,0x1C,0x7A,0x79,0xFE,0xF7,0xCE,0xFA,0x7A,0x79, \ +0x0E,0x4F,0x21,0x1C,0xF8,0x1D,0x0D,0x30,0xFE,0xF7,0xC7,0xFA,0x00,0x20,0xF9, \ +0x1D,0x29,0x31,0x88,0x71,0x00,0xF0,0x13,0xF8,0x30,0x1C,0xFD,0xF7,0xDA,0xFE, \ +0xF0,0xBD,0x00,0x00,0xCC,0x09,0x00,0x02,0x94,0x01,0x00,0x02,0xB4,0x09,0x00, \ +0x02,0x6C,0x02,0x00,0x02,0x04,0x01,0x00,0x02,0xC0,0x00,0x00,0x02,0x80,0x00, \ +0x00,0x02,0xF0,0xB5,0xF9,0xF7,0x25,0xFF,0xFE,0xF7,0x47,0xFB,0xF9,0xF7,0x21, \ +0xFF,0x2C,0x4F,0x02,0x21,0xB9,0x73,0x00,0x21,0xF9,0x73,0x38,0x74,0x01,0x0A, \ +0x79,0x74,0x01,0x0C,0x00,0x0E,0xB9,0x74,0x27,0x4E,0xF8,0x74,0xF9,0x1D,0x07, \ +0x31,0xF0,0x1D,0x06,0x22,0x35,0x30,0xFE,0xF7,0x93,0xFA,0x24,0x4C,0x01,0x25, \ +0xF8,0x1D,0x29,0x30,0x25,0x75,0x05,0x71,0x22,0x48,0xF1,0x1D,0x42,0x79,0xF8, \ +0x1D,0x0D,0x30,0x15,0x31,0xFE,0xF7,0x85,0xFA,0x1F,0x48,0x1F,0x4A,0x00,0x21, \ +0x53,0x5C,0x46,0x18,0x01,0x31,0x04,0x29,0x33,0x74,0xF9,0xD3,0x1C,0x49,0x00, \ +0x23,0x09,0x88,0x39,0x80,0x02,0x7D,0x04,0x21,0x10,0x30,0x01,0xF0,0x03,0xFD, \ +0x19,0x48,0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78,0x10,0x23,0x19, \ +0x43,0x01,0x70,0x10,0x48,0x85,0x70,0xFB,0xF7,0x56,0xFC,0x39,0x88,0x89,0x02, \ +0x09,0x1A,0x06,0x20,0xFB,0xF7,0x74,0xFC,0xE0,0x1D,0x49,0x30,0x45,0x70,0x05, \ +0x21,0x81,0x71,0x0E,0x48,0x01,0x68,0x0E,0x48,0xC2,0x69,0x11,0x43,0xC1,0x61, \ +0x0D,0x48,0x01,0x21,0x05,0x70,0x05,0x20,0x00,0xF0,0x51,0xFA,0xF0,0xBD,0x80, \ 
+0x00,0x00,0x02,0xC0,0x00,0x00,0x02,0x44,0x09,0x00,0x02,0x04,0x01,0x00,0x02, \ +0x00,0x00,0x00,0x02,0x84,0x02,0x00,0x02,0x38,0x01,0x00,0x02,0x94,0x01,0x00, \ +0x02,0xA4,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x3A,0x01,0x00,0x02,0xF0,0xB5, \ +0x54,0x4F,0x54,0x4E,0xFC,0x1D,0xF9,0x1D,0x09,0x31,0x59,0x34,0x0D,0x1C,0xF0, \ +0x1D,0x0D,0x30,0x22,0x79,0xFE,0xF7,0x2B,0xFA,0x22,0x79,0x29,0x1C,0x4F,0x48, \ +0xFE,0xF7,0x26,0xFA,0x20,0x79,0x4E,0x49,0x4E,0x4A,0x48,0x71,0xB8,0x7B,0x00, \ +0x28,0x03,0xD1,0x10,0x70,0xF0,0x72,0x50,0x70,0x08,0xE0,0x01,0x20,0x10,0x70, \ +0xF0,0x72,0xF8,0x7B,0xD1,0x1D,0x39,0x31,0x50,0x70,0xF8,0x78,0x08,0x70,0x00, \ +0x25,0x0D,0x20,0x68,0x43,0xC1,0x19,0x43,0x4A,0x30,0x31,0x80,0x18,0x0D,0x22, \ +0x0C,0x30,0xFE,0xF7,0x07,0xFA,0x01,0x35,0x04,0x2D,0xF2,0xD3,0x60,0x79,0x00, \ +0x28,0x03,0xD0,0x3C,0x49,0x01,0x20,0x48,0x72,0x02,0xE0,0x3A,0x49,0x00,0x20, \ +0x48,0x72,0x78,0x7B,0x3A,0x49,0x0E,0x28,0x02,0xDC,0x01,0x28,0x00,0xDB,0x08, \ +0x75,0xB8,0x78,0x37,0x4A,0x10,0x74,0x38,0x7B,0x01,0x28,0x02,0xD1,0x32,0x4B, \ +0xD8,0x70,0x02,0xE0,0x30,0x4B,0x00,0x20,0xD8,0x70,0xF8,0x88,0x10,0x81,0xB8, \ +0x88,0x50,0x81,0x38,0x78,0x2D,0x4A,0xD0,0x70,0xE0,0x88,0x2F,0x4A,0x30,0x80, \ +0x00,0x20,0x3B,0x18,0x1C,0x7A,0x0D,0x18,0x2C,0x74,0x1B,0x7A,0x13,0x54,0x01, \ +0x30,0x04,0x28,0xF6,0xD3,0x30,0x88,0x29,0x4A,0x00,0x23,0x10,0x80,0xC8,0x1D, \ +0x09,0x30,0x0F,0x1C,0x0E,0x22,0x04,0x21,0x01,0xF0,0x58,0xFC,0x00,0xF0,0xFA, \ +0xF8,0x24,0x4C,0x25,0x49,0xE0,0x1D,0x69,0x30,0xC0,0x7A,0x08,0x5C,0x38,0x75, \ +0x23,0x4F,0x38,0x78,0x02,0x28,0x28,0xD1,0x02,0xF0,0xA5,0xFB,0x03,0xF0,0xC6, \ +0xF8,0x17,0x49,0x88,0x78,0x00,0x28,0x07,0xD0,0xFB,0xF7,0xA1,0xFB,0x31,0x88, \ +0x89,0x02,0x09,0x1A,0x06,0x20,0xFB,0xF7,0xBF,0xFB,0x01,0x20,0x00,0xF0,0xC6, \ +0xF9,0x02,0xF0,0x44,0xFB,0x01,0x20,0xF9,0xF7,0x21,0xFE,0x01,0x20,0x80,0x06, \ +0x80,0x69,0xFE,0xF7,0x3C,0xFA,0xFB,0xF7,0x82,0xFB,0xFB,0xF7,0x08,0xF8,0xFE, \ +0xF7,0x18,0xFA,0x80,0x06,0x80,0x0E,0xA0,0x62,0x01,0x20,0x38,0x70,0xF0,0xBD, \ +0x02,0xF0,0x2E,0xFB,0xF0,0xBD,0x00,0x00,0xCC,0x09,0x00,0x02,0x80,0x00,0x00, \ +0x02,0xDC,0x00,0x00,0x02,0x04,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x00,0x00, \ +0x00,0x02,0xC0,0x00,0x00,0x02,0x84,0x02,0x00,0x02,0x38,0x01,0x00,0x02,0x44, \ +0x09,0x00,0x02,0x7C,0x02,0x00,0x02,0x3B,0x01,0x00,0x02,0x80,0xB5,0x1D,0x49, \ +0x1B,0x4A,0x0F,0x68,0x0B,0x2F,0x23,0xD2,0x01,0xA3,0xDB,0x5D,0x5B,0x00,0x9F, \ +0x44,0x1F,0x05,0x0A,0x0D,0x10,0x12,0x15,0x18,0x1F,0x1B,0x1E,0x00,0x06,0x23, \ +0xFF,0x20,0x01,0x30,0x8B,0x60,0x14,0xE0,0xFF,0x20,0x41,0x30,0x11,0xE0,0xFF, \ +0x20,0x51,0x30,0x0E,0xE0,0x0B,0x20,0x0C,0xE0,0xFF,0x20,0x31,0x30,0x09,0xE0, \ +0xFF,0x20,0x11,0x30,0x06,0xE0,0xFF,0x20,0x61,0x30,0x03,0xE0,0xFF,0x20,0x71, \ +0x30,0x00,0xE0,0x00,0x20,0x01,0x23,0x4B,0x60,0x89,0x68,0x00,0xF0,0xD4,0xF9, \ +0x04,0x21,0x0C,0x20,0x00,0xF0,0x45,0xF9,0x0F,0x20,0x00,0x06,0x81,0x88,0x03, \ +0x4B,0x19,0x43,0x81,0x80,0x80,0xBD,0x58,0x0A,0x00,0x02,0x98,0x02,0x00,0x02, \ +0x08,0x08,0x00,0x00,0xB0,0xB5,0x0D,0x4D,0x00,0x24,0xE8,0x1D,0x49,0x30,0x0C, \ +0x4F,0x04,0x70,0xF8,0x7C,0x02,0xF0,0x9C,0xFE,0xE8,0x1D,0x69,0x30,0x84,0x72, \ +0x38,0x8B,0x81,0x02,0x04,0x20,0xFB,0xF7,0x38,0xFB,0xB8,0x7C,0x00,0x28,0x03, \ +0xD1,0x01,0x20,0xA8,0x77,0x00,0x05,0xB0,0xBD,0x20,0x1C,0xB0,0xBD,0x00,0x00, \ +0x44,0x09,0x00,0x02,0xC0,0x00,0x00,0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x49, \ +0x32,0x91,0x70,0x01,0x21,0x81,0x77,0x10,0x20,0xF7,0x46,0x00,0x00,0x44,0x09, \ +0x00,0x02,0x03,0x48,0x00,0x21,0xC1,0x73,0x01,0x21,0x81,0x73,0x08,0x07,0xF7, \ +0x46,0x00,0x00,0x54,0x09,0x00,0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x49,0x32, \ 
+0x51,0x71,0x01,0x21,0x81,0x77,0x08,0x05,0xF7,0x46,0x00,0x00,0x44,0x09,0x00, \ +0x02,0xB0,0xB5,0x04,0x20,0xFB,0xF7,0x1E,0xFB,0x0F,0x48,0xC7,0x1D,0x49,0x37, \ +0xB9,0x79,0x01,0x29,0x16,0xD1,0x03,0x21,0x70,0x30,0x81,0x72,0x00,0x25,0x0B, \ +0x4C,0x7D,0x71,0xE0,0x7C,0x01,0xF0,0x3B,0xFE,0x00,0x28,0x07,0xD1,0x3D,0x70, \ +0x02,0x20,0xB8,0x71,0x01,0x21,0x03,0x20,0x00,0xF0,0xDA,0xF8,0xB0,0xBD,0x01, \ +0x21,0x39,0x70,0xE0,0x74,0xB0,0xBD,0x02,0xF0,0xFB,0xF9,0xB0,0xBD,0x44,0x09, \ +0x00,0x02,0xC0,0x00,0x00,0x02,0x12,0x49,0xC9,0x7D,0x32,0x29,0x1A,0xD0,0x09, \ +0xDC,0x10,0x29,0x11,0xD0,0x20,0x29,0x11,0xD0,0x30,0x29,0x11,0xD0,0x31,0x29, \ +0x08,0xD1,0x03,0x20,0x06,0xE0,0x40,0x29,0x0F,0xD0,0x41,0x29,0x0F,0xD0,0x50, \ +0x29,0x00,0xD1,0x07,0x20,0x08,0x49,0xC8,0x72,0xF7,0x46,0x00,0x20,0xFA,0xE7, \ +0x01,0x20,0xF8,0xE7,0x02,0x20,0xF6,0xE7,0x04,0x20,0xF4,0xE7,0x05,0x20,0xF2, \ +0xE7,0x06,0x20,0xF0,0xE7,0x00,0x00,0x00,0x00,0x00,0x02,0xB4,0x09,0x00,0x02, \ +0xF0,0xB5,0x1E,0x4D,0x01,0x24,0x28,0x78,0x01,0x28,0x30,0xD1,0x1C,0x4C,0x1D, \ +0x49,0xE0,0x7A,0x1D,0x4E,0x08,0x5C,0x30,0x75,0xC0,0x20,0xFD,0xF7,0xAF,0xFC, \ +0x07,0x1C,0x1A,0x48,0x01,0x78,0x02,0x29,0x06,0xD1,0x01,0x21,0x01,0x70,0x30, \ +0x7D,0x02,0xF0,0x03,0xFD,0x20,0x73,0x0D,0xE0,0x07,0x20,0x40,0x06,0xC1,0x69, \ +0x10,0x23,0x99,0x43,0xC1,0x61,0x13,0x48,0x01,0x21,0x41,0x71,0x00,0x20,0x02, \ +0xF0,0x73,0xFE,0x00,0x20,0xA0,0x70,0x20,0x7B,0x01,0x28,0x01,0xD1,0x00,0x20, \ +0x28,0x70,0x20,0x7B,0x01,0x21,0x00,0x28,0x00,0xD1,0x05,0x21,0x38,0x1C,0x0C, \ +0x1C,0xFD,0xF7,0x87,0xFC,0x21,0x06,0x09,0x0E,0x06,0x20,0x00,0xF0,0x68,0xF8, \ +0xF0,0xBD,0x00,0x00,0xB3,0x02,0x00,0x02,0xB4,0x09,0x00,0x02,0x7C,0x02,0x00, \ +0x02,0x00,0x00,0x00,0x02,0xB2,0x02,0x00,0x02,0xB0,0x00,0x00,0x02,0x00,0xB5, \ +0x12,0x48,0x01,0x78,0x0D,0x29,0x1A,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F, \ +0x44,0x00,0x1C,0x16,0x07,0x16,0x07,0x07,0x07,0x0B,0x0E,0x16,0x16,0x07,0x07, \ +0x07,0x00,0x0B,0x49,0x01,0x20,0x08,0x70,0x00,0xBD,0xFF,0xF7,0x9F,0xFF,0x00, \ +0xBD,0x08,0x49,0x02,0x20,0x08,0x70,0x08,0x21,0x07,0x20,0x00,0xF0,0x3B,0xF8, \ +0x00,0xBD,0x00,0x78,0x02,0x21,0x00,0xF0,0x36,0xF8,0x00,0xBD,0x00,0x00,0xC4, \ +0x09,0x00,0x02,0xD5,0x01,0x00,0x02,0x3C,0x01,0x00,0x02,0x00,0xB5,0x15,0x48, \ +0x01,0x78,0x0D,0x29,0x20,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00, \ +0x1C,0x1C,0x07,0x1C,0x0A,0x0D,0x13,0x1C,0x1C,0x1C,0x1C,0x10,0x16,0x19,0x00, \ +0xFF,0xF7,0x47,0xFB,0x00,0xBD,0xFF,0xF7,0xA4,0xFB,0x00,0xBD,0xFF,0xF7,0x75, \ +0xFC,0x00,0xBD,0x00,0xF0,0xA2,0xFD,0x00,0xBD,0xFF,0xF7,0x07,0xFD,0x00,0xBD, \ +0xFF,0xF7,0x12,0xFB,0x00,0xBD,0xFF,0xF7,0x91,0xFE,0x00,0xBD,0x00,0x78,0x02, \ +0x21,0x00,0xF0,0x04,0xF8,0x00,0xBD,0x00,0x00,0xC4,0x09,0x00,0x02,0x04,0x4A, \ +0x10,0x70,0x04,0x48,0x01,0x70,0x04,0x48,0x00,0x21,0x01,0x70,0x41,0x70,0xF7, \ +0x46,0x00,0x00,0x5C,0x02,0x00,0x02,0x5D,0x02,0x00,0x02,0xC4,0x09,0x00,0x02, \ +0x04,0x48,0x00,0x21,0xC2,0x1D,0x69,0x32,0x51,0x70,0x01,0x21,0x81,0x77,0x08, \ +0x02,0xF7,0x46,0x00,0x00,0x44,0x09,0x00,0x02,0x80,0xB5,0x0F,0x4F,0x01,0x28, \ +0x03,0xD1,0xF9,0xF7,0x56,0xFC,0xF8,0x62,0x38,0x63,0x0C,0x48,0x01,0x21,0x80, \ +0x89,0x0C,0x4A,0xB8,0x87,0x39,0x72,0x79,0x72,0x39,0x73,0x00,0x20,0x38,0x74, \ +0x38,0x60,0xB8,0x72,0xF8,0x72,0x10,0x70,0xB9,0x73,0x79,0x60,0x06,0x49,0xCA, \ +0x7A,0x06,0x49,0xCA,0x70,0x88,0x70,0x08,0x70,0x80,0xBD,0x00,0x00,0x44,0x09, \ +0x00,0x02,0xC0,0x00,0x00,0x02,0xE0,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0xC8, \ +0x01,0x00,0x02,0xB0,0xB5,0xF3,0x25,0x2D,0x05,0x07,0x1C,0xA8,0x68,0x06,0x20, \ +0xE8,0x60,0x0C,0x1C,0x28,0x69,0x80,0x08,0xFC,0xD3,0x0A,0x20,0xF9,0xF7,0x10, \ 
+0xFC,0xA8,0x68,0x78,0x09,0x08,0x23,0x18,0x40,0x02,0x23,0x18,0x43,0xE8,0x60, \ +0x28,0x69,0x80,0x08,0xFC,0xD3,0x38,0x06,0x00,0x0E,0xE8,0x60,0x28,0x69,0x80, \ +0x08,0xFC,0xD3,0xA8,0x68,0x20,0x06,0x00,0x0E,0xE8,0x60,0x28,0x69,0x80,0x08, \ +0xFC,0xD3,0xA8,0x68,0xB0,0xBD,0xF0,0xB5,0x14,0x1C,0x0D,0x1C,0x07,0x1C,0xFC, \ +0xF7,0x69,0xFE,0x00,0x26,0x00,0x2F,0x10,0xD9,0xFC,0xF7,0xB6,0xFE,0x40,0x08, \ +0xFB,0xD2,0x28,0x20,0xF9,0xF7,0xE7,0xFB,0xA9,0x5D,0xA0,0x19,0xFF,0xF7,0xC5, \ +0xFF,0x28,0x20,0xF9,0xF7,0xE0,0xFB,0x01,0x36,0xBE,0x42,0xEE,0xD3,0xFC,0xF7, \ +0x71,0xFE,0x00,0x20,0xF0,0xBD,0xF0,0xB5,0x84,0xB0,0x02,0x1C,0x48,0x4B,0x08, \ +0x1C,0x19,0x68,0x46,0x4F,0x00,0x29,0x74,0xD0,0x59,0x68,0x01,0x29,0x72,0xD1, \ +0x00,0x24,0x0F,0x21,0x09,0x06,0x8C,0x80,0x8C,0x81,0x0C,0x88,0xFE,0x1D,0x3C, \ +0x36,0xF5,0x1F,0x09,0x89,0x07,0x3D,0xEC,0x1F,0x12,0x3C,0x19,0x68,0xE3,0x1F, \ +0x07,0x3B,0x03,0x93,0x20,0x33,0x02,0x93,0x04,0x3B,0x01,0x93,0x0A,0x33,0x00, \ +0x93,0x0A,0x29,0x2F,0xD1,0x0B,0x22,0x04,0x20,0x01,0x99,0xFF,0xF7,0xB8,0xFF, \ +0xFF,0x22,0x06,0x20,0x01,0x32,0x02,0x99,0xFF,0xF7,0xB2,0xFF,0xFF,0x22,0x0E, \ +0x20,0x39,0x1C,0x41,0x32,0xFF,0xF7,0xAC,0xFF,0xFF,0x22,0x0E,0x20,0x51,0x32, \ +0x03,0x99,0xFF,0xF7,0xA6,0xFF,0xFF,0x22,0x0E,0x20,0x21,0x1C,0x11,0x32,0xFF, \ +0xF7,0xA0,0xFF,0xFF,0x22,0x0E,0x20,0x29,0x1C,0x61,0x32,0xFF,0xF7,0x9A,0xFF, \ +0xFF,0x22,0x0E,0x20,0x31,0x1C,0x71,0x32,0xFF,0xF7,0x94,0xFF,0xFF,0x22,0x01, \ +0x20,0x31,0x32,0x00,0x99,0xFF,0xF7,0x8E,0xFF,0x02,0xE0,0x39,0x1C,0xFF,0xF7, \ +0x8A,0xFF,0xFC,0xF7,0xF7,0xFD,0x06,0x22,0xFF,0x21,0x01,0x31,0x02,0x98,0xFC, \ +0xF7,0x58,0xFE,0x04,0x22,0x0B,0x21,0x01,0x98,0xFC,0xF7,0x53,0xFE,0x0E,0x22, \ +0xFF,0x21,0x38,0x1C,0x41,0x31,0xFC,0xF7,0x4D,0xFE,0x0E,0x22,0xFF,0x21,0x51, \ +0x31,0x03,0x98,0xFC,0xF7,0x47,0xFE,0x0E,0x22,0xFF,0x21,0x28,0x1C,0x61,0x31, \ +0xFC,0xF7,0x41,0xFE,0x0E,0x22,0xFF,0x21,0x30,0x1C,0x71,0x31,0xFC,0xF7,0x3B, \ +0xFE,0x01,0xE0,0x11,0xE0,0x10,0xE0,0x0E,0x22,0xFF,0x21,0x20,0x1C,0x11,0x31, \ +0xFC,0xF7,0x32,0xFE,0x01,0x22,0xFF,0x21,0x31,0x31,0x00,0x98,0xFC,0xF7,0x2C, \ +0xFE,0xFC,0xF7,0xE1,0xFD,0x03,0x4B,0x00,0x24,0x1C,0x60,0x04,0xB0,0xF0,0xBD, \ +0x58,0x0A,0x00,0x02,0x98,0x02,0x00,0x02,0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05, \ +0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20, \ +0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x06,0x49,0x00, \ +0x68,0x10,0x23,0x08,0x73,0x38,0x7B,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00,0x00, \ +0x70,0x03,0x00,0x0D,0x2C,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0B, \ +0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x03,0x2B, \ +0x03,0xD0,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x04, \ +0x49,0x00,0x20,0x08,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF3,0xE7, \ +0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05, \ +0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20, \ +0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x00,0x21,0x01, \ +0x73,0x01,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00,0x00, \ +0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x00,0xB5,0x00,0x28,0x05,0xD1,0x00, \ +0x29,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x03,0xD0,0x06,0x49,0x20,0x20, \ +0x08,0x73,0x00,0xBD,0xFC,0xF7,0x4F,0xFB,0x04,0x49,0x00,0x20,0x08,0x80,0x03, \ +0x49,0x08,0x80,0x00,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0x40,0x02,0x00,0x02, \ +0x42,0x02,0x00,0x02,0xB0,0xB4,0x20,0x25,0x00,0x28,0x18,0x4C,0x03,0xD1,0x02, \ +0x2A,0x01,0xD1,0x01,0x2B,0x02,0xD1,0x25,0x73,0xB0,0xBC,0xF7,0x46,0x08,0x06, \ 
+0x00,0x0E,0x02,0x2B,0x05,0xD1,0x00,0x28,0x01,0xD0,0x25,0x73,0xF5,0xE7,0x00, \ +0x27,0x10,0xE0,0x03,0x2B,0x0E,0xD1,0x00,0x28,0x08,0xD0,0x02,0x28,0x08,0xD0, \ +0x80,0x28,0x04,0xD0,0x85,0x28,0x11,0xD1,0x0A,0x48,0x07,0x88,0x03,0xE0,0x00, \ +0x27,0x01,0xE0,0x09,0x48,0x07,0x88,0x80,0x20,0x20,0x73,0x08,0x48,0x00,0x21, \ +0x07,0x73,0x01,0x73,0x20,0x7B,0x10,0x23,0x18,0x43,0x20,0x73,0xD7,0xE7,0x25, \ +0x73,0xD5,0xE7,0x70,0x03,0x00,0x0D,0x42,0x02,0x00,0x02,0x40,0x02,0x00,0x02, \ +0x30,0x03,0x00,0x0D,0x90,0xB5,0x20,0x27,0x00,0x28,0x0C,0x4C,0x03,0xD1,0x00, \ +0x2A,0x01,0xD1,0x03,0x2B,0x01,0xD0,0x27,0x73,0x90,0xBD,0x09,0x06,0x09,0x0E, \ +0x01,0x20,0x02,0x29,0x04,0xD0,0x85,0x29,0x07,0xD1,0x05,0x49,0x08,0x80,0x01, \ +0xE0,0x05,0x49,0x08,0x80,0xFC,0xF7,0xED,0xFA,0x90,0xBD,0x27,0x73,0x90,0xBD, \ +0x70,0x03,0x00,0x0D,0x42,0x02,0x00,0x02,0x40,0x02,0x00,0x02,0x80,0xB4,0x0C, \ +0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B, \ +0x03,0xD1,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05, \ +0x48,0x00,0x21,0x01,0x73,0x01,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73, \ +0xF2,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x00,0xB5,0x02, \ +0x28,0x03,0xD1,0x0A,0x29,0x26,0xD1,0x16,0x4B,0x24,0xE0,0x04,0x28,0x01,0xD1, \ +0x15,0x4B,0x20,0xE0,0x05,0x28,0x01,0xD1,0x14,0x4B,0x1C,0xE0,0x00,0x28,0x1A, \ +0xD1,0x0A,0x29,0x17,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C, \ +0x13,0x05,0x07,0x09,0x13,0x0B,0x0D,0x0F,0x13,0x11,0x0D,0x4B,0x0C,0xE0,0x0D, \ +0x4B,0x0A,0xE0,0x0D,0x4B,0x08,0xE0,0x0D,0x4B,0x06,0xE0,0x0D,0x4B,0x04,0xE0, \ +0x0D,0x4B,0x02,0xE0,0x0D,0x4B,0x00,0xE0,0x0D,0x4B,0x0D,0x49,0x98,0x18,0x08, \ +0x60,0x00,0xF0,0x5B,0xF8,0x00,0xBD,0x58,0x0A,0x00,0x02,0xC4,0x02,0x00,0x02, \ +0xC8,0x0A,0x00,0x02,0x04,0x01,0x00,0x02,0x5C,0x00,0x00,0x02,0xC0,0x00,0x00, \ +0x02,0x80,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x14,0x01, \ +0x00,0x02,0x10,0x01,0x00,0x02,0x58,0x02,0x00,0x02,0x80,0xB4,0x17,0x1C,0x00, \ +0x22,0x01,0x2F,0x17,0x4B,0x23,0xD1,0x02,0x28,0x10,0xD1,0x16,0x48,0x87,0x79, \ +0xC0,0x79,0x00,0x02,0x07,0x43,0x08,0x29,0x07,0xD0,0x14,0x48,0x87,0x60,0x0C, \ +0x27,0x1F,0x70,0x5A,0x70,0x9A,0x70,0x01,0x60,0x42,0x60,0x80,0xBC,0xF7,0x46, \ +0x06,0x28,0xFB,0xD1,0x0F,0x48,0x00,0x78,0x01,0x28,0xF7,0xD1,0xFF,0x20,0x0D, \ +0x21,0x09,0x06,0x43,0x30,0x88,0x80,0x0B,0x49,0x01,0x20,0x08,0x71,0x0B,0x49, \ +0x08,0x70,0xEC,0xE7,0x18,0x79,0x18,0x70,0x5A,0x70,0x9A,0x70,0x18,0x78,0x0A, \ +0x28,0xE5,0xD1,0x07,0x48,0x02,0x70,0xE2,0xE7,0xC4,0x09,0x00,0x02,0x30,0x02, \ +0x00,0x02,0x98,0x02,0x00,0x02,0x36,0x01,0x00,0x02,0xE0,0x03,0x00,0x0D,0x37, \ +0x01,0x00,0x02,0x94,0x01,0x00,0x02,0x90,0xB4,0x1A,0x4A,0x80,0x20,0x10,0x73, \ +0x19,0x49,0x1A,0x48,0x0B,0x88,0x07,0x88,0xBB,0x42,0x11,0xD1,0x11,0x7B,0xC9, \ +0x09,0x09,0xD2,0x00,0x88,0x40,0x07,0x03,0xD0,0xE0,0x20,0x10,0x73,0x90,0xBC, \ +0xF7,0x46,0xD0,0x20,0x10,0x73,0xFA,0xE7,0x10,0x7B,0x20,0x23,0x18,0x43,0x10, \ +0x73,0xF5,0xE7,0x00,0x88,0x0B,0x88,0xC0,0x1A,0x08,0x28,0x00,0xD9,0x08,0x20, \ +0x0B,0x88,0x1B,0x18,0x0B,0x80,0x00,0x28,0x08,0xD0,0x0A,0x4B,0x0A,0x49,0x0F, \ +0x68,0x3C,0x78,0x01,0x37,0x0F,0x60,0x1C,0x73,0x01,0x38,0xF8,0xD1,0x10,0x7B, \ +0x10,0x23,0x18,0x43,0x10,0x73,0xDC,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x56, \ +0x02,0x00,0x02,0x54,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x58,0x02,0x00,0x02, \ +0x90,0xB5,0x20,0x24,0x00,0x28,0x0B,0x4F,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03, \ +0x2B,0x01,0xD0,0x3C,0x73,0x90,0xBD,0x08,0x06,0x00,0x0E,0x01,0xD0,0x80,0x28, \ +0x01,0xD1,0x3C,0x73,0x90,0xBD,0x04,0x48,0x00,0x79,0x00,0xF0,0x3A,0xF8,0x60, \ 
+0x20,0x38,0x73,0x90,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0x30,0x02,0x00,0x02, \ +0xB0,0xB4,0x13,0x48,0x01,0x2B,0x03,0xD1,0x20,0x21,0x01,0x73,0xB0,0xBC,0xF7, \ +0x46,0x10,0x49,0x00,0x23,0x0D,0x78,0x02,0x22,0x0F,0x4C,0x10,0x4F,0x01,0x2D, \ +0x02,0xD0,0x0D,0x78,0x02,0x2D,0x02,0xD1,0x0A,0x70,0x3B,0x70,0x23,0x70,0x80, \ +0x21,0x01,0x73,0x0B,0x49,0x01,0x25,0x0D,0x73,0x0B,0x73,0x0A,0x73,0x0B,0x73, \ +0x3A,0x78,0x10,0x23,0x0A,0x73,0x22,0x78,0x0A,0x73,0x01,0x7B,0x19,0x43,0x01, \ +0x73,0xDE,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x60,0x02,0x00,0x02,0x5D,0x02, \ +0x00,0x02,0x5C,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4,0x01,0x22,0x00, \ +0x23,0x02,0x28,0x10,0x49,0x12,0xD1,0x18,0x1C,0x10,0x4B,0x04,0x27,0x18,0x71, \ +0x0F,0x4B,0x1F,0x70,0x18,0x70,0x0F,0x4F,0x82,0x23,0x3B,0x71,0x0E,0x4B,0x18, \ +0x80,0x0E,0x4B,0x18,0x80,0x0E,0x4B,0x18,0x80,0x0A,0x70,0x80,0xBC,0xF7,0x46, \ +0x85,0x28,0xFB,0xD1,0x0C,0x48,0x03,0x80,0x0C,0x48,0x02,0x70,0x08,0x78,0x01, \ +0x28,0xF4,0xD1,0x02,0x20,0x08,0x70,0xF1,0xE7,0x00,0x00,0x61,0x02,0x00,0x02, \ +0x70,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xB0,0x03,0x00,0x0D,0x4E,0x02,0x00, \ +0x02,0x4C,0x02,0x00,0x02,0x40,0x02,0x00,0x02,0x42,0x02,0x00,0x02,0x4B,0x02, \ +0x00,0x02,0x90,0xB5,0x0F,0x1C,0x19,0x1C,0x29,0x4B,0x14,0x1C,0x27,0x4A,0x98, \ +0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xAC,0xFD, \ +0x90,0xBD,0x24,0x4B,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21, \ +0x1C,0xFF,0xF7,0xC2,0xFD,0x90,0xBD,0x81,0x23,0x1B,0x02,0x98,0x42,0x06,0xD1, \ +0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xD3,0xFD,0x90,0xBD,0xFF, \ +0x23,0x0C,0x33,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C, \ +0xFF,0xF7,0xE6,0xFD,0x90,0xBD,0x41,0x23,0x5B,0x02,0x98,0x42,0x06,0xD1,0x13, \ +0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xF7,0xFD,0x90,0xBD,0x0F,0x4B, \ +0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x29, \ +0xFE,0x90,0xBD,0x01,0x23,0xDB,0x03,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C, \ +0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x40,0xFE,0x90,0xBD,0x06,0x49,0x20,0x20,0x08, \ +0x73,0x90,0xBD,0x00,0x00,0x28,0x02,0x00,0x02,0x08,0x80,0x00,0x00,0x0A,0x81, \ +0x00,0x00,0x03,0x02,0x00,0x00,0x70,0x03,0x00,0x0D,0x10,0x49,0x09,0x78,0x01, \ +0x29,0x1B,0xD1,0x40,0x08,0x19,0xD3,0x0D,0x20,0x00,0x06,0x01,0x78,0x20,0x23, \ +0x19,0x43,0x01,0x70,0x0B,0x48,0x00,0x68,0xC1,0x43,0x0B,0x48,0xC2,0x69,0x11, \ +0x40,0xC1,0x61,0x00,0x20,0x07,0x21,0x49,0x06,0x7D,0x22,0x12,0x01,0x88,0x61, \ +0x01,0x30,0x90,0x42,0xFC,0xD3,0xFF,0x20,0x48,0x61,0xFF,0xE7,0xFE,0xE7,0xF7, \ +0x46,0x00,0x00,0x36,0x01,0x00,0x02,0xA4,0x02,0x00,0x02,0x40,0x00,0x00,0x04, \ +0xF0,0xB5,0xC0,0x20,0xFD,0xF7,0x28,0xF8,0x22,0x4C,0x23,0x4F,0x21,0x7A,0x23, \ +0x4A,0x39,0x70,0x11,0x79,0x79,0x70,0x21,0x7B,0xF9,0x70,0x11,0x7B,0xB9,0x70, \ +0x0D,0x21,0x09,0x06,0x8B,0x88,0x07,0x25,0x6D,0x06,0xBB,0x80,0xEE,0x69,0x01, \ +0x23,0x5B,0x02,0x33,0x43,0xEB,0x61,0x00,0x23,0x01,0x33,0x32,0x2B,0xFC,0xD3, \ +0xEE,0x69,0x18,0x4B,0x33,0x40,0xEB,0x61,0x00,0x23,0x01,0x33,0x64,0x2B,0xFC, \ +0xD3,0x15,0x4D,0x00,0x23,0x2B,0x70,0x15,0x4B,0x80,0x25,0x1D,0x73,0x01,0x25, \ +0x1D,0x72,0x82,0x25,0x1D,0x71,0x07,0x25,0x1D,0x70,0x11,0x4B,0x05,0x26,0x1E, \ +0x73,0x86,0x26,0x1E,0x72,0x1D,0x71,0x24,0x23,0x23,0x71,0x3B,0x78,0x23,0x72, \ +0xFB,0x78,0x23,0x73,0x7B,0x78,0x13,0x71,0xBB,0x78,0x13,0x73,0x0A,0x4A,0x0A, \ +0x81,0xBA,0x88,0x8A,0x80,0xFC,0xF7,0xE4,0xFF,0xF0,0xBD,0x00,0x00,0xC0,0x03, \ +0x00,0x0D,0xBC,0x02,0x00,0x02,0xE0,0x03,0x00,0x0D,0xFF,0xFD,0x00,0x00,0x10, \ +0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0xFF,0x0F,0x00,0x00, \ 
+0x80,0xB5,0x0C,0x49,0x00,0x20,0x08,0x60,0x0B,0x49,0x0E,0x4F,0x08,0x80,0x0B, \ +0x49,0x08,0x70,0x0B,0x49,0x08,0x70,0x38,0x68,0x01,0x7A,0x10,0x29,0x02,0xD1, \ +0xFB,0xF7,0x4A,0xFB,0x38,0x60,0x38,0x68,0x01,0x7A,0x40,0x29,0x02,0xD1,0xFB, \ +0xF7,0x43,0xFB,0x38,0x60,0x80,0xBD,0x44,0x02,0x00,0x02,0x48,0x02,0x00,0x02, \ +0x50,0x02,0x00,0x02,0x4A,0x02,0x00,0x02,0x64,0x02,0x00,0x02,0xF0,0xB5,0x23, \ +0x4E,0x04,0x1C,0x0F,0x1C,0x13,0x1C,0x20,0x22,0xB5,0x78,0xF1,0x78,0x03,0x2B, \ +0x20,0x48,0x01,0xD0,0x02,0x73,0xF0,0xBD,0x02,0x2D,0x09,0xD1,0x01,0x29,0x01, \ +0xD3,0x0A,0x29,0x01,0xD9,0x02,0x73,0xF0,0xBD,0x08,0x29,0x01,0xD1,0x02,0x73, \ +0xF0,0xBD,0x00,0x2F,0x09,0xD1,0xFC,0xF7,0x51,0xF8,0x06,0x2D,0x07,0xD1,0xF9, \ +0xF7,0xDC,0xF8,0x15,0x48,0x00,0x21,0x01,0x70,0x01,0xE0,0x00,0x21,0x01,0x73, \ +0x13,0x48,0x02,0x2D,0x07,0xD1,0x00,0x2C,0x0E,0xD1,0x11,0x49,0x01,0x60,0x11, \ +0x48,0x00,0x21,0x01,0x70,0x08,0xE0,0x01,0x2D,0xD7,0xD0,0x0F,0x49,0x01,0x60, \ +0x0F,0x48,0x00,0x21,0x01,0x70,0x0F,0x48,0x01,0x70,0x0F,0x48,0x31,0x1C,0x07, \ +0x80,0x0E,0x48,0x00,0x27,0x07,0x80,0x0E,0x48,0x08,0x22,0xFD,0xF7,0x4D,0xFB, \ +0x03,0x48,0x07,0x70,0xF0,0xBD,0x30,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x60, \ +0x02,0x00,0x02,0x58,0x02,0x00,0x02,0x58,0x0A,0x00,0x02,0x94,0x01,0x00,0x02, \ +0xC8,0x09,0x00,0x02,0x5D,0x02,0x00,0x02,0x5C,0x02,0x00,0x02,0x54,0x02,0x00, \ +0x02,0x56,0x02,0x00,0x02,0x38,0x02,0x00,0x02,0xB0,0xB5,0x11,0x4F,0x14,0x1C, \ +0xBB,0x78,0xFF,0x78,0x03,0x2C,0x0F,0x4A,0x02,0xD0,0x20,0x20,0x10,0x73,0xB0, \ +0xBD,0x0E,0x4D,0x00,0x24,0x2C,0x80,0x0D,0x4C,0x01,0x2B,0x21,0x80,0x0A,0xD1, \ +0x80,0x20,0x10,0x73,0x0B,0x48,0x0C,0x49,0x00,0x78,0x10,0x23,0x08,0x73,0x10, \ +0x7B,0x18,0x43,0x10,0x73,0xB0,0xBD,0x02,0x1C,0x18,0x1C,0x39,0x1C,0xFF,0xF7, \ +0x26,0xFD,0xB0,0xBD,0x00,0x00,0x30,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x56, \ +0x02,0x00,0x02,0x54,0x02,0x00,0x02,0x53,0x02,0x00,0x02,0x30,0x03,0x00,0x0D, \ +0xB0,0xB5,0x0F,0x1C,0x18,0x4D,0x19,0x1C,0x14,0x1C,0xA8,0x42,0x02,0xD0,0x17, \ +0x4B,0x00,0x22,0x1A,0x70,0x16,0x4A,0xA8,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C, \ +0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xD1,0xFD,0xB0,0xBD,0x12,0x4B,0x98,0x42,0x04, \ +0xD1,0x12,0x68,0x20,0x1C,0xFF,0xF7,0x55,0xFF,0xB0,0xBD,0x0F,0x4B,0x98,0x42, \ +0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xDD,0xFD,0xB0, \ +0xBD,0x0B,0x4B,0x98,0x42,0x04,0xD1,0x12,0x68,0x20,0x1C,0xFF,0xF7,0xA3,0xFF, \ +0xB0,0xBD,0x0B,0x1C,0x39,0x1C,0x22,0x1C,0xFF,0xF7,0x39,0xFE,0xB0,0xBD,0x01, \ +0x02,0x00,0x00,0x61,0x02,0x00,0x02,0x28,0x02,0x00,0x02,0x0E,0x40,0x00,0x00, \ +0x22,0xC1,0x00,0x00,0x33,0xC1,0x00,0x00,0xF0,0xB5,0x22,0x4B,0xE0,0x25,0x01, \ +0x27,0x98,0x42,0x1D,0x49,0x1D,0x4C,0x1E,0x4A,0x08,0xD1,0x90,0x78,0x01,0x28, \ +0x01,0xD1,0x0D,0x73,0x01,0xE0,0xFF,0xF7,0x54,0xFD,0x27,0x71,0xF0,0xBD,0x1A, \ +0x4B,0x20,0x26,0x98,0x42,0x21,0xD1,0x0E,0x73,0x19,0x48,0x27,0x71,0x00,0x78, \ +0x00,0x28,0xF4,0xD1,0x90,0x78,0x02,0x28,0x02,0xD1,0xD0,0x78,0x08,0x28,0xEE, \ +0xD0,0x90,0x78,0x01,0x28,0x0C,0xD1,0x13,0x49,0x00,0x20,0x08,0x70,0x12,0x48, \ +0x00,0x78,0x02,0x28,0x02,0xD1,0x11,0x48,0x07,0x70,0xF0,0xBD,0x11,0x48,0x07, \ +0x70,0xF0,0xBD,0xD1,0x78,0x90,0x78,0x01,0x22,0xFF,0xF7,0xEB,0xFC,0xF0,0xBD, \ +0x10,0x78,0x00,0x0A,0x01,0xD2,0x0E,0x73,0x00,0xE0,0x0D,0x73,0x27,0x71,0xF0, \ +0xBD,0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x30,0x02,0x00,0x02,0x33,0xC1, \ +0x00,0x00,0x0E,0x40,0x00,0x00,0x60,0x02,0x00,0x02,0x94,0x01,0x00,0x02,0x53, \ +0x02,0x00,0x02,0x5F,0x02,0x00,0x02,0xDD,0x01,0x00,0x02,0x80,0xB5,0x00,0x20, \ +0x1C,0x49,0x0F,0x27,0x3F,0x06,0x08,0x70,0xB8,0x80,0x39,0x88,0xB8,0x81,0x1A, \ 
+0x4A,0x39,0x89,0xD1,0x69,0xD1,0x04,0xCB,0x68,0xC9,0x6B,0x18,0x49,0x09,0x68, \ +0x90,0x61,0x17,0x49,0x02,0x20,0xC8,0x74,0x17,0x48,0x01,0x7A,0x0C,0x30,0x08, \ +0x29,0x19,0xD2,0x01,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x15,0x03,0x06,0x15, \ +0x09,0x0C,0x0F,0x12,0x00,0xF0,0xFA,0xFB,0x80,0xBD,0x00,0xF0,0x7B,0xF9,0x80, \ +0xBD,0x00,0xF0,0x10,0xFA,0x80,0xBD,0x00,0xF0,0x1B,0xF8,0x80,0xBD,0x00,0xF0, \ +0xC4,0xF8,0x80,0xBD,0x00,0xF0,0x73,0xFA,0x80,0xBD,0x02,0x21,0x0A,0x20,0xFF, \ +0xF7,0x38,0xFA,0x06,0x48,0xB8,0x80,0x80,0xBD,0x00,0x00,0x94,0x01,0x00,0x02, \ +0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04,0x44,0x09,0x00,0x02,0xC4,0x09,0x00, \ +0x02,0x08,0x08,0x00,0x00,0xF0,0xB5,0x4B,0x4F,0x60,0xC8,0x39,0x1C,0x60,0xC1, \ +0x38,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x48,0x4C,0x01,0xDC,0x00,0x28,0x05, \ +0xD1,0x03,0x21,0x0A,0x20,0xFF,0xF7,0x17,0xFA,0xAC,0x80,0xF0,0xBD,0x44,0x48, \ +0x90,0x21,0x41,0x70,0xB9,0x78,0x00,0x26,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01, \ +0xE0,0x40,0x21,0xC1,0x70,0x41,0x7C,0x89,0x07,0x89,0x0F,0x41,0x74,0xFA,0x78, \ +0x41,0x7C,0x92,0x00,0x02,0x23,0x1A,0x43,0x11,0x43,0x41,0x74,0x39,0x79,0x01, \ +0x75,0x79,0x79,0x41,0x75,0x38,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79, \ +0x41,0x77,0xFF,0x20,0xF5,0x30,0x35,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01, \ +0x1C,0x01,0x38,0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23, \ +0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x45,0xFE,0x07,0x21,0x49, \ +0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x02,0xF0,0xE6,0xF8,0x38,0x78, \ +0x00,0x21,0x01,0xF0,0x4D,0xFE,0x00,0x21,0x08,0x20,0xF8,0xF7,0xD7,0xFE,0x00, \ +0x21,0x09,0x20,0xF8,0xF7,0xD3,0xFE,0x00,0x21,0x0A,0x20,0xF8,0xF7,0xCF,0xFE, \ +0x20,0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61,0x00,0x20,0xF8,0xF7,0x3D, \ +0xFE,0x0A,0x20,0xF8,0xF7,0x22,0xFE,0x01,0x20,0x80,0x06,0x46,0x61,0xC0,0x68, \ +0x19,0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFF, \ +0xF7,0xB1,0xF9,0xAC,0x80,0xF0,0xBD,0x00,0x22,0xFF,0x21,0x7D,0x20,0xC0,0x00, \ +0xAC,0x80,0x00,0xF0,0xA6,0xFA,0x11,0x48,0x01,0x21,0x89,0x06,0x88,0x63,0x10, \ +0x48,0x11,0x4A,0x48,0x63,0xAE,0x80,0x04,0x20,0xD0,0x74,0xB8,0x60,0x00,0x03, \ +0x78,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0C,0x48,0x01,0x21,0xA8,0x80,0x0A, \ +0x20,0xFF,0xF7,0x92,0xF9,0xF0,0xBD,0x00,0x00,0xCC,0x02,0x00,0x02,0x08,0x08, \ +0x00,0x00,0xCC,0x07,0x00,0x02,0xB0,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04, \ +0x24,0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x44,0x09,0x00,0x02, \ +0x88,0x88,0x00,0x00,0xF0,0xB5,0x4C,0x4F,0x60,0xC8,0x39,0x1C,0x60,0xC1,0x38, \ +0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x49,0x4C,0x01,0xDC,0x00,0x28,0x05,0xD1, \ +0x03,0x21,0x0A,0x20,0xFF,0xF7,0x6B,0xF9,0xAC,0x80,0xF0,0xBD,0x45,0x48,0x90, \ +0x21,0x41,0x70,0xB9,0x78,0x00,0x26,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0, \ +0x40,0x21,0xC1,0x70,0x41,0x7C,0x89,0x07,0x89,0x0F,0x41,0x74,0xFA,0x78,0x41, \ +0x7C,0x92,0x00,0x02,0x23,0x1A,0x43,0x11,0x43,0x41,0x74,0x39,0x79,0x01,0x75, \ +0x79,0x79,0x41,0x75,0x39,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41, \ +0x77,0xFF,0x20,0xF5,0x30,0x36,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C, \ +0x01,0x38,0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18, \ +0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x99,0xFD,0x07,0x21,0x49,0x06, \ +0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x02,0xF0,0x3A,0xF8,0x38,0x78,0x00, \ +0x21,0x01,0xF0,0xA1,0xFD,0x0B,0x21,0x08,0x20,0xF8,0xF7,0x2B,0xFE,0xB7,0x21, \ +0x09,0x20,0xF8,0xF7,0x27,0xFE,0x00,0x21,0x0A,0x20,0xF8,0xF7,0x23,0xFE,0x14, \ +0x20,0xF8,0xF7,0x7E,0xFD,0x1F,0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61, \ 
+0x00,0x20,0xF8,0xF7,0x8E,0xFD,0x0A,0x20,0xF8,0xF7,0x73,0xFD,0x01,0x20,0x80, \ +0x06,0x46,0x61,0xC0,0x68,0x19,0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3, \ +0x06,0x21,0x0A,0x20,0xFF,0xF7,0x02,0xF9,0xAC,0x80,0xF0,0xBD,0x00,0x22,0x55, \ +0x21,0x7D,0x20,0xC0,0x00,0xAC,0x80,0x00,0xF0,0xF7,0xF9,0x11,0x48,0x01,0x21, \ +0x89,0x06,0x88,0x63,0x10,0x48,0x10,0x4A,0x48,0x63,0xAE,0x80,0x04,0x20,0xD0, \ +0x74,0xB8,0x60,0x00,0x03,0x78,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0C,0x48, \ +0x01,0x21,0xA8,0x80,0x0A,0x20,0xFF,0xF7,0xE3,0xF8,0xF0,0xBD,0xCC,0x02,0x00, \ +0x02,0x08,0x08,0x00,0x00,0xCC,0x07,0x00,0x02,0xB0,0x02,0x00,0x02,0x40,0x00, \ +0x00,0x04,0x04,0x24,0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x44, \ +0x09,0x00,0x02,0x88,0x88,0x00,0x00,0xF0,0xB5,0x42,0x4C,0xC0,0xC8,0x21,0x1C, \ +0xC0,0xC1,0xA0,0x78,0x40,0x4D,0x80,0x08,0x80,0x00,0x0F,0x27,0x3F,0x06,0x00, \ +0x28,0x05,0xD0,0x03,0x21,0x0A,0x20,0xFF,0xF7,0xBD,0xF8,0xBD,0x80,0xF0,0xBD, \ +0x20,0x78,0x0E,0x28,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFF, \ +0xF7,0xB2,0xF8,0xBD,0x80,0xF0,0xBD,0x08,0x21,0x0A,0x20,0xFF,0xF7,0xAC,0xF8, \ +0x33,0x48,0x00,0x26,0x06,0x70,0x33,0x48,0x06,0x60,0x46,0x60,0x00,0x20,0xF8, \ +0xF7,0x1F,0xFD,0xA1,0x78,0x30,0x48,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0, \ +0x40,0x21,0xC1,0x70,0x21,0x79,0x01,0x75,0x61,0x79,0x41,0x75,0x2C,0x49,0x09, \ +0x78,0x01,0x29,0x01,0xD1,0xE1,0x79,0x41,0x77,0xFF,0x20,0xF5,0x30,0x29,0x49, \ +0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF7,0xD1,0x07, \ +0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30, \ +0xF8,0xF7,0xE1,0xFC,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8, \ +0x61,0x01,0xF0,0x82,0xFF,0x20,0x78,0x00,0x21,0x01,0xF0,0xE9,0xFC,0x00,0x28, \ +0x05,0xD1,0x05,0x21,0x0A,0x20,0xFF,0xF7,0x6B,0xF8,0xBD,0x80,0xF0,0xBD,0x14, \ +0x20,0xF8,0xF7,0xCA,0xFC,0x00,0x20,0xF8,0xF7,0xDF,0xFC,0x13,0x48,0x41,0x68, \ +0xC9,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFF,0xF7,0x5B,0xF8,0xBD,0x80,0xF0, \ +0xBD,0x86,0x60,0x20,0x20,0x41,0x05,0x48,0x61,0x0D,0x48,0x01,0x21,0x01,0x73, \ +0xC1,0x74,0xB8,0x88,0x0B,0x4B,0x18,0x43,0xB8,0x80,0x0A,0x20,0xFF,0xF7,0x4A, \ +0xF8,0xF0,0xBD,0x00,0x00,0xCC,0x02,0x00,0x02,0x08,0x08,0x00,0x00,0x94,0x01, \ +0x00,0x02,0xC4,0x02,0x00,0x02,0xCC,0x07,0x00,0x02,0xB0,0x02,0x00,0x02,0x40, \ +0x00,0x00,0x04,0x44,0x09,0x00,0x02,0x48,0x48,0x00,0x00,0xF0,0xB5,0x2F,0x4F, \ +0x60,0xC8,0x39,0x1C,0x60,0xC1,0x38,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x2C, \ +0x4C,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFF,0xF7,0x25,0xF8, \ +0xAC,0x80,0xF0,0xBD,0x28,0x48,0x00,0x26,0x46,0x70,0x41,0x7C,0xFD,0x23,0x19, \ +0x40,0x41,0x74,0x25,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41,0x77, \ +0xFF,0x20,0xF5,0x30,0x22,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01, \ +0x38,0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43, \ +0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x66,0xFC,0x07,0x21,0x49,0x06,0xC8, \ +0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x01,0xF0,0x07,0xFF,0xAE,0x80,0x38,0x78, \ +0x00,0x21,0x01,0xF0,0x6D,0xFC,0x00,0x28,0x02,0xD1,0x13,0x49,0x05,0x20,0x48, \ +0x70,0x14,0x20,0xF8,0xF7,0x51,0xFC,0x00,0x20,0xF8,0xF7,0x66,0xFC,0x0D,0x48, \ +0x41,0x68,0xC9,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFE,0xF7,0xE2,0xFF,0xAC, \ +0x80,0xF0,0xBD,0x86,0x60,0x01,0x20,0x80,0x06,0x46,0x61,0x01,0x21,0x0A,0x20, \ +0xAC,0x80,0xFE,0xF7,0xD7,0xFF,0xF0,0xBD,0xCC,0x02,0x00,0x02,0x08,0x08,0x00, \ +0x00,0xCC,0x07,0x00,0x02,0xB0,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0xC4,0x09, \ +0x00,0x02,0xF0,0xB5,0x01,0x1C,0xB8,0xC9,0x58,0x4E,0x30,0x1C,0xB8,0xC0,0x30, \ 
+0x7A,0x0F,0x24,0x24,0x06,0x0E,0x28,0x55,0x4F,0x01,0xDC,0x00,0x28,0x05,0xD1, \ +0x03,0x21,0x0A,0x20,0xFE,0xF7,0xB8,0xFF,0xA7,0x80,0xF0,0xBD,0x51,0x4D,0xA8, \ +0x70,0x70,0x78,0x68,0x70,0x30,0x78,0x28,0x70,0x70,0x88,0xA8,0x60,0x70,0x68, \ +0xE8,0x60,0x00,0x20,0xE8,0x70,0x68,0x60,0x28,0x61,0xF0,0x68,0x68,0x61,0x00, \ +0x20,0xF8,0xF7,0x1F,0xFC,0x6A,0x78,0x40,0x21,0x48,0x48,0x00,0x2A,0x16,0xD0, \ +0x01,0x2A,0x17,0xD0,0x02,0x2A,0x18,0xD0,0x03,0x2A,0x01,0xD1,0x60,0x22,0x42, \ +0x70,0x42,0x7C,0x92,0x07,0x92,0x0F,0x42,0x74,0xB3,0x7A,0x42,0x7C,0x9B,0x00, \ +0x1A,0x43,0x42,0x74,0x72,0x7A,0x01,0x2A,0x0A,0xD1,0x00,0x22,0xC2,0x70,0x08, \ +0xE0,0x00,0x22,0x42,0x70,0xED,0xE7,0x20,0x22,0x42,0x70,0xEA,0xE7,0x41,0x70, \ +0xE8,0xE7,0xC1,0x70,0x37,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF1,0x7A,0x41, \ +0x77,0xFF,0x20,0x35,0x4E,0xF5,0x30,0x71,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C, \ +0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18, \ +0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0xC8,0xFB,0x07,0x21,0x49,0x06, \ +0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x01,0xF0,0x69,0xFE,0xA8,0x78,0x00, \ +0x21,0x01,0xF0,0xD0,0xFB,0x25,0x49,0xC8,0x69,0x8B,0x01,0x18,0x43,0xC8,0x61, \ +0x14,0x20,0xF8,0xF7,0xB4,0xFB,0x00,0x20,0xF8,0xF7,0xC9,0xFB,0x0A,0x20,0xF8, \ +0xF7,0xAE,0xFB,0x00,0x26,0x01,0x20,0x80,0x06,0x46,0x61,0xC0,0x68,0x1C,0x49, \ +0x1C,0x48,0x48,0x61,0x48,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFE, \ +0xF7,0x3B,0xFF,0xA7,0x80,0xF0,0xBD,0xE8,0x68,0x00,0xF0,0xAC,0xF8,0x68,0x60, \ +0xE8,0x78,0xF8,0xF7,0xF0,0xFB,0x68,0x68,0xF9,0xF7,0xDF,0xFD,0xA7,0x80,0x29, \ +0x78,0xE8,0x68,0x00,0x22,0x00,0xF0,0x27,0xF8,0x10,0x49,0xA6,0x80,0x03,0x20, \ +0xC8,0x74,0x0C,0x49,0x22,0x20,0x88,0x60,0x08,0x05,0x41,0x6A,0x0C,0x4B,0xC9, \ +0x18,0x01,0x62,0x0C,0x48,0x01,0x21,0xA0,0x80,0x0A,0x20,0xFE,0xF7,0x17,0xFF, \ +0xF0,0xBD,0xD0,0x0A,0x00,0x02,0x08,0x08,0x00,0x00,0xB8,0x0A,0x00,0x02,0xCC, \ +0x07,0x00,0x02,0xB0,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24,0x00,0x00, \ +0x44,0x09,0x00,0x02,0x10,0x27,0x00,0x00,0x88,0x88,0x00,0x00,0xF0,0xB5,0x07, \ +0x1C,0x00,0x2A,0x0B,0xD1,0x00,0x20,0x00,0x2F,0x14,0x4A,0x06,0xD9,0x09,0x06, \ +0x09,0x0E,0x11,0x70,0x01,0x32,0x01,0x30,0xB8,0x42,0xFA,0xD3,0xF0,0xBD,0xF8, \ +0xF7,0x68,0xFB,0xFC,0xF7,0x8A,0xFF,0xFC,0xF7,0x6A,0xFF,0xBC,0x08,0x26,0x1C, \ +0x0B,0x4D,0x04,0xD0,0xFC,0xF7,0x64,0xFF,0x01,0xC5,0x01,0x3C,0xFA,0xD1,0xB0, \ +0x00,0x3F,0x1A,0xFC,0xF7,0x5D,0xFF,0x69,0x1C,0x03,0x2F,0x28,0x70,0x02,0xD1, \ +0x00,0x0C,0x08,0x70,0xF0,0xBD,0x02,0x2F,0xE2,0xD1,0x00,0x0A,0x08,0x70,0xF0, \ +0xBD,0x00,0x00,0x00,0x72,0x01,0x02,0x88,0xB4,0x01,0x20,0x80,0x06,0xC1,0x6B, \ +0x00,0xAB,0x19,0x80,0x1A,0x49,0x1B,0x4A,0xC9,0x7C,0x1B,0x4F,0x03,0x29,0x21, \ +0xD1,0x00,0xA9,0x09,0x88,0x20,0x23,0x0B,0x40,0x18,0x49,0x0C,0xD0,0x87,0x63, \ +0xCF,0x68,0x03,0x23,0x1B,0x03,0x3B,0x43,0x43,0x63,0x4B,0x78,0x15,0x4F,0xFF, \ +0x5C,0x11,0x23,0x9B,0x02,0x3B,0x43,0x53,0x60,0x00,0xAA,0x12,0x88,0x92,0x08, \ +0x16,0xD3,0x0A,0x69,0x01,0x32,0x0A,0x61,0x4B,0x69,0x9A,0x42,0x10,0xD2,0x89, \ +0x68,0x42,0x6A,0x89,0x18,0x01,0x62,0x0B,0xE0,0x04,0x29,0x09,0xD1,0x00,0xA9, \ +0x09,0x88,0xC9,0x08,0x05,0xD3,0x87,0x63,0x64,0x21,0x41,0x63,0x01,0x20,0x80, \ +0x03,0x50,0x60,0x88,0xBC,0xF7,0x46,0x44,0x09,0x00,0x02,0x40,0x00,0x00,0x04, \ +0x00,0x72,0x01,0x02,0xB8,0x0A,0x00,0x02,0xC0,0x01,0x00,0x02,0xF0,0xB5,0x04, \ +0x30,0xC7,0x00,0x19,0x4C,0x00,0x26,0xE6,0x70,0x60,0x78,0x01,0x28,0x15,0xD0, \ +0x02,0x28,0x15,0xD0,0x03,0x28,0x25,0xD1,0x0B,0x20,0x39,0x1C,0xFC,0xF7,0xBB, \ +0xFE,0x0D,0x1C,0x79,0x1A,0x0B,0x20,0xFC,0xF7,0xB6,0xFE,0x07,0x1C,0x00,0x2D, \ 
+0x18,0xD9,0x01,0x37,0x04,0x2D,0x13,0xD2,0x01,0x20,0xE0,0x70,0x13,0xE0,0x7F, \ +0x08,0x11,0xE0,0x79,0x00,0x0B,0x20,0x0F,0x1C,0xFC,0xF7,0xA6,0xFE,0x0C,0x1C, \ +0x79,0x1A,0x0B,0x20,0xFC,0xF7,0xA1,0xFE,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01, \ +0x37,0x02,0xE0,0xE6,0x70,0x00,0xE0,0xE6,0x70,0x38,0x04,0x00,0x0C,0xF0,0xBD, \ +0xB8,0x0A,0x00,0x02,0xF0,0xB5,0x4B,0x4F,0x60,0xC8,0x39,0x1C,0x60,0xC1,0x38, \ +0x78,0x0F,0x26,0x36,0x06,0x0E,0x28,0x48,0x4D,0x01,0xDC,0x00,0x28,0x05,0xD1, \ +0x03,0x21,0x0A,0x20,0xFE,0xF7,0x41,0xFE,0xB5,0x80,0xF0,0xBD,0x44,0x48,0x90, \ +0x21,0x41,0x70,0xB9,0x78,0x00,0x24,0x01,0x29,0x01,0xD1,0xC4,0x70,0x01,0xE0, \ +0x40,0x21,0xC1,0x70,0x41,0x7C,0x89,0x07,0x89,0x0F,0x41,0x74,0xFA,0x78,0x41, \ +0x7C,0x92,0x00,0x11,0x43,0x41,0x74,0x39,0x79,0x01,0x75,0x79,0x79,0x41,0x75, \ +0x39,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41,0x77,0xFF,0x20,0xF5, \ +0x30,0x36,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29, \ +0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF, \ +0x20,0x2D,0x30,0xF8,0xF7,0x71,0xFA,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23, \ +0x98,0x43,0xC8,0x61,0x01,0xF0,0x12,0xFD,0x38,0x78,0x00,0x21,0x01,0xF0,0x79, \ +0xFA,0x0B,0x21,0x08,0x20,0xF8,0xF7,0x03,0xFB,0xB7,0x21,0x09,0x20,0xF8,0xF7, \ +0xFF,0xFA,0x00,0x21,0x0A,0x20,0xF8,0xF7,0xFB,0xFA,0x14,0x20,0xF8,0xF7,0x56, \ +0xFA,0x1F,0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61,0x00,0x20,0xF8,0xF7, \ +0x66,0xFA,0x0A,0x20,0xF8,0xF7,0x4B,0xFA,0x01,0x20,0x80,0x06,0x44,0x61,0xC0, \ +0x68,0x19,0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20, \ +0xFE,0xF7,0xDA,0xFD,0xB5,0x80,0xF0,0xBD,0x01,0x22,0x55,0x21,0x7D,0x20,0xC0, \ +0x00,0xB5,0x80,0xFF,0xF7,0xCF,0xFE,0x11,0x48,0x01,0x21,0x89,0x06,0x88,0x63, \ +0x10,0x48,0x10,0x4A,0x48,0x63,0xB4,0x80,0x04,0x20,0xB8,0x60,0xD0,0x74,0x00, \ +0x03,0x78,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0C,0x48,0x01,0x21,0xB0,0x80, \ +0x0A,0x20,0xFE,0xF7,0xBB,0xFD,0xF0,0xBD,0xCC,0x02,0x00,0x02,0x08,0x08,0x00, \ +0x00,0xCC,0x07,0x00,0x02,0xB0,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24, \ +0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x44,0x09,0x00,0x02,0x88, \ +0x88,0x00,0x00,0x80,0xB5,0x15,0x49,0x01,0x27,0xC9,0x7C,0x01,0x29,0x13,0xD1, \ +0x13,0x4B,0x18,0x40,0x0E,0xD0,0x88,0x06,0xC0,0x68,0x81,0x09,0x0A,0xD3,0x04, \ +0x21,0x01,0x40,0x10,0x48,0x03,0xD0,0x41,0x68,0x01,0x31,0x41,0x60,0x02,0xE0, \ +0x01,0x68,0x01,0x31,0x01,0x60,0x38,0x1C,0x80,0xBD,0x02,0x29,0x01,0xD1,0x38, \ +0x1C,0x80,0xBD,0x03,0x29,0x01,0xD0,0x04,0x29,0x06,0xD1,0x07,0x4B,0x18,0x40, \ +0x01,0xD0,0xFF,0xF7,0xAF,0xFE,0x38,0x1C,0x80,0xBD,0x00,0x20,0x80,0xBD,0x00, \ +0x00,0x44,0x09,0x00,0x02,0x40,0x40,0x00,0x00,0xC4,0x02,0x00,0x02,0x80,0x80, \ +0x00,0x00,0xFF,0xB5,0x84,0xB0,0x00,0x20,0x00,0x24,0x00,0x26,0x00,0x27,0x00, \ +0x25,0x03,0x90,0x02,0x90,0x01,0x90,0x68,0x46,0x04,0x22,0x5A,0x49,0xFC,0xF7, \ +0x61,0xFD,0x05,0x99,0x00,0x20,0x00,0x29,0x1B,0xDD,0x04,0x99,0x80,0x23,0x09, \ +0x5C,0x0A,0x1C,0x9A,0x43,0x16,0x2A,0x02,0xD1,0x00,0xAB,0xD9,0x70,0x0D,0xE0, \ +0x0B,0x2A,0x02,0xD1,0x00,0xAB,0x99,0x70,0x08,0xE0,0x04,0x2A,0x02,0xD1,0x00, \ +0xAB,0x59,0x70,0x03,0xE0,0x02,0x2A,0x01,0xD1,0x00,0xAB,0x19,0x70,0x05,0x99, \ +0x01,0x30,0x88,0x42,0xE3,0xDB,0x00,0x20,0x69,0x46,0x09,0x5C,0x00,0x29,0x0D, \ +0xD0,0x09,0x0A,0x04,0xD3,0x00,0x2E,0x00,0xD1,0x07,0x1C,0x01,0x26,0x04,0x1C, \ +0x01,0x99,0x02,0x90,0x00,0x29,0x02,0xD1,0x01,0x21,0x01,0x91,0x05,0x1C,0x01, \ +0x30,0x04,0x28,0xEA,0xDB,0x01,0x99,0x00,0x20,0x00,0x29,0x01,0xD1,0x08,0xB0, \ +0xF0,0xBD,0x00,0x2E,0x01,0xD1,0x2C,0x1C,0x2F,0x1C,0x3A,0x49,0x00,0x22,0x8B, \ 
+0x18,0x1B,0x7C,0x00,0x2B,0x00,0xD0,0x03,0x92,0x01,0x32,0x04,0x2A,0xF7,0xDB, \ +0x06,0x9B,0x01,0x26,0x0E,0x2B,0x34,0x4A,0x03,0xD1,0x34,0x4B,0x1B,0x78,0x01, \ +0x2B,0x0A,0xD1,0x03,0x98,0x84,0x42,0x02,0xDD,0x03,0x98,0x90,0x72,0x00,0xE0, \ +0x94,0x72,0x02,0x98,0xD0,0x72,0xD7,0x71,0x42,0xE0,0x2D,0x4B,0x1B,0x78,0x00, \ +0x2B,0x3E,0xD1,0x01,0x2D,0x10,0xD9,0xD0,0x71,0x96,0x72,0xD6,0x72,0x07,0x9B, \ +0x00,0x27,0x01,0x2B,0x35,0xD1,0x82,0x20,0x00,0xAB,0x18,0x70,0x84,0x20,0x58, \ +0x70,0x0B,0x20,0x98,0x70,0x16,0x20,0xD8,0x70,0x2B,0xE0,0x01,0x2C,0x0D,0xDD, \ +0x00,0xAC,0x64,0x78,0x23,0x0A,0x01,0xD3,0x96,0x72,0x08,0xE0,0x00,0xAC,0x24, \ +0x78,0x23,0x0A,0x01,0xD3,0x90,0x72,0x02,0xE0,0x95,0x72,0x00,0xE0,0x94,0x72, \ +0x01,0x2F,0x0D,0xD9,0x00,0xAC,0x24,0x78,0x23,0x0A,0x01,0xD3,0xD0,0x71,0x08, \ +0xE0,0x00,0xAC,0x64,0x78,0x23,0x0A,0x01,0xD3,0xD6,0x71,0x02,0xE0,0xD5,0x71, \ +0x00,0xE0,0xD7,0x71,0x02,0x9B,0x00,0x2B,0x05,0xDD,0x00,0xAB,0x5B,0x78,0x00, \ +0x2B,0x01,0xD0,0xD6,0x72,0x00,0xE0,0xD0,0x72,0x00,0x20,0x6B,0x46,0x1B,0x5C, \ +0x0C,0x18,0x01,0x30,0x04,0x28,0x23,0x74,0xF8,0xDB,0xC8,0x19,0x01,0x7C,0x80, \ +0x23,0x19,0x43,0x01,0x74,0xD0,0x7A,0x05,0x49,0xC8,0x70,0x30,0x1C,0x86,0xE7, \ +0x60,0x99,0x00,0x00,0x00,0x00,0x00,0x02,0x04,0x01,0x00,0x02,0xB1,0x02,0x00, \ +0x02,0xC8,0x01,0x00,0x02,0xF0,0xB4,0x44,0x78,0x00,0x26,0x05,0x2C,0x01,0xD8, \ +0x00,0x2C,0x02,0xD1,0x30,0x1C,0xF0,0xBC,0xF7,0x46,0x00,0x22,0x00,0x27,0x00, \ +0x2C,0x17,0xD9,0xC3,0x19,0x9D,0x78,0x6B,0x06,0x5B,0x0E,0x02,0x2B,0x08,0xD0, \ +0x04,0x2B,0x06,0xD0,0x0B,0x2B,0x04,0xD0,0x16,0x2B,0x02,0xD0,0x2C,0x2B,0x0B, \ +0xD1,0x04,0xE0,0x2C,0x2B,0x02,0xD0,0x13,0x1C,0xCD,0x54,0x01,0x32,0x01,0x37, \ +0xA7,0x42,0xE8,0xD3,0x03,0xE0,0x00,0x2B,0x01,0xD1,0x30,0x1C,0xDD,0xE7,0x10, \ +0x1C,0xDB,0xE7,0xF1,0xB5,0x85,0xB0,0x00,0x20,0x01,0x90,0x68,0x46,0x04,0x22, \ +0x70,0x49,0xFC,0xF7,0x6F,0xFC,0x70,0x4E,0x04,0x24,0x30,0x68,0x45,0x68,0x80, \ +0x89,0x2F,0x28,0x02,0xDA,0x00,0x20,0x06,0xB0,0xF0,0xBD,0x05,0x98,0x6B,0x49, \ +0x01,0x28,0x04,0x91,0x09,0xD1,0x06,0x22,0xE8,0x1D,0x09,0x30,0x04,0x99,0xFC, \ +0xF7,0x3C,0xFC,0x00,0x28,0x01,0xD0,0x00,0x20,0xEE,0xE7,0x20,0x20,0xE9,0x1D, \ +0x19,0x31,0x28,0x5C,0x49,0x78,0x09,0x02,0x08,0x43,0x00,0x04,0x00,0x0C,0x02, \ +0x90,0x14,0x28,0x04,0xDB,0x7D,0x23,0x02,0x98,0xDB,0x00,0x98,0x42,0x01,0xDD, \ +0x00,0x20,0xDB,0xE7,0x22,0x20,0x28,0x5C,0x80,0x08,0x01,0xD2,0x00,0x20,0xD5, \ +0xE7,0x30,0x68,0x24,0x27,0x80,0x89,0x04,0x38,0x24,0x28,0x45,0xDD,0x56,0x49, \ +0x03,0x91,0xE8,0x5D,0x00,0x28,0x09,0xD0,0x01,0x28,0x20,0xD0,0x03,0x28,0x39, \ +0xD1,0xE8,0x19,0x41,0x78,0x01,0x29,0x27,0xD0,0x00,0x20,0xC0,0xE7,0xEE,0x19, \ +0x70,0x78,0x00,0x28,0x00,0xD1,0xBB,0xE7,0x4D,0x49,0x4A,0x79,0x82,0x42,0x01, \ +0xD0,0x00,0x20,0xB5,0xE7,0x03,0x99,0xB0,0x1C,0xFC,0xF7,0xFB,0xFB,0x00,0x28, \ +0x01,0xD0,0x00,0x20,0xAD,0xE7,0x70,0x78,0xC0,0x19,0x87,0x1C,0x01,0x20,0x01, \ +0x90,0x14,0xE0,0xE8,0x19,0x69,0x46,0x06,0x1C,0xFF,0xF7,0x66,0xFF,0x04,0x1C, \ +0x01,0xD1,0x00,0x20,0x9E,0xE7,0x70,0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0,0x3D, \ +0x49,0x80,0x78,0x09,0x7D,0x88,0x42,0x01,0xD0,0x00,0x20,0x93,0xE7,0x03,0x37, \ +0x35,0x4E,0x30,0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xBE,0xDC,0x01,0x98,0x00, \ +0x28,0x01,0xD1,0x00,0x20,0x87,0xE7,0x34,0x49,0x68,0x46,0x01,0x23,0x0A,0x7D, \ +0x21,0x1C,0xFF,0xF7,0x78,0xFE,0x00,0x28,0x00,0xD1,0x7D,0xE7,0x04,0x20,0xF9, \ +0xF7,0x10,0xFE,0x2E,0x48,0x20,0x23,0x01,0x78,0x2E,0x4F,0x19,0x43,0x01,0x70, \ +0x01,0x78,0x10,0x23,0x19,0x43,0x01,0x70,0xE9,0x18,0x0C,0x1C,0xF8,0x1D,0x06, \ +0x22,0x07,0x30,0xFC,0xF7,0xD1,0xFB,0x06,0x22,0x21,0x1C,0x04,0x98,0xFC,0xF7, \ 
+0xCC,0xFB,0x21,0x4C,0xF8,0x1D,0x62,0x79,0x03,0x99,0x0D,0x30,0xFC,0xF7,0xC5, \ +0xFB,0x22,0x48,0x01,0x25,0xFE,0x1D,0x29,0x36,0x05,0x75,0x35,0x71,0x02,0x98, \ +0x38,0x80,0xA5,0x70,0x05,0x98,0x01,0x28,0x08,0xD1,0x00,0x21,0x00,0x20,0x01, \ +0xF0,0xE5,0xFA,0x13,0x49,0x00,0x20,0x09,0x68,0x48,0x61,0x07,0xE0,0xF9,0xF7, \ +0x9E,0xFD,0x39,0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0xF9,0xF7,0xBC,0xFD,0x14, \ +0x49,0x00,0x20,0x48,0x70,0x05,0x20,0x88,0x71,0x05,0x98,0x01,0x28,0x04,0xD1, \ +0x01,0x21,0x04,0x20,0xFE,0xF7,0x9E,0xFB,0x00,0xE0,0xB5,0x71,0x0E,0x48,0x01, \ +0x68,0x0E,0x48,0xC2,0x69,0x11,0x43,0xC1,0x61,0x0D,0x48,0x05,0x70,0x28,0x1C, \ +0x28,0xE7,0x64,0x99,0x00,0x00,0x48,0x01,0x00,0x02,0xFC,0x00,0x00,0x02,0xDC, \ +0x00,0x00,0x02,0x04,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0x94,0x01,0x00,0x02, \ +0x80,0x00,0x00,0x02,0x44,0x09,0x00,0x02,0x94,0x09,0x00,0x02,0xA4,0x02,0x00, \ +0x02,0x40,0x00,0x00,0x04,0x3A,0x01,0x00,0x02,0xF0,0xB5,0x84,0xB0,0x5A,0x49, \ +0x04,0x22,0x01,0xA8,0xFC,0xF7,0x6F,0xFB,0x59,0x4F,0x59,0x49,0x38,0x68,0x00, \ +0x25,0x46,0x68,0x06,0x22,0xF0,0x1D,0x09,0x30,0x03,0x91,0xFC,0xF7,0x46,0xFB, \ +0x00,0x28,0x02,0xD0,0x00,0x20,0x04,0xB0,0xF0,0xBD,0x39,0x68,0x38,0x1C,0x89, \ +0x89,0x2F,0x29,0x01,0xDA,0x00,0x20,0xF6,0xE7,0x20,0x22,0xF3,0x1D,0x19,0x33, \ +0xB2,0x5C,0x5B,0x78,0x1B,0x02,0x1A,0x43,0x12,0x04,0x12,0x0C,0x00,0x92,0x14, \ +0x2A,0x04,0xDB,0x7D,0x23,0x00,0x9A,0xDB,0x00,0x9A,0x42,0x01,0xDD,0x00,0x20, \ +0xE3,0xE7,0x22,0x22,0xB2,0x5C,0x52,0x08,0x01,0xD2,0x00,0x20,0xDD,0xE7,0x24, \ +0x27,0x04,0x39,0x24,0x29,0x34,0xDD,0xF0,0x5D,0x00,0x28,0x09,0xD0,0x01,0x28, \ +0x11,0xD0,0x03,0x28,0x2B,0xD1,0xF0,0x19,0x41,0x78,0x01,0x29,0x19,0xD0,0x00, \ +0x20,0xCC,0xE7,0xF0,0x19,0x40,0x78,0x20,0x28,0x01,0xD9,0x00,0x25,0x00,0xE0, \ +0x01,0x25,0xC0,0x19,0x87,0x1C,0x15,0xE0,0xF0,0x19,0x02,0x90,0x01,0xA9,0xFF, \ +0xF7,0x7B,0xFE,0x04,0x1C,0x01,0xD1,0x00,0x20,0xB9,0xE7,0x02,0x98,0x40,0x78, \ +0xC0,0x19,0x87,0x1C,0x07,0xE0,0x2E,0x49,0x80,0x78,0x09,0x7D,0x88,0x42,0x01, \ +0xD0,0x00,0x20,0xAD,0xE7,0x03,0x37,0x28,0x48,0x00,0x68,0x80,0x89,0x04,0x38, \ +0xB8,0x42,0xCC,0xDC,0x00,0x2D,0x01,0xD1,0x00,0x20,0xA2,0xE7,0x25,0x49,0x01, \ +0x23,0x0A,0x7D,0x21,0x1C,0x01,0xA8,0xFF,0xF7,0x8D,0xFD,0x00,0x28,0x00,0xD1, \ +0x98,0xE7,0x22,0x4C,0x06,0x22,0xE0,0x1D,0x07,0x30,0x1F,0x4F,0x03,0x99,0xFC, \ +0xF7,0xF2,0xFA,0xE0,0x1D,0x0D,0x30,0x20,0x22,0xF9,0x1D,0x15,0x31,0xFC,0xF7, \ +0xEB,0xFA,0xF8,0x1D,0x39,0x30,0x81,0x78,0xE0,0x1D,0x29,0x30,0x01,0x71,0x01, \ +0x79,0x18,0x48,0x20,0x23,0x01,0x75,0x00,0x9A,0x17,0x49,0x22,0x80,0x0A,0x78, \ +0x1A,0x43,0x0A,0x70,0x0A,0x78,0x10,0x23,0x1A,0x43,0x0A,0x70,0x00,0x21,0x13, \ +0x4A,0x50,0x30,0x41,0x70,0x91,0x70,0x05,0x21,0x81,0x71,0x04,0x20,0xF9,0xF7, \ +0xFB,0xFC,0x01,0x21,0x04,0x20,0xFE,0xF7,0xCB,0xFA,0x0E,0x48,0x01,0x68,0x0E, \ +0x48,0xC2,0x69,0x11,0x43,0xC1,0x61,0x0D,0x49,0x01,0x20,0x08,0x70,0x5D,0xE7, \ +0x00,0x00,0x68,0x99,0x00,0x00,0x48,0x01,0x00,0x02,0xFC,0x00,0x00,0x02,0x00, \ +0x00,0x00,0x02,0xC0,0x00,0x00,0x02,0x80,0x00,0x00,0x02,0x44,0x09,0x00,0x02, \ +0x94,0x01,0x00,0x02,0x04,0x01,0x00,0x02,0xA4,0x02,0x00,0x02,0x40,0x00,0x00, \ +0x04,0x3A,0x01,0x00,0x02,0xF0,0xB4,0x1D,0x4A,0x1D,0x4B,0xD1,0x1D,0x69,0x31, \ +0xC9,0x7A,0x49,0x00,0x5F,0x5A,0xD1,0x1D,0x59,0x31,0x0B,0x8B,0x01,0x3B,0x1B, \ +0x04,0x1B,0x14,0x0B,0x83,0x00,0x2B,0x26,0xDD,0x17,0x4B,0x01,0x25,0x5C,0x7A, \ +0x50,0x32,0xD3,0x79,0x00,0x2B,0x04,0xD1,0x05,0x30,0x0E,0x28,0x05,0xD9,0x0E, \ +0x38,0x03,0xE0,0x01,0x30,0x0E,0x28,0x00,0xD9,0x01,0x20,0x00,0x2C,0x05,0xD1, \ +0x2B,0x1C,0x46,0x1E,0xB3,0x40,0x3B,0x40,0x10,0xD1,0x07,0xE0,0xD3,0x79,0x00, \ 
+0x2B,0x0C,0xD1,0x0A,0x4B,0x1B,0x18,0x5B,0x7B,0x00,0x2B,0x07,0xD1,0x0B,0x8B, \ +0x01,0x3B,0x1B,0x04,0x1B,0x14,0x0B,0x83,0x00,0x2B,0xDC,0xDC,0x00,0x20,0xF0, \ +0xBC,0xF7,0x46,0x00,0x00,0x44,0x09,0x00,0x02,0x6C,0x02,0x00,0x02,0xB0,0x00, \ +0x00,0x02,0x14,0x01,0x00,0x02,0xF0,0xB5,0x29,0x4C,0x07,0x1C,0x00,0x26,0x27, \ +0x70,0xE0,0x1D,0x03,0x30,0x66,0x70,0x66,0x80,0x06,0x22,0x25,0x49,0xFC,0xF7, \ +0x55,0xFA,0x25,0x4D,0xE0,0x1D,0x09,0x30,0x06,0x22,0xE9,0x1D,0x35,0x31,0xFC, \ +0xF7,0x4D,0xFA,0xFF,0x20,0x20,0x71,0x60,0x71,0xA0,0x71,0xE0,0x71,0x20,0x72, \ +0x60,0x72,0x38,0x1C,0x40,0x28,0x1D,0x4F,0x1D,0xD0,0x00,0xF0,0xE8,0xF8,0x00, \ +0xF0,0xF0,0xF8,0xE5,0x1D,0x1D,0x35,0x28,0x1C,0x00,0xF0,0x0B,0xF9,0x2D,0x18, \ +0x28,0x1C,0x00,0xF0,0x23,0xF9,0x2D,0x18,0x16,0x48,0x80,0x7D,0x02,0x28,0x03, \ +0xD1,0x28,0x1C,0x00,0xF0,0x33,0xF9,0x2D,0x18,0x28,0x1C,0x00,0xF0,0x3D,0xF9, \ +0x28,0x18,0x00,0x1B,0xF8,0x64,0xB8,0x64,0xF0,0xBD,0x26,0x76,0x0F,0x4E,0xE0, \ +0x1D,0x72,0x79,0x13,0x30,0xE9,0x1D,0x15,0x31,0x62,0x76,0xFC,0xF7,0x1A,0xFA, \ +0x70,0x79,0x00,0x19,0x1A,0x30,0x00,0xF0,0x03,0xF9,0x70,0x79,0x20,0x30,0x00, \ +0x06,0x00,0x0E,0xB8,0x64,0xF0,0xBD,0x00,0x00,0x04,0x08,0x00,0x02,0x5C,0x00, \ +0x00,0x02,0xC0,0x00,0x00,0x02,0x44,0x09,0x00,0x02,0x00,0x00,0x00,0x02,0x04, \ +0x01,0x00,0x02,0xF8,0xB5,0x07,0x1C,0xFF,0xF7,0x9C,0xFF,0x00,0x26,0x80,0x2F, \ +0x47,0x4D,0x0E,0xD1,0xC0,0x20,0xFB,0xF7,0x13,0xFE,0x04,0x1C,0x45,0x48,0x41, \ +0x7B,0x03,0x29,0x03,0xD0,0x20,0x1C,0xFB,0xF7,0x0B,0xFE,0xF8,0xBD,0x01,0x21, \ +0x41,0x73,0x10,0xE0,0x40,0x2F,0x05,0xD1,0x40,0x48,0x01,0x21,0x81,0x74,0x3F, \ +0x48,0x46,0x80,0x08,0xE0,0x50,0x2F,0x06,0xD1,0x3E,0x48,0x3E,0x49,0x06,0x22, \ +0xFC,0xF7,0xDC,0xF9,0x01,0x21,0x29,0x71,0x3C,0x48,0xF7,0xF7,0xAF,0xFF,0x50, \ +0x2F,0x02,0xD1,0x36,0x48,0xC0,0x6C,0x01,0xE0,0x34,0x48,0x80,0x6C,0x33,0x49, \ +0x88,0x66,0x37,0x48,0x89,0x6E,0xC0,0x79,0xF9,0xF7,0xEF,0xFA,0x30,0x49,0x50, \ +0x2F,0xC8,0x66,0x0C,0xD1,0x2E,0x48,0x2E,0x49,0xC0,0x6E,0x48,0x80,0x31,0x48, \ +0xC0,0x79,0xF9,0xF7,0x83,0xFF,0x2B,0x49,0x49,0x88,0x40,0x18,0x29,0x49,0x48, \ +0x80,0x28,0x48,0x27,0x49,0x80,0x2F,0x48,0x66,0x16,0xD1,0xFC,0xF7,0x31,0xFA, \ +0x2A,0x49,0x89,0x89,0x49,0x00,0x01,0x31,0x08,0x40,0x21,0x49,0x88,0x62,0x27, \ +0x48,0x00,0x88,0x08,0x62,0x89,0x6A,0x8B,0x00,0x59,0x18,0x89,0x00,0x09,0x18, \ +0x08,0x20,0xF9,0xF7,0xB3,0xFB,0x20,0x1C,0xFB,0xF7,0xB8,0xFD,0xF8,0xF7,0xEC, \ +0xFF,0xF9,0xF7,0x56,0xF8,0x00,0x90,0x80,0x2F,0x05,0xD1,0x00,0x98,0x00,0x28, \ +0x23,0xD1,0x01,0x21,0x69,0x70,0x20,0xE0,0x40,0x2F,0x1E,0xD1,0x12,0x4C,0xC0, \ +0x20,0xA6,0x74,0xFB,0xF7,0xA4,0xFD,0x07,0x1C,0xA8,0x79,0x01,0x28,0x12,0xD1, \ +0x00,0x98,0x00,0x28,0x0D,0xD1,0xE0,0x1D,0x69,0x30,0x81,0x7A,0x00,0x29,0x0A, \ +0xD1,0x01,0x21,0x81,0x72,0x0E,0x49,0xC8,0x8A,0x81,0x02,0x04,0x20,0xF9,0xF7, \ +0x88,0xFB,0x01,0xE0,0x01,0x21,0x69,0x71,0x38,0x1C,0xFB,0xF7,0x8A,0xFD,0x7D, \ +0xE7,0x00,0x00,0x94,0x09,0x00,0x02,0x54,0x09,0x00,0x02,0x44,0x09,0x00,0x02, \ +0x04,0x08,0x00,0x02,0x08,0x08,0x00,0x02,0x30,0x01,0x00,0x02,0x1A,0x08,0x00, \ +0x02,0x04,0x01,0x00,0x02,0xC0,0x00,0x00,0x02,0xA8,0x01,0x00,0x02,0x03,0x49, \ +0x02,0x48,0x09,0x88,0x01,0x80,0xF7,0x46,0x00,0x00,0x24,0x08,0x00,0x02,0x80, \ +0x00,0x00,0x02,0x0D,0x49,0x0C,0x48,0x8A,0x7A,0x92,0x00,0x02,0x80,0xC9,0x7A, \ +0x00,0x29,0x03,0xD0,0x01,0x88,0x10,0x23,0x19,0x43,0x01,0x80,0x08,0x49,0x49, \ +0x7A,0x01,0x29,0x04,0xD1,0x01,0x88,0x22,0x23,0x19,0x43,0x01,0x80,0xF7,0x46, \ +0x01,0x88,0x02,0x23,0x19,0x43,0x01,0x80,0xF7,0x46,0x26,0x08,0x00,0x02,0x80, \ +0x00,0x00,0x02,0x04,0x01,0x00,0x02,0x90,0xB4,0x01,0x1C,0x00,0x20,0x0A,0x4A, \ 
+0x08,0x70,0x53,0x79,0x00,0x2B,0x08,0xD9,0x08,0x4B,0x1F,0x18,0x3F,0x7D,0x0C, \ +0x18,0x01,0x30,0xA7,0x70,0x57,0x79,0x87,0x42,0xF7,0xD8,0x50,0x79,0x48,0x70, \ +0x50,0x79,0x90,0xBC,0x02,0x30,0xF7,0x46,0x00,0x00,0x04,0x01,0x00,0x02,0x80, \ +0x00,0x00,0x02,0x90,0xB4,0x01,0x1C,0x01,0x20,0x08,0x70,0x00,0x20,0x08,0x4B, \ +0x00,0x22,0x9F,0x18,0x3F,0x7C,0x00,0x2F,0x02,0xD0,0x0C,0x18,0xA7,0x70,0x01, \ +0x30,0x01,0x32,0x04,0x2A,0xF5,0xD3,0x48,0x70,0x90,0xBC,0x02,0x30,0xF7,0x46, \ +0x00,0x00,0x00,0x00,0x00,0x02,0x03,0x21,0x01,0x70,0x01,0x22,0x42,0x70,0x01, \ +0x30,0x80,0x18,0x02,0x4A,0x12,0x7D,0x02,0x70,0x08,0x1C,0xF7,0x46,0x00,0x00, \ +0x00,0x00,0x00,0x02,0x06,0x21,0x01,0x70,0x02,0x21,0x41,0x70,0x04,0x49,0x02, \ +0x30,0x0A,0x89,0x02,0x70,0x09,0x89,0x09,0x0A,0x41,0x70,0x04,0x20,0xF7,0x46, \ +0x00,0x00,0x80,0x00,0x00,0x02,0x0A,0x21,0x01,0x70,0x02,0x21,0x41,0x70,0x00, \ +0x21,0x81,0x70,0x02,0x30,0x41,0x1C,0x07,0x20,0x08,0x70,0x04,0x20,0xF7,0x46, \ +0xF0,0xB5,0x83,0xB0,0x51,0x48,0x52,0x4D,0x48,0x21,0x01,0x70,0x01,0x26,0xEC, \ +0x1D,0x29,0x34,0x46,0x70,0x62,0x79,0x11,0x21,0x4E,0x4F,0x02,0x2A,0x01,0xD1, \ +0x41,0x70,0x05,0xE0,0x03,0x2A,0x03,0xD1,0xBA,0x78,0x08,0x2A,0x00,0xD1,0x41, \ +0x70,0x4A,0x49,0x09,0x68,0x89,0x78,0x00,0x29,0x03,0xD0,0x41,0x78,0x08,0x23, \ +0x19,0x43,0x41,0x70,0x46,0x49,0x00,0x23,0x00,0x22,0x46,0x48,0xC9,0x79,0xF7, \ +0xF7,0x8F,0xFE,0x45,0x48,0x45,0x49,0x06,0x22,0xFC,0xF7,0xA4,0xF8,0xE9,0x1D, \ +0x07,0x31,0x0D,0x1C,0x06,0x22,0x42,0x48,0xFC,0xF7,0x9D,0xF8,0x29,0x1C,0x06, \ +0x22,0x41,0x48,0xFC,0xF7,0x98,0xF8,0x40,0x4D,0x18,0x20,0xA8,0x66,0x39,0x48, \ +0x18,0x21,0xC0,0x79,0xF9,0xF7,0xB6,0xF9,0xE8,0x66,0x32,0x48,0xEE,0x1D,0x68, \ +0x66,0x01,0x20,0x49,0x36,0xF0,0x70,0xF8,0xF7,0xD9,0xFE,0xF8,0xF7,0x43,0xFF, \ +0x02,0x90,0x00,0x20,0xF0,0x70,0x02,0x98,0x00,0x28,0x01,0xD0,0x03,0xB0,0xF0, \ +0xBD,0x02,0x26,0x2C,0x48,0x6E,0x60,0xC0,0x79,0x32,0x49,0x40,0x00,0x08,0x5A, \ +0x31,0x49,0xC9,0x88,0x40,0x18,0x31,0x49,0x09,0x88,0x41,0x18,0x01,0x20,0xF9, \ +0xF7,0x81,0xFA,0x00,0x22,0xD2,0x43,0x6E,0x74,0x00,0x92,0x01,0x22,0x10,0x21, \ +0x01,0xAB,0x2B,0x48,0xFB,0xF7,0x1D,0xFC,0x00,0x20,0x1E,0x49,0x68,0x74,0x0A, \ +0x68,0x53,0x78,0x00,0x2B,0x22,0xD0,0x93,0x78,0x01,0x33,0x1B,0x06,0x1B,0x0E, \ +0x93,0x70,0x04,0x2B,0x02,0xDA,0x09,0x68,0x48,0x70,0xD2,0xE7,0x60,0x79,0x01, \ +0x28,0x1F,0xDD,0x02,0x28,0x03,0xD1,0xBA,0x78,0x08,0x23,0x9A,0x43,0xBA,0x70, \ +0x03,0x28,0x17,0xD1,0x0E,0x48,0x40,0x78,0x40,0x09,0x06,0xD3,0x01,0x20,0xF8, \ +0x70,0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0x0C,0xE0,0x01,0x20,0xB8,0x71, \ +0x09,0xE0,0x60,0x79,0x03,0x28,0x06,0xD1,0x05,0x4A,0x01,0x20,0x52,0x78,0x52, \ +0x09,0x00,0xD3,0x00,0x20,0xF8,0x70,0x09,0x68,0x40,0x20,0x08,0x70,0xAB,0xE7, \ +0x00,0x00,0x04,0x08,0x00,0x02,0x80,0x00,0x00,0x02,0xB4,0x09,0x00,0x02,0xD0, \ +0x01,0x00,0x02,0x04,0x01,0x00,0x02,0x06,0x08,0x00,0x02,0x0E,0x08,0x00,0x02, \ +0x5C,0x00,0x00,0x02,0x14,0x08,0x00,0x02,0x08,0x08,0x00,0x02,0x44,0x09,0x00, \ +0x02,0xB0,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xAE,0x01,0x00,0x02,0xF8,0x06, \ +0x00,0x02,0xF8,0xB4,0x00,0x26,0x82,0x1C,0x06,0x29,0x01,0xD3,0x48,0x08,0x02, \ +0xD3,0x00,0x20,0xF8,0xBC,0xF7,0x46,0x00,0x24,0x03,0x23,0x00,0x25,0xCF,0x1E, \ +0x17,0xD0,0x01,0x39,0xD0,0x5C,0x99,0x42,0x02,0xD1,0x00,0x28,0x0F,0xD1,0x0C, \ +0xE0,0x0E,0x28,0x0C,0xD8,0x01,0x28,0x0A,0xD3,0xA8,0x42,0x08,0xD3,0xD5,0x18, \ +0x6D,0x78,0x03,0x33,0x03,0x34,0x2D,0x18,0xA7,0x42,0xEC,0xD8,0x01,0x2E,0x01, \ +0xD1,0x00,0x20,0xE0,0xE7,0x1B,0x48,0xC0,0x79,0x01,0x28,0x00,0xD1,0xDB,0xE7, \ +0x19,0x48,0xC1,0x1D,0x29,0x31,0x49,0x7A,0x00,0x29,0x01,0xD1,0x01,0x20,0xD3, \ 
+0xE7,0x91,0x78,0x3A,0x30,0x00,0x23,0x81,0x70,0x51,0x78,0x41,0x70,0x11,0x78, \ +0x01,0x70,0x03,0x21,0x00,0x2F,0x1B,0xD9,0x50,0x5C,0x00,0x28,0x18,0xD0,0x0F, \ +0x4D,0x01,0x26,0x2C,0x18,0x66,0x73,0x54,0x18,0x00,0x94,0x64,0x78,0x24,0x18, \ +0xA0,0x42,0x0A,0xD2,0x0A,0x4D,0x01,0x26,0x2D,0x18,0x6E,0x73,0x00,0x9E,0x10, \ +0x3D,0xB6,0x78,0x01,0x30,0xA0,0x42,0xEE,0x73,0xF4,0xD3,0x03,0x31,0x03,0x33, \ +0x9F,0x42,0xE3,0xD8,0x01,0x20,0xAA,0xE7,0x00,0x00,0xB4,0x09,0x00,0x02,0x80, \ +0x00,0x00,0x02,0x14,0x01,0x00,0x02,0xF1,0xB5,0x81,0xB0,0x22,0x4F,0x01,0x9E, \ +0x3F,0x68,0x00,0x24,0xBF,0x89,0x00,0x21,0x24,0x20,0x3D,0x1F,0x00,0x95,0x24, \ +0x2D,0x39,0xD9,0x1E,0x4F,0x7F,0x7A,0x35,0x5C,0x03,0x2D,0x08,0xD0,0x07,0x2D, \ +0x0D,0xD1,0x35,0x18,0x6D,0x78,0x01,0x24,0x03,0x1C,0x02,0x35,0x28,0x18,0x0A, \ +0xE0,0x35,0x18,0x6D,0x78,0x01,0x21,0x02,0x1C,0x02,0x35,0x28,0x18,0x05,0xE0, \ +0x35,0x18,0x6D,0x78,0x02,0x35,0x28,0x18,0x00,0x29,0x01,0xD0,0x00,0x2F,0x02, \ +0xD0,0x00,0x9D,0x85,0x42,0xE1,0xD8,0x00,0x29,0x17,0xD0,0xB0,0x18,0x40,0x78, \ +0x01,0x28,0x01,0xD0,0x02,0xB0,0xF0,0xBD,0x01,0x2F,0x0F,0xD1,0x00,0x2C,0x0D, \ +0xD0,0x01,0x98,0xC0,0x18,0x41,0x78,0xFF,0xF7,0x5E,0xFF,0x00,0x28,0x00,0xD1, \ +0xF1,0xE7,0x05,0x48,0xC1,0x79,0x00,0x29,0x01,0xD1,0x01,0x21,0xC1,0x71,0xEA, \ +0xE7,0x48,0x01,0x00,0x02,0xB0,0x00,0x00,0x02,0xB4,0x09,0x00,0x02,0x00,0xB5, \ +0x05,0x49,0x89,0x7C,0x01,0x29,0x04,0xD1,0x01,0x78,0x80,0x29,0x01,0xD1,0xFF, \ +0xF7,0xA8,0xFF,0x00,0xBD,0x00,0x00,0xC0,0x00,0x00,0x02,0x90,0xB5,0x10,0x4C, \ +0x60,0x78,0x00,0x28,0x1A,0xD0,0x0F,0x4F,0x38,0x68,0x40,0x68,0x42,0x7E,0x18, \ +0x30,0x00,0x2A,0x09,0xD0,0x0C,0x49,0x49,0x79,0x91,0x42,0x0F,0xD1,0x0B,0x49, \ +0x02,0x30,0xFB,0xF7,0x1A,0xFF,0x00,0x28,0x09,0xD1,0x38,0x68,0x40,0x68,0xC1, \ +0x1D,0x03,0x31,0x06,0x22,0x07,0x48,0xFB,0xF7,0x2E,0xFF,0x01,0x20,0xA0,0x70, \ +0x90,0xBD,0x00,0x00,0x94,0x09,0x00,0x02,0x48,0x01,0x00,0x02,0x04,0x01,0x00, \ +0x02,0x94,0x00,0x00,0x02,0x30,0x01,0x00,0x02,0xB0,0xB4,0x03,0x78,0x00,0x27, \ +0x20,0x49,0x20,0x4A,0x08,0x2B,0x37,0xD1,0xD3,0x78,0x00,0x2B,0x04,0xD0,0xD0, \ +0x7A,0x09,0x68,0x88,0x75,0xB0,0xBC,0xF7,0x46,0x00,0x79,0x40,0x08,0x03,0xD3, \ +0x90,0x7A,0x09,0x68,0x88,0x75,0xF6,0xE7,0x0B,0x68,0x99,0x7D,0xD2,0x7A,0x91, \ +0x42,0x01,0xDD,0x9A,0x75,0xEF,0xE7,0x15,0x4C,0x08,0x19,0x00,0x7C,0x00,0x28, \ +0xEA,0xD1,0x08,0x1C,0x01,0x29,0x0A,0xD3,0x01,0x38,0x25,0x18,0x2D,0x7C,0x00, \ +0x2D,0x03,0xD1,0x01,0x28,0xF8,0xD2,0x00,0x2F,0x01,0xD0,0x98,0x75,0xDC,0xE7, \ +0x8A,0x42,0x06,0xD9,0x01,0x31,0x60,0x18,0x00,0x7C,0x00,0x28,0x03,0xD1,0x8A, \ +0x42,0xF8,0xD8,0x00,0x2F,0x01,0xD0,0x99,0x75,0xCF,0xE7,0x9A,0x75,0xCD,0xE7, \ +0xD0,0x79,0x09,0x68,0x88,0x75,0xC9,0xE7,0x00,0x00,0xC4,0x01,0x00,0x02,0x04, \ +0x01,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0xB5,0x07,0x48,0x81,0x79,0x03,0x29, \ +0x02,0xD0,0x81,0x79,0x04,0x29,0x05,0xD1,0x00,0x21,0x81,0x71,0x07,0x21,0x04, \ +0x20,0xFD,0xF7,0xCA,0xFE,0x00,0xBD,0x00,0x00,0x94,0x09,0x00,0x02,0x90,0xB5, \ +0x27,0x48,0x27,0x49,0x00,0x68,0x47,0x68,0x22,0x20,0x38,0x5C,0x10,0x23,0x18, \ +0x40,0x03,0xD0,0x08,0x78,0x00,0x28,0x41,0xD0,0x02,0xE0,0x08,0x78,0x00,0x28, \ +0x3D,0xD1,0x24,0x20,0x38,0x5C,0x00,0x28,0x39,0xD1,0xF8,0x1D,0x1D,0x30,0x44, \ +0x78,0x1D,0x49,0x00,0x2C,0x02,0xD0,0x4A,0x79,0xA2,0x42,0x30,0xD1,0x4A,0x79, \ +0x1B,0x49,0x02,0x30,0xFB,0xF7,0x83,0xFE,0x00,0x28,0x29,0xD1,0x38,0x19,0x20, \ +0x30,0xC0,0x79,0x00,0x19,0x28,0x30,0x39,0x5C,0x03,0x29,0x21,0xD1,0x38,0x18, \ +0x14,0x49,0x80,0x78,0x09,0x7D,0x88,0x42,0x1B,0xD1,0x13,0x48,0x40,0x7A,0x00, \ +0x28,0x05,0xD0,0x12,0x48,0x08,0x18,0x40,0x7B,0x00,0x28,0x12,0xD0,0x09,0xE0, \ 
+0x10,0x48,0x10,0x4A,0xC0,0x7A,0x40,0x00,0x10,0x5A,0x01,0x22,0x01,0x39,0x8A, \ +0x40,0x10,0x40,0x07,0xD0,0x01,0x20,0xF8,0xF7,0x7A,0xF9,0x00,0x28,0x02,0xD0, \ +0x02,0x20,0xFF,0xF7,0xFD,0xF9,0x90,0xBD,0x48,0x01,0x00,0x02,0x18,0x00,0x00, \ +0x02,0x04,0x01,0x00,0x02,0xDC,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0xB0,0x00, \ +0x00,0x02,0x14,0x01,0x00,0x02,0xB4,0x09,0x00,0x02,0x6C,0x02,0x00,0x02,0x80, \ +0xB5,0xFD,0xF7,0x93,0xFD,0x1A,0x48,0x00,0xF0,0x58,0xFD,0x19,0x4B,0x1A,0x48, \ +0x59,0x7A,0x01,0x29,0x04,0xD1,0x48,0x21,0x41,0x81,0x18,0x21,0x01,0x81,0x03, \ +0xE0,0x90,0x21,0x41,0x81,0x30,0x21,0x01,0x81,0x41,0x89,0x02,0x89,0x14,0x4F, \ +0x89,0x18,0x12,0x4A,0x11,0x80,0xC2,0x88,0x80,0x88,0x11,0x18,0x09,0x18,0x39, \ +0x80,0x51,0x18,0xFF,0x31,0x10,0x4A,0x31,0x31,0x11,0x80,0x19,0x88,0x10,0x4F, \ +0x48,0x43,0x0E,0x49,0x08,0x80,0xD8,0x79,0x0E,0x49,0x38,0x70,0x38,0x78,0x08, \ +0x70,0xF7,0xF7,0xC0,0xFA,0xF8,0xF7,0x8C,0xFF,0x39,0x78,0x0B,0x48,0x40,0x5C, \ +0x0B,0x49,0x08,0x70,0x80,0xBD,0x5C,0x00,0x00,0x02,0x04,0x01,0x00,0x02,0x00, \ +0x00,0x00,0x02,0xAC,0x01,0x00,0x02,0xA8,0x01,0x00,0x02,0xAA,0x01,0x00,0x02, \ +0xAE,0x01,0x00,0x02,0x92,0x01,0x00,0x02,0x93,0x01,0x00,0x02,0xC0,0x01,0x00, \ +0x02,0x91,0x01,0x00,0x02,0x80,0xB4,0x21,0x48,0x00,0x21,0x01,0x70,0x00,0x20, \ +0x19,0x27,0x1F,0x4A,0xFF,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x00, \ +0x20,0x43,0x27,0x1C,0x4A,0x7F,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB, \ +0x1A,0x48,0x18,0x4A,0x01,0x80,0x1A,0x48,0x1A,0x4B,0x02,0x60,0x13,0x60,0x02, \ +0x68,0xD7,0x1D,0x15,0x37,0x57,0x60,0x02,0x68,0x08,0x3F,0x97,0x60,0x02,0x68, \ +0x11,0x73,0x02,0x68,0x91,0x73,0x07,0x68,0x03,0x22,0xBA,0x75,0x02,0x68,0x91, \ +0x82,0x00,0x68,0x11,0x4A,0x10,0x60,0x11,0x48,0x0C,0x4A,0x01,0x80,0x11,0x48, \ +0x02,0x60,0x13,0x60,0x02,0x68,0xD3,0x1D,0x11,0x33,0x53,0x60,0x02,0x68,0x91, \ +0x81,0x02,0x68,0x11,0x72,0x00,0x68,0x0C,0x49,0x08,0x60,0x0C,0x49,0x01,0x20, \ +0x08,0x70,0x80,0xBC,0xF7,0x46,0x94,0x01,0x00,0x02,0x00,0x11,0x00,0x02,0x00, \ +0xDA,0x00,0x02,0xF0,0x01,0x00,0x02,0xC4,0x01,0x00,0x02,0x00,0x00,0x00,0x80, \ +0x68,0x02,0x00,0x02,0xF2,0x01,0x00,0x02,0x48,0x01,0x00,0x02,0x64,0x02,0x00, \ +0x02,0xDF,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x39,0x4E,0xF7,0x1D,0x69,0x37, \ +0xB8,0x78,0x04,0x23,0x18,0x40,0x40,0x24,0x00,0x25,0x00,0x28,0x03,0xD1,0x7D, \ +0x71,0x3C,0x71,0x02,0xB0,0xF0,0xBD,0x33,0x49,0xA4,0x20,0x08,0x70,0x10,0x20, \ +0x48,0x70,0x32,0x48,0x03,0x23,0xC0,0x88,0x9B,0x03,0x18,0x43,0x48,0x80,0xC8, \ +0x1D,0x03,0x30,0x06,0x22,0x2E,0x49,0xFB,0xF7,0x99,0xFD,0x2E,0x49,0x2E,0x48, \ +0x06,0x22,0xFB,0xF7,0x94,0xFD,0x10,0x20,0x2D,0x49,0xB0,0x66,0xC8,0x79,0x10, \ +0x21,0xF8,0xF7,0xB3,0xFE,0xF0,0x66,0x24,0x48,0x70,0x66,0x01,0x20,0x38,0x70, \ +0xF8,0xF7,0xD8,0xFB,0xF8,0xF7,0x42,0xFC,0x3D,0x70,0x82,0x25,0x00,0x28,0x2E, \ +0xD1,0x23,0x49,0x24,0x48,0xC9,0x79,0x24,0x4A,0xC0,0x88,0x49,0x00,0x51,0x5A, \ +0x40,0x18,0x22,0x49,0x09,0x88,0x41,0x18,0x01,0x20,0x38,0x71,0x04,0x20,0x70, \ +0x60,0x01,0x20,0xF8,0xF7,0x82,0xFF,0x00,0x22,0xD2,0x43,0x00,0x92,0x01,0x22, \ +0x11,0x21,0x01,0xAB,0x1B,0x48,0xFB,0xF7,0x1F,0xF9,0x01,0x98,0x41,0x08,0x01, \ +0xD3,0x3C,0x71,0x1A,0xE0,0x40,0x09,0x18,0xD3,0x78,0x79,0x17,0x49,0x01,0x30, \ +0x00,0x06,0x00,0x0E,0x78,0x71,0x09,0x7C,0x88,0x42,0x01,0xDA,0x3D,0x71,0x0D, \ +0xE0,0x3C,0x71,0x0B,0xE0,0x78,0x79,0x10,0x49,0x01,0x30,0x00,0x06,0x00,0x0E, \ +0x78,0x71,0x09,0x7C,0x88,0x42,0x01,0xDA,0x3D,0x71,0x00,0xE0,0x3C,0x71,0x97, \ +0xE7,0x44,0x09,0x00,0x02,0x04,0x08,0x00,0x02,0x80,0x00,0x00,0x02,0x5C,0x00, \ +0x00,0x02,0x8E,0x00,0x00,0x02,0x08,0x08,0x00,0x02,0x04,0x01,0x00,0x02,0x00, \ 
+0x00,0x00,0x02,0xB0,0x01,0x00,0x02,0xAE,0x01,0x00,0x02,0x38,0x07,0x00,0x02, \ +0xC0,0x00,0x00,0x02,0x80,0xB5,0xC0,0x20,0xFB,0xF7,0x44,0xF9,0x07,0x1C,0x12, \ +0x48,0x01,0x68,0x01,0x31,0x01,0x60,0x11,0x48,0xFB,0xF7,0x64,0xFD,0x00,0x29, \ +0x17,0xD1,0x0F,0x48,0x10,0x4A,0x03,0x78,0x10,0x49,0x00,0x2B,0x06,0xD1,0x09, \ +0x68,0xD3,0x69,0x19,0x43,0xD1,0x61,0x01,0x21,0x01,0x70,0x0A,0xE0,0x0C,0x4B, \ +0x9B,0x79,0x05,0x2B,0x06,0xD0,0x09,0x68,0xD3,0x69,0xC9,0x43,0x19,0x40,0xD1, \ +0x61,0x00,0x21,0x01,0x70,0x38,0x1C,0xFB,0xF7,0x1F,0xF9,0x80,0xBD,0xD4,0x02, \ +0x00,0x02,0x20,0x4E,0x00,0x00,0x3A,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0xA4, \ +0x02,0x00,0x02,0x94,0x09,0x00,0x02,0x90,0xB5,0xC0,0x20,0xFB,0xF7,0x0E,0xF9, \ +0x07,0x1C,0x0F,0x48,0x81,0x7A,0x00,0x29,0x15,0xD1,0x01,0x7B,0x01,0x29,0x12, \ +0xD1,0xC1,0x7A,0x00,0x29,0x0F,0xD1,0x00,0x24,0x0A,0x49,0x50,0x30,0x0C,0x70, \ +0x44,0x70,0x00,0xF0,0x1E,0xFB,0x08,0x48,0x01,0x21,0x84,0x61,0x07,0x20,0xFD, \ +0xF7,0xDC,0xFC,0x06,0x49,0x01,0x20,0x08,0x70,0x38,0x1C,0xFB,0xF7,0xF0,0xF8, \ +0x90,0xBD,0x00,0x00,0x44,0x09,0x00,0x02,0x3C,0x01,0x00,0x02,0x80,0x00,0x00, \ +0x04,0xB3,0x02,0x00,0x02,0x90,0xB5,0x14,0x4C,0x14,0x4F,0x61,0x79,0x03,0x29, \ +0x0C,0xD1,0x01,0x23,0x1B,0x03,0x98,0x42,0x08,0xD1,0x19,0x21,0xC9,0x02,0x02, \ +0x20,0xF8,0xF7,0xCF,0xFE,0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0xB8,0x78, \ +0x01,0x28,0x0C,0xD1,0x00,0xF0,0xAA,0xFA,0x60,0x79,0x02,0x28,0x08,0xD1,0xB8, \ +0x78,0x08,0x23,0x18,0x43,0xB8,0x70,0xB8,0x78,0x40,0x08,0x40,0x00,0xB8,0x70, \ +0x90,0xBD,0xB8,0x78,0x40,0x08,0x40,0x00,0xB8,0x70,0x90,0xBD,0x00,0x00,0xB0, \ +0x00,0x00,0x02,0xB4,0x09,0x00,0x02,0x80,0xB5,0x18,0x48,0x81,0x7A,0x00,0x29, \ +0x1C,0xD1,0x01,0x7B,0x01,0x29,0x19,0xD1,0xC0,0x7A,0x00,0x28,0x16,0xD1,0x14, \ +0x4F,0xF8,0x1D,0x29,0x30,0x40,0x79,0x03,0x28,0x14,0xD1,0xF8,0xF7,0x79,0xFE, \ +0x39,0x88,0x11,0x4B,0x10,0x4F,0x89,0x02,0x08,0x1A,0x98,0x42,0x08,0xD9,0xC1, \ +0x1A,0x06,0x20,0xF8,0xF7,0x92,0xFE,0x00,0xF0,0xBA,0xFA,0x01,0x20,0xB8,0x70, \ +0x80,0xBD,0x00,0x20,0xB8,0x70,0x80,0xBD,0x01,0x20,0x80,0x06,0x08,0x49,0x40, \ +0x6A,0x06,0x4B,0x49,0x68,0xC0,0x18,0x88,0x42,0xF2,0xD2,0x00,0xF0,0xA9,0xFA, \ +0x80,0xBD,0x44,0x09,0x00,0x02,0x80,0x00,0x00,0x02,0xB4,0x09,0x00,0x02,0xB8, \ +0x0B,0x00,0x00,0x80,0x00,0x00,0x04,0xF0,0xB5,0xC0,0x20,0xFB,0xF7,0x78,0xF8, \ +0x05,0x1C,0x00,0x26,0x34,0x48,0x07,0x24,0x64,0x06,0x06,0x70,0xE0,0x69,0x10, \ +0x23,0x98,0x43,0xE0,0x61,0x31,0x48,0xC1,0x69,0x03,0x0C,0x19,0x43,0xC1,0x61, \ +0xC1,0x69,0x1B,0x23,0x99,0x43,0xC1,0x61,0xC1,0x69,0x73,0x1F,0x19,0x40,0xC1, \ +0x61,0xC1,0x69,0x83,0x01,0x19,0x43,0xC1,0x61,0x2A,0x49,0xC2,0x69,0x09,0x68, \ +0xC9,0x43,0x11,0x40,0xC1,0x61,0xA1,0x69,0x01,0x23,0x19,0x43,0xA1,0x61,0x0F, \ +0x22,0x12,0x06,0x25,0x4B,0x11,0x89,0xD9,0x69,0x01,0x05,0x00,0x68,0xCF,0x68, \ +0x10,0x88,0xC9,0x6B,0x04,0x27,0x21,0x48,0x23,0x49,0x06,0x70,0x21,0x48,0x9E, \ +0x61,0x07,0x70,0x01,0x20,0x08,0x70,0x20,0x48,0x06,0x70,0x20,0x48,0x06,0x70, \ +0x20,0x48,0xC6,0x74,0x20,0x48,0x06,0x70,0xFA,0xF7,0xA0,0xF9,0x1B,0x48,0x00, \ +0x78,0x00,0x28,0x03,0xD1,0xA0,0x69,0xFD,0x23,0x18,0x40,0xA0,0x61,0xFA,0xF7, \ +0x0C,0xFB,0xFA,0xF7,0x76,0xFB,0x0A,0x20,0xF7,0xF7,0x71,0xF8,0xFA,0xF7,0x69, \ +0xFB,0x15,0x48,0x01,0x21,0xC2,0x1D,0x49,0x32,0x07,0x75,0x91,0x71,0x56,0x70, \ +0x13,0x4B,0x02,0x22,0x1A,0x70,0x13,0x4B,0x70,0x30,0x19,0x70,0x12,0x4B,0x59, \ +0x71,0x86,0x70,0x12,0x48,0x02,0x70,0xF7,0xF7,0x63,0xF8,0x28,0x1C,0xFB,0xF7, \ +0x0E,0xF8,0xF0,0xBD,0x00,0x00,0xDE,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0xB4, \ +0x02,0x00,0x02,0x80,0x00,0x00,0x04,0x3C,0x01,0x00,0x02,0x53,0x02,0x00,0x02, \ 
+0x5E,0x02,0x00,0x02,0x36,0x01,0x00,0x02,0x37,0x01,0x00,0x02,0x44,0x09,0x00, \ +0x02,0x51,0x02,0x00,0x02,0xB2,0x02,0x00,0x02,0xB3,0x02,0x00,0x02,0xB0,0x00, \ +0x00,0x02,0x3B,0x01,0x00,0x02,0x90,0xB5,0x22,0x49,0x00,0x27,0xC8,0x1D,0x49, \ +0x30,0x82,0x79,0x01,0x2A,0x00,0xD0,0x47,0x71,0xCA,0x1D,0x69,0x32,0x93,0x79, \ +0x1D,0x49,0x00,0x2B,0x03,0xD0,0x97,0x71,0x01,0x20,0x88,0x73,0x90,0xBD,0x52, \ +0x78,0x00,0x2A,0x02,0xD0,0xFD,0xF7,0xCC,0xFB,0x90,0xBD,0x02,0x78,0x00,0x2A, \ +0x03,0xD0,0x47,0x71,0xFD,0xF7,0x7D,0xFA,0x90,0xBD,0x42,0x79,0x00,0x2A,0x02, \ +0xD0,0xFD,0xF7,0xAD,0xFA,0x90,0xBD,0x82,0x78,0x00,0x2A,0x02,0xD0,0xFD,0xF7, \ +0x91,0xFA,0x90,0xBD,0xC9,0x7B,0x00,0x29,0x02,0xD0,0xFD,0xF7,0x97,0xFA,0x90, \ +0xBD,0x80,0x79,0x05,0x28,0x0D,0xD1,0x0A,0x4C,0x20,0x68,0x01,0x7B,0xC9,0x09, \ +0x02,0xD3,0xF9,0xF7,0x1A,0xFA,0x90,0xBD,0x01,0x7B,0x10,0x29,0x02,0xD1,0xF9, \ +0xF7,0x2C,0xFB,0x20,0x60,0x38,0x1C,0x90,0xBD,0x00,0x00,0x44,0x09,0x00,0x02, \ +0x54,0x09,0x00,0x02,0xC4,0x01,0x00,0x02,0xB0,0xB5,0x0C,0x1C,0x07,0x1C,0x01, \ +0x28,0x01,0xD3,0x0E,0x2F,0x01,0xD9,0x00,0x20,0xB0,0xBD,0x1B,0x4D,0xE8,0x69, \ +0x2B,0x0C,0x18,0x43,0xE8,0x61,0x19,0x48,0xE9,0x69,0x00,0x68,0x08,0x43,0xE8, \ +0x61,0x18,0x48,0xE9,0x69,0x00,0x68,0x08,0x43,0xE8,0x61,0xE8,0x69,0x04,0x23, \ +0x18,0x43,0xE8,0x61,0x14,0x48,0xF6,0xF7,0xCD,0xFF,0x00,0xF0,0xBF,0xF8,0x01, \ +0x2C,0x01,0xD1,0x00,0xF0,0xF1,0xF9,0x00,0xF0,0x81,0xF8,0x10,0x48,0x00,0x78, \ +0x01,0x28,0x0A,0xD1,0x0E,0x2F,0x04,0xD1,0xE8,0x69,0x40,0x23,0x98,0x43,0xE8, \ +0x61,0x03,0xE0,0xE8,0x69,0x40,0x23,0x18,0x43,0xE8,0x61,0x38,0x1C,0x00,0xF0, \ +0x11,0xF8,0xE8,0x69,0x01,0x23,0x9B,0x02,0x98,0x43,0xE8,0x61,0x01,0x20,0xB0, \ +0xBD,0x40,0x00,0x00,0x04,0xB4,0x02,0x00,0x02,0xB8,0x02,0x00,0x02,0xDC,0x05, \ +0x00,0x00,0xB1,0x02,0x00,0x02,0x90,0xB5,0x07,0x1C,0x07,0x20,0x40,0x06,0x81, \ +0x69,0x04,0x23,0x19,0x43,0x81,0x61,0xFA,0xF7,0x2E,0xFA,0x0A,0x20,0xF6,0xF7, \ +0x95,0xFF,0x17,0x4C,0x02,0x20,0x61,0x68,0x00,0xF0,0x30,0xF8,0x00,0x20,0x21, \ +0x68,0x00,0xF0,0x2C,0xF8,0x13,0x48,0xBF,0x00,0x38,0x18,0x40,0x38,0xC1,0x6B, \ +0x01,0x20,0x00,0xF0,0x24,0xF8,0x05,0x20,0x21,0x69,0x00,0xF0,0x20,0xF8,0x08, \ +0x20,0xA1,0x68,0x00,0xF0,0x1C,0xF8,0x07,0x20,0xE1,0x68,0x00,0xF0,0x18,0xF8, \ +0x0A,0x48,0x38,0x18,0x40,0x38,0xC1,0x6B,0x04,0x20,0x00,0xF0,0x11,0xF8,0xFF, \ +0x20,0xF5,0x30,0xF6,0xF7,0x6D,0xFF,0xFA,0xF7,0x65,0xFA,0x0A,0x20,0xF6,0xF7, \ +0x68,0xFF,0x90,0xBD,0x00,0x00,0xD8,0x02,0x00,0x02,0xEC,0x02,0x00,0x02,0x24, \ +0x03,0x00,0x02,0x90,0xB4,0x0B,0x4A,0x13,0x68,0xDF,0x43,0x0A,0x4B,0xDC,0x69, \ +0x27,0x40,0xDF,0x61,0x07,0x05,0x89,0x00,0x39,0x43,0x80,0x08,0x08,0x43,0x18, \ +0x62,0x18,0x1C,0x01,0x6A,0xC9,0x0D,0xFC,0xD3,0x11,0x68,0xC2,0x69,0x11,0x43, \ +0xC1,0x61,0x90,0xBC,0xF7,0x46,0xB8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x80, \ +0xB5,0x19,0x4F,0x00,0x20,0x39,0x78,0xF6,0xF7,0xE0,0xFF,0x79,0x78,0x01,0x20, \ +0xF6,0xF7,0xDC,0xFF,0xB9,0x78,0x02,0x20,0xF6,0xF7,0xD8,0xFF,0xF9,0x78,0x03, \ +0x20,0xF6,0xF7,0xD4,0xFF,0x79,0x7C,0x11,0x20,0xF6,0xF7,0xD0,0xFF,0x39,0x7D, \ +0x14,0x20,0xF6,0xF7,0xCC,0xFF,0x79,0x7D,0x15,0x20,0xF6,0xF7,0xC8,0xFF,0x39, \ +0x7F,0x1C,0x20,0xF6,0xF7,0xC4,0xFF,0xB9,0x7C,0x12,0x20,0xF6,0xF7,0xC0,0xFF, \ +0xF9,0x7C,0x13,0x20,0xF6,0xF7,0xBC,0xFF,0x05,0x48,0x00,0x78,0x01,0x28,0x03, \ +0xD1,0x79,0x7F,0x1D,0x20,0xF6,0xF7,0xB4,0xFF,0x80,0xBD,0x00,0x00,0xCC,0x07, \ +0x00,0x02,0xB0,0x02,0x00,0x02,0x80,0xB5,0x07,0x27,0x7F,0x06,0xB8,0x69,0x40, \ +0x08,0x40,0x00,0xB8,0x61,0xB8,0x69,0x01,0x23,0x18,0x43,0xB8,0x61,0x05,0x20, \ +0xF6,0xF7,0xFE,0xFE,0xB8,0x69,0x40,0x08,0x40,0x00,0xB8,0x61,0x05,0x20,0xF6, \ 
+0xF7,0xF7,0xFE,0x80,0xBD,0xF0,0xB5,0x38,0x4E,0x07,0x1C,0xF0,0x7A,0x03,0x28, \ +0xFC,0xD0,0xC0,0x20,0xFA,0xF7,0xA3,0xFE,0x35,0x4D,0x04,0x1C,0xE8,0x69,0xAB, \ +0x01,0x18,0x43,0xE8,0x61,0x98,0x03,0xC1,0x68,0xC0,0x6B,0x28,0x68,0x0F,0x20, \ +0x00,0x06,0x01,0x88,0x00,0x89,0x2F,0x48,0xC0,0x69,0x2F,0x48,0xC1,0x19,0xC8, \ +0x1F,0x09,0x38,0xC2,0x7B,0x2D,0x48,0xFF,0x2A,0x00,0xD0,0x02,0x75,0x4A,0x7B, \ +0xFF,0x2A,0x00,0xD0,0x42,0x75,0x40,0x31,0x89,0x78,0xFF,0x29,0x02,0xD0,0x8A, \ +0x07,0x00,0xD1,0x41,0x74,0x26,0x48,0x01,0x7D,0x14,0x20,0xF6,0xF7,0x66,0xFF, \ +0x23,0x48,0x41,0x7D,0x15,0x20,0xF6,0xF7,0x61,0xFF,0x22,0x48,0x00,0x78,0x01, \ +0x28,0x0A,0xD1,0x0E,0x2F,0x04,0xD1,0xE8,0x69,0x40,0x23,0x98,0x43,0xE8,0x61, \ +0x03,0xE0,0xE8,0x69,0x40,0x23,0x18,0x43,0xE8,0x61,0x1B,0x48,0x07,0x75,0x00, \ +0x7D,0xFF,0xF7,0x09,0xFF,0x01,0x20,0xFD,0xF7,0x60,0xFA,0xE8,0x69,0x18,0x4B, \ +0x18,0x40,0xE8,0x61,0x06,0x20,0x70,0x72,0xFA,0x21,0x07,0x20,0xF8,0xF7,0x4C, \ +0xFC,0x14,0x49,0x08,0x20,0xF8,0xF7,0x48,0xFC,0x20,0x1C,0xFA,0xF7,0x4D,0xFE, \ +0x70,0x7C,0x01,0x28,0x05,0xD1,0x00,0x22,0x10,0x21,0x0F,0x48,0xFA,0xF7,0xE9, \ +0xFC,0xF0,0xBD,0x70,0x7C,0x02,0x28,0xFB,0xD1,0x00,0x22,0x10,0x21,0x0C,0x48, \ +0xFA,0xF7,0xE0,0xFC,0xF0,0xBD,0x00,0x00,0x44,0x09,0x00,0x02,0x40,0x00,0x00, \ +0x04,0x80,0x00,0x00,0x04,0x58,0x0A,0x00,0x02,0xCC,0x07,0x00,0x02,0xB1,0x02, \ +0x00,0x02,0x00,0x00,0x00,0x02,0xFF,0xEF,0x00,0x00,0x88,0x13,0x00,0x00,0xD8, \ +0x06,0x00,0x02,0xF8,0x06,0x00,0x02,0xB0,0xB5,0x07,0x21,0x49,0x06,0xCA,0x69, \ +0x52,0x09,0x03,0xD3,0xCA,0x69,0x10,0x23,0x9A,0x43,0xCA,0x61,0x18,0x4C,0x01, \ +0x28,0x0C,0xD1,0x18,0x4D,0x6F,0x68,0xF6,0xF7,0x71,0xFE,0x39,0x1A,0x49,0x01, \ +0x09,0x18,0x69,0x60,0x61,0x6B,0x09,0x1A,0x49,0x01,0x08,0x18,0x60,0x63,0x12, \ +0x48,0x00,0x21,0x00,0x7D,0xFF,0xF7,0x63,0xFE,0x11,0x4F,0x11,0x4B,0xF9,0x1D, \ +0x69,0x31,0x08,0x73,0x01,0x20,0x80,0x06,0xC0,0x68,0xE0,0x69,0x18,0x40,0xE0, \ +0x61,0x01,0x20,0xFD,0xF7,0xF9,0xF9,0x01,0x20,0x38,0x72,0x06,0x20,0x78,0x72, \ +0x07,0x20,0xFF,0x21,0x2D,0x31,0xF8,0xF7,0xE6,0xFB,0x4B,0x21,0xC9,0x00,0x08, \ +0x20,0xF8,0xF7,0xE1,0xFB,0xB0,0xBD,0x40,0x00,0x00,0x04,0x80,0x00,0x00,0x04, \ +0x00,0x00,0x00,0x02,0x44,0x09,0x00,0x02,0xFF,0xEF,0x00,0x00,0xF0,0xB5,0x24, \ +0x4F,0xF8,0x69,0x3B,0x0C,0x18,0x43,0xF8,0x61,0xF8,0x69,0x1B,0x23,0x98,0x43, \ +0xF8,0x61,0xF8,0x69,0x04,0x23,0x98,0x43,0xF8,0x61,0xF8,0x69,0x9B,0x02,0x18, \ +0x43,0xF8,0x61,0x1C,0x48,0xF9,0x69,0x00,0x68,0xC0,0x43,0x08,0x40,0x07,0x24, \ +0x64,0x06,0xF8,0x61,0xA0,0x69,0x01,0x23,0x18,0x43,0xA0,0x61,0x01,0x20,0xF8, \ +0xF7,0xCF,0xFB,0x08,0x20,0xF8,0xF7,0xCC,0xFB,0x07,0x20,0xF8,0xF7,0xC9,0xFB, \ +0x01,0x20,0x80,0x06,0xC1,0x68,0xC0,0x6B,0x11,0x4D,0x38,0x68,0x0F,0x20,0x00, \ +0x06,0x00,0x88,0x01,0x26,0x6E,0x72,0xF8,0xF7,0x74,0xFB,0xE8,0x1D,0x69,0x30, \ +0x0C,0x4D,0x86,0x70,0x6E,0x68,0xF6,0xF7,0x01,0xFE,0x31,0x1A,0x49,0x09,0x09, \ +0x18,0x69,0x60,0x79,0x6B,0x09,0x1A,0x49,0x09,0x08,0x18,0x78,0x63,0xE0,0x69, \ +0x10,0x23,0x18,0x43,0xE0,0x61,0xF0,0xBD,0x00,0x00,0x40,0x00,0x00,0x04,0xB4, \ +0x02,0x00,0x02,0x44,0x09,0x00,0x02,0x80,0x00,0x00,0x04,0xF0,0xB4,0x2E,0x4D, \ +0x01,0x27,0xE9,0x1D,0x19,0x31,0xCC,0x78,0x00,0x20,0x2C,0x4A,0xFF,0x2C,0x13, \ +0xD0,0x23,0x09,0x11,0xD3,0x2B,0x7F,0x13,0x70,0x6E,0x7F,0x56,0x70,0xAE,0x7F, \ +0x96,0x70,0xEB,0x7F,0xD3,0x70,0x0B,0x78,0x53,0x74,0x4B,0x78,0x13,0x75,0x8B, \ +0x78,0x53,0x75,0x14,0x77,0x97,0x74,0xD0,0x74,0x0E,0xE0,0x10,0x70,0x60,0x23, \ +0x53,0x70,0x40,0x23,0x93,0x70,0xD3,0x70,0x50,0x74,0xFF,0x23,0x13,0x75,0x57, \ +0x23,0x53,0x75,0x48,0x23,0x13,0x77,0x97,0x74,0xD0,0x74,0x1A,0x4B,0x9C,0x78, \ 
+0x1A,0x4B,0x04,0x2C,0x01,0xDA,0x58,0x73,0x05,0xE0,0x09,0x79,0xFF,0x29,0x01, \ +0xD0,0x59,0x73,0x00,0xE0,0x58,0x73,0xD1,0x78,0x15,0x4B,0xC0,0x29,0x02,0xD1, \ +0x1F,0x76,0xD0,0x70,0x00,0xE0,0x18,0x76,0x12,0x48,0x00,0x7D,0x40,0x19,0xC1, \ +0x1F,0x09,0x39,0xC9,0x7B,0xFF,0x29,0x00,0xD0,0x11,0x75,0x41,0x7B,0xFF,0x29, \ +0x00,0xD0,0x51,0x75,0x40,0x30,0x80,0x78,0xFF,0x28,0x02,0xD0,0x81,0x07,0x00, \ +0xD1,0x50,0x74,0x50,0x78,0x09,0x49,0x40,0x09,0x80,0x07,0x80,0x0F,0x08,0x70, \ +0xF0,0xBC,0xF7,0x46,0x58,0x0A,0x00,0x02,0xCC,0x07,0x00,0x02,0x10,0x01,0x00, \ +0x02,0xDC,0x07,0x00,0x02,0x44,0x09,0x00,0x02,0x00,0x00,0x00,0x02,0x92,0x01, \ +0x00,0x02,0x02,0x79,0x41,0x79,0x12,0x02,0x11,0x43,0xC2,0x78,0x12,0x04,0x11, \ +0x43,0x82,0x78,0x12,0x06,0x0A,0x43,0x01,0x21,0x89,0x06,0x8A,0x61,0x42,0x78, \ +0x00,0x78,0x00,0x02,0x10,0x43,0xC8,0x61,0xF7,0x46,0x00,0xB5,0x0C,0x49,0x0D, \ +0x48,0x41,0x61,0x23,0x21,0x81,0x61,0x00,0x22,0x01,0x05,0x0A,0x61,0xC2,0x01, \ +0x42,0x60,0x05,0x22,0xC2,0x60,0x08,0x4A,0x82,0x62,0xF2,0x22,0x82,0x60,0x32, \ +0x22,0x4A,0x61,0xCA,0x68,0xC9,0x6B,0x00,0x68,0x00,0x21,0x00,0x20,0x00,0xF0, \ +0x07,0xF8,0x00,0xBD,0x04,0x90,0x00,0x00,0x40,0x00,0x00,0x04,0x81,0xFF,0x00, \ +0x00,0x02,0x1C,0x01,0x20,0x80,0x06,0x82,0x62,0x41,0x62,0xF7,0x46,0x80,0xB5, \ +0x1D,0x48,0x20,0x23,0x81,0x69,0x1D,0x4F,0x99,0x43,0x81,0x61,0x1B,0x48,0x81, \ +0x78,0x1C,0x48,0x00,0x29,0x0F,0xD0,0x01,0x7D,0x04,0x29,0x0C,0xD0,0x01,0x21, \ +0xC1,0x77,0x03,0x21,0x41,0x77,0xF8,0xF7,0xA5,0xFA,0x39,0x88,0x89,0x02,0x09, \ +0x1A,0x06,0x20,0xF8,0xF7,0xC3,0xFA,0x80,0xBD,0xF9,0x1D,0x29,0x31,0x0A,0x79, \ +0x02,0x2A,0xF9,0xD1,0xC2,0x1D,0x49,0x32,0x92,0x79,0x05,0x2A,0xF4,0xD1,0x49, \ +0x79,0x01,0x29,0xF1,0xDD,0xC7,0x1D,0x69,0x37,0xB8,0x78,0x01,0x28,0x04,0xD1, \ +0x00,0x20,0xFF,0xF7,0x91,0xFE,0x00,0x20,0xB8,0x70,0xB8,0x78,0x40,0x08,0x40, \ +0x00,0xB8,0x70,0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0x80,0xBD,0x80,0x00, \ +0x00,0x04,0x04,0x01,0x00,0x02,0x80,0x00,0x00,0x02,0x44,0x09,0x00,0x02,0x80, \ +0xB5,0xF8,0xF7,0x8F,0xFD,0x06,0x48,0x01,0x21,0x41,0x60,0x00,0x27,0x47,0x77, \ +0x01,0x20,0xF6,0xF7,0xF9,0xFC,0x03,0x48,0x07,0x83,0x87,0x82,0x80,0xBD,0x00, \ +0x00,0x44,0x09,0x00,0x02,0x20,0x00,0x20,0x0F,0x80,0xB5,0x0F,0x48,0x40,0x23, \ +0x81,0x69,0x0E,0x4F,0x99,0x43,0x81,0x61,0xF8,0x69,0x9B,0x01,0x18,0x43,0xF8, \ +0x61,0x14,0x20,0xF6,0xF7,0xCA,0xFC,0xF8,0x69,0x0A,0x4B,0x0A,0x49,0x18,0x40, \ +0xF8,0x61,0x01,0x20,0x08,0x72,0x4A,0x7A,0x06,0x2A,0x00,0xD0,0x48,0x72,0x08, \ +0x73,0x00,0x20,0xC8,0x72,0x05,0x49,0x08,0x70,0x80,0xBD,0x00,0x00,0x80,0x00, \ +0x00,0x04,0x40,0x00,0x00,0x04,0xFF,0xEF,0x00,0x00,0x44,0x09,0x00,0x02,0xE0, \ +0x01,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/fw-rfmd-0.90.2-140.h linux.22-ac2/drivers/usb/atmel/fw-rfmd-0.90.2-140.h --- linux.vanilla/drivers/usb/atmel/fw-rfmd-0.90.2-140.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/fw-rfmd-0.90.2-140.h 2003-08-13 21:00:21.000000000 +0100 @@ -0,0 +1,2392 @@ +/**************************************************************************** + * The following firmware has been taken (and reformatted slighly) from the * + * Atmel (atmelwlandriver) driver source. 
* + * * + * Target: AT76C503 with RFMD radio * + * Version: 0.90.2 #140 * + ****************************************************************************/ + +/**************************************************************************/ +/* */ +/* Copyright (c) 1999-2000 by Atmel Corporation */ +/* */ +/* This software is copyrighted by and is the sole property of Atmel */ +/* Corporation. All rights, title, ownership, or other interests */ +/* in the software remain the property of Atmel Corporation. This */ +/* software may only be used in accordance with the corresponding */ +/* license agreement. Any un-authorized use, duplication, transmission, */ +/* distribution, or disclosure of this software is expressly forbidden. */ +/* */ +/* This Copyright notice may not be removed or modified without prior */ +/* written consent of Atmel Corporation. */ +/* */ +/* Atmel Corporation, Inc. reserves the right to modify this software */ +/* without notice. */ +/* */ +/* Atmel Corporation. */ +/* 2325 Orchard Parkway literature@atmel.com */ +/* San Jose, CA 95131 http://www.atmel.com */ +/* */ +/**************************************************************************/ +/**************************************************************************/ +/* */ +/* Automatically generated FW file for AT76C502A */ +/* */ +/**************************************************************************/ + +#define FW_503RFMD_INTERNAL { \ +0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F, \ +0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1, \ +0x9F,0xE5,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0E,0x04,0xA0,0xE3,0x00, \ +0x10,0xA0,0xE3,0x81,0x11,0xA0,0xE1,0x00,0x10,0x81,0xE3,0x00,0x10,0x80,0xE5, \ +0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3,0x04,0x10,0x80,0xE5,0x0C,0x10,0x80, \ +0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90,0xE5,0x48,0xD1,0x9F,0xE5,0x49,0x14, \ +0x00,0xEB,0x44,0x11,0x9F,0xE5,0xD0,0x20,0x9F,0xE5,0xD1,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10,0xC1,0xE3,0x04,0x10,0x41,0xE2, \ +0x01,0xD0,0xA0,0xE1,0x00,0xA0,0xA0,0xE3,0x00,0xB0,0xA0,0xE3,0xB0,0x20,0x9F, \ +0xE5,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10, \ +0xC1,0xE3,0x04,0x10,0x41,0xE2,0x01,0xD0,0xA0,0xE1,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x84,0x30,0x9F,0xE5,0x00,0x10,0x83,0xE5,0x01,0xD0,0xA0,0xE1, \ +0x74,0x00,0x9F,0xE5,0x01,0x00,0x80,0xE3,0x0F,0xE0,0xA0,0xE1,0x10,0xFF,0x2F, \ +0xE1,0x00,0xA0,0x00,0x47,0x64,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x04,0x00, \ +0x80,0xE2,0x6C,0x10,0x9F,0xE5,0x6C,0x30,0x9F,0xE5,0x5C,0x20,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0x00,0x20,0x83,0xE5,0x02,0x00,0x80,0xE0,0x5C,0x10,0x9F,0xE5, \ +0x00,0x20,0xA0,0xE3,0x00,0x20,0x81,0xE5,0x44,0x20,0x9F,0xE5,0x00,0x00,0x82, \ +0xE5,0x1E,0xFF,0x2F,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF, \ +0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x77,0x0F,0x00,0xEA,0x02, \ +0xF0,0xB0,0xF9,0x22,0x48,0x87,0x46,0x90,0x0F,0x00,0xEA,0x01,0xF0,0xC4,0xFF, \ +0x20,0x48,0x87,0x46,0xB5,0x01,0x00,0x00,0xDC,0x03,0x00,0x02,0x00,0x02,0x00, \ +0x00,0x00,0x02,0x00,0x00,0x00,0x01,0x00,0x00,0x60,0x04,0x00,0x02,0x80,0x04, \ +0x00,0x02,0x84,0x04,0x00,0x02,0x88,0x04,0x00,0x02,0xFE,0xFF,0xFF,0xEA,0xFE, \ +0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0,0xE3, \ +0x0E,0xF0,0xA0,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0, \ +0xE3,0x0E,0xF0,0xA0,0xE1,0x00,0x00,0xA0,0xE3,0x0E,0xF0,0xA0,0xE1,0x20,0x00, \ 
+0x00,0x00,0x04,0x01,0x00,0x00,0x08,0x01,0x00,0x00,0x0C,0x01,0x00,0x00,0x10, \ +0x01,0x00,0x00,0x14,0x01,0x00,0x00,0x18,0x01,0x00,0x00,0x24,0x01,0x00,0x00, \ +0x00,0x60,0x00,0x01,0x00,0x57,0x00,0x01,0xED,0x3F,0x00,0x00,0xDD,0x40,0x00, \ +0x00,0x00,0xB5,0x03,0xF0,0x73,0xFE,0x00,0x20,0x00,0xBD,0xF0,0xB5,0x86,0xB0, \ +0x07,0x1C,0x00,0x21,0x04,0x91,0xFF,0x21,0x01,0x22,0x91,0x31,0x01,0x20,0x03, \ +0x90,0x01,0x91,0x05,0x92,0x02,0x92,0x50,0x4A,0x52,0xA1,0x50,0x48,0x01,0x23, \ +0x00,0x97,0x04,0xF0,0x49,0xF8,0x00,0x21,0x04,0x91,0xFF,0x21,0x91,0x31,0x01, \ +0x22,0x05,0x92,0xFB,0x1D,0xFF,0x33,0x03,0x22,0x03,0x20,0x8A,0x33,0x00,0x93, \ +0x03,0x90,0x02,0x92,0x01,0x91,0x4C,0xA1,0x4A,0x4A,0x4A,0x48,0x02,0x23,0x04, \ +0xF0,0x34,0xF8,0x4C,0x48,0x4D,0xA1,0x04,0xF0,0xA6,0xF8,0x4F,0x48,0x50,0xA1, \ +0x04,0xF0,0xA2,0xF8,0x53,0x48,0x54,0xA1,0x04,0xF0,0x9E,0xF8,0x57,0x48,0x58, \ +0xA1,0x04,0xF0,0x9A,0xF8,0x5B,0x48,0x5C,0xA1,0x04,0xF0,0x96,0xF8,0x5F,0x48, \ +0x04,0x26,0x06,0x70,0x5F,0x48,0x00,0x25,0x05,0x60,0x02,0x24,0x5E,0x4F,0x06, \ +0x21,0xBC,0x63,0x7D,0x74,0xBD,0x74,0x07,0x20,0x40,0x06,0xFD,0x74,0x81,0x61, \ +0x45,0x61,0xC1,0x69,0xC3,0x0B,0x99,0x43,0xC1,0x61,0xC1,0x69,0x10,0x23,0x99, \ +0x43,0xC1,0x61,0x56,0x48,0x85,0x61,0x56,0x48,0x05,0x70,0x03,0xF0,0xC5,0xFB, \ +0x78,0x7C,0x00,0x28,0x02,0xD1,0x07,0x20,0x40,0x06,0x86,0x61,0x01,0x21,0xF8, \ +0x1D,0x29,0x30,0x01,0x73,0x50,0x49,0xCA,0x69,0x0B,0x0C,0x1A,0x43,0xCA,0x61, \ +0xCA,0x69,0x1B,0x23,0x9A,0x43,0xCA,0x61,0xCA,0x69,0x04,0x23,0x9A,0x43,0xCA, \ +0x61,0x4A,0x4A,0xCB,0x69,0x12,0x68,0xD2,0x43,0x1A,0x40,0xCA,0x61,0x3A,0x6B, \ +0xCB,0x69,0xD2,0x43,0x1A,0x40,0xCA,0x61,0x7A,0x6B,0xCB,0x69,0x1A,0x43,0xCA, \ +0x61,0xCA,0x69,0x0B,0x0C,0x9A,0x43,0x07,0x27,0x7F,0x06,0xCA,0x61,0xB9,0x69, \ +0x01,0x23,0x19,0x43,0xB9,0x61,0x84,0x73,0x03,0xF0,0x92,0xFC,0x03,0xF0,0x02, \ +0xFD,0x0A,0x20,0x00,0xF0,0xA9,0xF8,0x03,0xF0,0xF5,0xFC,0x3A,0x48,0x05,0x70, \ +0x45,0x70,0x39,0x48,0x05,0x60,0x39,0x48,0x05,0x60,0x02,0xF0,0x9C,0xFD,0x00, \ +0x20,0x00,0xF0,0x73,0xF8,0x37,0x48,0x37,0x49,0x45,0x73,0x09,0x79,0x60,0x30, \ +0x01,0x23,0x41,0x73,0xF8,0x69,0xDB,0x03,0x18,0x43,0xF8,0x61,0x06,0xB0,0xF0, \ +0xBD,0xD5,0x14,0x00,0x00,0x9C,0x06,0x00,0x02,0x54,0x78,0x20,0x74,0x68,0x72, \ +0x65,0x61,0x64,0x00,0x00,0x00,0x59,0x28,0x00,0x00,0x2C,0x07,0x00,0x02,0x4D, \ +0x67,0x6D,0x20,0x74,0x68,0x72,0x65,0x61,0x64,0x00,0x00,0xBC,0x07,0x00,0x02, \ +0x54,0x78,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73, \ +0x00,0xDC,0x07,0x00,0x02,0x4D,0x67,0x6D,0x20,0x73,0x74,0x61,0x74,0x75,0x73, \ +0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x00,0x00,0x00,0xFC,0x07,0x00,0x02,0x54, \ +0x58,0x20,0x47,0x4F,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61, \ +0x67,0x73,0x00,0x00,0x1C,0x08,0x00,0x02,0x4D,0x4E,0x47,0x20,0x47,0x4F,0x20, \ +0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x3C,0x08, \ +0x00,0x02,0x50,0x73,0x50,0x6F,0x6C,0x6C,0x20,0x73,0x74,0x61,0x74,0x75,0x73, \ +0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x77,0x02,0x00,0x02,0x9C,0x02,0x00,0x02, \ +0x74,0x01,0x00,0x02,0x60,0x06,0x00,0x02,0x75,0x02,0x00,0x02,0x40,0x00,0x00, \ +0x04,0xE8,0x02,0x00,0x02,0xA8,0x0A,0x00,0x02,0x98,0x02,0x00,0x02,0x94,0x02, \ +0x00,0x02,0xE0,0x05,0x00,0x02,0xC8,0x00,0x00,0x02,0x80,0xB5,0x10,0x4F,0x01, \ +0x28,0x03,0xD1,0x00,0xF0,0x6A,0xF8,0xB8,0x61,0xF8,0x61,0x0D,0x48,0x81,0x89, \ +0x38,0x1C,0xC2,0x1D,0xB9,0x63,0x01,0x21,0x39,0x72,0xB9,0x74,0xB9,0x73,0x00, \ +0x27,0xC7,0x62,0x07,0x60,0x47,0x72,0x07,0x73,0x79,0x32,0xD7,0x61,0xC1,0x72, \ +0x41,0x60,0x02,0x20,0x01,0xF0,0xBF,0xFD,0x04,0x49,0x48,0x70,0x0F,0x70,0x4F, \ 
+0x80,0x80,0xBD,0xE0,0x05,0x00,0x02,0xD8,0x00,0x00,0x02,0x2C,0x01,0x00,0x02, \ +0xC3,0x00,0x18,0x18,0x80,0x00,0x80,0x08,0x01,0xD0,0x01,0x38,0xFD,0xD1,0xF7, \ +0x46,0x03,0x49,0x0F,0x20,0x00,0x06,0x81,0x80,0x02,0x49,0x81,0x81,0xF7,0x46, \ +0x00,0x00,0xE8,0xE8,0x00,0x00,0x13,0x13,0x00,0x00,0x02,0x79,0x41,0x79,0x12, \ +0x02,0x11,0x43,0xC2,0x78,0x12,0x04,0x11,0x43,0x82,0x78,0x12,0x06,0x0A,0x43, \ +0x01,0x21,0x89,0x06,0x8A,0x61,0x42,0x78,0x00,0x78,0x00,0x02,0x10,0x43,0xC8, \ +0x61,0xF7,0x46,0x00,0xB5,0x0C,0x49,0x0D,0x48,0x41,0x61,0x23,0x21,0x81,0x61, \ +0x00,0x22,0x01,0x05,0x0A,0x61,0xC2,0x01,0x42,0x60,0x05,0x22,0xC2,0x60,0x08, \ +0x4A,0x82,0x62,0xF2,0x22,0x82,0x60,0x32,0x22,0x4A,0x61,0xCA,0x68,0xC9,0x6B, \ +0x00,0x68,0x00,0x21,0x00,0x20,0x00,0xF0,0x0B,0xF8,0x00,0xBD,0x04,0x90,0x00, \ +0x00,0x40,0x00,0x00,0x04,0x81,0xFF,0x00,0x00,0x01,0x20,0x80,0x06,0x40,0x6A, \ +0xF7,0x46,0x02,0x1C,0x01,0x20,0x80,0x06,0x82,0x62,0x41,0x62,0xF7,0x46,0x01, \ +0x1C,0x06,0x48,0x04,0xD0,0x41,0x68,0xC3,0x01,0x19,0x43,0x41,0x60,0xF7,0x46, \ +0x41,0x68,0x01,0x23,0x5B,0x03,0x99,0x43,0x41,0x60,0xF7,0x46,0x40,0x00,0x00, \ +0x04,0x80,0xB5,0x13,0x49,0x15,0x4F,0x08,0x78,0x42,0x01,0x12,0x48,0x42,0x70, \ +0x12,0x4A,0x52,0x7A,0x00,0x2A,0x0B,0xD0,0x09,0x78,0x00,0x29,0x08,0xDD,0x41, \ +0x78,0x10,0x23,0x19,0x43,0x41,0x70,0x48,0x21,0x79,0x81,0x18,0x21,0x39,0x81, \ +0x03,0xE0,0x90,0x21,0x79,0x81,0x30,0x21,0x39,0x81,0x41,0x78,0x01,0x20,0x00, \ +0xF0,0x5B,0xF8,0x78,0x89,0x39,0x89,0x40,0x18,0x06,0x49,0x08,0x80,0x01,0xF0, \ +0xD4,0xFC,0x80,0xBD,0x00,0x00,0x69,0x01,0x00,0x02,0xF4,0x09,0x00,0x02,0x1C, \ +0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x64,0x01,0x00,0x02,0x01,0x1C,0x06,0x48, \ +0x04,0xD0,0x41,0x7C,0x01,0x23,0x19,0x43,0x41,0x74,0xF7,0x46,0x41,0x7C,0xFE, \ +0x23,0x19,0x40,0x41,0x74,0xF7,0x46,0x00,0x00,0xF4,0x09,0x00,0x02,0xF0,0xB4, \ +0x07,0x24,0x64,0x06,0xA2,0x69,0x04,0x23,0x9A,0x43,0xA2,0x61,0xF3,0x22,0x12, \ +0x05,0x93,0x68,0x40,0x23,0xD3,0x60,0x17,0x69,0xBB,0x08,0xFC,0xD3,0x13,0x69, \ +0x5B,0x08,0xFC,0xD3,0x93,0x68,0x80,0x23,0x03,0x43,0xD3,0x60,0x17,0x69,0xBB, \ +0x08,0xFC,0xD3,0x13,0x69,0x5B,0x08,0xFC,0xD3,0x17,0x1C,0x92,0x68,0x00,0x22, \ +0x00,0x29,0x0D,0xD9,0x0A,0x4D,0x83,0x18,0xEB,0x5C,0xFB,0x60,0x3E,0x69,0xB3, \ +0x08,0xFC,0xD3,0x3B,0x69,0x5B,0x08,0xFC,0xD3,0x01,0x32,0x8A,0x42,0xBB,0x68, \ +0xF2,0xD3,0xA0,0x69,0x04,0x23,0x18,0x43,0xA0,0x61,0xF0,0xBC,0xF7,0x46,0x00, \ +0x00,0xF4,0x09,0x00,0x02,0x90,0xB4,0x07,0x27,0x7F,0x06,0xBA,0x69,0x04,0x23, \ +0x9A,0x43,0xBA,0x61,0xF3,0x22,0x12,0x05,0x93,0x68,0x40,0x23,0xD3,0x60,0x14, \ +0x69,0xA3,0x08,0xFC,0xD3,0x13,0x69,0x5B,0x08,0xFC,0xD3,0x93,0x68,0xD0,0x60, \ +0x10,0x69,0x80,0x08,0xFC,0xD3,0x10,0x1C,0x02,0x69,0x52,0x08,0xFC,0xD3,0x82, \ +0x68,0xC1,0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0x01,0x69,0x49,0x08,0xFC,0xD3, \ +0x80,0x68,0x04,0x23,0xB8,0x69,0x18,0x43,0xB8,0x61,0x90,0xBC,0xF7,0x46,0x80, \ +0xB4,0x07,0x22,0x52,0x06,0x91,0x69,0x04,0x23,0x99,0x43,0x91,0x61,0xF3,0x21, \ +0x09,0x05,0x8B,0x68,0x40,0x23,0xCB,0x60,0x0F,0x69,0xBB,0x08,0xFC,0xD3,0x0B, \ +0x69,0x5B,0x08,0xFC,0xD3,0x8B,0x68,0xC8,0x60,0x08,0x69,0x80,0x08,0xFC,0xD3, \ +0x08,0x69,0x40,0x08,0xFC,0xD3,0x97,0x69,0x04,0x23,0x3B,0x43,0x88,0x68,0x93, \ +0x61,0x97,0x69,0x04,0x23,0x9F,0x43,0x97,0x61,0x41,0x20,0xC8,0x60,0x08,0x69, \ +0x80,0x08,0xFC,0xD3,0x08,0x1C,0x01,0x69,0x49,0x08,0xFC,0xD3,0x81,0x68,0xFF, \ +0x21,0xC1,0x60,0x01,0x69,0x49,0x08,0xFC,0xD3,0x91,0x69,0x04,0x23,0x19,0x43, \ +0x80,0x68,0x91,0x61,0x80,0xBC,0xF7,0x46,0x00,0x00,0xC1,0x0A,0x01,0xD3,0x00, \ +0x20,0xF7,0x46,0xFF,0x22,0x01,0x32,0x02,0x40,0x01,0x21,0x00,0x2A,0x01,0xD0, \ 
+0x08,0x1C,0xF7,0x46,0x80,0x0A,0x01,0xD3,0x08,0x1C,0xF7,0x46,0x02,0x20,0xF7, \ +0x46,0x80,0xB4,0x00,0x2A,0x0A,0xD9,0x07,0x78,0x0B,0x78,0x01,0x31,0x01,0x30, \ +0x9F,0x42,0x02,0xD0,0x01,0x20,0x80,0xBC,0xF7,0x46,0x01,0x3A,0xF4,0xD1,0x00, \ +0x20,0xF9,0xE7,0x00,0x2A,0x05,0xD9,0x03,0x78,0x01,0x30,0x0B,0x70,0x01,0x31, \ +0x01,0x3A,0xF9,0xD1,0xF7,0x46,0xF8,0xB5,0x0C,0x1C,0x1C,0x49,0x07,0x1C,0x1C, \ +0x4E,0x1C,0x48,0x31,0x60,0x80,0x6C,0x00,0x25,0xA8,0x42,0x19,0xD9,0x06,0x22, \ +0x38,0x1C,0x31,0x68,0xFF,0xF7,0xD7,0xFF,0x00,0x90,0x00,0x98,0x00,0x28,0x08, \ +0xD1,0x30,0x68,0xC1,0x88,0xA1,0x42,0x01,0xD1,0x01,0x20,0xF8,0xBD,0xC4,0x80, \ +0x00,0x20,0xFB,0xE7,0x30,0x68,0x01,0x35,0x08,0x30,0x30,0x60,0x0E,0x48,0x80, \ +0x6C,0xA8,0x42,0xE5,0xD8,0x0C,0x48,0x81,0x6C,0x01,0x31,0x81,0x64,0x81,0x6C, \ +0x07,0x29,0x03,0xD9,0x07,0x49,0x31,0x60,0x08,0x21,0x81,0x64,0x39,0x88,0x30, \ +0x68,0x01,0x80,0x79,0x88,0x41,0x80,0xB9,0x88,0x81,0x80,0x30,0x68,0xC4,0x80, \ +0x00,0x20,0xDD,0xE7,0xA4,0x09,0x00,0x02,0x4C,0x01,0x00,0x02,0xE0,0x05,0x00, \ +0x02,0x02,0x78,0x11,0x43,0x01,0x70,0xF7,0x46,0x00,0x78,0x08,0x40,0x01,0xD0, \ +0x01,0x20,0xF7,0x46,0x00,0x20,0xF7,0x46,0x06,0x49,0x4A,0x6C,0x12,0x01,0x02, \ +0x70,0x4A,0x6C,0x12,0x01,0x12,0x0A,0x42,0x70,0x48,0x6C,0x01,0x30,0x48,0x64, \ +0xF7,0x46,0x00,0x00,0xE0,0x05,0x00,0x02,0xB0,0xB4,0x00,0x2A,0x16,0xD1,0x0D, \ +0x4A,0x0F,0x06,0x92,0x7A,0x3F,0x0E,0xBA,0x42,0x00,0xDC,0x11,0x1C,0x4F,0x00, \ +0x0B,0x49,0x09,0x4A,0xCD,0x88,0xD4,0x5B,0x64,0x19,0xE4,0x18,0x04,0x70,0xD2, \ +0x5B,0xC9,0x88,0x51,0x18,0xC9,0x18,0x09,0x0A,0x41,0x70,0xB0,0xBC,0xF7,0x46, \ +0x00,0x21,0x01,0x70,0x41,0x70,0xF9,0xE7,0x1C,0x01,0x00,0x02,0x50,0x01,0x00, \ +0x02,0x18,0x00,0x00,0x02,0xF8,0xB5,0x4A,0x48,0x00,0x68,0xFF,0xF7,0x51,0xFF, \ +0x07,0x1C,0x48,0x48,0x00,0x68,0x44,0x68,0x20,0x78,0x06,0x07,0x36,0x0F,0x02, \ +0x2F,0x00,0xD1,0xF8,0xBD,0x45,0x4D,0x28,0x79,0x02,0x28,0x0A,0xD1,0xE0,0x1D, \ +0x09,0x30,0x06,0x22,0x42,0x49,0xFF,0xF7,0x4F,0xFF,0x00,0x90,0x00,0x98,0x00, \ +0x28,0x00,0xD1,0xEF,0xE7,0x30,0x06,0x00,0x0E,0x08,0x28,0x6B,0xD1,0x3D,0x48, \ +0xC0,0x79,0x05,0x28,0x00,0xD0,0xE6,0xE7,0x28,0x79,0x3B,0x4E,0x02,0x28,0x11, \ +0xD1,0xE0,0x1D,0x03,0x30,0x06,0x22,0x31,0x1C,0xFF,0xF7,0x37,0xFF,0x00,0x90, \ +0x00,0x98,0x01,0x28,0x00,0xD1,0xD7,0xE7,0x60,0x78,0x81,0x08,0x00,0xD2,0xD3, \ +0xE7,0x40,0x08,0x00,0xD3,0xD0,0xE7,0x28,0x79,0x01,0x28,0x10,0xD1,0xE0,0x1D, \ +0x09,0x30,0x06,0x22,0x31,0x1C,0xFF,0xF7,0x22,0xFF,0x00,0x90,0x00,0x98,0x01, \ +0x28,0x00,0xD1,0xC2,0xE7,0x60,0x78,0x81,0x08,0x01,0xD2,0x40,0x08,0x00,0xD3, \ +0xBC,0xE7,0x20,0x78,0x08,0x28,0x34,0xD1,0xA1,0x7F,0xE0,0x1D,0x09,0x30,0x88, \ +0x29,0x02,0xD1,0xC0,0x7B,0x8E,0x28,0x0E,0xD0,0x22,0x48,0x01,0x78,0x00,0x29, \ +0x06,0xD0,0xC0,0x78,0x00,0x28,0x07,0xD0,0x60,0x78,0xC0,0x09,0x04,0xD2,0xA6, \ +0xE7,0x60,0x78,0xC0,0x09,0x00,0xD3,0xA2,0xE7,0x28,0x79,0x02,0x28,0x1A,0xD1, \ +0x60,0x1C,0x04,0x21,0x05,0x1C,0xFF,0xF7,0x53,0xFF,0x06,0x1C,0x20,0x21,0x28, \ +0x1C,0xFF,0xF7,0x4E,0xFF,0x31,0x1C,0x3A,0x1C,0x03,0xF0,0xFC,0xF9,0x00,0x2F, \ +0x0A,0xD1,0x12,0x48,0x41,0x68,0x04,0x29,0x06,0xD1,0x01,0x25,0x45,0x60,0x01, \ +0x20,0x01,0xF0,0xBF,0xFB,0x0F,0x48,0x05,0x70,0x21,0x78,0x38,0x1C,0x00,0xF0, \ +0x3B,0xFD,0x05,0xE0,0xFF,0xE7,0x00,0x28,0x02,0xD1,0x38,0x1C,0x00,0xF0,0xF4, \ +0xFB,0x78,0xE7,0x00,0x00,0x00,0x02,0x00,0x02,0x10,0x00,0x00,0x02,0xC8,0x00, \ +0x00,0x02,0x74,0x00,0x00,0x02,0x50,0x06,0x00,0x02,0xA6,0x00,0x00,0x02,0x30, \ +0x00,0x00,0x02,0xE0,0x05,0x00,0x02,0xF8,0x01,0x00,0x02,0x08,0xB5,0x00,0x21, \ +0x00,0x91,0x00,0x28,0x0C,0xD1,0x0B,0x48,0x00,0x68,0x40,0x68,0x81,0x7D,0xC2, \ 
+0x7D,0x12,0x02,0x11,0x43,0x09,0x04,0x09,0x0C,0x0A,0x30,0xFF,0xF7,0xC8,0xFE, \ +0x00,0x90,0x00,0x98,0x01,0x28,0x03,0xD1,0x04,0x48,0x80,0x79,0x00,0x28,0x01, \ +0xD0,0x00,0xF0,0x05,0xF8,0x08,0xBD,0x10,0x00,0x00,0x02,0x1C,0x01,0x00,0x02, \ +0x80,0xB5,0x05,0x48,0x00,0x78,0x80,0x09,0x04,0xD3,0x04,0x4F,0x38,0x68,0x02, \ +0xF0,0xDF,0xF9,0x38,0x60,0x80,0xBD,0x00,0x00,0x7B,0x01,0x00,0x02,0x10,0x00, \ +0x00,0x02,0xF0,0xB5,0x84,0xB0,0x00,0x20,0x7D,0x25,0x2D,0x01,0x01,0x26,0xB6, \ +0x06,0x00,0x90,0xB0,0x68,0x00,0x0B,0xFC,0x24,0x04,0x40,0xF9,0x48,0xC7,0x6A, \ +0x00,0x2F,0x0E,0xD1,0x00,0x20,0xFF,0xF7,0x7D,0xFD,0xF6,0x48,0xC1,0x69,0x83, \ +0x01,0x19,0x43,0xC1,0x61,0xC1,0x69,0xF4,0x4B,0x19,0x40,0xC1,0x61,0xF0,0x68, \ +0x04,0xB0,0xF0,0xBD,0xF2,0x49,0xA0,0x08,0x08,0x5C,0x00,0x28,0x04,0xD0,0x00, \ +0x20,0xFF,0xF7,0x69,0xFD,0xF0,0x68,0xF3,0xE7,0xEE,0x49,0x04,0x20,0x03,0x91, \ +0xED,0x49,0x20,0x40,0x02,0x91,0x6F,0xD0,0x01,0x97,0x04,0x20,0xFF,0xF7,0x10, \ +0xFE,0xEA,0x49,0x08,0x71,0xA0,0x09,0x01,0xD3,0x14,0x21,0x00,0xE0,0x0E,0x21, \ +0xE8,0x48,0x02,0x22,0x01,0xF0,0xF7,0xFE,0x00,0x28,0x01,0xD1,0xF0,0x68,0xD9, \ +0xE7,0x00,0x98,0xF2,0x68,0x10,0x43,0x31,0x1C,0x01,0xE0,0xCA,0x68,0x10,0x43, \ +0x42,0x09,0x03,0xD2,0x2A,0x1C,0x01,0x3D,0x00,0x2A,0xF7,0xD1,0x10,0x23,0x98, \ +0x43,0x07,0x1C,0x00,0x2D,0x01,0xDC,0x38,0x1C,0xC5,0xE7,0xD9,0x49,0x08,0x79, \ +0x0A,0x28,0x09,0xD0,0x14,0x28,0x0B,0xD0,0x37,0x28,0x0D,0xD0,0x6E,0x28,0x0F, \ +0xD1,0x02,0x99,0x03,0x20,0x88,0x73,0x15,0xE0,0x02,0x99,0x00,0x20,0x88,0x73, \ +0x11,0xE0,0x02,0x99,0x01,0x20,0x88,0x73,0x0D,0xE0,0x02,0x99,0x02,0x20,0x88, \ +0x73,0x09,0xE0,0x01,0x98,0x14,0x28,0x03,0xD2,0x02,0x99,0x03,0x20,0x88,0x73, \ +0x02,0xE0,0x02,0x99,0x02,0x20,0x88,0x73,0xB8,0x09,0x09,0xD3,0xF8,0x08,0x07, \ +0xD2,0x01,0x20,0xFF,0xF7,0x0E,0xFD,0xC5,0x49,0xC1,0x20,0x08,0x60,0x38,0x1C, \ +0x95,0xE7,0xC3,0x4D,0x00,0x20,0x28,0x60,0xF8,0x0A,0x07,0xD3,0xB4,0x2C,0x12, \ +0xD0,0xC4,0x2C,0x1C,0xD0,0xD4,0x2C,0x01,0xD1,0x00,0xF0,0xCA,0xF9,0x68,0x68, \ +0x04,0x28,0x06,0xD1,0x00,0xF0,0xBB,0xF9,0x00,0x22,0x10,0x21,0xBA,0x48,0x03, \ +0xF0,0x60,0xFC,0x38,0x1C,0x7C,0xE7,0x0E,0xE0,0xAF,0x48,0x40,0x68,0x80,0x0B, \ +0xEE,0xD3,0x02,0x20,0x68,0x72,0x03,0x99,0xB4,0x48,0x06,0x22,0xFF,0xF7,0xF4, \ +0xFD,0xE6,0xE7,0x00,0xF0,0xAF,0xFA,0xE3,0xE7,0xB1,0x49,0x08,0x68,0x00,0x7A, \ +0x00,0x28,0x04,0xD0,0xAC,0x49,0xC4,0x20,0x08,0x60,0xF0,0x68,0x62,0xE7,0x01, \ +0x20,0xFF,0xF7,0xD3,0xFC,0x29,0x2F,0x0C,0xD2,0x07,0x20,0xFF,0xF7,0x82,0xFD, \ +0xA3,0x49,0xC8,0x71,0x0B,0x21,0x79,0x43,0xCF,0x08,0x03,0x25,0x00,0x0A,0x31, \ +0xD3,0x01,0x3F,0x2F,0xE0,0x04,0x20,0xFF,0xF7,0x75,0xFD,0x00,0x06,0x00,0x0E, \ +0x9C,0x49,0x0A,0x28,0x08,0x71,0x12,0xD0,0x14,0x28,0x13,0xD0,0x37,0x28,0x14, \ +0xD0,0x6E,0x28,0x17,0xD1,0x07,0x20,0xFF,0xF7,0x66,0xFD,0x95,0x49,0xC8,0x71, \ +0x0B,0x21,0x79,0x43,0xCF,0x08,0x03,0x25,0x00,0x0A,0x15,0xD3,0x01,0x3F,0x13, \ +0xE0,0xFF,0x08,0x00,0x25,0x10,0xE0,0xBF,0x08,0x01,0x25,0x0D,0xE0,0x0B,0x20, \ +0x78,0x43,0x07,0x09,0x02,0x25,0x08,0xE0,0x0B,0x20,0x78,0x43,0x8A,0x49,0xC7, \ +0x08,0xC8,0x79,0x03,0x25,0x00,0x0A,0x00,0xD3,0x01,0x3F,0x80,0x2C,0x01,0xD0, \ +0x50,0x2C,0x08,0xD1,0x03,0x20,0xFF,0xF7,0x41,0xFD,0x88,0x49,0x80,0x06,0x09, \ +0x68,0x80,0x0E,0x48,0x74,0x03,0xE0,0x85,0x49,0x00,0x20,0x09,0x68,0x48,0x74, \ +0x84,0x48,0x80,0x89,0x04,0x30,0xB8,0x42,0x01,0xD3,0x18,0x2F,0x0D,0xD8,0x7C, \ +0x49,0xC3,0x20,0x08,0x60,0x74,0x48,0xC1,0x69,0x83,0x01,0x19,0x43,0xC1,0x61, \ +0xC1,0x69,0x72,0x4B,0x19,0x40,0xC1,0x61,0xF0,0x68,0xFA,0xE6,0x78,0x48,0x02, \ +0x22,0x00,0x68,0x18,0x21,0x40,0x68,0x01,0xF0,0x0D,0xFE,0x00,0x28,0x01,0xD1, \ 
+0xF0,0x68,0xEF,0xE6,0x00,0x99,0xF3,0x68,0x7D,0x20,0xC0,0x00,0x19,0x43,0x32, \ +0x1C,0x03,0xE0,0x01,0x22,0x92,0x06,0xD2,0x68,0x11,0x43,0x4A,0x09,0x03,0xD2, \ +0x02,0x1C,0x01,0x38,0x00,0x2A,0xF5,0xD1,0x10,0x23,0x99,0x43,0x0E,0x1C,0x00, \ +0x28,0x01,0xDC,0x30,0x1C,0xD7,0xE6,0x69,0x48,0x06,0x60,0xF0,0x0A,0x02,0xD3, \ +0x62,0x49,0x01,0x20,0x48,0x72,0x02,0x98,0x06,0x22,0x85,0x73,0x62,0x48,0x03, \ +0x99,0x00,0x68,0x40,0x68,0x0A,0x30,0xFF,0xF7,0x4A,0xFD,0x5E,0x48,0x00,0x68, \ +0x87,0x81,0x5D,0x48,0x00,0x27,0x00,0x68,0x85,0x73,0x58,0x48,0x07,0x60,0x5A, \ +0x48,0x01,0x68,0x48,0x68,0x42,0x78,0x05,0x1C,0xD2,0x09,0x2E,0xD2,0x89,0x89, \ +0x02,0x22,0x18,0x30,0x18,0x39,0x01,0xF0,0xCA,0xFD,0x00,0x28,0x05,0xD1,0x4F, \ +0x49,0x01,0x20,0x88,0x73,0x0F,0x73,0x30,0x1C,0xA8,0xE6,0x4C,0x4F,0x03,0x20, \ +0x38,0x73,0x02,0x20,0xB8,0x73,0x80,0x2C,0x17,0xD1,0x4F,0x48,0xC1,0x1D,0x29, \ +0x31,0x09,0x79,0x01,0x29,0x11,0xD1,0xF9,0x1D,0x69,0x31,0xC9,0x79,0x05,0x29, \ +0x0C,0xD1,0xC1,0x1D,0x07,0x31,0xE8,0x1D,0x06,0x22,0x09,0x30,0xFF,0xF7,0x03, \ +0xFD,0x00,0x28,0x03,0xD1,0x01,0x20,0xF9,0x1D,0x99,0x31,0x48,0x73,0x30,0x1C, \ +0x87,0xE6,0x43,0x4C,0x21,0x78,0x00,0x29,0x05,0xD1,0x3A,0x49,0x01,0x20,0x88, \ +0x73,0x0F,0x73,0x30,0x1C,0x7D,0xE6,0x02,0x22,0x04,0x21,0x18,0x30,0x38,0x4D, \ +0x01,0xF0,0x91,0xFD,0x00,0x28,0x01,0xD1,0x30,0x1C,0x73,0xE6,0x7D,0x20,0xC0, \ +0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x31,0x43,0x01,0xE0,0xD3,0x68,0x19,0x43, \ +0x4B,0x09,0x03,0xD2,0x03,0x1C,0x01,0x38,0x00,0x2B,0xF7,0xD1,0x10,0x23,0x99, \ +0x43,0x0E,0x1C,0x00,0x28,0x01,0xDC,0x30,0x1C,0x5D,0xE6,0x28,0x68,0x40,0x68, \ +0xC1,0x1D,0x11,0x31,0x40,0x7E,0x0A,0x78,0x00,0x02,0x10,0x43,0x8A,0x78,0xC9, \ +0x78,0x12,0x04,0x10,0x43,0x89,0x09,0x0A,0x06,0x12,0x0E,0xE3,0x1D,0x39,0x33, \ +0x1B,0x78,0x21,0x1C,0x03,0x24,0x64,0x06,0x01,0x2B,0x14,0xD1,0x0D,0x23,0x5A, \ +0x43,0x51,0x18,0x0A,0x7B,0x12,0x06,0x10,0x43,0x20,0x60,0x8A,0x7B,0x48,0x7B, \ +0x12,0x02,0x10,0x43,0xCA,0x7B,0x09,0x7C,0x12,0x04,0x10,0x43,0x09,0x06,0x08, \ +0x43,0x60,0x60,0x01,0x20,0xA0,0x60,0x4A,0xE0,0x02,0x2B,0x48,0xD1,0x0D,0x23, \ +0x5A,0x43,0x51,0x18,0x0A,0x7B,0x12,0x06,0x10,0x43,0x20,0x60,0x8A,0x7B,0x48, \ +0x7B,0x12,0x02,0x10,0x43,0xCA,0x7B,0x12,0x04,0x10,0x43,0x0A,0x7C,0x12,0x06, \ +0x10,0x43,0x60,0x60,0x1D,0xE0,0x40,0x00,0x00,0x04,0xFF,0xEF,0x00,0x00,0xB4, \ +0x01,0x00,0x02,0x78,0x09,0x00,0x02,0x40,0x06,0x00,0x02,0xF4,0x09,0x00,0x02, \ +0x90,0x0A,0x00,0x02,0xE0,0x05,0x00,0x02,0x3C,0x08,0x00,0x02,0x9A,0x0A,0x00, \ +0x02,0x10,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0xFC,0x01,0x00,0x02,0x98,0x00, \ +0x00,0x02,0x30,0x00,0x00,0x02,0x8A,0x7C,0x48,0x7C,0x12,0x02,0x10,0x43,0xCA, \ +0x7C,0x12,0x04,0x10,0x43,0x0A,0x7D,0x12,0x06,0x10,0x43,0x60,0x61,0x8A,0x7D, \ +0x48,0x7D,0x12,0x02,0x10,0x43,0xCA,0x7D,0x09,0x7E,0x12,0x04,0x10,0x43,0x09, \ +0x06,0x08,0x43,0xA0,0x61,0x81,0x20,0xA0,0x60,0x28,0x68,0x0E,0x22,0x81,0x89, \ +0x40,0x68,0x18,0x30,0x20,0x39,0x01,0xF0,0xF8,0xFC,0x00,0x28,0x06,0xD1,0x08, \ +0x49,0x01,0x20,0x88,0x73,0x0F,0x73,0xA7,0x60,0x30,0x1C,0xD5,0xE5,0x28,0x68, \ +0x81,0x89,0x08,0x39,0x81,0x81,0x03,0x49,0x03,0x20,0x08,0x73,0x02,0x20,0x88, \ +0x73,0x30,0x1C,0xCA,0xE5,0xE0,0x05,0x00,0x02,0x00,0xB5,0x03,0x49,0x01,0x20, \ +0x48,0x60,0x01,0xF0,0x14,0xF9,0x00,0xBD,0x00,0x00,0xE0,0x05,0x00,0x02,0xF0, \ +0xB5,0x6E,0x4F,0x01,0x26,0x78,0x68,0x04,0x28,0x0C,0xD1,0x01,0x20,0x01,0xF0, \ +0x07,0xF9,0x7E,0x60,0x01,0x20,0xFF,0xF7,0x25,0xFB,0x00,0x22,0x01,0x21,0x68, \ +0x48,0x03,0xF0,0x8E,0xFA,0xF0,0xBD,0x78,0x68,0x02,0x28,0xFB,0xD1,0x01,0x20, \ +0x01,0xF0,0xF7,0xF8,0xFC,0x1D,0x79,0x34,0x7E,0x60,0x3A,0x1C,0x67,0x68,0x08, \ 
+0x23,0x78,0x78,0x60,0x4D,0x98,0x43,0x78,0x70,0x28,0x78,0x01,0x28,0x61,0xD1, \ +0x5E,0x49,0x08,0x78,0x00,0x28,0x01,0xD0,0x18,0x21,0x00,0xE0,0x1E,0x21,0xA2, \ +0x68,0x5B,0x4B,0x52,0x1A,0x12,0x04,0x1B,0x68,0x12,0x0C,0xD2,0x18,0x58,0x4B, \ +0x1A,0x60,0xA2,0x68,0x58,0x4B,0x51,0x1A,0x1A,0x88,0x51,0x1A,0x19,0x80,0x53, \ +0x49,0x01,0x30,0x08,0x70,0x08,0x78,0xB9,0x7D,0x00,0x07,0x00,0x0F,0xF0,0x23, \ +0x19,0x40,0x08,0x43,0xB8,0x75,0x78,0x78,0xC0,0x09,0x07,0xD3,0x4D,0x48,0x01, \ +0x68,0x08,0x39,0x01,0x60,0x4C,0x49,0x08,0x88,0x08,0x30,0x08,0x80,0x4B,0x48, \ +0x01,0x89,0x49,0x48,0x03,0x88,0xCA,0x1F,0x15,0x3A,0x9A,0x42,0x03,0xDA,0x08, \ +0x1F,0x48,0x49,0x08,0x80,0x09,0xE0,0x79,0x78,0x04,0x23,0x99,0x43,0x79,0x70, \ +0x00,0x88,0x44,0x49,0x18,0x30,0x08,0x80,0x02,0x20,0x28,0x70,0x78,0x78,0xC0, \ +0x09,0x03,0xD3,0x40,0x49,0x08,0x88,0x08,0x30,0x08,0x80,0x3E,0x49,0x3E,0x48, \ +0x09,0x88,0xA1,0x60,0x00,0x68,0x80,0x7D,0x00,0xF0,0x8F,0xFF,0xE0,0x60,0x3A, \ +0x48,0x3B,0x49,0x00,0x68,0x80,0x7D,0x08,0x70,0x00,0xF0,0xE7,0xFF,0x38,0x49, \ +0x08,0x78,0x03,0x28,0x05,0xD1,0x2D,0x48,0x40,0x6A,0xFF,0xF7,0xED,0xFA,0x03, \ +0xE0,0x42,0xE0,0x00,0x20,0xFF,0xF7,0xE8,0xFA,0x30,0x1C,0x28,0x4E,0xF1,0x1D, \ +0x69,0x31,0x88,0x71,0x28,0x78,0x2F,0x4D,0x02,0x28,0x01,0xD1,0x00,0x23,0x0F, \ +0xE0,0x2B,0x48,0x2D,0x49,0x00,0x68,0x80,0x7D,0x89,0x7A,0x88,0x42,0x00,0xDB, \ +0x08,0x1C,0x2B,0x49,0x40,0x00,0x08,0x5A,0xE9,0x88,0x49,0x00,0x40,0x18,0xE1, \ +0x68,0x43,0x18,0xB8,0x1C,0x23,0x4F,0x00,0x22,0x39,0x78,0xFF,0xF7,0xF9,0xFB, \ +0x25,0x49,0xE0,0x68,0x09,0x88,0x22,0x4A,0x40,0x18,0xE9,0x88,0x40,0x18,0x39, \ +0x78,0x49,0x00,0x51,0x5A,0x40,0x18,0x20,0x49,0x09,0x88,0x41,0x18,0x01,0x20, \ +0x01,0xF0,0x38,0xF8,0x02,0x20,0x70,0x60,0x00,0x20,0xF0,0x63,0x13,0x48,0x1A, \ +0x49,0x80,0x89,0xB0,0x63,0x09,0x88,0xE0,0x68,0x40,0x18,0x19,0x49,0x08,0x60, \ +0xF0,0xBD,0x00,0x20,0x0E,0x49,0xD0,0x63,0x89,0x89,0x91,0x63,0x0E,0x49,0x09, \ +0x68,0x48,0x73,0x01,0x20,0xFF,0xF7,0x5B,0xFA,0x00,0x22,0x10,0x21,0x12,0x48, \ +0x03,0xF0,0xC4,0xF9,0xF0,0xBD,0x00,0x00,0xE0,0x05,0x00,0x02,0x3C,0x08,0x00, \ +0x02,0x79,0x01,0x00,0x02,0x7A,0x01,0x00,0x02,0x7C,0x01,0x00,0x02,0x80,0x01, \ +0x00,0x02,0xD8,0x00,0x00,0x02,0x82,0x01,0x00,0x02,0x04,0x00,0x00,0x02,0x69, \ +0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x1C,0x01,0x00,0x02,0x50,0x01,0x00,0x02, \ +0x64,0x01,0x00,0x02,0x66,0x01,0x00,0x02,0xF4,0x01,0x00,0x02,0xDC,0x07,0x00, \ +0x02,0xB0,0xB5,0x1F,0x4F,0x78,0x68,0x03,0x28,0x39,0xD1,0x01,0x20,0x01,0xF0, \ +0x08,0xF8,0x1C,0x4C,0x1D,0x4D,0x20,0x68,0x80,0x7D,0x28,0x70,0x00,0xF0,0x57, \ +0xFF,0x28,0x78,0x03,0x28,0x03,0xD1,0x78,0x6A,0xFF,0xF7,0x5F,0xFA,0x02,0xE0, \ +0x00,0x20,0xFF,0xF7,0x5B,0xFA,0x20,0x68,0x15,0x49,0x80,0x7D,0x89,0x7A,0x88, \ +0x42,0x00,0xDB,0x08,0x1C,0x13,0x4C,0xFD,0x1D,0x21,0x88,0x79,0x35,0xEA,0x68, \ +0x05,0x31,0x89,0x18,0x11,0x4A,0xD2,0x88,0x89,0x18,0x10,0x4A,0x40,0x00,0x10, \ +0x5A,0x08,0x18,0x0F,0x49,0x09,0x88,0x41,0x18,0x01,0x20,0x00,0xF0,0xC2,0xFF, \ +0x02,0x20,0x78,0x60,0xF9,0x1D,0x69,0x31,0x01,0x20,0x88,0x71,0x21,0x88,0xE8, \ +0x68,0x40,0x18,0x09,0x49,0x08,0x60,0xB0,0xBD,0xE0,0x05,0x00,0x02,0x04,0x00, \ +0x00,0x02,0x69,0x01,0x00,0x02,0x1C,0x01,0x00,0x02,0x64,0x01,0x00,0x02,0x18, \ +0x00,0x00,0x02,0x50,0x01,0x00,0x02,0x66,0x01,0x00,0x02,0xF4,0x01,0x00,0x02, \ +0x00,0xB5,0x11,0x49,0x09,0x68,0x49,0x68,0x0A,0x78,0x13,0x07,0x10,0xD1,0x12, \ +0x11,0x0D,0x2A,0x0D,0xD2,0x01,0xA3,0x9B,0x5C,0x5B,0x00,0x9F,0x44,0x09,0x0A, \ +0x09,0x0A,0x0D,0x06,0x09,0x09,0x06,0x09,0x0A,0x0A,0x0A,0x00,0x08,0x1C,0x00, \ +0xF0,0x10,0xF8,0x00,0xBD,0xFF,0xF7,0x05,0xFC,0x00,0xBD,0x04,0x48,0x40,0x7A, \ 
+0x00,0x28,0xF7,0xD0,0x07,0xF0,0xFE,0xFA,0x00,0xBD,0x00,0x00,0x10,0x00,0x00, \ +0x02,0x40,0x06,0x00,0x02,0xB0,0xB5,0xC5,0x1D,0x19,0x35,0x07,0x1C,0x68,0x79, \ +0xC1,0x19,0x20,0x31,0xC9,0x79,0x40,0x18,0xC0,0x19,0x20,0x30,0x2B,0x49,0x80, \ +0x7A,0x09,0x7D,0x88,0x42,0x50,0xD1,0x29,0x4C,0xE0,0x79,0x01,0x28,0x02,0xD0, \ +0xE0,0x79,0x07,0x28,0x02,0xD1,0x38,0x1C,0x07,0xF0,0xD0,0xFA,0x38,0x78,0x50, \ +0x28,0x0E,0xD0,0xE0,0x79,0x03,0x28,0x03,0xD1,0x06,0xF0,0x0C,0xFF,0x00,0x28, \ +0x3C,0xD0,0xE0,0x79,0x04,0x28,0x04,0xD1,0x01,0x20,0x06,0xF0,0x00,0xFE,0x00, \ +0x28,0x34,0xD0,0xE0,0x79,0x05,0x28,0x2F,0xD1,0x1B,0x4C,0xF8,0x1D,0x09,0x30, \ +0x06,0x22,0xE1,0x1D,0x07,0x31,0xFF,0xF7,0x81,0xFA,0x18,0x4F,0x00,0x28,0x1F, \ +0xD1,0x38,0x79,0x02,0x28,0x0A,0xD1,0xA8,0x78,0x40,0x08,0x20,0xD3,0x06,0x20, \ +0x00,0xF0,0x53,0xFF,0x00,0x20,0x00,0xF0,0x24,0xF8,0x00,0xF0,0xD6,0xF8,0x38, \ +0x79,0x01,0x28,0x13,0xD1,0xA8,0x78,0x80,0x08,0x12,0xD3,0x00,0x20,0x00,0xF0, \ +0x19,0xF8,0x00,0xF0,0x01,0xFF,0x21,0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0x00, \ +0xF0,0x23,0xFF,0x04,0xE0,0x38,0x79,0x01,0x28,0x01,0xD1,0x07,0xF0,0x27,0xFB, \ +0xFF,0xF7,0xB7,0xFB,0xB0,0xBD,0x18,0x00,0x00,0x02,0x50,0x06,0x00,0x02,0x98, \ +0x00,0x00,0x02,0xC8,0x00,0x00,0x02,0xF1,0xB5,0x83,0xB0,0x3E,0x49,0x00,0x25, \ +0x0B,0x68,0x02,0x93,0x59,0x68,0x4A,0x7E,0x0F,0x7E,0x12,0x02,0x3A,0x43,0x8F, \ +0x7E,0x3F,0x04,0x3A,0x43,0xCF,0x7E,0x3F,0x06,0x3A,0x43,0x16,0x1C,0x4F,0x7F, \ +0x0A,0x7F,0x3F,0x02,0x3A,0x43,0x8F,0x7F,0xC9,0x7F,0x3F,0x04,0x3A,0x43,0x09, \ +0x06,0x0A,0x43,0x99,0x89,0x18,0x39,0xCC,0x00,0x99,0x7B,0x17,0x1C,0x00,0x29, \ +0x26,0xD0,0x01,0x29,0x26,0xD0,0x02,0x29,0x26,0xD0,0x03,0x29,0x0C,0xD1,0x0B, \ +0x20,0x21,0x1C,0x03,0xF0,0xB1,0xFD,0x00,0x91,0x61,0x1A,0x0B,0x20,0x03,0xF0, \ +0xAC,0xFD,0x00,0x99,0x00,0x29,0x00,0xD9,0x01,0x30,0x01,0x24,0xA4,0x06,0xA2, \ +0x6A,0x61,0x6A,0x02,0x9B,0x30,0x18,0x5B,0x69,0xCB,0x1A,0xC0,0x18,0xB0,0x42, \ +0x00,0xD2,0x01,0x37,0x06,0x1C,0x1F,0x48,0x03,0x79,0x00,0x20,0x02,0x2B,0x14, \ +0xD1,0x01,0x25,0x1F,0xE0,0x20,0x1C,0xE9,0xE7,0x60,0x08,0xE7,0xE7,0x61,0x00, \ +0x01,0x91,0x0B,0x20,0x03,0xF0,0x8B,0xFD,0x0C,0x1C,0x01,0x99,0x09,0x1B,0x0B, \ +0x20,0x03,0xF0,0x85,0xFD,0x00,0x2C,0xDA,0xD9,0x01,0x30,0xD8,0xE7,0x01,0x2B, \ +0x0A,0xD1,0x12,0x4B,0x97,0x42,0x58,0x72,0x01,0xD9,0x01,0x25,0x04,0xE0,0x97, \ +0x42,0x02,0xD1,0x8E,0x42,0x00,0xD9,0x01,0x25,0x03,0x9A,0x00,0x2A,0x03,0xD0, \ +0x00,0x2D,0x03,0xD1,0x04,0xB0,0xF0,0xBD,0x00,0x2D,0x09,0xD0,0x70,0x1A,0x00, \ +0xF0,0x10,0xF8,0x01,0x23,0xDE,0x42,0x01,0xD1,0x00,0x26,0x01,0x37,0xA7,0x62, \ +0x66,0x62,0x01,0x20,0xEF,0xE7,0x00,0x00,0x10,0x00,0x00,0x02,0xC8,0x00,0x00, \ +0x02,0x40,0x06,0x00,0x02,0x90,0xB4,0x10,0x4A,0x00,0x21,0x97,0x69,0x91,0x61, \ +0x01,0x21,0x0E,0x4B,0x8C,0x00,0xE3,0x18,0xDC,0x6A,0x01,0x31,0x24,0x18,0xDC, \ +0x62,0x08,0x29,0xF6,0xD9,0x0B,0x49,0xCB,0x69,0x1B,0x18,0xCB,0x61,0xCB,0x69, \ +0x5B,0x00,0x5B,0x08,0xCB,0x61,0x8B,0x69,0x18,0x18,0x88,0x61,0x88,0x69,0x40, \ +0x00,0x40,0x08,0x88,0x61,0x97,0x61,0x90,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00, \ +0x00,0x04,0x40,0x00,0x00,0x04,0xE0,0x05,0x00,0x02,0x00,0xB5,0x08,0x29,0x01, \ +0xD1,0xFF,0xF7,0xDB,0xFA,0x00,0xBD,0xF0,0xB5,0x45,0x48,0x02,0x68,0x24,0x20, \ +0x51,0x68,0x92,0x89,0x04,0x3A,0x24,0x2A,0x23,0xDD,0x0F,0x5C,0x06,0x2F,0x20, \ +0xD2,0x01,0xA3,0xDB,0x5D,0x5B,0x00,0x9F,0x44,0x02,0x09,0x1C,0x10,0x14,0x1D, \ +0x0B,0x18,0x5B,0x78,0x18,0x18,0x02,0x30,0x00,0x04,0x00,0x0C,0x10,0xE0,0x0B, \ +0x18,0x5B,0x78,0x18,0x18,0x02,0x30,0x00,0x04,0x00,0x0C,0x09,0xE0,0x03,0x30, \ +0x00,0x04,0x00,0x0C,0x05,0xE0,0x0B,0x18,0x5B,0x78,0x18,0x18,0x02,0x30,0x00, \ 
+0x04,0x00,0x0C,0x82,0x42,0xDB,0xDC,0xF0,0xBD,0x0C,0x18,0x22,0x79,0x2E,0x4B, \ +0xA0,0x78,0xE1,0x78,0xDE,0x88,0xFE,0x25,0x15,0x40,0x67,0x1D,0xB3,0x04,0x9B, \ +0x0C,0x00,0x2D,0x03,0xD0,0xEE,0x00,0x01,0x3E,0x9E,0x42,0x05,0xDA,0x64,0x78, \ +0x64,0x19,0xE4,0x00,0x18,0x3C,0x9C,0x42,0x01,0xDC,0x00,0x24,0x0D,0xE0,0xEC, \ +0x00,0x1C,0x1B,0x00,0xD5,0x07,0x34,0xE4,0x10,0x24,0x06,0x24,0x0E,0x3F,0x5D, \ +0x5B,0x07,0x5B,0x0F,0xDF,0x40,0x1C,0x1C,0x3C,0x06,0x24,0x0E,0x1C,0x4F,0xFB, \ +0x1D,0x29,0x33,0x5B,0x79,0x02,0x2B,0xD1,0xD1,0x08,0x23,0x19,0x4D,0x52,0x08, \ +0xAB,0x71,0x05,0xD3,0x00,0x28,0x03,0xD1,0xAA,0x79,0x02,0x23,0x1A,0x43,0xAA, \ +0x71,0x62,0x08,0x08,0xD3,0xAA,0x79,0x04,0x23,0x1A,0x43,0xAA,0x71,0x2A,0x79, \ +0x00,0x2A,0x01,0xD1,0x01,0x22,0x6A,0x71,0x10,0x4A,0x00,0x28,0x05,0xD0,0x51, \ +0x8B,0x81,0x42,0x00,0xDC,0x08,0x1C,0x04,0x1C,0x04,0xE0,0x50,0x8B,0x88,0x42, \ +0x00,0xDC,0x01,0x1C,0x0C,0x1C,0x00,0xF0,0xB5,0xFD,0x39,0x88,0x08,0x4B,0x4C, \ +0x43,0xA1,0x02,0x08,0x1A,0xC1,0x18,0x06,0x20,0x00,0xF0,0xD4,0xFD,0xF0,0xBD, \ +0x00,0x00,0x10,0x00,0x00,0x02,0x98,0x00,0x00,0x02,0x80,0x06,0x00,0x02,0xD8, \ +0x00,0x00,0x02,0x48,0xF4,0xFF,0xFF,0xF0,0xB5,0x82,0xB0,0x21,0x4E,0x22,0x4F, \ +0x22,0x4D,0x23,0x4C,0x00,0x22,0xD2,0x43,0x00,0x92,0x01,0x22,0x30,0x1C,0x01, \ +0xAB,0x20,0x49,0x03,0xF0,0x5D,0xF8,0x01,0x98,0x41,0x0A,0x0C,0xD3,0x80,0x20, \ +0x38,0x72,0x00,0x20,0x78,0x72,0x38,0x7A,0x00,0x0A,0x25,0xD3,0x07,0xF0,0xC1, \ +0xFA,0x38,0x7A,0x00,0x0A,0xFA,0xD2,0x1F,0xE0,0x41,0x08,0x02,0xD3,0x07,0xF0, \ +0x1D,0xF8,0x1A,0xE0,0x41,0x0D,0x03,0xD3,0x40,0x20,0x06,0xF0,0xB9,0xFE,0x14, \ +0xE0,0x41,0x09,0x03,0xD3,0x50,0x20,0x06,0xF0,0xB3,0xFE,0x0E,0xE0,0x40,0x0F, \ +0x05,0xD3,0x80,0x20,0x06,0xF0,0xAD,0xFE,0x08,0xE0,0x00,0xF0,0x1A,0xF8,0x28, \ +0x78,0x40,0x09,0x03,0xD3,0x20,0x68,0x00,0x7B,0x00,0x0A,0xF6,0xD2,0x00,0x22, \ +0x01,0x21,0x07,0x48,0x02,0xF0,0x2E,0xFF,0xC0,0xE7,0x00,0x00,0xFC,0x07,0x00, \ +0x02,0x80,0x06,0x00,0x02,0x7B,0x01,0x00,0x02,0x04,0x00,0x00,0x02,0x11,0x11, \ +0x10,0x10,0x1C,0x08,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x97,0x48,0x00,0x26,0x00, \ +0x68,0x47,0x68,0x39,0x79,0x49,0x08,0x00,0xD3,0x01,0x26,0x94,0x49,0xC9,0x78, \ +0x00,0x29,0x14,0xD0,0x39,0x78,0x08,0x29,0x11,0xD1,0x92,0x4A,0x11,0x78,0x00, \ +0x29,0x0D,0xD0,0x81,0x7D,0x53,0x78,0x8D,0x4D,0x99,0x42,0x01,0xDD,0x51,0x78, \ +0x81,0x75,0x28,0x68,0x00,0x23,0x81,0x7D,0xB8,0x1C,0x32,0x1C,0xFF,0xF7,0x03, \ +0xF9,0x8A,0x4B,0x8A,0x4C,0x18,0x78,0x00,0x28,0x4B,0xD1,0x84,0x4D,0x00,0x2E, \ +0x28,0x68,0x20,0x61,0x41,0x68,0x61,0x60,0x39,0xD1,0x86,0x49,0x82,0x8A,0x09, \ +0x89,0x8A,0x42,0x34,0xDB,0x01,0x22,0x1A,0x70,0x83,0x4B,0x00,0x22,0x1A,0x70, \ +0xFA,0x1D,0x82,0x4B,0x17,0x32,0x1A,0x60,0x82,0x8A,0x81,0x4B,0x1E,0x3A,0x1A, \ +0x80,0x0A,0x1F,0x80,0x49,0x0A,0x80,0x7A,0x78,0xD2,0x09,0x02,0xD3,0x0A,0x88, \ +0x08,0x32,0x0A,0x80,0x09,0x88,0xA1,0x60,0x80,0x7D,0x00,0xF0,0x37,0xFC,0xE0, \ +0x60,0x28,0x68,0x81,0x7D,0x70,0x48,0x80,0x7A,0x81,0x42,0x00,0xDA,0x08,0x1C, \ +0x77,0x4A,0x40,0x00,0x10,0x5A,0x76,0x4A,0xD2,0x88,0x52,0x00,0x80,0x18,0xE2, \ +0x68,0x83,0x18,0xB8,0x1C,0x32,0x1C,0xFF,0xF7,0xC2,0xF8,0x04,0x21,0x78,0x1C, \ +0xFF,0xF7,0xA4,0xF8,0x0A,0xE0,0x6A,0x4B,0x00,0x21,0x19,0x70,0x81,0x8A,0x80, \ +0x7D,0x00,0xF0,0x16,0xFC,0xE0,0x60,0x28,0x68,0x80,0x8A,0xA0,0x60,0x78,0x1C, \ +0x40,0x21,0x05,0x1C,0xFF,0xF7,0x97,0xF8,0x00,0x28,0x0E,0xD0,0x5E,0x4B,0x18, \ +0x78,0x00,0x28,0x0A,0xD1,0x59,0x48,0x00,0x68,0x81,0x8A,0x08,0x31,0xA1,0x60, \ +0x81,0x8A,0x80,0x7D,0x08,0x31,0x00,0xF0,0xFD,0xFB,0xE0,0x60,0x30,0x1C,0x00, \ +0xF0,0x77,0xFB,0x00,0x28,0x0A,0xD0,0x02,0x20,0x5C,0x49,0xC2,0x1E,0x08,0x63, \ 
+0x00,0x92,0x01,0x22,0x11,0x21,0x5A,0x48,0x01,0xAB,0x02,0xF0,0x80,0xFF,0x57, \ +0x49,0x00,0x26,0x4A,0x48,0x0E,0x63,0x00,0x68,0x57,0x4C,0x41,0x7B,0x00,0x29, \ +0x3B,0xD1,0x78,0x78,0x40,0x09,0x00,0xD3,0xE6,0x71,0x38,0x78,0x08,0x28,0x20, \ +0xD1,0x44,0x48,0xC0,0x78,0x00,0x28,0x1C,0xD0,0x43,0x4F,0x38,0x78,0x01,0x28, \ +0x28,0xD0,0x02,0x28,0x16,0xD1,0x01,0x20,0x00,0xF0,0x5D,0xFC,0x78,0x70,0x78, \ +0x88,0x01,0x30,0x00,0x04,0x00,0x0C,0x78,0x80,0x03,0x28,0x0B,0xDB,0x02,0x20, \ +0x00,0xF0,0x52,0xFC,0x79,0x78,0x88,0x42,0x05,0xD1,0x79,0x21,0x49,0x05,0x8E, \ +0x82,0x0E,0x80,0x3E,0x70,0x7E,0x80,0x36,0x4B,0x30,0x1C,0x18,0x70,0x37,0x4B, \ +0x3D,0x49,0x18,0x70,0xCE,0x63,0x08,0x64,0x34,0x48,0x80,0x89,0x88,0x63,0x2D, \ +0x49,0x10,0x20,0x09,0x68,0x08,0x73,0x53,0xE0,0x02,0x20,0x38,0x70,0x7E,0x80, \ +0xEB,0xE7,0x40,0x7B,0x04,0x28,0x0A,0xD0,0x00,0xF0,0x1C,0xFC,0x26,0x48,0x00, \ +0x68,0x81,0x7B,0x01,0x31,0x81,0x73,0x08,0x21,0x28,0x1C,0xFF,0xF7,0x1F,0xF8, \ +0x22,0x4D,0x26,0x4A,0x28,0x68,0x81,0x7B,0x12,0x7C,0x91,0x42,0x04,0xDA,0x46, \ +0x73,0x29,0x68,0x82,0x20,0x08,0x73,0x35,0xE0,0x78,0x78,0x40,0x09,0x01,0xD3, \ +0x01,0x22,0xE2,0x71,0x1A,0x48,0xC0,0x78,0x00,0x28,0x1E,0xD0,0x38,0x78,0x08, \ +0x28,0x1B,0xD1,0x18,0x4F,0x38,0x78,0x00,0x28,0x02,0xD0,0x38,0x78,0x02,0x28, \ +0x0A,0xD1,0x79,0x20,0x40,0x05,0x20,0x4A,0x86,0x82,0x02,0x82,0x31,0x1C,0x81, \ +0x80,0x1F,0x49,0x01,0x80,0x02,0x21,0x81,0x82,0x38,0x78,0x02,0x28,0x00,0xD1, \ +0x7E,0x80,0x00,0x20,0x00,0xF0,0xF6,0xFB,0x78,0x70,0x01,0x22,0x3A,0x70,0x0B, \ +0x4B,0x30,0x1C,0x18,0x70,0x0C,0x4B,0x12,0x49,0x18,0x70,0xCE,0x63,0x09,0x4A, \ +0x08,0x64,0x90,0x89,0x88,0x63,0x29,0x68,0x10,0x20,0x08,0x73,0x02,0xB0,0xF0, \ +0xBD,0x00,0x00,0x04,0x00,0x00,0x02,0x1C,0x01,0x00,0x02,0x2C,0x01,0x00,0x02, \ +0x79,0x01,0x00,0x02,0x60,0x06,0x00,0x02,0xD8,0x00,0x00,0x02,0x7A,0x01,0x00, \ +0x02,0x7C,0x01,0x00,0x02,0x80,0x01,0x00,0x02,0x82,0x01,0x00,0x02,0x50,0x01, \ +0x00,0x02,0x18,0x00,0x00,0x02,0xE0,0x05,0x00,0x02,0xDC,0x07,0x00,0x02,0x80, \ +0x06,0x00,0x02,0xA0,0x8C,0x00,0x00,0x10,0x27,0x00,0x00,0x80,0xB5,0x0F,0x27, \ +0x3F,0x06,0xB8,0x88,0x1C,0x4B,0x18,0x40,0xB8,0x80,0xB8,0x89,0x1B,0x4B,0x18, \ +0x40,0xB8,0x81,0x1B,0x48,0x03,0x21,0xC1,0x72,0xB9,0x89,0xDB,0x43,0x19,0x43, \ +0xB9,0x81,0xB9,0x88,0x18,0x4B,0x19,0x43,0xB9,0x80,0x41,0x7B,0x01,0x29,0x23, \ +0xD0,0x41,0x7A,0x00,0x29,0x20,0xD1,0x01,0x7A,0x00,0x29,0x1D,0xD0,0x81,0x7C, \ +0x01,0x29,0x1A,0xD1,0x81,0x7B,0x01,0x29,0x17,0xD1,0x01,0x7B,0x00,0x29,0x14, \ +0xD1,0xB9,0x88,0x99,0x43,0xB9,0x80,0xB9,0x89,0x09,0x4B,0x19,0x40,0xB9,0x81, \ +0xC0,0x7A,0x03,0x28,0x02,0xD1,0x00,0x20,0x00,0xF0,0xC7,0xF9,0xB8,0x89,0x07, \ +0x4B,0x18,0x43,0xB8,0x81,0xB8,0x88,0x04,0x4B,0x18,0x43,0xB8,0x80,0x80,0xBD, \ +0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0xE0,0x05,0x00,0x02,0xE8,0xE8,0x00, \ +0x00,0x13,0x13,0x00,0x00,0x80,0xB5,0xFE,0xF7,0x09,0xFE,0x20,0x4F,0xB9,0x69, \ +0x40,0x1A,0x41,0x00,0x78,0x7B,0x49,0x08,0x01,0x28,0x01,0xD1,0xB8,0x6A,0x00, \ +0xE0,0x38,0x6D,0x3A,0x68,0x00,0x2A,0x20,0xD1,0x1A,0x4B,0x82,0x00,0x1B,0x88, \ +0x12,0x18,0x92,0x00,0xD2,0x18,0x51,0x1A,0x8A,0x42,0x00,0xD2,0x11,0x1C,0x00, \ +0x28,0x0F,0xD1,0x01,0x20,0xB8,0x74,0xF8,0x7A,0x03,0x28,0x05,0xD1,0x0D,0x29, \ +0x04,0xD9,0xC8,0x1F,0x01,0x38,0x00,0xF0,0x8C,0xF9,0x80,0xBD,0x00,0x20,0x00, \ +0xF0,0x88,0xF9,0x80,0xBD,0x7B,0x61,0x08,0x20,0x00,0xF0,0xA9,0xFB,0x0D,0xE0, \ +0x0A,0x4A,0x83,0x00,0x12,0x88,0x18,0x18,0x80,0x00,0x80,0x18,0x41,0x1A,0x88, \ +0x42,0x00,0xD2,0x01,0x1C,0x7A,0x61,0x08,0x20,0x00,0xF0,0x9A,0xFB,0x04,0x20, \ +0xB8,0x74,0x80,0xBD,0x00,0x00,0xE0,0x05,0x00,0x02,0x60,0x01,0x00,0x02,0x62, \ 
+0x01,0x00,0x02,0x00,0xB5,0x04,0x49,0x02,0x0A,0x8A,0x74,0xC8,0x74,0x03,0x21, \ +0x11,0x20,0xFE,0xF7,0x15,0xFE,0x00,0xBD,0xF4,0x09,0x00,0x02,0xB0,0xB5,0x82, \ +0xB0,0x11,0x4D,0x01,0x20,0x28,0x63,0x11,0x4F,0x11,0x48,0x00,0x24,0xBC,0x82, \ +0x38,0x82,0xBC,0x80,0x1E,0x20,0x38,0x80,0x02,0x20,0xB8,0x82,0xC2,0x1E,0x00, \ +0x92,0x01,0x22,0x1A,0x21,0x0C,0x48,0x01,0xAB,0x02,0xF0,0x11,0xFE,0x2C,0x63, \ +0x3C,0x83,0xBC,0x82,0x01,0x98,0x81,0x08,0x06,0xD3,0x00,0x09,0x02,0xD3,0x82, \ +0x20,0x02,0xB0,0xB0,0xBD,0x20,0x1C,0xFB,0xE7,0x42,0x20,0xF9,0xE7,0xE0,0x05, \ +0x00,0x02,0x20,0x00,0x20,0x0F,0xA0,0x8C,0x00,0x00,0xBC,0x07,0x00,0x02,0xF0, \ +0xB5,0xFF,0x20,0x01,0x26,0xB6,0x06,0xF5,0x30,0x31,0x69,0x89,0x08,0x03,0xD3, \ +0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD8,0x78,0x4D,0xEC,0x1D,0x69,0x34,0x60, \ +0x79,0xEF,0x1D,0x79,0x37,0x00,0x28,0x0D,0xD0,0x38,0x68,0xFF,0xF7,0xAF,0xFF, \ +0xE9,0x6F,0xA8,0x6F,0x00,0x22,0x00,0xF0,0xFE,0xF8,0x00,0x21,0x61,0x71,0x01, \ +0x20,0xFE,0xF7,0x71,0xFD,0xF0,0xBD,0xA8,0x7A,0x00,0x28,0x17,0xD0,0x6D,0x48, \ +0x6D,0x49,0x00,0x68,0x80,0x7D,0x89,0x7A,0x88,0x42,0x00,0xDB,0x08,0x1C,0x6B, \ +0x49,0x40,0x00,0x08,0x5A,0xFF,0xF7,0x94,0xFF,0x00,0x22,0x10,0x21,0x68,0x48, \ +0x00,0xF0,0xE3,0xF8,0x01,0x20,0xFE,0xF7,0x58,0xFD,0x00,0x21,0xA9,0x72,0xF0, \ +0xBD,0xA0,0x79,0x00,0x28,0xE0,0xD0,0xF8,0x68,0xFF,0xF7,0x83,0xFF,0x7D,0x68, \ +0x68,0x78,0xC0,0x09,0x64,0xD3,0x5B,0x48,0x01,0x7B,0x00,0x29,0x01,0xD0,0x00, \ +0x21,0x01,0x73,0x03,0x20,0x40,0x06,0x80,0x68,0x40,0x08,0xFA,0xD2,0x07,0x20, \ +0x40,0x06,0x81,0x69,0x08,0x23,0x19,0x43,0x81,0x61,0x81,0x69,0x99,0x43,0x81, \ +0x61,0xF0,0x68,0x01,0xF0,0xD0,0xF8,0x54,0x48,0x41,0x68,0xC9,0x0B,0xFC,0xD2, \ +0x53,0x49,0x71,0x63,0x53,0x49,0xB1,0x63,0x01,0x21,0x89,0x03,0x41,0x60,0xFF, \ +0x21,0xF5,0x31,0x42,0x68,0xD2,0x0B,0x03,0xD3,0x0A,0x1C,0x01,0x39,0x00,0x2A, \ +0xF8,0xD1,0x4D,0x49,0x09,0x78,0x02,0x29,0x04,0xD1,0x81,0x21,0x03,0x22,0x52, \ +0x06,0x91,0x60,0x03,0xE0,0x01,0x21,0x03,0x22,0x52,0x06,0x91,0x60,0x48,0x49, \ +0x09,0x78,0x00,0x29,0x13,0xD0,0x47,0x4A,0x12,0x78,0x00,0x2A,0x04,0xD0,0x00, \ +0x2A,0x02,0xDD,0x45,0x4A,0x12,0x68,0x02,0xE0,0x43,0x4A,0x12,0x68,0x06,0x3A, \ +0x43,0x4B,0x1B,0x88,0x20,0x3B,0x02,0x29,0x0A,0xD1,0x00,0x21,0xA1,0x71,0x07, \ +0xE0,0x79,0x68,0xCA,0x1D,0xB9,0x68,0x11,0x32,0xCB,0x1F,0x19,0x3B,0x00,0x21, \ +0xA1,0x71,0x19,0x1C,0x01,0x23,0x1B,0x03,0x19,0x43,0xB2,0x63,0x71,0x63,0x39, \ +0x49,0x17,0x23,0x09,0x78,0x9B,0x02,0x19,0x43,0x41,0x60,0x3C,0xE0,0xFF,0xE7, \ +0x31,0x48,0x00,0x78,0x00,0x28,0x30,0xD0,0x30,0x48,0x00,0x78,0x00,0x28,0x02, \ +0xD0,0x18,0x20,0x28,0x49,0x01,0xE0,0x1E,0x20,0xFB,0xE7,0x4A,0x68,0xD2,0x0B, \ +0xFC,0xD2,0x01,0x22,0x52,0x03,0x02,0x43,0x72,0x63,0x7A,0x68,0xB2,0x63,0x01, \ +0x22,0x92,0x03,0x4A,0x60,0xFF,0x22,0xF5,0x32,0x4F,0x68,0xFB,0x0B,0x03,0xD3, \ +0x13,0x1C,0x01,0x3A,0x00,0x2B,0xF8,0xD1,0x23,0x4B,0x1A,0x88,0x10,0x1A,0x01, \ +0x23,0x1B,0x03,0x18,0x43,0x1F,0x4A,0x70,0x63,0x10,0x68,0x11,0x23,0xB0,0x63, \ +0x1E,0x48,0x9B,0x02,0x00,0x78,0x18,0x43,0x48,0x60,0x00,0x21,0xA1,0x71,0x06, \ +0xE0,0x00,0x21,0xA1,0x71,0xB9,0x68,0x78,0x68,0x00,0x22,0x00,0xF0,0x31,0xF8, \ +0x0A,0x48,0x00,0x21,0x41,0x73,0x60,0x30,0x01,0x73,0x28,0x79,0x40,0x08,0x03, \ +0xD2,0x0F,0x48,0x00,0x78,0x01,0x28,0x03,0xD1,0x01,0x20,0xFE,0xF7,0x9A,0xFC, \ +0xF0,0xBD,0x00,0x20,0xFE,0xF7,0x96,0xFC,0xF0,0xBD,0x00,0x00,0xE0,0x05,0x00, \ +0x02,0x04,0x00,0x00,0x02,0x1C,0x01,0x00,0x02,0x58,0x01,0x00,0x02,0x8C,0x09, \ +0x00,0x02,0x40,0x00,0x00,0x04,0x1C,0x20,0x00,0x00,0x74,0x0A,0x00,0x02,0x70, \ +0x00,0x00,0x02,0x79,0x01,0x00,0x02,0x7A,0x01,0x00,0x02,0x7C,0x01,0x00,0x02, \ 
+0x82,0x01,0x00,0x02,0x84,0x01,0x00,0x02,0x90,0xB4,0x08,0x4F,0x7C,0x68,0xE3, \ +0x0B,0xFC,0xD2,0x0A,0x43,0x01,0x21,0x89,0x06,0x4A,0x63,0x88,0x63,0x04,0x48, \ +0x11,0x23,0x00,0x78,0x9B,0x02,0x18,0x43,0x78,0x60,0x90,0xBC,0xF7,0x46,0x40, \ +0x00,0x00,0x04,0x84,0x01,0x00,0x02,0xF0,0xB5,0x4F,0x4D,0x07,0x1C,0x68,0x7A, \ +0x00,0x21,0x00,0x28,0x0B,0xD1,0x28,0x7A,0x00,0x28,0x08,0xD0,0xA8,0x7C,0x01, \ +0x28,0x05,0xD1,0xA8,0x7B,0x01,0x28,0x02,0xD1,0x28,0x7B,0x00,0x28,0x01,0xD0, \ +0x08,0x1C,0xF0,0xBD,0xEE,0x1D,0x44,0x48,0x59,0x36,0xF3,0x7A,0xC2,0x1D,0x79, \ +0x32,0x70,0x30,0x00,0x2B,0x0D,0xD1,0xEB,0x1D,0x99,0x33,0x1C,0x79,0x00,0x2C, \ +0x08,0xD1,0x6C,0x7B,0x01,0x2C,0x05,0xD0,0x34,0x7B,0x00,0x2C,0x02,0xD1,0x6C, \ +0x6B,0x00,0x2C,0x08,0xD0,0x3A,0x4B,0x3B,0x4C,0xDB,0x79,0x23,0x70,0x41,0x71, \ +0x01,0x21,0x81,0x71,0xD4,0x68,0x26,0xE0,0xDC,0x7B,0x00,0x2C,0x15,0xD0,0x36, \ +0x4A,0x34,0x4B,0x12,0x68,0x92,0x7D,0x9B,0x7A,0x9A,0x42,0x02,0xDA,0x32,0x4B, \ +0x1A,0x70,0x01,0xE0,0x30,0x4A,0x13,0x70,0x41,0x71,0x01,0x22,0xAA,0x72,0x2E, \ +0x4A,0x81,0x71,0x10,0x78,0x2E,0x49,0x40,0x00,0x0C,0x5A,0x0D,0xE0,0x9B,0x7B, \ +0x00,0x2B,0x01,0xD1,0x08,0x1C,0xF0,0xBD,0xD4,0x68,0x41,0x71,0x01,0x22,0x82, \ +0x71,0x27,0x4A,0x10,0x68,0x25,0x4A,0x80,0x7D,0x10,0x70,0x00,0xF0,0x38,0xF9, \ +0xA8,0x7A,0x00,0x28,0x03,0xD0,0x00,0x20,0xFE,0xF7,0x40,0xFC,0x0A,0xE0,0x1F, \ +0x4A,0x10,0x78,0x03,0x28,0x03,0xD1,0x68,0x6A,0xFE,0xF7,0x38,0xFC,0x02,0xE0, \ +0x00,0x20,0xFE,0xF7,0x34,0xFC,0x01,0x20,0x80,0x06,0x00,0x2F,0x04,0xD0,0x05, \ +0x2F,0x02,0xD9,0x41,0x6A,0x79,0x18,0x01,0xE0,0x41,0x6A,0x0A,0x31,0x02,0x22, \ +0xEA,0x72,0x16,0x4B,0x92,0x03,0x5A,0x60,0x01,0x62,0x6B,0x7B,0x14,0x4A,0x01, \ +0x2B,0x02,0xD0,0x33,0x7B,0x00,0x2B,0x10,0xD0,0x15,0x88,0x13,0x4E,0x87,0x6A, \ +0x49,0x19,0x0A,0x4D,0x40,0x6A,0xED,0x79,0x0F,0x4B,0xAD,0x00,0x75,0x59,0x49, \ +0x19,0x19,0x60,0x81,0x42,0x00,0xD2,0x01,0x37,0x5F,0x60,0x18,0x1D,0x10,0x88, \ +0x0B,0x49,0x20,0x18,0x08,0x60,0x01,0x20,0xF0,0xBD,0x00,0x00,0xE0,0x05,0x00, \ +0x02,0x1C,0x01,0x00,0x02,0x69,0x01,0x00,0x02,0x04,0x00,0x00,0x02,0x58,0x01, \ +0x00,0x02,0x40,0x00,0x00,0x04,0x64,0x01,0x00,0x02,0x74,0x08,0x00,0x02,0xE4, \ +0x09,0x00,0x02,0xF4,0x01,0x00,0x02,0xF1,0xB5,0x81,0xB0,0x34,0x49,0x35,0x4A, \ +0x08,0x68,0x00,0x27,0x84,0x7D,0x46,0x68,0x92,0x7A,0x94,0x42,0x00,0xDB,0x14, \ +0x1C,0x81,0x8A,0x31,0x48,0x31,0x4D,0x40,0x89,0x81,0x42,0x26,0xDD,0x01,0x98, \ +0x00,0x28,0x23,0xD1,0x2F,0x49,0xB4,0x20,0x08,0x70,0x2E,0x48,0x2F,0x4A,0xC0, \ +0x88,0x43,0x00,0x18,0x18,0x61,0x00,0x51,0x5A,0x49,0x00,0x40,0x18,0x2C,0x49, \ +0x06,0x22,0x09,0x88,0x40,0x18,0xE9,0x1F,0x19,0x39,0xC9,0x68,0x40,0x18,0x25, \ +0x49,0x48,0x80,0x04,0x31,0x30,0x1D,0xFE,0xF7,0x8F,0xFC,0x06,0x22,0xF0,0x1D, \ +0x03,0x30,0x24,0x49,0xFE,0xF7,0x89,0xFC,0x01,0x20,0xE8,0x73,0x01,0x25,0x02, \ +0xE0,0x01,0x20,0xA8,0x73,0x00,0x25,0xFF,0xF7,0x12,0xFD,0xFF,0xF7,0xAE,0xFD, \ +0x00,0x90,0x00,0x98,0x00,0x28,0x20,0xD1,0x13,0x49,0x00,0x2D,0x09,0x68,0x48, \ +0x73,0x02,0xD1,0x01,0x98,0x00,0x28,0x01,0xD0,0x01,0x2D,0x1A,0xD1,0x14,0x4A, \ +0x13,0x49,0x60,0x00,0x10,0x5A,0xC9,0x88,0x40,0x18,0x14,0x49,0x09,0x88,0x41, \ +0x18,0x01,0x20,0x00,0xF0,0x18,0xF9,0x01,0x2D,0x03,0xD1,0x11,0x49,0x03,0x20, \ +0x48,0x60,0x02,0xE0,0x0F,0x49,0x02,0x20,0x48,0x60,0x01,0x27,0x03,0xE0,0x03, \ +0x49,0x04,0x20,0x09,0x68,0x48,0x73,0x38,0x1C,0x02,0xB0,0xF0,0xBD,0x04,0x00, \ +0x00,0x02,0x1C,0x01,0x00,0x02,0xD8,0x00,0x00,0x02,0x80,0x06,0x00,0x02,0x8C, \ +0x09,0x00,0x02,0x18,0x00,0x00,0x02,0x50,0x01,0x00,0x02,0x64,0x01,0x00,0x02, \ +0x96,0x09,0x00,0x02,0x66,0x01,0x00,0x02,0xE0,0x05,0x00,0x02,0x90,0xB5,0x04, \ 
+0x31,0xCF,0x00,0x01,0x28,0x17,0xD0,0x02,0x28,0x17,0xD0,0x03,0x28,0x27,0xD1, \ +0x0B,0x20,0x39,0x1C,0x02,0xF0,0xAF,0xFF,0x0C,0x1C,0x79,0x1A,0x0B,0x20,0x02, \ +0xF0,0xAA,0xFF,0x07,0x1C,0x00,0x21,0x00,0x2C,0x0F,0x48,0x18,0xD9,0x01,0x37, \ +0x04,0x2C,0x13,0xD2,0x01,0x21,0x41,0x62,0x13,0xE0,0x7F,0x08,0x11,0xE0,0x79, \ +0x00,0x0B,0x20,0x0F,0x1C,0x02,0xF0,0x98,0xFF,0x0C,0x1C,0x79,0x1A,0x0B,0x20, \ +0x02,0xF0,0x93,0xFF,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37,0x02,0xE0,0x41, \ +0x62,0x00,0xE0,0x41,0x62,0x38,0x1C,0x90,0xBD,0x00,0x00,0xE0,0x05,0x00,0x02, \ +0x10,0x48,0x01,0x88,0x10,0x48,0xCA,0x1D,0x69,0x32,0x02,0x80,0xCA,0x1D,0x31, \ +0x32,0x42,0x80,0xCA,0x1D,0x0E,0x32,0x82,0x80,0x0B,0x31,0xC1,0x80,0x0B,0x48, \ +0xA0,0x21,0x01,0x80,0x50,0x21,0x41,0x80,0x1E,0x21,0x81,0x80,0x0F,0x21,0xC1, \ +0x80,0x08,0x48,0xC0,0x21,0x01,0x60,0x60,0x21,0x41,0x60,0x23,0x21,0x81,0x60, \ +0x12,0x21,0xC1,0x60,0xF7,0x46,0x00,0x00,0x64,0x01,0x00,0x02,0x50,0x01,0x00, \ +0x02,0x58,0x01,0x00,0x02,0xE4,0x09,0x00,0x02,0x00,0xB5,0x08,0x49,0x08,0x48, \ +0x0A,0x78,0x03,0x78,0x9A,0x42,0x08,0xD0,0x09,0x78,0x01,0x70,0x00,0x78,0x05, \ +0x49,0x08,0x5C,0x05,0x49,0x08,0x70,0xFE,0xF7,0xCC,0xFA,0x00,0xBD,0x00,0x00, \ +0x69,0x01,0x00,0x02,0x68,0x01,0x00,0x02,0x70,0x01,0x00,0x02,0x84,0x01,0x00, \ +0x02,0x07,0x48,0xC1,0x6B,0x01,0x31,0xC1,0x63,0x81,0x6B,0x49,0x00,0x01,0x31, \ +0x81,0x63,0x04,0x49,0x82,0x6B,0xC9,0x89,0x8A,0x42,0x00,0xD9,0x81,0x63,0xF7, \ +0x46,0x00,0x00,0xE0,0x05,0x00,0x02,0xD8,0x00,0x00,0x02,0x80,0xB4,0x07,0x1C, \ +0x13,0x48,0x14,0x4B,0x41,0x78,0x40,0x78,0x0D,0xD0,0x13,0x4A,0x01,0x2F,0xD2, \ +0x7A,0x13,0xD0,0x02,0x2F,0x00,0xD1,0x11,0x1C,0x5A,0x18,0x12,0x7C,0x00,0x2A, \ +0x00,0xD0,0x08,0x1C,0x80,0xBC,0xF7,0x46,0x00,0x29,0xF6,0xD0,0x01,0x39,0x09, \ +0x06,0x09,0x0E,0x5A,0x18,0x12,0x7C,0x00,0x2A,0xF6,0xD0,0xEE,0xE7,0x8A,0x42, \ +0xEC,0xDD,0x01,0x31,0x09,0x06,0x09,0x0E,0x5F,0x18,0x3F,0x7C,0x00,0x2F,0xF6, \ +0xD0,0xE4,0xE7,0x00,0x00,0x2C,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x1C,0x01, \ +0x00,0x02,0x00,0xB5,0x02,0xF0,0x13,0xFF,0x02,0x49,0x8A,0x6B,0x10,0x40,0x08, \ +0x65,0x00,0xBD,0xE0,0x05,0x00,0x02,0xB0,0xB5,0x01,0x20,0x80,0x06,0x81,0x6A, \ +0x44,0x6A,0x10,0x48,0x00,0x88,0x87,0x02,0x00,0x29,0x14,0xD9,0x38,0x1C,0x02, \ +0xF0,0xE7,0xFE,0x0D,0x1C,0x00,0x21,0xC9,0x43,0x38,0x1C,0x02,0xF0,0xE1,0xFE, \ +0x48,0x1C,0x45,0x43,0x38,0x1C,0x21,0x1C,0x02,0xF0,0xDB,0xFE,0x69,0x18,0x38, \ +0x1C,0x02,0xF0,0xD7,0xFE,0x08,0x1C,0xB0,0xBD,0x38,0x1C,0x21,0x1C,0x02,0xF0, \ +0xD1,0xFE,0x08,0x1C,0xB0,0xBD,0x00,0x00,0x98,0x00,0x00,0x02,0x90,0xB5,0x0C, \ +0x1C,0x07,0x1C,0x00,0xF0,0x15,0xF8,0x01,0x20,0x80,0x06,0x40,0x6A,0x06,0x4B, \ +0x20,0x18,0xB9,0x00,0xC9,0x18,0xC8,0x62,0x01,0x21,0x78,0x1E,0x81,0x40,0x03, \ +0x48,0x82,0x69,0x11,0x43,0x81,0x61,0x90,0xBD,0x00,0x00,0x40,0x00,0x00,0x04, \ +0x80,0x00,0x00,0x04,0x80,0xB4,0x47,0x1E,0x01,0x20,0x04,0x49,0xB8,0x40,0x8A, \ +0x69,0xC0,0x43,0x10,0x40,0x88,0x61,0x80,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00, \ +0x00,0x04,0xF0,0xB5,0x86,0xB0,0x0F,0x20,0x00,0x06,0x00,0x88,0xE2,0x4F,0x00, \ +0x24,0x03,0x90,0xB8,0x69,0x00,0x28,0x04,0xD0,0x03,0x98,0x05,0xF0,0x4A,0xFD, \ +0x00,0x28,0x73,0xD1,0x03,0x98,0xDD,0x4B,0xDD,0x49,0x18,0x40,0xCD,0x1D,0xCE, \ +0x1D,0x69,0x36,0xA9,0x35,0x00,0x28,0x32,0xD0,0xDA,0x48,0x00,0x68,0x02,0x90, \ +0x02,0x98,0x80,0x08,0x02,0xD2,0x02,0x98,0x40,0x08,0x29,0xD3,0x02,0x98,0xC0, \ +0x08,0x0D,0xD3,0x01,0x20,0x80,0x06,0x00,0x6B,0xD2,0x49,0x40,0x00,0x40,0x08, \ +0x88,0x61,0x02,0x24,0x07,0x20,0xFF,0xF7,0xC1,0xFF,0x00,0x20,0xB8,0x63,0x18, \ +0xE0,0x01,0x20,0x80,0x06,0x00,0x6B,0xCB,0x49,0x40,0x00,0x40,0x08,0xC8,0x61, \ 
+0xFA,0x21,0x01,0x24,0x07,0x20,0xFF,0xF7,0x98,0xFF,0x00,0x20,0xB8,0x63,0xF0, \ +0x79,0x01,0x28,0x07,0xD1,0x28,0x79,0x01,0x28,0x04,0xD1,0x02,0x20,0x28,0x71, \ +0x04,0x20,0xFF,0xF7,0xA5,0xFF,0x03,0x98,0xC1,0x4B,0xBF,0x49,0x18,0x40,0xCA, \ +0x1D,0x59,0x32,0xA0,0x31,0x04,0x91,0x00,0x28,0x05,0x92,0x60,0xD0,0xD8,0x04, \ +0xC1,0x6B,0x01,0x91,0x01,0x99,0x89,0x09,0x0F,0xD3,0x00,0x6A,0xB7,0x49,0x40, \ +0x00,0x40,0x08,0xC8,0x61,0x48,0x7A,0x00,0x28,0x01,0xD0,0x00,0xF0,0x00,0xFB, \ +0xFF,0xF7,0x18,0xFC,0x01,0x24,0xB1,0x48,0x01,0x21,0x01,0x66,0x01,0x98,0x12, \ +0x23,0x18,0x40,0x46,0xD0,0xAD,0x49,0x00,0x20,0x08,0x66,0x48,0x7A,0x00,0x28, \ +0x15,0xD0,0x01,0x98,0x80,0x08,0x0A,0xD3,0xC8,0x69,0x3A,0x68,0x80,0x18,0xAA, \ +0x4A,0x12,0x88,0x80,0x18,0x40,0x00,0x40,0x08,0x00,0xE0,0x3B,0xE1,0x88,0x61, \ +0x01,0x20,0xFE,0xF7,0x8B,0xF9,0xA2,0x49,0x00,0x20,0x48,0x72,0x70,0x71,0x0C, \ +0xE0,0xFF,0xF7,0x16,0xFF,0x01,0x98,0x80,0x08,0x07,0xD3,0x9D,0x49,0xA0,0x4A, \ +0xC8,0x69,0x12,0x68,0x80,0x18,0x40,0x00,0x40,0x08,0x88,0x61,0x00,0x2C,0x00, \ +0xD1,0x02,0x24,0x01,0x98,0x40,0x09,0x02,0xD3,0x01,0x20,0xFE,0xF7,0x70,0xF9, \ +0x94,0x49,0xC8,0x7A,0x02,0x28,0x10,0xD1,0x00,0x20,0x88,0x72,0x48,0x73,0x05, \ +0x9A,0x10,0x73,0x01,0x22,0xCA,0x72,0x04,0x99,0x00,0x22,0x88,0x73,0xB0,0x71, \ +0x04,0x99,0xC8,0x73,0x01,0x99,0x90,0x48,0x02,0xF0,0xC9,0xF8,0x03,0x98,0x8F, \ +0x4B,0x18,0x40,0x73,0xD0,0x18,0x05,0xC0,0x68,0x00,0x90,0x00,0x98,0x40,0x09, \ +0x15,0xD3,0x85,0x49,0x08,0x7B,0x03,0x28,0x11,0xD1,0x04,0x20,0x08,0x73,0x00, \ +0x98,0x19,0x05,0xC9,0x68,0x87,0x4B,0x08,0x43,0x00,0x90,0x18,0x68,0x40,0x68, \ +0x40,0x78,0xC0,0x09,0x04,0xD3,0x00,0x98,0x40,0x08,0x01,0xD2,0x00,0x20,0x38, \ +0x62,0x00,0x98,0x80,0x08,0x27,0xD3,0x07,0x20,0xFF,0xF7,0x13,0xFF,0x00,0x20, \ +0x76,0x4A,0xB8,0x63,0x01,0x21,0x91,0x73,0x10,0x73,0x50,0x72,0x70,0x71,0xF8, \ +0x61,0xD0,0x62,0x39,0x62,0xFE,0xF7,0x98,0xFB,0x00,0x99,0x70,0x4A,0x08,0x43, \ +0x00,0x90,0x50,0x68,0x04,0x28,0x10,0xD1,0x01,0x20,0xFF,0xF7,0xFC,0xFE,0x6B, \ +0x48,0x81,0x7B,0x01,0x29,0x09,0xD1,0x00,0x7B,0x00,0x28,0x06,0xD1,0xFE,0xF7, \ +0xD9,0xFD,0x00,0x22,0x10,0x21,0x6E,0x48,0x02,0xF0,0x7E,0xF8,0x00,0x98,0x80, \ +0x09,0x70,0xD3,0x01,0x20,0x62,0x4A,0x02,0x24,0x90,0x73,0xD0,0x62,0x67,0x4B, \ +0x80,0x06,0xC1,0x6A,0x1B,0x68,0x59,0x61,0xC0,0x6A,0x40,0x00,0x40,0x08,0x90, \ +0x61,0x00,0x98,0xC0,0x08,0x02,0xD3,0x01,0x20,0xF8,0x61,0x2F,0xE0,0x00,0x20, \ +0x58,0x49,0xF8,0x61,0x49,0x7A,0x02,0x29,0x01,0xD1,0x56,0x49,0x48,0x72,0x01, \ +0x20,0xFE,0xF7,0xEE,0xF8,0xA8,0x79,0x01,0x28,0x16,0xD1,0xE9,0x79,0x00,0x29, \ +0x00,0xD0,0x00,0x20,0xE8,0x71,0x58,0x48,0xC1,0x78,0x00,0xE0,0x85,0xE0,0x89, \ +0x06,0x89,0x0E,0xC1,0x70,0xE9,0x79,0x01,0x29,0x03,0xD1,0xC1,0x78,0x40,0x23, \ +0x19,0x43,0xC1,0x70,0xC1,0x78,0x03,0x20,0xFE,0xF7,0x5E,0xF9,0x46,0x48,0x40, \ +0x68,0x04,0x28,0x06,0xD1,0xFE,0xF7,0x92,0xFD,0x00,0x22,0x10,0x21,0x4A,0x48, \ +0x02,0xF0,0x37,0xF8,0x04,0x99,0x40,0x4D,0x48,0x7B,0x00,0x28,0x2A,0xD0,0xF8, \ +0x69,0x00,0x28,0x27,0xD0,0xE8,0x6A,0x00,0x28,0x24,0xD0,0x28,0x68,0x00,0x28, \ +0x21,0xD1,0x06,0x20,0xFF,0xF7,0x96,0xFE,0x00,0x20,0x28,0x74,0x04,0x99,0x48, \ +0x73,0x05,0x9A,0x50,0x72,0x68,0x7B,0x01,0x28,0x02,0xD0,0x68,0x7B,0x03,0x28, \ +0x12,0xD1,0x68,0x7B,0x01,0x28,0x0D,0xD1,0x08,0x20,0xFF,0xF7,0x84,0xFE,0x01, \ +0x20,0xE8,0x72,0x00,0x20,0x68,0x73,0x00,0x22,0x10,0x21,0x31,0x48,0x02,0xF0, \ +0x0B,0xF8,0x02,0xE0,0x01,0xE0,0x00,0x20,0x68,0x73,0x29,0x49,0x08,0x7B,0x04, \ +0x28,0x38,0xD1,0xC8,0x6A,0x00,0x28,0x35,0xD0,0x08,0x6E,0x0D,0x1C,0x00,0x28, \ +0x02,0xD1,0x00,0x2C,0x00,0xD1,0x02,0x24,0x00,0x20,0x28,0x73,0xE8,0x62,0xF9, \ 
+0x69,0x01,0x29,0x17,0xD1,0x29,0x68,0x00,0x29,0x14,0xD1,0x39,0x6A,0x00,0x29, \ +0x18,0xD0,0x23,0x4B,0x25,0x4F,0x19,0x68,0x48,0x72,0x79,0x68,0xB9,0x60,0x38, \ +0x70,0xFE,0xF7,0x0E,0xFA,0x38,0x78,0x01,0x28,0x0C,0xD1,0x00,0x22,0x01,0x21, \ +0x1D,0x48,0x01,0xF0,0xDC,0xFF,0x06,0xE0,0x68,0x72,0x70,0x71,0xC1,0x20,0x18, \ +0x4B,0x28,0x60,0x19,0x68,0x48,0x72,0x68,0x68,0x04,0x28,0x06,0xD1,0xFE,0xF7, \ +0x25,0xFD,0x00,0x22,0x10,0x21,0x14,0x48,0x01,0xF0,0xCA,0xFF,0x01,0x2C,0x02, \ +0xD1,0x00,0xF0,0x66,0xF9,0x03,0xE0,0x02,0x2C,0x01,0xD1,0x00,0xF0,0x91,0xF9, \ +0x03,0x98,0x10,0x4B,0x18,0x40,0x01,0xD0,0x01,0xF0,0x49,0xFA,0x06,0xB0,0xF0, \ +0xBD,0x00,0x00,0x60,0x06,0x00,0x02,0x20,0x20,0x00,0x00,0xE0,0x05,0x00,0x02, \ +0x40,0x00,0x00,0x04,0x80,0x80,0x00,0x00,0x64,0x01,0x00,0x02,0xF4,0x01,0x00, \ +0x02,0xBC,0x07,0x00,0x02,0x40,0x40,0x00,0x00,0x10,0x00,0x00,0x02,0x3C,0x08, \ +0x00,0x02,0xF4,0x09,0x00,0x02,0xF8,0x01,0x00,0x02,0x08,0x08,0x00,0x00,0xF0, \ +0xB5,0x0F,0x20,0x00,0x06,0x06,0x89,0x8C,0x48,0x8D,0x4D,0x30,0x40,0xEF,0x1D, \ +0x29,0x37,0x00,0x28,0x73,0xD0,0x8B,0x49,0xCC,0x69,0x60,0x08,0x43,0xD3,0x88, \ +0x69,0x40,0x08,0x40,0xD3,0x88,0x69,0x40,0x08,0x40,0x00,0x88,0x61,0xE8,0x1F, \ +0x69,0x38,0x41,0x68,0x04,0x29,0x0A,0xD1,0xFE,0xF7,0xD5,0xFC,0x01,0x20,0xFE, \ +0xF7,0x0E,0xF8,0x00,0x22,0x10,0x21,0x80,0x48,0x01,0xF0,0x77,0xFF,0x2C,0xE0, \ +0x42,0x68,0x7F,0x49,0x02,0x2A,0x03,0xD1,0x09,0x68,0x01,0x22,0x4A,0x73,0x05, \ +0xE0,0x42,0x68,0x03,0x2A,0x02,0xD1,0x09,0x68,0x02,0x22,0x4A,0x73,0x79,0x4A, \ +0x01,0x21,0xD1,0x72,0x00,0x20,0xA8,0x71,0x90,0x72,0x51,0x60,0xB8,0x73,0xF8, \ +0x73,0x01,0x20,0xFD,0xF7,0xEE,0xFF,0x73,0x4A,0x10,0x6B,0x01,0x28,0x05,0xD1, \ +0x00,0x22,0x10,0x21,0x71,0x48,0x01,0xF0,0x53,0xFF,0x08,0xE0,0x6E,0x48,0x00, \ +0x6B,0x02,0x28,0x04,0xD1,0x00,0x22,0x10,0x21,0x6D,0x48,0x01,0xF0,0x49,0xFF, \ +0x67,0x48,0x80,0x69,0xC0,0x09,0x22,0xD3,0xE0,0x09,0x20,0xD3,0x64,0x48,0x40, \ +0x23,0x81,0x69,0x99,0x43,0x81,0x61,0x67,0x49,0xC8,0x69,0x8B,0x01,0x18,0x43, \ +0xC8,0x61,0x14,0x20,0xFD,0xF7,0x76,0xFF,0x63,0x49,0x64,0x4B,0xC8,0x69,0x18, \ +0x40,0xC8,0x61,0x5E,0x48,0x01,0x21,0x01,0x72,0x81,0x74,0x81,0x73,0x00,0x21, \ +0x01,0x73,0x80,0x30,0xC1,0x61,0x82,0x6B,0x01,0x2A,0x02,0xD1,0x81,0x63,0xFF, \ +0xF7,0x9E,0xF9,0x53,0x48,0x80,0x69,0x00,0x0A,0x21,0xD3,0x00,0xE0,0x72,0xE0, \ +0x20,0x0A,0x1D,0xD3,0x4F,0x48,0x80,0x23,0x81,0x69,0x99,0x43,0x81,0x61,0x50, \ +0x48,0x41,0x7B,0x01,0x29,0x0A,0xD1,0x00,0x21,0x81,0x62,0x01,0x21,0x81,0x74, \ +0xC0,0x7A,0x03,0x28,0x0D,0xD1,0x00,0x20,0xFF,0xF7,0x37,0xFB,0x09,0xE0,0x00, \ +0x21,0x01,0x65,0x01,0x21,0x81,0x74,0xC0,0x7A,0x03,0x28,0x02,0xD1,0x00,0x20, \ +0xFF,0xF7,0x2C,0xFB,0x20,0x09,0x03,0xD3,0x3F,0x48,0x80,0x69,0x00,0x09,0x05, \ +0xD2,0x3D,0x48,0x80,0x69,0xC0,0x08,0x03,0xD3,0xE0,0x08,0x01,0xD3,0x04,0xF0, \ +0x7C,0xF9,0x60,0x09,0x05,0xD3,0x38,0x48,0x80,0x69,0x40,0x09,0x01,0xD3,0x06, \ +0xF0,0x2C,0xF9,0xA0,0x09,0x3A,0xD3,0x34,0x48,0x81,0x69,0x89,0x09,0x36,0xD3, \ +0x81,0x69,0x20,0x23,0x99,0x43,0x81,0x61,0x38,0x48,0x39,0x4C,0x80,0x78,0x00, \ +0x28,0x12,0xD0,0x31,0x49,0xC8,0x1D,0x59,0x30,0x40,0x7B,0x04,0x28,0x0C,0xD0, \ +0x01,0x22,0x0A,0x74,0x03,0x20,0x48,0x73,0xFF,0xF7,0xF9,0xFC,0x21,0x88,0x89, \ +0x02,0x09,0x1A,0x06,0x20,0xFF,0xF7,0x1B,0xFD,0x1A,0xE0,0xE0,0x1D,0x29,0x30, \ +0x01,0x79,0x02,0x29,0x15,0xD1,0xE9,0x79,0x05,0x29,0x12,0xD1,0x40,0x79,0x02, \ +0x28,0x0F,0xD1,0xB8,0x79,0x01,0x28,0x04,0xD1,0x00,0x20,0x06,0xF0,0xBD,0xFC, \ +0x00,0x20,0xB8,0x71,0xB8,0x79,0x40,0x08,0x40,0x00,0xB8,0x71,0xB8,0x79,0x08, \ +0x23,0x98,0x43,0xB8,0x71,0x21,0x48,0x30,0x40,0x0C,0xD0,0x00,0x24,0x79,0x20, \ 
+0x40,0x05,0x04,0x83,0x84,0x82,0x1E,0x49,0x02,0x20,0x0C,0x70,0xFF,0xF7,0x90, \ +0xFC,0x1B,0x49,0x48,0x70,0x4C,0x80,0xFF,0x20,0x02,0x30,0x30,0x40,0x14,0xD0, \ +0x10,0x48,0x01,0x21,0xC1,0x72,0x00,0x24,0xAC,0x71,0x84,0x72,0x41,0x60,0x44, \ +0x73,0xBC,0x73,0xFC,0x73,0x01,0x20,0xFD,0xF7,0x1A,0xFF,0x12,0x48,0x00,0x22, \ +0x04,0x83,0x84,0x82,0x08,0x48,0x10,0x21,0x01,0xF0,0x80,0xFE,0xF0,0xBD,0x00, \ +0x00,0x10,0x10,0x00,0x00,0x50,0x06,0x00,0x02,0x80,0x00,0x00,0x04,0x3C,0x08, \ +0x00,0x02,0x04,0x00,0x00,0x02,0xE0,0x05,0x00,0x02,0xBC,0x07,0x00,0x02,0xDC, \ +0x07,0x00,0x02,0x40,0x00,0x00,0x04,0xFF,0xEF,0x00,0x00,0x1C,0x01,0x00,0x02, \ +0x98,0x00,0x00,0x02,0x02,0x02,0x00,0x00,0x2C,0x01,0x00,0x02,0x20,0x00,0x20, \ +0x0F,0x80,0xB5,0x16,0x4F,0x00,0x20,0x38,0x72,0xB9,0x7C,0x02,0x20,0x01,0x29, \ +0x1C,0xD0,0x04,0x29,0x19,0xD1,0xB8,0x74,0x08,0x20,0xFF,0xF7,0xC2,0xFC,0xF8, \ +0x69,0xB9,0x69,0x40,0x1A,0x40,0x00,0x79,0x69,0x40,0x08,0x81,0x42,0x0D,0xD2, \ +0x79,0x69,0x41,0x1A,0x14,0x20,0x02,0xF0,0xB9,0xFB,0x79,0x7B,0x01,0x29,0x08, \ +0xD1,0xB9,0x6A,0x81,0x42,0x02,0xD3,0xB9,0x6A,0x08,0x1A,0xB8,0x62,0x80,0xBD, \ +0xB8,0x74,0x80,0xBD,0x39,0x6D,0x81,0x42,0xF9,0xD3,0x39,0x6D,0x08,0x1A,0x38, \ +0x65,0x80,0xBD,0xE0,0x05,0x00,0x02,0x00,0xB5,0x09,0x48,0x01,0x21,0x01,0x72, \ +0x81,0x7B,0x01,0x29,0x0A,0xD1,0x01,0x7B,0x00,0x29,0x07,0xD1,0x41,0x7A,0x00, \ +0x29,0x04,0xD1,0x80,0x7C,0x05,0x28,0x01,0xD0,0xFF,0xF7,0x9A,0xF8,0x00,0xBD, \ +0x00,0x00,0xE0,0x05,0x00,0x02,0xB0,0xB5,0x21,0x4C,0x21,0x48,0xE1,0x1D,0x59, \ +0x31,0x8A,0x7B,0x80,0x7A,0x20,0x4F,0x82,0x42,0x02,0xDA,0x88,0x7B,0x38,0x70, \ +0x00,0xE0,0x38,0x70,0xFF,0xF7,0xD0,0xFB,0x38,0x78,0x03,0x28,0x03,0xD1,0x01, \ +0x20,0xFD,0xF7,0xD8,0xFE,0x02,0xE0,0x00,0x20,0xFD,0xF7,0xD4,0xFE,0x21,0x1C, \ +0x64,0x7A,0x00,0x23,0x16,0x4A,0x16,0x48,0x01,0x2C,0x0E,0xD0,0x02,0x2C,0x10, \ +0xD1,0xC4,0x24,0x04,0x70,0x14,0x4C,0xE5,0x88,0x3C,0x78,0x64,0x00,0x14,0x5B, \ +0x2C,0x19,0x45,0x88,0x2C,0x1B,0x44,0x80,0x43,0x70,0x03,0xE0,0x43,0x80,0xD4, \ +0x24,0x04,0x70,0x43,0x70,0x0A,0x23,0xCB,0x67,0x3B,0x78,0x5B,0x00,0xD2,0x5A, \ +0x0B,0x4B,0x1B,0x88,0xD2,0x1A,0xCB,0x1D,0x79,0x33,0x1A,0x60,0x88,0x67,0x01, \ +0x20,0x70,0x31,0x48,0x71,0xB0,0xBD,0xE0,0x05,0x00,0x02,0x1C,0x01,0x00,0x02, \ +0x69,0x01,0x00,0x02,0x50,0x01,0x00,0x02,0x74,0x09,0x00,0x02,0x18,0x00,0x00, \ +0x02,0x64,0x01,0x00,0x02,0xB0,0xB4,0x0A,0x4B,0x00,0x24,0x99,0x42,0x01,0xD8, \ +0x00,0x29,0x02,0xD1,0x20,0x1C,0xB0,0xBC,0xF7,0x46,0x01,0x27,0xBF,0x06,0x3D, \ +0x69,0xAB,0x08,0x01,0xD3,0x20,0x1C,0xF6,0xE7,0x03,0xC7,0x08,0x3F,0x3A,0x61, \ +0x01,0x20,0xF1,0xE7,0x0E,0x06,0x00,0x00,0xF0,0xB5,0x84,0xB0,0x5C,0x48,0x5D, \ +0x4C,0x03,0x90,0xE0,0x1D,0x00,0x26,0x29,0x30,0x02,0x90,0x5B,0x4F,0x5B,0x4D, \ +0x00,0x22,0x00,0x92,0x01,0x22,0x01,0x21,0x38,0x1C,0x01,0xAB,0x01,0xF0,0x97, \ +0xFE,0x01,0x98,0x40,0x08,0x03,0xD3,0x00,0x20,0x01,0x90,0x55,0x48,0xC6,0x73, \ +0x00,0xF0,0xDA,0xFD,0x00,0xF0,0x70,0xF9,0x28,0x78,0x00,0x28,0x04,0xD1,0xE0, \ +0x79,0x05,0x28,0x01,0xD0,0x06,0xF0,0x84,0xF9,0x4E,0x48,0xC0,0x7B,0x00,0x28, \ +0xE0,0xD1,0x4D,0x48,0x00,0x78,0x02,0x28,0x01,0xD1,0x06,0xF0,0xC4,0xF9,0x00, \ +0xF0,0xFE,0xF8,0x07,0x1C,0x28,0x78,0x01,0x28,0x0A,0xD1,0x03,0x03,0x9F,0x42, \ +0x03,0xD1,0x47,0x49,0x10,0x20,0x09,0x68,0x08,0x73,0x43,0x49,0x30,0x1C,0xC8, \ +0x73,0xC7,0xE7,0x00,0x2F,0x31,0xD0,0x03,0x98,0x40,0x79,0x02,0x28,0x27,0xD1, \ +0xE0,0x79,0x05,0x28,0x24,0xD1,0x0F,0x25,0x2D,0x06,0xA8,0x88,0x3E,0x4B,0x18, \ +0x40,0xA8,0x80,0xA8,0x89,0x3D,0x4B,0x18,0x40,0xA8,0x81,0x02,0x98,0x80,0x79, \ +0x01,0x28,0x0E,0xD1,0x02,0x98,0x80,0x79,0x02,0x99,0x40,0x08,0x40,0x00,0x88, \ 
+0x71,0x02,0x99,0x08,0x23,0x88,0x79,0x02,0x99,0x18,0x43,0x88,0x71,0x01,0x20, \ +0x06,0xF0,0x53,0xFB,0xA8,0x89,0x32,0x4B,0x18,0x43,0xA8,0x81,0xA8,0x88,0x31, \ +0x4B,0x18,0x43,0xA8,0x80,0x39,0x1C,0x00,0x22,0x30,0x48,0x01,0xF0,0x3C,0xFD, \ +0x93,0xE7,0x03,0x98,0x40,0x79,0x02,0x28,0x8F,0xD1,0xE0,0x79,0x05,0x28,0x8C, \ +0xD1,0x0F,0x25,0x2D,0x06,0xA8,0x88,0x25,0x4B,0x21,0x49,0x18,0x40,0xA8,0x80, \ +0xA8,0x89,0x23,0x4B,0x18,0x40,0xA8,0x81,0x02,0x98,0x80,0x79,0x40,0x08,0x80, \ +0x07,0x26,0xD1,0x02,0x98,0x80,0x79,0x00,0x09,0x22,0xD3,0x1B,0x48,0x00,0x68, \ +0x00,0x7B,0x00,0x0A,0x1D,0xD2,0x02,0x98,0xC0,0x79,0x00,0x28,0x05,0xD0,0x02, \ +0x99,0x01,0x20,0xCE,0x71,0x02,0x99,0x88,0x72,0x13,0xE0,0x01,0x20,0x80,0x06, \ +0x1A,0x4A,0x40,0x6A,0x18,0x4B,0x52,0x68,0xC0,0x18,0x90,0x42,0x0A,0xD2,0x48, \ +0x7A,0x00,0x28,0x07,0xD1,0x88,0x7B,0x01,0x28,0x04,0xD1,0x08,0x7B,0x00,0x28, \ +0x01,0xD1,0x06,0xF0,0x43,0xFB,0xA8,0x89,0x0C,0x4B,0x18,0x43,0xA8,0x81,0xA8, \ +0x88,0x0B,0x4B,0x18,0x43,0xA8,0x80,0x4C,0xE7,0xC8,0x00,0x00,0x02,0x50,0x06, \ +0x00,0x02,0x1C,0x08,0x00,0x02,0xB0,0x01,0x00,0x02,0xE0,0x05,0x00,0x02,0xB1, \ +0x01,0x00,0x02,0x04,0x00,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF, \ +0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0xFC,0x07,0x00,0x02,0xB8,0x0B,0x00, \ +0x00,0x80,0x00,0x00,0x04,0xB0,0xB5,0x21,0x4D,0x01,0x20,0x21,0x4C,0xE8,0x73, \ +0x20,0x68,0x00,0xF0,0xB6,0xFF,0x20,0x68,0x00,0x21,0x47,0x68,0x81,0x73,0x38, \ +0x78,0x08,0x28,0x1B,0xD1,0xE8,0x1D,0x59,0x30,0x41,0x7B,0x01,0x29,0x06,0xD1, \ +0xF9,0x1D,0x09,0x31,0x06,0x22,0x18,0x48,0xFD,0xF7,0x60,0xFE,0x0F,0xE0,0x40, \ +0x7B,0x02,0x28,0x0C,0xD1,0x15,0x48,0x40,0x79,0x02,0x28,0x08,0xD1,0xE8,0x1D, \ +0x69,0x30,0xC0,0x79,0x05,0x28,0x03,0xD1,0x78,0x78,0x10,0x23,0x18,0x43,0x78, \ +0x70,0xF8,0x1D,0x0F,0x30,0xFD,0xF7,0xA0,0xFE,0x38,0x79,0x40,0x08,0x01,0xD2, \ +0x00,0x22,0x00,0xE0,0x01,0x22,0x20,0x68,0x00,0x23,0x81,0x7D,0xB8,0x1C,0xFD, \ +0xF7,0xA3,0xFE,0x38,0x1C,0x05,0xF0,0x96,0xFE,0x21,0x68,0x80,0x20,0x08,0x73, \ +0x40,0x01,0xB0,0xBD,0x00,0x00,0xE0,0x05,0x00,0x02,0x04,0x00,0x00,0x02,0x14, \ +0x01,0x00,0x02,0xC8,0x00,0x00,0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x99,0x32, \ +0x51,0x71,0x01,0x21,0xC1,0x73,0x08,0x02,0xF7,0x46,0x00,0x00,0xE0,0x05,0x00, \ +0x02,0x90,0xB5,0x29,0x49,0x00,0x27,0xCB,0x1D,0x69,0x33,0xDA,0x79,0xC8,0x1D, \ +0x59,0x30,0x01,0x2A,0x00,0xD0,0xC7,0x73,0xCA,0x1D,0x99,0x32,0x94,0x7A,0x00, \ +0x2C,0x03,0xD0,0x97,0x72,0x01,0x20,0xC8,0x73,0x90,0xBD,0x52,0x79,0x00,0x2A, \ +0x02,0xD0,0xFF,0xF7,0xDB,0xFF,0x90,0xBD,0x02,0x7A,0x00,0x2A,0x03,0xD0,0xC7, \ +0x73,0x03,0xF0,0xAC,0xFE,0x90,0xBD,0xC2,0x7B,0x00,0x2A,0x0A,0xD0,0xFD,0xF7, \ +0xE2,0xFC,0x17,0x49,0x18,0x4B,0x49,0x68,0xC9,0x18,0x88,0x42,0x25,0xD9,0x03, \ +0xF0,0xD4,0xFE,0x90,0xBD,0x80,0x7A,0x00,0x28,0x02,0xD0,0x03,0xF0,0xB8,0xFE, \ +0x90,0xBD,0x08,0x7C,0x00,0x28,0x02,0xD0,0x03,0xF0,0xBE,0xFE,0x90,0xBD,0xD8, \ +0x79,0x05,0x28,0x13,0xD1,0x0D,0x48,0x10,0x23,0x00,0x78,0x0D,0x4C,0x18,0x40, \ +0x06,0xD0,0x20,0x68,0x00,0x7B,0xC0,0x09,0x02,0xD3,0xFF,0xF7,0x5E,0xFF,0x90, \ +0xBD,0x20,0x68,0x01,0x7B,0x10,0x29,0x02,0xD1,0x00,0xF0,0x39,0xF9,0x20,0x60, \ +0x38,0x1C,0x90,0xBD,0xE0,0x05,0x00,0x02,0x04,0x02,0x00,0x02,0x10,0x27,0x00, \ +0x00,0x7B,0x01,0x00,0x02,0x04,0x00,0x00,0x02,0x00,0xB5,0x1D,0x48,0x01,0x78, \ +0x00,0x29,0x13,0xD0,0x41,0x78,0x00,0x29,0x10,0xD1,0x01,0x78,0x0D,0x29,0x2B, \ +0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x27,0x07,0x27,0x0A, \ +0x0D,0x13,0x16,0x19,0x27,0x27,0x10,0x21,0x24,0x00,0x03,0xF0,0x67,0xFA,0x00, \ +0xBD,0x03,0xF0,0xD8,0xFA,0x00,0xBD,0x03,0xF0,0xBF,0xFB,0x00,0xBD,0x04,0xF0, \ 
+0xDC,0xFA,0x00,0xBD,0x03,0xF0,0x5F,0xFC,0x00,0xBD,0x03,0xF0,0xD8,0xFE,0x00, \ +0xBD,0x0A,0x49,0x02,0x20,0x08,0x70,0x08,0x21,0x07,0x20,0x00,0xF0,0x10,0xF8, \ +0x00,0xBD,0x03,0xF0,0x13,0xFA,0x00,0xBD,0x03,0xF0,0xF4,0xFD,0x00,0xBD,0x00, \ +0x78,0x02,0x21,0x00,0xF0,0x05,0xF8,0x00,0xBD,0xA8,0x0A,0x00,0x02,0xB1,0x01, \ +0x00,0x02,0x04,0x4A,0x10,0x60,0x04,0x48,0x01,0x60,0x04,0x49,0x00,0x20,0x08, \ +0x70,0x48,0x70,0xF7,0x46,0x00,0x00,0x94,0x02,0x00,0x02,0x98,0x02,0x00,0x02, \ +0xA8,0x0A,0x00,0x02,0xF0,0xB5,0x3B,0x48,0x47,0x68,0xFD,0xF7,0x55,0xFC,0x02, \ +0x02,0x39,0x4D,0x12,0x0A,0x39,0x49,0x2A,0x60,0x4B,0x78,0x39,0x48,0x03,0x70, \ +0xCB,0x1D,0x39,0x33,0x1B,0x78,0x03,0x24,0x64,0x06,0x01,0x2B,0x17,0xD1,0x06, \ +0x78,0x0D,0x23,0x73,0x43,0x5B,0x18,0x1B,0x7B,0x1B,0x06,0x1A,0x43,0x22,0x60, \ +0x02,0x78,0x0D,0x23,0x5A,0x43,0x51,0x18,0x8B,0x7B,0x4A,0x7B,0x1B,0x02,0x1A, \ +0x43,0xCB,0x7B,0x09,0x7C,0x1B,0x04,0x1A,0x43,0x09,0x06,0x11,0x43,0x61,0x60, \ +0x36,0xE0,0x02,0x2B,0x34,0xD1,0x06,0x78,0x0D,0x23,0x73,0x43,0x5B,0x18,0x1B, \ +0x7B,0x1B,0x06,0x1A,0x43,0x22,0x60,0x02,0x78,0x0D,0x23,0x5A,0x43,0x52,0x18, \ +0x96,0x7B,0x53,0x7B,0x36,0x02,0x33,0x43,0xD6,0x7B,0x12,0x7C,0x36,0x04,0x33, \ +0x43,0x12,0x06,0x1A,0x43,0x62,0x60,0x02,0x78,0x0D,0x23,0x5A,0x43,0x52,0x18, \ +0x96,0x7C,0x53,0x7C,0x36,0x02,0x33,0x43,0xD6,0x7C,0x12,0x7D,0x36,0x04,0x33, \ +0x43,0x12,0x06,0x1A,0x43,0x62,0x61,0x02,0x78,0x0D,0x23,0x5A,0x43,0x51,0x18, \ +0x8B,0x7D,0x4A,0x7D,0x1B,0x02,0x1A,0x43,0xCB,0x7D,0x09,0x7E,0x1B,0x04,0x1A, \ +0x43,0x09,0x06,0x11,0x43,0xA1,0x61,0x00,0x78,0x29,0x68,0x0D,0x4A,0x80,0x07, \ +0x01,0x43,0x29,0x60,0x00,0x20,0x3B,0x5C,0x13,0x54,0x01,0x30,0x18,0x28,0xFA, \ +0xD3,0x11,0x76,0x08,0x0A,0x50,0x76,0x08,0x0C,0x90,0x76,0x08,0x0E,0xD0,0x76, \ +0xF0,0xBD,0x00,0x00,0x60,0x06,0x00,0x02,0x74,0x01,0x00,0x02,0x30,0x00,0x00, \ +0x02,0x78,0x01,0x00,0x02,0x74,0x0A,0x00,0x02,0x80,0xB4,0x10,0x4A,0x11,0x68, \ +0x01,0x31,0x1E,0x29,0x00,0xD1,0x00,0x21,0x0E,0x4F,0x0E,0x4B,0x4F,0x43,0xFB, \ +0x18,0x1F,0x7B,0x00,0x2F,0x11,0xD1,0x11,0x60,0x0C,0x49,0x03,0x22,0x19,0x60, \ +0xD9,0x1D,0x15,0x31,0x59,0x60,0x08,0x39,0x99,0x60,0x00,0x21,0x19,0x73,0x99, \ +0x73,0x9A,0x75,0x99,0x82,0x03,0x60,0x40,0x21,0x01,0x73,0x18,0x1C,0x80,0xBC, \ +0xF7,0x46,0x00,0x00,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0x11,0x00,0x02,0x00, \ +0x00,0x00,0x80,0x80,0xB4,0x12,0x4A,0x11,0x68,0x01,0x31,0x14,0x29,0x00,0xD1, \ +0x00,0x21,0x10,0x4F,0x10,0x4B,0x4F,0x43,0xFB,0x18,0x1F,0x7A,0x00,0x2F,0x15, \ +0xD1,0x11,0x60,0x0E,0x49,0x01,0x22,0x19,0x60,0xD9,0x1D,0x11,0x31,0x59,0x60, \ +0x9A,0x81,0x00,0x21,0x19,0x72,0x0A,0x4F,0xD9,0x73,0xBF,0x79,0x01,0x2F,0x01, \ +0xD1,0xC2,0x73,0x00,0xE0,0xC1,0x73,0x80,0x21,0x03,0x60,0x01,0x72,0x18,0x1C, \ +0x80,0xBC,0xF7,0x46,0x0C,0x00,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0xDA,0x00, \ +0x02,0x00,0x00,0x00,0x80,0xC8,0x00,0x00,0x02,0x01,0x1C,0x00,0x68,0x02,0x08, \ +0x01,0xD3,0x08,0x1C,0xF7,0x46,0x00,0x22,0x0A,0x73,0xF7,0x46,0x01,0x68,0x09, \ +0x08,0x02,0xD3,0x40,0x21,0x01,0x72,0xF7,0x46,0x04,0x4A,0x01,0x68,0x12,0x68, \ +0x00,0x2A,0xF9,0xD1,0x02,0x72,0x08,0x1C,0xF7,0x46,0x00,0x00,0xA0,0x02,0x00, \ +0x02,0x0B,0x49,0x01,0x20,0x48,0x63,0x00,0x20,0x48,0x64,0xC8,0x63,0x48,0x66, \ +0x08,0x66,0x48,0x65,0xCA,0x1D,0x59,0x32,0x88,0x65,0x10,0x72,0xC8,0x65,0x50, \ +0x72,0xCA,0x1D,0x39,0x32,0x10,0x82,0x50,0x82,0x08,0x64,0x7C,0x31,0xC8,0x61, \ +0xF7,0x46,0x00,0x00,0x0C,0x02,0x00,0x02,0x80,0xB5,0x07,0x27,0x7F,0x06,0xF8, \ +0x69,0x40,0x23,0x18,0x43,0xF8,0x61,0x14,0x48,0xFD,0xF7,0xF8,0xFA,0xF8,0x69, \ +0x20,0x23,0x18,0x43,0xF8,0x61,0xF8,0x69,0x1B,0x01,0x18,0x43,0xF8,0x61,0x00, \ 
+0x20,0xFF,0x21,0x91,0x31,0x01,0x30,0x88,0x42,0xFC,0xD3,0xF8,0x69,0x0C,0x4B, \ +0x18,0x40,0xF8,0x61,0x00,0x20,0x7D,0x21,0x49,0x01,0x01,0x30,0x88,0x42,0xFC, \ +0xD3,0xFF,0xF7,0xC2,0xFF,0xFD,0xF7,0xE4,0xFA,0x00,0xF0,0x0E,0xF8,0x05,0x49, \ +0x0D,0x20,0x00,0x06,0x01,0x81,0xFF,0x21,0x41,0x31,0x81,0x80,0x80,0xBD,0x50, \ +0xC3,0x00,0x00,0xFF,0xFD,0x00,0x00,0xFF,0x0F,0x00,0x00,0x90,0xB4,0x0E,0x48, \ +0x00,0x21,0x01,0x70,0x0D,0x48,0x80,0x27,0x07,0x73,0x01,0x23,0x03,0x72,0x82, \ +0x22,0x02,0x71,0x07,0x22,0x02,0x70,0x0A,0x48,0x05,0x24,0x04,0x73,0x86,0x24, \ +0x04,0x72,0x02,0x71,0x08,0x48,0x24,0x22,0x02,0x71,0x07,0x72,0x03,0x73,0x06, \ +0x48,0x01,0x71,0x01,0x73,0x90,0xBC,0xF7,0x46,0x00,0x00,0x10,0x00,0x00,0x0D, \ +0xB0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xE0,0x03,0x00, \ +0x0D,0xF0,0xB5,0x92,0x48,0x08,0x22,0x01,0x7B,0x91,0x4C,0x0A,0x40,0x00,0x25, \ +0x01,0x27,0x00,0x2A,0x02,0xD0,0x05,0x73,0x27,0x71,0xF0,0xBD,0x04,0x22,0x0A, \ +0x40,0x8D,0x4E,0x66,0xD0,0x8D,0x49,0x09,0x7B,0x0A,0x29,0x22,0xD1,0x8C,0x4A, \ +0x00,0x21,0x15,0x7B,0x0B,0x1C,0x01,0x31,0x08,0x29,0xF5,0x54,0xF9,0xD1,0x86, \ +0x4E,0xF2,0x78,0xB1,0x78,0x73,0x79,0x12,0x02,0x0A,0x43,0x31,0x79,0x1B,0x02, \ +0xF5,0x79,0x19,0x43,0xB3,0x79,0x2D,0x02,0x1D,0x43,0x33,0x78,0x76,0x78,0x1B, \ +0x02,0x1E,0x43,0x80,0x4B,0x9E,0x42,0x09,0xD1,0x80,0x48,0x43,0x6B,0x10,0x1C, \ +0x2A,0x1C,0x04,0xF0,0x1B,0xF8,0x47,0xE0,0x05,0x73,0x27,0x71,0xF0,0xBD,0x7C, \ +0x4B,0x9E,0x42,0x06,0xD1,0x79,0x48,0x42,0x6B,0x08,0x1C,0x29,0x1C,0x00,0xF0, \ +0xF0,0xFA,0x3A,0xE0,0x78,0x4B,0x9E,0x42,0x06,0xD1,0x70,0x4E,0x28,0x1C,0xB2, \ +0x78,0xF1,0x78,0x00,0xF0,0xC6,0xFB,0x30,0xE0,0x05,0x2E,0x04,0xD1,0x10,0x1C, \ +0x2A,0x1C,0x00,0xF0,0x63,0xFB,0x29,0xE0,0x09,0x2E,0x04,0xD1,0x10,0x1C,0x2A, \ +0x1C,0x00,0xF0,0x80,0xFB,0x22,0xE0,0x6D,0x4B,0x9E,0x42,0x06,0xD1,0x68,0x48, \ +0x43,0x6B,0x10,0x1C,0x2A,0x1C,0x04,0xF0,0x04,0xF8,0x18,0xE0,0x69,0x4B,0x9E, \ +0x42,0x06,0xD1,0x63,0x48,0x42,0x6B,0x08,0x1C,0x29,0x1C,0x00,0xF0,0x00,0xFC, \ +0x0E,0xE0,0x65,0x4B,0x5B,0x69,0x00,0x2B,0x03,0xD1,0x20,0x21,0x01,0x73,0x07, \ +0xE0,0x08,0xE0,0x2B,0x1C,0x0D,0x1C,0x11,0x1C,0x30,0x1C,0x2A,0x1C,0x04,0xF0, \ +0x3A,0xF8,0x27,0x71,0xF0,0xBD,0xCB,0x07,0xDB,0x0F,0xE0,0x22,0x00,0x2B,0x66, \ +0xD0,0x51,0x4E,0x31,0x78,0x73,0x78,0x09,0x02,0x19,0x43,0x05,0x29,0x07,0xD1, \ +0x4D,0x48,0x81,0x78,0x50,0x48,0x40,0x6B,0x00,0xF0,0xAA,0xFB,0x27,0x71,0xF0, \ +0xBD,0x4F,0x4B,0x99,0x42,0x08,0xD1,0x80,0x21,0x01,0x73,0x47,0x48,0x81,0x78, \ +0xC0,0x78,0x00,0xF0,0x4C,0xFA,0x27,0x71,0xF0,0xBD,0x46,0x4B,0x99,0x42,0x07, \ +0xD1,0x20,0x21,0x01,0x73,0x41,0x48,0x27,0x71,0x00,0x79,0x03,0xF0,0xE2,0xFF, \ +0xF0,0xBD,0x43,0x4B,0x47,0x4E,0x99,0x42,0x22,0xD1,0x20,0x21,0x01,0x73,0x3B, \ +0x48,0x27,0x71,0x81,0x78,0x02,0x29,0x03,0xD1,0xC1,0x78,0x08,0x29,0x00,0xD1, \ +0x4F,0xE7,0x81,0x78,0x01,0x29,0x0F,0xD1,0xF0,0x7A,0x02,0x28,0x02,0xD0,0x02, \ +0xF0,0xBE,0xF8,0xF0,0xBD,0x3D,0x48,0x3D,0x49,0x05,0x70,0x0F,0x20,0x00,0x06, \ +0x81,0x80,0x38,0x4B,0x85,0x81,0xDF,0x61,0xF0,0xBD,0xC1,0x78,0x80,0x78,0x03, \ +0xF0,0xFD,0xFE,0xF0,0xBD,0x32,0x4B,0x99,0x42,0x0B,0xD1,0x29,0x4E,0xB1,0x78, \ +0x01,0x29,0x01,0xD1,0x02,0x73,0x03,0xE0,0x80,0x21,0x01,0x73,0x03,0xF0,0x2F, \ +0xFF,0x27,0x71,0xF0,0xBD,0x09,0x29,0x09,0xD1,0x20,0x21,0x01,0x73,0x25,0x48, \ +0x27,0x71,0x40,0x6B,0x03,0x28,0xCE,0xD1,0x77,0x72,0xF0,0xBD,0x25,0xE0,0x24, \ +0x4B,0x99,0x42,0x02,0xD1,0x02,0x73,0x27,0x71,0xF0,0xBD,0x27,0x4B,0x99,0x42, \ +0x0E,0xD0,0x26,0x4B,0x99,0x42,0x0B,0xD0,0x81,0x23,0x1B,0x02,0x99,0x42,0x07, \ +0xD0,0x41,0x23,0x5B,0x02,0x99,0x42,0x03,0xD0,0x01,0x23,0xDB,0x03,0x99,0x42, \ 
+0x02,0xD1,0x02,0x73,0x27,0x71,0xF0,0xBD,0xFF,0x23,0x0C,0x33,0x99,0x42,0x02, \ +0xD0,0x1C,0x4B,0x99,0x42,0xA9,0xD1,0x20,0x21,0x01,0x73,0x27,0x71,0xF0,0xBD, \ +0x89,0x08,0xA3,0xD3,0x31,0x78,0x73,0x78,0x09,0x02,0x19,0x43,0x0C,0x4B,0x99, \ +0x42,0x03,0xD1,0xB0,0x78,0x00,0xF0,0x61,0xFA,0x00,0xE0,0x02,0x73,0x27,0x71, \ +0xF0,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x54,0x02,0x00, \ +0x02,0xF0,0x02,0x00,0x0D,0x30,0x03,0x00,0x0D,0x01,0x02,0x00,0x00,0x0C,0x02, \ +0x00,0x02,0x0E,0x40,0x00,0x00,0x06,0x80,0x00,0x00,0x22,0xC1,0x00,0x00,0x33, \ +0xC1,0x00,0x00,0x88,0x02,0x00,0x02,0x6C,0x02,0x00,0x02,0x7B,0x01,0x00,0x02, \ +0x08,0x08,0x00,0x00,0x08,0x80,0x00,0x00,0x0A,0x81,0x00,0x00,0x03,0x02,0x00, \ +0x00,0xF0,0xB5,0x51,0x4D,0x28,0x79,0x80,0x08,0x55,0xD3,0x50,0x4C,0x60,0x79, \ +0x02,0x28,0x01,0xD1,0x00,0xF0,0x35,0xFE,0x4E,0x48,0x4E,0x4A,0x00,0x79,0x4E, \ +0x4B,0xD0,0x62,0x0F,0x20,0x00,0x06,0x81,0x88,0x19,0x40,0x81,0x80,0x81,0x89, \ +0x4B,0x4B,0x19,0x40,0x81,0x81,0x4B,0x49,0x04,0x23,0x0B,0x71,0x00,0x26,0x0E, \ +0x71,0x81,0x89,0x49,0x4B,0x19,0x43,0x81,0x81,0x81,0x88,0x48,0x4B,0x19,0x43, \ +0x81,0x80,0xD0,0x1F,0x75,0x38,0x01,0x6E,0x40,0x29,0x06,0xD2,0x01,0x6E,0x00, \ +0x29,0x03,0xD0,0x01,0x6E,0x02,0x31,0x91,0x62,0x09,0xE0,0xD1,0x6A,0x40,0x29, \ +0x04,0xD2,0x03,0x6E,0x00,0x2B,0x01,0xD1,0x91,0x62,0x01,0xE0,0x40,0x21,0x91, \ +0x62,0x43,0x6E,0x91,0x6A,0x5F,0x18,0x19,0x23,0x9B,0x01,0x9F,0x42,0x03,0xD9, \ +0x46,0x66,0x06,0x66,0x01,0x23,0x53,0x62,0x37,0x4F,0x37,0x48,0x3B,0x68,0x9B, \ +0x68,0x40,0x6E,0x18,0x18,0xCD,0x22,0x00,0xF0,0xC4,0xFA,0x2E,0x71,0x60,0x79, \ +0x02,0x28,0x01,0xD1,0x00,0xF0,0xC4,0xFD,0x29,0x4C,0x60,0x6A,0x01,0x28,0x01, \ +0xD1,0x66,0x62,0xF0,0xBD,0x2D,0x4D,0x68,0x6E,0x00,0x28,0x2E,0xD1,0x38,0x68, \ +0x2C,0x4B,0x81,0x8A,0xC2,0x7D,0x08,0x31,0x89,0x18,0x29,0x66,0x29,0x6E,0x99, \ +0x42,0x02,0xD8,0x29,0x6E,0x00,0x29,0x02,0xD1,0x6E,0x66,0x2E,0x66,0xF0,0xBD, \ +0x81,0x7D,0x03,0x29,0x01,0xDD,0x03,0x21,0x81,0x75,0x38,0x68,0x41,0x68,0x08, \ +0x78,0x08,0x28,0x08,0xD0,0x00,0x28,0x06,0xD0,0x20,0x28,0x04,0xD0,0xB0,0x28, \ +0x02,0xD0,0x6E,0x66,0x2E,0x66,0xF0,0xBD,0xC8,0x1D,0x03,0x30,0x06,0x22,0x1A, \ +0x49,0xFD,0xF7,0x23,0xFA,0x20,0x62,0x01,0x28,0x02,0xD1,0x6E,0x66,0x2E,0x66, \ +0xF0,0xBD,0x28,0x6E,0x40,0x28,0x06,0xD9,0x28,0x6E,0x40,0x38,0x28,0x66,0x68, \ +0x6E,0x40,0x30,0x68,0x66,0xF0,0xBD,0x68,0x6E,0x29,0x6E,0x40,0x18,0x68,0x66, \ +0x2E,0x66,0x6E,0x66,0x38,0x68,0xFF,0xF7,0x28,0xFD,0x38,0x60,0xF0,0xBD,0x70, \ +0x03,0x00,0x0D,0xC8,0x00,0x00,0x02,0xF0,0x02,0x00,0x0D,0x88,0x02,0x00,0x02, \ +0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x60,0x02,0x00,0x0D,0x13,0x13,0x00, \ +0x00,0xE8,0xE8,0x00,0x00,0x08,0x00,0x00,0x02,0x0C,0x02,0x00,0x02,0x32,0x06, \ +0x00,0x00,0x74,0x00,0x00,0x02,0x90,0xB5,0x1C,0x4F,0x38,0x7A,0x40,0x08,0x1D, \ +0xD3,0x1B,0x48,0x40,0x79,0x02,0x28,0x01,0xD1,0x00,0xF0,0x77,0xFD,0x19,0x4A, \ +0x00,0x20,0x38,0x72,0xD3,0x6D,0xD1,0x1D,0x59,0x31,0x01,0x2B,0x18,0xD1,0xCB, \ +0x1D,0x15,0x33,0xD0,0x65,0x9B,0x69,0x00,0x2B,0x01,0xD1,0x10,0x23,0x3B,0x72, \ +0x17,0x1C,0x92,0x6D,0x01,0x24,0x00,0x2A,0x03,0xD0,0x00,0xF0,0x20,0xF8,0xFC, \ +0x65,0x90,0xBD,0x0A,0x7A,0x01,0x2A,0x02,0xD1,0x08,0x72,0xFC,0x65,0x90,0xBD, \ +0xF8,0x65,0x90,0xBD,0x50,0x65,0x90,0x65,0x08,0x72,0x08,0x4F,0xD0,0x65,0x38, \ +0x68,0x01,0x7A,0x10,0x29,0xED,0xD1,0xFF,0xF7,0x38,0xFD,0x38,0x60,0x90,0xBD, \ +0x00,0x00,0x60,0x03,0x00,0x0D,0xC8,0x00,0x00,0x02,0x0C,0x02,0x00,0x02,0x14, \ +0x00,0x00,0x02,0xB0,0xB4,0x0F,0x4A,0x90,0x6D,0x40,0x28,0x01,0xD3,0x40,0x20, \ +0x00,0xE0,0x90,0x6D,0x00,0x21,0x00,0x28,0x53,0x6D,0x0A,0xDD,0x0A,0x4C,0x0B, \ 
+0x4F,0xA5,0x69,0x00,0x2D,0x05,0xD1,0x1D,0x78,0x01,0x33,0x01,0x31,0x81,0x42, \ +0x3D,0x72,0xF6,0xDB,0x91,0x6D,0x09,0x1A,0x91,0x65,0x51,0x6D,0x08,0x18,0x50, \ +0x65,0xB0,0xBC,0xF7,0x46,0x00,0x00,0x0C,0x02,0x00,0x02,0x88,0x02,0x00,0x02, \ +0x20,0x03,0x00,0x0D,0xF0,0xB5,0x07,0x1C,0x17,0x48,0x0C,0x1C,0x40,0x79,0x02, \ +0x28,0x01,0xD1,0x00,0xF0,0x11,0xFD,0x38,0x1C,0x14,0x4F,0x00,0x26,0x78,0x65, \ +0xBC,0x65,0xFC,0x1D,0x59,0x34,0xFE,0x65,0x26,0x72,0xB8,0x6D,0x80,0x06,0x80, \ +0x0E,0x01,0x25,0x00,0x28,0x00,0xD1,0x25,0x72,0xFF,0xF7,0xBF,0xFF,0x0D,0x48, \ +0x80,0x69,0x00,0x28,0x02,0xD1,0x0C,0x49,0x10,0x20,0x08,0x72,0xB8,0x6D,0x00, \ +0x28,0x03,0xD0,0xFF,0xF7,0xB3,0xFF,0xFD,0x65,0xF0,0xBD,0x20,0x7A,0x00,0x28, \ +0x02,0xD0,0x26,0x72,0xFD,0x65,0xF0,0xBD,0xFE,0x65,0xF0,0xBD,0x00,0x00,0xC8, \ +0x00,0x00,0x02,0x0C,0x02,0x00,0x02,0x88,0x02,0x00,0x02,0x60,0x03,0x00,0x0D, \ +0x90,0xB5,0x1C,0x4F,0x78,0x7A,0x00,0x28,0x33,0xD0,0x1B,0x48,0x00,0x78,0x01, \ +0x28,0x2F,0xD1,0xFC,0x1D,0x15,0x34,0xA0,0x69,0x01,0x28,0x04,0xD1,0x00,0x20, \ +0xA0,0x61,0xF8,0x7A,0x00,0xF0,0xDD,0xF9,0xE0,0x69,0x01,0x28,0x02,0xD1,0xF8, \ +0x7A,0x00,0xF0,0xD7,0xF9,0xF8,0x7A,0x02,0x28,0x1C,0xD0,0xFF,0xF7,0x82,0xFE, \ +0x0F,0x49,0x80,0x23,0x08,0x68,0x0F,0x4F,0x02,0x7A,0x1A,0x40,0x0C,0xD0,0x10, \ +0x22,0x02,0x72,0x08,0x68,0x81,0x89,0x0C,0x30,0x0C,0x31,0xFF,0xF7,0x99,0xFF, \ +0x78,0x79,0x02,0x28,0x01,0xD1,0x00,0xF0,0x88,0xFC,0xFF,0xF7,0x2A,0xFF,0x78, \ +0x79,0x02,0x28,0x01,0xD1,0x00,0xF0,0x81,0xFC,0x90,0xBD,0x6C,0x02,0x00,0x02, \ +0x87,0x01,0x00,0x02,0x14,0x00,0x00,0x02,0xC8,0x00,0x00,0x02,0x90,0xB4,0x1E, \ +0x4A,0x1E,0x4C,0x91,0x6B,0xD3,0x6B,0x8B,0x42,0x19,0xD1,0x20,0x7B,0x40,0x23, \ +0x03,0x40,0xE0,0x20,0x00,0x2B,0x11,0xD1,0x49,0x07,0x02,0xD0,0x20,0x73,0x90, \ +0xBC,0xF7,0x46,0xD1,0x1D,0x59,0x31,0x8A,0x7A,0x01,0x2A,0x02,0xD1,0xD0,0x20, \ +0x20,0x73,0xF5,0xE7,0x89,0x7A,0x00,0x29,0xF2,0xD1,0x20,0x73,0xF0,0xE7,0x20, \ +0x73,0xEE,0xE7,0x8B,0x42,0xEC,0xD2,0xC9,0x1A,0x08,0x29,0x00,0xD9,0x08,0x21, \ +0x01,0x28,0x01,0xD1,0x0C,0x4F,0x02,0xE0,0x02,0x28,0x00,0xD1,0x0B,0x4F,0x00, \ +0x29,0x08,0xD0,0x0A,0x48,0xD3,0x6B,0xFB,0x5C,0x03,0x73,0xD3,0x6B,0x01,0x33, \ +0xD3,0x63,0x01,0x39,0xF7,0xD1,0x20,0x7B,0x10,0x23,0x18,0x43,0x20,0x73,0xD1, \ +0xE7,0x0C,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x0C,0x02,0x00,0x02,0x1E,0x02, \ +0x00,0x02,0x30,0x03,0x00,0x0D,0xF0,0xB5,0x04,0x1C,0x1D,0x48,0x0F,0x1C,0x86, \ +0x78,0xC5,0x78,0x20,0x21,0x03,0x2A,0x1B,0x48,0x01,0xD0,0x01,0x73,0xF0,0xBD, \ +0x02,0x2E,0x05,0xD1,0x01,0x2D,0x01,0xD3,0x0A,0x2D,0x01,0xD9,0x01,0x73,0xF0, \ +0xBD,0x00,0x2F,0x07,0xD1,0x00,0xF0,0x94,0xF9,0x08,0x2D,0x05,0xD1,0x13,0x49, \ +0x01,0x20,0x48,0x61,0x01,0xE0,0x00,0x21,0x01,0x73,0x11,0x48,0x02,0x2E,0x0C, \ +0xD1,0x00,0x2C,0x12,0xD1,0x08,0x2D,0x02,0xD1,0x0F,0x49,0x81,0x67,0x0D,0xE0, \ +0x0E,0x49,0x81,0x67,0x0E,0x49,0x00,0x20,0x08,0x70,0x07,0xE0,0x01,0x2E,0xD8, \ +0xD0,0x0C,0x49,0x81,0x67,0x06,0x49,0x00,0x20,0x08,0x61,0xC8,0x60,0x04,0x49, \ +0x00,0x20,0x4F,0x60,0x88,0x60,0xF0,0xBD,0x00,0x00,0x54,0x02,0x00,0x02,0x70, \ +0x03,0x00,0x0D,0x88,0x02,0x00,0x02,0x0C,0x02,0x00,0x02,0x00,0x60,0x00,0x01, \ +0x14,0x0A,0x00,0x02,0x7B,0x01,0x00,0x02,0xAC,0x0A,0x00,0x02,0x90,0xB5,0x16, \ +0x4A,0x16,0x4B,0x01,0x28,0x06,0xD1,0x18,0x7B,0xD1,0x1D,0x59,0x31,0xC8,0x72, \ +0x00,0xF0,0x53,0xF9,0x90,0xBD,0x12,0x49,0x88,0x68,0x4F,0x68,0xB8,0x42,0xF9, \ +0xD2,0x48,0x68,0x8F,0x68,0xC0,0x1B,0x08,0x28,0x00,0xD9,0x08,0x20,0x00,0x28, \ +0x09,0xD0,0x1C,0x7B,0x97,0x6F,0x3C,0x70,0x01,0x37,0x97,0x67,0x8F,0x68,0x01, \ +0x37,0x8F,0x60,0x01,0x38,0xF5,0xD1,0x88,0x68,0x49,0x68,0x88,0x42,0x02,0xD1, \ 
+0x00,0xF0,0x35,0xF9,0x90,0xBD,0x04,0x49,0x00,0x20,0x08,0x73,0x90,0xBD,0x0C, \ +0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x88,0x02,0x00,0x02,0x70,0x03,0x00,0x0D, \ +0x00,0xB5,0x7F,0x28,0x07,0xD8,0x00,0x29,0x05,0xD1,0x00,0x2A,0x03,0xD1,0x0C, \ +0x4A,0x51,0x6B,0x03,0x29,0x03,0xD1,0x0B,0x49,0x20,0x20,0x08,0x73,0x00,0xBD, \ +0x01,0x29,0x04,0xD1,0x00,0x28,0x08,0xD0,0x02,0x20,0x50,0x63,0x05,0xE0,0x02, \ +0x29,0x03,0xD1,0x00,0x28,0x01,0xD1,0x01,0x20,0x50,0x63,0x00,0xF0,0x0A,0xF9, \ +0x00,0xBD,0x00,0x00,0x0C,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB5,0x00, \ +0x29,0x09,0xD1,0x00,0x2A,0x07,0xD1,0x00,0x28,0x01,0xD0,0x01,0x28,0x03,0xD1, \ +0x14,0x49,0x4A,0x6B,0x01,0x2A,0x03,0xD1,0x13,0x49,0x20,0x20,0x08,0x73,0x80, \ +0xBD,0x12,0x4B,0x02,0x2A,0x09,0xD1,0x00,0x28,0x12,0xD0,0x03,0x22,0x4A,0x63, \ +0x1F,0x7B,0x1A,0x1C,0x02,0x23,0x3B,0x43,0x13,0x73,0x0A,0xE0,0x03,0x2A,0x08, \ +0xD1,0x00,0x28,0x06,0xD1,0x02,0x22,0x4A,0x63,0x1F,0x7B,0x1A,0x1C,0xFD,0x23, \ +0x3B,0x40,0x13,0x73,0x48,0x64,0x00,0x20,0x40,0x31,0x08,0x82,0x48,0x82,0x00, \ +0xF0,0xD4,0xF8,0x80,0xBD,0x00,0x00,0x0C,0x02,0x00,0x02,0x70,0x03,0x00,0x0D, \ +0xE0,0x03,0x00,0x0D,0xF0,0xB5,0x12,0x4D,0x12,0x4C,0x01,0x29,0x02,0xD1,0x12, \ +0x23,0xA3,0x63,0x03,0xE0,0x20,0x23,0x02,0x29,0x0A,0xD1,0xA3,0x63,0xE3,0x1D, \ +0x59,0x33,0x00,0x27,0x9F,0x72,0xA6,0x6B,0xB0,0x42,0x04,0xD8,0x9F,0x72,0xA0, \ +0x63,0x07,0xE0,0x2B,0x73,0xF0,0xBD,0x70,0x07,0x01,0xD0,0x9F,0x72,0x01,0xE0, \ +0x01,0x20,0x98,0x72,0x80,0x20,0xE7,0x63,0x28,0x73,0x08,0x1C,0x11,0x1C,0xFF, \ +0xF7,0xB3,0xFE,0xF0,0xBD,0x70,0x03,0x00,0x0D,0x0C,0x02,0x00,0x02,0x80,0xB4, \ +0x0C,0x4F,0x0C,0x4A,0x01,0x28,0x06,0xD1,0x80,0x20,0x10,0x72,0x38,0x7B,0xFE, \ +0x23,0x18,0x40,0x38,0x73,0x08,0xE0,0x02,0x28,0x06,0xD1,0x80,0x20,0x08,0x43, \ +0x10,0x72,0x38,0x7B,0x01,0x23,0x18,0x43,0x38,0x73,0x04,0x49,0x20,0x20,0x08, \ +0x73,0x80,0xBC,0xF7,0x46,0xE0,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0x70,0x03, \ +0x00,0x0D,0x0D,0x23,0x1B,0x06,0x99,0x83,0x05,0x49,0x0A,0x70,0x05,0x4A,0x10, \ +0x60,0x02,0x20,0x08,0x72,0x08,0x7A,0x00,0x28,0xFC,0xD1,0xF7,0x46,0x00,0x00, \ +0x20,0x00,0x00,0x0D,0x40,0x00,0x00,0x0D,0xB0,0xB5,0x10,0x4F,0x03,0x2A,0xBB, \ +0x78,0xFC,0x78,0x0F,0x4F,0x02,0xD0,0x20,0x20,0x38,0x73,0xB0,0xBD,0x00,0x25, \ +0x0D,0x4A,0x01,0x2B,0xD5,0x66,0x11,0x67,0x0B,0xD1,0x80,0x20,0x38,0x73,0xD0, \ +0x1D,0x59,0x30,0xC0,0x7A,0x09,0x49,0x10,0x23,0x08,0x73,0x38,0x7B,0x18,0x43, \ +0x38,0x73,0xB0,0xBD,0x02,0x1C,0x18,0x1C,0x21,0x1C,0x03,0xF0,0xE3,0xFA,0xB0, \ +0xBD,0x54,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x0C,0x02,0x00,0x02,0x30,0x03, \ +0x00,0x0D,0xB0,0xB5,0x1B,0x4C,0x07,0x1C,0x60,0x79,0x02,0x28,0x01,0xD1,0x00, \ +0xF0,0xE8,0xFA,0x00,0x25,0x02,0x2F,0x17,0x48,0x08,0xD0,0x05,0x72,0x17,0x48, \ +0x20,0x21,0x01,0x70,0x05,0x70,0x16,0x49,0x86,0x20,0x08,0x72,0x06,0xE0,0x01, \ +0x7A,0x49,0x09,0x03,0xD2,0x10,0x21,0x01,0x72,0x12,0x48,0xC5,0x61,0x60,0x79, \ +0x02,0x28,0x01,0xD1,0x00,0xF0,0xA9,0xFA,0x10,0x48,0x10,0x4F,0x45,0x65,0xC1, \ +0x1D,0x59,0x31,0x85,0x65,0x0D,0x72,0xC5,0x65,0x38,0x68,0x01,0x7A,0x10,0x29, \ +0x02,0xD1,0xFF,0xF7,0xAE,0xFA,0x38,0x60,0x38,0x68,0x01,0x7A,0x40,0x29,0x02, \ +0xD1,0xFF,0xF7,0xA7,0xFA,0x38,0x60,0xB0,0xBD,0xC8,0x00,0x00,0x02,0x60,0x03, \ +0x00,0x0D,0xC0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0x88,0x02,0x00,0x02,0x0C, \ +0x02,0x00,0x02,0x14,0x00,0x00,0x02,0x05,0x48,0x06,0x49,0x02,0x78,0x0A,0x63, \ +0x0A,0x6B,0x03,0x78,0x9A,0x42,0xFB,0xD0,0x03,0x49,0x60,0x20,0x08,0x73,0xF7, \ +0x46,0xF0,0x03,0x00,0x0D,0x88,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0xF0,0xB5, \ +0x25,0x4D,0x28,0x78,0x00,0x28,0x01,0xD1,0x00,0xF0,0x37,0xFA,0x0D,0x24,0x24, \ 
+0x06,0x27,0x89,0x40,0x20,0x21,0x4E,0x38,0x40,0x08,0xD0,0x30,0x7A,0x00,0x28, \ +0xFC,0xD1,0x1F,0x48,0x00,0x7B,0x40,0x08,0x01,0xD3,0xFF,0xF7,0xF7,0xFA,0x78, \ +0x0A,0x17,0xD3,0xF8,0x43,0xFF,0x23,0x01,0x33,0x18,0x43,0x20,0x81,0x20,0x7B, \ +0x00,0x09,0xFC,0xD2,0x30,0x7A,0x00,0x28,0xFC,0xD1,0xFF,0xF7,0xC0,0xFA,0x15, \ +0x48,0x81,0x69,0x02,0x29,0x02,0xD0,0x01,0x21,0x81,0x61,0x01,0xE0,0x00,0x23, \ +0x83,0x61,0xFF,0xF7,0x65,0xFA,0x11,0x49,0x08,0x78,0x01,0x28,0x10,0xD1,0xB8, \ +0x08,0x0E,0xD3,0x0F,0x4A,0x00,0x23,0x10,0x7A,0x13,0x72,0xFA,0x43,0x02,0x23, \ +0x1A,0x43,0x22,0x81,0x09,0x78,0x01,0x29,0x03,0xD1,0x00,0x04,0x00,0x0C,0x03, \ +0xF0,0xE0,0xFB,0x28,0x78,0x00,0x28,0x01,0xD1,0x00,0xF0,0x0F,0xFA,0xF0,0xBD, \ +0x9F,0x01,0x00,0x02,0x20,0x00,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x88,0x02,0x00, \ +0x02,0x86,0x01,0x00,0x02,0xE0,0x03,0x00,0x0D,0x80,0xB5,0x86,0xB0,0x42,0x68, \ +0x11,0x78,0x08,0x29,0x01,0xD0,0x06,0xB0,0x80,0xBD,0x91,0x7F,0xD3,0x7F,0x09, \ +0x02,0x19,0x43,0x15,0x4B,0x09,0x04,0x1F,0x88,0x09,0x0C,0xB9,0x42,0x02,0xD0, \ +0x5B,0x88,0x8B,0x42,0x06,0xD1,0xD1,0x1D,0x11,0x31,0x06,0x22,0x10,0x48,0xFC, \ +0xF7,0xA1,0xFE,0x19,0xE0,0x03,0x23,0x5B,0x02,0x99,0x42,0x06,0xDD,0xD1,0x1D, \ +0x11,0x31,0x06,0x22,0x0C,0x48,0xFC,0xF7,0x96,0xFE,0x0E,0xE0,0xD7,0x1D,0x01, \ +0x37,0x47,0x60,0x18,0x31,0x81,0x82,0x69,0x46,0x10,0x1C,0x18,0x22,0xFC,0xF7, \ +0x8B,0xFE,0x68,0x46,0x18,0x22,0x39,0x1C,0xFC,0xF7,0x86,0xFE,0xCE,0xE7,0x00, \ +0x00,0xCC,0x02,0x00,0x02,0xC6,0x02,0x00,0x02,0xC0,0x02,0x00,0x02,0xF0,0xB5, \ +0x00,0xF0,0xDF,0xF8,0x65,0x4F,0xFF,0x21,0xF8,0x1D,0x27,0x30,0x01,0x31,0x06, \ +0x22,0x04,0x1C,0x00,0xF0,0x43,0xF9,0x61,0x4D,0x12,0x22,0x03,0x21,0x28,0x1C, \ +0x00,0xF0,0x3D,0xF9,0x0E,0x22,0xFF,0x21,0x38,0x1C,0x41,0x31,0x00,0xF0,0x37, \ +0xF9,0xF8,0x1D,0x07,0x30,0x0E,0x22,0xFF,0x21,0x51,0x31,0x00,0xF0,0x30,0xF9, \ +0xF8,0x1D,0x2E,0x30,0x0E,0x22,0xFF,0x21,0x61,0x31,0x00,0xF0,0x29,0xF9,0xF8, \ +0x1D,0x3C,0x30,0x0E,0x22,0xFF,0x21,0x71,0x31,0x00,0xF0,0x22,0xF9,0xF8,0x1D, \ +0x15,0x30,0x0E,0x22,0xFF,0x21,0x11,0x31,0x00,0xF0,0x1B,0xF9,0xF8,0x1D,0x2D, \ +0x30,0x01,0x22,0xFF,0x21,0x31,0x31,0x00,0xF0,0x14,0xF9,0xF8,0x1D,0x4A,0x30, \ +0x07,0x22,0xFF,0x21,0x81,0x31,0x00,0xF0,0x0D,0xF9,0xF8,0x1D,0x51,0x30,0x03, \ +0x22,0xFF,0x21,0x89,0x31,0x00,0xF0,0x06,0xF9,0xF8,0x1D,0x55,0x30,0x04,0x22, \ +0xFF,0x21,0x8D,0x31,0x00,0xF0,0xFF,0xF8,0x00,0xF0,0xAE,0xF8,0xE8,0x1D,0x01, \ +0x30,0x04,0x22,0xF9,0x1D,0x23,0x31,0xFC,0xF7,0x25,0xFE,0xF8,0x1D,0x19,0x30, \ +0x80,0x7B,0xC0,0x07,0xC0,0x0F,0x00,0x25,0x00,0x28,0x10,0xD1,0x38,0x4A,0x15, \ +0x54,0x01,0x30,0x06,0x28,0xFB,0xD3,0x10,0x1C,0x06,0x22,0x21,0x1C,0xFC,0xF7, \ +0x03,0xFE,0x00,0x28,0x04,0xD0,0x20,0x1C,0x06,0x22,0x32,0x49,0xFC,0xF7,0x0C, \ +0xFE,0xF8,0x1D,0x29,0x30,0x00,0x79,0x10,0x28,0x0B,0xD0,0x20,0x28,0x09,0xD0, \ +0x31,0x28,0x07,0xD0,0x30,0x28,0x05,0xD0,0x32,0x28,0x03,0xD0,0x40,0x28,0x01, \ +0xD0,0x41,0x28,0x01,0xD1,0x29,0x49,0xC8,0x75,0xF8,0x1D,0x49,0x30,0x42,0x78, \ +0x01,0x27,0x27,0x49,0x55,0x2A,0x13,0xD1,0x82,0x78,0x53,0x2A,0x10,0xD1,0xC2, \ +0x78,0x42,0x2A,0x0D,0xD1,0x02,0x79,0x53,0x2A,0x0A,0xD1,0x42,0x79,0x55,0x2A, \ +0x07,0xD1,0x82,0x79,0x53,0x2A,0x04,0xD1,0xC2,0x79,0x50,0x2A,0x01,0xD1,0x0F, \ +0x70,0x00,0xE0,0x0D,0x70,0xFF,0x24,0x01,0x34,0x06,0x7A,0x01,0x22,0x52,0x02, \ +0x53,0x2E,0x19,0x4B,0x19,0x49,0x08,0xD1,0x46,0x7A,0x45,0x2E,0x05,0xD1,0x86, \ +0x7A,0x4C,0x2E,0x02,0xD1,0x0C,0x60,0x1A,0x60,0x01,0xE0,0x0A,0x60,0x1C,0x60, \ +0x03,0x7B,0x13,0x4A,0x14,0x49,0x48,0x2B,0x0C,0xD1,0x43,0x7B,0x57,0x2B,0x09, \ +0xD1,0x80,0x7B,0x33,0x28,0x06,0xD1,0x0F,0x75,0x4F,0x75,0x05,0x20,0x90,0x70, \ 
+0x20,0x20,0x88,0x61,0xF0,0xBD,0x0D,0x75,0x4D,0x75,0x02,0x20,0x90,0x70,0x40, \ +0x20,0x88,0x61,0xF0,0xBD,0x14,0x0A,0x00,0x02,0x0C,0x02,0x00,0x02,0x00,0x72, \ +0x01,0x02,0x74,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x85,0x01,0x00,0x02,0xA8, \ +0x01,0x00,0x02,0xA4,0x01,0x00,0x02,0x28,0x01,0x00,0x02,0xD0,0x02,0x00,0x02, \ +0x80,0x21,0xF3,0x20,0x00,0x05,0x01,0x60,0x00,0x21,0x01,0x60,0x01,0x21,0x41, \ +0x60,0x01,0x60,0x07,0x21,0x49,0x06,0xCA,0x69,0x01,0x23,0x5B,0x03,0x1A,0x43, \ +0xCA,0x61,0x04,0x49,0x01,0x63,0x04,0x49,0x41,0x63,0x81,0x63,0xC1,0x63,0x01, \ +0x69,0x80,0x68,0xF7,0x46,0x00,0x00,0x01,0x0C,0x00,0x02,0x01,0x02,0x00,0x02, \ +0x07,0x20,0x40,0x06,0xC1,0x69,0x02,0x4B,0x19,0x40,0xC1,0x61,0xF7,0x46,0x00, \ +0x00,0xFF,0xDF,0x00,0x00,0xF0,0xB5,0x0F,0x1C,0x00,0x21,0xF3,0x24,0x24,0x05, \ +0x00,0x28,0x08,0xD9,0x13,0x4D,0x6B,0x5C,0xE3,0x60,0x26,0x69,0xB3,0x08,0xFC, \ +0xD3,0x01,0x31,0x81,0x42,0xF7,0xD3,0xFF,0x23,0xE3,0x60,0xA0,0x68,0x21,0x1C, \ +0x08,0x69,0x40,0x08,0xFC,0xD3,0x88,0x68,0x00,0x20,0x00,0x2A,0x0D,0xD9,0x1C, \ +0x1C,0x0D,0x69,0xAB,0x08,0xFC,0xD3,0xCC,0x60,0x0B,0x69,0x5B,0x08,0xFC,0xD3, \ +0x8B,0x68,0x01,0x30,0x3B,0x70,0x01,0x37,0x90,0x42,0xF2,0xD3,0x02,0x20,0xFC, \ +0xF7,0xE6,0xFB,0xF0,0xBD,0x00,0x00,0xEC,0x02,0x00,0x02,0xF3,0x20,0x00,0x05, \ +0x81,0x68,0x05,0x21,0xC1,0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0xFF,0x21,0xC1, \ +0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0x81,0x68,0x01,0x69,0x49,0x08,0xFC,0xD3, \ +0x80,0x68,0x00,0x06,0x00,0x0E,0xF7,0x46,0x90,0xB5,0x04,0x1C,0x48,0x09,0x08, \ +0x23,0x18,0x40,0x17,0x1C,0x03,0x22,0x02,0x43,0x08,0x48,0x02,0x70,0x41,0x70, \ +0xFF,0xF7,0xDE,0xFF,0x40,0x08,0xFB,0xD2,0x02,0x20,0xFC,0xF7,0xBB,0xFB,0x02, \ +0x20,0x21,0x1C,0x3A,0x1C,0xFF,0xF7,0xA4,0xFF,0x90,0xBD,0x00,0x00,0xEC,0x02, \ +0x00,0x02,0x07,0x20,0x40,0x06,0xC1,0x69,0x01,0x23,0x5B,0x03,0x19,0x43,0xC1, \ +0x61,0xF7,0x46,0xF3,0x20,0x00,0x05,0x41,0x68,0x0F,0x23,0x1B,0x04,0x99,0x43, \ +0x41,0x60,0x41,0x68,0x19,0x43,0x41,0x60,0xF7,0x46,0x00,0x00,0x12,0x4B,0x5B, \ +0x79,0x02,0x2B,0x0C,0xD1,0x13,0x1C,0x11,0x4A,0x11,0xD1,0x00,0x28,0x08,0xD1, \ +0x00,0x29,0x03,0xD1,0x90,0x79,0x4B,0x1F,0x18,0x40,0x90,0x71,0x00,0x20,0x50, \ +0x71,0xF7,0x46,0x90,0x79,0x04,0x23,0x18,0x43,0x90,0x71,0x01,0x20,0x50,0x71, \ +0xF7,0x46,0x00,0x28,0x04,0xD1,0x90,0x79,0x02,0x23,0x98,0x43,0x90,0x71,0xF7, \ +0x46,0x90,0x79,0x02,0x23,0x18,0x43,0x90,0x71,0xF7,0x46,0x00,0x00,0xC8,0x00, \ +0x00,0x02,0x80,0x06,0x00,0x02,0x90,0xB5,0x07,0x21,0x49,0x06,0xCA,0x69,0x52, \ +0x09,0x0F,0xD3,0xCA,0x69,0x10,0x23,0x9A,0x43,0xCA,0x61,0x01,0x28,0x01,0xD1, \ +0x05,0x49,0x08,0x70,0x05,0x4C,0x67,0x68,0xFC,0xF7,0xAA,0xFB,0x39,0x1A,0x49, \ +0x01,0x08,0x18,0x60,0x60,0x90,0xBD,0x00,0x00,0x9F,0x01,0x00,0x02,0x80,0x00, \ +0x00,0x04,0x90,0xB5,0x09,0x48,0x80,0x79,0x01,0x28,0x0D,0xD1,0x08,0x4F,0x7C, \ +0x68,0xFC,0xF7,0x97,0xFB,0x21,0x1A,0x49,0x09,0x08,0x18,0x78,0x60,0x07,0x20, \ +0x40,0x06,0xC1,0x69,0x10,0x23,0x19,0x43,0xC1,0x61,0x90,0xBD,0x80,0x06,0x00, \ +0x02,0x80,0x00,0x00,0x04,0x80,0xB5,0x0F,0x27,0x3F,0x06,0xB8,0x88,0x0B,0x4B, \ +0x18,0x40,0xB8,0x80,0xB8,0x89,0x0A,0x4B,0x18,0x40,0xB8,0x81,0x0A,0x48,0x01, \ +0x78,0x00,0x29,0x03,0xD0,0x00,0x21,0x01,0x70,0xFF,0xF7,0xD5,0xFF,0xB8,0x89, \ +0x06,0x4B,0x18,0x43,0xB8,0x81,0xB8,0x88,0x05,0x4B,0x18,0x43,0xB8,0x80,0x80, \ +0xBD,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x9F,0x01,0x00,0x02,0x13,0x13, \ +0x00,0x00,0xE8,0xE8,0x00,0x00,0x80,0xB5,0x0F,0x27,0x3F,0x06,0xB8,0x88,0x09, \ +0x4B,0x18,0x40,0xB8,0x80,0xB8,0x89,0x08,0x4B,0x18,0x40,0xB8,0x81,0x01,0x20, \ +0xFF,0xF7,0x98,0xFF,0xB8,0x89,0x06,0x4B,0x18,0x43,0xB8,0x81,0xB8,0x88,0x05, \ 
+0x4B,0x18,0x43,0xB8,0x80,0x80,0xBD,0x00,0x00,0x17,0x17,0xFF,0xFF,0xEC,0xEC, \ +0xFF,0xFF,0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0xF0,0xB4,0x13,0x4A,0x00, \ +0x27,0xD7,0x65,0x17,0x66,0x17,0x67,0x57,0x67,0x20,0x20,0x90,0x67,0x10,0x48, \ +0x07,0x70,0x41,0x1C,0x01,0x20,0x04,0x02,0x00,0x25,0x03,0x1C,0x46,0x08,0x05, \ +0xD2,0x5B,0x08,0x01,0x35,0x2D,0x06,0x2D,0x0E,0x5E,0x08,0xF9,0xD3,0x0D,0x70, \ +0x01,0x31,0x01,0x30,0xA0,0x42,0xF0,0xD3,0x07,0x49,0x00,0x20,0x80,0xC1,0x01, \ +0x30,0x20,0x28,0xFB,0xD3,0x57,0x66,0x97,0x66,0xD7,0x67,0xF0,0xBC,0xF7,0x46, \ +0x84,0x03,0x00,0x02,0x3C,0x0B,0x00,0x02,0x3C,0x0C,0x00,0x02,0x90,0xB5,0x0A, \ +0x4F,0x0A,0x4C,0x38,0x68,0x63,0x1C,0x98,0x42,0x04,0xD0,0x3C,0x60,0xFC,0xF7, \ +0x08,0xF9,0x00,0xF0,0xDC,0xFB,0x06,0x48,0x3C,0x60,0x00,0x68,0xFC,0xF7,0x7E, \ +0xF9,0x00,0x20,0x38,0x60,0x00,0xF0,0xE3,0xFB,0x90,0xBD,0xF0,0x03,0x00,0x02, \ +0xF0,0xF0,0xF0,0xF0,0x60,0x04,0x00,0x02,0x80,0xB5,0x05,0x48,0x05,0x4F,0x38, \ +0x60,0xFC,0xF7,0xF0,0xF8,0x00,0xF0,0xC4,0xFB,0x03,0x48,0x38,0x60,0x80,0xBD, \ +0x00,0x00,0xF0,0xF0,0xF0,0xF0,0xF0,0x03,0x00,0x02,0xF1,0xF0,0xF0,0xF0,0x0F, \ +0x00,0x2D,0xE9,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xCC,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82, \ +0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40, \ +0x2D,0xE9,0xB0,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00, \ +0x20,0x83,0xE5,0x9C,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3, \ +0x04,0x00,0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D, \ +0xE9,0x84,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x10,0xD0,0x8D,0xE2,0x78,0x00, \ +0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x0F,0x00,0x2D,0xE9,0x64,0x30,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2, \ +0x00,0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D, \ +0xE9,0x4C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20, \ +0x83,0xE5,0x34,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04, \ +0x00,0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9, \ +0x20,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E, \ +0xE2,0x01,0x40,0x2D,0xE9,0x0C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0xF0,0x03, \ +0x00,0x02,0xE0,0x03,0x00,0x02,0x1D,0x01,0x00,0x00,0x29,0x01,0x00,0x00,0x00, \ +0xA0,0x00,0x47,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xE4,0x31,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52, \ +0xE3,0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00, \ +0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0xC0,0x11,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00, \ +0x00,0x50,0xE3,0x27,0x00,0x00,0x0A,0xC0,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5, \ +0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA8,0x31,0x9F,0xE5,0x00,0x20,0x93, \ +0xE5,0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40,0xBD,0xE8,0x00,0xF0, \ +0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08,0x40,0xBD,0xE8,0x0E, \ +0x10,0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x04,0x10,0x2D,0xE5, \ +0xF0,0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD2,0x20,0xA0,0xE3,0x02,0xF0,0x21, \ +0xE1,0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0,0x21,0xE1,0x0F,0x00, \ +0x2D,0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x44,0x11,0x9F,0xE5,0x00, \ +0x00,0x91,0xE5,0x08,0xD0,0x80,0xE5,0x44,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5, \ +0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80,0xE5,0x00,0x20,0xA0, \ +0xE3,0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00,0x81,0xE5,0x70,0x01, \ 
+0x00,0xEA,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x6D,0x01,0x00,0xEA,0x00, \ +0xA0,0x00,0x47,0xD1,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xF4,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52, \ +0xE3,0x03,0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00, \ +0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x00,0x10,0x9D,0xE5,0x1F,0x20,0xA0,0xE3,0x02, \ +0x10,0x01,0xE0,0x12,0x00,0x51,0xE3,0x0B,0x00,0x00,0x0A,0xBC,0x10,0x9F,0xE5, \ +0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x27,0x00,0x00,0x0A,0xBC,0x30,0x9F, \ +0xE5,0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA4,0x30, \ +0x9F,0xE5,0x00,0x20,0x93,0xE5,0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01, \ +0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1, \ +0x08,0x40,0xBD,0xE8,0x0E,0x10,0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21, \ +0xE1,0x04,0x10,0x2D,0xE5,0xF0,0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD1,0x20, \ +0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05, \ +0xF0,0x21,0xE1,0x0F,0x00,0x2D,0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9, \ +0x40,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x08,0xD0,0x80,0xE5,0x40,0x30,0x9F, \ +0xE5,0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20, \ +0x80,0xE5,0x00,0x20,0xA0,0xE3,0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00, \ +0x00,0x81,0xE5,0x2F,0x01,0x00,0xEA,0x18,0xD0,0x8D,0xE2,0xD3,0x00,0xA0,0xE3, \ +0x00,0xF0,0x21,0xE1,0x2B,0x01,0x00,0xEA,0xF0,0x03,0x00,0x02,0xE0,0x03,0x00, \ +0x02,0xDC,0x03,0x00,0x02,0xE4,0x03,0x00,0x02,0x68,0x04,0x00,0x02,0x00,0x04, \ +0x00,0x02,0x90,0xB5,0x86,0xB0,0x00,0x24,0x13,0x4F,0x13,0x4A,0x3C,0x60,0x7C, \ +0x60,0xBC,0x60,0x00,0x21,0x10,0x1C,0xBC,0x61,0x10,0xC0,0x01,0x31,0x20,0x29, \ +0xFB,0xD3,0xD0,0x1D,0x79,0x30,0xFA,0x60,0x38,0x61,0x7A,0x61,0x78,0x6A,0x00, \ +0x22,0x00,0x21,0x05,0x92,0x02,0x1C,0x04,0x91,0x03,0x90,0xF8,0x69,0x39,0x6A, \ +0x02,0x92,0x00,0x90,0x01,0x91,0x08,0xA1,0x07,0x48,0x05,0x4A,0x0C,0x4B,0x00, \ +0xF0,0x18,0xF8,0xBC,0x62,0xFC,0x62,0x06,0xB0,0x90,0xBD,0x64,0x04,0x00,0x02, \ +0xBC,0x0C,0x00,0x02,0xF9,0x46,0x00,0x00,0x3C,0x0D,0x00,0x02,0x53,0x79,0x73, \ +0x74,0x65,0x6D,0x20,0x54,0x69,0x6D,0x65,0x72,0x20,0x54,0x68,0x72,0x65,0x61, \ +0x64,0x00,0x4D,0x49,0x54,0x41,0xFF,0xB5,0x07,0x1C,0x0D,0x98,0x0E,0x9C,0x0A, \ +0xAE,0x4C,0xCE,0x09,0x9D,0xB9,0x62,0x02,0x99,0x79,0x64,0x03,0x99,0xB9,0x64, \ +0x00,0x21,0xB9,0x60,0x7A,0x61,0xAA,0x18,0x01,0x3A,0xFD,0x60,0x3A,0x61,0xDA, \ +0x06,0xD2,0x0E,0xF3,0x06,0xB8,0x61,0xFA,0x62,0xDB,0x0E,0xFB,0x63,0xF8,0x61, \ +0x79,0x60,0x03,0x20,0x38,0x63,0x79,0x63,0x25,0x48,0xB9,0x63,0x78,0x65,0x01, \ +0x20,0x90,0x40,0xBF,0x65,0x39,0x65,0x64,0x37,0x00,0x22,0x00,0x23,0x0E,0xC7, \ +0x0C,0xC7,0x78,0x3F,0x38,0x64,0x38,0x1C,0x1E,0x49,0x00,0xF0,0xFB,0xFA,0xC0, \ +0x20,0x00,0xF0,0x32,0xFB,0x1C,0x49,0x1D,0x4D,0x39,0x60,0x2A,0x68,0xF9,0x1D, \ +0x79,0x31,0x00,0x2A,0x0A,0xD0,0xD3,0x1D,0x79,0x33,0xDA,0x68,0xDF,0x60,0xD3, \ +0x1D,0x79,0x33,0x9F,0x60,0xCA,0x60,0x2A,0x68,0x8A,0x60,0x02,0xE0,0x2F,0x60, \ +0x8F,0x60,0xCF,0x60,0x13,0x49,0x14,0x4D,0x0A,0x68,0x01,0x32,0x0A,0x60,0x29, \ +0x68,0x01,0x31,0x29,0x60,0x00,0xF0,0x12,0xFB,0x00,0x2C,0x07,0xD0,0x38,0x1C, \ +0x00,0xF0,0x1D,0xFB,0x00,0x28,0x0A,0xD0,0x00,0xF0,0x83,0xFB,0x07,0xE0,0xC0, \ +0x20,0x00,0xF0,0x05,0xFB,0x29,0x68,0x01,0x39,0x29,0x60,0x00,0xF0,0x00,0xFB, \ +0x00,0x20,0x04,0xB0,0xF0,0xBD,0x00,0x00,0x55,0x48,0x00,0x00,0x95,0x48,0x00, \ +0x00,0x44,0x52,0x48,0x54,0xE8,0x03,0x00,0x02,0xEC,0x03,0x00,0x02,0x00,0x04, \ +0x00,0x02,0x90,0xB5,0x41,0x60,0x07,0x1C,0x08,0x30,0x00,0x21,0x00,0x24,0x12, \ 
+0xC0,0x12,0xC0,0xC0,0x20,0x00,0xF0,0xE5,0xFA,0x0C,0x49,0x0C,0x4B,0x39,0x60, \ +0x19,0x68,0x00,0x29,0x06,0xD0,0xCA,0x69,0xCF,0x61,0x97,0x61,0xFA,0x61,0x19, \ +0x68,0xB9,0x61,0x02,0xE0,0x1F,0x60,0xBF,0x61,0xFF,0x61,0x06,0x49,0x0A,0x68, \ +0x01,0x32,0x0A,0x60,0x00,0xF0,0xCF,0xFA,0x20,0x1C,0x90,0xBD,0x00,0x00,0x4E, \ +0x44,0x56,0x44,0x94,0x04,0x00,0x02,0x98,0x04,0x00,0x02,0xF0,0xB5,0x85,0xB0, \ +0x07,0x1C,0xC0,0x20,0x0C,0x1C,0x15,0x1C,0x00,0xF0,0xBE,0xFA,0xA9,0x08,0x03, \ +0xD3,0xB9,0x68,0x21,0x40,0xB9,0x60,0x3D,0xE0,0xB9,0x68,0x21,0x43,0xB9,0x60, \ +0x3C,0x69,0x0A,0x1C,0x00,0x2C,0x63,0xD0,0x7B,0x69,0x6D,0x49,0x01,0x2B,0x35, \ +0xD1,0xE5,0x1D,0x79,0x35,0x2E,0x68,0xB3,0x08,0x05,0xD3,0xA3,0x6F,0x16,0x1C, \ +0x1E,0x40,0x9E,0x42,0x28,0xD1,0x02,0xE0,0xA3,0x6F,0x13,0x40,0x24,0xD0,0xE3, \ +0x6F,0x1A,0x60,0x2A,0x68,0x52,0x08,0x03,0xD3,0xBA,0x68,0xA3,0x6F,0x9A,0x43, \ +0xBA,0x60,0x00,0x26,0x3E,0x61,0x7E,0x61,0xA6,0x66,0x0A,0x68,0x01,0x32,0x0A, \ +0x60,0x00,0xF0,0x8D,0xFA,0x60,0x6E,0x00,0x28,0x04,0xD0,0xE0,0x1D,0x45,0x30, \ +0x00,0xF0,0x34,0xFB,0x00,0xE0,0xE6,0x64,0x6E,0x60,0x20,0x1C,0x00,0xF0,0x90, \ +0xFA,0x00,0x28,0x01,0xD0,0x00,0xF0,0xF6,0xFA,0x30,0x1C,0x9B,0xE0,0x00,0xF0, \ +0x78,0xFA,0x97,0xE0,0x03,0x94,0x00,0x25,0x3D,0x61,0x01,0x92,0x4F,0x4A,0x02, \ +0x93,0x11,0x68,0x00,0x26,0x01,0x31,0x11,0x60,0x00,0xF0,0x6B,0xFA,0xC0,0x20, \ +0x00,0xF0,0x68,0xFA,0xF9,0x68,0x00,0x29,0x0A,0xD0,0x00,0x21,0xF9,0x60,0x03, \ +0x9C,0x7B,0x69,0x02,0x93,0xB9,0x68,0x01,0x9A,0x11,0x43,0x01,0x91,0x00,0x2C, \ +0x46,0xD0,0xE3,0x1D,0x79,0x33,0x04,0x93,0x19,0x68,0x00,0x91,0x89,0x08,0x06, \ +0xD3,0xA1,0x6F,0x01,0x9A,0x0A,0x40,0x8A,0x42,0x07,0xD1,0x04,0xE0,0x6F,0xE0, \ +0xA1,0x6F,0x01,0x9A,0x11,0x40,0x01,0xD0,0x00,0x21,0x00,0xE0,0x07,0x21,0x22, \ +0x6F,0x00,0x29,0x28,0xD1,0x01,0x99,0xE3,0x6F,0x19,0x60,0x00,0x99,0x49,0x08, \ +0x03,0xD3,0xB9,0x68,0xA3,0x6F,0x99,0x43,0xB9,0x60,0x21,0x6F,0xA1,0x42,0x02, \ +0xD1,0x00,0x21,0x03,0x91,0x08,0xE0,0x03,0x9B,0xA3,0x42,0x00,0xD1,0x03,0x91, \ +0x63,0x6F,0x4B,0x67,0x21,0x6F,0x63,0x6F,0x19,0x67,0x79,0x69,0x01,0x39,0x79, \ +0x61,0x00,0x21,0xA1,0x66,0x04,0x9B,0x00,0x2D,0x59,0x60,0x03,0xD1,0x25,0x1C, \ +0x26,0x1C,0x21,0x67,0x02,0xE0,0x34,0x67,0x21,0x67,0x26,0x1C,0x02,0x9B,0x14, \ +0x1C,0x01,0x3B,0x02,0x93,0x00,0x2B,0xA5,0xD1,0x03,0x99,0x39,0x61,0x00,0xF0, \ +0x0F,0xFA,0x00,0x2D,0x18,0xD0,0x1C,0x4E,0x00,0x24,0x68,0x6E,0x2F,0x6F,0x00, \ +0x28,0x04,0xD0,0xE8,0x1D,0x45,0x30,0x00,0xF0,0xB1,0xFA,0x00,0xE0,0xEC,0x64, \ +0xC0,0x20,0x00,0xF0,0xFE,0xF9,0x31,0x68,0x01,0x31,0x31,0x60,0x00,0xF0,0xF9, \ +0xF9,0x28,0x1C,0x00,0xF0,0x06,0xFA,0x3D,0x1C,0xE8,0xD1,0xC0,0x20,0x00,0xF0, \ +0xF1,0xF9,0x0E,0x49,0x0A,0x68,0x01,0x3A,0x0A,0x60,0x00,0xF0,0xEB,0xF9,0x0C, \ +0x48,0x0C,0x49,0x00,0x68,0x09,0x68,0x88,0x42,0x05,0xD0,0x0B,0x48,0x00,0x68, \ +0x00,0x28,0x01,0xD1,0x00,0xF0,0x59,0xFA,0x00,0x20,0x05,0xB0,0xF0,0xBD,0x79, \ +0x69,0x00,0x29,0x00,0xD1,0x5D,0xE7,0xF9,0x68,0x01,0x31,0xF9,0x60,0x59,0xE7, \ +0x00,0x04,0x00,0x02,0xE0,0x03,0x00,0x02,0xE4,0x03,0x00,0x02,0xF0,0x03,0x00, \ +0x02,0xFF,0xB5,0x07,0x1C,0xC0,0x20,0x0D,0x1C,0x09,0x9E,0x00,0xF0,0xC5,0xF9, \ +0x02,0x9A,0x91,0x08,0x04,0xD3,0xB9,0x68,0x29,0x40,0xA9,0x42,0x0E,0xD1,0x02, \ +0xE0,0xB9,0x68,0x29,0x40,0x0A,0xD0,0xB9,0x68,0x03,0x9B,0x00,0x24,0x19,0x60, \ +0x02,0x9A,0x51,0x08,0x3D,0xD3,0xB9,0x68,0xA9,0x43,0xB9,0x60,0x39,0xE0,0x07, \ +0x24,0x00,0x2E,0x36,0xD0,0x1D,0x49,0x0C,0x68,0x1D,0x49,0xA1,0x66,0xA5,0x67, \ +0xE5,0x1D,0x02,0x9A,0x79,0x35,0x2A,0x60,0x03,0x9B,0xE3,0x67,0xE7,0x66,0x39, \ +0x69,0x00,0x29,0x09,0xD0,0x21,0x67,0x39,0x69,0x49,0x6F,0x61,0x67,0x39,0x69, \ 
+0x49,0x6F,0x0C,0x67,0x39,0x69,0x4C,0x67,0x02,0xE0,0x3C,0x61,0x24,0x67,0x64, \ +0x67,0x79,0x69,0x01,0x31,0x79,0x61,0x07,0x21,0x21,0x63,0x01,0x21,0xA1,0x63, \ +0x0E,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xE6,0x64,0x00,0xF0,0x82,0xF9,0x01, \ +0x23,0xDE,0x42,0x03,0xD0,0xE0,0x1D,0x45,0x30,0x00,0xF0,0x97,0xFA,0x20,0x1C, \ +0x00,0xF0,0xCE,0xFA,0x68,0x68,0x04,0xB0,0xF0,0xBD,0x00,0xF0,0x73,0xF9,0x20, \ +0x1C,0xF9,0xE7,0x00,0x00,0xE0,0x03,0x00,0x02,0xE9,0x4A,0x00,0x00,0x00,0x04, \ +0x00,0x02,0x00,0xB5,0xFF,0xF7,0xE7,0xFB,0xFF,0xF7,0xC1,0xFD,0x00,0xF0,0x9F, \ +0xFB,0x00,0xF0,0xA5,0xFB,0x00,0xF0,0x05,0xFA,0x00,0xF0,0xA9,0xFB,0x00,0xF0, \ +0xAF,0xFB,0x00,0xBD,0x00,0xA0,0x00,0x47,0x13,0x00,0xA0,0xE3,0x00,0xF0,0x21, \ +0xE1,0x48,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0xFC,0xFF, \ +0xFF,0x0A,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x34,0x10,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0x04,0x20,0x90,0xE5,0x18,0x30,0x90,0xE5,0x01,0x20,0x82,0xE2, \ +0x04,0x20,0x80,0xE5,0x20,0x20,0x9F,0xE5,0x08,0xD0,0x90,0xE5,0x00,0x30,0x82, \ +0xE5,0x03,0x00,0xBD,0xE8,0x00,0x00,0x50,0xE3,0x01,0xF0,0x6F,0xE1,0xF0,0x80, \ +0xFD,0x08,0xFF,0xDF,0xFD,0xE8,0xE4,0x03,0x00,0x02,0xE0,0x03,0x00,0x02,0x68, \ +0x04,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x4E,0x4B,0x98,0x42,0x01,0xD0,0x02,0xB0, \ +0xF0,0xBD,0x00,0x20,0x00,0x90,0x00,0x26,0xC0,0x20,0x00,0xF0,0x19,0xF9,0x4A, \ +0x4D,0x29,0x68,0x09,0x68,0x01,0x91,0x00,0x29,0x01,0xD0,0x01,0xAA,0x8A,0x61, \ +0x29,0x68,0x46,0x4C,0x0E,0x60,0x29,0x68,0x04,0x31,0x29,0x60,0x22,0x68,0x91, \ +0x42,0x02,0xD1,0x43,0x49,0x09,0x68,0x29,0x60,0x43,0x49,0x0E,0x60,0x00,0xF0, \ +0x01,0xF9,0xC0,0x20,0x00,0xF0,0xFE,0xF8,0x01,0x99,0x00,0x29,0x5C,0xD0,0x01, \ +0x9C,0x21,0x69,0xA1,0x42,0x01,0xD1,0x00,0x21,0x08,0xE0,0x62,0x69,0x4A,0x61, \ +0x21,0x69,0x62,0x69,0x11,0x61,0x22,0x69,0x01,0xA9,0x91,0x61,0x21,0x69,0x01, \ +0x91,0x21,0x68,0x20,0x29,0x03,0xD9,0x20,0x39,0x21,0x60,0x00,0x25,0x04,0xE0, \ +0x27,0x1D,0xA2,0xCF,0x21,0x60,0x00,0x29,0x03,0xD0,0x69,0x46,0xA1,0x61,0x24, \ +0x61,0x00,0xE0,0xA6,0x61,0x00,0xF0,0xD8,0xF8,0x00,0x2D,0x02,0xD0,0x38,0x1C, \ +0x00,0xF0,0xEE,0xFB,0xC0,0x20,0x00,0xF0,0xD0,0xF8,0xA2,0x69,0x69,0x46,0x8A, \ +0x42,0x25,0xD1,0x21,0x68,0x20,0x29,0x01,0xD9,0x1F,0x21,0x00,0xE0,0x01,0x39, \ +0x20,0x4D,0x89,0x00,0x2A,0x68,0x89,0x18,0x1F,0x4A,0x12,0x68,0x91,0x42,0x07, \ +0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x1D,0x4A,0x89,0x10,0x12,0x68,0x89,0x00, \ +0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x22,0x61,0x0A,0x68,0x52,0x69,0x62, \ +0x61,0x14,0x61,0x0A,0x68,0x54,0x61,0xA1,0x61,0x03,0xE0,0x24,0x61,0xA1,0x61, \ +0x64,0x61,0x0C,0x60,0x00,0xF0,0xA4,0xF8,0xC0,0x20,0x00,0xF0,0xA1,0xF8,0x01, \ +0x99,0x00,0x29,0xA2,0xD1,0x0F,0x49,0x09,0x68,0x00,0x29,0x10,0xD1,0x0E,0x4C, \ +0x03,0x21,0x22,0x68,0x11,0x63,0x22,0x68,0x01,0x21,0x91,0x63,0x0C,0x4A,0x11, \ +0x68,0x01,0x31,0x11,0x60,0x00,0xF0,0x8D,0xF8,0x20,0x68,0x00,0xF0,0xE0,0xF9, \ +0x6C,0xE7,0x00,0xF0,0x87,0xF8,0x69,0xE7,0x4D,0x49,0x54,0x41,0x78,0x04,0x00, \ +0x02,0x74,0x04,0x00,0x02,0x70,0x04,0x00,0x02,0x7C,0x04,0x00,0x02,0xE0,0x03, \ +0x00,0x02,0x00,0x04,0x00,0x02,0x90,0xB5,0x07,0x1C,0x00,0x6B,0x04,0x28,0x0C, \ +0xD1,0xC0,0x20,0x00,0xF0,0x70,0xF8,0x0A,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60, \ +0x00,0xF0,0x6A,0xF8,0x38,0x1C,0x00,0xF0,0x77,0xF8,0x90,0xBD,0xC0,0x20,0x00, \ +0xF0,0x63,0xF8,0xBC,0x6E,0x00,0xF0,0x60,0xF8,0x00,0x2C,0xF6,0xD0,0x38,0x1C, \ +0x00,0xF0,0x75,0xFB,0x90,0xBD,0x00,0x04,0x00,0x02,0x80,0xB5,0x0C,0x4F,0x39, \ +0x68,0x88,0x6C,0x49,0x6C,0x00,0xF0,0x68,0xFB,0xC0,0x20,0x00,0xF0,0x4E,0xF8, \ +0x3A,0x68,0x01,0x21,0x11,0x63,0x3A,0x68,0x91,0x63,0x06,0x49,0x0A,0x68,0x01, \ 
+0x32,0x0A,0x60,0x00,0xF0,0x43,0xF8,0x38,0x68,0x00,0xF0,0x96,0xF9,0x80,0xBD, \ +0x00,0x00,0xE0,0x03,0x00,0x02,0x00,0x04,0x00,0x02,0x00,0xA3,0x18,0x47,0x10, \ +0x20,0x90,0xE5,0x03,0x20,0xC2,0xE3,0x48,0x20,0x42,0xE2,0x01,0x30,0xA0,0xE3, \ +0x00,0x30,0x82,0xE5,0x33,0x30,0xA0,0xE3,0x04,0x30,0x82,0xE5,0x00,0x30,0xA0, \ +0xE3,0x08,0x30,0x82,0xE5,0x0C,0x30,0x82,0xE5,0x10,0x30,0x82,0xE5,0x14,0x30, \ +0x82,0xE5,0x18,0x30,0x82,0xE5,0x1C,0x30,0x82,0xE5,0x20,0x30,0x82,0xE5,0x24, \ +0x30,0x82,0xE5,0x28,0x30,0x82,0xE5,0x2C,0x30,0x82,0xE5,0x0C,0x30,0x90,0xE5, \ +0x30,0x30,0x82,0xE5,0x00,0x30,0xA0,0xE3,0x34,0x30,0x82,0xE5,0x38,0x30,0x82, \ +0xE5,0x3C,0x30,0x82,0xE5,0x40,0x10,0x82,0xE5,0x44,0x30,0x82,0xE5,0x08,0x20, \ +0x80,0xE5,0x1E,0xFF,0x2F,0xE1,0x00,0xA3,0x18,0x47,0x00,0x30,0x0F,0xE1,0x3F, \ +0x20,0xA0,0xE3,0x02,0x10,0x03,0xE0,0x00,0x10,0x81,0xE1,0x01,0xF0,0x21,0xE1, \ +0x02,0x00,0xC3,0xE1,0x1E,0xFF,0x2F,0xE1,0xF0,0xB5,0x00,0x24,0x07,0x1C,0xC0, \ +0x20,0xFF,0xF7,0xEA,0xFF,0x29,0x49,0x2A,0x4D,0x0A,0x68,0x01,0x3A,0x0A,0x60, \ +0xBA,0x6B,0x00,0x21,0x00,0x2A,0x06,0xD0,0x3A,0x6B,0x01,0x2A,0x39,0xD0,0x02, \ +0x2A,0x37,0xD0,0xB9,0x63,0x07,0xE0,0x3A,0x6B,0x00,0x2A,0x32,0xD0,0x7A,0x6B, \ +0x00,0x2A,0x03,0xD0,0x79,0x63,0x03,0x21,0x39,0x63,0x2B,0xE0,0x39,0x63,0xF9, \ +0x6A,0x1D,0x4B,0x8E,0x00,0x9A,0x59,0x00,0x2A,0x05,0xD0,0x51,0x6A,0x0F,0x62, \ +0x57,0x62,0x79,0x62,0x3A,0x62,0x1E,0xE0,0x9F,0x51,0x3F,0x62,0x7F,0x62,0x17, \ +0x4A,0x3B,0x6C,0x16,0x68,0x33,0x43,0x13,0x60,0x2A,0x68,0x15,0x4B,0x00,0x2A, \ +0x02,0xD1,0x2F,0x60,0x19,0x60,0x0F,0xE0,0x1E,0x68,0xB1,0x42,0x0C,0xD2,0x19, \ +0x60,0xD3,0x6B,0x8B,0x42,0x08,0xD9,0xD1,0x6A,0x8B,0x42,0x04,0xD0,0x0E,0x49, \ +0x12,0x6C,0x0B,0x68,0x1A,0x43,0x0A,0x60,0x2F,0x60,0xFF,0xF7,0xA2,0xFF,0x0B, \ +0x48,0x00,0x68,0x29,0x68,0x88,0x42,0x04,0xD0,0x0A,0x48,0x00,0x68,0x00,0x28, \ +0x00,0xD1,0x01,0x24,0x20,0x1C,0xF0,0xBD,0x00,0x04,0x00,0x02,0xE4,0x03,0x00, \ +0x02,0x3C,0x0C,0x00,0x02,0xF4,0x03,0x00,0x02,0xFC,0x03,0x00,0x02,0xF8,0x03, \ +0x00,0x02,0xE0,0x03,0x00,0x02,0xF0,0x03,0x00,0x02,0x00,0xA0,0x00,0x47,0x00, \ +0x00,0xA0,0xE3,0x00,0x10,0x0F,0xE1,0x20,0x10,0x81,0xE3,0xF3,0x40,0x2D,0xE9, \ +0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x2C,0x30,0x9F,0xE5,0x00,0x00,0x93, \ +0xE5,0x28,0x20,0x9F,0xE5,0x00,0x10,0x92,0xE5,0x08,0xD0,0x80,0xE5,0x00,0x40, \ +0xA0,0xE3,0x00,0x00,0x51,0xE3,0x02,0x00,0x00,0x0A,0x1C,0x10,0x90,0xE5,0x00, \ +0x40,0x82,0xE5,0x18,0x10,0x80,0xE5,0x00,0x40,0x83,0xE5,0x02,0xFF,0xFF,0xEA, \ +0xE0,0x03,0x00,0x02,0x68,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41, \ +0x60,0xF7,0x46,0x00,0x00,0x94,0x04,0x00,0x02,0x90,0xB5,0x07,0x1C,0xC0,0x20, \ +0xFF,0xF7,0x4D,0xFF,0xB9,0x69,0x00,0x24,0x00,0x29,0x16,0xD0,0x3A,0x69,0xBA, \ +0x42,0x04,0xD1,0x0A,0x68,0xBA,0x42,0x0F,0xD1,0x0C,0x60,0x0D,0xE0,0x79,0x69, \ +0x51,0x61,0x39,0x69,0x7A,0x69,0x11,0x61,0xB9,0x69,0x0A,0x68,0xBA,0x42,0x04, \ +0xD1,0x3A,0x69,0x91,0x61,0x39,0x69,0xBA,0x69,0x11,0x60,0xBC,0x61,0xFF,0xF7, \ +0x30,0xFF,0x20,0x1C,0x90,0xBD,0xB0,0xB5,0x07,0x1C,0xC4,0x6E,0xC0,0x20,0xFF, \ +0xF7,0x28,0xFF,0xB9,0x6E,0x00,0x29,0x38,0xD0,0x00,0x2C,0x36,0xD0,0x21,0x68, \ +0x1C,0x4B,0x99,0x42,0x32,0xD1,0x00,0x25,0xBD,0x66,0x39,0x6F,0xB9,0x42,0x01, \ +0xD1,0x25,0x61,0x06,0xE0,0x21,0x61,0x79,0x6F,0x3A,0x6F,0x51,0x67,0x39,0x6F, \ +0x7A,0x6F,0x11,0x67,0x61,0x69,0x01,0x39,0x61,0x61,0x39,0x6B,0x07,0x29,0x10, \ +0xD1,0xFA,0x1D,0x79,0x32,0x51,0x60,0x10,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60, \ +0xFF,0xF7,0x02,0xFF,0x38,0x1C,0xFF,0xF7,0x0F,0xFF,0x00,0x28,0x04,0xD0,0xFF, \ +0xF7,0x75,0xFF,0x01,0xE0,0xFF,0xF7,0xF8,0xFE,0x78,0x6E,0x00,0x28,0x04,0xD0, \ 
+0xF8,0x1D,0x45,0x30,0xFF,0xF7,0x9F,0xFF,0x00,0xE0,0xFD,0x64,0xC0,0x20,0xFF, \ +0xF7,0xEC,0xFE,0xFF,0xF7,0xEA,0xFE,0xB0,0xBD,0x00,0x00,0x4E,0x44,0x56,0x44, \ +0x00,0x04,0x00,0x02,0x80,0xB5,0x07,0x1C,0xC0,0x20,0xFF,0xF7,0xDF,0xFE,0x39, \ +0x68,0x00,0x29,0x27,0xD0,0xBA,0x69,0x00,0x2A,0x24,0xD1,0x20,0x29,0x01,0xD9, \ +0x1F,0x21,0x00,0xE0,0x01,0x39,0x11,0x4A,0x89,0x00,0x12,0x68,0x89,0x18,0x10, \ +0x4A,0x12,0x68,0x91,0x42,0x07,0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x0E,0x4A, \ +0x89,0x10,0x12,0x68,0x89,0x00,0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x3A, \ +0x61,0x0A,0x68,0x52,0x69,0x7A,0x61,0x17,0x61,0x0A,0x68,0x57,0x61,0xB9,0x61, \ +0x03,0xE0,0x3F,0x61,0xB9,0x61,0x7F,0x61,0x0F,0x60,0xFF,0xF7,0xB2,0xFE,0x00, \ +0x20,0x80,0xBD,0x78,0x04,0x00,0x02,0x74,0x04,0x00,0x02,0x70,0x04,0x00,0x02, \ +0xF0,0xB5,0x05,0x1C,0xC0,0x20,0xFF,0xF7,0xA5,0xFE,0x67,0x49,0x67,0x4C,0x0A, \ +0x68,0x67,0x4F,0x01,0x3A,0x0A,0x60,0xAA,0x6B,0x00,0x2A,0x74,0xD0,0x00,0x26, \ +0xAE,0x63,0xEA,0x6A,0x2B,0x6A,0xAB,0x42,0x26,0xD0,0x69,0x6A,0x59,0x62,0x29, \ +0x6A,0x6B,0x6A,0x19,0x62,0x91,0x00,0x5F,0x4A,0x53,0x58,0xAB,0x42,0x11,0xD1, \ +0x2B,0x6A,0x53,0x50,0x5D,0x49,0x0B,0x68,0x00,0x2B,0x02,0xD0,0x2E,0x6C,0xB3, \ +0x43,0x0B,0x60,0x5B,0x49,0x0B,0x68,0x9D,0x42,0x04,0xD1,0x5A,0x4B,0x1B,0x68, \ +0x9B,0x00,0xD2,0x58,0x0A,0x60,0xFF,0xF7,0x78,0xFE,0x55,0x49,0x38,0x68,0x09, \ +0x68,0x88,0x42,0x60,0xD0,0x20,0x68,0x00,0x28,0x5E,0xD1,0x95,0xE0,0x00,0x26, \ +0x4E,0x4B,0x92,0x00,0x9E,0x50,0x2A,0x6C,0xD3,0x43,0x50,0x4A,0x16,0x68,0x33, \ +0x40,0x13,0x60,0x4B,0x4A,0x12,0x68,0x00,0x2A,0x03,0xD0,0x2E,0x6C,0xB2,0x43, \ +0x48,0x4E,0x32,0x60,0x1A,0x06,0x12,0x0E,0x02,0xD0,0x49,0x4B,0x9A,0x5C,0x14, \ +0xE0,0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x46,0x4B,0x9A,0x5C,0x08,0x32, \ +0x0C,0xE0,0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x42,0x4B,0x9A,0x5C,0x10, \ +0x32,0x04,0xE0,0x1A,0x0A,0x29,0xD0,0x3F,0x4B,0x9A,0x5C,0x18,0x32,0x3B,0x4B, \ +0x1A,0x60,0x39,0x4A,0x12,0x68,0x95,0x42,0x4D,0xD1,0x38,0x4E,0x35,0x4B,0x32, \ +0x68,0x36,0x4D,0x92,0x00,0x9A,0x58,0x2A,0x60,0x33,0x4A,0x12,0x68,0x00,0x2A, \ +0x42,0xD0,0x0E,0x1C,0x09,0x68,0x01,0x31,0x31,0x60,0xFF,0xF7,0x2B,0xFE,0xC0, \ +0x20,0xFF,0xF7,0x28,0xFE,0x00,0xE0,0x42,0xE0,0x31,0x68,0x01,0x39,0x31,0x60, \ +0x2A,0x49,0x0A,0x68,0x11,0x06,0x09,0x0E,0x0D,0xD0,0x2B,0x4B,0x59,0x5C,0x1E, \ +0xE0,0x28,0x4B,0x20,0x21,0x19,0x60,0x25,0x49,0x00,0x26,0x0E,0x60,0xFF,0xF7, \ +0x13,0xFE,0x38,0xE0,0x39,0xE0,0x38,0xE0,0x13,0x0A,0x19,0x06,0x09,0x0E,0x03, \ +0xD0,0x22,0x4B,0x59,0x5C,0x08,0x31,0x0B,0xE0,0x1B,0x0A,0x19,0x06,0x09,0x0E, \ +0x03,0xD0,0x1E,0x4B,0x59,0x5C,0x10,0x31,0x03,0xE0,0x19,0x0A,0x1C,0x4B,0x59, \ +0x5C,0x18,0x31,0x15,0x4B,0x89,0x00,0x59,0x58,0x17,0x4E,0xCB,0x6B,0x36,0x68, \ +0xB3,0x42,0x05,0xD8,0x29,0x60,0x09,0x6C,0x11,0x4B,0xC9,0x43,0x11,0x40,0x19, \ +0x60,0xFF,0xF7,0xEC,0xFD,0x0F,0x4A,0x38,0x68,0x11,0x68,0x88,0x42,0x0F,0xD0, \ +0x20,0x68,0x00,0x28,0x0C,0xD1,0x09,0xE0,0xFF,0xF7,0xE1,0xFD,0x0A,0x49,0x38, \ +0x68,0x09,0x68,0x88,0x42,0x04,0xD0,0x20,0x68,0x00,0x28,0x01,0xD1,0xFF,0xF7, \ +0x51,0xFE,0xF0,0xBD,0x00,0x04,0x00,0x02,0xF0,0x03,0x00,0x02,0xE0,0x03,0x00, \ +0x02,0x3C,0x0C,0x00,0x02,0xF8,0x03,0x00,0x02,0xE4,0x03,0x00,0x02,0xFC,0x03, \ +0x00,0x02,0xF4,0x03,0x00,0x02,0x3C,0x0B,0x00,0x02,0x02,0x48,0x00,0x21,0x01, \ +0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x9C,0x04,0x00,0x02,0x02,0x48,0x00,0x21, \ +0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xA4,0x04,0x00,0x02,0x02,0x48,0x00, \ +0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xAC,0x04,0x00,0x02,0x02,0x48, \ +0x00,0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0xB4,0x04,0x00,0x02,0x4B, \ 
+0x08,0x02,0x1C,0x02,0xD1,0x00,0xF0,0xBD,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9, \ +0x00,0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91, \ +0x42,0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0xF7,0x46, \ +0x00,0x00,0xB0,0xB5,0x0D,0x48,0x42,0x6E,0x03,0x6E,0x97,0x00,0xC1,0x1F,0x75, \ +0x39,0x9C,0x00,0x0C,0x59,0xCD,0x59,0x2C,0x19,0xCC,0x51,0x59,0x1E,0x36,0x23, \ +0x00,0x29,0x01,0x66,0x03,0xDA,0x51,0x1E,0x41,0x66,0x03,0x66,0x03,0xE0,0x51, \ +0x1E,0x41,0x66,0x00,0xD5,0x43,0x66,0x60,0x00,0x40,0x08,0xB0,0xBD,0x38,0x05, \ +0x00,0x02,0x80,0xB5,0x09,0x49,0x17,0x22,0x0A,0x66,0x36,0x22,0x4A,0x66,0x07, \ +0x4A,0x00,0x21,0x03,0x0C,0x1F,0x18,0x8B,0x00,0xD7,0x50,0x05,0x4B,0x01,0x31, \ +0x58,0x43,0x05,0x4B,0xC0,0x18,0x37,0x29,0xF4,0xDB,0x80,0xBD,0x38,0x05,0x00, \ +0x02,0xBC,0x04,0x00,0x02,0xCD,0x0D,0x01,0x00,0xE1,0x19,0xD6,0x66,0xCB,0x17, \ +0x59,0x40,0xC9,0x1A,0xC2,0x17,0x50,0x40,0x80,0x1A,0x0C,0xB4,0x4B,0x08,0x02, \ +0x1C,0x02,0xD1,0x00,0xF0,0x64,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9,0x00,0x23, \ +0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91,0x42,0xF9, \ +0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0x0C,0xBC,0x5A,0x40, \ +0x50,0x40,0x80,0x1A,0x59,0x40,0xC9,0x1A,0x70,0x47,0x43,0x1A,0x93,0x42,0x30, \ +0xD3,0x84,0x46,0x8B,0x07,0x07,0xD0,0x52,0x1E,0x29,0xD3,0x0B,0x78,0x03,0x70, \ +0x40,0x1C,0x49,0x1C,0x8B,0x07,0xF7,0xD1,0x83,0x07,0x17,0xD1,0x10,0x3A,0x05, \ +0xD3,0xB0,0xB4,0xB8,0xC9,0xB8,0xC0,0x10,0x3A,0xFB,0xD2,0xB0,0xBC,0x0C,0x32, \ +0x0F,0xD3,0x08,0xC9,0x08,0xC0,0x12,0x1F,0xFB,0xD2,0x0A,0xE0,0x08,0xC9,0x03, \ +0x70,0x1B,0x0A,0x43,0x70,0x1B,0x0A,0x83,0x70,0x1B,0x0A,0xC3,0x70,0x00,0x1D, \ +0x12,0x1F,0xF4,0xD2,0xD2,0x1C,0x05,0xD3,0x0B,0x78,0x03,0x70,0x49,0x1C,0x40, \ +0x1C,0x52,0x1E,0xF9,0xD2,0x60,0x46,0xF7,0x46,0x03,0x1C,0x0B,0x43,0x13,0x43, \ +0x9B,0x07,0x04,0xD1,0x12,0x1F,0x8B,0x58,0x83,0x50,0xFB,0xD1,0xF7,0x46,0x52, \ +0x1E,0x8B,0x5C,0x83,0x54,0xFB,0xD1,0xF7,0x46,0x00,0x00,0x00,0x47,0x08,0x47, \ +0x10,0x47,0x18,0x47,0x20,0x47,0x28,0x47,0x30,0x47,0x38,0x47,0x78,0x47,0x00, \ +0x00,0x2C,0xC0,0x9F,0xE5,0xFF,0x7F,0x8C,0xE8,0x3C,0xC0,0x8C,0xE2,0x0C,0x00, \ +0x8F,0xE2,0x3C,0x10,0x4C,0xE2,0x04,0xE0,0x4E,0xE2,0x00,0xE0,0x8C,0xE5,0x6C, \ +0xEC,0xFF,0xEA,0x20,0x00,0x00,0x80,0x44,0x69,0x76,0x69,0x64,0x65,0x20,0x62, \ +0x79,0x20,0x7A,0x65,0x72,0x6F,0x00,0x00,0xA0,0x05,0x00,0x02,0x78,0x47,0x00, \ +0x00,0x01,0xE0,0x8E,0xE3,0x04,0x00,0xA0,0xE3,0x00,0x00,0x90,0xE5,0xFF,0x04, \ +0x00,0xE2,0xEA,0x04,0x50,0xE3,0x01,0x00,0xA0,0x03,0x00,0x00,0xA0,0x13,0x1E, \ +0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1, \ +0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x05,0x0B,0x40,0xE2,0x1B,0x0B,0x50, \ +0xE3,0x01,0x00,0xA0,0x33,0x00,0x00,0xA0,0x23,0x1E,0xFF,0x2F,0xE1,0x78,0x47, \ +0x00,0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01, \ +0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3, \ +0x00,0x00,0xA0,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E, \ +0xE3,0x00,0x00,0x8F,0xE2,0x1E,0xFF,0x2F,0xE1,0x55,0x6E,0x6B,0x6E,0x6F,0x77, \ +0x6E,0x20,0x45,0x72,0x72,0x6F,0x72,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x42, \ +0x72,0x61,0x6E,0x63,0x68,0x20,0x54,0x68,0x72,0x6F,0x75,0x67,0x68,0x20,0x5A, \ +0x65,0x72,0x6F,0x00,0x01,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E, \ +0x65,0x64,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E,0x00, \ +0x00,0x00,0x02,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65,0x64, \ +0x20,0x53,0x57,0x49,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F, \ 
+0x6E,0x00,0x00,0x00,0x03,0x00,0x02,0x00,0x50,0x72,0x65,0x66,0x65,0x74,0x63, \ +0x68,0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x04,0x00,0x02,0x00,0x44,0x61, \ +0x74,0x61,0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x05,0x00,0x02,0x00,0x41, \ +0x64,0x64,0x72,0x65,0x73,0x73,0x20,0x45,0x78,0x63,0x65,0x70,0x74,0x69,0x6F, \ +0x6E,0x00,0x00,0x00,0x06,0x00,0x02,0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C, \ +0x65,0x64,0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x07,0x00, \ +0x02,0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65,0x64,0x20,0x46,0x61,0x73, \ +0x74,0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x00,0x00,0x00, \ +0x50,0x50,0x00,0x00,0x68,0x50,0x00,0x00,0x84,0x50,0x00,0x00,0xA4,0x50,0x00, \ +0x00,0xB8,0x50,0x00,0x00,0xC8,0x50,0x00,0x00,0xE0,0x50,0x00,0x00,0xF8,0x50, \ +0x00,0x00,0x28,0x20,0x4F,0xE2,0x00,0x01,0x92,0xE7,0x04,0xEC,0xFF,0xEA,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x40,0x2D,0xE9,0x50,0x00,0x9F,0xE5,0x50,0x10,0x9F,0xE5, \ +0x01,0x20,0xA0,0xE1,0x4C,0x40,0x9F,0xE5,0x04,0x20,0x82,0xE0,0x05,0x00,0x00, \ +0xEB,0x44,0x20,0x9F,0xE5,0x44,0x00,0x9F,0xE5,0x00,0x10,0xA0,0xE1,0x02,0x10, \ +0x81,0xE0,0x05,0x00,0x00,0xEB,0x00,0x80,0xBD,0xE8,0x02,0x00,0x51,0xE1,0x04, \ +0x30,0x90,0x34,0x04,0x30,0x81,0x34,0xFB,0xFF,0xFF,0x3A,0x0E,0xF0,0xA0,0xE1, \ +0x00,0x20,0xA0,0xE3,0x01,0x00,0x50,0xE1,0x04,0x20,0x80,0x34,0xFB,0xFF,0xFF, \ +0x3A,0x0E,0xF0,0xA0,0xE1,0x28,0x52,0x00,0x00,0x00,0x00,0x00,0x02,0xE0,0x05, \ +0x00,0x00,0xEC,0x07,0x00,0x00,0xE0,0x05,0x00,0x02,0x78,0x47,0x00,0x00,0xD3, \ +0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3, \ +0x04,0x10,0x80,0xE5,0x0C,0x10,0x80,0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90, \ +0xE5,0x00,0x00,0xA0,0xE3,0x10,0xFF,0x2F,0xE1,0x00,0x00,0xA0,0xE1,0x00,0x00, \ +0xA0,0xE1,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x14,0x00,0x0A,0x00,0x90,0x00,0x30,0x00,0x08,0x06,0x07,0x00,0x82,0x84,0x8B, \ +0x96,0x09,0x04,0x02,0x41,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x04, \ +0xAC,0x6C,0x32,0x70,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x64,0x00,0x30,0x75,0x64,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x04,0x03,0x00,0x04,0xAC,0x6C,0x32,0x70,0x55,0x4E,0x48,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x00,0x00, \ +0x00,0x00,0x45,0x55,0x00,0x00,0x00,0x00,0x00,0xFA,0x00,0x00,0x00,0xFA,0x00, \ +0x00,0x2A,0x09,0x2A,0x09,0x07,0x00,0x3F,0x00,0x08,0x08,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x02,0x00,0x41,0x54,0x4D,0x45,0x4C,0x5F,0x41,0x50,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x01, \ 
+0x00,0x05,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x01,0x01,0x00,0x5A, \ +0x02,0x8C,0x00,0x00,0x00,0x00,0x1E,0x1E,0x1E,0x1E,0x00,0x00,0x28,0x28,0x28, \ +0x00,0x00,0x32,0x3C,0x46,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x04,0x08,0x10,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x07,0xFF,0x07,0xFF,0x1F,0x00,0x06,0x00, \ +0x1E,0x00,0x20,0xFF,0x3F,0x01,0x01,0x01,0x0A,0x0A,0x0E,0x01,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01, \ +0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x01,0x01,0x00, \ +0x01,0x01,0x01,0x00,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x00, \ +0x01,0x01,0x00,0x00,0x01,0x01,0x00,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01, \ +0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x12,0x01,0x10,0x01,0xFE,0x01,0x00,0x08,0xEB,0x03,0x05,0x76,0x00,0x01, \ +0x00,0x00,0x00,0x01,0x09,0x02,0x20,0x00,0x01,0x01,0x00,0x80,0xFA,0x09,0x04, \ +0x00,0x00,0x02,0xFF,0x00,0xFF,0x00,0x07,0x05,0x85,0x02,0x40,0x00,0x00,0x07, \ +0x05,0x02,0x02,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0xAA,0xAA,0x03,0x00,0x00,0x00,0xAA,0xAA,0x03,0x00,0x00,0xF8,0x37,0x81, \ +0xF3,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x58,0x00, \ +0x00,0x00,0xD8,0x05,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00, \ +0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00, \ +0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00, \ +0x00,0x00,0x04,0x00,0x00,0x00,0xF6,0x07,0x00,0x00,0xFB,0x07,0x00,0x00,0x00, \ +0x08,0x00,0x00,0x05,0x08,0x00,0x00,0x0A,0x08,0x00,0x00,0x0F,0x08,0x00,0x00, \ +0x14,0x08,0x00,0x00,0x19,0x08,0x00,0x00,0x1E,0x08,0x00,0x00,0x23,0x08,0x00, \ +0x00,0x28,0x08,0x00,0x00,0x2D,0x08,0x00,0x00,0x32,0x08,0x00,0x00,0x3E,0x08, \ +0x00,0x00,0x43,0x6F,0x70,0x79,0x72,0x69,0x67,0x68,0x74,0x20,0x28,0x63,0x29, \ +0x20,0x31,0x39,0x39,0x36,0x2D,0x32,0x30,0x30,0x30,0x20,0x45,0x78,0x70,0x72, \ +0x65,0x73,0x73,0x20,0x4C,0x6F,0x67,0x69,0x63,0x20,0x49,0x6E,0x63,0x2E,0x20, \ +0x2A,0x20,0x54,0x68,0x72,0x65,0x61,0x64,0x58,0x20,0x54,0x48,0x55,0x4D,0x42, \ 
+0x2D,0x46,0x2F,0x41,0x52,0x4D,0x20,0x56,0x65,0x72,0x73,0x69,0x6F,0x6E,0x20, \ +0x47,0x33,0x2E,0x30,0x66,0x2E,0x33,0x2E,0x30,0x62,0x20,0x2A,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0xF0,0xF0,0xF0,0xF0,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x47,0x2D,0x47,0x42,0x2D, \ +0x47,0x4C,0x2D,0x4D,0x2D,0x44,0x2D,0x44,0x4C,0x2D,0x4B,0x4D,0x4C,0x2D,0x43, \ +0x4D,0x52,0x2D,0x48,0x4D,0x52,0x2D,0x4D,0x4C,0x32,0x2D,0x47,0x5A,0x2D,0x4B, \ +0x48,0x32,0x2D,0x43,0x4D,0x2D,0x52,0x50,0x2D,0x54,0x43,0x2D,0x4E,0x48,0x2D, \ +0x54,0x44,0x2D,0x41,0x50,0x2D,0x48,0x41,0x2D,0x47,0x46,0x2D,0x44,0x44,0x2D, \ +0x41,0x54,0x2D,0x4D,0x46,0x2D,0x4D,0x53,0x2D,0x44,0x57,0x2D,0x55,0x53,0x41, \ +0x2D,0x43,0x41,0x2D,0x53,0x44,0x2D,0x53,0x44,0x53,0x55,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01, \ +0x00,0x00,0x00,0x85,0x8E,0xD7,0x66,0x09,0x8C,0xD3,0xD5,0xF5,0xD8,0x09,0x0A, \ +0xFB,0x87,0x1F,0xBF,0x67,0xF7,0x8D,0xCB,0x69,0x07,0xF7,0xBD,0x34,0x12,0x3D, \ +0x50,0xC8,0x84,0x4F,0x7F,0xA3,0x02,0xDE,0x61,0xAE,0x8D,0x40,0xA7,0xE8,0xBD, \ +0x24,0x7A,0xEA,0xA2,0x15,0x51,0x57,0x2E,0xE6,0xBB,0xFF,0x7F,0xD5,0xF6,0x7A, \ +0x83,0x2A,0x63,0x77,0x1D,0x86,0x13,0x7C,0x2E,0x9F,0xE1,0x05,0x57,0x5F,0x69, \ +0x2E,0x6B,0x93,0x87,0x6E,0x9A,0xA1,0x50,0x94,0x0E,0x8B,0x72,0xAE,0x55,0xCC, \ +0xC5,0xB1,0x8A,0x0A,0xB1,0xD7,0x72,0x6F,0x85,0x17,0x5C,0x22,0xD0,0xA3,0xFD, \ +0xC4,0x51,0x61,0x98,0xED,0x89,0x9F,0x82,0xDB,0xF1,0x9D,0xC5,0xFB,0xBC,0x89, \ +0xC1,0xEE,0x83,0x59,0xB1,0x59,0x63,0x30,0x5C,0x50,0xCC,0xC9,0x5A,0xBC,0x9C, \ +0xF9,0x30,0xE2,0x2F,0x42,0x5E,0xF6,0x39,0xD2,0x7B,0x15,0x75,0xFB,0x58,0xC1, \ +0x40,0x3E,0x9A,0xEB,0x27,0xD9,0xA2,0x82,0xC5,0xC2,0xD6,0x69,0x05,0xB3,0x30, \ +0x8E,0xED,0xD2,0xDD,0x83,0x10,0x41,0xA4,0x1D,0x1F,0x15,0xE2,0x60,0x56,0xC5, \ +0x2F,0xF3,0x04,0x99,0xEF,0x8E,0xE1,0x08,0x32,0x59,0x4A,0x4C,0xED,0x7B,0x5B, \ +0x40,0xFC,0x02,0x81,0xD9,0x41,0x53,0x51,0xFA,0x3D,0xFF,0xAC,0xB5,0x6C,0x09, \ +0x6D,0x1D,0xCC,0xB3,0x2B,0xFF,0x15,0x3D,0x25,0x17,0x00,0x00,0x00,0x36,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00} + +#define FW_503RFMD_EXTERNAL { \ +0x80,0xB5,0x16,0x49,0x00,0x20,0x08,0x70,0x15,0x48,0x81,0x79,0xC0,0x79,0x00, \ +0x02,0x08,0x43,0x68,0x28,0x04,0xD0,0x03,0x21,0x0B,0x20,0xFC,0xF7,0xEB,0xFD, \ +0x80,0xBD,0x08,0x21,0x0B,0x20,0xFC,0xF7,0xE6,0xFD,0x0F,0x27,0x3F,0x06,0xB8, \ +0x88,0x0D,0x4B,0x18,0x40,0xB8,0x80,0xB8,0x89,0x0C,0x4B,0x18,0x40,0xB8,0x81, \ +0x00,0xF0,0x02,0xFB,0xB8,0x89,0x0A,0x4B,0x18,0x43,0xB8,0x81,0xB8,0x88,0x09, \ +0x4B,0x18,0x43,0xB8,0x80,0x01,0x21,0x0B,0x20,0xFC,0xF7,0xCE,0xFD,0x80,0xBD, \ +0x00,0x00,0x7B,0x01,0x00,0x02,0xA8,0x0A,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC, \ +0xEC,0xFF,0xFF,0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0x80,0xB5,0x2D,0x48, \ 
+0x01,0x78,0x0A,0x29,0x4B,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00, \ +0x1C,0x47,0x05,0x09,0x0D,0x47,0x11,0x15,0x19,0x47,0x1D,0x81,0x78,0x26,0x4A, \ +0x89,0x18,0x16,0xE0,0x81,0x78,0x25,0x4A,0x89,0x18,0x12,0xE0,0x81,0x78,0x24, \ +0x4A,0x89,0x18,0x0E,0xE0,0x81,0x78,0x23,0x4A,0x89,0x18,0x0A,0xE0,0x81,0x78, \ +0x22,0x4A,0x89,0x18,0x06,0xE0,0x81,0x78,0x21,0x4A,0x89,0x18,0x02,0xE0,0x81, \ +0x78,0x20,0x4A,0x89,0x18,0x00,0x29,0x29,0xD0,0x0F,0x27,0x3F,0x06,0xBA,0x88, \ +0x1E,0x4B,0x1A,0x40,0xBA,0x80,0xBA,0x89,0x1D,0x4B,0x1A,0x40,0xBA,0x81,0x43, \ +0x78,0x00,0x22,0x00,0x2B,0x07,0xD9,0x83,0x18,0x1B,0x79,0x01,0x32,0x0B,0x70, \ +0x43,0x78,0x01,0x31,0x93,0x42,0xF7,0xD8,0x02,0xF0,0x20,0xFC,0xB8,0x89,0x15, \ +0x4B,0x18,0x43,0xB8,0x81,0xB8,0x88,0x14,0x4B,0x18,0x43,0xB8,0x80,0x01,0x21, \ +0x01,0x20,0xFC,0xF7,0x70,0xFD,0x80,0xBD,0x04,0x21,0x01,0x20,0xFC,0xF7,0x6B, \ +0xFD,0x80,0xBD,0x03,0x21,0x01,0x20,0xFC,0xF7,0x66,0xFD,0x80,0xBD,0x00,0x00, \ +0xB0,0x0A,0x00,0x02,0x1C,0x01,0x00,0x02,0x74,0x00,0x00,0x02,0xD8,0x00,0x00, \ +0x02,0x98,0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x30,0x01, \ +0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x13,0x13,0x00,0x00,0xE8, \ +0xE8,0x00,0x00,0xF0,0xB5,0x83,0xB0,0x63,0x49,0x0E,0x20,0x08,0x63,0x80,0x39, \ +0x62,0x4D,0x60,0x4C,0xCA,0x1D,0x68,0x7A,0xA9,0x32,0xCF,0x1D,0xE6,0x1D,0x19, \ +0x36,0x99,0x37,0x00,0x28,0x02,0x92,0x2F,0xD0,0xF0,0x7B,0x5D,0x49,0x00,0x28, \ +0x18,0xD0,0xF0,0x79,0x01,0x28,0x01,0xDB,0x0E,0x28,0x05,0xDD,0x03,0x21,0x03, \ +0x20,0xFC,0xF7,0x2D,0xFD,0x03,0xB0,0xF0,0xBD,0x00,0x20,0x00,0x22,0x0B,0x18, \ +0x9A,0x73,0x0A,0x54,0x01,0x30,0x00,0x04,0x00,0x0C,0x0E,0x28,0xF7,0xDB,0xFA, \ +0x72,0x01,0x22,0x3A,0x73,0x2A,0xE0,0xF0,0x79,0x41,0x18,0x49,0x7B,0x00,0x29, \ +0x0A,0xD1,0x01,0xF0,0xD0,0xFF,0x00,0x06,0x00,0x0E,0xF0,0x71,0x04,0xD1,0x03, \ +0x21,0x03,0x20,0xFC,0xF7,0x0E,0xFD,0xDF,0xE7,0x00,0x22,0x3A,0x73,0x17,0xE0, \ +0x02,0x98,0x46,0x49,0x40,0x79,0x40,0x00,0x08,0x5A,0xF1,0x79,0x01,0x22,0x01, \ +0x91,0x01,0x39,0x8A,0x40,0x10,0x40,0x0B,0xD1,0x01,0x98,0x01,0xF0,0xB5,0xFF, \ +0xF0,0x71,0xF0,0x79,0x00,0x28,0x04,0xD1,0x03,0x21,0x03,0x20,0xFC,0xF7,0xF3, \ +0xFC,0xC4,0xE7,0x0F,0x20,0x00,0x06,0x81,0x88,0x3A,0x4B,0x3C,0x4A,0x19,0x40, \ +0x81,0x80,0x81,0x89,0x38,0x4B,0x19,0x40,0x81,0x81,0x38,0x49,0x00,0x20,0x13, \ +0x5C,0x0E,0x18,0x01,0x30,0x00,0x04,0x00,0x0C,0x04,0x28,0x33,0x74,0xF7,0xDB, \ +0x35,0x48,0x35,0x4A,0x00,0x88,0x00,0x23,0x10,0x80,0xC8,0x1D,0x09,0x30,0x0E, \ +0x22,0x04,0x21,0x01,0xF0,0xBE,0xFC,0x31,0x49,0x01,0x20,0xCA,0x1D,0x69,0x32, \ +0xD0,0x71,0x60,0x31,0x00,0x91,0x04,0x20,0x48,0x73,0x02,0x98,0x00,0x22,0x02, \ +0x71,0x2C,0x4E,0x06,0x22,0x20,0x1C,0xF1,0x1D,0x35,0x31,0xFA,0xF7,0x43,0xFA, \ +0x20,0x22,0xA0,0x1D,0xF1,0x1D,0x15,0x31,0xFA,0xF7,0x3D,0xFA,0xE0,0x1D,0x19, \ +0x30,0x81,0x7B,0x25,0x4A,0x51,0x71,0x00,0x99,0x00,0x22,0x4A,0x72,0xAA,0x71, \ +0x81,0x79,0x20,0x23,0xB1,0x74,0xC0,0x79,0xF0,0x74,0x20,0x8D,0xB0,0x82,0x60, \ +0x8D,0xF0,0x82,0xA0,0x8D,0x30,0x83,0x1E,0x48,0x01,0x78,0x19,0x43,0x01,0x70, \ +0x01,0x20,0xF8,0x71,0x68,0x71,0xB8,0x79,0x01,0x28,0x02,0xD1,0x00,0x20,0x02, \ +0xF0,0x84,0xFE,0x00,0x20,0xB8,0x71,0x0F,0x20,0x00,0x06,0x81,0x89,0x16,0x4B, \ +0x19,0x43,0x81,0x81,0x81,0x88,0x15,0x4B,0x19,0x43,0x81,0x80,0x00,0x99,0x01, \ +0x20,0x08,0x72,0x08,0x21,0x03,0x20,0xFC,0xF7,0x85,0xFC,0x56,0xE7,0xB0,0x0A, \ +0x00,0x02,0x60,0x06,0x00,0x02,0xC8,0x00,0x00,0x02,0x30,0x01,0x00,0x02,0x8A, \ +0x01,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x18,0x00,0x00,0x02, \ +0xA0,0x01,0x00,0x02,0x88,0x01,0x00,0x02,0x98,0x00,0x00,0x02,0xE0,0x05,0x00, \ 
+0x02,0xD8,0x00,0x00,0x02,0x1C,0x01,0x00,0x02,0x7B,0x01,0x00,0x02,0x13,0x13, \ +0x00,0x00,0xE8,0xE8,0x00,0x00,0xF0,0xB5,0x44,0x4E,0x42,0x4D,0x70,0x7A,0xEC, \ +0x1D,0x19,0x34,0x00,0x28,0x42,0x4F,0x12,0xD0,0xF8,0x7A,0x00,0x28,0x04,0xD1, \ +0x03,0x21,0x04,0x20,0xFC,0xF7,0x52,0xFC,0xF0,0xBD,0xE0,0x79,0x3D,0x49,0x40, \ +0x18,0x40,0x7B,0x00,0x28,0x14,0xD1,0x03,0x21,0x04,0x20,0xFC,0xF7,0x47,0xFC, \ +0xF0,0xBD,0x39,0x48,0x3A,0x49,0x40,0x79,0x40,0x00,0x08,0x5A,0xE1,0x79,0x01, \ +0x22,0x01,0x39,0x8A,0x40,0x10,0x40,0x04,0xD1,0x03,0x21,0x04,0x20,0xFC,0xF7, \ +0x37,0xFC,0xF0,0xBD,0x0F,0x20,0x00,0x06,0x81,0x88,0x32,0x4B,0x19,0x40,0x81, \ +0x80,0x81,0x89,0x31,0x4B,0x19,0x40,0x81,0x81,0xA0,0x79,0x30,0x49,0x02,0x28, \ +0x02,0xD1,0x03,0x22,0xCA,0x71,0x03,0xE0,0x01,0x28,0x39,0xD1,0x04,0x22,0xCA, \ +0x71,0x2C,0x49,0x00,0x22,0x8A,0x70,0x2B,0x49,0x06,0x22,0x88,0x70,0x04,0x39, \ +0x28,0x1C,0xFA,0xF7,0x9B,0xF9,0x20,0x22,0xA8,0x1D,0x28,0x49,0xFA,0xF7,0x96, \ +0xF9,0xA0,0x7A,0x24,0x49,0x00,0x22,0x48,0x71,0x25,0x48,0x08,0x21,0x42,0x72, \ +0xB2,0x71,0x04,0x20,0xFC,0xF7,0x07,0xFC,0x01,0x22,0xFA,0x71,0x72,0x71,0xB8, \ +0x79,0x01,0x28,0x02,0xD1,0x00,0x20,0x02,0xF0,0xEA,0xFD,0x00,0x20,0xB8,0x71, \ +0x0F,0x20,0x00,0x06,0x81,0x89,0x1C,0x4B,0x19,0x43,0x81,0x81,0x81,0x88,0x1B, \ +0x4B,0x19,0x43,0x81,0x80,0xE0,0x79,0x02,0xF0,0x49,0xFD,0x28,0x8D,0x81,0x02, \ +0x05,0x20,0xFB,0xF7,0x22,0xFE,0xF0,0xBD,0x03,0x21,0x04,0x20,0xFC,0xF7,0xE5, \ +0xFB,0x0F,0x20,0x00,0x06,0x81,0x89,0x10,0x4B,0x19,0x43,0x81,0x81,0x81,0x88, \ +0x0F,0x4B,0x19,0x43,0x81,0x80,0xF0,0xBD,0xB0,0x0A,0x00,0x02,0xC8,0x00,0x00, \ +0x02,0x80,0x06,0x00,0x02,0x30,0x01,0x00,0x02,0x90,0x06,0x00,0x02,0x8A,0x01, \ +0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x50,0x06,0x00,0x02,0x1C, \ +0x01,0x00,0x02,0x18,0x01,0x00,0x02,0xF4,0x00,0x00,0x02,0x40,0x06,0x00,0x02, \ +0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0xF0,0xB5,0x2B,0x48,0x10,0x23,0x01, \ +0x78,0x28,0x4D,0x99,0x43,0x01,0x70,0x01,0x78,0x20,0x23,0x99,0x43,0x01,0x70, \ +0x27,0x48,0x27,0x49,0x40,0x79,0x40,0x00,0x09,0x5A,0xEC,0x18,0xE0,0x79,0x01, \ +0x26,0x32,0x1C,0x47,0x1E,0xBA,0x40,0x11,0x40,0x04,0xD1,0x03,0x21,0x05,0x20, \ +0xFC,0xF7,0xA0,0xFB,0xF0,0xBD,0xA1,0x79,0x01,0x29,0x04,0xD0,0x03,0x21,0x05, \ +0x20,0xFC,0xF7,0x98,0xFB,0xF0,0xBD,0x02,0xF0,0xEF,0xFC,0x0F,0x27,0x3F,0x06, \ +0xB8,0x88,0x19,0x4B,0x18,0x40,0xB8,0x80,0xB8,0x89,0x18,0x4B,0x18,0x40,0xB8, \ +0x81,0x20,0x7A,0x17,0x4C,0x60,0x71,0x17,0x48,0xC1,0x1D,0x39,0x31,0x8E,0x70, \ +0xC1,0x1D,0x15,0x31,0xA8,0x1D,0x05,0x1C,0x62,0x79,0xFA,0xF7,0x01,0xF9,0x62, \ +0x79,0x12,0x4C,0x28,0x1C,0xE1,0x1D,0x0D,0x31,0xFA,0xF7,0xFA,0xF8,0x00,0x20, \ +0xE1,0x1D,0x29,0x31,0x88,0x71,0x00,0xF0,0x20,0xF8,0xB8,0x89,0x0D,0x4B,0x18, \ +0x43,0xB8,0x81,0xB8,0x88,0x0C,0x4B,0x18,0x43,0xB8,0x80,0xF0,0xBD,0x00,0x00, \ +0xB0,0x0A,0x00,0x02,0x7B,0x01,0x00,0x02,0x90,0x06,0x00,0x02,0x8A,0x01,0x00, \ +0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x1C,0x01,0x00,0x02,0xD8,0x00, \ +0x00,0x02,0x98,0x00,0x00,0x02,0x13,0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0xF0, \ +0xB5,0xF9,0xF7,0xB7,0xFF,0xFE,0xF7,0x85,0xFC,0xF9,0xF7,0xB3,0xFF,0x2C,0x4F, \ +0x02,0x21,0xB9,0x73,0x00,0x21,0xF9,0x73,0x38,0x74,0x01,0x0A,0x79,0x74,0x01, \ +0x0C,0x00,0x0E,0xB9,0x74,0xF8,0x74,0x27,0x4E,0xF8,0x1D,0x07,0x30,0x06,0x22, \ +0xF1,0x1D,0x35,0x31,0xFA,0xF7,0xB9,0xF8,0x24,0x4C,0x01,0x25,0xF8,0x1D,0x29, \ +0x30,0x65,0x73,0x05,0x71,0x22,0x48,0xF9,0x1D,0x42,0x79,0xF0,0x1D,0x15,0x30, \ +0x0D,0x31,0xFA,0xF7,0xAB,0xF8,0x1F,0x48,0x1F,0x4A,0x00,0x21,0x53,0x5C,0x46, \ +0x18,0x01,0x31,0x04,0x29,0x33,0x74,0xF9,0xD3,0x1C,0x49,0x00,0x23,0x09,0x88, \ 
+0x39,0x80,0x02,0x7D,0x04,0x21,0x10,0x30,0x01,0xF0,0x01,0xFB,0x19,0x48,0x20, \ +0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78,0x10,0x23,0x19,0x43,0x01,0x70, \ +0x10,0x48,0x85,0x70,0xFB,0xF7,0x18,0xFD,0x39,0x88,0x89,0x02,0x09,0x1A,0x06, \ +0x20,0xFB,0xF7,0x3A,0xFD,0x10,0x48,0x05,0x21,0x70,0x30,0x65,0x72,0xC1,0x71, \ +0x0F,0x48,0x01,0x68,0x0F,0x48,0xC2,0x69,0x11,0x43,0xC1,0x61,0x0E,0x48,0x01, \ +0x21,0x05,0x70,0x05,0x20,0xFC,0xF7,0xF1,0xFA,0xF0,0xBD,0x98,0x00,0x00,0x02, \ +0xD8,0x00,0x00,0x02,0x40,0x06,0x00,0x02,0x1C,0x01,0x00,0x02,0x18,0x00,0x00, \ +0x02,0xA0,0x01,0x00,0x02,0x88,0x01,0x00,0x02,0x7B,0x01,0x00,0x02,0xE0,0x05, \ +0x00,0x02,0xE8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x02,0x00,0x02,0xF0, \ +0xB5,0x54,0x4F,0x54,0x4E,0xFC,0x1D,0x59,0x34,0xF8,0x1D,0xF1,0x1D,0x0D,0x31, \ +0x09,0x30,0x05,0x1C,0x22,0x79,0xFA,0xF7,0x4F,0xF8,0x22,0x79,0x4F,0x49,0x28, \ +0x1C,0xFA,0xF7,0x4A,0xF8,0x20,0x79,0x4E,0x49,0x48,0x71,0xB9,0x7B,0x4D,0x48, \ +0x00,0x29,0x03,0xD1,0x01,0x70,0xF1,0x72,0x41,0x70,0x08,0xE0,0x01,0x21,0x01, \ +0x70,0xF1,0x72,0xF9,0x7B,0xC2,0x1D,0x39,0x32,0x41,0x70,0xF9,0x78,0x11,0x70, \ +0x00,0x25,0x0D,0x20,0x68,0x43,0x44,0x49,0x0D,0x22,0x41,0x18,0xC0,0x19,0x30, \ +0x30,0x0C,0x31,0xFA,0xF7,0x2B,0xF8,0x01,0x35,0x04,0x2D,0xF2,0xD3,0x60,0x79, \ +0x00,0x28,0x03,0xD0,0x3C,0x48,0x01,0x21,0x41,0x72,0x02,0xE0,0x3A,0x49,0x00, \ +0x20,0x48,0x72,0x78,0x7B,0x3A,0x49,0x0E,0x28,0x02,0xDC,0x01,0x28,0x00,0xDB, \ +0x08,0x75,0xB8,0x78,0x37,0x4A,0x10,0x74,0x38,0x7B,0x01,0x28,0x02,0xD1,0x32, \ +0x4B,0xD8,0x70,0x02,0xE0,0x30,0x4B,0x00,0x20,0xD8,0x70,0xF8,0x88,0x10,0x81, \ +0xB8,0x88,0x50,0x81,0x38,0x78,0x2D,0x4A,0xD0,0x70,0xE0,0x88,0x2F,0x4A,0x30, \ +0x80,0x00,0x20,0x3B,0x18,0x1C,0x7A,0x0D,0x18,0x2C,0x74,0x1B,0x7A,0x13,0x54, \ +0x01,0x30,0x04,0x28,0xF6,0xD3,0x30,0x88,0x29,0x4A,0x00,0x23,0x10,0x80,0xC8, \ +0x1D,0x09,0x30,0x0F,0x1C,0x0E,0x22,0x04,0x21,0x01,0xF0,0x54,0xFA,0x00,0xF0, \ +0x00,0xF9,0x24,0x4C,0x25,0x49,0xE0,0x1D,0xA9,0x30,0x40,0x79,0x08,0x5C,0x38, \ +0x75,0x23,0x4F,0x38,0x68,0x02,0x28,0x28,0xD1,0x02,0xF0,0x4B,0xF9,0xF9,0xF7, \ +0xA0,0xFE,0x17,0x48,0x80,0x78,0x00,0x28,0x07,0xD0,0xFB,0xF7,0x61,0xFC,0x31, \ +0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0xFB,0xF7,0x83,0xFC,0x01,0x20,0xF9,0xF7, \ +0x42,0xFE,0x02,0xF0,0xEA,0xF8,0x01,0x20,0xF9,0xF7,0xB7,0xFE,0x01,0x20,0x80, \ +0x06,0x80,0x69,0xFE,0xF7,0x78,0xFB,0xFB,0xF7,0x42,0xFC,0xFB,0xF7,0x98,0xF8, \ +0xFE,0xF7,0x54,0xFB,0x80,0x06,0x80,0x0E,0xA0,0x62,0x01,0x20,0x38,0x60,0xF0, \ +0xBD,0x02,0xF0,0xD4,0xF8,0xF0,0xBD,0x00,0x00,0xB0,0x0A,0x00,0x02,0x98,0x00, \ +0x00,0x02,0xF4,0x00,0x00,0x02,0x1C,0x01,0x00,0x02,0x30,0x00,0x00,0x02,0x18, \ +0x00,0x00,0x02,0xD8,0x00,0x00,0x02,0xA0,0x01,0x00,0x02,0x88,0x01,0x00,0x02, \ +0xE0,0x05,0x00,0x02,0x98,0x01,0x00,0x02,0xAC,0x01,0x00,0x02,0x80,0xB5,0x1D, \ +0x4A,0x1D,0x49,0x0A,0x60,0x1D,0x49,0x0F,0x68,0x0B,0x2F,0x23,0xD2,0x01,0xA3, \ +0xDB,0x5D,0x5B,0x00,0x9F,0x44,0x1F,0x05,0x0A,0x0D,0x10,0x12,0x15,0x18,0x1F, \ +0x1B,0x1E,0x00,0x06,0x23,0xFF,0x20,0x01,0x30,0xCB,0x60,0x14,0xE0,0xFF,0x20, \ +0x41,0x30,0x11,0xE0,0xFF,0x20,0x51,0x30,0x0E,0xE0,0x0B,0x20,0x0C,0xE0,0xFF, \ +0x20,0x31,0x30,0x09,0xE0,0xFF,0x20,0x11,0x30,0x06,0xE0,0xFF,0x20,0x61,0x30, \ +0x03,0xE0,0xFF,0x20,0x71,0x30,0x00,0xE0,0x00,0x20,0x01,0x23,0x8B,0x60,0xC9, \ +0x68,0x00,0xF0,0x4A,0xF9,0x04,0x21,0x0C,0x20,0xFC,0xF7,0xE1,0xF9,0x0F,0x20, \ +0x00,0x06,0x81,0x88,0x04,0x4B,0x19,0x43,0x81,0x80,0x80,0xBD,0x14,0x0A,0x00, \ +0x02,0xBC,0x02,0x00,0x02,0xD0,0x02,0x00,0x02,0x08,0x08,0x00,0x00,0xB0,0xB5, \ +0x0D,0x4D,0x00,0x24,0xE8,0x1D,0x59,0x30,0x0C,0x4F,0x04,0x72,0xF8,0x7C,0x02, \ 
+0xF0,0x22,0xFB,0xE8,0x1D,0xA9,0x30,0x04,0x71,0x38,0x8B,0x81,0x02,0x03,0x20, \ +0xFB,0xF7,0xF8,0xFB,0xB8,0x7C,0x00,0x28,0x03,0xD1,0x01,0x20,0xE8,0x73,0x00, \ +0x05,0xB0,0xBD,0x20,0x1C,0xB0,0xBD,0x00,0x00,0xE0,0x05,0x00,0x02,0xD8,0x00, \ +0x00,0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x59,0x32,0x91,0x72,0x01,0x21,0xC1, \ +0x73,0x10,0x20,0xF7,0x46,0x00,0x00,0xE0,0x05,0x00,0x02,0x03,0x48,0x00,0x21, \ +0x01,0x74,0x01,0x21,0xC1,0x73,0x08,0x07,0xF7,0x46,0x00,0x00,0xE0,0x05,0x00, \ +0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x59,0x32,0xD1,0x73,0x01,0x21,0xC1,0x73, \ +0x08,0x05,0xF7,0x46,0x00,0x00,0xE0,0x05,0x00,0x02,0x90,0xB5,0x04,0x20,0xFB, \ +0xF7,0xDE,0xFB,0x03,0x20,0xFB,0xF7,0xDB,0xFB,0x0E,0x48,0xC4,0x1D,0x69,0x34, \ +0xE1,0x79,0x01,0x29,0x12,0xD1,0x03,0x21,0xB0,0x30,0x0B,0x4F,0x01,0x71,0xF8, \ +0x7C,0x01,0xF0,0x36,0xFC,0xF8,0x74,0xF8,0x7C,0x08,0x49,0x00,0x28,0x07,0xD1, \ +0x08,0x72,0x02,0x20,0xE0,0x71,0x01,0x21,0x03,0x20,0xFC,0xF7,0x70,0xF9,0x90, \ +0xBD,0x01,0x20,0x08,0x72,0x90,0xBD,0xE0,0x05,0x00,0x02,0xD8,0x00,0x00,0x02, \ +0x40,0x06,0x00,0x02,0x10,0x49,0xC9,0x7D,0x31,0x29,0x16,0xD0,0x07,0xDC,0x10, \ +0x29,0x0F,0xD0,0x20,0x29,0x0F,0xD0,0x30,0x29,0x08,0xD1,0x02,0x20,0x06,0xE0, \ +0x32,0x29,0x0D,0xD0,0x40,0x29,0x0D,0xD0,0x41,0x29,0x00,0xD1,0x06,0x20,0x07, \ +0x49,0x48,0x71,0xF7,0x46,0x00,0x20,0xFA,0xE7,0x01,0x20,0xF8,0xE7,0x03,0x20, \ +0xF6,0xE7,0x04,0x20,0xF4,0xE7,0x05,0x20,0xF2,0xE7,0x00,0x00,0x18,0x00,0x00, \ +0x02,0x90,0x06,0x00,0x02,0xF0,0xB5,0x25,0x4E,0x01,0x20,0x31,0x78,0x01,0x29, \ +0x3E,0xD1,0x23,0x4F,0x24,0x49,0xF8,0x1D,0xA9,0x30,0x40,0x79,0x0F,0x25,0x2D, \ +0x06,0x09,0x5C,0x21,0x48,0x22,0x4B,0x01,0x75,0xA9,0x88,0x19,0x40,0xA9,0x80, \ +0xA9,0x89,0x20,0x4B,0x19,0x40,0xA9,0x81,0x1F,0x49,0x00,0x24,0x0A,0x78,0x02, \ +0x2A,0x07,0xD1,0x01,0x22,0x0A,0x70,0x00,0x7D,0x01,0x21,0x02,0xF0,0x81,0xF9, \ +0x78,0x74,0x0E,0xE0,0x07,0x20,0x40,0x06,0xC1,0x69,0x10,0x23,0x99,0x43,0xC1, \ +0x61,0x17,0x48,0x01,0x22,0x42,0x71,0x00,0x20,0x02,0xF0,0xFB,0xFA,0xF8,0x1D, \ +0x99,0x30,0x84,0x71,0x78,0x7C,0x01,0x28,0x00,0xD1,0x34,0x70,0x79,0x7C,0x01, \ +0x20,0x00,0x29,0x00,0xD1,0x05,0x20,0xA9,0x89,0x0E,0x4B,0x19,0x43,0xA9,0x81, \ +0xA9,0x88,0x0D,0x4B,0x19,0x43,0xA9,0x80,0x01,0x06,0x09,0x0E,0x06,0x20,0xFC, \ +0xF7,0xF6,0xF8,0xF0,0xBD,0x00,0x00,0xB0,0x01,0x00,0x02,0xE0,0x05,0x00,0x02, \ +0x98,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF, \ +0xFF,0xB2,0x01,0x00,0x02,0xC8,0x00,0x00,0x02,0x13,0x13,0x00,0x00,0xE8,0xE8, \ +0x00,0x00,0xB0,0xB5,0xF3,0x25,0x2D,0x05,0x07,0x1C,0xA8,0x68,0x06,0x20,0xE8, \ +0x60,0x0C,0x1C,0x28,0x69,0x80,0x08,0xFC,0xD3,0x0A,0x20,0xF9,0xF7,0xF4,0xFC, \ +0xA8,0x68,0x78,0x09,0x08,0x23,0x18,0x40,0x02,0x23,0x18,0x43,0xE8,0x60,0x28, \ +0x69,0x80,0x08,0xFC,0xD3,0x38,0x06,0x00,0x0E,0xE8,0x60,0x28,0x69,0x80,0x08, \ +0xFC,0xD3,0xA8,0x68,0x20,0x06,0x00,0x0E,0xE8,0x60,0x28,0x69,0x80,0x08,0xFC, \ +0xD3,0xA8,0x68,0xB0,0xBD,0xF0,0xB5,0x14,0x1C,0x0D,0x1C,0x07,0x1C,0xFD,0xF7, \ +0x9B,0xF8,0x00,0x26,0x00,0x2F,0x10,0xD9,0xFD,0xF7,0xEE,0xF8,0x40,0x08,0xFB, \ +0xD2,0x28,0x20,0xF9,0xF7,0xCB,0xFC,0xA9,0x5D,0xA0,0x19,0xFF,0xF7,0xC5,0xFF, \ +0x28,0x20,0xF9,0xF7,0xC4,0xFC,0x01,0x36,0xBE,0x42,0xEE,0xD3,0xFD,0xF7,0xA3, \ +0xF8,0x00,0x20,0xF0,0xBD,0xF0,0xB5,0x84,0xB0,0x02,0x1C,0x48,0x4B,0x08,0x1C, \ +0x19,0x68,0x46,0x4F,0x00,0x29,0x74,0xD0,0x99,0x68,0x01,0x29,0x72,0xD1,0x00, \ +0x24,0x0F,0x21,0x09,0x06,0x8C,0x80,0x8C,0x81,0x0C,0x88,0xFE,0x1D,0x3C,0x36, \ +0xF5,0x1F,0x09,0x89,0x07,0x3D,0xEC,0x1F,0x12,0x3C,0x19,0x68,0xE3,0x1F,0x07, \ +0x3B,0x03,0x93,0x20,0x33,0x02,0x93,0x04,0x3B,0x01,0x93,0x0A,0x33,0x00,0x93, \ 
+0x0A,0x29,0x2F,0xD1,0x0B,0x22,0x04,0x20,0x01,0x99,0xFF,0xF7,0xB8,0xFF,0xFF, \ +0x22,0x06,0x20,0x01,0x32,0x02,0x99,0xFF,0xF7,0xB2,0xFF,0xFF,0x22,0x0E,0x20, \ +0x39,0x1C,0x41,0x32,0xFF,0xF7,0xAC,0xFF,0xFF,0x22,0x0E,0x20,0x51,0x32,0x03, \ +0x99,0xFF,0xF7,0xA6,0xFF,0xFF,0x22,0x0E,0x20,0x21,0x1C,0x11,0x32,0xFF,0xF7, \ +0xA0,0xFF,0xFF,0x22,0x0E,0x20,0x29,0x1C,0x61,0x32,0xFF,0xF7,0x9A,0xFF,0xFF, \ +0x22,0x0E,0x20,0x31,0x1C,0x71,0x32,0xFF,0xF7,0x94,0xFF,0xFF,0x22,0x01,0x20, \ +0x31,0x32,0x00,0x99,0xFF,0xF7,0x8E,0xFF,0x02,0xE0,0x39,0x1C,0xFF,0xF7,0x8A, \ +0xFF,0xFD,0xF7,0x29,0xF8,0x06,0x22,0xFF,0x21,0x01,0x31,0x02,0x98,0xFD,0xF7, \ +0x90,0xF8,0x04,0x22,0x0B,0x21,0x01,0x98,0xFD,0xF7,0x8B,0xF8,0x0E,0x22,0xFF, \ +0x21,0x38,0x1C,0x41,0x31,0xFD,0xF7,0x85,0xF8,0x0E,0x22,0xFF,0x21,0x51,0x31, \ +0x03,0x98,0xFD,0xF7,0x7F,0xF8,0x0E,0x22,0xFF,0x21,0x28,0x1C,0x61,0x31,0xFD, \ +0xF7,0x79,0xF8,0x0E,0x22,0xFF,0x21,0x30,0x1C,0x71,0x31,0xFD,0xF7,0x73,0xF8, \ +0x01,0xE0,0x11,0xE0,0x10,0xE0,0x0E,0x22,0xFF,0x21,0x20,0x1C,0x11,0x31,0xFD, \ +0xF7,0x6A,0xF8,0x01,0x22,0xFF,0x21,0x31,0x31,0x00,0x98,0xFD,0xF7,0x64,0xF8, \ +0xFD,0xF7,0x13,0xF8,0x03,0x4B,0x00,0x24,0x1C,0x60,0x04,0xB0,0xF0,0xBD,0x14, \ +0x0A,0x00,0x02,0xD0,0x02,0x00,0x02,0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05,0xD1, \ +0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20,0x38, \ +0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x06,0x49,0x00,0x68, \ +0x10,0x23,0x08,0x73,0x38,0x7B,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00,0x00,0x70, \ +0x03,0x00,0x0D,0x50,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0B,0x4F, \ +0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x03,0x2B,0x03, \ +0xD0,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x04,0x49, \ +0x00,0x20,0x08,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF3,0xE7,0x70, \ +0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05,0xD1, \ +0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20,0x38, \ +0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x00,0x21,0x01,0x73, \ +0x01,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00,0x00,0x70, \ +0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x00,0xB5,0x00,0x28,0x05,0xD1,0x00,0x29, \ +0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x03,0xD0,0x06,0x49,0x20,0x20,0x08, \ +0x73,0x00,0xBD,0xFC,0xF7,0xF9,0xFD,0x04,0x49,0x00,0x20,0x08,0x80,0x03,0x49, \ +0x08,0x80,0x00,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0x5C,0x02,0x00,0x02,0x5E, \ +0x02,0x00,0x02,0xB0,0xB4,0x20,0x25,0x00,0x28,0x18,0x4C,0x03,0xD1,0x02,0x2A, \ +0x01,0xD1,0x01,0x2B,0x02,0xD1,0x25,0x73,0xB0,0xBC,0xF7,0x46,0x08,0x06,0x00, \ +0x0E,0x02,0x2B,0x05,0xD1,0x00,0x28,0x01,0xD0,0x25,0x73,0xF5,0xE7,0x00,0x27, \ +0x10,0xE0,0x03,0x2B,0x0E,0xD1,0x00,0x28,0x08,0xD0,0x02,0x28,0x08,0xD0,0x80, \ +0x28,0x04,0xD0,0x85,0x28,0x11,0xD1,0x0A,0x48,0x07,0x88,0x03,0xE0,0x00,0x27, \ +0x01,0xE0,0x09,0x48,0x07,0x88,0x80,0x20,0x20,0x73,0x08,0x48,0x00,0x21,0x07, \ +0x73,0x01,0x73,0x20,0x7B,0x10,0x23,0x18,0x43,0x20,0x73,0xD7,0xE7,0x25,0x73, \ +0xD5,0xE7,0x70,0x03,0x00,0x0D,0x5E,0x02,0x00,0x02,0x5C,0x02,0x00,0x02,0x30, \ +0x03,0x00,0x0D,0x90,0xB5,0x20,0x27,0x00,0x28,0x0C,0x4C,0x03,0xD1,0x00,0x2A, \ +0x01,0xD1,0x03,0x2B,0x01,0xD0,0x27,0x73,0x90,0xBD,0x09,0x06,0x09,0x0E,0x01, \ +0x20,0x02,0x29,0x04,0xD0,0x85,0x29,0x07,0xD1,0x05,0x49,0x08,0x80,0x01,0xE0, \ +0x05,0x49,0x08,0x80,0xFC,0xF7,0x97,0xFD,0x90,0xBD,0x27,0x73,0x90,0xBD,0x70, \ +0x03,0x00,0x0D,0x5E,0x02,0x00,0x02,0x5C,0x02,0x00,0x02,0x80,0xB4,0x0D,0x4F, \ +0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B,0x03, \ 
+0xD1,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x06,0x48, \ +0x01,0x68,0x06,0x48,0x01,0x73,0x00,0x21,0x01,0x73,0x38,0x7B,0x10,0x23,0x18, \ +0x43,0x38,0x73,0xF0,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x4C,0x02,0x00,0x02, \ +0x30,0x03,0x00,0x0D,0x80,0xB5,0x20,0x4F,0x02,0x28,0x04,0xD1,0x0A,0x29,0x31, \ +0xD1,0x1E,0x48,0x38,0x60,0x2E,0xE0,0x04,0x28,0x02,0xD1,0x1D,0x48,0x38,0x60, \ +0x29,0xE0,0x05,0x28,0x02,0xD1,0x1B,0x48,0x38,0x60,0x24,0xE0,0x00,0x28,0x22, \ +0xD1,0x0A,0x29,0x1E,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C, \ +0x1A,0x05,0x08,0x0B,0x1A,0x0E,0x11,0x14,0x1A,0x17,0x14,0x48,0x38,0x60,0x13, \ +0xE0,0x13,0x48,0x38,0x60,0x10,0xE0,0x13,0x48,0x38,0x60,0x0D,0xE0,0x12,0x48, \ +0x38,0x60,0x0A,0xE0,0x12,0x48,0x38,0x60,0x07,0xE0,0x11,0x48,0x38,0x60,0x04, \ +0xE0,0x11,0x48,0x38,0x60,0x01,0xE0,0x10,0x48,0x38,0x60,0x38,0x68,0x10,0x49, \ +0x80,0x18,0x38,0x60,0x80,0x20,0x08,0x73,0x00,0xF0,0x5C,0xF8,0x80,0xBD,0x00, \ +0x00,0x80,0x02,0x00,0x02,0x14,0x0A,0x00,0x02,0xF4,0x02,0x00,0x02,0x34,0x0B, \ +0x00,0x02,0x1C,0x01,0x00,0x02,0x74,0x00,0x00,0x02,0xD8,0x00,0x00,0x02,0x98, \ +0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0x30,0x01,0x00,0x02, \ +0x28,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB4,0x00,0x22,0x02,0x28,0x17, \ +0x4B,0x10,0xD1,0x17,0x48,0x87,0x79,0xC0,0x79,0x00,0x02,0x07,0x43,0x08,0x29, \ +0x07,0xD0,0x14,0x48,0xC7,0x60,0x0C,0x27,0x1F,0x70,0x5A,0x70,0x9A,0x70,0x01, \ +0x60,0x82,0x60,0x80,0xBC,0xF7,0x46,0x06,0x28,0x0E,0xD1,0x0F,0x48,0x00,0x78, \ +0x01,0x28,0xF7,0xD1,0xFF,0x20,0x0D,0x21,0x09,0x06,0x43,0x30,0x88,0x80,0x0C, \ +0x49,0x01,0x20,0x08,0x71,0x0B,0x49,0x08,0x70,0xEC,0xE7,0x18,0x79,0x18,0x70, \ +0x5A,0x70,0x9A,0x70,0x18,0x78,0x0A,0x28,0xE5,0xD1,0x07,0x48,0x02,0x70,0xE2, \ +0xE7,0x00,0x00,0xA8,0x0A,0x00,0x02,0x54,0x02,0x00,0x02,0xD0,0x02,0x00,0x02, \ +0x85,0x01,0x00,0x02,0xE0,0x03,0x00,0x0D,0x86,0x01,0x00,0x02,0x7B,0x01,0x00, \ +0x02,0xB0,0xB4,0x1B,0x4A,0x1B,0x48,0x11,0x68,0x07,0x68,0x1B,0x4B,0xB9,0x42, \ +0x12,0xD1,0x1A,0x7B,0x19,0x1C,0xD2,0x09,0x09,0xD2,0x00,0x68,0x40,0x07,0x03, \ +0xD0,0xE0,0x20,0x08,0x73,0xB0,0xBC,0xF7,0x46,0xD0,0x20,0x08,0x73,0xFA,0xE7, \ +0x08,0x7B,0x20,0x23,0x18,0x43,0x08,0x73,0xF5,0xE7,0x00,0x68,0x11,0x68,0x40, \ +0x1A,0x08,0x28,0x03,0xD9,0x08,0x20,0x0E,0x4F,0x0E,0x49,0x02,0xE0,0x00,0x28, \ +0xFA,0xD1,0x09,0xE0,0x0D,0x68,0x2C,0x78,0x01,0x35,0x0D,0x60,0x3C,0x73,0x14, \ +0x68,0x01,0x34,0x14,0x60,0x01,0x38,0xF5,0xD1,0x19,0x7B,0x18,0x1C,0x10,0x23, \ +0x19,0x43,0x01,0x73,0xD9,0xE7,0x00,0x00,0x78,0x02,0x00,0x02,0x7C,0x02,0x00, \ +0x02,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x80,0x02,0x00,0x02,0x90,0xB5, \ +0x20,0x27,0x00,0x28,0x09,0x4C,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x01, \ +0xD0,0x27,0x73,0x90,0xBD,0x08,0x06,0x00,0x0E,0x01,0xD0,0x80,0x28,0x01,0xD1, \ +0x27,0x73,0x90,0xBD,0xFC,0xF7,0x7A,0xFC,0x90,0xBD,0x00,0x00,0x70,0x03,0x00, \ +0x0D,0x0D,0x48,0x01,0x2B,0x02,0xD1,0x20,0x21,0x01,0x73,0xF7,0x46,0x80,0x21, \ +0x01,0x73,0x0A,0x49,0x01,0x22,0x0A,0x73,0x00,0x22,0x0A,0x73,0x02,0x23,0x0B, \ +0x73,0x0A,0x73,0x07,0x4A,0x10,0x23,0x12,0x68,0x0A,0x73,0x06,0x4A,0x12,0x68, \ +0x0A,0x73,0x01,0x7B,0x19,0x43,0x01,0x73,0xF7,0x46,0x00,0x00,0x70,0x03,0x00, \ +0x0D,0x30,0x03,0x00,0x0D,0x94,0x02,0x00,0x02,0x98,0x02,0x00,0x02,0x00,0x21, \ +0x02,0x28,0x10,0xD1,0x08,0x1C,0x0B,0x49,0x04,0x22,0x08,0x71,0x0B,0x49,0x0A, \ +0x70,0x08,0x70,0x0A,0x4A,0x82,0x21,0x11,0x71,0x0A,0x49,0x08,0x60,0x0A,0x49, \ +0x08,0x60,0x0A,0x49,0x08,0x80,0xF7,0x46,0x85,0x28,0xFC,0xD1,0x08,0x4A,0x01, \ +0x20,0x10,0x60,0x08,0x48,0x01,0x80,0xF7,0x46,0x70,0x03,0x00,0x0D,0xC0,0x03, \ 
+0x00,0x0D,0xB0,0x03,0x00,0x0D,0x70,0x02,0x00,0x02,0x6C,0x02,0x00,0x02,0x5C, \ +0x02,0x00,0x02,0xA0,0x02,0x00,0x02,0x5E,0x02,0x00,0x02,0x90,0xB5,0x0F,0x1C, \ +0x19,0x1C,0x29,0x4B,0x14,0x1C,0x27,0x4A,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A, \ +0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xB6,0xFD,0x90,0xBD,0x24,0x4B,0x98,0x42, \ +0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xCC,0xFD,0x90, \ +0xBD,0x81,0x23,0x1B,0x02,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C, \ +0x21,0x1C,0xFF,0xF7,0xDD,0xFD,0x90,0xBD,0xFF,0x23,0x0C,0x33,0x98,0x42,0x06, \ +0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xF0,0xFD,0x90,0xBD, \ +0x41,0x23,0x5B,0x02,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21, \ +0x1C,0xFF,0xF7,0x01,0xFE,0x90,0xBD,0x0F,0x4B,0x98,0x42,0x06,0xD1,0x13,0x68, \ +0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x33,0xFE,0x90,0xBD,0x01,0x23,0xDB, \ +0x03,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7, \ +0x4A,0xFE,0x90,0xBD,0x06,0x49,0x20,0x20,0x08,0x73,0x90,0xBD,0x00,0x00,0x40, \ +0x02,0x00,0x02,0x08,0x80,0x00,0x00,0x0A,0x81,0x00,0x00,0x03,0x02,0x00,0x00, \ +0x70,0x03,0x00,0x0D,0x10,0x49,0x09,0x78,0x01,0x29,0x1B,0xD1,0x40,0x08,0x19, \ +0xD3,0x0D,0x20,0x00,0x06,0x01,0x78,0x20,0x23,0x19,0x43,0x01,0x70,0x0B,0x48, \ +0x00,0x68,0xC1,0x43,0x0B,0x48,0xC2,0x69,0x11,0x40,0xC1,0x61,0x00,0x20,0x07, \ +0x21,0x49,0x06,0x7D,0x22,0x12,0x01,0x88,0x61,0x01,0x30,0x90,0x42,0xFC,0xD3, \ +0xFF,0x20,0x48,0x61,0xFF,0xE7,0xFE,0xE7,0xF7,0x46,0x00,0x00,0x85,0x01,0x00, \ +0x02,0xE8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x80,0xB5,0x00,0x20,0x1C,0x49, \ +0x0F,0x27,0x3F,0x06,0x08,0x70,0xB8,0x80,0x39,0x88,0xB8,0x81,0x1A,0x4A,0x39, \ +0x89,0xD1,0x69,0xD1,0x04,0xCB,0x68,0xC9,0x6B,0x18,0x49,0x09,0x68,0x90,0x61, \ +0x17,0x49,0x02,0x20,0x88,0x61,0x17,0x48,0x01,0x7A,0x0C,0x30,0x08,0x29,0x19, \ +0xD2,0x01,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x15,0x03,0x06,0x15,0x09,0x0C, \ +0x0F,0x12,0x00,0xF0,0x20,0xFC,0x80,0xBD,0x00,0xF0,0x8D,0xF9,0x80,0xBD,0x00, \ +0xF0,0x2A,0xFA,0x80,0xBD,0x00,0xF0,0x1B,0xF8,0x80,0xBD,0x00,0xF0,0xCC,0xF8, \ +0x80,0xBD,0x00,0xF0,0x91,0xFA,0x80,0xBD,0x02,0x21,0x0A,0x20,0xFB,0xF7,0x0A, \ +0xFD,0x06,0x48,0xB8,0x80,0x80,0xBD,0x00,0x00,0x7B,0x01,0x00,0x02,0x80,0x00, \ +0x00,0x04,0x40,0x00,0x00,0x04,0x60,0x06,0x00,0x02,0xA8,0x0A,0x00,0x02,0x08, \ +0x08,0x00,0x00,0xF0,0xB5,0x82,0xB0,0x69,0x46,0x08,0x22,0xF9,0xF7,0x78,0xFA, \ +0x00,0xA8,0x00,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x4B,0x4C,0x03,0xDC,0x00, \ +0xA8,0x00,0x78,0x00,0x28,0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0xE5,0xFC, \ +0xAC,0x80,0x02,0xB0,0xF0,0xBD,0x45,0x48,0x90,0x21,0x41,0x70,0x00,0xA9,0x89, \ +0x78,0x00,0x26,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0,0x40,0x21,0xC1,0x70, \ +0x41,0x7C,0x00,0xAA,0x89,0x07,0x89,0x0F,0x41,0x74,0xD2,0x78,0x41,0x7C,0x92, \ +0x00,0x02,0x23,0x1A,0x43,0x11,0x43,0x41,0x74,0x01,0xA9,0x09,0x78,0x01,0x75, \ +0x01,0xA9,0x49,0x78,0x41,0x75,0x37,0x49,0x09,0x78,0x01,0x29,0x02,0xD1,0x01, \ +0xA9,0xC9,0x78,0x41,0x77,0xFF,0x20,0x34,0x4F,0xF5,0x30,0x79,0x68,0xC9,0x0B, \ +0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49,0x06,0xC8, \ +0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF9,0xF7,0xCB,0xF8, \ +0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0xF9,0xF7,0xEA, \ +0xF8,0x00,0xA8,0x00,0x78,0x00,0x21,0x01,0xF0,0x00,0xFD,0x00,0x21,0x08,0x20, \ +0xF9,0xF7,0x96,0xF9,0x00,0x21,0x09,0x20,0xF9,0xF7,0x92,0xF9,0x00,0x21,0x0A, \ +0x20,0xF9,0xF7,0x8E,0xF9,0xF8,0x69,0x01,0x23,0x1B,0x03,0x18,0x43,0xF8,0x61, \ +0x00,0x20,0xF9,0xF7,0xFC,0xF8,0x0A,0x20,0xF9,0xF7,0xA7,0xF8,0x01,0x20,0x80, \ 
+0x06,0x46,0x61,0xC0,0x68,0x18,0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3, \ +0x06,0x21,0x0A,0x20,0xFB,0xF7,0x78,0xFC,0xAC,0x80,0x91,0xE7,0x00,0x22,0xFF, \ +0x21,0x7D,0x20,0xC0,0x00,0xAC,0x80,0x00,0xF0,0xC1,0xFA,0x10,0x48,0x01,0x21, \ +0x89,0x06,0x88,0x63,0x0F,0x48,0x0F,0x4A,0x48,0x63,0xAE,0x80,0x04,0x20,0x90, \ +0x61,0xB8,0x60,0x00,0x03,0x78,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0B,0x48, \ +0x01,0x21,0xA8,0x80,0x0A,0x20,0xFB,0xF7,0x59,0xFC,0x73,0xE7,0x08,0x08,0x00, \ +0x00,0xF4,0x09,0x00,0x02,0xE4,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24, \ +0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x20,0x00,0x00,0x60,0x06,0x00,0x02,0x88, \ +0x88,0x00,0x00,0xF0,0xB5,0x82,0xB0,0x69,0x46,0x08,0x22,0xF9,0xF7,0xC4,0xF9, \ +0x00,0xA8,0x00,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x4D,0x4C,0x03,0xDC,0x00, \ +0xA8,0x00,0x78,0x00,0x28,0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0x31,0xFC, \ +0xAC,0x80,0x02,0xB0,0xF0,0xBD,0x47,0x48,0x90,0x21,0x41,0x70,0x00,0xA9,0x89, \ +0x78,0x00,0x26,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0,0x40,0x21,0xC1,0x70, \ +0x41,0x7C,0x00,0xAA,0x89,0x07,0x89,0x0F,0x41,0x74,0xD2,0x78,0x41,0x7C,0x92, \ +0x00,0x02,0x23,0x1A,0x43,0x11,0x43,0x41,0x74,0x01,0xA9,0x09,0x78,0x01,0x75, \ +0x01,0xA9,0x49,0x78,0x41,0x75,0x39,0x49,0x09,0x78,0x01,0x29,0x02,0xD1,0x01, \ +0xA9,0xC9,0x78,0x41,0x77,0xFF,0x20,0x36,0x4F,0xF5,0x30,0x79,0x68,0xC9,0x0B, \ +0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49,0x06,0xC8, \ +0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF9,0xF7,0x17,0xF8, \ +0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0xF9,0xF7,0x36, \ +0xF8,0x00,0xA8,0x00,0x78,0x00,0x21,0x01,0xF0,0x4C,0xFC,0x0B,0x21,0x08,0x20, \ +0xF9,0xF7,0xE2,0xF8,0xB7,0x21,0x09,0x20,0xF9,0xF7,0xDE,0xF8,0x00,0x21,0x0A, \ +0x20,0xF9,0xF7,0xDA,0xF8,0x14,0x20,0xF8,0xF7,0xFB,0xFF,0xF8,0x69,0x01,0x23, \ +0x1B,0x03,0x18,0x43,0xF8,0x61,0x00,0x20,0xF9,0xF7,0x45,0xF8,0x0A,0x20,0xF8, \ +0xF7,0xF0,0xFF,0x01,0x20,0x80,0x06,0x46,0x61,0xC0,0x68,0x18,0x48,0x78,0x61, \ +0x78,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFB,0xF7,0xC1,0xFB,0xAC, \ +0x80,0x8E,0xE7,0x00,0x22,0x55,0x21,0x7D,0x20,0xC0,0x00,0xAC,0x80,0x00,0xF0, \ +0x0A,0xFA,0x10,0x48,0x01,0x21,0x89,0x06,0x88,0x63,0x0F,0x48,0x10,0x4A,0x48, \ +0x63,0xAE,0x80,0x04,0x20,0x90,0x61,0xB8,0x60,0x00,0x03,0x78,0x60,0x48,0x6A, \ +0x0A,0x30,0x08,0x62,0x0B,0x48,0x01,0x21,0xA8,0x80,0x0A,0x20,0xFB,0xF7,0xA2, \ +0xFB,0x70,0xE7,0x00,0x00,0x08,0x08,0x00,0x00,0xF4,0x09,0x00,0x02,0xE4,0x02, \ +0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24,0x00,0x00,0x00,0x72,0x01,0x02,0x64, \ +0x20,0x00,0x00,0x60,0x06,0x00,0x02,0x88,0x88,0x00,0x00,0xF0,0xB5,0x82,0xB0, \ +0x69,0x46,0x08,0x22,0xF9,0xF7,0x0C,0xF9,0x00,0xA8,0x80,0x78,0x43,0x4C,0x80, \ +0x08,0x80,0x00,0x0F,0x27,0x3F,0x06,0x00,0x28,0x06,0xD0,0x03,0x21,0x0A,0x20, \ +0xFB,0xF7,0x7B,0xFB,0xBC,0x80,0x02,0xB0,0xF0,0xBD,0x00,0xA8,0x00,0x78,0x0E, \ +0x28,0x03,0xDC,0x00,0xA8,0x00,0x78,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20, \ +0xFB,0xF7,0x6C,0xFB,0xBC,0x80,0xEF,0xE7,0x08,0x21,0x0A,0x20,0xFB,0xF7,0x66, \ +0xFB,0x34,0x48,0x00,0x25,0x05,0x70,0x34,0x48,0x05,0x60,0x45,0x60,0x00,0x20, \ +0xF8,0xF7,0xD1,0xFF,0x00,0xA9,0x89,0x78,0x31,0x48,0x01,0x29,0x01,0xD1,0xC5, \ +0x70,0x01,0xE0,0x40,0x21,0xC1,0x70,0x01,0xA9,0x09,0x78,0x01,0x75,0x01,0xA9, \ +0x49,0x78,0x41,0x75,0x2B,0x49,0x09,0x78,0x01,0x29,0x02,0xD1,0x01,0xA9,0xC9, \ +0x78,0x41,0x77,0xFF,0x20,0x28,0x4E,0xF5,0x30,0x71,0x68,0xC9,0x0B,0x03,0xD3, \ +0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80, \ +0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x55,0xFF,0x07,0x21, \ 
+0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0xF8,0xF7,0x74,0xFF,0x00, \ +0xA8,0x00,0x78,0x00,0x21,0x01,0xF0,0x8A,0xFB,0x00,0x28,0x05,0xD1,0x05,0x21, \ +0x0A,0x20,0xFB,0xF7,0x20,0xFB,0xBC,0x80,0xA3,0xE7,0x14,0x20,0xF8,0xF7,0x3D, \ +0xFF,0x00,0x20,0xF8,0xF7,0x8C,0xFF,0x70,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21, \ +0x0A,0x20,0xFB,0xF7,0x11,0xFB,0xBC,0x80,0x94,0xE7,0x20,0x20,0x41,0x05,0xB5, \ +0x60,0x48,0x61,0x0C,0x48,0x01,0x21,0x81,0x73,0x80,0x30,0x81,0x61,0xB8,0x88, \ +0x0A,0x4B,0x18,0x43,0xB8,0x80,0x0A,0x20,0xFB,0xF7,0xFF,0xFA,0x83,0xE7,0x08, \ +0x08,0x00,0x00,0x7B,0x01,0x00,0x02,0xF4,0x02,0x00,0x02,0xF4,0x09,0x00,0x02, \ +0xE4,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0xE0,0x05,0x00,0x02,0x48,0x48,0x00, \ +0x00,0xF0,0xB5,0x82,0xB0,0x69,0x46,0x08,0x22,0xF9,0xF7,0x6C,0xF8,0x00,0xA8, \ +0x00,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x2D,0x4F,0x03,0xDC,0x00,0xA8,0x00, \ +0x78,0x00,0x28,0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0xD9,0xFA,0xAF,0x80, \ +0x02,0xB0,0xF0,0xBD,0x27,0x48,0x00,0x26,0x46,0x70,0x41,0x7C,0xFD,0x23,0x19, \ +0x40,0x41,0x74,0x25,0x49,0x09,0x78,0x01,0x29,0x02,0xD1,0x01,0xA9,0xC9,0x78, \ +0x41,0x77,0xFF,0x20,0x22,0x4C,0xF5,0x30,0x61,0x68,0xC9,0x0B,0x03,0xD3,0x01, \ +0x1C,0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23, \ +0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0xD6,0xFE,0x07,0x21,0x49, \ +0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0xF8,0xF7,0xF5,0xFE,0xAE,0x80, \ +0x00,0xA8,0x00,0x78,0x00,0x21,0x01,0xF0,0x0A,0xFB,0x00,0x28,0x02,0xD1,0x11, \ +0x49,0x05,0x20,0x48,0x70,0x14,0x20,0xF8,0xF7,0xC0,0xFE,0x00,0x20,0xF8,0xF7, \ +0x0F,0xFF,0x60,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFB,0xF7,0x94, \ +0xFA,0xAF,0x80,0xB9,0xE7,0x01,0x20,0x80,0x06,0xA6,0x60,0x46,0x61,0x01,0x21, \ +0x0A,0x20,0xAF,0x80,0xFB,0xF7,0x89,0xFA,0xAF,0xE7,0x08,0x08,0x00,0x00,0xF4, \ +0x09,0x00,0x02,0xE4,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0xA8,0x0A,0x00,0x02, \ +0xF0,0xB5,0x84,0xB0,0x69,0x46,0x10,0x22,0xF8,0xF7,0xFC,0xFF,0x02,0xA8,0x00, \ +0x78,0x0F,0x24,0x24,0x06,0x0E,0x28,0x59,0x4F,0x03,0xDC,0x02,0xA8,0x00,0x78, \ +0x00,0x28,0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0x69,0xFA,0xA7,0x80,0x04, \ +0xB0,0xF0,0xBD,0x02,0xA8,0x00,0x78,0x52,0x4E,0x00,0x25,0xB0,0x70,0x00,0xA8, \ +0x40,0x78,0x70,0x70,0x00,0xA8,0x00,0x78,0x30,0x70,0x00,0xA8,0x40,0x88,0xB0, \ +0x60,0x01,0x98,0xF0,0x60,0xF5,0x70,0x75,0x60,0x35,0x61,0x03,0x98,0x70,0x61, \ +0x00,0x20,0xF8,0xF7,0xC2,0xFE,0x72,0x78,0x40,0x21,0x47,0x48,0x00,0x2A,0x17, \ +0xD0,0x01,0x2A,0x17,0xD0,0x02,0x2A,0x18,0xD0,0x03,0x2A,0x01,0xD1,0x60,0x22, \ +0x42,0x70,0x42,0x7C,0x02,0xAB,0x92,0x07,0x92,0x0F,0x42,0x74,0x9B,0x78,0x42, \ +0x7C,0x9B,0x00,0x1A,0x43,0x42,0x74,0x02,0xAA,0x52,0x78,0x01,0x2A,0x08,0xD1, \ +0xC5,0x70,0x07,0xE0,0x45,0x70,0xED,0xE7,0x20,0x22,0x42,0x70,0xEA,0xE7,0x41, \ +0x70,0xE8,0xE7,0xC1,0x70,0x37,0x49,0x09,0x78,0x01,0x29,0x02,0xD1,0x02,0xA9, \ +0xC9,0x78,0x41,0x77,0xFF,0x20,0xF5,0x30,0x33,0x49,0x49,0x68,0xC9,0x0B,0x03, \ +0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69, \ +0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x30,0xFE,0x07, \ +0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0xF8,0xF7,0x4F,0xFE, \ +0xB0,0x78,0x00,0x21,0x01,0xF0,0x66,0xFA,0x24,0x49,0xC8,0x69,0x8B,0x01,0x18, \ +0x43,0xC8,0x61,0x14,0x20,0xF8,0xF7,0x1C,0xFE,0x00,0x20,0xF8,0xF7,0x6B,0xFE, \ +0x0A,0x20,0xF8,0xF7,0x16,0xFE,0x01,0x20,0x80,0x06,0x45,0x61,0xC0,0x68,0x1B, \ +0x49,0x1C,0x48,0x48,0x61,0x48,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20, \ +0xFB,0xF7,0xE6,0xF9,0xA7,0x80,0x7B,0xE7,0xF0,0x68,0x00,0xF0,0xAB,0xF8,0x70, \ 
+0x60,0xF0,0x78,0xF8,0xF7,0x93,0xFE,0x70,0x68,0xFA,0xF7,0x82,0xF8,0xA7,0x80, \ +0x31,0x78,0xF0,0x68,0x00,0x22,0x00,0xF0,0x26,0xF8,0x0F,0x49,0xA5,0x80,0x03, \ +0x20,0x88,0x61,0x0B,0x49,0x22,0x20,0x88,0x60,0x08,0x05,0x41,0x6A,0x0C,0x4B, \ +0xC9,0x18,0x01,0x62,0x0B,0x48,0x01,0x21,0xA0,0x80,0x0A,0x20,0xFB,0xF7,0xC2, \ +0xF9,0x58,0xE7,0x00,0x00,0x08,0x08,0x00,0x00,0x24,0x0B,0x00,0x02,0xF4,0x09, \ +0x00,0x02,0xE4,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24,0x00,0x00,0x60, \ +0x06,0x00,0x02,0x10,0x27,0x00,0x00,0x88,0x88,0x00,0x00,0xF0,0xB5,0x07,0x1C, \ +0x00,0x2A,0x0B,0xD1,0x00,0x20,0x00,0x2F,0x14,0x4A,0x06,0xD9,0x09,0x06,0x09, \ +0x0E,0x11,0x70,0x01,0x32,0x01,0x30,0xB8,0x42,0xFA,0xD3,0xF0,0xBD,0xF8,0xF7, \ +0x06,0xFE,0xFD,0xF7,0xD4,0xFA,0xFD,0xF7,0xB4,0xFA,0xBC,0x08,0x26,0x1C,0x0B, \ +0x4D,0x04,0xD0,0xFD,0xF7,0xAE,0xFA,0x01,0xC5,0x01,0x3C,0xFA,0xD1,0xB0,0x00, \ +0x3F,0x1A,0xFD,0xF7,0xA7,0xFA,0x69,0x1C,0x03,0x2F,0x28,0x70,0x02,0xD1,0x00, \ +0x0C,0x08,0x70,0xF0,0xBD,0x02,0x2F,0xE2,0xD1,0x00,0x0A,0x08,0x70,0xF0,0xBD, \ +0x00,0x00,0x00,0x72,0x01,0x02,0x88,0xB4,0x01,0x20,0x80,0x06,0xC1,0x6B,0x00, \ +0xAB,0x19,0x80,0x19,0x49,0x1A,0x4A,0x89,0x69,0x1A,0x4F,0x03,0x29,0x1E,0xD1, \ +0x00,0xA9,0x09,0x88,0x20,0x23,0x0B,0x40,0x17,0x49,0x09,0xD0,0x87,0x63,0xCB, \ +0x68,0x16,0x4F,0x43,0x63,0x4B,0x78,0xFF,0x5C,0x11,0x23,0x9B,0x02,0x3B,0x43, \ +0x53,0x60,0x00,0xAA,0x12,0x88,0x92,0x08,0x16,0xD3,0x0A,0x69,0x01,0x32,0x0A, \ +0x61,0x4B,0x69,0x9A,0x42,0x10,0xD2,0x89,0x68,0x42,0x6A,0x89,0x18,0x01,0x62, \ +0x0B,0xE0,0x04,0x29,0x09,0xD1,0x00,0xA9,0x09,0x88,0xC9,0x08,0x05,0xD3,0x09, \ +0x49,0x87,0x63,0x41,0x63,0x01,0x20,0x80,0x03,0x50,0x60,0x88,0xBC,0xF7,0x46, \ +0x00,0x00,0x60,0x06,0x00,0x02,0x40,0x00,0x00,0x04,0x00,0x72,0x01,0x02,0x24, \ +0x0B,0x00,0x02,0x70,0x01,0x00,0x02,0x64,0x30,0x00,0x00,0xF0,0xB5,0x04,0x30, \ +0xC7,0x00,0x19,0x4C,0x00,0x26,0xE6,0x70,0x60,0x78,0x01,0x28,0x15,0xD0,0x02, \ +0x28,0x15,0xD0,0x03,0x28,0x25,0xD1,0x0B,0x20,0x39,0x1C,0xFD,0xF7,0x29,0xFA, \ +0x0D,0x1C,0x79,0x1A,0x0B,0x20,0xFD,0xF7,0x24,0xFA,0x07,0x1C,0x00,0x2D,0x18, \ +0xD9,0x01,0x37,0x04,0x2D,0x13,0xD2,0x01,0x20,0xE0,0x70,0x13,0xE0,0x7F,0x08, \ +0x11,0xE0,0x79,0x00,0x0B,0x20,0x0F,0x1C,0xFD,0xF7,0x14,0xFA,0x0C,0x1C,0x79, \ +0x1A,0x0B,0x20,0xFD,0xF7,0x0F,0xFA,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37, \ +0x02,0xE0,0xE6,0x70,0x00,0xE0,0xE6,0x70,0x38,0x04,0x00,0x0C,0xF0,0xBD,0x24, \ +0x0B,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x69,0x46,0x08,0x22,0xF8,0xF7,0x7C,0xFE, \ +0x00,0xA8,0x00,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x4C,0x4C,0x03,0xDC,0x00, \ +0xA8,0x00,0x78,0x00,0x28,0x06,0xD1,0x03,0x21,0x0A,0x20,0xFB,0xF7,0xE9,0xF8, \ +0xAC,0x80,0x02,0xB0,0xF0,0xBD,0x46,0x48,0x90,0x21,0x41,0x70,0x00,0xA9,0x89, \ +0x78,0x00,0x26,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0,0x40,0x21,0xC1,0x70, \ +0x41,0x7C,0x00,0xAA,0x89,0x07,0x89,0x0F,0x41,0x74,0xD2,0x78,0x41,0x7C,0x92, \ +0x00,0x11,0x43,0x41,0x74,0x01,0xA9,0x09,0x78,0x01,0x75,0x01,0xA9,0x49,0x78, \ +0x41,0x75,0x39,0x49,0x09,0x78,0x01,0x29,0x02,0xD1,0x01,0xA9,0xC9,0x78,0x41, \ +0x77,0xFF,0x20,0x36,0x4F,0xF5,0x30,0x79,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C, \ +0x01,0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18, \ +0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0xD1,0xFC,0x07,0x21,0x49,0x06, \ +0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0xF8,0xF7,0xF0,0xFC,0x00,0xA8,0x00, \ +0x78,0x00,0x21,0x01,0xF0,0x06,0xF9,0x0B,0x21,0x08,0x20,0xF8,0xF7,0x9C,0xFD, \ +0xB7,0x21,0x09,0x20,0xF8,0xF7,0x98,0xFD,0x00,0x21,0x0A,0x20,0xF8,0xF7,0x94, \ +0xFD,0x14,0x20,0xF8,0xF7,0xB5,0xFC,0xF8,0x69,0x01,0x23,0x1B,0x03,0x18,0x43, \ 
+0xF8,0x61,0x00,0x20,0xF8,0xF7,0xFF,0xFC,0x0A,0x20,0xF8,0xF7,0xAA,0xFC,0x01, \ +0x20,0x80,0x06,0x46,0x61,0xC0,0x68,0x18,0x48,0x78,0x61,0x78,0x68,0xC0,0x0B, \ +0x05,0xD3,0x06,0x21,0x0A,0x20,0xFB,0xF7,0x7B,0xF8,0xAC,0x80,0x90,0xE7,0x01, \ +0x22,0x55,0x21,0x7D,0x20,0xC0,0x00,0xAC,0x80,0xFF,0xF7,0xC4,0xFE,0x10,0x48, \ +0x01,0x21,0x89,0x06,0x88,0x63,0x0F,0x48,0x10,0x4A,0x48,0x63,0xAE,0x80,0x04, \ +0x20,0xB8,0x60,0x90,0x61,0x08,0x0B,0x78,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62, \ +0x0B,0x48,0x01,0x21,0xA8,0x80,0x0A,0x20,0xFB,0xF7,0x5C,0xF8,0x72,0xE7,0x00, \ +0x00,0x08,0x08,0x00,0x00,0xF4,0x09,0x00,0x02,0xE4,0x02,0x00,0x02,0x40,0x00, \ +0x00,0x04,0x04,0x24,0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x20,0x00,0x00,0x60, \ +0x06,0x00,0x02,0x88,0x88,0x00,0x00,0x80,0xB5,0x15,0x49,0x01,0x27,0x89,0x69, \ +0x01,0x29,0x13,0xD1,0x13,0x4B,0x18,0x40,0x0E,0xD0,0x88,0x06,0xC0,0x68,0x81, \ +0x09,0x0A,0xD3,0x04,0x21,0x01,0x40,0x10,0x48,0x03,0xD0,0x41,0x68,0x01,0x31, \ +0x41,0x60,0x02,0xE0,0x01,0x68,0x01,0x31,0x01,0x60,0x38,0x1C,0x80,0xBD,0x02, \ +0x29,0x01,0xD1,0x38,0x1C,0x80,0xBD,0x03,0x29,0x01,0xD0,0x04,0x29,0x06,0xD1, \ +0x07,0x4B,0x18,0x40,0x01,0xD0,0xFF,0xF7,0xA5,0xFE,0x38,0x1C,0x80,0xBD,0x00, \ +0x20,0x80,0xBD,0x00,0x00,0x60,0x06,0x00,0x02,0x40,0x40,0x00,0x00,0xF4,0x02, \ +0x00,0x02,0x80,0x80,0x00,0x00,0xFF,0xB5,0x84,0xB0,0x00,0x20,0x00,0x24,0x00, \ +0x26,0x00,0x27,0x00,0x25,0x03,0x90,0x02,0x90,0x01,0x90,0x68,0x46,0x04,0x22, \ +0x5B,0x49,0xFD,0xF7,0x7F,0xF9,0x05,0x99,0x00,0x20,0x00,0x29,0x1B,0xDD,0x04, \ +0x99,0x80,0x23,0x09,0x5C,0x0A,0x1C,0x9A,0x43,0x16,0x2A,0x02,0xD1,0x00,0xAB, \ +0xD9,0x70,0x0D,0xE0,0x0B,0x2A,0x02,0xD1,0x00,0xAB,0x99,0x70,0x08,0xE0,0x04, \ +0x2A,0x02,0xD1,0x00,0xAB,0x59,0x70,0x03,0xE0,0x02,0x2A,0x01,0xD1,0x00,0xAB, \ +0x19,0x70,0x05,0x99,0x01,0x30,0x88,0x42,0xE3,0xDB,0x00,0x20,0x69,0x46,0x09, \ +0x5C,0x00,0x29,0x0D,0xD0,0x09,0x0A,0x04,0xD3,0x00,0x2E,0x00,0xD1,0x07,0x1C, \ +0x01,0x26,0x04,0x1C,0x01,0x99,0x02,0x90,0x00,0x29,0x02,0xD1,0x01,0x21,0x01, \ +0x91,0x05,0x1C,0x01,0x30,0x04,0x28,0xEA,0xDB,0x01,0x99,0x00,0x20,0x00,0x29, \ +0x01,0xD1,0x08,0xB0,0xF0,0xBD,0x00,0x2E,0x01,0xD1,0x2C,0x1C,0x2F,0x1C,0x3B, \ +0x49,0x00,0x22,0x8B,0x18,0x1B,0x7C,0x00,0x2B,0x00,0xD0,0x03,0x92,0x01,0x32, \ +0x04,0x2A,0xF7,0xDB,0x06,0x9B,0x01,0x26,0x0E,0x2B,0x35,0x4A,0x03,0xD1,0x35, \ +0x4B,0x1B,0x78,0x01,0x2B,0x0A,0xD1,0x03,0x98,0x84,0x42,0x02,0xDD,0x03,0x98, \ +0x90,0x72,0x00,0xE0,0x94,0x72,0x02,0x98,0xD0,0x72,0xD7,0x71,0x42,0xE0,0x2E, \ +0x4B,0x1B,0x78,0x00,0x2B,0x3E,0xD1,0x01,0x2D,0x10,0xD9,0xD0,0x71,0x96,0x72, \ +0xD6,0x72,0x07,0x9B,0x00,0x27,0x01,0x2B,0x35,0xD1,0x82,0x20,0x00,0xAB,0x18, \ +0x70,0x84,0x20,0x58,0x70,0x0B,0x20,0x98,0x70,0x16,0x20,0xD8,0x70,0x2B,0xE0, \ +0x01,0x2C,0x0D,0xDD,0x00,0xAC,0x64,0x78,0x23,0x0A,0x01,0xD3,0x96,0x72,0x08, \ +0xE0,0x00,0xAC,0x24,0x78,0x23,0x0A,0x01,0xD3,0x90,0x72,0x02,0xE0,0x95,0x72, \ +0x00,0xE0,0x94,0x72,0x01,0x2F,0x0D,0xD9,0x00,0xAC,0x24,0x78,0x23,0x0A,0x01, \ +0xD3,0xD0,0x71,0x08,0xE0,0x00,0xAC,0x64,0x78,0x23,0x0A,0x01,0xD3,0xD6,0x71, \ +0x02,0xE0,0xD5,0x71,0x00,0xE0,0xD7,0x71,0x02,0x9B,0x00,0x2B,0x05,0xDD,0x00, \ +0xAB,0x5B,0x78,0x00,0x2B,0x01,0xD0,0xD6,0x72,0x00,0xE0,0xD0,0x72,0x00,0x20, \ +0x6A,0x46,0x12,0x5C,0x0B,0x18,0x01,0x30,0x04,0x28,0x1A,0x74,0xF8,0xDB,0xC8, \ +0x19,0x01,0x7C,0x80,0x23,0x19,0x43,0x01,0x74,0x02,0x20,0xFA,0xF7,0x2A,0xF9, \ +0x05,0x49,0x48,0x70,0x30,0x1C,0x84,0xE7,0xB4,0x91,0x00,0x00,0x18,0x00,0x00, \ +0x02,0x1C,0x01,0x00,0x02,0xE5,0x02,0x00,0x02,0x2C,0x01,0x00,0x02,0xF0,0xB4, \ +0x44,0x78,0x00,0x26,0x05,0x2C,0x01,0xD8,0x00,0x2C,0x02,0xD1,0x30,0x1C,0xF0, \ 
+0xBC,0xF7,0x46,0x00,0x22,0x00,0x27,0x00,0x2C,0x17,0xD9,0xC3,0x19,0x9D,0x78, \ +0x6B,0x06,0x5B,0x0E,0x02,0x2B,0x08,0xD0,0x04,0x2B,0x06,0xD0,0x0B,0x2B,0x04, \ +0xD0,0x16,0x2B,0x02,0xD0,0x2C,0x2B,0x0B,0xD1,0x04,0xE0,0x2C,0x2B,0x02,0xD0, \ +0x13,0x1C,0xCD,0x54,0x01,0x32,0x01,0x37,0xA7,0x42,0xE8,0xD3,0x03,0xE0,0x00, \ +0x2B,0x01,0xD1,0x30,0x1C,0xDD,0xE7,0x10,0x1C,0xDB,0xE7,0xF1,0xB5,0x85,0xB0, \ +0x00,0x20,0x01,0x90,0x68,0x46,0x04,0x22,0x71,0x49,0xFD,0xF7,0x8B,0xF8,0x71, \ +0x4E,0x04,0x24,0x30,0x68,0x45,0x68,0x80,0x89,0x2F,0x28,0x02,0xDA,0x00,0x20, \ +0x06,0xB0,0xF0,0xBD,0x05,0x98,0x6C,0x49,0x01,0x28,0x04,0x91,0x09,0xD1,0x06, \ +0x22,0xE8,0x1D,0x09,0x30,0x04,0x99,0xF8,0xF7,0x70,0xFC,0x00,0x28,0x01,0xD0, \ +0x00,0x20,0xEE,0xE7,0x20,0x20,0xE9,0x1D,0x19,0x31,0x28,0x5C,0x49,0x78,0x09, \ +0x02,0x08,0x43,0x01,0x04,0x09,0x0C,0x02,0x91,0x14,0x29,0x04,0xDB,0x7D,0x23, \ +0x02,0x99,0xDB,0x00,0x99,0x42,0x01,0xDD,0x00,0x20,0xDB,0xE7,0x22,0x20,0x28, \ +0x5C,0x80,0x08,0x01,0xD2,0x00,0x20,0xD5,0xE7,0x30,0x68,0x24,0x27,0x80,0x89, \ +0x04,0x38,0x24,0x28,0x45,0xDD,0x57,0x48,0x03,0x90,0xE8,0x5D,0x00,0x28,0x09, \ +0xD0,0x01,0x28,0x20,0xD0,0x03,0x28,0x39,0xD1,0xE8,0x19,0x41,0x78,0x01,0x29, \ +0x27,0xD0,0x00,0x20,0xC0,0xE7,0xEE,0x19,0x70,0x78,0x00,0x28,0x00,0xD1,0xBB, \ +0xE7,0x4E,0x49,0x4A,0x79,0x82,0x42,0x01,0xD0,0x00,0x20,0xB5,0xE7,0x03,0x99, \ +0xB0,0x1C,0xF8,0xF7,0x2F,0xFC,0x00,0x28,0x01,0xD0,0x00,0x20,0xAD,0xE7,0x70, \ +0x78,0xC0,0x19,0x87,0x1C,0x01,0x20,0x01,0x90,0x14,0xE0,0xE8,0x19,0x69,0x46, \ +0x06,0x1C,0xFF,0xF7,0x66,0xFF,0x04,0x1C,0x01,0xD1,0x00,0x20,0x9E,0xE7,0x70, \ +0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0,0x3E,0x49,0x80,0x78,0x09,0x7D,0x88,0x42, \ +0x01,0xD0,0x00,0x20,0x93,0xE7,0x03,0x37,0x36,0x4E,0x30,0x68,0x80,0x89,0x04, \ +0x38,0xB8,0x42,0xBE,0xDC,0x01,0x98,0x00,0x28,0x01,0xD1,0x00,0x20,0x87,0xE7, \ +0x35,0x49,0x68,0x46,0x01,0x23,0x0A,0x7D,0x21,0x1C,0xFF,0xF7,0x76,0xFE,0x00, \ +0x28,0x00,0xD1,0x7D,0xE7,0x05,0x20,0xFA,0xF7,0xD6,0xF8,0x2F,0x48,0x20,0x23, \ +0x01,0x78,0x2F,0x4F,0x19,0x43,0x01,0x70,0x01,0x78,0x10,0x23,0x19,0x43,0x01, \ +0x70,0xF9,0x1D,0x07,0x31,0xE8,0x18,0x06,0x22,0x04,0x1C,0xF8,0xF7,0xF7,0xFB, \ +0x06,0x22,0x20,0x1C,0x04,0x99,0xF8,0xF7,0xF2,0xFB,0x22,0x4C,0xF9,0x1D,0x62, \ +0x79,0x03,0x98,0x0D,0x31,0xF8,0xF7,0xEB,0xFB,0x23,0x4E,0x01,0x20,0xFD,0x1D, \ +0x29,0x35,0x70,0x73,0x28,0x71,0x02,0x99,0x39,0x80,0xA0,0x70,0x05,0x98,0x01, \ +0x28,0x08,0xD1,0x00,0x21,0x00,0x20,0xF8,0xF7,0xC5,0xFA,0x14,0x49,0x00,0x20, \ +0x09,0x68,0x48,0x61,0x07,0xE0,0xFA,0xF7,0x60,0xF8,0x39,0x88,0x89,0x02,0x09, \ +0x1A,0x06,0x20,0xFA,0xF7,0x82,0xF8,0x00,0x20,0x15,0x49,0x70,0x72,0x05,0x20, \ +0x70,0x31,0xC8,0x71,0x05,0x98,0x01,0x28,0x04,0xD1,0x01,0x21,0x04,0x20,0xFA, \ +0xF7,0x3D,0xFE,0x01,0xE0,0x01,0x20,0xA8,0x71,0x0E,0x48,0x01,0x68,0x0E,0x48, \ +0xC2,0x69,0x11,0x43,0xC1,0x61,0x0D,0x49,0x01,0x20,0x08,0x70,0x26,0xE7,0xB8, \ +0x91,0x00,0x00,0x10,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0xF4,0x00,0x00,0x02, \ +0x1C,0x01,0x00,0x02,0x18,0x00,0x00,0x02,0x7B,0x01,0x00,0x02,0x98,0x00,0x00, \ +0x02,0x40,0x06,0x00,0x02,0xE0,0x05,0x00,0x02,0xE8,0x02,0x00,0x02,0x40,0x00, \ +0x00,0x04,0x04,0x02,0x00,0x02,0xF0,0xB5,0x84,0xB0,0x5A,0x49,0x04,0x22,0x01, \ +0xA8,0xFC,0xF7,0x89,0xFF,0x59,0x4F,0x59,0x49,0x38,0x68,0x00,0x25,0x46,0x68, \ +0x06,0x22,0xF0,0x1D,0x09,0x30,0x03,0x91,0xF8,0xF7,0x78,0xFB,0x00,0x28,0x02, \ +0xD0,0x00,0x20,0x04,0xB0,0xF0,0xBD,0x39,0x68,0x38,0x1C,0x89,0x89,0x2F,0x29, \ +0x01,0xDA,0x00,0x20,0xF6,0xE7,0x20,0x22,0xF3,0x1D,0x19,0x33,0xB2,0x5C,0x5B, \ +0x78,0x1B,0x02,0x1A,0x43,0x12,0x04,0x12,0x0C,0x00,0x92,0x14,0x2A,0x04,0xDB, \ 
+0x7D,0x23,0x00,0x9A,0xDB,0x00,0x9A,0x42,0x01,0xDD,0x00,0x20,0xE3,0xE7,0x22, \ +0x22,0xB2,0x5C,0x52,0x08,0x01,0xD2,0x00,0x20,0xDD,0xE7,0x24,0x27,0x04,0x39, \ +0x24,0x29,0x34,0xDD,0xF0,0x5D,0x00,0x28,0x09,0xD0,0x01,0x28,0x11,0xD0,0x03, \ +0x28,0x2B,0xD1,0xF0,0x19,0x41,0x78,0x01,0x29,0x19,0xD0,0x00,0x20,0xCC,0xE7, \ +0xF0,0x19,0x40,0x78,0x20,0x28,0x01,0xD9,0x00,0x25,0x00,0xE0,0x01,0x25,0xC0, \ +0x19,0x87,0x1C,0x15,0xE0,0xF0,0x19,0x02,0x90,0x01,0xA9,0xFF,0xF7,0x79,0xFE, \ +0x04,0x1C,0x01,0xD1,0x00,0x20,0xB9,0xE7,0x02,0x98,0x40,0x78,0xC0,0x19,0x87, \ +0x1C,0x07,0xE0,0x2E,0x49,0x80,0x78,0x09,0x7D,0x88,0x42,0x01,0xD0,0x00,0x20, \ +0xAD,0xE7,0x03,0x37,0x28,0x48,0x00,0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xCC, \ +0xDC,0x00,0x2D,0x01,0xD1,0x00,0x20,0xA2,0xE7,0x25,0x49,0x01,0x23,0x0A,0x7D, \ +0x21,0x1C,0x01,0xA8,0xFF,0xF7,0x89,0xFD,0x00,0x28,0x00,0xD1,0x98,0xE7,0x21, \ +0x4F,0x22,0x4C,0xF9,0x1D,0x07,0x31,0x06,0x22,0x03,0x98,0xF8,0xF7,0x16,0xFB, \ +0xE0,0x1D,0x15,0x30,0x20,0x22,0xF9,0x1D,0x0D,0x31,0xF8,0xF7,0x0F,0xFB,0xE0, \ +0x1D,0x39,0x30,0x81,0x78,0xF8,0x1D,0x29,0x30,0x01,0x71,0x19,0x49,0x02,0x79, \ +0xC8,0x1D,0x59,0x30,0x42,0x73,0x00,0x9A,0x20,0x23,0x3A,0x80,0x16,0x4A,0x17, \ +0x78,0x3B,0x43,0x13,0x70,0x17,0x78,0x10,0x23,0x3B,0x43,0x13,0x70,0x00,0x22, \ +0x42,0x72,0x12,0x48,0x82,0x70,0x05,0x20,0x70,0x31,0xC8,0x71,0xF9,0xF7,0xBE, \ +0xFF,0x01,0x21,0x04,0x20,0xFA,0xF7,0x68,0xFD,0x0D,0x48,0x01,0x68,0x0D,0x48, \ +0xC2,0x69,0x11,0x43,0xC1,0x61,0x0C,0x49,0x01,0x20,0x08,0x70,0x5C,0xE7,0xBC, \ +0x91,0x00,0x00,0x10,0x00,0x00,0x02,0x14,0x01,0x00,0x02,0x18,0x00,0x00,0x02, \ +0x98,0x00,0x00,0x02,0xD8,0x00,0x00,0x02,0xE0,0x05,0x00,0x02,0x7B,0x01,0x00, \ +0x02,0x1C,0x01,0x00,0x02,0xE8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x02, \ +0x00,0x02,0xF0,0xB4,0x17,0x4A,0x17,0x4B,0xD1,0x1D,0xA9,0x31,0x49,0x79,0x49, \ +0x00,0x5C,0x5A,0xD1,0x1D,0x79,0x31,0x0B,0x6B,0x01,0x3B,0x0B,0x63,0x00,0x2B, \ +0x1C,0xDD,0x12,0x4B,0x01,0x25,0x5F,0x7A,0xA0,0x32,0x01,0x30,0x0E,0x28,0x00, \ +0xD9,0x01,0x20,0x00,0x2F,0x05,0xD1,0x2B,0x1C,0x46,0x1E,0xB3,0x40,0x23,0x40, \ +0x0E,0xD1,0x07,0xE0,0x13,0x7B,0x00,0x2B,0x0A,0xD1,0x09,0x4B,0x1B,0x18,0x5B, \ +0x7B,0x00,0x2B,0x05,0xD1,0x0B,0x6B,0x01,0x3B,0x0B,0x63,0x00,0x2B,0xE6,0xDC, \ +0x00,0x20,0xF0,0xBC,0xF7,0x46,0x00,0x00,0xE0,0x05,0x00,0x02,0x8A,0x01,0x00, \ +0x02,0xC8,0x00,0x00,0x02,0x30,0x01,0x00,0x02,0xF0,0xB5,0x29,0x4C,0x07,0x1C, \ +0x00,0x26,0x27,0x70,0xE1,0x1D,0x03,0x31,0x66,0x70,0x66,0x80,0x06,0x22,0x25, \ +0x48,0xF8,0xF7,0x85,0xFA,0x25,0x4D,0xE1,0x1D,0x09,0x31,0x06,0x22,0xE8,0x1D, \ +0x35,0x30,0xF8,0xF7,0x7D,0xFA,0xFF,0x20,0x20,0x71,0x60,0x71,0xA0,0x71,0xE0, \ +0x71,0x20,0x72,0x60,0x72,0x38,0x1C,0x40,0x28,0x1D,0x4F,0x1D,0xD0,0x00,0xF0, \ +0x14,0xF9,0x00,0xF0,0x1C,0xF9,0xE5,0x1D,0x1D,0x35,0x28,0x1C,0x00,0xF0,0x37, \ +0xF9,0x2D,0x18,0x28,0x1C,0x00,0xF0,0x4F,0xF9,0x2D,0x18,0x16,0x48,0x80,0x7D, \ +0x02,0x28,0x03,0xD1,0x28,0x1C,0x00,0xF0,0x5F,0xF9,0x2D,0x18,0x28,0x1C,0x00, \ +0xF0,0x69,0xF9,0x28,0x18,0x00,0x1B,0x78,0x66,0x78,0x65,0xF0,0xBD,0x26,0x76, \ +0x0F,0x4E,0xE8,0x1D,0x72,0x79,0x15,0x30,0xE1,0x1D,0x13,0x31,0x62,0x76,0xF8, \ +0xF7,0x4A,0xFA,0x70,0x79,0x00,0x19,0x1A,0x30,0x00,0xF0,0x2F,0xF9,0x70,0x79, \ +0x20,0x30,0x00,0x06,0x00,0x0E,0x78,0x65,0xF0,0xBD,0x00,0x00,0x5C,0x08,0x00, \ +0x02,0x74,0x00,0x00,0x02,0xD8,0x00,0x00,0x02,0xE0,0x05,0x00,0x02,0x18,0x00, \ +0x00,0x02,0x1C,0x01,0x00,0x02,0xF8,0xB5,0x07,0x1C,0xFF,0xF7,0x9C,0xFF,0x00, \ +0x26,0x80,0x2F,0x57,0x4D,0x58,0x4C,0x1A,0xD1,0x0F,0x20,0x00,0x06,0x81,0x88, \ +0x56,0x4B,0x19,0x40,0x81,0x80,0x81,0x89,0x55,0x4B,0x19,0x40,0xEA,0x1F,0x59, \ 
+0x3A,0x81,0x81,0x51,0x7B,0x03,0x29,0x08,0xD0,0x81,0x89,0xDB,0x43,0x19,0x43, \ +0x81,0x81,0x81,0x88,0x50,0x4B,0x19,0x43,0x81,0x80,0xF8,0xBD,0x01,0x20,0x50, \ +0x73,0x0F,0xE0,0x40,0x2F,0x04,0xD1,0x4C,0x4A,0x01,0x20,0x50,0x63,0x66,0x80, \ +0x08,0xE0,0x50,0x2F,0x06,0xD1,0x21,0x1D,0x06,0x22,0x49,0x48,0xF8,0xF7,0x00, \ +0xFA,0x01,0x20,0x28,0x73,0x47,0x48,0xF8,0xF7,0x50,0xFA,0x47,0x48,0x47,0x4C, \ +0x50,0x2F,0x08,0xD0,0x42,0x4A,0x51,0x6D,0xA1,0x60,0x51,0x6D,0xC0,0x79,0xF9, \ +0xF7,0xB4,0xFD,0xE0,0x60,0x0D,0xE0,0x3D,0x4A,0x51,0x6E,0xA1,0x60,0x51,0x6E, \ +0xC0,0x79,0xF9,0xF7,0xAB,0xFD,0x3F,0x49,0xE0,0x60,0x09,0x88,0xE0,0x68,0x40, \ +0x18,0x33,0x49,0x48,0x80,0x32,0x48,0x80,0x2F,0x60,0x60,0x1A,0xD1,0xFC,0xF7, \ +0x72,0xFD,0x80,0x06,0x33,0x49,0x80,0x0E,0x88,0x62,0x37,0x48,0x00,0x88,0x48, \ +0x61,0x89,0x6A,0x8B,0x00,0x59,0x18,0x89,0x00,0x09,0x18,0x08,0x20,0xF9,0xF7, \ +0x7F,0xFE,0x0F,0x20,0x00,0x06,0x81,0x89,0x31,0x4B,0x19,0x43,0x81,0x81,0x81, \ +0x88,0x27,0x4B,0x19,0x43,0x81,0x80,0xF9,0xF7,0x51,0xFA,0xF9,0xF7,0xED,0xFA, \ +0x00,0x90,0x80,0x2F,0x05,0xD1,0x00,0x98,0x00,0x28,0x38,0xD1,0x01,0x20,0x68, \ +0x72,0x35,0xE0,0x40,0x2F,0x33,0xD1,0x1F,0x48,0x0F,0x27,0x3F,0x06,0x46,0x63, \ +0xB9,0x88,0x1A,0x4B,0x19,0x40,0xB9,0x80,0xB9,0x89,0x19,0x4B,0x19,0x40,0xB9, \ +0x81,0xC1,0x1D,0x69,0x31,0xC9,0x79,0x01,0x29,0x1A,0xD1,0x00,0x99,0x00,0x29, \ +0x0B,0xD1,0xB0,0x30,0x01,0x79,0x00,0x29,0x07,0xD1,0x01,0x21,0x01,0x71,0x1A, \ +0x48,0xC0,0x8A,0x81,0x02,0x04,0x20,0xF9,0xF7,0x46,0xFE,0xF8,0xF7,0x76,0xF8, \ +0x17,0x49,0x08,0x60,0x00,0x9A,0x00,0x2A,0x02,0xD0,0x16,0x4B,0xC0,0x18,0x08, \ +0x60,0x01,0x20,0xE8,0x73,0xB8,0x89,0x10,0x4B,0x18,0x43,0xB8,0x81,0xB8,0x88, \ +0x06,0x4B,0x18,0x43,0xB8,0x80,0x6A,0xE7,0x00,0x00,0x40,0x06,0x00,0x02,0x5C, \ +0x08,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0xE8,0xE8,0x00,0x00, \ +0xE0,0x05,0x00,0x02,0x6A,0x01,0x00,0x02,0x72,0x08,0x00,0x02,0x1C,0x01,0x00, \ +0x02,0x60,0x06,0x00,0x02,0x64,0x01,0x00,0x02,0x60,0x01,0x00,0x02,0x13,0x13, \ +0x00,0x00,0xD8,0x00,0x00,0x02,0x08,0x02,0x00,0x02,0xF0,0xD8,0xFF,0xFF,0x03, \ +0x49,0x02,0x48,0x09,0x88,0x01,0x80,0xF7,0x46,0x00,0x00,0x7C,0x08,0x00,0x02, \ +0x98,0x00,0x00,0x02,0x0D,0x49,0x0C,0x48,0x8A,0x7A,0x92,0x00,0x02,0x80,0xC9, \ +0x7A,0x00,0x29,0x03,0xD0,0x01,0x88,0x10,0x23,0x19,0x43,0x01,0x80,0x08,0x49, \ +0x49,0x7A,0x01,0x29,0x04,0xD1,0x01,0x88,0x22,0x23,0x19,0x43,0x01,0x80,0xF7, \ +0x46,0x01,0x88,0x02,0x23,0x19,0x43,0x01,0x80,0xF7,0x46,0x7E,0x08,0x00,0x02, \ +0x98,0x00,0x00,0x02,0x1C,0x01,0x00,0x02,0x90,0xB4,0x01,0x1C,0x00,0x20,0x0A, \ +0x4A,0x08,0x70,0x53,0x79,0x00,0x2B,0x08,0xD9,0x08,0x4B,0x1F,0x18,0x3F,0x7D, \ +0x0C,0x18,0x01,0x30,0xA7,0x70,0x57,0x79,0x87,0x42,0xF7,0xD8,0x50,0x79,0x48, \ +0x70,0x50,0x79,0x90,0xBC,0x02,0x30,0xF7,0x46,0x00,0x00,0x1C,0x01,0x00,0x02, \ +0x98,0x00,0x00,0x02,0x90,0xB4,0x01,0x1C,0x01,0x20,0x08,0x70,0x00,0x20,0x08, \ +0x4B,0x00,0x22,0x9F,0x18,0x3F,0x7C,0x00,0x2F,0x02,0xD0,0x0C,0x18,0xA7,0x70, \ +0x01,0x30,0x01,0x32,0x04,0x2A,0xF5,0xD3,0x48,0x70,0x90,0xBC,0x02,0x30,0xF7, \ +0x46,0x00,0x00,0x18,0x00,0x00,0x02,0x03,0x21,0x01,0x70,0x01,0x22,0x42,0x70, \ +0x01,0x30,0x80,0x18,0x02,0x4A,0x12,0x7D,0x02,0x70,0x08,0x1C,0xF7,0x46,0x00, \ +0x00,0x18,0x00,0x00,0x02,0x06,0x21,0x01,0x70,0x02,0x21,0x41,0x70,0x04,0x49, \ +0x02,0x30,0x0A,0x89,0x02,0x70,0x09,0x89,0x09,0x0A,0x41,0x70,0x04,0x20,0xF7, \ +0x46,0x00,0x00,0x98,0x00,0x00,0x02,0x0A,0x21,0x01,0x70,0x02,0x21,0x41,0x70, \ +0x00,0x21,0x81,0x70,0x02,0x30,0x41,0x1C,0x07,0x20,0x08,0x70,0x04,0x20,0xF7, \ +0x46,0xF8,0xB5,0x1C,0x4F,0x48,0x20,0x38,0x70,0x1B,0x4C,0x01,0x25,0xE0,0x1D, \ 
+0x29,0x30,0x7D,0x70,0x40,0x79,0x02,0x28,0x01,0xD1,0x11,0x20,0x78,0x70,0x00, \ +0x26,0xF9,0x1D,0x03,0x31,0x06,0x22,0x7E,0x80,0x15,0x48,0xF8,0xF7,0xBE,0xF8, \ +0xE0,0x1D,0xF9,0x1D,0x09,0x31,0x07,0x30,0x06,0x22,0x04,0x1C,0xF8,0xF7,0xB6, \ +0xF8,0x06,0x22,0x20,0x1C,0x39,0x1D,0xF8,0xF7,0xB1,0xF8,0xF8,0x1D,0x0F,0x30, \ +0xF8,0xF7,0x02,0xF9,0x0C,0x4C,0x18,0x20,0xA0,0x60,0x0B,0x48,0x18,0x21,0xC0, \ +0x79,0xF9,0xF7,0x69,0xFC,0xE0,0x60,0x67,0x60,0x09,0x4F,0xFD,0x72,0xF9,0xF7, \ +0x31,0xF9,0xF9,0xF7,0xCD,0xF9,0x00,0x90,0xFE,0x72,0xF8,0xBD,0x5C,0x08,0x00, \ +0x02,0x98,0x00,0x00,0x02,0x74,0x00,0x00,0x02,0x60,0x06,0x00,0x02,0x1C,0x01, \ +0x00,0x02,0x40,0x06,0x00,0x02,0xF8,0xB5,0x00,0x26,0x87,0x1C,0x06,0x29,0x01, \ +0xD3,0x48,0x08,0x01,0xD3,0x00,0x20,0xF8,0xBD,0x00,0x23,0x03,0x22,0x00,0x25, \ +0xCC,0x1E,0x17,0xD0,0x01,0x39,0xB8,0x5C,0x91,0x42,0x02,0xD1,0x00,0x28,0x0F, \ +0xD1,0x0C,0xE0,0x0E,0x28,0x0C,0xD8,0x01,0x28,0x0A,0xD3,0xA8,0x42,0x08,0xD3, \ +0xBD,0x18,0x6D,0x78,0x03,0x32,0x03,0x33,0x2D,0x18,0x9C,0x42,0xEC,0xD8,0x01, \ +0x2E,0x01,0xD1,0x00,0x20,0xE1,0xE7,0x1A,0x48,0xC0,0x7A,0x01,0x28,0x00,0xD1, \ +0xDC,0xE7,0x19,0x48,0xC1,0x1D,0x29,0x31,0x49,0x7A,0x00,0x29,0x01,0xD1,0x01, \ +0x20,0xD4,0xE7,0xC1,0x1D,0x33,0x31,0x03,0x22,0x38,0x1C,0xF8,0xF7,0x55,0xF8, \ +0x00,0x22,0x03,0x21,0x00,0x2C,0x1B,0xD9,0x78,0x5C,0x00,0x28,0x18,0xD0,0x0F, \ +0x4D,0x01,0x26,0x2B,0x18,0x5E,0x73,0x7B,0x18,0x00,0x93,0x5B,0x78,0x1B,0x18, \ +0x98,0x42,0x0A,0xD2,0x0A,0x4D,0x01,0x26,0x2D,0x18,0x6E,0x73,0x00,0x9E,0x10, \ +0x3D,0xB6,0x78,0x01,0x30,0x98,0x42,0xEE,0x73,0xF4,0xD3,0x03,0x31,0x03,0x32, \ +0x94,0x42,0xE3,0xD8,0x01,0x20,0xAC,0xE7,0x00,0x00,0x80,0x06,0x00,0x02,0x98, \ +0x00,0x00,0x02,0x30,0x01,0x00,0x02,0xF1,0xB5,0x81,0xB0,0x22,0x4F,0x01,0x9E, \ +0x3F,0x68,0x00,0x24,0xBF,0x89,0x00,0x21,0x24,0x20,0x3D,0x1F,0x00,0x95,0x24, \ +0x2D,0x39,0xD9,0x1E,0x4F,0x7F,0x7A,0x35,0x5C,0x03,0x2D,0x08,0xD0,0x07,0x2D, \ +0x0D,0xD1,0x35,0x18,0x6D,0x78,0x01,0x24,0x03,0x1C,0x02,0x35,0x28,0x18,0x0A, \ +0xE0,0x35,0x18,0x6D,0x78,0x01,0x21,0x02,0x1C,0x02,0x35,0x28,0x18,0x05,0xE0, \ +0x35,0x18,0x6D,0x78,0x02,0x35,0x28,0x18,0x00,0x29,0x01,0xD0,0x00,0x2F,0x02, \ +0xD0,0x00,0x9D,0x85,0x42,0xE1,0xD8,0x00,0x29,0x17,0xD0,0xB0,0x18,0x40,0x78, \ +0x01,0x28,0x01,0xD0,0x02,0xB0,0xF0,0xBD,0x01,0x2F,0x0F,0xD1,0x00,0x2C,0x0D, \ +0xD0,0x01,0x98,0xC0,0x18,0x41,0x78,0xFF,0xF7,0x60,0xFF,0x00,0x28,0x00,0xD1, \ +0xF1,0xE7,0x05,0x48,0xC1,0x7A,0x00,0x29,0x01,0xD1,0x01,0x21,0xC1,0x72,0xEA, \ +0xE7,0x10,0x00,0x00,0x02,0xC8,0x00,0x00,0x02,0x80,0x06,0x00,0x02,0x00,0xB5, \ +0x05,0x49,0x89,0x7C,0x01,0x29,0x04,0xD1,0x01,0x78,0x80,0x29,0x01,0xD1,0xFF, \ +0xF7,0xA8,0xFF,0x00,0xBD,0x00,0x00,0xD8,0x00,0x00,0x02,0x90,0xB5,0x0F,0x4C, \ +0x60,0x7A,0x00,0x28,0x19,0xD0,0x0E,0x4F,0x38,0x68,0x40,0x68,0x42,0x7E,0x18, \ +0x30,0x00,0x2A,0x09,0xD0,0x0B,0x49,0x49,0x79,0x91,0x42,0x0E,0xD1,0x0A,0x49, \ +0x02,0x30,0xF7,0xF7,0xAC,0xFF,0x00,0x28,0x08,0xD1,0x38,0x68,0x08,0x49,0x40, \ +0x68,0x0A,0x30,0x06,0x22,0xF7,0xF7,0xB3,0xFF,0x01,0x20,0xA0,0x72,0x90,0xBD, \ +0x40,0x06,0x00,0x02,0x10,0x00,0x00,0x02,0x1C,0x01,0x00,0x02,0xAC,0x00,0x00, \ +0x02,0x6A,0x01,0x00,0x02,0xB0,0xB4,0x03,0x78,0x00,0x27,0x20,0x49,0x20,0x4A, \ +0x08,0x2B,0x37,0xD1,0xD3,0x78,0x00,0x2B,0x04,0xD0,0xD0,0x7A,0x09,0x68,0x88, \ +0x75,0xB0,0xBC,0xF7,0x46,0x00,0x79,0x40,0x08,0x03,0xD3,0x90,0x7A,0x09,0x68, \ +0x88,0x75,0xF6,0xE7,0x0B,0x68,0x99,0x7D,0xD2,0x7A,0x91,0x42,0x01,0xDD,0x9A, \ +0x75,0xEF,0xE7,0x15,0x4C,0x08,0x19,0x00,0x7C,0x00,0x28,0xEA,0xD1,0x08,0x1C, \ +0x01,0x29,0x0A,0xD3,0x01,0x38,0x25,0x18,0x2D,0x7C,0x00,0x2D,0x03,0xD1,0x01, \ 
+0x28,0xF8,0xD2,0x00,0x2F,0x01,0xD0,0x98,0x75,0xDC,0xE7,0x8A,0x42,0x06,0xD9, \ +0x01,0x31,0x60,0x18,0x00,0x7C,0x00,0x28,0x03,0xD1,0x8A,0x42,0xF8,0xD8,0x00, \ +0x2F,0x01,0xD0,0x99,0x75,0xCF,0xE7,0x9A,0x75,0xCD,0xE7,0xD0,0x79,0x09,0x68, \ +0x88,0x75,0xC9,0xE7,0x00,0x00,0x04,0x00,0x00,0x02,0x1C,0x01,0x00,0x02,0x18, \ +0x00,0x00,0x02,0x00,0xB5,0x05,0x20,0xF9,0xF7,0x26,0xFC,0x09,0x48,0x00,0x21, \ +0xC2,0x79,0x03,0x2A,0x05,0xD1,0xC1,0x71,0x07,0x21,0x04,0x20,0xFA,0xF7,0xCA, \ +0xF9,0x00,0xBD,0xC2,0x79,0x04,0x2A,0xFB,0xD1,0xC1,0x71,0x07,0x21,0x04,0x20, \ +0xFA,0xF7,0xC1,0xF9,0x00,0xBD,0x50,0x06,0x00,0x02,0x90,0xB5,0x27,0x48,0x27, \ +0x49,0x00,0x68,0x47,0x68,0x22,0x20,0x38,0x5C,0x10,0x23,0x18,0x40,0x03,0xD0, \ +0x08,0x78,0x00,0x28,0x41,0xD0,0x02,0xE0,0x08,0x78,0x00,0x28,0x3D,0xD1,0x24, \ +0x20,0x38,0x5C,0x00,0x28,0x39,0xD1,0xF8,0x1D,0x1D,0x30,0x44,0x78,0x1D,0x49, \ +0x00,0x2C,0x02,0xD0,0x4A,0x79,0xA2,0x42,0x30,0xD1,0x4A,0x79,0x1B,0x49,0x02, \ +0x30,0xF7,0xF7,0x0F,0xFF,0x00,0x28,0x29,0xD1,0x38,0x19,0x20,0x30,0xC0,0x79, \ +0x00,0x19,0x28,0x30,0x39,0x5C,0x03,0x29,0x21,0xD1,0x38,0x18,0x14,0x49,0x80, \ +0x78,0x09,0x7D,0x88,0x42,0x1B,0xD1,0x13,0x48,0x40,0x7A,0x00,0x28,0x05,0xD0, \ +0x12,0x48,0x08,0x18,0x40,0x7B,0x00,0x28,0x12,0xD0,0x09,0xE0,0x10,0x48,0x10, \ +0x4A,0x40,0x79,0x40,0x00,0x10,0x5A,0x01,0x22,0x01,0x39,0x8A,0x40,0x10,0x40, \ +0x07,0xD0,0x01,0x20,0xF8,0xF7,0x9A,0xFC,0x00,0x28,0x02,0xD0,0x02,0x20,0xFF, \ +0xF7,0x55,0xFA,0x90,0xBD,0x10,0x00,0x00,0x02,0x30,0x00,0x00,0x02,0x1C,0x01, \ +0x00,0x02,0xF4,0x00,0x00,0x02,0x18,0x00,0x00,0x02,0xC8,0x00,0x00,0x02,0x30, \ +0x01,0x00,0x02,0x90,0x06,0x00,0x02,0x8A,0x01,0x00,0x02,0x80,0xB5,0xFD,0xF7, \ +0xF3,0xFF,0x1A,0x48,0xF7,0xF7,0x8C,0xFD,0x19,0x4B,0x1A,0x48,0x59,0x7A,0x01, \ +0x29,0x04,0xD1,0x48,0x21,0x41,0x81,0x18,0x21,0x01,0x81,0x03,0xE0,0x90,0x21, \ +0x41,0x81,0x30,0x21,0x01,0x81,0x41,0x89,0x02,0x89,0x14,0x4F,0x89,0x18,0x12, \ +0x4A,0x11,0x80,0xC2,0x88,0x80,0x88,0x11,0x18,0x09,0x18,0x39,0x80,0x51,0x18, \ +0xFF,0x31,0x10,0x4A,0x31,0x31,0x11,0x80,0x19,0x88,0x10,0x4F,0x48,0x43,0x0E, \ +0x49,0x08,0x80,0xD8,0x79,0x0E,0x49,0x38,0x70,0x38,0x78,0x08,0x70,0xF7,0xF7, \ +0xB0,0xFD,0xF9,0xF7,0xA8,0xFA,0x39,0x78,0x0B,0x48,0x40,0x5C,0x0B,0x49,0x08, \ +0x70,0x80,0xBD,0x74,0x00,0x00,0x02,0x1C,0x01,0x00,0x02,0x18,0x00,0x00,0x02, \ +0x64,0x01,0x00,0x02,0x60,0x01,0x00,0x02,0x62,0x01,0x00,0x02,0x66,0x01,0x00, \ +0x02,0x68,0x01,0x00,0x02,0x69,0x01,0x00,0x02,0x70,0x01,0x00,0x02,0x84,0x01, \ +0x00,0x02,0x80,0xB4,0x21,0x48,0x00,0x21,0x01,0x70,0x00,0x20,0x19,0x27,0x1F, \ +0x4A,0xFF,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x00,0x20,0x43,0x27, \ +0x1C,0x4A,0x7F,0x02,0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x1A,0x48,0x18, \ +0x4A,0x01,0x60,0x1A,0x48,0x1A,0x4B,0x02,0x60,0x13,0x60,0x02,0x68,0xD7,0x1D, \ +0x15,0x37,0x57,0x60,0x02,0x68,0x08,0x3F,0x97,0x60,0x02,0x68,0x11,0x73,0x02, \ +0x68,0x91,0x73,0x07,0x68,0x03,0x22,0xBA,0x75,0x02,0x68,0x91,0x82,0x00,0x68, \ +0x11,0x4A,0x10,0x60,0x11,0x48,0x0C,0x4A,0x01,0x60,0x11,0x48,0x02,0x60,0x13, \ +0x60,0x02,0x68,0xD3,0x1D,0x11,0x33,0x53,0x60,0x02,0x68,0x91,0x81,0x02,0x68, \ +0x11,0x72,0x00,0x68,0x0C,0x49,0x08,0x60,0x0C,0x49,0x01,0x20,0x08,0x70,0x80, \ +0xBC,0xF7,0x46,0x7B,0x01,0x00,0x02,0x00,0x11,0x00,0x02,0x00,0xDA,0x00,0x02, \ +0x00,0x00,0x00,0x02,0x04,0x00,0x00,0x02,0x00,0x00,0x00,0x80,0x08,0x00,0x00, \ +0x02,0x0C,0x00,0x00,0x02,0x10,0x00,0x00,0x02,0x14,0x00,0x00,0x02,0x87,0x01, \ +0x00,0x02,0xF0,0xB5,0x82,0xB0,0x39,0x49,0xCF,0x1D,0x99,0x37,0xB8,0x79,0x04, \ +0x23,0x18,0x40,0x40,0x24,0x00,0x26,0x00,0x28,0x03,0xD1,0x7E,0x72,0x3C,0x72, \ 
+0x02,0xB0,0xF0,0xBD,0x33,0x48,0xA4,0x22,0x02,0x70,0x10,0x25,0x45,0x70,0x79, \ +0x7A,0x00,0x29,0x01,0xD0,0x18,0x21,0x41,0x70,0x2F,0x49,0x03,0x23,0xC9,0x88, \ +0x9B,0x03,0x2C,0x48,0x19,0x43,0x41,0x80,0xC1,0x1D,0x03,0x31,0x06,0x22,0x2B, \ +0x48,0xF7,0xF7,0x11,0xFE,0x2B,0x49,0x2B,0x48,0x06,0x22,0xF7,0xF7,0x0C,0xFE, \ +0x28,0x1C,0x2A,0x4D,0x2A,0x49,0xA8,0x60,0xC8,0x79,0x10,0x21,0xF9,0xF7,0xC8, \ +0xF9,0xE8,0x60,0x21,0x48,0x68,0x60,0x01,0x25,0x3D,0x71,0xF8,0xF7,0x8F,0xFE, \ +0xF8,0xF7,0x2B,0xFF,0x3E,0x71,0x82,0x26,0x00,0x28,0x2A,0xD1,0x21,0x49,0x21, \ +0x48,0xC9,0x79,0x21,0x4A,0xC0,0x88,0x49,0x00,0x51,0x5A,0x04,0x22,0x40,0x18, \ +0x1F,0x49,0x09,0x88,0x3D,0x72,0x41,0x18,0x13,0x48,0x42,0x60,0x01,0x20,0xF9, \ +0xF7,0x99,0xFA,0x00,0x22,0xD2,0x43,0x00,0x92,0x01,0x22,0x11,0x21,0x01,0xAB, \ +0x19,0x48,0xFB,0xF7,0x34,0xFD,0x01,0x98,0x41,0x08,0x01,0xD3,0x3C,0x72,0x12, \ +0xE0,0x40,0x09,0x10,0xD3,0x15,0x49,0x78,0x7A,0x09,0x7C,0x88,0x42,0x01,0xDA, \ +0x3E,0x72,0x09,0xE0,0x3C,0x72,0x07,0xE0,0x10,0x49,0x78,0x7A,0x09,0x7C,0x88, \ +0x42,0x01,0xDA,0x3E,0x72,0x00,0xE0,0x3C,0x72,0x98,0xE7,0x00,0x00,0xE0,0x05, \ +0x00,0x02,0x5C,0x08,0x00,0x02,0x98,0x00,0x00,0x02,0x74,0x00,0x00,0x02,0x60, \ +0x08,0x00,0x02,0xA6,0x00,0x00,0x02,0x60,0x06,0x00,0x02,0x1C,0x01,0x00,0x02, \ +0x18,0x00,0x00,0x02,0x50,0x01,0x00,0x02,0x66,0x01,0x00,0x02,0x3C,0x08,0x00, \ +0x02,0xD8,0x00,0x00,0x02,0x80,0xB5,0x0F,0x27,0x3F,0x06,0xB8,0x88,0x18,0x4B, \ +0x18,0x40,0xB8,0x80,0xB8,0x89,0x17,0x4B,0x18,0x40,0xB8,0x81,0x17,0x48,0x01, \ +0x68,0x01,0x31,0x01,0x60,0x16,0x48,0xFC,0xF7,0x16,0xF9,0x00,0x29,0x17,0xD1, \ +0x14,0x48,0x15,0x4A,0x03,0x78,0x15,0x49,0x00,0x2B,0x06,0xD1,0x09,0x68,0xD3, \ +0x69,0x19,0x43,0xD1,0x61,0x01,0x21,0x01,0x70,0x0A,0xE0,0x11,0x4B,0xDB,0x79, \ +0x05,0x2B,0x06,0xD0,0x09,0x68,0xD3,0x69,0xC9,0x43,0x19,0x40,0xD1,0x61,0x00, \ +0x21,0x01,0x70,0xB8,0x89,0x0C,0x4B,0x18,0x43,0xB8,0x81,0xB8,0x88,0x0B,0x4B, \ +0x18,0x43,0xB8,0x80,0x80,0xBD,0x00,0x00,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF, \ +0xFF,0xFC,0x02,0x00,0x02,0x20,0x4E,0x00,0x00,0x04,0x02,0x00,0x02,0x40,0x00, \ +0x00,0x04,0xE8,0x02,0x00,0x02,0x50,0x06,0x00,0x02,0x13,0x13,0x00,0x00,0xE8, \ +0xE8,0x00,0x00,0x90,0xB5,0x0F,0x24,0x24,0x06,0xA0,0x88,0x14,0x4B,0x18,0x40, \ +0xA0,0x80,0xA0,0x89,0x13,0x4B,0x18,0x40,0xA0,0x81,0x13,0x48,0x41,0x7A,0x00, \ +0x29,0x15,0xD1,0x81,0x7B,0x01,0x29,0x12,0xD1,0x01,0x7B,0x00,0x29,0x0F,0xD1, \ +0x00,0x27,0x0E,0x49,0x60,0x30,0x0F,0x70,0x47,0x72,0x00,0xF0,0xE6,0xF9,0x0C, \ +0x48,0x01,0x21,0x87,0x61,0x07,0x20,0xF9,0xF7,0xB8,0xFF,0x0A,0x49,0x01,0x20, \ +0x08,0x70,0xA0,0x89,0x09,0x4B,0x18,0x43,0xA0,0x81,0xA0,0x88,0x08,0x4B,0x18, \ +0x43,0xA0,0x80,0x90,0xBD,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0xE0,0x05, \ +0x00,0x02,0xB1,0x01,0x00,0x02,0x80,0x00,0x00,0x04,0xB0,0x01,0x00,0x02,0x13, \ +0x13,0x00,0x00,0xE8,0xE8,0x00,0x00,0xB0,0xB5,0x0C,0x1C,0x07,0x1C,0x01,0x28, \ +0x01,0xD3,0x0E,0x2F,0x01,0xD9,0x00,0x20,0xB0,0xBD,0x01,0x20,0x1A,0x4D,0x80, \ +0x02,0xE8,0x61,0x1A,0x48,0xE9,0x69,0x00,0x68,0x08,0x43,0xE8,0x61,0x18,0x48, \ +0xE9,0x69,0x00,0x68,0x08,0x43,0xE8,0x61,0xE8,0x69,0x04,0x23,0x18,0x43,0xE8, \ +0x61,0x15,0x48,0xF7,0xF7,0xA0,0xFB,0x00,0xF0,0xC0,0xF8,0x01,0x2C,0x01,0xD1, \ +0x00,0xF0,0xEC,0xF9,0x00,0xF0,0x82,0xF8,0x10,0x48,0x00,0x78,0x01,0x28,0x0A, \ +0xD1,0x0E,0x2F,0x04,0xD1,0xE8,0x69,0x40,0x23,0x98,0x43,0xE8,0x61,0x03,0xE0, \ +0xE8,0x69,0x40,0x23,0x18,0x43,0xE8,0x61,0x38,0x1C,0x00,0xF0,0x12,0xF8,0xE8, \ +0x69,0x01,0x23,0x9B,0x02,0x98,0x43,0xE8,0x61,0x01,0x20,0xB0,0xBD,0x00,0x00, \ +0x40,0x00,0x00,0x04,0xA4,0x01,0x00,0x02,0xA8,0x01,0x00,0x02,0xDC,0x05,0x00, \ 
+0x00,0xE5,0x02,0x00,0x02,0x90,0xB5,0x07,0x1C,0x07,0x20,0x40,0x06,0x81,0x69, \ +0x04,0x23,0x19,0x43,0x81,0x61,0xFA,0xF7,0x4E,0xFF,0x0A,0x20,0xF7,0xF7,0x67, \ +0xFB,0x17,0x4C,0x02,0x20,0x61,0x68,0x00,0xF0,0x30,0xF8,0x00,0x20,0x21,0x68, \ +0x00,0xF0,0x2C,0xF8,0x13,0x48,0xBF,0x00,0x38,0x18,0x40,0x38,0xC1,0x6B,0x01, \ +0x20,0x00,0xF0,0x24,0xF8,0x05,0x20,0xE1,0x68,0x00,0xF0,0x20,0xF8,0x08,0x20, \ +0xA1,0x68,0x00,0xF0,0x1C,0xF8,0x07,0x20,0x21,0x69,0x00,0xF0,0x18,0xF8,0x0A, \ +0x48,0x38,0x18,0x40,0x38,0xC1,0x6B,0x04,0x20,0x00,0xF0,0x11,0xF8,0xFF,0x20, \ +0xF5,0x30,0xF7,0xF7,0x3F,0xFB,0xFA,0xF7,0x8B,0xFF,0x0A,0x20,0xF7,0xF7,0x3A, \ +0xFB,0x90,0xBD,0x00,0x00,0x00,0x03,0x00,0x02,0x14,0x03,0x00,0x02,0x4C,0x03, \ +0x00,0x02,0x90,0xB4,0x0B,0x4A,0x13,0x68,0xDF,0x43,0x0A,0x4B,0xDC,0x69,0x27, \ +0x40,0xDF,0x61,0x07,0x05,0x89,0x00,0x39,0x43,0x80,0x08,0x08,0x43,0x18,0x62, \ +0x18,0x1C,0x01,0x6A,0xC9,0x0D,0xFC,0xD3,0x11,0x68,0xC2,0x69,0x11,0x43,0xC1, \ +0x61,0x90,0xBC,0xF7,0x46,0xA8,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0x80,0xB5, \ +0x19,0x4F,0x00,0x20,0x39,0x78,0xF7,0xF7,0xEC,0xFB,0x79,0x78,0x01,0x20,0xF7, \ +0xF7,0xE8,0xFB,0xB9,0x78,0x02,0x20,0xF7,0xF7,0xE4,0xFB,0xF9,0x78,0x03,0x20, \ +0xF7,0xF7,0xE0,0xFB,0x79,0x7C,0x11,0x20,0xF7,0xF7,0xDC,0xFB,0x39,0x7D,0x14, \ +0x20,0xF7,0xF7,0xD8,0xFB,0x79,0x7D,0x15,0x20,0xF7,0xF7,0xD4,0xFB,0x39,0x7F, \ +0x1C,0x20,0xF7,0xF7,0xD0,0xFB,0xB9,0x7C,0x12,0x20,0xF7,0xF7,0xCC,0xFB,0xF9, \ +0x7C,0x13,0x20,0xF7,0xF7,0xC8,0xFB,0x05,0x48,0x00,0x78,0x01,0x28,0x03,0xD1, \ +0x79,0x7F,0x1D,0x20,0xF7,0xF7,0xC0,0xFB,0x80,0xBD,0x00,0x00,0xF4,0x09,0x00, \ +0x02,0xE4,0x02,0x00,0x02,0x80,0xB5,0x07,0x27,0x7F,0x06,0xB8,0x69,0x40,0x08, \ +0x40,0x00,0xB8,0x61,0xB8,0x69,0x01,0x23,0x18,0x43,0xB8,0x61,0x05,0x20,0xF7, \ +0xF7,0xD0,0xFA,0xB8,0x69,0x40,0x08,0x40,0x00,0xB8,0x61,0x05,0x20,0xF7,0xF7, \ +0xC9,0xFA,0x80,0xBD,0xF0,0xB5,0x3A,0x4E,0x07,0x1C,0x30,0x7B,0x03,0x28,0xFC, \ +0xD0,0x0F,0x25,0x2D,0x06,0xA8,0x88,0x37,0x4B,0x38,0x4C,0x18,0x40,0xA8,0x80, \ +0xA8,0x89,0x35,0x4B,0x18,0x40,0xA8,0x81,0xE0,0x69,0xA3,0x01,0x18,0x43,0xE0, \ +0x61,0x98,0x03,0xC1,0x68,0xC0,0x6B,0x28,0x89,0x28,0x88,0x20,0x68,0x31,0x48, \ +0xC0,0x69,0x31,0x48,0xC1,0x19,0xC8,0x1F,0x09,0x38,0xC2,0x7B,0x2F,0x48,0xFF, \ +0x2A,0x00,0xD0,0x02,0x75,0x4A,0x7B,0xFF,0x2A,0x00,0xD0,0x42,0x75,0x40,0x31, \ +0x89,0x78,0xFF,0x29,0x02,0xD0,0x8A,0x07,0x00,0xD1,0x41,0x74,0x28,0x48,0x01, \ +0x7D,0x14,0x20,0xF7,0xF7,0x6E,0xFB,0x25,0x48,0x41,0x7D,0x15,0x20,0xF7,0xF7, \ +0x69,0xFB,0x24,0x48,0x00,0x78,0x01,0x28,0x0A,0xD1,0x0E,0x2F,0x04,0xD1,0xE0, \ +0x69,0x40,0x23,0x98,0x43,0xE0,0x61,0x03,0xE0,0xE0,0x69,0x40,0x23,0x18,0x43, \ +0xE0,0x61,0x1D,0x48,0x07,0x75,0x00,0x7D,0xFF,0xF7,0x05,0xFF,0x01,0x20,0xF7, \ +0xF7,0x4E,0xFA,0xE0,0x69,0x1A,0x4B,0x18,0x40,0x7D,0x21,0x09,0x01,0xE0,0x61, \ +0x07,0x20,0xF9,0xF7,0x83,0xF8,0xA8,0x89,0x16,0x4B,0x18,0x43,0xA8,0x81,0xA8, \ +0x88,0x15,0x4B,0x18,0x43,0xA8,0x80,0x30,0x6B,0x01,0x28,0x05,0xD1,0x00,0x22, \ +0x10,0x21,0x12,0x48,0xFB,0xF7,0x1D,0xFA,0xF0,0xBD,0x30,0x6B,0x02,0x28,0xFB, \ +0xD1,0x00,0x22,0x10,0x21,0x0F,0x48,0xFB,0xF7,0x14,0xFA,0xF0,0xBD,0x00,0x00, \ +0xE0,0x05,0x00,0x02,0x17,0x17,0xFF,0xFF,0xEC,0xEC,0xFF,0xFF,0x40,0x00,0x00, \ +0x04,0x80,0x00,0x00,0x04,0x14,0x0A,0x00,0x02,0xF4,0x09,0x00,0x02,0xE5,0x02, \ +0x00,0x02,0x18,0x00,0x00,0x02,0xFF,0xEF,0x00,0x00,0x13,0x13,0x00,0x00,0xE8, \ +0xE8,0x00,0x00,0xBC,0x07,0x00,0x02,0xDC,0x07,0x00,0x02,0x90,0xB5,0x07,0x21, \ +0x49,0x06,0xCA,0x69,0x52,0x09,0x03,0xD3,0xCA,0x69,0x10,0x23,0x9A,0x43,0xCA, \ +0x61,0x01,0x28,0x07,0xD1,0x12,0x4C,0x67,0x68,0xF7,0xF7,0x6E,0xFA,0x39,0x1A, \ 
+0x49,0x01,0x08,0x18,0x60,0x60,0x0F,0x48,0x00,0x21,0x00,0x7D,0xFF,0xF7,0x5F, \ +0xFE,0x0E,0x4F,0x0F,0x4B,0x78,0x74,0x01,0x20,0x80,0x06,0xC0,0x68,0x0C,0x48, \ +0xC1,0x69,0x19,0x40,0xC1,0x61,0x01,0x20,0xF7,0xF7,0xE8,0xF9,0x00,0x20,0x38, \ +0x72,0xF9,0x1D,0x79,0x31,0x01,0x20,0x88,0x63,0x7D,0x21,0x09,0x01,0x07,0x20, \ +0xF9,0xF7,0x1B,0xF8,0x90,0xBD,0x80,0x00,0x00,0x04,0x18,0x00,0x00,0x02,0xE0, \ +0x05,0x00,0x02,0x40,0x00,0x00,0x04,0xFF,0xEF,0x00,0x00,0xB0,0xB5,0x21,0x4C, \ +0xE0,0x69,0x23,0x0C,0x18,0x43,0xE0,0x61,0xE0,0x69,0x1B,0x23,0x98,0x43,0xE0, \ +0x61,0xE0,0x69,0x04,0x23,0x98,0x43,0xE0,0x61,0xE0,0x69,0x9B,0x02,0x18,0x43, \ +0xE0,0x61,0x19,0x48,0xE1,0x69,0x00,0x68,0xC0,0x43,0x08,0x40,0x07,0x27,0x7F, \ +0x06,0xE0,0x61,0xB8,0x69,0x01,0x23,0x18,0x43,0xB8,0x61,0x01,0x20,0xF9,0xF7, \ +0x09,0xF8,0x08,0x20,0xF9,0xF7,0x06,0xF8,0x07,0x20,0xF9,0xF7,0x03,0xF8,0x01, \ +0x20,0x80,0x06,0xC1,0x68,0xC0,0x6B,0x0E,0x4D,0x20,0x68,0x0F,0x20,0x00,0x06, \ +0x00,0x88,0x01,0x24,0xAC,0x74,0xF8,0xF7,0xAA,0xFF,0xE8,0x1D,0x99,0x30,0x09, \ +0x4D,0x84,0x71,0x6C,0x68,0xF7,0xF7,0x07,0xFA,0x21,0x1A,0x49,0x09,0x08,0x18, \ +0x68,0x60,0xF8,0x69,0x10,0x23,0x18,0x43,0xF8,0x61,0xB0,0xBD,0x40,0x00,0x00, \ +0x04,0xA4,0x01,0x00,0x02,0xE0,0x05,0x00,0x02,0x80,0x00,0x00,0x04,0xF0,0xB4, \ +0x2E,0x4D,0x01,0x27,0xE9,0x1D,0x19,0x31,0xCC,0x78,0x00,0x20,0x2C,0x4A,0xFF, \ +0x2C,0x13,0xD0,0x23,0x09,0x11,0xD3,0x2B,0x7F,0x13,0x70,0x6E,0x7F,0x56,0x70, \ +0xAE,0x7F,0x96,0x70,0xEB,0x7F,0xD3,0x70,0x0B,0x78,0x53,0x74,0x4B,0x78,0x13, \ +0x75,0x8B,0x78,0x53,0x75,0x14,0x77,0x97,0x74,0xD0,0x74,0x0E,0xE0,0x10,0x70, \ +0x60,0x23,0x53,0x70,0x40,0x23,0x93,0x70,0xD3,0x70,0x50,0x74,0xFF,0x23,0x13, \ +0x75,0x57,0x23,0x53,0x75,0x48,0x23,0x13,0x77,0x97,0x74,0xD0,0x74,0x1A,0x4B, \ +0x9C,0x78,0x1A,0x4B,0x05,0x2C,0x01,0xD0,0x58,0x73,0x05,0xE0,0x09,0x79,0xFF, \ +0x29,0x01,0xD0,0x59,0x73,0x00,0xE0,0x58,0x73,0xD1,0x78,0x15,0x4B,0xC0,0x29, \ +0x02,0xD1,0x9F,0x71,0xD0,0x70,0x00,0xE0,0x98,0x71,0x12,0x48,0x00,0x7D,0x40, \ +0x19,0xC1,0x1F,0x09,0x39,0xC9,0x7B,0xFF,0x29,0x00,0xD0,0x11,0x75,0x41,0x7B, \ +0xFF,0x29,0x00,0xD0,0x51,0x75,0x40,0x30,0x80,0x78,0xFF,0x28,0x02,0xD0,0x81, \ +0x07,0x00,0xD1,0x50,0x74,0x50,0x78,0x09,0x49,0x40,0x09,0x80,0x07,0x80,0x0F, \ +0x08,0x70,0xF0,0xBC,0xF7,0x46,0x14,0x0A,0x00,0x02,0xF4,0x09,0x00,0x02,0x28, \ +0x01,0x00,0x02,0x04,0x0A,0x00,0x02,0x90,0x06,0x00,0x02,0x18,0x00,0x00,0x02, \ +0x68,0x01,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/fw-rfmd-1.101.0-84.h linux.22-ac2/drivers/usb/atmel/fw-rfmd-1.101.0-84.h --- linux.vanilla/drivers/usb/atmel/fw-rfmd-1.101.0-84.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/fw-rfmd-1.101.0-84.h 2003-08-13 21:00:21.000000000 +0100 @@ -0,0 +1,2564 @@ +/**************************************************************************** + * The following firmware has been taken (and reformatted slighly) from the * + * Atmel (atmelwlandriver) driver source. * + * * + * Target: AT76c503a with RFMD radio * + * Version: 1.101.0-84 * + ****************************************************************************/ + +/**************************************************************************/ +/* */ +/* Copyright (c) 1999-2000 by Atmel Corporation */ +/* */ +/* This software is copyrighted by and is the sole property of Atmel */ +/* Corporation. All rights, title, ownership, or other interests */ +/* in the software remain the property of Atmel Corporation. 
This */ +/* software may only be used in accordance with the corresponding */ +/* license agreement. Any un-authorized use, duplication, transmission, */ +/* distribution, or disclosure of this software is expressly forbidden. */ +/* */ +/* This Copyright notice may not be removed or modified without prior */ +/* written consent of Atmel Corporation. */ +/* */ +/* Atmel Corporation, Inc. reserves the right to modify this software */ +/* without notice. */ +/* */ +/* Atmel Corporation. */ +/* 2325 Orchard Parkway literature@atmel.com */ +/* San Jose, CA 95131 http://www.atmel.com */ +/* */ +/**************************************************************************/ +/**************************************************************************/ +/* */ +/* Automatically generated FW file for AT76C502A */ +/* */ +/**************************************************************************/ + +#define FW_503RFMD_INTERNAL { \ +0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F, \ +0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1, \ +0x9F,0xE5,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0E,0x04,0xA0,0xE3,0x00, \ +0x10,0xA0,0xE3,0x81,0x11,0xA0,0xE1,0x00,0x10,0x81,0xE3,0x00,0x10,0x80,0xE5, \ +0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3,0x04,0x10,0x80,0xE5,0x0C,0x10,0x80, \ +0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90,0xE5,0x48,0xD1,0x9F,0xE5,0x41,0x14, \ +0x00,0xEB,0x44,0x11,0x9F,0xE5,0xD0,0x20,0x9F,0xE5,0xD1,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10,0xC1,0xE3,0x04,0x10,0x41,0xE2, \ +0x01,0xD0,0xA0,0xE1,0x00,0xA0,0xA0,0xE3,0x00,0xB0,0xA0,0xE3,0xB0,0x20,0x9F, \ +0xE5,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10, \ +0xC1,0xE3,0x04,0x10,0x41,0xE2,0x01,0xD0,0xA0,0xE1,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x84,0x30,0x9F,0xE5,0x00,0x10,0x83,0xE5,0x01,0xD0,0xA0,0xE1, \ +0x74,0x00,0x9F,0xE5,0x01,0x00,0x80,0xE3,0x0F,0xE0,0xA0,0xE1,0x10,0xFF,0x2F, \ +0xE1,0x00,0xA0,0x00,0x47,0x64,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x04,0x00, \ +0x80,0xE2,0x6C,0x10,0x9F,0xE5,0x6C,0x30,0x9F,0xE5,0x5C,0x20,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0x00,0x20,0x83,0xE5,0x02,0x00,0x80,0xE0,0x5C,0x10,0x9F,0xE5, \ +0x00,0x20,0xA0,0xE3,0x00,0x20,0x81,0xE5,0x44,0x20,0x9F,0xE5,0x00,0x00,0x82, \ +0xE5,0x1E,0xFF,0x2F,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF, \ +0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x60,0x0F,0x00,0xEA,0x02, \ +0xF0,0xD4,0xFA,0x22,0x48,0x87,0x46,0x79,0x0F,0x00,0xEA,0x02,0xF0,0xE8,0xF8, \ +0x20,0x48,0x87,0x46,0xCD,0x05,0x00,0x00,0xC0,0x03,0x00,0x02,0x00,0x01,0x00, \ +0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x44,0x04,0x00,0x02,0x64,0x04, \ +0x00,0x02,0x68,0x04,0x00,0x02,0x6C,0x04,0x00,0x02,0xFE,0xFF,0xFF,0xEA,0xFE, \ +0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0,0xE3, \ +0x0E,0xF0,0xA0,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0, \ +0xE3,0x0E,0xF0,0xA0,0xE1,0x00,0x00,0xA0,0xE3,0x0E,0xF0,0xA0,0xE1,0x20,0x00, \ +0x00,0x00,0x04,0x01,0x00,0x00,0x08,0x01,0x00,0x00,0x0C,0x01,0x00,0x00,0x10, \ +0x01,0x00,0x00,0x14,0x01,0x00,0x00,0x18,0x01,0x00,0x00,0x24,0x01,0x00,0x00, \ +0x00,0x60,0x00,0x01,0xE0,0x59,0x00,0x01,0x91,0x3F,0x00,0x00,0x81,0x40,0x00, \ +0x00,0x78,0x47,0x00,0x00,0xF0,0x40,0x2D,0xE9,0x80,0x31,0x9F,0xE5,0x03,0x00, \ +0x83,0xE8,0x3F,0x40,0x01,0xE2,0x00,0x00,0x54,0xE3,0x01,0x50,0xA0,0x03,0x07, \ +0x50,0xC3,0x05,0x00,0x00,0xA0,0xE3,0x5F,0x00,0x00,0xEB,0x60,0x31,0x9F,0xE5, \ +0xB4,0x20,0xD3,0xE1,0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x01,0x00,0xA0, \ 
+0xE3,0x59,0x00,0x00,0xEB,0x0B,0x00,0x00,0xEA,0x07,0x00,0xD3,0xE5,0x01,0x00, \ +0x50,0xE3,0x03,0x00,0x00,0x0A,0x3C,0x01,0x9F,0xE5,0x10,0x10,0xA0,0xE3,0x00, \ +0x10,0xC0,0xE5,0x07,0x00,0x00,0xEA,0x00,0x10,0xA0,0xE3,0x07,0x10,0xC3,0xE5, \ +0x24,0x01,0x9F,0xE5,0x10,0x10,0xA0,0xE3,0x00,0x10,0xC0,0xE5,0x14,0x31,0x9F, \ +0xE5,0x01,0x00,0xA0,0xE3,0x06,0x00,0xC3,0xE5,0xF0,0x40,0xBD,0xE8,0x1E,0xFF, \ +0x2F,0xE1,0x78,0x47,0x00,0x00,0xF0,0x40,0x2D,0xE9,0xFC,0x00,0x9F,0xE5,0x00, \ +0x10,0xD0,0xE5,0x01,0x00,0x51,0xE3,0x39,0x00,0x00,0x1A,0xF0,0x30,0x9F,0xE5, \ +0x35,0x30,0xD3,0xE5,0x01,0x00,0x53,0xE3,0x00,0x00,0x00,0xCA,0x04,0x00,0x00, \ +0xEA,0xE0,0x20,0x9F,0xE5,0x01,0x20,0x82,0xE3,0x0F,0xE0,0xA0,0xE1,0x12,0xFF, \ +0x2F,0xE1,0x78,0x47,0x00,0x00,0xC4,0x00,0x9F,0xE5,0x00,0x10,0xA0,0xE3,0x00, \ +0x10,0xC0,0xE5,0xB4,0x00,0x9F,0xE5,0x06,0x10,0xD0,0xE5,0x00,0x00,0x51,0xE3, \ +0x18,0x00,0x00,0x0A,0x00,0x20,0xA0,0xE3,0x06,0x20,0xC0,0xE5,0xB4,0x10,0xD0, \ +0xE1,0x00,0x00,0x51,0xE3,0x05,0x00,0x00,0x0A,0x01,0x00,0xA0,0xE3,0x29,0x00, \ +0x00,0xEB,0x88,0x00,0x9F,0xE5,0x01,0x10,0xA0,0xE3,0x06,0x10,0xC0,0xE5,0x1D, \ +0x00,0x00,0xEA,0x07,0x30,0xD0,0xE5,0x00,0x00,0x53,0xE3,0x01,0x00,0x00,0x0A, \ +0x01,0x30,0xA0,0x13,0xB6,0x30,0xC0,0x11,0x08,0x40,0xD0,0xE5,0x00,0x00,0x54, \ +0xE3,0x15,0x00,0x00,0x1A,0x5C,0x40,0x9F,0xE5,0x10,0x50,0xA0,0xE3,0x00,0x50, \ +0xC4,0xE5,0x00,0x50,0xA0,0xE3,0x07,0x50,0xC0,0xE5,0x0F,0x00,0x00,0xEA,0x40, \ +0x00,0x9F,0xE5,0x00,0x10,0x80,0xE5,0x04,0x10,0x80,0xE5,0x44,0x00,0x9F,0xE5, \ +0x00,0x10,0x90,0xE5,0x08,0x20,0xD1,0xE5,0x10,0x00,0x52,0xE3,0x07,0x00,0x00, \ +0x1A,0x01,0x00,0xA0,0xE1,0x30,0x20,0x9F,0xE5,0x01,0x20,0x82,0xE3,0x0F,0xE0, \ +0xA0,0xE1,0x12,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x18,0x10,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0xF0,0x40,0xBD,0xE8,0x1E,0xFF,0x2F,0xE1,0xD4,0x51,0x00,0x00, \ +0x68,0x03,0x00,0x0D,0x84,0x00,0x00,0x02,0xD1,0x3D,0x00,0x00,0x64,0x02,0x00, \ +0x02,0xCF,0x2D,0x00,0x00,0x01,0x40,0x2D,0xE9,0x00,0x40,0xA0,0xE1,0x54,0x02, \ +0x9F,0xE5,0x54,0x12,0x9F,0xE5,0x08,0x20,0xD0,0xE5,0x00,0x00,0x52,0xE3,0x8F, \ +0x00,0x00,0x1A,0x00,0x20,0x90,0xE5,0x00,0x30,0xD2,0xE5,0x00,0x00,0x54,0xE3, \ +0x00,0x30,0xC1,0x05,0x10,0x00,0x00,0x0A,0x34,0x42,0x9F,0xE5,0x10,0x50,0xA0, \ +0xE3,0xC0,0x60,0xA0,0xE3,0x00,0x70,0x0F,0xE1,0x3F,0x80,0xA0,0xE3,0x08,0x90, \ +0x07,0xE0,0x06,0x90,0x89,0xE1,0x09,0xF0,0x21,0xE1,0x08,0x60,0xC7,0xE1,0x00, \ +0x50,0xC4,0xE5,0x00,0x30,0xC1,0xE5,0x00,0x70,0x0F,0xE1,0x3F,0x80,0xA0,0xE3, \ +0x08,0x90,0x07,0xE0,0x06,0x90,0x89,0xE1,0x09,0xF0,0x21,0xE1,0x08,0x60,0xC7, \ +0xE1,0xB4,0x30,0xD0,0xE1,0x40,0x00,0x53,0xE3,0x3F,0x00,0x00,0xAA,0x01,0x30, \ +0x43,0xE2,0x01,0x20,0x82,0xE2,0x00,0x00,0x53,0xE3,0x70,0x00,0x00,0x0A,0x01, \ +0x20,0x42,0xE2,0xE0,0x01,0xB2,0xE8,0x03,0x40,0xA0,0xE3,0x05,0x00,0x00,0xEA, \ +0xE0,0x01,0xB2,0xE8,0x03,0x40,0xA0,0xE3,0x00,0x50,0xC1,0xE5,0x01,0x30,0x43, \ +0xE2,0x00,0x00,0x53,0xE3,0x66,0x00,0x00,0x0A,0x25,0x54,0xA0,0xE1,0x00,0x50, \ +0xC1,0xE5,0x01,0x40,0x44,0xE2,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0x60, \ +0x00,0x00,0x0A,0x00,0x00,0x54,0xE3,0xF7,0xFF,0xFF,0x1A,0x03,0x40,0xA0,0xE3, \ +0x00,0x60,0xC1,0xE5,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0x59,0x00,0x00, \ +0x0A,0x26,0x64,0xA0,0xE1,0x00,0x60,0xC1,0xE5,0x01,0x40,0x44,0xE2,0x01,0x30, \ +0x43,0xE2,0x00,0x00,0x53,0xE3,0x53,0x00,0x00,0x0A,0x00,0x00,0x54,0xE3,0xF7, \ +0xFF,0xFF,0x1A,0x03,0x40,0xA0,0xE3,0x00,0x70,0xC1,0xE5,0x01,0x30,0x43,0xE2, \ +0x00,0x00,0x53,0xE3,0x4C,0x00,0x00,0x0A,0x27,0x74,0xA0,0xE1,0x00,0x70,0xC1, \ +0xE5,0x01,0x40,0x44,0xE2,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0x46,0x00, \ 
+0x00,0x0A,0x00,0x00,0x54,0xE3,0xF7,0xFF,0xFF,0x1A,0x03,0x40,0xA0,0xE3,0x00, \ +0x80,0xC1,0xE5,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0x3F,0x00,0x00,0x0A, \ +0x28,0x84,0xA0,0xE1,0x00,0x80,0xC1,0xE5,0x01,0x40,0x44,0xE2,0x01,0x30,0x43, \ +0xE2,0x00,0x00,0x53,0xE3,0x39,0x00,0x00,0x0A,0x00,0x00,0x54,0xE3,0xF7,0xFF, \ +0xFF,0x1A,0x00,0x00,0x53,0xE3,0xC8,0xFF,0xFF,0x1A,0x34,0x00,0x00,0xEA,0x03, \ +0x30,0xA0,0xE3,0xE0,0x03,0xB2,0xE8,0x01,0x00,0x00,0xEA,0xE0,0x03,0xB2,0xE8, \ +0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1,0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0, \ +0xE1,0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1,0x00,0x50,0xC1,0xE5,0x00,0x60, \ +0xC1,0xE5,0x26,0x64,0xA0,0xE1,0x00,0x60,0xC1,0xE5,0x26,0x64,0xA0,0xE1,0x00, \ +0x60,0xC1,0xE5,0x26,0x64,0xA0,0xE1,0x00,0x60,0xC1,0xE5,0x00,0x70,0xC1,0xE5, \ +0x27,0x74,0xA0,0xE1,0x00,0x70,0xC1,0xE5,0x27,0x74,0xA0,0xE1,0x00,0x70,0xC1, \ +0xE5,0x27,0x74,0xA0,0xE1,0x00,0x70,0xC1,0xE5,0x00,0x80,0xC1,0xE5,0x28,0x84, \ +0xA0,0xE1,0x00,0x80,0xC1,0xE5,0x28,0x84,0xA0,0xE1,0x00,0x80,0xC1,0xE5,0x28, \ +0x84,0xA0,0xE1,0x00,0x80,0xC1,0xE5,0x00,0x90,0xC1,0xE5,0x29,0x94,0xA0,0xE1, \ +0x00,0x90,0xC1,0xE5,0x29,0x94,0xA0,0xE1,0x00,0x90,0xC1,0xE5,0x29,0x94,0xA0, \ +0xE1,0x00,0x90,0xC1,0xE5,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0xD8,0xFF, \ +0xFF,0x1A,0x00,0x50,0x92,0xE5,0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1,0x00, \ +0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1,0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1, \ +0x00,0x50,0xC1,0xE5,0x04,0x20,0x82,0xE2,0xB4,0x30,0xD0,0xE1,0x40,0x30,0x43, \ +0xE2,0x00,0x20,0x80,0xE5,0xB4,0x30,0xC0,0xE1,0x01,0x40,0xBD,0xE8,0x0E,0xF0, \ +0xA0,0xE1,0xD4,0x51,0x00,0x00,0x28,0x03,0x00,0x0D,0x68,0x03,0x00,0x0D,0x00, \ +0xB5,0x03,0xF0,0x39,0xFC,0x00,0x20,0x00,0xBD,0x80,0xB5,0x86,0xB0,0x07,0x1C, \ +0x00,0x21,0x04,0x91,0xFF,0x21,0x01,0x22,0x91,0x31,0x01,0x20,0x03,0x90,0x01, \ +0x91,0x05,0x92,0x02,0x92,0x17,0x4A,0x19,0xA1,0x17,0x48,0x01,0x23,0x00,0x97, \ +0x03,0xF0,0x0F,0xFE,0x00,0x21,0x04,0x91,0xFF,0x21,0x91,0x31,0x01,0x22,0x05, \ +0x92,0xFB,0x1D,0xFF,0x33,0x03,0x22,0x03,0x20,0x8A,0x33,0x00,0x93,0x03,0x90, \ +0x02,0x92,0x01,0x91,0x13,0xA1,0x11,0x4A,0x11,0x48,0x02,0x23,0x03,0xF0,0xFA, \ +0xFD,0x13,0x48,0x14,0xA1,0x03,0xF0,0x6C,0xFE,0x16,0x48,0x17,0xA1,0x03,0xF0, \ +0x68,0xFE,0x1A,0x48,0x1B,0xA1,0x03,0xF0,0x64,0xFE,0x1E,0x48,0x1F,0xA1,0x03, \ +0xF0,0x60,0xFE,0x03,0xF0,0x40,0xFA,0x06,0xB0,0x80,0xBD,0x8D,0x17,0x00,0x00, \ +0xC4,0x05,0x00,0x02,0x54,0x78,0x20,0x74,0x68,0x72,0x65,0x61,0x64,0x00,0x00, \ +0x00,0x11,0x2A,0x00,0x00,0x54,0x06,0x00,0x02,0x4D,0x67,0x6D,0x20,0x74,0x68, \ +0x72,0x65,0x61,0x64,0x00,0x00,0xE4,0x06,0x00,0x02,0x54,0x78,0x20,0x73,0x74, \ +0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x04,0x07,0x00,0x02, \ +0x4D,0x67,0x6D,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67, \ +0x73,0x00,0x00,0x00,0x00,0x24,0x07,0x00,0x02,0x54,0x58,0x20,0x47,0x4F,0x20, \ +0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x00,0x44, \ +0x07,0x00,0x02,0x50,0x73,0x50,0x6F,0x6C,0x6C,0x20,0x73,0x74,0x61,0x74,0x75, \ +0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0xC3,0x00,0x18,0x18,0x80,0x00,0x80, \ +0x08,0x01,0xD0,0x01,0x38,0xFD,0xD1,0xF7,0x46,0x03,0x49,0x0F,0x20,0x00,0x06, \ +0x81,0x80,0x00,0x21,0x81,0x81,0xF7,0x46,0x00,0x00,0xFB,0xFB,0x00,0x00,0x01, \ +0x20,0x80,0x06,0x40,0x6A,0xF7,0x46,0x01,0x1C,0x06,0x48,0x04,0xD0,0x41,0x68, \ +0xC3,0x01,0x19,0x43,0x41,0x60,0xF7,0x46,0x41,0x68,0x01,0x23,0x5B,0x03,0x99, \ +0x43,0x41,0x60,0xF7,0x46,0x40,0x00,0x00,0x04,0x80,0xB5,0x13,0x49,0x15,0x4F, \ +0x08,0x78,0x42,0x01,0x12,0x48,0x42,0x70,0x12,0x4A,0x52,0x7A,0x00,0x2A,0x0B, \ 
+0xD0,0x09,0x78,0x00,0x29,0x08,0xDD,0x41,0x78,0x10,0x23,0x19,0x43,0x41,0x70, \ +0x48,0x21,0x79,0x81,0x18,0x21,0x39,0x81,0x03,0xE0,0x90,0x21,0x79,0x81,0x30, \ +0x21,0x39,0x81,0x41,0x78,0x01,0x20,0x00,0xF0,0x5B,0xF8,0x78,0x89,0x39,0x89, \ +0x40,0x18,0x06,0x49,0x08,0x80,0x01,0xF0,0xE8,0xFC,0x80,0xBD,0x00,0x00,0x9B, \ +0x01,0x00,0x02,0xD8,0x07,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0x00,0x00,0x02, \ +0xB4,0x01,0x00,0x02,0x01,0x1C,0x06,0x48,0x04,0xD0,0x41,0x7C,0x01,0x23,0x19, \ +0x43,0x41,0x74,0xF7,0x46,0x41,0x7C,0xFE,0x23,0x19,0x40,0x41,0x74,0xF7,0x46, \ +0x00,0x00,0xD8,0x07,0x00,0x02,0xF0,0xB4,0x07,0x24,0x64,0x06,0xA2,0x69,0x04, \ +0x23,0x9A,0x43,0xA2,0x61,0xF3,0x22,0x12,0x05,0x93,0x68,0x40,0x23,0xD3,0x60, \ +0x17,0x69,0xBB,0x08,0xFC,0xD3,0x13,0x69,0x5B,0x08,0xFC,0xD3,0x93,0x68,0x80, \ +0x23,0x03,0x43,0xD3,0x60,0x17,0x69,0xBB,0x08,0xFC,0xD3,0x13,0x69,0x5B,0x08, \ +0xFC,0xD3,0x17,0x1C,0x92,0x68,0x00,0x22,0x00,0x29,0x0D,0xD9,0x0A,0x4D,0x83, \ +0x18,0xEB,0x5C,0xFB,0x60,0x3E,0x69,0xB3,0x08,0xFC,0xD3,0x3B,0x69,0x5B,0x08, \ +0xFC,0xD3,0x01,0x32,0x8A,0x42,0xBB,0x68,0xF2,0xD3,0xA0,0x69,0x04,0x23,0x18, \ +0x43,0xA0,0x61,0xF0,0xBC,0xF7,0x46,0x00,0x00,0xD8,0x07,0x00,0x02,0x90,0xB4, \ +0x07,0x27,0x7F,0x06,0xBA,0x69,0x04,0x23,0x9A,0x43,0xBA,0x61,0xF3,0x22,0x12, \ +0x05,0x93,0x68,0x40,0x23,0xD3,0x60,0x14,0x69,0xA3,0x08,0xFC,0xD3,0x13,0x69, \ +0x5B,0x08,0xFC,0xD3,0x93,0x68,0xD0,0x60,0x10,0x69,0x80,0x08,0xFC,0xD3,0x10, \ +0x1C,0x02,0x69,0x52,0x08,0xFC,0xD3,0x82,0x68,0xC1,0x60,0x01,0x69,0x89,0x08, \ +0xFC,0xD3,0x01,0x69,0x49,0x08,0xFC,0xD3,0x80,0x68,0x04,0x23,0xB8,0x69,0x18, \ +0x43,0xB8,0x61,0x90,0xBC,0xF7,0x46,0x80,0xB4,0x07,0x22,0x52,0x06,0x91,0x69, \ +0x04,0x23,0x99,0x43,0x91,0x61,0xF3,0x21,0x09,0x05,0x8B,0x68,0x40,0x23,0xCB, \ +0x60,0x0F,0x69,0xBB,0x08,0xFC,0xD3,0x0B,0x69,0x5B,0x08,0xFC,0xD3,0x8B,0x68, \ +0xC8,0x60,0x08,0x69,0x80,0x08,0xFC,0xD3,0x08,0x69,0x40,0x08,0xFC,0xD3,0x97, \ +0x69,0x04,0x23,0x3B,0x43,0x88,0x68,0x93,0x61,0x97,0x69,0x04,0x23,0x9F,0x43, \ +0x97,0x61,0x41,0x20,0xC8,0x60,0x08,0x69,0x80,0x08,0xFC,0xD3,0x08,0x1C,0x01, \ +0x69,0x49,0x08,0xFC,0xD3,0x81,0x68,0xFF,0x21,0xC1,0x60,0x01,0x69,0x49,0x08, \ +0xFC,0xD3,0x91,0x69,0x04,0x23,0x19,0x43,0x80,0x68,0x91,0x61,0x80,0xBC,0xF7, \ +0x46,0x80,0xB5,0x0B,0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61,0x00,0x20, \ +0xFF,0xF7,0x0A,0xFF,0x08,0x48,0xF9,0x69,0x00,0x68,0xC0,0x43,0x08,0x40,0xF8, \ +0x61,0x07,0x20,0x40,0x06,0x81,0x69,0x01,0x23,0x19,0x43,0x81,0x61,0x03,0x49, \ +0x01,0x20,0x08,0x70,0x80,0xBD,0x40,0x00,0x00,0x04,0xBC,0x02,0x00,0x02,0xBB, \ +0x02,0x00,0x02,0xC1,0x0A,0x01,0xD3,0x00,0x20,0xF7,0x46,0xFF,0x22,0x01,0x32, \ +0x02,0x40,0x01,0x21,0x00,0x2A,0x01,0xD0,0x08,0x1C,0xF7,0x46,0x80,0x0A,0x01, \ +0xD3,0x08,0x1C,0xF7,0x46,0x02,0x20,0xF7,0x46,0xF0,0xB5,0x0F,0x1C,0x19,0x49, \ +0x04,0x1C,0x19,0x4E,0x1A,0x48,0x31,0x60,0x05,0x6C,0x00,0x2D,0x16,0xD0,0x06, \ +0x22,0x20,0x1C,0x31,0x68,0x04,0xF0,0x23,0xFA,0x00,0x28,0x08,0xD1,0x30,0x68, \ +0xC1,0x88,0xB9,0x42,0x01,0xD1,0x01,0x20,0xF0,0xBD,0xC7,0x80,0x00,0x20,0xF0, \ +0xBD,0x30,0x68,0x08,0x30,0x30,0x60,0x28,0x1C,0x01,0x3D,0x00,0x28,0xE8,0xD1, \ +0x0C,0x48,0x01,0x6C,0x01,0x31,0x01,0x64,0x01,0x6C,0x07,0x29,0x03,0xD9,0x06, \ +0x49,0x31,0x60,0x08,0x21,0x01,0x64,0x06,0x22,0x21,0x1C,0x30,0x68,0x04,0xF0, \ +0x1F,0xFA,0x30,0x68,0xC7,0x80,0x00,0x20,0xF0,0xBD,0x00,0x00,0x80,0x07,0x00, \ +0x02,0x44,0x01,0x00,0x02,0x50,0x09,0x00,0x02,0x05,0x49,0x0A,0x68,0x12,0x01, \ +0x02,0x70,0x0A,0x68,0x12,0x01,0x12,0x0A,0x42,0x70,0x08,0x68,0x01,0x30,0x08, \ +0x60,0xF7,0x46,0x48,0x01,0x00,0x02,0x00,0x2A,0x0C,0xD1,0x08,0x4A,0x92,0x7A, \ 
+0x8A,0x42,0x00,0xD8,0x11,0x1C,0x07,0x4A,0x49,0x00,0x51,0x5A,0x06,0x4A,0xD2, \ +0x88,0x89,0x18,0xC9,0x18,0x00,0xE0,0x00,0x21,0x01,0x70,0x09,0x0A,0x41,0x70, \ +0xF7,0x46,0x08,0x01,0x00,0x02,0xB8,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xF0, \ +0xB5,0x5A,0x4E,0x30,0x68,0x47,0x68,0x38,0x78,0x05,0x07,0x2D,0x0F,0x08,0x28, \ +0x01,0xD0,0x00,0x2D,0x73,0xD1,0x56,0x4C,0x20,0x79,0x02,0x28,0x07,0xD1,0xF8, \ +0x1D,0x09,0x30,0x06,0x22,0x53,0x49,0x04,0xF0,0xB9,0xF9,0x00,0x28,0x68,0xD0, \ +0x52,0x48,0x00,0x68,0xFF,0xF7,0x6F,0xFF,0x31,0x68,0x48,0x72,0x30,0x68,0x46, \ +0x7A,0x28,0x06,0x00,0x0E,0x08,0x28,0x61,0xD1,0x4D,0x48,0x80,0x79,0x05,0x28, \ +0x5E,0xD1,0x20,0x79,0x4B,0x4D,0x02,0x28,0x0C,0xD1,0xF8,0x1D,0x03,0x30,0x06, \ +0x22,0x29,0x1C,0x04,0xF0,0x9D,0xF9,0x00,0x28,0x52,0xD1,0x78,0x78,0x81,0x08, \ +0x73,0xD3,0x40,0x08,0x72,0xD2,0x20,0x79,0x01,0x28,0x0C,0xD1,0xF8,0x1D,0x09, \ +0x30,0x06,0x22,0x29,0x1C,0x04,0xF0,0x8D,0xF9,0x00,0x28,0x42,0xD1,0x78,0x78, \ +0x81,0x08,0x64,0xD2,0x40,0x08,0x62,0xD2,0x38,0x78,0x08,0x28,0x60,0xD1,0x3B, \ +0x48,0x01,0x78,0x00,0x29,0x11,0xD0,0xC0,0x78,0x00,0x28,0x11,0xD0,0x78,0x78, \ +0xC0,0x09,0x0E,0xD2,0xB9,0x7F,0xF8,0x1D,0x09,0x30,0x88,0x29,0x02,0xD1,0xC0, \ +0x7B,0x8E,0x28,0x06,0xD0,0xB8,0x7D,0x00,0x07,0x1F,0xD0,0x02,0xE0,0x78,0x78, \ +0xC0,0x09,0x46,0xD2,0x20,0x79,0x02,0x28,0x44,0xD1,0x00,0x2E,0x0A,0xD1,0x2D, \ +0x48,0x41,0x68,0x04,0x29,0x06,0xD1,0x01,0x27,0x47,0x60,0x01,0x20,0x01,0xF0, \ +0xFC,0xFB,0x29,0x48,0x07,0x70,0x60,0x79,0x03,0x28,0x34,0xD1,0x19,0x21,0xC9, \ +0x02,0x02,0x20,0x01,0xF0,0xD8,0xFB,0x25,0x48,0x81,0x78,0x01,0xE0,0x34,0xE0, \ +0x33,0xE0,0x08,0x23,0x99,0x43,0x81,0x70,0x2D,0xE0,0x00,0xE0,0x2D,0xE0,0x00, \ +0x28,0x22,0xD1,0x39,0x78,0x80,0x29,0x01,0xD0,0x50,0x29,0x1D,0xD1,0xFA,0x1D, \ +0x19,0x32,0x50,0x79,0x1C,0x4C,0xC3,0x19,0x20,0x33,0xDB,0x79,0xC0,0x18,0x2A, \ +0x30,0x3B,0x5C,0x24,0x7D,0xA3,0x42,0x10,0xD1,0x92,0x78,0x52,0x08,0x14,0xD3, \ +0x80,0x29,0x0B,0xD1,0x01,0x30,0x39,0x5C,0x04,0x29,0x07,0xD1,0x38,0x18,0xC1, \ +0x79,0x09,0x02,0x09,0x04,0x09,0x0C,0x02,0xE0,0x09,0xE0,0x08,0xE0,0x05,0xE0, \ +0x80,0x79,0x08,0x43,0x02,0xD0,0x80,0x02,0x00,0xF0,0x7B,0xFD,0x00,0xF0,0x6D, \ +0xF8,0xF0,0xBD,0x4C,0x01,0x00,0x02,0xB4,0x00,0x00,0x02,0x60,0x00,0x00,0x02, \ +0xF0,0x01,0x00,0x02,0xA0,0x09,0x00,0x02,0x92,0x00,0x00,0x02,0x1C,0x00,0x00, \ +0x02,0x50,0x09,0x00,0x02,0xE4,0x01,0x00,0x02,0xC0,0x09,0x00,0x02,0x00,0x00, \ +0x00,0x02,0xF0,0xB5,0x1A,0x4A,0x00,0x21,0x50,0x68,0x45,0x7A,0x41,0x72,0x50, \ +0x68,0x44,0x68,0x20,0x78,0x00,0x07,0x00,0x0F,0x08,0x28,0x1A,0xD1,0x15,0x48, \ +0x46,0x79,0xC0,0x20,0x03,0xF0,0xCE,0xFB,0x07,0x1C,0x01,0x2E,0x0A,0xD9,0x20, \ +0x78,0x08,0x28,0x07,0xD1,0x60,0x78,0x04,0x21,0x01,0x40,0x20,0x23,0x18,0x40, \ +0x2A,0x1C,0x03,0xF0,0x74,0xF8,0x38,0x1C,0x03,0xF0,0xBD,0xFB,0x28,0x06,0x00, \ +0x0E,0x00,0xF0,0x13,0xF8,0x05,0xE0,0x00,0x28,0x03,0xD1,0x28,0x06,0x00,0x0E, \ +0x00,0xF0,0xE6,0xFB,0x03,0x4A,0x80,0x20,0x51,0x68,0x08,0x72,0x50,0x68,0x00, \ +0x68,0x50,0x60,0xF0,0xBD,0x4C,0x01,0x00,0x02,0xB4,0x00,0x00,0x02,0x88,0xB5, \ +0x00,0x21,0x00,0x91,0x00,0x28,0x0A,0x4F,0x0B,0xD1,0x78,0x68,0x40,0x68,0x81, \ +0x7D,0xC2,0x7D,0x12,0x02,0x11,0x43,0x09,0x04,0x09,0x0C,0x0A,0x30,0xFF,0xF7, \ +0x85,0xFE,0x00,0x90,0x00,0x98,0x01,0x28,0x02,0xD1,0x79,0x68,0xC1,0x20,0x48, \ +0x72,0x88,0xBD,0x00,0x00,0x4C,0x01,0x00,0x02,0x80,0xB5,0x05,0x48,0x00,0x78, \ +0x80,0x09,0x04,0xD3,0x04,0x4F,0x38,0x68,0x02,0xF0,0x7F,0xF8,0x38,0x60,0x80, \ +0xBD,0x00,0x00,0x9C,0x01,0x00,0x02,0x4C,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0, \ +0x00,0x25,0x7D,0x26,0x36,0x01,0x01,0x21,0x89,0x06,0x88,0x68,0x00,0x0B,0xFC, \ 
+0x24,0x04,0x40,0xFA,0x48,0xC7,0x6A,0x00,0x2F,0x0F,0xD1,0x00,0x20,0xFF,0xF7, \ +0x3A,0xFD,0xF6,0x48,0xC1,0x69,0x83,0x01,0x19,0x43,0xC1,0x61,0xC1,0x69,0xF4, \ +0x4B,0x19,0x40,0xC1,0x61,0x01,0x05,0xC8,0x68,0x02,0xB0,0xF0,0xBD,0xF2,0x49, \ +0xA0,0x08,0x08,0x5C,0x00,0x28,0x06,0xD0,0x00,0x20,0xFF,0xF7,0x25,0xFD,0x01, \ +0x21,0x89,0x06,0xC8,0x68,0xF1,0xE7,0xED,0x49,0x04,0x20,0x20,0x40,0x01,0x91, \ +0x61,0xD0,0x04,0x20,0xFF,0xF7,0xCD,0xFD,0xEA,0x49,0x08,0x71,0xA0,0x09,0x01, \ +0xD3,0x14,0x21,0x00,0xE0,0x0E,0x21,0xE7,0x48,0x02,0x22,0x01,0xF0,0x74,0xFE, \ +0x00,0x28,0x03,0xD1,0x01,0x21,0x89,0x06,0xC8,0x68,0xD8,0xE7,0x01,0x21,0x89, \ +0x06,0xC8,0x68,0x28,0x43,0x01,0xE0,0xCA,0x68,0x10,0x43,0x42,0x09,0x03,0xD2, \ +0x32,0x1C,0x01,0x3E,0x00,0x2A,0xF7,0xD1,0x10,0x23,0x98,0x43,0x05,0x1C,0x00, \ +0x2E,0x01,0xDC,0x28,0x1C,0xC4,0xE7,0xD7,0x49,0x08,0x79,0x0A,0x28,0x09,0xD0, \ +0x14,0x28,0x0B,0xD0,0x37,0x28,0x0D,0xD0,0x6E,0x28,0x0F,0xD1,0xD4,0x4A,0x03, \ +0x20,0x50,0x75,0x14,0xE0,0xD2,0x4A,0x00,0x20,0x50,0x75,0x10,0xE0,0xD0,0x4A, \ +0x01,0x21,0x51,0x75,0x0C,0xE0,0xCE,0x4A,0x02,0x20,0x50,0x75,0x08,0xE0,0x14, \ +0x2F,0x03,0xD2,0xCB,0x4A,0x03,0x20,0x50,0x75,0x02,0xE0,0xC9,0x4A,0x02,0x20, \ +0x50,0x75,0xA8,0x09,0x06,0xD3,0xE8,0x08,0x04,0xD2,0x01,0x20,0xFF,0xF7,0xCA, \ +0xFC,0x28,0x1C,0x98,0xE7,0xC3,0x4F,0x00,0x20,0x38,0x60,0xE8,0x0A,0x1D,0xD3, \ +0xB4,0x2C,0x07,0xD0,0xC4,0x2C,0x16,0xD0,0xD4,0x2C,0x23,0xD1,0x00,0xF0,0xFF, \ +0xF9,0x20,0xE0,0x3D,0xE0,0xB6,0x48,0x40,0x68,0x80,0x0B,0x1B,0xD3,0xB9,0x4C, \ +0x02,0x20,0xE1,0x1D,0x03,0x31,0xB8,0x72,0x01,0x98,0x06,0x22,0xB7,0x4E,0x04, \ +0xF0,0x19,0xF8,0x60,0x88,0x70,0x80,0x0E,0xE0,0x00,0xF0,0xF8,0xFA,0x0B,0xE0, \ +0xA4,0x2C,0x15,0xD0,0xB4,0x2C,0x13,0xD0,0xC4,0x2C,0x01,0xD0,0xD4,0x2C,0x03, \ +0xD1,0xAD,0x48,0xAF,0x49,0x40,0x88,0x08,0x80,0x78,0x68,0x04,0x28,0x06,0xD1, \ +0x00,0xF0,0xCE,0xF9,0x00,0x22,0x10,0x21,0xAB,0x48,0x03,0xF0,0xCD,0xFA,0x28, \ +0x1C,0x5D,0xE7,0x7A,0x7D,0xA9,0x48,0xAA,0x4B,0x52,0x00,0x9A,0x5A,0xC1,0x88, \ +0xA5,0x4B,0x8A,0x18,0x1A,0x80,0xB4,0x2C,0xE8,0xD1,0x80,0x88,0x40,0x00,0x08, \ +0x18,0x19,0x88,0x40,0x18,0x18,0x80,0xE1,0xE7,0xA3,0x49,0x08,0x68,0x00,0x7A, \ +0x00,0x28,0x06,0xD0,0xC4,0x20,0x9A,0x4A,0x01,0x21,0x89,0x06,0x10,0x60,0xC8, \ +0x68,0x3F,0xE7,0x01,0x20,0xFF,0xF7,0x6C,0xFC,0x29,0x2F,0x0D,0xD2,0x07,0x20, \ +0xFF,0xF7,0x1B,0xFD,0x91,0x49,0xC8,0x71,0x0B,0x21,0x79,0x43,0xCF,0x08,0x03, \ +0x21,0x00,0x91,0x00,0x0A,0x1D,0xD3,0x01,0x3F,0x1B,0xE0,0x04,0x20,0xFF,0xF7, \ +0x0D,0xFD,0x00,0x06,0x00,0x0E,0x89,0x4E,0x0A,0x28,0x30,0x71,0x1F,0xD0,0x14, \ +0x28,0x21,0xD0,0x37,0x28,0x23,0xD0,0x6E,0x28,0x03,0xD1,0x07,0x20,0xFF,0xF7, \ +0xFE,0xFC,0xF0,0x71,0x0B,0x20,0x78,0x43,0xC7,0x08,0x03,0x21,0x00,0x91,0xF0, \ +0x79,0x00,0x0A,0x00,0xD3,0x01,0x3F,0x80,0x2C,0x01,0xD0,0x50,0x2C,0x16,0xD1, \ +0x03,0x20,0xFF,0xF7,0xED,0xFC,0x82,0x49,0x80,0x06,0x09,0x68,0x80,0x0E,0x48, \ +0x74,0x11,0xE0,0xFF,0x08,0x00,0x21,0x00,0x91,0xEE,0xE7,0xBF,0x08,0x01,0x21, \ +0x00,0x91,0xEA,0xE7,0x0B,0x20,0x78,0x43,0x07,0x09,0x02,0x21,0x00,0x91,0xE4, \ +0xE7,0x78,0x49,0x00,0x20,0x09,0x68,0x48,0x74,0x74,0x48,0x80,0x89,0x04,0x30, \ +0xB8,0x42,0x01,0xD3,0x18,0x2F,0x0E,0xD8,0x6C,0x4A,0xC3,0x20,0x10,0x60,0x65, \ +0x48,0xC1,0x69,0x83,0x01,0x19,0x43,0xC1,0x61,0xC1,0x69,0x63,0x4B,0x19,0x40, \ +0xC1,0x61,0x01,0x05,0xC8,0x68,0xDB,0xE6,0x6B,0x4E,0x02,0x22,0x30,0x68,0x18, \ +0x21,0x40,0x68,0x01,0xF0,0x6A,0xFD,0x00,0x28,0x03,0xD1,0x01,0x21,0x89,0x06, \ +0xC8,0x68,0xCE,0xE6,0x7D,0x20,0xC0,0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x29, \ +0x43,0x03,0xE0,0x01,0x22,0x92,0x06,0xD2,0x68,0x11,0x43,0x4A,0x09,0x03,0xD2, \ 
+0x02,0x1C,0x01,0x38,0x00,0x2A,0xF5,0xD1,0x10,0x23,0x99,0x43,0x0D,0x1C,0x00, \ +0x28,0x01,0xDC,0x28,0x1C,0xB6,0xE6,0x58,0x48,0x54,0x49,0x00,0x68,0x00,0x22, \ +0x46,0x68,0x0A,0x80,0x2A,0x0A,0x52,0x07,0x08,0xD1,0x70,0x88,0x4D,0x4A,0x00, \ +0x27,0x08,0x80,0x01,0x21,0x11,0x73,0xD7,0x72,0x28,0x1C,0xA4,0xE6,0x50,0x49, \ +0x0D,0x60,0xE9,0x0A,0x13,0xD3,0x47,0x4A,0x01,0x21,0x91,0x72,0x71,0x78,0xC9, \ +0x08,0x03,0xD3,0x71,0x88,0x45,0x4A,0x51,0x80,0x02,0xE0,0x43,0x49,0x00,0x22, \ +0x4A,0x80,0x40,0x68,0xC1,0x1D,0x03,0x31,0x06,0x22,0x01,0x98,0x03,0xF0,0x2A, \ +0xFF,0x00,0x98,0x3D,0x49,0x48,0x75,0x42,0x48,0x02,0x68,0x97,0x81,0x4A,0x7D, \ +0x03,0x68,0x00,0x27,0x9A,0x73,0x0F,0x60,0x31,0x78,0x48,0x29,0x03,0xD1,0x71, \ +0x78,0x40,0x23,0x99,0x43,0x71,0x70,0x71,0x78,0xC9,0x09,0x2E,0xD2,0x00,0x68, \ +0x02,0x22,0x81,0x89,0x40,0x68,0x18,0x30,0x18,0x39,0x01,0xF0,0x05,0xFD,0x00, \ +0x28,0x05,0xD1,0x2E,0x48,0x01,0x21,0x01,0x73,0xC7,0x72,0x28,0x1C,0x67,0xE6, \ +0x2B,0x4F,0x03,0x20,0xF8,0x72,0x02,0x20,0x38,0x73,0x80,0x2C,0x15,0xD1,0x2F, \ +0x48,0xC1,0x1D,0x29,0x31,0x09,0x79,0x01,0x29,0x0F,0xD1,0xF9,0x1D,0x49,0x31, \ +0x89,0x79,0x05,0x29,0x0A,0xD1,0xC1,0x1D,0x07,0x31,0xF0,0x1D,0x06,0x22,0x09, \ +0x30,0x03,0xF0,0xCE,0xFE,0x00,0x28,0x01,0xD1,0x01,0x21,0xB9,0x76,0x28,0x1C, \ +0x48,0xE6,0x24,0x4E,0x31,0x78,0x00,0x29,0x05,0xD1,0x19,0x48,0x01,0x21,0x01, \ +0x73,0xC7,0x72,0x28,0x1C,0x3E,0xE6,0x04,0x1C,0x00,0x68,0x02,0x22,0x40,0x68, \ +0x04,0x21,0x18,0x30,0x01,0xF0,0xCC,0xFC,0x00,0x28,0x01,0xD1,0x28,0x1C,0x32, \ +0xE6,0x7D,0x20,0xC0,0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x29,0x43,0x01,0xE0, \ +0xD3,0x68,0x19,0x43,0x4B,0x09,0x03,0xD2,0x03,0x1C,0x01,0x38,0x00,0x2B,0xF7, \ +0xD1,0x10,0x23,0x99,0x43,0x0D,0x1C,0x00,0x28,0x23,0xDC,0x20,0xE0,0x00,0x00, \ +0x40,0x00,0x00,0x04,0xFF,0xEF,0x00,0x00,0x54,0x01,0x00,0x02,0x3C,0x09,0x00, \ +0x02,0xD8,0x07,0x00,0x02,0xC0,0x07,0x00,0x02,0x50,0x09,0x00,0x02,0x38,0x09, \ +0x00,0x02,0xEA,0x01,0x00,0x02,0x44,0x07,0x00,0x02,0x00,0x00,0x00,0x02,0xB8, \ +0x01,0x00,0x02,0x4C,0x01,0x00,0x02,0xEC,0x01,0x00,0x02,0x84,0x00,0x00,0x02, \ +0x1C,0x00,0x00,0x02,0x28,0x1C,0xFA,0xE5,0x20,0x68,0x40,0x68,0xC1,0x1D,0x11, \ +0x31,0x40,0x7E,0x0A,0x78,0x00,0x02,0x10,0x43,0x8A,0x78,0xC9,0x78,0x12,0x04, \ +0x10,0x43,0x89,0x09,0x09,0x06,0x09,0x0E,0x0D,0x23,0x59,0x43,0x89,0x19,0x0B, \ +0x7B,0x1B,0x06,0x18,0x43,0x32,0x1C,0x03,0x26,0x76,0x06,0x30,0x60,0x8B,0x7B, \ +0x48,0x7B,0x1B,0x02,0x18,0x43,0xCB,0x7B,0x1B,0x04,0x18,0x43,0x0B,0x7C,0x1B, \ +0x06,0x18,0x43,0x70,0x60,0xD0,0x1D,0x39,0x30,0x00,0x78,0x01,0x28,0x02,0xD1, \ +0x01,0x21,0xB1,0x60,0x19,0xE0,0x02,0x28,0x17,0xD1,0x8A,0x7C,0x48,0x7C,0x12, \ +0x02,0x10,0x43,0xCA,0x7C,0x12,0x04,0x10,0x43,0x0A,0x7D,0x12,0x06,0x10,0x43, \ +0x70,0x61,0x8A,0x7D,0x48,0x7D,0x12,0x02,0x10,0x43,0xCA,0x7D,0x09,0x7E,0x12, \ +0x04,0x10,0x43,0x09,0x06,0x08,0x43,0xB0,0x61,0x81,0x20,0xB0,0x60,0x20,0x68, \ +0x0E,0x22,0x81,0x89,0x40,0x68,0x18,0x30,0x20,0x39,0x01,0xF0,0x42,0xFC,0x00, \ +0x28,0x06,0xD1,0x08,0x48,0x01,0x21,0x01,0x73,0xC7,0x72,0xB7,0x60,0x28,0x1C, \ +0xA3,0xE5,0x20,0x68,0x81,0x89,0x08,0x39,0x81,0x81,0x03,0x49,0x03,0x20,0xC8, \ +0x72,0x02,0x20,0x08,0x73,0x28,0x1C,0x98,0xE5,0x50,0x09,0x00,0x02,0x00,0xB5, \ +0x03,0x49,0x01,0x20,0x48,0x60,0x01,0xF0,0xB0,0xF8,0x00,0xBD,0x00,0x00,0x50, \ +0x09,0x00,0x02,0xF0,0xB5,0x3D,0x4F,0x01,0x24,0x78,0x68,0x04,0x28,0x0C,0xD1, \ +0x01,0x20,0x01,0xF0,0xA3,0xF8,0x7C,0x60,0x01,0x20,0xFF,0xF7,0xAF,0xFA,0x00, \ +0x22,0x01,0x21,0x37,0x48,0x03,0xF0,0xE8,0xF8,0xF0,0xBD,0x78,0x68,0x02,0x28, \ +0xFB,0xD1,0x01,0x20,0x01,0xF0,0x93,0xF8,0x7C,0x60,0x78,0x6E,0x08,0x23,0x41, \ 
+0x78,0x32,0x4C,0x99,0x43,0x41,0x70,0x2F,0x49,0x89,0x89,0xB9,0x87,0x22,0x78, \ +0x2F,0x49,0x01,0x2A,0x45,0xD1,0x2F,0x4B,0x1A,0x78,0x00,0x2A,0x01,0xD0,0x18, \ +0x25,0x00,0xE0,0x1E,0x25,0x2C,0x4E,0x36,0x88,0x75,0x1B,0x2C,0x4E,0x36,0x68, \ +0xAD,0x19,0x2A,0x4E,0x01,0x32,0x35,0x60,0x1A,0x70,0x1A,0x78,0x86,0x7D,0x12, \ +0x07,0x12,0x0F,0x1D,0x1C,0xF0,0x23,0x33,0x40,0x1A,0x43,0x82,0x75,0x42,0x78, \ +0xD2,0x09,0x03,0xD3,0x22,0x4A,0x13,0x68,0x08,0x3B,0x13,0x60,0x21,0x4B,0x2A, \ +0x78,0x1B,0x88,0x9A,0x42,0x0F,0xD1,0x20,0x4A,0x1C,0x4E,0x12,0x88,0x04,0x23, \ +0x32,0x80,0x42,0x78,0x9A,0x43,0x42,0x70,0x02,0x20,0x20,0x70,0x08,0x68,0x80, \ +0x7D,0x31,0x88,0x00,0xF0,0x48,0xFF,0xF8,0x66,0x15,0x4E,0x30,0x88,0xB8,0x66, \ +0x20,0x78,0x02,0x28,0x04,0xD0,0x01,0x21,0x01,0x20,0x00,0xF0,0x2D,0xF8,0xF0, \ +0xBD,0x01,0x21,0x00,0x20,0x00,0xF0,0x28,0xF8,0xF0,0xBD,0x09,0x68,0x00,0x20, \ +0x48,0x73,0x0F,0x49,0x09,0x68,0x48,0x70,0x01,0x20,0xFF,0xF7,0x46,0xFA,0x00, \ +0x22,0x10,0x21,0x0C,0x48,0x03,0xF0,0x7F,0xF8,0xF0,0xBD,0x50,0x09,0x00,0x02, \ +0x44,0x07,0x00,0x02,0xC4,0x00,0x00,0x02,0x9D,0x01,0x00,0x02,0xCC,0x01,0x00, \ +0x02,0x9E,0x01,0x00,0x02,0xA0,0x01,0x00,0x02,0xA8,0x01,0x00,0x02,0xA4,0x01, \ +0x00,0x02,0xA2,0x01,0x00,0x02,0xD8,0x01,0x00,0x02,0x04,0x07,0x00,0x02,0xF0, \ +0xB5,0x30,0x4D,0x04,0x1C,0x28,0x68,0x0F,0x1C,0x80,0x7D,0x2E,0x49,0x08,0x70, \ +0x00,0xF0,0x66,0xFF,0x2C,0x49,0x08,0x78,0x03,0x28,0x04,0xD1,0x2B,0x48,0x40, \ +0x6B,0xFF,0xF7,0x58,0xFA,0x02,0xE0,0x00,0x20,0xFF,0xF7,0x54,0xFA,0x28,0x68, \ +0x85,0x7D,0x27,0x48,0x80,0x7A,0x85,0x42,0x00,0xDB,0x05,0x1C,0x23,0x48,0x00, \ +0x78,0x01,0xF0,0x85,0xFB,0x24,0x4A,0x24,0x49,0x10,0x60,0xC9,0x88,0x49,0x00, \ +0x6B,0x00,0x23,0x4D,0xEB,0x5A,0xC9,0x18,0x00,0x2C,0x11,0xD0,0x00,0x2F,0x10, \ +0xD0,0x20,0x4C,0x21,0x4D,0x24,0x88,0x2D,0x78,0x0B,0x18,0x01,0x3C,0xAC,0x42, \ +0x03,0xD1,0x1E,0x4C,0x24,0x68,0x1E,0x19,0x04,0xE0,0x15,0x4C,0xE4,0x6E,0xE6, \ +0x18,0x00,0xE0,0x00,0x26,0x13,0x4C,0x14,0x4A,0xE3,0x6E,0x18,0x18,0x10,0x60, \ +0x18,0x4A,0x12,0x88,0x10,0x18,0x45,0x18,0x00,0x2F,0x07,0xD0,0x60,0x6E,0x0C, \ +0x49,0x02,0x30,0x33,0x1C,0x00,0x22,0x09,0x78,0xFF,0xF7,0x3E,0xFB,0x01,0x20, \ +0x29,0x1C,0x00,0xF0,0xA8,0xFF,0x02,0x20,0x60,0x60,0x01,0x20,0x0F,0x49,0xE0, \ +0x75,0x09,0x88,0xE0,0x6E,0x06,0x4A,0x40,0x18,0x10,0x60,0xF0,0xBD,0x00,0x00, \ +0xCC,0x01,0x00,0x02,0x9B,0x01,0x00,0x02,0x50,0x09,0x00,0x02,0x08,0x01,0x00, \ +0x02,0x94,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xB8,0x01,0x00,0x02,0xA4,0x01, \ +0x00,0x02,0x9E,0x01,0x00,0x02,0xAC,0x01,0x00,0x02,0xB6,0x01,0x00,0x02,0xB4, \ +0x01,0x00,0x02,0x00,0xB5,0x06,0x48,0x40,0x68,0x03,0x28,0x06,0xD1,0x01,0x20, \ +0x00,0xF0,0x96,0xFF,0x00,0x21,0x01,0x20,0xFF,0xF7,0x7A,0xFF,0x00,0xBD,0x00, \ +0x00,0x50,0x09,0x00,0x02,0xB0,0xB5,0x17,0x4C,0x61,0x68,0x4A,0x68,0x13,0x78, \ +0x1D,0x07,0x2D,0x0F,0xC1,0x27,0x00,0x2D,0x22,0xD1,0x1D,0x11,0x0D,0x2D,0x1F, \ +0xD2,0x02,0xA3,0x5B,0x5D,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x1B,0x0B,0x1B,0x0B, \ +0x12,0x07,0x1B,0x1B,0x07,0x1B,0x0F,0x0B,0x0F,0x00,0x10,0x1C,0x00,0xF0,0x18, \ +0xF8,0xB0,0xBD,0x00,0x28,0x01,0xD0,0x4F,0x72,0xB0,0xBD,0xFF,0xF7,0x03,0xFC, \ +0xB0,0xBD,0x06,0x48,0x40,0x78,0x00,0x28,0x01,0xD0,0x07,0xF0,0x1A,0xFC,0x60, \ +0x68,0x47,0x72,0xB0,0xBD,0x4F,0x72,0xB0,0xBD,0x00,0x00,0x4C,0x01,0x00,0x02, \ +0xA0,0x09,0x00,0x02,0xF0,0xB5,0x30,0x4C,0x07,0x1C,0xA0,0x79,0x01,0x28,0x02, \ +0xD1,0x38,0x1C,0x07,0xF0,0xF9,0xFB,0x38,0x78,0xC1,0x25,0x80,0x28,0x12,0xD1, \ +0xA0,0x79,0x03,0x28,0x05,0xD1,0x06,0xF0,0xC6,0xFF,0x00,0x28,0x01,0xD1,0x28, \ +0x1C,0xF0,0xBD,0xA0,0x79,0x04,0x28,0x06,0xD1,0x01,0x20,0x06,0xF0,0xB0,0xFE, \ 
+0x00,0x28,0x01,0xD1,0x28,0x1C,0xF0,0xBD,0xA0,0x79,0x05,0x28,0x01,0xD0,0x00, \ +0x20,0xF0,0xBD,0x1F,0x48,0x06,0x22,0xC6,0x1D,0xC1,0x1D,0x29,0x36,0x07,0x31, \ +0xF8,0x1D,0x09,0x30,0x34,0x79,0x03,0xF0,0x8A,0xFC,0x00,0x28,0x27,0xD1,0x02, \ +0x2C,0x01,0xD0,0x01,0x2C,0x21,0xD1,0xC0,0x20,0x02,0xF0,0x5F,0xFF,0x07,0x1C, \ +0x02,0x2C,0x02,0xD1,0x06,0x20,0x00,0xF0,0x1B,0xFF,0x00,0x20,0x00,0xF0,0x24, \ +0xF8,0x02,0x2C,0x05,0xD1,0x70,0x79,0x01,0x28,0x0B,0xDD,0x08,0xF0,0x45,0xF8, \ +0x08,0xE0,0x0C,0x48,0x00,0x88,0x84,0x02,0x00,0xF0,0xCD,0xFE,0x21,0x1A,0x06, \ +0x20,0x00,0xF0,0xED,0xFE,0x38,0x1C,0x02,0xF0,0x42,0xFF,0x00,0x20,0xF0,0xBD, \ +0x28,0x1C,0xF0,0xBD,0x01,0x2C,0x02,0xD1,0x07,0xF0,0x3A,0xFC,0xF0,0xBD,0x28, \ +0x1C,0xF0,0xBD,0x00,0x00,0xA0,0x09,0x00,0x02,0x84,0x00,0x00,0x02,0xF1,0xB5, \ +0x83,0xB0,0x3E,0x49,0x00,0x25,0x4B,0x68,0x02,0x93,0x59,0x68,0x4A,0x7E,0x0F, \ +0x7E,0x12,0x02,0x3A,0x43,0x8F,0x7E,0x3F,0x04,0x3A,0x43,0xCF,0x7E,0x3F,0x06, \ +0x3A,0x43,0x16,0x1C,0x4F,0x7F,0x0A,0x7F,0x3F,0x02,0x3A,0x43,0x8F,0x7F,0xC9, \ +0x7F,0x3F,0x04,0x3A,0x43,0x09,0x06,0x0A,0x43,0x99,0x89,0x18,0x39,0xCC,0x00, \ +0x99,0x7B,0x17,0x1C,0x00,0x29,0x26,0xD0,0x01,0x29,0x26,0xD0,0x02,0x29,0x26, \ +0xD0,0x03,0x29,0x0C,0xD1,0x0B,0x20,0x21,0x1C,0x03,0xF0,0x89,0xFC,0x00,0x91, \ +0x61,0x1A,0x0B,0x20,0x03,0xF0,0x84,0xFC,0x00,0x99,0x00,0x29,0x00,0xD9,0x01, \ +0x30,0x01,0x24,0xA4,0x06,0xA2,0x6A,0x61,0x6A,0x02,0x9B,0x30,0x18,0x5B,0x69, \ +0xCB,0x1A,0xC0,0x18,0xB0,0x42,0x00,0xD2,0x01,0x37,0x06,0x1C,0x1F,0x48,0x03, \ +0x79,0x00,0x20,0x02,0x2B,0x14,0xD1,0x01,0x25,0x1F,0xE0,0x20,0x1C,0xE9,0xE7, \ +0x60,0x08,0xE7,0xE7,0x61,0x00,0x01,0x91,0x0B,0x20,0x03,0xF0,0x63,0xFC,0x0C, \ +0x1C,0x01,0x99,0x09,0x1B,0x0B,0x20,0x03,0xF0,0x5D,0xFC,0x00,0x2C,0xDA,0xD9, \ +0x01,0x30,0xD8,0xE7,0x01,0x2B,0x0A,0xD1,0x12,0x4B,0x97,0x42,0x58,0x70,0x01, \ +0xD9,0x01,0x25,0x04,0xE0,0x97,0x42,0x02,0xD1,0x8E,0x42,0x00,0xD9,0x01,0x25, \ +0x03,0x9A,0x00,0x2A,0x03,0xD0,0x00,0x2D,0x03,0xD1,0x04,0xB0,0xF0,0xBD,0x00, \ +0x2D,0x09,0xD0,0x70,0x1A,0x00,0xF0,0x10,0xF8,0x01,0x23,0xDE,0x42,0x01,0xD1, \ +0x00,0x26,0x01,0x37,0xA7,0x62,0x66,0x62,0x01,0x20,0xEF,0xE7,0x00,0x00,0x4C, \ +0x01,0x00,0x02,0xB4,0x00,0x00,0x02,0xA0,0x09,0x00,0x02,0x90,0xB4,0x10,0x4A, \ +0x00,0x21,0x97,0x69,0x91,0x61,0x01,0x21,0x0E,0x4B,0x8C,0x00,0xE3,0x18,0xDC, \ +0x6A,0x01,0x31,0x24,0x18,0xDC,0x62,0x08,0x29,0xF6,0xD9,0x0B,0x49,0x0B,0x6B, \ +0x1B,0x18,0x0B,0x63,0x0B,0x6B,0x5B,0x00,0x5B,0x08,0x0B,0x63,0xCB,0x6A,0x18, \ +0x18,0xC8,0x62,0xC8,0x6A,0x40,0x00,0x40,0x08,0xC8,0x62,0x97,0x61,0x90,0xBC, \ +0xF7,0x46,0x00,0x00,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04,0x50,0x09,0x00, \ +0x02,0x00,0xB5,0x08,0x29,0x01,0xD1,0xFF,0xF7,0xD9,0xFA,0x00,0xBD,0x0B,0x49, \ +0x09,0x68,0x49,0x69,0x08,0x18,0x0A,0x49,0x4A,0x7A,0x05,0x2A,0x02,0xD1,0x8A, \ +0x6B,0x82,0x42,0x0A,0xD2,0x05,0x22,0x4A,0x72,0x02,0x1C,0x06,0x48,0x80,0x23, \ +0xC2,0x60,0x82,0x69,0x1A,0x43,0x82,0x61,0xC0,0x68,0x88,0x63,0xF7,0x46,0x00, \ +0x00,0x4C,0x01,0x00,0x02,0x50,0x09,0x00,0x02,0x80,0x00,0x00,0x04,0xF0,0xB5, \ +0x02,0x79,0x35,0x4C,0x87,0x78,0xFE,0x21,0x11,0x40,0xE5,0x88,0x03,0x23,0x9B, \ +0x03,0x9D,0x43,0x2B,0x1C,0x00,0x29,0x03,0xD0,0xCD,0x00,0x01,0x3D,0x9D,0x42, \ +0x05,0xD2,0x45,0x78,0x6D,0x18,0xED,0x00,0x18,0x3D,0x9D,0x42,0x01,0xD8,0x00, \ +0x25,0x08,0xE0,0xC9,0x00,0x59,0x1A,0xC9,0x08,0x5E,0x07,0x76,0x0F,0x41,0x18, \ +0x49,0x79,0xF1,0x40,0x0D,0x1C,0x00,0x23,0x26,0x49,0x52,0x08,0x8B,0x70,0x05, \ +0xD3,0x00,0x2F,0x03,0xD1,0x8A,0x78,0x02,0x23,0x1A,0x43,0x8A,0x70,0xEA,0x07, \ +0xD2,0x0F,0x03,0xD0,0x8D,0x78,0x04,0x23,0x2B,0x43,0x8B,0x70,0xE3,0x1D,0x29, \ 
+0x33,0x5B,0x79,0x01,0x25,0x02,0x2B,0x1D,0xD1,0x8E,0x78,0x08,0x23,0x33,0x43, \ +0x8B,0x70,0x00,0x2A,0x03,0xD0,0x0A,0x78,0x00,0x2A,0x00,0xD1,0x4D,0x70,0x00, \ +0x2F,0x00,0xD1,0xC7,0x78,0x15,0x48,0x40,0x8B,0xB8,0x42,0x00,0xD8,0x07,0x1C, \ +0x00,0xF0,0x96,0xFD,0x21,0x88,0x12,0x4B,0x4F,0x43,0xB9,0x02,0x08,0x1A,0xC1, \ +0x18,0x06,0x20,0x00,0xF0,0xB1,0xFD,0xF0,0xBD,0x88,0x78,0xC0,0x08,0x00,0xD3, \ +0x8D,0x71,0x88,0x78,0x40,0x08,0x80,0x07,0x07,0xD1,0x0A,0x48,0x80,0x69,0x80, \ +0x08,0x03,0xD2,0x88,0x78,0x08,0x23,0x18,0x43,0x88,0x70,0x88,0x78,0x04,0x23, \ +0x98,0x43,0x88,0x70,0xF0,0xBD,0x00,0x00,0x84,0x00,0x00,0x02,0xC0,0x09,0x00, \ +0x02,0xC4,0x00,0x00,0x02,0x48,0xF4,0xFF,0xFF,0x80,0x00,0x00,0x04,0xF0,0xB5, \ +0x82,0xB0,0x36,0x48,0x34,0x4E,0xC5,0x1D,0x09,0x35,0x33,0x4C,0xC7,0x1D,0x69, \ +0x37,0x00,0x22,0xD2,0x43,0x00,0x92,0x01,0x22,0x01,0xAB,0x31,0x48,0x32,0x49, \ +0x02,0xF0,0xE0,0xFE,0x01,0x98,0x41,0x0A,0x0C,0xD3,0x80,0x20,0x38,0x71,0x00, \ +0x20,0x78,0x71,0x38,0x79,0x00,0x0A,0x4C,0xD3,0x07,0xF0,0xFA,0xFB,0x38,0x79, \ +0x00,0x0A,0xFA,0xD2,0x46,0xE0,0x41,0x08,0x0F,0xD3,0x30,0x1C,0xFF,0xF7,0xED, \ +0xF8,0x27,0x48,0x41,0x6C,0x09,0x78,0x40,0x29,0x3C,0xD0,0x07,0xF0,0xA6,0xF8, \ +0x23,0x48,0x40,0x6C,0x00,0x78,0x40,0x28,0xF8,0xD1,0x34,0xE0,0x41,0x0D,0x03, \ +0xD3,0x40,0x20,0x06,0xF0,0x69,0xFF,0x2E,0xE0,0x41,0x09,0x03,0xD3,0x50,0x20, \ +0x06,0xF0,0x63,0xFF,0x28,0xE0,0x40,0x0F,0x03,0xD3,0x80,0x20,0x06,0xF0,0x5D, \ +0xFF,0x22,0xE0,0x00,0x21,0x79,0x22,0x52,0x05,0x17,0x48,0x91,0x82,0x10,0x82, \ +0x91,0x80,0x64,0x20,0x10,0x80,0x02,0x20,0x90,0x82,0x12,0x48,0x21,0x72,0x81, \ +0x6B,0x09,0x7B,0x09,0x0A,0x06,0xD3,0x00,0xF0,0x21,0xF8,0x0E,0x48,0x80,0x6B, \ +0x00,0x7B,0x00,0x0A,0xF8,0xD2,0xC0,0x20,0x02,0xF0,0x87,0xFD,0x00,0x21,0x79, \ +0x22,0x52,0x05,0x91,0x82,0x11,0x83,0x21,0x72,0x02,0xF0,0x7F,0xFD,0x00,0x20, \ +0xA8,0x73,0x9C,0xE7,0x26,0x08,0x00,0x02,0xD4,0x01,0x00,0x02,0x50,0x09,0x00, \ +0x02,0x24,0x07,0x00,0x02,0x11,0x11,0x10,0x10,0x94,0x01,0x00,0x02,0xA0,0x8C, \ +0x00,0x00,0xF0,0xB5,0x83,0xB0,0x87,0x4D,0x00,0x24,0xA8,0x6B,0x47,0x68,0x39, \ +0x79,0x49,0x08,0x01,0xD3,0x01,0x26,0x00,0xE0,0x00,0x26,0x82,0x4D,0x69,0x7A, \ +0x00,0x29,0x73,0xD1,0x81,0x4A,0xD1,0x78,0x00,0x29,0x0C,0xD0,0x39,0x78,0x08, \ +0x29,0x09,0xD1,0x7F,0x4A,0x91,0x78,0x00,0x29,0x05,0xD0,0x81,0x7D,0xD3,0x78, \ +0x99,0x42,0x01,0xDD,0xD1,0x78,0x81,0x75,0x78,0x4B,0x7A,0x49,0x9D,0x6B,0x00, \ +0x22,0x68,0x68,0x00,0x2E,0x48,0x66,0x9A,0x72,0x65,0xD1,0x77,0x48,0xA9,0x8A, \ +0x00,0x89,0x04,0x38,0x81,0x42,0x60,0xDD,0x70,0x4A,0x01,0x21,0x51,0x72,0xF9, \ +0x1D,0x17,0x31,0x51,0x61,0x6D,0x49,0x04,0x04,0x24,0x0C,0x8C,0x81,0xA8,0x8A, \ +0x01,0x1B,0xE0,0x1F,0x11,0x38,0x02,0x90,0x03,0xF0,0xC9,0xFA,0x68,0x49,0x01, \ +0x30,0x08,0x82,0xA8,0x8A,0x01,0x1B,0x02,0x98,0x03,0xF0,0xC1,0xFA,0xC8,0x1D, \ +0x63,0x49,0x11,0x30,0xC8,0x81,0xC8,0x89,0x18,0x28,0x04,0xD1,0x60,0x4B,0xDC, \ +0x81,0x18,0x8A,0x01,0x38,0x18,0x82,0x78,0x78,0xC0,0x09,0x06,0xD3,0x5C,0x4B, \ +0xE0,0x1D,0x01,0x30,0x98,0x81,0xD8,0x89,0x08,0x30,0xD8,0x81,0x59,0x4C,0xA8, \ +0x7D,0xE1,0x89,0x00,0xF0,0xC4,0xFB,0xA0,0x61,0xA0,0x6B,0x80,0x7D,0xA1,0x89, \ +0x00,0xF0,0xBE,0xFB,0x56,0x49,0x54,0x4A,0xC8,0x66,0xA0,0x89,0x88,0x66,0xA0, \ +0x6B,0x80,0x7D,0x92,0x7A,0x90,0x42,0x00,0xDA,0x02,0x1C,0x53,0x4B,0x52,0x00, \ +0x9A,0x5A,0x52,0x4B,0xDB,0x88,0x5B,0x00,0xD2,0x18,0x23,0x8A,0xA5,0x7A,0x01, \ +0x3B,0xAB,0x42,0x04,0xD1,0xA1,0x69,0x54,0x18,0x00,0xE0,0x25,0xE0,0x01,0xE0, \ +0xC9,0x6E,0x8C,0x18,0x01,0xF0,0x34,0xF8,0x04,0x19,0x78,0x78,0x04,0x23,0x18, \ +0x43,0x78,0x70,0x12,0xE0,0xFF,0xE7,0x40,0x48,0x42,0x49,0x42,0x72,0xA8,0x8A, \ 
+0x88,0x66,0x78,0x78,0xC0,0x09,0x02,0xD3,0x88,0x6E,0x08,0x30,0x88,0x66,0x3D, \ +0x49,0x89,0x6E,0xA8,0x7D,0x00,0xF0,0x87,0xFB,0x3B,0x49,0xC8,0x66,0x37,0x48, \ +0x32,0x1C,0x80,0x6B,0x81,0x7D,0xB8,0x1C,0x23,0x1C,0xFE,0xF7,0xFD,0xFF,0x30, \ +0x1C,0x00,0xF0,0xF6,0xFA,0x00,0x28,0x0A,0xD0,0x02,0x20,0x33,0x49,0xC2,0x1E, \ +0x48,0x74,0x00,0x92,0x01,0x22,0x11,0x21,0x34,0x48,0x01,0xAB,0x02,0xF0,0xBD, \ +0xFD,0x2E,0x48,0x00,0x24,0x2A,0x4D,0x44,0x74,0xA8,0x6B,0x41,0x7B,0x00,0x29, \ +0x0C,0xD1,0x38,0x1C,0x00,0xF0,0x70,0xF8,0x27,0x4A,0x54,0x70,0x10,0x78,0x01, \ +0x30,0x10,0x70,0x00,0xF0,0xDF,0xFB,0x00,0xF0,0x55,0xF8,0x3E,0xE0,0xE9,0x1D, \ +0x39,0x31,0x0A,0x7A,0x01,0x2A,0x05,0xD1,0x08,0x22,0x42,0x73,0x0C,0x72,0x00, \ +0xF0,0x4A,0xF8,0x33,0xE0,0x40,0x7B,0x04,0x28,0x1F,0xD0,0x00,0xF0,0xBC,0xFB, \ +0xA8,0x6B,0x81,0x7B,0x01,0x31,0x81,0x73,0x78,0x78,0x08,0x23,0x18,0x43,0x78, \ +0x70,0x38,0x78,0x08,0x28,0x12,0xD1,0x14,0x48,0xC0,0x78,0x00,0x28,0x0E,0xD0, \ +0x13,0x4A,0x18,0x4B,0x50,0x78,0x01,0x30,0x00,0x06,0x00,0x0E,0x50,0x70,0xA9, \ +0x6B,0x89,0x7D,0x59,0x5C,0x88,0x42,0x00,0xDD,0x14,0x70,0x00,0xF0,0xAE,0xFB, \ +0xA8,0x6B,0x0D,0x4A,0x81,0x7B,0x12,0x7C,0x91,0x42,0x04,0xDA,0x44,0x73,0xA9, \ +0x6B,0x82,0x20,0x08,0x73,0x05,0xE0,0x01,0x21,0x38,0x1C,0x00,0xF0,0x29,0xF8, \ +0x00,0xF0,0x15,0xF8,0x03,0xB0,0xF0,0xBD,0x00,0x00,0x94,0x01,0x00,0x02,0x08, \ +0x01,0x00,0x02,0xD0,0x01,0x00,0x02,0x50,0x09,0x00,0x02,0xC4,0x00,0x00,0x02, \ +0xB8,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0x04,0x07,0x00,0x02,0xDE,0x01,0x00, \ +0x02,0x05,0x48,0x00,0x21,0x41,0x72,0x81,0x72,0x04,0x49,0x05,0x4A,0x89,0x89, \ +0x91,0x87,0x80,0x6B,0x10,0x21,0x01,0x73,0xF7,0x46,0x94,0x01,0x00,0x02,0xC4, \ +0x00,0x00,0x02,0x50,0x09,0x00,0x02,0x80,0xB4,0x09,0x4A,0x01,0x27,0x53,0x79, \ +0x08,0x4A,0x03,0x2B,0x02,0xD1,0xD7,0x70,0x80,0xBC,0xF7,0x46,0x40,0x78,0x40, \ +0x09,0xFA,0xD3,0x00,0x29,0x02,0xD1,0x00,0x20,0xD0,0x70,0xF5,0xE7,0xD7,0x70, \ +0xF3,0xE7,0xB4,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0x90,0xB5,0xC0,0x20,0x02, \ +0xF0,0x1C,0xFC,0x0A,0x4C,0x03,0x21,0xA1,0x73,0x02,0xF0,0x17,0xFC,0x60,0x7F, \ +0x01,0x28,0x0C,0xD0,0xC0,0x20,0x02,0xF0,0x11,0xFC,0x07,0x1C,0xA0,0x7B,0x03, \ +0x28,0x02,0xD1,0x00,0x20,0x00,0xF0,0x8C,0xF9,0x38,0x1C,0x02,0xF0,0x07,0xFC, \ +0x90,0xBD,0x50,0x09,0x00,0x02,0x90,0xB5,0xFE,0xF7,0xCF,0xFD,0x1E,0x4F,0xF9, \ +0x6A,0x40,0x1A,0x41,0x00,0x78,0x7F,0x49,0x08,0x01,0x28,0x01,0xD1,0xB8,0x6A, \ +0x00,0xE0,0x78,0x6A,0x3B,0x68,0x19,0x4A,0x00,0x2B,0x1C,0xD1,0x84,0x00,0x93, \ +0x8B,0x24,0x18,0xA4,0x00,0xE2,0x18,0x51,0x1A,0x8A,0x42,0x00,0xD2,0x11,0x1C, \ +0x00,0x28,0x0F,0xD1,0x01,0x20,0x78,0x72,0xB8,0x7B,0x03,0x28,0x05,0xD1,0x0D, \ +0x29,0x04,0xD9,0xC8,0x1F,0x01,0x38,0x00,0xF0,0x5E,0xF9,0x90,0xBD,0x00,0x20, \ +0x00,0xF0,0x5A,0xF9,0x90,0xBD,0x3B,0x62,0x09,0xE0,0x83,0x00,0xD2,0x8B,0x18, \ +0x18,0x80,0x00,0x80,0x18,0x41,0x1A,0x88,0x42,0x00,0xD2,0x01,0x1C,0x3A,0x62, \ +0x08,0x20,0x00,0xF0,0x70,0xFB,0x04,0x20,0x78,0x72,0x90,0xBD,0x00,0x00,0x50, \ +0x09,0x00,0x02,0x94,0x01,0x00,0x02,0x00,0xB5,0x04,0x49,0x02,0x0A,0x8A,0x74, \ +0xC8,0x74,0x03,0x21,0x11,0x20,0xFE,0xF7,0xDB,0xFD,0x00,0xBD,0xD8,0x07,0x00, \ +0x02,0xB0,0xB5,0x82,0xB0,0x11,0x4D,0x01,0x20,0x68,0x74,0x11,0x4F,0x11,0x48, \ +0x00,0x24,0xBC,0x82,0x38,0x82,0xBC,0x80,0x1E,0x20,0x38,0x80,0x02,0x20,0xB8, \ +0x82,0xC2,0x1E,0x00,0x92,0x01,0x22,0x1A,0x21,0x0C,0x48,0x01,0xAB,0x02,0xF0, \ +0xA7,0xFC,0x6C,0x74,0x3C,0x83,0xBC,0x82,0x01,0x98,0x81,0x08,0x06,0xD3,0x00, \ +0x09,0x02,0xD3,0x82,0x20,0x02,0xB0,0xB0,0xBD,0x20,0x1C,0xFB,0xE7,0x42,0x20, \ +0xF9,0xE7,0x50,0x09,0x00,0x02,0x20,0x00,0x20,0x0F,0xA0,0x8C,0x00,0x00,0xE4, \ 
+0x06,0x00,0x02,0xF0,0xB5,0xFF,0x20,0x01,0x25,0xAD,0x06,0xF5,0x30,0x29,0x69, \ +0x89,0x08,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD8,0x6D,0x4E,0x00, \ +0x27,0xB0,0x7D,0x6D,0x4C,0x00,0x28,0x11,0xD0,0x30,0x6E,0xFF,0xF7,0xB1,0xFF, \ +0x60,0x79,0x11,0x23,0x9B,0x02,0x03,0x43,0x03,0x22,0xF1,0x6D,0xB0,0x6D,0x12, \ +0x03,0x00,0xF0,0xD9,0xF8,0xB7,0x75,0x01,0x20,0xFE,0xF7,0x35,0xFD,0xF0,0xBD, \ +0xF0,0x7B,0x00,0x28,0x1A,0xD0,0xA0,0x6B,0x61,0x49,0x80,0x7D,0x89,0x7A,0x88, \ +0x42,0x00,0xDB,0x08,0x1C,0x5F,0x49,0x40,0x00,0x08,0x5A,0xFF,0xF7,0x93,0xFF, \ +0x60,0x79,0x11,0x23,0x9B,0x02,0x03,0x43,0x03,0x22,0x12,0x03,0x10,0x21,0x5A, \ +0x48,0x00,0xF0,0xBB,0xF8,0x01,0x20,0xFE,0xF7,0x18,0xFD,0xF7,0x73,0xF0,0xBD, \ +0xF0,0x7D,0x00,0x28,0xDD,0xD0,0x51,0x4E,0xF0,0x6E,0xFF,0xF7,0x7D,0xFF,0x76, \ +0x6E,0x70,0x78,0xC0,0x09,0x4E,0xD3,0x4D,0x4A,0xD0,0x7A,0x00,0x28,0x00,0xD0, \ +0xD7,0x72,0x07,0x20,0x40,0x06,0x81,0x69,0x08,0x23,0x19,0x43,0x81,0x61,0x81, \ +0x69,0x99,0x43,0x81,0x61,0xE8,0x68,0x00,0xF0,0x92,0xFF,0x01,0x23,0x9B,0x03, \ +0x9A,0x08,0x1C,0x21,0x47,0x48,0x00,0xF0,0x93,0xF8,0xFF,0x20,0x46,0x49,0xF5, \ +0x30,0x4A,0x68,0xD2,0x0B,0x03,0xD3,0x02,0x1C,0x01,0x38,0x00,0x2A,0xF8,0xD1, \ +0x43,0x48,0x03,0x21,0x00,0x78,0x49,0x06,0x02,0x28,0x02,0xD1,0x81,0x20,0x88, \ +0x60,0x01,0xE0,0x01,0x20,0x88,0x60,0x62,0x7A,0x00,0x2A,0x0F,0xD0,0xA0,0x7A, \ +0x00,0x28,0x03,0xD0,0x00,0x28,0x01,0xDD,0x60,0x69,0x01,0xE0,0x60,0x69,0x06, \ +0x38,0xA1,0x89,0x20,0x39,0x02,0x2A,0x08,0xD1,0x2E,0x4A,0xD7,0x75,0x05,0xE0, \ +0x2D,0x4A,0x50,0x6E,0x91,0x6E,0x18,0x30,0x20,0x39,0xD7,0x75,0x62,0x79,0x17, \ +0x23,0x9B,0x02,0x13,0x43,0x01,0x22,0x52,0x03,0x00,0xF0,0x5D,0xF8,0x33,0xE0, \ +0x60,0x7A,0x00,0x28,0x23,0xD0,0xA0,0x7A,0x00,0x28,0x01,0xD0,0x18,0x25,0x00, \ +0xE0,0x1E,0x25,0x01,0x23,0x9B,0x03,0x20,0x48,0x9A,0x08,0x29,0x1C,0x40,0x6E, \ +0x00,0xF0,0x4B,0xF8,0xFF,0x20,0x22,0x49,0xF5,0x30,0x4A,0x68,0xD2,0x0B,0x03, \ +0xD3,0x02,0x1C,0x01,0x38,0x00,0x2A,0xF8,0xD1,0x60,0x79,0x11,0x23,0x9B,0x02, \ +0x03,0x43,0xA0,0x89,0x41,0x1B,0x01,0x22,0x52,0x03,0x60,0x69,0x00,0xF0,0x36, \ +0xF8,0x0A,0xE0,0x60,0x79,0x11,0x23,0x9B,0x02,0x03,0x43,0x10,0x48,0x03,0x22, \ +0x81,0x6E,0x40,0x6E,0x12,0x03,0x00,0xF0,0x2A,0xF8,0x0C,0x48,0xC7,0x75,0x0B, \ +0x48,0x42,0x7F,0xC1,0x1D,0x09,0x31,0x01,0x2A,0x00,0xD1,0x4F,0x73,0x50,0x30, \ +0x07,0x71,0x30,0x79,0x40,0x08,0x02,0xD2,0x60,0x7A,0x01,0x28,0x03,0xD1,0x01, \ +0x20,0xFE,0xF7,0x76,0xFC,0xF0,0xBD,0x00,0x20,0xFE,0xF7,0x72,0xFC,0xF0,0xBD, \ +0x00,0x00,0x50,0x09,0x00,0x02,0x94,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0xC0, \ +0x01,0x00,0x02,0xF8,0x07,0x00,0x02,0x64,0x07,0x00,0x02,0x40,0x00,0x00,0x04, \ +0x5C,0x00,0x00,0x02,0xB0,0xB4,0x06,0x4C,0x1F,0x1C,0x65,0x68,0xEB,0x0B,0x04, \ +0xD2,0x0A,0x43,0x21,0x05,0x4A,0x63,0x88,0x63,0x67,0x60,0xB0,0xBC,0xF7,0x46, \ +0x00,0x00,0x40,0x00,0x00,0x04,0xF0,0xB5,0x52,0x49,0x07,0x1C,0x8A,0x7A,0x00, \ +0x20,0x00,0x2A,0x61,0xD1,0x0A,0x7A,0x00,0x2A,0x6B,0xD0,0x4A,0x7A,0x01,0x2A, \ +0x5B,0xD1,0x0A,0x7B,0x01,0x2A,0x58,0xD1,0xCA,0x7A,0x00,0x2A,0x55,0xD1,0xCE, \ +0x1D,0x49,0x36,0xF1,0x78,0xF5,0x1F,0x39,0x3D,0x00,0x29,0x0F,0xD1,0x45,0x49, \ +0xCA,0x1D,0x69,0x32,0x12,0x78,0x00,0x2A,0x09,0xD1,0x6A,0x7B,0x01,0x2A,0x06, \ +0xD0,0x32,0x79,0x00,0x2A,0x03,0xD1,0x0C,0x1C,0x89,0x7C,0x00,0x29,0x09,0xD0, \ +0x3E,0x4A,0x3F,0x4B,0xD1,0x79,0xD9,0x71,0x3B,0x49,0x88,0x75,0x01,0x20,0xC8, \ +0x75,0xCC,0x6E,0x21,0xE0,0x21,0x7F,0x00,0x29,0x12,0xD0,0x39,0x4B,0x37,0x4A, \ +0x99,0x6B,0x89,0x7D,0x92,0x7A,0x91,0x42,0x01,0xDA,0xD9,0x71,0x00,0xE0,0xDA, \ +0x71,0xA0,0x75,0x01,0x21,0xE1,0x73,0xE0,0x75,0xD8,0x79,0x32,0x49,0x40,0x00, \ 
+0x0C,0x5A,0x0B,0xE0,0xE2,0x7E,0x21,0x1C,0x00,0x2A,0x27,0xD0,0xCC,0x6E,0x88, \ +0x75,0x01,0x20,0x2C,0x4B,0xC8,0x75,0x98,0x6B,0x80,0x7D,0xD8,0x71,0x00,0xF0, \ +0x40,0xF9,0x26,0x4B,0xD8,0x7B,0x00,0x28,0x03,0xD0,0x00,0x20,0xFE,0xF7,0x33, \ +0xFC,0x0C,0xE0,0x24,0x4B,0xD8,0x79,0x03,0x28,0x05,0xD1,0x20,0x4B,0x58,0x6B, \ +0xFE,0xF7,0x2A,0xFC,0x03,0xE0,0x3A,0xE0,0x00,0x20,0xFE,0xF7,0x25,0xFC,0x01, \ +0x21,0x89,0x06,0x00,0x2F,0x05,0xD0,0x05,0x2F,0x03,0xD9,0x48,0x6A,0x38,0x18, \ +0x02,0xE0,0x2D,0xE0,0x48,0x6A,0x0A,0x30,0x16,0x4B,0x02,0x22,0x9A,0x73,0x18, \ +0x4B,0x92,0x03,0x5A,0x60,0x08,0x62,0x6A,0x7B,0x01,0x2A,0x02,0xD0,0x32,0x79, \ +0x00,0x2A,0x15,0xD0,0x10,0x4A,0x8D,0x6A,0x4E,0x6A,0xD1,0x79,0x13,0x4A,0x12, \ +0x4F,0x89,0x00,0x51,0x58,0x0D,0x4B,0x08,0x18,0x38,0x60,0xD8,0x79,0x00,0xF0, \ +0x3A,0xFD,0x39,0x68,0x40,0x18,0x38,0x60,0xB0,0x42,0x00,0xD2,0x01,0x35,0x7D, \ +0x60,0x38,0x1D,0x06,0x4F,0x3C,0x60,0xF8,0x79,0x00,0xF0,0x2D,0xFD,0x39,0x68, \ +0x40,0x18,0x38,0x60,0x01,0x20,0xF0,0xBD,0x50,0x09,0x00,0x02,0x08,0x01,0x00, \ +0x02,0x94,0x01,0x00,0x02,0xC0,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0x28,0x08, \ +0x00,0x02,0x28,0x09,0x00,0x02,0xF8,0xB5,0x38,0x49,0x04,0x1C,0x88,0x6B,0x37, \ +0x4A,0x85,0x7D,0x46,0x68,0x92,0x7A,0x00,0x27,0x95,0x42,0x00,0xDB,0x15,0x1C, \ +0x34,0x49,0x80,0x8A,0x49,0x89,0x88,0x42,0x2E,0xDD,0x00,0x2C,0x2C,0xD1,0x2F, \ +0x49,0x88,0x7A,0x00,0x28,0x28,0xD1,0x30,0x49,0xB4,0x20,0x08,0x70,0x2F,0x48, \ +0x30,0x4A,0xC0,0x88,0x41,0x00,0x09,0x18,0x68,0x00,0x10,0x5A,0x40,0x00,0x08, \ +0x18,0x2D,0x49,0xC9,0x6E,0x40,0x18,0x28,0x49,0x48,0x80,0x28,0x1C,0x00,0xF0, \ +0xEF,0xFC,0x26,0x49,0x49,0x88,0x40,0x18,0x24,0x49,0x48,0x80,0x31,0x1D,0x06, \ +0x22,0x26,0x48,0x02,0xF0,0xD5,0xFE,0xF1,0x1D,0x03,0x31,0x06,0x22,0x24,0x48, \ +0x02,0xF0,0xCF,0xFE,0x01,0x20,0x20,0x49,0x01,0x26,0x08,0x77,0x03,0xE0,0x01, \ +0x20,0x1E,0x49,0x00,0x26,0xC8,0x76,0xFF,0xF7,0x64,0xFD,0xFF,0xF7,0xCE,0xFD, \ +0x00,0x90,0x00,0x98,0x00,0x28,0x1E,0xD1,0x12,0x49,0x00,0x2E,0x8A,0x6B,0x50, \ +0x73,0x01,0xD1,0x00,0x2C,0x01,0xD0,0x01,0x2E,0x19,0xD1,0x13,0x4A,0x68,0x00, \ +0x10,0x5A,0x10,0x4A,0xD2,0x88,0x49,0x8C,0x80,0x18,0x41,0x18,0x01,0x20,0x00, \ +0xF0,0x12,0xF9,0x01,0x2E,0x03,0xD1,0x0D,0x49,0x03,0x20,0x48,0x60,0x02,0xE0, \ +0x0B,0x49,0x02,0x20,0x48,0x60,0x01,0x27,0x03,0xE0,0x03,0x49,0x04,0x20,0x89, \ +0x6B,0x48,0x73,0x38,0x1C,0xF8,0xBD,0x00,0x00,0x94,0x01,0x00,0x02,0x08,0x01, \ +0x00,0x02,0xC4,0x00,0x00,0x02,0xF8,0x07,0x00,0x02,0x00,0x00,0x00,0x02,0xB8, \ +0x01,0x00,0x02,0x50,0x09,0x00,0x02,0xFC,0x07,0x00,0x02,0x02,0x08,0x00,0x02, \ +0x90,0xB5,0x04,0x31,0xCF,0x00,0x01,0x28,0x17,0xD0,0x02,0x28,0x17,0xD0,0x03, \ +0x28,0x27,0xD1,0x0B,0x20,0x39,0x1C,0x02,0xF0,0xBD,0xFE,0x0C,0x1C,0x79,0x1A, \ +0x0B,0x20,0x02,0xF0,0xB8,0xFE,0x07,0x1C,0x00,0x21,0x00,0x2C,0x0F,0x48,0x18, \ +0xD9,0x01,0x37,0x04,0x2C,0x13,0xD2,0x01,0x21,0x41,0x63,0x13,0xE0,0x7F,0x08, \ +0x11,0xE0,0x79,0x00,0x0B,0x20,0x0F,0x1C,0x02,0xF0,0xA6,0xFE,0x0C,0x1C,0x79, \ +0x1A,0x0B,0x20,0x02,0xF0,0xA1,0xFE,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37, \ +0x02,0xE0,0x41,0x63,0x00,0xE0,0x41,0x63,0x38,0x1C,0x90,0xBD,0x00,0x00,0x50, \ +0x09,0x00,0x02,0xFF,0x21,0x10,0x48,0x31,0x31,0x01,0x80,0x0F,0x49,0x09,0x8C, \ +0xCA,0x1D,0x31,0x32,0x42,0x80,0xCA,0x1D,0x0E,0x32,0x82,0x80,0x0B,0x31,0xC1, \ +0x80,0x0B,0x48,0xA0,0x21,0x01,0x80,0x50,0x21,0x41,0x80,0x1E,0x21,0x81,0x80, \ +0x0F,0x21,0xC1,0x80,0x08,0x48,0xC0,0x21,0x01,0x60,0x60,0x21,0x41,0x60,0x23, \ +0x21,0x81,0x60,0x12,0x21,0xC1,0x60,0xF7,0x46,0x00,0x00,0xB8,0x01,0x00,0x02, \ +0x94,0x01,0x00,0x02,0xC0,0x01,0x00,0x02,0x28,0x09,0x00,0x02,0x00,0xB5,0x07, \ 
+0x48,0xC1,0x79,0x82,0x79,0x91,0x42,0x07,0xD0,0xC1,0x79,0x81,0x71,0x82,0x79, \ +0x04,0x49,0x89,0x5C,0x41,0x71,0xFE,0xF7,0xBA,0xFA,0x00,0xBD,0x00,0x00,0x94, \ +0x01,0x00,0x02,0xC8,0x01,0x00,0x02,0x05,0x48,0x81,0x8F,0x49,0x00,0x01,0x31, \ +0x81,0x87,0x04,0x49,0x82,0x8F,0xC9,0x89,0x8A,0x42,0x00,0xDD,0x81,0x87,0xF7, \ +0x46,0x50,0x09,0x00,0x02,0xC4,0x00,0x00,0x02,0x1A,0x49,0x19,0x48,0x89,0x6B, \ +0x1A,0x4B,0x89,0x7D,0x42,0x78,0x5B,0x5C,0x00,0x21,0x9A,0x42,0x15,0xDD,0x41, \ +0x70,0x01,0x70,0xC2,0x78,0x01,0x21,0x00,0x2A,0x0D,0xDD,0xC2,0x78,0x04,0x2A, \ +0x0A,0xDA,0xC2,0x78,0x01,0x3A,0xC2,0x70,0xC2,0x78,0x00,0x2A,0x04,0xD1,0x10, \ +0x4A,0x52,0x7A,0x01,0x2A,0x00,0xD1,0xC1,0x70,0x81,0x70,0xF7,0x46,0x82,0x78, \ +0x00,0x2A,0xFB,0xD0,0x02,0x78,0x02,0x2A,0xF8,0xDD,0x41,0x70,0x01,0x70,0xC2, \ +0x78,0x01,0x32,0x12,0x06,0x12,0x0E,0xC2,0x70,0x03,0x2A,0xEF,0xDD,0x81,0x70, \ +0x03,0x21,0xC1,0x70,0xF7,0x46,0x00,0x00,0xD0,0x01,0x00,0x02,0x94,0x01,0x00, \ +0x02,0xDE,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0xB5,0x02,0xF0,0x43,0xFE, \ +0x02,0x49,0x8A,0x8F,0x10,0x40,0x48,0x62,0x00,0xBD,0x50,0x09,0x00,0x02,0xB0, \ +0xB5,0x01,0x20,0x80,0x06,0x85,0x6A,0x41,0x6A,0x0E,0x48,0x00,0x88,0x84,0x02, \ +0x20,0x1C,0x02,0xF0,0xF5,0xFD,0x0F,0x1C,0x00,0x2D,0x10,0xD9,0x20,0x1C,0x29, \ +0x1C,0x02,0xF0,0xEE,0xFD,0x0D,0x1C,0x00,0x21,0xC9,0x43,0x20,0x1C,0x02,0xF0, \ +0xE8,0xFD,0x48,0x1C,0x45,0x43,0xE9,0x19,0x20,0x1C,0x02,0xF0,0xE2,0xFD,0x0F, \ +0x1C,0x38,0x1C,0xB0,0xBD,0x00,0x00,0x84,0x00,0x00,0x02,0x90,0xB5,0x0C,0x1C, \ +0x07,0x1C,0x00,0xF0,0x15,0xF8,0x01,0x20,0x80,0x06,0x40,0x6A,0x06,0x4B,0x20, \ +0x18,0xB9,0x00,0xC9,0x18,0xC8,0x62,0x01,0x21,0x78,0x1E,0x81,0x40,0x03,0x48, \ +0x82,0x69,0x11,0x43,0x81,0x61,0x90,0xBD,0x00,0x00,0x40,0x00,0x00,0x04,0x80, \ +0x00,0x00,0x04,0x80,0xB4,0x47,0x1E,0x01,0x20,0x04,0x49,0xB8,0x40,0x8A,0x69, \ +0xC0,0x43,0x10,0x40,0x88,0x61,0x80,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00,0x00, \ +0x04,0xF0,0xB5,0x84,0xB0,0x0F,0x20,0x00,0x06,0x00,0x88,0xE1,0x4C,0x00,0x27, \ +0x03,0x90,0xE0,0x7C,0x00,0x28,0x04,0xD0,0x03,0x98,0x05,0xF0,0x38,0xFE,0x00, \ +0x28,0x60,0xD1,0x03,0x98,0xDC,0x4B,0x18,0x40,0x1C,0xD0,0xDB,0x48,0x00,0x68, \ +0x02,0x90,0x02,0x98,0x80,0x08,0x02,0xD2,0x02,0x98,0x40,0x08,0x13,0xD3,0x01, \ +0x20,0x80,0x06,0x00,0x6B,0x02,0x99,0x40,0x00,0x40,0x08,0xC9,0x08,0x05,0xD3, \ +0xE0,0x62,0x02,0x27,0x07,0x20,0xFF,0xF7,0xC8,0xFF,0x05,0xE0,0x20,0x63,0x01, \ +0x27,0xFA,0x21,0x07,0x20,0xFF,0xF7,0xA7,0xFF,0x03,0x98,0xCD,0x4B,0xCA,0x49, \ +0x18,0x40,0xCD,0x1D,0xCE,0x1D,0x49,0x36,0x09,0x35,0x00,0x28,0x5C,0xD0,0xD8, \ +0x04,0xC1,0x6B,0x01,0x91,0x01,0x99,0x89,0x09,0x0D,0xD3,0x00,0x6A,0x40,0x00, \ +0x40,0x08,0x20,0x63,0xA0,0x7A,0x00,0x28,0x01,0xD0,0x00,0xF0,0xD0,0xFA,0xFF, \ +0xF7,0x5E,0xFC,0x01,0x27,0x01,0x22,0x62,0x73,0x01,0x98,0x12,0x23,0x18,0x40, \ +0x44,0xD0,0x00,0x20,0x60,0x73,0xA0,0x7A,0x00,0x28,0x19,0xD0,0x01,0x98,0x80, \ +0x08,0x0E,0xD3,0x20,0x6B,0x21,0x6E,0x40,0x18,0xE0,0x62,0xB8,0x48,0x00,0x78, \ +0x00,0xF0,0x1D,0xFB,0xE1,0x6A,0x40,0x18,0xE0,0x62,0xE0,0x6A,0x40,0x00,0x40, \ +0x08,0xE0,0x62,0x01,0x20,0xFE,0xF7,0x95,0xF9,0x00,0x20,0xA0,0x72,0xA0,0x75, \ +0x0C,0xE0,0x4C,0xE1,0xFF,0xF7,0x36,0xFF,0x01,0x98,0x80,0x08,0x06,0xD3,0xAD, \ +0x49,0x20,0x6B,0x09,0x68,0x40,0x18,0x40,0x00,0x40,0x08,0xE0,0x62,0x00,0x2F, \ +0x00,0xD1,0x02,0x27,0x01,0x98,0x40,0x09,0x02,0xD3,0x01,0x20,0xFE,0xF7,0x7B, \ +0xF9,0xA0,0x7B,0x02,0x28,0x0D,0xD1,0x68,0x7B,0x01,0x28,0x01,0xD1,0x00,0x20, \ +0x68,0x73,0x00,0x20,0x30,0x71,0x00,0xF0,0x33,0xFA,0x01,0x99,0x9F,0x48,0x00, \ +0x22,0x01,0xF0,0xA8,0xFF,0x03,0x98,0x9E,0x4B,0x18,0x40,0x73,0xD0,0x18,0x05, \ 
+0xC0,0x68,0x00,0x90,0x00,0x98,0x40,0x09,0x15,0xD3,0xE0,0x7A,0x03,0x28,0x12, \ +0xD1,0x04,0x20,0xE0,0x72,0x00,0x98,0x19,0x05,0xC9,0x68,0x96,0x4A,0x08,0x43, \ +0x00,0x90,0x10,0x68,0x40,0x68,0x40,0x78,0xC0,0x09,0x05,0xD3,0x00,0x98,0x40, \ +0x08,0x02,0xD2,0x92,0x49,0x00,0x20,0x48,0x71,0x00,0x98,0x80,0x08,0x3F,0xD3, \ +0x07,0x20,0xFF,0xF7,0x34,0xFF,0xB0,0x79,0x01,0x28,0x0E,0xD1,0xE0,0x1D,0x69, \ +0x30,0x81,0x7A,0x01,0x29,0x09,0xD1,0x02,0x21,0x81,0x72,0x89,0x48,0x01,0x8B, \ +0xC0,0x8A,0x08,0x1A,0x81,0x02,0x04,0x20,0xFF,0xF7,0x08,0xFF,0x60,0x7A,0x06, \ +0x28,0x04,0xD1,0x02,0x21,0x61,0x72,0x08,0x20,0xFF,0xF7,0x1A,0xFF,0x00,0x20, \ +0x80,0x49,0x01,0x22,0xC8,0x80,0x22,0x73,0xE0,0x72,0xA0,0x72,0xA0,0x75,0x20, \ +0x74,0x08,0x71,0x4A,0x71,0xFE,0xF7,0xD1,0xFB,0x00,0x99,0x08,0x43,0x00,0x90, \ +0x60,0x68,0x04,0x28,0x0F,0xD1,0x01,0x20,0xFF,0xF7,0x04,0xFF,0x20,0x7B,0x01, \ +0x28,0x09,0xD1,0xE0,0x7A,0x00,0x28,0x06,0xD1,0xFE,0xF7,0x46,0xFE,0x00,0x22, \ +0x10,0x21,0x72,0x48,0x01,0xF0,0x45,0xFF,0x00,0x98,0x80,0x09,0x73,0xD3,0x01, \ +0x20,0x20,0x73,0x20,0x74,0x02,0x27,0x6A,0x4A,0x80,0x06,0xC1,0x6A,0x12,0x68, \ +0x51,0x61,0xC1,0x6A,0x49,0x00,0x49,0x08,0xE1,0x62,0x61,0x7A,0x05,0x29,0x0C, \ +0xD1,0xA1,0x6B,0x00,0xE0,0xA1,0xE0,0x40,0x6A,0x81,0x42,0x06,0xD2,0x02,0x21, \ +0x65,0x48,0x61,0x72,0x81,0x69,0x80,0x23,0x99,0x43,0x81,0x61,0x00,0x98,0xC0, \ +0x08,0x0E,0xD3,0x5D,0x48,0x01,0x21,0x01,0x71,0xC1,0x88,0x00,0x29,0x33,0xDD, \ +0xC1,0x88,0x01,0x23,0xDB,0x03,0x99,0x42,0x2E,0xDA,0xC0,0x88,0xFF,0xF7,0x86, \ +0xF8,0x2A,0xE0,0x56,0x49,0x00,0x20,0x08,0x71,0xA0,0x72,0xC1,0x20,0x20,0x60, \ +0x01,0x20,0xFE,0xF7,0xCC,0xF8,0x20,0x7E,0x01,0x28,0x14,0xD1,0x61,0x7E,0x00, \ +0x29,0x00,0xD0,0x00,0x20,0x60,0x76,0x51,0x48,0xC1,0x78,0x89,0x06,0x89,0x0E, \ +0xC1,0x70,0x61,0x7E,0x01,0x29,0x03,0xD1,0xC1,0x78,0x40,0x23,0x19,0x43,0xC1, \ +0x70,0xC1,0x78,0x03,0x20,0xFE,0xF7,0x3E,0xF9,0x60,0x68,0x04,0x28,0x06,0xD1, \ +0xFE,0xF7,0xE9,0xFD,0x00,0x22,0x10,0x21,0x44,0x48,0x01,0xF0,0xE8,0xFE,0xA0, \ +0x7E,0x00,0x28,0x22,0xD0,0x3F,0x48,0x00,0x79,0x00,0x28,0x1E,0xD0,0x20,0x7C, \ +0x00,0x28,0x1B,0xD0,0x20,0x68,0x00,0x28,0x18,0xD1,0x06,0x20,0xFF,0xF7,0x8A, \ +0xFE,0x00,0x20,0xE8,0x73,0xA0,0x76,0x70,0x70,0x69,0x7B,0x01,0x29,0x0D,0xD1, \ +0x68,0x73,0xA0,0x7B,0x00,0xE0,0x0C,0xE0,0x03,0x28,0x08,0xD1,0x01,0x20,0xA0, \ +0x73,0x00,0x22,0x10,0x21,0x2D,0x48,0x01,0xF0,0xC4,0xFE,0x00,0xE0,0x68,0x73, \ +0x00,0x20,0xA0,0x76,0xE0,0x7A,0x04,0x28,0x2F,0xD1,0x20,0x7C,0x00,0x28,0x2C, \ +0xD0,0x60,0x7B,0x00,0x28,0x02,0xD1,0x00,0x2F,0x00,0xD1,0x02,0x27,0x00,0x20, \ +0xE0,0x72,0x25,0x4D,0x20,0x74,0x29,0x79,0x01,0x29,0x15,0xD1,0x21,0x68,0x00, \ +0x29,0x12,0xD1,0x69,0x79,0x00,0x29,0x0F,0xD0,0x1F,0x4A,0x11,0x68,0x48,0x72, \ +0xA9,0x68,0xE9,0x60,0x28,0x70,0xFE,0xF7,0xE3,0xF9,0x28,0x78,0x01,0x28,0x04, \ +0xD1,0x00,0x22,0x01,0x21,0x1B,0x48,0x01,0xF0,0x97,0xFE,0x60,0x68,0x04,0x28, \ +0x06,0xD1,0xFE,0xF7,0x8E,0xFD,0x00,0x22,0x10,0x21,0x16,0x48,0x01,0xF0,0x8D, \ +0xFE,0x01,0x2F,0x02,0xD1,0x00,0xF0,0x1B,0xF9,0x03,0xE0,0x02,0x2F,0x01,0xD1, \ +0x00,0xF0,0x46,0xF9,0x03,0x98,0x00,0xF0,0x27,0xF8,0x03,0x98,0x11,0x4B,0x18, \ +0x40,0x01,0xD0,0x01,0xF0,0x71,0xF8,0x04,0xB0,0xF0,0xBD,0x00,0x00,0x50,0x09, \ +0x00,0x02,0x20,0x20,0x00,0x00,0x40,0x00,0x00,0x04,0x80,0x80,0x00,0x00,0x9B, \ +0x01,0x00,0x02,0x94,0x01,0x00,0x02,0xE4,0x06,0x00,0x02,0x40,0x40,0x00,0x00, \ +0x4C,0x01,0x00,0x02,0xE4,0x01,0x00,0x02,0xC4,0x00,0x00,0x02,0x44,0x07,0x00, \ +0x02,0x80,0x00,0x00,0x04,0xD8,0x07,0x00,0x02,0x08,0x08,0x00,0x00,0xF0,0xB5, \ +0x64,0x4B,0x07,0x1C,0x18,0x40,0x01,0x25,0x00,0x28,0x62,0x4E,0x6E,0xD0,0x62, \ 
+0x49,0xCC,0x69,0x60,0x08,0x3E,0xD3,0x88,0x69,0x40,0x08,0x3B,0xD3,0x88,0x69, \ +0xA8,0x43,0x88,0x61,0x5E,0x49,0x48,0x68,0x04,0x28,0x0A,0xD1,0xFE,0xF7,0x3E, \ +0xFD,0x01,0x20,0xFE,0xF7,0x01,0xF8,0x00,0x22,0x10,0x21,0x5A,0x48,0x01,0xF0, \ +0x3A,0xFE,0x29,0xE0,0x4A,0x68,0x58,0x48,0x02,0x2A,0x05,0xD1,0x00,0x68,0x45, \ +0x73,0x57,0x48,0x00,0x68,0x45,0x70,0x05,0xE0,0x4A,0x68,0x03,0x2A,0x02,0xD1, \ +0x00,0x68,0x02,0x22,0x42,0x73,0x4F,0x48,0x45,0x60,0x00,0xF0,0xAB,0xF8,0x01, \ +0x20,0xFD,0xF7,0xE4,0xFF,0x4B,0x48,0x40,0x7C,0x01,0x28,0x05,0xD1,0x00,0x22, \ +0x10,0x21,0x30,0x1C,0x01,0xF0,0x19,0xFE,0x08,0xE0,0x46,0x49,0x48,0x7C,0x02, \ +0x28,0x04,0xD1,0x00,0x22,0x10,0x21,0x47,0x48,0x01,0xF0,0x0F,0xFE,0x41,0x48, \ +0x80,0x69,0xC0,0x09,0x03,0xD3,0xE0,0x09,0x01,0xD3,0x07,0xF0,0x3B,0xFA,0x3D, \ +0x48,0x80,0x69,0x00,0x0A,0x32,0xD3,0x20,0x0A,0x30,0xD3,0x3A,0x48,0x80,0x23, \ +0x81,0x69,0x99,0x43,0x81,0x61,0x38,0x48,0x41,0x7A,0x05,0x29,0x02,0xD0,0x41, \ +0x7A,0x06,0x29,0x15,0xD1,0x00,0x21,0x81,0x63,0x01,0x7A,0x01,0x29,0x0D,0xD1, \ +0x45,0x72,0x81,0x7A,0x00,0x29,0x1B,0xD1,0x01,0x7B,0x01,0x29,0x18,0xD1,0xC0, \ +0x7A,0x00,0x28,0x15,0xD1,0xFF,0xF7,0xD0,0xF9,0x12,0xE0,0x3A,0xE0,0x02,0x22, \ +0x42,0x72,0x0E,0xE0,0x45,0x72,0x41,0x7F,0x01,0x29,0x02,0xD1,0x00,0x21,0x81, \ +0x62,0x01,0xE0,0x00,0x21,0x41,0x62,0x80,0x7B,0x03,0x28,0x02,0xD1,0x00,0x20, \ +0xFF,0xF7,0x42,0xFB,0x20,0x09,0x05,0xD3,0x20,0x48,0x80,0x69,0x00,0x09,0x01, \ +0xD3,0x04,0xF0,0x48,0xF8,0xA0,0x08,0x16,0xD3,0x1C,0x48,0x81,0x69,0x89,0x08, \ +0x12,0xD3,0x81,0x69,0x02,0x23,0x99,0x43,0x81,0x61,0x19,0x48,0xC1,0x1D,0x49, \ +0x31,0x89,0x79,0x05,0x29,0x08,0xD1,0x1B,0x49,0x49,0x79,0x03,0x29,0x04,0xD1, \ +0x70,0x30,0x81,0x78,0x08,0x23,0x19,0x43,0x81,0x70,0xA0,0x09,0x05,0xD3,0x10, \ +0x48,0x80,0x69,0x80,0x09,0x01,0xD3,0x07,0xF0,0x7F,0xF9,0x14,0x48,0x38,0x40, \ +0x06,0xD0,0x13,0x48,0x00,0x21,0x05,0x70,0x79,0x20,0x40,0x05,0x01,0x83,0x81, \ +0x82,0xFF,0x20,0x02,0x30,0x38,0x40,0x06,0xD0,0x07,0xF0,0xB3,0xF9,0x00,0x22, \ +0x10,0x21,0x30,0x1C,0x01,0xF0,0x92,0xFD,0xF0,0xBD,0x00,0x00,0x10,0x10,0x00, \ +0x00,0xE4,0x06,0x00,0x02,0x80,0x00,0x00,0x04,0x50,0x09,0x00,0x02,0x44,0x07, \ +0x00,0x02,0xCC,0x01,0x00,0x02,0xD8,0x01,0x00,0x02,0x04,0x07,0x00,0x02,0xB4, \ +0x00,0x00,0x02,0x02,0x02,0x00,0x00,0xDC,0x01,0x00,0x02,0x04,0x48,0x01,0x21, \ +0x81,0x73,0x00,0x21,0xC1,0x75,0xC1,0x73,0xC1,0x76,0x01,0x77,0xF7,0x46,0x00, \ +0x00,0x50,0x09,0x00,0x02,0x80,0xB5,0x16,0x4F,0x00,0x20,0x38,0x72,0x79,0x7A, \ +0x02,0x20,0x01,0x29,0x1C,0xD0,0x04,0x29,0x19,0xD1,0x78,0x72,0x08,0x20,0xFF, \ +0xF7,0x12,0xFD,0x38,0x6B,0xF9,0x6A,0x40,0x1A,0x40,0x00,0x39,0x6A,0x40,0x08, \ +0x81,0x42,0x0D,0xD2,0x39,0x6A,0x41,0x1A,0x14,0x20,0x02,0xF0,0xC7,0xFA,0x79, \ +0x7F,0x01,0x29,0x08,0xD1,0xB9,0x6A,0x81,0x42,0x02,0xD3,0xB9,0x6A,0x08,0x1A, \ +0xB8,0x62,0x80,0xBD,0x78,0x72,0x80,0xBD,0x79,0x6A,0x81,0x42,0xF9,0xD3,0x79, \ +0x6A,0x08,0x1A,0x78,0x62,0x80,0xBD,0x50,0x09,0x00,0x02,0x00,0xB5,0x0A,0x48, \ +0x01,0x21,0x01,0x72,0x01,0x7B,0x01,0x29,0x0D,0xD1,0xC1,0x7A,0x00,0x29,0x0A, \ +0xD1,0x81,0x7A,0x00,0x29,0x07,0xD1,0x41,0x7A,0x06,0x29,0x04,0xD0,0x40,0x7A, \ +0x05,0x28,0x01,0xD0,0xFF,0xF7,0x15,0xF9,0x00,0xBD,0x50,0x09,0x00,0x02,0xB0, \ +0xB5,0x20,0x4F,0x20,0x48,0x79,0x7D,0x80,0x7A,0x20,0x4C,0x81,0x42,0x02,0xDA, \ +0x78,0x7D,0x20,0x70,0x00,0xE0,0x20,0x70,0xFF,0xF7,0x22,0xFC,0x20,0x78,0x03, \ +0x28,0x03,0xD1,0x01,0x20,0xFD,0xF7,0x16,0xFF,0x02,0xE0,0x00,0x20,0xFD,0xF7, \ +0x12,0xFF,0xB8,0x7A,0x17,0x4D,0x02,0x28,0x02,0xD1,0xC4,0x20,0x28,0x70,0x04, \ +0xE0,0xB8,0x7A,0x01,0x28,0x01,0xD1,0xD4,0x20,0x28,0x70,0x00,0x20,0x68,0x70, \ 
+0x69,0x88,0x11,0x48,0x00,0x29,0x07,0xD0,0x23,0x78,0x10,0x4A,0x5B,0x00,0xC3, \ +0x5A,0xD2,0x88,0xD2,0x18,0x89,0x1A,0x69,0x80,0x0A,0x21,0xF9,0x65,0x21,0x78, \ +0x49,0x00,0x40,0x5A,0x38,0x66,0x20,0x78,0x00,0xF0,0x2B,0xF8,0x39,0x6E,0x08, \ +0x1A,0x38,0x66,0xBD,0x65,0x01,0x20,0xB8,0x75,0xB0,0xBD,0x50,0x09,0x00,0x02, \ +0x08,0x01,0x00,0x02,0x9B,0x01,0x00,0x02,0x38,0x09,0x00,0x02,0xB8,0x01,0x00, \ +0x02,0x00,0x00,0x00,0x02,0xB0,0xB4,0x0A,0x4B,0x00,0x24,0x99,0x42,0x01,0xD8, \ +0x00,0x29,0x02,0xD1,0x20,0x1C,0xB0,0xBC,0xF7,0x46,0x01,0x27,0xBF,0x06,0x3D, \ +0x69,0xAB,0x08,0x01,0xD3,0x20,0x1C,0xF6,0xE7,0x03,0xC7,0x08,0x3F,0x3A,0x61, \ +0x01,0x20,0xF1,0xE7,0x0E,0x06,0x00,0x00,0x00,0x28,0x01,0xD1,0xC0,0x20,0xF7, \ +0x46,0x01,0x48,0x00,0x88,0xF7,0x46,0x00,0x00,0xB4,0x01,0x00,0x02,0xF8,0xB5, \ +0x41,0x48,0x00,0x90,0x41,0x48,0xC4,0x1D,0x49,0x34,0xC7,0x1D,0x09,0x37,0x3F, \ +0x4E,0x40,0x4D,0x30,0x68,0x00,0x7A,0x20,0x28,0x01,0xD1,0xFE,0xF7,0xB6,0xF8, \ +0x00,0xF0,0x76,0xFC,0x00,0xF0,0xEE,0xF8,0x3B,0x48,0x00,0x78,0x00,0x28,0x04, \ +0xD1,0xA0,0x79,0x05,0x28,0x01,0xD0,0x06,0xF0,0x47,0xFB,0x28,0x78,0x00,0x28, \ +0xE9,0xD0,0xB8,0x7B,0x00,0x28,0xE6,0xD1,0x35,0x48,0x01,0x78,0x01,0x29,0x03, \ +0xD1,0x00,0x21,0x01,0x70,0x03,0xF0,0xDB,0xFF,0x32,0x48,0x00,0x78,0x02,0x28, \ +0x46,0xD0,0x31,0x48,0x00,0x78,0x02,0x28,0x01,0xD1,0x06,0xF0,0x65,0xFB,0x06, \ +0xF0,0x8B,0xFC,0x05,0x1C,0x29,0x48,0x00,0x78,0x01,0x28,0x09,0xD1,0x03,0x03, \ +0x9D,0x42,0x03,0xD1,0x2A,0x49,0x10,0x20,0x09,0x68,0x08,0x73,0x00,0x21,0xB9, \ +0x73,0xC1,0xE7,0x00,0x2D,0x23,0xD0,0x01,0x23,0x1B,0x03,0x9D,0x42,0x08,0xD0, \ +0x24,0x48,0x80,0x21,0x02,0x68,0x11,0x70,0x02,0x68,0x00,0x21,0x51,0x70,0x00, \ +0x68,0x81,0x70,0xA0,0x79,0x05,0x28,0x0D,0xD1,0x00,0x98,0x40,0x79,0x01,0x28, \ +0x09,0xDD,0xC0,0x20,0x01,0xF0,0x47,0xFC,0x06,0x1C,0x28,0x1C,0x06,0xF0,0x63, \ +0xFB,0x30,0x1C,0x01,0xF0,0x40,0xFC,0x29,0x1C,0x00,0x22,0x17,0x48,0x01,0xF0, \ +0x4B,0xFC,0x9B,0xE7,0x00,0x98,0x40,0x79,0x01,0x28,0x97,0xDD,0xA0,0x79,0x05, \ +0x28,0x94,0xD1,0x00,0xF0,0x25,0xF8,0x91,0xE7,0x06,0xF0,0x4A,0xFC,0x01,0x23, \ +0x1B,0x03,0x98,0x42,0x03,0xD1,0x0B,0x49,0x10,0x20,0x09,0x68,0x08,0x73,0x00, \ +0x21,0xB9,0x73,0x84,0xE7,0x00,0x00,0xB4,0x00,0x00,0x02,0x50,0x09,0x00,0x02, \ +0x50,0x01,0x00,0x02,0x5E,0x02,0x00,0x02,0xBB,0x02,0x00,0x02,0xDD,0x01,0x00, \ +0x02,0x53,0x02,0x00,0x02,0x40,0x01,0x00,0x02,0xCC,0x01,0x00,0x02,0xD8,0x01, \ +0x00,0x02,0x24,0x07,0x00,0x02,0x80,0xB5,0xC0,0x20,0x01,0xF0,0x08,0xFC,0x07, \ +0x1C,0x0D,0x48,0x81,0x78,0x49,0x08,0x89,0x07,0x11,0xD1,0x81,0x78,0x09,0x09, \ +0x0E,0xD3,0x0A,0x49,0x09,0x68,0x09,0x7B,0x09,0x0A,0x09,0xD2,0xC1,0x78,0x00, \ +0x29,0x04,0xD0,0x00,0x21,0xC1,0x70,0x01,0x21,0x81,0x71,0x01,0xE0,0x06,0xF0, \ +0x42,0xFB,0x38,0x1C,0x01,0xF0,0xED,0xFB,0x80,0xBD,0xC0,0x09,0x00,0x02,0xCC, \ +0x01,0x00,0x02,0xB0,0xB5,0x1C,0x4C,0x01,0x20,0x1C,0x4D,0xA0,0x77,0x28,0x68, \ +0x00,0xF0,0x46,0xFE,0x29,0x68,0x00,0x20,0x4F,0x68,0x88,0x73,0x18,0x49,0x8A, \ +0x78,0x00,0x2A,0x00,0xD1,0x48,0x70,0x38,0x78,0x08,0x28,0x19,0xD1,0x20,0x7D, \ +0x01,0x28,0x06,0xD1,0x06,0x22,0xF8,0x1D,0x09,0x30,0x12,0x49,0x02,0xF0,0x0B, \ +0xF9,0x0F,0xE0,0x20,0x7D,0x02,0x28,0x0C,0xD1,0x10,0x48,0x40,0x79,0x02,0x28, \ +0x08,0xD1,0xE0,0x1D,0x49,0x30,0x80,0x79,0x05,0x28,0x03,0xD1,0x78,0x78,0x10, \ +0x23,0x18,0x43,0x78,0x70,0xF8,0x1D,0x0F,0x30,0xFD,0xF7,0xE2,0xFE,0x38,0x1C, \ +0x06,0xF0,0x57,0xF8,0x29,0x68,0x80,0x20,0x08,0x73,0x40,0x01,0xB0,0xBD,0x50, \ +0x09,0x00,0x02,0xCC,0x01,0x00,0x02,0xD0,0x01,0x00,0x02,0x00,0x01,0x00,0x02, \ +0xB4,0x00,0x00,0x02,0x00,0xB5,0x05,0x48,0x01,0x78,0x00,0x29,0x04,0xD0,0x40, \ 
+0x78,0x00,0x28,0x01,0xD1,0x03,0xF0,0xCE,0xFE,0x00,0xBD,0x00,0x00,0xD0,0x09, \ +0x00,0x02,0xF0,0xB5,0x2E,0x48,0x47,0x6E,0xFD,0xF7,0x5F,0xFD,0x01,0x02,0x2C, \ +0x4C,0x09,0x0A,0x2C,0x48,0x21,0x60,0x43,0x78,0x2C,0x4A,0x13,0x70,0x15,0x78, \ +0x0D,0x23,0x6B,0x43,0x1B,0x18,0x1B,0x7B,0x1B,0x06,0x0B,0x43,0x03,0x21,0x49, \ +0x06,0x0B,0x60,0x15,0x78,0x0D,0x23,0x6B,0x43,0x1B,0x18,0x9E,0x7B,0x5D,0x7B, \ +0x36,0x02,0x35,0x43,0xDE,0x7B,0x1B,0x7C,0x36,0x04,0x35,0x43,0x1B,0x06,0x2B, \ +0x43,0x4B,0x60,0xC3,0x1D,0x39,0x33,0x1B,0x78,0x02,0x2B,0x1D,0xD1,0x15,0x78, \ +0x0D,0x23,0x6B,0x43,0x1B,0x18,0x9E,0x7C,0x5D,0x7C,0x36,0x02,0x35,0x43,0xDE, \ +0x7C,0x1B,0x7D,0x36,0x04,0x35,0x43,0x1B,0x06,0x2B,0x43,0x4B,0x61,0x15,0x78, \ +0x0D,0x23,0x6B,0x43,0x18,0x18,0x85,0x7D,0x43,0x7D,0x2D,0x02,0x2B,0x43,0xC5, \ +0x7D,0x00,0x7E,0x2D,0x04,0x2B,0x43,0x00,0x06,0x18,0x43,0x88,0x61,0x10,0x78, \ +0x21,0x68,0x0D,0x4A,0x80,0x07,0x01,0x43,0x21,0x60,0x00,0x20,0x3B,0x5C,0x13, \ +0x54,0x01,0x30,0x18,0x28,0xFA,0xD3,0x11,0x76,0x08,0x0A,0x50,0x76,0x08,0x0C, \ +0x90,0x76,0x08,0x0E,0xD0,0x76,0xF0,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0xF4, \ +0x01,0x00,0x02,0x1C,0x00,0x00,0x02,0x98,0x01,0x00,0x02,0x64,0x07,0x00,0x02, \ +0x80,0xB4,0x11,0x4A,0x11,0x88,0x01,0x31,0x09,0x04,0x09,0x0C,0x1E,0x29,0x00, \ +0xD1,0x00,0x21,0x0E,0x4F,0x0E,0x4B,0x4F,0x43,0xFB,0x18,0x1F,0x7B,0x00,0x2F, \ +0x11,0xD1,0x11,0x80,0x0C,0x49,0x03,0x22,0x19,0x60,0xD9,0x1D,0x15,0x31,0x59, \ +0x60,0x08,0x39,0x99,0x60,0x00,0x21,0x19,0x73,0x99,0x73,0x9A,0x75,0x99,0x82, \ +0x03,0x60,0x40,0x21,0x01,0x73,0x18,0x1C,0x80,0xBC,0xF7,0x46,0xF8,0x01,0x00, \ +0x02,0xA4,0x06,0x00,0x00,0x00,0x11,0x00,0x02,0x00,0x00,0x00,0x80,0x80,0xB4, \ +0x13,0x4A,0x51,0x88,0x01,0x31,0x09,0x04,0x09,0x0C,0x14,0x29,0x00,0xD1,0x00, \ +0x21,0x10,0x4F,0x10,0x4B,0x4F,0x43,0xFB,0x18,0x1F,0x7A,0x00,0x2F,0x15,0xD1, \ +0x51,0x80,0x0E,0x49,0x01,0x22,0x19,0x60,0xD9,0x1D,0x11,0x31,0x59,0x60,0x9A, \ +0x81,0x00,0x21,0x19,0x72,0x0A,0x4F,0xD9,0x73,0xBF,0x79,0x01,0x2F,0x01,0xD1, \ +0xC2,0x73,0x00,0xE0,0xC1,0x73,0x20,0x21,0x03,0x60,0x01,0x72,0x18,0x1C,0x80, \ +0xBC,0xF7,0x46,0xF8,0x01,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0xDA,0x00,0x02, \ +0x00,0x00,0x00,0x80,0xB4,0x00,0x00,0x02,0x01,0x1C,0x00,0x68,0x02,0x08,0x01, \ +0xD3,0x08,0x1C,0xF7,0x46,0x00,0x22,0x0A,0x73,0xF7,0x46,0x01,0x68,0x09,0x08, \ +0x02,0xD3,0x40,0x21,0x01,0x72,0xF7,0x46,0x04,0x4A,0x01,0x68,0x12,0x7A,0x00, \ +0x2A,0xF9,0xD1,0x02,0x72,0x08,0x1C,0xF7,0x46,0x00,0x00,0xD4,0x51,0x00,0x00, \ +0xF0,0xB5,0x00,0x27,0x0A,0x4E,0x00,0x25,0x34,0x68,0x20,0x7A,0x80,0x28,0x0B, \ +0xD1,0x60,0x7A,0x00,0x28,0x0A,0xD0,0x20,0x1C,0xFF,0xF7,0xE1,0xFF,0x30,0x60, \ +0xA0,0x42,0x02,0xD0,0x01,0x35,0x13,0x2D,0xEF,0xD3,0x38,0x1C,0xF0,0xBD,0x01, \ +0x27,0xFB,0xE7,0x64,0x02,0x00,0x02,0x0A,0x49,0x01,0x20,0x48,0x63,0x00,0x20, \ +0xCB,0x1D,0x39,0x33,0x88,0x63,0x58,0x82,0x07,0x4A,0x18,0x82,0x10,0x60,0x90, \ +0x80,0x90,0x71,0xD0,0x71,0xCA,0x1D,0x49,0x32,0x50,0x71,0x98,0x81,0xD8,0x81, \ +0x60,0x31,0xC8,0x70,0xF7,0x46,0xFC,0x01,0x00,0x02,0xD4,0x51,0x00,0x00,0x80, \ +0xB5,0x07,0x27,0x7F,0x06,0xF8,0x69,0x40,0x23,0x18,0x43,0xF8,0x61,0x14,0x48, \ +0xFD,0xF7,0x34,0xFC,0xF8,0x69,0x20,0x23,0x18,0x43,0xF8,0x61,0xF8,0x69,0x1B, \ +0x01,0x18,0x43,0xF8,0x61,0x00,0x20,0xFF,0x21,0x91,0x31,0x01,0x30,0x88,0x42, \ +0xFC,0xD3,0xF8,0x69,0x0C,0x4B,0x18,0x40,0xF8,0x61,0x00,0x20,0x7D,0x21,0x49, \ +0x01,0x01,0x30,0x88,0x42,0xFC,0xD3,0xFF,0xF7,0xC2,0xFF,0xFD,0xF7,0x20,0xFC, \ +0x00,0xF0,0x0E,0xF8,0x05,0x49,0x0D,0x20,0x00,0x06,0x01,0x81,0xFF,0x21,0x41, \ +0x31,0x81,0x80,0x80,0xBD,0x50,0xC3,0x00,0x00,0xFF,0xFD,0x00,0x00,0xFF,0x0F, \ 
+0x00,0x00,0x90,0xB4,0x0E,0x48,0x00,0x21,0x01,0x70,0x0D,0x48,0x80,0x27,0x07, \ +0x73,0x01,0x23,0x03,0x72,0x82,0x22,0x02,0x71,0x07,0x22,0x02,0x70,0x0A,0x48, \ +0x05,0x24,0x04,0x73,0x86,0x24,0x04,0x72,0x02,0x71,0x08,0x48,0x24,0x22,0x02, \ +0x71,0x07,0x72,0x03,0x73,0x06,0x48,0x01,0x71,0x01,0x73,0x90,0xBC,0xF7,0x46, \ +0x00,0x00,0x10,0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0xC0, \ +0x03,0x00,0x0D,0xE0,0x03,0x00,0x0D,0xF0,0xB5,0x25,0x48,0x01,0x27,0x00,0x7B, \ +0x24,0x4C,0x0A,0x28,0x1F,0xD1,0x24,0x49,0x24,0x4E,0x00,0x20,0x0B,0x7B,0x02, \ +0x1C,0x01,0x30,0x08,0x28,0xB3,0x54,0xF9,0xD1,0xF1,0x78,0xB0,0x78,0xF2,0x79, \ +0x09,0x02,0x08,0x43,0x05,0x1C,0x71,0x79,0x30,0x79,0x09,0x02,0x01,0x43,0xB0, \ +0x79,0x12,0x02,0x02,0x43,0x30,0x78,0x73,0x78,0x00,0x02,0x18,0x43,0x05,0x28, \ +0x08,0xD1,0x28,0x1C,0x00,0xF0,0xEA,0xFA,0x21,0xE0,0x16,0x49,0x00,0x20,0x08, \ +0x73,0x27,0x71,0xF0,0xBD,0x09,0x28,0x03,0xD1,0x28,0x1C,0x00,0xF0,0x03,0xFB, \ +0x16,0xE0,0x11,0x4B,0x98,0x42,0x04,0xD1,0xF1,0x78,0x10,0x1C,0x00,0xF0,0x33, \ +0xFB,0x0E,0xE0,0x0E,0x4B,0x9B,0x78,0x00,0x2B,0x05,0xD1,0x13,0x1C,0x0A,0x1C, \ +0x29,0x1C,0x00,0xF0,0x17,0xF8,0x04,0xE0,0x13,0x1C,0x0A,0x1C,0x29,0x1C,0x04, \ +0xF0,0x93,0xFA,0x27,0x71,0xF0,0xBD,0x00,0x00,0xF0,0x02,0x00,0x0D,0xD0,0x03, \ +0x00,0x0D,0x30,0x03,0x00,0x0D,0x38,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x06, \ +0x80,0x00,0x00,0x5C,0x02,0x00,0x02,0x80,0xB5,0x0F,0x1C,0x11,0x1C,0x1A,0x1C, \ +0x08,0x4B,0x98,0x42,0x03,0xD1,0x38,0x1C,0x00,0xF0,0x12,0xF8,0x80,0xBD,0x06, \ +0x4B,0x98,0x42,0x03,0xD1,0x38,0x1C,0x00,0xF0,0x2B,0xF8,0x80,0xBD,0x03,0x49, \ +0x20,0x20,0x08,0x73,0x80,0xBD,0x33,0xC1,0x00,0x00,0x0E,0x40,0x00,0x00,0x70, \ +0x03,0x00,0x0D,0x0B,0x49,0x0C,0x48,0x4A,0x6B,0x03,0x2A,0x03,0xD1,0x0B,0x4A, \ +0x92,0x78,0x01,0x2A,0x02,0xD0,0x20,0x21,0x01,0x73,0xF7,0x46,0x80,0x22,0x02, \ +0x73,0x50,0x31,0xC9,0x79,0x06,0x4A,0x10,0x23,0x11,0x73,0x01,0x7B,0x19,0x43, \ +0x01,0x73,0xF7,0x46,0x00,0x00,0xFC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x38, \ +0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0xF0,0xB5,0x15,0x4E,0x17,0x1C,0xB2,0x78, \ +0x14,0x48,0x02,0x2A,0x06,0xD1,0xF2,0x78,0x08,0x2A,0x03,0xD1,0x12,0x4D,0x6A, \ +0x6B,0x03,0x2A,0x02,0xD0,0x20,0x21,0x01,0x73,0xF0,0xBD,0x10,0x4C,0x00,0x2F, \ +0x04,0xD1,0x00,0xF0,0x70,0xFB,0x01,0x20,0xA0,0x70,0x05,0xE0,0x00,0x29,0x01, \ +0xD1,0x0C,0x49,0xE9,0x65,0x00,0x21,0x01,0x73,0xE8,0x1D,0x39,0x30,0x07,0x83, \ +0x00,0x27,0x47,0x83,0x31,0x1C,0x08,0x22,0x07,0x48,0x01,0xF0,0xA5,0xFE,0x27, \ +0x71,0xF0,0xBD,0x00,0x00,0x38,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0xFC,0x01, \ +0x00,0x02,0x5C,0x02,0x00,0x02,0x00,0x60,0x00,0x01,0x40,0x02,0x00,0x02,0x90, \ +0xB5,0x17,0x49,0x08,0x78,0x4A,0x78,0x00,0x02,0x10,0x43,0x05,0x28,0x15,0x4A, \ +0x04,0xD1,0x89,0x78,0x50,0x6B,0x00,0xF0,0xC1,0xFA,0x90,0xBD,0x13,0x4B,0x01, \ +0x27,0x98,0x42,0x11,0x4C,0x04,0xD1,0xC8,0x78,0x00,0xF0,0x8E,0xF9,0x27,0x71, \ +0x90,0xBD,0x09,0x28,0x0A,0xD1,0x0E,0x49,0x20,0x20,0x08,0x73,0x27,0x71,0x50, \ +0x6B,0x03,0x28,0xEB,0xD1,0xD0,0x1D,0x49,0x30,0x47,0x71,0x90,0xBD,0xD1,0x1D, \ +0x59,0x31,0x89,0x78,0x00,0x29,0x02,0xD1,0x00,0xF0,0x0E,0xF8,0x90,0xBD,0x04, \ +0xF0,0x1F,0xFA,0x90,0xBD,0x38,0x02,0x00,0x02,0xFC,0x01,0x00,0x02,0xD0,0x03, \ +0x00,0x0D,0x06,0x80,0x00,0x00,0x70,0x03,0x00,0x0D,0x08,0x4B,0x07,0x49,0x98, \ +0x42,0x02,0xD1,0xE0,0x20,0x08,0x73,0x04,0xE0,0x06,0x4B,0x98,0x42,0x01,0xD1, \ +0x20,0x20,0x08,0x73,0x04,0x49,0x01,0x20,0x08,0x71,0xF7,0x46,0x70,0x03,0x00, \ +0x0D,0x33,0xC1,0x00,0x00,0x0E,0x40,0x00,0x00,0xD0,0x03,0x00,0x0D,0x80,0xB5, \ +0x11,0x48,0x11,0x4B,0x01,0x78,0x42,0x78,0x09,0x02,0x11,0x43,0x0F,0x1C,0x9F, \ 
+0x42,0x03,0xD1,0x80,0x78,0x00,0xF0,0x93,0xF9,0x05,0xE0,0x0C,0x49,0xE0,0x20, \ +0x08,0x73,0x0C,0x49,0x01,0x20,0x08,0x71,0x0B,0x4B,0x9F,0x42,0x0B,0xD1,0x0B, \ +0x48,0x01,0x79,0x02,0x29,0x07,0xD1,0x03,0x21,0x01,0x71,0x09,0x48,0x00,0x22, \ +0xC1,0x78,0x80,0x78,0x03,0xF0,0x03,0xFF,0x80,0xBD,0x38,0x02,0x00,0x02,0x0E, \ +0x40,0x00,0x00,0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x22,0xC1,0x00,0x00, \ +0x5C,0x02,0x00,0x02,0x40,0x02,0x00,0x02,0x00,0xB5,0x0C,0x49,0x08,0x7B,0x02, \ +0x09,0x05,0xD3,0x00,0x20,0x08,0x73,0x0A,0x49,0x01,0x20,0x08,0x71,0x00,0xBD, \ +0xC1,0x08,0x02,0xD3,0xFF,0xF7,0x9F,0xFE,0x00,0xBD,0x41,0x08,0x02,0xD3,0xFF, \ +0xF7,0x68,0xFF,0x00,0xBD,0x80,0x08,0xF2,0xD3,0xFF,0xF7,0xB5,0xFF,0x00,0xBD, \ +0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0xF0,0xB5,0x42,0x4E,0x30,0x79,0x80, \ +0x08,0x4A,0xD3,0x41,0x4D,0x68,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0xE3,0xFD, \ +0x3F,0x48,0x04,0x79,0xC0,0x20,0x01,0xF0,0xA4,0xF8,0x01,0x1C,0x3D,0x48,0x04, \ +0x22,0x02,0x71,0x00,0x22,0x02,0x71,0x08,0x1C,0x01,0xF0,0x9B,0xF8,0x3A,0x48, \ +0xC7,0x1D,0x39,0x37,0x39,0x8A,0x40,0x29,0x07,0xDA,0x39,0x8A,0x00,0x29,0x04, \ +0xD0,0x39,0x8A,0x02,0x31,0x09,0x04,0x09,0x0C,0x07,0xE0,0x40,0x2C,0x04,0xDA, \ +0x39,0x8A,0x00,0x29,0x01,0xD1,0x21,0x1C,0x00,0xE0,0x40,0x21,0x7A,0x8A,0x2F, \ +0x4C,0x52,0x18,0x19,0x23,0x9B,0x01,0x9A,0x42,0x04,0xD9,0x00,0x22,0x7A,0x82, \ +0x3A,0x82,0x01,0x22,0xA2,0x71,0x29,0x48,0xC0,0x6E,0x80,0x68,0x7A,0x8A,0x80, \ +0x18,0xCD,0x22,0x00,0xF0,0x0F,0xFA,0x00,0x20,0x30,0x71,0x68,0x79,0x01,0x28, \ +0x01,0xDD,0x00,0xF0,0x90,0xFD,0xA1,0x79,0x21,0x48,0x01,0x29,0x02,0xD1,0x00, \ +0x20,0xA0,0x71,0xF0,0xBD,0x04,0x1C,0x78,0x8A,0x00,0x28,0x1E,0xD1,0xE0,0x6E, \ +0x81,0x8A,0xC0,0x7D,0x08,0x31,0x08,0x18,0x38,0x82,0xE1,0x6E,0x8A,0x7D,0x48, \ +0x68,0x03,0x2A,0x01,0xDD,0x03,0x22,0x8A,0x75,0x39,0x8A,0x17,0x4B,0x99,0x42, \ +0x09,0xD8,0x39,0x8A,0x00,0x29,0x06,0xD0,0x0A,0x30,0x06,0x22,0x14,0x49,0x01, \ +0xF0,0x68,0xFD,0x00,0x28,0x03,0xD0,0x00,0x20,0x78,0x82,0x38,0x82,0xF0,0xBD, \ +0x38,0x8A,0x40,0x28,0x06,0xDD,0x38,0x8A,0x40,0x38,0x38,0x82,0x78,0x8A,0x40, \ +0x30,0x78,0x82,0xF0,0xBD,0x00,0x20,0x38,0x82,0x78,0x82,0xE0,0x6E,0xFF,0xF7, \ +0x02,0xFD,0xE0,0x66,0xF0,0xBD,0x70,0x03,0x00,0x0D,0xB4,0x00,0x00,0x02,0xF0, \ +0x02,0x00,0x0D,0x60,0x02,0x00,0x0D,0xFC,0x01,0x00,0x02,0x5C,0x02,0x00,0x02, \ +0x32,0x06,0x00,0x00,0x60,0x00,0x00,0x02,0xB0,0xB5,0x2F,0x4D,0xEF,0x1D,0x49, \ +0x37,0x78,0x79,0x00,0x28,0x45,0xD0,0x2D,0x48,0x00,0x78,0x01,0x28,0x41,0xD1, \ +0x2C,0x48,0x00,0x24,0x01,0x78,0x01,0x29,0x02,0xD1,0x04,0x70,0x05,0xF0,0x99, \ +0xFF,0x29,0x48,0x01,0x7A,0x01,0x29,0x03,0xD1,0x04,0x72,0xF8,0x79,0x00,0xF0, \ +0xB3,0xF9,0xE8,0x1D,0x59,0x30,0xC0,0x78,0x01,0x28,0x02,0xD1,0xF8,0x79,0x00, \ +0xF0,0xAB,0xF9,0xF8,0x79,0x2C,0x1C,0x02,0x28,0x26,0xD0,0xFF,0xF7,0x41,0xFF, \ +0xA0,0x6E,0x80,0x23,0x00,0x7A,0x1D,0x4F,0x18,0x40,0x16,0xD0,0xFF,0xF7,0x37, \ +0xFD,0x00,0x28,0x12,0xD0,0xA1,0x6E,0x10,0x20,0x08,0x72,0x78,0x79,0x01,0x28, \ +0x01,0xDD,0x00,0xF0,0x1D,0xFD,0xA0,0x6E,0x81,0x89,0x0C,0x30,0x0C,0x31,0xFC, \ +0xF7,0x09,0xFF,0x78,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0xFE,0xFC,0xFC,0xF7, \ +0x46,0xFF,0x78,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0xF7,0xFC,0xB0,0xBD,0x0D, \ +0x48,0x00,0x78,0x00,0x28,0xFA,0xD0,0xA0,0x6E,0x01,0x7A,0x10,0x29,0x05,0xD0, \ +0x01,0x7A,0x80,0x29,0x02,0xD0,0x01,0x7A,0x40,0x29,0xF0,0xD1,0xFF,0xF7,0xF9, \ +0xFC,0xA0,0x66,0xB0,0xBD,0xFC,0x01,0x00,0x02,0xE7,0x01,0x00,0x02,0xE5,0x01, \ +0x00,0x02,0xD4,0x51,0x00,0x00,0xB4,0x00,0x00,0x02,0xE6,0x01,0x00,0x02,0xB0, \ +0xB4,0x21,0x4F,0x80,0x21,0x21,0x4A,0x39,0x73,0xD1,0x1D,0x39,0x31,0x4C,0x8B, \ 
+0x0D,0x8B,0xAC,0x42,0x17,0xD1,0x38,0x7B,0x40,0x23,0x03,0x40,0xE0,0x20,0x00, \ +0x2B,0x0F,0xD1,0x09,0x8B,0x49,0x07,0x02,0xD0,0x38,0x73,0xB0,0xBC,0xF7,0x46, \ +0xD1,0x1D,0x49,0x31,0x89,0x79,0x01,0x29,0x02,0xD1,0xD0,0x20,0x38,0x73,0xF5, \ +0xE7,0x38,0x73,0xF3,0xE7,0x38,0x73,0xF1,0xE7,0x4A,0x8B,0x0C,0x8B,0xA2,0x42, \ +0xED,0xDA,0x0A,0x8B,0x4C,0x8B,0x12,0x1B,0x08,0x2A,0x00,0xD9,0x08,0x22,0x01, \ +0x28,0x01,0xD1,0x0C,0x4B,0x02,0xE0,0x02,0x28,0x00,0xD1,0x0B,0x4B,0x00,0x2A, \ +0x08,0xD0,0x0A,0x48,0x4C,0x8B,0x4D,0x8B,0x01,0x34,0x4C,0x83,0x5C,0x5D,0x01, \ +0x3A,0x04,0x73,0xF7,0xD1,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xD0,0xE7, \ +0x70,0x03,0x00,0x0D,0xFC,0x01,0x00,0x02,0xFC,0x01,0x00,0x02,0x0E,0x02,0x00, \ +0x02,0x30,0x03,0x00,0x0D,0xF0,0xB5,0x24,0x4E,0x22,0x4C,0xF7,0x1D,0x59,0x37, \ +0x01,0x28,0x22,0x4D,0x0C,0xD1,0xFD,0xF7,0x19,0xFA,0x28,0x7B,0xF1,0x1D,0x49, \ +0x31,0xC8,0x71,0x00,0xF0,0x4C,0xF9,0x00,0x20,0x38,0x71,0x01,0x20,0x20,0x71, \ +0xF0,0xBD,0xF1,0x1D,0x39,0x31,0x4A,0x8B,0x33,0x1C,0x0E,0x8B,0xB2,0x42,0x1E, \ +0xDA,0x0A,0x8B,0x4E,0x8B,0x92,0x1B,0x08,0x2A,0x00,0xD9,0x08,0x22,0x00,0x2A, \ +0x0A,0xD0,0x13,0x4D,0x2E,0x7B,0xDD,0x6D,0x2E,0x70,0x01,0x35,0xDD,0x65,0x4D, \ +0x8B,0x01,0x35,0x4D,0x83,0x01,0x3A,0xF4,0xD1,0x4A,0x8B,0x0B,0x8B,0x0E,0x49, \ +0x9A,0x42,0x0E,0xD1,0x02,0x28,0x07,0xD1,0x00,0xF0,0x24,0xF9,0x00,0x20,0x38, \ +0x71,0x09,0xE0,0x01,0x20,0x20,0x71,0xF0,0xBD,0x60,0x20,0x08,0x73,0x01,0x20, \ +0x38,0x71,0x01,0xE0,0x00,0x20,0x08,0x73,0x01,0x20,0x20,0x71,0xF0,0xBD,0xD0, \ +0x03,0x00,0x0D,0xFC,0x01,0x00,0x02,0x30,0x03,0x00,0x0D,0x70,0x03,0x00,0x0D, \ +0x00,0xB5,0x7F,0x28,0x07,0xD8,0x00,0x29,0x05,0xD1,0x00,0x2A,0x03,0xD1,0x0C, \ +0x4A,0x51,0x6B,0x03,0x29,0x03,0xD1,0x0B,0x49,0x20,0x20,0x08,0x73,0x00,0xBD, \ +0x01,0x29,0x04,0xD1,0x00,0x28,0x08,0xD0,0x02,0x20,0x50,0x63,0x05,0xE0,0x02, \ +0x29,0x03,0xD1,0x00,0x28,0x01,0xD1,0x01,0x20,0x50,0x63,0x00,0xF0,0xEE,0xF8, \ +0x00,0xBD,0x00,0x00,0xFC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB5,0x00, \ +0x29,0x09,0xD1,0x00,0x2A,0x07,0xD1,0x00,0x28,0x01,0xD0,0x01,0x28,0x03,0xD1, \ +0x14,0x49,0x4A,0x6B,0x01,0x2A,0x03,0xD1,0x13,0x49,0x20,0x20,0x08,0x73,0x80, \ +0xBD,0x12,0x4B,0x02,0x2A,0x09,0xD1,0x00,0x28,0x12,0xD0,0x03,0x22,0x4A,0x63, \ +0x1F,0x7B,0x1A,0x1C,0x02,0x23,0x3B,0x43,0x13,0x73,0x0A,0xE0,0x03,0x2A,0x08, \ +0xD1,0x00,0x28,0x06,0xD1,0x02,0x22,0x4A,0x63,0x1F,0x7B,0x1A,0x1C,0xFD,0x23, \ +0x3B,0x40,0x13,0x73,0x88,0x63,0x00,0x20,0x40,0x31,0x88,0x81,0xC8,0x81,0x00, \ +0xF0,0xB8,0xF8,0x80,0xBD,0x00,0x00,0xFC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D, \ +0xE0,0x03,0x00,0x0D,0x90,0xB5,0x15,0x4F,0xFA,0x1D,0x39,0x32,0x01,0x29,0x02, \ +0xD1,0x12,0x23,0x13,0x83,0x03,0xE0,0x20,0x23,0x02,0x29,0x09,0xD1,0x13,0x83, \ +0x00,0x23,0x50,0x37,0xBB,0x71,0x14,0x8B,0xA0,0x42,0x05,0xD8,0xBB,0x71,0x10, \ +0x83,0x0F,0xE0,0x0B,0x48,0x03,0x73,0x90,0xBD,0x14,0x8B,0xA0,0x42,0x09,0xD9, \ +0x10,0x8B,0x40,0x07,0x01,0xD0,0xBB,0x71,0x04,0xE0,0x10,0x8B,0x40,0x07,0x01, \ +0xD1,0x01,0x20,0xB8,0x71,0x53,0x83,0x08,0x1C,0xFF,0xF7,0xDC,0xFE,0x90,0xBD, \ +0x00,0x00,0xFC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB4,0x0E,0x4F,0x0E, \ +0x4A,0x01,0x28,0x06,0xD1,0x80,0x20,0x10,0x72,0x38,0x7B,0xFE,0x23,0x18,0x40, \ +0x38,0x73,0x08,0xE0,0x02,0x28,0x06,0xD1,0x80,0x20,0x08,0x43,0x10,0x72,0x38, \ +0x7B,0x01,0x23,0x18,0x43,0x38,0x73,0x06,0x49,0x20,0x20,0x08,0x73,0x05,0x49, \ +0x01,0x20,0x08,0x71,0x80,0xBC,0xF7,0x46,0x00,0x00,0xE0,0x03,0x00,0x0D,0xC0, \ +0x03,0x00,0x0D,0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x0D,0x23,0x1B,0x06, \ +0x99,0x83,0x05,0x49,0x0A,0x70,0x05,0x4A,0x10,0x60,0x02,0x20,0x08,0x72,0x08, \ 
+0x7A,0x00,0x28,0xFC,0xD1,0xF7,0x46,0x00,0x00,0x20,0x00,0x00,0x0D,0x40,0x00, \ +0x00,0x0D,0x90,0xB5,0x1B,0x4C,0x07,0x1C,0x60,0x79,0x01,0x28,0x01,0xDD,0x00, \ +0xF0,0x82,0xFB,0x00,0x21,0x02,0x2F,0x17,0x48,0x18,0x4A,0x0F,0xD0,0x43,0x79, \ +0x02,0x2B,0x03,0xD1,0x41,0x71,0x03,0xF0,0xEF,0xFD,0x1A,0xE0,0x11,0x72,0x14, \ +0x48,0x20,0x22,0x02,0x70,0x01,0x70,0x13,0x49,0x86,0x20,0x08,0x72,0x11,0xE0, \ +0x12,0x4B,0x9B,0x7B,0x00,0x2B,0x0D,0xD1,0x17,0x7A,0x7B,0x09,0x0A,0xD2,0x10, \ +0x23,0x13,0x72,0xC1,0x70,0x0E,0x4A,0x01,0x20,0x10,0x70,0x0F,0x20,0x00,0x06, \ +0x81,0x81,0x0C,0x49,0x81,0x80,0x60,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x42, \ +0xFB,0x03,0xF0,0x26,0xFE,0x90,0xBD,0x00,0x00,0xB4,0x00,0x00,0x02,0x5C,0x02, \ +0x00,0x02,0x60,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0x60, \ +0x09,0x00,0x02,0xE6,0x01,0x00,0x02,0x08,0x08,0x00,0x00,0x04,0x48,0x01,0x78, \ +0x02,0x78,0x91,0x42,0xFC,0xD0,0x03,0x49,0x60,0x20,0x08,0x73,0xF7,0x46,0x00, \ +0x00,0xF0,0x03,0x00,0x0D,0x70,0x03,0x00,0x0D,0xF0,0xB5,0x28,0x4E,0x30,0x78, \ +0x00,0x28,0x01,0xD1,0x00,0xF0,0xD5,0xFA,0x0D,0x24,0x24,0x06,0x27,0x89,0x40, \ +0x20,0x24,0x4D,0x38,0x40,0x08,0xD0,0x28,0x7A,0x00,0x28,0xFC,0xD1,0x22,0x48, \ +0x00,0x7B,0x40,0x08,0x01,0xD3,0xFF,0xF7,0x11,0xFD,0x78,0x0A,0x1C,0xD3,0xF8, \ +0x43,0xFF,0x23,0x01,0x33,0x18,0x43,0x20,0x81,0xFD,0xF7,0x99,0xF8,0x20,0x7B, \ +0x00,0x09,0xFC,0xD2,0x28,0x7A,0x00,0x28,0xFC,0xD1,0xFF,0xF7,0x86,0xFB,0x17, \ +0x48,0x01,0x7A,0x02,0x29,0x05,0xD0,0x01,0x21,0x01,0x72,0x15,0x48,0x00,0x23, \ +0x43,0x71,0x01,0xE0,0x00,0x23,0x03,0x72,0xFF,0xF7,0x28,0xFB,0x12,0x49,0x08, \ +0x78,0x01,0x28,0x10,0xD1,0xB8,0x08,0x0E,0xD3,0x10,0x4A,0x00,0x23,0x10,0x7A, \ +0x13,0x72,0xFA,0x43,0x02,0x23,0x1A,0x43,0x22,0x81,0x09,0x78,0x01,0x29,0x03, \ +0xD1,0x00,0x04,0x00,0x0C,0x03,0xF0,0x39,0xFD,0x30,0x78,0x00,0x28,0x01,0xD1, \ +0x00,0xF0,0xB0,0xFA,0xF0,0xBD,0x00,0x00,0x41,0x01,0x00,0x02,0x20,0x00,0x00, \ +0x0D,0xD0,0x03,0x00,0x0D,0xD4,0x51,0x00,0x00,0x5C,0x02,0x00,0x02,0x3B,0x01, \ +0x00,0x02,0xE0,0x03,0x00,0x0D,0x90,0xB5,0x41,0x68,0x0A,0x78,0x08,0x2A,0x12, \ +0xD1,0x8A,0x7F,0xCB,0x7F,0x12,0x02,0x1A,0x43,0x15,0x4B,0x12,0x04,0x1F,0x88, \ +0x12,0x0C,0xBA,0x42,0x02,0xD0,0x5B,0x88,0x93,0x42,0x06,0xD1,0xC8,0x1D,0x11, \ +0x30,0x06,0x22,0x10,0x49,0x01,0xF0,0xC2,0xFA,0x90,0xBD,0x03,0x23,0x5B,0x02, \ +0x9A,0x42,0x06,0xDD,0xC8,0x1D,0x11,0x30,0x06,0x22,0x0B,0x49,0x01,0xF0,0xB7, \ +0xFA,0x90,0xBD,0xCF,0x1D,0x01,0x37,0x47,0x60,0x18,0x32,0x82,0x82,0x08,0x4C, \ +0x18,0x22,0x20,0x1C,0x01,0xF0,0xAC,0xFA,0x18,0x22,0x38,0x1C,0x21,0x1C,0x01, \ +0xF0,0xA7,0xFA,0x90,0xBD,0x9C,0x02,0x00,0x02,0x96,0x02,0x00,0x02,0x90,0x02, \ +0x00,0x02,0x4C,0x0A,0x00,0x02,0xF0,0xB5,0x00,0xF0,0x79,0xF9,0x80,0x4E,0xFF, \ +0x21,0xF0,0x1D,0x27,0x30,0x01,0x31,0x06,0x22,0x05,0x1C,0x00,0xF0,0xD7,0xF9, \ +0x7C,0x4F,0x12,0x22,0x03,0x21,0x38,0x1C,0x00,0xF0,0xD1,0xF9,0xF0,0x1D,0x15, \ +0x30,0x0E,0x22,0xFF,0x21,0x11,0x31,0x00,0xF0,0xCA,0xF9,0xF0,0x1D,0x2D,0x30, \ +0x01,0x22,0xFF,0x21,0x31,0x31,0x00,0xF0,0xC3,0xF9,0xF0,0x1D,0x58,0x30,0x07, \ +0x22,0xFF,0x21,0x81,0x31,0x00,0xF0,0xBC,0xF9,0x0E,0x22,0xFF,0x21,0x30,0x1C, \ +0x41,0x31,0x00,0xF0,0xB6,0xF9,0xF0,0x1D,0x07,0x30,0x0E,0x22,0xFF,0x21,0x51, \ +0x31,0x00,0xF0,0xAF,0xF9,0xF0,0x1D,0x3C,0x30,0x0E,0x22,0xFF,0x21,0x71,0x31, \ +0x00,0xF0,0xA8,0xF9,0xF0,0x1D,0x4A,0x30,0x0E,0x22,0xFF,0x21,0x21,0x31,0x00, \ +0xF0,0xA1,0xF9,0xF0,0x1D,0x2E,0x30,0x0E,0x22,0xFF,0x21,0x61,0x31,0x00,0xF0, \ +0x9A,0xF9,0xF0,0x1D,0x5F,0x30,0x03,0x22,0xFF,0x21,0x89,0x31,0x00,0xF0,0x93, \ +0xF9,0xF0,0x1D,0x63,0x30,0x04,0x22,0xFF,0x21,0x8D,0x31,0x00,0xF0,0x8C,0xF9, \ 
+0x00,0xF0,0x41,0xF9,0xF0,0x1D,0x23,0x30,0x04,0x22,0xF9,0x1D,0x01,0x31,0x01, \ +0xF0,0x3E,0xFA,0xF4,0x1D,0x19,0x34,0xA0,0x7B,0xC0,0x07,0xC0,0x0F,0x00,0x27, \ +0x00,0x28,0x10,0xD1,0x4F,0x4A,0x17,0x54,0x01,0x30,0x06,0x28,0xFB,0xD3,0x10, \ +0x1C,0x06,0x22,0x29,0x1C,0x01,0xF0,0x0E,0xFA,0x00,0x28,0x04,0xD0,0x29,0x1C, \ +0x06,0x22,0x49,0x48,0x01,0xF0,0x25,0xFA,0xF0,0x1D,0x29,0x30,0x00,0x79,0x35, \ +0x1C,0x10,0x28,0x11,0xD0,0x20,0x28,0x0F,0xD0,0x31,0x28,0x0D,0xD0,0x30,0x28, \ +0x0B,0xD0,0x32,0x28,0x09,0xD0,0x40,0x28,0x07,0xD0,0x41,0x28,0x05,0xD0,0x50, \ +0x28,0x03,0xD0,0x51,0x28,0x01,0xD0,0x52,0x28,0x01,0xD1,0x3D,0x49,0xC8,0x75, \ +0xE8,0x1D,0x49,0x30,0xC3,0x7B,0x01,0x21,0x3C,0x48,0x3A,0x4A,0x55,0x2B,0x13, \ +0xD1,0x03,0x78,0x53,0x2B,0x10,0xD1,0x43,0x78,0x42,0x2B,0x0D,0xD1,0x83,0x78, \ +0x53,0x2B,0x0A,0xD1,0xC3,0x78,0x55,0x2B,0x07,0xD1,0x03,0x79,0x53,0x2B,0x04, \ +0xD1,0x43,0x79,0x50,0x2B,0x01,0xD1,0x11,0x70,0x00,0xE0,0x17,0x70,0xFF,0x23, \ +0x01,0x22,0x86,0x79,0x52,0x02,0x01,0x33,0x53,0x2E,0x09,0xD1,0xC6,0x79,0x45, \ +0x2E,0x06,0xD1,0x06,0x7A,0x4C,0x2E,0x03,0xD1,0x2A,0x4E,0xF3,0x61,0x32,0x62, \ +0x02,0xE0,0x28,0x4E,0xF2,0x61,0x33,0x62,0x86,0x7A,0x02,0x23,0x26,0x4A,0x48, \ +0x2E,0x1C,0xD1,0xC6,0x7A,0x57,0x2E,0x19,0xD1,0x00,0x7B,0x31,0x28,0x02,0xD1, \ +0x03,0x20,0x90,0x70,0x14,0xE0,0x32,0x28,0x02,0xD1,0x04,0x20,0x90,0x70,0x0F, \ +0xE0,0x33,0x28,0x0A,0xD1,0x05,0x20,0x90,0x70,0x1B,0x4E,0x20,0x20,0xF0,0x60, \ +0x71,0x76,0x31,0x76,0x18,0x4E,0x00,0x20,0x71,0x62,0x13,0xE0,0x93,0x70,0x00, \ +0xE0,0x93,0x70,0x15,0x4E,0x40,0x20,0xF0,0x60,0x77,0x76,0x90,0x78,0x02,0x28, \ +0x01,0xD1,0x37,0x76,0x02,0xE0,0x31,0x76,0x04,0x28,0xEC,0xDA,0x20,0x79,0x00, \ +0x28,0xE9,0xD0,0x27,0x71,0xE7,0xE7,0x29,0x18,0x50,0x31,0x49,0x78,0xFA,0x29, \ +0x01,0xDD,0x77,0x62,0xF0,0xBD,0x01,0x30,0x0E,0x28,0xF5,0xD3,0xF0,0xBD,0x00, \ +0x00,0x64,0x0A,0x00,0x02,0xFC,0x01,0x00,0x02,0x00,0x72,0x01,0x02,0x60,0x00, \ +0x00,0x02,0x00,0x00,0x00,0x02,0x3A,0x01,0x00,0x02,0xC4,0x0A,0x00,0x02,0xA0, \ +0x02,0x00,0x02,0x14,0x01,0x00,0x02,0xB0,0xB5,0x2B,0x48,0x04,0x25,0x05,0x70, \ +0x2A,0x49,0x00,0x20,0x08,0x70,0x2A,0x49,0x02,0x24,0x0C,0x70,0x06,0x21,0x07, \ +0x27,0x7F,0x06,0xB9,0x61,0x78,0x61,0xF8,0x69,0xFB,0x0B,0x98,0x43,0xF8,0x61, \ +0xF8,0x69,0x10,0x23,0x98,0x43,0xF8,0x61,0xFF,0xF7,0xD1,0xFE,0x22,0x48,0x00, \ +0x78,0x00,0x28,0x00,0xD1,0xBD,0x61,0x20,0x48,0x01,0x21,0xC1,0x76,0x20,0x49, \ +0xCA,0x69,0x0B,0x0C,0x1A,0x43,0xCA,0x61,0xCA,0x69,0x1B,0x23,0x9A,0x43,0xCA, \ +0x61,0xCA,0x69,0x04,0x23,0x9A,0x43,0xCA,0x61,0xC2,0x68,0xCB,0x69,0xD2,0x43, \ +0x1A,0x40,0xCA,0x61,0xC2,0x69,0xCB,0x69,0xD2,0x43,0x1A,0x40,0xCA,0x61,0x02, \ +0x6A,0xCB,0x69,0x1A,0x43,0xCA,0x61,0xCA,0x69,0x0B,0x0C,0x9A,0x43,0xCA,0x61, \ +0xB9,0x69,0x01,0x23,0x19,0x43,0xB9,0x61,0x84,0x76,0x00,0xF0,0x3D,0xF8,0x00, \ +0xF0,0xA7,0xF8,0x0A,0x20,0xFC,0xF7,0xB8,0xFD,0x00,0xF0,0x9A,0xF8,0xFF,0xF7, \ +0x76,0xF9,0x09,0x48,0x01,0x23,0x04,0x72,0xF8,0x69,0xDB,0x03,0x18,0x43,0xF8, \ +0x61,0xB0,0xBD,0x53,0x02,0x00,0x02,0x5E,0x02,0x00,0x02,0x3F,0x01,0x00,0x02, \ +0x3A,0x01,0x00,0x02,0xA0,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0xD4,0x51,0x00, \ +0x00,0x80,0x21,0xF3,0x20,0x00,0x05,0x01,0x60,0x00,0x21,0x01,0x60,0x01,0x21, \ +0x41,0x60,0x01,0x60,0x07,0x21,0x49,0x06,0xCA,0x69,0x01,0x23,0x5B,0x03,0x1A, \ +0x43,0xCA,0x61,0x04,0x49,0x01,0x63,0x04,0x49,0x41,0x63,0x81,0x63,0xC1,0x63, \ +0x01,0x69,0x80,0x68,0xF7,0x46,0x00,0x00,0x01,0x0C,0x00,0x02,0x01,0x02,0x00, \ +0x02,0x07,0x20,0x40,0x06,0xC1,0x69,0x02,0x4B,0x19,0x40,0xC1,0x61,0xF7,0x46, \ +0x00,0x00,0xFF,0xDF,0x00,0x00,0xF0,0xB5,0x0F,0x1C,0x00,0x21,0xF3,0x24,0x24, \ 
+0x05,0x00,0x28,0x08,0xD9,0x10,0x4D,0x6B,0x5C,0xE3,0x60,0x26,0x69,0xB3,0x08, \ +0xFC,0xD3,0x01,0x31,0x81,0x42,0xF7,0xD3,0xFF,0x20,0xE0,0x60,0xA1,0x68,0x21, \ +0x1C,0x0B,0x69,0x5B,0x08,0xFC,0xD3,0x8B,0x68,0x0C,0x69,0xA3,0x08,0xFC,0xD3, \ +0xC8,0x60,0x0B,0x69,0x5B,0x08,0xFC,0xD3,0x8B,0x68,0x3B,0x70,0x01,0x37,0x01, \ +0x3A,0xF3,0xD1,0x02,0x20,0xFC,0xF7,0x4F,0xFD,0xF0,0xBD,0xB0,0x02,0x00,0x02, \ +0xF3,0x20,0x00,0x05,0x81,0x68,0x05,0x21,0xC1,0x60,0x01,0x69,0x89,0x08,0xFC, \ +0xD3,0xFF,0x21,0xC1,0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0x81,0x68,0x01,0x69, \ +0x49,0x08,0xFC,0xD3,0x80,0x68,0x00,0x06,0x00,0x0E,0xF7,0x46,0x90,0xB5,0x04, \ +0x1C,0x48,0x09,0x08,0x23,0x18,0x40,0x17,0x1C,0x03,0x22,0x02,0x43,0x08,0x48, \ +0x02,0x70,0x41,0x70,0xFF,0xF7,0xDE,0xFF,0x40,0x08,0xFB,0xD2,0x02,0x20,0xFC, \ +0xF7,0x25,0xFD,0x02,0x20,0x21,0x1C,0x3A,0x1C,0xFF,0xF7,0xAA,0xFF,0x90,0xBD, \ +0x00,0x00,0xB0,0x02,0x00,0x02,0x07,0x20,0x40,0x06,0xC1,0x69,0x01,0x23,0x5B, \ +0x03,0x19,0x43,0xC1,0x61,0xF7,0x46,0xF3,0x20,0x00,0x05,0x41,0x68,0x0F,0x23, \ +0x1B,0x04,0x99,0x43,0x41,0x60,0x41,0x68,0x19,0x43,0x41,0x60,0xF7,0x46,0x00, \ +0x00,0x80,0xB4,0x14,0x4B,0x5B,0x79,0x01,0x2B,0x0E,0xDD,0x17,0x1C,0x12,0x4A, \ +0x14,0xD1,0x02,0x2B,0x09,0xD1,0x00,0x29,0x07,0xD1,0x00,0x28,0x07,0xD1,0x90, \ +0x78,0x4B,0x1F,0x18,0x40,0x90,0x70,0x00,0x20,0x50,0x70,0x80,0xBC,0xF7,0x46, \ +0x90,0x78,0x04,0x23,0x18,0x43,0x90,0x70,0x01,0x20,0x50,0x70,0xF6,0xE7,0x00, \ +0x28,0x04,0xD1,0x90,0x78,0x02,0x23,0x98,0x43,0x90,0x70,0xEF,0xE7,0x90,0x78, \ +0x02,0x23,0x18,0x43,0x90,0x70,0xEA,0xE7,0x00,0x00,0xB4,0x00,0x00,0x02,0xC0, \ +0x09,0x00,0x02,0x90,0xB5,0x07,0x21,0x49,0x06,0xCA,0x69,0x52,0x09,0x15,0xD3, \ +0xCA,0x69,0x10,0x23,0x9A,0x43,0xCA,0x61,0x01,0x28,0x01,0xD1,0x08,0x49,0x08, \ +0x70,0x08,0x4C,0x67,0x68,0xFC,0xF7,0xDA,0xFC,0x39,0x1A,0x49,0x01,0x09,0x18, \ +0x06,0x4A,0x61,0x60,0x51,0x6B,0x09,0x1A,0x49,0x01,0x08,0x18,0x50,0x63,0x90, \ +0xBD,0x00,0x00,0x41,0x01,0x00,0x02,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04, \ +0x90,0xB5,0x0C,0x48,0x80,0x78,0x01,0x28,0x13,0xD1,0x0B,0x4F,0x7C,0x68,0xFC, \ +0xF7,0xBF,0xFC,0x21,0x1A,0x49,0x09,0x09,0x18,0x79,0x60,0x08,0x49,0x4A,0x6B, \ +0x12,0x1A,0x52,0x09,0x10,0x18,0x48,0x63,0x07,0x20,0x40,0x06,0xC1,0x69,0x10, \ +0x23,0x19,0x43,0xC1,0x61,0x90,0xBD,0xC0,0x09,0x00,0x02,0x80,0x00,0x00,0x04, \ +0x40,0x00,0x00,0x04,0x80,0xB5,0xC0,0x20,0x00,0xF0,0xD6,0xFA,0x07,0x1C,0x06, \ +0x48,0x01,0x78,0x00,0x29,0x03,0xD0,0x00,0x21,0x01,0x70,0xFF,0xF7,0xD3,0xFF, \ +0x38,0x1C,0x00,0xF0,0xCA,0xFA,0x80,0xBD,0x00,0x00,0x41,0x01,0x00,0x02,0x80, \ +0xB5,0xC0,0x20,0x00,0xF0,0xC2,0xFA,0x07,0x1C,0x01,0x20,0xFF,0xF7,0xA0,0xFF, \ +0x38,0x1C,0x00,0xF0,0xBB,0xFA,0x80,0xBD,0xF0,0xB4,0x13,0x4A,0x00,0x27,0xD7, \ +0x65,0x17,0x66,0x17,0x67,0x57,0x67,0x20,0x20,0x90,0x67,0x10,0x48,0x07,0x70, \ +0x41,0x1C,0x01,0x20,0x04,0x02,0x00,0x25,0x03,0x1C,0x46,0x08,0x05,0xD2,0x5B, \ +0x08,0x01,0x35,0x2D,0x06,0x2D,0x0E,0x5E,0x08,0xF9,0xD3,0x0D,0x70,0x01,0x31, \ +0x01,0x30,0xA0,0x42,0xF0,0xD3,0x07,0x49,0x00,0x20,0x80,0xC1,0x01,0x30,0x20, \ +0x28,0xFB,0xD3,0x57,0x66,0x97,0x66,0xD7,0x67,0xF0,0xBC,0xF7,0x46,0x68,0x03, \ +0x00,0x02,0x0C,0x0B,0x00,0x02,0x0C,0x0C,0x00,0x02,0x90,0xB5,0x0A,0x4F,0x0A, \ +0x4C,0x38,0x68,0x63,0x1C,0x98,0x42,0x04,0xD0,0x3C,0x60,0xFC,0xF7,0x36,0xF9, \ +0x00,0xF0,0xEC,0xFB,0x06,0x48,0x3C,0x60,0x00,0x68,0xFC,0xF7,0xB8,0xFB,0x00, \ +0x20,0x38,0x60,0x00,0xF0,0xF3,0xFB,0x90,0xBD,0xD4,0x03,0x00,0x02,0xF0,0xF0, \ +0xF0,0xF0,0x44,0x04,0x00,0x02,0x80,0xB5,0x05,0x48,0x05,0x4F,0x38,0x60,0xFC, \ +0xF7,0x1E,0xF9,0x00,0xF0,0xD4,0xFB,0x03,0x48,0x38,0x60,0x80,0xBD,0x00,0x00, \ 
+0xF0,0xF0,0xF0,0xF0,0xD4,0x03,0x00,0x02,0xF1,0xF0,0xF0,0xF0,0x0F,0x00,0x2D, \ +0xE9,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xCC,0x30,0x9F,0xE5,0x00,0x20, \ +0x93,0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2,0x00, \ +0x20,0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D,0xE9, \ +0xB0,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20,0x83, \ +0xE5,0x9C,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04,0x00, \ +0x00,0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9,0x84, \ +0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x10,0xD0,0x8D,0xE2,0x78,0x00,0x9F,0xE5, \ +0x10,0xFF,0x2F,0xE1,0x0F,0x00,0x2D,0xE9,0x64,0x30,0x9F,0xE5,0x00,0x20,0x93, \ +0xE5,0x00,0x00,0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2,0x00,0x20, \ +0x83,0xE5,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D,0xE9,0x4C, \ +0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20,0x83,0xE5, \ +0x34,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04,0x00,0x00, \ +0x0A,0x00,0x20,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9,0x20,0x00, \ +0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01, \ +0x40,0x2D,0xE9,0x0C,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0xD4,0x03,0x00,0x02, \ +0xC4,0x03,0x00,0x02,0x1D,0x01,0x00,0x00,0x29,0x01,0x00,0x00,0x00,0xA0,0x00, \ +0x47,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xE4,0x31,0x9F,0xE5,0x00,0x20, \ +0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3,0x03, \ +0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8, \ +0x0E,0xF0,0xB0,0xE1,0xC0,0x11,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50, \ +0xE3,0x27,0x00,0x00,0x0A,0xC0,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00,0x00, \ +0x52,0xE3,0x03,0x00,0x00,0x1A,0xA8,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x02, \ +0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1, \ +0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08,0x40,0xBD,0xE8,0x0E,0x10,0xA0, \ +0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x04,0x10,0x2D,0xE5,0xF0,0x5F, \ +0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD2,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x0F, \ +0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0,0x21,0xE1,0x0F,0x00,0x2D,0xE9, \ +0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x44,0x11,0x9F,0xE5,0x00,0x00,0x91, \ +0xE5,0x08,0xD0,0x80,0xE5,0x44,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00,0x00, \ +0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80,0xE5,0x00,0x20,0xA0,0xE3,0x00, \ +0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00,0x81,0xE5,0x78,0x01,0x00,0xEA, \ +0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x75,0x01,0x00,0xEA,0x00,0xA0,0x00, \ +0x47,0xD1,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0xF4,0x30,0x9F,0xE5,0x00,0x20, \ +0x93,0xE5,0x01,0x20,0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3,0x03, \ +0x00,0x00,0x0A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8, \ +0x0E,0xF0,0xB0,0xE1,0x00,0x10,0x9D,0xE5,0x1F,0x20,0xA0,0xE3,0x02,0x10,0x01, \ +0xE0,0x12,0x00,0x51,0xE3,0x0B,0x00,0x00,0x0A,0xBC,0x10,0x9F,0xE5,0x00,0x00, \ +0x91,0xE5,0x00,0x00,0x50,0xE3,0x27,0x00,0x00,0x0A,0xBC,0x30,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA4,0x30,0x9F,0xE5, \ +0x00,0x20,0x93,0xE5,0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40,0xBD, \ +0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08,0x40, \ +0xBD,0xE8,0x0E,0x10,0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x04, \ +0x10,0x2D,0xE5,0xF0,0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD1,0x20,0xA0,0xE3, \ +0x02,0xF0,0x21,0xE1,0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0,0x21, \ 
+0xE1,0x0F,0x00,0x2D,0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x40,0x10, \ +0x9F,0xE5,0x00,0x00,0x91,0xE5,0x08,0xD0,0x80,0xE5,0x40,0x30,0x9F,0xE5,0x00, \ +0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80,0xE5, \ +0x00,0x20,0xA0,0xE3,0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00,0x81, \ +0xE5,0x37,0x01,0x00,0xEA,0x18,0xD0,0x8D,0xE2,0xD3,0x00,0xA0,0xE3,0x00,0xF0, \ +0x21,0xE1,0x33,0x01,0x00,0xEA,0xD4,0x03,0x00,0x02,0xC4,0x03,0x00,0x02,0xC0, \ +0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0x4C,0x04,0x00,0x02,0xE4,0x03,0x00,0x02, \ +0x90,0xB5,0x86,0xB0,0x00,0x24,0x13,0x4F,0x13,0x4A,0x3C,0x60,0x7C,0x60,0xBC, \ +0x60,0x00,0x21,0x10,0x1C,0xBC,0x61,0x10,0xC0,0x01,0x31,0x20,0x29,0xFB,0xD3, \ +0xD0,0x1D,0x79,0x30,0xFA,0x60,0x38,0x61,0x7A,0x61,0x78,0x6A,0x00,0x22,0x00, \ +0x21,0x05,0x92,0x02,0x1C,0x04,0x91,0x03,0x90,0xF8,0x69,0x39,0x6A,0x02,0x92, \ +0x00,0x90,0x01,0x91,0x08,0xA1,0x07,0x48,0x05,0x4A,0x0C,0x4B,0x00,0xF0,0x18, \ +0xF8,0xBC,0x62,0xFC,0x62,0x06,0xB0,0x90,0xBD,0x48,0x04,0x00,0x02,0x8C,0x0C, \ +0x00,0x02,0xBD,0x46,0x00,0x00,0x0C,0x0D,0x00,0x02,0x53,0x79,0x73,0x74,0x65, \ +0x6D,0x20,0x54,0x69,0x6D,0x65,0x72,0x20,0x54,0x68,0x72,0x65,0x61,0x64,0x00, \ +0x4D,0x49,0x54,0x41,0xFF,0xB5,0x07,0x1C,0x0D,0x98,0x0E,0x9C,0x0A,0xAE,0x4C, \ +0xCE,0x09,0x9D,0xB9,0x62,0x02,0x99,0x79,0x64,0x03,0x99,0xB9,0x64,0x00,0x21, \ +0xB9,0x60,0x7A,0x61,0xAA,0x18,0x01,0x3A,0xFD,0x60,0x3A,0x61,0xDA,0x06,0xD2, \ +0x0E,0xF3,0x06,0xB8,0x61,0xFA,0x62,0xDB,0x0E,0xFB,0x63,0xF8,0x61,0x79,0x60, \ +0x03,0x20,0x38,0x63,0x79,0x63,0x25,0x48,0xB9,0x63,0x78,0x65,0x01,0x20,0x90, \ +0x40,0xBF,0x65,0x39,0x65,0x64,0x37,0x00,0x22,0x00,0x23,0x0E,0xC7,0x0C,0xC7, \ +0x78,0x3F,0x38,0x64,0x38,0x1C,0x1E,0x49,0x00,0xF0,0x0B,0xFB,0xC0,0x20,0x00, \ +0xF0,0x6C,0xF8,0x1C,0x49,0x1D,0x4D,0x39,0x60,0x2A,0x68,0xF9,0x1D,0x79,0x31, \ +0x00,0x2A,0x0A,0xD0,0xD3,0x1D,0x79,0x33,0xDA,0x68,0xDF,0x60,0xD3,0x1D,0x79, \ +0x33,0x9F,0x60,0xCA,0x60,0x2A,0x68,0x8A,0x60,0x02,0xE0,0x2F,0x60,0x8F,0x60, \ +0xCF,0x60,0x13,0x49,0x14,0x4D,0x0A,0x68,0x01,0x32,0x0A,0x60,0x29,0x68,0x01, \ +0x31,0x29,0x60,0x00,0xF0,0x4C,0xF8,0x00,0x2C,0x07,0xD0,0x38,0x1C,0x00,0xF0, \ +0x1D,0xFB,0x00,0x28,0x0A,0xD0,0x00,0xF0,0x83,0xFB,0x07,0xE0,0xC0,0x20,0x00, \ +0xF0,0x3F,0xF8,0x29,0x68,0x01,0x39,0x29,0x60,0x00,0xF0,0x3A,0xF8,0x00,0x20, \ +0x04,0xB0,0xF0,0xBD,0x00,0x00,0x19,0x48,0x00,0x00,0x59,0x48,0x00,0x00,0x44, \ +0x52,0x48,0x54,0xCC,0x03,0x00,0x02,0xD0,0x03,0x00,0x02,0xE4,0x03,0x00,0x02, \ +0x90,0xB5,0x41,0x60,0x07,0x1C,0x08,0x30,0x00,0x21,0x00,0x24,0x12,0xC0,0x12, \ +0xC0,0xC0,0x20,0x00,0xF0,0x1F,0xF8,0x0C,0x49,0x0C,0x4B,0x39,0x60,0x19,0x68, \ +0x00,0x29,0x06,0xD0,0xCA,0x69,0xCF,0x61,0x97,0x61,0xFA,0x61,0x19,0x68,0xB9, \ +0x61,0x02,0xE0,0x1F,0x60,0xBF,0x61,0xFF,0x61,0x06,0x49,0x0A,0x68,0x01,0x32, \ +0x0A,0x60,0x00,0xF0,0x09,0xF8,0x20,0x1C,0x90,0xBD,0x00,0x00,0x4E,0x44,0x56, \ +0x44,0x78,0x04,0x00,0x02,0x7C,0x04,0x00,0x02,0x00,0xA3,0x18,0x47,0x00,0x30, \ +0x0F,0xE1,0x3F,0x20,0xA0,0xE3,0x02,0x10,0x03,0xE0,0x00,0x10,0x81,0xE1,0x01, \ +0xF0,0x21,0xE1,0x02,0x00,0xC3,0xE1,0x1E,0xFF,0x2F,0xE1,0xF0,0xB5,0x85,0xB0, \ +0x07,0x1C,0xC0,0x20,0x0C,0x1C,0x15,0x1C,0xFF,0xF7,0xE8,0xFF,0xA9,0x08,0x03, \ +0xD3,0xB9,0x68,0x21,0x40,0xB9,0x60,0x3D,0xE0,0xB9,0x68,0x21,0x43,0xB9,0x60, \ +0x3C,0x69,0x0A,0x1C,0x00,0x2C,0x63,0xD0,0x7B,0x69,0x6D,0x49,0x01,0x2B,0x35, \ +0xD1,0xE5,0x1D,0x79,0x35,0x2E,0x68,0xB3,0x08,0x05,0xD3,0xA3,0x6F,0x16,0x1C, \ +0x1E,0x40,0x9E,0x42,0x28,0xD1,0x02,0xE0,0xA3,0x6F,0x13,0x40,0x24,0xD0,0xE3, \ +0x6F,0x1A,0x60,0x2A,0x68,0x52,0x08,0x03,0xD3,0xBA,0x68,0xA3,0x6F,0x9A,0x43, \ 
+0xBA,0x60,0x00,0x26,0x3E,0x61,0x7E,0x61,0xA6,0x66,0x0A,0x68,0x01,0x32,0x0A, \ +0x60,0xFF,0xF7,0xB7,0xFF,0x60,0x6E,0x00,0x28,0x04,0xD0,0xE0,0x1D,0x45,0x30, \ +0x00,0xF0,0x24,0xFB,0x00,0xE0,0xE6,0x64,0x6E,0x60,0x20,0x1C,0x00,0xF0,0x80, \ +0xFA,0x00,0x28,0x01,0xD0,0x00,0xF0,0xE6,0xFA,0x30,0x1C,0x9B,0xE0,0xFF,0xF7, \ +0xA2,0xFF,0x97,0xE0,0x03,0x94,0x00,0x25,0x3D,0x61,0x01,0x92,0x4F,0x4A,0x02, \ +0x93,0x11,0x68,0x00,0x26,0x01,0x31,0x11,0x60,0xFF,0xF7,0x95,0xFF,0xC0,0x20, \ +0xFF,0xF7,0x92,0xFF,0xF9,0x68,0x00,0x29,0x0A,0xD0,0x00,0x21,0xF9,0x60,0x03, \ +0x9C,0x7B,0x69,0x02,0x93,0xB9,0x68,0x01,0x9A,0x11,0x43,0x01,0x91,0x00,0x2C, \ +0x46,0xD0,0xE3,0x1D,0x79,0x33,0x04,0x93,0x19,0x68,0x00,0x91,0x89,0x08,0x06, \ +0xD3,0xA1,0x6F,0x01,0x9A,0x0A,0x40,0x8A,0x42,0x07,0xD1,0x04,0xE0,0x6F,0xE0, \ +0xA1,0x6F,0x01,0x9A,0x11,0x40,0x01,0xD0,0x00,0x21,0x00,0xE0,0x07,0x21,0x22, \ +0x6F,0x00,0x29,0x28,0xD1,0x01,0x99,0xE3,0x6F,0x19,0x60,0x00,0x99,0x49,0x08, \ +0x03,0xD3,0xB9,0x68,0xA3,0x6F,0x99,0x43,0xB9,0x60,0x21,0x6F,0xA1,0x42,0x02, \ +0xD1,0x00,0x21,0x03,0x91,0x08,0xE0,0x03,0x9B,0xA3,0x42,0x00,0xD1,0x03,0x91, \ +0x63,0x6F,0x4B,0x67,0x21,0x6F,0x63,0x6F,0x19,0x67,0x79,0x69,0x01,0x39,0x79, \ +0x61,0x00,0x21,0xA1,0x66,0x04,0x9B,0x00,0x2D,0x59,0x60,0x03,0xD1,0x25,0x1C, \ +0x26,0x1C,0x21,0x67,0x02,0xE0,0x34,0x67,0x21,0x67,0x26,0x1C,0x02,0x9B,0x14, \ +0x1C,0x01,0x3B,0x02,0x93,0x00,0x2B,0xA5,0xD1,0x03,0x99,0x39,0x61,0xFF,0xF7, \ +0x39,0xFF,0x00,0x2D,0x18,0xD0,0x1C,0x4E,0x00,0x24,0x68,0x6E,0x2F,0x6F,0x00, \ +0x28,0x04,0xD0,0xE8,0x1D,0x45,0x30,0x00,0xF0,0xA1,0xFA,0x00,0xE0,0xEC,0x64, \ +0xC0,0x20,0xFF,0xF7,0x28,0xFF,0x31,0x68,0x01,0x31,0x31,0x60,0xFF,0xF7,0x23, \ +0xFF,0x28,0x1C,0x00,0xF0,0xF6,0xF9,0x3D,0x1C,0xE8,0xD1,0xC0,0x20,0xFF,0xF7, \ +0x1B,0xFF,0x0E,0x49,0x0A,0x68,0x01,0x3A,0x0A,0x60,0xFF,0xF7,0x15,0xFF,0x0C, \ +0x48,0x0C,0x49,0x00,0x68,0x09,0x68,0x88,0x42,0x05,0xD0,0x0B,0x48,0x00,0x68, \ +0x00,0x28,0x01,0xD1,0x00,0xF0,0x49,0xFA,0x00,0x20,0x05,0xB0,0xF0,0xBD,0x79, \ +0x69,0x00,0x29,0x00,0xD1,0x5D,0xE7,0xF9,0x68,0x01,0x31,0xF9,0x60,0x59,0xE7, \ +0xE4,0x03,0x00,0x02,0xC4,0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0xD4,0x03,0x00, \ +0x02,0xFF,0xB5,0x07,0x1C,0xC0,0x20,0x0D,0x1C,0x09,0x9E,0xFF,0xF7,0xEF,0xFE, \ +0x02,0x9A,0x91,0x08,0x04,0xD3,0xB9,0x68,0x29,0x40,0xA9,0x42,0x0E,0xD1,0x02, \ +0xE0,0xB9,0x68,0x29,0x40,0x0A,0xD0,0xB9,0x68,0x03,0x9B,0x00,0x24,0x19,0x60, \ +0x02,0x9A,0x51,0x08,0x3D,0xD3,0xB9,0x68,0xA9,0x43,0xB9,0x60,0x39,0xE0,0x07, \ +0x24,0x00,0x2E,0x36,0xD0,0x1D,0x49,0x0C,0x68,0x1D,0x49,0xA1,0x66,0xA5,0x67, \ +0xE5,0x1D,0x02,0x9A,0x79,0x35,0x2A,0x60,0x03,0x9B,0xE3,0x67,0xE7,0x66,0x39, \ +0x69,0x00,0x29,0x09,0xD0,0x21,0x67,0x39,0x69,0x49,0x6F,0x61,0x67,0x39,0x69, \ +0x49,0x6F,0x0C,0x67,0x39,0x69,0x4C,0x67,0x02,0xE0,0x3C,0x61,0x24,0x67,0x64, \ +0x67,0x79,0x69,0x01,0x31,0x79,0x61,0x07,0x21,0x21,0x63,0x01,0x21,0xA1,0x63, \ +0x0E,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xE6,0x64,0xFF,0xF7,0xAC,0xFE,0x01, \ +0x23,0xDE,0x42,0x03,0xD0,0xE0,0x1D,0x45,0x30,0x00,0xF0,0x87,0xFA,0x20,0x1C, \ +0x00,0xF0,0xBE,0xFA,0x68,0x68,0x04,0xB0,0xF0,0xBD,0xFF,0xF7,0x9D,0xFE,0x20, \ +0x1C,0xF9,0xE7,0x00,0x00,0xC4,0x03,0x00,0x02,0x8D,0x4A,0x00,0x00,0xE4,0x03, \ +0x00,0x02,0x00,0xB5,0xFF,0xF7,0xD7,0xFB,0xFF,0xF7,0xB1,0xFD,0x00,0xF0,0x8F, \ +0xFB,0x00,0xF0,0x95,0xFB,0x00,0xF0,0xF5,0xF9,0x00,0xF0,0x99,0xFB,0x00,0xF0, \ +0x9F,0xFB,0x00,0xBD,0x00,0xA0,0x00,0x47,0x13,0x00,0xA0,0xE3,0x00,0xF0,0x21, \ +0xE1,0x48,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0xFC,0xFF, \ +0xFF,0x0A,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x34,0x10,0x9F,0xE5,0x00, \ 
+0x00,0x81,0xE5,0x04,0x20,0x90,0xE5,0x18,0x30,0x90,0xE5,0x01,0x20,0x82,0xE2, \ +0x04,0x20,0x80,0xE5,0x20,0x20,0x9F,0xE5,0x08,0xD0,0x90,0xE5,0x00,0x30,0x82, \ +0xE5,0x03,0x00,0xBD,0xE8,0x00,0x00,0x50,0xE3,0x01,0xF0,0x6F,0xE1,0xF0,0x80, \ +0xFD,0x08,0xFF,0xDF,0xFD,0xE8,0xC8,0x03,0x00,0x02,0xC4,0x03,0x00,0x02,0x4C, \ +0x04,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x4E,0x4B,0x98,0x42,0x01,0xD0,0x02,0xB0, \ +0xF0,0xBD,0x00,0x20,0x00,0x90,0x00,0x26,0xC0,0x20,0xFF,0xF7,0x43,0xFE,0x4A, \ +0x4D,0x29,0x68,0x09,0x68,0x01,0x91,0x00,0x29,0x01,0xD0,0x01,0xAA,0x8A,0x61, \ +0x29,0x68,0x46,0x4C,0x0E,0x60,0x29,0x68,0x04,0x31,0x29,0x60,0x22,0x68,0x91, \ +0x42,0x02,0xD1,0x43,0x49,0x09,0x68,0x29,0x60,0x43,0x49,0x0E,0x60,0xFF,0xF7, \ +0x2B,0xFE,0xC0,0x20,0xFF,0xF7,0x28,0xFE,0x01,0x99,0x00,0x29,0x5C,0xD0,0x01, \ +0x9C,0x21,0x69,0xA1,0x42,0x01,0xD1,0x00,0x21,0x08,0xE0,0x62,0x69,0x4A,0x61, \ +0x21,0x69,0x62,0x69,0x11,0x61,0x22,0x69,0x01,0xA9,0x91,0x61,0x21,0x69,0x01, \ +0x91,0x21,0x68,0x20,0x29,0x03,0xD9,0x20,0x39,0x21,0x60,0x00,0x25,0x04,0xE0, \ +0x27,0x1D,0xA2,0xCF,0x21,0x60,0x00,0x29,0x03,0xD0,0x69,0x46,0xA1,0x61,0x24, \ +0x61,0x00,0xE0,0xA6,0x61,0xFF,0xF7,0x02,0xFE,0x00,0x2D,0x02,0xD0,0x38,0x1C, \ +0x00,0xF0,0xFC,0xFB,0xC0,0x20,0xFF,0xF7,0xFA,0xFD,0xA2,0x69,0x69,0x46,0x8A, \ +0x42,0x25,0xD1,0x21,0x68,0x20,0x29,0x01,0xD9,0x1F,0x21,0x00,0xE0,0x01,0x39, \ +0x20,0x4D,0x89,0x00,0x2A,0x68,0x89,0x18,0x1F,0x4A,0x12,0x68,0x91,0x42,0x07, \ +0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x1D,0x4A,0x89,0x10,0x12,0x68,0x89,0x00, \ +0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x22,0x61,0x0A,0x68,0x52,0x69,0x62, \ +0x61,0x14,0x61,0x0A,0x68,0x54,0x61,0xA1,0x61,0x03,0xE0,0x24,0x61,0xA1,0x61, \ +0x64,0x61,0x0C,0x60,0xFF,0xF7,0xCE,0xFD,0xC0,0x20,0xFF,0xF7,0xCB,0xFD,0x01, \ +0x99,0x00,0x29,0xA2,0xD1,0x0F,0x49,0x09,0x68,0x00,0x29,0x10,0xD1,0x0E,0x4C, \ +0x03,0x21,0x22,0x68,0x11,0x63,0x22,0x68,0x01,0x21,0x91,0x63,0x0C,0x4A,0x11, \ +0x68,0x01,0x31,0x11,0x60,0xFF,0xF7,0xB7,0xFD,0x20,0x68,0x00,0xF0,0xD0,0xF9, \ +0x6C,0xE7,0xFF,0xF7,0xB1,0xFD,0x69,0xE7,0x4D,0x49,0x54,0x41,0x5C,0x04,0x00, \ +0x02,0x58,0x04,0x00,0x02,0x54,0x04,0x00,0x02,0x60,0x04,0x00,0x02,0xC4,0x03, \ +0x00,0x02,0xE4,0x03,0x00,0x02,0x90,0xB5,0x07,0x1C,0x00,0x6B,0x04,0x28,0x0C, \ +0xD1,0xC0,0x20,0xFF,0xF7,0x9A,0xFD,0x0A,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60, \ +0xFF,0xF7,0x94,0xFD,0x38,0x1C,0x00,0xF0,0x67,0xF8,0x90,0xBD,0xC0,0x20,0xFF, \ +0xF7,0x8D,0xFD,0xBC,0x6E,0xFF,0xF7,0x8A,0xFD,0x00,0x2C,0xF6,0xD0,0x38,0x1C, \ +0x00,0xF0,0x83,0xFB,0x90,0xBD,0xE4,0x03,0x00,0x02,0x80,0xB5,0x0C,0x4F,0x39, \ +0x68,0x88,0x6C,0x49,0x6C,0x00,0xF0,0x76,0xFB,0xC0,0x20,0xFF,0xF7,0x78,0xFD, \ +0x3A,0x68,0x01,0x21,0x11,0x63,0x3A,0x68,0x91,0x63,0x06,0x49,0x0A,0x68,0x01, \ +0x32,0x0A,0x60,0xFF,0xF7,0x6D,0xFD,0x38,0x68,0x00,0xF0,0x86,0xF9,0x80,0xBD, \ +0x00,0x00,0xC4,0x03,0x00,0x02,0xE4,0x03,0x00,0x02,0x00,0xA3,0x18,0x47,0x10, \ +0x20,0x90,0xE5,0x03,0x20,0xC2,0xE3,0x48,0x20,0x42,0xE2,0x01,0x30,0xA0,0xE3, \ +0x00,0x30,0x82,0xE5,0x33,0x30,0xA0,0xE3,0x04,0x30,0x82,0xE5,0x00,0x30,0xA0, \ +0xE3,0x08,0x30,0x82,0xE5,0x0C,0x30,0x82,0xE5,0x10,0x30,0x82,0xE5,0x14,0x30, \ +0x82,0xE5,0x18,0x30,0x82,0xE5,0x1C,0x30,0x82,0xE5,0x20,0x30,0x82,0xE5,0x24, \ +0x30,0x82,0xE5,0x28,0x30,0x82,0xE5,0x2C,0x30,0x82,0xE5,0x0C,0x30,0x90,0xE5, \ +0x30,0x30,0x82,0xE5,0x00,0x30,0xA0,0xE3,0x34,0x30,0x82,0xE5,0x38,0x30,0x82, \ +0xE5,0x3C,0x30,0x82,0xE5,0x40,0x10,0x82,0xE5,0x44,0x30,0x82,0xE5,0x08,0x20, \ +0x80,0xE5,0x1E,0xFF,0x2F,0xE1,0xF0,0xB5,0x00,0x24,0x07,0x1C,0xC0,0x20,0xFF, \ +0xF7,0x24,0xFD,0x29,0x49,0x2A,0x4D,0x0A,0x68,0x01,0x3A,0x0A,0x60,0xBA,0x6B, \ 
+0x00,0x21,0x00,0x2A,0x06,0xD0,0x3A,0x6B,0x01,0x2A,0x39,0xD0,0x02,0x2A,0x37, \ +0xD0,0xB9,0x63,0x07,0xE0,0x3A,0x6B,0x00,0x2A,0x32,0xD0,0x7A,0x6B,0x00,0x2A, \ +0x03,0xD0,0x79,0x63,0x03,0x21,0x39,0x63,0x2B,0xE0,0x39,0x63,0xF9,0x6A,0x1D, \ +0x4B,0x8E,0x00,0x9A,0x59,0x00,0x2A,0x05,0xD0,0x51,0x6A,0x0F,0x62,0x57,0x62, \ +0x79,0x62,0x3A,0x62,0x1E,0xE0,0x9F,0x51,0x3F,0x62,0x7F,0x62,0x17,0x4A,0x3B, \ +0x6C,0x16,0x68,0x33,0x43,0x13,0x60,0x2A,0x68,0x15,0x4B,0x00,0x2A,0x02,0xD1, \ +0x2F,0x60,0x19,0x60,0x0F,0xE0,0x1E,0x68,0xB1,0x42,0x0C,0xD2,0x19,0x60,0xD3, \ +0x6B,0x8B,0x42,0x08,0xD9,0xD1,0x6A,0x8B,0x42,0x04,0xD0,0x0E,0x49,0x12,0x6C, \ +0x0B,0x68,0x1A,0x43,0x0A,0x60,0x2F,0x60,0xFF,0xF7,0xDC,0xFC,0x0B,0x48,0x00, \ +0x68,0x29,0x68,0x88,0x42,0x04,0xD0,0x0A,0x48,0x00,0x68,0x00,0x28,0x00,0xD1, \ +0x01,0x24,0x20,0x1C,0xF0,0xBD,0xE4,0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0x0C, \ +0x0C,0x00,0x02,0xD8,0x03,0x00,0x02,0xE0,0x03,0x00,0x02,0xDC,0x03,0x00,0x02, \ +0xC4,0x03,0x00,0x02,0xD4,0x03,0x00,0x02,0x00,0xA0,0x00,0x47,0x00,0x00,0xA0, \ +0xE3,0x00,0x10,0x0F,0xE1,0x20,0x10,0x81,0xE3,0xF3,0x40,0x2D,0xE9,0xD3,0x20, \ +0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x2C,0x30,0x9F,0xE5,0x00,0x00,0x93,0xE5,0x28, \ +0x20,0x9F,0xE5,0x00,0x10,0x92,0xE5,0x08,0xD0,0x80,0xE5,0x00,0x40,0xA0,0xE3, \ +0x00,0x00,0x51,0xE3,0x02,0x00,0x00,0x0A,0x1C,0x10,0x90,0xE5,0x00,0x40,0x82, \ +0xE5,0x18,0x10,0x80,0xE5,0x00,0x40,0x83,0xE5,0x0A,0xFF,0xFF,0xEA,0xC4,0x03, \ +0x00,0x02,0x4C,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41,0x60,0xF7, \ +0x46,0x00,0x00,0x78,0x04,0x00,0x02,0x90,0xB5,0x07,0x1C,0xC0,0x20,0xFF,0xF7, \ +0x87,0xFC,0xB9,0x69,0x00,0x24,0x00,0x29,0x16,0xD0,0x3A,0x69,0xBA,0x42,0x04, \ +0xD1,0x0A,0x68,0xBA,0x42,0x0F,0xD1,0x0C,0x60,0x0D,0xE0,0x79,0x69,0x51,0x61, \ +0x39,0x69,0x7A,0x69,0x11,0x61,0xB9,0x69,0x0A,0x68,0xBA,0x42,0x04,0xD1,0x3A, \ +0x69,0x91,0x61,0x39,0x69,0xBA,0x69,0x11,0x60,0xBC,0x61,0xFF,0xF7,0x6A,0xFC, \ +0x20,0x1C,0x90,0xBD,0xB0,0xB5,0x07,0x1C,0xC4,0x6E,0xC0,0x20,0xFF,0xF7,0x62, \ +0xFC,0xB9,0x6E,0x00,0x29,0x38,0xD0,0x00,0x2C,0x36,0xD0,0x21,0x68,0x1C,0x4B, \ +0x99,0x42,0x32,0xD1,0x00,0x25,0xBD,0x66,0x39,0x6F,0xB9,0x42,0x01,0xD1,0x25, \ +0x61,0x06,0xE0,0x21,0x61,0x79,0x6F,0x3A,0x6F,0x51,0x67,0x39,0x6F,0x7A,0x6F, \ +0x11,0x67,0x61,0x69,0x01,0x39,0x61,0x61,0x39,0x6B,0x07,0x29,0x10,0xD1,0xFA, \ +0x1D,0x79,0x32,0x51,0x60,0x10,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xFF,0xF7, \ +0x3C,0xFC,0x38,0x1C,0xFF,0xF7,0x0F,0xFF,0x00,0x28,0x04,0xD0,0xFF,0xF7,0x75, \ +0xFF,0x01,0xE0,0xFF,0xF7,0x32,0xFC,0x78,0x6E,0x00,0x28,0x04,0xD0,0xF8,0x1D, \ +0x45,0x30,0xFF,0xF7,0x9F,0xFF,0x00,0xE0,0xFD,0x64,0xC0,0x20,0xFF,0xF7,0x26, \ +0xFC,0xFF,0xF7,0x24,0xFC,0xB0,0xBD,0x00,0x00,0x4E,0x44,0x56,0x44,0xE4,0x03, \ +0x00,0x02,0x80,0xB5,0x07,0x1C,0xC0,0x20,0xFF,0xF7,0x19,0xFC,0x39,0x68,0x00, \ +0x29,0x27,0xD0,0xBA,0x69,0x00,0x2A,0x24,0xD1,0x20,0x29,0x01,0xD9,0x1F,0x21, \ +0x00,0xE0,0x01,0x39,0x11,0x4A,0x89,0x00,0x12,0x68,0x89,0x18,0x10,0x4A,0x12, \ +0x68,0x91,0x42,0x07,0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x0E,0x4A,0x89,0x10, \ +0x12,0x68,0x89,0x00,0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x3A,0x61,0x0A, \ +0x68,0x52,0x69,0x7A,0x61,0x17,0x61,0x0A,0x68,0x57,0x61,0xB9,0x61,0x03,0xE0, \ +0x3F,0x61,0xB9,0x61,0x7F,0x61,0x0F,0x60,0xFF,0xF7,0xEC,0xFB,0x00,0x20,0x80, \ +0xBD,0x5C,0x04,0x00,0x02,0x58,0x04,0x00,0x02,0x54,0x04,0x00,0x02,0xF0,0xB5, \ +0x05,0x1C,0xC0,0x20,0xFF,0xF7,0xDF,0xFB,0x67,0x49,0x67,0x4C,0x0A,0x68,0x67, \ +0x4F,0x01,0x3A,0x0A,0x60,0xAA,0x6B,0x00,0x2A,0x74,0xD0,0x00,0x26,0xAE,0x63, \ +0xEA,0x6A,0x2B,0x6A,0xAB,0x42,0x26,0xD0,0x69,0x6A,0x59,0x62,0x29,0x6A,0x6B, \ 
+0x6A,0x19,0x62,0x91,0x00,0x5F,0x4A,0x53,0x58,0xAB,0x42,0x11,0xD1,0x2B,0x6A, \ +0x53,0x50,0x5D,0x49,0x0B,0x68,0x00,0x2B,0x02,0xD0,0x2E,0x6C,0xB3,0x43,0x0B, \ +0x60,0x5B,0x49,0x0B,0x68,0x9D,0x42,0x04,0xD1,0x5A,0x4B,0x1B,0x68,0x9B,0x00, \ +0xD2,0x58,0x0A,0x60,0xFF,0xF7,0xB2,0xFB,0x55,0x49,0x38,0x68,0x09,0x68,0x88, \ +0x42,0x60,0xD0,0x20,0x68,0x00,0x28,0x5E,0xD1,0x95,0xE0,0x00,0x26,0x4E,0x4B, \ +0x92,0x00,0x9E,0x50,0x2A,0x6C,0xD3,0x43,0x50,0x4A,0x16,0x68,0x33,0x40,0x13, \ +0x60,0x4B,0x4A,0x12,0x68,0x00,0x2A,0x03,0xD0,0x2E,0x6C,0xB2,0x43,0x48,0x4E, \ +0x32,0x60,0x1A,0x06,0x12,0x0E,0x02,0xD0,0x49,0x4B,0x9A,0x5C,0x14,0xE0,0x1B, \ +0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x46,0x4B,0x9A,0x5C,0x08,0x32,0x0C,0xE0, \ +0x1B,0x0A,0x1A,0x06,0x12,0x0E,0x03,0xD0,0x42,0x4B,0x9A,0x5C,0x10,0x32,0x04, \ +0xE0,0x1A,0x0A,0x29,0xD0,0x3F,0x4B,0x9A,0x5C,0x18,0x32,0x3B,0x4B,0x1A,0x60, \ +0x39,0x4A,0x12,0x68,0x95,0x42,0x4D,0xD1,0x38,0x4E,0x35,0x4B,0x32,0x68,0x36, \ +0x4D,0x92,0x00,0x9A,0x58,0x2A,0x60,0x33,0x4A,0x12,0x68,0x00,0x2A,0x42,0xD0, \ +0x0E,0x1C,0x09,0x68,0x01,0x31,0x31,0x60,0xFF,0xF7,0x65,0xFB,0xC0,0x20,0xFF, \ +0xF7,0x62,0xFB,0x00,0xE0,0x42,0xE0,0x31,0x68,0x01,0x39,0x31,0x60,0x2A,0x49, \ +0x0A,0x68,0x11,0x06,0x09,0x0E,0x0D,0xD0,0x2B,0x4B,0x59,0x5C,0x1E,0xE0,0x28, \ +0x4B,0x20,0x21,0x19,0x60,0x25,0x49,0x00,0x26,0x0E,0x60,0xFF,0xF7,0x4D,0xFB, \ +0x38,0xE0,0x39,0xE0,0x38,0xE0,0x13,0x0A,0x19,0x06,0x09,0x0E,0x03,0xD0,0x22, \ +0x4B,0x59,0x5C,0x08,0x31,0x0B,0xE0,0x1B,0x0A,0x19,0x06,0x09,0x0E,0x03,0xD0, \ +0x1E,0x4B,0x59,0x5C,0x10,0x31,0x03,0xE0,0x19,0x0A,0x1C,0x4B,0x59,0x5C,0x18, \ +0x31,0x15,0x4B,0x89,0x00,0x59,0x58,0x17,0x4E,0xCB,0x6B,0x36,0x68,0xB3,0x42, \ +0x05,0xD8,0x29,0x60,0x09,0x6C,0x11,0x4B,0xC9,0x43,0x11,0x40,0x19,0x60,0xFF, \ +0xF7,0x26,0xFB,0x0F,0x4A,0x38,0x68,0x11,0x68,0x88,0x42,0x0F,0xD0,0x20,0x68, \ +0x00,0x28,0x0C,0xD1,0x09,0xE0,0xFF,0xF7,0x1B,0xFB,0x0A,0x49,0x38,0x68,0x09, \ +0x68,0x88,0x42,0x04,0xD0,0x20,0x68,0x00,0x28,0x01,0xD1,0xFF,0xF7,0x51,0xFE, \ +0xF0,0xBD,0xE4,0x03,0x00,0x02,0xD4,0x03,0x00,0x02,0xC4,0x03,0x00,0x02,0x0C, \ +0x0C,0x00,0x02,0xDC,0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0xE0,0x03,0x00,0x02, \ +0xD8,0x03,0x00,0x02,0x0C,0x0B,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41, \ +0x60,0xF7,0x46,0x00,0x00,0x80,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60, \ +0x41,0x60,0xF7,0x46,0x00,0x00,0x88,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01, \ +0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x90,0x04,0x00,0x02,0x02,0x48,0x00,0x21, \ +0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x98,0x04,0x00,0x02,0xBC,0x46,0x03, \ +0x1C,0x08,0x43,0x80,0x07,0x13,0xD1,0x12,0x1F,0x05,0xD3,0x01,0xCB,0x80,0xC9, \ +0xC0,0x1B,0x04,0xD1,0x12,0x1F,0xF9,0xD2,0xD2,0x1C,0x0C,0xD3,0x02,0xE0,0x1B, \ +0x1F,0x09,0x1F,0xD2,0x1C,0x18,0x78,0x0F,0x78,0xC0,0x1B,0x04,0xD1,0x5B,0x1C, \ +0x49,0x1C,0x52,0x1E,0xF7,0xD2,0x00,0x20,0x67,0x46,0xF7,0x46,0x43,0x1A,0x93, \ +0x42,0x30,0xD3,0x84,0x46,0x8B,0x07,0x07,0xD0,0x52,0x1E,0x29,0xD3,0x0B,0x78, \ +0x03,0x70,0x40,0x1C,0x49,0x1C,0x8B,0x07,0xF7,0xD1,0x83,0x07,0x17,0xD1,0x10, \ +0x3A,0x05,0xD3,0xB0,0xB4,0xB8,0xC9,0xB8,0xC0,0x10,0x3A,0xFB,0xD2,0xB0,0xBC, \ +0x0C,0x32,0x0F,0xD3,0x08,0xC9,0x08,0xC0,0x12,0x1F,0xFB,0xD2,0x0A,0xE0,0x08, \ +0xC9,0x03,0x70,0x1B,0x0A,0x43,0x70,0x1B,0x0A,0x83,0x70,0x1B,0x0A,0xC3,0x70, \ +0x00,0x1D,0x12,0x1F,0xF4,0xD2,0xD2,0x1C,0x05,0xD3,0x0B,0x78,0x03,0x70,0x49, \ +0x1C,0x40,0x1C,0x52,0x1E,0xF9,0xD2,0x60,0x46,0xF7,0x46,0x03,0x1C,0x0B,0x43, \ +0x13,0x43,0x9B,0x07,0x04,0xD1,0x12,0x1F,0x8B,0x58,0x83,0x50,0xFB,0xD1,0xF7, \ +0x46,0x52,0x1E,0x8B,0x5C,0x83,0x54,0xFB,0xD1,0xF7,0x46,0x00,0x00,0x4B,0x08, \ 
+0x02,0x1C,0x02,0xD1,0x00,0xF0,0x79,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9,0x00, \ +0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91,0x42, \ +0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0xF7,0x46,0x00, \ +0x00,0xCB,0x17,0x59,0x40,0xC9,0x1A,0xC2,0x17,0x50,0x40,0x80,0x1A,0x0C,0xB4, \ +0x4B,0x08,0x02,0x1C,0x02,0xD1,0x00,0xF0,0x5A,0xF8,0x52,0x00,0x9A,0x42,0xFC, \ +0xD9,0x00,0x23,0x91,0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08, \ +0x91,0x42,0xF9,0xD3,0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0x0C, \ +0xBC,0x5A,0x40,0x50,0x40,0x80,0x1A,0x59,0x40,0xC9,0x1A,0x70,0x47,0xB0,0xB5, \ +0x0D,0x48,0x42,0x6E,0x03,0x6E,0x97,0x00,0xC1,0x1F,0x75,0x39,0x9C,0x00,0x0C, \ +0x59,0xCD,0x59,0x2C,0x19,0xCC,0x51,0x59,0x1E,0x36,0x23,0x00,0x29,0x01,0x66, \ +0x03,0xDA,0x51,0x1E,0x41,0x66,0x03,0x66,0x03,0xE0,0x51,0x1E,0x41,0x66,0x00, \ +0xD5,0x43,0x66,0x60,0x00,0x40,0x08,0xB0,0xBD,0x1C,0x05,0x00,0x02,0x80,0xB5, \ +0x09,0x49,0x17,0x22,0x0A,0x66,0x36,0x22,0x4A,0x66,0x07,0x4A,0x00,0x21,0x03, \ +0x0C,0x1F,0x18,0x8B,0x00,0xD7,0x50,0x05,0x4B,0x01,0x31,0x58,0x43,0x05,0x4B, \ +0xC0,0x18,0x37,0x29,0xF4,0xDB,0x80,0xBD,0x1C,0x05,0x00,0x02,0xA0,0x04,0x00, \ +0x02,0xCD,0x0D,0x01,0x00,0xE1,0x19,0xD6,0x66,0x00,0x47,0x08,0x47,0x10,0x47, \ +0x18,0x47,0x20,0x47,0x28,0x47,0x30,0x47,0x38,0x47,0x78,0x47,0x00,0x00,0x2C, \ +0xC0,0x9F,0xE5,0xFF,0x7F,0x8C,0xE8,0x3C,0xC0,0x8C,0xE2,0x0C,0x00,0x8F,0xE2, \ +0x3C,0x10,0x4C,0xE2,0x04,0xE0,0x4E,0xE2,0x00,0xE0,0x8C,0xE5,0x74,0xEC,0xFF, \ +0xEA,0x20,0x00,0x00,0x80,0x44,0x69,0x76,0x69,0x64,0x65,0x20,0x62,0x79,0x20, \ +0x7A,0x65,0x72,0x6F,0x00,0x00,0x84,0x05,0x00,0x02,0x78,0x47,0x00,0x00,0x01, \ +0xE0,0x8E,0xE3,0x04,0x00,0xA0,0xE3,0x00,0x00,0x90,0xE5,0xFF,0x04,0x00,0xE2, \ +0xEA,0x04,0x50,0xE3,0x01,0x00,0xA0,0x03,0x00,0x00,0xA0,0x13,0x1E,0xFF,0x2F, \ +0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47, \ +0x00,0x00,0x01,0xE0,0x8E,0xE3,0x05,0x0B,0x40,0xE2,0x1B,0x0B,0x50,0xE3,0x01, \ +0x00,0xA0,0x33,0x00,0x00,0xA0,0x23,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00, \ +0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E, \ +0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x00,0x00, \ +0xA0,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x00, \ +0x00,0x8F,0xE2,0x1E,0xFF,0x2F,0xE1,0x55,0x6E,0x6B,0x6E,0x6F,0x77,0x6E,0x20, \ +0x45,0x72,0x72,0x6F,0x72,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x42,0x72,0x61, \ +0x6E,0x63,0x68,0x20,0x54,0x68,0x72,0x6F,0x75,0x67,0x68,0x20,0x5A,0x65,0x72, \ +0x6F,0x00,0x01,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65,0x64, \ +0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E,0x00,0x00,0x00, \ +0x02,0x00,0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65,0x64,0x20,0x53, \ +0x57,0x49,0x20,0x49,0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E,0x00, \ +0x00,0x00,0x03,0x00,0x02,0x00,0x50,0x72,0x65,0x66,0x65,0x74,0x63,0x68,0x20, \ +0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x04,0x00,0x02,0x00,0x44,0x61,0x74,0x61, \ +0x20,0x41,0x62,0x6F,0x72,0x74,0x00,0x00,0x05,0x00,0x02,0x00,0x41,0x64,0x64, \ +0x72,0x65,0x73,0x73,0x20,0x45,0x78,0x63,0x65,0x70,0x74,0x69,0x6F,0x6E,0x00, \ +0x00,0x00,0x06,0x00,0x02,0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65,0x64, \ +0x20,0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x07,0x00,0x02,0x00, \ +0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65,0x64,0x20,0x46,0x61,0x73,0x74,0x20, \ +0x49,0x6E,0x74,0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x00,0x00,0x00,0x30,0x50, \ +0x00,0x00,0x48,0x50,0x00,0x00,0x64,0x50,0x00,0x00,0x84,0x50,0x00,0x00,0x98, \ 
+0x50,0x00,0x00,0xA8,0x50,0x00,0x00,0xC0,0x50,0x00,0x00,0xD8,0x50,0x00,0x00, \ +0x28,0x20,0x4F,0xE2,0x00,0x01,0x92,0xE7,0x0C,0xEC,0xFF,0xEA,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x40,0x2D,0xE9,0x50,0x00,0x9F,0xE5,0x50,0x10,0x9F,0xE5,0x01,0x20, \ +0xA0,0xE1,0x4C,0x40,0x9F,0xE5,0x04,0x20,0x82,0xE0,0x05,0x00,0x00,0xEB,0x44, \ +0x20,0x9F,0xE5,0x44,0x00,0x9F,0xE5,0x00,0x10,0xA0,0xE1,0x02,0x10,0x81,0xE0, \ +0x05,0x00,0x00,0xEB,0x00,0x80,0xBD,0xE8,0x02,0x00,0x51,0xE1,0x04,0x30,0x90, \ +0x34,0x04,0x30,0x81,0x34,0xFB,0xFF,0xFF,0x3A,0x0E,0xF0,0xA0,0xE1,0x00,0x20, \ +0xA0,0xE3,0x01,0x00,0x50,0xE1,0x04,0x20,0x80,0x34,0xFB,0xFF,0xFF,0x3A,0x0E, \ +0xF0,0xA0,0xE1,0xE0,0x51,0x00,0x00,0x00,0x00,0x00,0x02,0xC4,0x05,0x00,0x00, \ +0xD8,0x07,0x00,0x00,0xC4,0x05,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x0A,0x00,0x90,0x00, \ +0x30,0x00,0x08,0x06,0x07,0x00,0x82,0x84,0x8B,0x96,0x09,0x04,0x02,0x41,0xFA, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x04,0xAC,0x6C, \ +0x32,0x70,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x64,0x00,0x30,0x75,0x64,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04, \ +0x03,0x00,0x04,0xAC,0x6C,0x32,0x70,0x55,0x4E,0x48,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x00,0x00,0x00,0x00, \ +0x45,0x55,0x00,0x00,0x00,0x00,0x00,0xFA,0x00,0x00,0x00,0xFA,0x00,0x00,0x2A, \ +0x09,0x2A,0x09,0x1F,0x00,0xFF,0x00,0x08,0x08,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x02,0x00,0x41,0x54,0x4D,0x45,0x4C,0x5F,0x41,0x50,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x01,0x00,0x05, \ +0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x01,0x01,0x01,0x65,0x00,0x54, \ +0x1E,0x1E,0x1E,0x1E,0x00,0x00,0x28,0x28,0x28,0x00,0x00,0x32,0x3C,0x46,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00, \ +0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01, \ +0x00,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x00,0x01,0x01,0x00, \ +0x00,0x01,0x01,0x00,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01, \ +0x01,0x00,0x01,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x04,0x08,0x10, \ 
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xD4,0x01,0x00, \ +0x02,0x00,0x00,0x00,0x07,0x03,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x12,0x01,0x10,0x01,0x00,0x00,0x00,0x08,0xEB,0x03,0x05,0x76, \ +0x00,0x01,0x00,0x00,0x00,0x01,0x09,0x02,0x20,0x00,0x01,0x01,0x00,0x80,0xFA, \ +0x09,0x04,0x00,0x00,0x02,0xFF,0x00,0xFF,0x00,0x07,0x05,0x85,0x02,0x40,0x00, \ +0x00,0x07,0x05,0x02,0x02,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x07,0xFF,0x07,0xFF, \ +0x1F,0x00,0x06,0x00,0x1E,0x00,0x20,0xFF,0x3F,0xFC,0x01,0x7C,0x00,0xF8,0x00, \ +0x01,0x01,0x01,0x0A,0x0A,0x0E,0x01,0x03,0x03,0x04,0x00,0x00,0x00,0x00,0x00, \ +0x00,0xAA,0xAA,0x03,0x00,0x00,0x00,0xAA,0xAA,0x03,0x00,0x00,0xF8,0x37,0x81, \ +0xF3,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03, \ +0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0xD8,0x05,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00, \ +0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00, \ +0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04, \ +0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0xF6,0x07,0x00,0x00, \ +0xFB,0x07,0x00,0x00,0x00,0x08,0x00,0x00,0x05,0x08,0x00,0x00,0x0A,0x08,0x00, \ +0x00,0x0F,0x08,0x00,0x00,0x14,0x08,0x00,0x00,0x19,0x08,0x00,0x00,0x1E,0x08, \ +0x00,0x00,0x23,0x08,0x00,0x00,0x28,0x08,0x00,0x00,0x2D,0x08,0x00,0x00,0x32, \ +0x08,0x00,0x00,0x3E,0x08,0x00,0x00,0x43,0x6F,0x70,0x79,0x72,0x69,0x67,0x68, \ +0x74,0x20,0x28,0x63,0x29,0x20,0x31,0x39,0x39,0x36,0x2D,0x32,0x30,0x30,0x30, \ +0x20,0x45,0x78,0x70,0x72,0x65,0x73,0x73,0x20,0x4C,0x6F,0x67,0x69,0x63,0x20, \ +0x49,0x6E,0x63,0x2E,0x20,0x2A,0x20,0x54,0x68,0x72,0x65,0x61,0x64,0x58,0x20, \ +0x54,0x48,0x55,0x4D,0x42,0x2D,0x46,0x2F,0x41,0x52,0x4D,0x20,0x56,0x65,0x72, \ +0x73,0x69,0x6F,0x6E,0x20,0x47,0x33,0x2E,0x30,0x66,0x2E,0x33,0x2E,0x30,0x62, \ +0x20,0x2A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xF0,0xF0,0xF0,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x47,0x2D,0x47,0x42,0x2D,0x47,0x4C,0x2D,0x4D,0x2D,0x44,0x2D,0x44,0x4C,0x2D, \ +0x4B,0x4D,0x4C,0x2D,0x43,0x4D,0x52,0x2D,0x48,0x4D,0x52,0x2D,0x4D,0x4C,0x32, \ +0x2D,0x47,0x5A,0x2D,0x4B,0x48,0x32,0x2D,0x43,0x4D,0x2D,0x52,0x50,0x2D,0x54, \ +0x43,0x2D,0x4E,0x48,0x2D,0x54,0x44,0x2D,0x41,0x50,0x2D,0x48,0x41,0x2D,0x47, \ +0x46,0x2D,0x44,0x44,0x2D,0x41,0x54,0x2D,0x4D,0x46,0x2D,0x4D,0x53,0x2D,0x44, \ +0x57,0x2D,0x55,0x53,0x41,0x2D,0x43,0x41,0x2D,0x53,0x44,0x2D,0x53,0x44,0x53, \ +0x55,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ 
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x85,0x8E,0xD7,0x66,0x09,0x8C,0xD3, \ +0xD5,0xF5,0xD8,0x09,0x0A,0xFB,0x87,0x1F,0xBF,0x67,0xF7,0x8D,0xCB,0x69,0x07, \ +0xF7,0xBD,0x34,0x12,0x3D,0x50,0xC8,0x84,0x4F,0x7F,0xA3,0x02,0xDE,0x61,0xAE, \ +0x8D,0x40,0xA7,0xE8,0xBD,0x24,0x7A,0xEA,0xA2,0x15,0x51,0x57,0x2E,0xE6,0xBB, \ +0xFF,0x7F,0xD5,0xF6,0x7A,0x83,0x2A,0x63,0x77,0x1D,0x86,0x13,0x7C,0x2E,0x9F, \ +0xE1,0x05,0x57,0x5F,0x69,0x2E,0x6B,0x93,0x87,0x6E,0x9A,0xA1,0x50,0x94,0x0E, \ +0x8B,0x72,0xAE,0x55,0xCC,0xC5,0xB1,0x8A,0x0A,0xB1,0xD7,0x72,0x6F,0x85,0x17, \ +0x5C,0x22,0xD0,0xA3,0xFD,0xC4,0x51,0x61,0x98,0xED,0x89,0x9F,0x82,0xDB,0xF1, \ +0x9D,0xC5,0xFB,0xBC,0x89,0xC1,0xEE,0x83,0x59,0xB1,0x59,0x63,0x30,0x5C,0x50, \ +0xCC,0xC9,0x5A,0xBC,0x9C,0xF9,0x30,0xE2,0x2F,0x42,0x5E,0xF6,0x39,0xD2,0x7B, \ +0x15,0x75,0xFB,0x58,0xC1,0x40,0x3E,0x9A,0xEB,0x27,0xD9,0xA2,0x82,0xC5,0xC2, \ +0xD6,0x69,0x05,0xB3,0x30,0x8E,0xED,0xD2,0xDD,0x83,0x10,0x41,0xA4,0x1D,0x1F, \ +0x15,0xE2,0x60,0x56,0xC5,0x2F,0xF3,0x04,0x99,0xEF,0x8E,0xE1,0x08,0x32,0x59, \ +0x4A,0x4C,0xED,0x7B,0x5B,0x40,0xFC,0x02,0x81,0xD9,0x41,0x53,0x51,0xFA,0x3D, \ +0xFF,0xAC,0xB5,0x6C,0x09,0x6D,0x1D,0xCC,0xB3,0x2B,0xFF,0x15,0x3D,0x25,0x17, \ +0x00,0x00,0x00,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00} + +#define FW_503RFMD_EXTERNAL { \ +0x80,0xB5,0x10,0x49,0x00,0x20,0x08,0x70,0x0F,0x48,0x81,0x79,0xC0,0x79,0x00, \ +0x02,0x08,0x43,0x68,0x28,0x04,0xD0,0x03,0x21,0x0B,0x20,0x00,0xF0,0x2D,0xFD, \ +0x80,0xBD,0x08,0x21,0x0B,0x20,0x00,0xF0,0x28,0xFD,0xC0,0x20,0xFE,0xF7,0x97, \ +0xF9,0x07,0x1C,0x00,0xF0,0xDC,0xFA,0x38,0x1C,0xFE,0xF7,0x91,0xF9,0x01,0x21, \ +0x0B,0x20,0x00,0xF0,0x1B,0xFD,0x80,0xBD,0x9C,0x01,0x00,0x02,0xD0,0x09,0x00, \ +0x02,0xF8,0xB5,0x35,0x4F,0x35,0x4E,0x38,0x78,0x35,0x4D,0x0A,0x28,0x59,0xD2, \ +0x02,0xA3,0x1B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x55,0x05,0x09,0x0D,0x55, \ +0x11,0x14,0x18,0x55,0x1B,0xB8,0x78,0x2F,0x49,0x44,0x18,0x14,0xE0,0xB8,0x78, \ +0x2E,0x49,0x44,0x18,0x10,0xE0,0xB8,0x78,0x2D,0x49,0x44,0x18,0x0C,0xE0,0xB8, \ +0x78,0x44,0x19,0x09,0xE0,0xB8,0x78,0x2B,0x49,0x44,0x18,0x05,0xE0,0xB8,0x78, \ +0x84,0x19,0x02,0xE0,0xB8,0x78,0x28,0x49,0x44,0x18,0x00,0x2C,0x39,0xD0,0xC0, \ +0x20,0xFE,0xF7,0x59,0xF9,0x00,0x90,0xE8,0x1D,0x1E,0x4E,0x29,0x30,0x45,0x7A, \ +0x36,0x7E,0x7A,0x78,0x00,0x21,0x00,0x2A,0x07,0xD9,0x7A,0x18,0x12,0x79,0x01, \ +0x31,0x22,0x70,0x7A,0x78,0x01,0x34,0x8A,0x42,0xF7,0xD8,0x1D,0x49,0x09,0x68, \ +0x00,0x29,0x11,0xD0,0x40,0x7A,0x00,0x28,0x0E,0xD1,0x00,0x2D,0x04,0xD0,0x19, \ +0x49,0x1A,0x48,0x0E,0x22,0xFE,0xF7,0x79,0xFE,0x0F,0x48,0x00,0x7E,0xB0,0x42, \ +0x01,0xD1,0x00,0x2D,0x01,0xD0,0x03,0xF0,0xCF,0xF9,0x02,0xF0,0xB1,0xFE,0x00, \ +0x98,0xFE,0xF7,0x2C,0xF9,0x01,0x21,0x01,0x20,0x00,0xF0,0xB6,0xFC,0xF8,0xBD, \ +0x04,0x21,0x01,0x20,0x00,0xF0,0xB1,0xFC,0xF9,0xE7,0x03,0x21,0x01,0x20,0x00, \ +0xF0,0xAC,0xFC,0xF4,0xE7,0x00,0x00,0xD8,0x09,0x00,0x02,0x00,0x00,0x00,0x02, \ +0x84,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x60,0x00,0x00,0x02,0xC4,0x00,0x00, \ 
+0x02,0x1C,0x00,0x00,0x02,0x18,0x01,0x00,0x02,0xC4,0x02,0x00,0x02,0xA7,0x0A, \ +0x00,0x02,0xFC,0x0A,0x00,0x02,0xF0,0xB5,0x83,0xB0,0x62,0x4D,0xEF,0x1D,0x19, \ +0x37,0xB8,0x79,0x03,0x28,0x1B,0xD0,0x60,0x49,0x0E,0x20,0x08,0x83,0x60,0x48, \ +0x60,0x39,0x02,0x90,0x40,0x7A,0xCC,0x1D,0xCE,0x1D,0x49,0x36,0x69,0x34,0x00, \ +0x28,0x40,0xD0,0xF8,0x7B,0x5B,0x49,0x00,0x28,0x29,0xD0,0xF8,0x79,0x01,0x28, \ +0x01,0xDB,0x0E,0x28,0x16,0xDD,0x03,0x21,0x03,0x20,0x00,0xF0,0x72,0xFC,0x03, \ +0xB0,0xF0,0xBD,0xF8,0x79,0x01,0x28,0x01,0xDB,0x0E,0x28,0x04,0xDD,0x03,0x21, \ +0x03,0x20,0x00,0xF0,0x67,0xFC,0xF3,0xE7,0x03,0xF0,0xE8,0xFA,0x01,0x21,0x03, \ +0x20,0x00,0xF0,0x60,0xFC,0xEC,0xE7,0x00,0x20,0x00,0x22,0x0B,0x18,0x9A,0x73, \ +0x0A,0x54,0x01,0x30,0x00,0x04,0x00,0x0C,0x0E,0x28,0xF7,0xDB,0xE2,0x71,0x01, \ +0x22,0xF2,0x71,0x29,0xE0,0xF8,0x79,0x41,0x18,0x49,0x7B,0x00,0x29,0x0A,0xD1, \ +0x02,0xF0,0xD2,0xF9,0x00,0x06,0x00,0x0E,0xF8,0x71,0x04,0xD1,0x03,0x21,0x03, \ +0x20,0x00,0xF0,0x42,0xFC,0xCE,0xE7,0x00,0x22,0xF2,0x71,0x16,0xE0,0xE0,0x7A, \ +0x3C,0x49,0x40,0x00,0x08,0x5A,0xF9,0x79,0x01,0x22,0x01,0x91,0x01,0x39,0x8A, \ +0x40,0x10,0x40,0x0B,0xD1,0x01,0x98,0x02,0xF0,0xB8,0xF9,0xF8,0x71,0xF8,0x79, \ +0x00,0x28,0x04,0xD1,0x03,0x21,0x03,0x20,0x00,0xF0,0x28,0xFC,0xB4,0xE7,0xC0, \ +0x20,0xFE,0xF7,0x96,0xF8,0x00,0x90,0x04,0x20,0xFC,0xF7,0x54,0xF8,0x2E,0x49, \ +0x00,0x20,0x0A,0x5C,0x2E,0x4B,0x1B,0x18,0x01,0x30,0x00,0x04,0x00,0x0C,0x04, \ +0x28,0x1A,0x74,0xF6,0xDB,0x2B,0x48,0x2B,0x49,0x00,0x88,0x00,0x23,0x0E,0x22, \ +0x08,0x80,0x04,0x21,0x29,0x48,0x01,0xF0,0xC7,0xFE,0x01,0x21,0xB1,0x71,0x28, \ +0x48,0x04,0x21,0x01,0x75,0x00,0x20,0xA0,0x72,0x06,0x22,0x29,0x1C,0x25,0x48, \ +0xFE,0xF7,0xB1,0xFD,0xA9,0x1D,0x20,0x22,0x24,0x48,0xFE,0xF7,0xAC,0xFD,0xB8, \ +0x7B,0x23,0x49,0x48,0x71,0x00,0x20,0x70,0x70,0x02,0x99,0x20,0x23,0x88,0x71, \ +0xB8,0x79,0x20,0x49,0x88,0x74,0xF8,0x79,0xC8,0x74,0x28,0x8D,0x88,0x82,0x68, \ +0x8D,0xC8,0x82,0xA8,0x8D,0x08,0x83,0x1C,0x48,0x01,0x78,0x19,0x43,0x01,0x70, \ +0x01,0x21,0xE1,0x70,0x02,0x98,0x41,0x71,0xA0,0x78,0x01,0x28,0x02,0xD1,0x00, \ +0x20,0x03,0xF0,0xE9,0xFA,0x00,0x20,0xA0,0x70,0x00,0x98,0xFE,0xF7,0x46,0xF8, \ +0x01,0x20,0x30,0x70,0x08,0x21,0x03,0x20,0x00,0xF0,0xCE,0xFB,0x5A,0xE7,0x00, \ +0x00,0xD8,0x09,0x00,0x02,0xB0,0x09,0x00,0x02,0xB4,0x00,0x00,0x02,0x18,0x01, \ +0x00,0x02,0x6C,0x02,0x00,0x02,0x8A,0x02,0x00,0x02,0x00,0x00,0x00,0x02,0x3C, \ +0x01,0x00,0x02,0x84,0x00,0x00,0x02,0x10,0x00,0x00,0x02,0x50,0x09,0x00,0x02, \ +0x00,0x01,0x00,0x02,0xE0,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0xC4,0x00,0x00, \ +0x02,0x9C,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x41,0x49,0x40,0x4E,0x01,0x91, \ +0x48,0x7A,0xF4,0x1D,0x19,0x34,0x00,0x28,0x3F,0x4F,0x13,0xD0,0xF8,0x79,0x00, \ +0x28,0x05,0xD1,0x03,0x21,0x04,0x20,0x00,0xF0,0x9A,0xFB,0x02,0xB0,0xF0,0xBD, \ +0xE0,0x79,0x3A,0x49,0x40,0x18,0x40,0x7B,0x00,0x28,0x13,0xD1,0x03,0x21,0x04, \ +0x20,0x00,0xF0,0x8E,0xFB,0xF2,0xE7,0xF8,0x7A,0x35,0x49,0x40,0x00,0x08,0x5A, \ +0xE1,0x79,0x01,0x22,0x01,0x39,0x8A,0x40,0x10,0x40,0x04,0xD1,0x03,0x21,0x04, \ +0x20,0x00,0xF0,0x7F,0xFB,0xE3,0xE7,0xC0,0x20,0xFD,0xF7,0xED,0xFF,0x00,0x90, \ +0xA0,0x79,0x2D,0x4D,0x02,0x28,0x02,0xD1,0x03,0x20,0xA8,0x71,0x03,0xE0,0x01, \ +0x28,0x40,0xD1,0x04,0x20,0xA8,0x71,0x04,0x20,0xFB,0xF7,0xA0,0xFF,0x27,0x49, \ +0x00,0x20,0x88,0x70,0xA0,0x79,0x26,0x49,0x06,0x22,0x88,0x70,0x08,0x1F,0x31, \ +0x1C,0xFE,0xF7,0x13,0xFD,0xB1,0x1D,0x20,0x22,0x23,0x48,0xFE,0xF7,0x0E,0xFD, \ +0xA0,0x7A,0x1F,0x49,0x48,0x71,0x00,0x20,0x68,0x70,0x01,0x99,0x88,0x71,0x08, \ +0x21,0x04,0x20,0x00,0xF0,0x51,0xFB,0x01,0x20,0xF8,0x70,0x01,0x99,0x48,0x71, \ 
+0xB8,0x78,0x01,0x28,0x02,0xD1,0x00,0x20,0x03,0xF0,0x57,0xFA,0x00,0x20,0xB8, \ +0x70,0x17,0x48,0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78,0x10,0x23, \ +0x99,0x43,0x01,0x70,0x00,0x98,0xFD,0xF7,0xAB,0xFF,0x30,0x8D,0x81,0x02,0x04, \ +0x20,0xFB,0xF7,0x4E,0xFF,0xE0,0x79,0x03,0xF0,0xB5,0xF9,0x95,0xE7,0x03,0x21, \ +0x04,0x20,0x00,0xF0,0x2C,0xFB,0x00,0x98,0xFD,0xF7,0x9B,0xFF,0x8D,0xE7,0xD8, \ +0x09,0x00,0x02,0xB4,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0x18,0x01,0x00,0x02, \ +0x6C,0x02,0x00,0x02,0xA0,0x09,0x00,0x02,0x08,0x01,0x00,0x02,0x04,0x01,0x00, \ +0x02,0xE0,0x00,0x00,0x02,0x9C,0x01,0x00,0x02,0xF0,0xB5,0x25,0x48,0x10,0x23, \ +0x01,0x78,0x22,0x4C,0x99,0x43,0x01,0x70,0x01,0x78,0x20,0x23,0x99,0x43,0x01, \ +0x70,0x21,0x48,0x21,0x49,0xC0,0x7A,0x40,0x00,0x09,0x5A,0xE7,0x18,0xF8,0x79, \ +0x01,0x25,0x42,0x1E,0x2B,0x1C,0x93,0x40,0x19,0x40,0x04,0xD1,0x03,0x21,0x05, \ +0x20,0x00,0xF0,0xF8,0xFA,0xF0,0xBD,0xB9,0x79,0x01,0x29,0x04,0xD0,0x03,0x21, \ +0x05,0x20,0x00,0xF0,0xF0,0xFA,0xF0,0xBD,0x03,0xF0,0x71,0xF9,0xC0,0x20,0xFD, \ +0xF7,0x5C,0xFF,0x06,0x1C,0x38,0x7A,0x12,0x4F,0x78,0x71,0x12,0x48,0xC1,0x1D, \ +0x39,0x31,0x8D,0x70,0xA1,0x1D,0x1C,0x30,0x0C,0x1C,0x7A,0x79,0xFE,0xF7,0x8E, \ +0xFC,0x7A,0x79,0x0E,0x4F,0x21,0x1C,0xF8,0x1D,0x0D,0x30,0xFE,0xF7,0x87,0xFC, \ +0x00,0x20,0xF9,0x1D,0x29,0x31,0x88,0x71,0x00,0xF0,0x13,0xF8,0x30,0x1C,0xFD, \ +0xF7,0x3E,0xFF,0xF0,0xBD,0x00,0x00,0xD8,0x09,0x00,0x02,0x9C,0x01,0x00,0x02, \ +0xC0,0x09,0x00,0x02,0x6C,0x02,0x00,0x02,0x08,0x01,0x00,0x02,0xC4,0x00,0x00, \ +0x02,0x84,0x00,0x00,0x02,0xF0,0xB5,0xFA,0xF7,0xF9,0xF8,0xFE,0xF7,0x07,0xFD, \ +0xFA,0xF7,0xF5,0xF8,0x2C,0x4F,0x02,0x21,0xB9,0x73,0x00,0x21,0xF9,0x73,0x38, \ +0x74,0x01,0x0A,0x79,0x74,0x01,0x0C,0x00,0x0E,0xB9,0x74,0x27,0x4E,0xF8,0x74, \ +0xF9,0x1D,0x07,0x31,0xF0,0x1D,0x06,0x22,0x35,0x30,0xFE,0xF7,0x53,0xFC,0x24, \ +0x4C,0x01,0x25,0xF8,0x1D,0x29,0x30,0x25,0x75,0x05,0x71,0x22,0x48,0xF1,0x1D, \ +0x42,0x79,0xF8,0x1D,0x0D,0x30,0x15,0x31,0xFE,0xF7,0x45,0xFC,0x1F,0x48,0x1F, \ +0x4A,0x00,0x21,0x53,0x5C,0x46,0x18,0x01,0x31,0x04,0x29,0x33,0x74,0xF9,0xD3, \ +0x1C,0x49,0x00,0x23,0x09,0x88,0x39,0x80,0x02,0x7D,0x04,0x21,0x10,0x30,0x01, \ +0xF0,0x3D,0xFD,0x19,0x48,0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78, \ +0x10,0x23,0x19,0x43,0x01,0x70,0x10,0x48,0x85,0x70,0xFB,0xF7,0x6A,0xFE,0x39, \ +0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0xFB,0xF7,0x88,0xFE,0xE0,0x1D,0x49,0x30, \ +0x45,0x70,0x05,0x21,0x81,0x71,0x0E,0x48,0x01,0x68,0x0E,0x48,0xC2,0x69,0x11, \ +0x43,0xC1,0x61,0x0D,0x48,0x01,0x21,0x05,0x70,0x05,0x20,0x00,0xF0,0x5D,0xFA, \ +0xF0,0xBD,0x84,0x00,0x00,0x02,0xC4,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0x08, \ +0x01,0x00,0x02,0x00,0x00,0x00,0x02,0x8A,0x02,0x00,0x02,0x3C,0x01,0x00,0x02, \ +0x9C,0x01,0x00,0x02,0xAC,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x3E,0x01,0x00, \ +0x02,0xF0,0xB5,0x54,0x4F,0x54,0x4E,0xFC,0x1D,0xF9,0x1D,0x09,0x31,0x59,0x34, \ +0x0D,0x1C,0xF0,0x1D,0x0D,0x30,0x22,0x79,0xFE,0xF7,0xEB,0xFB,0x22,0x79,0x29, \ +0x1C,0x4F,0x48,0xFE,0xF7,0xE6,0xFB,0x20,0x79,0x4E,0x49,0x4E,0x4A,0x48,0x71, \ +0xB8,0x7B,0x00,0x28,0x03,0xD1,0x10,0x70,0xF0,0x72,0x50,0x70,0x08,0xE0,0x01, \ +0x20,0x10,0x70,0xF0,0x72,0xF8,0x7B,0xD1,0x1D,0x39,0x31,0x50,0x70,0xF8,0x78, \ +0x08,0x70,0x00,0x25,0x0D,0x20,0x68,0x43,0xC1,0x19,0x43,0x4A,0x30,0x31,0x80, \ +0x18,0x0D,0x22,0x0C,0x30,0xFE,0xF7,0xC7,0xFB,0x01,0x35,0x04,0x2D,0xF2,0xD3, \ +0x60,0x79,0x00,0x28,0x03,0xD0,0x3C,0x49,0x01,0x20,0x48,0x72,0x02,0xE0,0x3A, \ +0x49,0x00,0x20,0x48,0x72,0x78,0x7B,0x3A,0x49,0x0E,0x28,0x02,0xDC,0x01,0x28, \ +0x00,0xDB,0x08,0x75,0xB8,0x78,0x37,0x4A,0x10,0x74,0x38,0x7B,0x01,0x28,0x02, \ 
+0xD1,0x32,0x4B,0xD8,0x70,0x02,0xE0,0x30,0x4B,0x00,0x20,0xD8,0x70,0xF8,0x88, \ +0x10,0x81,0xB8,0x88,0x50,0x81,0x38,0x78,0x2D,0x4A,0xD0,0x70,0xE0,0x88,0x2F, \ +0x4A,0x30,0x80,0x00,0x20,0x3B,0x18,0x1C,0x7A,0x0D,0x18,0x2C,0x74,0x1B,0x7A, \ +0x13,0x54,0x01,0x30,0x04,0x28,0xF6,0xD3,0x30,0x88,0x29,0x4A,0x00,0x23,0x10, \ +0x80,0xC8,0x1D,0x09,0x30,0x0F,0x1C,0x0E,0x22,0x04,0x21,0x01,0xF0,0x92,0xFC, \ +0x00,0xF0,0xFE,0xF8,0x24,0x4C,0x25,0x49,0xE0,0x1D,0x69,0x30,0xC0,0x7A,0x08, \ +0x5C,0x38,0x75,0x23,0x4F,0x38,0x78,0x02,0x28,0x28,0xD1,0x02,0xF0,0x09,0xFC, \ +0x03,0xF0,0xF8,0xF9,0x17,0x49,0x88,0x78,0x00,0x28,0x07,0xD0,0xFB,0xF7,0xB5, \ +0xFD,0x31,0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0xFB,0xF7,0xD3,0xFD,0x01,0x20, \ +0x00,0xF0,0xD2,0xF9,0x02,0xF0,0xA8,0xFB,0x01,0x20,0xF9,0xF7,0xF5,0xFF,0x01, \ +0x20,0x80,0x06,0x80,0x69,0xFE,0xF7,0xFC,0xFB,0xFB,0xF7,0x96,0xFD,0xFB,0xF7, \ +0x16,0xFA,0xFE,0xF7,0xD8,0xFB,0x80,0x06,0x80,0x0E,0xA0,0x62,0x01,0x20,0x38, \ +0x70,0xF0,0xBD,0x02,0xF0,0x92,0xFB,0xF0,0xBD,0x00,0x00,0xD8,0x09,0x00,0x02, \ +0x84,0x00,0x00,0x02,0xE0,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x1C,0x00,0x00, \ +0x02,0x00,0x00,0x00,0x02,0xC4,0x00,0x00,0x02,0x8A,0x02,0x00,0x02,0x3C,0x01, \ +0x00,0x02,0x50,0x09,0x00,0x02,0x80,0x02,0x00,0x02,0x3F,0x01,0x00,0x02,0x80, \ +0xB5,0x1F,0x49,0x1D,0x4A,0x0F,0x68,0x0E,0x2F,0x27,0xD2,0x01,0xA3,0xDB,0x5D, \ +0x5B,0x00,0x9F,0x44,0x23,0x06,0x0B,0x0E,0x11,0x13,0x16,0x23,0x23,0x1F,0x22, \ +0x23,0x19,0x1C,0x06,0x23,0xFF,0x20,0x01,0x30,0x8B,0x60,0x17,0xE0,0xFF,0x20, \ +0x41,0x30,0x14,0xE0,0xFF,0x20,0x51,0x30,0x11,0xE0,0x0B,0x20,0x0F,0xE0,0xFF, \ +0x20,0x31,0x30,0x0C,0xE0,0xFF,0x20,0x11,0x30,0x09,0xE0,0xFF,0x20,0x21,0x30, \ +0x06,0xE0,0xFF,0x20,0x61,0x30,0x03,0xE0,0xFF,0x20,0x71,0x30,0x00,0xE0,0x00, \ +0x20,0x01,0x23,0x4B,0x60,0x89,0x68,0x00,0xF0,0xDC,0xF9,0x04,0x21,0x0C,0x20, \ +0x00,0xF0,0x4D,0xF9,0x0F,0x20,0x00,0x06,0x81,0x88,0x03,0x4B,0x19,0x43,0x81, \ +0x80,0x80,0xBD,0x64,0x0A,0x00,0x02,0xA0,0x02,0x00,0x02,0x08,0x08,0x00,0x00, \ +0xB0,0xB5,0x0D,0x4D,0x00,0x24,0xE8,0x1D,0x49,0x30,0x0C,0x4F,0x04,0x70,0xF8, \ +0x7C,0x02,0xF0,0xBA,0xFF,0xE8,0x1D,0x69,0x30,0x84,0x72,0x38,0x8B,0x81,0x02, \ +0x04,0x20,0xFB,0xF7,0x48,0xFD,0xB8,0x7C,0x00,0x28,0x03,0xD1,0x01,0x20,0xA8, \ +0x77,0x00,0x05,0xB0,0xBD,0x20,0x1C,0xB0,0xBD,0x00,0x00,0x50,0x09,0x00,0x02, \ +0xC4,0x00,0x00,0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x49,0x32,0x91,0x70,0x01, \ +0x21,0x81,0x77,0x10,0x20,0xF7,0x46,0x00,0x00,0x50,0x09,0x00,0x02,0x03,0x48, \ +0x00,0x21,0xC1,0x73,0x01,0x21,0x81,0x73,0x08,0x07,0xF7,0x46,0x00,0x00,0x60, \ +0x09,0x00,0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x49,0x32,0x51,0x71,0x01,0x21, \ +0x81,0x77,0x08,0x05,0xF7,0x46,0x00,0x00,0x50,0x09,0x00,0x02,0xB0,0xB5,0x04, \ +0x20,0xFB,0xF7,0x2E,0xFD,0x0F,0x48,0xC7,0x1D,0x49,0x37,0xB9,0x79,0x01,0x29, \ +0x16,0xD1,0x03,0x21,0x70,0x30,0x81,0x72,0x00,0x25,0x0B,0x4C,0x7D,0x71,0xE0, \ +0x7C,0x01,0xF0,0x73,0xFE,0x00,0x28,0x07,0xD1,0x3D,0x70,0x02,0x20,0xB8,0x71, \ +0x01,0x21,0x03,0x20,0x00,0xF0,0xE2,0xF8,0xB0,0xBD,0x01,0x21,0x39,0x70,0xE0, \ +0x74,0xB0,0xBD,0x02,0xF0,0x3B,0xFA,0xB0,0xBD,0x50,0x09,0x00,0x02,0xC4,0x00, \ +0x00,0x02,0x16,0x49,0xC9,0x7D,0x40,0x29,0x21,0xD0,0x0E,0xDC,0x30,0x29,0x1A, \ +0xD0,0x05,0xDC,0x10,0x29,0x15,0xD0,0x20,0x29,0x10,0xD1,0x01,0x20,0x0E,0xE0, \ +0x31,0x29,0x13,0xD0,0x32,0x29,0x0A,0xD1,0x04,0x20,0x08,0xE0,0x41,0x29,0x11, \ +0xD0,0x50,0x29,0x11,0xD0,0x51,0x29,0x11,0xD0,0x52,0x29,0x00,0xD1,0x09,0x20, \ +0x09,0x49,0xC8,0x72,0xF7,0x46,0x00,0x20,0xFA,0xE7,0x02,0x20,0xF8,0xE7,0x03, \ +0x20,0xF6,0xE7,0x05,0x20,0xF4,0xE7,0x06,0x20,0xF2,0xE7,0x07,0x20,0xF0,0xE7, \ 
+0x08,0x20,0xEE,0xE7,0x00,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0xF0,0xB5,0x1E, \ +0x4D,0x01,0x24,0x28,0x78,0x01,0x28,0x30,0xD1,0x1C,0x4C,0x1D,0x49,0xE0,0x7A, \ +0x1D,0x4E,0x08,0x5C,0x30,0x75,0xC0,0x20,0xFD,0xF7,0x07,0xFD,0x07,0x1C,0x1A, \ +0x48,0x01,0x78,0x02,0x29,0x06,0xD1,0x01,0x21,0x01,0x70,0x30,0x7D,0x02,0xF0, \ +0x19,0xFE,0x20,0x73,0x0D,0xE0,0x07,0x20,0x40,0x06,0xC1,0x69,0x10,0x23,0x99, \ +0x43,0xC1,0x61,0x13,0x48,0x01,0x21,0x41,0x71,0x00,0x20,0x02,0xF0,0x8D,0xFF, \ +0x00,0x20,0xA0,0x70,0x20,0x7B,0x01,0x28,0x01,0xD1,0x00,0x20,0x28,0x70,0x20, \ +0x7B,0x01,0x21,0x00,0x28,0x00,0xD1,0x05,0x21,0x38,0x1C,0x0C,0x1C,0xFD,0xF7, \ +0xDF,0xFC,0x21,0x06,0x09,0x0E,0x06,0x20,0x00,0xF0,0x68,0xF8,0xF0,0xBD,0x00, \ +0x00,0xBB,0x02,0x00,0x02,0xC0,0x09,0x00,0x02,0x80,0x02,0x00,0x02,0x00,0x00, \ +0x00,0x02,0xBA,0x02,0x00,0x02,0xB4,0x00,0x00,0x02,0x00,0xB5,0x12,0x48,0x01, \ +0x78,0x0D,0x29,0x1A,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C, \ +0x16,0x07,0x16,0x07,0x07,0x07,0x0B,0x0E,0x16,0x16,0x07,0x07,0x07,0x00,0x0B, \ +0x49,0x01,0x20,0x08,0x70,0x00,0xBD,0xFF,0xF7,0x9F,0xFF,0x00,0xBD,0x08,0x49, \ +0x02,0x20,0x08,0x70,0x08,0x21,0x07,0x20,0x00,0xF0,0x3B,0xF8,0x00,0xBD,0x00, \ +0x78,0x02,0x21,0x00,0xF0,0x36,0xF8,0x00,0xBD,0x00,0x00,0xD0,0x09,0x00,0x02, \ +0xDD,0x01,0x00,0x02,0x40,0x01,0x00,0x02,0x00,0xB5,0x15,0x48,0x01,0x78,0x0D, \ +0x29,0x20,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x1C,0x07, \ +0x1C,0x0A,0x0D,0x13,0x1C,0x1C,0x1C,0x1C,0x10,0x16,0x19,0x00,0xFF,0xF7,0x05, \ +0xFB,0x00,0xBD,0xFF,0xF7,0x84,0xFB,0x00,0xBD,0xFF,0xF7,0x69,0xFC,0x00,0xBD, \ +0x00,0xF0,0xD0,0xFD,0x00,0xBD,0xFF,0xF7,0xFB,0xFC,0x00,0xBD,0xFF,0xF7,0xD0, \ +0xFA,0x00,0xBD,0xFF,0xF7,0x85,0xFE,0x00,0xBD,0x00,0x78,0x02,0x21,0x00,0xF0, \ +0x04,0xF8,0x00,0xBD,0x00,0x00,0xD0,0x09,0x00,0x02,0x04,0x4A,0x10,0x70,0x04, \ +0x48,0x01,0x70,0x04,0x48,0x00,0x21,0x01,0x70,0x41,0x70,0xF7,0x46,0x00,0x00, \ +0x5C,0x02,0x00,0x02,0x5D,0x02,0x00,0x02,0xD0,0x09,0x00,0x02,0x04,0x48,0x00, \ +0x21,0xC2,0x1D,0x69,0x32,0x51,0x70,0x01,0x21,0x81,0x77,0x08,0x02,0xF7,0x46, \ +0x00,0x00,0x50,0x09,0x00,0x02,0x80,0xB5,0x0F,0x4F,0x01,0x28,0x03,0xD1,0xF9, \ +0xF7,0x1E,0xFE,0xF8,0x62,0x38,0x63,0x0C,0x48,0x01,0x21,0x80,0x89,0x0C,0x4A, \ +0xB8,0x87,0x39,0x72,0x79,0x72,0x39,0x73,0x00,0x20,0x38,0x74,0x38,0x60,0xB8, \ +0x72,0xF8,0x72,0x10,0x70,0xB9,0x73,0x79,0x60,0x06,0x49,0xCA,0x7A,0x06,0x49, \ +0xCA,0x70,0x88,0x70,0x08,0x70,0x80,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0xC4, \ +0x00,0x00,0x02,0xE8,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0xD0,0x01,0x00,0x02, \ +0xB0,0xB5,0xF3,0x25,0x2D,0x05,0x07,0x1C,0xA8,0x68,0x06,0x20,0xE8,0x60,0x0C, \ +0x1C,0x28,0x69,0x80,0x08,0xFC,0xD3,0x0A,0x20,0xF9,0xF7,0xDA,0xFD,0xA8,0x68, \ +0x78,0x09,0x08,0x23,0x18,0x40,0x02,0x23,0x18,0x43,0xE8,0x60,0x28,0x69,0x80, \ +0x08,0xFC,0xD3,0x38,0x06,0x00,0x0E,0xE8,0x60,0x28,0x69,0x80,0x08,0xFC,0xD3, \ +0xA8,0x68,0x20,0x06,0x00,0x0E,0xE8,0x60,0x28,0x69,0x80,0x08,0xFC,0xD3,0xA8, \ +0x68,0xB0,0xBD,0xF0,0xB5,0x14,0x1C,0x0D,0x1C,0x07,0x1C,0xFD,0xF7,0x1D,0xF8, \ +0x00,0x26,0x00,0x2F,0x10,0xD9,0xFD,0xF7,0x6A,0xF8,0x40,0x08,0xFB,0xD2,0x28, \ +0x20,0xF9,0xF7,0xB1,0xFD,0xA9,0x5D,0xA0,0x19,0xFF,0xF7,0xC5,0xFF,0x28,0x20, \ +0xF9,0xF7,0xAA,0xFD,0x01,0x36,0xBE,0x42,0xEE,0xD3,0xFD,0xF7,0x25,0xF8,0x00, \ +0x20,0xF0,0xBD,0xF7,0xB5,0x85,0xB0,0x62,0x4D,0x60,0x4F,0x28,0x68,0x00,0x28, \ +0x75,0xD0,0x68,0x68,0x01,0x28,0x73,0xD1,0x00,0x21,0x0F,0x20,0x00,0x06,0x81, \ +0x80,0x81,0x81,0x01,0x88,0x00,0x89,0x28,0x68,0x04,0x28,0x1B,0xD1,0x5A,0x4C, \ +0xFC,0xF7,0xEE,0xFF,0x07,0x22,0x03,0x21,0x20,0x1C,0xFD,0xF7,0x50,0xF8,0xFD, \ 
+0xF7,0x05,0xF8,0x20,0x79,0x00,0x28,0x05,0xD1,0x60,0x79,0x00,0x28,0x02,0xD1, \ +0xA0,0x79,0x00,0x28,0x08,0xD0,0x00,0x21,0x21,0x71,0x61,0x71,0xA1,0x71,0x07, \ +0x22,0x03,0x20,0x4E,0x49,0xFF,0xF7,0xB2,0xFF,0x4A,0x49,0x4A,0x4D,0xCA,0x1D, \ +0x28,0x68,0x23,0x32,0x04,0x92,0xCA,0x1D,0x2E,0x32,0x03,0x92,0xCA,0x1D,0x4A, \ +0x32,0x02,0x92,0xCA,0x1D,0xCD,0x1D,0xCC,0x1D,0xCE,0x1D,0x0E,0x31,0x3C,0x32, \ +0x2D,0x36,0x27,0x34,0x15,0x35,0x0A,0x28,0x01,0x92,0x00,0x91,0x38,0xD1,0x0B, \ +0x22,0x04,0x20,0x04,0x99,0xFF,0xF7,0x94,0xFF,0xFF,0x22,0x06,0x20,0x21,0x1C, \ +0x01,0x32,0xFF,0xF7,0x8E,0xFF,0xFF,0x22,0x0E,0x20,0x29,0x1C,0x11,0x32,0xFF, \ +0xF7,0x88,0xFF,0xFF,0x22,0x01,0x20,0x31,0x1C,0x31,0x32,0xFF,0xF7,0x82,0xFF, \ +0xFF,0x22,0x0E,0x20,0x39,0x1C,0x41,0x32,0xFF,0xF7,0x7C,0xFF,0xFF,0x22,0x0E, \ +0x20,0x51,0x32,0x00,0x99,0xFF,0xF7,0x76,0xFF,0xFF,0x22,0x0E,0x20,0x71,0x32, \ +0x01,0x99,0xFF,0xF7,0x70,0xFF,0xFF,0x22,0x0E,0x20,0x21,0x32,0x02,0x99,0xFF, \ +0xF7,0x6A,0xFF,0xFF,0x22,0x0E,0x20,0x61,0x32,0x03,0x99,0x01,0xE0,0x44,0xE0, \ +0x43,0xE0,0xFF,0xF7,0x61,0xFF,0x04,0xE0,0x06,0x98,0x05,0x9A,0x39,0x1C,0xFF, \ +0xF7,0x5B,0xFF,0xFC,0xF7,0x7C,0xFF,0x06,0x22,0xFF,0x21,0x20,0x1C,0x01,0x31, \ +0xFC,0xF7,0xDD,0xFF,0x04,0x22,0x0B,0x21,0x04,0x98,0xFC,0xF7,0xD8,0xFF,0x0E, \ +0x22,0xFF,0x21,0x28,0x1C,0x11,0x31,0xFC,0xF7,0xD2,0xFF,0x01,0x22,0xFF,0x21, \ +0x30,0x1C,0x31,0x31,0xFC,0xF7,0xCC,0xFF,0x0E,0x22,0xFF,0x21,0x38,0x1C,0x41, \ +0x31,0xFC,0xF7,0xC6,0xFF,0x0E,0x22,0xFF,0x21,0x51,0x31,0x00,0x98,0xFC,0xF7, \ +0xC0,0xFF,0x0E,0x22,0xFF,0x21,0x71,0x31,0x01,0x98,0xFC,0xF7,0xBA,0xFF,0x0E, \ +0x22,0xFF,0x21,0x21,0x31,0x02,0x98,0xFC,0xF7,0xB4,0xFF,0x0E,0x22,0xFF,0x21, \ +0x61,0x31,0x03,0x98,0xFC,0xF7,0xAE,0xFF,0xFC,0xF7,0x63,0xFF,0x03,0x4D,0x00, \ +0x21,0x29,0x60,0x08,0xB0,0xF0,0xBD,0x64,0x0A,0x00,0x02,0xA0,0x02,0x00,0x02, \ +0x00,0x72,0x01,0x02,0x04,0x72,0x01,0x02,0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05, \ +0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20, \ +0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x06,0x49,0x00, \ +0x68,0x10,0x23,0x08,0x73,0x38,0x7B,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00,0x00, \ +0x70,0x03,0x00,0x0D,0x34,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0B, \ +0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x03,0x2B, \ +0x03,0xD0,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x04, \ +0x49,0x00,0x20,0x08,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF3,0xE7, \ +0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05, \ +0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20, \ +0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x00,0x21,0x01, \ +0x73,0x01,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00,0x00, \ +0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x00,0xB5,0x00,0x28,0x05,0xD1,0x00, \ +0x29,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x03,0xD0,0x06,0x49,0x20,0x20, \ +0x08,0x73,0x00,0xBD,0xFC,0xF7,0xA9,0xFC,0x04,0x49,0x00,0x20,0x08,0x80,0x03, \ +0x49,0x08,0x80,0x00,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0x48,0x02,0x00,0x02, \ +0x4A,0x02,0x00,0x02,0xB0,0xB4,0x20,0x25,0x00,0x28,0x18,0x4C,0x03,0xD1,0x02, \ +0x2A,0x01,0xD1,0x01,0x2B,0x02,0xD1,0x25,0x73,0xB0,0xBC,0xF7,0x46,0x08,0x06, \ +0x00,0x0E,0x02,0x2B,0x05,0xD1,0x00,0x28,0x01,0xD0,0x25,0x73,0xF5,0xE7,0x00, \ +0x27,0x10,0xE0,0x03,0x2B,0x0E,0xD1,0x00,0x28,0x08,0xD0,0x02,0x28,0x08,0xD0, \ +0x80,0x28,0x04,0xD0,0x85,0x28,0x11,0xD1,0x0A,0x48,0x07,0x88,0x03,0xE0,0x00, \ +0x27,0x01,0xE0,0x09,0x48,0x07,0x88,0x80,0x20,0x20,0x73,0x08,0x48,0x00,0x21, \ 
+0x07,0x73,0x01,0x73,0x20,0x7B,0x10,0x23,0x18,0x43,0x20,0x73,0xD7,0xE7,0x25, \ +0x73,0xD5,0xE7,0x70,0x03,0x00,0x0D,0x4A,0x02,0x00,0x02,0x48,0x02,0x00,0x02, \ +0x30,0x03,0x00,0x0D,0x90,0xB5,0x20,0x27,0x00,0x28,0x0C,0x4C,0x03,0xD1,0x00, \ +0x2A,0x01,0xD1,0x03,0x2B,0x01,0xD0,0x27,0x73,0x90,0xBD,0x09,0x06,0x09,0x0E, \ +0x01,0x20,0x02,0x29,0x04,0xD0,0x85,0x29,0x07,0xD1,0x05,0x49,0x08,0x80,0x01, \ +0xE0,0x05,0x49,0x08,0x80,0xFC,0xF7,0x47,0xFC,0x90,0xBD,0x27,0x73,0x90,0xBD, \ +0x70,0x03,0x00,0x0D,0x4A,0x02,0x00,0x02,0x48,0x02,0x00,0x02,0x80,0xB4,0x0C, \ +0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B, \ +0x03,0xD1,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05, \ +0x48,0x00,0x21,0x01,0x73,0x01,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73, \ +0xF2,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x00,0xB5,0x02, \ +0x28,0x03,0xD1,0x0A,0x29,0x26,0xD1,0x16,0x4B,0x24,0xE0,0x04,0x28,0x01,0xD1, \ +0x15,0x4B,0x20,0xE0,0x05,0x28,0x01,0xD1,0x14,0x4B,0x1C,0xE0,0x00,0x28,0x1A, \ +0xD1,0x0A,0x29,0x17,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C, \ +0x13,0x05,0x07,0x09,0x13,0x0B,0x0D,0x0F,0x13,0x11,0x0D,0x4B,0x0C,0xE0,0x0D, \ +0x4B,0x0A,0xE0,0x0D,0x4B,0x08,0xE0,0x0D,0x4B,0x06,0xE0,0x0D,0x4B,0x04,0xE0, \ +0x0D,0x4B,0x02,0xE0,0x0D,0x4B,0x00,0xE0,0x0D,0x4B,0x0D,0x49,0x98,0x18,0x08, \ +0x60,0x00,0xF0,0x5B,0xF8,0x00,0xBD,0x64,0x0A,0x00,0x02,0xD0,0x02,0x00,0x02, \ +0xE4,0x0A,0x00,0x02,0x08,0x01,0x00,0x02,0x60,0x00,0x00,0x02,0xC4,0x00,0x00, \ +0x02,0x84,0x00,0x00,0x02,0x1C,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x18,0x01, \ +0x00,0x02,0x14,0x01,0x00,0x02,0x58,0x02,0x00,0x02,0x80,0xB4,0x17,0x1C,0x00, \ +0x22,0x01,0x2F,0x17,0x4B,0x23,0xD1,0x02,0x28,0x10,0xD1,0x16,0x48,0x87,0x79, \ +0xC0,0x79,0x00,0x02,0x07,0x43,0x08,0x29,0x07,0xD0,0x14,0x48,0x87,0x60,0x0C, \ +0x27,0x1F,0x70,0x5A,0x70,0x9A,0x70,0x01,0x60,0x42,0x60,0x80,0xBC,0xF7,0x46, \ +0x06,0x28,0xFB,0xD1,0x0F,0x48,0x00,0x78,0x01,0x28,0xF7,0xD1,0xFF,0x20,0x0D, \ +0x21,0x09,0x06,0x43,0x30,0x88,0x80,0x0B,0x49,0x01,0x20,0x08,0x71,0x0B,0x49, \ +0x08,0x70,0xEC,0xE7,0x18,0x79,0x18,0x70,0x5A,0x70,0x9A,0x70,0x18,0x78,0x0A, \ +0x28,0xE5,0xD1,0x07,0x48,0x02,0x70,0xE2,0xE7,0xD0,0x09,0x00,0x02,0x38,0x02, \ +0x00,0x02,0xA0,0x02,0x00,0x02,0x3A,0x01,0x00,0x02,0xE0,0x03,0x00,0x0D,0x3B, \ +0x01,0x00,0x02,0x9C,0x01,0x00,0x02,0x90,0xB4,0x1A,0x4A,0x80,0x20,0x10,0x73, \ +0x19,0x49,0x1A,0x48,0x0B,0x88,0x07,0x88,0xBB,0x42,0x11,0xD1,0x11,0x7B,0xC9, \ +0x09,0x09,0xD2,0x00,0x88,0x40,0x07,0x03,0xD0,0xE0,0x20,0x10,0x73,0x90,0xBC, \ +0xF7,0x46,0xD0,0x20,0x10,0x73,0xFA,0xE7,0x10,0x7B,0x20,0x23,0x18,0x43,0x10, \ +0x73,0xF5,0xE7,0x00,0x88,0x0B,0x88,0xC0,0x1A,0x08,0x28,0x00,0xD9,0x08,0x20, \ +0x0B,0x88,0x1B,0x18,0x0B,0x80,0x00,0x28,0x08,0xD0,0x0A,0x4B,0x0A,0x49,0x0F, \ +0x68,0x3C,0x78,0x01,0x37,0x0F,0x60,0x1C,0x73,0x01,0x38,0xF8,0xD1,0x10,0x7B, \ +0x10,0x23,0x18,0x43,0x10,0x73,0xDC,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x56, \ +0x02,0x00,0x02,0x54,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x58,0x02,0x00,0x02, \ +0x90,0xB5,0x20,0x24,0x00,0x28,0x0B,0x4F,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03, \ +0x2B,0x01,0xD0,0x3C,0x73,0x90,0xBD,0x08,0x06,0x00,0x0E,0x01,0xD0,0x80,0x28, \ +0x01,0xD1,0x3C,0x73,0x90,0xBD,0x04,0x48,0x00,0x79,0x00,0xF0,0x3A,0xF8,0x60, \ +0x20,0x38,0x73,0x90,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0x38,0x02,0x00,0x02, \ +0xB0,0xB4,0x13,0x48,0x01,0x2B,0x03,0xD1,0x20,0x21,0x01,0x73,0xB0,0xBC,0xF7, \ +0x46,0x10,0x49,0x00,0x23,0x0D,0x78,0x02,0x22,0x0F,0x4C,0x10,0x4F,0x01,0x2D, \ +0x02,0xD0,0x0D,0x78,0x02,0x2D,0x02,0xD1,0x0A,0x70,0x3B,0x70,0x23,0x70,0x80, \ 
+0x21,0x01,0x73,0x0B,0x49,0x01,0x25,0x0D,0x73,0x0B,0x73,0x0A,0x73,0x0B,0x73, \ +0x3A,0x78,0x10,0x23,0x0A,0x73,0x22,0x78,0x0A,0x73,0x01,0x7B,0x19,0x43,0x01, \ +0x73,0xDE,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x60,0x02,0x00,0x02,0x5D,0x02, \ +0x00,0x02,0x5C,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4,0x01,0x22,0x00, \ +0x23,0x02,0x28,0x10,0x49,0x12,0xD1,0x18,0x1C,0x10,0x4B,0x04,0x27,0x18,0x71, \ +0x0F,0x4B,0x1F,0x70,0x18,0x70,0x0F,0x4F,0x82,0x23,0x3B,0x71,0x0E,0x4B,0x18, \ +0x80,0x0E,0x4B,0x18,0x80,0x0E,0x4B,0x18,0x80,0x0A,0x70,0x80,0xBC,0xF7,0x46, \ +0x85,0x28,0xFB,0xD1,0x0C,0x48,0x03,0x80,0x0C,0x48,0x02,0x72,0x08,0x78,0x01, \ +0x28,0xF4,0xD1,0x02,0x20,0x08,0x70,0xF1,0xE7,0x00,0x00,0x61,0x02,0x00,0x02, \ +0x70,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xB0,0x03,0x00,0x0D,0x4E,0x02,0x00, \ +0x02,0x4C,0x02,0x00,0x02,0x48,0x02,0x00,0x02,0x4A,0x02,0x00,0x02,0xD4,0x51, \ +0x00,0x00,0x90,0xB5,0x0F,0x1C,0x19,0x1C,0x29,0x4B,0x14,0x1C,0x27,0x4A,0x98, \ +0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xAC,0xFD, \ +0x90,0xBD,0x24,0x4B,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21, \ +0x1C,0xFF,0xF7,0xC2,0xFD,0x90,0xBD,0x81,0x23,0x1B,0x02,0x98,0x42,0x06,0xD1, \ +0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xD3,0xFD,0x90,0xBD,0xFF, \ +0x23,0x0C,0x33,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C, \ +0xFF,0xF7,0xE6,0xFD,0x90,0xBD,0x41,0x23,0x5B,0x02,0x98,0x42,0x06,0xD1,0x13, \ +0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xF7,0xFD,0x90,0xBD,0x0F,0x4B, \ +0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x29, \ +0xFE,0x90,0xBD,0x01,0x23,0xDB,0x03,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C, \ +0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x40,0xFE,0x90,0xBD,0x06,0x49,0x20,0x20,0x08, \ +0x73,0x90,0xBD,0x00,0x00,0x30,0x02,0x00,0x02,0x08,0x80,0x00,0x00,0x0A,0x81, \ +0x00,0x00,0x03,0x02,0x00,0x00,0x70,0x03,0x00,0x0D,0x10,0x49,0x09,0x78,0x01, \ +0x29,0x1B,0xD1,0x40,0x08,0x19,0xD3,0x0D,0x20,0x00,0x06,0x01,0x78,0x20,0x23, \ +0x19,0x43,0x01,0x70,0x0B,0x48,0x00,0x68,0xC1,0x43,0x0B,0x48,0xC2,0x69,0x11, \ +0x40,0xC1,0x61,0x00,0x20,0x07,0x21,0x49,0x06,0x7D,0x22,0x12,0x01,0x88,0x61, \ +0x01,0x30,0x90,0x42,0xFC,0xD3,0xFF,0x20,0x48,0x61,0xFF,0xE7,0xFE,0xE7,0xF7, \ +0x46,0x00,0x00,0x3A,0x01,0x00,0x02,0xAC,0x02,0x00,0x02,0x40,0x00,0x00,0x04, \ +0xF0,0xB5,0xC0,0x20,0xFD,0xF7,0x4A,0xF8,0x22,0x4C,0x23,0x4F,0x21,0x7A,0x23, \ +0x4A,0x39,0x70,0x11,0x79,0x79,0x70,0x21,0x7B,0xF9,0x70,0x11,0x7B,0xB9,0x70, \ +0x0D,0x21,0x09,0x06,0x8B,0x88,0x07,0x25,0x6D,0x06,0xBB,0x80,0xEE,0x69,0x01, \ +0x23,0x5B,0x02,0x33,0x43,0xEB,0x61,0x00,0x23,0x01,0x33,0x32,0x2B,0xFC,0xD3, \ +0xEE,0x69,0x18,0x4B,0x33,0x40,0xEB,0x61,0x00,0x23,0x01,0x33,0x64,0x2B,0xFC, \ +0xD3,0x15,0x4D,0x00,0x23,0x2B,0x70,0x15,0x4B,0x80,0x25,0x1D,0x73,0x01,0x25, \ +0x1D,0x72,0x82,0x25,0x1D,0x71,0x07,0x25,0x1D,0x70,0x11,0x4B,0x05,0x26,0x1E, \ +0x73,0x86,0x26,0x1E,0x72,0x1D,0x71,0x24,0x23,0x23,0x71,0x3B,0x78,0x23,0x72, \ +0xFB,0x78,0x23,0x73,0x7B,0x78,0x13,0x71,0xBB,0x78,0x13,0x73,0x0A,0x4A,0x0A, \ +0x81,0xBA,0x88,0x8A,0x80,0xFD,0xF7,0x06,0xF8,0xF0,0xBD,0x00,0x00,0xC0,0x03, \ +0x00,0x0D,0xC8,0x02,0x00,0x02,0xE0,0x03,0x00,0x0D,0xFF,0xFD,0x00,0x00,0x10, \ +0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0xFF,0x0F,0x00,0x00, \ +0x80,0xB5,0x0B,0x49,0x00,0x20,0x08,0x60,0x88,0x80,0x88,0x71,0x09,0x4F,0xC8, \ +0x71,0x38,0x68,0x01,0x7A,0x10,0x29,0x02,0xD1,0xFB,0xF7,0x1F,0xFD,0x38,0x60, \ +0x38,0x68,0x01,0x7A,0x40,0x29,0x02,0xD1,0xFB,0xF7,0x18,0xFD,0x38,0x60,0x80, \ +0xBD,0x00,0x00,0xD4,0x51,0x00,0x00,0x64,0x02,0x00,0x02,0xF0,0xB5,0x23,0x4E, \ 
+0x04,0x1C,0x0F,0x1C,0x13,0x1C,0x20,0x22,0xB5,0x78,0xF1,0x78,0x03,0x2B,0x20, \ +0x48,0x01,0xD0,0x02,0x73,0xF0,0xBD,0x02,0x2D,0x09,0xD1,0x01,0x29,0x01,0xD3, \ +0x0D,0x29,0x01,0xD9,0x02,0x73,0xF0,0xBD,0x08,0x29,0x01,0xD1,0x02,0x73,0xF0, \ +0xBD,0x00,0x2F,0x09,0xD1,0xFC,0xF7,0xB3,0xF9,0x06,0x2D,0x07,0xD1,0xF9,0xF7, \ +0x76,0xFA,0x15,0x48,0x00,0x21,0x01,0x70,0x01,0xE0,0x00,0x21,0x01,0x73,0x13, \ +0x48,0x02,0x2D,0x07,0xD1,0x00,0x2C,0x0E,0xD1,0x11,0x49,0x01,0x60,0x11,0x48, \ +0x00,0x21,0x01,0x70,0x08,0xE0,0x01,0x2D,0xD7,0xD0,0x0F,0x49,0x01,0x60,0x0F, \ +0x48,0x00,0x21,0x01,0x70,0x0F,0x48,0x01,0x70,0x0F,0x48,0x31,0x1C,0x07,0x80, \ +0x0E,0x48,0x00,0x27,0x07,0x80,0x0E,0x48,0x08,0x22,0xFD,0xF7,0xD3,0xFC,0x03, \ +0x48,0x07,0x70,0xF0,0xBD,0x38,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x60,0x02, \ +0x00,0x02,0x58,0x02,0x00,0x02,0x64,0x0A,0x00,0x02,0x9C,0x01,0x00,0x02,0xD4, \ +0x09,0x00,0x02,0x5D,0x02,0x00,0x02,0x5C,0x02,0x00,0x02,0x54,0x02,0x00,0x02, \ +0x56,0x02,0x00,0x02,0x40,0x02,0x00,0x02,0xB0,0xB5,0x11,0x4F,0x14,0x1C,0xBB, \ +0x78,0xFF,0x78,0x03,0x2C,0x0F,0x4A,0x02,0xD0,0x20,0x20,0x10,0x73,0xB0,0xBD, \ +0x0E,0x4D,0x00,0x24,0x2C,0x80,0x0D,0x4C,0x01,0x2B,0x21,0x80,0x0A,0xD1,0x80, \ +0x20,0x10,0x73,0x0B,0x48,0x0C,0x49,0x00,0x78,0x10,0x23,0x08,0x73,0x10,0x7B, \ +0x18,0x43,0x10,0x73,0xB0,0xBD,0x02,0x1C,0x18,0x1C,0x39,0x1C,0xFF,0xF7,0x2E, \ +0xFD,0xB0,0xBD,0x00,0x00,0x38,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x56,0x02, \ +0x00,0x02,0x54,0x02,0x00,0x02,0x53,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0xB0, \ +0xB5,0x0F,0x1C,0x18,0x4D,0x19,0x1C,0x14,0x1C,0xA8,0x42,0x02,0xD0,0x17,0x4B, \ +0x00,0x22,0x1A,0x70,0x16,0x4A,0xA8,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38, \ +0x1C,0x21,0x1C,0xFF,0xF7,0xD9,0xFD,0xB0,0xBD,0x12,0x4B,0x98,0x42,0x04,0xD1, \ +0x12,0x68,0x20,0x1C,0xFF,0xF7,0x55,0xFF,0xB0,0xBD,0x0F,0x4B,0x98,0x42,0x06, \ +0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xE5,0xFD,0xB0,0xBD, \ +0x0B,0x4B,0x98,0x42,0x04,0xD1,0x12,0x68,0x20,0x1C,0xFF,0xF7,0xA3,0xFF,0xB0, \ +0xBD,0x0B,0x1C,0x39,0x1C,0x22,0x1C,0xFF,0xF7,0x41,0xFE,0xB0,0xBD,0x01,0x02, \ +0x00,0x00,0x61,0x02,0x00,0x02,0x30,0x02,0x00,0x02,0x0E,0x40,0x00,0x00,0x22, \ +0xC1,0x00,0x00,0x33,0xC1,0x00,0x00,0xF0,0xB5,0x22,0x4B,0xE0,0x25,0x01,0x27, \ +0x98,0x42,0x1D,0x49,0x1D,0x4C,0x1E,0x4A,0x08,0xD1,0x90,0x78,0x01,0x28,0x01, \ +0xD1,0x0D,0x73,0x01,0xE0,0xFF,0xF7,0x5C,0xFD,0x27,0x71,0xF0,0xBD,0x1A,0x4B, \ +0x20,0x26,0x98,0x42,0x21,0xD1,0x0E,0x73,0x19,0x48,0x27,0x71,0x00,0x78,0x00, \ +0x28,0xF4,0xD1,0x90,0x78,0x02,0x28,0x02,0xD1,0xD0,0x78,0x08,0x28,0xEE,0xD0, \ +0x90,0x78,0x01,0x28,0x0C,0xD1,0x13,0x49,0x00,0x20,0x08,0x70,0x12,0x48,0x00, \ +0x78,0x02,0x28,0x02,0xD1,0x11,0x48,0x07,0x70,0xF0,0xBD,0x11,0x48,0x07,0x70, \ +0xF0,0xBD,0xD1,0x78,0x90,0x78,0x01,0x22,0xFF,0xF7,0xF3,0xFC,0xF0,0xBD,0x10, \ +0x78,0x00,0x0A,0x01,0xD2,0x0E,0x73,0x00,0xE0,0x0D,0x73,0x27,0x71,0xF0,0xBD, \ +0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x38,0x02,0x00,0x02,0x33,0xC1,0x00, \ +0x00,0x0E,0x40,0x00,0x00,0x60,0x02,0x00,0x02,0x9C,0x01,0x00,0x02,0x53,0x02, \ +0x00,0x02,0x5F,0x02,0x00,0x02,0xE5,0x01,0x00,0x02,0x80,0xB5,0x00,0x20,0x1C, \ +0x49,0x0F,0x27,0x3F,0x06,0x08,0x70,0xB8,0x80,0x39,0x88,0xB8,0x81,0x1A,0x4A, \ +0x39,0x89,0xD1,0x69,0xD1,0x04,0xCB,0x68,0xC9,0x6B,0x18,0x49,0x09,0x68,0x90, \ +0x61,0x17,0x49,0x02,0x20,0xC8,0x74,0x17,0x48,0x01,0x7A,0x0C,0x30,0x08,0x29, \ +0x19,0xD2,0x01,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x15,0x03,0x06,0x15,0x09, \ +0x0C,0x0F,0x12,0x00,0xF0,0xFA,0xFB,0x80,0xBD,0x00,0xF0,0x7B,0xF9,0x80,0xBD, \ +0x00,0xF0,0x10,0xFA,0x80,0xBD,0x00,0xF0,0x1B,0xF8,0x80,0xBD,0x00,0xF0,0xC4, \ 
+0xF8,0x80,0xBD,0x00,0xF0,0x73,0xFA,0x80,0xBD,0x02,0x21,0x0A,0x20,0xFF,0xF7, \ +0x0A,0xFA,0x06,0x48,0xB8,0x80,0x80,0xBD,0x00,0x00,0x9C,0x01,0x00,0x02,0x80, \ +0x00,0x00,0x04,0x40,0x00,0x00,0x04,0x50,0x09,0x00,0x02,0xD0,0x09,0x00,0x02, \ +0x08,0x08,0x00,0x00,0xF0,0xB5,0x4B,0x4F,0x60,0xC8,0x39,0x1C,0x60,0xC1,0x38, \ +0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x48,0x4C,0x01,0xDC,0x00,0x28,0x05,0xD1, \ +0x03,0x21,0x0A,0x20,0xFF,0xF7,0xE9,0xF9,0xAC,0x80,0xF0,0xBD,0x44,0x48,0x90, \ +0x21,0x41,0x70,0xB9,0x78,0x00,0x26,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0, \ +0x40,0x21,0xC1,0x70,0x41,0x7C,0x89,0x07,0x89,0x0F,0x41,0x74,0xFA,0x78,0x41, \ +0x7C,0x92,0x00,0x02,0x23,0x1A,0x43,0x11,0x43,0x41,0x74,0x39,0x79,0x01,0x75, \ +0x79,0x79,0x41,0x75,0x38,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41, \ +0x77,0xFF,0x20,0xF5,0x30,0x35,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C, \ +0x01,0x38,0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18, \ +0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0xE1,0xFF,0x07,0x21,0x49,0x06, \ +0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x02,0xF0,0xDE,0xF9,0x38,0x78,0x00, \ +0x21,0x01,0xF0,0x35,0xFF,0x00,0x21,0x08,0x20,0xF9,0xF7,0x71,0xF8,0x00,0x21, \ +0x09,0x20,0xF9,0xF7,0x6D,0xF8,0x00,0x21,0x0A,0x20,0xF9,0xF7,0x69,0xF8,0x20, \ +0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61,0x00,0x20,0xF8,0xF7,0xD7,0xFF, \ +0x0A,0x20,0xF8,0xF7,0xBE,0xFF,0x01,0x20,0x80,0x06,0x46,0x61,0xC0,0x68,0x19, \ +0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFF,0xF7, \ +0x83,0xF9,0xAC,0x80,0xF0,0xBD,0x00,0x22,0xFF,0x21,0x7D,0x20,0xC0,0x00,0xAC, \ +0x80,0x00,0xF0,0xA6,0xFA,0x11,0x48,0x01,0x21,0x89,0x06,0x88,0x63,0x10,0x48, \ +0x11,0x4A,0x48,0x63,0xAE,0x80,0x04,0x20,0xD0,0x74,0xB8,0x60,0x00,0x03,0x78, \ +0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0C,0x48,0x01,0x21,0xA8,0x80,0x0A,0x20, \ +0xFF,0xF7,0x64,0xF9,0xF0,0xBD,0x00,0x00,0xD8,0x02,0x00,0x02,0x08,0x08,0x00, \ +0x00,0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24, \ +0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x50,0x09,0x00,0x02,0x88, \ +0x88,0x00,0x00,0xF0,0xB5,0x4C,0x4F,0x60,0xC8,0x39,0x1C,0x60,0xC1,0x38,0x78, \ +0x0F,0x25,0x2D,0x06,0x0E,0x28,0x49,0x4C,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03, \ +0x21,0x0A,0x20,0xFF,0xF7,0x3D,0xF9,0xAC,0x80,0xF0,0xBD,0x45,0x48,0x90,0x21, \ +0x41,0x70,0xB9,0x78,0x00,0x26,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0,0x40, \ +0x21,0xC1,0x70,0x41,0x7C,0x89,0x07,0x89,0x0F,0x41,0x74,0xFA,0x78,0x41,0x7C, \ +0x92,0x00,0x02,0x23,0x1A,0x43,0x11,0x43,0x41,0x74,0x39,0x79,0x01,0x75,0x79, \ +0x79,0x41,0x75,0x39,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41,0x77, \ +0xFF,0x20,0xF5,0x30,0x36,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01, \ +0x38,0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43, \ +0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x35,0xFF,0x07,0x21,0x49,0x06,0xC8, \ +0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x02,0xF0,0x32,0xF9,0x38,0x78,0x00,0x21, \ +0x01,0xF0,0x89,0xFE,0x0B,0x21,0x08,0x20,0xF8,0xF7,0xC5,0xFF,0xB7,0x21,0x09, \ +0x20,0xF8,0xF7,0xC1,0xFF,0x00,0x21,0x0A,0x20,0xF8,0xF7,0xBD,0xFF,0x14,0x20, \ +0xF8,0xF7,0x1A,0xFF,0x1F,0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61,0x00, \ +0x20,0xF8,0xF7,0x28,0xFF,0x0A,0x20,0xF8,0xF7,0x0F,0xFF,0x01,0x20,0x80,0x06, \ +0x46,0x61,0xC0,0x68,0x19,0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3,0x06, \ +0x21,0x0A,0x20,0xFF,0xF7,0xD4,0xF8,0xAC,0x80,0xF0,0xBD,0x00,0x22,0x55,0x21, \ +0x7D,0x20,0xC0,0x00,0xAC,0x80,0x00,0xF0,0xF7,0xF9,0x11,0x48,0x01,0x21,0x89, \ +0x06,0x88,0x63,0x10,0x48,0x10,0x4A,0x48,0x63,0xAE,0x80,0x04,0x20,0xD0,0x74, \ 
+0xB8,0x60,0x00,0x03,0x78,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0C,0x48,0x01, \ +0x21,0xA8,0x80,0x0A,0x20,0xFF,0xF7,0xB5,0xF8,0xF0,0xBD,0xD8,0x02,0x00,0x02, \ +0x08,0x08,0x00,0x00,0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00, \ +0x04,0x04,0x24,0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x50,0x09, \ +0x00,0x02,0x88,0x88,0x00,0x00,0xF0,0xB5,0x42,0x4C,0xC0,0xC8,0x21,0x1C,0xC0, \ +0xC1,0xA0,0x78,0x40,0x4D,0x80,0x08,0x80,0x00,0x0F,0x27,0x3F,0x06,0x00,0x28, \ +0x05,0xD0,0x03,0x21,0x0A,0x20,0xFF,0xF7,0x8F,0xF8,0xBD,0x80,0xF0,0xBD,0x20, \ +0x78,0x0E,0x28,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFF,0xF7, \ +0x84,0xF8,0xBD,0x80,0xF0,0xBD,0x08,0x21,0x0A,0x20,0xFF,0xF7,0x7E,0xF8,0x33, \ +0x48,0x00,0x26,0x06,0x70,0x33,0x48,0x06,0x60,0x46,0x60,0x00,0x20,0xF8,0xF7, \ +0xB9,0xFE,0xA1,0x78,0x30,0x48,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0,0x40, \ +0x21,0xC1,0x70,0x21,0x79,0x01,0x75,0x61,0x79,0x41,0x75,0x2C,0x49,0x09,0x78, \ +0x01,0x29,0x01,0xD1,0xE1,0x79,0x41,0x77,0xFF,0x20,0xF5,0x30,0x29,0x49,0x49, \ +0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF7,0xD1,0x07,0x21, \ +0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8, \ +0xF7,0x7D,0xFE,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61, \ +0x02,0xF0,0x7A,0xF8,0x20,0x78,0x00,0x21,0x01,0xF0,0xD1,0xFD,0x00,0x28,0x05, \ +0xD1,0x05,0x21,0x0A,0x20,0xFF,0xF7,0x3D,0xF8,0xBD,0x80,0xF0,0xBD,0x14,0x20, \ +0xF8,0xF7,0x66,0xFE,0x00,0x20,0xF8,0xF7,0x79,0xFE,0x13,0x48,0x41,0x68,0xC9, \ +0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFF,0xF7,0x2D,0xF8,0xBD,0x80,0xF0,0xBD, \ +0x86,0x60,0x20,0x20,0x41,0x05,0x48,0x61,0x0D,0x48,0x01,0x21,0x01,0x73,0xC1, \ +0x74,0xB8,0x88,0x0B,0x4B,0x18,0x43,0xB8,0x80,0x0A,0x20,0xFF,0xF7,0x1C,0xF8, \ +0xF0,0xBD,0x00,0x00,0xD8,0x02,0x00,0x02,0x08,0x08,0x00,0x00,0x9C,0x01,0x00, \ +0x02,0xD0,0x02,0x00,0x02,0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00, \ +0x00,0x04,0x50,0x09,0x00,0x02,0x48,0x48,0x00,0x00,0xF0,0xB5,0x2F,0x4F,0x60, \ +0xC8,0x39,0x1C,0x60,0xC1,0x38,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x2C,0x4C, \ +0x01,0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFE,0xF7,0xF7,0xFF,0xAC, \ +0x80,0xF0,0xBD,0x28,0x48,0x00,0x26,0x46,0x70,0x41,0x7C,0xFD,0x23,0x19,0x40, \ +0x41,0x74,0x25,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41,0x77,0xFF, \ +0x20,0xF5,0x30,0x22,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38, \ +0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8, \ +0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x02,0xFE,0x07,0x21,0x49,0x06,0xC8,0x69, \ +0x80,0x23,0x98,0x43,0xC8,0x61,0x01,0xF0,0xFF,0xFF,0xAE,0x80,0x38,0x78,0x00, \ +0x21,0x01,0xF0,0x55,0xFD,0x00,0x28,0x02,0xD1,0x13,0x49,0x05,0x20,0x48,0x70, \ +0x14,0x20,0xF8,0xF7,0xED,0xFD,0x00,0x20,0xF8,0xF7,0x00,0xFE,0x0D,0x48,0x41, \ +0x68,0xC9,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFE,0xF7,0xB4,0xFF,0xAC,0x80, \ +0xF0,0xBD,0x86,0x60,0x01,0x20,0x80,0x06,0x46,0x61,0x01,0x21,0x0A,0x20,0xAC, \ +0x80,0xFE,0xF7,0xA9,0xFF,0xF0,0xBD,0xD8,0x02,0x00,0x02,0x08,0x08,0x00,0x00, \ +0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0xD0,0x09,0x00, \ +0x02,0xF0,0xB5,0x01,0x1C,0xB8,0xC9,0x58,0x4E,0x30,0x1C,0xB8,0xC0,0x30,0x7A, \ +0x0F,0x24,0x24,0x06,0x0E,0x28,0x55,0x4F,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03, \ +0x21,0x0A,0x20,0xFE,0xF7,0x8A,0xFF,0xA7,0x80,0xF0,0xBD,0x51,0x4D,0xA8,0x70, \ +0x70,0x78,0x68,0x70,0x30,0x78,0x28,0x70,0x70,0x88,0xA8,0x60,0x70,0x68,0xE8, \ +0x60,0x00,0x20,0xE8,0x70,0x68,0x60,0x28,0x61,0xF0,0x68,0x68,0x61,0x00,0x20, \ +0xF8,0xF7,0xB9,0xFD,0x6A,0x78,0x40,0x21,0x48,0x48,0x00,0x2A,0x16,0xD0,0x01, \ 
+0x2A,0x17,0xD0,0x02,0x2A,0x18,0xD0,0x03,0x2A,0x01,0xD1,0x60,0x22,0x42,0x70, \ +0x42,0x7C,0x92,0x07,0x92,0x0F,0x42,0x74,0xB3,0x7A,0x42,0x7C,0x9B,0x00,0x1A, \ +0x43,0x42,0x74,0x72,0x7A,0x01,0x2A,0x0A,0xD1,0x00,0x22,0xC2,0x70,0x08,0xE0, \ +0x00,0x22,0x42,0x70,0xED,0xE7,0x20,0x22,0x42,0x70,0xEA,0xE7,0x41,0x70,0xE8, \ +0xE7,0xC1,0x70,0x37,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF1,0x7A,0x41,0x77, \ +0xFF,0x20,0x35,0x4E,0xF5,0x30,0x71,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01, \ +0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43, \ +0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x64,0xFD,0x07,0x21,0x49,0x06,0xC8, \ +0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x01,0xF0,0x61,0xFF,0xA8,0x78,0x00,0x21, \ +0x01,0xF0,0xB8,0xFC,0x25,0x49,0xC8,0x69,0x8B,0x01,0x18,0x43,0xC8,0x61,0x14, \ +0x20,0xF8,0xF7,0x50,0xFD,0x00,0x20,0xF8,0xF7,0x63,0xFD,0x0A,0x20,0xF8,0xF7, \ +0x4A,0xFD,0x00,0x26,0x01,0x20,0x80,0x06,0x46,0x61,0xC0,0x68,0x1C,0x49,0x1C, \ +0x48,0x48,0x61,0x48,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFE,0xF7, \ +0x0D,0xFF,0xA7,0x80,0xF0,0xBD,0xE8,0x68,0x00,0xF0,0xAC,0xF8,0x68,0x60,0xE8, \ +0x78,0xF8,0xF7,0x8A,0xFD,0x68,0x68,0xF9,0xF7,0xB3,0xFF,0xA7,0x80,0x29,0x78, \ +0xE8,0x68,0x00,0x22,0x00,0xF0,0x27,0xF8,0x10,0x49,0xA6,0x80,0x03,0x20,0xC8, \ +0x74,0x0C,0x49,0x22,0x20,0x88,0x60,0x08,0x05,0x41,0x6A,0x0C,0x4B,0xC9,0x18, \ +0x01,0x62,0x0C,0x48,0x01,0x21,0xA0,0x80,0x0A,0x20,0xFE,0xF7,0xE9,0xFE,0xF0, \ +0xBD,0xEC,0x0A,0x00,0x02,0x08,0x08,0x00,0x00,0xD4,0x0A,0x00,0x02,0xD8,0x07, \ +0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24,0x00,0x00,0x50, \ +0x09,0x00,0x02,0x10,0x27,0x00,0x00,0x88,0x88,0x00,0x00,0xF0,0xB5,0x07,0x1C, \ +0x00,0x2A,0x0B,0xD1,0x00,0x20,0x00,0x2F,0x14,0x4A,0x06,0xD9,0x09,0x06,0x09, \ +0x0E,0x11,0x70,0x01,0x32,0x01,0x30,0xB8,0x42,0xFA,0xD3,0xF0,0xBD,0xF8,0xF7, \ +0x02,0xFD,0xFD,0xF7,0x10,0xF9,0xFD,0xF7,0xF0,0xF8,0xBC,0x08,0x26,0x1C,0x0B, \ +0x4D,0x04,0xD0,0xFD,0xF7,0xEA,0xF8,0x01,0xC5,0x01,0x3C,0xFA,0xD1,0xB0,0x00, \ +0x3F,0x1A,0xFD,0xF7,0xE3,0xF8,0x69,0x1C,0x03,0x2F,0x28,0x70,0x02,0xD1,0x00, \ +0x0C,0x08,0x70,0xF0,0xBD,0x02,0x2F,0xE2,0xD1,0x00,0x0A,0x08,0x70,0xF0,0xBD, \ +0x00,0x00,0x00,0x72,0x01,0x02,0x88,0xB4,0x01,0x20,0x80,0x06,0xC1,0x6B,0x00, \ +0xAB,0x19,0x80,0x1A,0x49,0x1B,0x4A,0xC9,0x7C,0x1B,0x4F,0x03,0x29,0x21,0xD1, \ +0x00,0xA9,0x09,0x88,0x20,0x23,0x0B,0x40,0x18,0x49,0x0C,0xD0,0x87,0x63,0xCF, \ +0x68,0x03,0x23,0x1B,0x03,0x3B,0x43,0x43,0x63,0x4B,0x78,0x15,0x4F,0xFF,0x5C, \ +0x11,0x23,0x9B,0x02,0x3B,0x43,0x53,0x60,0x00,0xAA,0x12,0x88,0x92,0x08,0x16, \ +0xD3,0x0A,0x69,0x01,0x32,0x0A,0x61,0x4B,0x69,0x9A,0x42,0x10,0xD2,0x89,0x68, \ +0x42,0x6A,0x89,0x18,0x01,0x62,0x0B,0xE0,0x04,0x29,0x09,0xD1,0x00,0xA9,0x09, \ +0x88,0xC9,0x08,0x05,0xD3,0x87,0x63,0x64,0x21,0x41,0x63,0x01,0x20,0x80,0x03, \ +0x50,0x60,0x88,0xBC,0xF7,0x46,0x50,0x09,0x00,0x02,0x40,0x00,0x00,0x04,0x00, \ +0x72,0x01,0x02,0xD4,0x0A,0x00,0x02,0xC8,0x01,0x00,0x02,0xF0,0xB5,0x04,0x30, \ +0xC7,0x00,0x19,0x4C,0x00,0x26,0xE6,0x70,0x60,0x78,0x01,0x28,0x15,0xD0,0x02, \ +0x28,0x15,0xD0,0x03,0x28,0x25,0xD1,0x0B,0x20,0x39,0x1C,0xFD,0xF7,0x41,0xF8, \ +0x0D,0x1C,0x79,0x1A,0x0B,0x20,0xFD,0xF7,0x3C,0xF8,0x07,0x1C,0x00,0x2D,0x18, \ +0xD9,0x01,0x37,0x04,0x2D,0x13,0xD2,0x01,0x20,0xE0,0x70,0x13,0xE0,0x7F,0x08, \ +0x11,0xE0,0x79,0x00,0x0B,0x20,0x0F,0x1C,0xFD,0xF7,0x2C,0xF8,0x0C,0x1C,0x79, \ +0x1A,0x0B,0x20,0xFD,0xF7,0x27,0xF8,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37, \ +0x02,0xE0,0xE6,0x70,0x00,0xE0,0xE6,0x70,0x38,0x04,0x00,0x0C,0xF0,0xBD,0xD4, \ +0x0A,0x00,0x02,0xF0,0xB5,0x4B,0x4F,0x60,0xC8,0x39,0x1C,0x60,0xC1,0x38,0x78, \ 
+0x0F,0x26,0x36,0x06,0x0E,0x28,0x48,0x4D,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03, \ +0x21,0x0A,0x20,0xFE,0xF7,0x13,0xFE,0xB5,0x80,0xF0,0xBD,0x44,0x48,0x90,0x21, \ +0x41,0x70,0xB9,0x78,0x00,0x24,0x01,0x29,0x01,0xD1,0xC4,0x70,0x01,0xE0,0x40, \ +0x21,0xC1,0x70,0x41,0x7C,0x89,0x07,0x89,0x0F,0x41,0x74,0xFA,0x78,0x41,0x7C, \ +0x92,0x00,0x11,0x43,0x41,0x74,0x39,0x79,0x01,0x75,0x79,0x79,0x41,0x75,0x39, \ +0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41,0x77,0xFF,0x20,0xF5,0x30, \ +0x36,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF7, \ +0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20, \ +0x2D,0x30,0xF8,0xF7,0x0D,0xFC,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98, \ +0x43,0xC8,0x61,0x01,0xF0,0x0A,0xFE,0x38,0x78,0x00,0x21,0x01,0xF0,0x61,0xFB, \ +0x0B,0x21,0x08,0x20,0xF8,0xF7,0x9D,0xFC,0xB7,0x21,0x09,0x20,0xF8,0xF7,0x99, \ +0xFC,0x00,0x21,0x0A,0x20,0xF8,0xF7,0x95,0xFC,0x14,0x20,0xF8,0xF7,0xF2,0xFB, \ +0x1F,0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61,0x00,0x20,0xF8,0xF7,0x00, \ +0xFC,0x0A,0x20,0xF8,0xF7,0xE7,0xFB,0x01,0x20,0x80,0x06,0x44,0x61,0xC0,0x68, \ +0x19,0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFE, \ +0xF7,0xAC,0xFD,0xB5,0x80,0xF0,0xBD,0x01,0x22,0x55,0x21,0x7D,0x20,0xC0,0x00, \ +0xB5,0x80,0xFF,0xF7,0xCF,0xFE,0x11,0x48,0x01,0x21,0x89,0x06,0x88,0x63,0x10, \ +0x48,0x10,0x4A,0x48,0x63,0xB4,0x80,0x04,0x20,0xB8,0x60,0xD0,0x74,0x00,0x03, \ +0x78,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0C,0x48,0x01,0x21,0xB0,0x80,0x0A, \ +0x20,0xFE,0xF7,0x8D,0xFD,0xF0,0xBD,0xD8,0x02,0x00,0x02,0x08,0x08,0x00,0x00, \ +0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24,0x00, \ +0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x50,0x09,0x00,0x02,0x88,0x88, \ +0x00,0x00,0x80,0xB5,0x15,0x49,0x01,0x27,0xC9,0x7C,0x01,0x29,0x13,0xD1,0x13, \ +0x4B,0x18,0x40,0x0E,0xD0,0x88,0x06,0xC0,0x68,0x81,0x09,0x0A,0xD3,0x04,0x21, \ +0x01,0x40,0x10,0x48,0x03,0xD0,0x41,0x68,0x01,0x31,0x41,0x60,0x02,0xE0,0x01, \ +0x68,0x01,0x31,0x01,0x60,0x38,0x1C,0x80,0xBD,0x02,0x29,0x01,0xD1,0x38,0x1C, \ +0x80,0xBD,0x03,0x29,0x01,0xD0,0x04,0x29,0x06,0xD1,0x07,0x4B,0x18,0x40,0x01, \ +0xD0,0xFF,0xF7,0xAF,0xFE,0x38,0x1C,0x80,0xBD,0x00,0x20,0x80,0xBD,0x00,0x00, \ +0x50,0x09,0x00,0x02,0x40,0x40,0x00,0x00,0xD0,0x02,0x00,0x02,0x80,0x80,0x00, \ +0x00,0xFF,0xB5,0x84,0xB0,0x00,0x20,0x00,0x24,0x00,0x26,0x00,0x27,0x00,0x25, \ +0x03,0x90,0x02,0x90,0x01,0x90,0x68,0x46,0x04,0x22,0x5A,0x49,0xFC,0xF7,0xE7, \ +0xFE,0x05,0x99,0x00,0x20,0x00,0x29,0x1B,0xDD,0x04,0x99,0x80,0x23,0x09,0x5C, \ +0x0A,0x1C,0x9A,0x43,0x16,0x2A,0x02,0xD1,0x00,0xAB,0xD9,0x70,0x0D,0xE0,0x0B, \ +0x2A,0x02,0xD1,0x00,0xAB,0x99,0x70,0x08,0xE0,0x04,0x2A,0x02,0xD1,0x00,0xAB, \ +0x59,0x70,0x03,0xE0,0x02,0x2A,0x01,0xD1,0x00,0xAB,0x19,0x70,0x05,0x99,0x01, \ +0x30,0x88,0x42,0xE3,0xDB,0x00,0x20,0x69,0x46,0x09,0x5C,0x00,0x29,0x0D,0xD0, \ +0x09,0x0A,0x04,0xD3,0x00,0x2E,0x00,0xD1,0x07,0x1C,0x01,0x26,0x04,0x1C,0x01, \ +0x99,0x02,0x90,0x00,0x29,0x02,0xD1,0x01,0x21,0x01,0x91,0x05,0x1C,0x01,0x30, \ +0x04,0x28,0xEA,0xDB,0x01,0x99,0x00,0x20,0x00,0x29,0x01,0xD1,0x08,0xB0,0xF0, \ +0xBD,0x00,0x2E,0x01,0xD1,0x2C,0x1C,0x2F,0x1C,0x3A,0x49,0x00,0x22,0x8B,0x18, \ +0x1B,0x7C,0x00,0x2B,0x00,0xD0,0x03,0x92,0x01,0x32,0x04,0x2A,0xF7,0xDB,0x06, \ +0x9B,0x01,0x26,0x0E,0x2B,0x34,0x4A,0x03,0xD1,0x34,0x4B,0x1B,0x78,0x01,0x2B, \ +0x0A,0xD1,0x03,0x98,0x84,0x42,0x02,0xDD,0x03,0x98,0x90,0x72,0x00,0xE0,0x94, \ +0x72,0x02,0x98,0xD0,0x72,0xD7,0x71,0x42,0xE0,0x2D,0x4B,0x1B,0x78,0x00,0x2B, \ +0x3E,0xD1,0x01,0x2D,0x10,0xD9,0xD0,0x71,0x96,0x72,0xD6,0x72,0x07,0x9B,0x00, \ 
+0x27,0x01,0x2B,0x35,0xD1,0x82,0x20,0x00,0xAB,0x18,0x70,0x84,0x20,0x58,0x70, \ +0x0B,0x20,0x98,0x70,0x16,0x20,0xD8,0x70,0x2B,0xE0,0x01,0x2C,0x0D,0xDD,0x00, \ +0xAC,0x64,0x78,0x23,0x0A,0x01,0xD3,0x96,0x72,0x08,0xE0,0x00,0xAC,0x24,0x78, \ +0x23,0x0A,0x01,0xD3,0x90,0x72,0x02,0xE0,0x95,0x72,0x00,0xE0,0x94,0x72,0x01, \ +0x2F,0x0D,0xD9,0x00,0xAC,0x24,0x78,0x23,0x0A,0x01,0xD3,0xD0,0x71,0x08,0xE0, \ +0x00,0xAC,0x64,0x78,0x23,0x0A,0x01,0xD3,0xD6,0x71,0x02,0xE0,0xD5,0x71,0x00, \ +0xE0,0xD7,0x71,0x02,0x9B,0x00,0x2B,0x05,0xDD,0x00,0xAB,0x5B,0x78,0x00,0x2B, \ +0x01,0xD0,0xD6,0x72,0x00,0xE0,0xD0,0x72,0x00,0x20,0x6B,0x46,0x1B,0x5C,0x0C, \ +0x18,0x01,0x30,0x04,0x28,0x23,0x74,0xF8,0xDB,0xC8,0x19,0x01,0x7C,0x80,0x23, \ +0x19,0x43,0x01,0x74,0xD0,0x7A,0x05,0x49,0xC8,0x70,0x30,0x1C,0x86,0xE7,0x38, \ +0x9C,0x00,0x00,0x00,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0xB9,0x02,0x00,0x02, \ +0xD0,0x01,0x00,0x02,0x90,0xB4,0x47,0x78,0x00,0x22,0x00,0x23,0x00,0x2F,0x14, \ +0xDD,0xC7,0x18,0xBC,0x78,0x67,0x06,0x7F,0x0E,0x02,0x2F,0x05,0xD0,0x04,0x2F, \ +0x03,0xD0,0x0B,0x2F,0x01,0xD0,0x16,0x2F,0x04,0xD1,0x04,0x2A,0x02,0xDA,0x17, \ +0x1C,0xCC,0x55,0x01,0x32,0x47,0x78,0x01,0x33,0x9F,0x42,0xEA,0xDC,0x90,0xBC, \ +0x10,0x1C,0xF7,0x46,0xF1,0xB5,0x85,0xB0,0x00,0x20,0x01,0x90,0x68,0x46,0x04, \ +0x22,0x75,0x49,0xFC,0xF7,0x03,0xFE,0x75,0x4E,0x04,0x24,0x30,0x68,0x45,0x68, \ +0x80,0x89,0x2F,0x28,0x02,0xDA,0x00,0x20,0x06,0xB0,0xF0,0xBD,0x05,0x98,0x70, \ +0x49,0x01,0x28,0x04,0x91,0x09,0xD1,0x06,0x22,0xE8,0x1D,0x09,0x30,0x04,0x99, \ +0xFC,0xF7,0xD0,0xFD,0x00,0x28,0x01,0xD0,0x00,0x20,0xEE,0xE7,0x20,0x20,0xE9, \ +0x1D,0x19,0x31,0x28,0x5C,0x49,0x78,0x09,0x02,0x08,0x43,0x01,0x04,0x09,0x0C, \ +0x02,0x91,0x14,0x29,0x04,0xDB,0x7D,0x23,0x02,0x99,0xDB,0x00,0x99,0x42,0x01, \ +0xDD,0x00,0x20,0xDB,0xE7,0x22,0x20,0x28,0x5C,0x80,0x08,0x01,0xD2,0x00,0x20, \ +0xD5,0xE7,0x30,0x68,0x24,0x27,0x80,0x89,0x04,0x38,0x24,0x28,0x45,0xDD,0x5B, \ +0x49,0x03,0x91,0xE8,0x5D,0x00,0x28,0x09,0xD0,0x01,0x28,0x20,0xD0,0x03,0x28, \ +0x39,0xD1,0xE8,0x19,0x41,0x78,0x01,0x29,0x27,0xD0,0x00,0x20,0xC0,0xE7,0xEE, \ +0x19,0x70,0x78,0x00,0x28,0x00,0xD1,0xBB,0xE7,0x52,0x49,0x4A,0x79,0x82,0x42, \ +0x01,0xD0,0x00,0x20,0xB5,0xE7,0x03,0x99,0xB0,0x1C,0xFC,0xF7,0x8F,0xFD,0x00, \ +0x28,0x01,0xD0,0x00,0x20,0xAD,0xE7,0x70,0x78,0xC0,0x19,0x87,0x1C,0x01,0x20, \ +0x01,0x90,0x14,0xE0,0xE8,0x19,0x69,0x46,0x06,0x1C,0xFF,0xF7,0x74,0xFF,0x04, \ +0x1C,0x01,0xD1,0x00,0x20,0x9E,0xE7,0x70,0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0, \ +0x42,0x49,0x80,0x78,0x09,0x7D,0x88,0x42,0x01,0xD0,0x00,0x20,0x93,0xE7,0x03, \ +0x37,0x3A,0x4E,0x30,0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xBE,0xDC,0x01,0x98, \ +0x00,0x28,0x01,0xD1,0x00,0x20,0x87,0xE7,0x39,0x49,0x68,0x46,0x01,0x23,0x0A, \ +0x7D,0x21,0x1C,0xFF,0xF7,0x86,0xFE,0x00,0x28,0x00,0xD1,0x7D,0xE7,0x04,0x20, \ +0xF9,0xF7,0xF8,0xFF,0x33,0x48,0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01, \ +0x78,0x10,0x23,0x19,0x43,0x01,0x70,0xC0,0x20,0xFC,0xF7,0x2A,0xF8,0xE9,0x1D, \ +0x2E,0x4C,0x09,0x31,0x07,0x1C,0xE0,0x1D,0x0D,0x1C,0x06,0x22,0x07,0x30,0xFC, \ +0xF7,0x60,0xFD,0x06,0x22,0x29,0x1C,0x04,0x98,0xFC,0xF7,0x5B,0xFD,0x24,0x4D, \ +0xE0,0x1D,0x6A,0x79,0x03,0x99,0x0D,0x30,0xFC,0xF7,0x54,0xFD,0x24,0x49,0x01, \ +0x20,0xE6,0x1D,0x29,0x36,0x08,0x75,0x30,0x71,0x02,0x99,0x21,0x80,0xA8,0x70, \ +0x05,0x98,0x01,0x28,0x08,0xD1,0x00,0x21,0x00,0x20,0x01,0xF0,0xE6,0xFB,0x15, \ +0x49,0x00,0x20,0x09,0x68,0x48,0x61,0x07,0xE0,0xF9,0xF7,0x81,0xFF,0x21,0x88, \ +0x89,0x02,0x09,0x1A,0x06,0x20,0xF9,0xF7,0x9F,0xFF,0x17,0x49,0x00,0x20,0x48, \ +0x70,0x05,0x20,0x88,0x71,0x05,0x98,0x01,0x28,0x04,0xD1,0x01,0x21,0x04,0x20, \ 
+0xFE,0xF7,0x79,0xFB,0x01,0xE0,0x01,0x20,0xB0,0x71,0x10,0x48,0x01,0x68,0x10, \ +0x48,0xC2,0x69,0x11,0x43,0xC1,0x61,0x0F,0x48,0x01,0x24,0x04,0x70,0x38,0x1C, \ +0xFB,0xF7,0xDC,0xFF,0x20,0x1C,0x1E,0xE7,0x3C,0x9C,0x00,0x00,0x50,0x01,0x00, \ +0x02,0x00,0x01,0x00,0x02,0xE0,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0x00, \ +0x00,0x02,0x9C,0x01,0x00,0x02,0x84,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0xA0, \ +0x09,0x00,0x02,0xAC,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x3E,0x01,0x00,0x02, \ +0xF0,0xB5,0x84,0xB0,0x5D,0x49,0x04,0x22,0x01,0xA8,0xFC,0xF7,0xF9,0xFC,0x5C, \ +0x4F,0x5C,0x49,0x38,0x68,0x00,0x25,0x46,0x68,0x06,0x22,0xF0,0x1D,0x09,0x30, \ +0x03,0x91,0xFC,0xF7,0xD0,0xFC,0x00,0x28,0x02,0xD0,0x00,0x20,0x04,0xB0,0xF0, \ +0xBD,0x39,0x68,0x38,0x1C,0x89,0x89,0x2F,0x29,0x01,0xDA,0x00,0x20,0xF6,0xE7, \ +0x20,0x22,0xF3,0x1D,0x19,0x33,0xB2,0x5C,0x5B,0x78,0x1B,0x02,0x1A,0x43,0x12, \ +0x04,0x12,0x0C,0x00,0x92,0x14,0x2A,0x04,0xDB,0x7D,0x23,0x00,0x9A,0xDB,0x00, \ +0x9A,0x42,0x01,0xDD,0x00,0x20,0xE3,0xE7,0x22,0x22,0xB2,0x5C,0x52,0x08,0x01, \ +0xD2,0x00,0x20,0xDD,0xE7,0x24,0x27,0x04,0x39,0x24,0x29,0x34,0xDD,0xF0,0x5D, \ +0x00,0x28,0x09,0xD0,0x01,0x28,0x11,0xD0,0x03,0x28,0x2B,0xD1,0xF0,0x19,0x41, \ +0x78,0x01,0x29,0x19,0xD0,0x00,0x20,0xCC,0xE7,0xF0,0x19,0x40,0x78,0x20,0x28, \ +0x01,0xD9,0x00,0x25,0x00,0xE0,0x01,0x25,0xC0,0x19,0x87,0x1C,0x15,0xE0,0xF0, \ +0x19,0x02,0x90,0x01,0xA9,0xFF,0xF7,0x7F,0xFE,0x04,0x1C,0x01,0xD1,0x00,0x20, \ +0xB9,0xE7,0x02,0x98,0x40,0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0,0x31,0x49,0x80, \ +0x78,0x09,0x7D,0x88,0x42,0x01,0xD0,0x00,0x20,0xAD,0xE7,0x03,0x37,0x2B,0x48, \ +0x00,0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xCC,0xDC,0x00,0x2D,0x01,0xD1,0x00, \ +0x20,0xA2,0xE7,0x28,0x49,0x01,0x23,0x0A,0x7D,0x21,0x1C,0x01,0xA8,0xFF,0xF7, \ +0x91,0xFD,0x00,0x28,0x00,0xD1,0x98,0xE7,0x25,0x4C,0x06,0x22,0xE0,0x1D,0x07, \ +0x30,0x22,0x4F,0x03,0x99,0xFC,0xF7,0x7C,0xFC,0xE0,0x1D,0x0D,0x30,0x20,0x22, \ +0xF9,0x1D,0x15,0x31,0xFC,0xF7,0x75,0xFC,0xF8,0x1D,0x39,0x30,0x81,0x78,0xE0, \ +0x1D,0x29,0x30,0x01,0x71,0x01,0x79,0x1B,0x48,0x20,0x23,0x01,0x75,0x00,0x9A, \ +0x1A,0x49,0x22,0x80,0x0A,0x78,0x1A,0x43,0x0A,0x70,0x0A,0x78,0x10,0x23,0x1A, \ +0x43,0x0A,0x70,0x00,0x21,0x16,0x4A,0x50,0x30,0x41,0x70,0x91,0x70,0x05,0x21, \ +0x81,0x71,0x04,0x20,0xF9,0xF7,0xD9,0xFE,0x01,0x21,0x04,0x20,0xFE,0xF7,0xA1, \ +0xFA,0xC0,0x20,0xFB,0xF7,0x10,0xFF,0x0F,0x49,0x0A,0x68,0x0F,0x49,0xCB,0x69, \ +0x1A,0x43,0xCA,0x61,0x0E,0x49,0x01,0x27,0x0F,0x70,0xFB,0xF7,0x05,0xFF,0x38, \ +0x1C,0x57,0xE7,0x00,0x00,0x40,0x9C,0x00,0x00,0x50,0x01,0x00,0x02,0x00,0x01, \ +0x00,0x02,0x00,0x00,0x00,0x02,0xC4,0x00,0x00,0x02,0x84,0x00,0x00,0x02,0x50, \ +0x09,0x00,0x02,0x9C,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0xAC,0x02,0x00,0x02, \ +0x40,0x00,0x00,0x04,0x3E,0x01,0x00,0x02,0xF0,0xB4,0x1D,0x4A,0x1D,0x4B,0xD1, \ +0x1D,0x69,0x31,0xC9,0x7A,0x49,0x00,0x5F,0x5A,0xD1,0x1D,0x59,0x31,0x0B,0x8B, \ +0x01,0x3B,0x1B,0x04,0x1B,0x14,0x0B,0x83,0x00,0x2B,0x26,0xDD,0x17,0x4B,0x01, \ +0x25,0x5C,0x7A,0x50,0x32,0xD3,0x79,0x00,0x2B,0x04,0xD1,0x05,0x30,0x0E,0x28, \ +0x05,0xD9,0x0E,0x38,0x03,0xE0,0x01,0x30,0x0E,0x28,0x00,0xD9,0x01,0x20,0x00, \ +0x2C,0x05,0xD1,0x2B,0x1C,0x46,0x1E,0xB3,0x40,0x3B,0x40,0x10,0xD1,0x07,0xE0, \ +0xD3,0x79,0x00,0x2B,0x0C,0xD1,0x0A,0x4B,0x1B,0x18,0x5B,0x7B,0x00,0x2B,0x07, \ +0xD1,0x0B,0x8B,0x01,0x3B,0x1B,0x04,0x1B,0x14,0x0B,0x83,0x00,0x2B,0xDC,0xDC, \ +0x00,0x20,0xF0,0xBC,0xF7,0x46,0x00,0x00,0x50,0x09,0x00,0x02,0x6C,0x02,0x00, \ +0x02,0xB4,0x00,0x00,0x02,0x18,0x01,0x00,0x02,0xF0,0xB5,0x29,0x4C,0x07,0x1C, \ +0x00,0x26,0x27,0x70,0xE0,0x1D,0x03,0x30,0x66,0x70,0x66,0x80,0x06,0x22,0x25, \ 
+0x49,0xFC,0xF7,0xD9,0xFB,0x25,0x4D,0xE0,0x1D,0x09,0x30,0x06,0x22,0xE9,0x1D, \ +0x35,0x31,0xFC,0xF7,0xD1,0xFB,0xFF,0x20,0x20,0x71,0x60,0x71,0xA0,0x71,0xE0, \ +0x71,0x20,0x72,0x60,0x72,0x38,0x1C,0x40,0x28,0x1D,0x4F,0x1D,0xD0,0x00,0xF0, \ +0xE8,0xF8,0x00,0xF0,0xF0,0xF8,0xE5,0x1D,0x1D,0x35,0x28,0x1C,0x00,0xF0,0x0B, \ +0xF9,0x2D,0x18,0x28,0x1C,0x00,0xF0,0x23,0xF9,0x2D,0x18,0x16,0x48,0x80,0x7D, \ +0x02,0x28,0x03,0xD1,0x28,0x1C,0x00,0xF0,0x33,0xF9,0x2D,0x18,0x28,0x1C,0x00, \ +0xF0,0x3D,0xF9,0x28,0x18,0x00,0x1B,0xF8,0x64,0xB8,0x64,0xF0,0xBD,0x26,0x76, \ +0x0F,0x4E,0xE0,0x1D,0x72,0x79,0x13,0x30,0xE9,0x1D,0x15,0x31,0x62,0x76,0xFC, \ +0xF7,0x9E,0xFB,0x70,0x79,0x00,0x19,0x1A,0x30,0x00,0xF0,0x03,0xF9,0x70,0x79, \ +0x20,0x30,0x00,0x06,0x00,0x0E,0xB8,0x64,0xF0,0xBD,0x00,0x00,0x10,0x08,0x00, \ +0x02,0x60,0x00,0x00,0x02,0xC4,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0x00,0x00, \ +0x00,0x02,0x08,0x01,0x00,0x02,0xF8,0xB5,0x07,0x1C,0xFF,0xF7,0x9C,0xFF,0x00, \ +0x26,0x80,0x2F,0x47,0x4D,0x0E,0xD1,0xC0,0x20,0xFB,0xF7,0x3B,0xFE,0x04,0x1C, \ +0x45,0x48,0x41,0x7B,0x03,0x29,0x03,0xD0,0x20,0x1C,0xFB,0xF7,0x33,0xFE,0xF8, \ +0xBD,0x01,0x21,0x41,0x73,0x10,0xE0,0x40,0x2F,0x05,0xD1,0x40,0x48,0x01,0x21, \ +0x81,0x74,0x3F,0x48,0x46,0x80,0x08,0xE0,0x50,0x2F,0x06,0xD1,0x3E,0x48,0x3E, \ +0x49,0x06,0x22,0xFC,0xF7,0x60,0xFB,0x01,0x21,0x29,0x71,0x3C,0x48,0xF8,0xF7, \ +0x47,0xF9,0x50,0x2F,0x02,0xD1,0x36,0x48,0xC0,0x6C,0x01,0xE0,0x34,0x48,0x80, \ +0x6C,0x33,0x49,0x88,0x66,0x37,0x48,0x89,0x6E,0xC0,0x79,0xF9,0xF7,0xC7,0xFC, \ +0x30,0x49,0x50,0x2F,0xC8,0x66,0x0C,0xD1,0x2E,0x48,0x2E,0x49,0xC0,0x6E,0x48, \ +0x80,0x31,0x48,0xC0,0x79,0xFA,0xF7,0x51,0xF9,0x2B,0x49,0x49,0x88,0x40,0x18, \ +0x29,0x49,0x48,0x80,0x28,0x48,0x27,0x49,0x80,0x2F,0x48,0x66,0x16,0xD1,0xFC, \ +0xF7,0xB5,0xFB,0x2A,0x49,0x89,0x89,0x49,0x00,0x01,0x31,0x08,0x40,0x21,0x49, \ +0x88,0x62,0x27,0x48,0x00,0x88,0x08,0x62,0x89,0x6A,0x8B,0x00,0x59,0x18,0x89, \ +0x00,0x09,0x18,0x08,0x20,0xF9,0xF7,0x8B,0xFD,0x20,0x1C,0xFB,0xF7,0xE0,0xFD, \ +0xF9,0xF7,0xBE,0xF9,0xF9,0xF7,0x28,0xFA,0x00,0x90,0x80,0x2F,0x05,0xD1,0x00, \ +0x98,0x00,0x28,0x23,0xD1,0x01,0x21,0x69,0x70,0x20,0xE0,0x40,0x2F,0x1E,0xD1, \ +0x12,0x4C,0xC0,0x20,0xA6,0x74,0xFB,0xF7,0xCC,0xFD,0x07,0x1C,0xA8,0x79,0x01, \ +0x28,0x12,0xD1,0x00,0x98,0x00,0x28,0x0D,0xD1,0xE0,0x1D,0x69,0x30,0x81,0x7A, \ +0x00,0x29,0x0A,0xD1,0x01,0x21,0x81,0x72,0x0E,0x49,0xC8,0x8A,0x81,0x02,0x04, \ +0x20,0xF9,0xF7,0x60,0xFD,0x01,0xE0,0x01,0x21,0x69,0x71,0x38,0x1C,0xFB,0xF7, \ +0xB2,0xFD,0x7D,0xE7,0x00,0x00,0xA0,0x09,0x00,0x02,0x60,0x09,0x00,0x02,0x50, \ +0x09,0x00,0x02,0x10,0x08,0x00,0x02,0x14,0x08,0x00,0x02,0x34,0x01,0x00,0x02, \ +0x26,0x08,0x00,0x02,0x08,0x01,0x00,0x02,0xC4,0x00,0x00,0x02,0xB0,0x01,0x00, \ +0x02,0x03,0x49,0x02,0x48,0x09,0x88,0x01,0x80,0xF7,0x46,0x00,0x00,0x30,0x08, \ +0x00,0x02,0x84,0x00,0x00,0x02,0x0D,0x49,0x0C,0x48,0x8A,0x7A,0x92,0x00,0x02, \ +0x80,0xC9,0x7A,0x00,0x29,0x03,0xD0,0x01,0x88,0x10,0x23,0x19,0x43,0x01,0x80, \ +0x08,0x49,0x49,0x7A,0x01,0x29,0x04,0xD1,0x01,0x88,0x22,0x23,0x19,0x43,0x01, \ +0x80,0xF7,0x46,0x01,0x88,0x02,0x23,0x19,0x43,0x01,0x80,0xF7,0x46,0x32,0x08, \ +0x00,0x02,0x84,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x90,0xB4,0x01,0x1C,0x00, \ +0x20,0x0A,0x4A,0x08,0x70,0x53,0x79,0x00,0x2B,0x08,0xD9,0x08,0x4B,0x1F,0x18, \ +0x3F,0x7D,0x0C,0x18,0x01,0x30,0xA7,0x70,0x57,0x79,0x87,0x42,0xF7,0xD8,0x50, \ +0x79,0x48,0x70,0x50,0x79,0x90,0xBC,0x02,0x30,0xF7,0x46,0x00,0x00,0x08,0x01, \ +0x00,0x02,0x84,0x00,0x00,0x02,0x90,0xB4,0x01,0x1C,0x01,0x20,0x08,0x70,0x00, \ +0x20,0x08,0x4B,0x00,0x22,0x9F,0x18,0x3F,0x7C,0x00,0x2F,0x02,0xD0,0x0C,0x18, \ 
+0xA7,0x70,0x01,0x30,0x01,0x32,0x04,0x2A,0xF5,0xD3,0x48,0x70,0x90,0xBC,0x02, \ +0x30,0xF7,0x46,0x00,0x00,0x00,0x00,0x00,0x02,0x03,0x21,0x01,0x70,0x01,0x22, \ +0x42,0x70,0x01,0x30,0x80,0x18,0x02,0x4A,0x12,0x7D,0x02,0x70,0x08,0x1C,0xF7, \ +0x46,0x00,0x00,0x00,0x00,0x00,0x02,0x06,0x21,0x01,0x70,0x02,0x21,0x41,0x70, \ +0x04,0x49,0x02,0x30,0x0A,0x89,0x02,0x70,0x09,0x89,0x09,0x0A,0x41,0x70,0x04, \ +0x20,0xF7,0x46,0x00,0x00,0x84,0x00,0x00,0x02,0x0A,0x21,0x01,0x70,0x02,0x21, \ +0x41,0x70,0x00,0x21,0x81,0x70,0x02,0x30,0x41,0x1C,0x07,0x20,0x08,0x70,0x04, \ +0x20,0xF7,0x46,0xF0,0xB5,0x83,0xB0,0x51,0x48,0x52,0x4D,0x48,0x21,0x01,0x70, \ +0x01,0x26,0xEC,0x1D,0x29,0x34,0x46,0x70,0x62,0x79,0x11,0x21,0x4E,0x4F,0x02, \ +0x2A,0x01,0xD1,0x41,0x70,0x05,0xE0,0x03,0x2A,0x03,0xD1,0xBA,0x78,0x08,0x2A, \ +0x00,0xD1,0x41,0x70,0x4A,0x49,0x09,0x68,0x89,0x78,0x00,0x29,0x03,0xD0,0x41, \ +0x78,0x08,0x23,0x19,0x43,0x41,0x70,0x46,0x49,0x00,0x23,0x00,0x22,0x46,0x48, \ +0xC9,0x79,0xF8,0xF7,0x27,0xF8,0x45,0x48,0x45,0x49,0x06,0x22,0xFC,0xF7,0x28, \ +0xFA,0xE9,0x1D,0x07,0x31,0x0D,0x1C,0x06,0x22,0x42,0x48,0xFC,0xF7,0x21,0xFA, \ +0x29,0x1C,0x06,0x22,0x41,0x48,0xFC,0xF7,0x1C,0xFA,0x40,0x4D,0x18,0x20,0xA8, \ +0x66,0x39,0x48,0x18,0x21,0xC0,0x79,0xF9,0xF7,0x8E,0xFB,0xE8,0x66,0x32,0x48, \ +0xEE,0x1D,0x68,0x66,0x01,0x20,0x49,0x36,0xF0,0x70,0xF9,0xF7,0xAB,0xF8,0xF9, \ +0xF7,0x15,0xF9,0x02,0x90,0x00,0x20,0xF0,0x70,0x02,0x98,0x00,0x28,0x01,0xD0, \ +0x03,0xB0,0xF0,0xBD,0x02,0x26,0x2C,0x48,0x6E,0x60,0xC0,0x79,0x32,0x49,0x40, \ +0x00,0x08,0x5A,0x31,0x49,0xC9,0x88,0x40,0x18,0x31,0x49,0x09,0x88,0x41,0x18, \ +0x01,0x20,0xF9,0xF7,0x59,0xFC,0x00,0x22,0xD2,0x43,0x6E,0x74,0x00,0x92,0x01, \ +0x22,0x10,0x21,0x01,0xAB,0x2B,0x48,0xFB,0xF7,0xB1,0xFD,0x00,0x20,0x1E,0x49, \ +0x68,0x74,0x0A,0x68,0x53,0x78,0x00,0x2B,0x22,0xD0,0x93,0x78,0x01,0x33,0x1B, \ +0x06,0x1B,0x0E,0x93,0x70,0x04,0x2B,0x02,0xDA,0x09,0x68,0x48,0x70,0xD2,0xE7, \ +0x60,0x79,0x01,0x28,0x1F,0xDD,0x02,0x28,0x03,0xD1,0xBA,0x78,0x08,0x23,0x9A, \ +0x43,0xBA,0x70,0x03,0x28,0x17,0xD1,0x0E,0x48,0x40,0x78,0x40,0x09,0x06,0xD3, \ +0x01,0x20,0xF8,0x70,0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0x0C,0xE0,0x01, \ +0x20,0xB8,0x71,0x09,0xE0,0x60,0x79,0x03,0x28,0x06,0xD1,0x05,0x4A,0x01,0x20, \ +0x52,0x78,0x52,0x09,0x00,0xD3,0x00,0x20,0xF8,0x70,0x09,0x68,0x40,0x20,0x08, \ +0x70,0xAB,0xE7,0x00,0x00,0x10,0x08,0x00,0x02,0x84,0x00,0x00,0x02,0xC0,0x09, \ +0x00,0x02,0xD8,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0x12,0x08,0x00,0x02,0x1A, \ +0x08,0x00,0x02,0x60,0x00,0x00,0x02,0x20,0x08,0x00,0x02,0x14,0x08,0x00,0x02, \ +0x50,0x09,0x00,0x02,0xB8,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xB6,0x01,0x00, \ +0x02,0x04,0x07,0x00,0x02,0xF8,0xB4,0x00,0x26,0x82,0x1C,0x06,0x29,0x01,0xD3, \ +0x48,0x08,0x02,0xD3,0x00,0x20,0xF8,0xBC,0xF7,0x46,0x00,0x24,0x03,0x23,0x00, \ +0x25,0xCF,0x1E,0x17,0xD0,0x01,0x39,0xD0,0x5C,0x99,0x42,0x02,0xD1,0x00,0x28, \ +0x0F,0xD1,0x0C,0xE0,0x0E,0x28,0x0C,0xD8,0x01,0x28,0x0A,0xD3,0xA8,0x42,0x08, \ +0xD3,0xD5,0x18,0x6D,0x78,0x03,0x33,0x03,0x34,0x2D,0x18,0xA7,0x42,0xEC,0xD8, \ +0x01,0x2E,0x01,0xD1,0x00,0x20,0xE0,0xE7,0x1B,0x48,0xC0,0x79,0x01,0x28,0x00, \ +0xD1,0xDB,0xE7,0x19,0x48,0xC1,0x1D,0x29,0x31,0x49,0x7A,0x00,0x29,0x01,0xD1, \ +0x01,0x20,0xD3,0xE7,0x91,0x78,0x3A,0x30,0x00,0x23,0x81,0x70,0x51,0x78,0x41, \ +0x70,0x11,0x78,0x01,0x70,0x03,0x21,0x00,0x2F,0x1B,0xD9,0x50,0x5C,0x00,0x28, \ +0x18,0xD0,0x0F,0x4D,0x01,0x26,0x2C,0x18,0x66,0x73,0x54,0x18,0x00,0x94,0x64, \ +0x78,0x24,0x18,0xA0,0x42,0x0A,0xD2,0x0A,0x4D,0x01,0x26,0x2D,0x18,0x6E,0x73, \ +0x00,0x9E,0x10,0x3D,0xB6,0x78,0x01,0x30,0xA0,0x42,0xEE,0x73,0xF4,0xD3,0x03, \ 
+0x31,0x03,0x33,0x9F,0x42,0xE3,0xD8,0x01,0x20,0xAA,0xE7,0x00,0x00,0xC0,0x09, \ +0x00,0x02,0x84,0x00,0x00,0x02,0x18,0x01,0x00,0x02,0xF1,0xB5,0x81,0xB0,0x25, \ +0x4F,0x01,0x9E,0x3F,0x68,0x00,0x24,0xBF,0x89,0x00,0x21,0x24,0x20,0x3D,0x1F, \ +0x00,0x95,0x24,0x2D,0x3F,0xD9,0x21,0x4F,0x7F,0x7A,0x35,0x5C,0x03,0x2D,0x08, \ +0xD0,0x07,0x2D,0x0D,0xD1,0x35,0x18,0x6D,0x78,0x01,0x24,0x03,0x1C,0x02,0x35, \ +0x28,0x18,0x0A,0xE0,0x35,0x18,0x6D,0x78,0x01,0x21,0x02,0x1C,0x02,0x35,0x28, \ +0x18,0x05,0xE0,0x35,0x18,0x6D,0x78,0x02,0x35,0x28,0x18,0x00,0x29,0x01,0xD0, \ +0x00,0x2F,0x02,0xD0,0x00,0x9D,0x85,0x42,0xE1,0xD8,0x00,0x29,0x1D,0xD0,0xB0, \ +0x18,0x40,0x78,0x01,0x28,0x01,0xD0,0x02,0xB0,0xF0,0xBD,0x01,0x2F,0x15,0xD1, \ +0x00,0x2C,0x13,0xD0,0x01,0x98,0xC0,0x18,0x41,0x78,0xFF,0xF7,0x5E,0xFF,0x00, \ +0x28,0x00,0xD1,0xF1,0xE7,0x08,0x48,0xC1,0x79,0x00,0x29,0x01,0xD1,0x01,0x21, \ +0xC1,0x71,0x06,0x48,0x00,0x68,0x00,0x28,0x01,0xD0,0x00,0xF0,0x07,0xFC,0xE4, \ +0xE7,0x50,0x01,0x00,0x02,0xB4,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0xC4,0x02, \ +0x00,0x02,0x00,0xB5,0x05,0x49,0x89,0x7C,0x01,0x29,0x04,0xD1,0x01,0x78,0x80, \ +0x29,0x01,0xD1,0xFF,0xF7,0xA0,0xFF,0x00,0xBD,0x00,0x00,0xC4,0x00,0x00,0x02, \ +0x90,0xB5,0x10,0x4C,0x60,0x78,0x00,0x28,0x1A,0xD0,0x0F,0x4F,0x38,0x68,0x40, \ +0x68,0x42,0x7E,0x18,0x30,0x00,0x2A,0x09,0xD0,0x0C,0x49,0x49,0x79,0x91,0x42, \ +0x0F,0xD1,0x0B,0x49,0x02,0x30,0xFC,0xF7,0x96,0xF8,0x00,0x28,0x09,0xD1,0x38, \ +0x68,0x40,0x68,0xC1,0x1D,0x03,0x31,0x06,0x22,0x07,0x48,0xFC,0xF7,0xAA,0xF8, \ +0x01,0x20,0xA0,0x70,0x90,0xBD,0x00,0x00,0xA0,0x09,0x00,0x02,0x50,0x01,0x00, \ +0x02,0x08,0x01,0x00,0x02,0x98,0x00,0x00,0x02,0x34,0x01,0x00,0x02,0xB0,0xB4, \ +0x03,0x78,0x00,0x27,0x20,0x49,0x20,0x4A,0x08,0x2B,0x37,0xD1,0xD3,0x78,0x00, \ +0x2B,0x04,0xD0,0xD0,0x7A,0x09,0x68,0x88,0x75,0xB0,0xBC,0xF7,0x46,0x00,0x79, \ +0x40,0x08,0x03,0xD3,0x90,0x7A,0x09,0x68,0x88,0x75,0xF6,0xE7,0x0B,0x68,0x99, \ +0x7D,0xD2,0x7A,0x91,0x42,0x01,0xDD,0x9A,0x75,0xEF,0xE7,0x15,0x4C,0x08,0x19, \ +0x00,0x7C,0x00,0x28,0xEA,0xD1,0x08,0x1C,0x01,0x29,0x0A,0xD3,0x01,0x38,0x25, \ +0x18,0x2D,0x7C,0x00,0x2D,0x03,0xD1,0x01,0x28,0xF8,0xD2,0x00,0x2F,0x01,0xD0, \ +0x98,0x75,0xDC,0xE7,0x8A,0x42,0x06,0xD9,0x01,0x31,0x60,0x18,0x00,0x7C,0x00, \ +0x28,0x03,0xD1,0x8A,0x42,0xF8,0xD8,0x00,0x2F,0x01,0xD0,0x99,0x75,0xCF,0xE7, \ +0x9A,0x75,0xCD,0xE7,0xD0,0x79,0x09,0x68,0x88,0x75,0xC9,0xE7,0x00,0x00,0xCC, \ +0x01,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0xB5,0x07,0x48, \ +0x81,0x79,0x03,0x29,0x02,0xD0,0x81,0x79,0x04,0x29,0x05,0xD1,0x00,0x21,0x81, \ +0x71,0x07,0x21,0x04,0x20,0xFD,0xF7,0x92,0xFE,0x00,0xBD,0x00,0x00,0xA0,0x09, \ +0x00,0x02,0xB0,0xB5,0x37,0x48,0x37,0x49,0x00,0x68,0x44,0x68,0x22,0x20,0x20, \ +0x5C,0x10,0x23,0x18,0x40,0xC1,0x27,0x00,0x28,0x04,0xD0,0x08,0x78,0x00,0x28, \ +0x06,0xD1,0x38,0x1C,0xB0,0xBD,0x08,0x78,0x00,0x28,0x01,0xD0,0x38,0x1C,0xB0, \ +0xBD,0x24,0x20,0x20,0x5C,0x00,0x28,0x01,0xD0,0x38,0x1C,0xB0,0xBD,0xE0,0x1D, \ +0x1D,0x30,0x45,0x78,0x2A,0x49,0x00,0x2D,0x04,0xD0,0x4A,0x79,0xAA,0x42,0x01, \ +0xD0,0x38,0x1C,0xB0,0xBD,0x4A,0x79,0x26,0x49,0x02,0x30,0xFB,0xF7,0xF6,0xFF, \ +0x00,0x28,0x01,0xD0,0x38,0x1C,0xB0,0xBD,0x60,0x19,0x20,0x30,0xC0,0x79,0x40, \ +0x19,0x28,0x30,0x21,0x5C,0x03,0x29,0x01,0xD0,0x38,0x1C,0xB0,0xBD,0x20,0x18, \ +0x1E,0x49,0x80,0x78,0x09,0x7D,0x88,0x42,0x01,0xD0,0x38,0x1C,0xB0,0xBD,0x1B, \ +0x48,0x40,0x7A,0x00,0x28,0x06,0xD0,0x1A,0x48,0x08,0x18,0x40,0x7B,0x00,0x28, \ +0x0D,0xD1,0x38,0x1C,0xB0,0xBD,0x18,0x48,0x18,0x4A,0xC0,0x7A,0x40,0x00,0x10, \ +0x5A,0x01,0x22,0x01,0x39,0x8A,0x40,0x10,0x40,0x01,0xD1,0x38,0x1C,0xB0,0xBD, \ 
+0xC0,0x20,0xFB,0xF7,0xA4,0xFA,0x04,0x1C,0x01,0x20,0xF8,0xF7,0x6E,0xFB,0x00, \ +0x28,0x04,0xD1,0x20,0x1C,0xFB,0xF7,0x9B,0xFA,0x38,0x1C,0xB0,0xBD,0x20,0x1C, \ +0xFB,0xF7,0x96,0xFA,0x02,0x20,0xFF,0xF7,0xC7,0xF9,0x00,0x20,0xB0,0xBD,0x00, \ +0x00,0x50,0x01,0x00,0x02,0x1C,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0xE0,0x00, \ +0x00,0x02,0x00,0x00,0x00,0x02,0xB4,0x00,0x00,0x02,0x18,0x01,0x00,0x02,0xC0, \ +0x09,0x00,0x02,0x6C,0x02,0x00,0x02,0x80,0xB5,0xFD,0xF7,0x33,0xFD,0x1A,0x48, \ +0x00,0xF0,0x26,0xFE,0x19,0x4B,0x1A,0x48,0x59,0x7A,0x01,0x29,0x04,0xD1,0x48, \ +0x21,0x41,0x81,0x18,0x21,0x01,0x81,0x03,0xE0,0x90,0x21,0x41,0x81,0x30,0x21, \ +0x01,0x81,0x41,0x89,0x02,0x89,0x14,0x4F,0x89,0x18,0x12,0x4A,0x11,0x80,0xC2, \ +0x88,0x80,0x88,0x11,0x18,0x09,0x18,0x39,0x80,0x51,0x18,0xFF,0x31,0x10,0x4A, \ +0x31,0x31,0x11,0x80,0x19,0x88,0x10,0x4F,0x48,0x43,0x0E,0x49,0x08,0x80,0xD8, \ +0x79,0x0E,0x49,0x38,0x70,0x38,0x78,0x08,0x70,0xF7,0xF7,0x30,0xFC,0xF9,0xF7, \ +0x3C,0xF9,0x39,0x78,0x0B,0x48,0x40,0x5C,0x0B,0x49,0x08,0x70,0x80,0xBD,0x60, \ +0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xB4,0x01,0x00,0x02, \ +0xB0,0x01,0x00,0x02,0xB2,0x01,0x00,0x02,0xB6,0x01,0x00,0x02,0x9A,0x01,0x00, \ +0x02,0x9B,0x01,0x00,0x02,0xC8,0x01,0x00,0x02,0x99,0x01,0x00,0x02,0x80,0xB4, \ +0x23,0x48,0x00,0x21,0x01,0x70,0x00,0x20,0x19,0x27,0x21,0x4A,0xFF,0x02,0x11, \ +0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x00,0x20,0x43,0x27,0x1E,0x4A,0x7F,0x02, \ +0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x1C,0x48,0x1A,0x4A,0x01,0x80,0x1C, \ +0x48,0x1C,0x4B,0x02,0x60,0x13,0x60,0x02,0x68,0xD7,0x1D,0x15,0x37,0x57,0x60, \ +0x3A,0x1C,0x07,0x68,0x08,0x3A,0xBA,0x60,0x02,0x68,0x11,0x73,0x02,0x68,0x91, \ +0x73,0x07,0x68,0x03,0x22,0xBA,0x75,0x02,0x68,0x91,0x82,0x00,0x68,0x13,0x4A, \ +0x10,0x60,0x13,0x48,0x0D,0x4A,0x01,0x80,0x12,0x48,0x02,0x60,0x13,0x60,0x02, \ +0x68,0xD3,0x1D,0x11,0x33,0x53,0x60,0x02,0x68,0x91,0x81,0x02,0x68,0x11,0x72, \ +0x00,0x68,0x0D,0x49,0x08,0x60,0x0D,0x49,0x08,0x60,0x0D,0x49,0x01,0x20,0x08, \ +0x70,0x80,0xBC,0xF7,0x46,0x00,0x00,0x9C,0x01,0x00,0x02,0x00,0x11,0x00,0x02, \ +0x00,0xDA,0x00,0x02,0xF8,0x01,0x00,0x02,0xCC,0x01,0x00,0x02,0x00,0x00,0x00, \ +0x80,0x68,0x02,0x00,0x02,0xFA,0x01,0x00,0x02,0x4C,0x01,0x00,0x02,0x64,0x02, \ +0x00,0x02,0x50,0x01,0x00,0x02,0xE7,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x39, \ +0x4E,0xF7,0x1D,0x69,0x37,0xB8,0x78,0x04,0x23,0x18,0x40,0x40,0x24,0x00,0x25, \ +0x00,0x28,0x03,0xD1,0x7D,0x71,0x3C,0x71,0x02,0xB0,0xF0,0xBD,0x33,0x49,0xA4, \ +0x20,0x08,0x70,0x10,0x20,0x48,0x70,0x32,0x48,0x03,0x23,0xC0,0x88,0x9B,0x03, \ +0x18,0x43,0x48,0x80,0xC8,0x1D,0x03,0x30,0x06,0x22,0x2E,0x49,0xFB,0xF7,0xEF, \ +0xFE,0x2E,0x49,0x2E,0x48,0x06,0x22,0xFB,0xF7,0xEA,0xFE,0x10,0x20,0x2D,0x49, \ +0xB0,0x66,0xC8,0x79,0x10,0x21,0xF9,0xF7,0x5D,0xF8,0xF0,0x66,0x24,0x48,0x70, \ +0x66,0x01,0x20,0x38,0x70,0xF8,0xF7,0x7C,0xFD,0xF8,0xF7,0xE6,0xFD,0x3D,0x70, \ +0x82,0x25,0x00,0x28,0x2E,0xD1,0x23,0x49,0x24,0x48,0xC9,0x79,0x24,0x4A,0xC0, \ +0x88,0x49,0x00,0x51,0x5A,0x40,0x18,0x22,0x49,0x09,0x88,0x41,0x18,0x01,0x20, \ +0x38,0x71,0x04,0x20,0x70,0x60,0x01,0x20,0xF9,0xF7,0x2C,0xF9,0x00,0x22,0xD2, \ +0x43,0x00,0x92,0x01,0x22,0x11,0x21,0x01,0xAB,0x1B,0x48,0xFB,0xF7,0x85,0xFA, \ +0x01,0x98,0x41,0x08,0x01,0xD3,0x3C,0x71,0x1A,0xE0,0x40,0x09,0x18,0xD3,0x78, \ +0x79,0x17,0x49,0x01,0x30,0x00,0x06,0x00,0x0E,0x78,0x71,0x09,0x7C,0x88,0x42, \ +0x01,0xDA,0x3D,0x71,0x0D,0xE0,0x3C,0x71,0x0B,0xE0,0x78,0x79,0x10,0x49,0x01, \ +0x30,0x00,0x06,0x00,0x0E,0x78,0x71,0x09,0x7C,0x88,0x42,0x01,0xDA,0x3D,0x71, \ +0x00,0xE0,0x3C,0x71,0x97,0xE7,0x50,0x09,0x00,0x02,0x10,0x08,0x00,0x02,0x84, \ 
+0x00,0x00,0x02,0x60,0x00,0x00,0x02,0x92,0x00,0x00,0x02,0x14,0x08,0x00,0x02, \ +0x08,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xB8,0x01,0x00,0x02,0xB6,0x01,0x00, \ +0x02,0x44,0x07,0x00,0x02,0xC4,0x00,0x00,0x02,0x80,0xB5,0xC0,0x20,0xFB,0xF7, \ +0x3E,0xF9,0x07,0x1C,0x12,0x48,0x01,0x68,0x01,0x31,0x01,0x60,0x11,0x48,0xFB, \ +0xF7,0xBA,0xFE,0x00,0x29,0x17,0xD1,0x0F,0x48,0x10,0x4A,0x03,0x78,0x10,0x49, \ +0x00,0x2B,0x06,0xD1,0x09,0x68,0xD3,0x69,0x19,0x43,0xD1,0x61,0x01,0x21,0x01, \ +0x70,0x0A,0xE0,0x0C,0x4B,0x9B,0x79,0x05,0x2B,0x06,0xD0,0x09,0x68,0xD3,0x69, \ +0xC9,0x43,0x19,0x40,0xD1,0x61,0x00,0x21,0x01,0x70,0x38,0x1C,0xFB,0xF7,0x19, \ +0xF9,0x80,0xBD,0xE0,0x02,0x00,0x02,0x20,0x4E,0x00,0x00,0x3E,0x01,0x00,0x02, \ +0x40,0x00,0x00,0x04,0xAC,0x02,0x00,0x02,0xA0,0x09,0x00,0x02,0x90,0xB5,0xC0, \ +0x20,0xFB,0xF7,0x08,0xF9,0x07,0x1C,0x0F,0x48,0x81,0x7A,0x00,0x29,0x15,0xD1, \ +0x01,0x7B,0x01,0x29,0x12,0xD1,0xC1,0x7A,0x00,0x29,0x0F,0xD1,0x00,0x24,0x0A, \ +0x49,0x50,0x30,0x0C,0x70,0x44,0x70,0x00,0xF0,0xDA,0xFB,0x08,0x48,0x01,0x21, \ +0x84,0x61,0x07,0x20,0xFD,0xF7,0x7E,0xFC,0x06,0x49,0x01,0x20,0x08,0x70,0x38, \ +0x1C,0xFB,0xF7,0xEA,0xF8,0x90,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0x40,0x01, \ +0x00,0x02,0x80,0x00,0x00,0x04,0xBB,0x02,0x00,0x02,0x90,0xB5,0x16,0x49,0x16, \ +0x4F,0xCC,0x1D,0x29,0x34,0x62,0x79,0x03,0x2A,0x0F,0xD1,0x01,0x23,0x1B,0x03, \ +0x98,0x42,0x0B,0xD1,0x08,0x88,0x80,0x02,0x05,0x23,0x1B,0x03,0xC1,0x18,0x02, \ +0x20,0xF9,0xF7,0x74,0xF8,0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0xB8,0x78, \ +0x01,0x28,0x0C,0xD1,0x00,0xF0,0x61,0xFB,0x60,0x79,0x02,0x28,0x08,0xD1,0xB8, \ +0x78,0x08,0x23,0x18,0x43,0xB8,0x70,0xB8,0x78,0x40,0x08,0x40,0x00,0xB8,0x70, \ +0x90,0xBD,0xB8,0x78,0x40,0x08,0x40,0x00,0xB8,0x70,0x90,0xBD,0x84,0x00,0x00, \ +0x02,0xC0,0x09,0x00,0x02,0x80,0xB5,0x18,0x48,0x81,0x7A,0x00,0x29,0x1C,0xD1, \ +0x01,0x7B,0x01,0x29,0x19,0xD1,0xC0,0x7A,0x00,0x28,0x16,0xD1,0x14,0x4F,0xF8, \ +0x1D,0x29,0x30,0x40,0x79,0x03,0x28,0x14,0xD1,0xF9,0xF7,0x1F,0xF8,0x39,0x88, \ +0x11,0x4B,0x10,0x4F,0x89,0x02,0x08,0x1A,0x98,0x42,0x08,0xD9,0xC1,0x1A,0x06, \ +0x20,0xF9,0xF7,0x38,0xF8,0x00,0xF0,0x72,0xFB,0x01,0x20,0xB8,0x70,0x80,0xBD, \ +0x00,0x20,0xB8,0x70,0x80,0xBD,0x01,0x20,0x80,0x06,0x08,0x49,0x40,0x6A,0x06, \ +0x4B,0x49,0x68,0xC0,0x18,0x88,0x42,0xF2,0xD2,0x00,0xF0,0x61,0xFB,0x80,0xBD, \ +0x50,0x09,0x00,0x02,0x84,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0xB8,0x0B,0x00, \ +0x00,0x80,0x00,0x00,0x04,0xF0,0xB5,0xC0,0x20,0xFB,0xF7,0x6E,0xF8,0x05,0x1C, \ +0x00,0x26,0x34,0x48,0x07,0x24,0x64,0x06,0x06,0x70,0xE0,0x69,0x10,0x23,0x98, \ +0x43,0xE0,0x61,0x31,0x48,0xC1,0x69,0x03,0x0C,0x19,0x43,0xC1,0x61,0xC1,0x69, \ +0x1B,0x23,0x99,0x43,0xC1,0x61,0xC1,0x69,0x73,0x1F,0x19,0x40,0xC1,0x61,0xC1, \ +0x69,0x83,0x01,0x19,0x43,0xC1,0x61,0x2A,0x49,0xC2,0x69,0x09,0x68,0xC9,0x43, \ +0x11,0x40,0xC1,0x61,0xA1,0x69,0x01,0x23,0x19,0x43,0xA1,0x61,0x0F,0x22,0x12, \ +0x06,0x25,0x4B,0x11,0x89,0xD9,0x69,0x01,0x05,0x00,0x68,0xCF,0x68,0x10,0x88, \ +0xC9,0x6B,0x04,0x27,0x21,0x48,0x23,0x49,0x06,0x70,0x21,0x48,0x9E,0x61,0x07, \ +0x70,0x01,0x20,0x08,0x70,0x20,0x48,0x06,0x70,0x20,0x48,0x06,0x70,0x20,0x48, \ +0xC6,0x74,0x20,0x48,0x06,0x70,0xFA,0xF7,0xCE,0xFA,0x1B,0x48,0x00,0x78,0x00, \ +0x28,0x03,0xD1,0xA0,0x69,0xFD,0x23,0x18,0x40,0xA0,0x61,0xFA,0xF7,0x5E,0xFC, \ +0xFA,0xF7,0xC8,0xFC,0x0A,0x20,0xF7,0xF7,0xD9,0xF9,0xFA,0xF7,0xBB,0xFC,0x15, \ +0x48,0x01,0x21,0xC2,0x1D,0x49,0x32,0x07,0x75,0x91,0x71,0x56,0x70,0x13,0x4B, \ +0x02,0x22,0x1A,0x70,0x13,0x4B,0x70,0x30,0x19,0x70,0x12,0x4B,0x59,0x71,0x86, \ +0x70,0x12,0x48,0x02,0x70,0xF7,0xF7,0xCB,0xF9,0x28,0x1C,0xFB,0xF7,0x04,0xF8, \ 
+0xF0,0xBD,0x00,0x00,0xE6,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0xBC,0x02,0x00, \ +0x02,0x80,0x00,0x00,0x04,0x40,0x01,0x00,0x02,0x53,0x02,0x00,0x02,0x5E,0x02, \ +0x00,0x02,0x3A,0x01,0x00,0x02,0x3B,0x01,0x00,0x02,0x50,0x09,0x00,0x02,0x51, \ +0x02,0x00,0x02,0xBA,0x02,0x00,0x02,0xBB,0x02,0x00,0x02,0xB4,0x00,0x00,0x02, \ +0x3F,0x01,0x00,0x02,0x90,0xB5,0x22,0x49,0x00,0x27,0xC8,0x1D,0x49,0x30,0x82, \ +0x79,0x01,0x2A,0x00,0xD0,0x47,0x71,0xCA,0x1D,0x69,0x32,0x93,0x79,0x1D,0x49, \ +0x00,0x2B,0x03,0xD0,0x97,0x71,0x01,0x20,0x88,0x73,0x90,0xBD,0x52,0x78,0x00, \ +0x2A,0x02,0xD0,0xFD,0xF7,0x6A,0xFB,0x90,0xBD,0x02,0x78,0x00,0x2A,0x03,0xD0, \ +0x47,0x71,0xFD,0xF7,0x13,0xFA,0x90,0xBD,0x42,0x79,0x00,0x2A,0x02,0xD0,0xFD, \ +0xF7,0x43,0xFA,0x90,0xBD,0x82,0x78,0x00,0x2A,0x02,0xD0,0xFD,0xF7,0x27,0xFA, \ +0x90,0xBD,0xC9,0x7B,0x00,0x29,0x02,0xD0,0xFD,0xF7,0x2D,0xFA,0x90,0xBD,0x80, \ +0x79,0x05,0x28,0x0D,0xD1,0x0A,0x4C,0x20,0x68,0x01,0x7B,0xC9,0x09,0x02,0xD3, \ +0xF9,0xF7,0xC0,0xFB,0x90,0xBD,0x01,0x7B,0x10,0x29,0x02,0xD1,0xF9,0xF7,0xD2, \ +0xFC,0x20,0x60,0x38,0x1C,0x90,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0x60,0x09, \ +0x00,0x02,0xCC,0x01,0x00,0x02,0xF0,0xB5,0xC0,0x20,0xFA,0xF7,0x94,0xFF,0x15, \ +0x4D,0x00,0x24,0x07,0x1C,0xEE,0x1D,0x2E,0x36,0x14,0x48,0x01,0x19,0x89,0x7B, \ +0x00,0x29,0x19,0xD0,0x00,0x5D,0x81,0x00,0x09,0x18,0x49,0x00,0x28,0x19,0xC2, \ +0x1D,0x49,0x32,0x52,0x78,0x91,0x42,0x04,0xDB,0x30,0x30,0x40,0x79,0x0C,0x49, \ +0x08,0x55,0x05,0xE0,0x50,0x1A,0x22,0x06,0x12,0x0E,0x31,0x1C,0x00,0xF0,0x53, \ +0xF8,0x08,0x48,0xFC,0x23,0x01,0x5D,0x19,0x40,0x01,0x55,0x01,0x34,0x0E,0x2C, \ +0xDD,0xDB,0x38,0x1C,0xFA,0xF7,0x6A,0xFF,0xF0,0xBD,0x00,0x00,0x64,0x0A,0x00, \ +0x02,0x18,0x01,0x00,0x02,0xFC,0x0A,0x00,0x02,0xF0,0xB5,0x1B,0x4E,0x00,0x27, \ +0x1B,0x4D,0xF4,0x1D,0x3C,0x34,0xF1,0x19,0xC8,0x1D,0x29,0x30,0x42,0x79,0xE0, \ +0x5D,0x12,0x1A,0x93,0x00,0x9A,0x18,0x00,0xD5,0x0F,0x32,0x12,0x11,0x50,0x31, \ +0x2B,0x7E,0x49,0x78,0x89,0x1A,0x8B,0x42,0x02,0xD3,0x12,0x49,0xC8,0x55,0x05, \ +0xE0,0xC8,0x1A,0x3A,0x06,0x12,0x0E,0x21,0x1C,0x00,0xF0,0x20,0xF8,0x0D,0x48, \ +0xFC,0x23,0xC1,0x5D,0x19,0x40,0xC1,0x55,0x0C,0x49,0x49,0x7C,0x49,0x08,0x03, \ +0xD3,0xC1,0x5D,0x01,0x23,0x19,0x43,0xC1,0x55,0x01,0x37,0x0E,0x2F,0xD5,0xDB, \ +0x28,0x7D,0x05,0x49,0x40,0x18,0x10,0x38,0xC0,0x7B,0x04,0x49,0x48,0x74,0xF0, \ +0xBD,0x64,0x0A,0x00,0x02,0x00,0x00,0x00,0x02,0xFC,0x0A,0x00,0x02,0xD8,0x07, \ +0x00,0x02,0x90,0xB5,0x0C,0x1C,0x01,0x01,0x05,0x20,0x17,0x1C,0xFB,0xF7,0x9F, \ +0xFC,0xE3,0x5D,0x00,0x22,0x06,0x49,0x83,0x42,0x02,0xD3,0x18,0x1A,0xC8,0x55, \ +0x00,0xE0,0xCA,0x55,0xC8,0x5D,0xE3,0x5D,0x98,0x42,0x00,0xDD,0xCA,0x55,0x90, \ +0xBD,0xFC,0x0A,0x00,0x02,0x80,0xB5,0x11,0x48,0x02,0x68,0x51,0x68,0xC8,0x1D, \ +0x19,0x30,0x80,0x78,0x40,0x08,0x15,0xD3,0x92,0x89,0x24,0x20,0x04,0x3A,0x24, \ +0x2A,0x10,0xD9,0x0F,0x5C,0x06,0x2F,0x0D,0xD2,0x02,0xA3,0xDB,0x5D,0x5B,0x00, \ +0x9F,0x44,0x00,0x1C,0x03,0x03,0x09,0x03,0x03,0x0A,0x0B,0x18,0x5B,0x78,0x02, \ +0x33,0x18,0x18,0x82,0x42,0xEE,0xD8,0x80,0xBD,0x08,0x18,0xF8,0xF7,0x87,0xF8, \ +0x80,0xBD,0x50,0x01,0x00,0x02,0xB0,0xB5,0x0C,0x1C,0x07,0x1C,0x01,0x28,0x01, \ +0xD3,0x0E,0x2F,0x01,0xD9,0x00,0x20,0xB0,0xBD,0x1B,0x4D,0xE8,0x69,0x2B,0x0C, \ +0x18,0x43,0xE8,0x61,0x19,0x48,0xE9,0x69,0x00,0x68,0x08,0x43,0xE8,0x61,0x18, \ +0x48,0xE9,0x69,0x00,0x68,0x08,0x43,0xE8,0x61,0xE8,0x69,0x04,0x23,0x18,0x43, \ +0xE8,0x61,0x14,0x48,0xF7,0xF7,0x81,0xF8,0x00,0xF0,0xBF,0xF8,0x01,0x2C,0x01, \ +0xD1,0x00,0xF0,0xF5,0xF9,0x00,0xF0,0x81,0xF8,0x10,0x48,0x00,0x78,0x01,0x28, \ +0x0A,0xD1,0x0E,0x2F,0x04,0xD1,0xE8,0x69,0x40,0x23,0x98,0x43,0xE8,0x61,0x03, \ 
+0xE0,0xE8,0x69,0x40,0x23,0x18,0x43,0xE8,0x61,0x38,0x1C,0x00,0xF0,0x11,0xF8, \ +0xE8,0x69,0x01,0x23,0x9B,0x02,0x98,0x43,0xE8,0x61,0x01,0x20,0xB0,0xBD,0x40, \ +0x00,0x00,0x04,0xBC,0x02,0x00,0x02,0xC0,0x02,0x00,0x02,0xDC,0x05,0x00,0x00, \ +0xB9,0x02,0x00,0x02,0x90,0xB5,0x07,0x1C,0x07,0x20,0x40,0x06,0x81,0x69,0x04, \ +0x23,0x19,0x43,0x81,0x61,0xFA,0xF7,0xCC,0xFA,0x0A,0x20,0xF7,0xF7,0x49,0xF8, \ +0x17,0x4C,0x02,0x20,0x61,0x68,0x00,0xF0,0x30,0xF8,0x00,0x20,0x21,0x68,0x00, \ +0xF0,0x2C,0xF8,0x13,0x48,0xBF,0x00,0x38,0x18,0x40,0x38,0xC1,0x6B,0x01,0x20, \ +0x00,0xF0,0x24,0xF8,0x05,0x20,0x21,0x69,0x00,0xF0,0x20,0xF8,0x08,0x20,0xA1, \ +0x68,0x00,0xF0,0x1C,0xF8,0x07,0x20,0xE1,0x68,0x00,0xF0,0x18,0xF8,0x0A,0x48, \ +0x38,0x18,0x40,0x38,0xC1,0x6B,0x04,0x20,0x00,0xF0,0x11,0xF8,0xFF,0x20,0xF5, \ +0x30,0xF7,0xF7,0x21,0xF8,0xFA,0xF7,0x03,0xFB,0x0A,0x20,0xF7,0xF7,0x1C,0xF8, \ +0x90,0xBD,0x00,0x00,0xE4,0x02,0x00,0x02,0xF8,0x02,0x00,0x02,0x30,0x03,0x00, \ +0x02,0x90,0xB4,0x0B,0x4A,0x13,0x68,0xDF,0x43,0x0A,0x4B,0xDC,0x69,0x27,0x40, \ +0xDF,0x61,0x07,0x05,0x89,0x00,0x39,0x43,0x80,0x08,0x08,0x43,0x18,0x62,0x18, \ +0x1C,0x01,0x6A,0xC9,0x0D,0xFC,0xD3,0x11,0x68,0xC2,0x69,0x11,0x43,0xC1,0x61, \ +0x90,0xBC,0xF7,0x46,0xC0,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x80,0xB5,0x19, \ +0x4F,0x00,0x20,0x39,0x78,0xF7,0xF7,0x92,0xF8,0x79,0x78,0x01,0x20,0xF7,0xF7, \ +0x8E,0xF8,0xB9,0x78,0x02,0x20,0xF7,0xF7,0x8A,0xF8,0xF9,0x78,0x03,0x20,0xF7, \ +0xF7,0x86,0xF8,0x79,0x7C,0x11,0x20,0xF7,0xF7,0x82,0xF8,0x39,0x7D,0x14,0x20, \ +0xF7,0xF7,0x7E,0xF8,0x79,0x7D,0x15,0x20,0xF7,0xF7,0x7A,0xF8,0x39,0x7F,0x1C, \ +0x20,0xF7,0xF7,0x76,0xF8,0xB9,0x7C,0x12,0x20,0xF7,0xF7,0x72,0xF8,0xF9,0x7C, \ +0x13,0x20,0xF7,0xF7,0x6E,0xF8,0x05,0x48,0x00,0x78,0x01,0x28,0x03,0xD1,0x79, \ +0x7F,0x1D,0x20,0xF7,0xF7,0x66,0xF8,0x80,0xBD,0x00,0x00,0xD8,0x07,0x00,0x02, \ +0xB8,0x02,0x00,0x02,0x80,0xB5,0x07,0x27,0x7F,0x06,0xB8,0x69,0x40,0x08,0x40, \ +0x00,0xB8,0x61,0xB8,0x69,0x01,0x23,0x18,0x43,0xB8,0x61,0x05,0x20,0xF6,0xF7, \ +0xB2,0xFF,0xB8,0x69,0x40,0x08,0x40,0x00,0xB8,0x61,0x05,0x20,0xF6,0xF7,0xAB, \ +0xFF,0x80,0xBD,0xF0,0xB5,0x39,0x4E,0x07,0x1C,0xF0,0x7A,0x03,0x28,0xFC,0xD0, \ +0xC0,0x20,0xFA,0xF7,0xE5,0xFD,0x36,0x4D,0x04,0x1C,0xE8,0x69,0xAB,0x01,0x18, \ +0x43,0xE8,0x61,0x98,0x03,0xC1,0x68,0xC0,0x6B,0x28,0x68,0x0F,0x20,0x00,0x06, \ +0x01,0x88,0x00,0x89,0x30,0x48,0xC0,0x69,0x30,0x48,0xC1,0x19,0xC8,0x1F,0x09, \ +0x38,0xC2,0x7B,0x2E,0x48,0xFF,0x2A,0x00,0xD0,0x02,0x75,0x49,0x7B,0xFF,0x29, \ +0x00,0xD0,0x41,0x75,0x2B,0x49,0xC9,0x19,0x10,0x39,0xC9,0x7B,0xFF,0x29,0x02, \ +0xD0,0x8A,0x07,0x00,0xD1,0x41,0x74,0x26,0x48,0x01,0x7D,0x14,0x20,0xF7,0xF7, \ +0x16,0xF8,0x23,0x48,0x41,0x7D,0x15,0x20,0xF7,0xF7,0x11,0xF8,0x23,0x48,0x00, \ +0x78,0x01,0x28,0x0A,0xD1,0x0E,0x2F,0x04,0xD1,0xE8,0x69,0x40,0x23,0x98,0x43, \ +0xE8,0x61,0x03,0xE0,0xE8,0x69,0x40,0x23,0x18,0x43,0xE8,0x61,0x1C,0x48,0x07, \ +0x75,0x00,0x7D,0xFF,0xF7,0x07,0xFF,0x01,0x20,0xFD,0xF7,0x48,0xF9,0xE8,0x69, \ +0x19,0x4B,0x18,0x40,0xE8,0x61,0x06,0x20,0x70,0x72,0xFA,0x21,0x07,0x20,0xF8, \ +0xF7,0x3C,0xFD,0x15,0x49,0x08,0x20,0xF8,0xF7,0x38,0xFD,0x20,0x1C,0xFA,0xF7, \ +0x8D,0xFD,0x70,0x7C,0x01,0x28,0x05,0xD1,0x00,0x22,0x10,0x21,0x10,0x48,0xFA, \ +0xF7,0x95,0xFD,0xF0,0xBD,0x70,0x7C,0x02,0x28,0xFB,0xD1,0x00,0x22,0x10,0x21, \ +0x0D,0x48,0xFA,0xF7,0x8C,0xFD,0xF0,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0x40, \ +0x00,0x00,0x04,0x80,0x00,0x00,0x04,0x64,0x0A,0x00,0x02,0xD8,0x07,0x00,0x02, \ +0xFC,0x0A,0x00,0x02,0xB9,0x02,0x00,0x02,0x00,0x00,0x00,0x02,0xFF,0xEF,0x00, \ +0x00,0x88,0x13,0x00,0x00,0xE4,0x06,0x00,0x02,0x04,0x07,0x00,0x02,0xB0,0xB5, \ 
+0x07,0x21,0x49,0x06,0xCA,0x69,0x52,0x09,0x03,0xD3,0xCA,0x69,0x10,0x23,0x9A, \ +0x43,0xCA,0x61,0x18,0x4C,0x01,0x28,0x0C,0xD1,0x18,0x4D,0x6F,0x68,0xF6,0xF7, \ +0x1F,0xFF,0x39,0x1A,0x49,0x01,0x09,0x18,0x69,0x60,0x61,0x6B,0x09,0x1A,0x49, \ +0x01,0x08,0x18,0x60,0x63,0x12,0x48,0x00,0x21,0x00,0x7D,0xFF,0xF7,0x5F,0xFE, \ +0x11,0x4F,0x11,0x4B,0xF9,0x1D,0x69,0x31,0x08,0x73,0x01,0x20,0x80,0x06,0xC0, \ +0x68,0xE0,0x69,0x18,0x40,0xE0,0x61,0x01,0x20,0xFD,0xF7,0xDF,0xF8,0x01,0x20, \ +0x38,0x72,0x06,0x20,0x78,0x72,0x07,0x20,0xFF,0x21,0x2D,0x31,0xF8,0xF7,0xD4, \ +0xFC,0x4B,0x21,0xC9,0x00,0x08,0x20,0xF8,0xF7,0xCF,0xFC,0xB0,0xBD,0x40,0x00, \ +0x00,0x04,0x80,0x00,0x00,0x04,0x00,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0xFF, \ +0xEF,0x00,0x00,0xF0,0xB5,0x24,0x4F,0xF8,0x69,0x3B,0x0C,0x18,0x43,0xF8,0x61, \ +0xF8,0x69,0x1B,0x23,0x98,0x43,0xF8,0x61,0xF8,0x69,0x04,0x23,0x98,0x43,0xF8, \ +0x61,0xF8,0x69,0x9B,0x02,0x18,0x43,0xF8,0x61,0x1C,0x48,0xF9,0x69,0x00,0x68, \ +0xC0,0x43,0x08,0x40,0x07,0x24,0x64,0x06,0xF8,0x61,0xA0,0x69,0x01,0x23,0x18, \ +0x43,0xA0,0x61,0x01,0x20,0xF8,0xF7,0xBD,0xFC,0x08,0x20,0xF8,0xF7,0xBA,0xFC, \ +0x07,0x20,0xF8,0xF7,0xB7,0xFC,0x01,0x20,0x80,0x06,0xC1,0x68,0xC0,0x6B,0x11, \ +0x4D,0x38,0x68,0x0F,0x20,0x00,0x06,0x00,0x88,0x01,0x26,0x6E,0x72,0xF8,0xF7, \ +0x62,0xFC,0xE8,0x1D,0x69,0x30,0x0C,0x4D,0x86,0x70,0x6E,0x68,0xF6,0xF7,0xAF, \ +0xFE,0x31,0x1A,0x49,0x09,0x09,0x18,0x69,0x60,0x79,0x6B,0x09,0x1A,0x49,0x09, \ +0x08,0x18,0x78,0x63,0xE0,0x69,0x10,0x23,0x18,0x43,0xE0,0x61,0xF0,0xBD,0x00, \ +0x00,0x40,0x00,0x00,0x04,0xBC,0x02,0x00,0x02,0x50,0x09,0x00,0x02,0x80,0x00, \ +0x00,0x04,0xF0,0xB5,0x33,0x4A,0x01,0x21,0xD4,0x1D,0x19,0x34,0xE5,0x78,0x00, \ +0x20,0x31,0x4F,0xFF,0x2D,0x13,0xD0,0x2B,0x09,0x11,0xD3,0x13,0x7F,0x3B,0x70, \ +0x56,0x7F,0x7E,0x70,0x96,0x7F,0xBE,0x70,0xD3,0x7F,0xFB,0x70,0x23,0x78,0x7B, \ +0x74,0x63,0x78,0x3B,0x75,0xA3,0x78,0x7B,0x75,0x3D,0x77,0xB9,0x74,0xF8,0x74, \ +0x0E,0xE0,0x38,0x70,0x60,0x23,0x7B,0x70,0x40,0x23,0xBB,0x70,0xFB,0x70,0x78, \ +0x74,0xFF,0x23,0x3B,0x75,0x57,0x23,0x7B,0x75,0x48,0x23,0x3B,0x77,0xB9,0x74, \ +0xF8,0x74,0x1F,0x4B,0x9D,0x78,0x1F,0x4B,0x04,0x2D,0x01,0xDA,0x58,0x73,0x05, \ +0xE0,0x24,0x79,0xFF,0x2C,0x01,0xD0,0x5C,0x73,0x00,0xE0,0x58,0x73,0xFB,0x78, \ +0x1A,0x4C,0xC0,0x2B,0x02,0xD1,0x21,0x76,0xF8,0x70,0x00,0xE0,0x20,0x76,0x17, \ +0x4D,0x11,0x1C,0x28,0x7D,0x80,0x18,0xC2,0x1F,0x09,0x3A,0xD2,0x7B,0xFF,0x2A, \ +0x00,0xD0,0x3A,0x75,0x42,0x7B,0xFF,0x2A,0x00,0xD0,0x7A,0x75,0x40,0x30,0x80, \ +0x78,0xFF,0x28,0x0C,0xD0,0x80,0x07,0x0A,0xD1,0x0E,0x4C,0x43,0x31,0x0E,0x22, \ +0x20,0x1C,0xFB,0xF7,0xAE,0xF9,0x28,0x7D,0x00,0x19,0x10,0x38,0xC0,0x7B,0x78, \ +0x74,0x78,0x78,0x09,0x49,0x40,0x09,0x80,0x07,0x80,0x0F,0x08,0x70,0xF0,0xBD, \ +0x64,0x0A,0x00,0x02,0xD8,0x07,0x00,0x02,0x14,0x01,0x00,0x02,0xE8,0x07,0x00, \ +0x02,0x50,0x09,0x00,0x02,0x00,0x00,0x00,0x02,0xFC,0x0A,0x00,0x02,0x9A,0x01, \ +0x00,0x02,0x02,0x79,0x41,0x79,0x12,0x02,0x11,0x43,0xC2,0x78,0x12,0x04,0x11, \ +0x43,0x82,0x78,0x12,0x06,0x0A,0x43,0x01,0x21,0x89,0x06,0x8A,0x61,0x42,0x78, \ +0x00,0x78,0x00,0x02,0x10,0x43,0xC8,0x61,0xF7,0x46,0x00,0xB5,0x0C,0x49,0x0D, \ +0x48,0x41,0x61,0x23,0x21,0x81,0x61,0x00,0x22,0x01,0x05,0x0A,0x61,0xC2,0x01, \ +0x42,0x60,0x05,0x22,0xC2,0x60,0x08,0x4A,0x82,0x62,0xF2,0x22,0x82,0x60,0x32, \ +0x22,0x4A,0x61,0xCA,0x68,0xC9,0x6B,0x00,0x68,0x00,0x21,0x00,0x20,0x00,0xF0, \ +0x07,0xF8,0x00,0xBD,0x04,0x90,0x00,0x00,0x40,0x00,0x00,0x04,0x81,0xFF,0x00, \ +0x00,0x02,0x1C,0x01,0x20,0x80,0x06,0x82,0x62,0x41,0x62,0xF7,0x46,0x80,0xB5, \ +0x1D,0x48,0x20,0x23,0x81,0x69,0x1D,0x4F,0x99,0x43,0x81,0x61,0x1B,0x48,0x81, \ 
+0x78,0x1C,0x48,0x00,0x29,0x0F,0xD0,0x01,0x7D,0x04,0x29,0x0C,0xD0,0x01,0x21, \ +0xC1,0x77,0x03,0x21,0x41,0x77,0xF8,0xF7,0x87,0xFB,0x39,0x88,0x89,0x02,0x09, \ +0x1A,0x06,0x20,0xF8,0xF7,0xA5,0xFB,0x80,0xBD,0xF9,0x1D,0x29,0x31,0x0A,0x79, \ +0x02,0x2A,0xF9,0xD1,0xC2,0x1D,0x49,0x32,0x92,0x79,0x05,0x2A,0xF4,0xD1,0x49, \ +0x79,0x01,0x29,0xF1,0xDD,0xC7,0x1D,0x69,0x37,0xB8,0x78,0x01,0x28,0x04,0xD1, \ +0x00,0x20,0xFF,0xF7,0x85,0xFE,0x00,0x20,0xB8,0x70,0xB8,0x78,0x40,0x08,0x40, \ +0x00,0xB8,0x70,0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0x80,0xBD,0x80,0x00, \ +0x00,0x04,0x08,0x01,0x00,0x02,0x84,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0x80, \ +0xB5,0xF8,0xF7,0x67,0xFE,0x08,0x48,0x01,0x21,0x41,0x60,0x41,0x7F,0x10,0x30, \ +0x00,0x27,0x01,0x29,0x00,0xD1,0x47,0x73,0x01,0x20,0xF6,0xF7,0x97,0xFD,0x03, \ +0x48,0x07,0x83,0x87,0x82,0x80,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0x20,0x00, \ +0x20,0x0F,0x80,0xB5,0x0F,0x48,0x40,0x23,0x81,0x69,0x0E,0x4F,0x99,0x43,0x81, \ +0x61,0xF8,0x69,0x9B,0x01,0x18,0x43,0xF8,0x61,0x14,0x20,0xF6,0xF7,0x6A,0xFD, \ +0xF8,0x69,0x0A,0x4B,0x0A,0x49,0x18,0x40,0xF8,0x61,0x01,0x20,0x08,0x72,0x4A, \ +0x7A,0x06,0x2A,0x00,0xD0,0x48,0x72,0x08,0x73,0x00,0x20,0xC8,0x72,0x05,0x49, \ +0x08,0x70,0x80,0xBD,0x00,0x00,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04,0xFF, \ +0xEF,0x00,0x00,0x50,0x09,0x00,0x02,0xE8,0x01,0x00,0x02,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/fw-rfmd-acc-1.101.0-84.h linux.22-ac2/drivers/usb/atmel/fw-rfmd-acc-1.101.0-84.h --- linux.vanilla/drivers/usb/atmel/fw-rfmd-acc-1.101.0-84.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/fw-rfmd-acc-1.101.0-84.h 2003-08-13 21:00:21.000000000 +0100 @@ -0,0 +1,2553 @@ +/**************************************************************************** + * The following firmware has been taken (and reformatted slighly) from the * + * Atmel (atmelwlandriver) driver source. * + * * + * Target: * + * Version: * + ****************************************************************************/ + +/**************************************************************************/ +/* */ +/* Copyright (c) 1999-2000 by Atmel Corporation */ +/* */ +/* This software is copyrighted by and is the sole property of Atmel */ +/* Corporation. All rights, title, ownership, or other interests */ +/* in the software remain the property of Atmel Corporation. This */ +/* software may only be used in accordance with the corresponding */ +/* license agreement. Any un-authorized use, duplication, transmission, */ +/* distribution, or disclosure of this software is expressly forbidden. */ +/* */ +/* This Copyright notice may not be removed or modified without prior */ +/* written consent of Atmel Corporation. */ +/* */ +/* Atmel Corporation, Inc. reserves the right to modify this software */ +/* without notice. */ +/* */ +/* Atmel Corporation. 
*/ +/* 2325 Orchard Parkway literature@atmel.com */ +/* San Jose, CA 95131 http://www.atmel.com */ +/* */ +/**************************************************************************/ +/**************************************************************************/ +/* */ +/* Automatically generated FW file for AT76C502A */ +/* */ +/**************************************************************************/ + +#define FW_503RFMD_ACC_INTERNAL { \ +0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F, \ +0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1,0x9F,0xE5,0x7C,0xF1, \ +0x9F,0xE5,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x0E,0x04,0xA0,0xE3,0x00, \ +0x10,0xA0,0xE3,0x81,0x11,0xA0,0xE1,0x00,0x10,0x81,0xE3,0x00,0x10,0x80,0xE5, \ +0x0F,0x04,0xA0,0xE3,0x00,0x10,0xA0,0xE3,0x04,0x10,0x80,0xE5,0x0C,0x10,0x80, \ +0xE5,0x00,0x10,0x90,0xE5,0x08,0x10,0x90,0xE5,0x48,0xD1,0x9F,0xE5,0x22,0x14, \ +0x00,0xEB,0x44,0x11,0x9F,0xE5,0xD0,0x20,0x9F,0xE5,0xD1,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10,0xC1,0xE3,0x04,0x10,0x41,0xE2, \ +0x01,0xD0,0xA0,0xE1,0x00,0xA0,0xA0,0xE3,0x00,0xB0,0xA0,0xE3,0xB0,0x20,0x9F, \ +0xE5,0xD2,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x02,0x10,0x81,0xE0,0x03,0x10, \ +0xC1,0xE3,0x04,0x10,0x41,0xE2,0x01,0xD0,0xA0,0xE1,0xD3,0x00,0xA0,0xE3,0x00, \ +0xF0,0x21,0xE1,0x84,0x30,0x9F,0xE5,0x00,0x10,0x83,0xE5,0x01,0xD0,0xA0,0xE1, \ +0x74,0x00,0x9F,0xE5,0x01,0x00,0x80,0xE3,0x0F,0xE0,0xA0,0xE1,0x10,0xFF,0x2F, \ +0xE1,0x00,0xA0,0x00,0x47,0x64,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x04,0x00, \ +0x80,0xE2,0x6C,0x10,0x9F,0xE5,0x6C,0x30,0x9F,0xE5,0x5C,0x20,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0x00,0x20,0x83,0xE5,0x02,0x00,0x80,0xE0,0x5C,0x10,0x9F,0xE5, \ +0x00,0x20,0xA0,0xE3,0x00,0x20,0x81,0xE5,0x44,0x20,0x9F,0xE5,0x00,0x00,0x82, \ +0xE5,0x1E,0xFF,0x2F,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF, \ +0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x41,0x0F,0x00,0xEA,0x02, \ +0xF0,0xCC,0xFA,0x22,0x48,0x87,0x46,0x5A,0x0F,0x00,0xEA,0x02,0xF0,0xE0,0xF8, \ +0x20,0x48,0x87,0x46,0xCD,0x05,0x00,0x00,0xC0,0x03,0x00,0x02,0x00,0x01,0x00, \ +0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x44,0x04,0x00,0x02,0x64,0x04, \ +0x00,0x02,0x68,0x04,0x00,0x02,0x6C,0x04,0x00,0x02,0xFE,0xFF,0xFF,0xEA,0xFE, \ +0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0,0xE3, \ +0x0E,0xF0,0xA0,0xE1,0xFE,0xFF,0xFF,0xEA,0xFE,0xFF,0xFF,0xEA,0x00,0x00,0xA0, \ +0xE3,0x0E,0xF0,0xA0,0xE1,0x00,0x00,0xA0,0xE3,0x0E,0xF0,0xA0,0xE1,0x20,0x00, \ +0x00,0x00,0x04,0x01,0x00,0x00,0x08,0x01,0x00,0x00,0x0C,0x01,0x00,0x00,0x10, \ +0x01,0x00,0x00,0x14,0x01,0x00,0x00,0x18,0x01,0x00,0x00,0x24,0x01,0x00,0x00, \ +0x00,0x60,0x00,0x01,0xE0,0x59,0x00,0x01,0x15,0x3F,0x00,0x00,0x05,0x40,0x00, \ +0x00,0x78,0x47,0x00,0x00,0xF0,0x40,0x2D,0xE9,0x80,0x31,0x9F,0xE5,0x03,0x00, \ +0x83,0xE8,0x3F,0x40,0x01,0xE2,0x00,0x00,0x54,0xE3,0x01,0x50,0xA0,0x03,0x07, \ +0x50,0xC3,0x05,0x00,0x00,0xA0,0xE3,0x5F,0x00,0x00,0xEB,0x60,0x31,0x9F,0xE5, \ +0xB4,0x20,0xD3,0xE1,0x00,0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x01,0x00,0xA0, \ +0xE3,0x59,0x00,0x00,0xEB,0x0B,0x00,0x00,0xEA,0x07,0x00,0xD3,0xE5,0x01,0x00, \ +0x50,0xE3,0x03,0x00,0x00,0x0A,0x3C,0x01,0x9F,0xE5,0x10,0x10,0xA0,0xE3,0x00, \ +0x10,0xC0,0xE5,0x07,0x00,0x00,0xEA,0x00,0x10,0xA0,0xE3,0x07,0x10,0xC3,0xE5, \ +0x24,0x01,0x9F,0xE5,0x10,0x10,0xA0,0xE3,0x00,0x10,0xC0,0xE5,0x14,0x31,0x9F, \ +0xE5,0x01,0x00,0xA0,0xE3,0x06,0x00,0xC3,0xE5,0xF0,0x40,0xBD,0xE8,0x1E,0xFF, \ +0x2F,0xE1,0x78,0x47,0x00,0x00,0xF0,0x40,0x2D,0xE9,0xFC,0x00,0x9F,0xE5,0x00, \ 
+0x10,0xD0,0xE5,0x01,0x00,0x51,0xE3,0x39,0x00,0x00,0x1A,0xF0,0x30,0x9F,0xE5, \ +0x35,0x30,0xD3,0xE5,0x01,0x00,0x53,0xE3,0x00,0x00,0x00,0xCA,0x04,0x00,0x00, \ +0xEA,0xE0,0x20,0x9F,0xE5,0x01,0x20,0x82,0xE3,0x0F,0xE0,0xA0,0xE1,0x12,0xFF, \ +0x2F,0xE1,0x78,0x47,0x00,0x00,0xC4,0x00,0x9F,0xE5,0x00,0x10,0xA0,0xE3,0x00, \ +0x10,0xC0,0xE5,0xB4,0x00,0x9F,0xE5,0x06,0x10,0xD0,0xE5,0x00,0x00,0x51,0xE3, \ +0x18,0x00,0x00,0x0A,0x00,0x20,0xA0,0xE3,0x06,0x20,0xC0,0xE5,0xB4,0x10,0xD0, \ +0xE1,0x00,0x00,0x51,0xE3,0x05,0x00,0x00,0x0A,0x01,0x00,0xA0,0xE3,0x29,0x00, \ +0x00,0xEB,0x88,0x00,0x9F,0xE5,0x01,0x10,0xA0,0xE3,0x06,0x10,0xC0,0xE5,0x1D, \ +0x00,0x00,0xEA,0x07,0x30,0xD0,0xE5,0x00,0x00,0x53,0xE3,0x01,0x00,0x00,0x0A, \ +0x01,0x30,0xA0,0x13,0xB6,0x30,0xC0,0x11,0x08,0x40,0xD0,0xE5,0x00,0x00,0x54, \ +0xE3,0x15,0x00,0x00,0x1A,0x5C,0x40,0x9F,0xE5,0x10,0x50,0xA0,0xE3,0x00,0x50, \ +0xC4,0xE5,0x00,0x50,0xA0,0xE3,0x07,0x50,0xC0,0xE5,0x0F,0x00,0x00,0xEA,0x40, \ +0x00,0x9F,0xE5,0x00,0x10,0x80,0xE5,0x04,0x10,0x80,0xE5,0x44,0x00,0x9F,0xE5, \ +0x00,0x10,0x90,0xE5,0x08,0x20,0xD1,0xE5,0x10,0x00,0x52,0xE3,0x07,0x00,0x00, \ +0x1A,0x01,0x00,0xA0,0xE1,0x30,0x20,0x9F,0xE5,0x01,0x20,0x82,0xE3,0x0F,0xE0, \ +0xA0,0xE1,0x12,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x18,0x10,0x9F,0xE5,0x00, \ +0x00,0x81,0xE5,0xF0,0x40,0xBD,0xE8,0x1E,0xFF,0x2F,0xE1,0x58,0x51,0x00,0x00, \ +0x68,0x03,0x00,0x0D,0x84,0x00,0x00,0x02,0x55,0x3D,0x00,0x00,0x64,0x02,0x00, \ +0x02,0xBF,0x2D,0x00,0x00,0x01,0x40,0x2D,0xE9,0x00,0x40,0xA0,0xE1,0x54,0x02, \ +0x9F,0xE5,0x54,0x12,0x9F,0xE5,0x08,0x20,0xD0,0xE5,0x00,0x00,0x52,0xE3,0x8F, \ +0x00,0x00,0x1A,0x00,0x20,0x90,0xE5,0x00,0x30,0xD2,0xE5,0x00,0x00,0x54,0xE3, \ +0x00,0x30,0xC1,0x05,0x10,0x00,0x00,0x0A,0x34,0x42,0x9F,0xE5,0x10,0x50,0xA0, \ +0xE3,0xC0,0x60,0xA0,0xE3,0x00,0x70,0x0F,0xE1,0x3F,0x80,0xA0,0xE3,0x08,0x90, \ +0x07,0xE0,0x06,0x90,0x89,0xE1,0x09,0xF0,0x21,0xE1,0x08,0x60,0xC7,0xE1,0x00, \ +0x50,0xC4,0xE5,0x00,0x30,0xC1,0xE5,0x00,0x70,0x0F,0xE1,0x3F,0x80,0xA0,0xE3, \ +0x08,0x90,0x07,0xE0,0x06,0x90,0x89,0xE1,0x09,0xF0,0x21,0xE1,0x08,0x60,0xC7, \ +0xE1,0xB4,0x30,0xD0,0xE1,0x40,0x00,0x53,0xE3,0x3F,0x00,0x00,0xAA,0x01,0x30, \ +0x43,0xE2,0x01,0x20,0x82,0xE2,0x00,0x00,0x53,0xE3,0x70,0x00,0x00,0x0A,0x01, \ +0x20,0x42,0xE2,0xE0,0x01,0xB2,0xE8,0x03,0x40,0xA0,0xE3,0x05,0x00,0x00,0xEA, \ +0xE0,0x01,0xB2,0xE8,0x03,0x40,0xA0,0xE3,0x00,0x50,0xC1,0xE5,0x01,0x30,0x43, \ +0xE2,0x00,0x00,0x53,0xE3,0x66,0x00,0x00,0x0A,0x25,0x54,0xA0,0xE1,0x00,0x50, \ +0xC1,0xE5,0x01,0x40,0x44,0xE2,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0x60, \ +0x00,0x00,0x0A,0x00,0x00,0x54,0xE3,0xF7,0xFF,0xFF,0x1A,0x03,0x40,0xA0,0xE3, \ +0x00,0x60,0xC1,0xE5,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0x59,0x00,0x00, \ +0x0A,0x26,0x64,0xA0,0xE1,0x00,0x60,0xC1,0xE5,0x01,0x40,0x44,0xE2,0x01,0x30, \ +0x43,0xE2,0x00,0x00,0x53,0xE3,0x53,0x00,0x00,0x0A,0x00,0x00,0x54,0xE3,0xF7, \ +0xFF,0xFF,0x1A,0x03,0x40,0xA0,0xE3,0x00,0x70,0xC1,0xE5,0x01,0x30,0x43,0xE2, \ +0x00,0x00,0x53,0xE3,0x4C,0x00,0x00,0x0A,0x27,0x74,0xA0,0xE1,0x00,0x70,0xC1, \ +0xE5,0x01,0x40,0x44,0xE2,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0x46,0x00, \ +0x00,0x0A,0x00,0x00,0x54,0xE3,0xF7,0xFF,0xFF,0x1A,0x03,0x40,0xA0,0xE3,0x00, \ +0x80,0xC1,0xE5,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0x3F,0x00,0x00,0x0A, \ +0x28,0x84,0xA0,0xE1,0x00,0x80,0xC1,0xE5,0x01,0x40,0x44,0xE2,0x01,0x30,0x43, \ +0xE2,0x00,0x00,0x53,0xE3,0x39,0x00,0x00,0x0A,0x00,0x00,0x54,0xE3,0xF7,0xFF, \ +0xFF,0x1A,0x00,0x00,0x53,0xE3,0xC8,0xFF,0xFF,0x1A,0x34,0x00,0x00,0xEA,0x03, \ +0x30,0xA0,0xE3,0xE0,0x03,0xB2,0xE8,0x01,0x00,0x00,0xEA,0xE0,0x03,0xB2,0xE8, \ 
+0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1,0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0, \ +0xE1,0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1,0x00,0x50,0xC1,0xE5,0x00,0x60, \ +0xC1,0xE5,0x26,0x64,0xA0,0xE1,0x00,0x60,0xC1,0xE5,0x26,0x64,0xA0,0xE1,0x00, \ +0x60,0xC1,0xE5,0x26,0x64,0xA0,0xE1,0x00,0x60,0xC1,0xE5,0x00,0x70,0xC1,0xE5, \ +0x27,0x74,0xA0,0xE1,0x00,0x70,0xC1,0xE5,0x27,0x74,0xA0,0xE1,0x00,0x70,0xC1, \ +0xE5,0x27,0x74,0xA0,0xE1,0x00,0x70,0xC1,0xE5,0x00,0x80,0xC1,0xE5,0x28,0x84, \ +0xA0,0xE1,0x00,0x80,0xC1,0xE5,0x28,0x84,0xA0,0xE1,0x00,0x80,0xC1,0xE5,0x28, \ +0x84,0xA0,0xE1,0x00,0x80,0xC1,0xE5,0x00,0x90,0xC1,0xE5,0x29,0x94,0xA0,0xE1, \ +0x00,0x90,0xC1,0xE5,0x29,0x94,0xA0,0xE1,0x00,0x90,0xC1,0xE5,0x29,0x94,0xA0, \ +0xE1,0x00,0x90,0xC1,0xE5,0x01,0x30,0x43,0xE2,0x00,0x00,0x53,0xE3,0xD8,0xFF, \ +0xFF,0x1A,0x00,0x50,0x92,0xE5,0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1,0x00, \ +0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1,0x00,0x50,0xC1,0xE5,0x25,0x54,0xA0,0xE1, \ +0x00,0x50,0xC1,0xE5,0x04,0x20,0x82,0xE2,0xB4,0x30,0xD0,0xE1,0x40,0x30,0x43, \ +0xE2,0x00,0x20,0x80,0xE5,0xB4,0x30,0xC0,0xE1,0x01,0x40,0xBD,0xE8,0x0E,0xF0, \ +0xA0,0xE1,0x58,0x51,0x00,0x00,0x28,0x03,0x00,0x0D,0x68,0x03,0x00,0x0D,0x00, \ +0xB5,0x03,0xF0,0xFB,0xFB,0x00,0x20,0x00,0xBD,0x80,0xB5,0x86,0xB0,0x07,0x1C, \ +0x00,0x21,0x04,0x91,0xFF,0x21,0x01,0x22,0x91,0x31,0x01,0x20,0x03,0x90,0x01, \ +0x91,0x05,0x92,0x02,0x92,0x17,0x4A,0x19,0xA1,0x17,0x48,0x01,0x23,0x00,0x97, \ +0x03,0xF0,0xD1,0xFD,0x00,0x21,0x04,0x91,0xFF,0x21,0x91,0x31,0x01,0x22,0x05, \ +0x92,0xFB,0x1D,0xFF,0x33,0x03,0x22,0x03,0x20,0x8A,0x33,0x00,0x93,0x03,0x90, \ +0x02,0x92,0x01,0x91,0x13,0xA1,0x11,0x4A,0x11,0x48,0x02,0x23,0x03,0xF0,0xBC, \ +0xFD,0x13,0x48,0x14,0xA1,0x03,0xF0,0x2E,0xFE,0x16,0x48,0x17,0xA1,0x03,0xF0, \ +0x2A,0xFE,0x1A,0x48,0x1B,0xA1,0x03,0xF0,0x26,0xFE,0x1E,0x48,0x1F,0xA1,0x03, \ +0xF0,0x22,0xFE,0x03,0xF0,0x0A,0xFA,0x06,0xB0,0x80,0xBD,0x7D,0x17,0x00,0x00, \ +0xC4,0x05,0x00,0x02,0x54,0x78,0x20,0x74,0x68,0x72,0x65,0x61,0x64,0x00,0x00, \ +0x00,0x01,0x2A,0x00,0x00,0x54,0x06,0x00,0x02,0x4D,0x67,0x6D,0x20,0x74,0x68, \ +0x72,0x65,0x61,0x64,0x00,0x00,0xE4,0x06,0x00,0x02,0x54,0x78,0x20,0x73,0x74, \ +0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x04,0x07,0x00,0x02, \ +0x4D,0x67,0x6D,0x20,0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67, \ +0x73,0x00,0x00,0x00,0x00,0x24,0x07,0x00,0x02,0x54,0x58,0x20,0x47,0x4F,0x20, \ +0x73,0x74,0x61,0x74,0x75,0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0x00,0x44, \ +0x07,0x00,0x02,0x50,0x73,0x50,0x6F,0x6C,0x6C,0x20,0x73,0x74,0x61,0x74,0x75, \ +0x73,0x20,0x66,0x6C,0x61,0x67,0x73,0x00,0xC3,0x00,0x18,0x18,0x80,0x00,0x80, \ +0x08,0x01,0xD0,0x01,0x38,0xFD,0xD1,0xF7,0x46,0x03,0x49,0x0F,0x20,0x00,0x06, \ +0x81,0x80,0x00,0x21,0x81,0x81,0xF7,0x46,0x00,0x00,0xFB,0xFB,0x00,0x00,0x01, \ +0x20,0x80,0x06,0x40,0x6A,0xF7,0x46,0x01,0x1C,0x06,0x48,0x04,0xD0,0x41,0x68, \ +0xC3,0x01,0x19,0x43,0x41,0x60,0xF7,0x46,0x41,0x68,0x01,0x23,0x5B,0x03,0x99, \ +0x43,0x41,0x60,0xF7,0x46,0x40,0x00,0x00,0x04,0x80,0xB5,0x13,0x49,0x15,0x4F, \ +0x08,0x78,0x42,0x01,0x12,0x48,0x42,0x70,0x12,0x4A,0x52,0x7A,0x00,0x2A,0x0B, \ +0xD0,0x09,0x78,0x00,0x29,0x08,0xDD,0x41,0x78,0x10,0x23,0x19,0x43,0x41,0x70, \ +0x48,0x21,0x79,0x81,0x18,0x21,0x39,0x81,0x03,0xE0,0x90,0x21,0x79,0x81,0x30, \ +0x21,0x39,0x81,0x41,0x78,0x01,0x20,0x00,0xF0,0x5B,0xF8,0x78,0x89,0x39,0x89, \ +0x40,0x18,0x06,0x49,0x08,0x80,0x01,0xF0,0xE0,0xFC,0x80,0xBD,0x00,0x00,0x9B, \ +0x01,0x00,0x02,0xD8,0x07,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0x00,0x00,0x02, \ +0xB4,0x01,0x00,0x02,0x01,0x1C,0x06,0x48,0x04,0xD0,0x41,0x7C,0x01,0x23,0x19, \ 
+0x43,0x41,0x74,0xF7,0x46,0x41,0x7C,0xFE,0x23,0x19,0x40,0x41,0x74,0xF7,0x46, \ +0x00,0x00,0xD8,0x07,0x00,0x02,0xF0,0xB4,0x07,0x24,0x64,0x06,0xA2,0x69,0x04, \ +0x23,0x9A,0x43,0xA2,0x61,0xF3,0x22,0x12,0x05,0x93,0x68,0x40,0x23,0xD3,0x60, \ +0x17,0x69,0xBB,0x08,0xFC,0xD3,0x13,0x69,0x5B,0x08,0xFC,0xD3,0x93,0x68,0x80, \ +0x23,0x03,0x43,0xD3,0x60,0x17,0x69,0xBB,0x08,0xFC,0xD3,0x13,0x69,0x5B,0x08, \ +0xFC,0xD3,0x17,0x1C,0x92,0x68,0x00,0x22,0x00,0x29,0x0D,0xD9,0x0A,0x4D,0x83, \ +0x18,0xEB,0x5C,0xFB,0x60,0x3E,0x69,0xB3,0x08,0xFC,0xD3,0x3B,0x69,0x5B,0x08, \ +0xFC,0xD3,0x01,0x32,0x8A,0x42,0xBB,0x68,0xF2,0xD3,0xA0,0x69,0x04,0x23,0x18, \ +0x43,0xA0,0x61,0xF0,0xBC,0xF7,0x46,0x00,0x00,0xD8,0x07,0x00,0x02,0x90,0xB4, \ +0x07,0x27,0x7F,0x06,0xBA,0x69,0x04,0x23,0x9A,0x43,0xBA,0x61,0xF3,0x22,0x12, \ +0x05,0x93,0x68,0x40,0x23,0xD3,0x60,0x14,0x69,0xA3,0x08,0xFC,0xD3,0x13,0x69, \ +0x5B,0x08,0xFC,0xD3,0x93,0x68,0xD0,0x60,0x10,0x69,0x80,0x08,0xFC,0xD3,0x10, \ +0x1C,0x02,0x69,0x52,0x08,0xFC,0xD3,0x82,0x68,0xC1,0x60,0x01,0x69,0x89,0x08, \ +0xFC,0xD3,0x01,0x69,0x49,0x08,0xFC,0xD3,0x80,0x68,0x04,0x23,0xB8,0x69,0x18, \ +0x43,0xB8,0x61,0x90,0xBC,0xF7,0x46,0x80,0xB4,0x07,0x22,0x52,0x06,0x91,0x69, \ +0x04,0x23,0x99,0x43,0x91,0x61,0xF3,0x21,0x09,0x05,0x8B,0x68,0x40,0x23,0xCB, \ +0x60,0x0F,0x69,0xBB,0x08,0xFC,0xD3,0x0B,0x69,0x5B,0x08,0xFC,0xD3,0x8B,0x68, \ +0xC8,0x60,0x08,0x69,0x80,0x08,0xFC,0xD3,0x08,0x69,0x40,0x08,0xFC,0xD3,0x97, \ +0x69,0x04,0x23,0x3B,0x43,0x88,0x68,0x93,0x61,0x97,0x69,0x04,0x23,0x9F,0x43, \ +0x97,0x61,0x41,0x20,0xC8,0x60,0x08,0x69,0x80,0x08,0xFC,0xD3,0x08,0x1C,0x01, \ +0x69,0x49,0x08,0xFC,0xD3,0x81,0x68,0xFF,0x21,0xC1,0x60,0x01,0x69,0x49,0x08, \ +0xFC,0xD3,0x91,0x69,0x04,0x23,0x19,0x43,0x80,0x68,0x91,0x61,0x80,0xBC,0xF7, \ +0x46,0x00,0xB5,0x08,0x48,0xC1,0x69,0x83,0x01,0x19,0x43,0xC1,0x61,0x00,0x20, \ +0xFF,0xF7,0x0A,0xFF,0x07,0x20,0x40,0x06,0x81,0x69,0x01,0x23,0x19,0x43,0x81, \ +0x61,0x02,0x49,0x01,0x20,0x08,0x70,0x00,0xBD,0x40,0x00,0x00,0x04,0xBB,0x02, \ +0x00,0x02,0xC1,0x0A,0x01,0xD3,0x00,0x20,0xF7,0x46,0xFF,0x22,0x01,0x32,0x02, \ +0x40,0x01,0x21,0x00,0x2A,0x01,0xD0,0x08,0x1C,0xF7,0x46,0x80,0x0A,0x01,0xD3, \ +0x08,0x1C,0xF7,0x46,0x02,0x20,0xF7,0x46,0xF0,0xB5,0x0F,0x1C,0x19,0x49,0x04, \ +0x1C,0x19,0x4E,0x1A,0x48,0x31,0x60,0x05,0x6C,0x00,0x2D,0x16,0xD0,0x06,0x22, \ +0x20,0x1C,0x31,0x68,0x04,0xF0,0xED,0xF9,0x00,0x28,0x08,0xD1,0x30,0x68,0xC1, \ +0x88,0xB9,0x42,0x01,0xD1,0x01,0x20,0xF0,0xBD,0xC7,0x80,0x00,0x20,0xF0,0xBD, \ +0x30,0x68,0x08,0x30,0x30,0x60,0x28,0x1C,0x01,0x3D,0x00,0x28,0xE8,0xD1,0x0C, \ +0x48,0x01,0x6C,0x01,0x31,0x01,0x64,0x01,0x6C,0x07,0x29,0x03,0xD9,0x06,0x49, \ +0x31,0x60,0x08,0x21,0x01,0x64,0x06,0x22,0x21,0x1C,0x30,0x68,0x04,0xF0,0xE9, \ +0xF9,0x30,0x68,0xC7,0x80,0x00,0x20,0xF0,0xBD,0x00,0x00,0x80,0x07,0x00,0x02, \ +0x44,0x01,0x00,0x02,0x50,0x09,0x00,0x02,0x05,0x49,0x0A,0x68,0x12,0x01,0x02, \ +0x70,0x0A,0x68,0x12,0x01,0x12,0x0A,0x42,0x70,0x08,0x68,0x01,0x30,0x08,0x60, \ +0xF7,0x46,0x48,0x01,0x00,0x02,0x00,0x2A,0x0C,0xD1,0x08,0x4A,0x92,0x7A,0x8A, \ +0x42,0x00,0xD8,0x11,0x1C,0x07,0x4A,0x49,0x00,0x51,0x5A,0x06,0x4A,0xD2,0x88, \ +0x89,0x18,0xC9,0x18,0x00,0xE0,0x00,0x21,0x01,0x70,0x09,0x0A,0x41,0x70,0xF7, \ +0x46,0x08,0x01,0x00,0x02,0xB8,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xF0,0xB5, \ +0x5A,0x4E,0x30,0x68,0x47,0x68,0x38,0x78,0x05,0x07,0x2D,0x0F,0x08,0x28,0x01, \ +0xD0,0x00,0x2D,0x73,0xD1,0x56,0x4C,0x20,0x79,0x02,0x28,0x07,0xD1,0xF8,0x1D, \ +0x09,0x30,0x06,0x22,0x53,0x49,0x04,0xF0,0x83,0xF9,0x00,0x28,0x68,0xD0,0x52, \ +0x48,0x00,0x68,0xFF,0xF7,0x6F,0xFF,0x31,0x68,0x48,0x72,0x30,0x68,0x46,0x7A, \ 
+0x28,0x06,0x00,0x0E,0x08,0x28,0x61,0xD1,0x4D,0x48,0x80,0x79,0x05,0x28,0x5E, \ +0xD1,0x20,0x79,0x4B,0x4D,0x02,0x28,0x0C,0xD1,0xF8,0x1D,0x03,0x30,0x06,0x22, \ +0x29,0x1C,0x04,0xF0,0x67,0xF9,0x00,0x28,0x52,0xD1,0x78,0x78,0x81,0x08,0x73, \ +0xD3,0x40,0x08,0x72,0xD2,0x20,0x79,0x01,0x28,0x0C,0xD1,0xF8,0x1D,0x09,0x30, \ +0x06,0x22,0x29,0x1C,0x04,0xF0,0x57,0xF9,0x00,0x28,0x42,0xD1,0x78,0x78,0x81, \ +0x08,0x64,0xD2,0x40,0x08,0x62,0xD2,0x38,0x78,0x08,0x28,0x60,0xD1,0x3B,0x48, \ +0x01,0x78,0x00,0x29,0x11,0xD0,0xC0,0x78,0x00,0x28,0x11,0xD0,0x78,0x78,0xC0, \ +0x09,0x0E,0xD2,0xB9,0x7F,0xF8,0x1D,0x09,0x30,0x88,0x29,0x02,0xD1,0xC0,0x7B, \ +0x8E,0x28,0x06,0xD0,0xB8,0x7D,0x00,0x07,0x1F,0xD0,0x02,0xE0,0x78,0x78,0xC0, \ +0x09,0x46,0xD2,0x20,0x79,0x02,0x28,0x44,0xD1,0x00,0x2E,0x0A,0xD1,0x2D,0x48, \ +0x41,0x68,0x04,0x29,0x06,0xD1,0x01,0x27,0x47,0x60,0x01,0x20,0x01,0xF0,0xFC, \ +0xFB,0x29,0x48,0x07,0x70,0x60,0x79,0x03,0x28,0x34,0xD1,0x19,0x21,0xC9,0x02, \ +0x02,0x20,0x01,0xF0,0xD8,0xFB,0x25,0x48,0x81,0x78,0x01,0xE0,0x34,0xE0,0x33, \ +0xE0,0x08,0x23,0x99,0x43,0x81,0x70,0x2D,0xE0,0x00,0xE0,0x2D,0xE0,0x00,0x28, \ +0x22,0xD1,0x39,0x78,0x80,0x29,0x01,0xD0,0x50,0x29,0x1D,0xD1,0xFA,0x1D,0x19, \ +0x32,0x50,0x79,0x1C,0x4C,0xC3,0x19,0x20,0x33,0xDB,0x79,0xC0,0x18,0x2A,0x30, \ +0x3B,0x5C,0x24,0x7D,0xA3,0x42,0x10,0xD1,0x92,0x78,0x52,0x08,0x14,0xD3,0x80, \ +0x29,0x0B,0xD1,0x01,0x30,0x39,0x5C,0x04,0x29,0x07,0xD1,0x38,0x18,0xC1,0x79, \ +0x09,0x02,0x09,0x04,0x09,0x0C,0x02,0xE0,0x09,0xE0,0x08,0xE0,0x05,0xE0,0x80, \ +0x79,0x08,0x43,0x02,0xD0,0x80,0x02,0x00,0xF0,0x7B,0xFD,0x00,0xF0,0x6D,0xF8, \ +0xF0,0xBD,0x4C,0x01,0x00,0x02,0xB4,0x00,0x00,0x02,0x60,0x00,0x00,0x02,0xF0, \ +0x01,0x00,0x02,0xA0,0x09,0x00,0x02,0x92,0x00,0x00,0x02,0x1C,0x00,0x00,0x02, \ +0x50,0x09,0x00,0x02,0xE4,0x01,0x00,0x02,0xC0,0x09,0x00,0x02,0x00,0x00,0x00, \ +0x02,0xF0,0xB5,0x1A,0x4A,0x00,0x21,0x50,0x68,0x45,0x7A,0x41,0x72,0x50,0x68, \ +0x44,0x68,0x20,0x78,0x00,0x07,0x00,0x0F,0x08,0x28,0x1A,0xD1,0x15,0x48,0x46, \ +0x79,0xC0,0x20,0x03,0xF0,0x98,0xFB,0x07,0x1C,0x01,0x2E,0x0A,0xD9,0x20,0x78, \ +0x08,0x28,0x07,0xD1,0x60,0x78,0x04,0x21,0x01,0x40,0x20,0x23,0x18,0x40,0x2A, \ +0x1C,0x03,0xF0,0x3E,0xF8,0x38,0x1C,0x03,0xF0,0x87,0xFB,0x28,0x06,0x00,0x0E, \ +0x00,0xF0,0x13,0xF8,0x05,0xE0,0x00,0x28,0x03,0xD1,0x28,0x06,0x00,0x0E,0x00, \ +0xF0,0xE6,0xFB,0x03,0x4A,0x80,0x20,0x51,0x68,0x08,0x72,0x50,0x68,0x00,0x68, \ +0x50,0x60,0xF0,0xBD,0x4C,0x01,0x00,0x02,0xB4,0x00,0x00,0x02,0x88,0xB5,0x00, \ +0x21,0x00,0x91,0x00,0x28,0x0A,0x4F,0x0B,0xD1,0x78,0x68,0x40,0x68,0x81,0x7D, \ +0xC2,0x7D,0x12,0x02,0x11,0x43,0x09,0x04,0x09,0x0C,0x0A,0x30,0xFF,0xF7,0x85, \ +0xFE,0x00,0x90,0x00,0x98,0x01,0x28,0x02,0xD1,0x79,0x68,0xC1,0x20,0x48,0x72, \ +0x88,0xBD,0x00,0x00,0x4C,0x01,0x00,0x02,0x80,0xB5,0x05,0x48,0x00,0x78,0x80, \ +0x09,0x04,0xD3,0x04,0x4F,0x38,0x68,0x02,0xF0,0x7F,0xF8,0x38,0x60,0x80,0xBD, \ +0x00,0x00,0x9C,0x01,0x00,0x02,0x4C,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x00, \ +0x25,0x7D,0x26,0x36,0x01,0x01,0x21,0x89,0x06,0x88,0x68,0x00,0x0B,0xFC,0x24, \ +0x04,0x40,0xFA,0x48,0xC7,0x6A,0x00,0x2F,0x0F,0xD1,0x00,0x20,0xFF,0xF7,0x42, \ +0xFD,0xF6,0x48,0xC1,0x69,0x83,0x01,0x19,0x43,0xC1,0x61,0xC1,0x69,0xF4,0x4B, \ +0x19,0x40,0xC1,0x61,0x01,0x05,0xC8,0x68,0x02,0xB0,0xF0,0xBD,0xF2,0x49,0xA0, \ +0x08,0x08,0x5C,0x00,0x28,0x06,0xD0,0x00,0x20,0xFF,0xF7,0x2D,0xFD,0x01,0x21, \ +0x89,0x06,0xC8,0x68,0xF1,0xE7,0xED,0x49,0x04,0x20,0x20,0x40,0x01,0x91,0x61, \ +0xD0,0x04,0x20,0xFF,0xF7,0xD5,0xFD,0xEA,0x49,0x08,0x71,0xA0,0x09,0x01,0xD3, \ +0x14,0x21,0x00,0xE0,0x0E,0x21,0xE7,0x48,0x02,0x22,0x01,0xF0,0x74,0xFE,0x00, \ 
+0x28,0x03,0xD1,0x01,0x21,0x89,0x06,0xC8,0x68,0xD8,0xE7,0x01,0x21,0x89,0x06, \ +0xC8,0x68,0x28,0x43,0x01,0xE0,0xCA,0x68,0x10,0x43,0x42,0x09,0x03,0xD2,0x32, \ +0x1C,0x01,0x3E,0x00,0x2A,0xF7,0xD1,0x10,0x23,0x98,0x43,0x05,0x1C,0x00,0x2E, \ +0x01,0xDC,0x28,0x1C,0xC4,0xE7,0xD7,0x49,0x08,0x79,0x0A,0x28,0x09,0xD0,0x14, \ +0x28,0x0B,0xD0,0x37,0x28,0x0D,0xD0,0x6E,0x28,0x0F,0xD1,0xD4,0x4A,0x03,0x20, \ +0x50,0x75,0x14,0xE0,0xD2,0x4A,0x00,0x20,0x50,0x75,0x10,0xE0,0xD0,0x4A,0x01, \ +0x21,0x51,0x75,0x0C,0xE0,0xCE,0x4A,0x02,0x20,0x50,0x75,0x08,0xE0,0x14,0x2F, \ +0x03,0xD2,0xCB,0x4A,0x03,0x20,0x50,0x75,0x02,0xE0,0xC9,0x4A,0x02,0x20,0x50, \ +0x75,0xA8,0x09,0x06,0xD3,0xE8,0x08,0x04,0xD2,0x01,0x20,0xFF,0xF7,0xD2,0xFC, \ +0x28,0x1C,0x98,0xE7,0xC3,0x4F,0x00,0x20,0x38,0x60,0xE8,0x0A,0x1D,0xD3,0xB4, \ +0x2C,0x07,0xD0,0xC4,0x2C,0x16,0xD0,0xD4,0x2C,0x23,0xD1,0x00,0xF0,0xFF,0xF9, \ +0x20,0xE0,0x3D,0xE0,0xB6,0x48,0x40,0x68,0x80,0x0B,0x1B,0xD3,0xB9,0x4C,0x02, \ +0x20,0xE1,0x1D,0x03,0x31,0xB8,0x72,0x01,0x98,0x06,0x22,0xB7,0x4E,0x03,0xF0, \ +0xE3,0xFF,0x60,0x88,0x70,0x80,0x0E,0xE0,0x00,0xF0,0xF8,0xFA,0x0B,0xE0,0xA4, \ +0x2C,0x15,0xD0,0xB4,0x2C,0x13,0xD0,0xC4,0x2C,0x01,0xD0,0xD4,0x2C,0x03,0xD1, \ +0xAD,0x48,0xAF,0x49,0x40,0x88,0x08,0x80,0x78,0x68,0x04,0x28,0x06,0xD1,0x00, \ +0xF0,0xCE,0xF9,0x00,0x22,0x10,0x21,0xAB,0x48,0x03,0xF0,0x97,0xFA,0x28,0x1C, \ +0x5D,0xE7,0x7A,0x7D,0xA9,0x48,0xAA,0x4B,0x52,0x00,0x9A,0x5A,0xC1,0x88,0xA5, \ +0x4B,0x8A,0x18,0x1A,0x80,0xB4,0x2C,0xE8,0xD1,0x80,0x88,0x40,0x00,0x08,0x18, \ +0x19,0x88,0x40,0x18,0x18,0x80,0xE1,0xE7,0xA3,0x49,0x08,0x68,0x00,0x7A,0x00, \ +0x28,0x06,0xD0,0xC4,0x20,0x9A,0x4A,0x01,0x21,0x89,0x06,0x10,0x60,0xC8,0x68, \ +0x3F,0xE7,0x01,0x20,0xFF,0xF7,0x74,0xFC,0x29,0x2F,0x0D,0xD2,0x07,0x20,0xFF, \ +0xF7,0x23,0xFD,0x91,0x49,0xC8,0x71,0x0B,0x21,0x79,0x43,0xCF,0x08,0x03,0x21, \ +0x00,0x91,0x00,0x0A,0x1D,0xD3,0x01,0x3F,0x1B,0xE0,0x04,0x20,0xFF,0xF7,0x15, \ +0xFD,0x00,0x06,0x00,0x0E,0x89,0x4E,0x0A,0x28,0x30,0x71,0x1F,0xD0,0x14,0x28, \ +0x21,0xD0,0x37,0x28,0x23,0xD0,0x6E,0x28,0x03,0xD1,0x07,0x20,0xFF,0xF7,0x06, \ +0xFD,0xF0,0x71,0x0B,0x20,0x78,0x43,0xC7,0x08,0x03,0x21,0x00,0x91,0xF0,0x79, \ +0x00,0x0A,0x00,0xD3,0x01,0x3F,0x80,0x2C,0x01,0xD0,0x50,0x2C,0x16,0xD1,0x03, \ +0x20,0xFF,0xF7,0xF5,0xFC,0x82,0x49,0x80,0x06,0x09,0x68,0x80,0x0E,0x48,0x74, \ +0x11,0xE0,0xFF,0x08,0x00,0x21,0x00,0x91,0xEE,0xE7,0xBF,0x08,0x01,0x21,0x00, \ +0x91,0xEA,0xE7,0x0B,0x20,0x78,0x43,0x07,0x09,0x02,0x21,0x00,0x91,0xE4,0xE7, \ +0x78,0x49,0x00,0x20,0x09,0x68,0x48,0x74,0x74,0x48,0x80,0x89,0x04,0x30,0xB8, \ +0x42,0x01,0xD3,0x18,0x2F,0x0E,0xD8,0x6C,0x4A,0xC3,0x20,0x10,0x60,0x65,0x48, \ +0xC1,0x69,0x83,0x01,0x19,0x43,0xC1,0x61,0xC1,0x69,0x63,0x4B,0x19,0x40,0xC1, \ +0x61,0x01,0x05,0xC8,0x68,0xDB,0xE6,0x6B,0x4E,0x02,0x22,0x30,0x68,0x18,0x21, \ +0x40,0x68,0x01,0xF0,0x6A,0xFD,0x00,0x28,0x03,0xD1,0x01,0x21,0x89,0x06,0xC8, \ +0x68,0xCE,0xE6,0x7D,0x20,0xC0,0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x29,0x43, \ +0x03,0xE0,0x01,0x22,0x92,0x06,0xD2,0x68,0x11,0x43,0x4A,0x09,0x03,0xD2,0x02, \ +0x1C,0x01,0x38,0x00,0x2A,0xF5,0xD1,0x10,0x23,0x99,0x43,0x0D,0x1C,0x00,0x28, \ +0x01,0xDC,0x28,0x1C,0xB6,0xE6,0x58,0x48,0x54,0x49,0x00,0x68,0x00,0x22,0x46, \ +0x68,0x0A,0x80,0x2A,0x0A,0x52,0x07,0x08,0xD1,0x70,0x88,0x4D,0x4A,0x00,0x27, \ +0x08,0x80,0x01,0x21,0x11,0x73,0xD7,0x72,0x28,0x1C,0xA4,0xE6,0x50,0x49,0x0D, \ +0x60,0xE9,0x0A,0x13,0xD3,0x47,0x4A,0x01,0x21,0x91,0x72,0x71,0x78,0xC9,0x08, \ +0x03,0xD3,0x71,0x88,0x45,0x4A,0x51,0x80,0x02,0xE0,0x43,0x49,0x00,0x22,0x4A, \ +0x80,0x40,0x68,0xC1,0x1D,0x03,0x31,0x06,0x22,0x01,0x98,0x03,0xF0,0xF4,0xFE, \ 
+0x00,0x98,0x3D,0x49,0x48,0x75,0x42,0x48,0x02,0x68,0x97,0x81,0x4A,0x7D,0x03, \ +0x68,0x00,0x27,0x9A,0x73,0x0F,0x60,0x31,0x78,0x48,0x29,0x03,0xD1,0x71,0x78, \ +0x40,0x23,0x99,0x43,0x71,0x70,0x71,0x78,0xC9,0x09,0x2E,0xD2,0x00,0x68,0x02, \ +0x22,0x81,0x89,0x40,0x68,0x18,0x30,0x18,0x39,0x01,0xF0,0x05,0xFD,0x00,0x28, \ +0x05,0xD1,0x2E,0x48,0x01,0x21,0x01,0x73,0xC7,0x72,0x28,0x1C,0x67,0xE6,0x2B, \ +0x4F,0x03,0x20,0xF8,0x72,0x02,0x20,0x38,0x73,0x80,0x2C,0x15,0xD1,0x2F,0x48, \ +0xC1,0x1D,0x29,0x31,0x09,0x79,0x01,0x29,0x0F,0xD1,0xF9,0x1D,0x49,0x31,0x89, \ +0x79,0x05,0x29,0x0A,0xD1,0xC1,0x1D,0x07,0x31,0xF0,0x1D,0x06,0x22,0x09,0x30, \ +0x03,0xF0,0x98,0xFE,0x00,0x28,0x01,0xD1,0x01,0x21,0xB9,0x76,0x28,0x1C,0x48, \ +0xE6,0x24,0x4E,0x31,0x78,0x00,0x29,0x05,0xD1,0x19,0x48,0x01,0x21,0x01,0x73, \ +0xC7,0x72,0x28,0x1C,0x3E,0xE6,0x04,0x1C,0x00,0x68,0x02,0x22,0x40,0x68,0x04, \ +0x21,0x18,0x30,0x01,0xF0,0xCC,0xFC,0x00,0x28,0x01,0xD1,0x28,0x1C,0x32,0xE6, \ +0x7D,0x20,0xC0,0x00,0x01,0x22,0x92,0x06,0xD1,0x68,0x29,0x43,0x01,0xE0,0xD3, \ +0x68,0x19,0x43,0x4B,0x09,0x03,0xD2,0x03,0x1C,0x01,0x38,0x00,0x2B,0xF7,0xD1, \ +0x10,0x23,0x99,0x43,0x0D,0x1C,0x00,0x28,0x23,0xDC,0x20,0xE0,0x00,0x00,0x40, \ +0x00,0x00,0x04,0xFF,0xEF,0x00,0x00,0x54,0x01,0x00,0x02,0x3C,0x09,0x00,0x02, \ +0xD8,0x07,0x00,0x02,0xC0,0x07,0x00,0x02,0x50,0x09,0x00,0x02,0x38,0x09,0x00, \ +0x02,0xEA,0x01,0x00,0x02,0x44,0x07,0x00,0x02,0x00,0x00,0x00,0x02,0xB8,0x01, \ +0x00,0x02,0x4C,0x01,0x00,0x02,0xEC,0x01,0x00,0x02,0x84,0x00,0x00,0x02,0x1C, \ +0x00,0x00,0x02,0x28,0x1C,0xFA,0xE5,0x20,0x68,0x40,0x68,0xC1,0x1D,0x11,0x31, \ +0x40,0x7E,0x0A,0x78,0x00,0x02,0x10,0x43,0x8A,0x78,0xC9,0x78,0x12,0x04,0x10, \ +0x43,0x89,0x09,0x09,0x06,0x09,0x0E,0x0D,0x23,0x59,0x43,0x89,0x19,0x0B,0x7B, \ +0x1B,0x06,0x18,0x43,0x32,0x1C,0x03,0x26,0x76,0x06,0x30,0x60,0x8B,0x7B,0x48, \ +0x7B,0x1B,0x02,0x18,0x43,0xCB,0x7B,0x1B,0x04,0x18,0x43,0x0B,0x7C,0x1B,0x06, \ +0x18,0x43,0x70,0x60,0xD0,0x1D,0x39,0x30,0x00,0x78,0x01,0x28,0x02,0xD1,0x01, \ +0x21,0xB1,0x60,0x19,0xE0,0x02,0x28,0x17,0xD1,0x8A,0x7C,0x48,0x7C,0x12,0x02, \ +0x10,0x43,0xCA,0x7C,0x12,0x04,0x10,0x43,0x0A,0x7D,0x12,0x06,0x10,0x43,0x70, \ +0x61,0x8A,0x7D,0x48,0x7D,0x12,0x02,0x10,0x43,0xCA,0x7D,0x09,0x7E,0x12,0x04, \ +0x10,0x43,0x09,0x06,0x08,0x43,0xB0,0x61,0x81,0x20,0xB0,0x60,0x20,0x68,0x0E, \ +0x22,0x81,0x89,0x40,0x68,0x18,0x30,0x20,0x39,0x01,0xF0,0x42,0xFC,0x00,0x28, \ +0x06,0xD1,0x08,0x48,0x01,0x21,0x01,0x73,0xC7,0x72,0xB7,0x60,0x28,0x1C,0xA3, \ +0xE5,0x20,0x68,0x81,0x89,0x08,0x39,0x81,0x81,0x03,0x49,0x03,0x20,0xC8,0x72, \ +0x02,0x20,0x08,0x73,0x28,0x1C,0x98,0xE5,0x50,0x09,0x00,0x02,0x00,0xB5,0x03, \ +0x49,0x01,0x20,0x48,0x60,0x01,0xF0,0xB0,0xF8,0x00,0xBD,0x00,0x00,0x50,0x09, \ +0x00,0x02,0xF0,0xB5,0x3D,0x4F,0x01,0x24,0x78,0x68,0x04,0x28,0x0C,0xD1,0x01, \ +0x20,0x01,0xF0,0xA3,0xF8,0x7C,0x60,0x01,0x20,0xFF,0xF7,0xB7,0xFA,0x00,0x22, \ +0x01,0x21,0x37,0x48,0x03,0xF0,0xB2,0xF8,0xF0,0xBD,0x78,0x68,0x02,0x28,0xFB, \ +0xD1,0x01,0x20,0x01,0xF0,0x93,0xF8,0x7C,0x60,0x78,0x6E,0x08,0x23,0x41,0x78, \ +0x32,0x4C,0x99,0x43,0x41,0x70,0x2F,0x49,0x89,0x89,0xB9,0x87,0x22,0x78,0x2F, \ +0x49,0x01,0x2A,0x45,0xD1,0x2F,0x4B,0x1A,0x78,0x00,0x2A,0x01,0xD0,0x18,0x25, \ +0x00,0xE0,0x1E,0x25,0x2C,0x4E,0x36,0x88,0x75,0x1B,0x2C,0x4E,0x36,0x68,0xAD, \ +0x19,0x2A,0x4E,0x01,0x32,0x35,0x60,0x1A,0x70,0x1A,0x78,0x86,0x7D,0x12,0x07, \ +0x12,0x0F,0x1D,0x1C,0xF0,0x23,0x33,0x40,0x1A,0x43,0x82,0x75,0x42,0x78,0xD2, \ +0x09,0x03,0xD3,0x22,0x4A,0x13,0x68,0x08,0x3B,0x13,0x60,0x21,0x4B,0x2A,0x78, \ +0x1B,0x88,0x9A,0x42,0x0F,0xD1,0x20,0x4A,0x1C,0x4E,0x12,0x88,0x04,0x23,0x32, \ 
+0x80,0x42,0x78,0x9A,0x43,0x42,0x70,0x02,0x20,0x20,0x70,0x08,0x68,0x80,0x7D, \ +0x31,0x88,0x00,0xF0,0x48,0xFF,0xF8,0x66,0x15,0x4E,0x30,0x88,0xB8,0x66,0x20, \ +0x78,0x02,0x28,0x04,0xD0,0x01,0x21,0x01,0x20,0x00,0xF0,0x2D,0xF8,0xF0,0xBD, \ +0x01,0x21,0x00,0x20,0x00,0xF0,0x28,0xF8,0xF0,0xBD,0x09,0x68,0x00,0x20,0x48, \ +0x73,0x0F,0x49,0x09,0x68,0x48,0x70,0x01,0x20,0xFF,0xF7,0x4E,0xFA,0x00,0x22, \ +0x10,0x21,0x0C,0x48,0x03,0xF0,0x49,0xF8,0xF0,0xBD,0x50,0x09,0x00,0x02,0x44, \ +0x07,0x00,0x02,0xC4,0x00,0x00,0x02,0x9D,0x01,0x00,0x02,0xCC,0x01,0x00,0x02, \ +0x9E,0x01,0x00,0x02,0xA0,0x01,0x00,0x02,0xA8,0x01,0x00,0x02,0xA4,0x01,0x00, \ +0x02,0xA2,0x01,0x00,0x02,0xD8,0x01,0x00,0x02,0x04,0x07,0x00,0x02,0xF0,0xB5, \ +0x30,0x4D,0x04,0x1C,0x28,0x68,0x0F,0x1C,0x80,0x7D,0x2E,0x49,0x08,0x70,0x00, \ +0xF0,0x66,0xFF,0x2C,0x49,0x08,0x78,0x03,0x28,0x04,0xD1,0x2B,0x48,0x40,0x6B, \ +0xFF,0xF7,0x60,0xFA,0x02,0xE0,0x00,0x20,0xFF,0xF7,0x5C,0xFA,0x28,0x68,0x85, \ +0x7D,0x27,0x48,0x80,0x7A,0x85,0x42,0x00,0xDB,0x05,0x1C,0x23,0x48,0x00,0x78, \ +0x01,0xF0,0x85,0xFB,0x24,0x4A,0x24,0x49,0x10,0x60,0xC9,0x88,0x49,0x00,0x6B, \ +0x00,0x23,0x4D,0xEB,0x5A,0xC9,0x18,0x00,0x2C,0x11,0xD0,0x00,0x2F,0x10,0xD0, \ +0x20,0x4C,0x21,0x4D,0x24,0x88,0x2D,0x78,0x0B,0x18,0x01,0x3C,0xAC,0x42,0x03, \ +0xD1,0x1E,0x4C,0x24,0x68,0x1E,0x19,0x04,0xE0,0x15,0x4C,0xE4,0x6E,0xE6,0x18, \ +0x00,0xE0,0x00,0x26,0x13,0x4C,0x14,0x4A,0xE3,0x6E,0x18,0x18,0x10,0x60,0x18, \ +0x4A,0x12,0x88,0x10,0x18,0x45,0x18,0x00,0x2F,0x07,0xD0,0x60,0x6E,0x0C,0x49, \ +0x02,0x30,0x33,0x1C,0x00,0x22,0x09,0x78,0xFF,0xF7,0x3E,0xFB,0x01,0x20,0x29, \ +0x1C,0x00,0xF0,0xA8,0xFF,0x02,0x20,0x60,0x60,0x01,0x20,0x0F,0x49,0xE0,0x75, \ +0x09,0x88,0xE0,0x6E,0x06,0x4A,0x40,0x18,0x10,0x60,0xF0,0xBD,0x00,0x00,0xCC, \ +0x01,0x00,0x02,0x9B,0x01,0x00,0x02,0x50,0x09,0x00,0x02,0x08,0x01,0x00,0x02, \ +0x94,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xB8,0x01,0x00,0x02,0xA4,0x01,0x00, \ +0x02,0x9E,0x01,0x00,0x02,0xAC,0x01,0x00,0x02,0xB6,0x01,0x00,0x02,0xB4,0x01, \ +0x00,0x02,0x00,0xB5,0x06,0x48,0x40,0x68,0x03,0x28,0x06,0xD1,0x01,0x20,0x00, \ +0xF0,0x96,0xFF,0x00,0x21,0x01,0x20,0xFF,0xF7,0x7A,0xFF,0x00,0xBD,0x00,0x00, \ +0x50,0x09,0x00,0x02,0xB0,0xB5,0x17,0x4C,0x61,0x68,0x4A,0x68,0x13,0x78,0x1D, \ +0x07,0x2D,0x0F,0xC1,0x27,0x00,0x2D,0x22,0xD1,0x1D,0x11,0x0D,0x2D,0x1F,0xD2, \ +0x02,0xA3,0x5B,0x5D,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x1B,0x0B,0x1B,0x0B,0x12, \ +0x07,0x1B,0x1B,0x07,0x1B,0x0F,0x0B,0x0F,0x00,0x10,0x1C,0x00,0xF0,0x18,0xF8, \ +0xB0,0xBD,0x00,0x28,0x01,0xD0,0x4F,0x72,0xB0,0xBD,0xFF,0xF7,0x03,0xFC,0xB0, \ +0xBD,0x06,0x48,0x40,0x78,0x00,0x28,0x01,0xD0,0x07,0xF0,0x22,0xFC,0x60,0x68, \ +0x47,0x72,0xB0,0xBD,0x4F,0x72,0xB0,0xBD,0x00,0x00,0x4C,0x01,0x00,0x02,0xA0, \ +0x09,0x00,0x02,0xF0,0xB5,0x30,0x4C,0x07,0x1C,0xA0,0x79,0x01,0x28,0x02,0xD1, \ +0x38,0x1C,0x07,0xF0,0x01,0xFC,0x38,0x78,0xC1,0x25,0x80,0x28,0x12,0xD1,0xA0, \ +0x79,0x03,0x28,0x05,0xD1,0x06,0xF0,0xCE,0xFF,0x00,0x28,0x01,0xD1,0x28,0x1C, \ +0xF0,0xBD,0xA0,0x79,0x04,0x28,0x06,0xD1,0x01,0x20,0x06,0xF0,0xB8,0xFE,0x00, \ +0x28,0x01,0xD1,0x28,0x1C,0xF0,0xBD,0xA0,0x79,0x05,0x28,0x01,0xD0,0x00,0x20, \ +0xF0,0xBD,0x1F,0x48,0x06,0x22,0xC6,0x1D,0xC1,0x1D,0x29,0x36,0x07,0x31,0xF8, \ +0x1D,0x09,0x30,0x34,0x79,0x03,0xF0,0x54,0xFC,0x00,0x28,0x27,0xD1,0x02,0x2C, \ +0x01,0xD0,0x01,0x2C,0x21,0xD1,0xC0,0x20,0x02,0xF0,0x29,0xFF,0x07,0x1C,0x02, \ +0x2C,0x02,0xD1,0x06,0x20,0x00,0xF0,0x1B,0xFF,0x00,0x20,0x00,0xF0,0x24,0xF8, \ +0x02,0x2C,0x05,0xD1,0x70,0x79,0x01,0x28,0x0B,0xDD,0x08,0xF0,0x41,0xF8,0x08, \ +0xE0,0x0C,0x48,0x00,0x88,0x84,0x02,0x00,0xF0,0xCD,0xFE,0x21,0x1A,0x06,0x20, \ 
+0x00,0xF0,0xED,0xFE,0x38,0x1C,0x02,0xF0,0x0C,0xFF,0x00,0x20,0xF0,0xBD,0x28, \ +0x1C,0xF0,0xBD,0x01,0x2C,0x02,0xD1,0x07,0xF0,0x42,0xFC,0xF0,0xBD,0x28,0x1C, \ +0xF0,0xBD,0x00,0x00,0xA0,0x09,0x00,0x02,0x84,0x00,0x00,0x02,0xF1,0xB5,0x83, \ +0xB0,0x3E,0x49,0x00,0x25,0x4B,0x68,0x02,0x93,0x59,0x68,0x4A,0x7E,0x0F,0x7E, \ +0x12,0x02,0x3A,0x43,0x8F,0x7E,0x3F,0x04,0x3A,0x43,0xCF,0x7E,0x3F,0x06,0x3A, \ +0x43,0x16,0x1C,0x4F,0x7F,0x0A,0x7F,0x3F,0x02,0x3A,0x43,0x8F,0x7F,0xC9,0x7F, \ +0x3F,0x04,0x3A,0x43,0x09,0x06,0x0A,0x43,0x99,0x89,0x18,0x39,0xCC,0x00,0x99, \ +0x7B,0x17,0x1C,0x00,0x29,0x26,0xD0,0x01,0x29,0x26,0xD0,0x02,0x29,0x26,0xD0, \ +0x03,0x29,0x0C,0xD1,0x0B,0x20,0x21,0x1C,0x03,0xF0,0x53,0xFC,0x00,0x91,0x61, \ +0x1A,0x0B,0x20,0x03,0xF0,0x4E,0xFC,0x00,0x99,0x00,0x29,0x00,0xD9,0x01,0x30, \ +0x01,0x24,0xA4,0x06,0xA2,0x6A,0x61,0x6A,0x02,0x9B,0x30,0x18,0x5B,0x69,0xCB, \ +0x1A,0xC0,0x18,0xB0,0x42,0x00,0xD2,0x01,0x37,0x06,0x1C,0x1F,0x48,0x03,0x79, \ +0x00,0x20,0x02,0x2B,0x14,0xD1,0x01,0x25,0x1F,0xE0,0x20,0x1C,0xE9,0xE7,0x60, \ +0x08,0xE7,0xE7,0x61,0x00,0x01,0x91,0x0B,0x20,0x03,0xF0,0x2D,0xFC,0x0C,0x1C, \ +0x01,0x99,0x09,0x1B,0x0B,0x20,0x03,0xF0,0x27,0xFC,0x00,0x2C,0xDA,0xD9,0x01, \ +0x30,0xD8,0xE7,0x01,0x2B,0x0A,0xD1,0x12,0x4B,0x97,0x42,0x58,0x70,0x01,0xD9, \ +0x01,0x25,0x04,0xE0,0x97,0x42,0x02,0xD1,0x8E,0x42,0x00,0xD9,0x01,0x25,0x03, \ +0x9A,0x00,0x2A,0x03,0xD0,0x00,0x2D,0x03,0xD1,0x04,0xB0,0xF0,0xBD,0x00,0x2D, \ +0x09,0xD0,0x70,0x1A,0x00,0xF0,0x10,0xF8,0x01,0x23,0xDE,0x42,0x01,0xD1,0x00, \ +0x26,0x01,0x37,0xA7,0x62,0x66,0x62,0x01,0x20,0xEF,0xE7,0x00,0x00,0x4C,0x01, \ +0x00,0x02,0xB4,0x00,0x00,0x02,0xA0,0x09,0x00,0x02,0x90,0xB4,0x10,0x4A,0x00, \ +0x21,0x97,0x69,0x91,0x61,0x01,0x21,0x0E,0x4B,0x8C,0x00,0xE3,0x18,0xDC,0x6A, \ +0x01,0x31,0x24,0x18,0xDC,0x62,0x08,0x29,0xF6,0xD9,0x0B,0x49,0x0B,0x6B,0x1B, \ +0x18,0x0B,0x63,0x0B,0x6B,0x5B,0x00,0x5B,0x08,0x0B,0x63,0xCB,0x6A,0x18,0x18, \ +0xC8,0x62,0xC8,0x6A,0x40,0x00,0x40,0x08,0xC8,0x62,0x97,0x61,0x90,0xBC,0xF7, \ +0x46,0x00,0x00,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04,0x50,0x09,0x00,0x02, \ +0x00,0xB5,0x08,0x29,0x01,0xD1,0xFF,0xF7,0xD9,0xFA,0x00,0xBD,0x0B,0x49,0x09, \ +0x68,0x49,0x69,0x08,0x18,0x0A,0x49,0x4A,0x7A,0x05,0x2A,0x02,0xD1,0x8A,0x6B, \ +0x82,0x42,0x0A,0xD2,0x05,0x22,0x4A,0x72,0x02,0x1C,0x06,0x48,0x80,0x23,0xC2, \ +0x60,0x82,0x69,0x1A,0x43,0x82,0x61,0xC0,0x68,0x88,0x63,0xF7,0x46,0x00,0x00, \ +0x4C,0x01,0x00,0x02,0x50,0x09,0x00,0x02,0x80,0x00,0x00,0x04,0xF0,0xB5,0x02, \ +0x79,0x35,0x4C,0x87,0x78,0xFE,0x21,0x11,0x40,0xE5,0x88,0x03,0x23,0x9B,0x03, \ +0x9D,0x43,0x2B,0x1C,0x00,0x29,0x03,0xD0,0xCD,0x00,0x01,0x3D,0x9D,0x42,0x05, \ +0xD2,0x45,0x78,0x6D,0x18,0xED,0x00,0x18,0x3D,0x9D,0x42,0x01,0xD8,0x00,0x25, \ +0x08,0xE0,0xC9,0x00,0x59,0x1A,0xC9,0x08,0x5E,0x07,0x76,0x0F,0x41,0x18,0x49, \ +0x79,0xF1,0x40,0x0D,0x1C,0x00,0x23,0x26,0x49,0x52,0x08,0x8B,0x70,0x05,0xD3, \ +0x00,0x2F,0x03,0xD1,0x8A,0x78,0x02,0x23,0x1A,0x43,0x8A,0x70,0xEA,0x07,0xD2, \ +0x0F,0x03,0xD0,0x8D,0x78,0x04,0x23,0x2B,0x43,0x8B,0x70,0xE3,0x1D,0x29,0x33, \ +0x5B,0x79,0x01,0x25,0x02,0x2B,0x1D,0xD1,0x8E,0x78,0x08,0x23,0x33,0x43,0x8B, \ +0x70,0x00,0x2A,0x03,0xD0,0x0A,0x78,0x00,0x2A,0x00,0xD1,0x4D,0x70,0x00,0x2F, \ +0x00,0xD1,0xC7,0x78,0x15,0x48,0x40,0x8B,0xB8,0x42,0x00,0xD8,0x07,0x1C,0x00, \ +0xF0,0x96,0xFD,0x21,0x88,0x12,0x4B,0x4F,0x43,0xB9,0x02,0x08,0x1A,0xC1,0x18, \ +0x06,0x20,0x00,0xF0,0xB1,0xFD,0xF0,0xBD,0x88,0x78,0xC0,0x08,0x00,0xD3,0x8D, \ +0x71,0x88,0x78,0x40,0x08,0x80,0x07,0x07,0xD1,0x0A,0x48,0x80,0x69,0x80,0x08, \ +0x03,0xD2,0x88,0x78,0x08,0x23,0x18,0x43,0x88,0x70,0x88,0x78,0x04,0x23,0x98, \ 
+0x43,0x88,0x70,0xF0,0xBD,0x00,0x00,0x84,0x00,0x00,0x02,0xC0,0x09,0x00,0x02, \ +0xC4,0x00,0x00,0x02,0x48,0xF4,0xFF,0xFF,0x80,0x00,0x00,0x04,0xF0,0xB5,0x82, \ +0xB0,0x36,0x48,0x34,0x4E,0xC5,0x1D,0x09,0x35,0x33,0x4C,0xC7,0x1D,0x69,0x37, \ +0x00,0x22,0xD2,0x43,0x00,0x92,0x01,0x22,0x01,0xAB,0x31,0x48,0x32,0x49,0x02, \ +0xF0,0xAA,0xFE,0x01,0x98,0x41,0x0A,0x0C,0xD3,0x80,0x20,0x38,0x71,0x00,0x20, \ +0x78,0x71,0x38,0x79,0x00,0x0A,0x4C,0xD3,0x07,0xF0,0x02,0xFC,0x38,0x79,0x00, \ +0x0A,0xFA,0xD2,0x46,0xE0,0x41,0x08,0x0F,0xD3,0x30,0x1C,0xFF,0xF7,0xED,0xF8, \ +0x27,0x48,0x41,0x6C,0x09,0x78,0x40,0x29,0x3C,0xD0,0x07,0xF0,0xAE,0xF8,0x23, \ +0x48,0x40,0x6C,0x00,0x78,0x40,0x28,0xF8,0xD1,0x34,0xE0,0x41,0x0D,0x03,0xD3, \ +0x40,0x20,0x06,0xF0,0x71,0xFF,0x2E,0xE0,0x41,0x09,0x03,0xD3,0x50,0x20,0x06, \ +0xF0,0x6B,0xFF,0x28,0xE0,0x40,0x0F,0x03,0xD3,0x80,0x20,0x06,0xF0,0x65,0xFF, \ +0x22,0xE0,0x00,0x21,0x79,0x22,0x52,0x05,0x17,0x48,0x91,0x82,0x10,0x82,0x91, \ +0x80,0x64,0x20,0x10,0x80,0x02,0x20,0x90,0x82,0x12,0x48,0x21,0x72,0x81,0x6B, \ +0x09,0x7B,0x09,0x0A,0x06,0xD3,0x00,0xF0,0x21,0xF8,0x0E,0x48,0x80,0x6B,0x00, \ +0x7B,0x00,0x0A,0xF8,0xD2,0xC0,0x20,0x02,0xF0,0x51,0xFD,0x00,0x21,0x79,0x22, \ +0x52,0x05,0x91,0x82,0x11,0x83,0x21,0x72,0x02,0xF0,0x49,0xFD,0x00,0x20,0xA8, \ +0x73,0x9C,0xE7,0x26,0x08,0x00,0x02,0xD4,0x01,0x00,0x02,0x50,0x09,0x00,0x02, \ +0x24,0x07,0x00,0x02,0x11,0x11,0x10,0x10,0x94,0x01,0x00,0x02,0xA0,0x8C,0x00, \ +0x00,0xF0,0xB5,0x83,0xB0,0x87,0x4D,0x00,0x24,0xA8,0x6B,0x47,0x68,0x39,0x79, \ +0x49,0x08,0x01,0xD3,0x01,0x26,0x00,0xE0,0x00,0x26,0x82,0x4D,0x69,0x7A,0x00, \ +0x29,0x73,0xD1,0x81,0x4A,0xD1,0x78,0x00,0x29,0x0C,0xD0,0x39,0x78,0x08,0x29, \ +0x09,0xD1,0x7F,0x4A,0x91,0x78,0x00,0x29,0x05,0xD0,0x81,0x7D,0xD3,0x78,0x99, \ +0x42,0x01,0xDD,0xD1,0x78,0x81,0x75,0x78,0x4B,0x7A,0x49,0x9D,0x6B,0x00,0x22, \ +0x68,0x68,0x00,0x2E,0x48,0x66,0x9A,0x72,0x65,0xD1,0x77,0x48,0xA9,0x8A,0x00, \ +0x89,0x04,0x38,0x81,0x42,0x60,0xDD,0x70,0x4A,0x01,0x21,0x51,0x72,0xF9,0x1D, \ +0x17,0x31,0x51,0x61,0x6D,0x49,0x04,0x04,0x24,0x0C,0x8C,0x81,0xA8,0x8A,0x01, \ +0x1B,0xE0,0x1F,0x11,0x38,0x02,0x90,0x03,0xF0,0x93,0xFA,0x68,0x49,0x01,0x30, \ +0x08,0x82,0xA8,0x8A,0x01,0x1B,0x02,0x98,0x03,0xF0,0x8B,0xFA,0xC8,0x1D,0x63, \ +0x49,0x11,0x30,0xC8,0x81,0xC8,0x89,0x18,0x28,0x04,0xD1,0x60,0x4B,0xDC,0x81, \ +0x18,0x8A,0x01,0x38,0x18,0x82,0x78,0x78,0xC0,0x09,0x06,0xD3,0x5C,0x4B,0xE0, \ +0x1D,0x01,0x30,0x98,0x81,0xD8,0x89,0x08,0x30,0xD8,0x81,0x59,0x4C,0xA8,0x7D, \ +0xE1,0x89,0x00,0xF0,0xC4,0xFB,0xA0,0x61,0xA0,0x6B,0x80,0x7D,0xA1,0x89,0x00, \ +0xF0,0xBE,0xFB,0x56,0x49,0x54,0x4A,0xC8,0x66,0xA0,0x89,0x88,0x66,0xA0,0x6B, \ +0x80,0x7D,0x92,0x7A,0x90,0x42,0x00,0xDA,0x02,0x1C,0x53,0x4B,0x52,0x00,0x9A, \ +0x5A,0x52,0x4B,0xDB,0x88,0x5B,0x00,0xD2,0x18,0x23,0x8A,0xA5,0x7A,0x01,0x3B, \ +0xAB,0x42,0x04,0xD1,0xA1,0x69,0x54,0x18,0x00,0xE0,0x25,0xE0,0x01,0xE0,0xC9, \ +0x6E,0x8C,0x18,0x01,0xF0,0x34,0xF8,0x04,0x19,0x78,0x78,0x04,0x23,0x18,0x43, \ +0x78,0x70,0x12,0xE0,0xFF,0xE7,0x40,0x48,0x42,0x49,0x42,0x72,0xA8,0x8A,0x88, \ +0x66,0x78,0x78,0xC0,0x09,0x02,0xD3,0x88,0x6E,0x08,0x30,0x88,0x66,0x3D,0x49, \ +0x89,0x6E,0xA8,0x7D,0x00,0xF0,0x87,0xFB,0x3B,0x49,0xC8,0x66,0x37,0x48,0x32, \ +0x1C,0x80,0x6B,0x81,0x7D,0xB8,0x1C,0x23,0x1C,0xFE,0xF7,0xFD,0xFF,0x30,0x1C, \ +0x00,0xF0,0xF6,0xFA,0x00,0x28,0x0A,0xD0,0x02,0x20,0x33,0x49,0xC2,0x1E,0x48, \ +0x74,0x00,0x92,0x01,0x22,0x11,0x21,0x34,0x48,0x01,0xAB,0x02,0xF0,0x87,0xFD, \ +0x2E,0x48,0x00,0x24,0x2A,0x4D,0x44,0x74,0xA8,0x6B,0x41,0x7B,0x00,0x29,0x0C, \ +0xD1,0x38,0x1C,0x00,0xF0,0x70,0xF8,0x27,0x4A,0x54,0x70,0x10,0x78,0x01,0x30, \ 
+0x10,0x70,0x00,0xF0,0xDF,0xFB,0x00,0xF0,0x55,0xF8,0x3E,0xE0,0xE9,0x1D,0x39, \ +0x31,0x0A,0x7A,0x01,0x2A,0x05,0xD1,0x08,0x22,0x42,0x73,0x0C,0x72,0x00,0xF0, \ +0x4A,0xF8,0x33,0xE0,0x40,0x7B,0x04,0x28,0x1F,0xD0,0x00,0xF0,0xBC,0xFB,0xA8, \ +0x6B,0x81,0x7B,0x01,0x31,0x81,0x73,0x78,0x78,0x08,0x23,0x18,0x43,0x78,0x70, \ +0x38,0x78,0x08,0x28,0x12,0xD1,0x14,0x48,0xC0,0x78,0x00,0x28,0x0E,0xD0,0x13, \ +0x4A,0x18,0x4B,0x50,0x78,0x01,0x30,0x00,0x06,0x00,0x0E,0x50,0x70,0xA9,0x6B, \ +0x89,0x7D,0x59,0x5C,0x88,0x42,0x00,0xDD,0x14,0x70,0x00,0xF0,0xAE,0xFB,0xA8, \ +0x6B,0x0D,0x4A,0x81,0x7B,0x12,0x7C,0x91,0x42,0x04,0xDA,0x44,0x73,0xA9,0x6B, \ +0x82,0x20,0x08,0x73,0x05,0xE0,0x01,0x21,0x38,0x1C,0x00,0xF0,0x29,0xF8,0x00, \ +0xF0,0x15,0xF8,0x03,0xB0,0xF0,0xBD,0x00,0x00,0x94,0x01,0x00,0x02,0x08,0x01, \ +0x00,0x02,0xD0,0x01,0x00,0x02,0x50,0x09,0x00,0x02,0xC4,0x00,0x00,0x02,0xB8, \ +0x01,0x00,0x02,0x00,0x00,0x00,0x02,0x04,0x07,0x00,0x02,0xDE,0x01,0x00,0x02, \ +0x05,0x48,0x00,0x21,0x41,0x72,0x81,0x72,0x04,0x49,0x05,0x4A,0x89,0x89,0x91, \ +0x87,0x80,0x6B,0x10,0x21,0x01,0x73,0xF7,0x46,0x94,0x01,0x00,0x02,0xC4,0x00, \ +0x00,0x02,0x50,0x09,0x00,0x02,0x80,0xB4,0x09,0x4A,0x01,0x27,0x53,0x79,0x08, \ +0x4A,0x03,0x2B,0x02,0xD1,0xD7,0x70,0x80,0xBC,0xF7,0x46,0x40,0x78,0x40,0x09, \ +0xFA,0xD3,0x00,0x29,0x02,0xD1,0x00,0x20,0xD0,0x70,0xF5,0xE7,0xD7,0x70,0xF3, \ +0xE7,0xB4,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0x90,0xB5,0xC0,0x20,0x02,0xF0, \ +0xE6,0xFB,0x0A,0x4C,0x03,0x21,0xA1,0x73,0x02,0xF0,0xE1,0xFB,0x60,0x7F,0x01, \ +0x28,0x0C,0xD0,0xC0,0x20,0x02,0xF0,0xDB,0xFB,0x07,0x1C,0xA0,0x7B,0x03,0x28, \ +0x02,0xD1,0x00,0x20,0x00,0xF0,0x8C,0xF9,0x38,0x1C,0x02,0xF0,0xD1,0xFB,0x90, \ +0xBD,0x50,0x09,0x00,0x02,0x90,0xB5,0xFE,0xF7,0xD7,0xFD,0x1E,0x4F,0xF9,0x6A, \ +0x40,0x1A,0x41,0x00,0x78,0x7F,0x49,0x08,0x01,0x28,0x01,0xD1,0xB8,0x6A,0x00, \ +0xE0,0x78,0x6A,0x3B,0x68,0x19,0x4A,0x00,0x2B,0x1C,0xD1,0x84,0x00,0x93,0x8B, \ +0x24,0x18,0xA4,0x00,0xE2,0x18,0x51,0x1A,0x8A,0x42,0x00,0xD2,0x11,0x1C,0x00, \ +0x28,0x0F,0xD1,0x01,0x20,0x78,0x72,0xB8,0x7B,0x03,0x28,0x05,0xD1,0x0D,0x29, \ +0x04,0xD9,0xC8,0x1F,0x01,0x38,0x00,0xF0,0x5E,0xF9,0x90,0xBD,0x00,0x20,0x00, \ +0xF0,0x5A,0xF9,0x90,0xBD,0x3B,0x62,0x09,0xE0,0x83,0x00,0xD2,0x8B,0x18,0x18, \ +0x80,0x00,0x80,0x18,0x41,0x1A,0x88,0x42,0x00,0xD2,0x01,0x1C,0x3A,0x62,0x08, \ +0x20,0x00,0xF0,0x70,0xFB,0x04,0x20,0x78,0x72,0x90,0xBD,0x00,0x00,0x50,0x09, \ +0x00,0x02,0x94,0x01,0x00,0x02,0x00,0xB5,0x04,0x49,0x02,0x0A,0x8A,0x74,0xC8, \ +0x74,0x03,0x21,0x11,0x20,0xFE,0xF7,0xE3,0xFD,0x00,0xBD,0xD8,0x07,0x00,0x02, \ +0xB0,0xB5,0x82,0xB0,0x11,0x4D,0x01,0x20,0x68,0x74,0x11,0x4F,0x11,0x48,0x00, \ +0x24,0xBC,0x82,0x38,0x82,0xBC,0x80,0x1E,0x20,0x38,0x80,0x02,0x20,0xB8,0x82, \ +0xC2,0x1E,0x00,0x92,0x01,0x22,0x1A,0x21,0x0C,0x48,0x01,0xAB,0x02,0xF0,0x71, \ +0xFC,0x6C,0x74,0x3C,0x83,0xBC,0x82,0x01,0x98,0x81,0x08,0x06,0xD3,0x00,0x09, \ +0x02,0xD3,0x82,0x20,0x02,0xB0,0xB0,0xBD,0x20,0x1C,0xFB,0xE7,0x42,0x20,0xF9, \ +0xE7,0x50,0x09,0x00,0x02,0x20,0x00,0x20,0x0F,0xA0,0x8C,0x00,0x00,0xE4,0x06, \ +0x00,0x02,0xF0,0xB5,0xFF,0x20,0x01,0x25,0xAD,0x06,0xF5,0x30,0x29,0x69,0x89, \ +0x08,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF8,0xD8,0x6D,0x4E,0x00,0x27, \ +0xB0,0x7D,0x6D,0x4C,0x00,0x28,0x11,0xD0,0x30,0x6E,0xFF,0xF7,0xB1,0xFF,0x60, \ +0x79,0x11,0x23,0x9B,0x02,0x03,0x43,0x03,0x22,0xF1,0x6D,0xB0,0x6D,0x12,0x03, \ +0x00,0xF0,0xD9,0xF8,0xB7,0x75,0x01,0x20,0xFE,0xF7,0x3D,0xFD,0xF0,0xBD,0xF0, \ +0x7B,0x00,0x28,0x1A,0xD0,0xA0,0x6B,0x61,0x49,0x80,0x7D,0x89,0x7A,0x88,0x42, \ +0x00,0xDB,0x08,0x1C,0x5F,0x49,0x40,0x00,0x08,0x5A,0xFF,0xF7,0x93,0xFF,0x60, \ 
+0x79,0x11,0x23,0x9B,0x02,0x03,0x43,0x03,0x22,0x12,0x03,0x10,0x21,0x5A,0x48, \ +0x00,0xF0,0xBB,0xF8,0x01,0x20,0xFE,0xF7,0x20,0xFD,0xF7,0x73,0xF0,0xBD,0xF0, \ +0x7D,0x00,0x28,0xDD,0xD0,0x51,0x4E,0xF0,0x6E,0xFF,0xF7,0x7D,0xFF,0x76,0x6E, \ +0x70,0x78,0xC0,0x09,0x4E,0xD3,0x4D,0x4A,0xD0,0x7A,0x00,0x28,0x00,0xD0,0xD7, \ +0x72,0x07,0x20,0x40,0x06,0x81,0x69,0x08,0x23,0x19,0x43,0x81,0x61,0x81,0x69, \ +0x99,0x43,0x81,0x61,0xE8,0x68,0x00,0xF0,0x92,0xFF,0x01,0x23,0x9B,0x03,0x9A, \ +0x08,0x1C,0x21,0x47,0x48,0x00,0xF0,0x93,0xF8,0xFF,0x20,0x46,0x49,0xF5,0x30, \ +0x4A,0x68,0xD2,0x0B,0x03,0xD3,0x02,0x1C,0x01,0x38,0x00,0x2A,0xF8,0xD1,0x43, \ +0x48,0x03,0x21,0x00,0x78,0x49,0x06,0x02,0x28,0x02,0xD1,0x81,0x20,0x88,0x60, \ +0x01,0xE0,0x01,0x20,0x88,0x60,0x62,0x7A,0x00,0x2A,0x0F,0xD0,0xA0,0x7A,0x00, \ +0x28,0x03,0xD0,0x00,0x28,0x01,0xDD,0x60,0x69,0x01,0xE0,0x60,0x69,0x06,0x38, \ +0xA1,0x89,0x20,0x39,0x02,0x2A,0x08,0xD1,0x2E,0x4A,0xD7,0x75,0x05,0xE0,0x2D, \ +0x4A,0x50,0x6E,0x91,0x6E,0x18,0x30,0x20,0x39,0xD7,0x75,0x62,0x79,0x17,0x23, \ +0x9B,0x02,0x13,0x43,0x01,0x22,0x52,0x03,0x00,0xF0,0x5D,0xF8,0x33,0xE0,0x60, \ +0x7A,0x00,0x28,0x23,0xD0,0xA0,0x7A,0x00,0x28,0x01,0xD0,0x18,0x25,0x00,0xE0, \ +0x1E,0x25,0x01,0x23,0x9B,0x03,0x20,0x48,0x9A,0x08,0x29,0x1C,0x40,0x6E,0x00, \ +0xF0,0x4B,0xF8,0xFF,0x20,0x22,0x49,0xF5,0x30,0x4A,0x68,0xD2,0x0B,0x03,0xD3, \ +0x02,0x1C,0x01,0x38,0x00,0x2A,0xF8,0xD1,0x60,0x79,0x11,0x23,0x9B,0x02,0x03, \ +0x43,0xA0,0x89,0x41,0x1B,0x01,0x22,0x52,0x03,0x60,0x69,0x00,0xF0,0x36,0xF8, \ +0x0A,0xE0,0x60,0x79,0x11,0x23,0x9B,0x02,0x03,0x43,0x10,0x48,0x03,0x22,0x81, \ +0x6E,0x40,0x6E,0x12,0x03,0x00,0xF0,0x2A,0xF8,0x0C,0x48,0xC7,0x75,0x0B,0x48, \ +0x42,0x7F,0xC1,0x1D,0x09,0x31,0x01,0x2A,0x00,0xD1,0x4F,0x73,0x50,0x30,0x07, \ +0x71,0x30,0x79,0x40,0x08,0x02,0xD2,0x60,0x7A,0x01,0x28,0x03,0xD1,0x01,0x20, \ +0xFE,0xF7,0x7E,0xFC,0xF0,0xBD,0x00,0x20,0xFE,0xF7,0x7A,0xFC,0xF0,0xBD,0x00, \ +0x00,0x50,0x09,0x00,0x02,0x94,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0xC0,0x01, \ +0x00,0x02,0xF8,0x07,0x00,0x02,0x64,0x07,0x00,0x02,0x40,0x00,0x00,0x04,0x5C, \ +0x00,0x00,0x02,0xB0,0xB4,0x06,0x4C,0x1F,0x1C,0x65,0x68,0xEB,0x0B,0x04,0xD2, \ +0x0A,0x43,0x21,0x05,0x4A,0x63,0x88,0x63,0x67,0x60,0xB0,0xBC,0xF7,0x46,0x00, \ +0x00,0x40,0x00,0x00,0x04,0xF0,0xB5,0x52,0x49,0x07,0x1C,0x8A,0x7A,0x00,0x20, \ +0x00,0x2A,0x61,0xD1,0x0A,0x7A,0x00,0x2A,0x6B,0xD0,0x4A,0x7A,0x01,0x2A,0x5B, \ +0xD1,0x0A,0x7B,0x01,0x2A,0x58,0xD1,0xCA,0x7A,0x00,0x2A,0x55,0xD1,0xCE,0x1D, \ +0x49,0x36,0xF1,0x78,0xF5,0x1F,0x39,0x3D,0x00,0x29,0x0F,0xD1,0x45,0x49,0xCA, \ +0x1D,0x69,0x32,0x12,0x78,0x00,0x2A,0x09,0xD1,0x6A,0x7B,0x01,0x2A,0x06,0xD0, \ +0x32,0x79,0x00,0x2A,0x03,0xD1,0x0C,0x1C,0x89,0x7C,0x00,0x29,0x09,0xD0,0x3E, \ +0x4A,0x3F,0x4B,0xD1,0x79,0xD9,0x71,0x3B,0x49,0x88,0x75,0x01,0x20,0xC8,0x75, \ +0xCC,0x6E,0x21,0xE0,0x21,0x7F,0x00,0x29,0x12,0xD0,0x39,0x4B,0x37,0x4A,0x99, \ +0x6B,0x89,0x7D,0x92,0x7A,0x91,0x42,0x01,0xDA,0xD9,0x71,0x00,0xE0,0xDA,0x71, \ +0xA0,0x75,0x01,0x21,0xE1,0x73,0xE0,0x75,0xD8,0x79,0x32,0x49,0x40,0x00,0x0C, \ +0x5A,0x0B,0xE0,0xE2,0x7E,0x21,0x1C,0x00,0x2A,0x27,0xD0,0xCC,0x6E,0x88,0x75, \ +0x01,0x20,0x2C,0x4B,0xC8,0x75,0x98,0x6B,0x80,0x7D,0xD8,0x71,0x00,0xF0,0x40, \ +0xF9,0x26,0x4B,0xD8,0x7B,0x00,0x28,0x03,0xD0,0x00,0x20,0xFE,0xF7,0x3B,0xFC, \ +0x0C,0xE0,0x24,0x4B,0xD8,0x79,0x03,0x28,0x05,0xD1,0x20,0x4B,0x58,0x6B,0xFE, \ +0xF7,0x32,0xFC,0x03,0xE0,0x3A,0xE0,0x00,0x20,0xFE,0xF7,0x2D,0xFC,0x01,0x21, \ +0x89,0x06,0x00,0x2F,0x05,0xD0,0x05,0x2F,0x03,0xD9,0x48,0x6A,0x38,0x18,0x02, \ +0xE0,0x2D,0xE0,0x48,0x6A,0x0A,0x30,0x16,0x4B,0x02,0x22,0x9A,0x73,0x18,0x4B, \ 
+0x92,0x03,0x5A,0x60,0x08,0x62,0x6A,0x7B,0x01,0x2A,0x02,0xD0,0x32,0x79,0x00, \ +0x2A,0x15,0xD0,0x10,0x4A,0x8D,0x6A,0x4E,0x6A,0xD1,0x79,0x13,0x4A,0x12,0x4F, \ +0x89,0x00,0x51,0x58,0x0D,0x4B,0x08,0x18,0x38,0x60,0xD8,0x79,0x00,0xF0,0x3A, \ +0xFD,0x39,0x68,0x40,0x18,0x38,0x60,0xB0,0x42,0x00,0xD2,0x01,0x35,0x7D,0x60, \ +0x38,0x1D,0x06,0x4F,0x3C,0x60,0xF8,0x79,0x00,0xF0,0x2D,0xFD,0x39,0x68,0x40, \ +0x18,0x38,0x60,0x01,0x20,0xF0,0xBD,0x50,0x09,0x00,0x02,0x08,0x01,0x00,0x02, \ +0x94,0x01,0x00,0x02,0xC0,0x01,0x00,0x02,0x40,0x00,0x00,0x04,0x28,0x08,0x00, \ +0x02,0x28,0x09,0x00,0x02,0xF8,0xB5,0x38,0x49,0x04,0x1C,0x88,0x6B,0x37,0x4A, \ +0x85,0x7D,0x46,0x68,0x92,0x7A,0x00,0x27,0x95,0x42,0x00,0xDB,0x15,0x1C,0x34, \ +0x49,0x80,0x8A,0x49,0x89,0x88,0x42,0x2E,0xDD,0x00,0x2C,0x2C,0xD1,0x2F,0x49, \ +0x88,0x7A,0x00,0x28,0x28,0xD1,0x30,0x49,0xB4,0x20,0x08,0x70,0x2F,0x48,0x30, \ +0x4A,0xC0,0x88,0x41,0x00,0x09,0x18,0x68,0x00,0x10,0x5A,0x40,0x00,0x08,0x18, \ +0x2D,0x49,0xC9,0x6E,0x40,0x18,0x28,0x49,0x48,0x80,0x28,0x1C,0x00,0xF0,0xEF, \ +0xFC,0x26,0x49,0x49,0x88,0x40,0x18,0x24,0x49,0x48,0x80,0x31,0x1D,0x06,0x22, \ +0x26,0x48,0x02,0xF0,0x9F,0xFE,0xF1,0x1D,0x03,0x31,0x06,0x22,0x24,0x48,0x02, \ +0xF0,0x99,0xFE,0x01,0x20,0x20,0x49,0x01,0x26,0x08,0x77,0x03,0xE0,0x01,0x20, \ +0x1E,0x49,0x00,0x26,0xC8,0x76,0xFF,0xF7,0x64,0xFD,0xFF,0xF7,0xCE,0xFD,0x00, \ +0x90,0x00,0x98,0x00,0x28,0x1E,0xD1,0x12,0x49,0x00,0x2E,0x8A,0x6B,0x50,0x73, \ +0x01,0xD1,0x00,0x2C,0x01,0xD0,0x01,0x2E,0x19,0xD1,0x13,0x4A,0x68,0x00,0x10, \ +0x5A,0x10,0x4A,0xD2,0x88,0x49,0x8C,0x80,0x18,0x41,0x18,0x01,0x20,0x00,0xF0, \ +0x12,0xF9,0x01,0x2E,0x03,0xD1,0x0D,0x49,0x03,0x20,0x48,0x60,0x02,0xE0,0x0B, \ +0x49,0x02,0x20,0x48,0x60,0x01,0x27,0x03,0xE0,0x03,0x49,0x04,0x20,0x89,0x6B, \ +0x48,0x73,0x38,0x1C,0xF8,0xBD,0x00,0x00,0x94,0x01,0x00,0x02,0x08,0x01,0x00, \ +0x02,0xC4,0x00,0x00,0x02,0xF8,0x07,0x00,0x02,0x00,0x00,0x00,0x02,0xB8,0x01, \ +0x00,0x02,0x50,0x09,0x00,0x02,0xFC,0x07,0x00,0x02,0x02,0x08,0x00,0x02,0x90, \ +0xB5,0x04,0x31,0xCF,0x00,0x01,0x28,0x17,0xD0,0x02,0x28,0x17,0xD0,0x03,0x28, \ +0x27,0xD1,0x0B,0x20,0x39,0x1C,0x02,0xF0,0x87,0xFE,0x0C,0x1C,0x79,0x1A,0x0B, \ +0x20,0x02,0xF0,0x82,0xFE,0x07,0x1C,0x00,0x21,0x00,0x2C,0x0F,0x48,0x18,0xD9, \ +0x01,0x37,0x04,0x2C,0x13,0xD2,0x01,0x21,0x41,0x63,0x13,0xE0,0x7F,0x08,0x11, \ +0xE0,0x79,0x00,0x0B,0x20,0x0F,0x1C,0x02,0xF0,0x70,0xFE,0x0C,0x1C,0x79,0x1A, \ +0x0B,0x20,0x02,0xF0,0x6B,0xFE,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37,0x02, \ +0xE0,0x41,0x63,0x00,0xE0,0x41,0x63,0x38,0x1C,0x90,0xBD,0x00,0x00,0x50,0x09, \ +0x00,0x02,0xFF,0x21,0x10,0x48,0x31,0x31,0x01,0x80,0x0F,0x49,0x09,0x8C,0xCA, \ +0x1D,0x31,0x32,0x42,0x80,0xCA,0x1D,0x0E,0x32,0x82,0x80,0x0B,0x31,0xC1,0x80, \ +0x0B,0x48,0xA0,0x21,0x01,0x80,0x50,0x21,0x41,0x80,0x1E,0x21,0x81,0x80,0x0F, \ +0x21,0xC1,0x80,0x08,0x48,0xC0,0x21,0x01,0x60,0x60,0x21,0x41,0x60,0x23,0x21, \ +0x81,0x60,0x12,0x21,0xC1,0x60,0xF7,0x46,0x00,0x00,0xB8,0x01,0x00,0x02,0x94, \ +0x01,0x00,0x02,0xC0,0x01,0x00,0x02,0x28,0x09,0x00,0x02,0x00,0xB5,0x07,0x48, \ +0xC1,0x79,0x82,0x79,0x91,0x42,0x07,0xD0,0xC1,0x79,0x81,0x71,0x82,0x79,0x04, \ +0x49,0x89,0x5C,0x41,0x71,0xFE,0xF7,0xC2,0xFA,0x00,0xBD,0x00,0x00,0x94,0x01, \ +0x00,0x02,0xC8,0x01,0x00,0x02,0x05,0x48,0x81,0x8F,0x49,0x00,0x01,0x31,0x81, \ +0x87,0x04,0x49,0x82,0x8F,0xC9,0x89,0x8A,0x42,0x00,0xDD,0x81,0x87,0xF7,0x46, \ +0x50,0x09,0x00,0x02,0xC4,0x00,0x00,0x02,0x1A,0x49,0x19,0x48,0x89,0x6B,0x1A, \ +0x4B,0x89,0x7D,0x42,0x78,0x5B,0x5C,0x00,0x21,0x9A,0x42,0x15,0xDD,0x41,0x70, \ +0x01,0x70,0xC2,0x78,0x01,0x21,0x00,0x2A,0x0D,0xDD,0xC2,0x78,0x04,0x2A,0x0A, \ 
+0xDA,0xC2,0x78,0x01,0x3A,0xC2,0x70,0xC2,0x78,0x00,0x2A,0x04,0xD1,0x10,0x4A, \ +0x52,0x7A,0x01,0x2A,0x00,0xD1,0xC1,0x70,0x81,0x70,0xF7,0x46,0x82,0x78,0x00, \ +0x2A,0xFB,0xD0,0x02,0x78,0x02,0x2A,0xF8,0xDD,0x41,0x70,0x01,0x70,0xC2,0x78, \ +0x01,0x32,0x12,0x06,0x12,0x0E,0xC2,0x70,0x03,0x2A,0xEF,0xDD,0x81,0x70,0x03, \ +0x21,0xC1,0x70,0xF7,0x46,0x00,0x00,0xD0,0x01,0x00,0x02,0x94,0x01,0x00,0x02, \ +0xDE,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0xB5,0x02,0xF0,0x0D,0xFE,0x02, \ +0x49,0x8A,0x8F,0x10,0x40,0x48,0x62,0x00,0xBD,0x50,0x09,0x00,0x02,0xB0,0xB5, \ +0x01,0x20,0x80,0x06,0x85,0x6A,0x41,0x6A,0x0E,0x48,0x00,0x88,0x84,0x02,0x20, \ +0x1C,0x02,0xF0,0xBF,0xFD,0x0F,0x1C,0x00,0x2D,0x10,0xD9,0x20,0x1C,0x29,0x1C, \ +0x02,0xF0,0xB8,0xFD,0x0D,0x1C,0x00,0x21,0xC9,0x43,0x20,0x1C,0x02,0xF0,0xB2, \ +0xFD,0x48,0x1C,0x45,0x43,0xE9,0x19,0x20,0x1C,0x02,0xF0,0xAC,0xFD,0x0F,0x1C, \ +0x38,0x1C,0xB0,0xBD,0x00,0x00,0x84,0x00,0x00,0x02,0x90,0xB5,0x0C,0x1C,0x07, \ +0x1C,0x00,0xF0,0x15,0xF8,0x01,0x20,0x80,0x06,0x40,0x6A,0x06,0x4B,0x20,0x18, \ +0xB9,0x00,0xC9,0x18,0xC8,0x62,0x01,0x21,0x78,0x1E,0x81,0x40,0x03,0x48,0x82, \ +0x69,0x11,0x43,0x81,0x61,0x90,0xBD,0x00,0x00,0x40,0x00,0x00,0x04,0x80,0x00, \ +0x00,0x04,0x80,0xB4,0x47,0x1E,0x01,0x20,0x04,0x49,0xB8,0x40,0x8A,0x69,0xC0, \ +0x43,0x10,0x40,0x88,0x61,0x80,0xBC,0xF7,0x46,0x00,0x00,0x80,0x00,0x00,0x04, \ +0xF0,0xB5,0x84,0xB0,0x0F,0x20,0x00,0x06,0x00,0x88,0xE1,0x4C,0x00,0x27,0x03, \ +0x90,0xE0,0x7C,0x00,0x28,0x04,0xD0,0x03,0x98,0x05,0xF0,0x40,0xFE,0x00,0x28, \ +0x60,0xD1,0x03,0x98,0xDC,0x4B,0x18,0x40,0x1C,0xD0,0xDB,0x48,0x00,0x68,0x02, \ +0x90,0x02,0x98,0x80,0x08,0x02,0xD2,0x02,0x98,0x40,0x08,0x13,0xD3,0x01,0x20, \ +0x80,0x06,0x00,0x6B,0x02,0x99,0x40,0x00,0x40,0x08,0xC9,0x08,0x05,0xD3,0xE0, \ +0x62,0x02,0x27,0x07,0x20,0xFF,0xF7,0xC8,0xFF,0x05,0xE0,0x20,0x63,0x01,0x27, \ +0xFA,0x21,0x07,0x20,0xFF,0xF7,0xA7,0xFF,0x03,0x98,0xCD,0x4B,0xCA,0x49,0x18, \ +0x40,0xCD,0x1D,0xCE,0x1D,0x49,0x36,0x09,0x35,0x00,0x28,0x5C,0xD0,0xD8,0x04, \ +0xC1,0x6B,0x01,0x91,0x01,0x99,0x89,0x09,0x0D,0xD3,0x00,0x6A,0x40,0x00,0x40, \ +0x08,0x20,0x63,0xA0,0x7A,0x00,0x28,0x01,0xD0,0x00,0xF0,0xD0,0xFA,0xFF,0xF7, \ +0x5E,0xFC,0x01,0x27,0x01,0x22,0x62,0x73,0x01,0x98,0x12,0x23,0x18,0x40,0x44, \ +0xD0,0x00,0x20,0x60,0x73,0xA0,0x7A,0x00,0x28,0x19,0xD0,0x01,0x98,0x80,0x08, \ +0x0E,0xD3,0x20,0x6B,0x21,0x6E,0x40,0x18,0xE0,0x62,0xB8,0x48,0x00,0x78,0x00, \ +0xF0,0x1D,0xFB,0xE1,0x6A,0x40,0x18,0xE0,0x62,0xE0,0x6A,0x40,0x00,0x40,0x08, \ +0xE0,0x62,0x01,0x20,0xFE,0xF7,0x9D,0xF9,0x00,0x20,0xA0,0x72,0xA0,0x75,0x0C, \ +0xE0,0x4C,0xE1,0xFF,0xF7,0x36,0xFF,0x01,0x98,0x80,0x08,0x06,0xD3,0xAD,0x49, \ +0x20,0x6B,0x09,0x68,0x40,0x18,0x40,0x00,0x40,0x08,0xE0,0x62,0x00,0x2F,0x00, \ +0xD1,0x02,0x27,0x01,0x98,0x40,0x09,0x02,0xD3,0x01,0x20,0xFE,0xF7,0x83,0xF9, \ +0xA0,0x7B,0x02,0x28,0x0D,0xD1,0x68,0x7B,0x01,0x28,0x01,0xD1,0x00,0x20,0x68, \ +0x73,0x00,0x20,0x30,0x71,0x00,0xF0,0x33,0xFA,0x01,0x99,0x9F,0x48,0x00,0x22, \ +0x01,0xF0,0x72,0xFF,0x03,0x98,0x9E,0x4B,0x18,0x40,0x73,0xD0,0x18,0x05,0xC0, \ +0x68,0x00,0x90,0x00,0x98,0x40,0x09,0x15,0xD3,0xE0,0x7A,0x03,0x28,0x12,0xD1, \ +0x04,0x20,0xE0,0x72,0x00,0x98,0x19,0x05,0xC9,0x68,0x96,0x4A,0x08,0x43,0x00, \ +0x90,0x10,0x68,0x40,0x68,0x40,0x78,0xC0,0x09,0x05,0xD3,0x00,0x98,0x40,0x08, \ +0x02,0xD2,0x92,0x49,0x00,0x20,0x48,0x71,0x00,0x98,0x80,0x08,0x3F,0xD3,0x07, \ +0x20,0xFF,0xF7,0x34,0xFF,0xB0,0x79,0x01,0x28,0x0E,0xD1,0xE0,0x1D,0x69,0x30, \ +0x81,0x7A,0x01,0x29,0x09,0xD1,0x02,0x21,0x81,0x72,0x89,0x48,0x01,0x8B,0xC0, \ +0x8A,0x08,0x1A,0x81,0x02,0x04,0x20,0xFF,0xF7,0x08,0xFF,0x60,0x7A,0x06,0x28, \ 
+0x04,0xD1,0x02,0x21,0x61,0x72,0x08,0x20,0xFF,0xF7,0x1A,0xFF,0x00,0x20,0x80, \ +0x49,0x01,0x22,0xC8,0x80,0x22,0x73,0xE0,0x72,0xA0,0x72,0xA0,0x75,0x20,0x74, \ +0x08,0x71,0x4A,0x71,0xFE,0xF7,0xD1,0xFB,0x00,0x99,0x08,0x43,0x00,0x90,0x60, \ +0x68,0x04,0x28,0x0F,0xD1,0x01,0x20,0xFF,0xF7,0x04,0xFF,0x20,0x7B,0x01,0x28, \ +0x09,0xD1,0xE0,0x7A,0x00,0x28,0x06,0xD1,0xFE,0xF7,0x46,0xFE,0x00,0x22,0x10, \ +0x21,0x72,0x48,0x01,0xF0,0x0F,0xFF,0x00,0x98,0x80,0x09,0x73,0xD3,0x01,0x20, \ +0x20,0x73,0x20,0x74,0x02,0x27,0x6A,0x4A,0x80,0x06,0xC1,0x6A,0x12,0x68,0x51, \ +0x61,0xC1,0x6A,0x49,0x00,0x49,0x08,0xE1,0x62,0x61,0x7A,0x05,0x29,0x0C,0xD1, \ +0xA1,0x6B,0x00,0xE0,0xA1,0xE0,0x40,0x6A,0x81,0x42,0x06,0xD2,0x02,0x21,0x65, \ +0x48,0x61,0x72,0x81,0x69,0x80,0x23,0x99,0x43,0x81,0x61,0x00,0x98,0xC0,0x08, \ +0x0E,0xD3,0x5D,0x48,0x01,0x21,0x01,0x71,0xC1,0x88,0x00,0x29,0x33,0xDD,0xC1, \ +0x88,0x01,0x23,0xDB,0x03,0x99,0x42,0x2E,0xDA,0xC0,0x88,0xFF,0xF7,0x86,0xF8, \ +0x2A,0xE0,0x56,0x49,0x00,0x20,0x08,0x71,0xA0,0x72,0xC1,0x20,0x20,0x60,0x01, \ +0x20,0xFE,0xF7,0xD4,0xF8,0x20,0x7E,0x01,0x28,0x14,0xD1,0x61,0x7E,0x00,0x29, \ +0x00,0xD0,0x00,0x20,0x60,0x76,0x51,0x48,0xC1,0x78,0x89,0x06,0x89,0x0E,0xC1, \ +0x70,0x61,0x7E,0x01,0x29,0x03,0xD1,0xC1,0x78,0x40,0x23,0x19,0x43,0xC1,0x70, \ +0xC1,0x78,0x03,0x20,0xFE,0xF7,0x46,0xF9,0x60,0x68,0x04,0x28,0x06,0xD1,0xFE, \ +0xF7,0xE9,0xFD,0x00,0x22,0x10,0x21,0x44,0x48,0x01,0xF0,0xB2,0xFE,0xA0,0x7E, \ +0x00,0x28,0x22,0xD0,0x3F,0x48,0x00,0x79,0x00,0x28,0x1E,0xD0,0x20,0x7C,0x00, \ +0x28,0x1B,0xD0,0x20,0x68,0x00,0x28,0x18,0xD1,0x06,0x20,0xFF,0xF7,0x8A,0xFE, \ +0x00,0x20,0xE8,0x73,0xA0,0x76,0x70,0x70,0x69,0x7B,0x01,0x29,0x0D,0xD1,0x68, \ +0x73,0xA0,0x7B,0x00,0xE0,0x0C,0xE0,0x03,0x28,0x08,0xD1,0x01,0x20,0xA0,0x73, \ +0x00,0x22,0x10,0x21,0x2D,0x48,0x01,0xF0,0x8E,0xFE,0x00,0xE0,0x68,0x73,0x00, \ +0x20,0xA0,0x76,0xE0,0x7A,0x04,0x28,0x2F,0xD1,0x20,0x7C,0x00,0x28,0x2C,0xD0, \ +0x60,0x7B,0x00,0x28,0x02,0xD1,0x00,0x2F,0x00,0xD1,0x02,0x27,0x00,0x20,0xE0, \ +0x72,0x25,0x4D,0x20,0x74,0x29,0x79,0x01,0x29,0x15,0xD1,0x21,0x68,0x00,0x29, \ +0x12,0xD1,0x69,0x79,0x00,0x29,0x0F,0xD0,0x1F,0x4A,0x11,0x68,0x48,0x72,0xA9, \ +0x68,0xE9,0x60,0x28,0x70,0xFE,0xF7,0xE3,0xF9,0x28,0x78,0x01,0x28,0x04,0xD1, \ +0x00,0x22,0x01,0x21,0x1B,0x48,0x01,0xF0,0x61,0xFE,0x60,0x68,0x04,0x28,0x06, \ +0xD1,0xFE,0xF7,0x8E,0xFD,0x00,0x22,0x10,0x21,0x16,0x48,0x01,0xF0,0x57,0xFE, \ +0x01,0x2F,0x02,0xD1,0x00,0xF0,0x1B,0xF9,0x03,0xE0,0x02,0x2F,0x01,0xD1,0x00, \ +0xF0,0x46,0xF9,0x03,0x98,0x00,0xF0,0x27,0xF8,0x03,0x98,0x11,0x4B,0x18,0x40, \ +0x01,0xD0,0x01,0xF0,0x71,0xF8,0x04,0xB0,0xF0,0xBD,0x00,0x00,0x50,0x09,0x00, \ +0x02,0x20,0x20,0x00,0x00,0x40,0x00,0x00,0x04,0x80,0x80,0x00,0x00,0x9B,0x01, \ +0x00,0x02,0x94,0x01,0x00,0x02,0xE4,0x06,0x00,0x02,0x40,0x40,0x00,0x00,0x4C, \ +0x01,0x00,0x02,0xE4,0x01,0x00,0x02,0xC4,0x00,0x00,0x02,0x44,0x07,0x00,0x02, \ +0x80,0x00,0x00,0x04,0xD8,0x07,0x00,0x02,0x08,0x08,0x00,0x00,0xF0,0xB5,0x64, \ +0x4B,0x07,0x1C,0x18,0x40,0x01,0x25,0x00,0x28,0x62,0x4E,0x6E,0xD0,0x62,0x49, \ +0xCC,0x69,0x60,0x08,0x3E,0xD3,0x88,0x69,0x40,0x08,0x3B,0xD3,0x88,0x69,0xA8, \ +0x43,0x88,0x61,0x5E,0x49,0x48,0x68,0x04,0x28,0x0A,0xD1,0xFE,0xF7,0x3E,0xFD, \ +0x01,0x20,0xFE,0xF7,0x09,0xF8,0x00,0x22,0x10,0x21,0x5A,0x48,0x01,0xF0,0x04, \ +0xFE,0x29,0xE0,0x4A,0x68,0x58,0x48,0x02,0x2A,0x05,0xD1,0x00,0x68,0x45,0x73, \ +0x57,0x48,0x00,0x68,0x45,0x70,0x05,0xE0,0x4A,0x68,0x03,0x2A,0x02,0xD1,0x00, \ +0x68,0x02,0x22,0x42,0x73,0x4F,0x48,0x45,0x60,0x00,0xF0,0xAB,0xF8,0x01,0x20, \ +0xFD,0xF7,0xEC,0xFF,0x4B,0x48,0x40,0x7C,0x01,0x28,0x05,0xD1,0x00,0x22,0x10, \ 
+0x21,0x30,0x1C,0x01,0xF0,0xE3,0xFD,0x08,0xE0,0x46,0x49,0x48,0x7C,0x02,0x28, \ +0x04,0xD1,0x00,0x22,0x10,0x21,0x47,0x48,0x01,0xF0,0xD9,0xFD,0x41,0x48,0x80, \ +0x69,0xC0,0x09,0x03,0xD3,0xE0,0x09,0x01,0xD3,0x07,0xF0,0x2B,0xFA,0x3D,0x48, \ +0x80,0x69,0x00,0x0A,0x32,0xD3,0x20,0x0A,0x30,0xD3,0x3A,0x48,0x80,0x23,0x81, \ +0x69,0x99,0x43,0x81,0x61,0x38,0x48,0x41,0x7A,0x05,0x29,0x02,0xD0,0x41,0x7A, \ +0x06,0x29,0x15,0xD1,0x00,0x21,0x81,0x63,0x01,0x7A,0x01,0x29,0x0D,0xD1,0x45, \ +0x72,0x81,0x7A,0x00,0x29,0x1B,0xD1,0x01,0x7B,0x01,0x29,0x18,0xD1,0xC0,0x7A, \ +0x00,0x28,0x15,0xD1,0xFF,0xF7,0xD0,0xF9,0x12,0xE0,0x3A,0xE0,0x02,0x22,0x42, \ +0x72,0x0E,0xE0,0x45,0x72,0x41,0x7F,0x01,0x29,0x02,0xD1,0x00,0x21,0x81,0x62, \ +0x01,0xE0,0x00,0x21,0x41,0x62,0x80,0x7B,0x03,0x28,0x02,0xD1,0x00,0x20,0xFF, \ +0xF7,0x42,0xFB,0x20,0x09,0x05,0xD3,0x20,0x48,0x80,0x69,0x00,0x09,0x01,0xD3, \ +0x04,0xF0,0x50,0xF8,0xA0,0x08,0x16,0xD3,0x1C,0x48,0x81,0x69,0x89,0x08,0x12, \ +0xD3,0x81,0x69,0x02,0x23,0x99,0x43,0x81,0x61,0x19,0x48,0xC1,0x1D,0x49,0x31, \ +0x89,0x79,0x05,0x29,0x08,0xD1,0x1B,0x49,0x49,0x79,0x03,0x29,0x04,0xD1,0x70, \ +0x30,0x81,0x78,0x08,0x23,0x19,0x43,0x81,0x70,0xA0,0x09,0x05,0xD3,0x10,0x48, \ +0x80,0x69,0x80,0x09,0x01,0xD3,0x07,0xF0,0x6F,0xF9,0x14,0x48,0x38,0x40,0x06, \ +0xD0,0x13,0x48,0x00,0x21,0x05,0x70,0x79,0x20,0x40,0x05,0x01,0x83,0x81,0x82, \ +0xFF,0x20,0x02,0x30,0x38,0x40,0x06,0xD0,0x07,0xF0,0xA3,0xF9,0x00,0x22,0x10, \ +0x21,0x30,0x1C,0x01,0xF0,0x5C,0xFD,0xF0,0xBD,0x00,0x00,0x10,0x10,0x00,0x00, \ +0xE4,0x06,0x00,0x02,0x80,0x00,0x00,0x04,0x50,0x09,0x00,0x02,0x44,0x07,0x00, \ +0x02,0xCC,0x01,0x00,0x02,0xD8,0x01,0x00,0x02,0x04,0x07,0x00,0x02,0xB4,0x00, \ +0x00,0x02,0x02,0x02,0x00,0x00,0xDC,0x01,0x00,0x02,0x04,0x48,0x01,0x21,0x81, \ +0x73,0x00,0x21,0xC1,0x75,0xC1,0x73,0xC1,0x76,0x01,0x77,0xF7,0x46,0x00,0x00, \ +0x50,0x09,0x00,0x02,0x80,0xB5,0x16,0x4F,0x00,0x20,0x38,0x72,0x79,0x7A,0x02, \ +0x20,0x01,0x29,0x1C,0xD0,0x04,0x29,0x19,0xD1,0x78,0x72,0x08,0x20,0xFF,0xF7, \ +0x12,0xFD,0x38,0x6B,0xF9,0x6A,0x40,0x1A,0x40,0x00,0x39,0x6A,0x40,0x08,0x81, \ +0x42,0x0D,0xD2,0x39,0x6A,0x41,0x1A,0x14,0x20,0x02,0xF0,0x91,0xFA,0x79,0x7F, \ +0x01,0x29,0x08,0xD1,0xB9,0x6A,0x81,0x42,0x02,0xD3,0xB9,0x6A,0x08,0x1A,0xB8, \ +0x62,0x80,0xBD,0x78,0x72,0x80,0xBD,0x79,0x6A,0x81,0x42,0xF9,0xD3,0x79,0x6A, \ +0x08,0x1A,0x78,0x62,0x80,0xBD,0x50,0x09,0x00,0x02,0x00,0xB5,0x0A,0x48,0x01, \ +0x21,0x01,0x72,0x01,0x7B,0x01,0x29,0x0D,0xD1,0xC1,0x7A,0x00,0x29,0x0A,0xD1, \ +0x81,0x7A,0x00,0x29,0x07,0xD1,0x41,0x7A,0x06,0x29,0x04,0xD0,0x40,0x7A,0x05, \ +0x28,0x01,0xD0,0xFF,0xF7,0x15,0xF9,0x00,0xBD,0x50,0x09,0x00,0x02,0xB0,0xB5, \ +0x20,0x4F,0x20,0x48,0x79,0x7D,0x80,0x7A,0x20,0x4C,0x81,0x42,0x02,0xDA,0x78, \ +0x7D,0x20,0x70,0x00,0xE0,0x20,0x70,0xFF,0xF7,0x22,0xFC,0x20,0x78,0x03,0x28, \ +0x03,0xD1,0x01,0x20,0xFD,0xF7,0x1E,0xFF,0x02,0xE0,0x00,0x20,0xFD,0xF7,0x1A, \ +0xFF,0xB8,0x7A,0x17,0x4D,0x02,0x28,0x02,0xD1,0xC4,0x20,0x28,0x70,0x04,0xE0, \ +0xB8,0x7A,0x01,0x28,0x01,0xD1,0xD4,0x20,0x28,0x70,0x00,0x20,0x68,0x70,0x69, \ +0x88,0x11,0x48,0x00,0x29,0x07,0xD0,0x23,0x78,0x10,0x4A,0x5B,0x00,0xC3,0x5A, \ +0xD2,0x88,0xD2,0x18,0x89,0x1A,0x69,0x80,0x0A,0x21,0xF9,0x65,0x21,0x78,0x49, \ +0x00,0x40,0x5A,0x38,0x66,0x20,0x78,0x00,0xF0,0x2B,0xF8,0x39,0x6E,0x08,0x1A, \ +0x38,0x66,0xBD,0x65,0x01,0x20,0xB8,0x75,0xB0,0xBD,0x50,0x09,0x00,0x02,0x08, \ +0x01,0x00,0x02,0x9B,0x01,0x00,0x02,0x38,0x09,0x00,0x02,0xB8,0x01,0x00,0x02, \ +0x00,0x00,0x00,0x02,0xB0,0xB4,0x0A,0x4B,0x00,0x24,0x99,0x42,0x01,0xD8,0x00, \ +0x29,0x02,0xD1,0x20,0x1C,0xB0,0xBC,0xF7,0x46,0x01,0x27,0xBF,0x06,0x3D,0x69, \ 
+0xAB,0x08,0x01,0xD3,0x20,0x1C,0xF6,0xE7,0x03,0xC7,0x08,0x3F,0x3A,0x61,0x01, \ +0x20,0xF1,0xE7,0x0E,0x06,0x00,0x00,0x00,0x28,0x01,0xD1,0xC0,0x20,0xF7,0x46, \ +0x01,0x48,0x00,0x88,0xF7,0x46,0x00,0x00,0xB4,0x01,0x00,0x02,0xF8,0xB5,0x41, \ +0x48,0x00,0x90,0x41,0x48,0xC4,0x1D,0x49,0x34,0xC7,0x1D,0x09,0x37,0x3F,0x4E, \ +0x40,0x4D,0x30,0x68,0x00,0x7A,0x20,0x28,0x01,0xD1,0xFE,0xF7,0xB6,0xF8,0x00, \ +0xF0,0x76,0xFC,0x00,0xF0,0xEE,0xF8,0x3B,0x48,0x00,0x78,0x00,0x28,0x04,0xD1, \ +0xA0,0x79,0x05,0x28,0x01,0xD0,0x06,0xF0,0x4F,0xFB,0x28,0x78,0x00,0x28,0xE9, \ +0xD0,0xB8,0x7B,0x00,0x28,0xE6,0xD1,0x35,0x48,0x01,0x78,0x01,0x29,0x03,0xD1, \ +0x00,0x21,0x01,0x70,0x03,0xF0,0xE3,0xFF,0x32,0x48,0x00,0x78,0x02,0x28,0x46, \ +0xD0,0x31,0x48,0x00,0x78,0x02,0x28,0x01,0xD1,0x06,0xF0,0x6D,0xFB,0x06,0xF0, \ +0x87,0xFC,0x05,0x1C,0x29,0x48,0x00,0x78,0x01,0x28,0x09,0xD1,0x03,0x03,0x9D, \ +0x42,0x03,0xD1,0x2A,0x49,0x10,0x20,0x09,0x68,0x08,0x73,0x00,0x21,0xB9,0x73, \ +0xC1,0xE7,0x00,0x2D,0x23,0xD0,0x01,0x23,0x1B,0x03,0x9D,0x42,0x08,0xD0,0x24, \ +0x48,0x80,0x21,0x02,0x68,0x11,0x70,0x02,0x68,0x00,0x21,0x51,0x70,0x00,0x68, \ +0x81,0x70,0xA0,0x79,0x05,0x28,0x0D,0xD1,0x00,0x98,0x40,0x79,0x01,0x28,0x09, \ +0xDD,0xC0,0x20,0x01,0xF0,0x11,0xFC,0x06,0x1C,0x28,0x1C,0x06,0xF0,0x6B,0xFB, \ +0x30,0x1C,0x01,0xF0,0x0A,0xFC,0x29,0x1C,0x00,0x22,0x17,0x48,0x01,0xF0,0x15, \ +0xFC,0x9B,0xE7,0x00,0x98,0x40,0x79,0x01,0x28,0x97,0xDD,0xA0,0x79,0x05,0x28, \ +0x94,0xD1,0x00,0xF0,0x25,0xF8,0x91,0xE7,0x06,0xF0,0x46,0xFC,0x01,0x23,0x1B, \ +0x03,0x98,0x42,0x03,0xD1,0x0B,0x49,0x10,0x20,0x09,0x68,0x08,0x73,0x00,0x21, \ +0xB9,0x73,0x84,0xE7,0x00,0x00,0xB4,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0x50, \ +0x01,0x00,0x02,0x5E,0x02,0x00,0x02,0xBB,0x02,0x00,0x02,0xDD,0x01,0x00,0x02, \ +0x53,0x02,0x00,0x02,0x40,0x01,0x00,0x02,0xCC,0x01,0x00,0x02,0xD8,0x01,0x00, \ +0x02,0x24,0x07,0x00,0x02,0x80,0xB5,0xC0,0x20,0x01,0xF0,0xD2,0xFB,0x07,0x1C, \ +0x0D,0x48,0x81,0x78,0x49,0x08,0x89,0x07,0x11,0xD1,0x81,0x78,0x09,0x09,0x0E, \ +0xD3,0x0A,0x49,0x09,0x68,0x09,0x7B,0x09,0x0A,0x09,0xD2,0xC1,0x78,0x00,0x29, \ +0x04,0xD0,0x00,0x21,0xC1,0x70,0x01,0x21,0x81,0x71,0x01,0xE0,0x06,0xF0,0x4A, \ +0xFB,0x38,0x1C,0x01,0xF0,0xB7,0xFB,0x80,0xBD,0xC0,0x09,0x00,0x02,0xCC,0x01, \ +0x00,0x02,0xB0,0xB5,0x1C,0x4C,0x01,0x20,0x1C,0x4D,0xA0,0x77,0x28,0x68,0x00, \ +0xF0,0x46,0xFE,0x29,0x68,0x00,0x20,0x4F,0x68,0x88,0x73,0x18,0x49,0x8A,0x78, \ +0x00,0x2A,0x00,0xD1,0x48,0x70,0x38,0x78,0x08,0x28,0x19,0xD1,0x20,0x7D,0x01, \ +0x28,0x06,0xD1,0x06,0x22,0xF8,0x1D,0x09,0x30,0x12,0x49,0x02,0xF0,0xD5,0xF8, \ +0x0F,0xE0,0x20,0x7D,0x02,0x28,0x0C,0xD1,0x10,0x48,0x40,0x79,0x02,0x28,0x08, \ +0xD1,0xE0,0x1D,0x49,0x30,0x80,0x79,0x05,0x28,0x03,0xD1,0x78,0x78,0x10,0x23, \ +0x18,0x43,0x78,0x70,0xF8,0x1D,0x0F,0x30,0xFD,0xF7,0xE2,0xFE,0x38,0x1C,0x06, \ +0xF0,0x5F,0xF8,0x29,0x68,0x80,0x20,0x08,0x73,0x40,0x01,0xB0,0xBD,0x50,0x09, \ +0x00,0x02,0xCC,0x01,0x00,0x02,0xD0,0x01,0x00,0x02,0x00,0x01,0x00,0x02,0xB4, \ +0x00,0x00,0x02,0x00,0xB5,0x05,0x48,0x01,0x78,0x00,0x29,0x04,0xD0,0x40,0x78, \ +0x00,0x28,0x01,0xD1,0x03,0xF0,0xD6,0xFE,0x00,0xBD,0x00,0x00,0xD0,0x09,0x00, \ +0x02,0xF0,0xB5,0x2E,0x48,0x47,0x6E,0xFD,0xF7,0x67,0xFD,0x01,0x02,0x2C,0x4C, \ +0x09,0x0A,0x2C,0x48,0x21,0x60,0x43,0x78,0x2C,0x4A,0x13,0x70,0x15,0x78,0x0D, \ +0x23,0x6B,0x43,0x1B,0x18,0x1B,0x7B,0x1B,0x06,0x0B,0x43,0x03,0x21,0x49,0x06, \ +0x0B,0x60,0x15,0x78,0x0D,0x23,0x6B,0x43,0x1B,0x18,0x9E,0x7B,0x5D,0x7B,0x36, \ +0x02,0x35,0x43,0xDE,0x7B,0x1B,0x7C,0x36,0x04,0x35,0x43,0x1B,0x06,0x2B,0x43, \ +0x4B,0x60,0xC3,0x1D,0x39,0x33,0x1B,0x78,0x02,0x2B,0x1D,0xD1,0x15,0x78,0x0D, \ 
+0x23,0x6B,0x43,0x1B,0x18,0x9E,0x7C,0x5D,0x7C,0x36,0x02,0x35,0x43,0xDE,0x7C, \ +0x1B,0x7D,0x36,0x04,0x35,0x43,0x1B,0x06,0x2B,0x43,0x4B,0x61,0x15,0x78,0x0D, \ +0x23,0x6B,0x43,0x18,0x18,0x85,0x7D,0x43,0x7D,0x2D,0x02,0x2B,0x43,0xC5,0x7D, \ +0x00,0x7E,0x2D,0x04,0x2B,0x43,0x00,0x06,0x18,0x43,0x88,0x61,0x10,0x78,0x21, \ +0x68,0x0D,0x4A,0x80,0x07,0x01,0x43,0x21,0x60,0x00,0x20,0x3B,0x5C,0x13,0x54, \ +0x01,0x30,0x18,0x28,0xFA,0xD3,0x11,0x76,0x08,0x0A,0x50,0x76,0x08,0x0C,0x90, \ +0x76,0x08,0x0E,0xD0,0x76,0xF0,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0xF4,0x01, \ +0x00,0x02,0x1C,0x00,0x00,0x02,0x98,0x01,0x00,0x02,0x64,0x07,0x00,0x02,0x80, \ +0xB4,0x11,0x4A,0x11,0x88,0x01,0x31,0x09,0x04,0x09,0x0C,0x1E,0x29,0x00,0xD1, \ +0x00,0x21,0x0E,0x4F,0x0E,0x4B,0x4F,0x43,0xFB,0x18,0x1F,0x7B,0x00,0x2F,0x11, \ +0xD1,0x11,0x80,0x0C,0x49,0x03,0x22,0x19,0x60,0xD9,0x1D,0x15,0x31,0x59,0x60, \ +0x08,0x39,0x99,0x60,0x00,0x21,0x19,0x73,0x99,0x73,0x9A,0x75,0x99,0x82,0x03, \ +0x60,0x40,0x21,0x01,0x73,0x18,0x1C,0x80,0xBC,0xF7,0x46,0xF8,0x01,0x00,0x02, \ +0xA4,0x06,0x00,0x00,0x00,0x11,0x00,0x02,0x00,0x00,0x00,0x80,0x80,0xB4,0x13, \ +0x4A,0x51,0x88,0x01,0x31,0x09,0x04,0x09,0x0C,0x14,0x29,0x00,0xD1,0x00,0x21, \ +0x10,0x4F,0x10,0x4B,0x4F,0x43,0xFB,0x18,0x1F,0x7A,0x00,0x2F,0x15,0xD1,0x51, \ +0x80,0x0E,0x49,0x01,0x22,0x19,0x60,0xD9,0x1D,0x11,0x31,0x59,0x60,0x9A,0x81, \ +0x00,0x21,0x19,0x72,0x0A,0x4F,0xD9,0x73,0xBF,0x79,0x01,0x2F,0x01,0xD1,0xC2, \ +0x73,0x00,0xE0,0xC1,0x73,0x20,0x21,0x03,0x60,0x01,0x72,0x18,0x1C,0x80,0xBC, \ +0xF7,0x46,0xF8,0x01,0x00,0x02,0xA4,0x06,0x00,0x00,0x00,0xDA,0x00,0x02,0x00, \ +0x00,0x00,0x80,0xB4,0x00,0x00,0x02,0x01,0x1C,0x00,0x68,0x02,0x08,0x01,0xD3, \ +0x08,0x1C,0xF7,0x46,0x00,0x22,0x0A,0x73,0xF7,0x46,0x01,0x68,0x09,0x08,0x02, \ +0xD3,0x40,0x21,0x01,0x72,0xF7,0x46,0x04,0x4A,0x01,0x68,0x12,0x7A,0x00,0x2A, \ +0xF9,0xD1,0x02,0x72,0x08,0x1C,0xF7,0x46,0x00,0x00,0x58,0x51,0x00,0x00,0xF0, \ +0xB5,0x00,0x27,0x0A,0x4E,0x00,0x25,0x34,0x68,0x20,0x7A,0x80,0x28,0x0B,0xD1, \ +0x60,0x7A,0x00,0x28,0x0A,0xD0,0x20,0x1C,0xFF,0xF7,0xE1,0xFF,0x30,0x60,0xA0, \ +0x42,0x02,0xD0,0x01,0x35,0x13,0x2D,0xEF,0xD3,0x38,0x1C,0xF0,0xBD,0x01,0x27, \ +0xFB,0xE7,0x64,0x02,0x00,0x02,0x0A,0x49,0x01,0x20,0x48,0x63,0x00,0x20,0xCB, \ +0x1D,0x39,0x33,0x88,0x63,0x58,0x82,0x07,0x4A,0x18,0x82,0x10,0x60,0x90,0x80, \ +0x90,0x71,0xD0,0x71,0xCA,0x1D,0x49,0x32,0x50,0x71,0x98,0x81,0xD8,0x81,0x60, \ +0x31,0xC8,0x70,0xF7,0x46,0xFC,0x01,0x00,0x02,0x58,0x51,0x00,0x00,0x80,0xB5, \ +0x07,0x27,0x7F,0x06,0xF8,0x69,0x40,0x23,0x18,0x43,0xF8,0x61,0x14,0x48,0xFD, \ +0xF7,0x3C,0xFC,0xF8,0x69,0x20,0x23,0x18,0x43,0xF8,0x61,0xF8,0x69,0x1B,0x01, \ +0x18,0x43,0xF8,0x61,0x00,0x20,0xFF,0x21,0x91,0x31,0x01,0x30,0x88,0x42,0xFC, \ +0xD3,0xF8,0x69,0x0C,0x4B,0x18,0x40,0xF8,0x61,0x00,0x20,0x7D,0x21,0x49,0x01, \ +0x01,0x30,0x88,0x42,0xFC,0xD3,0xFF,0xF7,0xC2,0xFF,0xFD,0xF7,0x28,0xFC,0x00, \ +0xF0,0x0E,0xF8,0x05,0x49,0x0D,0x20,0x00,0x06,0x01,0x81,0xFF,0x21,0x41,0x31, \ +0x81,0x80,0x80,0xBD,0x50,0xC3,0x00,0x00,0xFF,0xFD,0x00,0x00,0xFF,0x0F,0x00, \ +0x00,0x90,0xB4,0x0E,0x48,0x00,0x21,0x01,0x70,0x0D,0x48,0x80,0x27,0x07,0x73, \ +0x01,0x23,0x03,0x72,0x82,0x22,0x02,0x71,0x07,0x22,0x02,0x70,0x0A,0x48,0x05, \ +0x24,0x04,0x73,0x86,0x24,0x04,0x72,0x02,0x71,0x08,0x48,0x24,0x22,0x02,0x71, \ +0x07,0x72,0x03,0x73,0x06,0x48,0x01,0x71,0x01,0x73,0x90,0xBC,0xF7,0x46,0x00, \ +0x00,0x10,0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0xC0,0x03, \ +0x00,0x0D,0xE0,0x03,0x00,0x0D,0xF0,0xB5,0x25,0x48,0x01,0x27,0x00,0x7B,0x24, \ +0x4C,0x0A,0x28,0x1F,0xD1,0x24,0x49,0x24,0x4E,0x00,0x20,0x0B,0x7B,0x02,0x1C, \ 
+0x01,0x30,0x08,0x28,0xB3,0x54,0xF9,0xD1,0xF1,0x78,0xB0,0x78,0xF2,0x79,0x09, \ +0x02,0x08,0x43,0x05,0x1C,0x71,0x79,0x30,0x79,0x09,0x02,0x01,0x43,0xB0,0x79, \ +0x12,0x02,0x02,0x43,0x30,0x78,0x73,0x78,0x00,0x02,0x18,0x43,0x05,0x28,0x08, \ +0xD1,0x28,0x1C,0x00,0xF0,0xEA,0xFA,0x21,0xE0,0x16,0x49,0x00,0x20,0x08,0x73, \ +0x27,0x71,0xF0,0xBD,0x09,0x28,0x03,0xD1,0x28,0x1C,0x00,0xF0,0x03,0xFB,0x16, \ +0xE0,0x11,0x4B,0x98,0x42,0x04,0xD1,0xF1,0x78,0x10,0x1C,0x00,0xF0,0x33,0xFB, \ +0x0E,0xE0,0x0E,0x4B,0x9B,0x78,0x00,0x2B,0x05,0xD1,0x13,0x1C,0x0A,0x1C,0x29, \ +0x1C,0x00,0xF0,0x17,0xF8,0x04,0xE0,0x13,0x1C,0x0A,0x1C,0x29,0x1C,0x04,0xF0, \ +0x9B,0xFA,0x27,0x71,0xF0,0xBD,0x00,0x00,0xF0,0x02,0x00,0x0D,0xD0,0x03,0x00, \ +0x0D,0x30,0x03,0x00,0x0D,0x38,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x06,0x80, \ +0x00,0x00,0x5C,0x02,0x00,0x02,0x80,0xB5,0x0F,0x1C,0x11,0x1C,0x1A,0x1C,0x08, \ +0x4B,0x98,0x42,0x03,0xD1,0x38,0x1C,0x00,0xF0,0x12,0xF8,0x80,0xBD,0x06,0x4B, \ +0x98,0x42,0x03,0xD1,0x38,0x1C,0x00,0xF0,0x2B,0xF8,0x80,0xBD,0x03,0x49,0x20, \ +0x20,0x08,0x73,0x80,0xBD,0x33,0xC1,0x00,0x00,0x0E,0x40,0x00,0x00,0x70,0x03, \ +0x00,0x0D,0x0B,0x49,0x0C,0x48,0x4A,0x6B,0x03,0x2A,0x03,0xD1,0x0B,0x4A,0x92, \ +0x78,0x01,0x2A,0x02,0xD0,0x20,0x21,0x01,0x73,0xF7,0x46,0x80,0x22,0x02,0x73, \ +0x50,0x31,0xC9,0x79,0x06,0x4A,0x10,0x23,0x11,0x73,0x01,0x7B,0x19,0x43,0x01, \ +0x73,0xF7,0x46,0x00,0x00,0xFC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x38,0x02, \ +0x00,0x02,0x30,0x03,0x00,0x0D,0xF0,0xB5,0x15,0x4E,0x17,0x1C,0xB2,0x78,0x14, \ +0x48,0x02,0x2A,0x06,0xD1,0xF2,0x78,0x08,0x2A,0x03,0xD1,0x12,0x4D,0x6A,0x6B, \ +0x03,0x2A,0x02,0xD0,0x20,0x21,0x01,0x73,0xF0,0xBD,0x10,0x4C,0x00,0x2F,0x04, \ +0xD1,0x00,0xF0,0x70,0xFB,0x01,0x20,0xA0,0x70,0x05,0xE0,0x00,0x29,0x01,0xD1, \ +0x0C,0x49,0xE9,0x65,0x00,0x21,0x01,0x73,0xE8,0x1D,0x39,0x30,0x07,0x83,0x00, \ +0x27,0x47,0x83,0x31,0x1C,0x08,0x22,0x07,0x48,0x01,0xF0,0x6F,0xFE,0x27,0x71, \ +0xF0,0xBD,0x00,0x00,0x38,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0xFC,0x01,0x00, \ +0x02,0x5C,0x02,0x00,0x02,0x00,0x60,0x00,0x01,0x40,0x02,0x00,0x02,0x90,0xB5, \ +0x17,0x49,0x08,0x78,0x4A,0x78,0x00,0x02,0x10,0x43,0x05,0x28,0x15,0x4A,0x04, \ +0xD1,0x89,0x78,0x50,0x6B,0x00,0xF0,0xC1,0xFA,0x90,0xBD,0x13,0x4B,0x01,0x27, \ +0x98,0x42,0x11,0x4C,0x04,0xD1,0xC8,0x78,0x00,0xF0,0x8E,0xF9,0x27,0x71,0x90, \ +0xBD,0x09,0x28,0x0A,0xD1,0x0E,0x49,0x20,0x20,0x08,0x73,0x27,0x71,0x50,0x6B, \ +0x03,0x28,0xEB,0xD1,0xD0,0x1D,0x49,0x30,0x47,0x71,0x90,0xBD,0xD1,0x1D,0x59, \ +0x31,0x89,0x78,0x00,0x29,0x02,0xD1,0x00,0xF0,0x0E,0xF8,0x90,0xBD,0x04,0xF0, \ +0x27,0xFA,0x90,0xBD,0x38,0x02,0x00,0x02,0xFC,0x01,0x00,0x02,0xD0,0x03,0x00, \ +0x0D,0x06,0x80,0x00,0x00,0x70,0x03,0x00,0x0D,0x08,0x4B,0x07,0x49,0x98,0x42, \ +0x02,0xD1,0xE0,0x20,0x08,0x73,0x04,0xE0,0x06,0x4B,0x98,0x42,0x01,0xD1,0x20, \ +0x20,0x08,0x73,0x04,0x49,0x01,0x20,0x08,0x71,0xF7,0x46,0x70,0x03,0x00,0x0D, \ +0x33,0xC1,0x00,0x00,0x0E,0x40,0x00,0x00,0xD0,0x03,0x00,0x0D,0x80,0xB5,0x11, \ +0x48,0x11,0x4B,0x01,0x78,0x42,0x78,0x09,0x02,0x11,0x43,0x0F,0x1C,0x9F,0x42, \ +0x03,0xD1,0x80,0x78,0x00,0xF0,0x93,0xF9,0x05,0xE0,0x0C,0x49,0xE0,0x20,0x08, \ +0x73,0x0C,0x49,0x01,0x20,0x08,0x71,0x0B,0x4B,0x9F,0x42,0x0B,0xD1,0x0B,0x48, \ +0x01,0x79,0x02,0x29,0x07,0xD1,0x03,0x21,0x01,0x71,0x09,0x48,0x00,0x22,0xC1, \ +0x78,0x80,0x78,0x03,0xF0,0x0B,0xFF,0x80,0xBD,0x38,0x02,0x00,0x02,0x0E,0x40, \ +0x00,0x00,0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x22,0xC1,0x00,0x00,0x5C, \ +0x02,0x00,0x02,0x40,0x02,0x00,0x02,0x00,0xB5,0x0C,0x49,0x08,0x7B,0x02,0x09, \ +0x05,0xD3,0x00,0x20,0x08,0x73,0x0A,0x49,0x01,0x20,0x08,0x71,0x00,0xBD,0xC1, \ 
+0x08,0x02,0xD3,0xFF,0xF7,0x9F,0xFE,0x00,0xBD,0x41,0x08,0x02,0xD3,0xFF,0xF7, \ +0x68,0xFF,0x00,0xBD,0x80,0x08,0xF2,0xD3,0xFF,0xF7,0xB5,0xFF,0x00,0xBD,0x70, \ +0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0xF0,0xB5,0x42,0x4E,0x30,0x79,0x80,0x08, \ +0x4A,0xD3,0x41,0x4D,0x68,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0xAD,0xFD,0x3F, \ +0x48,0x04,0x79,0xC0,0x20,0x01,0xF0,0x6E,0xF8,0x01,0x1C,0x3D,0x48,0x04,0x22, \ +0x02,0x71,0x00,0x22,0x02,0x71,0x08,0x1C,0x01,0xF0,0x65,0xF8,0x3A,0x48,0xC7, \ +0x1D,0x39,0x37,0x39,0x8A,0x40,0x29,0x07,0xDA,0x39,0x8A,0x00,0x29,0x04,0xD0, \ +0x39,0x8A,0x02,0x31,0x09,0x04,0x09,0x0C,0x07,0xE0,0x40,0x2C,0x04,0xDA,0x39, \ +0x8A,0x00,0x29,0x01,0xD1,0x21,0x1C,0x00,0xE0,0x40,0x21,0x7A,0x8A,0x2F,0x4C, \ +0x52,0x18,0x19,0x23,0x9B,0x01,0x9A,0x42,0x04,0xD9,0x00,0x22,0x7A,0x82,0x3A, \ +0x82,0x01,0x22,0xA2,0x71,0x29,0x48,0xC0,0x6E,0x80,0x68,0x7A,0x8A,0x80,0x18, \ +0xCD,0x22,0x00,0xF0,0x0F,0xFA,0x00,0x20,0x30,0x71,0x68,0x79,0x01,0x28,0x01, \ +0xDD,0x00,0xF0,0x5A,0xFD,0xA1,0x79,0x21,0x48,0x01,0x29,0x02,0xD1,0x00,0x20, \ +0xA0,0x71,0xF0,0xBD,0x04,0x1C,0x78,0x8A,0x00,0x28,0x1E,0xD1,0xE0,0x6E,0x81, \ +0x8A,0xC0,0x7D,0x08,0x31,0x08,0x18,0x38,0x82,0xE1,0x6E,0x8A,0x7D,0x48,0x68, \ +0x03,0x2A,0x01,0xDD,0x03,0x22,0x8A,0x75,0x39,0x8A,0x17,0x4B,0x99,0x42,0x09, \ +0xD8,0x39,0x8A,0x00,0x29,0x06,0xD0,0x0A,0x30,0x06,0x22,0x14,0x49,0x01,0xF0, \ +0x32,0xFD,0x00,0x28,0x03,0xD0,0x00,0x20,0x78,0x82,0x38,0x82,0xF0,0xBD,0x38, \ +0x8A,0x40,0x28,0x06,0xDD,0x38,0x8A,0x40,0x38,0x38,0x82,0x78,0x8A,0x40,0x30, \ +0x78,0x82,0xF0,0xBD,0x00,0x20,0x38,0x82,0x78,0x82,0xE0,0x6E,0xFF,0xF7,0x02, \ +0xFD,0xE0,0x66,0xF0,0xBD,0x70,0x03,0x00,0x0D,0xB4,0x00,0x00,0x02,0xF0,0x02, \ +0x00,0x0D,0x60,0x02,0x00,0x0D,0xFC,0x01,0x00,0x02,0x5C,0x02,0x00,0x02,0x32, \ +0x06,0x00,0x00,0x60,0x00,0x00,0x02,0xB0,0xB5,0x2F,0x4D,0xEF,0x1D,0x49,0x37, \ +0x78,0x79,0x00,0x28,0x45,0xD0,0x2D,0x48,0x00,0x78,0x01,0x28,0x41,0xD1,0x2C, \ +0x48,0x00,0x24,0x01,0x78,0x01,0x29,0x02,0xD1,0x04,0x70,0x05,0xF0,0xA1,0xFF, \ +0x29,0x48,0x01,0x7A,0x01,0x29,0x03,0xD1,0x04,0x72,0xF8,0x79,0x00,0xF0,0xB3, \ +0xF9,0xE8,0x1D,0x59,0x30,0xC0,0x78,0x01,0x28,0x02,0xD1,0xF8,0x79,0x00,0xF0, \ +0xAB,0xF9,0xF8,0x79,0x2C,0x1C,0x02,0x28,0x26,0xD0,0xFF,0xF7,0x41,0xFF,0xA0, \ +0x6E,0x80,0x23,0x00,0x7A,0x1D,0x4F,0x18,0x40,0x16,0xD0,0xFF,0xF7,0x37,0xFD, \ +0x00,0x28,0x12,0xD0,0xA1,0x6E,0x10,0x20,0x08,0x72,0x78,0x79,0x01,0x28,0x01, \ +0xDD,0x00,0xF0,0xE7,0xFC,0xA0,0x6E,0x81,0x89,0x0C,0x30,0x0C,0x31,0xFC,0xF7, \ +0x11,0xFF,0x78,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0xC8,0xFC,0xFC,0xF7,0x4E, \ +0xFF,0x78,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0xC1,0xFC,0xB0,0xBD,0x0D,0x48, \ +0x00,0x78,0x00,0x28,0xFA,0xD0,0xA0,0x6E,0x01,0x7A,0x10,0x29,0x05,0xD0,0x01, \ +0x7A,0x80,0x29,0x02,0xD0,0x01,0x7A,0x40,0x29,0xF0,0xD1,0xFF,0xF7,0xF9,0xFC, \ +0xA0,0x66,0xB0,0xBD,0xFC,0x01,0x00,0x02,0xE7,0x01,0x00,0x02,0xE5,0x01,0x00, \ +0x02,0x58,0x51,0x00,0x00,0xB4,0x00,0x00,0x02,0xE6,0x01,0x00,0x02,0xB0,0xB4, \ +0x21,0x4F,0x80,0x21,0x21,0x4A,0x39,0x73,0xD1,0x1D,0x39,0x31,0x4C,0x8B,0x0D, \ +0x8B,0xAC,0x42,0x17,0xD1,0x38,0x7B,0x40,0x23,0x03,0x40,0xE0,0x20,0x00,0x2B, \ +0x0F,0xD1,0x09,0x8B,0x49,0x07,0x02,0xD0,0x38,0x73,0xB0,0xBC,0xF7,0x46,0xD1, \ +0x1D,0x49,0x31,0x89,0x79,0x01,0x29,0x02,0xD1,0xD0,0x20,0x38,0x73,0xF5,0xE7, \ +0x38,0x73,0xF3,0xE7,0x38,0x73,0xF1,0xE7,0x4A,0x8B,0x0C,0x8B,0xA2,0x42,0xED, \ +0xDA,0x0A,0x8B,0x4C,0x8B,0x12,0x1B,0x08,0x2A,0x00,0xD9,0x08,0x22,0x01,0x28, \ +0x01,0xD1,0x0C,0x4B,0x02,0xE0,0x02,0x28,0x00,0xD1,0x0B,0x4B,0x00,0x2A,0x08, \ +0xD0,0x0A,0x48,0x4C,0x8B,0x4D,0x8B,0x01,0x34,0x4C,0x83,0x5C,0x5D,0x01,0x3A, \ 
+0x04,0x73,0xF7,0xD1,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xD0,0xE7,0x70, \ +0x03,0x00,0x0D,0xFC,0x01,0x00,0x02,0xFC,0x01,0x00,0x02,0x0E,0x02,0x00,0x02, \ +0x30,0x03,0x00,0x0D,0xF0,0xB5,0x24,0x4E,0x22,0x4C,0xF7,0x1D,0x59,0x37,0x01, \ +0x28,0x22,0x4D,0x0C,0xD1,0xFD,0xF7,0x21,0xFA,0x28,0x7B,0xF1,0x1D,0x49,0x31, \ +0xC8,0x71,0x00,0xF0,0x4C,0xF9,0x00,0x20,0x38,0x71,0x01,0x20,0x20,0x71,0xF0, \ +0xBD,0xF1,0x1D,0x39,0x31,0x4A,0x8B,0x33,0x1C,0x0E,0x8B,0xB2,0x42,0x1E,0xDA, \ +0x0A,0x8B,0x4E,0x8B,0x92,0x1B,0x08,0x2A,0x00,0xD9,0x08,0x22,0x00,0x2A,0x0A, \ +0xD0,0x13,0x4D,0x2E,0x7B,0xDD,0x6D,0x2E,0x70,0x01,0x35,0xDD,0x65,0x4D,0x8B, \ +0x01,0x35,0x4D,0x83,0x01,0x3A,0xF4,0xD1,0x4A,0x8B,0x0B,0x8B,0x0E,0x49,0x9A, \ +0x42,0x0E,0xD1,0x02,0x28,0x07,0xD1,0x00,0xF0,0x24,0xF9,0x00,0x20,0x38,0x71, \ +0x09,0xE0,0x01,0x20,0x20,0x71,0xF0,0xBD,0x60,0x20,0x08,0x73,0x01,0x20,0x38, \ +0x71,0x01,0xE0,0x00,0x20,0x08,0x73,0x01,0x20,0x20,0x71,0xF0,0xBD,0xD0,0x03, \ +0x00,0x0D,0xFC,0x01,0x00,0x02,0x30,0x03,0x00,0x0D,0x70,0x03,0x00,0x0D,0x00, \ +0xB5,0x7F,0x28,0x07,0xD8,0x00,0x29,0x05,0xD1,0x00,0x2A,0x03,0xD1,0x0C,0x4A, \ +0x51,0x6B,0x03,0x29,0x03,0xD1,0x0B,0x49,0x20,0x20,0x08,0x73,0x00,0xBD,0x01, \ +0x29,0x04,0xD1,0x00,0x28,0x08,0xD0,0x02,0x20,0x50,0x63,0x05,0xE0,0x02,0x29, \ +0x03,0xD1,0x00,0x28,0x01,0xD1,0x01,0x20,0x50,0x63,0x00,0xF0,0xEE,0xF8,0x00, \ +0xBD,0x00,0x00,0xFC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB5,0x00,0x29, \ +0x09,0xD1,0x00,0x2A,0x07,0xD1,0x00,0x28,0x01,0xD0,0x01,0x28,0x03,0xD1,0x14, \ +0x49,0x4A,0x6B,0x01,0x2A,0x03,0xD1,0x13,0x49,0x20,0x20,0x08,0x73,0x80,0xBD, \ +0x12,0x4B,0x02,0x2A,0x09,0xD1,0x00,0x28,0x12,0xD0,0x03,0x22,0x4A,0x63,0x1F, \ +0x7B,0x1A,0x1C,0x02,0x23,0x3B,0x43,0x13,0x73,0x0A,0xE0,0x03,0x2A,0x08,0xD1, \ +0x00,0x28,0x06,0xD1,0x02,0x22,0x4A,0x63,0x1F,0x7B,0x1A,0x1C,0xFD,0x23,0x3B, \ +0x40,0x13,0x73,0x88,0x63,0x00,0x20,0x40,0x31,0x88,0x81,0xC8,0x81,0x00,0xF0, \ +0xB8,0xF8,0x80,0xBD,0x00,0x00,0xFC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0xE0, \ +0x03,0x00,0x0D,0x90,0xB5,0x15,0x4F,0xFA,0x1D,0x39,0x32,0x01,0x29,0x02,0xD1, \ +0x12,0x23,0x13,0x83,0x03,0xE0,0x20,0x23,0x02,0x29,0x09,0xD1,0x13,0x83,0x00, \ +0x23,0x50,0x37,0xBB,0x71,0x14,0x8B,0xA0,0x42,0x05,0xD8,0xBB,0x71,0x10,0x83, \ +0x0F,0xE0,0x0B,0x48,0x03,0x73,0x90,0xBD,0x14,0x8B,0xA0,0x42,0x09,0xD9,0x10, \ +0x8B,0x40,0x07,0x01,0xD0,0xBB,0x71,0x04,0xE0,0x10,0x8B,0x40,0x07,0x01,0xD1, \ +0x01,0x20,0xB8,0x71,0x53,0x83,0x08,0x1C,0xFF,0xF7,0xDC,0xFE,0x90,0xBD,0x00, \ +0x00,0xFC,0x01,0x00,0x02,0x70,0x03,0x00,0x0D,0x80,0xB4,0x0E,0x4F,0x0E,0x4A, \ +0x01,0x28,0x06,0xD1,0x80,0x20,0x10,0x72,0x38,0x7B,0xFE,0x23,0x18,0x40,0x38, \ +0x73,0x08,0xE0,0x02,0x28,0x06,0xD1,0x80,0x20,0x08,0x43,0x10,0x72,0x38,0x7B, \ +0x01,0x23,0x18,0x43,0x38,0x73,0x06,0x49,0x20,0x20,0x08,0x73,0x05,0x49,0x01, \ +0x20,0x08,0x71,0x80,0xBC,0xF7,0x46,0x00,0x00,0xE0,0x03,0x00,0x0D,0xC0,0x03, \ +0x00,0x0D,0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x0D,0x23,0x1B,0x06,0x99, \ +0x83,0x05,0x49,0x0A,0x70,0x05,0x4A,0x10,0x60,0x02,0x20,0x08,0x72,0x08,0x7A, \ +0x00,0x28,0xFC,0xD1,0xF7,0x46,0x00,0x00,0x20,0x00,0x00,0x0D,0x40,0x00,0x00, \ +0x0D,0x90,0xB5,0x1B,0x4C,0x07,0x1C,0x60,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0, \ +0x4C,0xFB,0x00,0x21,0x02,0x2F,0x17,0x48,0x18,0x4A,0x0F,0xD0,0x43,0x79,0x02, \ +0x2B,0x03,0xD1,0x41,0x71,0x03,0xF0,0xF7,0xFD,0x1A,0xE0,0x11,0x72,0x14,0x48, \ +0x20,0x22,0x02,0x70,0x01,0x70,0x13,0x49,0x86,0x20,0x08,0x72,0x11,0xE0,0x12, \ +0x4B,0x9B,0x7B,0x00,0x2B,0x0D,0xD1,0x17,0x7A,0x7B,0x09,0x0A,0xD2,0x10,0x23, \ +0x13,0x72,0xC1,0x70,0x0E,0x4A,0x01,0x20,0x10,0x70,0x0F,0x20,0x00,0x06,0x81, \ 
+0x81,0x0C,0x49,0x81,0x80,0x60,0x79,0x01,0x28,0x01,0xDD,0x00,0xF0,0x0C,0xFB, \ +0x03,0xF0,0x2E,0xFE,0x90,0xBD,0x00,0x00,0xB4,0x00,0x00,0x02,0x5C,0x02,0x00, \ +0x02,0x60,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0x60,0x09, \ +0x00,0x02,0xE6,0x01,0x00,0x02,0x08,0x08,0x00,0x00,0x04,0x48,0x01,0x78,0x02, \ +0x78,0x91,0x42,0xFC,0xD0,0x03,0x49,0x60,0x20,0x08,0x73,0xF7,0x46,0x00,0x00, \ +0xF0,0x03,0x00,0x0D,0x70,0x03,0x00,0x0D,0xF0,0xB5,0x28,0x4E,0x30,0x78,0x00, \ +0x28,0x01,0xD1,0x00,0xF0,0x9F,0xFA,0x0D,0x24,0x24,0x06,0x27,0x89,0x40,0x20, \ +0x24,0x4D,0x38,0x40,0x08,0xD0,0x28,0x7A,0x00,0x28,0xFC,0xD1,0x22,0x48,0x00, \ +0x7B,0x40,0x08,0x01,0xD3,0xFF,0xF7,0x11,0xFD,0x78,0x0A,0x1C,0xD3,0xF8,0x43, \ +0xFF,0x23,0x01,0x33,0x18,0x43,0x20,0x81,0xFD,0xF7,0xA1,0xF8,0x20,0x7B,0x00, \ +0x09,0xFC,0xD2,0x28,0x7A,0x00,0x28,0xFC,0xD1,0xFF,0xF7,0x86,0xFB,0x17,0x48, \ +0x01,0x7A,0x02,0x29,0x05,0xD0,0x01,0x21,0x01,0x72,0x15,0x48,0x00,0x23,0x43, \ +0x71,0x01,0xE0,0x00,0x23,0x03,0x72,0xFF,0xF7,0x28,0xFB,0x12,0x49,0x08,0x78, \ +0x01,0x28,0x10,0xD1,0xB8,0x08,0x0E,0xD3,0x10,0x4A,0x00,0x23,0x10,0x7A,0x13, \ +0x72,0xFA,0x43,0x02,0x23,0x1A,0x43,0x22,0x81,0x09,0x78,0x01,0x29,0x03,0xD1, \ +0x00,0x04,0x00,0x0C,0x03,0xF0,0x41,0xFD,0x30,0x78,0x00,0x28,0x01,0xD1,0x00, \ +0xF0,0x7A,0xFA,0xF0,0xBD,0x00,0x00,0x41,0x01,0x00,0x02,0x20,0x00,0x00,0x0D, \ +0xD0,0x03,0x00,0x0D,0x58,0x51,0x00,0x00,0x5C,0x02,0x00,0x02,0x3B,0x01,0x00, \ +0x02,0xE0,0x03,0x00,0x0D,0x90,0xB5,0x41,0x68,0x0A,0x78,0x08,0x2A,0x12,0xD1, \ +0x8A,0x7F,0xCB,0x7F,0x12,0x02,0x1A,0x43,0x15,0x4B,0x12,0x04,0x1F,0x88,0x12, \ +0x0C,0xBA,0x42,0x02,0xD0,0x5B,0x88,0x93,0x42,0x06,0xD1,0xC8,0x1D,0x11,0x30, \ +0x06,0x22,0x10,0x49,0x01,0xF0,0x8C,0xFA,0x90,0xBD,0x03,0x23,0x5B,0x02,0x9A, \ +0x42,0x06,0xDD,0xC8,0x1D,0x11,0x30,0x06,0x22,0x0B,0x49,0x01,0xF0,0x81,0xFA, \ +0x90,0xBD,0xCF,0x1D,0x01,0x37,0x47,0x60,0x18,0x32,0x82,0x82,0x08,0x4C,0x18, \ +0x22,0x20,0x1C,0x01,0xF0,0x76,0xFA,0x18,0x22,0x38,0x1C,0x21,0x1C,0x01,0xF0, \ +0x71,0xFA,0x90,0xBD,0x9C,0x02,0x00,0x02,0x96,0x02,0x00,0x02,0x90,0x02,0x00, \ +0x02,0x4C,0x0A,0x00,0x02,0xF0,0xB5,0x00,0xF0,0x43,0xF9,0x69,0x4F,0xFF,0x21, \ +0xF8,0x1D,0x27,0x30,0x01,0x31,0x06,0x22,0x04,0x1C,0x00,0xF0,0xA1,0xF9,0x65, \ +0x4D,0x12,0x22,0x03,0x21,0x28,0x1C,0x00,0xF0,0x9B,0xF9,0xF8,0x1D,0x15,0x30, \ +0x0E,0x22,0xFF,0x21,0x11,0x31,0x00,0xF0,0x94,0xF9,0xF8,0x1D,0x2D,0x30,0x01, \ +0x22,0xFF,0x21,0x31,0x31,0x00,0xF0,0x8D,0xF9,0xF8,0x1D,0x58,0x30,0x07,0x22, \ +0xFF,0x21,0x81,0x31,0x00,0xF0,0x86,0xF9,0x0E,0x22,0xFF,0x21,0x38,0x1C,0x41, \ +0x31,0x00,0xF0,0x80,0xF9,0xF8,0x1D,0x07,0x30,0x0E,0x22,0xFF,0x21,0x51,0x31, \ +0x00,0xF0,0x79,0xF9,0xF8,0x1D,0x3C,0x30,0x0E,0x22,0xFF,0x21,0x71,0x31,0x00, \ +0xF0,0x72,0xF9,0xF8,0x1D,0x4A,0x30,0x0E,0x22,0xFF,0x21,0x21,0x31,0x00,0xF0, \ +0x6B,0xF9,0xF8,0x1D,0x2E,0x30,0x0E,0x22,0xFF,0x21,0x61,0x31,0x00,0xF0,0x64, \ +0xF9,0xF8,0x1D,0x5F,0x30,0x03,0x22,0xFF,0x21,0x89,0x31,0x00,0xF0,0x5D,0xF9, \ +0xF8,0x1D,0x63,0x30,0x04,0x22,0xFF,0x21,0x8D,0x31,0x00,0xF0,0x56,0xF9,0x00, \ +0xF0,0x0B,0xF9,0xF8,0x1D,0x23,0x30,0x04,0x22,0xE9,0x1D,0x01,0x31,0x01,0xF0, \ +0x08,0xFA,0xF8,0x1D,0x19,0x30,0x80,0x7B,0xC0,0x07,0xC0,0x0F,0x00,0x25,0x00, \ +0x28,0x10,0xD1,0x38,0x4A,0x15,0x54,0x01,0x30,0x06,0x28,0xFB,0xD3,0x10,0x1C, \ +0x06,0x22,0x21,0x1C,0x01,0xF0,0xD8,0xF9,0x00,0x28,0x04,0xD0,0x21,0x1C,0x06, \ +0x22,0x32,0x48,0x01,0xF0,0xEF,0xF9,0xF8,0x1D,0x29,0x30,0x00,0x79,0x10,0x28, \ +0x11,0xD0,0x20,0x28,0x0F,0xD0,0x31,0x28,0x0D,0xD0,0x30,0x28,0x0B,0xD0,0x32, \ +0x28,0x09,0xD0,0x40,0x28,0x07,0xD0,0x41,0x28,0x05,0xD0,0x50,0x28,0x03,0xD0, \ 
+0x51,0x28,0x01,0xD0,0x52,0x28,0x01,0xD1,0x26,0x49,0xC8,0x75,0xF8,0x1D,0x49, \ +0x30,0xC2,0x7B,0x01,0x24,0x25,0x48,0x24,0x49,0x55,0x2A,0x13,0xD1,0x02,0x78, \ +0x53,0x2A,0x10,0xD1,0x42,0x78,0x42,0x2A,0x0D,0xD1,0x82,0x78,0x53,0x2A,0x0A, \ +0xD1,0xC2,0x78,0x55,0x2A,0x07,0xD1,0x02,0x79,0x53,0x2A,0x04,0xD1,0x42,0x79, \ +0x50,0x2A,0x01,0xD1,0x0C,0x70,0x00,0xE0,0x0D,0x70,0xFF,0x23,0x01,0x22,0x86, \ +0x79,0x52,0x02,0x01,0x33,0x53,0x2E,0x17,0x49,0x08,0xD1,0xC6,0x79,0x45,0x2E, \ +0x05,0xD1,0x00,0x7A,0x4C,0x28,0x02,0xD1,0xCB,0x61,0x0A,0x62,0x01,0xE0,0xCA, \ +0x61,0x0B,0x62,0x0C,0x76,0x11,0x4A,0x4C,0x76,0x05,0x20,0x90,0x70,0x20,0x20, \ +0xC8,0x60,0x4C,0x62,0x00,0x20,0x3A,0x18,0x50,0x32,0x52,0x78,0xFA,0x2A,0x01, \ +0xDD,0x4D,0x62,0xF0,0xBD,0x01,0x30,0x0E,0x28,0xF5,0xD3,0xF0,0xBD,0x64,0x0A, \ +0x00,0x02,0xFC,0x01,0x00,0x02,0x00,0x72,0x01,0x02,0x60,0x00,0x00,0x02,0x00, \ +0x00,0x00,0x02,0x3A,0x01,0x00,0x02,0xC4,0x0A,0x00,0x02,0xA0,0x02,0x00,0x02, \ +0x14,0x01,0x00,0x02,0xB0,0xB5,0x27,0x48,0x04,0x25,0x05,0x70,0x26,0x49,0x00, \ +0x20,0x08,0x70,0x26,0x49,0x02,0x24,0x0C,0x70,0x06,0x21,0x07,0x27,0x7F,0x06, \ +0xB9,0x61,0x78,0x61,0xF8,0x69,0xFB,0x0B,0x98,0x43,0xF8,0x61,0xF8,0x69,0x10, \ +0x23,0x98,0x43,0xF8,0x61,0xFF,0xF7,0xFF,0xFE,0x1E,0x48,0x00,0x78,0x00,0x28, \ +0x00,0xD1,0xBD,0x61,0x1C,0x48,0x01,0x21,0xC1,0x76,0x1C,0x49,0xCA,0x69,0x0B, \ +0x0C,0x1A,0x43,0xCA,0x61,0xCA,0x69,0x1B,0x23,0x9A,0x43,0xCA,0x61,0xC2,0x68, \ +0xCB,0x69,0xD2,0x43,0x1A,0x40,0xCA,0x61,0x02,0x6A,0xCB,0x69,0x1A,0x43,0xCA, \ +0x61,0xCA,0x69,0x0B,0x0C,0x9A,0x43,0xCA,0x61,0xB9,0x69,0x01,0x23,0x19,0x43, \ +0xB9,0x61,0x84,0x76,0x00,0xF0,0x3E,0xF8,0x00,0xF0,0xA8,0xF8,0x0A,0x20,0xFC, \ +0xF7,0xF7,0xFD,0x00,0xF0,0x9B,0xF8,0xFF,0xF7,0xAD,0xF9,0x0A,0x48,0x01,0x23, \ +0x04,0x72,0xF8,0x69,0xDB,0x03,0x18,0x43,0xF8,0x61,0xB0,0xBD,0x00,0x00,0x53, \ +0x02,0x00,0x02,0x5E,0x02,0x00,0x02,0x3F,0x01,0x00,0x02,0x3A,0x01,0x00,0x02, \ +0xA0,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x58,0x51,0x00,0x00,0x80,0x21,0xF3, \ +0x20,0x00,0x05,0x01,0x60,0x00,0x21,0x01,0x60,0x01,0x21,0x41,0x60,0x01,0x60, \ +0x07,0x21,0x49,0x06,0xCA,0x69,0x01,0x23,0x5B,0x03,0x1A,0x43,0xCA,0x61,0x04, \ +0x49,0x01,0x63,0x04,0x49,0x41,0x63,0x81,0x63,0xC1,0x63,0x01,0x69,0x80,0x68, \ +0xF7,0x46,0x00,0x00,0x01,0x0C,0x00,0x02,0x01,0x02,0x00,0x02,0x07,0x20,0x40, \ +0x06,0xC1,0x69,0x02,0x4B,0x19,0x40,0xC1,0x61,0xF7,0x46,0x00,0x00,0xFF,0xDF, \ +0x00,0x00,0xF0,0xB5,0x0F,0x1C,0x00,0x21,0xF3,0x24,0x24,0x05,0x00,0x28,0x08, \ +0xD9,0x10,0x4D,0x6B,0x5C,0xE3,0x60,0x26,0x69,0xB3,0x08,0xFC,0xD3,0x01,0x31, \ +0x81,0x42,0xF7,0xD3,0xFF,0x20,0xE0,0x60,0xA1,0x68,0x21,0x1C,0x0B,0x69,0x5B, \ +0x08,0xFC,0xD3,0x8B,0x68,0x0C,0x69,0xA3,0x08,0xFC,0xD3,0xC8,0x60,0x0B,0x69, \ +0x5B,0x08,0xFC,0xD3,0x8B,0x68,0x3B,0x70,0x01,0x37,0x01,0x3A,0xF3,0xD1,0x02, \ +0x20,0xFC,0xF7,0x8D,0xFD,0xF0,0xBD,0xB0,0x02,0x00,0x02,0xF3,0x20,0x00,0x05, \ +0x81,0x68,0x05,0x21,0xC1,0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0xFF,0x21,0xC1, \ +0x60,0x01,0x69,0x89,0x08,0xFC,0xD3,0x81,0x68,0x01,0x69,0x49,0x08,0xFC,0xD3, \ +0x80,0x68,0x00,0x06,0x00,0x0E,0xF7,0x46,0x90,0xB5,0x04,0x1C,0x48,0x09,0x08, \ +0x23,0x18,0x40,0x17,0x1C,0x03,0x22,0x02,0x43,0x08,0x48,0x02,0x70,0x41,0x70, \ +0xFF,0xF7,0xDE,0xFF,0x40,0x08,0xFB,0xD2,0x02,0x20,0xFC,0xF7,0x63,0xFD,0x02, \ +0x20,0x21,0x1C,0x3A,0x1C,0xFF,0xF7,0xAA,0xFF,0x90,0xBD,0x00,0x00,0xB0,0x02, \ +0x00,0x02,0x07,0x20,0x40,0x06,0xC1,0x69,0x01,0x23,0x5B,0x03,0x19,0x43,0xC1, \ +0x61,0xF7,0x46,0xF3,0x20,0x00,0x05,0x41,0x68,0x0F,0x23,0x1B,0x04,0x99,0x43, \ +0x41,0x60,0x41,0x68,0x19,0x43,0x41,0x60,0xF7,0x46,0x00,0x00,0x80,0xB4,0x14, \ 
+0x4B,0x5B,0x79,0x01,0x2B,0x0E,0xDD,0x17,0x1C,0x12,0x4A,0x14,0xD1,0x02,0x2B, \ +0x09,0xD1,0x00,0x29,0x07,0xD1,0x00,0x28,0x07,0xD1,0x90,0x78,0x4B,0x1F,0x18, \ +0x40,0x90,0x70,0x00,0x20,0x50,0x70,0x80,0xBC,0xF7,0x46,0x90,0x78,0x04,0x23, \ +0x18,0x43,0x90,0x70,0x01,0x20,0x50,0x70,0xF6,0xE7,0x00,0x28,0x04,0xD1,0x90, \ +0x78,0x02,0x23,0x98,0x43,0x90,0x70,0xEF,0xE7,0x90,0x78,0x02,0x23,0x18,0x43, \ +0x90,0x70,0xEA,0xE7,0x00,0x00,0xB4,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0x90, \ +0xB5,0x07,0x21,0x49,0x06,0xCA,0x69,0x52,0x09,0x15,0xD3,0xCA,0x69,0x10,0x23, \ +0x9A,0x43,0xCA,0x61,0x01,0x28,0x01,0xD1,0x08,0x49,0x08,0x70,0x08,0x4C,0x67, \ +0x68,0xFC,0xF7,0x18,0xFD,0x39,0x1A,0x49,0x01,0x09,0x18,0x06,0x4A,0x61,0x60, \ +0x51,0x6B,0x09,0x1A,0x49,0x01,0x08,0x18,0x50,0x63,0x90,0xBD,0x00,0x00,0x41, \ +0x01,0x00,0x02,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04,0x90,0xB5,0x0C,0x48, \ +0x80,0x78,0x01,0x28,0x13,0xD1,0x0B,0x4F,0x7C,0x68,0xFC,0xF7,0xFD,0xFC,0x21, \ +0x1A,0x49,0x09,0x09,0x18,0x79,0x60,0x08,0x49,0x4A,0x6B,0x12,0x1A,0x52,0x09, \ +0x10,0x18,0x48,0x63,0x07,0x20,0x40,0x06,0xC1,0x69,0x10,0x23,0x19,0x43,0xC1, \ +0x61,0x90,0xBD,0xC0,0x09,0x00,0x02,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04, \ +0x80,0xB5,0xC0,0x20,0x00,0xF0,0xD6,0xFA,0x07,0x1C,0x06,0x48,0x01,0x78,0x00, \ +0x29,0x03,0xD0,0x00,0x21,0x01,0x70,0xFF,0xF7,0xD3,0xFF,0x38,0x1C,0x00,0xF0, \ +0xCA,0xFA,0x80,0xBD,0x00,0x00,0x41,0x01,0x00,0x02,0x80,0xB5,0xC0,0x20,0x00, \ +0xF0,0xC2,0xFA,0x07,0x1C,0x01,0x20,0xFF,0xF7,0xA0,0xFF,0x38,0x1C,0x00,0xF0, \ +0xBB,0xFA,0x80,0xBD,0xF0,0xB4,0x13,0x4A,0x00,0x27,0xD7,0x65,0x17,0x66,0x17, \ +0x67,0x57,0x67,0x20,0x20,0x90,0x67,0x10,0x48,0x07,0x70,0x41,0x1C,0x01,0x20, \ +0x04,0x02,0x00,0x25,0x03,0x1C,0x46,0x08,0x05,0xD2,0x5B,0x08,0x01,0x35,0x2D, \ +0x06,0x2D,0x0E,0x5E,0x08,0xF9,0xD3,0x0D,0x70,0x01,0x31,0x01,0x30,0xA0,0x42, \ +0xF0,0xD3,0x07,0x49,0x00,0x20,0x80,0xC1,0x01,0x30,0x20,0x28,0xFB,0xD3,0x57, \ +0x66,0x97,0x66,0xD7,0x67,0xF0,0xBC,0xF7,0x46,0x68,0x03,0x00,0x02,0x0C,0x0B, \ +0x00,0x02,0x0C,0x0C,0x00,0x02,0x90,0xB5,0x0A,0x4F,0x0A,0x4C,0x38,0x68,0x63, \ +0x1C,0x98,0x42,0x04,0xD0,0x3C,0x60,0xFC,0xF7,0x74,0xF9,0x00,0xF0,0xEC,0xFB, \ +0x06,0x48,0x3C,0x60,0x00,0x68,0xFC,0xF7,0xF6,0xFB,0x00,0x20,0x38,0x60,0x00, \ +0xF0,0xF3,0xFB,0x90,0xBD,0xD4,0x03,0x00,0x02,0xF0,0xF0,0xF0,0xF0,0x44,0x04, \ +0x00,0x02,0x80,0xB5,0x05,0x48,0x05,0x4F,0x38,0x60,0xFC,0xF7,0x5C,0xF9,0x00, \ +0xF0,0xD4,0xFB,0x03,0x48,0x38,0x60,0x80,0xBD,0x00,0x00,0xF0,0xF0,0xF0,0xF0, \ +0xD4,0x03,0x00,0x02,0xF1,0xF0,0xF0,0xF0,0x0F,0x00,0x2D,0xE9,0xD2,0x00,0xA0, \ +0xE3,0x00,0xF0,0x21,0xE1,0xCC,0x30,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00,0x00, \ +0x52,0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2,0x00,0x20,0x83,0xE5,0x00, \ +0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D,0xE9,0xB0,0x00,0x9F,0xE5, \ +0x10,0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20,0x83,0xE5,0x9C,0x10,0x9F, \ +0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04,0x00,0x00,0x0A,0x00,0x20, \ +0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9,0x84,0x00,0x9F,0xE5,0x10, \ +0xFF,0x2F,0xE1,0x10,0xD0,0x8D,0xE2,0x78,0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1, \ +0x0F,0x00,0x2D,0xE9,0x64,0x30,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00,0x00,0x52, \ +0xE3,0x06,0x00,0x00,0x0A,0x01,0x20,0x82,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00, \ +0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D,0xE9,0x4C,0x00,0x9F,0xE5,0x10, \ +0xFF,0x2F,0xE1,0x01,0x20,0x82,0xE2,0x00,0x20,0x83,0xE5,0x34,0x10,0x9F,0xE5, \ +0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x04,0x00,0x00,0x0A,0x00,0x20,0x4F, \ +0xE1,0x04,0xE0,0x4E,0xE2,0x04,0x40,0x2D,0xE9,0x20,0x00,0x9F,0xE5,0x10,0xFF, \ 
+0x2F,0xE1,0x00,0x00,0x4F,0xE1,0x04,0xE0,0x4E,0xE2,0x01,0x40,0x2D,0xE9,0x0C, \ +0x00,0x9F,0xE5,0x10,0xFF,0x2F,0xE1,0xD4,0x03,0x00,0x02,0xC4,0x03,0x00,0x02, \ +0x1D,0x01,0x00,0x00,0x29,0x01,0x00,0x00,0x00,0xA0,0x00,0x47,0xD2,0x00,0xA0, \ +0xE3,0x00,0xF0,0x21,0xE1,0xE4,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x01,0x20, \ +0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x0A,0x01, \ +0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1, \ +0xC0,0x11,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0x27,0x00,0x00, \ +0x0A,0xC0,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x03,0x00, \ +0x00,0x1A,0xA8,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x02,0x00,0x50,0xE1,0x03, \ +0x00,0x00,0x1A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8, \ +0x0E,0xF0,0xB0,0xE1,0x08,0x40,0xBD,0xE8,0x0E,0x10,0xA0,0xE1,0xD3,0x20,0xA0, \ +0xE3,0x02,0xF0,0x21,0xE1,0x04,0x10,0x2D,0xE5,0xF0,0x5F,0x2D,0xE9,0x03,0x40, \ +0xA0,0xE1,0xD2,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x0F,0x00,0xBD,0xE8,0xD3, \ +0x50,0xA0,0xE3,0x05,0xF0,0x21,0xE1,0x0F,0x00,0x2D,0xE9,0x01,0x30,0xA0,0xE3, \ +0x18,0x00,0x2D,0xE9,0x44,0x11,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x08,0xD0,0x80, \ +0xE5,0x44,0x31,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00,0x00,0x52,0xE3,0x02,0x00, \ +0x00,0x0A,0x18,0x20,0x80,0xE5,0x00,0x20,0xA0,0xE3,0x00,0x20,0x83,0xE5,0x00, \ +0x00,0xA0,0xE3,0x00,0x00,0x81,0xE5,0x78,0x01,0x00,0xEA,0xD3,0x00,0xA0,0xE3, \ +0x00,0xF0,0x21,0xE1,0x75,0x01,0x00,0xEA,0x00,0xA0,0x00,0x47,0xD1,0x00,0xA0, \ +0xE3,0x00,0xF0,0x21,0xE1,0xF4,0x30,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x01,0x20, \ +0x42,0xE2,0x00,0x20,0x83,0xE5,0x00,0x00,0x52,0xE3,0x03,0x00,0x00,0x0A,0x01, \ +0x40,0xBD,0xE8,0x00,0xF0,0x6F,0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1, \ +0x00,0x10,0x9D,0xE5,0x1F,0x20,0xA0,0xE3,0x02,0x10,0x01,0xE0,0x12,0x00,0x51, \ +0xE3,0x0B,0x00,0x00,0x0A,0xBC,0x10,0x9F,0xE5,0x00,0x00,0x91,0xE5,0x00,0x00, \ +0x50,0xE3,0x27,0x00,0x00,0x0A,0xBC,0x30,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00, \ +0x00,0x52,0xE3,0x03,0x00,0x00,0x1A,0xA4,0x30,0x9F,0xE5,0x00,0x20,0x93,0xE5, \ +0x02,0x00,0x50,0xE1,0x03,0x00,0x00,0x1A,0x01,0x40,0xBD,0xE8,0x00,0xF0,0x6F, \ +0xE1,0x0F,0x00,0xBD,0xE8,0x0E,0xF0,0xB0,0xE1,0x08,0x40,0xBD,0xE8,0x0E,0x10, \ +0xA0,0xE1,0xD3,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x04,0x10,0x2D,0xE5,0xF0, \ +0x5F,0x2D,0xE9,0x03,0x40,0xA0,0xE1,0xD1,0x20,0xA0,0xE3,0x02,0xF0,0x21,0xE1, \ +0x0F,0x00,0xBD,0xE8,0xD3,0x50,0xA0,0xE3,0x05,0xF0,0x21,0xE1,0x0F,0x00,0x2D, \ +0xE9,0x01,0x30,0xA0,0xE3,0x18,0x00,0x2D,0xE9,0x40,0x10,0x9F,0xE5,0x00,0x00, \ +0x91,0xE5,0x08,0xD0,0x80,0xE5,0x40,0x30,0x9F,0xE5,0x00,0x20,0x93,0xE5,0x00, \ +0x00,0x52,0xE3,0x02,0x00,0x00,0x0A,0x18,0x20,0x80,0xE5,0x00,0x20,0xA0,0xE3, \ +0x00,0x20,0x83,0xE5,0x00,0x00,0xA0,0xE3,0x00,0x00,0x81,0xE5,0x37,0x01,0x00, \ +0xEA,0x18,0xD0,0x8D,0xE2,0xD3,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x33,0x01, \ +0x00,0xEA,0xD4,0x03,0x00,0x02,0xC4,0x03,0x00,0x02,0xC0,0x03,0x00,0x02,0xC8, \ +0x03,0x00,0x02,0x4C,0x04,0x00,0x02,0xE4,0x03,0x00,0x02,0x90,0xB5,0x86,0xB0, \ +0x00,0x24,0x13,0x4F,0x13,0x4A,0x3C,0x60,0x7C,0x60,0xBC,0x60,0x00,0x21,0x10, \ +0x1C,0xBC,0x61,0x10,0xC0,0x01,0x31,0x20,0x29,0xFB,0xD3,0xD0,0x1D,0x79,0x30, \ +0xFA,0x60,0x38,0x61,0x7A,0x61,0x78,0x6A,0x00,0x22,0x00,0x21,0x05,0x92,0x02, \ +0x1C,0x04,0x91,0x03,0x90,0xF8,0x69,0x39,0x6A,0x02,0x92,0x00,0x90,0x01,0x91, \ +0x08,0xA1,0x07,0x48,0x05,0x4A,0x0C,0x4B,0x00,0xF0,0x18,0xF8,0xBC,0x62,0xFC, \ +0x62,0x06,0xB0,0x90,0xBD,0x48,0x04,0x00,0x02,0x8C,0x0C,0x00,0x02,0x41,0x46, \ +0x00,0x00,0x0C,0x0D,0x00,0x02,0x53,0x79,0x73,0x74,0x65,0x6D,0x20,0x54,0x69, \ 
+0x6D,0x65,0x72,0x20,0x54,0x68,0x72,0x65,0x61,0x64,0x00,0x4D,0x49,0x54,0x41, \ +0xFF,0xB5,0x07,0x1C,0x0D,0x98,0x0E,0x9C,0x0A,0xAE,0x4C,0xCE,0x09,0x9D,0xB9, \ +0x62,0x02,0x99,0x79,0x64,0x03,0x99,0xB9,0x64,0x00,0x21,0xB9,0x60,0x7A,0x61, \ +0xAA,0x18,0x01,0x3A,0xFD,0x60,0x3A,0x61,0xDA,0x06,0xD2,0x0E,0xF3,0x06,0xB8, \ +0x61,0xFA,0x62,0xDB,0x0E,0xFB,0x63,0xF8,0x61,0x79,0x60,0x03,0x20,0x38,0x63, \ +0x79,0x63,0x25,0x48,0xB9,0x63,0x78,0x65,0x01,0x20,0x90,0x40,0xBF,0x65,0x39, \ +0x65,0x64,0x37,0x00,0x22,0x00,0x23,0x0E,0xC7,0x0C,0xC7,0x78,0x3F,0x38,0x64, \ +0x38,0x1C,0x1E,0x49,0x00,0xF0,0x0B,0xFB,0xC0,0x20,0x00,0xF0,0x6C,0xF8,0x1C, \ +0x49,0x1D,0x4D,0x39,0x60,0x2A,0x68,0xF9,0x1D,0x79,0x31,0x00,0x2A,0x0A,0xD0, \ +0xD3,0x1D,0x79,0x33,0xDA,0x68,0xDF,0x60,0xD3,0x1D,0x79,0x33,0x9F,0x60,0xCA, \ +0x60,0x2A,0x68,0x8A,0x60,0x02,0xE0,0x2F,0x60,0x8F,0x60,0xCF,0x60,0x13,0x49, \ +0x14,0x4D,0x0A,0x68,0x01,0x32,0x0A,0x60,0x29,0x68,0x01,0x31,0x29,0x60,0x00, \ +0xF0,0x4C,0xF8,0x00,0x2C,0x07,0xD0,0x38,0x1C,0x00,0xF0,0x1D,0xFB,0x00,0x28, \ +0x0A,0xD0,0x00,0xF0,0x83,0xFB,0x07,0xE0,0xC0,0x20,0x00,0xF0,0x3F,0xF8,0x29, \ +0x68,0x01,0x39,0x29,0x60,0x00,0xF0,0x3A,0xF8,0x00,0x20,0x04,0xB0,0xF0,0xBD, \ +0x00,0x00,0x9D,0x47,0x00,0x00,0xDD,0x47,0x00,0x00,0x44,0x52,0x48,0x54,0xCC, \ +0x03,0x00,0x02,0xD0,0x03,0x00,0x02,0xE4,0x03,0x00,0x02,0x90,0xB5,0x41,0x60, \ +0x07,0x1C,0x08,0x30,0x00,0x21,0x00,0x24,0x12,0xC0,0x12,0xC0,0xC0,0x20,0x00, \ +0xF0,0x1F,0xF8,0x0C,0x49,0x0C,0x4B,0x39,0x60,0x19,0x68,0x00,0x29,0x06,0xD0, \ +0xCA,0x69,0xCF,0x61,0x97,0x61,0xFA,0x61,0x19,0x68,0xB9,0x61,0x02,0xE0,0x1F, \ +0x60,0xBF,0x61,0xFF,0x61,0x06,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0x00,0xF0, \ +0x09,0xF8,0x20,0x1C,0x90,0xBD,0x00,0x00,0x4E,0x44,0x56,0x44,0x78,0x04,0x00, \ +0x02,0x7C,0x04,0x00,0x02,0x00,0xA3,0x18,0x47,0x00,0x30,0x0F,0xE1,0x3F,0x20, \ +0xA0,0xE3,0x02,0x10,0x03,0xE0,0x00,0x10,0x81,0xE1,0x01,0xF0,0x21,0xE1,0x02, \ +0x00,0xC3,0xE1,0x1E,0xFF,0x2F,0xE1,0xF0,0xB5,0x85,0xB0,0x07,0x1C,0xC0,0x20, \ +0x0C,0x1C,0x15,0x1C,0xFF,0xF7,0xE8,0xFF,0xA9,0x08,0x03,0xD3,0xB9,0x68,0x21, \ +0x40,0xB9,0x60,0x3D,0xE0,0xB9,0x68,0x21,0x43,0xB9,0x60,0x3C,0x69,0x0A,0x1C, \ +0x00,0x2C,0x63,0xD0,0x7B,0x69,0x6D,0x49,0x01,0x2B,0x35,0xD1,0xE5,0x1D,0x79, \ +0x35,0x2E,0x68,0xB3,0x08,0x05,0xD3,0xA3,0x6F,0x16,0x1C,0x1E,0x40,0x9E,0x42, \ +0x28,0xD1,0x02,0xE0,0xA3,0x6F,0x13,0x40,0x24,0xD0,0xE3,0x6F,0x1A,0x60,0x2A, \ +0x68,0x52,0x08,0x03,0xD3,0xBA,0x68,0xA3,0x6F,0x9A,0x43,0xBA,0x60,0x00,0x26, \ +0x3E,0x61,0x7E,0x61,0xA6,0x66,0x0A,0x68,0x01,0x32,0x0A,0x60,0xFF,0xF7,0xB7, \ +0xFF,0x60,0x6E,0x00,0x28,0x04,0xD0,0xE0,0x1D,0x45,0x30,0x00,0xF0,0x24,0xFB, \ +0x00,0xE0,0xE6,0x64,0x6E,0x60,0x20,0x1C,0x00,0xF0,0x80,0xFA,0x00,0x28,0x01, \ +0xD0,0x00,0xF0,0xE6,0xFA,0x30,0x1C,0x9B,0xE0,0xFF,0xF7,0xA2,0xFF,0x97,0xE0, \ +0x03,0x94,0x00,0x25,0x3D,0x61,0x01,0x92,0x4F,0x4A,0x02,0x93,0x11,0x68,0x00, \ +0x26,0x01,0x31,0x11,0x60,0xFF,0xF7,0x95,0xFF,0xC0,0x20,0xFF,0xF7,0x92,0xFF, \ +0xF9,0x68,0x00,0x29,0x0A,0xD0,0x00,0x21,0xF9,0x60,0x03,0x9C,0x7B,0x69,0x02, \ +0x93,0xB9,0x68,0x01,0x9A,0x11,0x43,0x01,0x91,0x00,0x2C,0x46,0xD0,0xE3,0x1D, \ +0x79,0x33,0x04,0x93,0x19,0x68,0x00,0x91,0x89,0x08,0x06,0xD3,0xA1,0x6F,0x01, \ +0x9A,0x0A,0x40,0x8A,0x42,0x07,0xD1,0x04,0xE0,0x6F,0xE0,0xA1,0x6F,0x01,0x9A, \ +0x11,0x40,0x01,0xD0,0x00,0x21,0x00,0xE0,0x07,0x21,0x22,0x6F,0x00,0x29,0x28, \ +0xD1,0x01,0x99,0xE3,0x6F,0x19,0x60,0x00,0x99,0x49,0x08,0x03,0xD3,0xB9,0x68, \ +0xA3,0x6F,0x99,0x43,0xB9,0x60,0x21,0x6F,0xA1,0x42,0x02,0xD1,0x00,0x21,0x03, \ +0x91,0x08,0xE0,0x03,0x9B,0xA3,0x42,0x00,0xD1,0x03,0x91,0x63,0x6F,0x4B,0x67, \ 
+0x21,0x6F,0x63,0x6F,0x19,0x67,0x79,0x69,0x01,0x39,0x79,0x61,0x00,0x21,0xA1, \ +0x66,0x04,0x9B,0x00,0x2D,0x59,0x60,0x03,0xD1,0x25,0x1C,0x26,0x1C,0x21,0x67, \ +0x02,0xE0,0x34,0x67,0x21,0x67,0x26,0x1C,0x02,0x9B,0x14,0x1C,0x01,0x3B,0x02, \ +0x93,0x00,0x2B,0xA5,0xD1,0x03,0x99,0x39,0x61,0xFF,0xF7,0x39,0xFF,0x00,0x2D, \ +0x18,0xD0,0x1C,0x4E,0x00,0x24,0x68,0x6E,0x2F,0x6F,0x00,0x28,0x04,0xD0,0xE8, \ +0x1D,0x45,0x30,0x00,0xF0,0xA1,0xFA,0x00,0xE0,0xEC,0x64,0xC0,0x20,0xFF,0xF7, \ +0x28,0xFF,0x31,0x68,0x01,0x31,0x31,0x60,0xFF,0xF7,0x23,0xFF,0x28,0x1C,0x00, \ +0xF0,0xF6,0xF9,0x3D,0x1C,0xE8,0xD1,0xC0,0x20,0xFF,0xF7,0x1B,0xFF,0x0E,0x49, \ +0x0A,0x68,0x01,0x3A,0x0A,0x60,0xFF,0xF7,0x15,0xFF,0x0C,0x48,0x0C,0x49,0x00, \ +0x68,0x09,0x68,0x88,0x42,0x05,0xD0,0x0B,0x48,0x00,0x68,0x00,0x28,0x01,0xD1, \ +0x00,0xF0,0x49,0xFA,0x00,0x20,0x05,0xB0,0xF0,0xBD,0x79,0x69,0x00,0x29,0x00, \ +0xD1,0x5D,0xE7,0xF9,0x68,0x01,0x31,0xF9,0x60,0x59,0xE7,0xE4,0x03,0x00,0x02, \ +0xC4,0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0xD4,0x03,0x00,0x02,0xFF,0xB5,0x07, \ +0x1C,0xC0,0x20,0x0D,0x1C,0x09,0x9E,0xFF,0xF7,0xEF,0xFE,0x02,0x9A,0x91,0x08, \ +0x04,0xD3,0xB9,0x68,0x29,0x40,0xA9,0x42,0x0E,0xD1,0x02,0xE0,0xB9,0x68,0x29, \ +0x40,0x0A,0xD0,0xB9,0x68,0x03,0x9B,0x00,0x24,0x19,0x60,0x02,0x9A,0x51,0x08, \ +0x3D,0xD3,0xB9,0x68,0xA9,0x43,0xB9,0x60,0x39,0xE0,0x07,0x24,0x00,0x2E,0x36, \ +0xD0,0x1D,0x49,0x0C,0x68,0x1D,0x49,0xA1,0x66,0xA5,0x67,0xE5,0x1D,0x02,0x9A, \ +0x79,0x35,0x2A,0x60,0x03,0x9B,0xE3,0x67,0xE7,0x66,0x39,0x69,0x00,0x29,0x09, \ +0xD0,0x21,0x67,0x39,0x69,0x49,0x6F,0x61,0x67,0x39,0x69,0x49,0x6F,0x0C,0x67, \ +0x39,0x69,0x4C,0x67,0x02,0xE0,0x3C,0x61,0x24,0x67,0x64,0x67,0x79,0x69,0x01, \ +0x31,0x79,0x61,0x07,0x21,0x21,0x63,0x01,0x21,0xA1,0x63,0x0E,0x49,0x0A,0x68, \ +0x01,0x32,0x0A,0x60,0xE6,0x64,0xFF,0xF7,0xAC,0xFE,0x01,0x23,0xDE,0x42,0x03, \ +0xD0,0xE0,0x1D,0x45,0x30,0x00,0xF0,0x87,0xFA,0x20,0x1C,0x00,0xF0,0xBE,0xFA, \ +0x68,0x68,0x04,0xB0,0xF0,0xBD,0xFF,0xF7,0x9D,0xFE,0x20,0x1C,0xF9,0xE7,0x00, \ +0x00,0xC4,0x03,0x00,0x02,0x11,0x4A,0x00,0x00,0xE4,0x03,0x00,0x02,0x00,0xB5, \ +0xFF,0xF7,0xD7,0xFB,0xFF,0xF7,0xB1,0xFD,0x00,0xF0,0x8F,0xFB,0x00,0xF0,0x95, \ +0xFB,0x00,0xF0,0xF5,0xF9,0x00,0xF0,0x99,0xFB,0x00,0xF0,0x9F,0xFB,0x00,0xBD, \ +0x00,0xA0,0x00,0x47,0x13,0x00,0xA0,0xE3,0x00,0xF0,0x21,0xE1,0x48,0x10,0x9F, \ +0xE5,0x00,0x00,0x91,0xE5,0x00,0x00,0x50,0xE3,0xFC,0xFF,0xFF,0x0A,0xD3,0x20, \ +0xA0,0xE3,0x02,0xF0,0x21,0xE1,0x34,0x10,0x9F,0xE5,0x00,0x00,0x81,0xE5,0x04, \ +0x20,0x90,0xE5,0x18,0x30,0x90,0xE5,0x01,0x20,0x82,0xE2,0x04,0x20,0x80,0xE5, \ +0x20,0x20,0x9F,0xE5,0x08,0xD0,0x90,0xE5,0x00,0x30,0x82,0xE5,0x03,0x00,0xBD, \ +0xE8,0x00,0x00,0x50,0xE3,0x01,0xF0,0x6F,0xE1,0xF0,0x80,0xFD,0x08,0xFF,0xDF, \ +0xFD,0xE8,0xC8,0x03,0x00,0x02,0xC4,0x03,0x00,0x02,0x4C,0x04,0x00,0x02,0xF0, \ +0xB5,0x82,0xB0,0x4E,0x4B,0x98,0x42,0x01,0xD0,0x02,0xB0,0xF0,0xBD,0x00,0x20, \ +0x00,0x90,0x00,0x26,0xC0,0x20,0xFF,0xF7,0x43,0xFE,0x4A,0x4D,0x29,0x68,0x09, \ +0x68,0x01,0x91,0x00,0x29,0x01,0xD0,0x01,0xAA,0x8A,0x61,0x29,0x68,0x46,0x4C, \ +0x0E,0x60,0x29,0x68,0x04,0x31,0x29,0x60,0x22,0x68,0x91,0x42,0x02,0xD1,0x43, \ +0x49,0x09,0x68,0x29,0x60,0x43,0x49,0x0E,0x60,0xFF,0xF7,0x2B,0xFE,0xC0,0x20, \ +0xFF,0xF7,0x28,0xFE,0x01,0x99,0x00,0x29,0x5C,0xD0,0x01,0x9C,0x21,0x69,0xA1, \ +0x42,0x01,0xD1,0x00,0x21,0x08,0xE0,0x62,0x69,0x4A,0x61,0x21,0x69,0x62,0x69, \ +0x11,0x61,0x22,0x69,0x01,0xA9,0x91,0x61,0x21,0x69,0x01,0x91,0x21,0x68,0x20, \ +0x29,0x03,0xD9,0x20,0x39,0x21,0x60,0x00,0x25,0x04,0xE0,0x27,0x1D,0xA2,0xCF, \ +0x21,0x60,0x00,0x29,0x03,0xD0,0x69,0x46,0xA1,0x61,0x24,0x61,0x00,0xE0,0xA6, \ 
+0x61,0xFF,0xF7,0x02,0xFE,0x00,0x2D,0x02,0xD0,0x38,0x1C,0x00,0xF0,0xFC,0xFB, \ +0xC0,0x20,0xFF,0xF7,0xFA,0xFD,0xA2,0x69,0x69,0x46,0x8A,0x42,0x25,0xD1,0x21, \ +0x68,0x20,0x29,0x01,0xD9,0x1F,0x21,0x00,0xE0,0x01,0x39,0x20,0x4D,0x89,0x00, \ +0x2A,0x68,0x89,0x18,0x1F,0x4A,0x12,0x68,0x91,0x42,0x07,0xD3,0x89,0x1A,0x00, \ +0xD5,0x03,0x31,0x1D,0x4A,0x89,0x10,0x12,0x68,0x89,0x00,0x89,0x18,0x0A,0x68, \ +0x00,0x2A,0x08,0xD0,0x22,0x61,0x0A,0x68,0x52,0x69,0x62,0x61,0x14,0x61,0x0A, \ +0x68,0x54,0x61,0xA1,0x61,0x03,0xE0,0x24,0x61,0xA1,0x61,0x64,0x61,0x0C,0x60, \ +0xFF,0xF7,0xCE,0xFD,0xC0,0x20,0xFF,0xF7,0xCB,0xFD,0x01,0x99,0x00,0x29,0xA2, \ +0xD1,0x0F,0x49,0x09,0x68,0x00,0x29,0x10,0xD1,0x0E,0x4C,0x03,0x21,0x22,0x68, \ +0x11,0x63,0x22,0x68,0x01,0x21,0x91,0x63,0x0C,0x4A,0x11,0x68,0x01,0x31,0x11, \ +0x60,0xFF,0xF7,0xB7,0xFD,0x20,0x68,0x00,0xF0,0xD0,0xF9,0x6C,0xE7,0xFF,0xF7, \ +0xB1,0xFD,0x69,0xE7,0x4D,0x49,0x54,0x41,0x5C,0x04,0x00,0x02,0x58,0x04,0x00, \ +0x02,0x54,0x04,0x00,0x02,0x60,0x04,0x00,0x02,0xC4,0x03,0x00,0x02,0xE4,0x03, \ +0x00,0x02,0x90,0xB5,0x07,0x1C,0x00,0x6B,0x04,0x28,0x0C,0xD1,0xC0,0x20,0xFF, \ +0xF7,0x9A,0xFD,0x0A,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xFF,0xF7,0x94,0xFD, \ +0x38,0x1C,0x00,0xF0,0x67,0xF8,0x90,0xBD,0xC0,0x20,0xFF,0xF7,0x8D,0xFD,0xBC, \ +0x6E,0xFF,0xF7,0x8A,0xFD,0x00,0x2C,0xF6,0xD0,0x38,0x1C,0x00,0xF0,0x83,0xFB, \ +0x90,0xBD,0xE4,0x03,0x00,0x02,0x80,0xB5,0x0C,0x4F,0x39,0x68,0x88,0x6C,0x49, \ +0x6C,0x00,0xF0,0x76,0xFB,0xC0,0x20,0xFF,0xF7,0x78,0xFD,0x3A,0x68,0x01,0x21, \ +0x11,0x63,0x3A,0x68,0x91,0x63,0x06,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xFF, \ +0xF7,0x6D,0xFD,0x38,0x68,0x00,0xF0,0x86,0xF9,0x80,0xBD,0x00,0x00,0xC4,0x03, \ +0x00,0x02,0xE4,0x03,0x00,0x02,0x00,0xA3,0x18,0x47,0x10,0x20,0x90,0xE5,0x03, \ +0x20,0xC2,0xE3,0x48,0x20,0x42,0xE2,0x01,0x30,0xA0,0xE3,0x00,0x30,0x82,0xE5, \ +0x33,0x30,0xA0,0xE3,0x04,0x30,0x82,0xE5,0x00,0x30,0xA0,0xE3,0x08,0x30,0x82, \ +0xE5,0x0C,0x30,0x82,0xE5,0x10,0x30,0x82,0xE5,0x14,0x30,0x82,0xE5,0x18,0x30, \ +0x82,0xE5,0x1C,0x30,0x82,0xE5,0x20,0x30,0x82,0xE5,0x24,0x30,0x82,0xE5,0x28, \ +0x30,0x82,0xE5,0x2C,0x30,0x82,0xE5,0x0C,0x30,0x90,0xE5,0x30,0x30,0x82,0xE5, \ +0x00,0x30,0xA0,0xE3,0x34,0x30,0x82,0xE5,0x38,0x30,0x82,0xE5,0x3C,0x30,0x82, \ +0xE5,0x40,0x10,0x82,0xE5,0x44,0x30,0x82,0xE5,0x08,0x20,0x80,0xE5,0x1E,0xFF, \ +0x2F,0xE1,0xF0,0xB5,0x00,0x24,0x07,0x1C,0xC0,0x20,0xFF,0xF7,0x24,0xFD,0x29, \ +0x49,0x2A,0x4D,0x0A,0x68,0x01,0x3A,0x0A,0x60,0xBA,0x6B,0x00,0x21,0x00,0x2A, \ +0x06,0xD0,0x3A,0x6B,0x01,0x2A,0x39,0xD0,0x02,0x2A,0x37,0xD0,0xB9,0x63,0x07, \ +0xE0,0x3A,0x6B,0x00,0x2A,0x32,0xD0,0x7A,0x6B,0x00,0x2A,0x03,0xD0,0x79,0x63, \ +0x03,0x21,0x39,0x63,0x2B,0xE0,0x39,0x63,0xF9,0x6A,0x1D,0x4B,0x8E,0x00,0x9A, \ +0x59,0x00,0x2A,0x05,0xD0,0x51,0x6A,0x0F,0x62,0x57,0x62,0x79,0x62,0x3A,0x62, \ +0x1E,0xE0,0x9F,0x51,0x3F,0x62,0x7F,0x62,0x17,0x4A,0x3B,0x6C,0x16,0x68,0x33, \ +0x43,0x13,0x60,0x2A,0x68,0x15,0x4B,0x00,0x2A,0x02,0xD1,0x2F,0x60,0x19,0x60, \ +0x0F,0xE0,0x1E,0x68,0xB1,0x42,0x0C,0xD2,0x19,0x60,0xD3,0x6B,0x8B,0x42,0x08, \ +0xD9,0xD1,0x6A,0x8B,0x42,0x04,0xD0,0x0E,0x49,0x12,0x6C,0x0B,0x68,0x1A,0x43, \ +0x0A,0x60,0x2F,0x60,0xFF,0xF7,0xDC,0xFC,0x0B,0x48,0x00,0x68,0x29,0x68,0x88, \ +0x42,0x04,0xD0,0x0A,0x48,0x00,0x68,0x00,0x28,0x00,0xD1,0x01,0x24,0x20,0x1C, \ +0xF0,0xBD,0xE4,0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0x0C,0x0C,0x00,0x02,0xD8, \ +0x03,0x00,0x02,0xE0,0x03,0x00,0x02,0xDC,0x03,0x00,0x02,0xC4,0x03,0x00,0x02, \ +0xD4,0x03,0x00,0x02,0x00,0xA0,0x00,0x47,0x00,0x00,0xA0,0xE3,0x00,0x10,0x0F, \ +0xE1,0x20,0x10,0x81,0xE3,0xF3,0x40,0x2D,0xE9,0xD3,0x20,0xA0,0xE3,0x02,0xF0, \ 
+0x21,0xE1,0x2C,0x30,0x9F,0xE5,0x00,0x00,0x93,0xE5,0x28,0x20,0x9F,0xE5,0x00, \ +0x10,0x92,0xE5,0x08,0xD0,0x80,0xE5,0x00,0x40,0xA0,0xE3,0x00,0x00,0x51,0xE3, \ +0x02,0x00,0x00,0x0A,0x1C,0x10,0x90,0xE5,0x00,0x40,0x82,0xE5,0x18,0x10,0x80, \ +0xE5,0x00,0x40,0x83,0xE5,0x0A,0xFF,0xFF,0xEA,0xC4,0x03,0x00,0x02,0x4C,0x04, \ +0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00,0x00,0x78, \ +0x04,0x00,0x02,0x90,0xB5,0x07,0x1C,0xC0,0x20,0xFF,0xF7,0x87,0xFC,0xB9,0x69, \ +0x00,0x24,0x00,0x29,0x16,0xD0,0x3A,0x69,0xBA,0x42,0x04,0xD1,0x0A,0x68,0xBA, \ +0x42,0x0F,0xD1,0x0C,0x60,0x0D,0xE0,0x79,0x69,0x51,0x61,0x39,0x69,0x7A,0x69, \ +0x11,0x61,0xB9,0x69,0x0A,0x68,0xBA,0x42,0x04,0xD1,0x3A,0x69,0x91,0x61,0x39, \ +0x69,0xBA,0x69,0x11,0x60,0xBC,0x61,0xFF,0xF7,0x6A,0xFC,0x20,0x1C,0x90,0xBD, \ +0xB0,0xB5,0x07,0x1C,0xC4,0x6E,0xC0,0x20,0xFF,0xF7,0x62,0xFC,0xB9,0x6E,0x00, \ +0x29,0x38,0xD0,0x00,0x2C,0x36,0xD0,0x21,0x68,0x1C,0x4B,0x99,0x42,0x32,0xD1, \ +0x00,0x25,0xBD,0x66,0x39,0x6F,0xB9,0x42,0x01,0xD1,0x25,0x61,0x06,0xE0,0x21, \ +0x61,0x79,0x6F,0x3A,0x6F,0x51,0x67,0x39,0x6F,0x7A,0x6F,0x11,0x67,0x61,0x69, \ +0x01,0x39,0x61,0x61,0x39,0x6B,0x07,0x29,0x10,0xD1,0xFA,0x1D,0x79,0x32,0x51, \ +0x60,0x10,0x49,0x0A,0x68,0x01,0x32,0x0A,0x60,0xFF,0xF7,0x3C,0xFC,0x38,0x1C, \ +0xFF,0xF7,0x0F,0xFF,0x00,0x28,0x04,0xD0,0xFF,0xF7,0x75,0xFF,0x01,0xE0,0xFF, \ +0xF7,0x32,0xFC,0x78,0x6E,0x00,0x28,0x04,0xD0,0xF8,0x1D,0x45,0x30,0xFF,0xF7, \ +0x9F,0xFF,0x00,0xE0,0xFD,0x64,0xC0,0x20,0xFF,0xF7,0x26,0xFC,0xFF,0xF7,0x24, \ +0xFC,0xB0,0xBD,0x00,0x00,0x4E,0x44,0x56,0x44,0xE4,0x03,0x00,0x02,0x80,0xB5, \ +0x07,0x1C,0xC0,0x20,0xFF,0xF7,0x19,0xFC,0x39,0x68,0x00,0x29,0x27,0xD0,0xBA, \ +0x69,0x00,0x2A,0x24,0xD1,0x20,0x29,0x01,0xD9,0x1F,0x21,0x00,0xE0,0x01,0x39, \ +0x11,0x4A,0x89,0x00,0x12,0x68,0x89,0x18,0x10,0x4A,0x12,0x68,0x91,0x42,0x07, \ +0xD3,0x89,0x1A,0x00,0xD5,0x03,0x31,0x0E,0x4A,0x89,0x10,0x12,0x68,0x89,0x00, \ +0x89,0x18,0x0A,0x68,0x00,0x2A,0x08,0xD0,0x3A,0x61,0x0A,0x68,0x52,0x69,0x7A, \ +0x61,0x17,0x61,0x0A,0x68,0x57,0x61,0xB9,0x61,0x03,0xE0,0x3F,0x61,0xB9,0x61, \ +0x7F,0x61,0x0F,0x60,0xFF,0xF7,0xEC,0xFB,0x00,0x20,0x80,0xBD,0x5C,0x04,0x00, \ +0x02,0x58,0x04,0x00,0x02,0x54,0x04,0x00,0x02,0xF0,0xB5,0x05,0x1C,0xC0,0x20, \ +0xFF,0xF7,0xDF,0xFB,0x67,0x49,0x67,0x4C,0x0A,0x68,0x67,0x4F,0x01,0x3A,0x0A, \ +0x60,0xAA,0x6B,0x00,0x2A,0x74,0xD0,0x00,0x26,0xAE,0x63,0xEA,0x6A,0x2B,0x6A, \ +0xAB,0x42,0x26,0xD0,0x69,0x6A,0x59,0x62,0x29,0x6A,0x6B,0x6A,0x19,0x62,0x91, \ +0x00,0x5F,0x4A,0x53,0x58,0xAB,0x42,0x11,0xD1,0x2B,0x6A,0x53,0x50,0x5D,0x49, \ +0x0B,0x68,0x00,0x2B,0x02,0xD0,0x2E,0x6C,0xB3,0x43,0x0B,0x60,0x5B,0x49,0x0B, \ +0x68,0x9D,0x42,0x04,0xD1,0x5A,0x4B,0x1B,0x68,0x9B,0x00,0xD2,0x58,0x0A,0x60, \ +0xFF,0xF7,0xB2,0xFB,0x55,0x49,0x38,0x68,0x09,0x68,0x88,0x42,0x60,0xD0,0x20, \ +0x68,0x00,0x28,0x5E,0xD1,0x95,0xE0,0x00,0x26,0x4E,0x4B,0x92,0x00,0x9E,0x50, \ +0x2A,0x6C,0xD3,0x43,0x50,0x4A,0x16,0x68,0x33,0x40,0x13,0x60,0x4B,0x4A,0x12, \ +0x68,0x00,0x2A,0x03,0xD0,0x2E,0x6C,0xB2,0x43,0x48,0x4E,0x32,0x60,0x1A,0x06, \ +0x12,0x0E,0x02,0xD0,0x49,0x4B,0x9A,0x5C,0x14,0xE0,0x1B,0x0A,0x1A,0x06,0x12, \ +0x0E,0x03,0xD0,0x46,0x4B,0x9A,0x5C,0x08,0x32,0x0C,0xE0,0x1B,0x0A,0x1A,0x06, \ +0x12,0x0E,0x03,0xD0,0x42,0x4B,0x9A,0x5C,0x10,0x32,0x04,0xE0,0x1A,0x0A,0x29, \ +0xD0,0x3F,0x4B,0x9A,0x5C,0x18,0x32,0x3B,0x4B,0x1A,0x60,0x39,0x4A,0x12,0x68, \ +0x95,0x42,0x4D,0xD1,0x38,0x4E,0x35,0x4B,0x32,0x68,0x36,0x4D,0x92,0x00,0x9A, \ +0x58,0x2A,0x60,0x33,0x4A,0x12,0x68,0x00,0x2A,0x42,0xD0,0x0E,0x1C,0x09,0x68, \ +0x01,0x31,0x31,0x60,0xFF,0xF7,0x65,0xFB,0xC0,0x20,0xFF,0xF7,0x62,0xFB,0x00, \ 
+0xE0,0x42,0xE0,0x31,0x68,0x01,0x39,0x31,0x60,0x2A,0x49,0x0A,0x68,0x11,0x06, \ +0x09,0x0E,0x0D,0xD0,0x2B,0x4B,0x59,0x5C,0x1E,0xE0,0x28,0x4B,0x20,0x21,0x19, \ +0x60,0x25,0x49,0x00,0x26,0x0E,0x60,0xFF,0xF7,0x4D,0xFB,0x38,0xE0,0x39,0xE0, \ +0x38,0xE0,0x13,0x0A,0x19,0x06,0x09,0x0E,0x03,0xD0,0x22,0x4B,0x59,0x5C,0x08, \ +0x31,0x0B,0xE0,0x1B,0x0A,0x19,0x06,0x09,0x0E,0x03,0xD0,0x1E,0x4B,0x59,0x5C, \ +0x10,0x31,0x03,0xE0,0x19,0x0A,0x1C,0x4B,0x59,0x5C,0x18,0x31,0x15,0x4B,0x89, \ +0x00,0x59,0x58,0x17,0x4E,0xCB,0x6B,0x36,0x68,0xB3,0x42,0x05,0xD8,0x29,0x60, \ +0x09,0x6C,0x11,0x4B,0xC9,0x43,0x11,0x40,0x19,0x60,0xFF,0xF7,0x26,0xFB,0x0F, \ +0x4A,0x38,0x68,0x11,0x68,0x88,0x42,0x0F,0xD0,0x20,0x68,0x00,0x28,0x0C,0xD1, \ +0x09,0xE0,0xFF,0xF7,0x1B,0xFB,0x0A,0x49,0x38,0x68,0x09,0x68,0x88,0x42,0x04, \ +0xD0,0x20,0x68,0x00,0x28,0x01,0xD1,0xFF,0xF7,0x51,0xFE,0xF0,0xBD,0xE4,0x03, \ +0x00,0x02,0xD4,0x03,0x00,0x02,0xC4,0x03,0x00,0x02,0x0C,0x0C,0x00,0x02,0xDC, \ +0x03,0x00,0x02,0xC8,0x03,0x00,0x02,0xE0,0x03,0x00,0x02,0xD8,0x03,0x00,0x02, \ +0x0C,0x0B,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41,0x60,0xF7,0x46,0x00, \ +0x00,0x80,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41,0x60,0xF7,0x46, \ +0x00,0x00,0x88,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41,0x60,0xF7, \ +0x46,0x00,0x00,0x90,0x04,0x00,0x02,0x02,0x48,0x00,0x21,0x01,0x60,0x41,0x60, \ +0xF7,0x46,0x00,0x00,0x98,0x04,0x00,0x02,0xBC,0x46,0x03,0x1C,0x08,0x43,0x80, \ +0x07,0x13,0xD1,0x12,0x1F,0x05,0xD3,0x01,0xCB,0x80,0xC9,0xC0,0x1B,0x04,0xD1, \ +0x12,0x1F,0xF9,0xD2,0xD2,0x1C,0x0C,0xD3,0x02,0xE0,0x1B,0x1F,0x09,0x1F,0xD2, \ +0x1C,0x18,0x78,0x0F,0x78,0xC0,0x1B,0x04,0xD1,0x5B,0x1C,0x49,0x1C,0x52,0x1E, \ +0xF7,0xD2,0x00,0x20,0x67,0x46,0xF7,0x46,0x43,0x1A,0x93,0x42,0x30,0xD3,0x84, \ +0x46,0x8B,0x07,0x07,0xD0,0x52,0x1E,0x29,0xD3,0x0B,0x78,0x03,0x70,0x40,0x1C, \ +0x49,0x1C,0x8B,0x07,0xF7,0xD1,0x83,0x07,0x17,0xD1,0x10,0x3A,0x05,0xD3,0xB0, \ +0xB4,0xB8,0xC9,0xB8,0xC0,0x10,0x3A,0xFB,0xD2,0xB0,0xBC,0x0C,0x32,0x0F,0xD3, \ +0x08,0xC9,0x08,0xC0,0x12,0x1F,0xFB,0xD2,0x0A,0xE0,0x08,0xC9,0x03,0x70,0x1B, \ +0x0A,0x43,0x70,0x1B,0x0A,0x83,0x70,0x1B,0x0A,0xC3,0x70,0x00,0x1D,0x12,0x1F, \ +0xF4,0xD2,0xD2,0x1C,0x05,0xD3,0x0B,0x78,0x03,0x70,0x49,0x1C,0x40,0x1C,0x52, \ +0x1E,0xF9,0xD2,0x60,0x46,0xF7,0x46,0x03,0x1C,0x0B,0x43,0x13,0x43,0x9B,0x07, \ +0x04,0xD1,0x12,0x1F,0x8B,0x58,0x83,0x50,0xFB,0xD1,0xF7,0x46,0x52,0x1E,0x8B, \ +0x5C,0x83,0x54,0xFB,0xD1,0xF7,0x46,0x00,0x00,0x4B,0x08,0x02,0x1C,0x02,0xD1, \ +0x00,0xF0,0x79,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9,0x00,0x23,0x91,0x42,0x05, \ +0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91,0x42,0xF9,0xD3,0x5B,0x41, \ +0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0xF7,0x46,0x00,0x00,0xCB,0x17,0x59, \ +0x40,0xC9,0x1A,0xC2,0x17,0x50,0x40,0x80,0x1A,0x0C,0xB4,0x4B,0x08,0x02,0x1C, \ +0x02,0xD1,0x00,0xF0,0x5A,0xF8,0x52,0x00,0x9A,0x42,0xFC,0xD9,0x00,0x23,0x91, \ +0x42,0x05,0xD2,0xDB,0x18,0x82,0x42,0x06,0xD0,0x52,0x08,0x91,0x42,0xF9,0xD3, \ +0x5B,0x41,0x89,0x1A,0x82,0x42,0xF8,0xD1,0x18,0x1C,0x0C,0xBC,0x5A,0x40,0x50, \ +0x40,0x80,0x1A,0x59,0x40,0xC9,0x1A,0x70,0x47,0xB0,0xB5,0x0D,0x48,0x42,0x6E, \ +0x03,0x6E,0x97,0x00,0xC1,0x1F,0x75,0x39,0x9C,0x00,0x0C,0x59,0xCD,0x59,0x2C, \ +0x19,0xCC,0x51,0x59,0x1E,0x36,0x23,0x00,0x29,0x01,0x66,0x03,0xDA,0x51,0x1E, \ +0x41,0x66,0x03,0x66,0x03,0xE0,0x51,0x1E,0x41,0x66,0x00,0xD5,0x43,0x66,0x60, \ +0x00,0x40,0x08,0xB0,0xBD,0x1C,0x05,0x00,0x02,0x80,0xB5,0x09,0x49,0x17,0x22, \ +0x0A,0x66,0x36,0x22,0x4A,0x66,0x07,0x4A,0x00,0x21,0x03,0x0C,0x1F,0x18,0x8B, \ +0x00,0xD7,0x50,0x05,0x4B,0x01,0x31,0x58,0x43,0x05,0x4B,0xC0,0x18,0x37,0x29, \ 
+0xF4,0xDB,0x80,0xBD,0x1C,0x05,0x00,0x02,0xA0,0x04,0x00,0x02,0xCD,0x0D,0x01, \ +0x00,0xE1,0x19,0xD6,0x66,0x00,0x47,0x08,0x47,0x10,0x47,0x18,0x47,0x20,0x47, \ +0x28,0x47,0x30,0x47,0x38,0x47,0x78,0x47,0x00,0x00,0x2C,0xC0,0x9F,0xE5,0xFF, \ +0x7F,0x8C,0xE8,0x3C,0xC0,0x8C,0xE2,0x0C,0x00,0x8F,0xE2,0x3C,0x10,0x4C,0xE2, \ +0x04,0xE0,0x4E,0xE2,0x00,0xE0,0x8C,0xE5,0x93,0xEC,0xFF,0xEA,0x20,0x00,0x00, \ +0x80,0x44,0x69,0x76,0x69,0x64,0x65,0x20,0x62,0x79,0x20,0x7A,0x65,0x72,0x6F, \ +0x00,0x00,0x84,0x05,0x00,0x02,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x04, \ +0x00,0xA0,0xE3,0x00,0x00,0x90,0xE5,0xFF,0x04,0x00,0xE2,0xEA,0x04,0x50,0xE3, \ +0x01,0x00,0xA0,0x03,0x00,0x00,0xA0,0x13,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00, \ +0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0, \ +0x8E,0xE3,0x05,0x0B,0x40,0xE2,0x1B,0x0B,0x50,0xE3,0x01,0x00,0xA0,0x33,0x00, \ +0x00,0xA0,0x23,0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3, \ +0x1E,0xFF,0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x1E,0xFF,0x2F, \ +0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x00,0x00,0xA0,0xE3,0x1E,0xFF, \ +0x2F,0xE1,0x78,0x47,0x00,0x00,0x01,0xE0,0x8E,0xE3,0x00,0x00,0x8F,0xE2,0x1E, \ +0xFF,0x2F,0xE1,0x55,0x6E,0x6B,0x6E,0x6F,0x77,0x6E,0x20,0x45,0x72,0x72,0x6F, \ +0x72,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x42,0x72,0x61,0x6E,0x63,0x68,0x20, \ +0x54,0x68,0x72,0x6F,0x75,0x67,0x68,0x20,0x5A,0x65,0x72,0x6F,0x00,0x01,0x00, \ +0x02,0x00,0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65,0x64,0x20,0x49,0x6E,0x73, \ +0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E,0x00,0x00,0x00,0x02,0x00,0x02,0x00, \ +0x55,0x6E,0x64,0x65,0x66,0x69,0x6E,0x65,0x64,0x20,0x53,0x57,0x49,0x20,0x49, \ +0x6E,0x73,0x74,0x72,0x75,0x63,0x74,0x69,0x6F,0x6E,0x00,0x00,0x00,0x03,0x00, \ +0x02,0x00,0x50,0x72,0x65,0x66,0x65,0x74,0x63,0x68,0x20,0x41,0x62,0x6F,0x72, \ +0x74,0x00,0x00,0x04,0x00,0x02,0x00,0x44,0x61,0x74,0x61,0x20,0x41,0x62,0x6F, \ +0x72,0x74,0x00,0x00,0x05,0x00,0x02,0x00,0x41,0x64,0x64,0x72,0x65,0x73,0x73, \ +0x20,0x45,0x78,0x63,0x65,0x70,0x74,0x69,0x6F,0x6E,0x00,0x00,0x00,0x06,0x00, \ +0x02,0x00,0x55,0x6E,0x68,0x61,0x6E,0x64,0x6C,0x65,0x64,0x20,0x49,0x6E,0x74, \ +0x65,0x72,0x72,0x75,0x70,0x74,0x00,0x07,0x00,0x02,0x00,0x55,0x6E,0x68,0x61, \ +0x6E,0x64,0x6C,0x65,0x64,0x20,0x46,0x61,0x73,0x74,0x20,0x49,0x6E,0x74,0x65, \ +0x72,0x72,0x75,0x70,0x74,0x00,0x00,0x00,0x00,0xB4,0x4F,0x00,0x00,0xCC,0x4F, \ +0x00,0x00,0xE8,0x4F,0x00,0x00,0x08,0x50,0x00,0x00,0x1C,0x50,0x00,0x00,0x2C, \ +0x50,0x00,0x00,0x44,0x50,0x00,0x00,0x5C,0x50,0x00,0x00,0x28,0x20,0x4F,0xE2, \ +0x00,0x01,0x92,0xE7,0x2B,0xEC,0xFF,0xEA,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x2D, \ +0xE9,0x50,0x00,0x9F,0xE5,0x50,0x10,0x9F,0xE5,0x01,0x20,0xA0,0xE1,0x4C,0x40, \ +0x9F,0xE5,0x04,0x20,0x82,0xE0,0x05,0x00,0x00,0xEB,0x44,0x20,0x9F,0xE5,0x44, \ +0x00,0x9F,0xE5,0x00,0x10,0xA0,0xE1,0x02,0x10,0x81,0xE0,0x05,0x00,0x00,0xEB, \ +0x00,0x80,0xBD,0xE8,0x02,0x00,0x51,0xE1,0x04,0x30,0x90,0x34,0x04,0x30,0x81, \ +0x34,0xFB,0xFF,0xFF,0x3A,0x0E,0xF0,0xA0,0xE1,0x00,0x20,0xA0,0xE3,0x01,0x00, \ +0x50,0xE1,0x04,0x20,0x80,0x34,0xFB,0xFF,0xFF,0x3A,0x0E,0xF0,0xA0,0xE1,0x64, \ +0x51,0x00,0x00,0x00,0x00,0x00,0x02,0xC4,0x05,0x00,0x00,0xD8,0x07,0x00,0x00, \ +0xC4,0x05,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ 
+0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x0A,0x00,0x90,0x00,0x30,0x00,0x08,0x06, \ +0x07,0x00,0x82,0x84,0x8B,0x96,0x09,0x04,0x02,0x41,0xFA,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x11,0x11,0x11,0x11, \ +0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x04,0xAC,0x6C,0x32,0x70,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x64,0x00, \ +0x30,0x75,0x64,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x03,0x00,0x04,0xAC, \ +0x6C,0x32,0x70,0x55,0x4E,0x48,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x00,0x00,0x00,0x00,0x45,0x55,0x00,0x00, \ +0x00,0x00,0x00,0xFA,0x00,0x00,0x00,0xFA,0x00,0x00,0x2A,0x09,0x2A,0x09,0x1F, \ +0x00,0xFF,0x00,0x08,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00, \ +0x41,0x54,0x4D,0x45,0x4C,0x5F,0x41,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x01,0x00,0x05,0x00,0x00,0x00,0x00, \ +0x08,0x00,0x00,0x00,0x00,0x01,0x01,0x08,0x65,0x00,0x54,0x1E,0x1E,0x1E,0x1E, \ +0x00,0x00,0x28,0x28,0x28,0x00,0x00,0x32,0x3C,0x46,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x01, \ +0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00, \ +0x01,0x00,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x00,0x01, \ +0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x00,0x01,0x01,0x00,0x00,0x01,0x01,0x00, \ +0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01,0x01,0x00,0x01,0x01, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x04,0x08,0x10,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xD4,0x01,0x00,0x02,0x00,0x00,0x00, \ +0x07,0x03,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12, \ +0x01,0x10,0x01,0x00,0x00,0x00,0x08,0xEB,0x03,0x05,0x76,0x00,0x01,0x00,0x00, \ +0x00,0x01,0x09,0x02,0x20,0x00,0x01,0x01,0x00,0x80,0xFA,0x09,0x04,0x00,0x00, \ +0x02,0xFF,0x00,0xFF,0x00,0x07,0x05,0x85,0x02,0x40,0x00,0x00,0x07,0x05,0x02, \ +0x02,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x07,0xFF,0x07,0xFF,0x1F,0x00,0x06,0x00, \ +0x1E,0x00,0x20,0xFF,0x3F,0xFC,0x01,0x7C,0x00,0xF8,0x00,0x01,0x01,0x01,0x0A, \ +0x0A,0x0E,0x01,0x03,0x03,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0xAA,0xAA,0x03, \ +0x00,0x00,0x00,0xAA,0xAA,0x03,0x00,0x00,0xF8,0x37,0x81,0xF3,0x80,0x00,0x00, \ 
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x58, \ +0x00,0x00,0x00,0x58,0x00,0x00,0x00,0xD8,0x05,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00, \ +0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00, \ +0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04, \ +0x00,0x00,0x00,0x04,0x00,0x00,0x00,0xF6,0x07,0x00,0x00,0xFB,0x07,0x00,0x00, \ +0x00,0x08,0x00,0x00,0x05,0x08,0x00,0x00,0x0A,0x08,0x00,0x00,0x0F,0x08,0x00, \ +0x00,0x14,0x08,0x00,0x00,0x19,0x08,0x00,0x00,0x1E,0x08,0x00,0x00,0x23,0x08, \ +0x00,0x00,0x28,0x08,0x00,0x00,0x2D,0x08,0x00,0x00,0x32,0x08,0x00,0x00,0x3E, \ +0x08,0x00,0x00,0x43,0x6F,0x70,0x79,0x72,0x69,0x67,0x68,0x74,0x20,0x28,0x63, \ +0x29,0x20,0x31,0x39,0x39,0x36,0x2D,0x32,0x30,0x30,0x30,0x20,0x45,0x78,0x70, \ +0x72,0x65,0x73,0x73,0x20,0x4C,0x6F,0x67,0x69,0x63,0x20,0x49,0x6E,0x63,0x2E, \ +0x20,0x2A,0x20,0x54,0x68,0x72,0x65,0x61,0x64,0x58,0x20,0x54,0x48,0x55,0x4D, \ +0x42,0x2D,0x46,0x2F,0x41,0x52,0x4D,0x20,0x56,0x65,0x72,0x73,0x69,0x6F,0x6E, \ +0x20,0x47,0x33,0x2E,0x30,0x66,0x2E,0x33,0x2E,0x30,0x62,0x20,0x2A,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xF0,0xF0,0xF0,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x47,0x2D,0x47,0x42, \ +0x2D,0x47,0x4C,0x2D,0x4D,0x2D,0x44,0x2D,0x44,0x4C,0x2D,0x4B,0x4D,0x4C,0x2D, \ +0x43,0x4D,0x52,0x2D,0x48,0x4D,0x52,0x2D,0x4D,0x4C,0x32,0x2D,0x47,0x5A,0x2D, \ +0x4B,0x48,0x32,0x2D,0x43,0x4D,0x2D,0x52,0x50,0x2D,0x54,0x43,0x2D,0x4E,0x48, \ +0x2D,0x54,0x44,0x2D,0x41,0x50,0x2D,0x48,0x41,0x2D,0x47,0x46,0x2D,0x44,0x44, \ +0x2D,0x41,0x54,0x2D,0x4D,0x46,0x2D,0x4D,0x53,0x2D,0x44,0x57,0x2D,0x55,0x53, \ +0x41,0x2D,0x43,0x41,0x2D,0x53,0x44,0x2D,0x53,0x44,0x53,0x55,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x01,0x00,0x00,0x00,0x85,0x8E,0xD7,0x66,0x09,0x8C,0xD3,0xD5,0xF5,0xD8,0x09, \ +0x0A,0xFB,0x87,0x1F,0xBF,0x67,0xF7,0x8D,0xCB,0x69,0x07,0xF7,0xBD,0x34,0x12, \ +0x3D,0x50,0xC8,0x84,0x4F,0x7F,0xA3,0x02,0xDE,0x61,0xAE,0x8D,0x40,0xA7,0xE8, \ +0xBD,0x24,0x7A,0xEA,0xA2,0x15,0x51,0x57,0x2E,0xE6,0xBB,0xFF,0x7F,0xD5,0xF6, \ +0x7A,0x83,0x2A,0x63,0x77,0x1D,0x86,0x13,0x7C,0x2E,0x9F,0xE1,0x05,0x57,0x5F, \ +0x69,0x2E,0x6B,0x93,0x87,0x6E,0x9A,0xA1,0x50,0x94,0x0E,0x8B,0x72,0xAE,0x55, \ +0xCC,0xC5,0xB1,0x8A,0x0A,0xB1,0xD7,0x72,0x6F,0x85,0x17,0x5C,0x22,0xD0,0xA3, \ +0xFD,0xC4,0x51,0x61,0x98,0xED,0x89,0x9F,0x82,0xDB,0xF1,0x9D,0xC5,0xFB,0xBC, \ +0x89,0xC1,0xEE,0x83,0x59,0xB1,0x59,0x63,0x30,0x5C,0x50,0xCC,0xC9,0x5A,0xBC, \ +0x9C,0xF9,0x30,0xE2,0x2F,0x42,0x5E,0xF6,0x39,0xD2,0x7B,0x15,0x75,0xFB,0x58, \ +0xC1,0x40,0x3E,0x9A,0xEB,0x27,0xD9,0xA2,0x82,0xC5,0xC2,0xD6,0x69,0x05,0xB3, \ 
+0x30,0x8E,0xED,0xD2,0xDD,0x83,0x10,0x41,0xA4,0x1D,0x1F,0x15,0xE2,0x60,0x56, \ +0xC5,0x2F,0xF3,0x04,0x99,0xEF,0x8E,0xE1,0x08,0x32,0x59,0x4A,0x4C,0xED,0x7B, \ +0x5B,0x40,0xFC,0x02,0x81,0xD9,0x41,0x53,0x51,0xFA,0x3D,0xFF,0xAC,0xB5,0x6C, \ +0x09,0x6D,0x1D,0xCC,0xB3,0x2B,0xFF,0x15,0x3D,0x25,0x17,0x00,0x00,0x00,0x36, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00,0x00,0x00} + +#define FW_503RFMD_ACC_EXTERNAL { \ +0x80,0xB5,0x10,0x49,0x00,0x20,0x08,0x70,0x0F,0x48,0x81,0x79,0xC0,0x79,0x00, \ +0x02,0x08,0x43,0x68,0x28,0x04,0xD0,0x03,0x21,0x0B,0x20,0x00,0xF0,0x2D,0xFD, \ +0x80,0xBD,0x08,0x21,0x0B,0x20,0x00,0xF0,0x28,0xFD,0xC0,0x20,0xFE,0xF7,0x59, \ +0xF9,0x07,0x1C,0x00,0xF0,0xDC,0xFA,0x38,0x1C,0xFE,0xF7,0x53,0xF9,0x01,0x21, \ +0x0B,0x20,0x00,0xF0,0x1B,0xFD,0x80,0xBD,0x9C,0x01,0x00,0x02,0xD0,0x09,0x00, \ +0x02,0xF8,0xB5,0x35,0x4F,0x35,0x4E,0x38,0x78,0x35,0x4D,0x0A,0x28,0x59,0xD2, \ +0x02,0xA3,0x1B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x55,0x05,0x09,0x0D,0x55, \ +0x11,0x14,0x18,0x55,0x1B,0xB8,0x78,0x2F,0x49,0x44,0x18,0x14,0xE0,0xB8,0x78, \ +0x2E,0x49,0x44,0x18,0x10,0xE0,0xB8,0x78,0x2D,0x49,0x44,0x18,0x0C,0xE0,0xB8, \ +0x78,0x44,0x19,0x09,0xE0,0xB8,0x78,0x2B,0x49,0x44,0x18,0x05,0xE0,0xB8,0x78, \ +0x84,0x19,0x02,0xE0,0xB8,0x78,0x28,0x49,0x44,0x18,0x00,0x2C,0x39,0xD0,0xC0, \ +0x20,0xFE,0xF7,0x1B,0xF9,0x00,0x90,0xE8,0x1D,0x1E,0x4E,0x29,0x30,0x45,0x7A, \ +0x36,0x7E,0x7A,0x78,0x00,0x21,0x00,0x2A,0x07,0xD9,0x7A,0x18,0x12,0x79,0x01, \ +0x31,0x22,0x70,0x7A,0x78,0x01,0x34,0x8A,0x42,0xF7,0xD8,0x1D,0x49,0x09,0x68, \ +0x00,0x29,0x11,0xD0,0x40,0x7A,0x00,0x28,0x0E,0xD1,0x00,0x2D,0x04,0xD0,0x19, \ +0x49,0x1A,0x48,0x0E,0x22,0xFE,0xF7,0x3B,0xFE,0x0F,0x48,0x00,0x7E,0xB0,0x42, \ +0x01,0xD1,0x00,0x2D,0x01,0xD0,0x03,0xF0,0xC3,0xF9,0x02,0xF0,0xB1,0xFE,0x00, \ +0x98,0xFE,0xF7,0xEE,0xF8,0x01,0x21,0x01,0x20,0x00,0xF0,0xB6,0xFC,0xF8,0xBD, \ +0x04,0x21,0x01,0x20,0x00,0xF0,0xB1,0xFC,0xF9,0xE7,0x03,0x21,0x01,0x20,0x00, \ +0xF0,0xAC,0xFC,0xF4,0xE7,0x00,0x00,0xD8,0x09,0x00,0x02,0x00,0x00,0x00,0x02, \ +0x84,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x60,0x00,0x00,0x02,0xC4,0x00,0x00, \ +0x02,0x1C,0x00,0x00,0x02,0x18,0x01,0x00,0x02,0xC4,0x02,0x00,0x02,0xA7,0x0A, \ +0x00,0x02,0xFC,0x0A,0x00,0x02,0xF0,0xB5,0x83,0xB0,0x62,0x4D,0xEF,0x1D,0x19, \ +0x37,0xB8,0x79,0x03,0x28,0x1B,0xD0,0x60,0x49,0x0E,0x20,0x08,0x83,0x60,0x48, \ +0x60,0x39,0x02,0x90,0x40,0x7A,0xCC,0x1D,0xCE,0x1D,0x49,0x36,0x69,0x34,0x00, \ +0x28,0x40,0xD0,0xF8,0x7B,0x5B,0x49,0x00,0x28,0x29,0xD0,0xF8,0x79,0x01,0x28, \ +0x01,0xDB,0x0E,0x28,0x16,0xDD,0x03,0x21,0x03,0x20,0x00,0xF0,0x72,0xFC,0x03, \ +0xB0,0xF0,0xBD,0xF8,0x79,0x01,0x28,0x01,0xDB,0x0E,0x28,0x04,0xDD,0x03,0x21, \ +0x03,0x20,0x00,0xF0,0x67,0xFC,0xF3,0xE7,0x03,0xF0,0xDC,0xFA,0x01,0x21,0x03, \ +0x20,0x00,0xF0,0x60,0xFC,0xEC,0xE7,0x00,0x20,0x00,0x22,0x0B,0x18,0x9A,0x73, \ +0x0A,0x54,0x01,0x30,0x00,0x04,0x00,0x0C,0x0E,0x28,0xF7,0xDB,0xE2,0x71,0x01, \ +0x22,0xF2,0x71,0x29,0xE0,0xF8,0x79,0x41,0x18,0x49,0x7B,0x00,0x29,0x0A,0xD1, \ +0x02,0xF0,0xD2,0xF9,0x00,0x06,0x00,0x0E,0xF8,0x71,0x04,0xD1,0x03,0x21,0x03, \ +0x20,0x00,0xF0,0x42,0xFC,0xCE,0xE7,0x00,0x22,0xF2,0x71,0x16,0xE0,0xE0,0x7A, \ +0x3C,0x49,0x40,0x00,0x08,0x5A,0xF9,0x79,0x01,0x22,0x01,0x91,0x01,0x39,0x8A, \ +0x40,0x10,0x40,0x0B,0xD1,0x01,0x98,0x02,0xF0,0xB8,0xF9,0xF8,0x71,0xF8,0x79, \ 
+0x00,0x28,0x04,0xD1,0x03,0x21,0x03,0x20,0x00,0xF0,0x28,0xFC,0xB4,0xE7,0xC0, \ +0x20,0xFE,0xF7,0x58,0xF8,0x00,0x90,0x04,0x20,0xFC,0xF7,0x4C,0xF8,0x2E,0x49, \ +0x00,0x20,0x0A,0x5C,0x2E,0x4B,0x1B,0x18,0x01,0x30,0x00,0x04,0x00,0x0C,0x04, \ +0x28,0x1A,0x74,0xF6,0xDB,0x2B,0x48,0x2B,0x49,0x00,0x88,0x00,0x23,0x0E,0x22, \ +0x08,0x80,0x04,0x21,0x29,0x48,0x01,0xF0,0xC7,0xFE,0x01,0x21,0xB1,0x71,0x28, \ +0x48,0x04,0x21,0x01,0x75,0x00,0x20,0xA0,0x72,0x06,0x22,0x29,0x1C,0x25,0x48, \ +0xFE,0xF7,0x73,0xFD,0xA9,0x1D,0x20,0x22,0x24,0x48,0xFE,0xF7,0x6E,0xFD,0xB8, \ +0x7B,0x23,0x49,0x48,0x71,0x00,0x20,0x70,0x70,0x02,0x99,0x20,0x23,0x88,0x71, \ +0xB8,0x79,0x20,0x49,0x88,0x74,0xF8,0x79,0xC8,0x74,0x28,0x8D,0x88,0x82,0x68, \ +0x8D,0xC8,0x82,0xA8,0x8D,0x08,0x83,0x1C,0x48,0x01,0x78,0x19,0x43,0x01,0x70, \ +0x01,0x21,0xE1,0x70,0x02,0x98,0x41,0x71,0xA0,0x78,0x01,0x28,0x02,0xD1,0x00, \ +0x20,0x03,0xF0,0xDD,0xFA,0x00,0x20,0xA0,0x70,0x00,0x98,0xFE,0xF7,0x08,0xF8, \ +0x01,0x20,0x30,0x70,0x08,0x21,0x03,0x20,0x00,0xF0,0xCE,0xFB,0x5A,0xE7,0x00, \ +0x00,0xD8,0x09,0x00,0x02,0xB0,0x09,0x00,0x02,0xB4,0x00,0x00,0x02,0x18,0x01, \ +0x00,0x02,0x6C,0x02,0x00,0x02,0x8A,0x02,0x00,0x02,0x00,0x00,0x00,0x02,0x3C, \ +0x01,0x00,0x02,0x84,0x00,0x00,0x02,0x10,0x00,0x00,0x02,0x50,0x09,0x00,0x02, \ +0x00,0x01,0x00,0x02,0xE0,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0xC4,0x00,0x00, \ +0x02,0x9C,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x41,0x49,0x40,0x4E,0x01,0x91, \ +0x48,0x7A,0xF4,0x1D,0x19,0x34,0x00,0x28,0x3F,0x4F,0x13,0xD0,0xF8,0x79,0x00, \ +0x28,0x05,0xD1,0x03,0x21,0x04,0x20,0x00,0xF0,0x9A,0xFB,0x02,0xB0,0xF0,0xBD, \ +0xE0,0x79,0x3A,0x49,0x40,0x18,0x40,0x7B,0x00,0x28,0x13,0xD1,0x03,0x21,0x04, \ +0x20,0x00,0xF0,0x8E,0xFB,0xF2,0xE7,0xF8,0x7A,0x35,0x49,0x40,0x00,0x08,0x5A, \ +0xE1,0x79,0x01,0x22,0x01,0x39,0x8A,0x40,0x10,0x40,0x04,0xD1,0x03,0x21,0x04, \ +0x20,0x00,0xF0,0x7F,0xFB,0xE3,0xE7,0xC0,0x20,0xFD,0xF7,0xAF,0xFF,0x00,0x90, \ +0xA0,0x79,0x2D,0x4D,0x02,0x28,0x02,0xD1,0x03,0x20,0xA8,0x71,0x03,0xE0,0x01, \ +0x28,0x40,0xD1,0x04,0x20,0xA8,0x71,0x04,0x20,0xFB,0xF7,0x98,0xFF,0x27,0x49, \ +0x00,0x20,0x88,0x70,0xA0,0x79,0x26,0x49,0x06,0x22,0x88,0x70,0x08,0x1F,0x31, \ +0x1C,0xFE,0xF7,0xD5,0xFC,0xB1,0x1D,0x20,0x22,0x23,0x48,0xFE,0xF7,0xD0,0xFC, \ +0xA0,0x7A,0x1F,0x49,0x48,0x71,0x00,0x20,0x68,0x70,0x01,0x99,0x88,0x71,0x08, \ +0x21,0x04,0x20,0x00,0xF0,0x51,0xFB,0x01,0x20,0xF8,0x70,0x01,0x99,0x48,0x71, \ +0xB8,0x78,0x01,0x28,0x02,0xD1,0x00,0x20,0x03,0xF0,0x4B,0xFA,0x00,0x20,0xB8, \ +0x70,0x17,0x48,0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78,0x10,0x23, \ +0x99,0x43,0x01,0x70,0x00,0x98,0xFD,0xF7,0x6D,0xFF,0x30,0x8D,0x81,0x02,0x04, \ +0x20,0xFB,0xF7,0x46,0xFF,0xE0,0x79,0x03,0xF0,0xA9,0xF9,0x95,0xE7,0x03,0x21, \ +0x04,0x20,0x00,0xF0,0x2C,0xFB,0x00,0x98,0xFD,0xF7,0x5D,0xFF,0x8D,0xE7,0xD8, \ +0x09,0x00,0x02,0xB4,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0x18,0x01,0x00,0x02, \ +0x6C,0x02,0x00,0x02,0xA0,0x09,0x00,0x02,0x08,0x01,0x00,0x02,0x04,0x01,0x00, \ +0x02,0xE0,0x00,0x00,0x02,0x9C,0x01,0x00,0x02,0xF0,0xB5,0x25,0x48,0x10,0x23, \ +0x01,0x78,0x22,0x4C,0x99,0x43,0x01,0x70,0x01,0x78,0x20,0x23,0x99,0x43,0x01, \ +0x70,0x21,0x48,0x21,0x49,0xC0,0x7A,0x40,0x00,0x09,0x5A,0xE7,0x18,0xF8,0x79, \ +0x01,0x25,0x42,0x1E,0x2B,0x1C,0x93,0x40,0x19,0x40,0x04,0xD1,0x03,0x21,0x05, \ +0x20,0x00,0xF0,0xF8,0xFA,0xF0,0xBD,0xB9,0x79,0x01,0x29,0x04,0xD0,0x03,0x21, \ +0x05,0x20,0x00,0xF0,0xF0,0xFA,0xF0,0xBD,0x03,0xF0,0x65,0xF9,0xC0,0x20,0xFD, \ +0xF7,0x1E,0xFF,0x06,0x1C,0x38,0x7A,0x12,0x4F,0x78,0x71,0x12,0x48,0xC1,0x1D, \ +0x39,0x31,0x8D,0x70,0xA1,0x1D,0x1C,0x30,0x0C,0x1C,0x7A,0x79,0xFE,0xF7,0x50, \ 
+0xFC,0x7A,0x79,0x0E,0x4F,0x21,0x1C,0xF8,0x1D,0x0D,0x30,0xFE,0xF7,0x49,0xFC, \ +0x00,0x20,0xF9,0x1D,0x29,0x31,0x88,0x71,0x00,0xF0,0x13,0xF8,0x30,0x1C,0xFD, \ +0xF7,0x00,0xFF,0xF0,0xBD,0x00,0x00,0xD8,0x09,0x00,0x02,0x9C,0x01,0x00,0x02, \ +0xC0,0x09,0x00,0x02,0x6C,0x02,0x00,0x02,0x08,0x01,0x00,0x02,0xC4,0x00,0x00, \ +0x02,0x84,0x00,0x00,0x02,0xF0,0xB5,0xFA,0xF7,0xF9,0xF8,0xFE,0xF7,0xC9,0xFC, \ +0xFA,0xF7,0xF5,0xF8,0x2C,0x4F,0x02,0x21,0xB9,0x73,0x00,0x21,0xF9,0x73,0x38, \ +0x74,0x01,0x0A,0x79,0x74,0x01,0x0C,0x00,0x0E,0xB9,0x74,0x27,0x4E,0xF8,0x74, \ +0xF9,0x1D,0x07,0x31,0xF0,0x1D,0x06,0x22,0x35,0x30,0xFE,0xF7,0x15,0xFC,0x24, \ +0x4C,0x01,0x25,0xF8,0x1D,0x29,0x30,0x25,0x75,0x05,0x71,0x22,0x48,0xF1,0x1D, \ +0x42,0x79,0xF8,0x1D,0x0D,0x30,0x15,0x31,0xFE,0xF7,0x07,0xFC,0x1F,0x48,0x1F, \ +0x4A,0x00,0x21,0x53,0x5C,0x46,0x18,0x01,0x31,0x04,0x29,0x33,0x74,0xF9,0xD3, \ +0x1C,0x49,0x00,0x23,0x09,0x88,0x39,0x80,0x02,0x7D,0x04,0x21,0x10,0x30,0x01, \ +0xF0,0x3D,0xFD,0x19,0x48,0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01,0x78, \ +0x10,0x23,0x19,0x43,0x01,0x70,0x10,0x48,0x85,0x70,0xFB,0xF7,0x62,0xFE,0x39, \ +0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0xFB,0xF7,0x80,0xFE,0xE0,0x1D,0x49,0x30, \ +0x45,0x70,0x05,0x21,0x81,0x71,0x0E,0x48,0x01,0x68,0x0E,0x48,0xC2,0x69,0x11, \ +0x43,0xC1,0x61,0x0D,0x48,0x01,0x21,0x05,0x70,0x05,0x20,0x00,0xF0,0x5D,0xFA, \ +0xF0,0xBD,0x84,0x00,0x00,0x02,0xC4,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0x08, \ +0x01,0x00,0x02,0x00,0x00,0x00,0x02,0x8A,0x02,0x00,0x02,0x3C,0x01,0x00,0x02, \ +0x9C,0x01,0x00,0x02,0xAC,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x3E,0x01,0x00, \ +0x02,0xF0,0xB5,0x54,0x4F,0x54,0x4E,0xFC,0x1D,0xF9,0x1D,0x09,0x31,0x59,0x34, \ +0x0D,0x1C,0xF0,0x1D,0x0D,0x30,0x22,0x79,0xFE,0xF7,0xAD,0xFB,0x22,0x79,0x29, \ +0x1C,0x4F,0x48,0xFE,0xF7,0xA8,0xFB,0x20,0x79,0x4E,0x49,0x4E,0x4A,0x48,0x71, \ +0xB8,0x7B,0x00,0x28,0x03,0xD1,0x10,0x70,0xF0,0x72,0x50,0x70,0x08,0xE0,0x01, \ +0x20,0x10,0x70,0xF0,0x72,0xF8,0x7B,0xD1,0x1D,0x39,0x31,0x50,0x70,0xF8,0x78, \ +0x08,0x70,0x00,0x25,0x0D,0x20,0x68,0x43,0xC1,0x19,0x43,0x4A,0x30,0x31,0x80, \ +0x18,0x0D,0x22,0x0C,0x30,0xFE,0xF7,0x89,0xFB,0x01,0x35,0x04,0x2D,0xF2,0xD3, \ +0x60,0x79,0x00,0x28,0x03,0xD0,0x3C,0x49,0x01,0x20,0x48,0x72,0x02,0xE0,0x3A, \ +0x49,0x00,0x20,0x48,0x72,0x78,0x7B,0x3A,0x49,0x0E,0x28,0x02,0xDC,0x01,0x28, \ +0x00,0xDB,0x08,0x75,0xB8,0x78,0x37,0x4A,0x10,0x74,0x38,0x7B,0x01,0x28,0x02, \ +0xD1,0x32,0x4B,0xD8,0x70,0x02,0xE0,0x30,0x4B,0x00,0x20,0xD8,0x70,0xF8,0x88, \ +0x10,0x81,0xB8,0x88,0x50,0x81,0x38,0x78,0x2D,0x4A,0xD0,0x70,0xE0,0x88,0x2F, \ +0x4A,0x30,0x80,0x00,0x20,0x3B,0x18,0x1C,0x7A,0x0D,0x18,0x2C,0x74,0x1B,0x7A, \ +0x13,0x54,0x01,0x30,0x04,0x28,0xF6,0xD3,0x30,0x88,0x29,0x4A,0x00,0x23,0x10, \ +0x80,0xC8,0x1D,0x09,0x30,0x0F,0x1C,0x0E,0x22,0x04,0x21,0x01,0xF0,0x92,0xFC, \ +0x00,0xF0,0xFE,0xF8,0x24,0x4C,0x25,0x49,0xE0,0x1D,0x69,0x30,0xC0,0x7A,0x08, \ +0x5C,0x38,0x75,0x23,0x4F,0x38,0x78,0x02,0x28,0x28,0xD1,0x02,0xF0,0x09,0xFC, \ +0x03,0xF0,0xE0,0xF9,0x17,0x49,0x88,0x78,0x00,0x28,0x07,0xD0,0xFB,0xF7,0xAD, \ +0xFD,0x31,0x88,0x89,0x02,0x09,0x1A,0x06,0x20,0xFB,0xF7,0xCB,0xFD,0x01,0x20, \ +0x00,0xF0,0xD2,0xF9,0x02,0xF0,0xA8,0xFB,0x01,0x20,0xF9,0xF7,0xF5,0xFF,0x01, \ +0x20,0x80,0x06,0x80,0x69,0xFE,0xF7,0xBE,0xFB,0xFB,0xF7,0x8E,0xFD,0xFB,0xF7, \ +0x0E,0xFA,0xFE,0xF7,0x9A,0xFB,0x80,0x06,0x80,0x0E,0xA0,0x62,0x01,0x20,0x38, \ +0x70,0xF0,0xBD,0x02,0xF0,0x92,0xFB,0xF0,0xBD,0x00,0x00,0xD8,0x09,0x00,0x02, \ +0x84,0x00,0x00,0x02,0xE0,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x1C,0x00,0x00, \ +0x02,0x00,0x00,0x00,0x02,0xC4,0x00,0x00,0x02,0x8A,0x02,0x00,0x02,0x3C,0x01, \ 
+0x00,0x02,0x50,0x09,0x00,0x02,0x80,0x02,0x00,0x02,0x3F,0x01,0x00,0x02,0x80, \ +0xB5,0x1F,0x49,0x1D,0x4A,0x0F,0x68,0x0E,0x2F,0x27,0xD2,0x01,0xA3,0xDB,0x5D, \ +0x5B,0x00,0x9F,0x44,0x23,0x06,0x0B,0x0E,0x11,0x13,0x16,0x23,0x23,0x1F,0x22, \ +0x23,0x19,0x1C,0x06,0x23,0xFF,0x20,0x01,0x30,0x8B,0x60,0x17,0xE0,0xFF,0x20, \ +0x41,0x30,0x14,0xE0,0xFF,0x20,0x51,0x30,0x11,0xE0,0x0B,0x20,0x0F,0xE0,0xFF, \ +0x20,0x31,0x30,0x0C,0xE0,0xFF,0x20,0x11,0x30,0x09,0xE0,0xFF,0x20,0x21,0x30, \ +0x06,0xE0,0xFF,0x20,0x61,0x30,0x03,0xE0,0xFF,0x20,0x71,0x30,0x00,0xE0,0x00, \ +0x20,0x01,0x23,0x4B,0x60,0x89,0x68,0x00,0xF0,0xDC,0xF9,0x04,0x21,0x0C,0x20, \ +0x00,0xF0,0x4D,0xF9,0x0F,0x20,0x00,0x06,0x81,0x88,0x03,0x4B,0x19,0x43,0x81, \ +0x80,0x80,0xBD,0x64,0x0A,0x00,0x02,0xA0,0x02,0x00,0x02,0x08,0x08,0x00,0x00, \ +0xB0,0xB5,0x0D,0x4D,0x00,0x24,0xE8,0x1D,0x49,0x30,0x0C,0x4F,0x04,0x70,0xF8, \ +0x7C,0x02,0xF0,0xAE,0xFF,0xE8,0x1D,0x69,0x30,0x84,0x72,0x38,0x8B,0x81,0x02, \ +0x04,0x20,0xFB,0xF7,0x40,0xFD,0xB8,0x7C,0x00,0x28,0x03,0xD1,0x01,0x20,0xA8, \ +0x77,0x00,0x05,0xB0,0xBD,0x20,0x1C,0xB0,0xBD,0x00,0x00,0x50,0x09,0x00,0x02, \ +0xC4,0x00,0x00,0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x49,0x32,0x91,0x70,0x01, \ +0x21,0x81,0x77,0x10,0x20,0xF7,0x46,0x00,0x00,0x50,0x09,0x00,0x02,0x03,0x48, \ +0x00,0x21,0xC1,0x73,0x01,0x21,0x81,0x73,0x08,0x07,0xF7,0x46,0x00,0x00,0x60, \ +0x09,0x00,0x02,0x04,0x48,0x00,0x21,0xC2,0x1D,0x49,0x32,0x51,0x71,0x01,0x21, \ +0x81,0x77,0x08,0x05,0xF7,0x46,0x00,0x00,0x50,0x09,0x00,0x02,0xB0,0xB5,0x04, \ +0x20,0xFB,0xF7,0x26,0xFD,0x0F,0x48,0xC7,0x1D,0x49,0x37,0xB9,0x79,0x01,0x29, \ +0x16,0xD1,0x03,0x21,0x70,0x30,0x81,0x72,0x00,0x25,0x0B,0x4C,0x7D,0x71,0xE0, \ +0x7C,0x01,0xF0,0x73,0xFE,0x00,0x28,0x07,0xD1,0x3D,0x70,0x02,0x20,0xB8,0x71, \ +0x01,0x21,0x03,0x20,0x00,0xF0,0xE2,0xF8,0xB0,0xBD,0x01,0x21,0x39,0x70,0xE0, \ +0x74,0xB0,0xBD,0x02,0xF0,0x3B,0xFA,0xB0,0xBD,0x50,0x09,0x00,0x02,0xC4,0x00, \ +0x00,0x02,0x16,0x49,0xC9,0x7D,0x40,0x29,0x21,0xD0,0x0E,0xDC,0x30,0x29,0x1A, \ +0xD0,0x05,0xDC,0x10,0x29,0x15,0xD0,0x20,0x29,0x10,0xD1,0x01,0x20,0x0E,0xE0, \ +0x31,0x29,0x13,0xD0,0x32,0x29,0x0A,0xD1,0x04,0x20,0x08,0xE0,0x41,0x29,0x11, \ +0xD0,0x50,0x29,0x11,0xD0,0x51,0x29,0x11,0xD0,0x52,0x29,0x00,0xD1,0x09,0x20, \ +0x09,0x49,0xC8,0x72,0xF7,0x46,0x00,0x20,0xFA,0xE7,0x02,0x20,0xF8,0xE7,0x03, \ +0x20,0xF6,0xE7,0x05,0x20,0xF4,0xE7,0x06,0x20,0xF2,0xE7,0x07,0x20,0xF0,0xE7, \ +0x08,0x20,0xEE,0xE7,0x00,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0xF0,0xB5,0x1E, \ +0x4D,0x01,0x24,0x28,0x78,0x01,0x28,0x30,0xD1,0x1C,0x4C,0x1D,0x49,0xE0,0x7A, \ +0x1D,0x4E,0x08,0x5C,0x30,0x75,0xC0,0x20,0xFD,0xF7,0xC9,0xFC,0x07,0x1C,0x1A, \ +0x48,0x01,0x78,0x02,0x29,0x06,0xD1,0x01,0x21,0x01,0x70,0x30,0x7D,0x02,0xF0, \ +0x0D,0xFE,0x20,0x73,0x0D,0xE0,0x07,0x20,0x40,0x06,0xC1,0x69,0x10,0x23,0x99, \ +0x43,0xC1,0x61,0x13,0x48,0x01,0x21,0x41,0x71,0x00,0x20,0x02,0xF0,0x81,0xFF, \ +0x00,0x20,0xA0,0x70,0x20,0x7B,0x01,0x28,0x01,0xD1,0x00,0x20,0x28,0x70,0x20, \ +0x7B,0x01,0x21,0x00,0x28,0x00,0xD1,0x05,0x21,0x38,0x1C,0x0C,0x1C,0xFD,0xF7, \ +0xA1,0xFC,0x21,0x06,0x09,0x0E,0x06,0x20,0x00,0xF0,0x68,0xF8,0xF0,0xBD,0x00, \ +0x00,0xBB,0x02,0x00,0x02,0xC0,0x09,0x00,0x02,0x80,0x02,0x00,0x02,0x00,0x00, \ +0x00,0x02,0xBA,0x02,0x00,0x02,0xB4,0x00,0x00,0x02,0x00,0xB5,0x12,0x48,0x01, \ +0x78,0x0D,0x29,0x1A,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C, \ +0x16,0x07,0x16,0x07,0x07,0x07,0x0B,0x0E,0x16,0x16,0x07,0x07,0x07,0x00,0x0B, \ +0x49,0x01,0x20,0x08,0x70,0x00,0xBD,0xFF,0xF7,0x9F,0xFF,0x00,0xBD,0x08,0x49, \ +0x02,0x20,0x08,0x70,0x08,0x21,0x07,0x20,0x00,0xF0,0x3B,0xF8,0x00,0xBD,0x00, \ 
+0x78,0x02,0x21,0x00,0xF0,0x36,0xF8,0x00,0xBD,0x00,0x00,0xD0,0x09,0x00,0x02, \ +0xDD,0x01,0x00,0x02,0x40,0x01,0x00,0x02,0x00,0xB5,0x15,0x48,0x01,0x78,0x0D, \ +0x29,0x20,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x1C,0x07, \ +0x1C,0x0A,0x0D,0x13,0x1C,0x1C,0x1C,0x1C,0x10,0x16,0x19,0x00,0xFF,0xF7,0x05, \ +0xFB,0x00,0xBD,0xFF,0xF7,0x84,0xFB,0x00,0xBD,0xFF,0xF7,0x69,0xFC,0x00,0xBD, \ +0x00,0xF0,0xD0,0xFD,0x00,0xBD,0xFF,0xF7,0xFB,0xFC,0x00,0xBD,0xFF,0xF7,0xD0, \ +0xFA,0x00,0xBD,0xFF,0xF7,0x85,0xFE,0x00,0xBD,0x00,0x78,0x02,0x21,0x00,0xF0, \ +0x04,0xF8,0x00,0xBD,0x00,0x00,0xD0,0x09,0x00,0x02,0x04,0x4A,0x10,0x70,0x04, \ +0x48,0x01,0x70,0x04,0x48,0x00,0x21,0x01,0x70,0x41,0x70,0xF7,0x46,0x00,0x00, \ +0x5C,0x02,0x00,0x02,0x5D,0x02,0x00,0x02,0xD0,0x09,0x00,0x02,0x04,0x48,0x00, \ +0x21,0xC2,0x1D,0x69,0x32,0x51,0x70,0x01,0x21,0x81,0x77,0x08,0x02,0xF7,0x46, \ +0x00,0x00,0x50,0x09,0x00,0x02,0x80,0xB5,0x0F,0x4F,0x01,0x28,0x03,0xD1,0xF9, \ +0xF7,0x1E,0xFE,0xF8,0x62,0x38,0x63,0x0C,0x48,0x01,0x21,0x80,0x89,0x0C,0x4A, \ +0xB8,0x87,0x39,0x72,0x79,0x72,0x39,0x73,0x00,0x20,0x38,0x74,0x38,0x60,0xB8, \ +0x72,0xF8,0x72,0x10,0x70,0xB9,0x73,0x79,0x60,0x06,0x49,0xCA,0x7A,0x06,0x49, \ +0xCA,0x70,0x88,0x70,0x08,0x70,0x80,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0xC4, \ +0x00,0x00,0x02,0xE8,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0xD0,0x01,0x00,0x02, \ +0xB0,0xB5,0xF3,0x25,0x2D,0x05,0x07,0x1C,0xA8,0x68,0x06,0x20,0xE8,0x60,0x0C, \ +0x1C,0x28,0x69,0x80,0x08,0xFC,0xD3,0x0A,0x20,0xF9,0xF7,0xDA,0xFD,0xA8,0x68, \ +0x78,0x09,0x08,0x23,0x18,0x40,0x02,0x23,0x18,0x43,0xE8,0x60,0x28,0x69,0x80, \ +0x08,0xFC,0xD3,0x38,0x06,0x00,0x0E,0xE8,0x60,0x28,0x69,0x80,0x08,0xFC,0xD3, \ +0xA8,0x68,0x20,0x06,0x00,0x0E,0xE8,0x60,0x28,0x69,0x80,0x08,0xFC,0xD3,0xA8, \ +0x68,0xB0,0xBD,0xF0,0xB5,0x14,0x1C,0x0D,0x1C,0x07,0x1C,0xFC,0xF7,0xDF,0xFF, \ +0x00,0x26,0x00,0x2F,0x10,0xD9,0xFD,0xF7,0x2C,0xF8,0x40,0x08,0xFB,0xD2,0x28, \ +0x20,0xF9,0xF7,0xB1,0xFD,0xA9,0x5D,0xA0,0x19,0xFF,0xF7,0xC5,0xFF,0x28,0x20, \ +0xF9,0xF7,0xAA,0xFD,0x01,0x36,0xBE,0x42,0xEE,0xD3,0xFC,0xF7,0xE7,0xFF,0x00, \ +0x20,0xF0,0xBD,0xF7,0xB5,0x85,0xB0,0x62,0x4D,0x60,0x4F,0x28,0x68,0x00,0x28, \ +0x75,0xD0,0x68,0x68,0x01,0x28,0x73,0xD1,0x00,0x21,0x0F,0x20,0x00,0x06,0x81, \ +0x80,0x81,0x81,0x01,0x88,0x00,0x89,0x28,0x68,0x04,0x28,0x1B,0xD1,0x5A,0x4C, \ +0xFC,0xF7,0xB0,0xFF,0x07,0x22,0x03,0x21,0x20,0x1C,0xFD,0xF7,0x12,0xF8,0xFC, \ +0xF7,0xC7,0xFF,0x20,0x79,0x00,0x28,0x05,0xD1,0x60,0x79,0x00,0x28,0x02,0xD1, \ +0xA0,0x79,0x00,0x28,0x08,0xD0,0x00,0x21,0x21,0x71,0x61,0x71,0xA1,0x71,0x07, \ +0x22,0x03,0x20,0x4E,0x49,0xFF,0xF7,0xB2,0xFF,0x4A,0x49,0x4A,0x4D,0xCA,0x1D, \ +0x28,0x68,0x23,0x32,0x04,0x92,0xCA,0x1D,0x2E,0x32,0x03,0x92,0xCA,0x1D,0x4A, \ +0x32,0x02,0x92,0xCA,0x1D,0xCD,0x1D,0xCC,0x1D,0xCE,0x1D,0x0E,0x31,0x3C,0x32, \ +0x2D,0x36,0x27,0x34,0x15,0x35,0x0A,0x28,0x01,0x92,0x00,0x91,0x38,0xD1,0x0B, \ +0x22,0x04,0x20,0x04,0x99,0xFF,0xF7,0x94,0xFF,0xFF,0x22,0x06,0x20,0x21,0x1C, \ +0x01,0x32,0xFF,0xF7,0x8E,0xFF,0xFF,0x22,0x0E,0x20,0x29,0x1C,0x11,0x32,0xFF, \ +0xF7,0x88,0xFF,0xFF,0x22,0x01,0x20,0x31,0x1C,0x31,0x32,0xFF,0xF7,0x82,0xFF, \ +0xFF,0x22,0x0E,0x20,0x39,0x1C,0x41,0x32,0xFF,0xF7,0x7C,0xFF,0xFF,0x22,0x0E, \ +0x20,0x51,0x32,0x00,0x99,0xFF,0xF7,0x76,0xFF,0xFF,0x22,0x0E,0x20,0x71,0x32, \ +0x01,0x99,0xFF,0xF7,0x70,0xFF,0xFF,0x22,0x0E,0x20,0x21,0x32,0x02,0x99,0xFF, \ +0xF7,0x6A,0xFF,0xFF,0x22,0x0E,0x20,0x61,0x32,0x03,0x99,0x01,0xE0,0x44,0xE0, \ +0x43,0xE0,0xFF,0xF7,0x61,0xFF,0x04,0xE0,0x06,0x98,0x05,0x9A,0x39,0x1C,0xFF, \ +0xF7,0x5B,0xFF,0xFC,0xF7,0x3E,0xFF,0x06,0x22,0xFF,0x21,0x20,0x1C,0x01,0x31, \ 
+0xFC,0xF7,0x9F,0xFF,0x04,0x22,0x0B,0x21,0x04,0x98,0xFC,0xF7,0x9A,0xFF,0x0E, \ +0x22,0xFF,0x21,0x28,0x1C,0x11,0x31,0xFC,0xF7,0x94,0xFF,0x01,0x22,0xFF,0x21, \ +0x30,0x1C,0x31,0x31,0xFC,0xF7,0x8E,0xFF,0x0E,0x22,0xFF,0x21,0x38,0x1C,0x41, \ +0x31,0xFC,0xF7,0x88,0xFF,0x0E,0x22,0xFF,0x21,0x51,0x31,0x00,0x98,0xFC,0xF7, \ +0x82,0xFF,0x0E,0x22,0xFF,0x21,0x71,0x31,0x01,0x98,0xFC,0xF7,0x7C,0xFF,0x0E, \ +0x22,0xFF,0x21,0x21,0x31,0x02,0x98,0xFC,0xF7,0x76,0xFF,0x0E,0x22,0xFF,0x21, \ +0x61,0x31,0x03,0x98,0xFC,0xF7,0x70,0xFF,0xFC,0xF7,0x25,0xFF,0x03,0x4D,0x00, \ +0x21,0x29,0x60,0x08,0xB0,0xF0,0xBD,0x64,0x0A,0x00,0x02,0xA0,0x02,0x00,0x02, \ +0x00,0x72,0x01,0x02,0x04,0x72,0x01,0x02,0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05, \ +0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20, \ +0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x06,0x49,0x00, \ +0x68,0x10,0x23,0x08,0x73,0x38,0x7B,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00,0x00, \ +0x70,0x03,0x00,0x0D,0x34,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0B, \ +0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x01,0x2A,0x01,0xD1,0x03,0x2B, \ +0x03,0xD0,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x04, \ +0x49,0x00,0x20,0x08,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF3,0xE7, \ +0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x80,0xB4,0x0C,0x4F,0x00,0x28,0x05, \ +0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B,0x03,0xD1,0x20,0x20, \ +0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05,0x48,0x00,0x21,0x01, \ +0x73,0x01,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73,0xF2,0xE7,0x00,0x00, \ +0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x00,0xB5,0x00,0x28,0x05,0xD1,0x00, \ +0x29,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03,0x2B,0x03,0xD0,0x06,0x49,0x20,0x20, \ +0x08,0x73,0x00,0xBD,0xFC,0xF7,0xA1,0xFC,0x04,0x49,0x00,0x20,0x08,0x80,0x03, \ +0x49,0x08,0x80,0x00,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0x48,0x02,0x00,0x02, \ +0x4A,0x02,0x00,0x02,0xB0,0xB4,0x20,0x25,0x00,0x28,0x18,0x4C,0x03,0xD1,0x02, \ +0x2A,0x01,0xD1,0x01,0x2B,0x02,0xD1,0x25,0x73,0xB0,0xBC,0xF7,0x46,0x08,0x06, \ +0x00,0x0E,0x02,0x2B,0x05,0xD1,0x00,0x28,0x01,0xD0,0x25,0x73,0xF5,0xE7,0x00, \ +0x27,0x10,0xE0,0x03,0x2B,0x0E,0xD1,0x00,0x28,0x08,0xD0,0x02,0x28,0x08,0xD0, \ +0x80,0x28,0x04,0xD0,0x85,0x28,0x11,0xD1,0x0A,0x48,0x07,0x88,0x03,0xE0,0x00, \ +0x27,0x01,0xE0,0x09,0x48,0x07,0x88,0x80,0x20,0x20,0x73,0x08,0x48,0x00,0x21, \ +0x07,0x73,0x01,0x73,0x20,0x7B,0x10,0x23,0x18,0x43,0x20,0x73,0xD7,0xE7,0x25, \ +0x73,0xD5,0xE7,0x70,0x03,0x00,0x0D,0x4A,0x02,0x00,0x02,0x48,0x02,0x00,0x02, \ +0x30,0x03,0x00,0x0D,0x90,0xB5,0x20,0x27,0x00,0x28,0x0C,0x4C,0x03,0xD1,0x00, \ +0x2A,0x01,0xD1,0x03,0x2B,0x01,0xD0,0x27,0x73,0x90,0xBD,0x09,0x06,0x09,0x0E, \ +0x01,0x20,0x02,0x29,0x04,0xD0,0x85,0x29,0x07,0xD1,0x05,0x49,0x08,0x80,0x01, \ +0xE0,0x05,0x49,0x08,0x80,0xFC,0xF7,0x3F,0xFC,0x90,0xBD,0x27,0x73,0x90,0xBD, \ +0x70,0x03,0x00,0x0D,0x4A,0x02,0x00,0x02,0x48,0x02,0x00,0x02,0x80,0xB4,0x0C, \ +0x4F,0x00,0x28,0x05,0xD1,0x00,0x29,0x03,0xD1,0x02,0x2A,0x01,0xD1,0x01,0x2B, \ +0x03,0xD1,0x20,0x20,0x38,0x73,0x80,0xBC,0xF7,0x46,0x80,0x20,0x38,0x73,0x05, \ +0x48,0x00,0x21,0x01,0x73,0x01,0x73,0x38,0x7B,0x10,0x23,0x18,0x43,0x38,0x73, \ +0xF2,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x30,0x03,0x00,0x0D,0x00,0xB5,0x02, \ +0x28,0x03,0xD1,0x0A,0x29,0x26,0xD1,0x16,0x4B,0x24,0xE0,0x04,0x28,0x01,0xD1, \ +0x15,0x4B,0x20,0xE0,0x05,0x28,0x01,0xD1,0x14,0x4B,0x1C,0xE0,0x00,0x28,0x1A, \ +0xD1,0x0A,0x29,0x17,0xD2,0x02,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x00,0x1C, \ +0x13,0x05,0x07,0x09,0x13,0x0B,0x0D,0x0F,0x13,0x11,0x0D,0x4B,0x0C,0xE0,0x0D, \ 
+0x4B,0x0A,0xE0,0x0D,0x4B,0x08,0xE0,0x0D,0x4B,0x06,0xE0,0x0D,0x4B,0x04,0xE0, \ +0x0D,0x4B,0x02,0xE0,0x0D,0x4B,0x00,0xE0,0x0D,0x4B,0x0D,0x49,0x98,0x18,0x08, \ +0x60,0x00,0xF0,0x5B,0xF8,0x00,0xBD,0x64,0x0A,0x00,0x02,0xD0,0x02,0x00,0x02, \ +0xE4,0x0A,0x00,0x02,0x08,0x01,0x00,0x02,0x60,0x00,0x00,0x02,0xC4,0x00,0x00, \ +0x02,0x84,0x00,0x00,0x02,0x1C,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x18,0x01, \ +0x00,0x02,0x14,0x01,0x00,0x02,0x58,0x02,0x00,0x02,0x80,0xB4,0x17,0x1C,0x00, \ +0x22,0x01,0x2F,0x17,0x4B,0x23,0xD1,0x02,0x28,0x10,0xD1,0x16,0x48,0x87,0x79, \ +0xC0,0x79,0x00,0x02,0x07,0x43,0x08,0x29,0x07,0xD0,0x14,0x48,0x87,0x60,0x0C, \ +0x27,0x1F,0x70,0x5A,0x70,0x9A,0x70,0x01,0x60,0x42,0x60,0x80,0xBC,0xF7,0x46, \ +0x06,0x28,0xFB,0xD1,0x0F,0x48,0x00,0x78,0x01,0x28,0xF7,0xD1,0xFF,0x20,0x0D, \ +0x21,0x09,0x06,0x43,0x30,0x88,0x80,0x0B,0x49,0x01,0x20,0x08,0x71,0x0B,0x49, \ +0x08,0x70,0xEC,0xE7,0x18,0x79,0x18,0x70,0x5A,0x70,0x9A,0x70,0x18,0x78,0x0A, \ +0x28,0xE5,0xD1,0x07,0x48,0x02,0x70,0xE2,0xE7,0xD0,0x09,0x00,0x02,0x38,0x02, \ +0x00,0x02,0xA0,0x02,0x00,0x02,0x3A,0x01,0x00,0x02,0xE0,0x03,0x00,0x0D,0x3B, \ +0x01,0x00,0x02,0x9C,0x01,0x00,0x02,0x90,0xB4,0x1A,0x4A,0x80,0x20,0x10,0x73, \ +0x19,0x49,0x1A,0x48,0x0B,0x88,0x07,0x88,0xBB,0x42,0x11,0xD1,0x11,0x7B,0xC9, \ +0x09,0x09,0xD2,0x00,0x88,0x40,0x07,0x03,0xD0,0xE0,0x20,0x10,0x73,0x90,0xBC, \ +0xF7,0x46,0xD0,0x20,0x10,0x73,0xFA,0xE7,0x10,0x7B,0x20,0x23,0x18,0x43,0x10, \ +0x73,0xF5,0xE7,0x00,0x88,0x0B,0x88,0xC0,0x1A,0x08,0x28,0x00,0xD9,0x08,0x20, \ +0x0B,0x88,0x1B,0x18,0x0B,0x80,0x00,0x28,0x08,0xD0,0x0A,0x4B,0x0A,0x49,0x0F, \ +0x68,0x3C,0x78,0x01,0x37,0x0F,0x60,0x1C,0x73,0x01,0x38,0xF8,0xD1,0x10,0x7B, \ +0x10,0x23,0x18,0x43,0x10,0x73,0xDC,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x56, \ +0x02,0x00,0x02,0x54,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x58,0x02,0x00,0x02, \ +0x90,0xB5,0x20,0x24,0x00,0x28,0x0B,0x4F,0x03,0xD1,0x00,0x2A,0x01,0xD1,0x03, \ +0x2B,0x01,0xD0,0x3C,0x73,0x90,0xBD,0x08,0x06,0x00,0x0E,0x01,0xD0,0x80,0x28, \ +0x01,0xD1,0x3C,0x73,0x90,0xBD,0x04,0x48,0x00,0x79,0x00,0xF0,0x3A,0xF8,0x60, \ +0x20,0x38,0x73,0x90,0xBD,0x00,0x00,0x70,0x03,0x00,0x0D,0x38,0x02,0x00,0x02, \ +0xB0,0xB4,0x13,0x48,0x01,0x2B,0x03,0xD1,0x20,0x21,0x01,0x73,0xB0,0xBC,0xF7, \ +0x46,0x10,0x49,0x00,0x23,0x0D,0x78,0x02,0x22,0x0F,0x4C,0x10,0x4F,0x01,0x2D, \ +0x02,0xD0,0x0D,0x78,0x02,0x2D,0x02,0xD1,0x0A,0x70,0x3B,0x70,0x23,0x70,0x80, \ +0x21,0x01,0x73,0x0B,0x49,0x01,0x25,0x0D,0x73,0x0B,0x73,0x0A,0x73,0x0B,0x73, \ +0x3A,0x78,0x10,0x23,0x0A,0x73,0x22,0x78,0x0A,0x73,0x01,0x7B,0x19,0x43,0x01, \ +0x73,0xDE,0xE7,0x00,0x00,0x70,0x03,0x00,0x0D,0x60,0x02,0x00,0x02,0x5D,0x02, \ +0x00,0x02,0x5C,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0x80,0xB4,0x01,0x22,0x00, \ +0x23,0x02,0x28,0x10,0x49,0x12,0xD1,0x18,0x1C,0x10,0x4B,0x04,0x27,0x18,0x71, \ +0x0F,0x4B,0x1F,0x70,0x18,0x70,0x0F,0x4F,0x82,0x23,0x3B,0x71,0x0E,0x4B,0x18, \ +0x80,0x0E,0x4B,0x18,0x80,0x0E,0x4B,0x18,0x80,0x0A,0x70,0x80,0xBC,0xF7,0x46, \ +0x85,0x28,0xFB,0xD1,0x0C,0x48,0x03,0x80,0x0C,0x48,0x02,0x72,0x08,0x78,0x01, \ +0x28,0xF4,0xD1,0x02,0x20,0x08,0x70,0xF1,0xE7,0x00,0x00,0x61,0x02,0x00,0x02, \ +0x70,0x03,0x00,0x0D,0xC0,0x03,0x00,0x0D,0xB0,0x03,0x00,0x0D,0x4E,0x02,0x00, \ +0x02,0x4C,0x02,0x00,0x02,0x48,0x02,0x00,0x02,0x4A,0x02,0x00,0x02,0x58,0x51, \ +0x00,0x00,0x90,0xB5,0x0F,0x1C,0x19,0x1C,0x29,0x4B,0x14,0x1C,0x27,0x4A,0x98, \ +0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xAC,0xFD, \ +0x90,0xBD,0x24,0x4B,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21, \ +0x1C,0xFF,0xF7,0xC2,0xFD,0x90,0xBD,0x81,0x23,0x1B,0x02,0x98,0x42,0x06,0xD1, \ 
+0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xD3,0xFD,0x90,0xBD,0xFF, \ +0x23,0x0C,0x33,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C, \ +0xFF,0xF7,0xE6,0xFD,0x90,0xBD,0x41,0x23,0x5B,0x02,0x98,0x42,0x06,0xD1,0x13, \ +0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xF7,0xFD,0x90,0xBD,0x0F,0x4B, \ +0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x29, \ +0xFE,0x90,0xBD,0x01,0x23,0xDB,0x03,0x98,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C, \ +0x38,0x1C,0x21,0x1C,0xFF,0xF7,0x40,0xFE,0x90,0xBD,0x06,0x49,0x20,0x20,0x08, \ +0x73,0x90,0xBD,0x00,0x00,0x30,0x02,0x00,0x02,0x08,0x80,0x00,0x00,0x0A,0x81, \ +0x00,0x00,0x03,0x02,0x00,0x00,0x70,0x03,0x00,0x0D,0x10,0x49,0x09,0x78,0x01, \ +0x29,0x1B,0xD1,0x40,0x08,0x19,0xD3,0x0D,0x20,0x00,0x06,0x01,0x78,0x20,0x23, \ +0x19,0x43,0x01,0x70,0x0B,0x48,0x00,0x68,0xC1,0x43,0x0B,0x48,0xC2,0x69,0x11, \ +0x40,0xC1,0x61,0x00,0x20,0x07,0x21,0x49,0x06,0x7D,0x22,0x12,0x01,0x88,0x61, \ +0x01,0x30,0x90,0x42,0xFC,0xD3,0xFF,0x20,0x48,0x61,0xFF,0xE7,0xFE,0xE7,0xF7, \ +0x46,0x00,0x00,0x3A,0x01,0x00,0x02,0xAC,0x02,0x00,0x02,0x40,0x00,0x00,0x04, \ +0xF0,0xB5,0xC0,0x20,0xFD,0xF7,0x0C,0xF8,0x22,0x4C,0x23,0x4F,0x21,0x7A,0x23, \ +0x4A,0x39,0x70,0x11,0x79,0x79,0x70,0x21,0x7B,0xF9,0x70,0x11,0x7B,0xB9,0x70, \ +0x0D,0x21,0x09,0x06,0x8B,0x88,0x07,0x25,0x6D,0x06,0xBB,0x80,0xEE,0x69,0x01, \ +0x23,0x5B,0x02,0x33,0x43,0xEB,0x61,0x00,0x23,0x01,0x33,0x32,0x2B,0xFC,0xD3, \ +0xEE,0x69,0x18,0x4B,0x33,0x40,0xEB,0x61,0x00,0x23,0x01,0x33,0x64,0x2B,0xFC, \ +0xD3,0x15,0x4D,0x00,0x23,0x2B,0x70,0x15,0x4B,0x80,0x25,0x1D,0x73,0x01,0x25, \ +0x1D,0x72,0x82,0x25,0x1D,0x71,0x07,0x25,0x1D,0x70,0x11,0x4B,0x05,0x26,0x1E, \ +0x73,0x86,0x26,0x1E,0x72,0x1D,0x71,0x24,0x23,0x23,0x71,0x3B,0x78,0x23,0x72, \ +0xFB,0x78,0x23,0x73,0x7B,0x78,0x13,0x71,0xBB,0x78,0x13,0x73,0x0A,0x4A,0x0A, \ +0x81,0xBA,0x88,0x8A,0x80,0xFC,0xF7,0xC8,0xFF,0xF0,0xBD,0x00,0x00,0xC0,0x03, \ +0x00,0x0D,0xC8,0x02,0x00,0x02,0xE0,0x03,0x00,0x0D,0xFF,0xFD,0x00,0x00,0x10, \ +0x00,0x00,0x0D,0xB0,0x03,0x00,0x0D,0xA0,0x03,0x00,0x0D,0xFF,0x0F,0x00,0x00, \ +0x80,0xB5,0x0B,0x49,0x00,0x20,0x08,0x60,0x88,0x80,0x88,0x71,0x09,0x4F,0xC8, \ +0x71,0x38,0x68,0x01,0x7A,0x10,0x29,0x02,0xD1,0xFB,0xF7,0x17,0xFD,0x38,0x60, \ +0x38,0x68,0x01,0x7A,0x40,0x29,0x02,0xD1,0xFB,0xF7,0x10,0xFD,0x38,0x60,0x80, \ +0xBD,0x00,0x00,0x58,0x51,0x00,0x00,0x64,0x02,0x00,0x02,0xF0,0xB5,0x23,0x4E, \ +0x04,0x1C,0x0F,0x1C,0x13,0x1C,0x20,0x22,0xB5,0x78,0xF1,0x78,0x03,0x2B,0x20, \ +0x48,0x01,0xD0,0x02,0x73,0xF0,0xBD,0x02,0x2D,0x09,0xD1,0x01,0x29,0x01,0xD3, \ +0x0D,0x29,0x01,0xD9,0x02,0x73,0xF0,0xBD,0x08,0x29,0x01,0xD1,0x02,0x73,0xF0, \ +0xBD,0x00,0x2F,0x09,0xD1,0xFC,0xF7,0xAB,0xF9,0x06,0x2D,0x07,0xD1,0xF9,0xF7, \ +0x76,0xFA,0x15,0x48,0x00,0x21,0x01,0x70,0x01,0xE0,0x00,0x21,0x01,0x73,0x13, \ +0x48,0x02,0x2D,0x07,0xD1,0x00,0x2C,0x0E,0xD1,0x11,0x49,0x01,0x60,0x11,0x48, \ +0x00,0x21,0x01,0x70,0x08,0xE0,0x01,0x2D,0xD7,0xD0,0x0F,0x49,0x01,0x60,0x0F, \ +0x48,0x00,0x21,0x01,0x70,0x0F,0x48,0x01,0x70,0x0F,0x48,0x31,0x1C,0x07,0x80, \ +0x0E,0x48,0x00,0x27,0x07,0x80,0x0E,0x48,0x08,0x22,0xFD,0xF7,0x95,0xFC,0x03, \ +0x48,0x07,0x70,0xF0,0xBD,0x38,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x60,0x02, \ +0x00,0x02,0x58,0x02,0x00,0x02,0x64,0x0A,0x00,0x02,0x9C,0x01,0x00,0x02,0xD4, \ +0x09,0x00,0x02,0x5D,0x02,0x00,0x02,0x5C,0x02,0x00,0x02,0x54,0x02,0x00,0x02, \ +0x56,0x02,0x00,0x02,0x40,0x02,0x00,0x02,0xB0,0xB5,0x11,0x4F,0x14,0x1C,0xBB, \ +0x78,0xFF,0x78,0x03,0x2C,0x0F,0x4A,0x02,0xD0,0x20,0x20,0x10,0x73,0xB0,0xBD, \ +0x0E,0x4D,0x00,0x24,0x2C,0x80,0x0D,0x4C,0x01,0x2B,0x21,0x80,0x0A,0xD1,0x80, \ 
+0x20,0x10,0x73,0x0B,0x48,0x0C,0x49,0x00,0x78,0x10,0x23,0x08,0x73,0x10,0x7B, \ +0x18,0x43,0x10,0x73,0xB0,0xBD,0x02,0x1C,0x18,0x1C,0x39,0x1C,0xFF,0xF7,0x2E, \ +0xFD,0xB0,0xBD,0x00,0x00,0x38,0x02,0x00,0x02,0x70,0x03,0x00,0x0D,0x56,0x02, \ +0x00,0x02,0x54,0x02,0x00,0x02,0x53,0x02,0x00,0x02,0x30,0x03,0x00,0x0D,0xB0, \ +0xB5,0x0F,0x1C,0x18,0x4D,0x19,0x1C,0x14,0x1C,0xA8,0x42,0x02,0xD0,0x17,0x4B, \ +0x00,0x22,0x1A,0x70,0x16,0x4A,0xA8,0x42,0x06,0xD1,0x13,0x68,0x0A,0x1C,0x38, \ +0x1C,0x21,0x1C,0xFF,0xF7,0xD9,0xFD,0xB0,0xBD,0x12,0x4B,0x98,0x42,0x04,0xD1, \ +0x12,0x68,0x20,0x1C,0xFF,0xF7,0x55,0xFF,0xB0,0xBD,0x0F,0x4B,0x98,0x42,0x06, \ +0xD1,0x13,0x68,0x0A,0x1C,0x38,0x1C,0x21,0x1C,0xFF,0xF7,0xE5,0xFD,0xB0,0xBD, \ +0x0B,0x4B,0x98,0x42,0x04,0xD1,0x12,0x68,0x20,0x1C,0xFF,0xF7,0xA3,0xFF,0xB0, \ +0xBD,0x0B,0x1C,0x39,0x1C,0x22,0x1C,0xFF,0xF7,0x41,0xFE,0xB0,0xBD,0x01,0x02, \ +0x00,0x00,0x61,0x02,0x00,0x02,0x30,0x02,0x00,0x02,0x0E,0x40,0x00,0x00,0x22, \ +0xC1,0x00,0x00,0x33,0xC1,0x00,0x00,0xF0,0xB5,0x22,0x4B,0xE0,0x25,0x01,0x27, \ +0x98,0x42,0x1D,0x49,0x1D,0x4C,0x1E,0x4A,0x08,0xD1,0x90,0x78,0x01,0x28,0x01, \ +0xD1,0x0D,0x73,0x01,0xE0,0xFF,0xF7,0x5C,0xFD,0x27,0x71,0xF0,0xBD,0x1A,0x4B, \ +0x20,0x26,0x98,0x42,0x21,0xD1,0x0E,0x73,0x19,0x48,0x27,0x71,0x00,0x78,0x00, \ +0x28,0xF4,0xD1,0x90,0x78,0x02,0x28,0x02,0xD1,0xD0,0x78,0x08,0x28,0xEE,0xD0, \ +0x90,0x78,0x01,0x28,0x0C,0xD1,0x13,0x49,0x00,0x20,0x08,0x70,0x12,0x48,0x00, \ +0x78,0x02,0x28,0x02,0xD1,0x11,0x48,0x07,0x70,0xF0,0xBD,0x11,0x48,0x07,0x70, \ +0xF0,0xBD,0xD1,0x78,0x90,0x78,0x01,0x22,0xFF,0xF7,0xF3,0xFC,0xF0,0xBD,0x10, \ +0x78,0x00,0x0A,0x01,0xD2,0x0E,0x73,0x00,0xE0,0x0D,0x73,0x27,0x71,0xF0,0xBD, \ +0x70,0x03,0x00,0x0D,0xD0,0x03,0x00,0x0D,0x38,0x02,0x00,0x02,0x33,0xC1,0x00, \ +0x00,0x0E,0x40,0x00,0x00,0x60,0x02,0x00,0x02,0x9C,0x01,0x00,0x02,0x53,0x02, \ +0x00,0x02,0x5F,0x02,0x00,0x02,0xE5,0x01,0x00,0x02,0x80,0xB5,0x00,0x20,0x1C, \ +0x49,0x0F,0x27,0x3F,0x06,0x08,0x70,0xB8,0x80,0x39,0x88,0xB8,0x81,0x1A,0x4A, \ +0x39,0x89,0xD1,0x69,0xD1,0x04,0xCB,0x68,0xC9,0x6B,0x18,0x49,0x09,0x68,0x90, \ +0x61,0x17,0x49,0x02,0x20,0xC8,0x74,0x17,0x48,0x01,0x7A,0x0C,0x30,0x08,0x29, \ +0x19,0xD2,0x01,0xA3,0x5B,0x5C,0x5B,0x00,0x9F,0x44,0x15,0x03,0x06,0x15,0x09, \ +0x0C,0x0F,0x12,0x00,0xF0,0xFA,0xFB,0x80,0xBD,0x00,0xF0,0x7B,0xF9,0x80,0xBD, \ +0x00,0xF0,0x10,0xFA,0x80,0xBD,0x00,0xF0,0x1B,0xF8,0x80,0xBD,0x00,0xF0,0xC4, \ +0xF8,0x80,0xBD,0x00,0xF0,0x73,0xFA,0x80,0xBD,0x02,0x21,0x0A,0x20,0xFF,0xF7, \ +0x0A,0xFA,0x06,0x48,0xB8,0x80,0x80,0xBD,0x00,0x00,0x9C,0x01,0x00,0x02,0x80, \ +0x00,0x00,0x04,0x40,0x00,0x00,0x04,0x50,0x09,0x00,0x02,0xD0,0x09,0x00,0x02, \ +0x08,0x08,0x00,0x00,0xF0,0xB5,0x4B,0x4F,0x60,0xC8,0x39,0x1C,0x60,0xC1,0x38, \ +0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x48,0x4C,0x01,0xDC,0x00,0x28,0x05,0xD1, \ +0x03,0x21,0x0A,0x20,0xFF,0xF7,0xE9,0xF9,0xAC,0x80,0xF0,0xBD,0x44,0x48,0x90, \ +0x21,0x41,0x70,0xB9,0x78,0x00,0x26,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0, \ +0x40,0x21,0xC1,0x70,0x41,0x7C,0x89,0x07,0x89,0x0F,0x41,0x74,0xFA,0x78,0x41, \ +0x7C,0x92,0x00,0x02,0x23,0x1A,0x43,0x11,0x43,0x41,0x74,0x39,0x79,0x01,0x75, \ +0x79,0x79,0x41,0x75,0x38,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41, \ +0x77,0xFF,0x20,0xF5,0x30,0x35,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C, \ +0x01,0x38,0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18, \ +0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0xE1,0xFF,0x07,0x21,0x49,0x06, \ +0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x02,0xF0,0xC6,0xF9,0x38,0x78,0x00, \ +0x21,0x01,0xF0,0x29,0xFF,0x00,0x21,0x08,0x20,0xF9,0xF7,0x71,0xF8,0x00,0x21, \ 
+0x09,0x20,0xF9,0xF7,0x6D,0xF8,0x00,0x21,0x0A,0x20,0xF9,0xF7,0x69,0xF8,0x20, \ +0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61,0x00,0x20,0xF8,0xF7,0xD7,0xFF, \ +0x0A,0x20,0xF8,0xF7,0xBE,0xFF,0x01,0x20,0x80,0x06,0x46,0x61,0xC0,0x68,0x19, \ +0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFF,0xF7, \ +0x83,0xF9,0xAC,0x80,0xF0,0xBD,0x00,0x22,0xFF,0x21,0x7D,0x20,0xC0,0x00,0xAC, \ +0x80,0x00,0xF0,0xA6,0xFA,0x11,0x48,0x01,0x21,0x89,0x06,0x88,0x63,0x10,0x48, \ +0x11,0x4A,0x48,0x63,0xAE,0x80,0x04,0x20,0xD0,0x74,0xB8,0x60,0x00,0x03,0x78, \ +0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0C,0x48,0x01,0x21,0xA8,0x80,0x0A,0x20, \ +0xFF,0xF7,0x64,0xF9,0xF0,0xBD,0x00,0x00,0xD8,0x02,0x00,0x02,0x08,0x08,0x00, \ +0x00,0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24, \ +0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x50,0x09,0x00,0x02,0x88, \ +0x88,0x00,0x00,0xF0,0xB5,0x4C,0x4F,0x60,0xC8,0x39,0x1C,0x60,0xC1,0x38,0x78, \ +0x0F,0x25,0x2D,0x06,0x0E,0x28,0x49,0x4C,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03, \ +0x21,0x0A,0x20,0xFF,0xF7,0x3D,0xF9,0xAC,0x80,0xF0,0xBD,0x45,0x48,0x90,0x21, \ +0x41,0x70,0xB9,0x78,0x00,0x26,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0,0x40, \ +0x21,0xC1,0x70,0x41,0x7C,0x89,0x07,0x89,0x0F,0x41,0x74,0xFA,0x78,0x41,0x7C, \ +0x92,0x00,0x02,0x23,0x1A,0x43,0x11,0x43,0x41,0x74,0x39,0x79,0x01,0x75,0x79, \ +0x79,0x41,0x75,0x39,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41,0x77, \ +0xFF,0x20,0xF5,0x30,0x36,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01, \ +0x38,0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43, \ +0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x35,0xFF,0x07,0x21,0x49,0x06,0xC8, \ +0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x02,0xF0,0x1A,0xF9,0x38,0x78,0x00,0x21, \ +0x01,0xF0,0x7D,0xFE,0x0B,0x21,0x08,0x20,0xF8,0xF7,0xC5,0xFF,0xB7,0x21,0x09, \ +0x20,0xF8,0xF7,0xC1,0xFF,0x00,0x21,0x0A,0x20,0xF8,0xF7,0xBD,0xFF,0x14,0x20, \ +0xF8,0xF7,0x1A,0xFF,0x1F,0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61,0x00, \ +0x20,0xF8,0xF7,0x28,0xFF,0x0A,0x20,0xF8,0xF7,0x0F,0xFF,0x01,0x20,0x80,0x06, \ +0x46,0x61,0xC0,0x68,0x19,0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3,0x06, \ +0x21,0x0A,0x20,0xFF,0xF7,0xD4,0xF8,0xAC,0x80,0xF0,0xBD,0x00,0x22,0x55,0x21, \ +0x7D,0x20,0xC0,0x00,0xAC,0x80,0x00,0xF0,0xF7,0xF9,0x11,0x48,0x01,0x21,0x89, \ +0x06,0x88,0x63,0x10,0x48,0x10,0x4A,0x48,0x63,0xAE,0x80,0x04,0x20,0xD0,0x74, \ +0xB8,0x60,0x00,0x03,0x78,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0C,0x48,0x01, \ +0x21,0xA8,0x80,0x0A,0x20,0xFF,0xF7,0xB5,0xF8,0xF0,0xBD,0xD8,0x02,0x00,0x02, \ +0x08,0x08,0x00,0x00,0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00, \ +0x04,0x04,0x24,0x00,0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x50,0x09, \ +0x00,0x02,0x88,0x88,0x00,0x00,0xF0,0xB5,0x42,0x4C,0xC0,0xC8,0x21,0x1C,0xC0, \ +0xC1,0xA0,0x78,0x40,0x4D,0x80,0x08,0x80,0x00,0x0F,0x27,0x3F,0x06,0x00,0x28, \ +0x05,0xD0,0x03,0x21,0x0A,0x20,0xFF,0xF7,0x8F,0xF8,0xBD,0x80,0xF0,0xBD,0x20, \ +0x78,0x0E,0x28,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFF,0xF7, \ +0x84,0xF8,0xBD,0x80,0xF0,0xBD,0x08,0x21,0x0A,0x20,0xFF,0xF7,0x7E,0xF8,0x33, \ +0x48,0x00,0x26,0x06,0x70,0x33,0x48,0x06,0x60,0x46,0x60,0x00,0x20,0xF8,0xF7, \ +0xB9,0xFE,0xA1,0x78,0x30,0x48,0x01,0x29,0x01,0xD1,0xC6,0x70,0x01,0xE0,0x40, \ +0x21,0xC1,0x70,0x21,0x79,0x01,0x75,0x61,0x79,0x41,0x75,0x2C,0x49,0x09,0x78, \ +0x01,0x29,0x01,0xD1,0xE1,0x79,0x41,0x77,0xFF,0x20,0xF5,0x30,0x29,0x49,0x49, \ +0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF7,0xD1,0x07,0x21, \ +0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8, \ 
+0xF7,0x7D,0xFE,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98,0x43,0xC8,0x61, \ +0x02,0xF0,0x62,0xF8,0x20,0x78,0x00,0x21,0x01,0xF0,0xC5,0xFD,0x00,0x28,0x05, \ +0xD1,0x05,0x21,0x0A,0x20,0xFF,0xF7,0x3D,0xF8,0xBD,0x80,0xF0,0xBD,0x14,0x20, \ +0xF8,0xF7,0x66,0xFE,0x00,0x20,0xF8,0xF7,0x79,0xFE,0x13,0x48,0x41,0x68,0xC9, \ +0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFF,0xF7,0x2D,0xF8,0xBD,0x80,0xF0,0xBD, \ +0x86,0x60,0x20,0x20,0x41,0x05,0x48,0x61,0x0D,0x48,0x01,0x21,0x01,0x73,0xC1, \ +0x74,0xB8,0x88,0x0B,0x4B,0x18,0x43,0xB8,0x80,0x0A,0x20,0xFF,0xF7,0x1C,0xF8, \ +0xF0,0xBD,0x00,0x00,0xD8,0x02,0x00,0x02,0x08,0x08,0x00,0x00,0x9C,0x01,0x00, \ +0x02,0xD0,0x02,0x00,0x02,0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00, \ +0x00,0x04,0x50,0x09,0x00,0x02,0x48,0x48,0x00,0x00,0xF0,0xB5,0x2F,0x4F,0x60, \ +0xC8,0x39,0x1C,0x60,0xC1,0x38,0x78,0x0F,0x25,0x2D,0x06,0x0E,0x28,0x2C,0x4C, \ +0x01,0xDC,0x00,0x28,0x05,0xD1,0x03,0x21,0x0A,0x20,0xFE,0xF7,0xF7,0xFF,0xAC, \ +0x80,0xF0,0xBD,0x28,0x48,0x00,0x26,0x46,0x70,0x41,0x7C,0xFD,0x23,0x19,0x40, \ +0x41,0x74,0x25,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41,0x77,0xFF, \ +0x20,0xF5,0x30,0x22,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38, \ +0x00,0x29,0xF7,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8, \ +0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x02,0xFE,0x07,0x21,0x49,0x06,0xC8,0x69, \ +0x80,0x23,0x98,0x43,0xC8,0x61,0x01,0xF0,0xE7,0xFF,0xAE,0x80,0x38,0x78,0x00, \ +0x21,0x01,0xF0,0x49,0xFD,0x00,0x28,0x02,0xD1,0x13,0x49,0x05,0x20,0x48,0x70, \ +0x14,0x20,0xF8,0xF7,0xED,0xFD,0x00,0x20,0xF8,0xF7,0x00,0xFE,0x0D,0x48,0x41, \ +0x68,0xC9,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFE,0xF7,0xB4,0xFF,0xAC,0x80, \ +0xF0,0xBD,0x86,0x60,0x01,0x20,0x80,0x06,0x46,0x61,0x01,0x21,0x0A,0x20,0xAC, \ +0x80,0xFE,0xF7,0xA9,0xFF,0xF0,0xBD,0xD8,0x02,0x00,0x02,0x08,0x08,0x00,0x00, \ +0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0xD0,0x09,0x00, \ +0x02,0xF0,0xB5,0x01,0x1C,0xB8,0xC9,0x58,0x4E,0x30,0x1C,0xB8,0xC0,0x30,0x7A, \ +0x0F,0x24,0x24,0x06,0x0E,0x28,0x55,0x4F,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03, \ +0x21,0x0A,0x20,0xFE,0xF7,0x8A,0xFF,0xA7,0x80,0xF0,0xBD,0x51,0x4D,0xA8,0x70, \ +0x70,0x78,0x68,0x70,0x30,0x78,0x28,0x70,0x70,0x88,0xA8,0x60,0x70,0x68,0xE8, \ +0x60,0x00,0x20,0xE8,0x70,0x68,0x60,0x28,0x61,0xF0,0x68,0x68,0x61,0x00,0x20, \ +0xF8,0xF7,0xB9,0xFD,0x6A,0x78,0x40,0x21,0x48,0x48,0x00,0x2A,0x16,0xD0,0x01, \ +0x2A,0x17,0xD0,0x02,0x2A,0x18,0xD0,0x03,0x2A,0x01,0xD1,0x60,0x22,0x42,0x70, \ +0x42,0x7C,0x92,0x07,0x92,0x0F,0x42,0x74,0xB3,0x7A,0x42,0x7C,0x9B,0x00,0x1A, \ +0x43,0x42,0x74,0x72,0x7A,0x01,0x2A,0x0A,0xD1,0x00,0x22,0xC2,0x70,0x08,0xE0, \ +0x00,0x22,0x42,0x70,0xED,0xE7,0x20,0x22,0x42,0x70,0xEA,0xE7,0x41,0x70,0xE8, \ +0xE7,0xC1,0x70,0x37,0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF1,0x7A,0x41,0x77, \ +0xFF,0x20,0x35,0x4E,0xF5,0x30,0x71,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01, \ +0x38,0x00,0x29,0xF8,0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43, \ +0xC8,0x61,0xFF,0x20,0x2D,0x30,0xF8,0xF7,0x64,0xFD,0x07,0x21,0x49,0x06,0xC8, \ +0x69,0x80,0x23,0x98,0x43,0xC8,0x61,0x01,0xF0,0x49,0xFF,0xA8,0x78,0x00,0x21, \ +0x01,0xF0,0xAC,0xFC,0x25,0x49,0xC8,0x69,0x8B,0x01,0x18,0x43,0xC8,0x61,0x14, \ +0x20,0xF8,0xF7,0x50,0xFD,0x00,0x20,0xF8,0xF7,0x63,0xFD,0x0A,0x20,0xF8,0xF7, \ +0x4A,0xFD,0x00,0x26,0x01,0x20,0x80,0x06,0x46,0x61,0xC0,0x68,0x1C,0x49,0x1C, \ +0x48,0x48,0x61,0x48,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFE,0xF7, \ +0x0D,0xFF,0xA7,0x80,0xF0,0xBD,0xE8,0x68,0x00,0xF0,0xAC,0xF8,0x68,0x60,0xE8, \ +0x78,0xF8,0xF7,0x8A,0xFD,0x68,0x68,0xF9,0xF7,0xAB,0xFF,0xA7,0x80,0x29,0x78, \ 
+0xE8,0x68,0x00,0x22,0x00,0xF0,0x27,0xF8,0x10,0x49,0xA6,0x80,0x03,0x20,0xC8, \ +0x74,0x0C,0x49,0x22,0x20,0x88,0x60,0x08,0x05,0x41,0x6A,0x0C,0x4B,0xC9,0x18, \ +0x01,0x62,0x0C,0x48,0x01,0x21,0xA0,0x80,0x0A,0x20,0xFE,0xF7,0xE9,0xFE,0xF0, \ +0xBD,0xEC,0x0A,0x00,0x02,0x08,0x08,0x00,0x00,0xD4,0x0A,0x00,0x02,0xD8,0x07, \ +0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24,0x00,0x00,0x50, \ +0x09,0x00,0x02,0x10,0x27,0x00,0x00,0x88,0x88,0x00,0x00,0xF0,0xB5,0x07,0x1C, \ +0x00,0x2A,0x0B,0xD1,0x00,0x20,0x00,0x2F,0x14,0x4A,0x06,0xD9,0x09,0x06,0x09, \ +0x0E,0x11,0x70,0x01,0x32,0x01,0x30,0xB8,0x42,0xFA,0xD3,0xF0,0xBD,0xF8,0xF7, \ +0x02,0xFD,0xFD,0xF7,0xD2,0xF8,0xFD,0xF7,0xB2,0xF8,0xBC,0x08,0x26,0x1C,0x0B, \ +0x4D,0x04,0xD0,0xFD,0xF7,0xAC,0xF8,0x01,0xC5,0x01,0x3C,0xFA,0xD1,0xB0,0x00, \ +0x3F,0x1A,0xFD,0xF7,0xA5,0xF8,0x69,0x1C,0x03,0x2F,0x28,0x70,0x02,0xD1,0x00, \ +0x0C,0x08,0x70,0xF0,0xBD,0x02,0x2F,0xE2,0xD1,0x00,0x0A,0x08,0x70,0xF0,0xBD, \ +0x00,0x00,0x00,0x72,0x01,0x02,0x88,0xB4,0x01,0x20,0x80,0x06,0xC1,0x6B,0x00, \ +0xAB,0x19,0x80,0x1A,0x49,0x1B,0x4A,0xC9,0x7C,0x1B,0x4F,0x03,0x29,0x21,0xD1, \ +0x00,0xA9,0x09,0x88,0x20,0x23,0x0B,0x40,0x18,0x49,0x0C,0xD0,0x87,0x63,0xCF, \ +0x68,0x03,0x23,0x1B,0x03,0x3B,0x43,0x43,0x63,0x4B,0x78,0x15,0x4F,0xFF,0x5C, \ +0x11,0x23,0x9B,0x02,0x3B,0x43,0x53,0x60,0x00,0xAA,0x12,0x88,0x92,0x08,0x16, \ +0xD3,0x0A,0x69,0x01,0x32,0x0A,0x61,0x4B,0x69,0x9A,0x42,0x10,0xD2,0x89,0x68, \ +0x42,0x6A,0x89,0x18,0x01,0x62,0x0B,0xE0,0x04,0x29,0x09,0xD1,0x00,0xA9,0x09, \ +0x88,0xC9,0x08,0x05,0xD3,0x87,0x63,0x64,0x21,0x41,0x63,0x01,0x20,0x80,0x03, \ +0x50,0x60,0x88,0xBC,0xF7,0x46,0x50,0x09,0x00,0x02,0x40,0x00,0x00,0x04,0x00, \ +0x72,0x01,0x02,0xD4,0x0A,0x00,0x02,0xC8,0x01,0x00,0x02,0xF0,0xB5,0x04,0x30, \ +0xC7,0x00,0x19,0x4C,0x00,0x26,0xE6,0x70,0x60,0x78,0x01,0x28,0x15,0xD0,0x02, \ +0x28,0x15,0xD0,0x03,0x28,0x25,0xD1,0x0B,0x20,0x39,0x1C,0xFD,0xF7,0x03,0xF8, \ +0x0D,0x1C,0x79,0x1A,0x0B,0x20,0xFC,0xF7,0xFE,0xFF,0x07,0x1C,0x00,0x2D,0x18, \ +0xD9,0x01,0x37,0x04,0x2D,0x13,0xD2,0x01,0x20,0xE0,0x70,0x13,0xE0,0x7F,0x08, \ +0x11,0xE0,0x79,0x00,0x0B,0x20,0x0F,0x1C,0xFC,0xF7,0xEE,0xFF,0x0C,0x1C,0x79, \ +0x1A,0x0B,0x20,0xFC,0xF7,0xE9,0xFF,0x07,0x1C,0x00,0x2C,0x04,0xD9,0x01,0x37, \ +0x02,0xE0,0xE6,0x70,0x00,0xE0,0xE6,0x70,0x38,0x04,0x00,0x0C,0xF0,0xBD,0xD4, \ +0x0A,0x00,0x02,0xF0,0xB5,0x4B,0x4F,0x60,0xC8,0x39,0x1C,0x60,0xC1,0x38,0x78, \ +0x0F,0x26,0x36,0x06,0x0E,0x28,0x48,0x4D,0x01,0xDC,0x00,0x28,0x05,0xD1,0x03, \ +0x21,0x0A,0x20,0xFE,0xF7,0x13,0xFE,0xB5,0x80,0xF0,0xBD,0x44,0x48,0x90,0x21, \ +0x41,0x70,0xB9,0x78,0x00,0x24,0x01,0x29,0x01,0xD1,0xC4,0x70,0x01,0xE0,0x40, \ +0x21,0xC1,0x70,0x41,0x7C,0x89,0x07,0x89,0x0F,0x41,0x74,0xFA,0x78,0x41,0x7C, \ +0x92,0x00,0x11,0x43,0x41,0x74,0x39,0x79,0x01,0x75,0x79,0x79,0x41,0x75,0x39, \ +0x49,0x09,0x78,0x01,0x29,0x01,0xD1,0xF9,0x79,0x41,0x77,0xFF,0x20,0xF5,0x30, \ +0x36,0x49,0x49,0x68,0xC9,0x0B,0x03,0xD3,0x01,0x1C,0x01,0x38,0x00,0x29,0xF7, \ +0xD1,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x18,0x43,0xC8,0x61,0xFF,0x20, \ +0x2D,0x30,0xF8,0xF7,0x0D,0xFC,0x07,0x21,0x49,0x06,0xC8,0x69,0x80,0x23,0x98, \ +0x43,0xC8,0x61,0x01,0xF0,0xF2,0xFD,0x38,0x78,0x00,0x21,0x01,0xF0,0x55,0xFB, \ +0x0B,0x21,0x08,0x20,0xF8,0xF7,0x9D,0xFC,0xB7,0x21,0x09,0x20,0xF8,0xF7,0x99, \ +0xFC,0x00,0x21,0x0A,0x20,0xF8,0xF7,0x95,0xFC,0x14,0x20,0xF8,0xF7,0xF2,0xFB, \ +0x1F,0x4F,0xF8,0x69,0xBB,0x01,0x18,0x43,0xF8,0x61,0x00,0x20,0xF8,0xF7,0x00, \ +0xFC,0x0A,0x20,0xF8,0xF7,0xE7,0xFB,0x01,0x20,0x80,0x06,0x44,0x61,0xC0,0x68, \ +0x19,0x48,0x78,0x61,0x78,0x68,0xC0,0x0B,0x05,0xD3,0x06,0x21,0x0A,0x20,0xFE, \ 
+0xF7,0xAC,0xFD,0xB5,0x80,0xF0,0xBD,0x01,0x22,0x55,0x21,0x7D,0x20,0xC0,0x00, \ +0xB5,0x80,0xFF,0xF7,0xCF,0xFE,0x11,0x48,0x01,0x21,0x89,0x06,0x88,0x63,0x10, \ +0x48,0x10,0x4A,0x48,0x63,0xB4,0x80,0x04,0x20,0xB8,0x60,0xD0,0x74,0x00,0x03, \ +0x78,0x60,0x48,0x6A,0x0A,0x30,0x08,0x62,0x0C,0x48,0x01,0x21,0xB0,0x80,0x0A, \ +0x20,0xFE,0xF7,0x8D,0xFD,0xF0,0xBD,0xD8,0x02,0x00,0x02,0x08,0x08,0x00,0x00, \ +0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x04,0x24,0x00, \ +0x00,0x00,0x72,0x01,0x02,0x64,0x10,0x00,0x00,0x50,0x09,0x00,0x02,0x88,0x88, \ +0x00,0x00,0x80,0xB5,0x15,0x49,0x01,0x27,0xC9,0x7C,0x01,0x29,0x13,0xD1,0x13, \ +0x4B,0x18,0x40,0x0E,0xD0,0x88,0x06,0xC0,0x68,0x81,0x09,0x0A,0xD3,0x04,0x21, \ +0x01,0x40,0x10,0x48,0x03,0xD0,0x41,0x68,0x01,0x31,0x41,0x60,0x02,0xE0,0x01, \ +0x68,0x01,0x31,0x01,0x60,0x38,0x1C,0x80,0xBD,0x02,0x29,0x01,0xD1,0x38,0x1C, \ +0x80,0xBD,0x03,0x29,0x01,0xD0,0x04,0x29,0x06,0xD1,0x07,0x4B,0x18,0x40,0x01, \ +0xD0,0xFF,0xF7,0xAF,0xFE,0x38,0x1C,0x80,0xBD,0x00,0x20,0x80,0xBD,0x00,0x00, \ +0x50,0x09,0x00,0x02,0x40,0x40,0x00,0x00,0xD0,0x02,0x00,0x02,0x80,0x80,0x00, \ +0x00,0xFF,0xB5,0x84,0xB0,0x00,0x20,0x00,0x24,0x00,0x26,0x00,0x27,0x00,0x25, \ +0x03,0x90,0x02,0x90,0x01,0x90,0x68,0x46,0x04,0x22,0x5A,0x49,0xFC,0xF7,0xA9, \ +0xFE,0x05,0x99,0x00,0x20,0x00,0x29,0x1B,0xDD,0x04,0x99,0x80,0x23,0x09,0x5C, \ +0x0A,0x1C,0x9A,0x43,0x16,0x2A,0x02,0xD1,0x00,0xAB,0xD9,0x70,0x0D,0xE0,0x0B, \ +0x2A,0x02,0xD1,0x00,0xAB,0x99,0x70,0x08,0xE0,0x04,0x2A,0x02,0xD1,0x00,0xAB, \ +0x59,0x70,0x03,0xE0,0x02,0x2A,0x01,0xD1,0x00,0xAB,0x19,0x70,0x05,0x99,0x01, \ +0x30,0x88,0x42,0xE3,0xDB,0x00,0x20,0x69,0x46,0x09,0x5C,0x00,0x29,0x0D,0xD0, \ +0x09,0x0A,0x04,0xD3,0x00,0x2E,0x00,0xD1,0x07,0x1C,0x01,0x26,0x04,0x1C,0x01, \ +0x99,0x02,0x90,0x00,0x29,0x02,0xD1,0x01,0x21,0x01,0x91,0x05,0x1C,0x01,0x30, \ +0x04,0x28,0xEA,0xDB,0x01,0x99,0x00,0x20,0x00,0x29,0x01,0xD1,0x08,0xB0,0xF0, \ +0xBD,0x00,0x2E,0x01,0xD1,0x2C,0x1C,0x2F,0x1C,0x3A,0x49,0x00,0x22,0x8B,0x18, \ +0x1B,0x7C,0x00,0x2B,0x00,0xD0,0x03,0x92,0x01,0x32,0x04,0x2A,0xF7,0xDB,0x06, \ +0x9B,0x01,0x26,0x0E,0x2B,0x34,0x4A,0x03,0xD1,0x34,0x4B,0x1B,0x78,0x01,0x2B, \ +0x0A,0xD1,0x03,0x98,0x84,0x42,0x02,0xDD,0x03,0x98,0x90,0x72,0x00,0xE0,0x94, \ +0x72,0x02,0x98,0xD0,0x72,0xD7,0x71,0x42,0xE0,0x2D,0x4B,0x1B,0x78,0x00,0x2B, \ +0x3E,0xD1,0x01,0x2D,0x10,0xD9,0xD0,0x71,0x96,0x72,0xD6,0x72,0x07,0x9B,0x00, \ +0x27,0x01,0x2B,0x35,0xD1,0x82,0x20,0x00,0xAB,0x18,0x70,0x84,0x20,0x58,0x70, \ +0x0B,0x20,0x98,0x70,0x16,0x20,0xD8,0x70,0x2B,0xE0,0x01,0x2C,0x0D,0xDD,0x00, \ +0xAC,0x64,0x78,0x23,0x0A,0x01,0xD3,0x96,0x72,0x08,0xE0,0x00,0xAC,0x24,0x78, \ +0x23,0x0A,0x01,0xD3,0x90,0x72,0x02,0xE0,0x95,0x72,0x00,0xE0,0x94,0x72,0x01, \ +0x2F,0x0D,0xD9,0x00,0xAC,0x24,0x78,0x23,0x0A,0x01,0xD3,0xD0,0x71,0x08,0xE0, \ +0x00,0xAC,0x64,0x78,0x23,0x0A,0x01,0xD3,0xD6,0x71,0x02,0xE0,0xD5,0x71,0x00, \ +0xE0,0xD7,0x71,0x02,0x9B,0x00,0x2B,0x05,0xDD,0x00,0xAB,0x5B,0x78,0x00,0x2B, \ +0x01,0xD0,0xD6,0x72,0x00,0xE0,0xD0,0x72,0x00,0x20,0x6B,0x46,0x1B,0x5C,0x0C, \ +0x18,0x01,0x30,0x04,0x28,0x23,0x74,0xF8,0xDB,0xC8,0x19,0x01,0x7C,0x80,0x23, \ +0x19,0x43,0x01,0x74,0xD0,0x7A,0x05,0x49,0xC8,0x70,0x30,0x1C,0x86,0xE7,0x08, \ +0x9C,0x00,0x00,0x00,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0xB9,0x02,0x00,0x02, \ +0xD0,0x01,0x00,0x02,0x90,0xB4,0x47,0x78,0x00,0x22,0x00,0x23,0x00,0x2F,0x14, \ +0xDD,0xC7,0x18,0xBC,0x78,0x67,0x06,0x7F,0x0E,0x02,0x2F,0x05,0xD0,0x04,0x2F, \ +0x03,0xD0,0x0B,0x2F,0x01,0xD0,0x16,0x2F,0x04,0xD1,0x04,0x2A,0x02,0xDA,0x17, \ +0x1C,0xCC,0x55,0x01,0x32,0x47,0x78,0x01,0x33,0x9F,0x42,0xEA,0xDC,0x90,0xBC, \ 
+0x10,0x1C,0xF7,0x46,0xF1,0xB5,0x85,0xB0,0x00,0x20,0x01,0x90,0x68,0x46,0x04, \ +0x22,0x75,0x49,0xFC,0xF7,0xC5,0xFD,0x75,0x4E,0x04,0x24,0x30,0x68,0x45,0x68, \ +0x80,0x89,0x2F,0x28,0x02,0xDA,0x00,0x20,0x06,0xB0,0xF0,0xBD,0x05,0x98,0x70, \ +0x49,0x01,0x28,0x04,0x91,0x09,0xD1,0x06,0x22,0xE8,0x1D,0x09,0x30,0x04,0x99, \ +0xFC,0xF7,0x92,0xFD,0x00,0x28,0x01,0xD0,0x00,0x20,0xEE,0xE7,0x20,0x20,0xE9, \ +0x1D,0x19,0x31,0x28,0x5C,0x49,0x78,0x09,0x02,0x08,0x43,0x01,0x04,0x09,0x0C, \ +0x02,0x91,0x14,0x29,0x04,0xDB,0x7D,0x23,0x02,0x99,0xDB,0x00,0x99,0x42,0x01, \ +0xDD,0x00,0x20,0xDB,0xE7,0x22,0x20,0x28,0x5C,0x80,0x08,0x01,0xD2,0x00,0x20, \ +0xD5,0xE7,0x30,0x68,0x24,0x27,0x80,0x89,0x04,0x38,0x24,0x28,0x45,0xDD,0x5B, \ +0x49,0x03,0x91,0xE8,0x5D,0x00,0x28,0x09,0xD0,0x01,0x28,0x20,0xD0,0x03,0x28, \ +0x39,0xD1,0xE8,0x19,0x41,0x78,0x01,0x29,0x27,0xD0,0x00,0x20,0xC0,0xE7,0xEE, \ +0x19,0x70,0x78,0x00,0x28,0x00,0xD1,0xBB,0xE7,0x52,0x49,0x4A,0x79,0x82,0x42, \ +0x01,0xD0,0x00,0x20,0xB5,0xE7,0x03,0x99,0xB0,0x1C,0xFC,0xF7,0x51,0xFD,0x00, \ +0x28,0x01,0xD0,0x00,0x20,0xAD,0xE7,0x70,0x78,0xC0,0x19,0x87,0x1C,0x01,0x20, \ +0x01,0x90,0x14,0xE0,0xE8,0x19,0x69,0x46,0x06,0x1C,0xFF,0xF7,0x74,0xFF,0x04, \ +0x1C,0x01,0xD1,0x00,0x20,0x9E,0xE7,0x70,0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0, \ +0x42,0x49,0x80,0x78,0x09,0x7D,0x88,0x42,0x01,0xD0,0x00,0x20,0x93,0xE7,0x03, \ +0x37,0x3A,0x4E,0x30,0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xBE,0xDC,0x01,0x98, \ +0x00,0x28,0x01,0xD1,0x00,0x20,0x87,0xE7,0x39,0x49,0x68,0x46,0x01,0x23,0x0A, \ +0x7D,0x21,0x1C,0xFF,0xF7,0x86,0xFE,0x00,0x28,0x00,0xD1,0x7D,0xE7,0x04,0x20, \ +0xF9,0xF7,0xF0,0xFF,0x33,0x48,0x20,0x23,0x01,0x78,0x19,0x43,0x01,0x70,0x01, \ +0x78,0x10,0x23,0x19,0x43,0x01,0x70,0xC0,0x20,0xFB,0xF7,0xEC,0xFF,0xE9,0x1D, \ +0x2E,0x4C,0x09,0x31,0x07,0x1C,0xE0,0x1D,0x0D,0x1C,0x06,0x22,0x07,0x30,0xFC, \ +0xF7,0x22,0xFD,0x06,0x22,0x29,0x1C,0x04,0x98,0xFC,0xF7,0x1D,0xFD,0x24,0x4D, \ +0xE0,0x1D,0x6A,0x79,0x03,0x99,0x0D,0x30,0xFC,0xF7,0x16,0xFD,0x24,0x49,0x01, \ +0x20,0xE6,0x1D,0x29,0x36,0x08,0x75,0x30,0x71,0x02,0x99,0x21,0x80,0xA8,0x70, \ +0x05,0x98,0x01,0x28,0x08,0xD1,0x00,0x21,0x00,0x20,0x01,0xF0,0xCE,0xFB,0x15, \ +0x49,0x00,0x20,0x09,0x68,0x48,0x61,0x07,0xE0,0xF9,0xF7,0x79,0xFF,0x21,0x88, \ +0x89,0x02,0x09,0x1A,0x06,0x20,0xF9,0xF7,0x97,0xFF,0x17,0x49,0x00,0x20,0x48, \ +0x70,0x05,0x20,0x88,0x71,0x05,0x98,0x01,0x28,0x04,0xD1,0x01,0x21,0x04,0x20, \ +0xFE,0xF7,0x79,0xFB,0x01,0xE0,0x01,0x20,0xB0,0x71,0x10,0x48,0x01,0x68,0x10, \ +0x48,0xC2,0x69,0x11,0x43,0xC1,0x61,0x0F,0x48,0x01,0x24,0x04,0x70,0x38,0x1C, \ +0xFB,0xF7,0x9E,0xFF,0x20,0x1C,0x1E,0xE7,0x0C,0x9C,0x00,0x00,0x50,0x01,0x00, \ +0x02,0x00,0x01,0x00,0x02,0xE0,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0x00, \ +0x00,0x02,0x9C,0x01,0x00,0x02,0x84,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0xA0, \ +0x09,0x00,0x02,0xAC,0x02,0x00,0x02,0x40,0x00,0x00,0x04,0x3E,0x01,0x00,0x02, \ +0xF0,0xB5,0x84,0xB0,0x5D,0x49,0x04,0x22,0x01,0xA8,0xFC,0xF7,0xBB,0xFC,0x5C, \ +0x4F,0x5C,0x49,0x38,0x68,0x00,0x25,0x46,0x68,0x06,0x22,0xF0,0x1D,0x09,0x30, \ +0x03,0x91,0xFC,0xF7,0x92,0xFC,0x00,0x28,0x02,0xD0,0x00,0x20,0x04,0xB0,0xF0, \ +0xBD,0x39,0x68,0x38,0x1C,0x89,0x89,0x2F,0x29,0x01,0xDA,0x00,0x20,0xF6,0xE7, \ +0x20,0x22,0xF3,0x1D,0x19,0x33,0xB2,0x5C,0x5B,0x78,0x1B,0x02,0x1A,0x43,0x12, \ +0x04,0x12,0x0C,0x00,0x92,0x14,0x2A,0x04,0xDB,0x7D,0x23,0x00,0x9A,0xDB,0x00, \ +0x9A,0x42,0x01,0xDD,0x00,0x20,0xE3,0xE7,0x22,0x22,0xB2,0x5C,0x52,0x08,0x01, \ +0xD2,0x00,0x20,0xDD,0xE7,0x24,0x27,0x04,0x39,0x24,0x29,0x34,0xDD,0xF0,0x5D, \ +0x00,0x28,0x09,0xD0,0x01,0x28,0x11,0xD0,0x03,0x28,0x2B,0xD1,0xF0,0x19,0x41, \ 
+0x78,0x01,0x29,0x19,0xD0,0x00,0x20,0xCC,0xE7,0xF0,0x19,0x40,0x78,0x20,0x28, \ +0x01,0xD9,0x00,0x25,0x00,0xE0,0x01,0x25,0xC0,0x19,0x87,0x1C,0x15,0xE0,0xF0, \ +0x19,0x02,0x90,0x01,0xA9,0xFF,0xF7,0x7F,0xFE,0x04,0x1C,0x01,0xD1,0x00,0x20, \ +0xB9,0xE7,0x02,0x98,0x40,0x78,0xC0,0x19,0x87,0x1C,0x07,0xE0,0x31,0x49,0x80, \ +0x78,0x09,0x7D,0x88,0x42,0x01,0xD0,0x00,0x20,0xAD,0xE7,0x03,0x37,0x2B,0x48, \ +0x00,0x68,0x80,0x89,0x04,0x38,0xB8,0x42,0xCC,0xDC,0x00,0x2D,0x01,0xD1,0x00, \ +0x20,0xA2,0xE7,0x28,0x49,0x01,0x23,0x0A,0x7D,0x21,0x1C,0x01,0xA8,0xFF,0xF7, \ +0x91,0xFD,0x00,0x28,0x00,0xD1,0x98,0xE7,0x25,0x4C,0x06,0x22,0xE0,0x1D,0x07, \ +0x30,0x22,0x4F,0x03,0x99,0xFC,0xF7,0x3E,0xFC,0xE0,0x1D,0x0D,0x30,0x20,0x22, \ +0xF9,0x1D,0x15,0x31,0xFC,0xF7,0x37,0xFC,0xF8,0x1D,0x39,0x30,0x81,0x78,0xE0, \ +0x1D,0x29,0x30,0x01,0x71,0x01,0x79,0x1B,0x48,0x20,0x23,0x01,0x75,0x00,0x9A, \ +0x1A,0x49,0x22,0x80,0x0A,0x78,0x1A,0x43,0x0A,0x70,0x0A,0x78,0x10,0x23,0x1A, \ +0x43,0x0A,0x70,0x00,0x21,0x16,0x4A,0x50,0x30,0x41,0x70,0x91,0x70,0x05,0x21, \ +0x81,0x71,0x04,0x20,0xF9,0xF7,0xD1,0xFE,0x01,0x21,0x04,0x20,0xFE,0xF7,0xA1, \ +0xFA,0xC0,0x20,0xFB,0xF7,0xD2,0xFE,0x0F,0x49,0x0A,0x68,0x0F,0x49,0xCB,0x69, \ +0x1A,0x43,0xCA,0x61,0x0E,0x49,0x01,0x27,0x0F,0x70,0xFB,0xF7,0xC7,0xFE,0x38, \ +0x1C,0x57,0xE7,0x00,0x00,0x10,0x9C,0x00,0x00,0x50,0x01,0x00,0x02,0x00,0x01, \ +0x00,0x02,0x00,0x00,0x00,0x02,0xC4,0x00,0x00,0x02,0x84,0x00,0x00,0x02,0x50, \ +0x09,0x00,0x02,0x9C,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0xAC,0x02,0x00,0x02, \ +0x40,0x00,0x00,0x04,0x3E,0x01,0x00,0x02,0xF0,0xB4,0x1D,0x4A,0x1D,0x4B,0xD1, \ +0x1D,0x69,0x31,0xC9,0x7A,0x49,0x00,0x5F,0x5A,0xD1,0x1D,0x59,0x31,0x0B,0x8B, \ +0x01,0x3B,0x1B,0x04,0x1B,0x14,0x0B,0x83,0x00,0x2B,0x26,0xDD,0x17,0x4B,0x01, \ +0x25,0x5C,0x7A,0x50,0x32,0xD3,0x79,0x00,0x2B,0x04,0xD1,0x05,0x30,0x0E,0x28, \ +0x05,0xD9,0x0E,0x38,0x03,0xE0,0x01,0x30,0x0E,0x28,0x00,0xD9,0x01,0x20,0x00, \ +0x2C,0x05,0xD1,0x2B,0x1C,0x46,0x1E,0xB3,0x40,0x3B,0x40,0x10,0xD1,0x07,0xE0, \ +0xD3,0x79,0x00,0x2B,0x0C,0xD1,0x0A,0x4B,0x1B,0x18,0x5B,0x7B,0x00,0x2B,0x07, \ +0xD1,0x0B,0x8B,0x01,0x3B,0x1B,0x04,0x1B,0x14,0x0B,0x83,0x00,0x2B,0xDC,0xDC, \ +0x00,0x20,0xF0,0xBC,0xF7,0x46,0x00,0x00,0x50,0x09,0x00,0x02,0x6C,0x02,0x00, \ +0x02,0xB4,0x00,0x00,0x02,0x18,0x01,0x00,0x02,0xF0,0xB5,0x29,0x4C,0x07,0x1C, \ +0x00,0x26,0x27,0x70,0xE0,0x1D,0x03,0x30,0x66,0x70,0x66,0x80,0x06,0x22,0x25, \ +0x49,0xFC,0xF7,0x9B,0xFB,0x25,0x4D,0xE0,0x1D,0x09,0x30,0x06,0x22,0xE9,0x1D, \ +0x35,0x31,0xFC,0xF7,0x93,0xFB,0xFF,0x20,0x20,0x71,0x60,0x71,0xA0,0x71,0xE0, \ +0x71,0x20,0x72,0x60,0x72,0x38,0x1C,0x40,0x28,0x1D,0x4F,0x1D,0xD0,0x00,0xF0, \ +0xE8,0xF8,0x00,0xF0,0xF0,0xF8,0xE5,0x1D,0x1D,0x35,0x28,0x1C,0x00,0xF0,0x0B, \ +0xF9,0x2D,0x18,0x28,0x1C,0x00,0xF0,0x23,0xF9,0x2D,0x18,0x16,0x48,0x80,0x7D, \ +0x02,0x28,0x03,0xD1,0x28,0x1C,0x00,0xF0,0x33,0xF9,0x2D,0x18,0x28,0x1C,0x00, \ +0xF0,0x3D,0xF9,0x28,0x18,0x00,0x1B,0xF8,0x64,0xB8,0x64,0xF0,0xBD,0x26,0x76, \ +0x0F,0x4E,0xE0,0x1D,0x72,0x79,0x13,0x30,0xE9,0x1D,0x15,0x31,0x62,0x76,0xFC, \ +0xF7,0x60,0xFB,0x70,0x79,0x00,0x19,0x1A,0x30,0x00,0xF0,0x03,0xF9,0x70,0x79, \ +0x20,0x30,0x00,0x06,0x00,0x0E,0xB8,0x64,0xF0,0xBD,0x00,0x00,0x10,0x08,0x00, \ +0x02,0x60,0x00,0x00,0x02,0xC4,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0x00,0x00, \ +0x00,0x02,0x08,0x01,0x00,0x02,0xF8,0xB5,0x07,0x1C,0xFF,0xF7,0x9C,0xFF,0x00, \ +0x26,0x80,0x2F,0x47,0x4D,0x0E,0xD1,0xC0,0x20,0xFB,0xF7,0xFD,0xFD,0x04,0x1C, \ +0x45,0x48,0x41,0x7B,0x03,0x29,0x03,0xD0,0x20,0x1C,0xFB,0xF7,0xF5,0xFD,0xF8, \ +0xBD,0x01,0x21,0x41,0x73,0x10,0xE0,0x40,0x2F,0x05,0xD1,0x40,0x48,0x01,0x21, \ 
+0x81,0x74,0x3F,0x48,0x46,0x80,0x08,0xE0,0x50,0x2F,0x06,0xD1,0x3E,0x48,0x3E, \ +0x49,0x06,0x22,0xFC,0xF7,0x22,0xFB,0x01,0x21,0x29,0x71,0x3C,0x48,0xF8,0xF7, \ +0x3F,0xF9,0x50,0x2F,0x02,0xD1,0x36,0x48,0xC0,0x6C,0x01,0xE0,0x34,0x48,0x80, \ +0x6C,0x33,0x49,0x88,0x66,0x37,0x48,0x89,0x6E,0xC0,0x79,0xF9,0xF7,0xBF,0xFC, \ +0x30,0x49,0x50,0x2F,0xC8,0x66,0x0C,0xD1,0x2E,0x48,0x2E,0x49,0xC0,0x6E,0x48, \ +0x80,0x31,0x48,0xC0,0x79,0xFA,0xF7,0x49,0xF9,0x2B,0x49,0x49,0x88,0x40,0x18, \ +0x29,0x49,0x48,0x80,0x28,0x48,0x27,0x49,0x80,0x2F,0x48,0x66,0x16,0xD1,0xFC, \ +0xF7,0x77,0xFB,0x2A,0x49,0x89,0x89,0x49,0x00,0x01,0x31,0x08,0x40,0x21,0x49, \ +0x88,0x62,0x27,0x48,0x00,0x88,0x08,0x62,0x89,0x6A,0x8B,0x00,0x59,0x18,0x89, \ +0x00,0x09,0x18,0x08,0x20,0xF9,0xF7,0x83,0xFD,0x20,0x1C,0xFB,0xF7,0xA2,0xFD, \ +0xF9,0xF7,0xB6,0xF9,0xF9,0xF7,0x20,0xFA,0x00,0x90,0x80,0x2F,0x05,0xD1,0x00, \ +0x98,0x00,0x28,0x23,0xD1,0x01,0x21,0x69,0x70,0x20,0xE0,0x40,0x2F,0x1E,0xD1, \ +0x12,0x4C,0xC0,0x20,0xA6,0x74,0xFB,0xF7,0x8E,0xFD,0x07,0x1C,0xA8,0x79,0x01, \ +0x28,0x12,0xD1,0x00,0x98,0x00,0x28,0x0D,0xD1,0xE0,0x1D,0x69,0x30,0x81,0x7A, \ +0x00,0x29,0x0A,0xD1,0x01,0x21,0x81,0x72,0x0E,0x49,0xC8,0x8A,0x81,0x02,0x04, \ +0x20,0xF9,0xF7,0x58,0xFD,0x01,0xE0,0x01,0x21,0x69,0x71,0x38,0x1C,0xFB,0xF7, \ +0x74,0xFD,0x7D,0xE7,0x00,0x00,0xA0,0x09,0x00,0x02,0x60,0x09,0x00,0x02,0x50, \ +0x09,0x00,0x02,0x10,0x08,0x00,0x02,0x14,0x08,0x00,0x02,0x34,0x01,0x00,0x02, \ +0x26,0x08,0x00,0x02,0x08,0x01,0x00,0x02,0xC4,0x00,0x00,0x02,0xB0,0x01,0x00, \ +0x02,0x03,0x49,0x02,0x48,0x09,0x88,0x01,0x80,0xF7,0x46,0x00,0x00,0x30,0x08, \ +0x00,0x02,0x84,0x00,0x00,0x02,0x0D,0x49,0x0C,0x48,0x8A,0x7A,0x92,0x00,0x02, \ +0x80,0xC9,0x7A,0x00,0x29,0x03,0xD0,0x01,0x88,0x10,0x23,0x19,0x43,0x01,0x80, \ +0x08,0x49,0x49,0x7A,0x01,0x29,0x04,0xD1,0x01,0x88,0x22,0x23,0x19,0x43,0x01, \ +0x80,0xF7,0x46,0x01,0x88,0x02,0x23,0x19,0x43,0x01,0x80,0xF7,0x46,0x32,0x08, \ +0x00,0x02,0x84,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x90,0xB4,0x01,0x1C,0x00, \ +0x20,0x0A,0x4A,0x08,0x70,0x53,0x79,0x00,0x2B,0x08,0xD9,0x08,0x4B,0x1F,0x18, \ +0x3F,0x7D,0x0C,0x18,0x01,0x30,0xA7,0x70,0x57,0x79,0x87,0x42,0xF7,0xD8,0x50, \ +0x79,0x48,0x70,0x50,0x79,0x90,0xBC,0x02,0x30,0xF7,0x46,0x00,0x00,0x08,0x01, \ +0x00,0x02,0x84,0x00,0x00,0x02,0x90,0xB4,0x01,0x1C,0x01,0x20,0x08,0x70,0x00, \ +0x20,0x08,0x4B,0x00,0x22,0x9F,0x18,0x3F,0x7C,0x00,0x2F,0x02,0xD0,0x0C,0x18, \ +0xA7,0x70,0x01,0x30,0x01,0x32,0x04,0x2A,0xF5,0xD3,0x48,0x70,0x90,0xBC,0x02, \ +0x30,0xF7,0x46,0x00,0x00,0x00,0x00,0x00,0x02,0x03,0x21,0x01,0x70,0x01,0x22, \ +0x42,0x70,0x01,0x30,0x80,0x18,0x02,0x4A,0x12,0x7D,0x02,0x70,0x08,0x1C,0xF7, \ +0x46,0x00,0x00,0x00,0x00,0x00,0x02,0x06,0x21,0x01,0x70,0x02,0x21,0x41,0x70, \ +0x04,0x49,0x02,0x30,0x0A,0x89,0x02,0x70,0x09,0x89,0x09,0x0A,0x41,0x70,0x04, \ +0x20,0xF7,0x46,0x00,0x00,0x84,0x00,0x00,0x02,0x0A,0x21,0x01,0x70,0x02,0x21, \ +0x41,0x70,0x00,0x21,0x81,0x70,0x02,0x30,0x41,0x1C,0x07,0x20,0x08,0x70,0x04, \ +0x20,0xF7,0x46,0xF0,0xB5,0x83,0xB0,0x51,0x48,0x52,0x4D,0x48,0x21,0x01,0x70, \ +0x01,0x26,0xEC,0x1D,0x29,0x34,0x46,0x70,0x62,0x79,0x11,0x21,0x4E,0x4F,0x02, \ +0x2A,0x01,0xD1,0x41,0x70,0x05,0xE0,0x03,0x2A,0x03,0xD1,0xBA,0x78,0x08,0x2A, \ +0x00,0xD1,0x41,0x70,0x4A,0x49,0x09,0x68,0x89,0x78,0x00,0x29,0x03,0xD0,0x41, \ +0x78,0x08,0x23,0x19,0x43,0x41,0x70,0x46,0x49,0x00,0x23,0x00,0x22,0x46,0x48, \ +0xC9,0x79,0xF8,0xF7,0x1F,0xF8,0x45,0x48,0x45,0x49,0x06,0x22,0xFC,0xF7,0xEA, \ +0xF9,0xE9,0x1D,0x07,0x31,0x0D,0x1C,0x06,0x22,0x42,0x48,0xFC,0xF7,0xE3,0xF9, \ +0x29,0x1C,0x06,0x22,0x41,0x48,0xFC,0xF7,0xDE,0xF9,0x40,0x4D,0x18,0x20,0xA8, \ 
+0x66,0x39,0x48,0x18,0x21,0xC0,0x79,0xF9,0xF7,0x86,0xFB,0xE8,0x66,0x32,0x48, \ +0xEE,0x1D,0x68,0x66,0x01,0x20,0x49,0x36,0xF0,0x70,0xF9,0xF7,0xA3,0xF8,0xF9, \ +0xF7,0x0D,0xF9,0x02,0x90,0x00,0x20,0xF0,0x70,0x02,0x98,0x00,0x28,0x01,0xD0, \ +0x03,0xB0,0xF0,0xBD,0x02,0x26,0x2C,0x48,0x6E,0x60,0xC0,0x79,0x32,0x49,0x40, \ +0x00,0x08,0x5A,0x31,0x49,0xC9,0x88,0x40,0x18,0x31,0x49,0x09,0x88,0x41,0x18, \ +0x01,0x20,0xF9,0xF7,0x51,0xFC,0x00,0x22,0xD2,0x43,0x6E,0x74,0x00,0x92,0x01, \ +0x22,0x10,0x21,0x01,0xAB,0x2B,0x48,0xFB,0xF7,0x73,0xFD,0x00,0x20,0x1E,0x49, \ +0x68,0x74,0x0A,0x68,0x53,0x78,0x00,0x2B,0x22,0xD0,0x93,0x78,0x01,0x33,0x1B, \ +0x06,0x1B,0x0E,0x93,0x70,0x04,0x2B,0x02,0xDA,0x09,0x68,0x48,0x70,0xD2,0xE7, \ +0x60,0x79,0x01,0x28,0x1F,0xDD,0x02,0x28,0x03,0xD1,0xBA,0x78,0x08,0x23,0x9A, \ +0x43,0xBA,0x70,0x03,0x28,0x17,0xD1,0x0E,0x48,0x40,0x78,0x40,0x09,0x06,0xD3, \ +0x01,0x20,0xF8,0x70,0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0x0C,0xE0,0x01, \ +0x20,0xB8,0x71,0x09,0xE0,0x60,0x79,0x03,0x28,0x06,0xD1,0x05,0x4A,0x01,0x20, \ +0x52,0x78,0x52,0x09,0x00,0xD3,0x00,0x20,0xF8,0x70,0x09,0x68,0x40,0x20,0x08, \ +0x70,0xAB,0xE7,0x00,0x00,0x10,0x08,0x00,0x02,0x84,0x00,0x00,0x02,0xC0,0x09, \ +0x00,0x02,0xD8,0x01,0x00,0x02,0x08,0x01,0x00,0x02,0x12,0x08,0x00,0x02,0x1A, \ +0x08,0x00,0x02,0x60,0x00,0x00,0x02,0x20,0x08,0x00,0x02,0x14,0x08,0x00,0x02, \ +0x50,0x09,0x00,0x02,0xB8,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xB6,0x01,0x00, \ +0x02,0x04,0x07,0x00,0x02,0xF8,0xB4,0x00,0x26,0x82,0x1C,0x06,0x29,0x01,0xD3, \ +0x48,0x08,0x02,0xD3,0x00,0x20,0xF8,0xBC,0xF7,0x46,0x00,0x24,0x03,0x23,0x00, \ +0x25,0xCF,0x1E,0x17,0xD0,0x01,0x39,0xD0,0x5C,0x99,0x42,0x02,0xD1,0x00,0x28, \ +0x0F,0xD1,0x0C,0xE0,0x0E,0x28,0x0C,0xD8,0x01,0x28,0x0A,0xD3,0xA8,0x42,0x08, \ +0xD3,0xD5,0x18,0x6D,0x78,0x03,0x33,0x03,0x34,0x2D,0x18,0xA7,0x42,0xEC,0xD8, \ +0x01,0x2E,0x01,0xD1,0x00,0x20,0xE0,0xE7,0x1B,0x48,0xC0,0x79,0x01,0x28,0x00, \ +0xD1,0xDB,0xE7,0x19,0x48,0xC1,0x1D,0x29,0x31,0x49,0x7A,0x00,0x29,0x01,0xD1, \ +0x01,0x20,0xD3,0xE7,0x91,0x78,0x3A,0x30,0x00,0x23,0x81,0x70,0x51,0x78,0x41, \ +0x70,0x11,0x78,0x01,0x70,0x03,0x21,0x00,0x2F,0x1B,0xD9,0x50,0x5C,0x00,0x28, \ +0x18,0xD0,0x0F,0x4D,0x01,0x26,0x2C,0x18,0x66,0x73,0x54,0x18,0x00,0x94,0x64, \ +0x78,0x24,0x18,0xA0,0x42,0x0A,0xD2,0x0A,0x4D,0x01,0x26,0x2D,0x18,0x6E,0x73, \ +0x00,0x9E,0x10,0x3D,0xB6,0x78,0x01,0x30,0xA0,0x42,0xEE,0x73,0xF4,0xD3,0x03, \ +0x31,0x03,0x33,0x9F,0x42,0xE3,0xD8,0x01,0x20,0xAA,0xE7,0x00,0x00,0xC0,0x09, \ +0x00,0x02,0x84,0x00,0x00,0x02,0x18,0x01,0x00,0x02,0xF1,0xB5,0x81,0xB0,0x25, \ +0x4F,0x01,0x9E,0x3F,0x68,0x00,0x24,0xBF,0x89,0x00,0x21,0x24,0x20,0x3D,0x1F, \ +0x00,0x95,0x24,0x2D,0x3F,0xD9,0x21,0x4F,0x7F,0x7A,0x35,0x5C,0x03,0x2D,0x08, \ +0xD0,0x07,0x2D,0x0D,0xD1,0x35,0x18,0x6D,0x78,0x01,0x24,0x03,0x1C,0x02,0x35, \ +0x28,0x18,0x0A,0xE0,0x35,0x18,0x6D,0x78,0x01,0x21,0x02,0x1C,0x02,0x35,0x28, \ +0x18,0x05,0xE0,0x35,0x18,0x6D,0x78,0x02,0x35,0x28,0x18,0x00,0x29,0x01,0xD0, \ +0x00,0x2F,0x02,0xD0,0x00,0x9D,0x85,0x42,0xE1,0xD8,0x00,0x29,0x1D,0xD0,0xB0, \ +0x18,0x40,0x78,0x01,0x28,0x01,0xD0,0x02,0xB0,0xF0,0xBD,0x01,0x2F,0x15,0xD1, \ +0x00,0x2C,0x13,0xD0,0x01,0x98,0xC0,0x18,0x41,0x78,0xFF,0xF7,0x5E,0xFF,0x00, \ +0x28,0x00,0xD1,0xF1,0xE7,0x08,0x48,0xC1,0x79,0x00,0x29,0x01,0xD1,0x01,0x21, \ +0xC1,0x71,0x06,0x48,0x00,0x68,0x00,0x28,0x01,0xD0,0x00,0xF0,0xFB,0xFB,0xE4, \ +0xE7,0x50,0x01,0x00,0x02,0xB4,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0xC4,0x02, \ +0x00,0x02,0x00,0xB5,0x05,0x49,0x89,0x7C,0x01,0x29,0x04,0xD1,0x01,0x78,0x80, \ +0x29,0x01,0xD1,0xFF,0xF7,0xA0,0xFF,0x00,0xBD,0x00,0x00,0xC4,0x00,0x00,0x02, \ 
+0x90,0xB5,0x10,0x4C,0x60,0x78,0x00,0x28,0x1A,0xD0,0x0F,0x4F,0x38,0x68,0x40, \ +0x68,0x42,0x7E,0x18,0x30,0x00,0x2A,0x09,0xD0,0x0C,0x49,0x49,0x79,0x91,0x42, \ +0x0F,0xD1,0x0B,0x49,0x02,0x30,0xFC,0xF7,0x58,0xF8,0x00,0x28,0x09,0xD1,0x38, \ +0x68,0x40,0x68,0xC1,0x1D,0x03,0x31,0x06,0x22,0x07,0x48,0xFC,0xF7,0x6C,0xF8, \ +0x01,0x20,0xA0,0x70,0x90,0xBD,0x00,0x00,0xA0,0x09,0x00,0x02,0x50,0x01,0x00, \ +0x02,0x08,0x01,0x00,0x02,0x98,0x00,0x00,0x02,0x34,0x01,0x00,0x02,0xB0,0xB4, \ +0x03,0x78,0x00,0x27,0x20,0x49,0x20,0x4A,0x08,0x2B,0x37,0xD1,0xD3,0x78,0x00, \ +0x2B,0x04,0xD0,0xD0,0x7A,0x09,0x68,0x88,0x75,0xB0,0xBC,0xF7,0x46,0x00,0x79, \ +0x40,0x08,0x03,0xD3,0x90,0x7A,0x09,0x68,0x88,0x75,0xF6,0xE7,0x0B,0x68,0x99, \ +0x7D,0xD2,0x7A,0x91,0x42,0x01,0xDD,0x9A,0x75,0xEF,0xE7,0x15,0x4C,0x08,0x19, \ +0x00,0x7C,0x00,0x28,0xEA,0xD1,0x08,0x1C,0x01,0x29,0x0A,0xD3,0x01,0x38,0x25, \ +0x18,0x2D,0x7C,0x00,0x2D,0x03,0xD1,0x01,0x28,0xF8,0xD2,0x00,0x2F,0x01,0xD0, \ +0x98,0x75,0xDC,0xE7,0x8A,0x42,0x06,0xD9,0x01,0x31,0x60,0x18,0x00,0x7C,0x00, \ +0x28,0x03,0xD1,0x8A,0x42,0xF8,0xD8,0x00,0x2F,0x01,0xD0,0x99,0x75,0xCF,0xE7, \ +0x9A,0x75,0xCD,0xE7,0xD0,0x79,0x09,0x68,0x88,0x75,0xC9,0xE7,0x00,0x00,0xCC, \ +0x01,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0xB5,0x07,0x48, \ +0x81,0x79,0x03,0x29,0x02,0xD0,0x81,0x79,0x04,0x29,0x05,0xD1,0x00,0x21,0x81, \ +0x71,0x07,0x21,0x04,0x20,0xFD,0xF7,0x92,0xFE,0x00,0xBD,0x00,0x00,0xA0,0x09, \ +0x00,0x02,0xB0,0xB5,0x37,0x48,0x37,0x49,0x00,0x68,0x44,0x68,0x22,0x20,0x20, \ +0x5C,0x10,0x23,0x18,0x40,0xC1,0x27,0x00,0x28,0x04,0xD0,0x08,0x78,0x00,0x28, \ +0x06,0xD1,0x38,0x1C,0xB0,0xBD,0x08,0x78,0x00,0x28,0x01,0xD0,0x38,0x1C,0xB0, \ +0xBD,0x24,0x20,0x20,0x5C,0x00,0x28,0x01,0xD0,0x38,0x1C,0xB0,0xBD,0xE0,0x1D, \ +0x1D,0x30,0x45,0x78,0x2A,0x49,0x00,0x2D,0x04,0xD0,0x4A,0x79,0xAA,0x42,0x01, \ +0xD0,0x38,0x1C,0xB0,0xBD,0x4A,0x79,0x26,0x49,0x02,0x30,0xFB,0xF7,0xB8,0xFF, \ +0x00,0x28,0x01,0xD0,0x38,0x1C,0xB0,0xBD,0x60,0x19,0x20,0x30,0xC0,0x79,0x40, \ +0x19,0x28,0x30,0x21,0x5C,0x03,0x29,0x01,0xD0,0x38,0x1C,0xB0,0xBD,0x20,0x18, \ +0x1E,0x49,0x80,0x78,0x09,0x7D,0x88,0x42,0x01,0xD0,0x38,0x1C,0xB0,0xBD,0x1B, \ +0x48,0x40,0x7A,0x00,0x28,0x06,0xD0,0x1A,0x48,0x08,0x18,0x40,0x7B,0x00,0x28, \ +0x0D,0xD1,0x38,0x1C,0xB0,0xBD,0x18,0x48,0x18,0x4A,0xC0,0x7A,0x40,0x00,0x10, \ +0x5A,0x01,0x22,0x01,0x39,0x8A,0x40,0x10,0x40,0x01,0xD1,0x38,0x1C,0xB0,0xBD, \ +0xC0,0x20,0xFB,0xF7,0x66,0xFA,0x04,0x1C,0x01,0x20,0xF8,0xF7,0x66,0xFB,0x00, \ +0x28,0x04,0xD1,0x20,0x1C,0xFB,0xF7,0x5D,0xFA,0x38,0x1C,0xB0,0xBD,0x20,0x1C, \ +0xFB,0xF7,0x58,0xFA,0x02,0x20,0xFF,0xF7,0xC7,0xF9,0x00,0x20,0xB0,0xBD,0x00, \ +0x00,0x50,0x01,0x00,0x02,0x1C,0x00,0x00,0x02,0x08,0x01,0x00,0x02,0xE0,0x00, \ +0x00,0x02,0x00,0x00,0x00,0x02,0xB4,0x00,0x00,0x02,0x18,0x01,0x00,0x02,0xC0, \ +0x09,0x00,0x02,0x6C,0x02,0x00,0x02,0x80,0xB5,0xFD,0xF7,0x33,0xFD,0x1A,0x48, \ +0x00,0xF0,0x0E,0xFE,0x19,0x4B,0x1A,0x48,0x59,0x7A,0x01,0x29,0x04,0xD1,0x48, \ +0x21,0x41,0x81,0x18,0x21,0x01,0x81,0x03,0xE0,0x90,0x21,0x41,0x81,0x30,0x21, \ +0x01,0x81,0x41,0x89,0x02,0x89,0x14,0x4F,0x89,0x18,0x12,0x4A,0x11,0x80,0xC2, \ +0x88,0x80,0x88,0x11,0x18,0x09,0x18,0x39,0x80,0x51,0x18,0xFF,0x31,0x10,0x4A, \ +0x31,0x31,0x11,0x80,0x19,0x88,0x10,0x4F,0x48,0x43,0x0E,0x49,0x08,0x80,0xD8, \ +0x79,0x0E,0x49,0x38,0x70,0x38,0x78,0x08,0x70,0xF7,0xF7,0x30,0xFC,0xF9,0xF7, \ +0x34,0xF9,0x39,0x78,0x0B,0x48,0x40,0x5C,0x0B,0x49,0x08,0x70,0x80,0xBD,0x60, \ +0x00,0x00,0x02,0x08,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xB4,0x01,0x00,0x02, \ +0xB0,0x01,0x00,0x02,0xB2,0x01,0x00,0x02,0xB6,0x01,0x00,0x02,0x9A,0x01,0x00, \ 
+0x02,0x9B,0x01,0x00,0x02,0xC8,0x01,0x00,0x02,0x99,0x01,0x00,0x02,0x80,0xB4, \ +0x23,0x48,0x00,0x21,0x01,0x70,0x00,0x20,0x19,0x27,0x21,0x4A,0xFF,0x02,0x11, \ +0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x00,0x20,0x43,0x27,0x1E,0x4A,0x7F,0x02, \ +0x11,0x54,0x01,0x30,0xB8,0x42,0xFB,0xDB,0x1C,0x48,0x1A,0x4A,0x01,0x80,0x1C, \ +0x48,0x1C,0x4B,0x02,0x60,0x13,0x60,0x02,0x68,0xD7,0x1D,0x15,0x37,0x57,0x60, \ +0x3A,0x1C,0x07,0x68,0x08,0x3A,0xBA,0x60,0x02,0x68,0x11,0x73,0x02,0x68,0x91, \ +0x73,0x07,0x68,0x03,0x22,0xBA,0x75,0x02,0x68,0x91,0x82,0x00,0x68,0x13,0x4A, \ +0x10,0x60,0x13,0x48,0x0D,0x4A,0x01,0x80,0x12,0x48,0x02,0x60,0x13,0x60,0x02, \ +0x68,0xD3,0x1D,0x11,0x33,0x53,0x60,0x02,0x68,0x91,0x81,0x02,0x68,0x11,0x72, \ +0x00,0x68,0x0D,0x49,0x08,0x60,0x0D,0x49,0x08,0x60,0x0D,0x49,0x01,0x20,0x08, \ +0x70,0x80,0xBC,0xF7,0x46,0x00,0x00,0x9C,0x01,0x00,0x02,0x00,0x11,0x00,0x02, \ +0x00,0xDA,0x00,0x02,0xF8,0x01,0x00,0x02,0xCC,0x01,0x00,0x02,0x00,0x00,0x00, \ +0x80,0x68,0x02,0x00,0x02,0xFA,0x01,0x00,0x02,0x4C,0x01,0x00,0x02,0x64,0x02, \ +0x00,0x02,0x50,0x01,0x00,0x02,0xE7,0x01,0x00,0x02,0xF0,0xB5,0x82,0xB0,0x39, \ +0x4E,0xF7,0x1D,0x69,0x37,0xB8,0x78,0x04,0x23,0x18,0x40,0x40,0x24,0x00,0x25, \ +0x00,0x28,0x03,0xD1,0x7D,0x71,0x3C,0x71,0x02,0xB0,0xF0,0xBD,0x33,0x49,0xA4, \ +0x20,0x08,0x70,0x10,0x20,0x48,0x70,0x32,0x48,0x03,0x23,0xC0,0x88,0x9B,0x03, \ +0x18,0x43,0x48,0x80,0xC8,0x1D,0x03,0x30,0x06,0x22,0x2E,0x49,0xFB,0xF7,0xB1, \ +0xFE,0x2E,0x49,0x2E,0x48,0x06,0x22,0xFB,0xF7,0xAC,0xFE,0x10,0x20,0x2D,0x49, \ +0xB0,0x66,0xC8,0x79,0x10,0x21,0xF9,0xF7,0x55,0xF8,0xF0,0x66,0x24,0x48,0x70, \ +0x66,0x01,0x20,0x38,0x70,0xF8,0xF7,0x74,0xFD,0xF8,0xF7,0xDE,0xFD,0x3D,0x70, \ +0x82,0x25,0x00,0x28,0x2E,0xD1,0x23,0x49,0x24,0x48,0xC9,0x79,0x24,0x4A,0xC0, \ +0x88,0x49,0x00,0x51,0x5A,0x40,0x18,0x22,0x49,0x09,0x88,0x41,0x18,0x01,0x20, \ +0x38,0x71,0x04,0x20,0x70,0x60,0x01,0x20,0xF9,0xF7,0x24,0xF9,0x00,0x22,0xD2, \ +0x43,0x00,0x92,0x01,0x22,0x11,0x21,0x01,0xAB,0x1B,0x48,0xFB,0xF7,0x47,0xFA, \ +0x01,0x98,0x41,0x08,0x01,0xD3,0x3C,0x71,0x1A,0xE0,0x40,0x09,0x18,0xD3,0x78, \ +0x79,0x17,0x49,0x01,0x30,0x00,0x06,0x00,0x0E,0x78,0x71,0x09,0x7C,0x88,0x42, \ +0x01,0xDA,0x3D,0x71,0x0D,0xE0,0x3C,0x71,0x0B,0xE0,0x78,0x79,0x10,0x49,0x01, \ +0x30,0x00,0x06,0x00,0x0E,0x78,0x71,0x09,0x7C,0x88,0x42,0x01,0xDA,0x3D,0x71, \ +0x00,0xE0,0x3C,0x71,0x97,0xE7,0x50,0x09,0x00,0x02,0x10,0x08,0x00,0x02,0x84, \ +0x00,0x00,0x02,0x60,0x00,0x00,0x02,0x92,0x00,0x00,0x02,0x14,0x08,0x00,0x02, \ +0x08,0x01,0x00,0x02,0x00,0x00,0x00,0x02,0xB8,0x01,0x00,0x02,0xB6,0x01,0x00, \ +0x02,0x44,0x07,0x00,0x02,0xC4,0x00,0x00,0x02,0x80,0xB5,0xC0,0x20,0xFB,0xF7, \ +0x00,0xF9,0x07,0x1C,0x12,0x48,0x01,0x68,0x01,0x31,0x01,0x60,0x11,0x48,0xFB, \ +0xF7,0x7C,0xFE,0x00,0x29,0x17,0xD1,0x0F,0x48,0x10,0x4A,0x03,0x78,0x10,0x49, \ +0x00,0x2B,0x06,0xD1,0x09,0x68,0xD3,0x69,0x19,0x43,0xD1,0x61,0x01,0x21,0x01, \ +0x70,0x0A,0xE0,0x0C,0x4B,0x9B,0x79,0x05,0x2B,0x06,0xD0,0x09,0x68,0xD3,0x69, \ +0xC9,0x43,0x19,0x40,0xD1,0x61,0x00,0x21,0x01,0x70,0x38,0x1C,0xFB,0xF7,0xDB, \ +0xF8,0x80,0xBD,0xE0,0x02,0x00,0x02,0x20,0x4E,0x00,0x00,0x3E,0x01,0x00,0x02, \ +0x40,0x00,0x00,0x04,0xAC,0x02,0x00,0x02,0xA0,0x09,0x00,0x02,0x90,0xB5,0xC0, \ +0x20,0xFB,0xF7,0xCA,0xF8,0x07,0x1C,0x0F,0x48,0x81,0x7A,0x00,0x29,0x15,0xD1, \ +0x01,0x7B,0x01,0x29,0x12,0xD1,0xC1,0x7A,0x00,0x29,0x0F,0xD1,0x00,0x24,0x0A, \ +0x49,0x50,0x30,0x0C,0x70,0x44,0x70,0x00,0xF0,0xCE,0xFB,0x08,0x48,0x01,0x21, \ +0x84,0x61,0x07,0x20,0xFD,0xF7,0x7E,0xFC,0x06,0x49,0x01,0x20,0x08,0x70,0x38, \ +0x1C,0xFB,0xF7,0xAC,0xF8,0x90,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0x40,0x01, \ 
+0x00,0x02,0x80,0x00,0x00,0x04,0xBB,0x02,0x00,0x02,0x90,0xB5,0x16,0x49,0x16, \ +0x4F,0xCC,0x1D,0x29,0x34,0x62,0x79,0x03,0x2A,0x0F,0xD1,0x01,0x23,0x1B,0x03, \ +0x98,0x42,0x0B,0xD1,0x08,0x88,0x80,0x02,0x05,0x23,0x1B,0x03,0xC1,0x18,0x02, \ +0x20,0xF9,0xF7,0x6C,0xF8,0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0xB8,0x78, \ +0x01,0x28,0x0C,0xD1,0x00,0xF0,0x55,0xFB,0x60,0x79,0x02,0x28,0x08,0xD1,0xB8, \ +0x78,0x08,0x23,0x18,0x43,0xB8,0x70,0xB8,0x78,0x40,0x08,0x40,0x00,0xB8,0x70, \ +0x90,0xBD,0xB8,0x78,0x40,0x08,0x40,0x00,0xB8,0x70,0x90,0xBD,0x84,0x00,0x00, \ +0x02,0xC0,0x09,0x00,0x02,0x80,0xB5,0x18,0x48,0x81,0x7A,0x00,0x29,0x1C,0xD1, \ +0x01,0x7B,0x01,0x29,0x19,0xD1,0xC0,0x7A,0x00,0x28,0x16,0xD1,0x14,0x4F,0xF8, \ +0x1D,0x29,0x30,0x40,0x79,0x03,0x28,0x14,0xD1,0xF9,0xF7,0x17,0xF8,0x39,0x88, \ +0x11,0x4B,0x10,0x4F,0x89,0x02,0x08,0x1A,0x98,0x42,0x08,0xD9,0xC1,0x1A,0x06, \ +0x20,0xF9,0xF7,0x30,0xF8,0x00,0xF0,0x66,0xFB,0x01,0x20,0xB8,0x70,0x80,0xBD, \ +0x00,0x20,0xB8,0x70,0x80,0xBD,0x01,0x20,0x80,0x06,0x08,0x49,0x40,0x6A,0x06, \ +0x4B,0x49,0x68,0xC0,0x18,0x88,0x42,0xF2,0xD2,0x00,0xF0,0x55,0xFB,0x80,0xBD, \ +0x50,0x09,0x00,0x02,0x84,0x00,0x00,0x02,0xC0,0x09,0x00,0x02,0xB8,0x0B,0x00, \ +0x00,0x80,0x00,0x00,0x04,0xF0,0xB5,0xC0,0x20,0xFB,0xF7,0x30,0xF8,0x05,0x1C, \ +0x00,0x26,0x2F,0x48,0x07,0x24,0x64,0x06,0x06,0x70,0xE0,0x69,0x10,0x23,0x98, \ +0x43,0xE0,0x61,0x2C,0x48,0x2D,0x4A,0xC1,0x69,0x03,0x0C,0x19,0x43,0xC1,0x61, \ +0xC1,0x69,0x1B,0x23,0x99,0x43,0xC1,0x61,0xC1,0x69,0x83,0x01,0x19,0x43,0xC1, \ +0x61,0xA1,0x69,0x01,0x23,0x19,0x43,0xA1,0x61,0x0F,0x23,0x1B,0x06,0x19,0x89, \ +0xD1,0x69,0x01,0x05,0x00,0x68,0xCF,0x68,0x18,0x88,0xC9,0x6B,0x04,0x27,0x20, \ +0x48,0x22,0x49,0x06,0x70,0x20,0x48,0x96,0x61,0x07,0x70,0x01,0x20,0x08,0x70, \ +0x1F,0x48,0x06,0x70,0x1F,0x48,0x06,0x70,0x1F,0x48,0xC6,0x74,0x1F,0x48,0x06, \ +0x70,0xFA,0xF7,0xD0,0xFA,0x1A,0x48,0x00,0x78,0x00,0x28,0x03,0xD1,0xA0,0x69, \ +0xFD,0x23,0x18,0x40,0xA0,0x61,0xFA,0xF7,0x2A,0xFC,0xFA,0xF7,0x94,0xFC,0x0A, \ +0x20,0xF7,0xF7,0xE3,0xF9,0xFA,0xF7,0x87,0xFC,0x14,0x48,0x01,0x21,0xC2,0x1D, \ +0x49,0x32,0x07,0x75,0x91,0x71,0x56,0x70,0x12,0x4B,0x02,0x22,0x1A,0x70,0x12, \ +0x4B,0x70,0x30,0x19,0x70,0x11,0x4B,0x59,0x71,0x86,0x70,0x11,0x48,0x02,0x70, \ +0xF7,0xF7,0xD5,0xF9,0x28,0x1C,0xFA,0xF7,0xD0,0xFF,0xF0,0xBD,0x00,0x00,0xE6, \ +0x01,0x00,0x02,0x40,0x00,0x00,0x04,0x80,0x00,0x00,0x04,0x40,0x01,0x00,0x02, \ +0x53,0x02,0x00,0x02,0x5E,0x02,0x00,0x02,0x3A,0x01,0x00,0x02,0x3B,0x01,0x00, \ +0x02,0x50,0x09,0x00,0x02,0x51,0x02,0x00,0x02,0xBA,0x02,0x00,0x02,0xBB,0x02, \ +0x00,0x02,0xB4,0x00,0x00,0x02,0x3F,0x01,0x00,0x02,0x90,0xB5,0x22,0x49,0x00, \ +0x27,0xC8,0x1D,0x49,0x30,0x82,0x79,0x01,0x2A,0x00,0xD0,0x47,0x71,0xCA,0x1D, \ +0x69,0x32,0x93,0x79,0x1D,0x49,0x00,0x2B,0x03,0xD0,0x97,0x71,0x01,0x20,0x88, \ +0x73,0x90,0xBD,0x52,0x78,0x00,0x2A,0x02,0xD0,0xFD,0xF7,0x76,0xFB,0x90,0xBD, \ +0x02,0x78,0x00,0x2A,0x03,0xD0,0x47,0x71,0xFD,0xF7,0x1F,0xFA,0x90,0xBD,0x42, \ +0x79,0x00,0x2A,0x02,0xD0,0xFD,0xF7,0x4F,0xFA,0x90,0xBD,0x82,0x78,0x00,0x2A, \ +0x02,0xD0,0xFD,0xF7,0x33,0xFA,0x90,0xBD,0xC9,0x7B,0x00,0x29,0x02,0xD0,0xFD, \ +0xF7,0x39,0xFA,0x90,0xBD,0x80,0x79,0x05,0x28,0x0D,0xD1,0x0A,0x4C,0x20,0x68, \ +0x01,0x7B,0xC9,0x09,0x02,0xD3,0xF9,0xF7,0xC4,0xFB,0x90,0xBD,0x01,0x7B,0x10, \ +0x29,0x02,0xD1,0xF9,0xF7,0xD6,0xFC,0x20,0x60,0x38,0x1C,0x90,0xBD,0x00,0x00, \ +0x50,0x09,0x00,0x02,0x60,0x09,0x00,0x02,0xCC,0x01,0x00,0x02,0xF0,0xB5,0xC0, \ +0x20,0xFA,0xF7,0x62,0xFF,0x15,0x4D,0x00,0x24,0x07,0x1C,0xEE,0x1D,0x2E,0x36, \ +0x14,0x48,0x01,0x19,0x89,0x7B,0x00,0x29,0x19,0xD0,0x00,0x5D,0x81,0x00,0x09, \ 
+0x18,0x49,0x00,0x28,0x19,0xC2,0x1D,0x49,0x32,0x52,0x78,0x91,0x42,0x04,0xDB, \ +0x30,0x30,0x40,0x79,0x0C,0x49,0x08,0x55,0x05,0xE0,0x50,0x1A,0x22,0x06,0x12, \ +0x0E,0x31,0x1C,0x00,0xF0,0x53,0xF8,0x08,0x48,0xFC,0x23,0x01,0x5D,0x19,0x40, \ +0x01,0x55,0x01,0x34,0x0E,0x2C,0xDD,0xDB,0x38,0x1C,0xFA,0xF7,0x38,0xFF,0xF0, \ +0xBD,0x00,0x00,0x64,0x0A,0x00,0x02,0x18,0x01,0x00,0x02,0xFC,0x0A,0x00,0x02, \ +0xF0,0xB5,0x1B,0x4E,0x00,0x27,0x1B,0x4D,0xF4,0x1D,0x3C,0x34,0xF1,0x19,0xC8, \ +0x1D,0x29,0x30,0x42,0x79,0xE0,0x5D,0x12,0x1A,0x93,0x00,0x9A,0x18,0x00,0xD5, \ +0x0F,0x32,0x12,0x11,0x50,0x31,0x2B,0x7E,0x49,0x78,0x89,0x1A,0x8B,0x42,0x02, \ +0xD3,0x12,0x49,0xC8,0x55,0x05,0xE0,0xC8,0x1A,0x3A,0x06,0x12,0x0E,0x21,0x1C, \ +0x00,0xF0,0x20,0xF8,0x0D,0x48,0xFC,0x23,0xC1,0x5D,0x19,0x40,0xC1,0x55,0x0C, \ +0x49,0x49,0x7C,0x49,0x08,0x03,0xD3,0xC1,0x5D,0x01,0x23,0x19,0x43,0xC1,0x55, \ +0x01,0x37,0x0E,0x2F,0xD5,0xDB,0x28,0x7D,0x05,0x49,0x40,0x18,0x10,0x38,0xC0, \ +0x7B,0x04,0x49,0x48,0x74,0xF0,0xBD,0x64,0x0A,0x00,0x02,0x00,0x00,0x00,0x02, \ +0xFC,0x0A,0x00,0x02,0xD8,0x07,0x00,0x02,0x90,0xB5,0x0C,0x1C,0x01,0x01,0x05, \ +0x20,0x17,0x1C,0xFB,0xF7,0x6D,0xFC,0xE3,0x5D,0x00,0x22,0x06,0x49,0x83,0x42, \ +0x02,0xD3,0x18,0x1A,0xC8,0x55,0x00,0xE0,0xCA,0x55,0xC8,0x5D,0xE3,0x5D,0x98, \ +0x42,0x00,0xDD,0xCA,0x55,0x90,0xBD,0xFC,0x0A,0x00,0x02,0x80,0xB5,0x11,0x48, \ +0x02,0x68,0x51,0x68,0xC8,0x1D,0x19,0x30,0x80,0x78,0x40,0x08,0x15,0xD3,0x92, \ +0x89,0x24,0x20,0x04,0x3A,0x24,0x2A,0x10,0xD9,0x0F,0x5C,0x06,0x2F,0x0D,0xD2, \ +0x02,0xA3,0xDB,0x5D,0x5B,0x00,0x9F,0x44,0x00,0x1C,0x03,0x03,0x09,0x03,0x03, \ +0x0A,0x0B,0x18,0x5B,0x78,0x02,0x33,0x18,0x18,0x82,0x42,0xEE,0xD8,0x80,0xBD, \ +0x08,0x18,0xF8,0xF7,0x8B,0xF8,0x80,0xBD,0x50,0x01,0x00,0x02,0xB0,0xB5,0x0C, \ +0x1C,0x07,0x1C,0x01,0x28,0x01,0xD3,0x0E,0x2F,0x01,0xD9,0x00,0x20,0xB0,0xBD, \ +0x1B,0x4D,0xE8,0x69,0x2B,0x0C,0x18,0x43,0xE8,0x61,0x19,0x48,0xE9,0x69,0x00, \ +0x68,0x08,0x43,0xE8,0x61,0x18,0x48,0xE9,0x69,0x00,0x68,0x08,0x43,0xE8,0x61, \ +0xE8,0x69,0x04,0x23,0x18,0x43,0xE8,0x61,0x14,0x48,0xF7,0xF7,0x8D,0xF8,0x00, \ +0xF0,0xBF,0xF8,0x01,0x2C,0x01,0xD1,0x00,0xF0,0xE9,0xF9,0x00,0xF0,0x81,0xF8, \ +0x10,0x48,0x00,0x78,0x01,0x28,0x0A,0xD1,0x0E,0x2F,0x04,0xD1,0xE8,0x69,0x40, \ +0x23,0x98,0x43,0xE8,0x61,0x03,0xE0,0xE8,0x69,0x40,0x23,0x18,0x43,0xE8,0x61, \ +0x38,0x1C,0x00,0xF0,0x11,0xF8,0xE8,0x69,0x01,0x23,0x9B,0x02,0x98,0x43,0xE8, \ +0x61,0x01,0x20,0xB0,0xBD,0x40,0x00,0x00,0x04,0xBC,0x02,0x00,0x02,0xC0,0x02, \ +0x00,0x02,0xDC,0x05,0x00,0x00,0xB9,0x02,0x00,0x02,0x90,0xB5,0x07,0x1C,0x07, \ +0x20,0x40,0x06,0x81,0x69,0x04,0x23,0x19,0x43,0x81,0x61,0xFA,0xF7,0x9A,0xFA, \ +0x0A,0x20,0xF7,0xF7,0x55,0xF8,0x17,0x4C,0x02,0x20,0x61,0x68,0x00,0xF0,0x30, \ +0xF8,0x00,0x20,0x21,0x68,0x00,0xF0,0x2C,0xF8,0x13,0x48,0xBF,0x00,0x38,0x18, \ +0x40,0x38,0xC1,0x6B,0x01,0x20,0x00,0xF0,0x24,0xF8,0x05,0x20,0x21,0x69,0x00, \ +0xF0,0x20,0xF8,0x08,0x20,0xA1,0x68,0x00,0xF0,0x1C,0xF8,0x07,0x20,0xE1,0x68, \ +0x00,0xF0,0x18,0xF8,0x0A,0x48,0x38,0x18,0x40,0x38,0xC1,0x6B,0x04,0x20,0x00, \ +0xF0,0x11,0xF8,0xFF,0x20,0xF5,0x30,0xF7,0xF7,0x2D,0xF8,0xFA,0xF7,0xD1,0xFA, \ +0x0A,0x20,0xF7,0xF7,0x28,0xF8,0x90,0xBD,0x00,0x00,0xE4,0x02,0x00,0x02,0xF8, \ +0x02,0x00,0x02,0x30,0x03,0x00,0x02,0x90,0xB4,0x0B,0x4A,0x13,0x68,0xDF,0x43, \ +0x0A,0x4B,0xDC,0x69,0x27,0x40,0xDF,0x61,0x07,0x05,0x89,0x00,0x39,0x43,0x80, \ +0x08,0x08,0x43,0x18,0x62,0x18,0x1C,0x01,0x6A,0xC9,0x0D,0xFC,0xD3,0x11,0x68, \ +0xC2,0x69,0x11,0x43,0xC1,0x61,0x90,0xBC,0xF7,0x46,0xC0,0x02,0x00,0x02,0x40, \ +0x00,0x00,0x04,0x80,0xB5,0x19,0x4F,0x00,0x20,0x39,0x78,0xF7,0xF7,0x9E,0xF8, \ 
+0x79,0x78,0x01,0x20,0xF7,0xF7,0x9A,0xF8,0xB9,0x78,0x02,0x20,0xF7,0xF7,0x96, \ +0xF8,0xF9,0x78,0x03,0x20,0xF7,0xF7,0x92,0xF8,0x79,0x7C,0x11,0x20,0xF7,0xF7, \ +0x8E,0xF8,0x39,0x7D,0x14,0x20,0xF7,0xF7,0x8A,0xF8,0x79,0x7D,0x15,0x20,0xF7, \ +0xF7,0x86,0xF8,0x39,0x7F,0x1C,0x20,0xF7,0xF7,0x82,0xF8,0xB9,0x7C,0x12,0x20, \ +0xF7,0xF7,0x7E,0xF8,0xF9,0x7C,0x13,0x20,0xF7,0xF7,0x7A,0xF8,0x05,0x48,0x00, \ +0x78,0x01,0x28,0x03,0xD1,0x79,0x7F,0x1D,0x20,0xF7,0xF7,0x72,0xF8,0x80,0xBD, \ +0x00,0x00,0xD8,0x07,0x00,0x02,0xB8,0x02,0x00,0x02,0x80,0xB5,0x07,0x27,0x7F, \ +0x06,0xB8,0x69,0x40,0x08,0x40,0x00,0xB8,0x61,0xB8,0x69,0x01,0x23,0x18,0x43, \ +0xB8,0x61,0x05,0x20,0xF6,0xF7,0xBE,0xFF,0xB8,0x69,0x40,0x08,0x40,0x00,0xB8, \ +0x61,0x05,0x20,0xF6,0xF7,0xB7,0xFF,0x80,0xBD,0xF0,0xB5,0x39,0x4E,0x07,0x1C, \ +0xF0,0x7A,0x03,0x28,0xFC,0xD0,0xC0,0x20,0xFA,0xF7,0xB3,0xFD,0x36,0x4D,0x04, \ +0x1C,0xE8,0x69,0xAB,0x01,0x18,0x43,0xE8,0x61,0x98,0x03,0xC1,0x68,0xC0,0x6B, \ +0x28,0x68,0x0F,0x20,0x00,0x06,0x01,0x88,0x00,0x89,0x30,0x48,0xC0,0x69,0x30, \ +0x48,0xC1,0x19,0xC8,0x1F,0x09,0x38,0xC2,0x7B,0x2E,0x48,0xFF,0x2A,0x00,0xD0, \ +0x02,0x75,0x49,0x7B,0xFF,0x29,0x00,0xD0,0x41,0x75,0x2B,0x49,0xC9,0x19,0x10, \ +0x39,0xC9,0x7B,0xFF,0x29,0x02,0xD0,0x8A,0x07,0x00,0xD1,0x41,0x74,0x26,0x48, \ +0x01,0x7D,0x14,0x20,0xF7,0xF7,0x22,0xF8,0x23,0x48,0x41,0x7D,0x15,0x20,0xF7, \ +0xF7,0x1D,0xF8,0x23,0x48,0x00,0x78,0x01,0x28,0x0A,0xD1,0x0E,0x2F,0x04,0xD1, \ +0xE8,0x69,0x40,0x23,0x98,0x43,0xE8,0x61,0x03,0xE0,0xE8,0x69,0x40,0x23,0x18, \ +0x43,0xE8,0x61,0x1C,0x48,0x07,0x75,0x00,0x7D,0xFF,0xF7,0x07,0xFF,0x01,0x20, \ +0xFD,0xF7,0x54,0xF9,0xE8,0x69,0x19,0x4B,0x18,0x40,0xE8,0x61,0x06,0x20,0x70, \ +0x72,0xFA,0x21,0x07,0x20,0xF8,0xF7,0x40,0xFD,0x15,0x49,0x08,0x20,0xF8,0xF7, \ +0x3C,0xFD,0x20,0x1C,0xFA,0xF7,0x5B,0xFD,0x70,0x7C,0x01,0x28,0x05,0xD1,0x00, \ +0x22,0x10,0x21,0x10,0x48,0xFA,0xF7,0x63,0xFD,0xF0,0xBD,0x70,0x7C,0x02,0x28, \ +0xFB,0xD1,0x00,0x22,0x10,0x21,0x0D,0x48,0xFA,0xF7,0x5A,0xFD,0xF0,0xBD,0x00, \ +0x00,0x50,0x09,0x00,0x02,0x40,0x00,0x00,0x04,0x80,0x00,0x00,0x04,0x64,0x0A, \ +0x00,0x02,0xD8,0x07,0x00,0x02,0xFC,0x0A,0x00,0x02,0xB9,0x02,0x00,0x02,0x00, \ +0x00,0x00,0x02,0xFF,0xEF,0x00,0x00,0x88,0x13,0x00,0x00,0xE4,0x06,0x00,0x02, \ +0x04,0x07,0x00,0x02,0xB0,0xB5,0x07,0x21,0x49,0x06,0xCA,0x69,0x52,0x09,0x03, \ +0xD3,0xCA,0x69,0x10,0x23,0x9A,0x43,0xCA,0x61,0x18,0x4C,0x01,0x28,0x0C,0xD1, \ +0x18,0x4D,0x6F,0x68,0xF6,0xF7,0x2B,0xFF,0x39,0x1A,0x49,0x01,0x09,0x18,0x69, \ +0x60,0x61,0x6B,0x09,0x1A,0x49,0x01,0x08,0x18,0x60,0x63,0x12,0x48,0x00,0x21, \ +0x00,0x7D,0xFF,0xF7,0x5F,0xFE,0x11,0x4F,0x11,0x4B,0xF9,0x1D,0x69,0x31,0x08, \ +0x73,0x01,0x20,0x80,0x06,0xC0,0x68,0xE0,0x69,0x18,0x40,0xE0,0x61,0x01,0x20, \ +0xFD,0xF7,0xEB,0xF8,0x01,0x20,0x38,0x72,0x06,0x20,0x78,0x72,0x07,0x20,0xFF, \ +0x21,0x2D,0x31,0xF8,0xF7,0xD8,0xFC,0x4B,0x21,0xC9,0x00,0x08,0x20,0xF8,0xF7, \ +0xD3,0xFC,0xB0,0xBD,0x40,0x00,0x00,0x04,0x80,0x00,0x00,0x04,0x00,0x00,0x00, \ +0x02,0x50,0x09,0x00,0x02,0xFF,0xEF,0x00,0x00,0xF0,0xB5,0x1F,0x4F,0xF8,0x69, \ +0x3B,0x0C,0x18,0x43,0xF8,0x61,0xF8,0x69,0x1B,0x23,0x98,0x43,0xF8,0x61,0xF8, \ +0x69,0xBB,0x01,0x18,0x43,0x07,0x24,0x64,0x06,0xF8,0x61,0xA0,0x69,0x01,0x23, \ +0x18,0x43,0xA0,0x61,0x01,0x20,0xF8,0xF7,0xCB,0xFC,0x08,0x20,0xF8,0xF7,0xC8, \ +0xFC,0x07,0x20,0xF8,0xF7,0xC5,0xFC,0x01,0x20,0x80,0x06,0xC1,0x68,0xC0,0x6B, \ +0x10,0x4E,0x38,0x68,0x0F,0x20,0x00,0x06,0x00,0x88,0x01,0x25,0x75,0x72,0xF8, \ +0xF7,0x70,0xFC,0xF0,0x1D,0x69,0x30,0x85,0x70,0x0B,0x4D,0x6E,0x68,0xF6,0xF7, \ +0xC5,0xFE,0x31,0x1A,0x49,0x09,0x09,0x18,0x69,0x60,0x79,0x6B,0x09,0x1A,0x49, \ 
+0x09,0x08,0x18,0x78,0x63,0xE0,0x69,0x10,0x23,0x18,0x43,0xE0,0x61,0xF0,0xBD, \ +0x00,0x00,0x40,0x00,0x00,0x04,0x50,0x09,0x00,0x02,0x80,0x00,0x00,0x04,0xF0, \ +0xB5,0x33,0x4A,0x01,0x21,0xD4,0x1D,0x19,0x34,0xE5,0x78,0x00,0x20,0x31,0x4F, \ +0xFF,0x2D,0x13,0xD0,0x2B,0x09,0x11,0xD3,0x13,0x7F,0x3B,0x70,0x56,0x7F,0x7E, \ +0x70,0x96,0x7F,0xBE,0x70,0xD3,0x7F,0xFB,0x70,0x23,0x78,0x7B,0x74,0x63,0x78, \ +0x3B,0x75,0xA3,0x78,0x7B,0x75,0x3D,0x77,0xB9,0x74,0xF8,0x74,0x0E,0xE0,0x38, \ +0x70,0x60,0x23,0x7B,0x70,0x40,0x23,0xBB,0x70,0xFB,0x70,0x78,0x74,0xFF,0x23, \ +0x3B,0x75,0x57,0x23,0x7B,0x75,0x48,0x23,0x3B,0x77,0xB9,0x74,0xF8,0x74,0x1F, \ +0x4B,0x9D,0x78,0x1F,0x4B,0x04,0x2D,0x01,0xDA,0x58,0x73,0x05,0xE0,0x24,0x79, \ +0xFF,0x2C,0x01,0xD0,0x5C,0x73,0x00,0xE0,0x58,0x73,0xFB,0x78,0x1A,0x4C,0xC0, \ +0x2B,0x02,0xD1,0x21,0x76,0xF8,0x70,0x00,0xE0,0x20,0x76,0x17,0x4D,0x11,0x1C, \ +0x28,0x7D,0x80,0x18,0xC2,0x1F,0x09,0x3A,0xD2,0x7B,0xFF,0x2A,0x00,0xD0,0x3A, \ +0x75,0x42,0x7B,0xFF,0x2A,0x00,0xD0,0x7A,0x75,0x40,0x30,0x80,0x78,0xFF,0x28, \ +0x0C,0xD0,0x80,0x07,0x0A,0xD1,0x0E,0x4C,0x43,0x31,0x0E,0x22,0x20,0x1C,0xFB, \ +0xF7,0x88,0xF9,0x28,0x7D,0x00,0x19,0x10,0x38,0xC0,0x7B,0x78,0x74,0x78,0x78, \ +0x09,0x49,0x40,0x09,0x80,0x07,0x80,0x0F,0x08,0x70,0xF0,0xBD,0x64,0x0A,0x00, \ +0x02,0xD8,0x07,0x00,0x02,0x14,0x01,0x00,0x02,0xE8,0x07,0x00,0x02,0x50,0x09, \ +0x00,0x02,0x00,0x00,0x00,0x02,0xFC,0x0A,0x00,0x02,0x9A,0x01,0x00,0x02,0x02, \ +0x79,0x41,0x79,0x12,0x02,0x11,0x43,0xC2,0x78,0x12,0x04,0x11,0x43,0x82,0x78, \ +0x12,0x06,0x0A,0x43,0x01,0x21,0x89,0x06,0x8A,0x61,0x42,0x78,0x00,0x78,0x00, \ +0x02,0x10,0x43,0xC8,0x61,0xF7,0x46,0x00,0xB5,0x0C,0x49,0x0D,0x48,0x41,0x61, \ +0x23,0x21,0x81,0x61,0x00,0x22,0x01,0x05,0x0A,0x61,0xC2,0x01,0x42,0x60,0x05, \ +0x22,0xC2,0x60,0x08,0x4A,0x82,0x62,0xF2,0x22,0x82,0x60,0x32,0x22,0x4A,0x61, \ +0xCA,0x68,0xC9,0x6B,0x00,0x68,0x00,0x21,0x00,0x20,0x00,0xF0,0x07,0xF8,0x00, \ +0xBD,0x04,0x90,0x00,0x00,0x40,0x00,0x00,0x04,0x81,0xFF,0x00,0x00,0x02,0x1C, \ +0x01,0x20,0x80,0x06,0x82,0x62,0x41,0x62,0xF7,0x46,0x80,0xB5,0x1D,0x48,0x20, \ +0x23,0x81,0x69,0x1D,0x4F,0x99,0x43,0x81,0x61,0x1B,0x48,0x81,0x78,0x1C,0x48, \ +0x00,0x29,0x0F,0xD0,0x01,0x7D,0x04,0x29,0x0C,0xD0,0x01,0x21,0xC1,0x77,0x03, \ +0x21,0x41,0x77,0xF8,0xF7,0x97,0xFB,0x39,0x88,0x89,0x02,0x09,0x1A,0x06,0x20, \ +0xF8,0xF7,0xB5,0xFB,0x80,0xBD,0xF9,0x1D,0x29,0x31,0x0A,0x79,0x02,0x2A,0xF9, \ +0xD1,0xC2,0x1D,0x49,0x32,0x92,0x79,0x05,0x2A,0xF4,0xD1,0x49,0x79,0x01,0x29, \ +0xF1,0xDD,0xC7,0x1D,0x69,0x37,0xB8,0x78,0x01,0x28,0x04,0xD1,0x00,0x20,0xFF, \ +0xF7,0x91,0xFE,0x00,0x20,0xB8,0x70,0xB8,0x78,0x40,0x08,0x40,0x00,0xB8,0x70, \ +0xB8,0x78,0x08,0x23,0x98,0x43,0xB8,0x70,0x80,0xBD,0x80,0x00,0x00,0x04,0x08, \ +0x01,0x00,0x02,0x84,0x00,0x00,0x02,0x50,0x09,0x00,0x02,0x80,0xB5,0xF8,0xF7, \ +0x77,0xFE,0x08,0x48,0x01,0x21,0x41,0x60,0x41,0x7F,0x10,0x30,0x00,0x27,0x01, \ +0x29,0x00,0xD1,0x47,0x73,0x01,0x20,0xF6,0xF7,0xAF,0xFD,0x03,0x48,0x07,0x83, \ +0x87,0x82,0x80,0xBD,0x00,0x00,0x50,0x09,0x00,0x02,0x20,0x00,0x20,0x0F,0x80, \ +0xB5,0x0F,0x48,0x40,0x23,0x81,0x69,0x0E,0x4F,0x99,0x43,0x81,0x61,0xF8,0x69, \ +0x9B,0x01,0x18,0x43,0xF8,0x61,0x14,0x20,0xF6,0xF7,0x82,0xFD,0xF8,0x69,0x0A, \ +0x4B,0x0A,0x49,0x18,0x40,0xF8,0x61,0x01,0x20,0x08,0x72,0x4A,0x7A,0x06,0x2A, \ +0x00,0xD0,0x48,0x72,0x08,0x73,0x00,0x20,0xC8,0x72,0x05,0x49,0x08,0x70,0x80, \ +0xBD,0x00,0x00,0x80,0x00,0x00,0x04,0x40,0x00,0x00,0x04,0xFF,0xEF,0x00,0x00, \ +0x50,0x09,0x00,0x02,0xE8,0x01,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, \ +0x00,0x00,0x00,0x00,0x00} diff -u --new-file --recursive --exclude-from 
/usr/src/exclude linux.vanilla/drivers/usb/atmel/ieee802_11.h linux.22-ac2/drivers/usb/atmel/ieee802_11.h --- linux.vanilla/drivers/usb/atmel/ieee802_11.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/ieee802_11.h 2003-08-13 21:14:34.000000000 +0100 @@ -0,0 +1,127 @@ +#ifndef _IEEE802_11_H +#define _IEEE802_11_H + +struct ieee802_11_hdr { + u16 frame_ctl; + u16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + u16 seq_ctl; + u8 addr4[ETH_ALEN]; +} __attribute__ ((packed)); + +/* max. length of frame body, incl. IV and ICV fields) + see 802.11(1999), section 7.1.2 */ +#define IEEE802_11_MAX_DATA_LEN (4+2304+4) + +/* we include addr4 here althrough we'll never handle any packet containing it. + + 4 at the end for the FCS (Do we get it from the device ???) */ +#define IEEE802_11_MAX_FRAME_LEN \ + (sizeof(struct ieee802_11_hdr) + IEEE802_11_MAX_DATA_LEN + 4) +//#define IEEE802_11_HLEN 30 +//#define IEEE802_11_FRAME_LEN (IEEE802_11_DATA_LEN + IEEE802_11_HLEN) + +/* defines for information element coding: + 1 byte ID, 1 byte length of information field, n bytes information field + (see 7.3.2 in [1]) */ +#define IE_ID_SSID 0 /* length 0 - 32 */ +#define IE_ID_SUPPORTED_RATES 1 +#define IE_ID_DS_PARAM_SET 3 +#define IE_ID_CF_PARAM_SET 4 +#define IE_ID_TIM 5 +#define IE_ID_IBSS_PARAM_SET 6 +#define IE_ID_CHALLENGE_TEXT 16 + +/* we must convert frame_control to cpu endianess before reading it. */ + +/* Frame control field constants, see 802.11 std, chapter 7.1.3, pg. 36 */ +#define IEEE802_11_FCTL_VERS 0x0002 +#define IEEE802_11_FCTL_FTYPE 0x000c +#define IEEE802_11_FCTL_STYPE 0x00f0 +#define IEEE802_11_FCTL_TODS 0x0100 +#define IEEE802_11_FCTL_FROMDS 0x0200 +#define IEEE802_11_FCTL_MOREFRAGS 0x0400 +#define IEEE802_11_FCTL_RETRY 0x0800 +#define IEEE802_11_FCTL_PM 0x1000 +#define IEEE802_11_FCTL_MOREDATA 0x2000 +#define IEEE802_11_FCTL_WEP 0x4000 +#define IEEE802_11_FCTL_ORDER 0x8000 + +/* frame type values */ +#define IEEE802_11_FTYPE_MGMT 0x0000 +#define IEEE802_11_FTYPE_CTL 0x0004 +#define IEEE802_11_FTYPE_DATA 0x0008 + +/* management subtypes */ +#define IEEE802_11_STYPE_ASSOC_REQ 0x0000 +#define IEEE802_11_STYPE_ASSOC_RESP 0x0010 +#define IEEE802_11_STYPE_REASSOC_REQ 0x0020 +#define IEEE802_11_STYPE_REASSOC_RESP 0x0030 +#define IEEE802_11_STYPE_PROBE_REQ 0x0040 +#define IEEE802_11_STYPE_PROBE_RESP 0x0050 +#define IEEE802_11_STYPE_BEACON 0x0080 +#define IEEE802_11_STYPE_ATIM 0x0090 +#define IEEE802_11_STYPE_DISASSOC 0x00A0 +#define IEEE802_11_STYPE_AUTH 0x00B0 +#define IEEE802_11_STYPE_DEAUTH 0x00C0 + +/* control subtypes */ +#define IEEE802_11_STYPE_PSPOLL 0x00A0 +#define IEEE802_11_STYPE_RTS 0x00B0 +#define IEEE802_11_STYPE_CTS 0x00C0 +#define IEEE802_11_STYPE_ACK 0x00D0 +#define IEEE802_11_STYPE_CFEND 0x00E0 +#define IEEE802_11_STYPE_CFENDACK 0x00F0 + +/* data subtypes */ +#define IEEE802_11_STYPE_DATA 0x0000 +#define IEEE802_11_STYPE_DATA_CFACK 0x0010 +#define IEEE802_11_STYPE_DATA_CFPOLL 0x0020 +#define IEEE802_11_STYPE_DATA_CFACKPOLL 0x0030 +#define IEEE802_11_STYPE_NULLFUNC 0x0040 +#define IEEE802_11_STYPE_CFACK 0x0050 +#define IEEE802_11_STYPE_CFPOLL 0x0060 +#define IEEE802_11_STYPE_CFACKPOLL 0x0070 + +/* sequence control fragment / seq nr fields (802.12 std., ch. 7.1.3.4, pg. 
40) */ +#define IEEE802_11_SCTL_FRAG 0x000F +#define IEEE802_11_SCTL_SEQ 0xFFF0 + +/* capability field in beacon, (re)assocReq */ +#define IEEE802_11_CAPA_ESS 0x0001 +#define IEEE802_11_CAPA_IBSS 0x0002 +#define IEEE802_11_CAPA_CF_POLLABLE 0x0004 +#define IEEE802_11_CAPA_POLL_REQ 0x0008 +#define IEEE802_11_CAPA_PRIVACY 0x0010 +#define IEEE802_11_CAPA_SHORT_PREAMBLE 0x0020 + +/* auth frame: algorithm type */ +#define IEEE802_11_AUTH_ALG_OPEN_SYSTEM 0x0000 +#define IEEE802_11_AUTH_ALG_SHARED_SECRET 0x0001 + +/* disassoc/deauth frame: reason codes (see 802.11, ch. 7.3.1.7, table 18) */ +#define IEEE802_11_REASON_UNSPECIFIED 0x0001 +#define IEEE802_11_REASON_PREV_AUTH_INVALID 0x0002 +#define IEEE802_11_REASON_DEAUTH_LEAVING 0x0003 +#define IEEE802_11_REASON_DISASS_INACTIVITY 0x0004 +#define IEEE802_11_REASON_DISASS_TOO_MANY_STA 0x0005 +#define IEEE802_11_REASON_CL2_FROM_NONAUTH 0x0006 +#define IEEE802_11_REASON_CL3_FROM_NONASSOC 0x0007 +#define IEEE802_11_REASON_DISASS_LEAVING 0x0008 +#define IEEE802_11_REASON_NOT_AUTH 0x0009 + +/* status in some response frames (802.11, ch. 7.3.1.9, table 19) */ +#define IEEE802_11_STATUS_SUCCESS 0x0000 +#define IEEE802_11_STATUS_UNSPECIFIED 0x0001 +#define IEEE802_11_STATUS_UNSUPP_CAPABILITIES 0x000a +#define IEEE802_11_STATUS_NO_PREV_ASSOC 0x000b +#define IEEE802_11_STATUS_ASSOC_FAILED 0x000c +#define IEEE802_11_STATUS_UNSUPP_AUTH_ALG 0x000d +#define IEEE802_11_STATUS_AUTH_INV_TRANS_SEQ 0x000e +#define IEEE802_11_STATUS_AUTH_CHALLENGE_FAIL 0x000f +#define IEEE802_11_STATUS_AUTH_TIMEOUT 0x0010 +#define IEEE802_11_STATUS_ASSOC_TOO_MANY_STA 0x0011 +#define IEEE802_11_STATUS_BASIC_RATE_SET 0x0012 + +#endif /* _IEEE802_11_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/Makefile linux.22-ac2/drivers/usb/atmel/Makefile --- linux.vanilla/drivers/usb/atmel/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/Makefile 2003-08-13 23:01:17.000000000 +0100 @@ -0,0 +1,25 @@ +# +# Makefile for drivers/usb/atmel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +obj-y := +obj-n := +obj-m := +obj- := +export-objs := at76c503.o + +obj-$(CONFIG_USB_ATMEL76C503) += at76c503.o +obj-$(CONFIG_USB_ATMEL76C503_I3861) += at76c503-i3861.o +obj-$(CONFIG_USB_ATMEL76C503_I3863) += at76c503-i3863.o +obj-$(CONFIG_USB_ATMEL76C503_RFMD) += at76c503-rfmd.o +obj-$(CONFIG_USB_ATMEL76C503_RFMD_ACCTON) += at76c503-rfmd-acc.o +obj-$(CONFIG_USB_ATMEL76C505_RFMD) += at76c505-rfmd.o + +O_TARGET := usb-atmel.o + +include $(TOPDIR)/Rules.make + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/atmel/README linux.22-ac2/drivers/usb/atmel/README --- linux.vanilla/drivers/usb/atmel/README 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/atmel/README 2003-08-13 21:00:21.000000000 +0100 @@ -0,0 +1,192 @@ +(* $Id: README,v 1.11 2003/07/20 09:48:17 jal2 Exp $ *) + +at76c503 - linux driver for Atmel at76c503 based usb wlan adapters +------------------------------------------------------------------ + +This is another driver for the Atmel based USB WLAN adaptors. I am +developing this driver in private. I am not associated with Atmel or +any corporation that builds devices with this chip. My only +information source is the driver from +http://atmelwlandriver.sourceforge.net. 
+ +Look at http://at76c503a.berlios.de/ for more information, mailing lists and links. + +Known devices with this chip are: +- Belkin F5D6050 +- Dynalink/Askey WLL013 +- Linksys WUSB11 v2.6 +- Netgear MA101B +and many more. + +Requirements: +------------- + +- Kernel 2.4.x. I am developing the driver on 2.4.20, but it + reportedly also works on 2.4.19, 2.4.18 and 2.4.16. I am not sure + about 2.5.x. + +Installation: +------------- + +make +make install + +Running: +-------- + +Plug in the adapter. If you already used it under Windows or with the +driver from atmelwlandriver.sourceforge.net, replug the device +(i.e. unplug it and plug it in again). + +If you have hotplug installed, the drivers should now be loaded. If not, +load them by hand: + +modprobe -v at76c503-rfmd +or +insmod usbdfu.o; insmod at76c503.o; insmod at76c503-rfmd.o + +You can give the network device a name other than wlanX by passing +the module the netdev_name parameter, e.g. +insmod at76c503-rfmd.o netdev_name=eth%d +would give the first device the name eth0, the second eth1 etc... + +Check if the modules are loaded with lsmod. It should look like this: + +... +at76c503-rfmd 38656 0 (unused) +at76c503 34004 0 [at76c503-rfmd] +usbdfu 9144 0 [at76c503-rfmd] +... + +Set up networking (replace <iface> with wlan0, wlan1, ..., <channel> with a +channel number (1..14), <essid> with your network id (a string), +<ipaddr> with an IP address) + +iwconfig <iface> channel <channel> mode ad-hoc essid <essid> + +Example: +iwconfig wlan2 mode ad-hoc channel 10 essid okuwlan + +Test it by pinging another host with a wlan adaptor. + +Note that the firmware survives reboots of the computer, but not +unplugging the device. + +If you get problems: +check the dmesg output for error messages. + +Power Save Modes in Infrastructure Mode +--------------------------------------- +You can activate 802.11 power save mode by executing + iwconfig wlanX power on period N +The default period value is 0 and the smallest possible period +of power save (listen interval) is two beacon intervals. +This should lower power consumption, but decrease data throughput and +increase delays. + +There is an Atmel specific power save mode called "smart power save" +which switches the device into active state (by a NULL packet to the AP) +as soon as there are pending packets at the AP for the STA and puts it +back into doze state after the next beacon. This saves the need for +sending PS_POLL packets. +It can only be activated by + iwpriv wlanX powersave_mode 3 +I don't know if the listen_interval is used in this mode, too. +If it is, you can only set it beforehand with iwconfig, e.g. + iwconfig wlanX power on period N + iwpriv wlanX powersave_mode 3 + + +Private Parameters +------------------ + +In addition to the parameters of iwconfig, some can be set by iwpriv: +- long preamble: iwpriv wlanX short_preamble 0 +- short preamble: iwpriv wlanX short_preamble 1 +- open system authentication: iwpriv wlanX auth_mode 0 +- shared key authentication: iwpriv wlanX auth_mode 1 +- amount of debug messages: iwpriv wlanX set_debug N + with N a combination of bits, see DBG_* in at76c503.c +- power save mode: iwpriv wlanX powersave_mode N (* N = 1,2,3 + for none, save, smart save *) +- scan channel time: iwpriv wlanX scan_times <min channel time> <max channel time> + (this may be required for 0.90.x firmware, i.e. Intersil radios) + +Be aware that shared key authentication requires a WEP key.
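+ +For example, to associate with a network that requires shared key authentication, the WEP key set via iwconfig is combined with the auth_mode parameter above (a sketch only: the interface name, ESSID and key are placeholders, and the key syntax is the standard wireless-tools form): + iwconfig wlanX key 0123456789 + iwpriv wlanX auth_mode 1 + iwconfig wlanX essid myessid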
+ +at76c503.o module parameters +---------------------------- +The above private parameters can be set by module parameters (inside +/etc/modules.conf or on the "modprobe" command line) as well (see +at76c503a.c for a short explanation and the default values): + +debug +scan_min_time +scan_max_time +scan_mode +preamble_type +auth_mode +pm_mode +pm_period + +Known Problems +-------------- + +1) Bugs of the firmware 0.90.0-44 (the latest available for device with + Intersil radio after Atmel stopped support for Intersil radio + chipsets): + - active scan doesn't send ProbeReq neither in adhoc or in + infrastructure (managed) mode + + * If your ad-hoc peer sends the beacon in longer intervals, + the Atmel driver may not find it. Try to increase the max + channel time to 500 or 1000 (default 120) by calling + iwpriv wlanX scan_times 10 + If you found a working value, add the module parameter + scan_max_time to /etc/modules.conf . + + * You cannot connect to an access point which hides its SSID and + requires the STA to probe it! + + - After joining an existing IBSS of a 2MBit card, the basic rates are + wrong in the beacon, e.g. + a 2 MBit card broadcasts a basic rate set of 1 + 2 MBit/s, + but the Atmel device sends out basic rates of 1,2,5.5 and 11 MBit/s + Subsequently it cannot connect to the 2 MBit card, because it sends + it's data packets to broadcast addresses with 11 MBit/s. + + - power saving modes does not work + +2) firmware 1.101.0-84 + - if a 2MBit card joins an IBSS (ad-hoc mode) initiated by the Atmel + device (which currently broadcasts 1+2 MBit/s as the basic rates + and 5.5 and 11 MBit/s as (additional) operational rates), the + Atmel device sends out broadcast packets (e.g. ARP requests) with + 11 MBit/s. This may be fixed by changing the tx rate in iwconfig + from "auto" (default) to 1M or 2M. + I'm not sure if this is a firmware bug or a problem in the driver. + + + +Thanks to: +- the authors of the usbvnet driver (atmelwlandriver.sourceforge.net) +- Joerg Albert for lots of patches +- Brad Hards and Bas Vermeulen for the firmware code, which I ported to kernel space +- David Gibson, I used his orinoco driver for learning +- the author(s) of the usbnet driver +- the author(s) of the rtl8150 driver +- lots of other authors of usb and wlan drivers, where I stole code from +- Pavel Roskin for testing, debugging and his patches + +Oliver Kurth , Mon, 6 Jan 2003 22:39:47 +0100 +updated by Joerg Albert, Thu, 1 May 2003 and later + + + + + + + + + + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/audio.c linux.22-ac2/drivers/usb/audio.c --- linux.vanilla/drivers/usb/audio.c 2003-06-14 00:11:38.000000000 +0100 +++ linux.22-ac2/drivers/usb/audio.c 2003-07-31 14:45:45.000000000 +0100 @@ -105,6 +105,8 @@ * functionality. Tested and used in production with the emagic emi 2|6 * on PPC and Intel. Also fixed a few logic 'crash and burn' corner * cases. 
+ * 2003-06-30: Thomas Sailer + * Fix SETTRIGGER non OSS API conformity */ /* @@ -279,23 +281,24 @@ unsigned int srate; /* physical buffer */ unsigned char *sgbuf[NRSGBUF]; - unsigned bufsize; - unsigned numfrag; - unsigned fragshift; - unsigned wrptr, rdptr; - unsigned total_bytes; + unsigned int bufsize; + unsigned int numfrag; + unsigned int fragshift; + unsigned int wrptr, rdptr; + unsigned int total_bytes; int count; - unsigned error; /* over/underrun */ + unsigned int error; /* over/underrun */ wait_queue_head_t wait; /* redundant, but makes calculations easier */ - unsigned fragsize; - unsigned dmasize; + unsigned int fragsize; + unsigned int dmasize; /* OSS stuff */ - unsigned mapped:1; - unsigned ready:1; - unsigned ossfragshift; + unsigned int mapped:1; + unsigned int ready:1; + unsigned int enabled:1; + unsigned int ossfragshift; int ossmaxfrags; - unsigned subdivision; + unsigned int subdivision; }; struct usb_audio_state; @@ -562,6 +565,7 @@ break; } db->bufsize = nr << PAGE_SHIFT; + db->enabled = 1; db->ready = 1; dprintk((KERN_DEBUG "usbaudio: dmabuf_init bytepersec %d bufs %d ossfragshift %d ossmaxfrags %d " "fragshift %d fragsize %d numfrag %d dmasize %d bufsize %d fmt 0x%x srate %d\n", @@ -2299,7 +2303,7 @@ if (cnt > count) cnt = count; if (cnt <= 0) { - if (usbin_start(as)) { + if (as->usbin.dma.enabled && usbin_start(as)) { if (!ret) ret = -ENODEV; break; @@ -2332,6 +2336,11 @@ count -= cnt; buffer += cnt; ret += cnt; + if (as->usbin.dma.enabled && usbin_start(as)) { + if (!ret) + ret = -ENODEV; + break; + } } __set_current_state(TASK_RUNNING); remove_wait_queue(&as->usbin.dma.wait, &wait); @@ -2378,7 +2387,7 @@ if (cnt > count) cnt = count; if (cnt <= 0) { - if (usbout_start(as)) { + if (as->usbout.dma.enabled && usbout_start(as)) { if (!ret) ret = -ENODEV; break; @@ -2411,7 +2420,7 @@ count -= cnt; buffer += cnt; ret += cnt; - if (as->usbout.dma.count >= start_thr && usbout_start(as)) { + if (as->usbout.dma.enabled && as->usbout.dma.count >= start_thr && usbout_start(as)) { if (!ret) ret = -ENODEV; break; @@ -2616,19 +2625,25 @@ if (val & PCM_ENABLE_INPUT) { if (!as->usbin.dma.ready && (ret = prog_dmabuf_in(as))) return ret; + as->usbin.dma.enabled = 1; if (usbin_start(as)) return -ENODEV; - } else + } else { + as->usbin.dma.enabled = 0; usbin_stop(as); + } } if (file->f_mode & FMODE_WRITE) { if (val & PCM_ENABLE_OUTPUT) { if (!as->usbout.dma.ready && (ret = prog_dmabuf_out(as))) return ret; + as->usbout.dma.enabled = 1; if (usbout_start(as)) return -ENODEV; - } else + } else { + as->usbout.dma.enabled = 0; usbout_stop(as); + } } return 0; @@ -2827,10 +2842,14 @@ if (signal_pending(current)) return -ERESTARTSYS; } - if (file->f_mode & FMODE_READ) + if (file->f_mode & FMODE_READ) { as->usbin.dma.ossfragshift = as->usbin.dma.ossmaxfrags = as->usbin.dma.subdivision = 0; - if (file->f_mode & FMODE_WRITE) + as->usbin.dma.enabled = 1; + } + if (file->f_mode & FMODE_WRITE) { as->usbout.dma.ossfragshift = as->usbout.dma.ossmaxfrags = as->usbout.dma.subdivision = 0; + as->usbout.dma.enabled = 1; + } if (set_format(as, file->f_mode, ((minor & 0xf) == SND_DEV_DSP16) ? 
AFMT_S16_LE : AFMT_U8 /* AFMT_ULAW */, 8000)) { up(&open_sem); return -EIO; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/Config.in linux.22-ac2/drivers/usb/Config.in --- linux.vanilla/drivers/usb/Config.in 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/Config.in 2003-08-13 22:26:45.000000000 +0100 @@ -28,6 +28,7 @@ comment ' USB Bluetooth can only be used with disabled Bluetooth subsystem' fi fi + dep_tristate ' USB DFU support' CONFIG_USB_DFU $CONFIG_USB dep_tristate ' USB MIDI support' CONFIG_USB_MIDI $CONFIG_USB $CONFIG_SOUND if [ "$CONFIG_SCSI" = "n" ]; then comment ' SCSI support is needed for USB Storage' @@ -109,5 +110,7 @@ if [ "$CONFIG_ATM" = "y" -o "$CONFIG_ATM" = "m" ]; then dep_tristate ' Alcatel Speedtouch USB support' CONFIG_USB_SPEEDTOUCH $CONFIG_ATM $CONFIG_USB fi + + source drivers/usb/atmel/Config.in fi endmenu diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/devio.c linux.22-ac2/drivers/usb/devio.c --- linux.vanilla/drivers/usb/devio.c 2003-06-14 00:11:38.000000000 +0100 +++ linux.22-ac2/drivers/usb/devio.c 2003-06-29 16:10:21.000000000 +0100 @@ -43,7 +43,7 @@ #include #include #include - +#include struct async { struct list_head asynclist; @@ -1078,6 +1078,8 @@ int size; void *buf = 0; int retval = 0; + struct usb_interface *ifp = 0; + struct usb_driver *driver = 0; /* get input parameters and alloc buffer */ if (copy_from_user(&ctrl, (void *) arg, sizeof (ctrl))) @@ -1095,32 +1097,55 @@ } } - /* ioctl to device */ - if (ctrl.ifno < 0) { - switch (ctrl.ioctl_code) { - /* access/release token for issuing control messages - * ask a particular driver to bind/unbind, ... etc - */ - } - retval = -ENOSYS; - - /* ioctl to the driver which has claimed a given interface */ - } else { - struct usb_interface *ifp = 0; - if (!ps->dev) - retval = -ENODEV; - else if (ctrl.ifno >= ps->dev->actconfig->bNumInterfaces) + if (!ps->dev) + retval = -ENODEV; + else if (!(ifp = usb_ifnum_to_if (ps->dev, ctrl.ifno))) + retval = -EINVAL; + else switch (ctrl.ioctl_code) { + + /* disconnect kernel driver from interface, leaving it unbound */ + case USBDEVFS_DISCONNECT: + driver = ifp->driver; + if (driver) { + down (&driver->serialize); + dbg ("disconnect '%s' from dev %d interface %d", + driver->name, ps->dev->devnum, ctrl.ifno); + driver->disconnect (ps->dev, ifp->private_data); + usb_driver_release_interface (driver, ifp); + up (&driver->serialize); + } else retval = -EINVAL; - else { - if (!(ifp = usb_ifnum_to_if (ps->dev, ctrl.ifno))) - retval = -EINVAL; - else if (ifp->driver == 0 || ifp->driver->ioctl == 0) - retval = -ENOSYS; - } - if (retval == 0) + break; + + /* let kernel drivers try to (re)bind to the interface */ + case USBDEVFS_CONNECT: + usb_find_interface_driver_for_ifnum (ps->dev, ctrl.ifno); + break; + + /* talk directly to the interface's driver */ + default: + lock_kernel(); /* against module unload */ + driver = ifp->driver; + if (driver == 0 || driver->ioctl == 0) { + unlock_kernel(); + retval = -ENOSYS; + } else { + if (ifp->driver->owner) { + __MOD_INC_USE_COUNT(ifp->driver->owner); + unlock_kernel(); + } /* ifno might usefully be passed ... */ - retval = ifp->driver->ioctl (ps->dev, ctrl.ioctl_code, buf); + retval = driver->ioctl (ps->dev, ctrl.ioctl_code, buf); /* size = min_t(int, size, retval)? 
*/ + if (ifp->driver->owner) { + __MOD_DEC_USE_COUNT(ifp->driver->owner); + } else { + unlock_kernel(); + } + } + + if (retval == -ENOIOCTLCMD) + retval = -ENOTTY; } /* cleanup and return */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/hid-core.c linux.22-ac2/drivers/usb/hid-core.c --- linux.vanilla/drivers/usb/hid-core.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/hid-core.c 2003-06-29 16:24:52.000000000 +0100 @@ -1173,6 +1173,9 @@ #define USB_VENDOR_ID_TANGTOP 0x0d3d #define USB_DEVICE_ID_TANGTOP_USBPS2 0x0001 +#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f +#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 + #define USB_VENDOR_ID_OKI 0x070a #define USB_VENDOR_ID_OKI_MULITI 0x0007 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/hid.h linux.22-ac2/drivers/usb/hid.h --- linux.vanilla/drivers/usb/hid.h 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/hid.h 2003-09-01 13:54:30.000000000 +0100 @@ -30,6 +30,7 @@ * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic */ +#include #include #include #include diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/host/sl811.c linux.22-ac2/drivers/usb/host/sl811.c --- linux.vanilla/drivers/usb/host/sl811.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/host/sl811.c 2003-07-31 14:25:33.000000000 +0100 @@ -9,7 +9,7 @@ * Adam Richter, Gregory P. Smith; 2.Original SL811 driver (hc_sl811.o) by Pei Liu * - * It's now support isosynchronous mode and more effective than hc_sl811.o + * It's now support isochronous mode and more effective than hc_sl811.o * * To do: * 1.Modify the timeout part, it's some messy diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/Makefile linux.22-ac2/drivers/usb/Makefile --- linux.vanilla/drivers/usb/Makefile 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/Makefile 2003-08-13 22:38:05.000000000 +0100 @@ -10,7 +10,7 @@ # Objects that export symbols. -export-objs := hcd.o usb.o ov511.o pwc-uncompress.o +export-objs := hcd.o usb.o ov511.o pwc-uncompress.o usbdfu.o # Multipart objects. @@ -119,12 +119,14 @@ obj-$(CONFIG_USB_BRLVGER) += brlvger.o obj-$(CONFIG_USB_LCD) += usblcd.o obj-$(CONFIG_USB_SPEEDTOUCH) += speedtch.o +obj-$(CONFIG_USB_DFU) += usbdfu.o # Object files in subdirectories -mod-subdirs := serial host +mod-subdirs := serial host atmel -subdir-$(CONFIG_USB_SERIAL) += serial -subdir-$(CONFIG_USB_STORAGE) += storage +subdir-$(CONFIG_USB_SERIAL) += serial +subdir-$(CONFIG_USB_STORAGE) += storage +subdir-$(CONFIG_USB_ATMEL76C503) += atmel ifeq ($(CONFIG_USB_SERIAL),y) obj-y += serial/usb-serial.o @@ -134,6 +136,10 @@ obj-y += storage/storage.o endif +ifeq ($(CONFIG_USB_ATMEL76C503),y) + obj-y += atmel/usb-atmel.o +endif + include $(TOPDIR)/Rules.make # Link rules for multi-part drivers. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/pwc-ctrl.c linux.22-ac2/drivers/usb/pwc-ctrl.c --- linux.vanilla/drivers/usb/pwc-ctrl.c 2003-06-14 00:11:38.000000000 +0100 +++ linux.22-ac2/drivers/usb/pwc-ctrl.c 2003-07-31 14:08:04.000000000 +0100 @@ -1,7 +1,7 @@ /* Driver for Philips webcam Functions that send various control messages to the webcam, including video modes. - (C) 1999-2002 Nemosoft Unv. (webcam@smcc.demon.nl) + (C) 1999-2003 Nemosoft Unv. 
(webcam@smcc.demon.nl) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -452,7 +452,7 @@ pdev->view.x = width; pdev->view.y = height; pwc_set_image_buffer_size(pdev); - Trace(TRACE_SIZE, "Set viewport to %dx%d, image size is %dx%d, palette = %d.\n", width, height, pwc_image_sizes[size].x, pwc_image_sizes[size].y, pdev->vpalette); + Trace(TRACE_SIZE, "Set viewport to %dx%d, image size is %dx%d.\n", width, height, pwc_image_sizes[size].x, pwc_image_sizes[size].y); return 0; } @@ -461,38 +461,9 @@ { int factor, i, filler = 0; - switch(pdev->vpalette) { - case VIDEO_PALETTE_RGB32 | 0x80: - case VIDEO_PALETTE_RGB32: - factor = 16; - filler = 0; - break; - case VIDEO_PALETTE_RGB24 | 0x80: - case VIDEO_PALETTE_RGB24: - factor = 12; - filler = 0; - break; - case VIDEO_PALETTE_YUYV: - case VIDEO_PALETTE_YUV422: - factor = 8; - filler = 128; - break; - case VIDEO_PALETTE_YUV420: - case VIDEO_PALETTE_YUV420P: - factor = 6; - filler = 128; - break; -#if PWC_DEBUG - case VIDEO_PALETTE_RAW: - pdev->image.size = pdev->frame_size; - pdev->view.size = pdev->frame_size; - return; - break; -#endif - default: - factor = 0; - break; - } + /* for PALETTE_YUV420P */ + factor = 6; + filler = 128; /* Set sizes in bytes */ pdev->image.size = pdev->image.x * pdev->image.y * factor / 4; @@ -1358,7 +1329,7 @@ { struct pwc_probe probe; - strcpy(probe.name, pdev->vdev->name); + strcpy(probe.name, pdev->vdev.name); probe.type = pdev->type; if (copy_to_user(arg, &probe, sizeof(probe))) ret = -EFAULT; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/pwc.h linux.22-ac2/drivers/usb/pwc.h --- linux.vanilla/drivers/usb/pwc.h 2003-06-14 00:11:38.000000000 +0100 +++ linux.22-ac2/drivers/usb/pwc.h 2003-09-01 13:54:30.000000000 +0100 @@ -1,4 +1,4 @@ -/* (C) 1999-2002 Nemosoft Unv. (webcam@smcc.demon.nl) +/* (C) 1999-2003 Nemosoft Unv. 
(webcam@smcc.demon.nl) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -60,8 +60,8 @@ /* Version block */ #define PWC_MAJOR 8 -#define PWC_MINOR 10 -#define PWC_VERSION "8.10" +#define PWC_MINOR 11 +#define PWC_VERSION "8.11" #define PWC_NAME "pwc" /* Turn certain features on/off */ @@ -82,7 +82,7 @@ #define PWC_FRAME_SIZE (460800 + TOUCAM_HEADER_SIZE + TOUCAM_TRAILER_SIZE) /* Absolute maximum number of buffers available for mmap() */ -#define MAX_IMAGES 4 +#define MAX_IMAGES 10 struct pwc_coord { @@ -112,6 +112,7 @@ struct pwc_device { + struct video_device vdev; #ifdef PWC_MAGIC int magic; #endif @@ -120,22 +121,21 @@ int type; /* type of cam (645, 646, 675, 680, 690) */ int release; /* release number */ - int unplugged; /* set when the plug is pulled */ + int error_status; /* set when something goes wrong with the cam (unplugged, USB errors) */ int usb_init; /* set when the cam has been initialized over USB */ /*** Video data ***/ int vopen; /* flag */ - struct video_device *vdev; int vendpoint; /* video isoc endpoint */ int vcinterface; /* video control interface */ int valternate; /* alternate interface needed */ int vframes, vsize; /* frames-per-second & size (see PSZ_*) */ - int vpalette; /* YUV */ int vframe_count; /* received frames */ int vframes_dumped; /* counter for dumped frames */ int vframes_error; /* frames received in error */ int vmax_packet_size; /* USB maxpacket size */ int vlast_packet_size; /* for frame synchronisation */ + int visoc_errors; /* number of contiguous ISOC errors */ int vcompression; /* desired compression factor */ int vbandlength; /* compressed band length; 0 is uncompressed */ char vsnapshot; /* snapshot mode */ @@ -147,15 +147,15 @@ 2. data is synchronized and packed into a frame buffer 3a. in case data is compressed, decompress it directly into image buffer 3b. in case data is uncompressed, copy into image buffer with viewport - 4. data is transfered to the user process + 4. data is transferred to the user process - Note that MAX_ISO_BUFS != MAX_FRAMES != MAX_IMAGES.... + Note that MAX_ISO_BUFS != MAX_FRAMES != MAX_IMAGES.... We have in effect a back-to-back-double-buffer system. */ /* 1: isoc */ struct pwc_iso_buf sbuf[MAX_ISO_BUFS]; char iso_init; - + /* 2: frame */ struct pwc_frame_buf *fbuf; /* all frames */ struct pwc_frame_buf *empty_frames, *empty_frames_tail; /* all empty frames */ @@ -168,7 +168,7 @@ #if PWC_DEBUG int sequence; /* Debugging aid */ #endif - + /* 3: decompression */ struct pwc_decompressor *decompressor; /* function block with decompression routines */ void *decompress_data; /* private data for decompression engine */ @@ -176,7 +176,7 @@ /* 4: image */ /* We have an 'image' and a 'view', where 'image' is the fixed-size image as delivered by the camera, and 'view' is the size requested by the - program. The camera image is centered in this viewport, laced with + program. The camera image is centered in this viewport, laced with a gray or black border. view_min <= image <= view <= view_max; */ int image_mask; /* bitmask of supported sizes */ @@ -196,10 +196,9 @@ /*** Misc. data ***/ wait_queue_head_t frameq; /* When waiting for a frame to finish... 
*/ - wait_queue_head_t remove_ok; /* When we got hot unplugged, we have to avoid a few race conditions */ #if PWC_INT_PIPE void *usb_int_handler; /* for the interrupt endpoint */ -#endif +#endif }; /* Enumeration of image sizes */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/pwc-if.c linux.22-ac2/drivers/usb/pwc-if.c --- linux.vanilla/drivers/usb/pwc-if.c 2003-06-14 00:11:38.000000000 +0100 +++ linux.22-ac2/drivers/usb/pwc-if.c 2003-07-31 14:08:04.000000000 +0100 @@ -1,6 +1,6 @@ -/* Linux driver for Philips webcam +/* Linux driver for Philips webcam USB and Video4Linux interface part. - (C) 1999-2002 Nemosoft Unv. + (C) 1999-2003 Nemosoft Unv. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -75,14 +75,21 @@ { USB_DEVICE(0x0471, 0x0310) }, { USB_DEVICE(0x0471, 0x0311) }, { USB_DEVICE(0x0471, 0x0312) }, + { USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */ { USB_DEVICE(0x069A, 0x0001) }, /* Askey */ - { USB_DEVICE(0x046D, 0x08b0) }, /* Logitech QuickCam Pro 3000 */ - { USB_DEVICE(0x046D, 0x08b1) }, /* Logitech QuickCam Notebook Pro */ - { USB_DEVICE(0x046d, 0x08b2) }, /* Logitech QuickCam Pro 4000 */ - { USB_DEVICE(0x046d, 0x08b3) }, /* Logitech QuickCam Zoom */ + { USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */ + { USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */ + { USB_DEVICE(0x046D, 0x08B2) }, /* Logitech QuickCam Pro 4000 */ + { USB_DEVICE(0x046D, 0x08B3) }, /* Logitech QuickCam Zoom */ + { USB_DEVICE(0x046D, 0x08B4) }, /* Logitech (reserved) */ + { USB_DEVICE(0x046D, 0x08B5) }, /* Logitech (reserved) */ + { USB_DEVICE(0x046D, 0x08B6) }, /* Logitech (reserved) */ + { USB_DEVICE(0x046D, 0x08B7) }, /* Logitech (reserved) */ + { USB_DEVICE(0x046D, 0x08B8) }, /* Logitech (reserved) */ { USB_DEVICE(0x055D, 0x9000) }, /* Samsung */ { USB_DEVICE(0x055D, 0x9001) }, { USB_DEVICE(0x041E, 0x400C) }, /* Creative Webcam 5 */ + { USB_DEVICE(0x041E, 0x4011) }, /* Creative Webcam Pro Ex */ { USB_DEVICE(0x04CC, 0x8116) }, /* Afina Eye */ { USB_DEVICE(0x0d81, 0x1910) }, /* Visionite */ { USB_DEVICE(0x0d81, 0x1900) }, @@ -101,11 +108,11 @@ disconnect: usb_pwc_disconnect, /* disconnect() */ }; -#define MAX_DEV_HINTS 10 +#define MAX_DEV_HINTS 20 +#define MAX_ISOC_ERRORS 20 static int default_size = PSZ_QCIF; static int default_fps = 10; -static int default_palette = VIDEO_PALETTE_YUV420P; /* This format is understood by most tools */ static int default_fbufs = 3; /* Default number of frame buffers */ static int default_mbufs = 2; /* Default number of mmap() buffers */ int pwc_trace = TRACE_MODULE | TRACE_FLOW | TRACE_PWCX; @@ -119,9 +126,6 @@ struct pwc_device *pdev; } device_hint[MAX_DEV_HINTS]; -static struct semaphore mem_lock; -static void *mem_leak = NULL; /* For delayed kfree()s. See below */ - /***/ static int pwc_video_open(struct video_device *vdev, int mode); @@ -383,50 +387,50 @@ the user program. The first scheme involves the ISO buffers (called thus since they transport ISO data from the USB controller), and not really interesting. Suffices to say the data from this buffer is quickly - gathered in an interrupt handler (pwc_isoc_handler) and placed into the + gathered in an interrupt handler (pwc_isoc_handler) and placed into the frame buffer. - + The frame buffer is the second scheme, and is the central element here. It collects the data from a single frame from the camera (hence, the name). 
Frames are delimited by the USB camera with a short USB packet, so that's easy to detect. The frame buffers form a list that is filled - by the camera+USB controller and drained by the user process through + by the camera+USB controller and drained by the user process through either read() or mmap(). - + The image buffer is the third scheme, in which frames are decompressed - and possibly converted into planar format. For mmap() there is more than + and converted into planar format. For mmap() there is more than one image buffer available. - The frame buffers provide the image buffering, in case the user process - is a bit slow. This introduces lag and some undesired side-effects. - The problem arises when the frame buffer is full. I used to drop the last - frame, which makes the data in the queue stale very quickly. But dropping + The frame buffers provide the image buffering. In case the user process + is a bit slow, this introduces lag and some undesired side-effects. + The problem arises when the frame buffer is full. I used to drop the last + frame, which makes the data in the queue stale very quickly. But dropping the frame at the head of the queue proved to be a litte bit more difficult. I tried a circular linked scheme, but this introduced more problems than it solved. Because filling and draining are completely asynchronous processes, this requires some fiddling with pointers and mutexes. - + Eventually, I came up with a system with 2 lists: an 'empty' frame list and a 'full' frame list: * Initially, all frame buffers but one are on the 'empty' list; the one remaining buffer is our initial fill frame. - * If a frame is needed for filling, we take it from the 'empty' list, - unless that list is empty, in which case we take the buffer at the - head of the 'full' list. - * When our fill buffer has been filled, it is appended to the 'full' + * If a frame is needed for filling, we try to take it from the 'empty' + list, unless that list is empty, in which case we take the buffer at + the head of the 'full' list. + * When our fill buffer has been filled, it is appended to the 'full' list. - * If a frame is needed by read() or mmap(), it is taken from the head of + * If a frame is needed by read() or mmap(), it is taken from the head of the 'full' list, handled, and then appended to the 'empty' list. If no buffer is present on the 'full' list, we wait. The advantage is that the buffer that is currently being decompressed/ - converted, is on neither list, and thus not in our way (any other scheme + converted, is on neither list, and thus not in our way (any other scheme I tried had the problem of old data lingering in the queue). Whatever strategy you choose, it always remains a tradeoff: with more frame buffers the chances of a missed frame are reduced. On the other - hand, on slower machines it introduces lag because the queue will + hand, on slower machines it introduces lag because the queue will always be full. */ @@ -437,7 +441,7 @@ { int ret; unsigned long flags; - + ret = 0; spin_lock_irqsave(&pdev->ptrlock, flags); if (pdev->fill_frame != NULL) { @@ -454,11 +458,11 @@ if (pdev->empty_frames != NULL) { /* We have empty frames available. That's easy */ pdev->fill_frame = pdev->empty_frames; - pdev->empty_frames = pdev->empty_frames->next; + pdev->empty_frames = pdev->empty_frames->next; } else { /* Hmm. 
Take it from the full list */ -#if PWC_DEBUG +#if PWC_DEBUG /* sanity check */ if (pdev->full_frames == NULL) { Err("Neither empty or full frames available!\n"); @@ -478,11 +482,11 @@ spin_unlock_irqrestore(&pdev->ptrlock, flags); return ret; } - + /** - \brief Reset all buffers, pointers and lists, except for the image_used[] buffer. - + \brief Reset all buffers, pointers and lists, except for the image_used[] buffer. + If the image_used[] buffer is cleared too, mmap()/VIDIOCSYNC will run into trouble. */ static void pwc_reset_buffers(struct pwc_device *pdev) @@ -519,7 +523,7 @@ { int ret = 0; unsigned long flags; - + spin_lock_irqsave(&pdev->ptrlock, flags); /* First grab our read_frame; this is removed from all lists, so we can release the lock after this without problems */ @@ -542,7 +546,7 @@ Trace(TRACE_SEQUENCE, "Decompressing frame %d\n", pdev->read_frame->sequence); #endif /* Decompression is a lenghty process, so it's outside of the lock. - This gives the isoc_handler the opportunity to fill more frames + This gives the isoc_handler the opportunity to fill more frames in the mean time. */ spin_unlock_irqrestore(&pdev->ptrlock, flags); @@ -566,7 +570,7 @@ } /** - \brief Advance pointers of image buffer (after each user request) + \brief Advance pointers of image buffer (after each user request) */ static inline void pwc_next_image(struct pwc_device *pdev) { @@ -574,22 +578,6 @@ pdev->fill_image = (pdev->fill_image + 1) % default_mbufs; } -/* 2001-10-14: YUV420P is the only palette remaining. */ -static int pwc_set_palette(struct pwc_device *pdev, int pal) -{ - if ( pal == VIDEO_PALETTE_YUV420P -#if PWC_DEBUG - || pal == VIDEO_PALETTE_RAW -#endif - ) { - pdev->vpalette = pal; - pwc_set_image_buffer_size(pdev); - return 0; - } - Trace(TRACE_READ, "Palette %d not supported.\n", pal); - return -1; -} - /* This gets called for the Isochronous pipe (video). This is done in @@ -601,14 +589,15 @@ int i, fst, flen; int awake; struct pwc_frame_buf *fbuf; - unsigned char *fillptr, *iso_buf; + unsigned char *fillptr = 0, *iso_buf = 0; + awake = 0; pdev = (struct pwc_device *)urb->context; if (pdev == NULL) { Err("isoc_handler() called with NULL device?!\n"); return; } -#ifdef PWC_MAGIC +#ifdef PWC_MAGIC if (pdev->magic != PWC_MAGIC) { Err("isoc_handler() called with bad magic!\n"); return; @@ -619,33 +608,51 @@ return; } if (urb->status != -EINPROGRESS && urb->status != 0) { - char *errmsg; - + const char *errmsg; + errmsg = "Unknown"; switch(urb->status) { case -ENOSR: errmsg = "Buffer error (overrun)"; break; case -EPIPE: errmsg = "Stalled (device not responding)"; break; case -EOVERFLOW: errmsg = "Babble (bad cable?)"; break; case -EPROTO: errmsg = "Bit-stuff error (bad cable?)"; break; - case -EILSEQ: errmsg = "CRC/Timeout"; break; + case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break; case -ETIMEDOUT: errmsg = "NAK (device does not respond)"; break; } Trace(TRACE_FLOW, "pwc_isoc_handler() called with status %d [%s].\n", urb->status, errmsg); - return; + /* Give up after a number of contiguous errors on the USB bus. + Appearantly something is wrong so we simulate an unplug event. 
+ */ + if (++pdev->visoc_errors > MAX_ISOC_ERRORS) + { + Info("Too many ISOC errors, bailing out.\n"); + pdev->error_status = EIO; + awake = 1; + } + else + return; // better luck next time } fbuf = pdev->fill_frame; if (fbuf == NULL) { Err("pwc_isoc_handler without valid fill frame.\n"); + awake = 1; + } + else { + fillptr = fbuf->data + fbuf->filled; + } + /* Premature wakeup */ + if (awake) { wake_up_interruptible(&pdev->frameq); return; } - fillptr = fbuf->data + fbuf->filled; - awake = 0; + + /* Reset ISOC error counter. We did get here, after all. */ + pdev->visoc_errors = 0; /* vsync: 0 = don't copy data 1 = sync-hunt - 2 = synched + 2 = synched */ /* Compact data */ for (i = 0; i < urb->number_of_packets; i++) { @@ -674,7 +681,7 @@ if (flen < pdev->vlast_packet_size) { /* Shorter packet... We probably have the end of an image-frame; wake up read() process and let select()/poll() do something. - Decompression is done in user time over there. + Decompression is done in user time over there. */ if (pdev->vsync == 2) { /* The ToUCam Fun CMOS sensor causes the firmware to send 2 or 3 bogus @@ -731,7 +738,7 @@ else { /* Send only once per EOF */ awake = 1; /* delay wake_ups */ - + /* Find our next frame to fill. This will always succeed, since we * nick a frame from either empty or full list, but if we had to * take it from the full list, it means a frame got dropped. @@ -764,7 +771,7 @@ if (iso_error < 20) Trace(TRACE_FLOW, "Iso frame %d of USB has error %d\n", i, fst); } -#endif +#endif } if (awake) wake_up_interruptible(&pdev->frameq); @@ -893,8 +900,10 @@ } } - /* Stop camera, but only if we are sure the camera is still there */ - if (!pdev->unplugged) { + /* Stop camera, but only if we are sure the camera is still there (unplug + is signalled by EPIPE) + */ + if (pdev->error_status && pdev->error_status != EPIPE) { Trace(TRACE_OPEN, "Setting alternate interface 0.\n"); usb_set_interface(pdev->udev, 0, 0); } @@ -922,28 +931,6 @@ } -static inline void set_mem_leak(void *ptr) -{ - down(&mem_lock); - if (mem_leak != NULL) - Err("Memleak: overwriting mem_leak pointer!\n"); - Trace(TRACE_MEMORY, "Setting mem_leak to 0x%p.\n", ptr); - mem_leak = ptr; - up(&mem_lock); -} - -static inline void free_mem_leak(void) -{ - down(&mem_lock); - if (mem_leak != NULL) { - Trace(TRACE_MEMORY, "Freeing mem_leak ptr 0x%p.\n", mem_leak); - kfree(mem_leak); - mem_leak = NULL; - } - up(&mem_lock); -} - - /***************************************************************************/ /* Video4Linux functions */ @@ -967,8 +954,8 @@ Trace(TRACE_OPEN, "Doing first time initialization.\n"); pdev->usb_init = 1; - if (pwc_trace & TRACE_OPEN) { - /* Query CMOS sensor type */ + { + /* Query sensor type */ const char *sensor_type = NULL; i = pwc_get_cmos_sensor(pdev); @@ -987,7 +974,7 @@ default: sensor_type = "unknown type of sensor"; break; } if (sensor_type != NULL) - Info("This %s camera is equipped with a %s (%d).\n", pdev->vdev->name, sensor_type, i); + Info("This %s camera is equipped with a %s (%d).\n", pdev->vdev.name, sensor_type, i); } } @@ -1022,24 +1009,20 @@ pdev->vframe_count = 0; pdev->vframes_dumped = 0; pdev->vframes_error = 0; - pdev->vpalette = default_palette; -#if PWC_DEBUG + pdev->visoc_errors = 0; + pdev->error_status = 0; +#if PWC_DEBUG pdev->sequence = 0; #endif /* Set some defaults */ pdev->vsnapshot = 0; - if (pdev->type == 730 || pdev->type == 740 || pdev->type == 750) - pdev->vsize = PSZ_QSIF; - else - pdev->vsize = PSZ_QCIF; - pdev->vframes = 10; - - /* Start iso pipe for video; first try 
user-supplied size/fps, if - that fails try QCIF/10 or QSIF/10 (a reasonable default), - then give up + + /* Start iso pipe for video; first try the last used video size + (or the default one); if that fails try QCIF/10 or QSIF/10; + it that fails too, give up. */ - i = pwc_set_video_mode(pdev, pwc_image_sizes[default_size].x, pwc_image_sizes[default_size].y, default_fps, pdev->vcompression, 0); + i = pwc_set_video_mode(pdev, pwc_image_sizes[pdev->vsize].x, pwc_image_sizes[pdev->vsize].y, pdev->vframes, pdev->vcompression, 0); if (i) { Trace(TRACE_OPEN, "First attempt at set_video_mode failed.\n"); if (pdev->type == 730 || pdev->type == 740 || pdev->type == 750) @@ -1091,46 +1074,41 @@ if (pdev->vframe_count > 20) Info("Closing video device: %d frames received, dumped %d frames, %d frames with errors.\n", pdev->vframe_count, pdev->vframes_dumped, pdev->vframes_error); - /* Free isoc URBs, stop camera */ + if (pdev->decompressor != NULL) { + pdev->decompressor->exit(); + pdev->decompressor->unlock(); + pdev->decompressor = NULL; + } + pwc_isoc_cleanup(pdev); + pwc_free_buffers(pdev); - if (!pdev->unplugged) { - /* Turn LEDs off */ + /* Turn off LEDS and power down camera, but only when not unplugged */ + if (pdev->error_status != EPIPE) { if (pwc_set_leds(pdev, 0, 0) < 0) Info("Failed to set LED on/off time.\n"); - /* Power down camera to save energy */ if (power_save) { i = pwc_camera_power(pdev, 0); if (i < 0) Err("Failed to power down camera (%d)\n", i); } } - pdev->vopen = 0; - if (pdev->decompressor != NULL) { - pdev->decompressor->exit(); - pdev->decompressor->unlock(); - } - pwc_free_buffers(pdev); - - /* wake up _disconnect() routine */ - if (pdev->unplugged) - wake_up(&pdev->remove_ok); Trace(TRACE_OPEN, "<< video_close()\n"); } /* * FIXME: what about two parallel reads ???? * ANSWER: Not supported. You can't open the device more than once, - despite what the V4L1 interface says. First, I don't see - the need, second there's no mechanism of alerting the + despite what the V4L1 interface says. First, I don't see + the need, second there's no mechanism of alerting the 2nd/3rd/... process of events like changing image size. - And I don't see the point of blocking that for the + And I don't see the point of blocking that for the 2nd/3rd/... process. In multi-threaded environments reading parallel from any device is tricky anyhow. */ - + static long pwc_video_read(struct video_device *vdev, char *buf, unsigned long count, int noblock) { struct pwc_device *pdev; @@ -1142,17 +1120,20 @@ pdev = vdev->priv; if (pdev == NULL) return -EFAULT; - if (pdev->unplugged) { - Info("pwc_video_read: Device got unplugged (1).\n"); - return -EPIPE; /* unplugged device! */ - } + if (pdev->error_status) + return -pdev->error_status; /* Something happened, report what. */ /* In case we're doing partial reads, we don't have to wait for a frame */ if (pdev->image_read_pos == 0) { /* Do wait queueing according to the (doc)book */ add_wait_queue(&pdev->frameq, &wait); - set_current_state(TASK_INTERRUPTIBLE); while (pdev->full_frames == NULL) { + /* Check for unplugged/etc. 
here */ + if (pdev->error_status) { + remove_wait_queue(&pdev->frameq, &wait); + set_current_state(TASK_RUNNING); + return -pdev->error_status ; + } if (noblock) { remove_wait_queue(&pdev->frameq, &wait); set_current_state(TASK_RUNNING); @@ -1168,8 +1149,8 @@ } remove_wait_queue(&pdev->frameq, &wait); set_current_state(TASK_RUNNING); - - /* Decompress [, convert] and release frame */ + + /* Decompress and release frame */ if (pwc_handle_frame(pdev)) return -EFAULT; } @@ -1191,35 +1172,33 @@ static long pwc_video_write(struct video_device *vdev, const char *buf, unsigned long count, int noblock) { - return -EINVAL; + return -EINVAL; } static unsigned int pwc_video_poll(struct video_device *vdev, struct file *file, poll_table *wait) { struct pwc_device *pdev; - + if (vdev == NULL) return -EFAULT; pdev = vdev->priv; if (pdev == NULL) return -EFAULT; - + poll_wait(file, &pdev->frameq, wait); - if (pdev->unplugged) { - Info("pwc_video_poll: Device got unplugged.\n"); + if (pdev->error_status) return POLLERR; - } if (pdev->full_frames != NULL) /* we have frames waiting */ return (POLLIN | POLLRDNORM); return 0; } - + static int pwc_video_ioctl(struct video_device *vdev, unsigned int cmd, void *arg) { struct pwc_device *pdev; DECLARE_WAITQUEUE(wait, current); - + if (vdev == NULL) return -EFAULT; pdev = vdev->priv; @@ -1228,7 +1207,7 @@ switch (cmd) { /* Query cabapilities */ - case VIDIOCGCAP: + case VIDIOCGCAP: { struct video_capability caps; @@ -1314,7 +1293,7 @@ else p.colour = 0xffff; p.depth = 24; - p.palette = pdev->vpalette; + p.palette = VIDEO_PALETTE_YUV420P; p.hue = 0xFFFF; /* N/A */ if (copy_to_user(arg, &p, sizeof(p))) @@ -1341,9 +1320,8 @@ pwc_set_contrast(pdev, p.contrast); pwc_set_gamma(pdev, p.whiteness); pwc_set_saturation(pdev, p.colour); - if (p.palette && p.palette != pdev->vpalette) { - if (pwc_set_palette(pdev, p.palette) < 0) - return -EINVAL; + if (p.palette && p.palette != VIDEO_PALETTE_YUV420P) { + return -EINVAL; } break; } @@ -1436,9 +1414,8 @@ various palettes... The driver doesn't support such small images, so I'm working around it. */ - if (vm.format && vm.format != pdev->vpalette) - if (pwc_set_palette(pdev, vm.format) < 0) - return -EINVAL; + if (vm.format && vm.format != VIDEO_PALETTE_YUV420P) + return -EINVAL; if ((vm.width != pdev->view.x || vm.height != pdev->view.y) && (vm.width >= pdev->view_min.x && vm.height >= pdev->view_min.y)) { @@ -1458,7 +1435,7 @@ /* Okay, we're done here. In the SYNC call we wait until a frame comes available, then expand image into the given buffer. - In contrast to the CPiA cam the Philips cams deliver a + In contrast to the CPiA cam the Philips cams deliver a constant stream, almost like a grabber card. Also, we have separate buffers for the rawdata and the image, meaning we can nearly always expand into the requested buffer. @@ -1498,27 +1475,26 @@ return -EINVAL; /* Add ourselves to the frame wait-queue. - + In the loop, check for error conditions and signals. + FIXME: needs auditing for safety. - QUSTION: In what respect? I think that using the - frameq is safe now. + QUESTION: In what respect? I think that using the + frameq is safe now. 
*/ add_wait_queue(&pdev->frameq, &wait); - set_current_state(TASK_INTERRUPTIBLE); while (pdev->full_frames == NULL) { - if (pdev->unplugged) { + if (pdev->error_status) { remove_wait_queue(&pdev->frameq, &wait); set_current_state(TASK_RUNNING); - return -ENODEV; + return -pdev->error_status; } - if (signal_pending(current)) { remove_wait_queue(&pdev->frameq, &wait); set_current_state(TASK_RUNNING); return -ERESTARTSYS; } - set_current_state(TASK_INTERRUPTIBLE); schedule(); + set_current_state(TASK_INTERRUPTIBLE); } remove_wait_queue(&pdev->frameq, &wait); set_current_state(TASK_RUNNING); @@ -1572,7 +1548,7 @@ { struct video_unit vu; - vu.video = pdev->vdev->minor & 0x3F; + vu.video = pdev->vdev.minor & 0x3F; vu.audio = -1; /* not known yet */ vu.vbi = -1; vu.radio = -1; @@ -1623,14 +1599,11 @@ static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id) { struct pwc_device *pdev = NULL; - struct video_device *vdev; int vendor_id, product_id, type_id; int i, hint; int video_nr = -1; /* default: use next available device */ char serial_number[30], *name; - free_mem_leak(); - /* Check if we can handle this device */ Trace(TRACE_PROBE, "probe() called [%04X %04X], if %d\n", udev->descriptor.idVendor, udev->descriptor.idProduct, ifnum); @@ -1691,6 +1664,11 @@ name = "Philips 750 webcam"; type_id = 750; break; + case 0x0313: + Info("Philips PCVC720K/40 (ToUCam XS) USB webcam detected.\n"); + name = "Philips 720 webcam"; + type_id = 720; + break; default: return NULL; break; @@ -1713,12 +1691,12 @@ case 0x08b0: Info("Logitech QuickCam Pro 3000 USB webcam detected.\n"); name = "Logitech QuickCam Pro 3000"; - type_id = 730; + type_id = 740; /* CCD sensor */ break; case 0x08b1: Info("Logitech QuickCam for Notebook Pro USB webcam detected.\n"); name = "Logitech QuickCam Notebook Pro"; - type_id = 740; /* ?? 
unknown sensor */ + type_id = 740; /* CCD sensor */ break; case 0x08b2: Info("Logitech QuickCam 4000 Pro USB webcam detected.\n"); @@ -1730,6 +1708,15 @@ name = "Logitech QuickCam Zoom"; type_id = 740; /* CCD sensor */ break; + case 0x08b4: + case 0x08b5: + case 0x08b6: + case 0x08b7: + case 0x08b8: + Info("Logitech QuickCam detected (reserved ID).\n"); + name = "Logitech QuickCam (res.)"; + type_id = 730; /* Assuming CMOS */ + break; default: return NULL; break; @@ -1763,6 +1750,11 @@ name = "Creative Labs Webcam 5"; type_id = 730; break; + case 0x4011: + Info("Creative Labs Webcam Pro Ex detected.\n"); + name = "Creative Labs Webcam Pro Ex"; + type_id = 740; + break; default: return NULL; break; @@ -1816,26 +1808,20 @@ memset(pdev, 0, sizeof(struct pwc_device)); pdev->type = type_id; pwc_construct(pdev); + pdev->vsize = default_size; + pdev->vframes = default_fps; init_MUTEX(&pdev->modlock); pdev->ptrlock = SPIN_LOCK_UNLOCKED; pdev->udev = udev; init_waitqueue_head(&pdev->frameq); - init_waitqueue_head(&pdev->remove_ok); pdev->vcompression = pwc_preferred_compression; - /* Now hook it up to the video subsystem */ - vdev = kmalloc(sizeof(struct video_device), GFP_KERNEL); - if (vdev == NULL) { - Err("Oops, could not allocate memory for video_device.\n"); - return NULL; - } - memcpy(vdev, &pwc_template, sizeof(pwc_template)); - strcpy(vdev->name, name); - SET_MODULE_OWNER(vdev); - pdev->vdev = vdev; - vdev->priv = pdev; + memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template)); + strcpy(pdev->vdev.name, name); + SET_MODULE_OWNER(&pdev->vdev); + pdev->vdev.priv = pdev; pdev->release = udev->descriptor.bcdDevice; Trace(TRACE_PROBE, "Release: %04x\n", pdev->release); @@ -1854,14 +1840,14 @@ } } - i = video_register_device(vdev, VFL_TYPE_GRABBER, video_nr); + i = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr); if (i < 0) { Err("Failed to register as video device (%d).\n", i); + kfree(pdev); /* Oops, no memory leaks please */ return NULL; } else { - Trace(TRACE_PROBE, "Registered video struct at 0x%p.\n", vdev); - Info("Registered as /dev/video%d.\n", vdev->minor & 0x3F); + Info("Registered as /dev/video%d.\n", pdev->vdev.minor & 0x3F); } /* occupy slot */ if (hint < MAX_DEV_HINTS) @@ -1876,75 +1862,56 @@ { struct pwc_device *pdev; int hint; - DECLARE_WAITQUEUE(wait, current); lock_kernel(); - free_mem_leak(); - pdev = (struct pwc_device *)ptr; if (pdev == NULL) { Err("pwc_disconnect() Called without private pointer.\n"); + unlock_kernel(); return; } if (pdev->udev == NULL) { Err("pwc_disconnect() already called for %p\n", pdev); + unlock_kernel(); return; } if (pdev->udev != udev) { Err("pwc_disconnect() Woops: pointer mismatch udev/pdev.\n"); + unlock_kernel(); return; } -#ifdef PWC_MAGIC +#ifdef PWC_MAGIC if (pdev->magic != PWC_MAGIC) { Err("pwc_disconnect() Magic number failed. Consult your scrolls and try again.\n"); + unlock_kernel(); return; } #endif - - pdev->unplugged = 1; - if (pdev->vdev != NULL) { - add_wait_queue(&pdev->remove_ok, &wait); - set_current_state(TASK_UNINTERRUPTIBLE); - Trace(TRACE_PROBE, "Unregistering video device.\n"); - video_unregister_device(pdev->vdev); - if (pdev->vopen) { - Info("Disconnected while device/video is open!\n"); - - /* Wake up any processes that might be waiting for - a frame, let them return an error condition - */ - wake_up(&pdev->frameq); - - /* Wait until we get a 'go' from _close(). This used - to have a gigantic race condition, since we kfree() - stuff here, but we have to wait until close() - is finished. 
- */ - - Trace(TRACE_PROBE, "Sleeping on remove_ok.\n"); - /* ... wait ... */ - schedule(); - Trace(TRACE_PROBE, "Done sleeping.\n"); - set_mem_leak(pdev->vdev); - pdev->vdev = NULL; - } - else { - /* Normal disconnect; remove from available devices */ - kfree(pdev->vdev); - pdev->vdev = NULL; - } - remove_wait_queue(&pdev->remove_ok, &wait); - set_current_state(TASK_RUNNING); + + /* We got unplugged; this is signalled by an EPIPE error code */ + if (pdev->vopen) { + Info("Disconnected while webcam is in use!\n"); + pdev->error_status = EPIPE; } + + /* Alert waiting processes */ + wake_up_interruptible(&pdev->frameq); + /* Wait until device is closed */ + while (pdev->vopen) + schedule(); + /* Device is now closed, so we can safely unregister it */ + Trace(TRACE_PROBE, "Unregistering video device in disconnect().\n"); + video_unregister_device(&pdev->vdev); + + /* Free memory (don't set pdev to 0 just yet) */ + kfree(pdev); /* search device_hint[] table if we occupy a slot, by any chance */ for (hint = 0; hint < MAX_DEV_HINTS; hint++) if (device_hint[hint].pdev == pdev) device_hint[hint].pdev = NULL; - pdev->udev = NULL; unlock_kernel(); - kfree(pdev); } @@ -1973,7 +1940,7 @@ static int trace = -1; static int compression = -1; static int leds[2] = { -1, -1 }; -static char *dev_hint[10] = { }; +static char *dev_hint[MAX_DEV_HINTS] = { }; MODULE_PARM(size, "s"); MODULE_PARM_DESC(size, "Initial image size. One of sqcif, qsif, qcif, sif, cif, vga"); @@ -1991,7 +1958,7 @@ MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)"); MODULE_PARM(leds, "2i"); MODULE_PARM_DESC(leds, "LED on,off time in milliseconds"); -MODULE_PARM(dev_hint, "0-10s"); +MODULE_PARM(dev_hint, "0-20s"); MODULE_PARM_DESC(dev_hint, "Device node hints"); MODULE_DESCRIPTION("Philips USB & OEM webcam driver"); @@ -2133,14 +2100,12 @@ device_hint[i].type = 0; /* not filled */ } /* ..for MAX_DEV_HINTS */ - init_MUTEX(&mem_lock); Trace(TRACE_PROBE, "Registering driver at address 0x%p.\n", &pwc_driver); return usb_register(&pwc_driver); } static void __exit usb_pwc_exit(void) { - free_mem_leak(); Trace(TRACE_MODULE, "Deregistering driver.\n"); usb_deregister(&pwc_driver); Info("Philips webcam module removed.\n"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/pwc-ioctl.h linux.22-ac2/drivers/usb/pwc-ioctl.h --- linux.vanilla/drivers/usb/pwc-ioctl.h 2003-06-14 00:11:38.000000000 +0100 +++ linux.22-ac2/drivers/usb/pwc-ioctl.h 2003-07-31 14:08:04.000000000 +0100 @@ -1,7 +1,7 @@ #ifndef PWC_IOCTL_H #define PWC_IOCTL_H -/* (C) 2001-2002 Nemosoft Unv. webcam@smcc.demon.nl +/* (C) 2001-2003 Nemosoft Unv. webcam@smcc.demon.nl This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/pwc-misc.c linux.22-ac2/drivers/usb/pwc-misc.c --- linux.vanilla/drivers/usb/pwc-misc.c 2002-08-03 16:08:28.000000000 +0100 +++ linux.22-ac2/drivers/usb/pwc-misc.c 2003-07-31 14:08:04.000000000 +0100 @@ -1,6 +1,6 @@ /* Linux driver for Philips webcam Various miscellaneous functions and tables. - (C) 1999-2002 Nemosoft Unv. (webcam@smcc.demon.nl) + (C) 1999-2003 Nemosoft Unv. 
(webcam@smcc.demon.nl) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -51,7 +51,6 @@ } return find; } - /* initialize variables depending on type */ void pwc_construct(struct pwc_device *pdev) { @@ -81,6 +80,7 @@ pdev->frame_header_size = 0; pdev->frame_trailer_size = 0; break; + case 720: case 730: case 740: case 750: diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/pwc-uncompress.c linux.22-ac2/drivers/usb/pwc-uncompress.c --- linux.vanilla/drivers/usb/pwc-uncompress.c 2003-06-14 00:11:38.000000000 +0100 +++ linux.22-ac2/drivers/usb/pwc-uncompress.c 2003-07-31 14:08:04.000000000 +0100 @@ -1,6 +1,6 @@ /* Linux driver for Philips webcam Decompression frontend. - (C) 1999-2002 Nemosoft Unv. (webcam@smcc.demon.nl) + (C) 1999-2003 Nemosoft Unv. (webcam@smcc.demon.nl) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -32,7 +32,7 @@ /* Should the pwc_decompress structure ever change, we increase the version number so that we don't get nasty surprises, or can - dynamicly adjust our structure. + dynamically adjust our structure. */ const int pwc_decompressor_version = PWC_MAJOR; @@ -98,14 +98,6 @@ if (!image) return -EFAULT; -#if PWC_DEBUG - /* This is a quickie */ - if (pdev->vpalette == VIDEO_PALETTE_RAW) { - memcpy(image, fbuf->data, pdev->frame_size); - return 0; - } -#endif - yuv = fbuf->data + pdev->frame_header_size; /* Skip header */ if (pdev->vbandlength == 0) { /* Uncompressed mode. We copy the data into the output buffer, @@ -113,8 +105,6 @@ size). Unfortunately we have to do a bit of byte stuffing to get the desired output format/size. */ - switch (pdev->vpalette) { - case VIDEO_PALETTE_YUV420P: /* * We do some byte shuffling here to go from the * native format to YUV420P. @@ -149,11 +139,6 @@ else dstu += (stride >> 1); } - break; - default: - Err("Unsupported palette!"); - break; - } } else { /* Compressed; the decompressor routines will write the data diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/pwc-uncompress.h linux.22-ac2/drivers/usb/pwc-uncompress.h --- linux.vanilla/drivers/usb/pwc-uncompress.h 2002-08-03 16:08:28.000000000 +0100 +++ linux.22-ac2/drivers/usb/pwc-uncompress.h 2003-09-01 13:54:30.000000000 +0100 @@ -1,4 +1,4 @@ -/* (C) 1999-2002 Nemosoft Unv. (webcam@smcc.demon.nl) +/* (C) 1999-2003 Nemosoft Unv. 
(webcam@smcc.demon.nl) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/scanner.c linux.22-ac2/drivers/usb/scanner.c --- linux.vanilla/drivers/usb/scanner.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/scanner.c 2003-07-09 12:57:58.000000000 +0100 @@ -411,6 +411,8 @@ */ #include "scanner.h" +static void purge_scanner(struct scn_usb_data *scn); + static void irq_scanner(struct urb *urb) { @@ -501,28 +503,20 @@ static int close_scanner(struct inode * inode, struct file * file) { - struct scn_usb_data *scn; - - kdev_t scn_minor; + struct scn_usb_data *scn = file->private_data; - scn_minor = USB_SCN_MINOR (inode); - - dbg("close_scanner: scn_minor:%d", scn_minor); - - if (!p_scn_table[scn_minor]) { - err("close_scanner(%d): invalid scn_minor", scn_minor); - return -ENODEV; - } - - down(&scn_mutex); - - scn = p_scn_table[scn_minor]; down(&(scn->sem)); scn->isopen = 0; file->private_data = NULL; - up(&scn_mutex); + if (!scn->present) { + /* The device was unplugged while open - need to clean up */ + up(&(scn->sem)); + purge_scanner(scn); + return 0; + } + up(&(scn->sem)); return 0; @@ -550,6 +544,12 @@ down(&(scn->sem)); + if (!scn->present) { + /* The device was unplugged while open */ + up(&(scn->sem)); + return -ENODEV; + } + if (!scn->bulk_out_ep) { /* This scanner does not have a bulk-out endpoint */ up(&(scn->sem)); @@ -644,6 +644,12 @@ down(&(scn->sem)); + if (!scn->present) { + /* The device was unplugged while open */ + up(&(scn->sem)); + return -ENODEV; + } + scn_minor = scn->scn_minor; ibuf = scn->ibuf; @@ -751,6 +757,12 @@ scn = file->private_data; down(&(scn->sem)); + if (!scn->present) { + /* The device was unplugged while open */ + up(&(scn->sem)); + return -ENODEV; + } + dev = scn->scn_dev; switch (cmd) @@ -978,7 +990,7 @@ } /* Check to make sure that the last slot isn't already taken */ - if (p_scn_table[scn_minor]) { + if (scn_minor >= SCN_MAX_MNR) { err("probe_scanner: No more minor devices remaining."); up(&scn_mutex); return NULL; @@ -1018,6 +1030,8 @@ /* Ok, now initialize all the relevant values */ if (!(scn->obuf = (char *)kmalloc(OBUF_SIZE, GFP_KERNEL))) { err("probe_scanner(%d): Not enough memory for the output buffer.", scn_minor); + if (have_intr) + usb_unlink_urb(&scn->scn_irq); kfree(scn); up(&scn_mutex); return NULL; @@ -1027,6 +1041,8 @@ if (!(scn->ibuf = (char *)kmalloc(IBUF_SIZE, GFP_KERNEL))) { err("probe_scanner(%d): Not enough memory for the input buffer.", scn_minor); kfree(scn->obuf); + if (have_intr) + usb_unlink_urb(&scn->scn_irq); kfree(scn); up(&scn_mutex); return NULL; @@ -1080,6 +1096,14 @@ } static void +purge_scanner(struct scn_usb_data *scn) +{ + kfree(scn->ibuf); + kfree(scn->obuf); + kfree(scn); +} + +static void disconnect_scanner(struct usb_device *dev, void *ptr) { struct scn_usb_data *scn = (struct scn_usb_data *) ptr; @@ -1094,15 +1118,22 @@ usb_driver_release_interface(&scanner_driver, &scn->scn_dev->actconfig->interface[scn->ifnum]); - kfree(scn->ibuf); - kfree(scn->obuf); - dbg("disconnect_scanner: De-allocating minor:%d", scn->scn_minor); devfs_unregister(scn->devfs); p_scn_table[scn->scn_minor] = NULL; + + if (scn->isopen) { + /* The device is still open - cleanup must be delayed */ + scn->present = 0; + up(&(scn->sem)); + up(&scn_mutex); + return; + } + up (&(scn->sem)); - kfree (scn); up (&scn_mutex); + + purge_scanner(scn); } static struct 
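
[Editorial note, not part of the patch.] The scanner.c hunks above close a disconnect-while-open race: disconnect_scanner() no longer frees the per-device state unconditionally; if a file handle is still open it only clears a "present" flag, and the last close_scanner() call (or any later read/write/ioctl, which now checks the flag and returns -ENODEV) is responsible for calling purge_scanner(). A minimal sketch of that hand-off is shown below, reusing the names visible in the hunks (struct scn_usb_data, scn->present, scn->isopen, purge_scanner) but omitting the driver's scn_mutex, devfs and minor-table bookkeeping for brevity; it is illustrative only, not the patched code itself.

/* Sketch of the deferred-cleanup pattern, assuming the fields named above. */
static void purge_scanner(struct scn_usb_data *scn)
{
	kfree(scn->ibuf);
	kfree(scn->obuf);
	kfree(scn);
}

static int close_scanner(struct inode *inode, struct file *file)
{
	struct scn_usb_data *scn = file->private_data;

	down(&scn->sem);
	scn->isopen = 0;
	file->private_data = NULL;
	if (!scn->present) {
		/* Device was unplugged while we held it open:
		   disconnect deferred the cleanup to us. */
		up(&scn->sem);
		purge_scanner(scn);
		return 0;
	}
	up(&scn->sem);
	return 0;
}

static void disconnect_scanner(struct usb_device *dev, void *ptr)
{
	struct scn_usb_data *scn = ptr;

	down(&scn->sem);
	if (scn->isopen) {
		/* Still open: mark absent and let close() free the state. */
		scn->present = 0;
		up(&scn->sem);
		return;
	}
	up(&scn->sem);
	purge_scanner(scn);	/* nobody has it open: free immediately */
}

The pwc changes earlier in this patch apply the same idea with an error code instead of a flag: disconnect sets pdev->error_status = EPIPE, wakes any sleepers on the frame queue, and waits for vopen to drop before unregistering and freeing the device.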
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/storage/unusual_devs.h linux.22-ac2/drivers/usb/storage/unusual_devs.h --- linux.vanilla/drivers/usb/storage/unusual_devs.h 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/storage/unusual_devs.h 2003-09-01 13:54:30.000000000 +0100 @@ -75,12 +75,12 @@ /* Deduced by Jonathan Woithe * Entry needed for flags: US_FL_FIX_INQUIRY because initial inquiry message - * always fails and confuses drive. + * always fails and confuses drive; */ UNUSUAL_DEV( 0x0411, 0x001c, 0x0113, 0x0113, "Buffalo", "DUB-P40G HDD", - US_SC_DEVICE, US_PR_DEVICE, NULL, + US_SC_SCSI, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY ), #ifdef CONFIG_USB_STORAGE_DPCM @@ -90,6 +90,19 @@ US_SC_SCSI, US_PR_DPCM_USB, NULL, 0 ), #endif +/* Made with the help of Edd Dumbill */ +UNUSUAL_DEV( 0x0451, 0x5409, 0x0001, 0x0001, + "Frontier Labs", + "Nex II Digital", + US_SC_SCSI, US_PR_BULK, NULL, 0), + +/* Reported by Thomas Rabe */ +UNUSUAL_DEV( 0x0461, 0x0822, 0x0000, 0x9999, + "Vivitar", + "Vivicam 3610", + US_SC_SCSI, US_PR_BULK, NULL, + US_FL_FIX_INQUIRY | US_FL_MODE_XLATE), + /* Patch submitted by Philipp Friedrich */ UNUSUAL_DEV( 0x0482, 0x0100, 0x0100, 0x0100, "Kyocera", @@ -239,6 +252,13 @@ US_SC_SCSI, US_PR_CB, NULL, US_FL_SINGLE_LUN | US_FL_MODE_XLATE ), +/* This entry is needed because the device reports Sub=ff */ +UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0432, + "Sony", + "DSC-F707/U10/U20", + US_SC_SCSI, US_PR_CB, NULL, + US_FL_SINGLE_LUN | US_FL_MODE_XLATE ), + /* Reported by wim@geeks.nl */ UNUSUAL_DEV( 0x054c, 0x0025, 0x0100, 0x0100, "Sony", @@ -268,11 +288,17 @@ US_FL_SINGLE_LUN | US_FL_MODE_XLATE), UNUSUAL_DEV( 0x054c, 0x0032, 0x0000, 0x9999, - "Sony", + "Sony", "Memorystick MSC-U01N", - US_SC_DEVICE, US_PR_DEVICE, NULL, + US_SC_UFI, US_PR_DEVICE, NULL, US_FL_SINGLE_LUN ), +UNUSUAL_DEV( 0x054c, 0x0058, 0x0000, 0x9999, + "Sony", + "PEG-N760C Mass Storage", + US_SC_8070, US_PR_CBI, NULL, + US_FL_FIX_INQUIRY ), + UNUSUAL_DEV( 0x054c, 0x0069, 0x0000, 0x9999, "Sony", "Memorystick MSC-U03", @@ -281,11 +307,11 @@ /* Submitted by Nathan Babb */ UNUSUAL_DEV( 0x054c, 0x006d, 0x0000, 0x9999, - "Sony", + "Sony", "PEG Mass Storage", US_SC_8070, US_PR_CBI, NULL, US_FL_FIX_INQUIRY ), - + UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299, "Y-E Data", "Flashbuster-U", @@ -411,10 +437,10 @@ US_FL_IGNORE_SER ), UNUSUAL_DEV( 0x0781, 0x0100, 0x0100, 0x0100, - "Sandisk", - "ImageMate SDDR-12", - US_SC_SCSI, US_PR_CB, NULL, - US_FL_SINGLE_LUN ), + "Sandisk", + "ImageMate SDDR-12", + US_SC_SCSI, US_PR_CB, NULL, + US_FL_SINGLE_LUN ), #ifdef CONFIG_USB_STORAGE_SDDR09 UNUSUAL_DEV( 0x0781, 0x0200, 0x0000, 0x9999, @@ -424,11 +450,17 @@ US_FL_SINGLE_LUN ), #endif +UNUSUAL_DEV( 0x0784, 0x1688, 0x0000, 0x9999, + "Vivitar", + "Vivicam 36xx", + US_SC_SCSI, US_PR_BULK, NULL, + US_FL_FIX_INQUIRY | US_FL_MODE_XLATE), + #ifdef CONFIG_USB_STORAGE_FREECOM UNUSUAL_DEV( 0x07ab, 0xfc01, 0x0000, 0x9999, - "Freecom", - "USB-IDE", - US_SC_QIC, US_PR_FREECOM, freecom_init, 0), + "Freecom", + "USB-IDE", + US_SC_QIC, US_PR_FREECOM, freecom_init, 0), #endif UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, @@ -517,6 +549,16 @@ US_FL_MODE_XLATE ), #endif +/* Datafab KECF-USB Ver A /Jenoptik Jenreader + * Note: there seem to be two versions of the KECF-USB device. 
+ * Submitted by Chris Clayton (chris@theclaytons.freeserve.co.uk) + */ +UNUSUAL_DEV( 0x07c4, 0xb000, 0x0000, 0xffff, + "Datafab", + "KECF-USB Ver A", + US_SC_SCSI, US_PR_BULK, NULL, + US_FL_FIX_INQUIRY ), + /* Datafab KECF-USB / Sagatek DCS-CF / Simpletech Flashlink UCF-100 * Only revision 1.13 tested (same for all of the above devices, * based on the Datafab DF-UG-07 chip). Needed for US_FL_FIX_INQUIRY. @@ -633,3 +675,31 @@ US_SC_SCSI, US_PR_SDDR55, NULL, US_FL_SINGLE_LUN), #endif + +/* + * Panasonic/OEMs compact USB CDROMs status + * KXL-840(CD-ROM11): usb_stor_Bulk_max_lun() is danger, need US_FL_SINGLE_LUN + * KXL-RW11(CDRRW02): usb_stor_Bulk_max_lun() is danger, need US_FL_SINGLE_LUN + * KXL-RW20(CDRRW03): original IClass is 0xFF, use US_PR_CB and need init reset + * KXL-RW21(CDRRW06): original IClass is 0xFF, use US_PR_CB and need init reset + * KXL-RW31(CDRRW05): work fine with current code + * KXL-RW32(CDRRW09): work fine with current code + * Checked: Sun Feb 9 JST 2003 Go Taniguchi + */ +UNUSUAL_DEV( 0x04da, 0x0d01, 0x0000, 0xffff, + "MATSHITA", + "CD-ROM11", + US_SC_8020, US_PR_BULK, NULL, US_FL_SINGLE_LUN), +UNUSUAL_DEV( 0x04da, 0x0d02, 0x0000, 0xffff, + "MATSHITA", + "CDRRW02", + US_SC_8020, US_PR_BULK, NULL, US_FL_SINGLE_LUN), +UNUSUAL_DEV( 0x04da, 0x0d03, 0x0000, 0xffff, + "MATSHITA", + "CDRRW03", + US_SC_8020, US_PR_CB, NULL, US_FL_INIT_RESET), +UNUSUAL_DEV( 0x04da, 0x0d06, 0x0000, 0xffff, + "MATSHITA", + "CDRRW06", + US_SC_8020, US_PR_CB, NULL, US_FL_INIT_RESET), + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/storage/usb.c linux.22-ac2/drivers/usb/storage/usb.c --- linux.vanilla/drivers/usb/storage/usb.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/storage/usb.c 2003-07-17 13:50:29.000000000 +0100 @@ -840,7 +840,8 @@ ss->transport_name = "Bulk"; ss->transport = usb_stor_Bulk_transport; ss->transport_reset = usb_stor_Bulk_reset; - ss->max_lun = usb_stor_Bulk_max_lun(ss); + if (!(ss->flags & US_FL_SINGLE_LUN)) + ss->max_lun = usb_stor_Bulk_max_lun(ss); break; #ifdef CONFIG_USB_STORAGE_HP8200e @@ -1019,6 +1020,11 @@ /* now register - our detect function will be called */ ss->htmplt.module = THIS_MODULE; + + /* some device need reset process */ + if (ss->flags & US_FL_INIT_RESET) + ss->transport_reset(ss); + scsi_register_module(MODULE_SCSI_HA, &(ss->htmplt)); /* lock access to the data structures */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/storage/usb.h linux.22-ac2/drivers/usb/storage/usb.h --- linux.vanilla/drivers/usb/storage/usb.h 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/storage/usb.h 2003-09-09 22:27:29.000000000 +0100 @@ -103,6 +103,7 @@ #define US_FL_SCM_MULT_TARG 0x00000020 /* supports multiple targets */ #define US_FL_FIX_INQUIRY 0x00000040 /* INQUIRY response needs fixing */ #define US_FL_FIX_CAPACITY 0x00000080 /* READ_CAPACITY response too big */ +#define US_FL_INIT_RESET 0x00000100 /* reset process when initialize */ #define USB_STOR_STRING_LEN 32 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/usb.c linux.22-ac2/drivers/usb/usb.c --- linux.vanilla/drivers/usb/usb.c 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/usb/usb.c 2003-07-17 13:50:41.000000000 +0100 @@ -164,6 +164,26 @@ } } +/* + * usb_ifnum_to_ifpos - convert the interface _number_ (as in interface.bInterfaceNumber) + * to the interface _position_ (as in dev->actconfig->interface + position) + * @dev: the device to 
use + * @ifnum: the interface number (bInterfaceNumber); not interface position + * + * Note that the number is the same as the position for all interfaces _except_ + * devices with interfaces not sequentially numbered (e.g., 0, 2, 3, etc). + */ +int usb_ifnum_to_ifpos(struct usb_device *dev, unsigned ifnum) +{ + int i; + + for (i = 0; i < dev->actconfig->bNumInterfaces; i++) + if (dev->actconfig->interface[i].altsetting[0].bInterfaceNumber == ifnum) + return i; + + return -EINVAL; +} + /** * usb_deregister - unregister a USB driver * @driver: USB operations of the driver to unregister @@ -546,7 +566,6 @@ iface->private_data = NULL; } - /** * usb_match_id - find first usb_device_id matching device or interface * @dev: the device whose descriptors are considered when matching @@ -759,6 +778,23 @@ return -1; } +/* + * usb_find_interface_driver_for_ifnum - convert ifnum to ifpos via + * usb_ifnum_to_ifpos and call usb_find_interface_driver(). + * @dev: the device to use + * @ifnum: the interface number (bInterfaceNumber); not interface position! + * + * Note usb_find_interface_driver's ifnum parameter is actually interface position. + */ +int usb_find_interface_driver_for_ifnum(struct usb_device *dev, unsigned int ifnum) +{ + int ifpos = usb_ifnum_to_ifpos(dev, ifnum); + + if (0 > ifpos) + return -EINVAL; + + return usb_find_interface_driver(dev, ifpos); +} #ifdef CONFIG_HOTPLUG @@ -2384,6 +2420,7 @@ * into the kernel, and other device drivers are built as modules, * then these symbols need to be exported for the modules to use. */ +EXPORT_SYMBOL(usb_ifnum_to_ifpos); EXPORT_SYMBOL(usb_ifnum_to_if); EXPORT_SYMBOL(usb_epnum_to_ep_desc); @@ -2398,6 +2435,7 @@ EXPORT_SYMBOL(usb_free_dev); EXPORT_SYMBOL(usb_inc_dev_use); +EXPORT_SYMBOL(usb_find_interface_driver_for_ifnum); EXPORT_SYMBOL(usb_driver_claim_interface); EXPORT_SYMBOL(usb_interface_claimed); EXPORT_SYMBOL(usb_driver_release_interface); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/usbdfu.c linux.22-ac2/drivers/usb/usbdfu.c --- linux.vanilla/drivers/usb/usbdfu.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/usbdfu.c 2003-08-13 22:04:06.000000000 +0100 @@ -0,0 +1,1015 @@ +/* -*- linux-c -*- */ +/* + * USB Device Firmware Upgrade (DFU) handler + * + * Copyright (c) 2003 Oliver Kurth + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * 2003_01_19 0.1: + * - initial release + * + * TODO: + * (someday) + * - make a way for drivers to feed firmware data at download time (instead of + * providing it all at once during register) [request_firmware] + * - procfs support for userland firmware downloaders + * - Firmware upload (device-to-host) support + */ + +#include +#include +#include +#include +#include +#include +#include "usbdfu.h" + +#define DRIVER_VERSION "v0.11beta4-ac1" + +#ifdef CONFIG_USB_DEBUG +static int debug = 1; +#else +static int debug; +#endif + +/* Use our own dbg macro */ +#undef dbg +#define dbg(format, arg...) 
do { if (debug) printk(KERN_DEBUG __FILE__ ": " format "\n" , ## arg); } while (0) + +#ifdef DEBUG_SEM + #define dfu_down(sem) do { dbg("sem %s down", #sem); down(sem); } while (0) + #define dfu_up(sem) do { dbg("sem %s up", #sem); up(sem); } while (0) +#else + #define dfu_down(sem) down(sem) + #define dfu_up(sem) up(sem) +#endif + +/* Version Information */ +#define DRIVER_AUTHOR \ +"Oliver Kurth , Joerg Albert , Alex " +#define DRIVER_DESC "USB Device Firmware Upgrade (DFU) handler" + +/* Module paramaters */ +MODULE_PARM(debug, "i"); +MODULE_PARM_DESC(debug, "Debug enabled or not"); + +/* USB class/subclass for DFU devices/interfaces */ + +#define DFU_USB_CLASS 0xfe +#define DFU_USB_SUBCLASS 0x01 + +/* DFU states */ + +#define STATE_IDLE 0x00 +#define STATE_DETACH 0x01 +#define STATE_DFU_IDLE 0x02 +#define STATE_DFU_DOWNLOAD_SYNC 0x03 +#define STATE_DFU_DOWNLOAD_BUSY 0x04 +#define STATE_DFU_DOWNLOAD_IDLE 0x05 +#define STATE_DFU_MANIFEST_SYNC 0x06 +#define STATE_DFU_MANIFEST 0x07 +#define STATE_DFU_MANIFEST_WAIT_RESET 0x08 +#define STATE_DFU_UPLOAD_IDLE 0x09 +#define STATE_DFU_ERROR 0x0a + +/* DFU commands */ +#define DFU_DETACH 0 +#define DFU_DNLOAD 1 +#define DFU_UPLOAD 2 +#define DFU_GETSTATUS 3 +#define DFU_CLRSTATUS 4 +#define DFU_GETSTATE 5 +#define DFU_ABORT 6 + +struct dfu_status { + unsigned char bStatus; + unsigned char bwPollTimeout[3]; + unsigned char bState; + unsigned char iString; +} __attribute__ ((packed)); + +struct usbdfu_infolist { + struct list_head list; + struct usbdfu_info *info; +}; + +/* driver independent download context */ +struct dfu_ctx { + struct usb_device *udev; + u8 dfu_state; + struct dfu_status dfu_status; + u8 *buf; +}; + +#define KEVENT_FLAG_SCHEDRESET 1 +#define KEVENT_FLAG_RESET 2 + +/* Structure to hold all of our device specific stuff */ +struct usbdfu { + struct usb_device * udev; /* save off the usb device pointer */ + + struct timer_list timer; + + struct tq_struct kevent; + u32 kevent_flags; + + struct semaphore sem; /* locks this structure */ + + struct usbdfu_info *info; + u8 op_mode; +}; + +static LIST_HEAD(usbdfu_infolist_head); +static struct semaphore usbdfu_lock; + +/* local function prototypes */ +static void * usbdfu_probe(struct usb_device *dev, + unsigned int ifnum, const struct usb_device_id *id); +static void usbdfu_disconnect(struct usb_device *dev, void *ptr); + +static struct usb_device_id dev_table[] = { + { .match_flags = (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS), + .bInterfaceClass = DFU_USB_CLASS, .bInterfaceSubClass = DFU_USB_SUBCLASS}, + { } +}; + +MODULE_DEVICE_TABLE (usb, dev_table); + +/* usb specific object needed to register this driver with the usb subsystem */ +static struct usb_driver usbdfu_driver = { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) + owner: THIS_MODULE, +#endif + name: "usbdfu", + probe: usbdfu_probe, + disconnect: usbdfu_disconnect, + id_table: dev_table, +}; + +/** + * usbdfu_debug_data - dump debug data + * @function: name of function + * @size: size of block to dump + * @data: block of data to dump + * + * Dump data into the kernel log along with identifying header + * and nicely formatted for debugging + */ + +static void usbdfu_debug_data (const char *function, int size, const unsigned char *data) +{ + int i; + + if (!debug) + return; + + printk (KERN_DEBUG __FILE__": %s - length = %d, data = ", + function, size); + for (i = 0; i < size; ++i) { + printk ("%.2x ", data[i]); + } + printk ("\n"); +} + + +#define USB_SUCCESS(a) (a >= 0) + +#define DFU_PACKETSIZE 
1024 + +#define INTERFACE_VENDOR_REQUEST_OUT 0x41 +#define INTERFACE_VENDOR_REQUEST_IN 0xc1 + +/** + * dfu_detach - unplug a DFU device + * @udev: device being detached + * + * Trigger the detach of a DFU device + */ + +static int dfu_detach(struct usb_device *udev) +{ + int result; + + dbg("dfu_detach"); + + result = usb_control_msg(udev, usb_sndctrlpipe(udev,0), + DFU_DETACH, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 1000, /* Value */ + 0, /* Index */ + NULL, /* Buffer */ + 0, /* Size */ + HZ); + + return result; +} + +/** + * dfu_download_block - download firmware block + * @ctx: DFU context for this download + * @buffer: the block to upload + * @bytes: the block size + * @block: the block number + * + * Upload a block of data to a DFU class device + */ + +static int dfu_download_block(struct dfu_ctx *ctx, u8 *buffer, + int bytes, int block) +{ + int result; + u8 *tmpbuf = ctx->buf; + struct usb_device *udev = ctx->udev; + + dbg("dfu_download_block(): buffer=%p, bytes=%d, block=%d", buffer, bytes, block); + + if(tmpbuf == NULL) + return -ENOMEM; + + memcpy(tmpbuf, buffer, bytes); + + result = usb_control_msg(udev, usb_sndctrlpipe(udev,0), + DFU_DNLOAD, + USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, + block, /* Value */ + 0, /* Index */ + tmpbuf, /* Buffer */ + bytes, /* Size */ + HZ); + return result; +} + +/** + * dfu_get_status - get DFU device state + * @ctx: DFU context + * @status: returned status block + * + * Retrieve the status of the DFU device + */ + +static int dfu_get_status(struct dfu_ctx *ctx, struct dfu_status *status) +{ + int result; + struct usb_device *udev = ctx->udev; + +// dbg("dfu_get_status()"); + + result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + DFU_GETSTATUS, + USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, + 0, /* Value */ + 0, /* Index */ + status, /* Buffer */ + sizeof(struct dfu_status), /* Size */ + HZ); + + return result; +} + +/** + * dfu_get_state - get DFU device state + * @ctx: DFU context + * @state: returned state + * + * Retrieve the state of the DFU device + */ + +static u8 dfu_get_state(struct usb_device *udev, u8 *state) +{ + int result; + +// dbg("dfu_get_state()"); + + result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + DFU_GETSTATE, /* Request */ + USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, + 0, /* Value */ + 0, /* Index */ + state, /* Buffer */ + 1, /* Size */ + HZ); + + return result; +} + +/** + * __get_timeout - 24bit number fixup + * @s: status block + * + * Unpacks and returns the 24bit status field from the + * status block as a native 32bit value + */ + +static inline u32 __get_timeout(struct dfu_status *s) +{ + unsigned long ret = 0; + + ret = (unsigned long) (s->bwPollTimeout[2] << 16); + ret |= (unsigned long) (s->bwPollTimeout[1] << 8); + ret |= (unsigned long) (s->bwPollTimeout[0]); + + return ret; +} + +/** + * dfu_alloc_ctx - allocate a DFU context + * @udev: USB device + * + * Allocate a context block for this USB device. The context + * block consists of a data buffer and a device pointer. + */ + +static struct dfu_ctx *dfu_alloc_ctx(struct usb_device *udev) +{ + struct dfu_ctx *ctx; + + ctx = kmalloc(sizeof(struct dfu_ctx) + DFU_PACKETSIZE, GFP_KERNEL); + if(ctx){ + ctx->udev = udev; + ctx->buf = (u8 *)&(ctx[1]); + } + return ctx; +} + +/** + * do_dfu_download - download handler + * @udev: USB device + * @dfu_buffer: firmware + * @dfu_len: firmware size + * + * Drive the state machine for downloading firmware into the USB + * device we are processing. 
+ */ + +int do_dfu_download(struct usb_device *udev, unsigned char *dfu_buffer, + unsigned int dfu_len) +{ + struct dfu_ctx *ctx; + struct dfu_status *dfu_stat_buf; + int status = 0; + int need_dfu_state = 1; + int is_done = 0; + u8 dfu_state = 0; + u32 dfu_timeout = 0; + int dfu_block_bytes = 0, dfu_bytes_left = dfu_len, dfu_buffer_offset = 0; + int dfu_block_cnt = 0; + + if (dfu_len == 0) { + err("FW Buffer length invalid!"); + return -EINVAL; + } + + ctx = dfu_alloc_ctx(udev); + if(ctx == NULL) + return -ENOMEM; + + dfu_stat_buf = &ctx->dfu_status; + + do { + if (need_dfu_state) { + status = dfu_get_state(ctx->udev, &ctx->dfu_state); + if (!USB_SUCCESS(status)) { + err("DFU: Failed to get DFU state: %d", status); + goto exit; + } + dfu_state = ctx->dfu_state; + need_dfu_state = 0; + } + + switch (dfu_state) { + case STATE_DFU_DOWNLOAD_SYNC: + dbg("STATE_DFU_DOWNLOAD_SYNC"); + if (USB_SUCCESS(status = dfu_get_status(ctx, dfu_stat_buf))) { + dfu_state = dfu_stat_buf->bState; + dfu_timeout = __get_timeout(dfu_stat_buf); + need_dfu_state = 0; + }else + err("dfu_get_status failed with %d", status); + break; + + case STATE_DFU_DOWNLOAD_BUSY: + dbg("STATE_DFU_DOWNLOAD_BUSY"); + need_dfu_state = 1; + + if (dfu_timeout >= 0) { + dbg("DFU: Resetting device"); + set_current_state( TASK_INTERRUPTIBLE ); + schedule_timeout(1+dfu_timeout*HZ/1000); + }else + dbg("DFU: In progress"); + + break; + + case STATE_DFU_DOWNLOAD_IDLE: + dbg("DOWNLOAD..."); + /* fall through */ + case STATE_DFU_IDLE: + dbg("DFU IDLE"); + + if (dfu_bytes_left <= DFU_PACKETSIZE) + dfu_block_bytes = dfu_bytes_left; + else + dfu_block_bytes = DFU_PACKETSIZE; + + dfu_bytes_left -= dfu_block_bytes; + status = dfu_download_block(ctx, + dfu_buffer + + dfu_buffer_offset, + dfu_block_bytes, + dfu_block_cnt); + dfu_buffer_offset += dfu_block_bytes; + dfu_block_cnt++; + + if (!USB_SUCCESS(status)) + err("dfu_download_block failed with %d", status); + need_dfu_state = 1; + break; + + case STATE_DFU_MANIFEST_SYNC: + dbg("STATE_DFU_MANIFEST_SYNC"); + + status = dfu_get_status(ctx, dfu_stat_buf); + + if (USB_SUCCESS(status)) { + dfu_state = dfu_stat_buf->bState; + dfu_timeout = __get_timeout(dfu_stat_buf); + need_dfu_state = 0; + + if (dfu_timeout >= 0){ + dbg("DFU: Waiting for manifest phase"); + + set_current_state( TASK_INTERRUPTIBLE ); + schedule_timeout((dfu_timeout*HZ+999)/1000); + }else + dbg("DFU: In progress"); + } + break; + + case STATE_DFU_MANIFEST: + dbg("STATE_DFU_MANIFEST"); + is_done = 1; + break; + + case STATE_DFU_MANIFEST_WAIT_RESET: + dbg("STATE_DFU_MANIFEST_WAIT_RESET"); +// usb_reset_device(udev); + is_done = 1; + break; + + case STATE_DFU_UPLOAD_IDLE: + dbg("STATE_DFU_UPLOAD_IDLE"); + break; + + case STATE_DFU_ERROR: + dbg("STATE_DFU_ERROR"); +// usb_reset_device(udev); + status = -EPIPE; + break; + + default: + dbg("DFU UNKNOWN STATE (%d)", dfu_state); + status = -EINVAL; + break; + } + } while (!is_done && USB_SUCCESS(status)); + + exit: + kfree(ctx); + if (status < 0) + return status; + else + return 0; +} + +/** + * usbdfu_download - download firmware + * @dev: USB DFU device + * @fw_buf: firmware + * @fw_len: length + * + * Performn the download sequence for a device, call back the + * pre and post methods as needed (post is not called on a failed + * download) + */ + +static int usbdfu_download(struct usbdfu *dev, u8 *fw_buf, u32 fw_len) +{ + int ret = 0; + + if (dev->info->pre_download_hook) { + ret = dev->info->pre_download_hook(dev->udev); + } + + if (ret) + return ret; + + info("Downloading firmware for USB 
device %d...", dev->udev->devnum); + + ret = do_dfu_download(dev->udev, fw_buf, fw_len); + + if (ret) + return ret; + + if (dev->info->post_download_hook) { + ret = dev->info->post_download_hook(dev->udev); + } + + return ret; +} + +/** + * usbdfu_delete - destroy USBDFU device blocks + * @dev: USB DFU device + * + * Free up the memory belonging to this DFU object + */ + +static inline void usbdfu_delete (struct usbdfu *dev) +{ + kfree (dev); +} + +/* shamelessly copied from usbnet.c (oku) */ + +/** + * defer_kevent - queue an asynchronous event + * @dev USB DFU device + * @flag: bit number of event to queue + * + * Queues an event to be processed later. We don't count + * events but we do ensure they are processed at least once + */ + +static void defer_kevent (struct usbdfu *dev, int flag) +{ + set_bit (flag, &dev->kevent_flags); + if (!schedule_task (&dev->kevent)) + err ("kevent %d may have been dropped", flag); + else + dbg ("kevent %d scheduled", flag); +} + +/** + * kevent_timer - event timer + * @data: DFU pointer + * + * Timeout event handler. This is called when we give up + * due to time limits + */ + +static void kevent_timer(unsigned long data) +{ + struct usbdfu *dev = (struct usbdfu *)data; + defer_kevent(dev, KEVENT_FLAG_RESET); +} + +/* TODO: how do we make sure the device hasn't been + plugged out in the meantime? */ +/* We don't really need to (trying to reset a disconnected device shouldn't + * cause a problem), we just need to make sure that disconnect hasn't freed the + * dev structure already. We do this by not freeing dev as long as + * "kevent_flags" has something set (indicating a kevent is pending) --alex */ + +static void +kevent(void *data) +{ + struct usbdfu *dev = data; + struct usb_device *udev; + struct usbdfu_info *info; + struct usb_interface *interface; + + dbg("kevent entered"); + + /* some paranoid checks: */ + if(!dev){ + err("kevent: no dev!"); + return; + } + + dfu_down(&dev->sem); + + info = dev->info; + if(!info){ + err("kevent: no dev->info!"); + goto exit; + } + udev = dev->udev; + if(!udev){ + err("kevent: no device!"); + goto exit; + } + + if (test_bit(KEVENT_FLAG_SCHEDRESET, &dev->kevent_flags)) { + clear_bit(KEVENT_FLAG_SCHEDRESET, &dev->kevent_flags); + defer_kevent (dev, KEVENT_FLAG_RESET); + } else if (test_bit(KEVENT_FLAG_RESET, &dev->kevent_flags)) { + clear_bit(KEVENT_FLAG_RESET, &dev->kevent_flags); + + /* releasing interface, so it can be claimed by our + fellow driver */ + interface = &udev->actconfig->interface[0]; + usb_driver_release_interface(&usbdfu_driver, interface); + + /* Once we release the interface, the USB system won't call + * usbdfu_disconnect for us, so we need to do that ourselves. + * Note: we cannot use dev after this point. */ + dfu_up(&dev->sem); + usbdfu_disconnect(udev, dev); + + dbg("resetting device"); + usb_reset_device(udev); + + dbg("scanning unclaimed devices"); + usb_scan_devices(); + + return; + } + + exit: + dfu_up(&dev->sem); + return; +} + +/** + * usbdfu_register - register a DFU device handler + * @info: USB DFU info block + * + * Register a DFU handler for this category of DFU device. Begin + * downloads for any DFU device we find that was attached before + * we were loaded. 
+ */ + +int usbdfu_register(struct usbdfu_info *info) +{ + struct usbdfu_infolist *infolist = kmalloc(sizeof(struct usbdfu_infolist), GFP_KERNEL); + + if(!infolist) + return -ENOMEM; + + infolist->info = info; + + dfu_down(&usbdfu_lock); + list_add_tail(&infolist->list, &usbdfu_infolist_head); + dfu_up(&usbdfu_lock); /* before scan, because that calls probe() */ + + dbg("registered new driver %s", info->name); + + /* if the device is not yet plugged in, we are settled. If it already + is (and it's already in DFU state), we have to scan for unclaimed + devices. This will call our probe function again. */ + usb_scan_devices(); + + return 0; +} + +/** + * usbdfu_unregister - unregister a USB DFU handler + * @info: The handler + * + * Unregister a DFU handler that was previously successfully + * registered with the USB DFU driver. + */ + +void usbdfu_unregister(struct usbdfu_info *info) +{ + struct list_head *tmp; + struct usbdfu_infolist *infolist = NULL; + + dbg("unregistering driver %s", info->name); + dfu_down(&usbdfu_lock); + + for(tmp = usbdfu_infolist_head.next; + tmp != &usbdfu_infolist_head; + tmp = tmp->next) { + + infolist = list_entry(tmp, struct usbdfu_infolist, + list); + + if(infolist->info == info) + break; + } + if(tmp != &usbdfu_infolist_head){ + list_del(tmp); + kfree(infolist); + } else { + err("unregistering %s: driver was not previously registered!", + info->name); + } + dfu_up(&usbdfu_lock); +} + +/** + * usbdfu_in_use - check USB DFU status + * @udev: USB device + * @ifnum: interface count + * + * Look for any USB DFU devices and then sanity check the + * results from querying them. Don't claim any DFU class + * devices that are in application state. + */ + +int usbdfu_in_use(struct usb_device *udev, unsigned int ifnum) +{ + int result; + u8 state; + struct usb_interface *interface; + struct usb_interface_descriptor *idesc; + + if (ifnum != 0) { + /* DFU-mode devices only have one interface */ + return 0; + } + + /* Check to see whether the interface's class is a DFU device. + * We need to check this first to make sure the DFU_GETSTATE command + * isn't misinterpreted as something else. */ + interface = &udev->actconfig->interface[ifnum]; + idesc = &interface->altsetting[interface->act_altsetting]; + if ((idesc->bInterfaceClass != DFU_USB_CLASS) || + (idesc->bInterfaceSubClass != DFU_USB_SUBCLASS)) { + dbg("interface class is not DFU"); + return 0; + } + + result = dfu_get_state(udev, &state); + if (result < 0) { + return result; + } else if (result != 1) { + /* This should be an error. The device reported this interface + * as a DFU-class interface, but it's not responding correctly + * to DFU-class commands on this interface. However, there + * appear to be some broken devices out there where this is + * normal behavior in some cases (at76c503 immediately after + * fw-load-reset), so just continue on (and hope we didn't + * screw anything up with that DFU command).. */ + dbg("DFU state query returned %d-byte response", + result); + return 0; + } + + switch (state) { + case STATE_IDLE: + case STATE_DETACH: + /* Device is in an application mode, it's up to other drivers + * to deal with it */ + dbg("DFU state=App (%d)", state); + return 0; + case STATE_DFU_IDLE: + case STATE_DFU_DOWNLOAD_SYNC: + case STATE_DFU_DOWNLOAD_BUSY: + case STATE_DFU_DOWNLOAD_IDLE: + case STATE_DFU_MANIFEST_SYNC: + case STATE_DFU_MANIFEST: + case STATE_DFU_MANIFEST_WAIT_RESET: + case STATE_DFU_UPLOAD_IDLE: + case STATE_DFU_ERROR: + /* This is what we're looking for. 
We're in the middle + * of dealing with this device */ + dbg("DFU state=DFU (%d)", state); + return 1; + default: + /* We got something that shouldn't be a valid response to a DFU + * state query. Again, this sometimes happens on broken + * devices (at76c503 immediately after fw-load-reset) which + * report DFU class but aren't really DFU-capable. */ + dbg("DFU state query returned bizarre response (%d)", state); + return 0; + } +} + +/** + * find_info - find info block for device + * @udev: device + * + * Search the USB DFU information blocks for a USB DFU handler + * for this device. Arguably we should have a generic handler + * here too that simply asks for firmware-usbdfu-%d-%d .. + */ + +static struct usbdfu_info *find_info(struct usb_device *udev) +{ + struct usb_interface *interface; + struct list_head *tmp; + struct usbdfu_infolist *infolist; + + dbg("searching for driver"); + + interface = &udev->actconfig->interface[0]; + + for(tmp = usbdfu_infolist_head.next; + tmp != &usbdfu_infolist_head; + tmp = tmp->next) { + infolist = list_entry(tmp, struct usbdfu_infolist, + list); + if(usb_match_id(udev, interface, infolist->info->id_table)) + return infolist->info; + } + + return NULL; +} + + +/** + * usbdfu_initiate_download - start download + * @udev: USB device + * + * Detach the DFU device, reset it and rescan so that we + * trigger a download for it. + */ + +int usbdfu_initiate_download(struct usb_device *udev) +{ + int result; + + if (!find_info(udev)) { + return -ENOENT; + } + + result = dfu_detach(udev); + if (!result) { + dbg("dfu_detach failed (%d)", result); + return result; + } + + usb_reset_device(udev); + usb_scan_devices(); + + return 0; +} + +/** + * usbdfu_probe - A device has been discovered + * @udev: device + * @ifnum: interface + * @id: matched id block + * + * Process the discovered device and see if we need to claim it and + * perform firmware downloads. If we do kick off the download and + * reset sequence. 
+ */ + +static void * usbdfu_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id) +{ + struct usbdfu *dev = NULL; + struct usbdfu_info *info = NULL; + int ret; + + dbg("usbdfu_probe entered"); + + if (ifnum != 0) { + dbg("more than one interface, cannot be DFU mode"); + return NULL; + } + + dfu_down(&usbdfu_lock); + + info = find_info(udev); + if (!info) + goto exit; /* not for us */ + + dbg("device is registered (%s)", info->name); + + if (usbdfu_in_use(udev, ifnum) != 1) { + dbg("device not in DFU-idle mode"); + goto exit; + } + dbg("device is in DFU mode"); + + /* allocate memory for our device state and intialize it */ + dev = kmalloc (sizeof(struct usbdfu), GFP_KERNEL); + if (dev == NULL) { + err ("out of memory"); + goto exit; + } + memset (dev, 0, sizeof (*dev)); + + init_MUTEX (&dev->sem); + + dfu_down(&dev->sem); + + INIT_TQUEUE (&dev->kevent, kevent, dev); + dev->udev = udev; + dev->info = info; + + dbg("going for download"); + /* here our main action takes place: */ + ret = usbdfu_download(dev, info->fw_buf, info->fw_buf_len); + if(ret < 0){ + err("Firmware download failed for USB device %d", udev->devnum); + goto error; + } + + init_timer(&dev->timer); + + if(info->reset_delay){ + dev->timer.data = (long) dev; + dev->timer.function = kevent_timer; + + mod_timer(&dev->timer, jiffies + info->reset_delay); + }else{ + defer_kevent (dev, KEVENT_FLAG_SCHEDRESET); + } + + dfu_up(&dev->sem); + goto exit; + +error: + dfu_up(&dev->sem); + usbdfu_delete (dev); + dev = NULL; + +exit: + dfu_up(&usbdfu_lock); + + dbg("usbdfu_probe() exiting"); + return dev; +} + +/** + * usbdfu_disconnect - device unplug + * @udev: device + * @ptr: info ptr + * + * A DFU device just talk a walk. Wait until there are no + * events pending and then delete the timer and device. + */ + +static void usbdfu_disconnect(struct usb_device *udev, void *ptr) +{ + struct usbdfu *dev = (struct usbdfu *)ptr; + int kevent_pending; + + dbg("usbdfu_disconnect called"); + + while (1) { + dfu_down(&dev->sem); + kevent_pending = dev->kevent_flags; + dfu_up(&dev->sem); + if (!kevent_pending) break; + dbg("usbdfu_disconnect: waiting for kevent to complete (%d pending)...", kevent_pending); + schedule(); + } + + /* FIXME: Race - the timer may have kicked the events off again */ + del_timer_sync(&dev->timer); + + usbdfu_delete(dev); + + dbg("USB DFU now disconnected"); +} + +/** + * usbdfu_init - initialize the USB DFU driver + * + * Called when the module is loaded to initialize the USB DFU class + * support + */ + +static int __init usbdfu_init(void) +{ + int result; + + info(DRIVER_DESC " " DRIVER_VERSION); + + init_MUTEX(&usbdfu_lock); + + /* register this driver with the USB subsystem */ + result = usb_register(&usbdfu_driver); + if (result < 0) { + err("usb_register failed for the "__FILE__" driver. 
Error number %d", + result); + return -1; + } + + return 0; +} + +/** + * usbdfu_exit - exit module + * + * Called when the USB DFU module is unloaded in order to + * unregister from the USB layer + */ + +static void __exit usbdfu_exit(void) +{ + /* deregister this driver with the USB subsystem */ + usb_deregister(&usbdfu_driver); +} + +module_init (usbdfu_init); +module_exit (usbdfu_exit); + +EXPORT_SYMBOL(usbdfu_register); +EXPORT_SYMBOL(usbdfu_unregister); +EXPORT_SYMBOL(usbdfu_in_use); +EXPORT_SYMBOL(usbdfu_initiate_download); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/usb/usbdfu.h linux.22-ac2/drivers/usb/usbdfu.h --- linux.vanilla/drivers/usb/usbdfu.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/drivers/usb/usbdfu.h 2003-09-01 13:54:30.000000000 +0100 @@ -0,0 +1,36 @@ +/* -*- linux-c -*- */ +/* + * USB Device Firmware Upgrade (DFU) handler + * + * Copyright (c) 2003 Oliver Kurth + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * + */ + +#ifndef _USBDFU_H +#define _USBDFU_H + +#include + +struct usbdfu_info { + const char *name; + const struct usb_device_id *id_table; + u8 *fw_buf; + int fw_buf_len; + int flags; + int (*pre_download_hook)(struct usb_device *udev); + int (*post_download_hook)(struct usb_device *udev); + unsigned int reset_delay; +}; + +int usbdfu_register(struct usbdfu_info *info); +void usbdfu_unregister(struct usbdfu_info *info); +int usbdfu_in_use(struct usb_device *udev, unsigned int ifnum); +int usbdfu_initiate_download(struct usb_device *udev); + +#endif /* _USBDFU_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/video/aty/atyfb_base.c linux.22-ac2/drivers/video/aty/atyfb_base.c --- linux.vanilla/drivers/video/aty/atyfb_base.c 2002-11-29 21:27:22.000000000 +0000 +++ linux.22-ac2/drivers/video/aty/atyfb_base.c 2003-06-29 16:10:20.000000000 +0100 @@ -360,6 +360,7 @@ /* 3D RAGE Mobility */ { 0x4c4d, 0x4c4d, 0x00, 0x00, m64n_mob_p, 230, 50, M64F_GT | M64F_INTEGRATED | M64F_RESET_3D | M64F_GTB_DSP | M64F_MOBIL_BUS }, + { 0x4c52, 0x4c52, 0x00, 0x00, m64n_mob_p, 230, 40, M64F_GT | M64F_INTEGRATED | M64F_RESET_3D | M64F_GTB_DSP | M64F_MOBIL_BUS | M64F_MAGIC_POSTDIV | M64F_SDRAM_MAGIC_PLL | M64F_XL_DLL }, { 0x4c4e, 0x4c4e, 0x00, 0x00, m64n_mob_a, 230, 50, M64F_GT | M64F_INTEGRATED | M64F_RESET_3D | M64F_GTB_DSP | M64F_MOBIL_BUS }, #endif /* CONFIG_FB_ATY_CT */ }; @@ -438,7 +439,7 @@ #endif /* defined(CONFIG_PPC) */ -#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_PMAC_BACKLIGHT) +#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_CT_VAIO_LCD) static void aty_st_lcd(int index, u32 val, const struct fb_info_aty *info) { unsigned long temp; @@ -460,7 +461,7 @@ /* read the register value */ return aty_ld_le32(LCD_DATA, info); } -#endif /* CONFIG_PMAC_PBOOK || CONFIG_PMAC_BACKLIGHT */ +#endif /* CONFIG_PMAC_PBOOK || CONFIG_PMAC_BACKLIGHT || CONFIG_FB_ATY_CT_VAIO_LCD */ /* ------------------------------------------------------------------------- */ @@ -1772,6 +1773,9 @@ #if defined(CONFIG_PPC) int sense; #endif +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + u32 pm, hs; +#endif u8 pll_ref_div; info->aty_cmap_regs = (struct aty_cmap_regs 
*)(info->ati_regbase+0xc0); @@ -2089,6 +2093,35 @@ var = default_var; #endif /* !__sparc__ */ #endif /* !CONFIG_PPC */ +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + /* Power Management */ + pm=aty_ld_lcd(POWER_MANAGEMENT, info); + pm=(pm & ~PWR_MGT_MODE_MASK) | PWR_MGT_MODE_PCI; + pm|=PWR_MGT_ON; + aty_st_lcd(POWER_MANAGEMENT, pm, info); + udelay(10); + + /* OVR_WID_LEFT_RIGHT */ + hs=aty_ld_le32(OVR_WID_LEFT_RIGHT,info); + hs= 0x00000000; + aty_st_le32(OVR_WID_LEFT_RIGHT, hs, info); + udelay(10); + + /* CONFIG_PANEL */ + hs=aty_ld_lcd(CONFIG_PANEL,info); + hs|=DONT_SHADOW_HEND ; + aty_st_lcd(CONFIG_PANEL, hs, info); + udelay(10); + +#if defined(DEBUG) + printk("LCD_INDEX CONFIG_PANEL LCD_GEN_CTRL POWER_MANAGEMENT\n" + "%08x %08x %08x %08x\n", + aty_ld_le32(LCD_INDEX, info), + aty_ld_lcd(CONFIG_PANEL, info), + aty_ld_lcd(LCD_GEN_CTRL, info), + aty_ld_lcd(POWER_MANAGEMENT, info), +#endif /* DEBUG */ +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ #endif /* !MODULE */ if (noaccel) var.accel_flags &= ~FB_ACCELF_TEXT; @@ -2714,6 +2747,23 @@ /* * Blank the display. */ +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) +static int set_backlight_enable(int on, struct fb_info_aty *info) +{ + unsigned int reg = aty_ld_lcd(POWER_MANAGEMENT, info); + if(on) { + reg=(reg & ~SUSPEND_NOW) | PWR_BLON; + } else { + reg=(reg & ~PWR_BLON) | SUSPEND_NOW; + } + aty_st_lcd(POWER_MANAGEMENT, reg, info); + udelay(10); +#ifdef DEBUG + printk(KERN_INFO "set_backlight_enable(%i): %08x\n", on, aty_ld_lcd(POWER_MANAGEMENT, info) ); +#endif + return 0; +} +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ static void atyfbcon_blank(int blank, struct fb_info *fb) { @@ -2725,6 +2775,9 @@ set_backlight_enable(0); #endif /* CONFIG_PMAC_BACKLIGHT */ +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + set_backlight_enable(!blank, info); +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ gen_cntl = aty_ld_8(CRTC_GEN_CNTL, info); if (blank > 0) switch (blank-1) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/video/aty/mach64_ct.c linux.22-ac2/drivers/video/aty/mach64_ct.c --- linux.vanilla/drivers/video/aty/mach64_ct.c 2001-07-31 22:43:29.000000000 +0100 +++ linux.22-ac2/drivers/video/aty/mach64_ct.c 2003-06-29 16:10:20.000000000 +0100 @@ -178,11 +178,14 @@ } pll->pll_gen_cntl |= mpostdiv<<4; /* mclk */ - if (M64_HAS(MAGIC_POSTDIV)) - pll->pll_ext_cntl = 0; - else +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) pll->pll_ext_cntl = mpostdiv; /* xclk == mclk */ - +#else + if ( M64_HAS(MAGIC_POSTDIV) ) + pll->pll_ext_cntl = 0; + else + pll->pll_ext_cntl = mpostdiv; /* xclk == mclk */ +#endif switch (pll->vclk_post_div_real) { case 2: vpostdiv = 1; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/video/aty/mach64.h linux.22-ac2/drivers/video/aty/mach64.h --- linux.vanilla/drivers/video/aty/mach64.h 2001-07-31 22:43:29.000000000 +0100 +++ linux.22-ac2/drivers/video/aty/mach64.h 2003-06-29 16:10:20.000000000 +0100 @@ -1148,6 +1148,8 @@ #define APC_LUT_MN 0x39 #define APC_LUT_OP 0x3A +/* Values in CONFIG_PANEL */ +#define DONT_SHADOW_HEND 0x00004000 /* Values in LCD_MISC_CNTL */ #define BIAS_MOD_LEVEL_MASK 0x0000ff00 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/video/Config.in linux.22-ac2/drivers/video/Config.in --- linux.vanilla/drivers/video/Config.in 2003-08-28 16:45:39.000000000 +0100 +++ linux.22-ac2/drivers/video/Config.in 2003-08-28 17:05:46.000000000 +0100 @@ -96,6 +96,9 @@ tristate ' Hercules mono graphics console (EXPERIMENTAL)' CONFIG_FB_HGA define_bool 
CONFIG_VIDEO_SELECT y fi + if [ "$CONFIG_IA64" = "y" ]; then + tristate ' VGA 16-color graphics console' CONFIG_FB_VGA16 + fi if [ "$CONFIG_VISWS" = "y" ]; then tristate ' SGI Visual Workstation framebuffer support' CONFIG_FB_SGIVW define_bool CONFIG_BUS_I2C y @@ -144,14 +147,20 @@ if [ "$CONFIG_FB_ATY" != "n" ]; then bool ' Mach64 GX support (EXPERIMENTAL)' CONFIG_FB_ATY_GX bool ' Mach64 CT/VT/GT/LT (incl. 3D RAGE) support' CONFIG_FB_ATY_CT + if [ "$CONFIG_FB_ATY_CT" = "y" ]; then + bool ' Sony Vaio C1VE 1024x480 LCD support' CONFIG_FB_ATY_CT_VAIO_LCD + fi fi tristate ' ATI Radeon display support (EXPERIMENTAL)' CONFIG_FB_RADEON +# if [ "$CONFIG_FB_RADEON" = "y" ]; then +# bool ' Sony Vaio C1MV 1280x600 LCD support' CONFIG_FB_RADEON_VAIO_LCD +# fi tristate ' ATI Rage128 display support (EXPERIMENTAL)' CONFIG_FB_ATY128 tristate ' Intel 830M/845G/852GM/855GM/865G display support (EXPERIMENTAL)' CONFIG_FB_INTEL tristate ' SIS acceleration (EXPERIMENTAL)' CONFIG_FB_SIS if [ "$CONFIG_FB_SIS" != "n" ]; then - bool ' SIS 630/540/730 support' CONFIG_FB_SIS_300 - bool ' SIS 315H/315 support' CONFIG_FB_SIS_315 + bool ' SIS 300/305/540/630/730 support' CONFIG_FB_SIS_300 + bool ' SIS 315/650/M650/651/740/Xabre support' CONFIG_FB_SIS_315 fi tristate ' NeoMagic display support (EXPERIMENTAL)' CONFIG_FB_NEOMAGIC tristate ' 3Dfx Banshee/Voodoo3 display support (EXPERIMENTAL)' CONFIG_FB_3DFX @@ -294,6 +303,7 @@ "$CONFIG_FB_PM3" = "y" -o "$CONFIG_FB_TRIDENT" = "y" -o \ "$CONFIG_FB_P9100" = "y" -o "$CONFIG_FB_ATY128" = "y" -o \ "$CONFIG_FB_RIVA" = "y" -o "$CONFIG_FB_RADEON" = "y" -o \ + "$CONFIG_FB_INTEL" = "y" -o \ "$CONFIG_FB_SGIVW" = "y" -o "$CONFIG_FB_CYBER2000" = "y" -o \ "$CONFIG_FB_SA1100" = "y" -o "$CONFIG_FB_3DFX" = "y" -o \ "$CONFIG_FB_PMAG_BA" = "y" -o "$CONFIG_FB_PMAGB_B" = "y" -o \ @@ -314,12 +324,13 @@ "$CONFIG_FB_VALKYRIE" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \ "$CONFIG_FB_IGA" = "m" -o "$CONFIG_FB_MATROX" = "m" -o \ "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_PM2" = "m" -o \ - "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "y" -o \ + "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "m" -o \ "$CONFIG_FB_P9100" = "m" -o "$CONFIG_FB_ATY128" = "m" -o \ "$CONFIG_FB_RIVA" = "m" -o "$CONFIG_FB_3DFX" = "m" -o \ "$CONFIG_FB_SGIVW" = "m" -o "$CONFIG_FB_CYBER2000" = "m" -o \ "$CONFIG_FB_PMAG_BA" = "m" -o "$CONFIG_FB_PMAGB_B" = "m" -o \ "$CONFIG_FB_MAXINE" = "m" -o "$CONFIG_FB_RADEON" = "m" -o \ + "$CONFIG_FB_INTEL" = "m" -o \ "$CONFIG_FB_SA1100" = "m" -o "$CONFIG_FB_SIS" = "m" -o \ "$CONFIG_FB_TX3912" = "m" -o "$CONFIG_FB_NEOMAGIC" = "m" -o \ "$CONFIG_FB_STI" = "m" -o "$CONFIG_FB_INTEL" = "m" ]; then @@ -330,6 +341,7 @@ "$CONFIG_FB_MAC" = "y" -o "$CONFIG_FB_VESA" = "y" -o \ "$CONFIG_FB_VIRTUAL" = "y" -o "$CONFIG_FB_TBOX" = "y" -o \ "$CONFIG_FB_Q40" = "y" -o "$CONFIG_FB_RADEON" = "y" -o \ + "$CONFIG_FB_INTEL" = "y" -o \ "$CONFIG_FB_CONTROL" = "y" -o "$CONFIG_FB_CLGEN" = "y" -o \ "$CONFIG_FB_VIRGE" = "y" -o "$CONFIG_FB_CYBER" = "y" -o \ "$CONFIG_FB_VALKYRIE" = "y" -o "$CONFIG_FB_PLATINUM" = "y" -o \ @@ -352,10 +364,11 @@ "$CONFIG_FB_VALKYRIE" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \ "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_MATROX" = "m" -o \ "$CONFIG_FB_PM2" = "m" -o "$CONFIG_FB_SGIVW" = "m" -o \ - "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "y" -o \ + "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "m" -o \ "$CONFIG_FB_RIVA" = "m" -o "$CONFIG_FB_ATY128" = "m" -o \ "$CONFIG_FB_CYBER2000" = "m" -o "$CONFIG_FB_SIS" = "m" -o \ "$CONFIG_FB_SA1100" = "m" -o "$CONFIG_FB_RADEON" = "m" -o \ + 
"$CONFIG_FB_INTEL" = "m" -o \ "$CONFIG_FB_PVR2" = "m" -o "$CONFIG_FB_VOODOO1" = "m" -o \ "$CONFIG_FB_NEOMAGIC" = "m" -o "$CONFIG_FB_INTEL" = "m" ]; then define_tristate CONFIG_FBCON_CFB16 m @@ -387,6 +400,7 @@ "$CONFIG_FB_RIVA" = "y" -o "$CONFIG_FB_ATY128" = "y" -o \ "$CONFIG_FB_FM2" = "y" -o "$CONFIG_FB_SGIVW" = "y" -o \ "$CONFIG_FB_RADEON" = "y" -o "$CONFIG_FB_PVR2" = "y" -o \ + "$CONFIG_FB_INTEL" = "y" -o \ "$CONFIG_FB_3DFX" = "y" -o "$CONFIG_FB_SIS" = "y" -o \ "$CONFIG_FB_VOODOO1" = "y" -o "$CONFIG_FB_CYBER2000" = "y" -o \ "$CONFIG_FB_STI" = "y" -o "$CONFIG_FB_INTEL" = "y" ]; then @@ -397,13 +411,13 @@ "$CONFIG_FB_CONTROL" = "m" -o "$CONFIG_FB_CLGEN" = "m" -o \ "$CONFIG_FB_TGA" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \ "$CONFIG_FB_MATROX" = "m" -o "$CONFIG_FB_PM2" = "m" -o \ - "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "y" -o \ + "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "m" -o \ "$CONFIG_FB_RIVA" = "m" -o "$CONFIG_FB_ATY128" = "m" -o \ "$CONFIG_FB_3DFX" = "m" -o "$CONFIG_FB_RADEON" = "m" -o \ + "$CONFIG_FB_INTEL" = "m" -o \ "$CONFIG_FB_SGIVW" = "m" -o "$CONFIG_FB_SIS" = "m" -o \ "$CONFIG_FB_PVR2" = "m" -o "$CONFIG_FB_VOODOO1" = "m" -o \ - "$CONFIG_FB_CYBER2000" = "m" -o "$CONFIG_FB_STI" = "y" -o \ - "$CONFIG_FB_INTEL" = "m" ]; then + "$CONFIG_FB_CYBER2000" = "m" -o "$CONFIG_FB_STI" = "m" ]; then define_tristate CONFIG_FBCON_CFB32 m fi fi @@ -450,9 +464,9 @@ define_tristate CONFIG_FBCON_HGA m fi fi - if [ "$CONFIG_FB_STI" = "y" ]; then - define_tristate CONFIG_FBCON_STI y - fi + fi + if [ "$CONFIG_FB_STI" = "y" ]; then + define_tristate CONFIG_FBCON_STI y fi bool ' Support only 8 pixels wide fonts' CONFIG_FBCON_FONTWIDTH8_ONLY if [ "$CONFIG_SPARC32" = "y" -o "$CONFIG_SPARC64" = "y" ]; then diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/video/modedb.c linux.22-ac2/drivers/video/modedb.c --- linux.vanilla/drivers/video/modedb.c 2003-06-14 00:11:39.000000000 +0100 +++ linux.22-ac2/drivers/video/modedb.c 2003-06-29 16:10:19.000000000 +0100 @@ -43,6 +43,20 @@ #define DEFAULT_MODEDB_INDEX 0 static struct fb_videomode modedb[] __initdata = { +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + { + /* 1024x480 @ 65 Hz */ + NULL, 65, 1024, 480, 25203, 24, 24, 1, 17, 144, 4, + 0, FB_VMODE_NONINTERLACED + }, +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ +#if defined(CONFIG_FB_RADEON_VAIO_LCD) + { + /* 1280x600 @ 72 Hz, 45.288 kHz hsync */ + NULL, 72, 1280, 600, 13940, 24, 24, 23, 1, 256, 5, + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + }, +#endif /* CONFIG_FB_RADEON_VAIO_LCD */ { /* 640x400 @ 70 Hz, 31.5 kHz hsync */ NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/drivers/video/tdfxfb.c linux.22-ac2/drivers/video/tdfxfb.c --- linux.vanilla/drivers/video/tdfxfb.c 2003-06-14 00:11:39.000000000 +0100 +++ linux.22-ac2/drivers/video/tdfxfb.c 2003-07-31 14:22:43.000000000 +0100 @@ -764,7 +764,11 @@ tdfx_outl(SRCXY, 0); tdfx_outl(DSTXY, xx | (yy << 16)); tdfx_outl(COMMAND_2D, COMMAND_2D_H2S_BITBLT | (ROP_COPY << 24)); +#ifdef __BIG_ENDIAN + tdfx_outl(SRCFORMAT, 0x400000 | BIT(20) ); +#else tdfx_outl(SRCFORMAT, 0x400000); +#endif tdfx_outl(DSTFORMAT, fmt); tdfx_outl(DSTSIZE, fontwidth(p) | (fontheight(p) << 16)); i=fontheight(p); @@ -822,7 +826,11 @@ tdfx_outl(COMMAND_3D, COMMAND_3D_NOP); tdfx_outl(COLORFORE, fgx); tdfx_outl(COLORBACK, bgx); +#ifdef __BIG_ENDIAN + tdfx_outl(SRCFORMAT, 0x400000 | BIT(20) ); +#else tdfx_outl(SRCFORMAT, 0x400000); +#endif tdfx_outl(DSTFORMAT, fmt); 
tdfx_outl(DSTSIZE, w | (h << 16)); tdfx_outl(SRCXY, 0); @@ -1475,6 +1483,7 @@ #if defined(__BIG_ENDIAN) switch (par->bpp) { case 8: + case 24: reg.miscinit0 &= ~(1 << 30); reg.miscinit0 &= ~(1 << 31); break; @@ -1482,7 +1491,6 @@ reg.miscinit0 |= (1 << 30); reg.miscinit0 |= (1 << 31); break; - case 24: case 32: reg.miscinit0 |= (1 << 30); reg.miscinit0 &= ~(1 << 31); @@ -1635,10 +1643,6 @@ v.blue.length = 5; break; case 24: - v.red.offset=16; - v.green.offset=8; - v.blue.offset=0; - v.red.length = v.green.length = v.blue.length = 8; case 32: v.red.offset = 16; v.green.offset = 8; @@ -1942,6 +1946,12 @@ break; } + if (pci_enable_device(pdev)) + { + printk(KERN_WARNING "fb: Unable to enable %s PCI device.\n", name); + return -ENXIO; + } + fb_info.regbase_phys = pci_resource_start(pdev, 0); fb_info.regbase_size = 1 << 24; fb_info.regbase_virt = ioremap_nocache(fb_info.regbase_phys, 1 << 24); @@ -1970,6 +1980,13 @@ fb_info.iobase = pci_resource_start (pdev, 2); + if (!fb_info.iobase) { + printk(KERN_WARNING "fb: Can't access %s I/O ports.\n", name); + iounmap(fb_info.regbase_virt); + iounmap(fb_info.bufbase_virt); + return -ENXIO; + } + printk("fb: %s memory = %ldK\n", name, fb_info.bufbase_size >> 10); #ifdef CONFIG_MTRR @@ -2363,10 +2380,25 @@ unsigned int h,to; tdfxfb_createcursorshape(p); - xline = ~((1 << (32 - fb_info.cursor.w)) - 1); + xline = (~0) << (32 - fb_info.cursor.w); #ifdef __LITTLE_ENDIAN xline = swab32(xline); +#else + switch (p->var.bits_per_pixel) { + case 8: + case 24: + xline = swab32(xline); + break; + case 16: + xline = ((xline & 0xff000000 ) >> 16 ) + | ((xline & 0x00ff0000 ) >> 16 ) + | ((xline & 0x0000ff00 ) << 16 ) + | ((xline & 0x000000ff ) << 16 ); + break; + case 32: + break; + } #endif cursorbase=(u8*)fb_info.bufbase_virt; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/binfmt_elf.c linux.22-ac2/fs/binfmt_elf.c --- linux.vanilla/fs/binfmt_elf.c 2003-08-28 16:45:40.000000000 +0100 +++ linux.22-ac2/fs/binfmt_elf.c 2003-08-28 18:28:09.000000000 +0100 @@ -375,7 +375,6 @@ unsigned long text_data, elf_entry = ~0UL; char * addr; loff_t offset; - int retval; current->mm->end_code = interp_ex->a_text; text_data = interp_ex->a_text + interp_ex->a_data; @@ -397,11 +396,9 @@ } do_brk(0, text_data); - retval = -ENOEXEC; if (!interpreter->f_op || !interpreter->f_op->read) goto out; - retval = interpreter->f_op->read(interpreter, addr, text_data, &offset); - if (retval < 0) + if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0) goto out; flush_icache_range((unsigned long)addr, (unsigned long)addr + text_data); @@ -1166,7 +1163,7 @@ psinfo.pr_state = i; psinfo.pr_sname = (i < 0 || i > 5) ? '.' 
: "RSDZTD"[i]; psinfo.pr_zomb = psinfo.pr_sname == 'Z'; - psinfo.pr_nice = current->nice; + psinfo.pr_nice = task_nice(current); psinfo.pr_flag = current->flags; psinfo.pr_uid = NEW_TO_OLD_UID(current->uid); psinfo.pr_gid = NEW_TO_OLD_GID(current->gid); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/buffer.c linux.22-ac2/fs/buffer.c --- linux.vanilla/fs/buffer.c 2003-08-28 16:45:40.000000000 +0100 +++ linux.22-ac2/fs/buffer.c 2003-08-28 17:06:29.000000000 +0100 @@ -123,6 +123,36 @@ int bdflush_min[N_PARAM] = { 0, 1, 0, 0, 0, 1*HZ, 0, 0, 0}; int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,10000*HZ, 10000*HZ, 100, 100, 0}; +static inline int write_buffer_delay(struct buffer_head *bh) +{ + struct page *page = bh->b_page; + + if (!TryLockPage(page)) { + spin_unlock(&lru_list_lock); + unlock_buffer(bh); + page->mapping->a_ops->writepage(page); + return 1; + } + + return 0; +} + +static inline void write_buffer(struct buffer_head *bh) +{ + if (buffer_delay(bh)) { + struct page *page = bh->b_page; + + lock_page(page); + if (buffer_delay(bh)) { + page->mapping->a_ops->writepage(page); + return; + } + unlock_page(page); + } + + ll_rw_block(WRITE, 1, &bh); +} + void unlock_buffer(struct buffer_head *bh) { clear_bit(BH_Wait_IO, &bh->b_state); @@ -226,7 +256,13 @@ continue; if (test_and_set_bit(BH_Lock, &bh->b_state)) continue; - if (atomic_set_buffer_clean(bh)) { + if (buffer_delay(bh)) { + if (write_buffer_delay(bh)) { + if (count) + write_locked_buffers(array, count); + return -EAGAIN; + } + } else if (atomic_set_buffer_clean(bh)) { __refile_buffer(bh); get_bh(bh); array[count++] = bh; @@ -756,9 +792,10 @@ bh->b_list = BUF_CLEAN; bh->b_end_io = handler; bh->b_private = private; + bh->b_journal_head = NULL; } -static void end_buffer_io_async(struct buffer_head * bh, int uptodate) +void end_buffer_io_async(struct buffer_head * bh, int uptodate) { static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED; unsigned long flags; @@ -817,6 +854,7 @@ spin_unlock_irqrestore(&page_uptodate_lock, flags); return; } +EXPORT_SYMBOL(end_buffer_io_async); inline void set_buffer_async_io(struct buffer_head *bh) { @@ -873,7 +911,7 @@ * a noop) */ wait_on_buffer(bh); - ll_rw_block(WRITE, 1, &bh); + write_buffer(bh); brelse(bh); spin_lock(&lru_list_lock); } @@ -1321,13 +1359,14 @@ */ static void discard_buffer(struct buffer_head * bh) { - if (buffer_mapped(bh)) { + if (buffer_mapped(bh) || buffer_delay(bh)) { mark_buffer_clean(bh); lock_buffer(bh); clear_bit(BH_Uptodate, &bh->b_state); clear_bit(BH_Mapped, &bh->b_state); clear_bit(BH_Req, &bh->b_state); clear_bit(BH_New, &bh->b_state); + clear_bit(BH_Delay, &bh->b_state); remove_from_queues(bh); unlock_buffer(bh); } @@ -1619,7 +1658,7 @@ set_bit(BH_Uptodate, &bh->b_state); continue; } - if (!buffer_uptodate(bh) && + if (!buffer_uptodate(bh) && !buffer_delay(bh) && (block_start < from || block_end > to)) { ll_rw_block(READ, 1, &bh); *wait_bh++=bh; @@ -2016,7 +2055,7 @@ if (Page_Uptodate(page)) set_bit(BH_Uptodate, &bh->b_state); - if (!buffer_uptodate(bh)) { + if (!buffer_uptodate(bh) && !buffer_delay(bh)) { err = -EIO; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); @@ -2146,7 +2185,6 @@ bh.b_state = 0; bh.b_dev = inode->i_dev; bh.b_size = blocksize; - bh.b_page = NULL; if (((loff_t) blocknr) * blocksize >= inode->i_size) beyond_eof = 1; @@ -2231,7 +2269,9 @@ for (i = nr; --i >= 0; ) { iosize += size; tmp = bh[i]; - wait_on_buffer(tmp); + if (buffer_locked(tmp)) { + wait_on_buffer(tmp); + } if (!buffer_uptodate(tmp)) { /* We are 
traversing bh'es in reverse order so @@ -2745,7 +2785,7 @@ { #ifdef CONFIG_SMP struct buffer_head * bh; - int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0; + int delalloc = 0, found = 0, locked = 0, dirty = 0, used = 0, lastused = 0; int nlist; static char *buf_types[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY", }; #endif @@ -2760,7 +2800,7 @@ if (!spin_trylock(&lru_list_lock)) return; for(nlist = 0; nlist < NR_LIST; nlist++) { - found = locked = dirty = used = lastused = 0; + delalloc = found = locked = dirty = used = lastused = 0; bh = lru_list[nlist]; if(!bh) continue; @@ -2770,6 +2810,8 @@ locked++; if (buffer_dirty(bh)) dirty++; + if (buffer_delay(bh)) + delalloc++; if (atomic_read(&bh->b_count)) used++, lastused = found; bh = bh->b_next_free; @@ -2780,10 +2822,10 @@ printk("%9s: BUG -> found %d, reported %d\n", buf_types[nlist], found, tmp); } - printk("%9s: %d buffers, %lu kbyte, %d used (last=%d), " - "%d locked, %d dirty\n", + printk("%7s: %d buffers, %lu kbyte, %d used (last=%d), " + "%d locked, %d dirty %d delay\n", buf_types[nlist], found, size_buffers_type[nlist]>>10, - used, lastused, locked, dirty); + used, lastused, locked, dirty, delalloc); } spin_unlock(&lru_list_lock); #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/Config.in linux.22-ac2/fs/Config.in --- linux.vanilla/fs/Config.in 2003-08-28 16:45:40.000000000 +0100 +++ linux.22-ac2/fs/Config.in 2003-07-17 13:51:04.000000000 +0100 @@ -97,6 +97,10 @@ tristate 'UFS file system support (read only)' CONFIG_UFS_FS dep_mbool ' UFS file system write support (DANGEROUS)' CONFIG_UFS_FS_WRITE $CONFIG_UFS_FS $CONFIG_EXPERIMENTAL +tristate 'XFS filesystem support' CONFIG_XFS_FS +dep_mbool ' Realtime support (EXPERIMENTAL)' CONFIG_XFS_RT $CONFIG_XFS_FS $CONFIG_EXPERIMENTAL +dep_mbool ' Quota support' CONFIG_XFS_QUOTA $CONFIG_XFS_FS + if [ "$CONFIG_NET" = "y" ]; then mainmenu_option next_comment diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/dquot.c linux.22-ac2/fs/dquot.c --- linux.vanilla/fs/dquot.c 2003-08-28 16:45:40.000000000 +0100 +++ linux.22-ac2/fs/dquot.c 2003-07-17 13:51:27.000000000 +0100 @@ -68,11 +68,13 @@ #include #include #include +#include #include static char *quotatypes[] = INITQFNAMES; static struct quota_format_type *quota_formats; /* List of registered formats */ +static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; int register_quota_format(struct quota_format_type *fmt) { @@ -100,8 +102,19 @@ lock_kernel(); for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); - if (actqf && !try_inc_mod_count(actqf->qf_owner)) - actqf = NULL; + if (!actqf || !try_inc_mod_count(actqf->qf_owner)) { + int qm; + + for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); + if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) { + actqf = NULL; + goto out; + } + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + if (actqf && !try_inc_mod_count(actqf->qf_owner)) + actqf = NULL; + } +out: unlock_kernel(); return actqf; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/exec.c linux.22-ac2/fs/exec.c --- linux.vanilla/fs/exec.c 2003-08-28 16:45:40.000000000 +0100 +++ linux.22-ac2/fs/exec.c 2003-08-28 17:06:29.000000000 +0100 @@ -50,6 +50,7 @@ int core_uses_pid; char core_pattern[65] = "core"; +int core_setuid_ok = 0; /* The maximal length of core_pattern is also specified in sysctl.c */ 
static struct linux_binfmt *formats; @@ -339,6 +340,12 @@ if (!mpnt) return -ENOMEM; + if (!vm_enough_memory((STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))>>PAGE_SHIFT)) + { + kmem_cache_free(vm_area_cachep, mpnt); + return -ENOMEM; + } + down_write(¤t->mm->mmap_sem); { mpnt->vm_mm = current->mm; @@ -1104,13 +1111,18 @@ struct file * file; struct inode * inode; int retval = 0; + int fsuid = current->fsuid; lock_kernel(); binfmt = current->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; if (!is_dumpable(current)) - goto fail; + { + if(!core_setuid_ok || !current->task_dumpable) + goto fail; + current->fsuid = 0; + } current->mm->dumpable = 0; if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump) goto fail; @@ -1139,6 +1151,8 @@ close_fail: filp_close(file, NULL); fail: + if (fsuid != current->fsuid) + current->fsuid = fsuid; unlock_kernel(); return retval; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/ext3/ioctl.c linux.22-ac2/fs/ext3/ioctl.c --- linux.vanilla/fs/ext3/ioctl.c 2003-06-14 00:11:39.000000000 +0100 +++ linux.22-ac2/fs/ext3/ioctl.c 2003-08-08 15:01:15.000000000 +0100 @@ -115,13 +115,11 @@ if (IS_ERR(handle)) return PTR_ERR(handle); err = ext3_reserve_inode_write(handle, inode, &iloc); - if (err) - return err; - - inode->i_ctime = CURRENT_TIME; - inode->i_generation = generation; - - err = ext3_mark_iloc_dirty(handle, inode, &iloc); + if (!err) { + inode->i_ctime = CURRENT_TIME; + inode->i_generation = generation; + err = ext3_mark_iloc_dirty(handle, inode, &iloc); + } ext3_journal_stop(handle, inode); return err; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/inode.c linux.22-ac2/fs/inode.c --- linux.vanilla/fs/inode.c 2003-08-28 16:45:40.000000000 +0100 +++ linux.22-ac2/fs/inode.c 2003-07-14 13:29:31.000000000 +0100 @@ -834,6 +834,20 @@ return inode; } +void unlock_new_inode(struct inode *inode) +{ + /* + * This is special! We do not need the spinlock + * when clearing I_LOCK, because we're guaranteed + * that nobody else tries to do anything about the + * state of the inode when it is locked, as we + * just created it (so there can be no old holders + * that haven't tested I_LOCK). + */ + inode->i_state &= ~(I_LOCK|I_NEW); + wake_up(&inode->i_wait); +} + /* * This is called without the inode lock held.. Be careful. * @@ -856,31 +870,13 @@ list_add(&inode->i_list, &inode_in_use); list_add(&inode->i_hash, head); inode->i_ino = ino; - inode->i_state = I_LOCK; + inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); - /* reiserfs specific hack right here. We don't - ** want this to last, and are looking for VFS changes - ** that will allow us to get rid of it. - ** -- mason@suse.com - */ - if (sb->s_op->read_inode2) { - sb->s_op->read_inode2(inode, opaque) ; - } else { - sb->s_op->read_inode(inode); - } - /* - * This is special! We do not need the spinlock - * when clearing I_LOCK, because we're guaranteed - * that nobody else tries to do anything about the - * state of the inode when it is locked, as we - * just created it (so there can be no old holders - * that haven't tested I_LOCK). 
+ * Return the locked inode with I_NEW set, the + * caller is responsible for filling in the contents */ - inode->i_state &= ~I_LOCK; - wake_up(&inode->i_wait); - return inode; } @@ -960,8 +956,7 @@ return inode; } - -struct inode *iget4(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque) +struct inode *iget4_locked(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque) { struct list_head * head = inode_hashtable + hash(sb,ino); struct inode * inode; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/iobuf.c linux.22-ac2/fs/iobuf.c --- linux.vanilla/fs/iobuf.c 2002-11-29 21:27:22.000000000 +0000 +++ linux.22-ac2/fs/iobuf.c 2003-06-29 16:09:19.000000000 +0100 @@ -8,8 +8,6 @@ #include #include -#include - static kmem_cache_t *kiobuf_cachep; @@ -27,6 +25,8 @@ static int kiobuf_init(struct kiobuf *iobuf) { + int retval; + init_waitqueue_head(&iobuf->wait_queue); iobuf->array_len = 0; iobuf->nr_pages = 0; @@ -35,7 +35,16 @@ iobuf->blocks = NULL; atomic_set(&iobuf->io_count, 0); iobuf->end_io = NULL; - return expand_kiobuf(iobuf, KIO_STATIC_PAGES); + iobuf->initialized = 0; + retval = expand_kiobuf(iobuf, KIO_STATIC_PAGES); + if (retval) return retval; + retval = alloc_kiobuf_bhs(iobuf); + if (retval) { + kfree(iobuf->maplist); + return retval; + } + iobuf->initialized = 1; + return 0; } int alloc_kiobuf_bhs(struct kiobuf * kiobuf) @@ -89,6 +98,21 @@ } } +void kiobuf_ctor(void * objp, kmem_cache_t * cachep, unsigned long flag) +{ + struct kiobuf * iobuf = (struct kiobuf *) objp; + kiobuf_init(iobuf); +} + +void kiobuf_dtor(void * objp, kmem_cache_t * cachep, unsigned long flag) +{ + struct kiobuf * iobuf = (struct kiobuf *) objp; + if (iobuf->initialized) { + kfree(iobuf->maplist); + free_kiobuf_bhs(iobuf); + } +} + int alloc_kiovec(int nr, struct kiobuf **bufp) { int i; @@ -98,10 +122,11 @@ iobuf = kmem_cache_alloc(kiobuf_cachep, GFP_KERNEL); if (unlikely(!iobuf)) goto nomem; - if (unlikely(kiobuf_init(iobuf))) - goto nomem2; - if (unlikely(alloc_kiobuf_bhs(iobuf))) - goto nomem2; + if (unlikely(!iobuf->initialized)) { + /* try again to complete previously failed ctor */ + if (unlikely(kiobuf_init(iobuf))) + goto nomem2; + } bufp[i] = iobuf; } @@ -121,11 +146,10 @@ for (i = 0; i < nr; i++) { iobuf = bufp[i]; - if (iobuf->locked) - unlock_kiovec(1, &iobuf); - kfree(iobuf->maplist); - free_kiobuf_bhs(iobuf); - kmem_cache_free(kiobuf_cachep, bufp[i]); + init_waitqueue_head(&iobuf->wait_queue); + iobuf->io_count.counter = 0; + iobuf->end_io = NULL; + kmem_cache_free(kiobuf_cachep, iobuf); } } @@ -180,7 +204,7 @@ void __init iobuf_cache_init(void) { kiobuf_cachep = kmem_cache_create("kiobuf", sizeof(struct kiobuf), - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + 0, SLAB_HWCACHE_ALIGN, kiobuf_ctor, kiobuf_dtor); if (!kiobuf_cachep) panic("Cannot create kiobuf SLAB cache"); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/jbd/journal.c linux.22-ac2/fs/jbd/journal.c --- linux.vanilla/fs/jbd/journal.c 2003-08-28 16:45:40.000000000 +0100 +++ linux.22-ac2/fs/jbd/journal.c 2003-07-30 21:19:24.000000000 +0100 @@ -1802,9 +1802,9 @@ if (buffer_jbd(bh)) { /* Someone did it for us! 
*/ - J_ASSERT_BH(bh, bh->b_private != NULL); + J_ASSERT_BH(bh, bh->b_journal_head != NULL); journal_free_journal_head(jh); - jh = bh->b_private; + jh = bh->b_journal_head; } else { /* * We actually don't need jh_splice_lock when @@ -1812,7 +1812,7 @@ */ spin_lock(&jh_splice_lock); set_bit(BH_JBD, &bh->b_state); - bh->b_private = jh; + bh->b_journal_head = jh; jh->b_bh = bh; atomic_inc(&bh->b_count); spin_unlock(&jh_splice_lock); @@ -1821,7 +1821,7 @@ } jh->b_jcount++; spin_unlock(&journal_datalist_lock); - return bh->b_private; + return bh->b_journal_head; } /* @@ -1854,7 +1854,7 @@ J_ASSERT_BH(bh, jh2bh(jh) == bh); BUFFER_TRACE(bh, "remove journal_head"); spin_lock(&jh_splice_lock); - bh->b_private = NULL; + bh->b_journal_head = NULL; jh->b_bh = NULL; /* debug, really */ clear_bit(BH_JBD, &bh->b_state); __brelse(bh); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/jbd/transaction.c linux.22-ac2/fs/jbd/transaction.c --- linux.vanilla/fs/jbd/transaction.c 2003-06-14 00:11:39.000000000 +0100 +++ linux.22-ac2/fs/jbd/transaction.c 2003-06-29 16:09:21.000000000 +0100 @@ -1440,6 +1440,7 @@ if (handle->h_sync) { do { old_handle_count = transaction->t_handle_count; + set_current_state(TASK_RUNNING); yield(); } while (old_handle_count != transaction->t_handle_count); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/jffs2/background.c linux.22-ac2/fs/jffs2/background.c --- linux.vanilla/fs/jffs2/background.c 2001-10-25 08:07:09.000000000 +0100 +++ linux.22-ac2/fs/jffs2/background.c 2003-06-29 16:09:20.000000000 +0100 @@ -106,9 +106,6 @@ sprintf(current->comm, "jffs2_gcd_mtd%d", c->mtd->index); - /* FIXME in the 2.2 backport */ - current->nice = 10; - for (;;) { spin_lock_irq(¤t->sigmask_lock); siginitsetinv (¤t->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT)); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/lockd/svclock.c linux.22-ac2/fs/lockd/svclock.c --- linux.vanilla/fs/lockd/svclock.c 2003-08-28 16:45:42.000000000 +0100 +++ linux.22-ac2/fs/lockd/svclock.c 2003-07-06 18:45:07.000000000 +0100 @@ -176,8 +176,14 @@ struct nlm_rqst *call; /* Create host handle for callback */ + /* We must up the semaphore in case the host lookup does + * garbage collection (which calls nlmsvc_traverse_blocks), + * but this shouldn't be a problem because nlmsvc_lock has + * to retry the lock after this anyway */ + up(&file->f_sema); host = nlmclnt_lookup_host(&rqstp->rq_addr, rqstp->rq_prot, rqstp->rq_vers); + down(&file->f_sema); if (host == NULL) return NULL; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/lockd/svcproc.c linux.22-ac2/fs/lockd/svcproc.c --- linux.vanilla/fs/lockd/svcproc.c 2003-08-28 16:45:42.000000000 +0100 +++ linux.22-ac2/fs/lockd/svcproc.c 2003-07-06 21:27:49.000000000 +0100 @@ -561,7 +561,6 @@ #define nlmsvc_decode_lockres nlmsvc_decode_void #define nlmsvc_decode_unlockres nlmsvc_decode_void #define nlmsvc_decode_cancelres nlmsvc_decode_void -#define nlmsvc_decode_grantedres nlmsvc_decode_void #define nlmsvc_proc_none nlmsvc_proc_null #define nlmsvc_proc_test_res nlmsvc_proc_null diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/locks.c linux.22-ac2/fs/locks.c --- linux.vanilla/fs/locks.c 2003-08-28 16:45:42.000000000 +0100 +++ linux.22-ac2/fs/locks.c 2003-08-28 17:06:29.000000000 +0100 @@ -935,8 +935,11 @@ goto next_lock; /* If the next lock in the list has entirely bigger * addresses than the new 
one, insert the lock here. + * + * be careful if fl_end == OFFSET_MAX --okir */ - if (fl->fl_start > caller->fl_end + 1) + if (fl->fl_start > caller->fl_end + 1 + && caller->fl_end != OFFSET_MAX) break; /* If we come here, the new and old lock are of the diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/Makefile linux.22-ac2/fs/Makefile --- linux.vanilla/fs/Makefile 2003-08-28 16:45:40.000000000 +0100 +++ linux.22-ac2/fs/Makefile 2003-07-22 18:32:10.000000000 +0100 @@ -8,13 +8,14 @@ O_TARGET := fs.o export-objs := filesystems.o open.o dcache.o buffer.o dquot.o -mod-subdirs := nls +mod-subdirs := nls xfs obj-y := open.o read_write.o devices.o file_table.o buffer.o \ super.o block_dev.o char_dev.o stat.o exec.o pipe.o namei.o \ fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \ dcache.o inode.o attr.o bad_inode.o file.o iobuf.o dnotify.o \ - filesystems.o namespace.o seq_file.o xattr.o quota.o + filesystems.o namespace.o seq_file.o quota.o xattr.o + obj-$(CONFIG_QUOTA) += dquot.o quota_v1.o obj-$(CONFIG_QFMT_V2) += quota_v2.o @@ -66,9 +67,11 @@ subdir-$(CONFIG_SUN_OPENPROMFS) += openpromfs subdir-$(CONFIG_BEFS_FS) += befs subdir-$(CONFIG_JFS_FS) += jfs +subdir-$(CONFIG_XFS_FS) += xfs obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o +obj-$(CONFIG_BINFMT_SOM) += binfmt_som.o obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/namei.c linux.22-ac2/fs/namei.c --- linux.vanilla/fs/namei.c 2003-08-28 16:45:42.000000000 +0100 +++ linux.22-ac2/fs/namei.c 2003-08-01 17:51:49.000000000 +0100 @@ -635,7 +635,8 @@ * Check the cached dentry for staleness. */ dentry = nd->dentry; - if (dentry && dentry->d_op && dentry->d_op->d_revalidate) { + if (dentry && dentry->d_sb + && (dentry->d_sb->s_type->fs_flags & FS_ALWAYS_REVAL)) { err = -ESTALE; if (!dentry->d_op->d_revalidate(dentry, 0)) { d_invalidate(dentry); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/nfs/dir.c linux.22-ac2/fs/nfs/dir.c --- linux.vanilla/fs/nfs/dir.c 2002-11-29 21:27:23.000000000 +0000 +++ linux.22-ac2/fs/nfs/dir.c 2003-06-29 16:09:17.000000000 +0100 @@ -36,6 +36,8 @@ static int nfs_readdir(struct file *, void *, filldir_t); static struct dentry *nfs_lookup(struct inode *, struct dentry *); +static int nfs_cached_lookup(struct inode *, struct dentry *, + struct nfs_fh *, struct nfs_fattr *); static int nfs_create(struct inode *, struct dentry *, int); static int nfs_mkdir(struct inode *, struct dentry *, int); static int nfs_rmdir(struct inode *, struct dentry *); @@ -109,13 +111,15 @@ error = NFS_PROTO(inode)->readdir(inode, cred, desc->entry->cookie, page, NFS_SERVER(inode)->dtsize, desc->plus); /* We requested READDIRPLUS, but the server doesn't grok it */ - if (desc->plus && error == -ENOTSUPP) { - NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS; - desc->plus = 0; - goto again; - } - if (error < 0) + if (error < 0) { + if (error == -ENOTSUPP && desc->plus) { + NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS; + NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS; + desc->plus = 0; + goto again; + } goto error; + } SetPageUptodate(page); /* Ensure consistent page alignment of the data. 
* Note: assumes we have exclusive access to this mapping either @@ -194,8 +198,7 @@ dfprintk(VFS, "NFS: find_dirent_page() searching directory page %ld\n", desc->page_index); - desc->plus = NFS_USE_READDIRPLUS(inode); - page = read_cache_page(&inode->i_data, desc->page_index, + page = read_cache_page(inode->i_mapping, desc->page_index, (filler_t *)nfs_readdir_filler, desc); if (IS_ERR(page)) { status = PTR_ERR(page); @@ -246,6 +249,24 @@ return res; } +static unsigned int nfs_type2dtype[] = { + DT_UNKNOWN, + DT_REG, + DT_DIR, + DT_BLK, + DT_CHR, + DT_LNK, + DT_SOCK, + DT_UNKNOWN, + DT_FIFO +}; + +static inline +unsigned int nfs_type_to_d_type(enum nfs_ftype type) +{ + return nfs_type2dtype[type]; +} + /* * Once we've found the start of the dirent within a page: fill 'er up... */ @@ -262,11 +283,17 @@ dfprintk(VFS, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n", (long long)desc->target); for(;;) { + unsigned d_type = DT_UNKNOWN; /* Note: entry->prev_cookie contains the cookie for * retrieving the current dirent on the server */ fileid = nfs_fileid_to_ino_t(entry->ino); + + /* Use readdirplus info */ + if (desc->plus && (entry->fattr->valid & NFS_ATTR_FATTR)) + d_type = nfs_type_to_d_type(entry->fattr->type); + res = filldir(dirent, entry->name, entry->len, - entry->prev_cookie, fileid, DT_UNKNOWN); + entry->prev_cookie, fileid, d_type); if (res < 0) break; file->f_pos = desc->target = entry->cookie; @@ -333,7 +360,8 @@ /* Reset read descriptor so it searches the page cache from * the start upon the next call to readdir_search_pagecache() */ desc->page_index = 0; - memset(desc->entry, 0, sizeof(*desc->entry)); + desc->entry->cookie = desc->entry->prev_cookie = 0; + desc->entry->eof = 0; out: dfprintk(VFS, "NFS: uncached_readdir() returns %d\n", status); return status; @@ -352,9 +380,11 @@ nfs_readdir_descriptor_t my_desc, *desc = &my_desc; struct nfs_entry my_entry; + struct nfs_fh fh; + struct nfs_fattr fattr; long res; - res = nfs_revalidate(dentry); + res = nfs_revalidate_inode(NFS_SERVER(inode), inode); if (res < 0) return res; @@ -365,12 +395,16 @@ * itself. */ memset(desc, 0, sizeof(*desc)); - memset(&my_entry, 0, sizeof(my_entry)); - desc->file = filp; desc->target = filp->f_pos; - desc->entry = &my_entry; desc->decode = NFS_PROTO(inode)->decode_dirent; + desc->plus = NFS_USE_READDIRPLUS(inode); + + my_entry.cookie = my_entry.prev_cookie = 0; + my_entry.eof = 0; + my_entry.fh = &fh; + my_entry.fattr = &fattr; + desc->entry = &my_entry; while(!desc->entry->eof) { res = readdir_search_pagecache(desc); @@ -434,16 +468,9 @@ } static inline -int nfs_lookup_verify_inode(struct inode *inode, int flags) +int nfs_lookup_verify_inode(struct inode *inode) { - struct nfs_server *server = NFS_SERVER(inode); - /* - * If we're interested in close-to-open cache consistency, - * then we revalidate the inode upon lookup. 
- */ - if (!(server->flags & NFS_MOUNT_NOCTO) && !(flags & LOOKUP_CONTINUE)) - NFS_CACHEINV(inode); - return nfs_revalidate_inode(server, inode); + return nfs_revalidate_inode(NFS_SERVER(inode), inode); } /* @@ -497,11 +524,20 @@ /* Force a full look up iff the parent directory has changed */ if (nfs_check_verifier(dir, dentry)) { - if (nfs_lookup_verify_inode(inode, flags)) + if (nfs_lookup_verify_inode(inode)) goto out_bad; goto out_valid; } + error = nfs_cached_lookup(dir, dentry, &fhandle, &fattr); + if (!error) { + if (memcmp(NFS_FH(inode), &fhandle, sizeof(struct nfs_fh))!= 0) + goto out_bad; + if (nfs_lookup_verify_inode(inode)) + goto out_bad; + goto out_valid_renew; + } + if (NFS_STALE(inode)) goto out_bad; @@ -513,6 +549,7 @@ if ((error = nfs_refresh_inode(inode, &fattr)) != 0) goto out_bad; + out_valid_renew: nfs_renew_times(dentry); out_valid: unlock_kernel(); @@ -588,6 +625,18 @@ error = -ENOMEM; dentry->d_op = &nfs_dentry_operations; + error = nfs_cached_lookup(dir, dentry, &fhandle, &fattr); + if (!error) { + error = -EACCES; + inode = nfs_fhget(dentry, &fhandle, &fattr); + if (inode) { + d_add(dentry, inode); + nfs_renew_times(dentry); + error = 0; + } + goto out; + } + error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr); inode = NULL; if (error == -ENOENT) @@ -606,6 +655,79 @@ return ERR_PTR(error); } +static inline +int find_dirent_name(nfs_readdir_descriptor_t *desc, struct page *page, struct dentry *dentry) +{ + struct nfs_entry *entry = desc->entry; + int status; + + while((status = dir_decode(desc)) == 0) { + if (entry->len != dentry->d_name.len) + continue; + if (memcmp(entry->name, dentry->d_name.name, entry->len)) + continue; + if (!(entry->fattr->valid & NFS_ATTR_FATTR)) + continue; + break; + } + return status; +} + +/* + * Use the cached Readdirplus results in order to avoid a LOOKUP call + * whenever we believe that the parent directory has not changed. + * + * We assume that any file creation/rename changes the directory mtime. + * As this results in a page cache invalidation whenever it occurs, + * we don't require any other tests for cache coherency. + */ +static +int nfs_cached_lookup(struct inode *dir, struct dentry *dentry, + struct nfs_fh *fh, struct nfs_fattr *fattr) +{ + nfs_readdir_descriptor_t desc; + struct nfs_server *server; + struct nfs_entry entry; + struct page *page; + unsigned long timestamp = NFS_MTIME_UPDATE(dir); + int res; + + if (!NFS_USE_READDIRPLUS(dir)) + return -ENOENT; + server = NFS_SERVER(dir); + if (server->flags & NFS_MOUNT_NOAC) + return -ENOENT; + nfs_revalidate_inode(server, dir); + + entry.fh = fh; + entry.fattr = fattr; + + desc.decode = NFS_PROTO(dir)->decode_dirent; + desc.entry = &entry; + desc.page_index = 0; + desc.plus = 1; + + for(;(page = find_get_page(dir->i_mapping, desc.page_index)); desc.page_index++) { + + res = -EIO; + if (Page_Uptodate(page)) { + desc.ptr = kmap(page); + res = find_dirent_name(&desc, page, dentry); + kunmap(page); + } + page_cache_release(page); + + if (res == 0) + goto out_found; + if (res != -EAGAIN) + break; + } + return -ENOENT; + out_found: + fattr->timestamp = timestamp; + return 0; +} + /* * Code common to create, mkdir, and mknod. 
*/ @@ -613,7 +735,7 @@ struct nfs_fattr *fattr) { struct inode *inode; - int error = -EACCES; + int error = 0; if (fhandle->size == 0 || !(fattr->valid & NFS_ATTR_FATTR)) { struct inode *dir = dentry->d_parent->d_inode; @@ -625,9 +747,12 @@ if (inode) { d_instantiate(dentry, inode); nfs_renew_times(dentry); - error = 0; + } else { + error = -ENOMEM; + goto out_err; } return error; + out_err: d_drop(dentry); return error; @@ -1082,34 +1207,62 @@ int nfs_permission(struct inode *inode, int mask) { - int error = vfs_permission(inode, mask); - - if (!NFS_PROTO(inode)->access) - goto out; - - if (error == -EROFS) - goto out; - - /* - * Trust UNIX mode bits except: - * - * 1) When override capabilities may have been invoked - * 2) When root squashing may be involved - * 3) When ACLs may overturn a negative answer */ - if (!capable(CAP_DAC_OVERRIDE) && !capable(CAP_DAC_READ_SEARCH) - && (current->fsuid != 0) && (current->fsgid != 0) - && error != -EACCES) - goto out; + struct nfs_access_cache *cache = &NFS_I(inode)->cache_access; + struct rpc_cred *cred; + int mode = inode->i_mode; + int error; - error = NFS_PROTO(inode)->access(inode, mask, 0); + if (mask & MAY_WRITE) { + /* + * + * Nobody gets write access to a read-only fs. + * + */ + if (IS_RDONLY(inode) && + (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) + return -EROFS; - if (error == -EACCES && NFS_CLIENT(inode)->cl_droppriv && - current->uid != 0 && current->gid != 0 && - (current->fsuid != current->uid || current->fsgid != current->gid)) - error = NFS_PROTO(inode)->access(inode, mask, 1); + /* + * + * Nobody gets write access to an immutable file. + * + */ + if (IS_IMMUTABLE(inode)) + return -EACCES; + } - out: - return error; + if (!NFS_PROTO(inode)->access) + goto out_notsup; + cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0); + if (cache->cred == cred + && time_before(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode))) { + if (!cache->err) { + /* Is the mask a subset of an accepted mask? */ + if ((cache->mask & mask) == mask) + goto out_cached; + } else { + /* ...or is it a superset of a rejected mask? 
*/ + if ((cache->mask & mask) == cache->mask) + goto out_cached; + } + } + error = NFS_PROTO(inode)->access(inode, cred, mask); + if (!error || error == -EACCES) { + cache->jiffies = jiffies; + if (cache->cred) + put_rpccred(cache->cred); + cache->cred = cred; + cache->mask = mask; + cache->err = error; + return error; + } + put_rpccred(cred); +out_notsup: + nfs_revalidate_inode(NFS_SERVER(inode), inode); + return vfs_permission(inode, mask); +out_cached: + put_rpccred(cred); + return cache->err; } /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/nfs/direct.c linux.22-ac2/fs/nfs/direct.c --- linux.vanilla/fs/nfs/direct.c 2003-08-28 16:45:42.000000000 +0100 +++ linux.22-ac2/fs/nfs/direct.c 2003-07-17 13:57:57.000000000 +0100 @@ -47,7 +47,7 @@ #define NFSDBG_FACILITY (NFSDBG_PAGECACHE | NFSDBG_VFS) #define VERF_SIZE (2 * sizeof(__u32)) -static inline int +static /* inline */ int nfs_direct_read_rpc(struct file *file, struct nfs_readargs *arg) { int result; @@ -75,7 +75,7 @@ return result; } -static inline int +static /* inline */ int nfs_direct_write_rpc(struct file *file, struct nfs_writeargs *arg, struct nfs_writeverf *verf) { @@ -132,7 +132,7 @@ } #ifdef CONFIG_NFS_V3 -static inline int +static /* inline */ int nfs_direct_commit_rpc(struct inode *inode, loff_t offset, size_t count, struct nfs_writeverf *verf) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/nfs/inode.c linux.22-ac2/fs/nfs/inode.c --- linux.vanilla/fs/nfs/inode.c 2002-11-29 21:27:23.000000000 +0000 +++ linux.22-ac2/fs/nfs/inode.c 2003-06-29 16:09:17.000000000 +0100 @@ -146,10 +146,14 @@ static void nfs_clear_inode(struct inode *inode) { - struct rpc_cred *cred = NFS_I(inode)->mm_cred; + struct nfs_inode_info *nfsi = NFS_I(inode); + struct rpc_cred *cred = nfsi->mm_cred; if (cred) put_rpccred(cred); + cred = nfsi->cache_access.cred; + if (cred) + put_rpccred(cred); } void @@ -251,6 +255,72 @@ } /* + * Set up the NFS superblock private area using probed values + */ +static int +nfs_setup_superblock(struct super_block *sb, struct nfs_fh *rootfh) +{ + struct nfs_server *server = &sb->u.nfs_sb.s_server; + struct nfs_fattr fattr; + struct nfs_fsinfo fsinfo = { &fattr, }; + struct nfs_pathconf pathinfo = { &fattr, }; + int maxlen, res; + + res = server->rpc_ops->fsinfo(server, rootfh, &fsinfo); + if (res < 0) + return res; + + /* Work out a lot of parameters */ + if (!server->rsize) + server->rsize = nfs_block_size(fsinfo.rtpref, NULL); + if (!server->wsize) + server->wsize = nfs_block_size(fsinfo.wtpref, NULL); + + /* NFSv3: we don't have bsize, but rather rtmult and wtmult... 
*/ + if (!fsinfo.wtmult) + fsinfo.wtmult = 512; + sb->s_blocksize = nfs_block_bits(fsinfo.wtmult, &sb->s_blocksize_bits); + + if (server->rsize > fsinfo.rtmax) + server->rsize = fsinfo.rtmax; + if (server->wsize > fsinfo.wtmax) + server->wsize = fsinfo.wtmax; + + server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + if (server->rpages > NFS_READ_MAXIOV) { + server->rpages = NFS_READ_MAXIOV; + server->rsize = server->rpages << PAGE_CACHE_SHIFT; + } + + server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + if (server->wpages > NFS_WRITE_MAXIOV) { + server->wpages = NFS_WRITE_MAXIOV; + server->wsize = server->wpages << PAGE_CACHE_SHIFT; + } + + server->dtsize = nfs_block_size(fsinfo.dtpref, NULL); + if (server->dtsize > PAGE_CACHE_SIZE) + server->dtsize = PAGE_CACHE_SIZE; + if (server->dtsize > server->rsize) + server->dtsize = server->rsize; + + maxlen = (server->rpc_ops->version == 2) ? NFS2_MAXNAMLEN : NFS3_MAXNAMLEN; + if (!server->namelen) { + res = server->rpc_ops->pathconf(server, rootfh, &pathinfo); + if (!res) + server->namelen = pathinfo.name_max; + } + if (!server->namelen || server->namelen > maxlen) + server->namelen = maxlen; + + sb->s_maxbytes = fsinfo.maxfilesize; + if (sb->s_maxbytes > MAX_LFS_FILESIZE) + sb->s_maxbytes = MAX_LFS_FILESIZE; + + return 0; +} + +/* * The way this works is that the mount process passes a structure * in the data argument which contains the server's IP address * and the root file handle obtained from the server's mount @@ -268,8 +338,7 @@ unsigned int authflavor; struct sockaddr_in srvaddr; struct rpc_timeout timeparms; - struct nfs_fsinfo fsinfo; - int tcp, version, maxlen; + int tcp, version; memset(&sb->u.nfs_sb, 0, sizeof(sb->u.nfs_sb)); if (!data) @@ -298,11 +367,11 @@ sb->s_magic = NFS_SUPER_MAGIC; sb->s_op = &nfs_sops; - sb->s_blocksize_bits = 0; - sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits); server = &sb->u.nfs_sb.s_server; - server->rsize = nfs_block_size(data->rsize, NULL); - server->wsize = nfs_block_size(data->wsize, NULL); + if (data->rsize) + server->rsize = nfs_block_size(data->rsize, NULL); + if (data->wsize) + server->wsize = nfs_block_size(data->wsize, NULL); server->flags = data->flags & NFS_MOUNT_FLAGMASK; if (data->flags & NFS_MOUNT_NOAC) { @@ -326,12 +395,14 @@ INIT_LIST_HEAD(&server->lru_busy); nfsv3_try_again: + server->caps = 0; /* Check NFS protocol revision and initialize RPC op vector * and file handle pool. */ if (data->flags & NFS_MOUNT_VER3) { #ifdef CONFIG_NFS_V3 server->rpc_ops = &nfs_v3_clientops; version = 3; + server->caps |= NFS_CAP_READDIRPLUS; if (data->version < 4) { printk(KERN_NOTICE "NFS: NFSv3 not supported by mount program.\n"); goto out_unlock; @@ -409,62 +480,11 @@ sb->s_root->d_op = &nfs_dentry_operations; /* Get some general file system info */ - if (server->rpc_ops->statfs(server, root, &fsinfo) >= 0) { - if (server->namelen == 0) - server->namelen = fsinfo.namelen; - } else { + if (nfs_setup_superblock(sb, root) < 0) { printk(KERN_NOTICE "NFS: cannot retrieve file system info.\n"); goto out_no_root; - } - - /* Work out a lot of parameters */ - if (data->rsize == 0) - server->rsize = nfs_block_size(fsinfo.rtpref, NULL); - if (data->wsize == 0) - server->wsize = nfs_block_size(fsinfo.wtpref, NULL); - /* NFSv3: we don't have bsize, but rather rtmult and wtmult... */ - if (!fsinfo.bsize) - fsinfo.bsize = (fsinfo.rtmult>fsinfo.wtmult) ? 
fsinfo.rtmult : fsinfo.wtmult; - /* Also make sure we don't go below rsize/wsize since - * RPC calls are expensive */ - if (fsinfo.bsize < server->rsize) - fsinfo.bsize = server->rsize; - if (fsinfo.bsize < server->wsize) - fsinfo.bsize = server->wsize; - - if (data->bsize == 0) - sb->s_blocksize = nfs_block_bits(fsinfo.bsize, &sb->s_blocksize_bits); - if (server->rsize > fsinfo.rtmax) - server->rsize = fsinfo.rtmax; - if (server->wsize > fsinfo.wtmax) - server->wsize = fsinfo.wtmax; - - server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - if (server->rpages > NFS_READ_MAXIOV) { - server->rpages = NFS_READ_MAXIOV; - server->rsize = server->rpages << PAGE_CACHE_SHIFT; } - server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - if (server->wpages > NFS_WRITE_MAXIOV) { - server->wpages = NFS_WRITE_MAXIOV; - server->wsize = server->wpages << PAGE_CACHE_SHIFT; - } - - server->dtsize = nfs_block_size(fsinfo.dtpref, NULL); - if (server->dtsize > PAGE_CACHE_SIZE) - server->dtsize = PAGE_CACHE_SIZE; - if (server->dtsize > server->rsize) - server->dtsize = server->rsize; - - maxlen = (version == 2) ? NFS2_MAXNAMLEN : NFS3_MAXNAMLEN; - - if (server->namelen == 0 || server->namelen > maxlen) - server->namelen = maxlen; - - sb->s_maxbytes = fsinfo.maxfilesize; - if (sb->s_maxbytes > MAX_LFS_FILESIZE) - sb->s_maxbytes = MAX_LFS_FILESIZE; /* Fire up the writeback cache */ if (nfs_reqlist_alloc(server) < 0) { @@ -526,7 +546,8 @@ struct nfs_server *server = &sb->u.nfs_sb.s_server; unsigned char blockbits; unsigned long blockres; - struct nfs_fsinfo res; + struct nfs_fattr attr; + struct nfs_fsstat res = { &attr, }; int error; error = server->rpc_ops->statfs(server, NFS_FH(sb->s_root->d_inode), &res); @@ -534,18 +555,15 @@ if (error < 0) goto out_err; - if (res.bsize == 0) - res.bsize = sb->s_blocksize; - buf->f_bsize = nfs_block_bits(res.bsize, &blockbits); + buf->f_bsize = sb->s_blocksize; + blockbits = sb->s_blocksize_bits; blockres = (1 << blockbits) - 1; buf->f_blocks = (res.tbytes + blockres) >> blockbits; buf->f_bfree = (res.fbytes + blockres) >> blockbits; buf->f_bavail = (res.abytes + blockres) >> blockbits; buf->f_files = res.tfiles; buf->f_ffree = res.afiles; - if (res.namelen == 0 || res.namelen > server->namelen) - res.namelen = server->namelen; - buf->f_namelen = res.namelen; + buf->f_namelen = server->namelen; return 0; out_err: printk("nfs_statfs: statfs error = %d\n", -error); @@ -623,36 +641,35 @@ nfs_zap_caches(inode); } +/* Don't use READDIRPLUS on directories that we believe are too large */ +#define NFS_LIMIT_READDIRPLUS (8*PAGE_SIZE) + /* * Fill in inode information from the fattr. */ static void nfs_fill_inode(struct inode *inode, struct nfs_fh *fh, struct nfs_fattr *fattr) { - /* - * Check whether the mode has been set, as we only want to - * do this once. (We don't allow inodes to change types.) + NFS_FILEID(inode) = fattr->fileid; + inode->i_mode = fattr->mode; + /* Why so? Because we want revalidate for devices/FIFOs, and + * that's precisely what we have in nfs_file_inode_operations. */ - if (inode->i_mode == 0) { - NFS_FILEID(inode) = fattr->fileid; - inode->i_mode = fattr->mode; - /* Why so? Because we want revalidate for devices/FIFOs, and - * that's precisely what we have in nfs_file_inode_operations. 
- */ - inode->i_op = &nfs_file_inode_operations; - if (S_ISREG(inode->i_mode)) { - inode->i_fop = &nfs_file_operations; - inode->i_data.a_ops = &nfs_file_aops; - } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = &nfs_dir_inode_operations; - inode->i_fop = &nfs_dir_operations; - } else if (S_ISLNK(inode->i_mode)) - inode->i_op = &nfs_symlink_inode_operations; - else - init_special_inode(inode, inode->i_mode, fattr->rdev); - memcpy(&inode->u.nfs_i.fh, fh, sizeof(inode->u.nfs_i.fh)); - } - nfs_refresh_inode(inode, fattr); + inode->i_op = &nfs_file_inode_operations; + if (S_ISREG(inode->i_mode)) { + inode->i_fop = &nfs_file_operations; + inode->i_data.a_ops = &nfs_file_aops; + } else if (S_ISDIR(inode->i_mode)) { + inode->i_op = &nfs_dir_inode_operations; + inode->i_fop = &nfs_dir_operations; + if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS) + && fattr->size <= NFS_LIMIT_READDIRPLUS) + NFS_FLAGS(inode) |= NFS_INO_ADVISE_RDPLUS; + } else if (S_ISLNK(inode->i_mode)) + inode->i_op = &nfs_symlink_inode_operations; + else + init_special_inode(inode, inode->i_mode, fattr->rdev); + memcpy(&inode->u.nfs_i.fh, fh, sizeof(inode->u.nfs_i.fh)); } struct nfs_find_desc { @@ -727,7 +744,14 @@ if (!(inode = iget4(sb, ino, nfs_find_actor, &desc))) goto out_no_inode; - nfs_fill_inode(inode, fh, fattr); + /* + * Check whether the mode has been set, as we only want to + * do this once. (We don't allow inodes to change types.) + */ + if (inode->i_mode == 0) + nfs_fill_inode(inode, fh, fattr); + + nfs_refresh_inode(inode, fattr); dprintk("NFS: __nfs_fhget(%x/%Ld ct=%d)\n", inode->i_dev, (long long)NFS_FILEID(inode), atomic_read(&inode->i_count)); @@ -740,12 +764,37 @@ goto out; } +#define IS_TRUNC_DOWN(_inode, _attr) \ + (_attr->ia_valid & ATTR_SIZE && _attr->ia_size < _inode->i_size) +static inline void +nfs_inode_flush_on(struct inode *inode) +{ + atomic_inc(&(NFS_I(inode)->flushers)); + lock_kernel(); + NFS_FLAGS(inode) |= NFS_INO_FLUSH; + unlock_kernel(); + return; +} + +static inline void +nfs_inode_flush_off(struct inode *inode) +{ + atomic_dec(&(NFS_I(inode)->flushers)); + if (atomic_read(&(NFS_I(inode)->flushers)) == 0) { + lock_kernel(); + NFS_FLAGS(inode) &= ~NFS_INO_FLUSH; + wake_up(&inode->i_wait); + unlock_kernel(); + } + return; +} + int nfs_notify_change(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; struct nfs_fattr fattr; - int error; + int error, flusher=0; /* * Make sure the inode is up-to-date. @@ -761,11 +810,30 @@ if (!S_ISREG(inode->i_mode)) attr->ia_valid &= ~ATTR_SIZE; - filemap_fdatasync(inode->i_mapping); - error = nfs_wb_all(inode); - filemap_fdatawait(inode->i_mapping); - if (error) - goto out; + /* + * If the file is going to be truncated down + * make sure all of the mmapped pages get flushed + * by telling nfs_writepage to flush them synchronously. + * If they are flushed asynchronously and the file size + * changes (again) before they are flushed, data corruption + * will occur. + * XXX: It would be nice if there was an filemap_ api + * that would tell how many (if any) dirty mmapped pages there + * are. That way I would have to take the lock_kernel() when + * its not necessary. 
+ */ + if (IS_TRUNC_DOWN(inode, attr)) { + flusher = 1; + nfs_inode_flush_on(inode); + } + + do { + filemap_fdatasync(inode->i_mapping); + error = nfs_wb_all(inode); + filemap_fdatawait(inode->i_mapping); + if (error) + goto out; + } while (flusher && NFS_I(inode)->npages); error = NFS_PROTO(inode)->setattr(inode, &fattr, attr); if (error) @@ -795,6 +863,9 @@ NFS_CACHEINV(inode); error = nfs_refresh_inode(inode, &fattr); out: + if (flusher) { + nfs_inode_flush_off(inode); + } return error; } @@ -850,15 +921,23 @@ { struct rpc_auth *auth; struct rpc_cred *cred; + int err = 0; lock_kernel(); + /* Ensure that we revalidate the data cache */ + if (! (NFS_SERVER(inode)->flags & NFS_MOUNT_NOCTO)) { + err = __nfs_revalidate_inode(NFS_SERVER(inode),inode); + if (err) + goto out; + } auth = NFS_CLIENT(inode)->cl_auth; cred = rpcauth_lookupcred(auth, 0); filp->private_data = cred; if (filp->f_mode & FMODE_WRITE) nfs_set_mmcred(inode, cred); +out: unlock_kernel(); - return 0; + return err; } int nfs_release(struct inode *inode, struct file *filp) @@ -993,6 +1072,9 @@ goto out_err; } + /* Throw out obsolete READDIRPLUS attributes */ + if (time_before(fattr->timestamp, NFS_READTIME(inode))) + return 0; /* * Make sure the inode's type hasn't changed. */ @@ -1011,7 +1093,7 @@ /* * Update the read time so we don't revalidate too often. */ - NFS_READTIME(inode) = jiffies; + NFS_READTIME(inode) = fattr->timestamp; /* * Note: NFS_CACHE_ISIZE(inode) reflects the state of the cache. @@ -1060,7 +1142,8 @@ inode->i_atime = new_atime; if (NFS_CACHE_MTIME(inode) != new_mtime) { - NFS_MTIME_UPDATE(inode) = jiffies; + if (invalid) + NFS_MTIME_UPDATE(inode) = fattr->timestamp; NFS_CACHE_MTIME(inode) = new_mtime; inode->i_mtime = nfs_time_to_secs(new_mtime); } @@ -1068,6 +1151,16 @@ NFS_CACHE_ISIZE(inode) = new_size; inode->i_size = new_isize; + if (inode->i_mode != fattr->mode || + inode->i_uid != fattr->uid || + inode->i_gid != fattr->gid) { + struct rpc_cred **cred = &NFS_I(inode)->cache_access.cred; + if (*cred) { + put_rpccred(*cred); + *cred = NULL; + } + } + inode->i_mode = fattr->mode; inode->i_nlink = fattr->nlink; inode->i_uid = fattr->uid; @@ -1125,7 +1218,7 @@ /* * File system information */ -static DECLARE_FSTYPE(nfs_fs_type, "nfs", nfs_read_super, FS_ODD_RENAME); +static DECLARE_FSTYPE(nfs_fs_type, "nfs", nfs_read_super, FS_ODD_RENAME|FS_ALWAYS_REVAL); extern int nfs_init_nfspagecache(void); extern void nfs_destroy_nfspagecache(void); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/nfs/nfs2xdr.c linux.22-ac2/fs/nfs/nfs2xdr.c --- linux.vanilla/fs/nfs/nfs2xdr.c 2002-11-29 21:27:23.000000000 +0000 +++ linux.22-ac2/fs/nfs/nfs2xdr.c 2003-06-29 16:09:17.000000000 +0100 @@ -118,6 +118,7 @@ fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO; fattr->rdev = 0; } + fattr->timestamp = jiffies; return p; } @@ -395,7 +396,7 @@ int hdrlen, recvd; int status, nr; unsigned int len, pglen; - u32 *end, *entry; + u32 *end, *entry, *kaddr; if ((status = ntohl(*p++))) return -nfs_stat_to_errno(status); @@ -415,7 +416,7 @@ if (pglen > recvd) pglen = recvd; page = rcvbuf->pages; - p = kmap(*page); + kaddr = p = kmap_atomic(*page, KM_USER0); entry = p; end = (u32 *)((char *)p + pglen); for (nr = 0; *p++; nr++) { @@ -436,7 +437,7 @@ if (!nr && (entry[0] != 0 || entry[1] == 0)) goto short_pkt; out: - kunmap(*page); + kunmap_atomic(kaddr, KM_USER0); return nr; short_pkt: entry[0] = entry[1] = 0; @@ -447,8 +448,8 @@ } goto out; err_unmap: - kunmap(*page); - return -errno_NFSERR_IO; + nr = 
-errno_NFSERR_IO; + goto out; } u32 * @@ -568,7 +569,7 @@ xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); } - strlen = (u32*)kmap(rcvbuf->pages[0]); + strlen = (u32*)kmap_atomic(rcvbuf->pages[0], KM_USER0); /* Convert length of symlink */ len = ntohl(*strlen); if (len > rcvbuf->page_len) @@ -577,7 +578,7 @@ /* NULL terminate the string we got */ string = (char *)(strlen + 1); string[len] = 0; - kunmap(rcvbuf->pages[0]); + kunmap_atomic(strlen, KM_USER0); return 0; } @@ -595,36 +596,18 @@ * Decode STATFS reply */ static int -nfs_xdr_statfsres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res) +nfs_xdr_statfsres(struct rpc_rqst *req, u32 *p, struct nfs2_statfs *res) { int status; - u32 xfer_size; if ((status = ntohl(*p++))) return -nfs_stat_to_errno(status); - /* For NFSv2, we more or less have to guess the preferred - * read/write/readdir sizes from the single 'transfer size' - * value. - */ - xfer_size = ntohl(*p++); /* tsize */ - res->rtmax = 8 * 1024; - res->rtpref = xfer_size; - res->rtmult = xfer_size; - res->wtmax = 8 * 1024; - res->wtpref = xfer_size; - res->wtmult = xfer_size; - res->dtpref = PAGE_CACHE_SIZE; - res->maxfilesize = 0x7FFFFFFF; /* just a guess */ + res->tsize = ntohl(*p++); res->bsize = ntohl(*p++); - - res->tbytes = ntohl(*p++) * res->bsize; - res->fbytes = ntohl(*p++) * res->bsize; - res->abytes = ntohl(*p++) * res->bsize; - res->tfiles = 0; - res->ffiles = 0; - res->afiles = 0; - res->namelen = 0; + res->blocks = ntohl(*p++); + res->bfree = ntohl(*p++); + res->bavail = ntohl(*p++); return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/nfs/nfs3proc.c linux.22-ac2/fs/nfs/nfs3proc.c --- linux.vanilla/fs/nfs/nfs3proc.c 2003-08-28 16:45:42.000000000 +0100 +++ linux.22-ac2/fs/nfs/nfs3proc.c 2003-07-06 14:06:34.000000000 +0100 @@ -117,12 +117,13 @@ } static int -nfs3_proc_access(struct inode *inode, int mode, int ruid) +nfs3_proc_access(struct inode *inode, struct rpc_cred *cred, int mode) { struct nfs_fattr fattr; struct nfs3_accessargs arg = { NFS_FH(inode), 0 }; struct nfs3_accessres res = { &fattr, 0 }; - int status, flags; + struct rpc_message msg = { NFS3PROC_ACCESS, &arg, &res, cred }; + int status; dprintk("NFS call access\n"); fattr.valid = 0; @@ -140,8 +141,7 @@ if (mode & MAY_EXEC) arg.access |= NFS3_ACCESS_EXECUTE; } - flags = (ruid) ? 
RPC_CALL_REALUID : 0; - status = rpc_call(NFS_CLIENT(inode), NFS3PROC_ACCESS, &arg, &res, flags); + status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs_refresh_inode(inode, &fattr); dprintk("NFS reply access\n"); @@ -488,24 +488,42 @@ return status; } -/* - * This is a combo call of fsstat and fsinfo - */ static int nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, - struct nfs_fsinfo *info) + struct nfs_fsstat *stat) { int status; - dprintk("NFS call fsstat\n"); - memset((char *)info, 0, sizeof(*info)); - status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, info, 0); - if (status < 0) - goto error; + stat->fattr->valid = 0; + dprintk("NFS call statfs\n"); + status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, stat, 0); + dprintk("NFS reply statfs: %d\n", status); + return status; +} + +static int +nfs3_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_fsinfo *info) +{ + int status; + + info->fattr->valid = 0; + dprintk("NFS call fsinfo\n"); status = rpc_call(server->client, NFS3PROC_FSINFO, fhandle, info, 0); + dprintk("NFS reply fsinfo: %d\n", status); + return status; +} -error: - dprintk("NFS reply statfs: %d\n", status); +static int +nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_pathconf *info) +{ + int status; + + info->fattr->valid = 0; + dprintk("NFS call pathconf\n"); + status = rpc_call(server->client, NFS3PROC_PATHCONF, fhandle, info, 0); + dprintk("NFS reply pathconf: %d\n", status); return status; } @@ -534,5 +552,7 @@ nfs3_proc_readdir, nfs3_proc_mknod, nfs3_proc_statfs, + nfs3_proc_fsinfo, + nfs3_proc_pathconf, nfs3_decode_dirent, }; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/nfs/nfs3xdr.c linux.22-ac2/fs/nfs/nfs3xdr.c --- linux.vanilla/fs/nfs/nfs3xdr.c 2002-11-29 21:27:23.000000000 +0000 +++ linux.22-ac2/fs/nfs/nfs3xdr.c 2003-06-29 16:09:17.000000000 +0100 @@ -181,6 +181,7 @@ /* Update the mode bits */ fattr->valid |= (NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3); + fattr->timestamp = jiffies; return p; } @@ -506,7 +507,7 @@ int hdrlen, recvd; int status, nr; unsigned int len, pglen; - u32 *entry, *end; + u32 *entry, *end, *kaddr; status = ntohl(*p++); /* Decode post_op_attrs */ @@ -536,7 +537,7 @@ if (pglen > recvd) pglen = recvd; page = rcvbuf->pages; - p = kmap(*page); + kaddr = p = kmap_atomic(*page, KM_USER0); end = (u32 *)((char *)p + pglen); entry = p; for (nr = 0; *p++; nr++) { @@ -581,7 +582,7 @@ if (!nr && (entry[0] != 0 || entry[1] == 0)) goto short_pkt; out: - kunmap(*page); + kunmap_atomic(kaddr, KM_USER0); return nr; short_pkt: entry[0] = entry[1] = 0; @@ -592,8 +593,8 @@ } goto out; err_unmap: - kunmap(*page); - return -errno_NFSERR_IO; + nr = -errno_NFSERR_IO; + goto out; } u32 * @@ -616,21 +617,19 @@ p = xdr_decode_hyper(p, &entry->cookie); if (plus) { - p = xdr_decode_post_op_attr(p, &entry->fattr); + entry->fattr->valid = 0; + p = xdr_decode_post_op_attr(p, entry->fattr); /* In fact, a post_op_fh3: */ if (*p++) { - p = xdr_decode_fhandle(p, &entry->fh); + p = xdr_decode_fhandle(p, entry->fh); /* Ugh -- server reply was truncated */ if (p == NULL) { dprintk("NFS: FH truncated\n"); *entry = old; return ERR_PTR(-EAGAIN); } - } else { - /* If we don't get a file handle, the attrs - * aren't worth a lot. 
*/ - entry->fattr.valid = 0; - } + } else + memset((u8*)(entry->fh), 0, sizeof(*entry->fh)); } entry->eof = !p[0] && p[1]; @@ -766,7 +765,7 @@ xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); } - strlen = (u32*)kmap(rcvbuf->pages[0]); + strlen = (u32*)kmap_atomic(rcvbuf->pages[0], KM_USER0); /* Convert length of symlink */ len = ntohl(*strlen); if (len > rcvbuf->page_len) @@ -775,7 +774,7 @@ /* NULL terminate the string we got */ string = (char *)(strlen + 1); string[len] = 0; - kunmap(rcvbuf->pages[0]); + kunmap_atomic(strlen, KM_USER0); return 0; } @@ -913,14 +912,13 @@ * Decode FSSTAT reply */ static int -nfs3_xdr_fsstatres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res) +nfs3_xdr_fsstatres(struct rpc_rqst *req, u32 *p, struct nfs_fsstat *res) { - struct nfs_fattr dummy; int status; status = ntohl(*p++); - p = xdr_decode_post_op_attr(p, &dummy); + p = xdr_decode_post_op_attr(p, res->fattr); if (status != 0) return -nfs_stat_to_errno(status); @@ -930,8 +928,7 @@ p = xdr_decode_hyper(p, &res->tfiles); p = xdr_decode_hyper(p, &res->ffiles); p = xdr_decode_hyper(p, &res->afiles); - - /* ignore invarsec */ + res->invarsec = ntohl(*p++); return 0; } @@ -941,12 +938,11 @@ static int nfs3_xdr_fsinfores(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res) { - struct nfs_fattr dummy; int status; status = ntohl(*p++); - p = xdr_decode_post_op_attr(p, &dummy); + p = xdr_decode_post_op_attr(p, res->fattr); if (status != 0) return -nfs_stat_to_errno(status); @@ -958,8 +954,8 @@ res->wtmult = ntohl(*p++); res->dtpref = ntohl(*p++); p = xdr_decode_hyper(p, &res->maxfilesize); - - /* ignore time_delta and properties */ + p = xdr_decode_time3(p, &res->time_delta); + res->properties = ntohl(*p++); return 0; } @@ -967,20 +963,21 @@ * Decode PATHCONF reply */ static int -nfs3_xdr_pathconfres(struct rpc_rqst *req, u32 *p, struct nfs_fsinfo *res) +nfs3_xdr_pathconfres(struct rpc_rqst *req, u32 *p, struct nfs_pathconf *res) { - struct nfs_fattr dummy; int status; status = ntohl(*p++); - p = xdr_decode_post_op_attr(p, &dummy); + p = xdr_decode_post_op_attr(p, res->fattr); if (status != 0) return -nfs_stat_to_errno(status); res->linkmax = ntohl(*p++); - res->namelen = ntohl(*p++); - - /* ignore remaining fields */ + res->name_max = ntohl(*p++); + res->no_trunc = ntohl(*p++) != 0; + res->chown_restricted = ntohl(*p++) != 0; + res->case_insensitive = ntohl(*p++) != 0; + res->case_preserving = ntohl(*p++) != 0; return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/nfs/proc.c linux.22-ac2/fs/nfs/proc.c --- linux.vanilla/fs/nfs/proc.c 2002-11-29 21:27:23.000000000 +0000 +++ linux.22-ac2/fs/nfs/proc.c 2003-06-29 16:09:17.000000000 +0100 @@ -351,17 +351,62 @@ static int nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, - struct nfs_fsinfo *info) + struct nfs_fsstat *stat) { int status; + struct nfs2_statfs fsinfo; - dprintk("NFS call statfs\n"); - memset((char *)info, 0, sizeof(*info)); - status = rpc_call(server->client, NFSPROC_STATFS, fhandle, info, 0); + stat->fattr->valid = 0; + dprintk("NFS call statfs\n"); + status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0); dprintk("NFS reply statfs: %d\n", status); + if (status) + goto out; + stat->tbytes = (u64)fsinfo.blocks * fsinfo.bsize; + stat->fbytes = (u64)fsinfo.bfree * fsinfo.bsize; + stat->abytes = (u64)fsinfo.bavail * fsinfo.bsize; + stat->tfiles = 0; + stat->ffiles = 0; + stat->afiles = 0; + stat->invarsec = 0; + out: return status; } +static int +nfs_proc_fsinfo(struct nfs_server 
*server, struct nfs_fh *fhandle, + struct nfs_fsinfo *info) +{ + int status; + struct nfs2_statfs fsinfo; + + info->fattr->valid = 0; + dprintk("NFS call fsinfo\n"); + status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0); + dprintk("NFS reply fsinfo: %d\n", status); + if (status) + goto out; + info->rtmax = NFS_MAXDATA; + info->rtpref = fsinfo.tsize; + info->rtmult = fsinfo.bsize; + info->wtmax = NFS_MAXDATA; + info->wtpref = fsinfo.tsize; + info->wtmult = fsinfo.bsize; + info->dtpref = fsinfo.tsize; + info->maxfilesize = 0x7FFFFFFF; + info->time_delta = 0; + info->properties = 0x1b; + out: + return status; +} + +static int +nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_pathconf *info) +{ + return -ENOTSUPP; +} + extern u32 * nfs_decode_dirent(u32 *, struct nfs_entry *, int); struct nfs_rpc_ops nfs_v2_clientops = { @@ -387,5 +432,7 @@ nfs_proc_readdir, nfs_proc_mknod, nfs_proc_statfs, + nfs_proc_fsinfo, + nfs_proc_pathconf, nfs_decode_dirent, }; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/nfs/read.c linux.22-ac2/fs/nfs/read.c --- linux.vanilla/fs/nfs/read.c 2003-06-14 00:11:39.000000000 +0100 +++ linux.22-ac2/fs/nfs/read.c 2003-06-29 16:09:17.000000000 +0100 @@ -135,9 +135,9 @@ } while (count); if (count) { - char *kaddr = kmap(page); + char *kaddr = kmap_atomic(page, KM_USER0); memset(kaddr + offset, 0, count); - kunmap(page); + kunmap_atomic(kaddr, KM_USER0); } flush_dcache_page(page); SetPageUptodate(page); @@ -420,9 +420,9 @@ if (task->tk_status >= 0) { if (count < PAGE_CACHE_SIZE) { - char *p = kmap(page); + char *p = kmap_atomic(page, KM_USER0); memset(p + count, 0, PAGE_CACHE_SIZE - count); - kunmap(page); + kunmap_atomic(p, KM_USER0); count = 0; } else count -= PAGE_CACHE_SIZE; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/nfs/write.c linux.22-ac2/fs/nfs/write.c --- linux.vanilla/fs/nfs/write.c 2003-08-28 16:45:42.000000000 +0100 +++ linux.22-ac2/fs/nfs/write.c 2003-07-06 14:06:42.000000000 +0100 @@ -225,7 +225,7 @@ struct inode *inode = page->mapping->host; unsigned long end_index; unsigned offset = PAGE_CACHE_SIZE; - int err; + int err, is_sync; end_index = inode->i_size >> PAGE_CACHE_SHIFT; @@ -244,7 +244,8 @@ goto out; do_it: lock_kernel(); - if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !IS_SYNC(inode)) { + is_sync = (IS_SYNC(inode) || NFS_FLUSH(inode)); + if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !is_sync) { err = nfs_writepage_async(NULL, inode, page, 0, offset); if (err >= 0) err = 0; @@ -732,15 +733,18 @@ static void nfs_strategy(struct inode *inode) { - unsigned int dirty, wpages; + unsigned int dirty, wpages, flush; dirty = inode->u.nfs_i.ndirty; wpages = NFS_SERVER(inode)->wpages; + flush = NFS_FLUSH(inode); #ifdef CONFIG_NFS_V3 if (NFS_PROTO(inode)->version == 2) { if (dirty >= NFS_STRATEGY_PAGES * wpages) nfs_flush_file(inode, NULL, 0, 0, 0); - } else if (dirty >= wpages) + } else if (dirty >= wpages) { + nfs_flush_file(inode, NULL, 0, 0, 0); + } else if (dirty && flush) nfs_flush_file(inode, NULL, 0, 0, 0); #else if (dirty >= NFS_STRATEGY_PAGES * wpages) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/ntfs/dir.c linux.22-ac2/fs/ntfs/dir.c --- linux.vanilla/fs/ntfs/dir.c 2001-11-04 00:35:46.000000000 +0000 +++ linux.22-ac2/fs/ntfs/dir.c 2003-08-28 22:14:30.000000000 +0100 @@ -802,17 +802,17 @@ u8 ibs_bits; if (!ino) { - ntfs_error(__FUNCTION__ "(): No inode! 
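/*
 * Illustrative sketch, not part of the patch above.  The fs/nfs/proc.c
 * hunk maps an NFSv2 STATFS reply (block counts plus a block size) onto
 * the new byte-based nfs_fsstat fields.  The detail worth noting is the
 * cast: one operand is widened to u64 before the multiply, so the
 * product of two 32-bit values cannot overflow.  The helper name below
 * exists only for this sketch:
 */
static inline u64 nfs2_blocks_to_bytes(u32 blocks, u32 bsize)
{
	return (u64)blocks * bsize;	/* widen first, then multiply */
}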
Returning -EINVAL.\n"); + ntfs_error("%s(): No inode! Returning -EINVAL.\n",__FUNCTION__); return -EINVAL; } vol = ino->vol; if (!vol) { - ntfs_error(__FUNCTION__ "(): Inode 0x%lx has no volume. " - "Returning -EINVAL.\n", ino->i_number); + ntfs_error("%s(): Inode 0x%lx has no volume. Returning " + "-EINVAL.\n", __FUNCTION__, ino->i_number); return -EINVAL; } - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 1: Entering for " - "inode 0x%lx, p_high = 0x%x, p_low = 0x%x.\n", + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 1: Entering for inode 0x%lx, " + "p_high = 0x%x, p_low = 0x%x.\n", __FUNCTION__, ino->i_number, *p_high, *p_low); if (!*p_high) { /* We are still in the index root. */ @@ -827,8 +827,8 @@ ino->u.index.recordsize = ibs = NTFS_GETU32(buf + 0x8); ino->u.index.clusters_per_record = NTFS_GETU32(buf + 0xC); entry = buf + 0x20; - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 2: In index " - "root.\n"); + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 2: In index root.\n", + __FUNCTION__); ibs_bits = ffs(ibs) - 1; /* Compensate for faked "." and "..". */ start = 2; @@ -850,15 +850,15 @@ if (err || io.size != ibs) goto read_err_ret; if (!ntfs_check_index_record(ino, buf)) { - ntfs_error(__FUNCTION__ "(): Index block 0x%x is not " - "an index record. Returning " - "-ENOTDIR.\n", *p_high - 1); + ntfs_error("%s(): Index block 0x%x is not an index " + "record. Returning -ENOTDIR.\n", + __FUNCTION__, *p_high - 1); ntfs_free(buf); return -ENOTDIR; } entry = buf + 0x18 + NTFS_GETU16(buf + 0x18); - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 3: In index " - "allocation.\n"); + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 3: In index " + "allocation.\n", __FUNCTION__); start = 0; } /* Process the entries. */ @@ -867,29 +867,30 @@ entry += NTFS_GETU16(entry + 8)) { if (start < finish) { /* Skip entries that were already processed. */ - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 4: " - "Skipping already processed entry " - "p_high 0x%x, p_low 0x%x.\n", *p_high, + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 4: Skipping " + "already processed entry p_high 0x%x, " + "p_low 0x%x.\n", __FUNCTION__, *p_high, start); start++; continue; } - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 5: " - "Processing entry p_high 0x%x, p_low 0x%x.\n", + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 5: Processing entry " + "p_high 0x%x, p_low 0x%x.\n", __FUNCTION__, *p_high, *p_low); if ((err = cb(entry, param))) { /* filldir signalled us to stop. */ - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): " - "Unsorted 6: cb returned %i, " - "returning 0, p_high 0x%x, p_low 0x%x." - "\n", *p_high, *p_low); + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 6: cb returned " + "%i, returning 0, p_high 0x%x, " + "p_low 0x%x.\n", __FUNCTION__, err, + *p_high, *p_low); ntfs_free(buf); return 0; } ++*p_low; } - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 7: After processing " - "entries, p_high 0x%x, p_low 0x%x.\n", *p_high, *p_low); + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 7: After processing entries, " + "p_high 0x%x, p_low 0x%x.\n", __FUNCTION__, *p_high, + *p_low); /* We have to locate the next record. */ ntfs_free(buf); buf = 0; @@ -898,15 +899,15 @@ if (!attr) { /* Directory does not have index bitmap and index allocation. */ *p_high = 0x7fff; - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 8: No index " - "allocation. Returning 0, p_high 0x7fff, " - "p_low 0x0.\n"); + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 8: No index allocation. 
" + "Returning 0, p_high 0x7fff, p_low 0x0.\n", + __FUNCTION__); return 0; } max_size = attr->size; if (max_size > 0x7fff >> 3) { - ntfs_error(__FUNCTION__ "(): Directory too large. Visible " - "length is truncated.\n"); + ntfs_error("%s(): Directory too large. Visible " + "length is truncated.\n", __FUNCTION__); max_size = 0x7fff >> 3; } buf = ntfs_malloc(max_size); @@ -920,26 +921,26 @@ attr = ntfs_find_attr(ino, vol->at_index_allocation, I30); if (!attr) { ntfs_free(buf); - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 9: Find " - "attr failed. Returning -EIO.\n"); + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 9: Find attr failed. " + "Returning -EIO.\n", __FUNCTION__); return -EIO; } if (attr->resident) { ntfs_free(buf); - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 9.5: IA is " - "resident. Not allowed. Returning EINVAL.\n"); + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 9.5: IA is resident. Not" + " allowed. Returning EINVAL.\n", __FUNCTION__); return -EINVAL; } /* Loop while going through non-allocated index records. */ max_size <<= 3; while (1) { if (++*p_high >= 0x7fff) { - ntfs_error(__FUNCTION__ "(): Unsorted 10: Directory " + ntfs_error("%s(): Unsorted 10: Directory " "inode 0x%lx overflowed the maximum " "number of index allocation buffers " "the driver can cope with. Pretending " "to be at end of directory.\n", - ino->i_number); + __FUNCTION__, ino->i_number); goto fake_eod; } if (*p_high > max_size || (s64)*p_high << ibs_bits > @@ -949,10 +950,9 @@ *p_high = 0x7fff; *p_low = 0; ntfs_free(buf); - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted " - "10.5: No more index records. " - "Returning 0, p_high 0x7fff, p_low " - "0.\n"); + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 10.5: No more " + "index records. Returning 0, p_high " + "0x7fff, p_low 0.\n", __FUNCTION__); return 0; } byte = (ntfs_cluster_t)(*p_high - 1); @@ -961,16 +961,15 @@ if ((buf[byte] & bit)) break; }; - ntfs_debug(DEBUG_DIR3, __FUNCTION__ "(): Unsorted 11: Done. " - "Returning 0, p_high 0x%x, p_low 0x%x.\n", *p_high, - *p_low); + ntfs_debug(DEBUG_DIR3, "%s(): Unsorted 11: Done. Returning 0, p_high " + "0x%x, p_low 0x%x.\n", __FUNCTION__, *p_high, *p_low); ntfs_free(buf); return 0; read_err_ret: if (!err) err = -EIO; - ntfs_error(__FUNCTION__ "(): Read failed. Returning error code %i.\n", - err); + ntfs_error("%s(): Read failed. Returning error code %i.\n", + __FUNCTION__, err); ntfs_free(buf); return err; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/ntfs/fs.c linux.22-ac2/fs/ntfs/fs.c --- linux.vanilla/fs/ntfs/fs.c 2002-08-03 16:08:30.000000000 +0100 +++ linux.22-ac2/fs/ntfs/fs.c 2003-08-28 22:14:30.000000000 +0100 @@ -114,9 +114,9 @@ if (!ntfs_ino) return -EINVAL; - ntfs_debug(DEBUG_LINUX, __FUNCTION__ "(): Entering for inode 0x%lx, " - "*pos 0x%Lx, count 0x%x.\n", ntfs_ino->i_number, *pos, - count); + ntfs_debug(DEBUG_LINUX, "%s(): Entering for inode 0x%lx, *pos 0x%Lx, " + "count 0x%x.\n", __FUNCTION__, ntfs_ino->i_number, + *pos, count); /* Allows to lock fs ro at any time. 
*/ if (vfs_ino->i_sb->s_flags & MS_RDONLY) return -EROFS; @@ -140,7 +140,7 @@ io.size = count; io.do_read = 0; err = ntfs_readwrite_attr(ntfs_ino, data, *pos, &io); - ntfs_debug(DEBUG_LINUX, __FUNCTION__ "(): Returning %i\n", -err); + ntfs_debug(DEBUG_LINUX, "%s(): Returning %i\n", __FUNCTION__, -err); if (!err) { *pos += io.size; if (*pos > vfs_ino->i_size) @@ -196,20 +196,20 @@ err = ntfs_encodeuni(NTFS_INO2VOL(nf->dir), (ntfs_u16*)(entry + 0x52), name_len, &nf->name, &nf->namelen); if (err) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Skipping " - "unrepresentable file.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Skipping unrepresentable " + "file.\n", __FUNCTION__); err = 0; goto err_ret; } if (!show_sys_files && inum < 0x10UL) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Skipping system " - "file (%s).\n", nf->name); + ntfs_debug(DEBUG_OTHER, "%s(): Skipping system file (%s).\n", + __FUNCTION__, nf->name); err = 0; goto err_ret; } /* Do not return ".", as this is faked. */ if (nf->namelen == 1 && nf->name[0] == '.') { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Skipping \".\"\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Skipping \".\"\n", __FUNCTION__); err = 0; goto err_ret; } @@ -218,8 +218,8 @@ file_type = DT_DIR; else file_type = DT_REG; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Calling filldir for %s with " - "len %i, f_pos 0x%Lx, inode %lu, %s.\n", + ntfs_debug(DEBUG_OTHER, "%s(): Calling filldir for %s with " + "len %i, f_pos 0x%Lx, inode %lu, %s.\n", __FUNCTION__, nf->name, nf->namelen, (loff_t)(nf->ph << 16) | nf->pl, inum, file_type == DT_DIR ? "DT_DIR" : "DT_REG"); /* @@ -254,16 +254,16 @@ cb.pl = filp->f_pos & 0xffff; cb.ph = (filp->f_pos >> 16) & 0x7fff; filp->f_pos = (loff_t)(cb.ph << 16) | cb.pl; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Entering for inode %lu, " - "f_pos 0x%Lx, i_mode 0x%x, i_count %lu.\n", dir->i_ino, - filp->f_pos, (unsigned int)dir->i_mode, + ntfs_debug(DEBUG_OTHER, "%s(): Entering for inode %lu, f_pos 0x%Lx, " + "i_mode 0x%x, i_count %lu.\n", __FUNCTION__, + dir->i_ino, filp->f_pos, (unsigned int)dir->i_mode, atomic_read(&dir->i_count)); if (!cb.ph) { /* Start of directory. Emulate "." and "..". */ if (!cb.pl) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Calling " - "filldir for . with len 1, f_pos 0x%Lx, " - "inode %lu, DT_DIR.\n", filp->f_pos, + ntfs_debug(DEBUG_OTHER, "%s(): Calling filldir for . " + "with len 1, f_pos 0x%Lx, inode %lu, " + "DT_DIR.\n", __FUNCTION__, filp->f_pos, dir->i_ino); cb.ret_code = filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR); @@ -273,9 +273,9 @@ filp->f_pos = (loff_t)(cb.ph << 16) | cb.pl; } if (cb.pl == (u32)1) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Calling " - "filldir for .. with len 2, f_pos 0x%Lx, " - "inode %lu, DT_DIR.\n", filp->f_pos, + ntfs_debug(DEBUG_OTHER, "%s(): Calling filldir for .. 
" + "with len 2, f_pos 0x%Lx, inode %lu, " + "DT_DIR.\n", __FUNCTION__, filp->f_pos, filp->f_dentry->d_parent->d_inode->i_ino); cb.ret_code = filldir(dirent, "..", 2, filp->f_pos, filp->f_dentry->d_parent->d_inode->i_ino, @@ -293,30 +293,31 @@ cb.dirent = dirent; cb.type = NTFS_INO2VOL(dir)->ngt; do { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Looking for next " - "file using ntfs_getdir_unsorted(), f_pos " - "0x%Lx.\n", (loff_t)(cb.ph << 16) | cb.pl); + ntfs_debug(DEBUG_OTHER, "%s(): Looking for next file using " + "ntfs_getdir_unsorted(), f_pos 0x%Lx.\n", + __FUNCTION__, (loff_t)(cb.ph << 16) | cb.pl); err = ntfs_getdir_unsorted(NTFS_LINO2NINO(dir), &cb.ph, &cb.pl, ntfs_printcb, &cb); } while (!err && !cb.ret_code && cb.ph < 0x7fff); filp->f_pos = (loff_t)(cb.ph << 16) | cb.pl; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After ntfs_getdir_unsorted()" - " calls, f_pos 0x%Lx.\n", filp->f_pos); + ntfs_debug(DEBUG_OTHER, "%s(): After ntfs_getdir_unsorted()" + " calls, f_pos 0x%Lx.\n", __FUNCTION__, filp->f_pos); if (!err) { done: #ifdef DEBUG if (!cb.ret_code) - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): EOD, f_pos " - "0x%Lx, returning 0.\n", filp->f_pos); + ntfs_debug(DEBUG_OTHER, "%s(): EOD, f_pos 0x%Lx, " + "returning 0.\n", __FUNCTION__, + filp->f_pos); else - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): filldir " - "returned %i, returning 0, f_pos " - "0x%Lx.\n", cb.ret_code, filp->f_pos); + ntfs_debug(DEBUG_OTHER, "%s(): filldir returned %i, " + "returning 0, f_pos 0x%Lx.\n", + __FUNCTION__, cb.ret_code, filp->f_pos); #endif return 0; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Returning %i, f_pos 0x%Lx.\n", - err, filp->f_pos); + ntfs_debug(DEBUG_OTHER, "%s(): Returning %i, f_pos 0x%Lx.\n", + __FUNCTION__, err, filp->f_pos); return err; } @@ -524,8 +525,8 @@ ntfs_iterate_s walk; int err; - ntfs_debug(DEBUG_NAME1, __FUNCTION__ "(): Looking up %s in directory " - "ino 0x%x.\n", d->d_name.name, (unsigned)dir->i_ino); + ntfs_debug(DEBUG_NAME1, "%s(): Looking up %s in directory ino 0x%x.\n", + __FUNCTION__, d->d_name.name, (unsigned)dir->i_ino); walk.name = NULL; walk.namelen = 0; /* Convert to wide string. */ @@ -847,7 +848,6 @@ goto unl_out; } break; - default: /* Nothing. Just clear the inode and exit. */ } ntfs_clear_inode(&inode->u.ntfs_i); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/ntfs/inode.c linux.22-ac2/fs/ntfs/inode.c --- linux.vanilla/fs/ntfs/inode.c 2001-12-21 17:42:03.000000000 +0000 +++ linux.22-ac2/fs/ntfs/inode.c 2003-08-28 22:14:30.000000000 +0100 @@ -671,8 +671,8 @@ ntfs_cluster_t cluster, s_cluster, vcn, len; __s64 l, chunk, copied; - ntfs_debug(DEBUG_FILE3, __FUNCTION__ "(): %s 0x%x bytes at offset " - "0x%Lx %s inode 0x%x, attr type 0x%x.\n", + ntfs_debug(DEBUG_FILE3, "%s(): %s 0x%x bytes at offset " + "0x%Lx %s inode 0x%x, attr type 0x%x.\n", __FUNCTION__, dest->do_read ? "Read" : "Write", dest->size, offset, dest->do_read ? "from" : "to", ino->i_number, attr->type); @@ -746,10 +746,10 @@ vcn + attr->d.r.runlist[rnum].len <= s_vcn; rnum++) vcn += attr->d.r.runlist[rnum].len; if (rnum == attr->d.r.len) { - ntfs_debug(DEBUG_FILE3, __FUNCTION__ "(): EOPNOTSUPP: " + ntfs_debug(DEBUG_FILE3, "%s(): EOPNOTSUPP: " "inode = 0x%x, rnum = %i, offset = 0x%Lx, vcn = 0x%x, " - "s_vcn = 0x%x.\n", ino->i_number, rnum, offset, vcn, - s_vcn); + "s_vcn = 0x%x.\n", __FUNCTION__, ino->i_number, rnum, + offset, vcn, s_vcn); dump_runlist(attr->d.r.runlist, attr->d.r.len); /*FIXME: Should extend runlist. 
*/ return -EOPNOTSUPP; @@ -793,8 +793,8 @@ buf->do_read = 1; attr = ntfs_find_attr(ino, type, name); if (!attr) { - ntfs_debug(DEBUG_FILE3, __FUNCTION__ "(): attr 0x%x not found " - "in inode 0x%x\n", type, ino->i_number); + ntfs_debug(DEBUG_FILE3, "%s(): attr 0x%x not found in inode " + "0x%x\n", __FUNCTION__, type, ino->i_number); return -EINVAL; } return ntfs_readwrite_attr(ino, attr, offset, buf); @@ -808,8 +808,8 @@ buf->do_read = 0; attr = ntfs_find_attr(ino, type, name); if (!attr) { - ntfs_debug(DEBUG_FILE3, __FUNCTION__ "(): attr 0x%x not found " - "in inode 0x%x\n", type, ino->i_number); + ntfs_debug(DEBUG_FILE3, "%s(): attr 0x%x not found in inode " + "0x%x\n", __FUNCTION__, type, ino->i_number); return -EINVAL; } return ntfs_readwrite_attr(ino, attr, offset, buf); @@ -1332,7 +1332,7 @@ int i; ntfs_cluster_t ct; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): rlen = %i.\n", rlen); + ntfs_debug(DEBUG_OTHER, "%s(): rlen = %i.\n", __FUNCTION__, rlen); ntfs_debug(DEBUG_OTHER, "VCN LCN Run length\n"); for (i = 0, ct = 0; i < rlen; ct += rl[i++].len) { if (rl[i].lcn == (ntfs_cluster_t)-1) @@ -1372,30 +1372,31 @@ ntfs_runlist *rl; int rlen, rl_size, rl2_pos; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Entering with *r1len = %i, " - "r2len = %i.\n", *r1len, r2len); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Dumping 1st runlist.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Entering with *r1len = %i, " + "r2len = %i.\n", __FUNCTION__, *r1len, r2len); + ntfs_debug(DEBUG_OTHER, "%s(): Dumping 1st runlist.\n", __FUNCTION__); if (*rl1) dump_runlist(*rl1, *r1len); else - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Not present.\n"); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Dumping 2nd runlist.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Not present.\n", __FUNCTION__); + ntfs_debug(DEBUG_OTHER, "%s(): Dumping 2nd runlist.\n", __FUNCTION__); dump_runlist(rl2, r2len); rlen = *r1len + r2len + 1; rl_size = (rlen * sizeof(ntfs_runlist) + PAGE_SIZE - 1) & PAGE_MASK; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): rlen = %i, rl_size = %i.\n", - rlen, rl_size); + ntfs_debug(DEBUG_OTHER, "%s(): rlen = %i, rl_size = %i.\n", + __FUNCTION__, rlen, rl_size); /* Do we have enough space? */ if (rl_size <= ((*r1len * sizeof(ntfs_runlist) + PAGE_SIZE - 1) & PAGE_MASK)) { /* Have enough space already. */ rl = *rl1; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Have enough space " - "already.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Have enough space already.\n", + __FUNCTION__); } else { /* Need more space. Reallocate. */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Need more space.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Need more space.\n", + __FUNCTION__); rl = ntfs_vmalloc(rlen << sizeof(ntfs_runlist)); if (!rl) return -ENOMEM; @@ -1406,17 +1407,17 @@ } /* Reuse rl_size as the current position index into rl. */ rl_size = *r1len - 1; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): rl_size = %i.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): rl_size = %i.\n", __FUNCTION__,rl_size); /* Coalesce neighbouring elements, if present. 
*/ rl2_pos = 0; if (rl[rl_size].lcn + rl[rl_size].len == rl2[rl2_pos].lcn) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Coalescing adjacent " - "runs.\n"); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Before: " - "rl[rl_size].len = %i.\n", rl[rl_size].len); + ntfs_debug(DEBUG_OTHER, "%s(): Coalescing adjacent runs.\n", + __FUNCTION__); + ntfs_debug(DEBUG_OTHER, "%s(): Before: rl[rl_size].len = %i.\n", + __FUNCTION__, rl[rl_size].len); rl[rl_size].len += rl2[rl2_pos].len; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After: " - "rl[rl_size].len = %i.\n", rl[rl_size].len); + ntfs_debug(DEBUG_OTHER, "%s(): After: rl[rl_size].len = %i.\n", + __FUNCTION__, rl[rl_size].len); rl2_pos++; r2len--; rlen--; @@ -1428,10 +1429,11 @@ rl[rlen].lcn = (ntfs_cluster_t)-1; rl[rlen].len = (ntfs_cluster_t)0; *r1len = rlen; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Dumping result runlist.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Dumping result runlist.\n", + __FUNCTION__); dump_runlist(*rl1, *r1len); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Returning with *r1len = " - "%i.\n", rlen); + ntfs_debug(DEBUG_OTHER, "%s(): Returning with *r1len = %i.\n", + __FUNCTION__, rlen); return 0; } @@ -1546,7 +1548,7 @@ /* Determine the number of allocated mft records in the mft. */ pass_end = nr_mft_records = data->allocated >> vol->mft_record_size_bits; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): nr_mft_records = %lu.\n", + ntfs_debug(DEBUG_OTHER, "%s(): nr_mft_records = %lu.\n", __FUNCTION__, nr_mft_records); /* Make sure we don't overflow the bitmap. */ l = bmp->initialized << 3; @@ -1565,9 +1567,10 @@ lcn = rl[rlen].lcn + rl[rlen].len; io.fn_put = ntfs_put; io.fn_get = ntfs_get; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Starting bitmap search.\n"); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): pass = %i, pass_start = %lu, " - "pass_end = %lu.\n", pass, pass_start, pass_end); + ntfs_debug(DEBUG_OTHER, "%s(): Starting bitmap search.\n", + __FUNCTION__); + ntfs_debug(DEBUG_OTHER, "%s(): pass = %i, pass_start = %lu, pass_end = " + "%lu.\n", __FUNCTION__, pass, pass_start, pass_end); byte = NULL; // FIXME: For debugging only. /* Loop until a free mft record is found. */ io.size = (nr_mft_records >> 3) & ~PAGE_MASK; @@ -1575,29 +1578,29 @@ io.param = buf; io.do_read = 1; last_read_pos = buf_pos >> 3; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Before: " - "bmp->allocated = 0x%Lx, bmp->size = 0x%Lx, " - "bmp->initialized = 0x%Lx.\n", bmp->allocated, + ntfs_debug(DEBUG_OTHER, "%s(): Before: bmp->allocated = 0x%Lx, " + "bmp->size = 0x%Lx, bmp->initialized = " + "0x%Lx.\n", __FUNCTION__, bmp->allocated, bmp->size, bmp->initialized); err = ntfs_readwrite_attr(vol->mft_ino, bmp, last_read_pos, &io); if (err) goto err_ret; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Read %lu bytes.\n", + ntfs_debug(DEBUG_OTHER, "%s(): Read %lu bytes.\n", __FUNCTION__, (unsigned long)io.size); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After: " - "bmp->allocated = 0x%Lx, bmp->size = 0x%Lx, " - "bmp->initialized = 0x%Lx.\n", bmp->allocated, + ntfs_debug(DEBUG_OTHER, "%s(): After: bmp->allocated = 0x%Lx, " + "bmp->size = 0x%Lx, bmp->initialized = " + "0x%Lx.\n", __FUNCTION__, bmp->allocated, bmp->size, bmp->initialized); if (!io.size) goto pass_done; buf_size = io.size << 3; bit = buf_pos & 7UL; buf_pos &= ~7UL; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Before loop: " - "buf_size = %lu, buf_pos = %lu, bit = %lu, " - "*byte = 0x%x, b = %u.\n", - buf_size, buf_pos, bit, byte ? 
*byte : -1, b); + ntfs_debug(DEBUG_OTHER, "%s(): Before loop: buf_size = %lu, " + "buf_pos = %lu, bit = %lu, *byte = 0x%x, b = " + "%u.\n", __FUNCTION__, buf_size, buf_pos, bit, + byte ? *byte : -1, b); for (; bit < buf_size && bit + buf_pos < pass_end; bit &= ~7UL, bit += 8UL) { byte = buf + (bit >> 3); @@ -1606,34 +1609,35 @@ b = ffz((unsigned long)*byte); if (b < (__u8)8 && b >= (bit & 7UL)) { bit = b + (bit & ~7UL) + buf_pos; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Found free rec in for loop. " - "bit = %lu\n", bit); + ntfs_debug(DEBUG_OTHER, "%s(): Found free rec " + "in for loop. bit = %lu\n", + __FUNCTION__, bit); goto found_free_rec; } } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After loop: " - "buf_size = %lu, buf_pos = %lu, bit = %lu, " - "*byte = 0x%x, b = %u.\n", - buf_size, buf_pos, bit, byte ? *byte : -1, b); + ntfs_debug(DEBUG_OTHER, "%s(): After loop: buf_size = %lu, " + "buf_pos = %lu, bit = %lu, *byte = 0x%x, b = " + "%u.\n", __FUNCTION__, buf_size, buf_pos, bit, + byte ? *byte : -1, b); buf_pos += buf_size; if (buf_pos < pass_end) continue; pass_done: /* Finished with the current pass. */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At pass_done.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At pass_done.\n", __FUNCTION__); if (pass == 1) { /* * Now do pass 2, scanning the first part of the zone * we omitted in pass 1. */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Done pass " - "1.\n"); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Pass = 2.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Done pass 1.\n", + __FUNCTION__); + ntfs_debug(DEBUG_OTHER, "%s(): Pass = 2.\n", + __FUNCTION__); pass = 2; pass_end = pass_start; buf_pos = pass_start = 24UL; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): pass = %i, " - "pass_start = %lu, pass_end = %lu.\n", + ntfs_debug(DEBUG_OTHER, "%s(): pass = %i, pass_start = " + "%lu, pass_end = %lu.\n", __FUNCTION__, pass, pass_start, pass_end); continue; } /* pass == 2 */ @@ -1649,21 +1653,21 @@ bit = nr_mft_records; if (bit < 24UL) bit = 24UL; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Found free " - "record bit (#1) = 0x%lx.\n", bit); + ntfs_debug(DEBUG_OTHER, "%s(): Found free record bit " + "(#1) = 0x%lx.\n", __FUNCTION__, bit); goto found_free_rec; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Done pass 2.\n"); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Before: " - "bmp->allocated = 0x%Lx, bmp->size = 0x%Lx, " - "bmp->initialized = 0x%Lx.\n", bmp->allocated, + ntfs_debug(DEBUG_OTHER, "%s(): Done pass 2.\n", __FUNCTION__); + ntfs_debug(DEBUG_OTHER, "%s(): Before: bmp->allocated = 0x%Lx, " + "bmp->size = 0x%Lx, bmp->initialized = " + "0x%Lx.\n", __FUNCTION__, bmp->allocated, bmp->size, bmp->initialized); /* Need to extend the mft bitmap. */ if (bmp->initialized + 8LL > bmp->allocated) { ntfs_io io2; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Initialized " - "> allocated.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Initialized " + "> allocated.\n", __FUNCTION__); /* Need to extend bitmap by one more cluster. 
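/*
 * Illustrative sketch, not part of the patch above.  The reworked
 * ntfs_alloc_mft_record() debug hunks surround a bitmap scan that skips
 * fully-set bytes and uses ffz() (from <asm/bitops.h>) to find the first
 * clear bit in a byte.  Stripped of the driver plumbing, and with names
 * invented for the sketch, the search step is essentially:
 */
static long find_first_free_bit(const unsigned char *bitmap, unsigned long nbits)
{
	unsigned long byte, bit;

	for (byte = 0; byte < (nbits + 7) / 8; byte++) {
		if (bitmap[byte] == 0xff)
			continue;			/* every bit in this byte is taken */
		bit = ffz((unsigned long)bitmap[byte]);	/* index of the first zero bit, 0..7 */
		if (byte * 8 + bit < nbits)
			return byte * 8 + bit;
	}
	return -1;					/* no free bit found */
}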
*/ rl = bmp->d.r.runlist; rlen = bmp->d.r.len - 1; @@ -1677,8 +1681,8 @@ &io2); if (err) goto err_ret; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Read %lu " - "bytes.\n", (unsigned long)io2.size); + ntfs_debug(DEBUG_OTHER, "%s(): Read %lu bytes.\n", + __FUNCTION__, (unsigned long)io2.size); if (io2.size == 1 && b != 0xff) { __u8 tb = 1 << (lcn & (ntfs_cluster_t)7); if (!(b & tb)) { @@ -1695,9 +1699,10 @@ } append_mftbmp_simple: rl[rlen].len++; have_allocated_mftbmp |= 1; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Appending one " - "cluster to mftbmp.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): " + "Appending one cluster " + "to mftbmp.\n", + __FUNCTION__); } } if (!have_allocated_mftbmp) { @@ -1713,11 +1718,12 @@ if (count > 0) { rl2_dealloc_err_out: if (ntfs_deallocate_clusters( vol, rl2, r2len)) - ntfs_error(__FUNCTION__ - "(): Cluster " + ntfs_error("%s(): " + "Cluster " "deallocation in error " "code path failed! You " - "should run chkdsk.\n"); + "should run chkdsk.\n", + __FUNCTION__); } ntfs_vfree(rl2); if (!err) @@ -1752,10 +1758,9 @@ rl[rlen].len = count; bmp->d.r.len = ++rlen; have_allocated_mftbmp |= 2; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Adding run to mftbmp. " - "LCN = %i, len = %i\n", lcn, - count); + ntfs_debug(DEBUG_OTHER, "%s(): Adding run to " + "mftbmp. LCN = %i, len = %i\n", + __FUNCTION__, lcn, count); } /* * We now have extended the mft bitmap allocated size @@ -1763,24 +1768,24 @@ */ bmp->allocated += (__s64)vol->cluster_size; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After: " - "bmp->allocated = 0x%Lx, bmp->size = 0x%Lx, " - "bmp->initialized = 0x%Lx.\n", bmp->allocated, + ntfs_debug(DEBUG_OTHER, "%s(): After: bmp->allocated = 0x%Lx, " + "bmp->size = 0x%Lx, bmp->initialized = " + "0x%Lx.\n", __FUNCTION__, bmp->allocated, bmp->size, bmp->initialized); /* We now have sufficient allocated space. */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Now have sufficient " - "allocated space in mftbmp.\n"); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Before: " - "bmp->allocated = 0x%Lx, bmp->size = 0x%Lx, " - "bmp->initialized = 0x%Lx.\n", bmp->allocated, + ntfs_debug(DEBUG_OTHER, "%s(): Now have sufficient allocated " + "space in mftbmp.\n", __FUNCTION__); + ntfs_debug(DEBUG_OTHER, "%s(): Before: bmp->allocated = 0x%Lx, " + "bmp->size = 0x%Lx, bmp->initialized = " + "0x%Lx.\n", __FUNCTION__, bmp->allocated, bmp->size, bmp->initialized); buf_pos = bmp->initialized; bmp->initialized += 8LL; if (bmp->initialized > bmp->size) bmp->size = bmp->initialized; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After: " - "bmp->allocated = 0x%Lx, bmp->size = 0x%Lx, " - "bmp->initialized = 0x%Lx.\n", bmp->allocated, + ntfs_debug(DEBUG_OTHER, "%s(): After: bmp->allocated = 0x%Lx, " + "bmp->size = 0x%Lx, bmp->initialized = " + "0x%Lx.\n", __FUNCTION__, bmp->allocated, bmp->size, bmp->initialized); have_allocated_mftbmp |= 4; /* Update the mft bitmap attribute value. 
*/ @@ -1794,27 +1799,27 @@ err = -EIO; goto shrink_mftbmp_err_ret; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Wrote extended " - "mftbmp bytes %lu.\n", (unsigned long)io.size); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After write: " - "bmp->allocated = 0x%Lx, bmp->size = 0x%Lx, " - "bmp->initialized = 0x%Lx.\n", bmp->allocated, + ntfs_debug(DEBUG_OTHER, "%s(): Wrote extended mftbmp bytes " + "%lu.\n", __FUNCTION__, (unsigned long)io.size); + ntfs_debug(DEBUG_OTHER, "%s(): After write: bmp->allocated = " + "0x%Lx, bmp->size = 0x%Lx, bmp->initialized = " + "0x%Lx.\n", __FUNCTION__, bmp->allocated, bmp->size, bmp->initialized); bit = buf_pos << 3; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Found free record " - "bit (#2) = 0x%lx.\n", bit); + ntfs_debug(DEBUG_OTHER, "%s(): Found free record bit (#2) = " + "0x%lx.\n", __FUNCTION__, bit); goto found_free_rec; } found_free_rec: /* bit is the found free mft record. Allocate it in the mft bitmap. */ vol->mft_data_pos = bit; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At found_free_rec.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At found_free_rec.\n", __FUNCTION__); io.param = buf; io.size = 1; io.do_read = 1; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Before update: " - "bmp->allocated = 0x%Lx, bmp->size = 0x%Lx, " - "bmp->initialized = 0x%Lx.\n", bmp->allocated, + ntfs_debug(DEBUG_OTHER, "%s(): Before update: bmp->allocated = 0x%Lx, " + "bmp->size = 0x%Lx, bmp->initialized = 0x%Lx.\n", + __FUNCTION__, bmp->allocated, bmp->size, bmp->initialized); err = ntfs_readwrite_attr(vol->mft_ino, bmp, bit >> 3, &io); if (err || io.size != 1) { @@ -1822,7 +1827,7 @@ err = -EIO; goto shrink_mftbmp_err_ret; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Read %lu bytes.\n", + ntfs_debug(DEBUG_OTHER, "%s(): Read %lu bytes.\n", __FUNCTION__, (unsigned long)io.size); #ifdef DEBUG /* Check our bit is really zero! */ @@ -1838,22 +1843,22 @@ err = -EIO; goto shrink_mftbmp_err_ret; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Wrote %lu bytes.\n", + ntfs_debug(DEBUG_OTHER, "%s(): Wrote %lu bytes.\n", __FUNCTION__, (unsigned long)io.size); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After update: " - "bmp->allocated = 0x%Lx, bmp->size = 0x%Lx, " - "bmp->initialized = 0x%Lx.\n", bmp->allocated, + ntfs_debug(DEBUG_OTHER, "%s(): After update: bmp->allocated = 0x%Lx, " + "bmp->size = 0x%Lx, bmp->initialized = 0x%Lx.\n", + __FUNCTION__, bmp->allocated, bmp->size, bmp->initialized); /* The mft bitmap is now uptodate. Deal with mft data attribute now. */ ll = (__s64)(bit + 1) << vol->mft_record_size_bits; if (ll <= data->initialized) { /* The allocated record is already initialized. We are done! */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Allocated mft record " - "already initialized!\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Allocated mft record " + "already initialized!\n", __FUNCTION__); goto done_ret; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Allocated mft record needs " - "to be initialized.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Allocated mft record needs " + "to be initialized.\n", __FUNCTION__); /* The mft record is outside the initialized data. */ mft_rec_size = (unsigned long)vol->mft_record_size; /* Preserve old values for undo purposes. 
*/ @@ -1868,32 +1873,31 @@ while (ll > data->allocated) { ntfs_cluster_t lcn2, nr_lcn2, nr, min_nr; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Extending mft " - "data allocation, data->allocated = 0x%Lx, " - "data->size = 0x%Lx, data->initialized = " - "0x%Lx.\n", data->allocated, data->size, - data->initialized); + ntfs_debug(DEBUG_OTHER, "%s(): Extending mft data allocation, " + "data->allocated = 0x%Lx, data->size = 0x%Lx, " + "data->initialized = 0x%Lx.\n", __FUNCTION__, + data->allocated, data->size, data->initialized); /* Minimum allocation is one mft record worth of clusters. */ if (mft_rec_size <= vol->cluster_size) min_nr = (ntfs_cluster_t)1; else min_nr = mft_rec_size >> vol->cluster_size_bits; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): min_nr = %i.\n", + ntfs_debug(DEBUG_OTHER, "%s(): min_nr = %i.\n", __FUNCTION__, min_nr); /* Allocate 16 mft records worth of clusters. */ nr = mft_rec_size << 4 >> vol->cluster_size_bits; if (!nr) nr = (ntfs_cluster_t)1; /* Determine the preferred allocation location. */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): nr = %i.\n", nr); + ntfs_debug(DEBUG_OTHER, "%s(): nr = %i.\n", __FUNCTION__, nr); rl2 = data->d.r.runlist; r2len = data->d.r.len; lcn2 = rl2[r2len - 1].lcn + rl2[r2len - 1].len; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): rl2[r2len - 1].lcn " - "= %i, .len = %i.\n", rl2[r2len - 1].lcn, + ntfs_debug(DEBUG_OTHER, "%s(): rl2[r2len - 1].lcn = %i, .len = " + "%i.\n", __FUNCTION__, rl2[r2len - 1].lcn, rl2[r2len - 1].len); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): lcn2 = %i, r2len = " - "%i.\n", lcn2, r2len); + ntfs_debug(DEBUG_OTHER, "%s(): lcn2 = %i, r2len = %i.\n", + __FUNCTION__, lcn2, r2len); retry_mft_data_allocation: nr_lcn2 = nr; err = ntfs_allocate_clusters(vol, &lcn2, &nr_lcn2, &rl2, @@ -1913,36 +1917,34 @@ if (err == -ENOSPC && nr > min_nr && nr_lcn2 >= min_nr) { nr = min_nr; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Retrying mft data " - "allocation, nr = min_nr = %i" - ".\n", nr); + ntfs_debug(DEBUG_OTHER, "%s(): Retrying mft " + "data allocation, nr = min_nr " + "= %i.\n", __FUNCTION__, nr); goto retry_mft_data_allocation; } goto undo_mftbmp_alloc_err_ret; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Allocated %i " - "clusters starting at LCN %i.\n", nr_lcn2, - lcn2); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Allocated " - "runlist:\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Allocated %i clusters starting " + "at LCN %i.\n", __FUNCTION__, nr_lcn2, lcn2); + ntfs_debug(DEBUG_OTHER, "%s(): Allocated runlist:\n", + __FUNCTION__); dump_runlist(rl2, r2len); /* Append rl2 to the mft data attribute's run list. */ err = splice_runlists(&data->d.r.runlist, (int*)&data->d.r.len, rl2, r2len); if (err) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "splice_runlists failed with error " - "code %i.\n", -err); + ntfs_debug(DEBUG_OTHER, "%s(): splice_runlists failed " + "with error code %i.\n", __FUNCTION__, + -err); goto undo_partial_data_alloc_err_ret; } /* Reflect the allocated clusters in the mft allocated data. */ data->allocated += nr_lcn2 << vol->cluster_size_bits; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After extending mft " - "data allocation, data->allocated = 0x%Lx, " + ntfs_debug(DEBUG_OTHER, "%s(): After extending mft data " + "allocation, data->allocated = 0x%Lx, " "data->size = 0x%Lx, data->initialized = " - "0x%Lx.\n", data->allocated, data->size, - data->initialized); + "0x%Lx.\n", __FUNCTION__, data->allocated, + data->size, data->initialized); } /* Prepare a formatted (empty) mft record. 
*/ memset(buf, 0, mft_rec_size); @@ -1959,8 +1961,8 @@ old_data_initialized = data->initialized; old_data_size = data->size; while (ll > data->initialized) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Initializing mft " - "record 0x%Lx.\n", + ntfs_debug(DEBUG_OTHER, "%s(): Initializing mft record " + "0x%Lx.\n", __FUNCTION__, data->initialized >> vol->mft_record_size_bits); io.param = buf; io.size = mft_rec_size; @@ -1972,15 +1974,15 @@ err = -EIO; goto undo_data_init_err_ret; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Wrote %i bytes to " - "mft data.\n", io.size); + ntfs_debug(DEBUG_OTHER, "%s(): Wrote %i bytes to mft data.\n", + __FUNCTION__, io.size); } /* Update the VFS inode size as well. */ VFS_I(vol->mft_ino)->i_size = data->size; #ifdef DEBUG - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After mft record " + ntfs_debug(DEBUG_OTHER, "%s(): After mft record " "initialization: data->allocated = 0x%Lx, data->size " - "= 0x%Lx, data->initialized = 0x%Lx.\n", + "= 0x%Lx, data->initialized = 0x%Lx.\n", __FUNCTION__, data->allocated, data->size, data->initialized); /* Sanity checks. */ if (data->size > data->allocated || data->size < data->initialized || @@ -1989,45 +1991,47 @@ #endif done_ret: /* Return the number of the allocated mft record. */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At done_ret. *result = bit = " - "0x%lx.\n", bit); + ntfs_debug(DEBUG_OTHER, "%s(): At done_ret. *result = bit = 0x%lx.\n", + __FUNCTION__, bit); *result = bit; vol->mft_data_pos = bit + 1; err_ret: unlock_kernel(); free_page((unsigned long)buf); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Syncing inode $MFT.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Syncing inode $MFT.\n", __FUNCTION__); if (ntfs_update_inode(vol->mft_ino)) - ntfs_error(__FUNCTION__ "(): Failed to sync inode $MFT. " - "Continuing anyway.\n"); + ntfs_error("%s(): Failed to sync inode $MFT. " + "Continuing anyway.\n",__FUNCTION__); if (!err) { - ntfs_debug(DEBUG_FILE3, __FUNCTION__ "(): Done. Allocated mft " - "record number *result = 0x%lx.\n", *result); + ntfs_debug(DEBUG_FILE3, "%s(): Done. Allocated mft record " + "number *result = 0x%lx.\n", __FUNCTION__, + *result); return 0; } if (err != -ENOSPC) - ntfs_error(__FUNCTION__ "(): Failed to allocate an mft " - "record. Returning error code %i.\n", -err); + ntfs_error("%s(): Failed to allocate an mft record. Returning " + "error code %i.\n", __FUNCTION__, -err); else - ntfs_debug(DEBUG_FILE3, __FUNCTION__ "(): Failed to allocate " - "an mft record due to lack of free space.\n"); + ntfs_debug(DEBUG_FILE3, "%s(): Failed to allocate an mft " + "record due to lack of free space.\n", + __FUNCTION__); return err; undo_data_init_err_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At " - "undo_data_init_err_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At undo_data_init_err_ret.\n", + __FUNCTION__); data->initialized = old_data_initialized; data->size = old_data_size; undo_data_alloc_err_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At undo_data_alloc_err_ret." - "\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At undo_data_alloc_err_ret.\n", + __FUNCTION__); data->allocated = old_data_allocated; undo_partial_data_alloc_err_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At " - "undo_partial_data_alloc_err_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At undo_partial_data_alloc_err_ret.\n", + __FUNCTION__); /* Deallocate the clusters. */ if (ntfs_deallocate_clusters(vol, rl2, r2len)) - ntfs_error(__FUNCTION__ "(): Error deallocating clusters in " - "error code path. 
You should run chkdsk.\n"); + ntfs_error("%s(): Error deallocating clusters in error code " + "path. You should run chkdsk.\n", __FUNCTION__); ntfs_vfree(rl2); /* Revert the run list back to what it was before. */ r2len = data->d.r.len; @@ -2047,13 +2051,13 @@ ntfs_vfree(data->d.r.runlist); data->d.r.runlist = rl2; } else - ntfs_error(__FUNCTION__ "(): Error reallocating " + ntfs_error("%s(): Error reallocating " "memory in error code path. This " - "should be harmless.\n"); + "should be harmless.\n", __FUNCTION__); } undo_mftbmp_alloc_err_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At " - "undo_mftbmp_alloc_err_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At undo_mftbmp_alloc_err_ret.\n", + __FUNCTION__); /* Deallocate the allocated bit in the mft bitmap. */ io.param = buf; io.size = 1; @@ -2068,13 +2072,14 @@ if (err || io.size != 1) { if (!err) err = -EIO; - ntfs_error(__FUNCTION__ "(): Error deallocating mft record in " - "error code path. You should run chkdsk.\n"); + ntfs_error("%s(): Error deallocating mft record in error code " + "path. You should run chkdsk.\n", __FUNCTION__); } shrink_mftbmp_err_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At shrink_mftbmp_err_ret.\n"); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): have_allocated_mftbmp = " - "%i.\n", have_allocated_mftbmp); + ntfs_debug(DEBUG_OTHER, "%s(): At shrink_mftbmp_err_ret.\n", + __FUNCTION__); + ntfs_debug(DEBUG_OTHER, "%s(): have_allocated_mftbmp = %i.\n", + __FUNCTION__, have_allocated_mftbmp); if (!have_allocated_mftbmp) goto err_ret; /* Shrink the mftbmp back to previous size. */ @@ -2083,15 +2088,15 @@ bmp->initialized -= 8LL; have_allocated_mftbmp &= ~4; /* If no allocation occured then we are done. */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): have_allocated_mftbmp = " - "%i.\n", have_allocated_mftbmp); + ntfs_debug(DEBUG_OTHER, "%s(): have_allocated_mftbmp = %i.\n", + __FUNCTION__, have_allocated_mftbmp); if (!have_allocated_mftbmp) goto err_ret; /* Deallocate the allocated cluster. */ bmp->allocated -= (__s64)vol->cluster_size; if (ntfs_deallocate_cluster_run(vol, lcn, (ntfs_cluster_t)1)) - ntfs_error(__FUNCTION__ "(): Error deallocating cluster in " - "error code path. You should run chkdsk.\n"); + ntfs_error("%s(): Error deallocating cluster in error code " + "path. You should run chkdsk.\n", __FUNCTION__); switch (have_allocated_mftbmp & 3) { case 1: /* Delete the last lcn from the last run of mftbmp. */ @@ -2111,10 +2116,10 @@ ntfs_vfree(rl); bmp->d.r.runlist = rl = rlt; } else - ntfs_error(__FUNCTION__ "(): Error " + ntfs_error("%s(): Error " "reallocating memory in error " "code path. This should be " - "harmless.\n"); + "harmless.\n", __FUNCTION__); } bmp->d.r.runlist[bmp->d.r.len].lcn = (ntfs_cluster_t)-1; bmp->d.r.runlist[bmp->d.r.len].len = (ntfs_cluster_t)0; @@ -2256,7 +2261,7 @@ err = ntfs_alloc_mft_record(vol, &(result->i_number)); if (err) { if (err == -ENOSPC) - ntfs_error(__FUNCTION__ "(): No free inodes.\n"); + ntfs_error("%s(): No free inodes.\n", __FUNCTION__); return err; } /* Get the sequence number. 
*/ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/ntfs/super.c linux.22-ac2/fs/ntfs/super.c --- linux.vanilla/fs/ntfs/super.c 2001-09-08 20:24:40.000000000 +0100 +++ linux.22-ac2/fs/ntfs/super.c 2003-08-28 22:14:30.000000000 +0100 @@ -639,13 +639,13 @@ int rlpos = 0, rlsize, buf_size, err = 0; ntfs_io io; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Entering with *location = " - "0x%x, *count = 0x%x, zone = %s_ZONE.\n", *location, - *count, zone == DATA_ZONE ? "DATA" : "MFT"); + ntfs_debug(DEBUG_OTHER, "%s(): Entering with *location = 0x%x, " + "*count = 0x%x, zone = %s_ZONE.\n", __FUNCTION__, + *location, *count, zone == DATA_ZONE ? "DATA" : "MFT"); buf = (char*)__get_free_page(GFP_NOFS); if (!buf) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Returning " - "-ENOMEM.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Returning -ENOMEM.\n", + __FUNCTION__); return -ENOMEM; } io.fn_put = ntfs_put; @@ -721,101 +721,101 @@ clusters = *count; rlpos = rlsize = 0; if (*count <= 0) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): *count <= 0, " - "returning -EINVAL.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): *count <= 0, " + "returning -EINVAL.\n", __FUNCTION__); err = -EINVAL; goto err_ret; } while (1) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Start of outer while " + ntfs_debug(DEBUG_OTHER, "%s(): Start of outer while " "loop: done_zones = 0x%x, search_zone = %i, " "pass = %i, zone_start = 0x%x, zone_end = " "0x%x, initial_location = 0x%x, buf_pos = " "0x%x, rlpos = %i, rlsize = %i.\n", - done_zones, search_zone, pass, zone_start, - zone_end, initial_location, buf_pos, rlpos, - rlsize); + __FUNCTION__, done_zones, search_zone, pass, + zone_start, zone_end, initial_location, buf_pos, + rlpos, rlsize); /* Loop until we run out of free clusters. 
*/ io.param = buf; io.size = PAGE_SIZE; io.do_read = 1; last_read_pos = buf_pos >> 3; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): last_read_pos = " - "0x%x.\n", last_read_pos); + ntfs_debug(DEBUG_OTHER, "%s(): last_read_pos = 0x%x.\n", + __FUNCTION__, last_read_pos); err = ntfs_readwrite_attr(vol->bitmap, data, last_read_pos, &io); if (err) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "ntfs_read_attr failed with error " - "code %i, going to err_ret.\n", -err); + ntfs_debug(DEBUG_OTHER, "%s(): ntfs_read_attr failed " + "with error code %i, going to " + "err_ret.\n", __FUNCTION__, -err); goto err_ret; } if (!io.size) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): !io.size, " - "going to zone_pass_done.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): !io.size, going to " + "zone_pass_done.\n", __FUNCTION__); goto zone_pass_done; } buf_size = io.size << 3; lcn = buf_pos & 7; buf_pos &= ~7; need_writeback = 0; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Before inner while " + ntfs_debug(DEBUG_OTHER, "%s(): Before inner while " "loop: buf_size = 0x%x, lcn = 0x%x, buf_pos = " - "0x%x, need_writeback = %i.\n", buf_size, lcn, - buf_pos, need_writeback); + "0x%x, need_writeback = %i.\n", __FUNCTION__, + buf_size, lcn, buf_pos, need_writeback); while (lcn < buf_size && lcn + buf_pos < zone_end) { byte = buf + (lcn >> 3); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): In inner " - "while loop: buf_size = 0x%x, lcn = " - "0x%x, buf_pos = 0x%x, need_writeback " - "= %i, byte ofs = 0x%x, *byte = " - "0x%x.\n", buf_size, lcn, buf_pos, - need_writeback, lcn >> 3, *byte); + ntfs_debug(DEBUG_OTHER, "%s(): In inner while loop: " + "buf_size = 0x%x, lcn = 0x%x, buf_pos " + "= 0x%x, need_writeback = %i, byte ofs " + "= 0x%x, *byte = 0x%x.\n", __FUNCTION__, + buf_size, lcn, buf_pos, need_writeback, + lcn >> 3, *byte); /* Skip full bytes. */ if (*byte == 0xff) { lcn += 8; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "continuing while loop 1.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): continuing while" + " loop 1.\n", __FUNCTION__); continue; } bit = 1 << (lcn & 7); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): bit = %i.\n", - bit); + ntfs_debug(DEBUG_OTHER, "%s(): bit = %i.\n", + __FUNCTION__, bit); /* If the bit is already set, go onto the next one. */ if (*byte & bit) { lcn++; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "continuing while loop 2.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): continuing while" + " loop 2.\n", __FUNCTION__); continue; } /* Allocate the bitmap bit. */ *byte |= bit; /* We need to write this bitmap buffer back to disk! */ need_writeback = 1; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): *byte = " - "0x%x, need_writeback = %i.\n", *byte, - need_writeback); + ntfs_debug(DEBUG_OTHER, "%s(): *byte = 0x%x, " + "need_writeback = %i.\n", __FUNCTION__, + *byte, need_writeback); /* Reallocate memory if necessary. */ if ((rlpos + 2) * sizeof(ntfs_runlist) >= rlsize) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Reallocating space.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Reallocating " + "space.\n", __FUNCTION__); /* Setup first free bit return value. 
*/ if (!rl2) { *location = lcn + buf_pos; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): *location = " - "0x%x.\n", *location); + ntfs_debug(DEBUG_OTHER, "%s(): " + "*location = 0x%x.\n", + __FUNCTION__, + *location); } rlsize += PAGE_SIZE; rlt = ntfs_vmalloc(rlsize); if (!rlt) { err = -ENOMEM; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Failed to " - "allocate memory, " + ntfs_debug(DEBUG_OTHER, "%s(): Failed " + "to allocate memory, " "returning -ENOMEM, " - "going to " - "wb_err_ret.\n"); + "going to wb_err_ret.\n", + __FUNCTION__); goto wb_err_ret; } if (rl2) { @@ -824,45 +824,46 @@ ntfs_vfree(rl2); } rl2 = rlt; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Reallocated memory, rlsize = " - "0x%x.\n", rlsize); + ntfs_debug(DEBUG_OTHER, "%s(): Reallocated " + "memory, rlsize = 0x%x.\n", + __FUNCTION__, rlsize); } /* * Coalesce with previous run if adjacent LCNs. * Otherwise, append a new run. */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Adding run " - "(lcn 0x%x, len 0x%x), prev_lcn = " - "0x%x, lcn = 0x%x, buf_pos = 0x%x, " - "prev_run_len = 0x%x, rlpos = %i.\n", + ntfs_debug(DEBUG_OTHER, "%s(): Adding run (lcn 0x%x, " + "len 0x%x), prev_lcn = 0x%x, lcn = " + "0x%x, buf_pos = 0x%x, prev_run_len = " + "0x%x, rlpos = %i.\n", __FUNCTION__, lcn + buf_pos, 1, prev_lcn, lcn, buf_pos, prev_run_len, rlpos); if (prev_lcn == lcn + buf_pos - prev_run_len && rlpos) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Coalescing to run (lcn 0x%x, " - "len 0x%x).\n", + ntfs_debug(DEBUG_OTHER, "%s(): Coalescing to " + "run (lcn 0x%x, len 0x%x).\n", + __FUNCTION__, rl2[rlpos - 1].lcn, rl2[rlpos - 1].len); rl2[rlpos - 1].len = ++prev_run_len; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Run now (lcn 0x%x, len 0x%x), " - "prev_run_len = 0x%x.\n", + ntfs_debug(DEBUG_OTHER, "%s(): Run now (lcn " + "0x%x, len 0x%x), prev_run_len " + "= 0x%x.\n", __FUNCTION__, rl2[rlpos - 1].lcn, rl2[rlpos - 1].len, prev_run_len); } else { if (rlpos) - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Adding new run, " - "(previous run lcn " - "0x%x, len 0x%x).\n", + ntfs_debug(DEBUG_OTHER, "%s(): Adding " + "new run, (previous " + "run lcn 0x%x, " + "len 0x%x).\n", + __FUNCTION__, rl2[rlpos - 1].lcn, rl2[rlpos - 1].len); else - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Adding new run, " - "is first run.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Adding " + "new run, is first " + "run.\n", __FUNCTION__); rl2[rlpos].lcn = prev_lcn = lcn + buf_pos; rl2[rlpos].len = prev_run_len = (ntfs_cluster_t)1; @@ -878,17 +879,16 @@ * during the respective zone switches. */ tc = lcn + buf_pos + 1; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Done. Updating current zone " - "position, tc = 0x%x, " - "search_zone = %i.\n", tc, - search_zone); + ntfs_debug(DEBUG_OTHER, "%s(): Done. 
Updating " + "current zone position, tc = " + "0x%x, search_zone = %i.\n", + __FUNCTION__, tc, search_zone); switch (search_zone) { case 1: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Before checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): Before checks, " "vol->mft_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->mft_zone_pos); if (tc >= vol->mft_zone_end) { vol->mft_zone_pos = @@ -901,17 +901,17 @@ tc > vol->mft_zone_pos) && tc >= vol->mft_lcn) vol->mft_zone_pos = tc; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): After checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): After checks, " "vol->mft_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->mft_zone_pos); break; case 2: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Before checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): Before checks, " "vol->data1_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->data1_zone_pos); if (tc >= vol->nr_clusters) vol->data1_zone_pos = @@ -921,17 +921,17 @@ tc > vol->data1_zone_pos) && tc >= vol->mft_zone_end) vol->data1_zone_pos = tc; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): After checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): After checks, " "vol->data1_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->data1_zone_pos); break; case 4: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Before checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): Before checks, " "vol->data2_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->data2_zone_pos); if (tc >= vol->mft_zone_start) vol->data2_zone_pos = @@ -940,52 +940,52 @@ vol->data2_zone_pos || tc > vol->data2_zone_pos) vol->data2_zone_pos = tc; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): After checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): After checks, " "vol->data2_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->data2_zone_pos); break; default: BUG(); } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Going to done_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Going to " + "done_ret.\n", __FUNCTION__); goto done_ret; } lcn++; } buf_pos += buf_size; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After inner while " + ntfs_debug(DEBUG_OTHER, "%s(): After inner while " "loop: buf_size = 0x%x, lcn = 0x%x, buf_pos = " - "0x%x, need_writeback = %i.\n", buf_size, lcn, - buf_pos, need_writeback); + "0x%x, need_writeback = %i.\n", __FUNCTION__, + buf_size, lcn, buf_pos, need_writeback); if (need_writeback) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Writing " - "back.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Writing back.\n", + __FUNCTION__); need_writeback = 0; io.param = buf; io.do_read = 0; err = ntfs_readwrite_attr(vol->bitmap, data, last_read_pos, &io); if (err) { - ntfs_error(__FUNCTION__ "(): Bitmap writeback " - "failed in read next buffer " - "code path with error code " - "%i.\n", -err); + ntfs_error("%s(): Bitmap writeback failed " + "in read next buffer code " + "path with error code %i.\n", + __FUNCTION__, -err); goto err_ret; } } if (buf_pos < zone_end) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Continuing " + ntfs_debug(DEBUG_OTHER, "%s(): Continuing " "outer while loop, buf_pos = 0x%x, " - "zone_end = 0x%x.\n", buf_pos, - zone_end); + "zone_end = 0x%x.\n", __FUNCTION__, + buf_pos, zone_end); continue; } zone_pass_done: /* Finished with the current zone pass. 
*/ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At zone_pass_done, " - "pass = %i.\n", pass); + ntfs_debug(DEBUG_OTHER, "%s(): At zone_pass_done, pass = %i.\n", + __FUNCTION__, pass); if (pass == 1) { /* * Now do pass 2, scanning the first part of the zone @@ -1010,36 +1010,37 @@ if (zone_end < zone_start) zone_end = zone_start; buf_pos = zone_start; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Continuing " + ntfs_debug(DEBUG_OTHER, "%s(): Continuing " "outer while loop, pass = 2, " "zone_start = 0x%x, zone_end = 0x%x, " - "buf_pos = 0x%x.\n"); + "buf_pos = 0x%x.\n", __FUNCTION__, + zone_start, zone_end, buf_pos); continue; } /* pass == 2 */ done_zones_check: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At done_zones_check, " + ntfs_debug(DEBUG_OTHER, "%s(): At done_zones_check, " "search_zone = %i, done_zones before = 0x%x, " - "done_zones after = 0x%x.\n", + "done_zones after = 0x%x.\n", __FUNCTION__, search_zone, done_zones, done_zones | search_zone); done_zones |= search_zone; if (done_zones < 7) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Switching " - "zone.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Switching zone.\n", + __FUNCTION__); /* Now switch to the next zone we haven't done yet. */ pass = 1; switch (search_zone) { case 1: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Switching from mft zone to " - "data1 zone.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Switching from " + "mft zone to data1 zone.\n", + __FUNCTION__); /* Update mft zone position. */ if (rlpos) { ntfs_cluster_t tc; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Before checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): Before checks, " "vol->mft_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->mft_zone_pos); tc = rl2[rlpos - 1].lcn + rl2[rlpos - 1].len; @@ -1054,10 +1055,10 @@ tc > vol->mft_zone_pos) && tc >= vol->mft_lcn) vol->mft_zone_pos = tc; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): After checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): After checks, " "vol->mft_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->mft_zone_pos); } /* Switch from mft zone to data1 zone. */ @@ -1074,16 +1075,16 @@ } break; case 2: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Switching from data1 zone to " - "data2 zone.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Switching from " + "data1 zone to data2 zone.\n", + __FUNCTION__); /* Update data1 zone position. */ if (rlpos) { ntfs_cluster_t tc; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Before checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): Before checks, " "vol->data1_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->data1_zone_pos); tc = rl2[rlpos - 1].lcn + rl2[rlpos - 1].len; @@ -1095,10 +1096,10 @@ tc > vol->data1_zone_pos) && tc >= vol->mft_zone_end) vol->data1_zone_pos = tc; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): After checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): After checks, " "vol->data1_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->data1_zone_pos); } /* Switch from data1 zone to data2 zone. */ @@ -1116,16 +1117,16 @@ } break; case 4: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Switching from data2 zone to " - "data1 zone.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Switching from " + "data2 zone to data1 zone.\n", + __FUNCTION__); /* Update data2 zone position. 
*/ if (rlpos) { ntfs_cluster_t tc; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): Before checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): Before checks, " "vol->data2_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->data2_zone_pos); tc = rl2[rlpos - 1].lcn + rl2[rlpos - 1].len; @@ -1136,10 +1137,10 @@ vol->data2_zone_pos || tc > vol->data2_zone_pos) vol->data2_zone_pos = tc; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ - "(): After checks, " + ntfs_debug(DEBUG_OTHER, + "%s(): After checks, " "vol->data2_zone_pos = " - "0x%x.\n", + "0x%x.\n", __FUNCTION__, vol->data2_zone_pos); } /* Switch from data2 zone to data1 zone. */ @@ -1147,45 +1148,45 @@ default: BUG(); } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After zone " - "switch, search_zone = %i, pass = %i, " + ntfs_debug(DEBUG_OTHER, "%s(): After zone switch, " + "search_zone = %i, pass = %i, " "initial_location = 0x%x, zone_start " "= 0x%x, zone_end = 0x%x.\n", - search_zone, pass, initial_location, - zone_start, zone_end); + __FUNCTION__, search_zone, pass, + initial_location, zone_start, zone_end); buf_pos = zone_start; if (zone_start == zone_end) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): " - "Empty zone, going to " - "done_zones_check.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Empty zone, " + "going to done_zones_check.\n", + __FUNCTION__); /* Empty zone. Don't bother searching it. */ goto done_zones_check; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Continuing " - "outer while loop.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Continuing outer while " + "loop.\n", __FUNCTION__); continue; } /* done_zones == 7 */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): All zones are " - "finished.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): All zones are finished.\n", + __FUNCTION__); /* * All zones are finished! If DATA_ZONE, shrink mft zone. If * MFT_ZONE, we have really run out of space. */ mft_zone_size = vol->mft_zone_end - vol->mft_zone_start; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): vol->mft_zone_start " - "= 0x%x, vol->mft_zone_end = 0x%x, " - "mft_zone_size = 0x%x.\n", vol->mft_zone_start, + ntfs_debug(DEBUG_OTHER, "%s(): vol->mft_zone_start = 0x%x, " + "vol->mft_zone_end = 0x%x, mft_zone_size = " + "0x%x.\n", __FUNCTION__, vol->mft_zone_start, vol->mft_zone_end, mft_zone_size); if (zone == MFT_ZONE || mft_zone_size <= (ntfs_cluster_t)0) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): No free " - "clusters left, returning -ENOSPC, " - "going to fail_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): No free clusters left, " + "returning -ENOSPC, going to " + "fail_ret.\n", __FUNCTION__); /* Really no more space left on device. 
*/ err = -ENOSPC; goto fail_ret; } /* zone == DATA_ZONE && mft_zone_size > 0 */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Shrinking mft " - "zone.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Shrinking mft zone.\n", + __FUNCTION__); zone_end = vol->mft_zone_end; mft_zone_size >>= 1; if (mft_zone_size > (ntfs_cluster_t)0) @@ -1203,71 +1204,72 @@ search_zone = 2; pass = 2; done_zones &= ~2; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After shrinking mft " + ntfs_debug(DEBUG_OTHER, "%s(): After shrinking mft " "zone, mft_zone_size = 0x%x, " "vol->mft_zone_start = 0x%x, vol->mft_zone_end " "= 0x%x, vol->mft_zone_pos = 0x%x, search_zone " "= 2, pass = 2, dones_zones = 0x%x, zone_start " "= 0x%x, zone_end = 0x%x, vol->data1_zone_pos " "= 0x%x, continuing outer while loop.\n", - mft_zone_size, vol->mft_zone_start, - vol->mft_zone_end, vol->mft_zone_pos, - search_zone, pass, done_zones, zone_start, + __FUNCTION__, mft_zone_size, + vol->mft_zone_start, vol->mft_zone_end, + vol->mft_zone_pos, done_zones, zone_start, zone_end, vol->data1_zone_pos); } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After outer while loop.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): After outer while loop.\n", + __FUNCTION__); done_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At done_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At done_ret.\n", __FUNCTION__); rl2[rlpos].lcn = (ntfs_cluster_t)-1; rl2[rlpos].len = (ntfs_cluster_t)0; *rl = rl2; *rl_len = rlpos; if (need_writeback) { - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Writing back.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Writing back.\n", __FUNCTION__); need_writeback = 0; io.param = buf; io.do_read = 0; err = ntfs_readwrite_attr(vol->bitmap, data, last_read_pos, &io); if (err) { - ntfs_error(__FUNCTION__ "(): Bitmap writeback failed " - "in done code path with error code " - "%i.\n", -err); + ntfs_error("%s(): Bitmap writeback failed in done " + "code path with error code %i.\n", + __FUNCTION__, -err); goto err_ret; } - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Wrote 0x%Lx bytes.\n", - io.size); + ntfs_debug(DEBUG_OTHER, "%s(): Wrote 0x%Lx bytes.\n", + __FUNCTION__, io.size); } done_fail_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At done_fail_ret (follows " - "done_ret).\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At done_fail_ret (follows done_ret).\n", + __FUNCTION__); unlock_kernel(); free_page((unsigned long)buf); if (err) - ntfs_debug(DEBUG_FILE3, __FUNCTION__ "(): Failed to allocate " + ntfs_debug(DEBUG_FILE3, "%s(): Failed to allocate " "clusters. Returning with error code %i.\n", - -err); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Syncing $Bitmap inode.\n"); + __FUNCTION__, -err); + ntfs_debug(DEBUG_OTHER, "%s(): Syncing $Bitmap inode.\n", __FUNCTION__); if (ntfs_update_inode(vol->bitmap)) - ntfs_error(__FUNCTION__ "(): Failed to sync inode $Bitmap. " - "Continuing anyway.\n"); - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Returning with code %i.\n", + ntfs_error("%s(): Failed to sync inode $Bitmap. " + "Continuing anyway.\n", __FUNCTION__); + ntfs_debug(DEBUG_OTHER, "%s(): Returning with code %i.\n", __FUNCTION__, err); return err; fail_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At fail_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At fail_ret.\n", __FUNCTION__); if (rl2) { if (err == -ENOSPC) { /* Return first free lcn and count of free clusters. 
*/ *location = rl2[0].lcn; *count -= clusters; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): err = " - "-ENOSPC, *location = 0x%x, *count = " - "0x%x.\n", *location, *count); + ntfs_debug(DEBUG_OTHER, "%s(): err = -ENOSPC, " + "*location = 0x%x, *count = 0x%x.\n", + __FUNCTION__, *location, *count); } /* Deallocate all allocated clusters. */ - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Deallocating " - "allocated clusters.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Deallocating allocated " + "clusters.\n", __FUNCTION__); ntfs_deallocate_clusters(vol, rl2, rlpos); /* Free the runlist. */ ntfs_vfree(rl2); @@ -1276,34 +1278,35 @@ /* Nothing free at all. */ *location = vol->data1_zone_pos; /* Irrelevant... */ *count = 0; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): No space " - "left at all, err = -ENOSPC, *location " - "= 0x%x, *count = 0.\n", *location); + ntfs_debug(DEBUG_OTHER, "%s(): No space left at all, " + "err = -ENOSPC, *location = 0x%x, " + "*count = 0.\n", + __FUNCTION__, *location); } } *rl = NULL; *rl_len = 0; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): *rl = NULL, *rl_len = 0, " - "going to done_fail_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): *rl = NULL, *rl_len = 0, " + "going to done_fail_ret.\n", __FUNCTION__); goto done_fail_ret; wb_err_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At wb_err_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At wb_err_ret.\n", __FUNCTION__); if (need_writeback) { int __err; - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): Writing back.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): Writing back.\n", __FUNCTION__); io.param = buf; io.do_read = 0; __err = ntfs_readwrite_attr(vol->bitmap, data, last_read_pos, &io); if (__err) - ntfs_error(__FUNCTION__ "(): Bitmap writeback failed " - "in error code path with error code " - "%i.\n", -__err); + ntfs_error("%s(): Bitmap writeback failed in error " + "code path with error code %i.\n", + __FUNCTION__, -__err); need_writeback = 0; } err_ret: - ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): At err_ret, *location = -1, " - "*count = 0, going to fail_ret.\n"); + ntfs_debug(DEBUG_OTHER, "%s(): At err_ret, *location = -1, " + "*count = 0, going to fail_ret.\n", __FUNCTION__); *location = -1; *count = 0; goto fail_ret; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/proc/array.c linux.22-ac2/fs/proc/array.c --- linux.vanilla/fs/proc/array.c 2003-06-14 00:11:39.000000000 +0100 +++ linux.22-ac2/fs/proc/array.c 2003-06-29 16:09:17.000000000 +0100 @@ -338,16 +338,15 @@ /* scale priority and nice values from timeslices to -20..20 */ /* to make it look like a "normal" Unix priority/nice value */ - priority = task->counter; - priority = 20 - (priority * 10 + DEF_COUNTER / 2) / DEF_COUNTER; - nice = task->nice; + priority = task_prio(task); + nice = task_nice(task); read_lock(&tasklist_lock); ppid = task->pid ? 
task->p_opptr->pid : 0; read_unlock(&tasklist_lock); res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \ -%lu %lu %lu %lu %lu %lu %lu %lu %d %d\n", +%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", task->pid, task->comm, state, @@ -390,7 +389,9 @@ task->nswap, task->cnswap, task->exit_signal, - task->processor); + task_cpu(task), + task->rt_priority, + task->policy); if(mm) mmput(mm); return res; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/proc/generic.c linux.22-ac2/fs/proc/generic.c --- linux.vanilla/fs/proc/generic.c 2003-06-14 00:11:39.000000000 +0100 +++ linux.22-ac2/fs/proc/generic.c 2003-07-09 13:45:05.000000000 +0100 @@ -485,12 +485,12 @@ return ent; } -struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) +struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, + struct proc_dir_entry *parent) { struct proc_dir_entry *ent; - ent = proc_create(&parent,name, - (S_IFDIR | S_IRUGO | S_IXUGO),2); + ent = proc_create(&parent, name, S_IFDIR | mode, 2); if (ent) { ent->proc_fops = &proc_dir_operations; ent->proc_iops = &proc_dir_inode_operations; @@ -503,6 +503,12 @@ return ent; } +struct proc_dir_entry *proc_mkdir(const char *name, + struct proc_dir_entry *parent) +{ + return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent); +} + struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/proc/proc_misc.c linux.22-ac2/fs/proc/proc_misc.c --- linux.vanilla/fs/proc/proc_misc.c 2003-08-28 16:45:42.000000000 +0100 +++ linux.22-ac2/fs/proc/proc_misc.c 2003-06-29 16:09:17.000000000 +0100 @@ -107,11 +107,11 @@ a = avenrun[0] + (FIXED_1/200); b = avenrun[1] + (FIXED_1/200); c = avenrun[2] + (FIXED_1/200); - len = sprintf(page,"%d.%02d %d.%02d %d.%02d %d/%d %d\n", + len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n", LOAD_INT(a), LOAD_FRAC(a), LOAD_INT(b), LOAD_FRAC(b), LOAD_INT(c), LOAD_FRAC(c), - nr_running, nr_threads, last_pid); + nr_running(), nr_threads, last_pid); return proc_calc_metrics(page, start, off, count, eof, len); } @@ -123,7 +123,7 @@ int len; uptime = jiffies; - idle = init_tasks[0]->times.tms_utime + init_tasks[0]->times.tms_stime; + idle = init_task.times.tms_utime + init_task.times.tms_stime; /* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but that would overflow about every five days at HZ == 100. @@ -156,7 +156,11 @@ struct sysinfo i; int len; int pg_size ; + int committed; + /* FIXME: needs to be in headers */ + extern atomic_t vm_committed_space; + /* * display in kilobytes. 
*/ @@ -165,6 +169,7 @@ si_meminfo(&i); si_swapinfo(&i); pg_size = atomic_read(&page_cache_size) - i.bufferram ; + committed = atomic_read(&vm_committed_space); len = sprintf(page, " total: used: free: shared: buffers: cached:\n" "Mem: %8Lu %8Lu %8Lu %8Lu %8Lu %8Lu\n" @@ -371,11 +376,11 @@ } } - proc_sprintf(page, &off, &len, - "\nctxt %u\n" + len += sprintf(page + len, + "\nctxt %lu\n" "btime %lu\n" "processes %lu\n", - kstat.context_swtch, + nr_context_switches(), xtime.tv_sec - jif / HZ, total_forks); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/proc/proc_tty.c linux.22-ac2/fs/proc/proc_tty.c --- linux.vanilla/fs/proc/proc_tty.c 2000-04-21 23:17:57.000000000 +0100 +++ linux.22-ac2/fs/proc/proc_tty.c 2003-08-13 14:21:21.000000000 +0100 @@ -16,7 +16,7 @@ extern struct tty_driver *tty_drivers; /* linked list of tty drivers */ extern struct tty_ldisc ldiscs[]; - +extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, struct proc_dir_entry *parent); static int tty_drivers_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data); @@ -128,7 +128,7 @@ } /* - * Thsi function is called by register_tty_driver() to handle + * This function is called by tty_register_driver() to handle * registering the driver's /proc handler into /proc/tty/driver/ */ void proc_tty_register_driver(struct tty_driver *driver) @@ -151,7 +151,7 @@ } /* - * This function is called by unregister_tty_driver() + * This function is called by tty_unregister_driver() */ void proc_tty_unregister_driver(struct tty_driver *driver) { @@ -174,7 +174,13 @@ if (!proc_mkdir("tty", 0)) return; proc_tty_ldisc = proc_mkdir("tty/ldisc", 0); - proc_tty_driver = proc_mkdir("tty/driver", 0); + /* + * /proc/tty/driver/serial reveals the exact character counts for + * serial links which is just too easy to abuse for inferring + * password lengths and inter-keystroke timings during password + * entry. + */ + proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR | S_IXUSR, 0); create_proc_read_entry("tty/ldiscs", 0, 0, tty_ldiscs_read_proc,NULL); create_proc_read_entry("tty/drivers", 0, 0, tty_drivers_read_proc,NULL); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/reiserfs/buffer2.c linux.22-ac2/fs/reiserfs/buffer2.c --- linux.vanilla/fs/reiserfs/buffer2.c 2003-08-28 16:45:43.000000000 +0100 +++ linux.22-ac2/fs/reiserfs/buffer2.c 2003-07-06 14:06:59.000000000 +0100 @@ -51,11 +51,11 @@ struct buffer_head * reiserfs_bread (struct super_block *super, int n_block, int n_size) { struct buffer_head *result; - PROC_EXP( unsigned int ctx_switches = kstat.context_swtch ); + PROC_EXP( unsigned int ctx_switches = nr_context_switches(); ); result = bread (super -> s_dev, n_block, n_size); PROC_INFO_INC( super, breads ); - PROC_EXP( if( kstat.context_swtch != ctx_switches ) + PROC_EXP( if( nr_context_switches() != ctx_switches ) PROC_INFO_INC( super, bread_miss ) ); return result; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/seq_file.c linux.22-ac2/fs/seq_file.c --- linux.vanilla/fs/seq_file.c 2003-06-14 00:11:39.000000000 +0100 +++ linux.22-ac2/fs/seq_file.c 2003-06-29 16:09:21.000000000 +0100 @@ -1,7 +1,7 @@ /* * linux/fs/seq_file.c * - * helper functions for making syntetic files from sequences of records. + * helper functions for making synthetic files from sequences of records. * initial implementation -- AV, Oct 2001. 
*/ @@ -10,6 +10,7 @@ #include #include +#include /** * seq_open - initialize sequential file @@ -214,7 +215,7 @@ while ((retval=traverse(m, offset)) == -EAGAIN) ; if (retval) { - /* with extreme perjudice... */ + /* with extreme prejudice... */ file->f_pos = 0; m->index = 0; m->count = 0; @@ -249,7 +250,7 @@ * @s: string * @esc: set of characters that need escaping * - * Puts string into buffer, replacing each occurence of character from + * Puts string into buffer, replacing each occurrence of character from * @esc with usual octal escape. Returns 0 in case of success, -1 - in * case of overflow. */ @@ -295,3 +296,45 @@ m->count = m->size; return -1; } + +static void *single_start(struct seq_file *p, loff_t *pos) +{ + return NULL + (*pos == 0); +} + +static void *single_next(struct seq_file *p, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void single_stop(struct seq_file *p, void *v) +{ +} + +int single_open(struct file *file, int (*show)(struct seq_file *, void*), void *data) +{ + struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL); + int res = -ENOMEM; + + if (op) { + op->start = single_start; + op->next = single_next; + op->stop = single_stop; + op->show = show; + res = seq_open(file, op); + if (!res) + ((struct seq_file *)file->private_data)->private = data; + else + kfree(op); + } + return res; +} + +int single_release(struct inode *inode, struct file *file) +{ + struct seq_operations *op = ((struct seq_file *)file->private_data)->op; + int res = seq_release(inode, file); + kfree(op); + return res; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/Makefile linux.22-ac2/fs/xfs/linux/Makefile --- linux.vanilla/fs/xfs/linux/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/Makefile 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,63 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. +# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# +# Makefile for XFS on Linux. + +EXTRA_CFLAGS += -I.. 
-funsigned-char + +ifeq ($(CONFIG_XFS_DEBUG),y) + EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG -DXFSDEBUG +endif + +O_TARGET := linux_xfs.o +ifneq ($(MAKECMDGOALS),modules_install) + obj-m := $(O_TARGET) +endif + +export-objs := xfs_globals.o + +obj-$(CONFIG_PROC_FS) += xfs_stats.o +obj-$(CONFIG_SYSCTL) += xfs_sysctl.o +obj-y += xfs_aops.o \ + xfs_behavior.o \ + xfs_file.o \ + xfs_fs_subr.o \ + xfs_globals.o \ + xfs_ioctl.o \ + xfs_iomap.o \ + xfs_iops.o \ + xfs_lrw.o \ + xfs_syncd.o \ + xfs_super.o \ + xfs_vfs.o \ + xfs_vnode.o + +include $(TOPDIR)/Rules.make diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_aops.c linux.22-ac2/fs/xfs/linux/xfs_aops.c --- linux.vanilla/fs/xfs/linux/xfs_aops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_aops.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1214 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_trans.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_error.h" +#include "xfs_rw.h" +#include + +STATIC void convert_page(struct inode *, struct page *, + page_buf_bmap_t *, void *, int, int); + +void +linvfs_unwritten_done( + struct buffer_head *bh, + int uptodate) +{ + page_buf_t *pb = (page_buf_t *)bh->b_private; + + ASSERT(buffer_unwritten(bh)); + bh->b_end_io = NULL; + clear_bit(BH_Unwritten, &bh->b_state); + if (!uptodate) + pagebuf_ioerror(pb, -EIO); + if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { + pagebuf_iodone(pb, 1, 1); + } + end_buffer_io_async(bh, uptodate); +} + +/* + * Issue transactions to convert a buffer range from unwritten + * to written extents. 
+ */ +STATIC void +linvfs_unwritten_conv( + xfs_buf_t *bp) +{ + vnode_t *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *); + int error; + + if (atomic_read(&bp->pb_hold) < 1) + BUG(); + + VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp), + BMAP_UNWRITTEN, NULL, NULL, error); + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + XFS_BUF_UNDATAIO(bp); + pagebuf_iodone(bp, 0, 0); +} + +STATIC int +map_blocks( + struct inode *inode, + loff_t offset, + ssize_t count, + page_buf_bmap_t *pbmapp, + int flags) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error, nmaps = 1; + + if (((flags & (BMAP_DIRECT|BMAP_SYNC)) == BMAP_DIRECT) && + (offset >= inode->i_size)) + count = max_t(ssize_t, count, XFS_WRITE_IO_LOG); +retry: + VOP_BMAP(vp, offset, count, flags, pbmapp, &nmaps, error); + if ((error == EAGAIN) || (error == EIO)) + return -error; + if (unlikely((flags & (BMAP_WRITE|BMAP_DIRECT)) == + (BMAP_WRITE|BMAP_DIRECT) && nmaps && + (pbmapp->pbm_flags & PBMF_DELAY))) { + flags = BMAP_ALLOCATE; + goto retry; + } + if (flags & (BMAP_WRITE|BMAP_ALLOCATE)) { + VMODIFY(vp); + } + return -error; +} + +/* + * match_offset_to_mapping + * Finds the corresponding mapping in block @map array of the + * given @offset within a @page. + */ +STATIC page_buf_bmap_t * +match_offset_to_mapping( + struct page *page, + page_buf_bmap_t *map, + unsigned long offset) +{ + loff_t full_offset; /* offset from start of file */ + + ASSERT(offset < PAGE_CACHE_SIZE); + + full_offset = page->index; /* NB: using 64bit number */ + full_offset <<= PAGE_CACHE_SHIFT; /* offset from file start */ + full_offset += offset; /* offset from page start */ + + if (full_offset < map->pbm_offset) + return NULL; + if (map->pbm_offset + map->pbm_bsize > full_offset) + return map; + return NULL; +} + +STATIC void +map_buffer_at_offset( + struct page *page, + struct buffer_head *bh, + unsigned long offset, + int block_bits, + page_buf_bmap_t *mp) +{ + page_buf_daddr_t bn; + loff_t delta; + int sector_shift; + + ASSERT(!(mp->pbm_flags & PBMF_HOLE)); + ASSERT(!(mp->pbm_flags & PBMF_DELAY)); + ASSERT(mp->pbm_bn != PAGE_BUF_DADDR_NULL); + + delta = page->index; + delta <<= PAGE_CACHE_SHIFT; + delta += offset; + delta -= mp->pbm_offset; + delta >>= block_bits; + + sector_shift = block_bits - BBSHIFT; + bn = mp->pbm_bn >> sector_shift; + bn += delta; + ASSERT((bn << sector_shift) >= mp->pbm_bn); + + lock_buffer(bh); + bh->b_blocknr = bn; + bh->b_dev = mp->pbm_target->pbr_kdev; + set_bit(BH_Mapped, &bh->b_state); + clear_bit(BH_Delay, &bh->b_state); +} + +/* + * Look for a page at index which is unlocked and contains our + * unwritten extent flagged buffers at its head. Returns page + * locked and with an extra reference count, and length of the + * unwritten extent component on this page that we can write, + * in units of filesystem blocks. 
+ */ +STATIC struct page * +probe_unwritten_page( + struct address_space *mapping, + unsigned long index, + page_buf_bmap_t *mp, + page_buf_t *pb, + unsigned long max_offset, + unsigned long *fsbs) +{ + struct page *page; + + page = find_trylock_page(mapping, index); + if (!page) + return 0; + + if (page->mapping && page_has_buffers(page)) { + struct buffer_head *bh, *head; + unsigned long p_offset = 0; + + *fsbs = 0; + bh = head = page_buffers(page); + do { + if (!buffer_unwritten(bh)) + break; + if (!match_offset_to_mapping(page, mp, p_offset)) + break; + if (p_offset >= max_offset) + break; + set_buffer_unwritten_io(bh); + bh->b_private = pb; + p_offset += bh->b_size; + (*fsbs)++; + } while ((bh = bh->b_this_page) != head); + + if (p_offset) + return page; + } + + unlock_page(page); + return NULL; +} + +/* + * Look for a page at index which is unlocked and not mapped + * yet - clustering for mmap write case. + */ +STATIC unsigned int +probe_unmapped_page( + struct address_space *mapping, + unsigned long index, + unsigned int pg_offset) +{ + struct page *page; + int ret = 0; + + page = find_trylock_page(mapping, index); + if (!page) + return 0; + + if (page->mapping && PageDirty(page)) { + if (page_has_buffers(page)) { + struct buffer_head *bh, *head; + + bh = head = page_buffers(page); + do { + if (buffer_mapped(bh) || !buffer_uptodate(bh)) + break; + ret += bh->b_size; + if (ret >= pg_offset) + break; + } while ((bh = bh->b_this_page) != head); + } else + ret = PAGE_CACHE_SIZE; + } + + unlock_page(page); + return ret; +} + +STATIC unsigned int +probe_unmapped_cluster( + struct inode *inode, + struct page *startpage, + struct buffer_head *bh, + struct buffer_head *head) +{ + unsigned long tindex, tlast; + unsigned int len, total = 0; + struct address_space *mapping = inode->i_mapping; + + /* First sum forwards in this page */ + do { + if (buffer_mapped(bh)) + break; + total += bh->b_size; + } while ((bh = bh->b_this_page) != head); + + /* If we reached the end of the page, sum forwards in + * following pages. + */ + if (bh == head) { + tlast = inode->i_size >> PAGE_CACHE_SHIFT; + /* Prune this back to avoid pathological behavior */ + tlast = min(tlast, startpage->index + 64); + for (tindex = startpage->index + 1; tindex < tlast; tindex++) { + len = probe_unmapped_page(mapping, tindex, + PAGE_CACHE_SIZE); + if (!len) + break; + total += len; + } + if ((tindex == tlast) && (inode->i_size & ~PAGE_CACHE_MASK)) { + len = probe_unmapped_page(mapping, tindex, + inode->i_size & ~PAGE_CACHE_MASK); + total += len; + } + } + return total; +} + +/* + * Probe for a given page (index) in the inode and test if it is delayed + * and without unwritten buffers. Returns page locked and with an extra + * reference count. 
+ */ +STATIC struct page * +probe_delalloc_page( + struct inode *inode, + unsigned long index) +{ + struct page *page; + + page = find_trylock_page(inode->i_mapping, index); + if (!page) + return NULL; + + if (page->mapping && page_has_buffers(page)) { + struct buffer_head *bh, *head; + int acceptable = 0; + + bh = head = page_buffers(page); + do { + if (buffer_unwritten(bh)) { + acceptable = 0; + break; + } else if (buffer_delay(bh)) { + acceptable = 1; + } + } while ((bh = bh->b_this_page) != head); + + if (acceptable) + return page; + } + + unlock_page(page); + return NULL; +} + +STATIC int +map_unwritten( + struct inode *inode, + struct page *start_page, + struct buffer_head *head, + struct buffer_head *curr, + unsigned long p_offset, + int block_bits, + page_buf_bmap_t *mp, + int all_bh) +{ + struct buffer_head *bh = curr; + page_buf_bmap_t *tmp; + page_buf_t *pb; + loff_t offset, size; + unsigned long nblocks = 0; + + offset = start_page->index; + offset <<= PAGE_CACHE_SHIFT; + offset += p_offset; + + pb = pagebuf_lookup(mp->pbm_target, + mp->pbm_offset, mp->pbm_bsize, 0); + if (!pb) + return -ENOMEM; + + /* Set the count to 1 initially, this will stop an I/O + * completion callout which happens before we have started + * all the I/O from calling pagebuf_iodone too early. + */ + atomic_set(&pb->pb_io_remaining, 1); + + /* First map forwards in the page consecutive buffers + * covering this unwritten extent + */ + do { + if (!buffer_unwritten(bh)) + break; + tmp = match_offset_to_mapping(start_page, mp, p_offset); + if (!tmp) + break; + map_buffer_at_offset(start_page, bh, p_offset, block_bits, mp); + set_buffer_unwritten_io(bh); + bh->b_private = pb; + p_offset += bh->b_size; + nblocks++; + } while ((bh = bh->b_this_page) != head); + + if (unlikely(nblocks == 0)) { + printk("XFS: bad unwritten extent map: bh=0x%p, mp=0x%p\n", + curr, mp); + BUG(); + } + + atomic_add(nblocks, &pb->pb_io_remaining); + + /* If we reached the end of the page, map forwards in any + * following pages which are also covered by this extent. 
+ */ + if (bh == head) { + struct address_space *mapping = inode->i_mapping; + unsigned long tindex, tlast, bs; + struct page *page; + + tlast = inode->i_size >> PAGE_CACHE_SHIFT; + tlast = min(tlast, start_page->index + pb->pb_page_count - 1); + for (tindex = start_page->index + 1; tindex < tlast; tindex++) { + page = probe_unwritten_page(mapping, tindex, mp, pb, + PAGE_CACHE_SIZE, &bs); + if (!page) + break; + nblocks += bs; + atomic_add(bs, &pb->pb_io_remaining); + convert_page(inode, page, mp, pb, 1, all_bh); + } + + if ((tindex == tlast) && (inode->i_size & ~PAGE_CACHE_MASK)) { + page = probe_unwritten_page(mapping, tindex, mp, pb, + inode->i_size & ~PAGE_CACHE_MASK, &bs); + if (page) { + nblocks += bs; + atomic_add(bs, &pb->pb_io_remaining); + convert_page(inode, page, + mp, pb, 1, all_bh); + } + } + } + + size = nblocks; /* NB: using 64bit number here */ + size <<= block_bits; /* convert fsb's to byte range */ + + XFS_BUF_DATAIO(pb); + XFS_BUF_ASYNC(pb); + XFS_BUF_SET_SIZE(pb, size); + XFS_BUF_SET_OFFSET(pb, offset); + XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode)); + XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_conv); + + if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { + pagebuf_iodone(pb, 1, 1); + } + + return 0; +} + +STATIC void +submit_page( + struct page *page, + struct buffer_head *bh_arr[], + int cnt) +{ + struct buffer_head *bh; + int i; + + if (cnt) { + for (i = 0; i < cnt; i++) { + bh = bh_arr[i]; + set_buffer_async_io(bh); + if (buffer_unwritten(bh)) + set_buffer_unwritten_io(bh); + set_bit(BH_Uptodate, &bh->b_state); + clear_bit(BH_Dirty, &bh->b_state); + } + + for (i = 0; i < cnt; i++) + submit_bh(WRITE, bh_arr[i]); + } else + unlock_page(page); +} + +/* + * Allocate & map buffers for page given the extent map. Write it out. + * except for the original page of a writepage, this is called on + * delalloc/unwritten pages only, for the original page it is possible + * that the page has no mapping at all. + */ +STATIC void +convert_page( + struct inode *inode, + struct page *page, + page_buf_bmap_t *maps, + void *private, + int startio, + int all_bh) +{ + struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head; + page_buf_bmap_t *mp = maps, *tmp; + unsigned long end, offset, end_index; + int i = 0, index = 0; + int bbits = inode->i_blkbits; + + end_index = inode->i_size >> PAGE_CACHE_SHIFT; + if (page->index < end_index) { + end = PAGE_CACHE_SIZE; + } else { + end = inode->i_size & (PAGE_CACHE_SIZE-1); + } + bh = head = page_buffers(page); + do { + offset = i << bbits; + if (!(Page_Uptodate(page) || buffer_uptodate(bh))) + continue; + if (buffer_mapped(bh) && !buffer_delay(bh) && all_bh) { + if (startio && (offset < end)) { + lock_buffer(bh); + bh_arr[index++] = bh; + } + continue; + } + tmp = match_offset_to_mapping(page, mp, offset); + if (!tmp) + continue; + ASSERT(!(tmp->pbm_flags & PBMF_HOLE)); + ASSERT(!(tmp->pbm_flags & PBMF_DELAY)); + + /* If this is a new unwritten extent buffer (i.e. one + * that we haven't passed in private data for, we must + * now map this buffer too. 
+ */ + if (buffer_unwritten(bh) && !bh->b_end_io) { + ASSERT(tmp->pbm_flags & PBMF_UNWRITTEN); + map_unwritten(inode, page, head, bh, + offset, bbits, tmp, all_bh); + } else { + map_buffer_at_offset(page, bh, offset, bbits, tmp); + if (buffer_unwritten(bh)) { + set_buffer_unwritten_io(bh); + bh->b_private = private; + ASSERT(private); + } + } + if (startio && (offset < end)) { + bh_arr[index++] = bh; + } else { + unlock_buffer(bh); + mark_buffer_dirty(bh); + } + } while (i++, (bh = bh->b_this_page) != head); + + submit_page(page, bh_arr, index); +} + +/* + * Convert & write out a cluster of pages in the same extent as defined + * by mp and following the start page. + */ +STATIC void +cluster_write( + struct inode *inode, + unsigned long tindex, + page_buf_bmap_t *mp, + int startio, + int all_bh) +{ + unsigned long tlast; + struct page *page; + + tlast = (mp->pbm_offset + mp->pbm_bsize) >> PAGE_CACHE_SHIFT; + for (; tindex < tlast; tindex++) { + page = probe_delalloc_page(inode, tindex); + if (!page) + break; + convert_page(inode, page, mp, NULL, startio, all_bh); + } +} + +/* + * Calling this without startio set means we are being asked to make a dirty + * page ready for freeing it's buffers. When called with startio set then + * we are coming from writepage. + * + * When called with startio set it is important that we write the WHOLE + * page if possible. + * The bh->b_state's cannot know if any of the blocks or which block for + * that matter are dirty due to mmap writes, and therefore bh uptodate is + * only vaild if the page itself isn't completely uptodate. Some layers + * may clear the page dirty flag prior to calling write page, under the + * assumption the entire page will be written out; by not writing out the + * whole page the page can be reused before all valid dirty data is + * written out. Note: in the case of a page that has been dirty'd by + * mapwrite and but partially setup by block_prepare_write the + * bh->b_states's will not agree and only ones setup by BPW/BCW will have + * valid state, thus the whole page must be written out thing. + */ + +STATIC int +page_state_convert( + struct page *page, + int startio, + int unmapped) /* also implies page uptodate */ +{ + struct inode *inode = page->mapping->host; + struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head; + page_buf_bmap_t *mp, map; + unsigned long p_offset = 0, end_index; + loff_t offset, end_offset; + int len, err, i, cnt = 0; + int flags = startio ? 0 : BMAP_TRYLOCK; + int page_dirty = 1; + + + /* Are we off the end of the file ? */ + end_index = inode->i_size >> PAGE_CACHE_SHIFT; + if (page->index >= end_index) { + unsigned remaining = inode->i_size & (PAGE_CACHE_SIZE-1); + if ((page->index >= end_index+1) || !remaining) { + return -EIO; + } + } + + offset = (loff_t)page->index << PAGE_CACHE_SHIFT; + end_offset = offset + PAGE_CACHE_SIZE; + if (end_offset > inode->i_size) + end_offset = inode->i_size; + + bh = head = page_buffers(page); + mp = NULL; + + len = bh->b_size; + do { + if (!(Page_Uptodate(page) || buffer_uptodate(bh)) && !startio) { + goto next_bh; + } + + if (mp) { + mp = match_offset_to_mapping(page, &map, p_offset); + } + + /* + * First case, map an unwritten extent and prepare for + * extent state conversion transaction on completion. 
+ */ + if (buffer_unwritten(bh)) { + if (!mp) { + err = map_blocks(inode, offset, len, &map, + BMAP_READ|BMAP_IGNSTATE); + if (err) { + goto error; + } + mp = match_offset_to_mapping(page, &map, + p_offset); + } + if (mp) { + if (!bh->b_end_io) { + err = map_unwritten(inode, page, + head, bh, p_offset, + inode->i_blkbits, + mp, unmapped); + if (err) { + goto error; + } + } + if (startio) { + bh_arr[cnt++] = bh; + } else { + unlock_buffer(bh); + mark_buffer_dirty(bh); + } + page_dirty = 0; + } + /* + * Second case, allocate space for a delalloc buffer. + * We can return EAGAIN here in the release page case. + */ + } else if (buffer_delay(bh)) { + if (!mp) { + err = map_blocks(inode, offset, len, &map, + BMAP_ALLOCATE | flags); + if (err) { + goto error; + } + mp = match_offset_to_mapping(page, &map, + p_offset); + } + if (mp) { + map_buffer_at_offset(page, bh, p_offset, + inode->i_blkbits, mp); + if (startio) { + bh_arr[cnt++] = bh; + } else { + unlock_buffer(bh); + mark_buffer_dirty(bh); + } + page_dirty = 0; + } + } else if ((buffer_uptodate(bh) || Page_Uptodate(page)) && + (unmapped || startio)) { + + if (!buffer_mapped(bh)) { + int size; + + /* + * Getting here implies an unmapped buffer + * was found, and we are in a path where we + * need to write the whole page out. + */ + if (!mp) { + size = probe_unmapped_cluster( + inode, page, bh, head); + err = map_blocks(inode, offset, + size, &map, + BMAP_WRITE | BMAP_MMAP); + if (err) { + goto error; + } + mp = match_offset_to_mapping(page, &map, + p_offset); + } + if (mp) { + map_buffer_at_offset(page, + bh, p_offset, + inode->i_blkbits, mp); + if (startio) { + bh_arr[cnt++] = bh; + } else { + unlock_buffer(bh); + mark_buffer_dirty(bh); + } + page_dirty = 0; + } + } else if (startio) { + if (buffer_uptodate(bh) && + !test_and_set_bit(BH_Lock, &bh->b_state)) { + bh_arr[cnt++] = bh; + page_dirty = 0; + } + } + } + +next_bh: + offset += len; + p_offset += len; + bh = bh->b_this_page; + } while (offset < end_offset); + + if (startio) + submit_page(page, bh_arr, cnt); + + if (mp) + cluster_write(inode, page->index + 1, mp, startio, unmapped); + + return page_dirty; + +error: + for (i = 0; i < cnt; i++) { + unlock_buffer(bh_arr[i]); + } + + /* + * If it's delalloc and we have nowhere to put it, + * throw it away, unless the lower layers told + * us to try again. + */ + if (err != -EAGAIN) { + if (!unmapped) { + block_flushpage(page, 0); + } + ClearPageUptodate(page); + } + return err; +} + +STATIC int +linvfs_get_block_core( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + int create, + int direct, + bmapi_flags_t flags) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + page_buf_bmap_t pbmap; + int retpbbm = 1; + int error; + ssize_t size; + loff_t offset = (loff_t)iblock << inode->i_blkbits; + + /* If we are doing writes at the end of the file, + * allocate in chunks + */ + if (create && (offset >= inode->i_size) /* && !(flags & BMAP_SYNC) */) + size = 1 << XFS_WRITE_IO_LOG; + else + size = 1 << inode->i_blkbits; + + VOP_BMAP(vp, offset, size, + create ? flags : BMAP_READ, &pbmap, &retpbbm, error); + if (error) + return -error; + + if (retpbbm == 0) + return 0; + + if (pbmap.pbm_bn != PAGE_BUF_DADDR_NULL) { + page_buf_daddr_t bn; + loff_t delta; + + /* For unwritten extents do not report a disk address on + * the read case. 
+ */ + if (create || ((pbmap.pbm_flags & PBMF_UNWRITTEN) == 0)) { + delta = offset - pbmap.pbm_offset; + delta >>= inode->i_blkbits; + + bn = pbmap.pbm_bn >> (inode->i_blkbits - BBSHIFT); + bn += delta; + + bh_result->b_blocknr = bn; + set_bit(BH_Mapped, &bh_result->b_state); + } + if (pbmap.pbm_flags & PBMF_UNWRITTEN) { + if (create) + set_bit(BH_Mapped, &bh_result->b_state); + set_bit(BH_Unwritten, &bh_result->b_state); + set_bit(BH_Delay, &bh_result->b_state); + } + } + + /* If this is a realtime file, data might be on a new device */ + bh_result->b_dev = pbmap.pbm_target->pbr_kdev; + + /* If we previously allocated a block out beyond eof and + * we are now coming back to use it then we will need to + * flag it as new even if it has a disk address. + */ + if (create && + ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) || + (offset >= inode->i_size))) { + set_bit(BH_New, &bh_result->b_state); + } + + if (pbmap.pbm_flags & PBMF_DELAY) { + if (unlikely(direct)) + BUG(); + if (create) { + set_bit(BH_Mapped, &bh_result->b_state); + } + set_bit(BH_Delay, &bh_result->b_state); + } + + return 0; +} + +int +linvfs_get_block( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + int create) +{ + return linvfs_get_block_core(inode, iblock, bh_result, + create, 0, BMAP_WRITE); +} + +STATIC int +linvfs_get_block_sync( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + int create) +{ + return linvfs_get_block_core(inode, iblock, bh_result, + create, 0, BMAP_SYNC|BMAP_WRITE); +} + +STATIC int +linvfs_get_block_direct( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + int create) +{ + return linvfs_get_block_core(inode, iblock, bh_result, + create, 1, BMAP_WRITE|BMAP_DIRECT); +} + +STATIC int +linvfs_bmap( + struct address_space *mapping, + long block) +{ + struct inode *inode = (struct inode *)mapping->host; + vnode_t *vp = LINVFS_GET_VP(inode); + int error; + + /* block - Linux disk blocks 512b */ + /* bmap input offset - bytes 1b */ + /* bmap output bn - XFS BBs 512b */ + /* bmap output delta - bytes 1b */ + + vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address); + + VOP_RWLOCK(vp, VRWLOCK_READ); + VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error); + VOP_RWUNLOCK(vp, VRWLOCK_READ); + return generic_block_bmap(mapping, block, linvfs_get_block_direct); +} + +STATIC int +linvfs_readpage( + struct file *unused, + struct page *page) +{ + return block_read_full_page(page, linvfs_get_block); +} + +STATIC void +count_page_state( + struct page *page, + int *delalloc, + int *unmapped, + int *unwritten) +{ + struct buffer_head *bh, *head; + + *delalloc = *unmapped = *unwritten = 0; + + bh = head = page_buffers(page); + do { + if (buffer_uptodate(bh) && !buffer_mapped(bh)) + (*unmapped) = 1; + else if (buffer_unwritten(bh)) + (*unwritten) = 1; + else if (buffer_delay(bh)) + (*delalloc) = 1; + } while ((bh = bh->b_this_page) != head); +} + + +/* + * writepage: Called from one of two places: + * + * 1. we are flushing a delalloc buffer head. + * + * 2. we are writing out a dirty page. Typically the page dirty + * state is cleared before we get here. In this case is it + * conceivable we have no buffer heads. + * + * For delalloc space on the page we need to allocate space and + * flush it. For unmapped buffer heads on the page we should + * allocate space if the page is uptodate. For any other dirty + * buffer heads on the page we should flush them. 
+ * + * If we detect that a transaction would be required to flush + * the page, we have to check the process flags first, if we + * are already in a transaction or disk I/O during allocations + * is off, we need to fail the writepage and redirty the page. + * We also need to set PF_NOIO ourselves. + */ + +STATIC int +linvfs_writepage( + struct page *page) +{ + int error; + int need_trans; + int delalloc, unmapped, unwritten; + struct inode *inode = page->mapping->host; + + /* + * We need a transaction if: + * 1. There are delalloc buffers on the page + * 2. The page is upto date and we have unmapped buffers + * 3. The page is upto date and we have no buffers + * 4. There are unwritten buffers on the page + */ + + if (!page_has_buffers(page)) { + unmapped = 1; + need_trans = 1; + } else { + count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!Page_Uptodate(page)) + unmapped = 0; + need_trans = delalloc + unmapped + unwritten; + } + + /* + * If we need a transaction and the process flags say + * we are already in a transaction, or no IO is allowed + * then mark the page dirty again and leave the page + * as is. + */ + + if ((current->flags & (PF_FSTRANS|PF_NOIO)) && need_trans) + goto out_fail; + + /* + * Delay hooking up buffer heads until we have + * made our go/no-go decision. + */ + if (!page_has_buffers(page)) + create_empty_buffers(page, inode->i_dev, 1 << inode->i_blkbits); + + /* + * Convert delayed allocate, unwritten or unmapped space + * to real space and flush out to disk. + */ + if (need_trans) + current->flags |= PF_NOIO; + error = page_state_convert(page, 1, unmapped); + if (need_trans) + current->flags &= ~PF_NOIO; + if (error == -EAGAIN) + goto out_fail; + + if (unlikely(error < 0)) { + unlock_page(page); + return error; + } + + return 0; + +out_fail: + SetPageDirty(page); + unlock_page(page); + return 0; +} + +/* + * Called to move a page into cleanable state - and from there + * to be released. Possibly the page is already clean. We always + * have buffer heads in this call. + * + * Returns 0 if the page is ok to release, 1 otherwise. + * + * Possible scenarios are: + * + * 1. We are being called to release a page which has been written + * to via regular I/O. buffer heads will be dirty and possibly + * delalloc. If no delalloc buffer heads in this case then we + * can just return zero. + * + * 2. We are called to release a page which has been written via + * mmap, all we need to do is ensure there is no delalloc + * state in the buffer heads, if not we can let the caller + * free them and we should come back later via writepage. + */ +STATIC int +linvfs_release_page( + struct page *page, + int gfp_mask) +{ + int delalloc, unmapped, unwritten; + + count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!delalloc && !unwritten) + return 1; + + if (!(gfp_mask & __GFP_FS)) + return 0; + + /* + * Convert delalloc space to real space, do not flush the + * data out to disk, that will be done by the caller. + * Never need to allocate space here - we will always + * come back to writepage in that case. + */ + return (page_state_convert(page, 0, 0) == 0) ? 
1 : 0; +} + +STATIC int +linvfs_prepare_write( + struct file *file, + struct page *page, + unsigned int from, + unsigned int to) +{ + if (file && (file->f_flags & O_SYNC)) { + return block_prepare_write(page, from, to, + linvfs_get_block_sync); + } else { + return block_prepare_write(page, from, to, + linvfs_get_block); + } +} + +/* + * Initiate I/O on a kiobuf of user memory + */ +STATIC int +linvfs_direct_IO( + int rw, + struct file *file, + struct kiobuf *iobuf, + unsigned long blocknr, + int blocksize) +{ + page_buf_t *pb; + page_buf_bmap_t map; + int error = 0; + int pb_flags, map_flags, pg_index = 0; + size_t length, total; + loff_t offset; + size_t map_size, size; + struct inode *inode = file->f_dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + struct page **maplist = iobuf->maplist; + size_t page_offset = iobuf->offset; + + total = length = iobuf->length; + offset = blocknr; + offset <<= inode->i_blkbits; + + map_flags = (rw ? BMAP_WRITE : BMAP_READ) | BMAP_DIRECT; + pb_flags = (rw ? PBF_WRITE : PBF_READ) | PBF_FORCEIO; + while (length) { + error = map_blocks(inode, offset, length, &map, map_flags); + if (error) + break; + BUG_ON(map.pbm_flags & PBMF_DELAY); + + map_size = map.pbm_bsize - map.pbm_delta; + size = min(map_size, length); + + if ((map.pbm_flags & PBMF_HOLE) || + ((map.pbm_flags & PBMF_UNWRITTEN) && rw == READ)) { + size_t zero_len = size; + + if (rw == WRITE) + break; + + /* Need to zero it all */ + while (zero_len) { + struct page *page; + size_t pg_len; + + pg_len = min((size_t) + (PAGE_CACHE_SIZE - page_offset), + zero_len); + + page = maplist[pg_index]; + + memset(kmap(page) + page_offset, 0, pg_len); + flush_dcache_page(page); + kunmap(page); + + zero_len -= pg_len; + if ((pg_len + page_offset) == PAGE_CACHE_SIZE) { + pg_index++; + page_offset = 0; + } else { + page_offset = (page_offset + pg_len) & + ~PAGE_CACHE_MASK; + } + } + } else { + int pg_count; + + pg_count = (size + page_offset + PAGE_CACHE_SIZE - 1) + >> PAGE_CACHE_SHIFT; + if ((pb = pagebuf_lookup(map.pbm_target, offset, + size, pb_flags)) == NULL) { + error = -ENOMEM; + break; + } + /* Need to hook up pagebuf to kiobuf pages */ + pb->pb_pages = &maplist[pg_index]; + pb->pb_offset = page_offset; + pb->pb_page_count = pg_count; + pb->pb_bn = map.pbm_bn + (map.pbm_delta >> BBSHIFT); + + XFS_BUF_DATAIO(pb); + if (map.pbm_flags & PBMF_UNWRITTEN) { + XFS_BUF_SET_FSPRIVATE(pb, vp); + XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_conv); + } + + error = pagebuf_iostart(pb, pb_flags); + pagebuf_rele(pb); + + if (error) { + if (error > 0) + error = -error; + break; + } + + page_offset = (page_offset + size) & ~PAGE_CACHE_MASK; + if (page_offset) + pg_count--; + pg_index += pg_count; + } + + offset += size; + length -= size; + } + + return (error ? error : (int)(total - length)); +} + +struct address_space_operations linvfs_aops = { + .readpage = linvfs_readpage, + .writepage = linvfs_writepage, + .sync_page = block_sync_page, + .releasepage = linvfs_release_page, + .prepare_write = linvfs_prepare_write, + .commit_write = generic_commit_write, + .bmap = linvfs_bmap, + .direct_IO = linvfs_direct_IO, +}; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_behavior.c linux.22-ac2/fs/xfs/linux/xfs_behavior.c --- linux.vanilla/fs/xfs/linux/xfs_behavior.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_behavior.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + * + */ +#include "xfs.h" + +/* + * Source file used to associate/disassociate behaviors with virtualized + * objects. See xfs_behavior.h for more information about behaviors, etc. + * + * The implementation is split between functions in this file and macros + * in xfs_behavior.h. + */ + +/* + * Insert a new behavior descriptor into a behavior chain. + * + * The behavior chain is ordered based on the 'position' number which + * lives in the first field of the ops vector (higher numbers first). + * + * Attemps to insert duplicate ops result in an EINVAL return code. + * Otherwise, return 0 to indicate success. + */ +int +bhv_insert(bhv_head_t *bhp, bhv_desc_t *bdp) +{ + bhv_desc_t *curdesc, *prev; + int position; + + /* + * Validate the position value of the new behavior. + */ + position = BHV_POSITION(bdp); + ASSERT(position >= BHV_POSITION_BASE && position <= BHV_POSITION_TOP); + + /* + * Find location to insert behavior. Check for duplicates. + */ + prev = NULL; + for (curdesc = bhp->bh_first; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + /* Check for duplication. */ + if (curdesc->bd_ops == bdp->bd_ops) { + ASSERT(0); + return EINVAL; + } + + /* Find correct position */ + if (position >= BHV_POSITION(curdesc)) { + ASSERT(position != BHV_POSITION(curdesc)); + break; /* found it */ + } + + prev = curdesc; + } + + if (prev == NULL) { + /* insert at front of chain */ + bdp->bd_next = bhp->bh_first; + bhp->bh_first = bdp; + } else { + /* insert after prev */ + bdp->bd_next = prev->bd_next; + prev->bd_next = bdp; + } + + return 0; +} + +/* + * Remove a behavior descriptor from a position in a behavior chain; + * the postition is guaranteed not to be the first position. + * Should only be called by the bhv_remove() macro. 
+ */ +void +bhv_remove_not_first(bhv_head_t *bhp, bhv_desc_t *bdp) +{ + bhv_desc_t *curdesc, *prev; + + ASSERT(bhp->bh_first != NULL); + ASSERT(bhp->bh_first->bd_next != NULL); + + prev = bhp->bh_first; + for (curdesc = bhp->bh_first->bd_next; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + if (curdesc == bdp) + break; /* found it */ + prev = curdesc; + } + + ASSERT(curdesc == bdp); + prev->bd_next = bdp->bd_next; /* remove from after prev */ +} + +/* + * Look for a specific ops vector on the specified behavior chain. + * Return the associated behavior descriptor. Or NULL, if not found. + */ +bhv_desc_t * +bhv_lookup(bhv_head_t *bhp, void *ops) +{ + bhv_desc_t *curdesc; + + for (curdesc = bhp->bh_first; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + if (curdesc->bd_ops == ops) + return curdesc; + } + + return NULL; +} + +/* + * Looks for the first behavior within a specified range of positions. + * Return the associated behavior descriptor. Or NULL, if none found. + */ +bhv_desc_t * +bhv_lookup_range(bhv_head_t *bhp, int low, int high) +{ + bhv_desc_t *curdesc; + + for (curdesc = bhp->bh_first; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + int position = BHV_POSITION(curdesc); + + if (position <= high) { + if (position >= low) + return curdesc; + return NULL; + } + } + + return NULL; +} + +/* + * Return the base behavior in the chain, or NULL if the chain + * is empty. + * + * The caller has not read locked the behavior chain, so acquire the + * lock before traversing the chain. + */ +bhv_desc_t * +bhv_base(bhv_head_t *bhp) +{ + bhv_desc_t *curdesc; + + for (curdesc = bhp->bh_first; + curdesc != NULL; + curdesc = curdesc->bd_next) { + + if (curdesc->bd_next == NULL) { + return curdesc; + } + } + + return NULL; +} + +void +bhv_head_init( + bhv_head_t *bhp, + char *name) +{ + bhp->bh_first = NULL; +} + +void +bhv_insert_initial( + bhv_head_t *bhp, + bhv_desc_t *bdp) +{ + ASSERT(bhp->bh_first == NULL); + (bhp)->bh_first = bdp; +} + +void +bhv_head_destroy( + bhv_head_t *bhp) +{ + ASSERT(bhp->bh_first == NULL); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_behavior.h linux.22-ac2/fs/xfs/linux/xfs_behavior.h --- linux.vanilla/fs/xfs/linux/xfs_behavior.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_behavior.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BEHAVIOR_H__ +#define __XFS_BEHAVIOR_H__ + +/* + * Header file used to associate behaviors with virtualized objects. + * + * A virtualized object is an internal, virtualized representation of + * OS entities such as persistent files, processes, or sockets. Examples + * of virtualized objects include vnodes, vprocs, and vsockets. Often + * a virtualized object is referred to simply as an "object." + * + * A behavior is essentially an implementation layer associated with + * an object. Multiple behaviors for an object are chained together, + * the order of chaining determining the order of invocation. Each + * behavior of a given object implements the same set of interfaces + * (e.g., the VOP interfaces). + * + * Behaviors may be dynamically inserted into an object's behavior chain, + * such that the addition is transparent to consumers that already have + * references to the object. Typically, a given behavior will be inserted + * at a particular location in the behavior chain. Insertion of new + * behaviors is synchronized with operations-in-progress (oip's) so that + * the oip's always see a consistent view of the chain. + * + * The term "interpostion" is used to refer to the act of inserting + * a behavior such that it interposes on (i.e., is inserted in front + * of) a particular other behavior. A key example of this is when a + * system implementing distributed single system image wishes to + * interpose a distribution layer (providing distributed coherency) + * in front of an object that is otherwise only accessed locally. + * + * Note that the traditional vnode/inode combination is simply a virtualized + * object that has exactly one associated behavior. + * + * Behavior synchronization is logic which is necessary under certain + * circumstances that there is no conflict between ongoing operations + * traversing the behavior chain and those dunamically modifying the + * behavior chain. Because behavior synchronization adds extra overhead + * to virtual operation invocation, we want to restrict, as much as + * we can, the requirement for this extra code, to those situations + * in which it is truly necessary. + * + * Behavior synchronization is needed whenever there's at least one class + * of object in the system for which: + * 1) multiple behaviors for a given object are supported, + * -- AND -- + * 2a) insertion of a new behavior can happen dynamically at any time during + * the life of an active object, + * -- AND -- + * 3a) insertion of a new behavior needs to synchronize with existing + * ops-in-progress. + * -- OR -- + * 3b) multiple different behaviors can be dynamically inserted at + * any time during the life of an active object + * -- OR -- + * 3c) removal of a behavior can occur at any time during the life of + * an active object. + * -- OR -- + * 2b) removal of a behavior can occur at any time during the life of an + * active object + * + */ + +struct bhv_head_lock; + +/* + * Behavior head. Head of the chain of behaviors. + * Contained within each virtualized object data structure. + */ +typedef struct bhv_head { + struct bhv_desc *bh_first; /* first behavior in chain */ + struct bhv_head_lock *bh_lockp; /* pointer to lock info struct */ +} bhv_head_t; + +/* + * Behavior descriptor. 
Descriptor associated with each behavior. + * Contained within the behavior's private data structure. + */ +typedef struct bhv_desc { + void *bd_pdata; /* private data for this behavior */ + void *bd_vobj; /* virtual object associated with */ + void *bd_ops; /* ops for this behavior */ + struct bhv_desc *bd_next; /* next behavior in chain */ +} bhv_desc_t; + +/* + * Behavior identity field. A behavior's identity determines the position + * where it lives within a behavior chain, and it's always the first field + * of the behavior's ops vector. The optional id field further identifies the + * subsystem responsible for the behavior. + */ +typedef struct bhv_identity { + __u16 bi_id; /* owning subsystem id */ + __u16 bi_position; /* position in chain */ +} bhv_identity_t; + +typedef bhv_identity_t bhv_position_t; + +#define BHV_IDENTITY_INIT(id,pos) {id, pos} +#define BHV_IDENTITY_INIT_POSITION(pos) BHV_IDENTITY_INIT(0, pos) + +/* + * Define boundaries of position values. + */ +#define BHV_POSITION_INVALID 0 /* invalid position number */ +#define BHV_POSITION_BASE 1 /* base (last) implementation layer */ +#define BHV_POSITION_TOP 63 /* top (first) implementation layer */ + +/* + * Plumbing macros. + */ +#define BHV_HEAD_FIRST(bhp) (ASSERT((bhp)->bh_first), (bhp)->bh_first) +#define BHV_NEXT(bdp) (ASSERT((bdp)->bd_next), (bdp)->bd_next) +#define BHV_NEXTNULL(bdp) ((bdp)->bd_next) +#define BHV_VOBJ(bdp) (ASSERT((bdp)->bd_vobj), (bdp)->bd_vobj) +#define BHV_VOBJNULL(bdp) ((bdp)->bd_vobj) +#define BHV_PDATA(bdp) (bdp)->bd_pdata +#define BHV_OPS(bdp) (bdp)->bd_ops +#define BHV_IDENTITY(bdp) ((bhv_identity_t *)(bdp)->bd_ops) +#define BHV_POSITION(bdp) (BHV_IDENTITY(bdp)->bi_position) + +extern void bhv_head_init(bhv_head_t *, char *); +extern void bhv_head_destroy(bhv_head_t *); +extern int bhv_insert(bhv_head_t *, bhv_desc_t *); +extern void bhv_insert_initial(bhv_head_t *, bhv_desc_t *); + +/* + * Initialize a new behavior descriptor. + * Arguments: + * bdp - pointer to behavior descriptor + * pdata - pointer to behavior's private data + * vobj - pointer to associated virtual object + * ops - pointer to ops for this behavior + */ +#define bhv_desc_init(bdp, pdata, vobj, ops) \ + { \ + (bdp)->bd_pdata = pdata; \ + (bdp)->bd_vobj = vobj; \ + (bdp)->bd_ops = ops; \ + (bdp)->bd_next = NULL; \ + } + +/* + * Remove a behavior descriptor from a behavior chain. + */ +#define bhv_remove(bhp, bdp) \ + { \ + if ((bhp)->bh_first == (bdp)) { \ + /* \ + * Remove from front of chain. \ + * Atomic wrt oip's. \ + */ \ + (bhp)->bh_first = (bdp)->bd_next; \ + } else { \ + /* remove from non-front of chain */ \ + bhv_remove_not_first(bhp, bdp); \ + } \ + (bdp)->bd_vobj = NULL; \ + } + +/* + * Behavior module prototypes. + */ +extern void bhv_remove_not_first(bhv_head_t *bhp, bhv_desc_t *bdp); +extern bhv_desc_t * bhv_lookup(bhv_head_t *bhp, void *ops); +extern bhv_desc_t * bhv_lookup_range(bhv_head_t *bhp, int low, int high); +extern bhv_desc_t * bhv_base(bhv_head_t *bhp); + +/* No bhv locking on Linux */ +#define bhv_lookup_unlocked bhv_lookup +#define bhv_base_unlocked bhv_base + +#endif /* __XFS_BEHAVIOR_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_cred.h linux.22-ac2/fs/xfs/linux/xfs_cred.h --- linux.vanilla/fs/xfs/linux/xfs_cred.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_cred.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_CRED_H__
+#define __XFS_CRED_H__
+
+/*
+ * Credentials
+ */
+typedef struct cred {
+	/* EMPTY */
+} cred_t;
+
+extern struct cred *sys_cred;
+
+/* This is a hack (it assumes sys_cred is the only cred_t in the system). */
+static __inline int capable_cred(cred_t *cr, int cid)
+{
+	return (cr == sys_cred) ? 1 : capable(cid);
+}
+
+#endif	/* __XFS_CRED_H__ */
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_file.c linux.22-ac2/fs/xfs/linux/xfs_file.c
--- linux.vanilla/fs/xfs/linux/xfs_file.c	1970-01-01 01:00:00.000000000 +0100
+++ linux.22-ac2/fs/xfs/linux/xfs_file.c	2003-06-29 16:09:21.000000000 +0100
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_trans.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_error.h" +#include "xfs_rw.h" +#include + +#include +#include /* for PROT_WRITE */ + +static struct vm_operations_struct linvfs_file_vm_ops; + + +STATIC ssize_t +linvfs_read( + struct file *filp, + char *buf, + size_t size, + loff_t *offset) +{ + vnode_t *vp = LINVFS_GET_VP(filp->f_dentry->d_inode); + int error; + + ASSERT(vp); + VOP_READ(vp, filp, buf, size, offset, 0, NULL, error); + return error; +} + + +STATIC ssize_t +linvfs_write( + struct file *file, + const char *buf, + size_t count, + loff_t *ppos) +{ + struct inode *inode = file->f_dentry->d_inode; + loff_t pos; + vnode_t *vp; + int error; /* Use negative errors in this f'n */ + + if ((ssize_t) count < 0) + return -EINVAL; + + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + + pos = *ppos; + if (pos < 0) + return -EINVAL; + + error = file->f_error; + if (error) { + file->f_error = 0; + return error; + } + + vp = LINVFS_GET_VP(inode); + ASSERT(vp); + + /* We allow multiple direct writers in, there is no + * potential call to vmtruncate in that path. + */ + if (!(file->f_flags & O_DIRECT)) + down(&inode->i_sem); + + VOP_WRITE(vp, file, buf, count, &pos, 0, NULL, error); + *ppos = pos; + + if (!(file->f_flags & O_DIRECT)) + up(&inode->i_sem); + return error; +} + + +STATIC int +linvfs_open( + struct inode *inode, + struct file *filp) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error; + + if (!(filp->f_flags & O_LARGEFILE) && inode->i_size > MAX_NON_LFS) + return -EFBIG; + + ASSERT(vp); + VOP_OPEN(vp, NULL, error); + return -error; +} + + +STATIC int +linvfs_release( + struct inode *inode, + struct file *filp) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error = 0; + + if (vp) + VOP_RELEASE(vp, error); + return -error; +} + + +STATIC int +linvfs_fsync( + struct file *filp, + struct dentry *dentry, + int datasync) +{ + struct inode *inode = dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + int error; + int flags = FSYNC_WAIT; + + if (datasync) + flags |= FSYNC_DATA; + + ASSERT(vp); + VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error); + return -error; +} + +/* + * linvfs_readdir maps to VOP_READDIR(). + * We need to build a uio, cred, ... 
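+ *
+ * As a rough sketch of the shape of that translation (mirroring the code
+ * below; the field names are the ones this patch's uio_t and iovec_t use,
+ * and the values shown are illustrative only):
+ *
+ *	iovec_t	iov = { .iov_base = read_buf, .iov_len = rlen };
+ *	uio_t	uio = { .uio_iov = &iov, .uio_iovcnt = 1,
+ *			.uio_segflg = UIO_SYSSPACE,
+ *			.uio_fmode = filp->f_mode,
+ *			.uio_offset = filp->f_pos, .uio_resid = rlen };
+ *	VOP_READDIR(vp, &uio, NULL, &eof, error);
+ *
+ * Each xfs_dirent_t returned in read_buf is then handed to the VFS
+ * through filldir().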
+ */ + +#define nextdp(dp) ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen)) + +STATIC int +linvfs_readdir( + struct file *filp, + void *dirent, + filldir_t filldir) +{ + int error = 0; + vnode_t *vp; + uio_t uio; + iovec_t iov; + int eof = 0; + caddr_t read_buf; + int namelen, size = 0; + size_t rlen = PAGE_CACHE_SIZE; + xfs_off_t start_offset, curr_offset; + xfs_dirent_t *dbp = NULL; + + vp = LINVFS_GET_VP(filp->f_dentry->d_inode); + ASSERT(vp); + + /* Try fairly hard to get memory */ + do { + if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL))) + break; + rlen >>= 1; + } while (rlen >= 1024); + + if (read_buf == NULL) + return -ENOMEM; + + uio.uio_iov = &iov; + uio.uio_fmode = filp->f_mode; + uio.uio_segflg = UIO_SYSSPACE; + curr_offset = uio.uio_offset = filp->f_pos; + + while (!eof) { + uio.uio_resid = iov.iov_len = rlen; + iov.iov_base = read_buf; + uio.uio_iovcnt = 1; + + start_offset = uio.uio_offset; + + VOP_READDIR(vp, &uio, NULL, &eof, error); + if ((uio.uio_offset == start_offset) || error) { + size = 0; + break; + } + + size = rlen - uio.uio_resid; + dbp = (xfs_dirent_t *)read_buf; + while (size > 0) { + namelen = strlen(dbp->d_name); + + if (filldir(dirent, dbp->d_name, namelen, + (loff_t) curr_offset, + (ino_t) dbp->d_ino, + DT_UNKNOWN)) { + goto done; + } + size -= dbp->d_reclen; + curr_offset = (loff_t)dbp->d_off & 0x7fffffff; + dbp = nextdp(dbp); + } + } +done: + if (!error) { + if (size == 0) + filp->f_pos = uio.uio_offset & 0x7fffffff; + else if (dbp) + filp->f_pos = curr_offset; + } + + kfree(read_buf); + return -error; +} + +STATIC int +linvfs_file_mmap( + struct file *filp, + struct vm_area_struct *vma) +{ + struct inode *ip = filp->f_dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(ip); + vattr_t va = { .va_mask = XFS_AT_UPDATIME }; + int error; + + if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) { + xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); + + error = -XFS_SEND_MMAP(mp, vma, 0); + if (error) + return error; + } + + vma->vm_ops = &linvfs_file_vm_ops; + + VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error); + return 0; +} + + +STATIC int +linvfs_ioctl( + struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int error; + vnode_t *vp = LINVFS_GET_VP(inode); + + ASSERT(vp); + VOP_IOCTL(vp, inode, filp, cmd, arg, error); + VMODIFY(vp); + + /* NOTE: some of the ioctl's return positive #'s as a + * byte count indicating success, such as + * readlink_by_handle. So we don't "sign flip" + * like most other routines. This means true + * errors need to be returned as a negative value. 
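+	 *
+	 * Illustrative example (values hypothetical): a successful
+	 * XFS_IOC_READLINK_BY_HANDLE call may return, say, 17 for
+	 * "17 bytes of link target copied", while a failure comes back
+	 * already negated (e.g. -EFAULT), so the value is passed
+	 * through unchanged in both cases.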
+ */ + return error; +} + +#ifdef HAVE_VMOP_MPROTECT +STATIC int +linvfs_mprotect( + struct vm_area_struct *vma, + unsigned int newflags) +{ + vnode_t *vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode); + int error = 0; + + if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) { + if ((vma->vm_flags & VM_MAYSHARE) && + (newflags & PROT_WRITE) && !(vma->vm_flags & PROT_WRITE)) { + xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); + + error = XFS_SEND_MMAP(mp, vma, VM_WRITE); + } + } + return error; +} +#endif /* HAVE_VMOP_MPROTECT */ + + +struct file_operations linvfs_file_operations = { + .llseek = generic_file_llseek, + .read = linvfs_read, + .write = linvfs_write, + .ioctl = linvfs_ioctl, + .mmap = linvfs_file_mmap, + .open = linvfs_open, + .release = linvfs_release, + .fsync = linvfs_fsync, +}; + +struct file_operations linvfs_dir_operations = { + .read = generic_read_dir, + .readdir = linvfs_readdir, + .ioctl = linvfs_ioctl, + .fsync = linvfs_fsync, +}; + +static struct vm_operations_struct linvfs_file_vm_ops = { + .nopage = filemap_nopage, +#ifdef HAVE_VMOP_MPROTECT + .mprotect = linvfs_mprotect, +#endif +}; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_fs_subr.c linux.22-ac2/fs/xfs/linux/xfs_fs_subr.c --- linux.vanilla/fs/xfs/linux/xfs_fs_subr.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_fs_subr.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +/* + * Stub for no-op vnode operations that return error status. + */ +int +fs_noerr() +{ + return 0; +} + +/* + * Operation unsupported under this file system. + */ +int +fs_nosys() +{ + return ENOSYS; +} + +/* + * Stub for inactive, strategy, and read/write lock/unlock. Does nothing. + */ +/* ARGSUSED */ +void +fs_noval() +{ +} + +/* + * vnode pcache layer for vnode_tosspages. 
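+ * (i.e. it simply drops any pages cached for the file from offset
+ * 'first' onward).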
+ * 'last' parameter unused but left in for IRIX compatibility + */ +void +fs_tosspages( + bhv_desc_t *bdp, + xfs_off_t first, + xfs_off_t last, + int fiopt) +{ + vnode_t *vp = BHV_TO_VNODE(bdp); + struct inode *ip = LINVFS_GET_IP(vp); + + if (VN_CACHED(vp)) + truncate_inode_pages(ip->i_mapping, first); +} + + +/* + * vnode pcache layer for vnode_flushinval_pages. + * 'last' parameter unused but left in for IRIX compatibility + */ +void +fs_flushinval_pages( + bhv_desc_t *bdp, + xfs_off_t first, + xfs_off_t last, + int fiopt) +{ + vnode_t *vp = BHV_TO_VNODE(bdp); + struct inode *ip = LINVFS_GET_IP(vp); + + if (VN_CACHED(vp)) { + filemap_fdatasync(ip->i_mapping); + fsync_inode_data_buffers(ip); + filemap_fdatawait(ip->i_mapping); + + truncate_inode_pages(ip->i_mapping, first); + } +} + +/* + * vnode pcache layer for vnode_flush_pages. + * 'last' parameter unused but left in for IRIX compatibility + */ +int +fs_flush_pages( + bhv_desc_t *bdp, + xfs_off_t first, + xfs_off_t last, + uint64_t flags, + int fiopt) +{ + vnode_t *vp = BHV_TO_VNODE(bdp); + struct inode *ip = LINVFS_GET_IP(vp); + + if (VN_CACHED(vp)) { + filemap_fdatasync(ip->i_mapping); + fsync_inode_data_buffers(ip); + filemap_fdatawait(ip->i_mapping); + } + + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_fs_subr.h linux.22-ac2/fs/xfs/linux/xfs_fs_subr.h --- linux.vanilla/fs/xfs/linux/xfs_fs_subr.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_fs_subr.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUBR_H__ +#define __XFS_SUBR_H__ + +/* + * Utilities shared among file system implementations. 
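+ *
+ * These stubs are typically wired straight into an ops vector wherever a
+ * filesystem has no real implementation for an operation, roughly like
+ * this (the member and type names below are made up for illustration):
+ *
+ *	.vop_frobnicate = (vop_frobnicate_t)fs_nosys,
+ *
+ * so callers get ENOSYS back rather than jumping through a NULL pointer.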
+ */ + +struct cred; + +extern int fs_noerr(void); +extern int fs_nosys(void); +extern int fs_nodev(void); +extern void fs_noval(void); +extern void fs_tosspages(bhv_desc_t *, xfs_off_t, xfs_off_t, int); +extern void fs_flushinval_pages(bhv_desc_t *, xfs_off_t, xfs_off_t, int); +extern int fs_flush_pages(bhv_desc_t *, xfs_off_t, xfs_off_t, uint64_t, int); + +#endif /* __XFS_FS_SUBR_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_globals.c linux.22-ac2/fs/xfs/linux/xfs_globals.c --- linux.vanilla/fs/xfs/linux/xfs_globals.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_globals.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * This file contains globals needed by XFS that were normally defined + * somewhere else in IRIX. + */ + +#include "xfs.h" +#include "xfs_bmap_btree.h" +#include "xfs_bit.h" + +/* + * System memory size - used to scale certain data structures in XFS. + */ +unsigned long xfs_physmem; + +/* + * Tunable XFS parameters. xfs_params is required even when CONFIG_SYSCTL=n, + * other XFS code uses these values. + */ +xfs_param_t xfs_params = { 128, 32, 0, 1, 0, 0, 0, 3, 30 * HZ }; + +/* + * Global system credential structure. + */ +cred_t sys_cred_val, *sys_cred = &sys_cred_val; + +/* Export XFS symbols used by xfsidbg */ +EXPORT_SYMBOL(xfs_next_bit); +EXPORT_SYMBOL(xfs_contig_bits); +EXPORT_SYMBOL(xfs_bmbt_get_all); +#if ARCH_CONVERT != ARCH_NOCONVERT +EXPORT_SYMBOL(xfs_bmbt_disk_get_all); +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_globals.h linux.22-ac2/fs/xfs/linux/xfs_globals.h --- linux.vanilla/fs/xfs/linux/xfs_globals.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_globals.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_GLOBALS_H__ +#define __XFS_GLOBALS_H__ + +/* + * This file declares globals needed by XFS that were normally defined + * somewhere else in IRIX. + */ + +extern uint64_t xfs_panic_mask; /* set to cause more panics */ +extern unsigned long xfs_physmem; +extern struct cred *sys_cred; + +#endif /* __XFS_GLOBALS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_ioctl.c linux.22-ac2/fs/xfs/linux/xfs_ioctl.c --- linux.vanilla/fs/xfs/linux/xfs_ioctl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_ioctl.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1117 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" +#include "xfs_dfrag.h" +#include "xfs_fsops.h" + +#include +#include + + +/* + * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to + * a file or fs handle. + * + * XFS_IOC_PATH_TO_FSHANDLE + * returns fs handle for a mount point or path within that mount point + * XFS_IOC_FD_TO_HANDLE + * returns full handle for a FD opened in user space + * XFS_IOC_PATH_TO_HANDLE + * returns full handle for a path + */ +STATIC int +xfs_find_handle( + unsigned int cmd, + unsigned long arg) +{ + int hsize; + xfs_handle_t handle; + xfs_fsop_handlereq_t hreq; + struct inode *inode; + struct vnode *vp; + + if (copy_from_user(&hreq, (xfs_fsop_handlereq_t *)arg, sizeof(hreq))) + return -XFS_ERROR(EFAULT); + + memset((char *)&handle, 0, sizeof(handle)); + + switch (cmd) { + case XFS_IOC_PATH_TO_FSHANDLE: + case XFS_IOC_PATH_TO_HANDLE: { + struct nameidata nd; + int error; + + error = user_path_walk_link(hreq.path, &nd); + if (error) + return error; + + ASSERT(nd.dentry); + ASSERT(nd.dentry->d_inode); + inode = igrab(nd.dentry->d_inode); + path_release(&nd); + break; + } + + case XFS_IOC_FD_TO_HANDLE: { + struct file *file; + + file = fget(hreq.fd); + if (!file) + return -EBADF; + + ASSERT(file->f_dentry); + ASSERT(file->f_dentry->d_inode); + inode = igrab(file->f_dentry->d_inode); + fput(file); + break; + } + + default: + ASSERT(0); + return -XFS_ERROR(EINVAL); + } + + if (inode->i_sb->s_magic != XFS_SB_MAGIC) { + /* we're not in XFS anymore, Toto */ + iput(inode); + return -XFS_ERROR(EINVAL); + } + + /* we need the vnode */ + vp = LINVFS_GET_VP(inode); + if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { + iput(inode); + return -XFS_ERROR(EBADF); + } + + /* now we can grab the fsid */ + memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); + hsize = sizeof(xfs_fsid_t); + + if (cmd != XFS_IOC_PATH_TO_FSHANDLE) { + xfs_inode_t *ip; + bhv_desc_t *bhv; + int lock_mode; + + /* need to get access to the xfs_inode to read the generation */ + bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops); + ASSERT(bhv); + ip = XFS_BHVTOI(bhv); + ASSERT(ip); + lock_mode = xfs_ilock_map_shared(ip); + + /* fill in fid section of handle from inode */ + handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) - + sizeof(handle.ha_fid.xfs_fid_len); + handle.ha_fid.xfs_fid_pad = 0; + handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen; + handle.ha_fid.xfs_fid_ino = ip->i_ino; + + xfs_iunlock_map_shared(ip, lock_mode); + + hsize = XFS_HSIZE(handle); + } + 
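+	/*
+	 * At this point the handle being exported looks like this
+	 * (layout as filled in above, shown only for illustration):
+	 *
+	 *	ha_fsid | xfs_fid_len | xfs_fid_pad | xfs_fid_gen | xfs_fid_ino
+	 *
+	 * hsize is sizeof(xfs_fsid_t) for XFS_IOC_PATH_TO_FSHANDLE and
+	 * XFS_HSIZE(handle) for full file handles.
+	 */
+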
+ /* now copy our handle into the user buffer & write out the size */ + if (copy_to_user((xfs_handle_t *)hreq.ohandle, &handle, hsize) || + copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) { + iput(inode); + return -XFS_ERROR(EFAULT); + } + + iput(inode); + return 0; +} + + +/* + * Convert userspace handle data into vnode (and inode). + * We [ab]use the fact that all the fsop_handlereq ioctl calls + * have a data structure argument whose first component is always + * a xfs_fsop_handlereq_t, so we can cast to and from this type. + * This allows us to optimise the copy_from_user calls and gives + * a handy, shared routine. + * + * If no error, caller must always VN_RELE the returned vp. + */ +STATIC int +xfs_vget_fsop_handlereq( + xfs_mount_t *mp, + struct inode *parinode, /* parent inode pointer */ + int cap, /* capability level for op */ + unsigned long arg, /* userspace data pointer */ + unsigned long size, /* size of expected struct */ + /* output arguments */ + xfs_fsop_handlereq_t *hreq, + vnode_t **vp, + struct inode **inode) +{ + void *hanp; + size_t hlen; + xfs_fid_t *xfid; + xfs_handle_t *handlep; + xfs_handle_t handle; + xfs_inode_t *ip; + struct inode *inodep; + vnode_t *vpp; + __u32 igen; + ino_t ino; + int error; + + if (!capable(cap)) + return XFS_ERROR(EPERM); + + /* + * Only allow handle opens under a directory. + */ + if (!S_ISDIR(parinode->i_mode)) + return XFS_ERROR(ENOTDIR); + + /* + * Copy the handle down from the user and validate + * that it looks to be in the correct format. + */ + if (copy_from_user(hreq, (struct xfs_fsop_handlereq *)arg, size)) + return XFS_ERROR(EFAULT); + + hanp = hreq->ihandle; + hlen = hreq->ihandlen; + handlep = &handle; + + if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) + return XFS_ERROR(EINVAL); + if (copy_from_user(handlep, hanp, hlen)) + return XFS_ERROR(EFAULT); + if (hlen < sizeof(*handlep)) + memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen); + if (hlen > sizeof(handlep->ha_fsid)) { + if (handlep->ha_fid.xfs_fid_len != + (hlen - sizeof(handlep->ha_fsid) + - sizeof(handlep->ha_fid.xfs_fid_len)) + || handlep->ha_fid.xfs_fid_pad) + return XFS_ERROR(EINVAL); + } + + /* + * Crack the handle, obtain the inode # & generation # + */ + xfid = (struct xfs_fid *)&handlep->ha_fid; + if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) { + ino = xfid->xfs_fid_ino; + igen = xfid->xfs_fid_gen; + } else { + return XFS_ERROR(EINVAL); + } + + /* + * Get the XFS inode, building a vnode to go with it. + */ + error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0); + if (error) + return error; + if (ip == NULL) + return XFS_ERROR(EIO); + if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) { + xfs_iput_new(ip, XFS_ILOCK_SHARED); + return XFS_ERROR(ENOENT); + } + + vpp = XFS_ITOV(ip); + inodep = LINVFS_GET_IP(vpp); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + *vp = vpp; + *inode = inodep; + return 0; +} + +STATIC int +xfs_open_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + int new_fd; + int permflag; + struct file *filp; + struct inode *inode; + struct dentry *dentry; + vnode_t *vp; + xfs_fsop_handlereq_t hreq; + struct list_head *lp; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg, + sizeof(xfs_fsop_handlereq_t), + &hreq, &vp, &inode); + if (error) + return -error; + + /* Restrict xfs_open_by_handle to directories & regular files. 
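+	 * The handle arriving here was produced earlier by
+	 * XFS_IOC_PATH_TO_HANDLE or XFS_IOC_FD_TO_HANDLE; a typical
+	 * userspace sequence looks roughly like this (sketch only,
+	 * declarations and error handling omitted, buffer names made up):
+	 *
+	 *	hreq.path = "/mnt/xfs/some/file";
+	 *	hreq.ohandle = hbuf;  hreq.ohandlen = &hlen;
+	 *	ioctl(mntfd, XFS_IOC_PATH_TO_HANDLE, &hreq);
+	 *
+	 *	hreq.ihandle = hbuf;  hreq.ihandlen = hlen;
+	 *	hreq.oflags = O_RDONLY;
+	 *	fd = ioctl(mntfd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
+	 *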
*/ + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { + iput(inode); + return -XFS_ERROR(EINVAL); + } + +#if BITS_PER_LONG != 32 + hreq.oflags |= O_LARGEFILE; +#endif + /* Put open permission in namei format. */ + permflag = hreq.oflags; + if ((permflag+1) & O_ACCMODE) + permflag++; + if (permflag & O_TRUNC) + permflag |= 2; + + /* Can't write directories. */ + if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) { + iput(inode); + return -XFS_ERROR(EISDIR); + } + + if ((new_fd = get_unused_fd()) < 0) { + iput(inode); + return new_fd; + } + + /* Now to find a dentry. If possible, get a well-connected one. */ + spin_lock(&dcache_lock); + for (lp = inode->i_dentry.next; lp != &inode->i_dentry ; lp=lp->next) { + dentry = list_entry(lp, struct dentry, d_alias); + if (! (dentry->d_flags & DCACHE_NFSD_DISCONNECTED)) { + dget_locked(dentry); + dentry->d_vfs_flags |= DCACHE_REFERENCED; + spin_unlock(&dcache_lock); + iput(inode); + goto found; + } + } + spin_unlock(&dcache_lock); + + /* ELSE didn't find dentry. Create anonymous dcache entry. */ + dentry = d_alloc_root(inode); + if (dentry == NULL) { + iput(inode); + put_unused_fd(new_fd); + return -XFS_ERROR(ENOMEM); + } + + /* Keep nfsd happy. */ + dentry->d_flags |= DCACHE_NFSD_DISCONNECTED; + + found: + /* Ensure umount returns EBUSY on umounts while this file is open. */ + mntget(parfilp->f_vfsmnt); + + /* Create file pointer. */ + filp = dentry_open(dentry, parfilp->f_vfsmnt, hreq.oflags); + if (IS_ERR(filp)) { + put_unused_fd(new_fd); + return -XFS_ERROR(-PTR_ERR(filp)); + } + filp->f_mode |= FINVIS; + + fd_install(new_fd, filp); + return new_fd; +} + +STATIC int +xfs_readlink_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + struct iovec aiov; + struct uio auio; + struct inode *inode; + xfs_fsop_handlereq_t hreq; + vnode_t *vp; + __u32 olen; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg, + sizeof(xfs_fsop_handlereq_t), + &hreq, &vp, &inode); + if (error) + return -error; + + /* Restrict this handle operation to symlinks only. 
*/ + if (vp->v_type != VLNK) { + VN_RELE(vp); + return -XFS_ERROR(EINVAL); + } + + if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) { + VN_RELE(vp); + return -XFS_ERROR(EFAULT); + } + aiov.iov_len = olen; + aiov.iov_base = hreq.ohandle; + + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_fmode = FINVIS; + auio.uio_offset = 0; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_resid = olen; + + VOP_READLINK(vp, &auio, NULL, error); + + VN_RELE(vp); + return (olen - auio.uio_resid); +} + +STATIC int +xfs_fssetdm_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + struct fsdmidata fsd; + xfs_fsop_setdm_handlereq_t dmhreq; + struct inode *inode; + bhv_desc_t *bdp; + vnode_t *vp; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_MKNOD, arg, + sizeof(xfs_fsop_setdm_handlereq_t), + (xfs_fsop_handlereq_t *)&dmhreq, + &vp, &inode); + if (error) + return -error; + + if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { + VN_RELE(vp); + return -XFS_ERROR(EFAULT); + } + + bdp = bhv_base_unlocked(VN_BHV_HEAD(vp)); + error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate, NULL); + + VN_RELE(vp); + if (error) + return -error; + return 0; +} + +STATIC int +xfs_attrlist_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + attrlist_cursor_kern_t *cursor; + xfs_fsop_attrlist_handlereq_t al_hreq; + struct inode *inode; + vnode_t *vp; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg, + sizeof(xfs_fsop_attrlist_handlereq_t), + (xfs_fsop_handlereq_t *)&al_hreq, + &vp, &inode); + if (error) + return -error; + + cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; + VOP_ATTR_LIST(vp, al_hreq.buffer, al_hreq.buflen, al_hreq.flags, + cursor, NULL, error); + VN_RELE(vp); + if (error) + return -error; + return 0; +} + +STATIC int +xfs_attrmulti_by_handle( + xfs_mount_t *mp, + unsigned long arg, + struct file *parfilp, + struct inode *parinode) +{ + int error; + xfs_attr_multiop_t *ops; + xfs_fsop_attrmulti_handlereq_t am_hreq; + struct inode *inode; + vnode_t *vp; + int i, size; + + error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg, + sizeof(xfs_fsop_attrmulti_handlereq_t), + (xfs_fsop_handlereq_t *)&am_hreq, + &vp, &inode); + if (error) + return -error; + + size = am_hreq.opcount * sizeof(attr_multiop_t); + ops = (xfs_attr_multiop_t *)kmalloc(size, GFP_KERNEL); + if (!ops) { + VN_RELE(vp); + return -XFS_ERROR(ENOMEM); + } + + if (copy_from_user(ops, am_hreq.ops, size)) { + kfree(ops); + VN_RELE(vp); + return -XFS_ERROR(EFAULT); + } + + for (i = 0; i < am_hreq.opcount; i++) { + switch(ops[i].am_opcode) { + case ATTR_OP_GET: + VOP_ATTR_GET(vp,ops[i].am_attrname, ops[i].am_attrvalue, + &ops[i].am_length, ops[i].am_flags, + NULL, ops[i].am_error); + break; + case ATTR_OP_SET: + VOP_ATTR_SET(vp,ops[i].am_attrname, ops[i].am_attrvalue, + ops[i].am_length, ops[i].am_flags, + NULL, ops[i].am_error); + break; + case ATTR_OP_REMOVE: + VOP_ATTR_REMOVE(vp, ops[i].am_attrname, ops[i].am_flags, + NULL, ops[i].am_error); + break; + default: + ops[i].am_error = EINVAL; + } + } + + if (copy_to_user(am_hreq.ops, ops, size)) + error = -XFS_ERROR(EFAULT); + + kfree(ops); + VN_RELE(vp); + return error; +} + +/* prototypes for a few of the stack-hungry cases that have + * their own functions. 
Functions are defined after their use + * so gcc doesn't get fancy and inline them with -03 */ + +STATIC int +xfs_ioc_space( + bhv_desc_t *bdp, + vnode_t *vp, + struct file *filp, + unsigned int cmd, + unsigned long arg); + +STATIC int +xfs_ioc_bulkstat( + xfs_mount_t *mp, + unsigned int cmd, + unsigned long arg); + +STATIC int +xfs_ioc_fsgeometry_v1( + xfs_mount_t *mp, + unsigned long arg); + +STATIC int +xfs_ioc_fsgeometry( + xfs_mount_t *mp, + unsigned long arg); + +STATIC int +xfs_ioc_xattr( + vnode_t *vp, + struct file *filp, + unsigned int cmd, + unsigned long arg); + +STATIC int +xfs_ioc_getbmap( + bhv_desc_t *bdp, + struct file *filp, + unsigned int cmd, + unsigned long arg); + +STATIC int +xfs_ioc_getbmapx( + bhv_desc_t *bdp, + unsigned long arg); + +int +xfs_ioctl( + bhv_desc_t *bdp, + struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int error; + vnode_t *vp; + xfs_inode_t *ip; + xfs_mount_t *mp; + + vp = LINVFS_GET_VP(inode); + + vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + switch (cmd) { + + case XFS_IOC_ALLOCSP: + case XFS_IOC_FREESP: + case XFS_IOC_RESVSP: + case XFS_IOC_UNRESVSP: + case XFS_IOC_ALLOCSP64: + case XFS_IOC_FREESP64: + case XFS_IOC_RESVSP64: + case XFS_IOC_UNRESVSP64: + /* + * Only allow the sys admin to reserve space unless + * unwritten extents are enabled. + */ + if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + + return xfs_ioc_space(bdp, vp, filp, cmd, arg); + + case XFS_IOC_DIOINFO: { + struct dioattr da; + + da.d_miniosz = mp->m_sb.sb_blocksize; + da.d_mem = mp->m_sb.sb_blocksize; + + /* + * this only really needs to be BBSIZE. + * it is set to the file system block size to + * avoid having to do block zeroing on short writes. 
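+		 *
+		 * d_maxiosz below is just the kiobuf limit
+		 * (KIO_MAX_ATOMIC_IO, a value in kilobytes) rounded down
+		 * to a whole number of filesystem blocks; e.g. with 4k
+		 * blocks a 512k kiobuf limit would yield a d_maxiosz of
+		 * 524288 bytes (numbers illustrative only).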
+ */ + da.d_maxiosz = XFS_FSB_TO_B(mp, + XFS_B_TO_FSBT(mp, KIO_MAX_ATOMIC_IO << 10)); + + if (copy_to_user((struct dioattr *)arg, &da, sizeof(da))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_FSBULKSTAT_SINGLE: + case XFS_IOC_FSBULKSTAT: + case XFS_IOC_FSINUMBERS: + return xfs_ioc_bulkstat(mp, cmd, arg); + + case XFS_IOC_FSGEOMETRY_V1: + return xfs_ioc_fsgeometry_v1(mp, arg); + + case XFS_IOC_FSGEOMETRY: + return xfs_ioc_fsgeometry(mp, arg); + + case XFS_IOC_FSGETXATTR: + case XFS_IOC_FSSETXATTR: + case XFS_IOC_FSGETXATTRA: + return xfs_ioc_xattr(vp, filp, cmd, arg); + + case XFS_IOC_FSSETDM: { + struct fsdmidata dmi; + + if (copy_from_user(&dmi, (struct fsdmidata *)arg, sizeof(dmi))) + return -XFS_ERROR(EFAULT); + + error = xfs_set_dmattrs(bdp, dmi.fsd_dmevmask, dmi.fsd_dmstate, + NULL); + if (error) + return -error; + return 0; + } + + case XFS_IOC_GETBMAP: + case XFS_IOC_GETBMAPA: + return xfs_ioc_getbmap(bdp, filp, cmd, arg); + + case XFS_IOC_GETBMAPX: + return xfs_ioc_getbmapx(bdp, arg); + + case XFS_IOC_FD_TO_HANDLE: + case XFS_IOC_PATH_TO_HANDLE: + case XFS_IOC_PATH_TO_FSHANDLE: + return xfs_find_handle(cmd, arg); + + case XFS_IOC_OPEN_BY_HANDLE: + return xfs_open_by_handle(mp, arg, filp, inode); + + case XFS_IOC_FSSETDM_BY_HANDLE: + return xfs_fssetdm_by_handle(mp, arg, filp, inode); + + case XFS_IOC_READLINK_BY_HANDLE: + return xfs_readlink_by_handle(mp, arg, filp, inode); + + case XFS_IOC_ATTRLIST_BY_HANDLE: + return xfs_attrlist_by_handle(mp, arg, filp, inode); + + case XFS_IOC_ATTRMULTI_BY_HANDLE: + return xfs_attrmulti_by_handle(mp, arg, filp, inode); + + case XFS_IOC_SWAPEXT: { + error = xfs_swapext((struct xfs_swapext *)arg); + if (error) + return -error; + return 0; + } + + case XFS_IOC_FSCOUNTS: { + xfs_fsop_counts_t out; + + error = xfs_fs_counts(mp, &out); + if (error) + return -error; + + if (copy_to_user((char *)arg, &out, sizeof(out))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_SET_RESBLKS: { + xfs_fsop_resblks_t inout; + __uint64_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&inout, (char *)arg, sizeof(inout))) + return -XFS_ERROR(EFAULT); + + /* input parameter is passed in resblks field of structure */ + in = inout.resblks; + error = xfs_reserve_blocks(mp, &in, &inout); + + if (copy_to_user((char *)arg, &inout, sizeof(inout))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_GET_RESBLKS: { + xfs_fsop_resblks_t out; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + error = xfs_reserve_blocks(mp, NULL, &out); + if (error) + return -error; + + if (copy_to_user((char *)arg, &out, sizeof(out))) + return -XFS_ERROR(EFAULT); + + return 0; + } + + case XFS_IOC_FSGROWFSDATA: { + xfs_growfs_data_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&in, (char *)arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_growfs_data(mp, &in); + if (error) + return -error; + return 0; + } + + case XFS_IOC_FSGROWFSLOG: { + xfs_growfs_log_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&in, (char *)arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_growfs_log(mp, &in); + if (error) + return -error; + return 0; + } + + case XFS_IOC_FSGROWFSRT: { + xfs_growfs_rt_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&in, (char *)arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_growfs_rt(mp, &in); + if (error) + return -error; + return 0; + } + + case XFS_IOC_FREEZE: + if 
(!capable(CAP_SYS_ADMIN)) + return -EPERM; + xfs_fs_freeze(mp); + return 0; + + case XFS_IOC_THAW: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + xfs_fs_thaw(mp); + return 0; + + case XFS_IOC_ERROR_INJECTION: { + xfs_error_injection_t in; + + if (copy_from_user(&in, (char *)arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_errortag_add(in.errtag, mp); + if (error) + return -error; + return 0; + } + + case XFS_IOC_ERROR_CLEARALL: + error = xfs_errortag_clearall(mp); + return -error; + + default: + return -ENOTTY; + } +} + +STATIC int +xfs_ioc_space( + bhv_desc_t *bdp, + vnode_t *vp, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + xfs_flock64_t bf; + int attr_flags = 0; + int error; + + if (filp->f_flags & O_RDONLY) + return -XFS_ERROR(EBADF); + + if (vp->v_type != VREG) + return -XFS_ERROR(EINVAL); + + if (copy_from_user(&bf, (xfs_flock64_t *)arg, sizeof(bf))) + return -XFS_ERROR(EFAULT); + + if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) + attr_flags |= ATTR_NONBLOCK; + if (filp->f_mode & FINVIS) + attr_flags |= ATTR_DMI; + + error = xfs_change_file_space(bdp, cmd, &bf, filp->f_pos, + NULL, attr_flags); + return -error; +} + +STATIC int +xfs_ioc_bulkstat( + xfs_mount_t *mp, + unsigned int cmd, + unsigned long arg) +{ + xfs_fsop_bulkreq_t bulkreq; + int count; /* # of records returned */ + xfs_ino_t inlast; /* last inode number */ + int done; + int error; + + /* done = 1 if there are more stats to get and if bulkstat */ + /* should be called again (unused here, but used in dmapi) */ + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -XFS_ERROR(EIO); + + if (copy_from_user(&bulkreq, (xfs_fsop_bulkreq_t *)arg, + sizeof(xfs_fsop_bulkreq_t))) + return -XFS_ERROR(EFAULT); + + if (copy_from_user(&inlast, (__s64 *)bulkreq.lastip, + sizeof(__s64))) + return -XFS_ERROR(EFAULT); + + if ((count = bulkreq.icount) <= 0) + return -XFS_ERROR(EINVAL); + + if (cmd == XFS_IOC_FSINUMBERS) + error = xfs_inumbers(mp, NULL, &inlast, &count, + bulkreq.ubuffer); + else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) + error = xfs_bulkstat_single(mp, &inlast, + bulkreq.ubuffer, &done); + else { /* XFS_IOC_FSBULKSTAT */ + if (count == 1 && inlast != 0) { + inlast++; + error = xfs_bulkstat_single(mp, &inlast, + bulkreq.ubuffer, &done); + } else { + error = xfs_bulkstat(mp, NULL, &inlast, &count, + (bulkstat_one_pf)xfs_bulkstat_one, + sizeof(xfs_bstat_t), bulkreq.ubuffer, + BULKSTAT_FG_QUICK, &done); + } + } + + if (error) + return -error; + + if (bulkreq.ocount != NULL) { + if (copy_to_user((xfs_ino_t *)bulkreq.lastip, &inlast, + sizeof(xfs_ino_t))) + return -XFS_ERROR(EFAULT); + + if (copy_to_user((__s32 *)bulkreq.ocount, &count, + sizeof(count))) + return -XFS_ERROR(EFAULT); + } + + return 0; +} + +STATIC int +xfs_ioc_fsgeometry_v1( + xfs_mount_t *mp, + unsigned long arg) +{ + xfs_fsop_geom_v1_t fsgeo; + int error; + + error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); + if (error) + return -error; + + if (copy_to_user((xfs_fsop_geom_t *)arg, &fsgeo, sizeof(fsgeo))) + return -XFS_ERROR(EFAULT); + return 0; +} + +STATIC int +xfs_ioc_fsgeometry( + xfs_mount_t *mp, + unsigned long arg) +{ + xfs_fsop_geom_t fsgeo; + int error; + + error = xfs_fs_geometry(mp, &fsgeo, 4); + if (error) + return -error; + + if (copy_to_user((xfs_fsop_geom_t *)arg, &fsgeo, sizeof(fsgeo))) + return -XFS_ERROR(EFAULT); + return 0; +} + +STATIC int +xfs_ioc_xattr( + vnode_t *vp, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + struct fsxattr fa; + vattr_t 
va; + int error; + + switch (cmd) { + case XFS_IOC_FSGETXATTR: { + va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return -error; + + fa.fsx_xflags = va.va_xflags; + fa.fsx_extsize = va.va_extsize; + fa.fsx_nextents = va.va_nextents; + + if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_FSSETXATTR: { + int attr_flags = 0; + + if (copy_from_user(&fa, (struct fsxattr *)arg, sizeof(fa))) + return -XFS_ERROR(EFAULT); + + va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE; + va.va_xflags = fa.fsx_xflags; + va.va_extsize = fa.fsx_extsize; + + if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) + attr_flags |= ATTR_NONBLOCK; + + VOP_SETATTR(vp, &va, attr_flags, NULL, error); + return -error; + } + + case XFS_IOC_FSGETXATTRA: { + + va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_ANEXTENTS; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return -error; + + fa.fsx_xflags = va.va_xflags; + fa.fsx_extsize = va.va_extsize; + fa.fsx_nextents = va.va_anextents; + + if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa))) + return -XFS_ERROR(EFAULT); + return 0; + } + + default: + return -ENOTTY; + + } +} + +STATIC int +xfs_ioc_getbmap( + bhv_desc_t *bdp, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + struct getbmap bm; + int iflags; + int error; + + if (copy_from_user(&bm, (struct getbmap *)arg, sizeof(bm))) + return -XFS_ERROR(EFAULT); + + if (bm.bmv_count < 2) + return -XFS_ERROR(EINVAL); + + iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0); + if (filp->f_mode & FINVIS) + iflags |= BMV_IF_NO_DMAPI_READ; + + error = xfs_getbmap(bdp, &bm, (struct getbmap *)arg+1, iflags); + if (error) + return -error; + + if (copy_to_user((struct getbmap *)arg, &bm, sizeof(bm))) + return -XFS_ERROR(EFAULT); + return 0; +} + +STATIC int +xfs_ioc_getbmapx( + bhv_desc_t *bdp, + unsigned long arg) +{ + struct getbmapx bmx; + struct getbmap bm; + int iflags; + int error; + + if (copy_from_user(&bmx, (struct getbmapx *)arg, sizeof(bmx))) + return -XFS_ERROR(EFAULT); + + if (bmx.bmv_count < 2) + return -XFS_ERROR(EINVAL); + + /* + * Map input getbmapx structure to a getbmap + * structure for xfs_getbmap. + */ + GETBMAP_CONVERT(bmx, bm); + + iflags = bmx.bmv_iflags; + + if (iflags & (~BMV_IF_VALID)) + return -XFS_ERROR(EINVAL); + + iflags |= BMV_IF_EXTENDED; + + error = xfs_getbmap(bdp, &bm, (struct getbmapx *)arg+1, iflags); + if (error) + return -error; + + GETBMAP_CONVERT(bm, bmx); + + if (copy_to_user((struct getbmapx *)arg, &bmx, sizeof(bmx))) + return -XFS_ERROR(EFAULT); + + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_iomap.c linux.22-ac2/fs/xfs/linux/xfs_iomap.c --- linux.vanilla/fs/xfs/linux/xfs_iomap.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_iomap.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,784 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_space.h" +#include "xfs_utils.h" + +#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ + << mp->m_writeio_log) +#define XFS_STRAT_WRITE_IMAPS 2 +#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP + +STATIC int +_xfs_imap_to_bmap( + xfs_iocore_t *io, + xfs_off_t offset, + xfs_bmbt_irec_t *imap, + page_buf_bmap_t *pbmapp, + int imaps, /* Number of imap entries */ + int pbmaps) /* Number of pbmap entries */ +{ + xfs_mount_t *mp; + xfs_fsize_t nisize; + int pbm; + xfs_fsblock_t start_block; + + mp = io->io_mount; + nisize = XFS_SIZE(mp, io); + if (io->io_new_size > nisize) + nisize = io->io_new_size; + + for (pbm = 0; imaps && pbm < pbmaps; imaps--, pbmapp++, imap++, pbm++) { + pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ? 
+ mp->m_rtdev_targp : mp->m_ddev_targp; + pbmapp->pbm_offset = XFS_FSB_TO_B(mp, imap->br_startoff); + pbmapp->pbm_delta = offset - pbmapp->pbm_offset; + pbmapp->pbm_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount); + pbmapp->pbm_flags = 0; + + start_block = imap->br_startblock; + if (start_block == HOLESTARTBLOCK) { + pbmapp->pbm_bn = PAGE_BUF_DADDR_NULL; + pbmapp->pbm_flags = PBMF_HOLE; + } else if (start_block == DELAYSTARTBLOCK) { + pbmapp->pbm_bn = PAGE_BUF_DADDR_NULL; + pbmapp->pbm_flags = PBMF_DELAY; + } else { + pbmapp->pbm_bn = XFS_FSB_TO_DB_IO(io, start_block); + if (ISUNWRITTEN(imap)) + pbmapp->pbm_flags |= PBMF_UNWRITTEN; + } + + if ((pbmapp->pbm_offset + pbmapp->pbm_bsize) >= nisize) { + pbmapp->pbm_flags |= PBMF_EOF; + } + + offset += pbmapp->pbm_bsize - pbmapp->pbm_delta; + } + return pbm; /* Return the number filled */ +} + +int +xfs_iomap( + xfs_iocore_t *io, + xfs_off_t offset, + ssize_t count, + int flags, + page_buf_bmap_t *pbmapp, + int *npbmaps) +{ + xfs_mount_t *mp = io->io_mount; + xfs_fileoff_t offset_fsb, end_fsb; + int error = 0; + int lockmode = 0; + xfs_bmbt_irec_t imap; + int nimaps = 1; + int bmap_flags = 0; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + switch (flags & + (BMAP_READ|BMAP_WRITE|BMAP_ALLOCATE|BMAP_UNWRITTEN)) { + case BMAP_READ: + lockmode = XFS_LCK_MAP_SHARED(mp, io); + bmap_flags = XFS_BMAPI_ENTIRE; + if (flags & BMAP_IGNSTATE) + bmap_flags |= XFS_BMAPI_IGSTATE; + break; + case PBF_WRITE: + lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR; + bmap_flags = 0; + XFS_ILOCK(mp, io, lockmode); + break; + case BMAP_ALLOCATE: + lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD; + bmap_flags = XFS_BMAPI_ENTIRE; + /* Attempt non-blocking lock */ + if (flags & BMAP_TRYLOCK) { + if (!XFS_ILOCK_NOWAIT(mp, io, lockmode)) + return XFS_ERROR(EAGAIN); + } else { + XFS_ILOCK(mp, io, lockmode); + } + break; + case BMAP_UNWRITTEN: + goto phase2; + default: + BUG(); + } + + offset_fsb = XFS_B_TO_FSBT(mp, offset); + end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); + + error = XFS_BMAPI(mp, NULL, io, offset_fsb, + (xfs_filblks_t)(end_fsb - offset_fsb) , + bmap_flags, NULL, 0, &imap, + &nimaps, NULL); + + if (error) + goto out; + +phase2: + switch (flags & (BMAP_WRITE|BMAP_ALLOCATE|BMAP_UNWRITTEN)) { + case BMAP_WRITE: + /* If we found an extent, return it */ + if (nimaps && (imap.br_startblock != HOLESTARTBLOCK)) + break; + + if (flags & (BMAP_DIRECT|BMAP_MMAP)) { + error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset, + count, flags, &imap, &nimaps, nimaps); + } else { + error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, + flags, &imap, &nimaps); + } + break; + case BMAP_ALLOCATE: + /* If we found an extent, return it */ + XFS_IUNLOCK(mp, io, lockmode); + lockmode = 0; + + if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) + break; + + error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps); + break; + case BMAP_UNWRITTEN: + lockmode = 0; + error = XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count); + nimaps = 0; + break; + } + + if (nimaps) { + *npbmaps = _xfs_imap_to_bmap(io, offset, &imap, + pbmapp, nimaps, *npbmaps); + } else if (npbmaps) { + *npbmaps = 0; + } + +out: + if (lockmode) + XFS_IUNLOCK(mp, io, lockmode); + return XFS_ERROR(error); +} + +STATIC int +xfs_flush_space( + xfs_inode_t *ip, + int *fsynced, + int *ioflags) +{ + vnode_t *vp = XFS_ITOV(ip); + + switch (*fsynced) { + case 0: + if (ip->i_delayed_blks) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + fsync_inode_data_buffers(LINVFS_GET_IP(vp)); + xfs_ilock(ip, XFS_ILOCK_EXCL); + *fsynced = 1; + } 
else { + *ioflags |= BMAP_SYNC; + *fsynced = 2; + } + return 0; + case 1: + *fsynced = 2; + *ioflags |= BMAP_SYNC; + return 0; + case 2: + xfs_iunlock(ip, XFS_ILOCK_EXCL); + fsync_no_super(LINVFS_GET_IP(vp)->i_dev); + xfs_log_force(ip->i_mount, (xfs_lsn_t)0, + XFS_LOG_FORCE|XFS_LOG_SYNC); + xfs_ilock(ip, XFS_ILOCK_EXCL); + *fsynced = 3; + return 0; + } + return 1; +} + +int +xfs_iomap_write_direct( + xfs_inode_t *ip, + loff_t offset, + size_t count, + int flags, + xfs_bmbt_irec_t *ret_imap, + int *nmaps, + int found) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_iocore_t *io = &ip->i_iocore; + xfs_fileoff_t offset_fsb; + xfs_fileoff_t last_fsb; + xfs_filblks_t count_fsb; + xfs_fsize_t isize; + xfs_fsblock_t firstfsb; + int nimaps, maps; + int error; + int bmapi_flag; + int rt; + xfs_trans_t *tp; + xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp; + xfs_bmap_free_t free_list; + int aeof; + xfs_filblks_t datablocks; + int committed; + int numrtextents; + uint resblks; + + /* + * Make sure that the dquots are there. This doesn't hold + * the ilock across a disk read. + */ + + error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED); + if (error) + return XFS_ERROR(error); + + maps = min(XFS_WRITE_IMAPS, *nmaps); + nimaps = maps; + + isize = ip->i_d.di_size; + aeof = (offset + count) > isize; + + if (io->io_new_size > isize) + isize = io->io_new_size; + + offset_fsb = XFS_B_TO_FSBT(mp, offset); + last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); + count_fsb = last_fsb - offset_fsb; + if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) { + xfs_fileoff_t map_last_fsb; + + map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff; + + if (map_last_fsb < last_fsb) { + last_fsb = map_last_fsb; + count_fsb = last_fsb - offset_fsb; + } + ASSERT(count_fsb > 0); + } + + /* + * determine if reserving space on + * the data or realtime partition. + */ + if ((rt = XFS_IS_REALTIME_INODE(ip))) { + int sbrtextsize, iprtextsize; + + sbrtextsize = mp->m_sb.sb_rextsize; + iprtextsize = + ip->i_d.di_extsize ? ip->i_d.di_extsize : sbrtextsize; + numrtextents = (count_fsb + iprtextsize - 1); + do_div(numrtextents, sbrtextsize); + datablocks = 0; + } else { + datablocks = count_fsb; + numrtextents = 0; + } + + /* + * allocate and setup the transaction + */ + xfs_iunlock(ip, XFS_ILOCK_EXCL); + tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); + + resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks); + + error = xfs_trans_reserve(tp, resblks, + XFS_WRITE_LOG_RES(mp), numrtextents, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + + /* + * check for running out of space + */ + if (error) + /* + * Free the transaction structure. + */ + xfs_trans_cancel(tp, 0); + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + if (error) + goto error_out; /* Don't return in above if .. 
trans .., + need lock to return */ + + if (XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, resblks)) { + error = (EDQUOT); + goto error1; + } + nimaps = 1; + + bmapi_flag = XFS_BMAPI_WRITE; + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + if (!(flags & BMAP_MMAP) && (offset < ip->i_d.di_size || rt)) + bmapi_flag |= XFS_BMAPI_PREALLOC; + + /* + * issue the bmapi() call to allocate the blocks + */ + XFS_BMAP_INIT(&free_list, &firstfsb); + imapp = &imap[0]; + error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, + bmapi_flag, &firstfsb, 0, imapp, &nimaps, &free_list); + if (error) { + goto error0; + } + + /* + * complete the transaction + */ + + error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); + if (error) { + goto error0; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (error) { + goto error_out; + } + + /* copy any maps to caller's array and return any error. */ + if (nimaps == 0) { + error = (ENOSPC); + goto error_out; + } + + *ret_imap = imap[0]; + *nmaps = 1; + return 0; + + error0: /* Cancel bmap, unlock inode, and cancel trans */ + xfs_bmap_cancel(&free_list); + + error1: /* Just cancel transaction */ + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); + *nmaps = 0; /* nothing set-up here */ + +error_out: + return XFS_ERROR(error); +} + +int +xfs_iomap_write_delay( + xfs_inode_t *ip, + loff_t offset, + size_t count, + int ioflag, + xfs_bmbt_irec_t *ret_imap, + int *nmaps) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_iocore_t *io = &ip->i_iocore; + xfs_fileoff_t offset_fsb; + xfs_fileoff_t last_fsb; + xfs_fsize_t isize; + xfs_fsblock_t firstblock; + int nimaps; + int error; + xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; + int aeof; + int fsynced = 0; + + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); + + /* + * Make sure that the dquots are there. This doesn't hold + * the ilock across a disk read. + */ + + error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); + if (error) + return XFS_ERROR(error); + +retry: + isize = ip->i_d.di_size; + if (io->io_new_size > isize) { + isize = io->io_new_size; + } + + aeof = 0; + offset_fsb = XFS_B_TO_FSBT(mp, offset); + last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); + /* + * If the caller is doing a write at the end of the file, + * then extend the allocation (and the buffer used for the write) + * out to the file system's write iosize. We clean up any extra + * space left over when the file is closed in xfs_inactive(). + * + * We don't bother with this for sync writes, because we need + * to minimize the amount we write for good performance. 
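+	 *
+	 * Worked example (illustrative numbers only): with 4k blocks and
+	 * a 64k write iosize (m_writeio_blocks = 16, m_writeio_log = 16),
+	 * a 1k write at offset 100k past EOF has offset + count - 1 =
+	 * 103423; XFS_WRITEIO_ALIGN() rounds that down to 65536, so
+	 * ioalign is fsb 16 and last_fsb becomes 16 + 16 = 32.  The
+	 * delayed reservation therefore extends out to 128k rather than
+	 * stopping at 104k.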
+ */ + if (!(ioflag & BMAP_SYNC) && ((offset + count) > ip->i_d.di_size)) { + xfs_off_t aligned_offset; + unsigned int iosize; + xfs_fileoff_t ioalign; + + iosize = mp->m_writeio_blocks; + aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); + ioalign = XFS_B_TO_FSBT(mp, aligned_offset); + last_fsb = ioalign + iosize; + aeof = 1; + } + + nimaps = XFS_WRITE_IMAPS; + firstblock = NULLFSBLOCK; + + /* + * roundup the allocation request to m_dalign boundary if file size + * is greater that 512K and we are allocating past the allocation eof + */ + if (mp->m_dalign && (isize >= mp->m_dalign) && aeof) { + int eof; + xfs_fileoff_t new_last_fsb; + new_last_fsb = roundup_64(last_fsb, mp->m_dalign); + error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); + if (error) { + return error; + } + if (eof) { + last_fsb = new_last_fsb; + } + } + + error = xfs_bmapi(NULL, ip, offset_fsb, + (xfs_filblks_t)(last_fsb - offset_fsb), + XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | + XFS_BMAPI_ENTIRE, &firstblock, 1, imap, + &nimaps, NULL); + /* + * This can be EDQUOT, if nimaps == 0 + */ + if (error && (error != ENOSPC)) { + return XFS_ERROR(error); + } + /* + * If bmapi returned us nothing, and if we didn't get back EDQUOT, + * then we must have run out of space. + */ + + if (nimaps == 0) { + if (xfs_flush_space(ip, &fsynced, &ioflag)) + return XFS_ERROR(ENOSPC); + + error = 0; + goto retry; + } + + *ret_imap = imap[0]; + *nmaps = 1; + return 0; +} + +/* + * Pass in a delayed allocate extent, convert it to real extents; + * return to the caller the extent we create which maps on top of + * the originating callers request. + * + * Called without a lock on the inode. + */ +int +xfs_iomap_write_allocate( + xfs_inode_t *ip, + xfs_bmbt_irec_t *map, + int *retmap) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_fileoff_t offset_fsb, last_block; + xfs_fileoff_t end_fsb, map_start_fsb; + xfs_fsblock_t first_block; + xfs_bmap_free_t free_list; + xfs_filblks_t count_fsb; + xfs_bmbt_irec_t imap[XFS_STRAT_WRITE_IMAPS]; + xfs_trans_t *tp; + int i, nimaps, committed; + int error = 0; + int nres; + + *retmap = 0; + + /* + * Make sure that the dquots are there. + */ + + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return XFS_ERROR(error); + + offset_fsb = map->br_startoff; + count_fsb = map->br_blockcount; + map_start_fsb = offset_fsb; + + XFS_STATS_ADD(xfsstats.xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); + + while (count_fsb != 0) { + /* + * Set up a transaction with which to allocate the + * backing store for the file. Do allocations in a + * loop until we get some space in the range we are + * interested in. The other space that might be allocated + * is in the delayed allocation extent on which we sit + * but before our buffer starts. 
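+ *
+ * The reservation below (XFS_EXTENTADD_SPACE_RES) only covers
+ * possible growth of the bmap btree; if even that fails with
+ * ENOSPC the reservation is retried with zero blocks, since the
+ * delalloc blocks being converted were already reserved when
+ * the delayed allocation was set up.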
+ */ + + nimaps = 0; + while (nimaps == 0) { + tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); + nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); + error = xfs_trans_reserve(tp, nres, + XFS_WRITE_LOG_RES(mp), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + + if (error == ENOSPC) { + error = xfs_trans_reserve(tp, 0, + XFS_WRITE_LOG_RES(mp), + 0, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + } + if (error) { + xfs_trans_cancel(tp, 0); + return XFS_ERROR(error); + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + XFS_BMAP_INIT(&free_list, &first_block); + + nimaps = XFS_STRAT_WRITE_IMAPS; + /* + * Ensure we don't go beyond eof - it is possible + * the extents changed since we did the read call, + * we dropped the ilock in the interim. + */ + + end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size); + xfs_bmap_last_offset(NULL, ip, &last_block, + XFS_DATA_FORK); + last_block = XFS_FILEOFF_MAX(last_block, end_fsb); + if ((map_start_fsb + count_fsb) > last_block) { + count_fsb = last_block - map_start_fsb; + if (count_fsb == 0) { + error = EAGAIN; + goto trans_cancel; + } + } + + /* Go get the actual blocks */ + error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, + XFS_BMAPI_WRITE, &first_block, 1, + imap, &nimaps, &free_list); + + if (error) + goto trans_cancel; + + error = xfs_bmap_finish(&tp, &free_list, + first_block, &committed); + + if (error) + goto trans_cancel; + + error = xfs_trans_commit(tp, + XFS_TRANS_RELEASE_LOG_RES, NULL); + + if (error) + goto error0; + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + } + + /* + * See if we were able to allocate an extent that + * covers at least part of the callers request + */ + + for (i = 0; i < nimaps; i++) { + if ((map->br_startoff >= imap[i].br_startoff) && + (map->br_startoff < (imap[i].br_startoff + + imap[i].br_blockcount))) { + *map = imap[i]; + *retmap = 1; + XFS_STATS_INC(xfsstats.xs_xstrat_quick); + return 0; + } + count_fsb -= imap[i].br_blockcount; + } + + /* So far we have not mapped the requested part of the + * file, just surrounding data, try again. + */ + nimaps--; + offset_fsb = imap[nimaps].br_startoff + + imap[nimaps].br_blockcount; + map_start_fsb = offset_fsb; + } + +trans_cancel: + xfs_bmap_cancel(&free_list); + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); +error0: + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return XFS_ERROR(error); +} + +int +xfs_iomap_write_unwritten( + xfs_inode_t *ip, + loff_t offset, + size_t count) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_trans_t *tp; + xfs_fileoff_t offset_fsb; + xfs_filblks_t count_fsb; + xfs_filblks_t numblks_fsb; + xfs_bmbt_irec_t imap; + int committed; + int error; + int nres; + int nimaps; + xfs_fsblock_t firstfsb; + xfs_bmap_free_t free_list; + + offset_fsb = XFS_B_TO_FSBT(mp, offset); + count_fsb = XFS_B_TO_FSB(mp, count); + + do { + nres = XFS_DIOSTRAT_SPACE_RES(mp, 0); + + /* + * set up a transaction to convert the range of extents + * from unwritten to real. Do allocations in a loop until + * we have covered the range passed in. + */ + + tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); + error = xfs_trans_reserve(tp, nres, + XFS_WRITE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + if (error) { + xfs_trans_cancel(tp, 0); + goto error0; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + /* + * Modify the unwritten extent state of the buffer. 
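+ * xfs_bmapi() is called with XFS_BMAPI_WRITE over blocks that
+ * are already allocated but marked unwritten, so it flips the
+ * extent state to written instead of allocating new space; the
+ * surrounding loop then advances offset_fsb by the blockcount
+ * of the mapping returned in imap.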
+ */ + XFS_BMAP_INIT(&free_list, &firstfsb); + nimaps = 1; + error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, + XFS_BMAPI_WRITE, &firstfsb, + 1, &imap, &nimaps, &free_list); + if (error) + goto error_on_bmapi_transaction; + + error = xfs_bmap_finish(&(tp), &(free_list), + firstfsb, &committed); + if (error) + goto error_on_bmapi_transaction; + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + if (error) + goto error0; + + if ((numblks_fsb = imap.br_blockcount) == 0) { + /* + * The numblks_fsb value should always get + * smaller, otherwise the loop is stuck. + */ + ASSERT(imap.br_blockcount); + break; + } + offset_fsb += numblks_fsb; + count_fsb -= numblks_fsb; + } while (count_fsb > 0); + + return 0; + +error_on_bmapi_transaction: + xfs_bmap_cancel(&free_list); + xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); + xfs_iunlock(ip, XFS_ILOCK_EXCL); +error0: + return XFS_ERROR(error); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_iops.c linux.22-ac2/fs/xfs/linux/xfs_iops.c --- linux.vanilla/fs/xfs/linux/xfs_iops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_iops.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,832 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" + +#include + + +/* + * Pull the link count and size up from the xfs inode to the linux inode + */ +STATIC void +validate_fields( + struct inode *ip) +{ + vnode_t *vp = LINVFS_GET_VP(ip); + vattr_t va; + int error; + + va.va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS; + VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error); + ip->i_nlink = va.va_nlink; + ip->i_size = va.va_size; + ip->i_blocks = va.va_nblocks; +} + +#ifdef CONFIG_XFS_POSIX_ACL +/* + * Determine whether a process has a valid fs_struct (kernel daemons + * like knfsd don't have an fs_struct). + */ +STATIC int inline +has_fs_struct(struct task_struct *task) +{ + return (task->fs != init_task.fs); +} +#endif + +STATIC int +linvfs_mknod( + struct inode *dir, + struct dentry *dentry, + int mode, + int rdev) +{ + struct inode *ip; + vattr_t va; + vnode_t *vp = NULL, *dvp = LINVFS_GET_VP(dir); + xattr_exists_t test_default_acl = _ACL_DEFAULT_EXISTS; + int have_default_acl = 0; + int error = EINVAL; + + if (test_default_acl) + have_default_acl = test_default_acl(dvp); + +#ifdef CONFIG_XFS_POSIX_ACL + /* + * Conditionally compiled so that the ACL base kernel changes can be + * split out into separate patches - remove this once MS_POSIXACL is + * accepted, or some other way to implement this exists. 
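+ *
+ * Note that the umask is only applied when the parent directory
+ * has no default ACL; when one exists the mode is left alone and
+ * the permissions come from the _ACL_INHERIT call further down.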
+ */ + if (IS_POSIXACL(dir) && !have_default_acl && has_fs_struct(current)) + mode &= ~current->fs->umask; +#endif + + memset(&va, 0, sizeof(va)); + va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + va.va_type = IFTOVT(mode); + va.va_mode = mode; + + switch (mode & S_IFMT) { + case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: + va.va_rdev = XFS_MKDEV(MAJOR(rdev), MINOR(rdev)); + va.va_mask |= XFS_AT_RDEV; + /*FALLTHROUGH*/ + case S_IFREG: + VOP_CREATE(dvp, dentry, &va, &vp, NULL, error); + break; + case S_IFDIR: + VOP_MKDIR(dvp, dentry, &va, &vp, NULL, error); + break; + default: + error = EINVAL; + break; + } + + if (!error) { + ASSERT(vp); + ip = LINVFS_GET_IP(vp); + if (!ip) { + VN_RELE(vp); + return -ENOMEM; + } + + if (S_ISCHR(mode) || S_ISBLK(mode)) + ip->i_rdev = to_kdev_t(rdev); + validate_fields(dir); + d_instantiate(dentry, ip); + } + + if (!error && have_default_acl) { + _ACL_DECL (pdacl); + + if (!_ACL_ALLOC(pdacl)) { + error = -ENOMEM; + } else { + if (_ACL_GET_DEFAULT(dvp, pdacl)) + error = _ACL_INHERIT(vp, &va, pdacl); + VMODIFY(vp); + _ACL_FREE(pdacl); + } + } + return -error; +} + +STATIC int +linvfs_create( + struct inode *dir, + struct dentry *dentry, + int mode) +{ + return linvfs_mknod(dir, dentry, mode, 0); +} + +STATIC int +linvfs_mkdir( + struct inode *dir, + struct dentry *dentry, + int mode) +{ + return linvfs_mknod(dir, dentry, mode|S_IFDIR, 0); +} + +STATIC struct dentry * +linvfs_lookup( + struct inode *dir, + struct dentry *dentry) +{ + struct inode *ip = NULL; + vnode_t *vp, *cvp = NULL; + int error; + + if (dentry->d_name.len >= MAXNAMELEN) + return ERR_PTR(-ENAMETOOLONG); + + vp = LINVFS_GET_VP(dir); + VOP_LOOKUP(vp, dentry, &cvp, 0, NULL, NULL, error); + if (!error) { + ASSERT(cvp); + ip = LINVFS_GET_IP(cvp); + if (!ip) { + VN_RELE(cvp); + return ERR_PTR(-EACCES); + } + } + if (error && (error != ENOENT)) + return ERR_PTR(-error); + d_add(dentry, ip); /* Negative entry goes in if ip is NULL */ + return NULL; +} + +STATIC int +linvfs_link( + struct dentry *old_dentry, + struct inode *dir, + struct dentry *dentry) +{ + struct inode *ip; /* inode of guy being linked to */ + vnode_t *tdvp; /* target directory for new name/link */ + vnode_t *vp; /* vp of name being linked */ + int error; + + ip = old_dentry->d_inode; /* inode being linked to */ + if (S_ISDIR(ip->i_mode)) + return -EPERM; + + tdvp = LINVFS_GET_VP(dir); + vp = LINVFS_GET_VP(ip); + + VOP_LINK(tdvp, vp, dentry, NULL, error); + if (!error) { + VMODIFY(tdvp); + VN_HOLD(vp); + validate_fields(ip); + d_instantiate(dentry, ip); + } + return -error; +} + +STATIC int +linvfs_unlink( + struct inode *dir, + struct dentry *dentry) +{ + struct inode *inode; + vnode_t *dvp; /* directory containing name to remove */ + int error; + + inode = dentry->d_inode; + dvp = LINVFS_GET_VP(dir); + + VOP_REMOVE(dvp, dentry, NULL, error); + if (!error) { + validate_fields(dir); /* For size only */ + validate_fields(inode); + } + + return -error; +} + +STATIC int +linvfs_symlink( + struct inode *dir, + struct dentry *dentry, + const char *symname) +{ + struct inode *ip; + vattr_t va; + vnode_t *dvp; /* directory containing name to remove */ + vnode_t *cvp; /* used to lookup symlink to put in dentry */ + int error; + + dvp = LINVFS_GET_VP(dir); + cvp = NULL; + + memset(&va, 0, sizeof(va)); + va.va_type = VLNK; + va.va_mode = irix_symlink_mode ? 
0777 & ~current->fs->umask : S_IRWXUGO; + va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + + error = 0; + VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error); + if (!error && cvp) { + ASSERT(cvp->v_type == VLNK); + ip = LINVFS_GET_IP(cvp); + d_instantiate(dentry, ip); + validate_fields(dir); + validate_fields(ip); /* size needs update */ + } + return -error; +} + +STATIC int +linvfs_rmdir( + struct inode *dir, + struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + vnode_t *dvp = LINVFS_GET_VP(dir); + int error; + + VOP_RMDIR(dvp, dentry, NULL, error); + if (!error) { + validate_fields(inode); + validate_fields(dir); + } + return -error; +} + +STATIC int +linvfs_rename( + struct inode *odir, + struct dentry *odentry, + struct inode *ndir, + struct dentry *ndentry) +{ + struct inode *new_inode = ndentry->d_inode; + vnode_t *fvp; /* from directory */ + vnode_t *tvp; /* target directory */ + int error; + + fvp = LINVFS_GET_VP(odir); + tvp = LINVFS_GET_VP(ndir); + + VOP_RENAME(fvp, odentry, tvp, ndentry, NULL, error); + if (error) + return -error; + + if (new_inode) + validate_fields(new_inode); + + validate_fields(odir); + if (ndir != odir) + validate_fields(ndir); + return 0; +} + +STATIC int +linvfs_readlink( + struct dentry *dentry, + char *buf, + int size) +{ + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + uio_t uio; + iovec_t iov; + int error; + + iov.iov_base = buf; + iov.iov_len = size; + + uio.uio_iov = &iov; + uio.uio_offset = 0; + uio.uio_segflg = UIO_USERSPACE; + uio.uio_resid = size; + uio.uio_iovcnt = 1; + + VOP_READLINK(vp, &uio, NULL, error); + if (error) + return -error; + + return (size - uio.uio_resid); +} + +/* + * careful here - this function can get called recursively, so + * we need to be very careful about how much stack we use. + * uio is kmalloced for this reason... 
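+ * (both the uio_t and the MAXNAMELEN+1 byte link buffer are
+ * kmalloc'd below and freed on every exit path, keeping the
+ * per-invocation stack usage small)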
+ */ +STATIC int +linvfs_follow_link( + struct dentry *dentry, + struct nameidata *nd) +{ + vnode_t *vp; + uio_t *uio; + iovec_t iov; + int error; + char *link; + + ASSERT(dentry); + ASSERT(nd); + + link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL); + if (!link) + return -ENOMEM; + + uio = (uio_t *)kmalloc(sizeof(uio_t), GFP_KERNEL); + if (!uio) { + kfree(link); + return -ENOMEM; + } + + vp = LINVFS_GET_VP(dentry->d_inode); + + iov.iov_base = link; + iov.iov_len = MAXNAMELEN; + + uio->uio_iov = &iov; + uio->uio_offset = 0; + uio->uio_segflg = UIO_SYSSPACE; + uio->uio_resid = MAXNAMELEN; + uio->uio_fmode = 0; + uio->uio_iovcnt = 1; + + VOP_READLINK(vp, uio, NULL, error); + if (error) { + kfree(uio); + kfree(link); + return -error; + } + + link[MAXNAMELEN - uio->uio_resid] = '\0'; + kfree(uio); + + /* vfs_follow_link returns (-) errors */ + error = vfs_follow_link(nd, link); + kfree(link); + return error; +} + +STATIC int +linvfs_permission( + struct inode *inode, + int mode) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error; + + mode <<= 6; /* convert from linux to vnode access bits */ + VOP_ACCESS(vp, mode, NULL, error); + return -error; +} + +STATIC int +linvfs_revalidate( + struct dentry *dentry) +{ + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + + if (unlikely(vp->v_flag & VMODIFIED)) + return vn_revalidate(vp); + return 0; +} + +STATIC int +linvfs_setattr( + struct dentry *dentry, + struct iattr *attr) +{ + struct inode *inode = dentry->d_inode; + unsigned int ia_valid = attr->ia_valid; + vnode_t *vp = LINVFS_GET_VP(inode); + vattr_t vattr; + int flags = 0; + int error; + + memset(&vattr, 0, sizeof(vattr_t)); + if (ia_valid & ATTR_UID) { + vattr.va_mask |= XFS_AT_UID; + vattr.va_uid = attr->ia_uid; + } + if (ia_valid & ATTR_GID) { + vattr.va_mask |= XFS_AT_GID; + vattr.va_gid = attr->ia_gid; + } + if (ia_valid & ATTR_SIZE) { + vattr.va_mask |= XFS_AT_SIZE; + vattr.va_size = attr->ia_size; + } + if (ia_valid & ATTR_ATIME) { + vattr.va_mask |= XFS_AT_ATIME; + vattr.va_atime.tv_sec = attr->ia_atime; + vattr.va_atime.tv_nsec = 0; + } + if (ia_valid & ATTR_MTIME) { + vattr.va_mask |= XFS_AT_MTIME; + vattr.va_mtime.tv_sec = attr->ia_mtime; + vattr.va_mtime.tv_nsec = 0; + } + if (ia_valid & ATTR_CTIME) { + vattr.va_mask |= XFS_AT_CTIME; + vattr.va_ctime.tv_sec = attr->ia_ctime; + vattr.va_ctime.tv_nsec = 0; + } + if (ia_valid & ATTR_MODE) { + vattr.va_mask |= XFS_AT_MODE; + vattr.va_mode = attr->ia_mode; + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) + inode->i_mode &= ~S_ISGID; + } + + if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) + flags = ATTR_UTIME; + + VOP_SETATTR(vp, &vattr, flags, NULL, error); + if (error) + return(-error); /* Positive error up from XFS */ + if (ia_valid & ATTR_SIZE) { + error = vmtruncate(inode, attr->ia_size); + } + + if (!error) { + vn_revalidate(vp); + } + return error; +} + +STATIC void +linvfs_truncate( + struct inode *inode) +{ + block_truncate_page(inode->i_mapping, inode->i_size, linvfs_get_block); +} + + + +/* + * Extended attributes interfaces + */ + +#define SYSTEM_NAME "system." /* VFS shared names/values */ +#define ROOT_NAME "trusted." /* root's own names/values */ +#define USER_NAME "user." 
/* user's own names/values */ +STATIC xattr_namespace_t xfs_namespace_array[] = { + { .name= SYSTEM_NAME, .namelen= sizeof(SYSTEM_NAME)-1,.exists= NULL }, + { .name= ROOT_NAME, .namelen= sizeof(ROOT_NAME)-1, .exists= NULL }, + { .name= USER_NAME, .namelen= sizeof(USER_NAME)-1, .exists= NULL }, + { .name= NULL } +}; +xattr_namespace_t *xfs_namespaces = &xfs_namespace_array[0]; + +#define POSIXACL_ACCESS "posix_acl_access" +#define POSIXACL_ACCESS_SIZE (sizeof(POSIXACL_ACCESS)-1) +#define POSIXACL_DEFAULT "posix_acl_default" +#define POSIXACL_DEFAULT_SIZE (sizeof(POSIXACL_DEFAULT)-1) +#define POSIXCAP "posix_capabilities" +#define POSIXCAP_SIZE (sizeof(POSIXCAP)-1) +#define POSIXMAC "posix_mac" +#define POSIXMAC_SIZE (sizeof(POSIXMAC)-1) +STATIC xattr_namespace_t sys_namespace_array[] = { + { .name= POSIXACL_ACCESS, + .namelen= POSIXACL_ACCESS_SIZE, .exists= _ACL_ACCESS_EXISTS }, + { .name= POSIXACL_DEFAULT, + .namelen= POSIXACL_DEFAULT_SIZE, .exists= _ACL_DEFAULT_EXISTS }, + { .name= POSIXCAP, + .namelen= POSIXCAP_SIZE, .exists= _CAP_EXISTS }, + { .name= POSIXMAC, + .namelen= POSIXMAC_SIZE, .exists= _MAC_EXISTS }, + { .name= NULL } +}; + +/* + * Some checks to prevent people abusing EAs to get over quota: + * - Don't allow modifying user EAs on devices/symlinks; + * - Don't allow modifying user EAs if sticky bit set; + */ +STATIC int +capable_user_xattr( + struct inode *inode) +{ + if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) && + !capable(CAP_SYS_ADMIN)) + return 0; + if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) && + (current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) + return 0; + return 1; +} + +STATIC int +linvfs_setxattr( + struct dentry *dentry, + const char *name, + void *data, + size_t size, + int flags) +{ + struct inode *inode = dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + char *p = (char *)name; + int xflags = 0; + int error; + + if (strncmp(name, xfs_namespaces[SYSTEM_NAMES].name, + xfs_namespaces[SYSTEM_NAMES].namelen) == 0) { + error = -EINVAL; + if (flags & XATTR_CREATE) + return error; + error = -EOPNOTSUPP; + p += xfs_namespaces[SYSTEM_NAMES].namelen; + if (strcmp(p, POSIXACL_ACCESS) == 0) + error = xfs_acl_vset(vp, (void *) data, size, + _ACL_TYPE_ACCESS); + else if (strcmp(p, POSIXACL_DEFAULT) == 0) + error = xfs_acl_vset(vp, (void *) data, size, + _ACL_TYPE_DEFAULT); + else if (strcmp(p, POSIXCAP) == 0) + error = xfs_cap_vset(vp, (void *) data, size); + if (!error) + error = vn_revalidate(vp); + return error; + } + + /* Convert Linux syscall to XFS internal ATTR flags */ + if (flags & XATTR_CREATE) + xflags |= ATTR_CREATE; + if (flags & XATTR_REPLACE) + xflags |= ATTR_REPLACE; + + if (strncmp(name, xfs_namespaces[ROOT_NAMES].name, + xfs_namespaces[ROOT_NAMES].namelen) == 0) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + xflags |= ATTR_ROOT; + p += xfs_namespaces[ROOT_NAMES].namelen; + VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error); + return -error; + } + if (strncmp(name, xfs_namespaces[USER_NAMES].name, + xfs_namespaces[USER_NAMES].namelen) == 0) { + if (!capable_user_xattr(inode)) + return -EPERM; + p += xfs_namespaces[USER_NAMES].namelen; + VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error); + return -error; + } + return -EOPNOTSUPP; +} + +STATIC ssize_t +linvfs_getxattr( + struct dentry *dentry, + const char *name, + void *data, + size_t size) +{ + struct inode *inode = dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + char *p = (char *)name; + int xflags = 0; + ssize_t error; + + if 
(strncmp(name, xfs_namespaces[SYSTEM_NAMES].name, + xfs_namespaces[SYSTEM_NAMES].namelen) == 0) { + error = -EOPNOTSUPP; + p += xfs_namespaces[SYSTEM_NAMES].namelen; + if (strcmp(p, POSIXACL_ACCESS) == 0) + error = xfs_acl_vget(vp, data, size, _ACL_TYPE_ACCESS); + else if (strcmp(p, POSIXACL_DEFAULT) == 0) + error = xfs_acl_vget(vp, data, size, _ACL_TYPE_DEFAULT); + else if (strcmp(p, POSIXCAP) == 0) + error = xfs_cap_vget(vp, data, size); + return error; + } + + /* Convert Linux syscall to XFS internal ATTR flags */ + if (!size) { + xflags |= ATTR_KERNOVAL; + data = NULL; + } + + if (strncmp(name, xfs_namespaces[ROOT_NAMES].name, + xfs_namespaces[ROOT_NAMES].namelen) == 0) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + xflags |= ATTR_ROOT; + p += xfs_namespaces[ROOT_NAMES].namelen; + VOP_ATTR_GET(vp, p, data, (int *)&size, xflags, NULL, error); + if (!error) + error = -size; + return -error; + } + if (strncmp(name, xfs_namespaces[USER_NAMES].name, + xfs_namespaces[USER_NAMES].namelen) == 0) { + p += xfs_namespaces[USER_NAMES].namelen; + if (!capable_user_xattr(inode)) + return -EPERM; + VOP_ATTR_GET(vp, p, data, (int *)&size, xflags, NULL, error); + if (!error) + error = -size; + return -error; + } + return -EOPNOTSUPP; +} + + +STATIC ssize_t +linvfs_listxattr( + struct dentry *dentry, + char *data, + size_t size) +{ + attrlist_cursor_kern_t cursor; + xattr_namespace_t *sys; + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + char *k = data; + int xflags = ATTR_KERNAMELS; + int result = 0; + ssize_t error; + + if (!size) + xflags |= ATTR_KERNOVAL; + if (capable(CAP_SYS_ADMIN)) + xflags |= ATTR_KERNFULLS; + + memset(&cursor, 0, sizeof(cursor)); + VOP_ATTR_LIST(vp, data, size, xflags, &cursor, NULL, error); + if (error > 0) + return -error; + result += -error; + + k += result; /* advance start of our buffer */ + for (sys = &sys_namespace_array[0]; sys->name != NULL; sys++) { + if (sys->exists == NULL || !sys->exists(vp)) + continue; + result += xfs_namespaces[SYSTEM_NAMES].namelen; + result += sys->namelen + 1; + if (size) { + if (result > size) + return -ERANGE; + strcpy(k, xfs_namespaces[SYSTEM_NAMES].name); + k += xfs_namespaces[SYSTEM_NAMES].namelen; + strcpy(k, sys->name); + k += sys->namelen + 1; + } + } + return result; +} + +STATIC int +linvfs_removexattr( + struct dentry *dentry, + const char *name) +{ + struct inode *inode = dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); + char *p = (char *)name; + int xflags = 0; + int error; + + if (strncmp(name, xfs_namespaces[SYSTEM_NAMES].name, + xfs_namespaces[SYSTEM_NAMES].namelen) == 0) { + error = -EOPNOTSUPP; + p += xfs_namespaces[SYSTEM_NAMES].namelen; + if (strcmp(p, POSIXACL_ACCESS) == 0) + error = xfs_acl_vremove(vp, _ACL_TYPE_ACCESS); + else if (strcmp(p, POSIXACL_DEFAULT) == 0) + error = xfs_acl_vremove(vp, _ACL_TYPE_DEFAULT); + else if (strcmp(p, POSIXCAP) == 0) + error = xfs_cap_vremove(vp); + return error; + } + + if (strncmp(name, xfs_namespaces[ROOT_NAMES].name, + xfs_namespaces[ROOT_NAMES].namelen) == 0) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + xflags |= ATTR_ROOT; + p += xfs_namespaces[ROOT_NAMES].namelen; + VOP_ATTR_REMOVE(vp, p, xflags, NULL, error); + return -error; + } + if (strncmp(name, xfs_namespaces[USER_NAMES].name, + xfs_namespaces[USER_NAMES].namelen) == 0) { + p += xfs_namespaces[USER_NAMES].namelen; + if (!capable_user_xattr(inode)) + return -EPERM; + VOP_ATTR_REMOVE(vp, p, xflags, NULL, error); + return -error; + } + return -EOPNOTSUPP; +} + + +struct inode_operations 
linvfs_file_inode_operations = +{ + .permission = linvfs_permission, + .truncate = linvfs_truncate, + .revalidate = linvfs_revalidate, + .setattr = linvfs_setattr, + .setxattr = linvfs_setxattr, + .getxattr = linvfs_getxattr, + .listxattr = linvfs_listxattr, + .removexattr = linvfs_removexattr, +}; + +struct inode_operations linvfs_dir_inode_operations = +{ + .create = linvfs_create, + .lookup = linvfs_lookup, + .link = linvfs_link, + .unlink = linvfs_unlink, + .symlink = linvfs_symlink, + .mkdir = linvfs_mkdir, + .rmdir = linvfs_rmdir, + .mknod = linvfs_mknod, + .rename = linvfs_rename, + .permission = linvfs_permission, + .revalidate = linvfs_revalidate, + .setattr = linvfs_setattr, + .setxattr = linvfs_setxattr, + .getxattr = linvfs_getxattr, + .listxattr = linvfs_listxattr, + .removexattr = linvfs_removexattr, +}; + +struct inode_operations linvfs_symlink_inode_operations = +{ + .readlink = linvfs_readlink, + .follow_link = linvfs_follow_link, + .permission = linvfs_permission, + .revalidate = linvfs_revalidate, + .setattr = linvfs_setattr, + .setxattr = linvfs_setxattr, + .getxattr = linvfs_getxattr, + .listxattr = linvfs_listxattr, + .removexattr = linvfs_removexattr, +}; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_iops.h linux.22-ac2/fs/xfs/linux/xfs_iops.h --- linux.vanilla/fs/xfs/linux/xfs_iops.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_iops.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_IOPS_H__ +#define __XFS_IOPS_H__ + +/* + * Extended system attributes. + * So far only POSIX ACLs are supported, but this will need to + * grow in time (capabilities, mandatory access control, etc). 
+ */ +#define XFS_SYSTEM_NAMESPACE SYSTEM_POSIXACL + +/* + * Define a table of the namespaces XFS supports + */ +typedef int (*xattr_exists_t)(vnode_t *); + +typedef struct xattr_namespace { + char *name; + unsigned int namelen; + xattr_exists_t exists; +} xattr_namespace_t; + +#define SYSTEM_NAMES 0 +#define ROOT_NAMES 1 +#define USER_NAMES 2 +extern struct xattr_namespace *xfs_namespaces; + + +extern struct inode_operations linvfs_file_inode_operations; +extern struct inode_operations linvfs_dir_inode_operations; +extern struct inode_operations linvfs_symlink_inode_operations; + +extern struct file_operations linvfs_file_operations; +extern struct file_operations linvfs_dir_operations; + +extern struct address_space_operations linvfs_aops; + +extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int); +extern void linvfs_unwritten_done(struct buffer_head *, int); + +#endif /* __XFS_IOPS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_linux.h linux.22-ac2/fs/xfs/linux/xfs_linux.h --- linux.vanilla/fs/xfs/linux/xfs_linux.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_linux.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_LINUX__ +#define __XFS_LINUX__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifndef HAVE_SECTOR_T +typedef long sector_t; /* offset- or number- of disk blocks */ +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef STATIC +#define STATIC static +#endif + +#ifndef EVMS_MAJOR +#define EVMS_MAJOR 117 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,9) +#define page_buffers(page) ((page)->buffers) +#define page_has_buffers(page) ((page)->buffers) +#endif + +/* + * State flag for unwritten extent buffers. + * + * We need to be able to distinguish between these and delayed + * allocate buffers within XFS. 
The generic IO path code does + * not need to distinguish - we use the BH_Delay flag for both + * delalloc and these ondisk-uninitialised buffers. + */ +#define BH_Unwritten BH_PrivateStart +#define buffer_unwritten(bh) __buffer_state(bh, Unwritten) +static inline void set_buffer_unwritten_io(struct buffer_head *bh) +{ + bh->b_end_io = linvfs_unwritten_done; +} + +#define restricted_chown xfs_params.restrict_chown +#define irix_sgid_inherit xfs_params.sgid_inherit +#define irix_symlink_mode xfs_params.symlink_mode +#define xfs_panic_mask xfs_params.panic_mask +#define xfs_error_level xfs_params.error_level + +#define NBPP PAGE_SIZE +#define DPPSHFT (PAGE_SHIFT - 9) +#define NDPP (1 << (PAGE_SHIFT - 9)) +#define dtop(DD) (((DD) + NDPP - 1) >> DPPSHFT) +#define dtopt(DD) ((DD) >> DPPSHFT) +#define dpoff(DD) ((DD) & (NDPP-1)) + +#define NBBY 8 /* number of bits per byte */ +#define NBPC PAGE_SIZE /* Number of bytes per click */ +#define BPCSHIFT PAGE_SHIFT /* LOG2(NBPC) if exact */ + +/* + * Size of block device i/o is parameterized here. + * Currently the system supports page-sized i/o. + */ +#define BLKDEV_IOSHIFT BPCSHIFT +#define BLKDEV_IOSIZE (1<>BPCSHIFT) +#define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT) +#define btoc64(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) +#define btoct64(x) ((__uint64_t)(x)>>BPCSHIFT) +#define io_btoc(x) (((__psunsigned_t)(x)+(IO_NBPC-1))>>IO_BPCSHIFT) +#define io_btoct(x) ((__psunsigned_t)(x)>>IO_BPCSHIFT) + +/* off_t bytes to clicks */ +#define offtoc(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) +#define offtoct(x) ((xfs_off_t)(x)>>BPCSHIFT) + +/* clicks to off_t bytes */ +#define ctooff(x) ((xfs_off_t)(x)<>BPCSHIFT) +#define ctob64(x) ((__uint64_t)(x)<>BPCSHIFT) + +#ifndef CELL_CAPABLE +#define FSC_NOTIFY_NAME_CHANGED(vp) +#endif + +#ifndef ENOATTR +#define ENOATTR ENODATA /* Attribute not found */ +#endif + +/* Note: EWRONGFS never visible outside the kernel */ +#define EWRONGFS EINVAL /* Mount with wrong filesystem type */ + +/* + * XXX EFSCORRUPTED needs a real value in errno.h. asm-i386/errno.h won't + * return codes out of its known range in errno. + * XXX Also note: needs to be < 1000 and fairly unique on Linux (mustn't + * conflict with any code we use already or any code a driver may use) + * XXX Some options (currently we do #2): + * 1/ New error code ["Filesystem is corrupted", _after_ glibc updated] + * 2/ 990 ["Unknown error 990"] + * 3/ EUCLEAN ["Structure needs cleaning"] + * 4/ Convert EFSCORRUPTED to EIO [just prior to return into userspace] + */ +#define EFSCORRUPTED 990 /* Filesystem is corrupted */ + +#define SYNCHRONIZE() barrier() +#define __return_address __builtin_return_address(0) + +/* + * IRIX (BSD) quotactl makes use of separate commands for user/group, + * whereas on Linux the syscall encodes this information into the cmd + * field (see the QCMD macro in quota.h). These macros help keep the + * code portable - they are not visible from the syscall interface. + */ +#define Q_XSETGQLIM XQM_CMD(0x8) /* set groups disk limits */ +#define Q_XGETGQUOTA XQM_CMD(0x9) /* get groups disk limits */ + +/* IRIX uses a dynamic sizing algorithm (ndquot = 200 + numprocs*2) */ +/* we may well need to fine-tune this if it ever becomes an issue. */ +#define DQUOT_MAX_HEURISTIC 1024 /* NR_DQUOTS */ +#define ndquot DQUOT_MAX_HEURISTIC + +/* IRIX uses the current size of the name cache to guess a good value */ +/* - this isn't the same but is a good enough starting point for now. 
*/ +#define DQUOT_HASH_HEURISTIC files_stat.nr_files + +/* IRIX inodes maintain the project ID also, zero this field on Linux */ +#define DEFAULT_PROJID 0 +#define dfltprid DEFAULT_PROJID + +#define MAXPATHLEN 1024 + +#define FINVIS 0x0100 /* don't update timestamps - XFS */ + +#define MIN(a,b) (min(a,b)) +#define MAX(a,b) (max(a,b)) +#define howmany(x, y) (((x)+((y)-1))/(y)) +#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) + +/* Move the kernel do_div definition off to one side */ + +#if defined __i386__ +/* For ia32 we need to pull some tricks to get past various versions + * of the compiler which do not like us using do_div in the middle + * of large functions. + */ +static inline __u32 xfs_do_div(void *a, __u32 b, int n) +{ + __u32 mod; + + switch (n) { + case 4: + mod = *(__u32 *)a % b; + *(__u32 *)a = *(__u32 *)a / b; + return mod; + case 8: + { + unsigned long __upper, __low, __high, __mod; + __u64 c = *(__u64 *)a; + __upper = __high = c >> 32; + __low = c; + if (__high) { + __upper = __high % (b); + __high = __high / (b); + } + asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper)); + asm("":"=A" (c):"a" (__low),"d" (__high)); + *(__u64 *)a = c; + return __mod; + } + } + + /* NOTREACHED */ + return 0; +} + +/* Side effect free 64 bit mod operation */ +static inline __u32 xfs_do_mod(void *a, __u32 b, int n) +{ + switch (n) { + case 4: + return *(__u32 *)a % b; + case 8: + { + unsigned long __upper, __low, __high, __mod; + __u64 c = *(__u64 *)a; + __upper = __high = c >> 32; + __low = c; + if (__high) { + __upper = __high % (b); + __high = __high / (b); + } + asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper)); + asm("":"=A" (c):"a" (__low),"d" (__high)); + return __mod; + } + } + + /* NOTREACHED */ + return 0; +} +#else +static inline __u32 xfs_do_div(void *a, __u32 b, int n) +{ + __u32 mod; + + switch (n) { + case 4: + mod = *(__u32 *)a % b; + *(__u32 *)a = *(__u32 *)a / b; + return mod; + case 8: + mod = do_div(*(__u64 *)a, b); + return mod; + } + + /* NOTREACHED */ + return 0; +} + +/* Side effect free 64 bit mod operation */ +static inline __u32 xfs_do_mod(void *a, __u32 b, int n) +{ + switch (n) { + case 4: + return *(__u32 *)a % b; + case 8: + { + __u64 c = *(__u64 *)a; + return do_div(c, b); + } + } + + /* NOTREACHED */ + return 0; +} +#endif + +#undef do_div +#define do_div(a, b) xfs_do_div(&(a), (b), sizeof(a)) +#define do_mod(a, b) xfs_do_mod(&(a), (b), sizeof(a)) + +static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y) +{ + x += y - 1; + do_div(x, y); + return(x * y); +} + +#endif /* __XFS_LINUX__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_lrw.c linux.22-ac2/fs/xfs/linux/xfs_lrw.c --- linux.vanilla/fs/xfs/linux/xfs_lrw.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_lrw.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,837 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +/* + * fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff) + * + */ + +#include "xfs.h" + +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_inode_item.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" + +#include + + +/* + * xfs_iozero + * + * xfs_iozero clears the specified range of buffer supplied, + * and marks all the affected blocks as valid and modified. If + * an affected block is not allocated, it will be allocated. If + * an affected block is not completely overwritten, and is not + * valid before the operation, it will be read from disk before + * being partially zeroed. + */ +STATIC int +xfs_iozero( + struct inode *ip, /* inode */ + loff_t pos, /* offset in file */ + size_t count, /* size of data to zero */ + loff_t end_size) /* max file size to set */ +{ + unsigned bytes; + struct page *page; + struct address_space *mapping; + char *kaddr; + int status; + + mapping = ip->i_mapping; + do { + unsigned long index, offset; + + offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ + index = pos >> PAGE_CACHE_SHIFT; + bytes = PAGE_CACHE_SIZE - offset; + if (bytes > count) + bytes = count; + + status = -ENOMEM; + page = grab_cache_page(mapping, index); + if (!page) + break; + + kaddr = kmap(page); + status = mapping->a_ops->prepare_write(NULL, page, offset, + offset + bytes); + if (status) { + goto unlock; + } + + memset((void *) (kaddr + offset), 0, bytes); + flush_dcache_page(page); + status = mapping->a_ops->commit_write(NULL, page, offset, + offset + bytes); + if (!status) { + pos += bytes; + count -= bytes; + if (pos > ip->i_size) + ip->i_size = pos < end_size ? 
pos : end_size; + } + +unlock: + kunmap(page); + unlock_page(page); + page_cache_release(page); + if (status) + break; + } while (count); + + return (-status); +} + +ssize_t /* bytes read, or (-) error */ +xfs_read( + bhv_desc_t *bdp, + struct file *file, + char *buf, + size_t size, + loff_t *offset, + int ioflags, + cred_t *credp) +{ + ssize_t ret; + xfs_fsize_t n; + xfs_inode_t *ip; + xfs_mount_t *mp; + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + XFS_STATS_INC(xfsstats.xs_read_calls); + + if (file->f_flags & O_DIRECT) { + if (((__psint_t)buf & BBMASK) || + (*offset & mp->m_blockmask) || + (size & mp->m_blockmask)) { + if (*offset == ip->i_d.di_size) { + return (0); + } + return -XFS_ERROR(EINVAL); + } + } + + + n = XFS_MAX_FILE_OFFSET - *offset; + if ((n <= 0) || (size == 0)) + return 0; + + if (n < size) + size = n; + + if (XFS_FORCED_SHUTDOWN(mp)) { + return -EIO; + } + + if (!(ioflags & IO_ISLOCKED)) + xfs_ilock(ip, XFS_IOLOCK_SHARED); + + if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) && + !(file->f_mode & FINVIS)) { + int error; + vrwlock_t locktype = VRWLOCK_READ; + + error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offset, size, + FILP_DELAY_FLAG(file), &locktype); + if (error) { + if (!(ioflags & IO_ISLOCKED)) + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + return -error; + } + } + + ret = generic_file_read(file, buf, size, offset); + + if (!(ioflags & IO_ISLOCKED)) + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + + XFS_STATS_ADD(xfsstats.xs_read_bytes, ret); + + if (unlikely(file->f_mode & FINVIS)) { + /* generic_file_read updates the atime but we need to + * undo that because this I/O was supposed to be invisible. + */ + struct inode *inode = LINVFS_GET_IP(BHV_TO_VNODE(bdp)); + inode->i_atime = ip->i_d.di_atime.t_sec; + } + else { + xfs_ichgtime(ip, XFS_ICHGTIME_ACC); + } + + return ret; +} + +/* + * This routine is called to handle zeroing any space in the last + * block of the file that is beyond the EOF. We do this since the + * size is being increased without writing anything to that block + * and we don't want anyone to read the garbage on the disk. + */ +STATIC int /* error (positive) */ +xfs_zero_last_block( + struct inode *ip, + xfs_iocore_t *io, + xfs_off_t offset, + xfs_fsize_t isize, + xfs_fsize_t end_size) +{ + xfs_fileoff_t last_fsb; + xfs_mount_t *mp; + int nimaps; + int zero_offset; + int zero_len; + int isize_fsb_offset; + int error = 0; + xfs_bmbt_irec_t imap; + loff_t loff; + size_t lsize; + + ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0); + ASSERT(offset > isize); + + mp = io->io_mount; + + isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize); + if (isize_fsb_offset == 0) { + /* + * There are no extra bytes in the last block on disk to + * zero, so return. + */ + return 0; + } + + last_fsb = XFS_B_TO_FSBT(mp, isize); + nimaps = 1; + error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap, + &nimaps, NULL); + if (error) { + return error; + } + ASSERT(nimaps > 0); + /* + * If the block underlying isize is just a hole, then there + * is nothing to zero. + */ + if (imap.br_startblock == HOLESTARTBLOCK) { + return 0; + } + /* + * Zero the part of the last block beyond the EOF, and write it + * out sync. We need to drop the ilock while we do this so we + * don't deadlock when the buffer cache calls back to us. 
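+ * The zeroing itself is done by xfs_iozero() through the page
+ * cache, covering only the bytes from the old EOF's offset
+ * within its block up to the end of that block, with the ilock
+ * dropped and re-taken around the call.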
+ */ + XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD); + loff = XFS_FSB_TO_B(mp, last_fsb); + lsize = XFS_FSB_TO_B(mp, 1); + + zero_offset = isize_fsb_offset; + zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset; + + error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size); + + XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + ASSERT(error >= 0); + return error; +} + +/* + * Zero any on disk space between the current EOF and the new, + * larger EOF. This handles the normal case of zeroing the remainder + * of the last block in the file and the unusual case of zeroing blocks + * out beyond the size of the file. This second case only happens + * with fixed size extents and when the system crashes before the inode + * size was updated but after blocks were allocated. If fill is set, + * then any holes in the range are filled and zeroed. If not, the holes + * are left alone as holes. + */ + +int /* error (positive) */ +xfs_zero_eof( + vnode_t *vp, + xfs_iocore_t *io, + xfs_off_t offset, /* starting I/O offset */ + xfs_fsize_t isize, /* current inode size */ + xfs_fsize_t end_size) /* terminal inode size */ +{ + struct inode *ip = LINVFS_GET_IP(vp); + xfs_fileoff_t start_zero_fsb; + xfs_fileoff_t end_zero_fsb; + xfs_fileoff_t prev_zero_fsb; + xfs_fileoff_t zero_count_fsb; + xfs_fileoff_t last_fsb; + xfs_extlen_t buf_len_fsb; + xfs_extlen_t prev_zero_count; + xfs_mount_t *mp; + int nimaps; + int error = 0; + xfs_bmbt_irec_t imap; + loff_t loff; + size_t lsize; + + ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); + ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); + + mp = io->io_mount; + + /* + * First handle zeroing the block on which isize resides. + * We only zero a part of that block so it is handled specially. + */ + error = xfs_zero_last_block(ip, io, offset, isize, end_size); + if (error) { + ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); + ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); + return error; + } + + /* + * Calculate the range between the new size and the old + * where blocks needing to be zeroed may exist. To get the + * block where the last byte in the file currently resides, + * we need to subtract one from the size and truncate back + * to a block boundary. We subtract 1 in case the size is + * exactly on a block boundary. + */ + last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; + start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); + end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); + ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); + if (last_fsb == end_zero_fsb) { + /* + * The size was only incremented on its last block. + * We took care of that above, so just return. + */ + return 0; + } + + ASSERT(start_zero_fsb <= end_zero_fsb); + prev_zero_fsb = NULLFILEOFF; + prev_zero_count = 0; + while (start_zero_fsb <= end_zero_fsb) { + nimaps = 1; + zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; + error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb, + 0, NULL, 0, &imap, &nimaps, NULL); + if (error) { + ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); + ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); + return error; + } + ASSERT(nimaps > 0); + + if (imap.br_startblock == HOLESTARTBLOCK) { + /* + * This loop handles initializing pages that were + * partially initialized by the code below this + * loop. It basically zeroes the part of the page + * that sits on a hole and sets the page as P_HOLE + * and calls remapf if it is a mapped file. 
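+ * (This Linux version has no P_HOLE/remapf handling - the code
+ * below just resets the prev_zero bookkeeping and advances
+ * start_zero_fsb past the hole before continuing the loop.)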
+ */ + prev_zero_fsb = NULLFILEOFF; + prev_zero_count = 0; + start_zero_fsb = imap.br_startoff + + imap.br_blockcount; + ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); + continue; + } + + /* + * There are blocks in the range requested. + * Zero them a single write at a time. We actually + * don't zero the entire range returned if it is + * too big and simply loop around to get the rest. + * That is not the most efficient thing to do, but it + * is simple and this path should not be exercised often. + */ + buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount, + mp->m_writeio_blocks << 8); + /* + * Drop the inode lock while we're doing the I/O. + * We'll still have the iolock to protect us. + */ + XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + + loff = XFS_FSB_TO_B(mp, start_zero_fsb); + lsize = XFS_FSB_TO_B(mp, buf_len_fsb); + + error = xfs_iozero(ip, loff, lsize, end_size); + + if (error) { + goto out_lock; + } + + prev_zero_fsb = start_zero_fsb; + prev_zero_count = buf_len_fsb; + start_zero_fsb = imap.br_startoff + buf_len_fsb; + ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); + + XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + } + + return 0; + +out_lock: + + XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + ASSERT(error >= 0); + return error; +} + +ssize_t /* bytes written, or (-) error */ +xfs_write( + bhv_desc_t *bdp, + struct file *file, + const char *buf, + size_t size, + loff_t *offset, + int ioflags, + cred_t *credp) +{ + xfs_inode_t *xip; + xfs_mount_t *mp; + ssize_t ret; + int error = 0; + xfs_fsize_t isize, new_size; + xfs_fsize_t n, limit = XFS_MAX_FILE_OFFSET; + xfs_iocore_t *io; + vnode_t *vp; + int iolock; + int direct = file->f_flags & O_DIRECT; + int eventsent = 0; + vrwlock_t locktype; + + XFS_STATS_INC(xfsstats.xs_write_calls); + + vp = BHV_TO_VNODE(bdp); + xip = XFS_BHVTOI(bdp); + + if (size == 0) + return 0; + + io = &(xip->i_iocore); + mp = io->io_mount; + + xfs_check_frozen(mp, bdp, XFS_FREEZE_WRITE); + + if (XFS_FORCED_SHUTDOWN(xip->i_mount)) { + return -EIO; + } + + if (direct) { + if (((__psint_t)buf & BBMASK) || + (*offset & mp->m_blockmask) || + (size & mp->m_blockmask)) { + return XFS_ERROR(-EINVAL); + } + iolock = XFS_IOLOCK_SHARED; + locktype = VRWLOCK_WRITE_DIRECT; + } else { + if (io->io_flags & XFS_IOCORE_RT) + return XFS_ERROR(-EINVAL); + iolock = XFS_IOLOCK_EXCL; + locktype = VRWLOCK_WRITE; + } + + if (ioflags & IO_ISLOCKED) + iolock = 0; + + xfs_ilock(xip, XFS_ILOCK_EXCL|iolock); + + isize = xip->i_d.di_size; + + if (file->f_flags & O_APPEND) + *offset = isize; + +start: + n = limit - *offset; + if (n <= 0) { + xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); + return -EFBIG; + } + if (n < size) + size = n; + + new_size = *offset + size; + if (new_size > isize) { + io->io_new_size = new_size; + } + + if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) && + !(file->f_mode & FINVIS) && !eventsent)) { + loff_t savedsize = *offset; + + xfs_iunlock(xip, XFS_ILOCK_EXCL); + error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, bdp, + *offset, size, + FILP_DELAY_FLAG(file), &locktype); + if (error) { + if (iolock) xfs_iunlock(xip, iolock); + return -error; + } + xfs_ilock(xip, XFS_ILOCK_EXCL); + eventsent = 1; + + /* + * The iolock was dropped and reaquired in XFS_SEND_DATA + * so we have to recheck the size when appending. + * We will only "goto start;" once, since having sent the + * event prevents another call to XFS_SEND_DATA, which is + * what allows the size to change in the first place. 
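+ *
+ * For O_APPEND writers the recheck below reloads di_size and
+ * jumps back to the start: label so that the offset and limit
+ * calculations are redone against the new end of file.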
+ */ + if ((file->f_flags & O_APPEND) && + savedsize != xip->i_d.di_size) { + *offset = isize = xip->i_d.di_size; + goto start; + } + } + + /* + * If the offset is beyond the size of the file, we have a couple + * of things to do. First, if there is already space allocated + * we need to either create holes or zero the disk or ... + * + * If there is a page where the previous size lands, we need + * to zero it out up to the new size. + */ + + if (!direct && (*offset > isize && isize)) { + error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, *offset, + isize, *offset + size); + if (error) { + xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); + return(-error); + } + } + xfs_iunlock(xip, XFS_ILOCK_EXCL); + + /* + * If we're writing the file then make sure to clear the + * setuid and setgid bits if the process is not being run + * by root. This keeps people from modifying setuid and + * setgid binaries. + */ + + if (((xip->i_d.di_mode & ISUID) || + ((xip->i_d.di_mode & (ISGID | (IEXEC >> 3))) == + (ISGID | (IEXEC >> 3)))) && + !capable(CAP_FSETID)) { + error = xfs_write_clear_setuid(xip); + if (error) { + xfs_iunlock(xip, iolock); + return -error; + } + } + +retry: + if (direct) { + xfs_inval_cached_pages(vp, &xip->i_iocore, *offset, 1, 1); + } + + ret = do_generic_file_write(file, buf, size, offset); + + if (unlikely(file->f_mode & FINVIS)) { + /* generic_file_write updates the mtime/ctime but we need + * to undo that because this I/O was supposed to be + * invisible. + */ + struct inode *inode = LINVFS_GET_IP(vp); + inode->i_mtime = xip->i_d.di_mtime.t_sec; + inode->i_ctime = xip->i_d.di_ctime.t_sec; + } + else { + xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + } + + if ((ret == -ENOSPC) && + DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) && + !(file->f_mode & FINVIS)) { + + xfs_rwunlock(bdp, locktype); + error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, bdp, + DM_RIGHT_NULL, bdp, DM_RIGHT_NULL, NULL, NULL, + 0, 0, 0); /* Delay flag intentionally unused */ + if (error) + return -error; + xfs_rwlock(bdp, locktype); + *offset = xip->i_d.di_size; + goto retry; + + } + + if (ret <= 0) { + xfs_rwunlock(bdp, locktype); + return ret; + } + + XFS_STATS_ADD(xfsstats.xs_write_bytes, ret); + + if (*offset > xip->i_d.di_size) { + xfs_ilock(xip, XFS_ILOCK_EXCL); + if (*offset > xip->i_d.di_size) { + struct inode *inode = LINVFS_GET_IP(vp); + + inode->i_size = xip->i_d.di_size = *offset; + xip->i_update_core = 1; + xip->i_update_size = 1; + } + xfs_iunlock(xip, XFS_ILOCK_EXCL); + } + + /* Handle various SYNC-type writes */ + if ((file->f_flags & O_SYNC) || IS_SYNC(file->f_dentry->d_inode)) { + + /* + * If we're treating this as O_DSYNC and we have not updated the + * size, force the log. + */ + + if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) + && !(xip->i_update_size)) { + /* + * If an allocation transaction occurred + * without extending the size, then we have to force + * the log up the proper point to ensure that the + * allocation is permanent. We can't count on + * the fact that buffered writes lock out direct I/O + * writes - the direct I/O write could have extended + * the size nontransactionally, then finished before + * we started. xfs_write_file will think that the file + * didn't grow but the update isn't safe unless the + * size change is logged. + * + * Force the log if we've committed a transaction + * against the inode or if someone else has and + * the commit record hasn't gone to disk (e.g. + * the inode is pinned). 
This guarantees that + * all changes affecting the inode are permanent + * when we return. + */ + + xfs_inode_log_item_t *iip; + xfs_lsn_t lsn; + + iip = xip->i_itemp; + if (iip && iip->ili_last_lsn) { + lsn = iip->ili_last_lsn; + xfs_log_force(mp, lsn, + XFS_LOG_FORCE | XFS_LOG_SYNC); + } else if (xfs_ipincount(xip) > 0) { + xfs_log_force(mp, (xfs_lsn_t)0, + XFS_LOG_FORCE | XFS_LOG_SYNC); + } + + } else { + xfs_trans_t *tp; + + /* + * O_SYNC or O_DSYNC _with_ a size update are handled + * the same way. + * + * If the write was synchronous then we need to make + * sure that the inode modification time is permanent. + * We'll have updated the timestamp above, so here + * we use a synchronous transaction to log the inode. + * It's not fast, but it's necessary. + * + * If this a dsync write and the size got changed + * non-transactionally, then we need to ensure that + * the size change gets logged in a synchronous + * transaction. + */ + + tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC); + if ((error = xfs_trans_reserve(tp, 0, + XFS_SWRITE_LOG_RES(mp), + 0, 0, 0))) { + /* Transaction reserve failed */ + xfs_trans_cancel(tp, 0); + } else { + /* Transaction reserve successful */ + xfs_ilock(xip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, xip); + xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE); + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0, (xfs_lsn_t)0); + xfs_iunlock(xip, XFS_ILOCK_EXCL); + } + } + } /* (ioflags & O_SYNC) */ + + /* + * If we are coming from an nfsd thread then insert into the + * reference cache. + */ + + if (!strcmp(current->comm, "nfsd")) + xfs_refcache_insert(xip); + + /* Drop lock this way - the old refcache release is in here */ + if (iolock) + xfs_rwunlock(bdp, locktype); + + return(ret); +} + +/* + * All xfs metadata buffers except log state machine buffers + * get this attached as their b_bdstrat callback function. + * This is so that we can catch a buffer + * after prematurely unpinning it to forcibly shutdown the filesystem. + */ +int +xfs_bdstrat_cb(struct xfs_buf *bp) +{ + xfs_mount_t *mp; + + mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *); + if (!XFS_FORCED_SHUTDOWN(mp)) { + pagebuf_iorequest(bp); + return 0; + } else { + xfs_buftrace("XFS__BDSTRAT IOERROR", bp); + /* + * Metadata write that didn't get logged but + * written delayed anyway. These aren't associated + * with a transaction, and can be ignored. + */ + if (XFS_BUF_IODONE_FUNC(bp) == NULL && + (XFS_BUF_ISREAD(bp)) == 0) + return (xfs_bioerror_relse(bp)); + else + return (xfs_bioerror(bp)); + } +} + + +int +xfs_bmap(bhv_desc_t *bdp, + xfs_off_t offset, + ssize_t count, + int flags, + page_buf_bmap_t *pbmapp, + int *npbmaps) +{ + xfs_inode_t *ip = XFS_BHVTOI(bdp); + xfs_iocore_t *io = &ip->i_iocore; + + ASSERT((ip->i_d.di_mode & IFMT) == IFREG); + ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) == + ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0)); + + return xfs_iomap(io, offset, count, flags, pbmapp, npbmaps); +} + +/* + * Wrapper around bdstrat so that we can stop data + * from going to disk in case we are shutting down the filesystem. + * Typically user data goes thru this path; one of the exceptions + * is the superblock. 
+ */ +int +xfsbdstrat( + struct xfs_mount *mp, + struct xfs_buf *bp) +{ + ASSERT(mp); + if (!XFS_FORCED_SHUTDOWN(mp)) { + /* Grio redirection would go here + * if (XFS_BUF_IS_GRIO(bp)) { + */ + + pagebuf_iorequest(bp); + return 0; + } + + xfs_buftrace("XFSBDSTRAT IOERROR", bp); + return (xfs_bioerror_relse(bp)); +} + + +void +XFS_bflush(xfs_buftarg_t *target) +{ + pagebuf_delwri_flush(target, PBDF_WAIT, NULL); +} + +/* + * If the underlying (log or data) device is readonly, there are some + * operations that cannot proceed. + */ +int +xfs_dev_is_read_only(xfs_mount_t *mp, char *message) +{ + if (is_read_only(mp->m_ddev_targp->pbr_kdev) || + is_read_only(mp->m_logdev_targp->pbr_kdev) || + (mp->m_rtdev_targp && is_read_only(mp->m_rtdev_targp->pbr_kdev))) { + cmn_err(CE_NOTE, + "XFS: %s required on read-only device.", message); + cmn_err(CE_NOTE, + "XFS: write access unavailable, cannot proceed."); + return EROFS; + } + + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_lrw.h linux.22-ac2/fs/xfs/linux/xfs_lrw.h --- linux.vanilla/fs/xfs/linux/xfs_lrw.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_lrw.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_LRW_H__ +#define __XFS_LRW_H__ + +struct vnode; +struct bhv_desc; +struct xfs_mount; +struct xfs_iocore; +struct xfs_inode; +struct xfs_bmbt_irec; +struct page_buf_s; +struct page_buf_bmap_s; + +#define XFS_IOMAP_READ_ENTER 3 +/* + * Maximum count of bmaps used by read and write paths. 
+ */ +#define XFS_MAX_RW_NBMAPS 4 + +extern int xfs_bmap(struct bhv_desc *, xfs_off_t, ssize_t, int, + struct page_buf_bmap_s *, int *); +extern int xfsbdstrat(struct xfs_mount *, struct page_buf_s *); +extern int xfs_bdstrat_cb(struct page_buf_s *); + +extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t, + xfs_fsize_t, xfs_fsize_t); +extern ssize_t xfs_read(struct bhv_desc *, struct file *, char *, + size_t, loff_t *, int, struct cred *); +extern ssize_t xfs_write(struct bhv_desc *, struct file *, const char *, + size_t, loff_t *, int, struct cred *); + +extern int xfs_iomap(struct xfs_iocore *, xfs_off_t, ssize_t, int, + struct page_buf_bmap_s *, int *); +extern int xfs_iomap_write_direct(struct xfs_inode *, loff_t, size_t, + int, struct xfs_bmbt_irec *, int *, int); +extern int xfs_iomap_write_delay(struct xfs_inode *, loff_t, size_t, + int, struct xfs_bmbt_irec *, int *); +extern int xfs_iomap_write_allocate(struct xfs_inode *, + struct xfs_bmbt_irec *, int *); +extern int xfs_iomap_write_unwritten(struct xfs_inode *, loff_t, size_t); + +extern int xfs_dev_is_read_only(struct xfs_mount *, char *); + +#define XFS_FSB_TO_DB_IO(io,fsb) \ + (((io)->io_flags & XFS_IOCORE_RT) ? \ + XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \ + XFS_FSB_TO_DADDR((io)->io_mount, (fsb))) + +#endif /* __XFS_LRW_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_stats.c linux.22-ac2/fs/xfs/linux/xfs_stats.c --- linux.vanilla/fs/xfs/linux/xfs_stats.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_stats.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include + +struct xfsstats xfsstats; + +STATIC int +xfs_read_xfsstats( + char *buffer, + char **start, + off_t offset, + int count, + int *eof, + void *data) +{ + int i, j, len; + static struct xstats_entry { + char *desc; + int endpoint; + } xstats[] = { + { "extent_alloc", XFSSTAT_END_EXTENT_ALLOC }, + { "abt", XFSSTAT_END_ALLOC_BTREE }, + { "blk_map", XFSSTAT_END_BLOCK_MAPPING }, + { "bmbt", XFSSTAT_END_BLOCK_MAP_BTREE }, + { "dir", XFSSTAT_END_DIRECTORY_OPS }, + { "trans", XFSSTAT_END_TRANSACTIONS }, + { "ig", XFSSTAT_END_INODE_OPS }, + { "log", XFSSTAT_END_LOG_OPS }, + { "push_ail", XFSSTAT_END_TAIL_PUSHING }, + { "xstrat", XFSSTAT_END_WRITE_CONVERT }, + { "rw", XFSSTAT_END_READ_WRITE_OPS }, + { "attr", XFSSTAT_END_ATTRIBUTE_OPS }, + { "icluster", XFSSTAT_END_INODE_CLUSTER }, + { "vnodes", XFSSTAT_END_VNODE_OPS }, + }; + + for (i=j=len = 0; i < sizeof(xstats)/sizeof(struct xstats_entry); i++) { + len += sprintf(buffer + len, xstats[i].desc); + /* inner loop does each group */ + while (j < xstats[i].endpoint) { + len += sprintf(buffer + len, " %u", + *(((__u32*)&xfsstats) + j)); + j++; + } + buffer[len++] = '\n'; + } + /* extra precision counters */ + len += sprintf(buffer + len, "xpc %Lu %Lu %Lu\n", + xfsstats.xs_xstrat_bytes, + xfsstats.xs_write_bytes, + xfsstats.xs_read_bytes); + + if (offset >= len) { + *start = buffer; + *eof = 1; + return 0; + } + *start = buffer + offset; + if ((len -= offset) > count) + return count; + *eof = 1; + + return len; +} + +void +xfs_init_procfs(void) +{ + if (!proc_mkdir("fs/xfs", 0)) + return; + create_proc_read_entry("fs/xfs/stat", 0, 0, xfs_read_xfsstats, NULL); +} + +void +xfs_cleanup_procfs(void) +{ + remove_proc_entry("fs/xfs/stat", NULL); + remove_proc_entry("fs/xfs", NULL); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_stats.h linux.22-ac2/fs/xfs/linux/xfs_stats.h --- linux.vanilla/fs/xfs/linux/xfs_stats.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_stats.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_STATS_H__ +#define __XFS_STATS_H__ + + +#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF) + +/* + * XFS global statistics + */ +struct xfsstats { +# define XFSSTAT_END_EXTENT_ALLOC 4 + __uint32_t xs_allocx; + __uint32_t xs_allocb; + __uint32_t xs_freex; + __uint32_t xs_freeb; +# define XFSSTAT_END_ALLOC_BTREE (XFSSTAT_END_EXTENT_ALLOC+4) + __uint32_t xs_abt_lookup; + __uint32_t xs_abt_compare; + __uint32_t xs_abt_insrec; + __uint32_t xs_abt_delrec; +# define XFSSTAT_END_BLOCK_MAPPING (XFSSTAT_END_ALLOC_BTREE+7) + __uint32_t xs_blk_mapr; + __uint32_t xs_blk_mapw; + __uint32_t xs_blk_unmap; + __uint32_t xs_add_exlist; + __uint32_t xs_del_exlist; + __uint32_t xs_look_exlist; + __uint32_t xs_cmp_exlist; +# define XFSSTAT_END_BLOCK_MAP_BTREE (XFSSTAT_END_BLOCK_MAPPING+4) + __uint32_t xs_bmbt_lookup; + __uint32_t xs_bmbt_compare; + __uint32_t xs_bmbt_insrec; + __uint32_t xs_bmbt_delrec; +# define XFSSTAT_END_DIRECTORY_OPS (XFSSTAT_END_BLOCK_MAP_BTREE+4) + __uint32_t xs_dir_lookup; + __uint32_t xs_dir_create; + __uint32_t xs_dir_remove; + __uint32_t xs_dir_getdents; +# define XFSSTAT_END_TRANSACTIONS (XFSSTAT_END_DIRECTORY_OPS+3) + __uint32_t xs_trans_sync; + __uint32_t xs_trans_async; + __uint32_t xs_trans_empty; +# define XFSSTAT_END_INODE_OPS (XFSSTAT_END_TRANSACTIONS+7) + __uint32_t xs_ig_attempts; + __uint32_t xs_ig_found; + __uint32_t xs_ig_frecycle; + __uint32_t xs_ig_missed; + __uint32_t xs_ig_dup; + __uint32_t xs_ig_reclaims; + __uint32_t xs_ig_attrchg; +# define XFSSTAT_END_LOG_OPS (XFSSTAT_END_INODE_OPS+5) + __uint32_t xs_log_writes; + __uint32_t xs_log_blocks; + __uint32_t xs_log_noiclogs; + __uint32_t xs_log_force; + __uint32_t xs_log_force_sleep; +# define XFSSTAT_END_TAIL_PUSHING (XFSSTAT_END_LOG_OPS+10) + __uint32_t xs_try_logspace; + __uint32_t xs_sleep_logspace; + __uint32_t xs_push_ail; + __uint32_t xs_push_ail_success; + __uint32_t xs_push_ail_pushbuf; + __uint32_t xs_push_ail_pinned; + __uint32_t xs_push_ail_locked; + __uint32_t xs_push_ail_flushing; + __uint32_t xs_push_ail_restarts; + __uint32_t xs_push_ail_flush; +# define XFSSTAT_END_WRITE_CONVERT (XFSSTAT_END_TAIL_PUSHING+2) + __uint32_t xs_xstrat_quick; + __uint32_t xs_xstrat_split; +# define XFSSTAT_END_READ_WRITE_OPS (XFSSTAT_END_WRITE_CONVERT+2) + __uint32_t xs_write_calls; + __uint32_t xs_read_calls; +# define XFSSTAT_END_ATTRIBUTE_OPS (XFSSTAT_END_READ_WRITE_OPS+4) + __uint32_t xs_attr_get; + __uint32_t xs_attr_set; + __uint32_t xs_attr_remove; + __uint32_t xs_attr_list; +# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_ATTRIBUTE_OPS+3) + __uint32_t xs_iflush_count; + __uint32_t xs_icluster_flushcnt; + __uint32_t xs_icluster_flushinode; +# define XFSSTAT_END_VNODE_OPS (XFSSTAT_END_INODE_CLUSTER+8) + __uint32_t vn_active; /* # vnodes not on free lists */ + __uint32_t vn_alloc; /* # times vn_alloc called */ + __uint32_t vn_get; /* # times vn_get called */ + __uint32_t vn_hold; /* # times vn_hold called */ + __uint32_t vn_rele; /* # times vn_rele called */ + __uint32_t vn_reclaim; /* # times vn_reclaim called */ + __uint32_t vn_remove; /* # times vn_remove called */ + __uint32_t vn_free; /* # times vn_free called */ +/* Extra precision counters */ + __uint64_t xs_xstrat_bytes; + __uint64_t xs_write_bytes; + __uint64_t xs_read_bytes; +}; + 
+extern struct xfsstats xfsstats; + +# define XFS_STATS_INC(count) ( (count)++ ) +# define XFS_STATS_DEC(count) ( (count)-- ) +# define XFS_STATS_ADD(count, inc) ( (count) += (inc) ) + +extern void xfs_init_procfs(void); +extern void xfs_cleanup_procfs(void); + + +#else /* !CONFIG_PROC_FS */ + +# define XFS_STATS_INC(count) +# define XFS_STATS_DEC(count) +# define XFS_STATS_ADD(count, inc) + +static __inline void xfs_init_procfs(void) { }; +static __inline void xfs_cleanup_procfs(void) { }; + +#endif /* !CONFIG_PROC_FS */ + +#endif /* __XFS_STATS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_super.c linux.22-ac2/fs/xfs/linux/xfs_super.c --- linux.vanilla/fs/xfs/linux/xfs_super.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_super.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,845 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_clnt.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" +#include "xfs_version.h" + +#include +#include + +STATIC struct quotactl_ops linvfs_qops; +STATIC struct super_operations linvfs_sops; +STATIC kmem_cache_t * linvfs_inode_cachep; + +STATIC struct xfs_mount_args * +args_allocate( + struct super_block *sb) +{ + struct xfs_mount_args *args; + + args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP); + args->logbufs = args->logbufsize = -1; + strncpy(args->fsname, bdevname(sb->s_dev), MAXNAMELEN); + + /* Copy the already-parsed mount(2) flags we're interested in */ + if (sb->s_flags & MS_NOATIME) + args->flags |= XFSMNT_NOATIME; + + /* Default to 32 bit inodes on Linux all the time */ + args->flags |= XFSMNT_32BITINODES; + + return args; +} + +STATIC __inline__ void +xfs_set_inodeops( + struct inode *inode) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + + if (vp->v_type == VNON) { + remove_inode_hash(inode); + make_bad_inode(inode); + } else if (S_ISREG(inode->i_mode)) { + inode->i_op = &linvfs_file_inode_operations; + inode->i_fop = &linvfs_file_operations; + inode->i_mapping->a_ops = &linvfs_aops; + } else if (S_ISDIR(inode->i_mode)) { + inode->i_op = &linvfs_dir_inode_operations; + inode->i_fop = &linvfs_dir_operations; + } else if (S_ISLNK(inode->i_mode)) { + inode->i_op = &linvfs_symlink_inode_operations; + if (inode->i_blocks) + inode->i_mapping->a_ops = &linvfs_aops; + } else { + inode->i_op = &linvfs_file_inode_operations; + init_special_inode(inode, inode->i_mode, + kdev_t_to_nr(inode->i_rdev)); + } +} + +STATIC __inline__ void +xfs_revalidate_inode( + xfs_mount_t *mp, + vnode_t *vp, + xfs_inode_t *ip) +{ + struct inode *inode = LINVFS_GET_IP(vp); + + inode->i_mode = (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type); + inode->i_nlink = ip->i_d.di_nlink; + inode->i_uid = ip->i_d.di_uid; + inode->i_gid = ip->i_d.di_gid; + if (((1 << vp->v_type) & ((1<i_rdev = NODEV; + } else { + xfs_dev_t dev = ip->i_df.if_u2.if_rdev; + inode->i_rdev = XFS_DEV_TO_KDEVT(dev); + } + inode->i_blksize = PAGE_CACHE_SIZE; + inode->i_generation = ip->i_d.di_gen; + inode->i_size = ip->i_d.di_size; + inode->i_blocks = + XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); + inode->i_atime = ip->i_d.di_atime.t_sec; + inode->i_mtime = ip->i_d.di_mtime.t_sec; + inode->i_ctime = ip->i_d.di_ctime.t_sec; + + vp->v_flag &= ~VMODIFIED; +} + +void +xfs_initialize_vnode( + bhv_desc_t *bdp, + vnode_t *vp, + bhv_desc_t *inode_bhv, + int unlock) +{ + xfs_inode_t *ip = XFS_BHVTOI(inode_bhv); + struct 
inode *inode = LINVFS_GET_IP(vp); + + if (!inode_bhv->bd_vobj) { + vp->v_vfsp = bhvtovfs(bdp); + bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops); + bhv_insert(VN_BHV_HEAD(vp), inode_bhv); + } + + vp->v_type = IFTOVT(ip->i_d.di_mode); + + /* Have we been called during the new inode create process, + * in which case we are too early to fill in the Linux inode. + */ + if (vp->v_type == VNON) + return; + + xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); + + /* For new inodes we need to set the ops vectors, + * and unlock the inode. + */ + if (unlock && (inode->i_state & I_NEW)) { + xfs_set_inodeops(inode); + unlock_new_inode(inode); + } +} + +struct inode * +xfs_get_inode( + bhv_desc_t *bdp, + xfs_ino_t ino, + int flags) +{ + struct vfs *vfsp = bhvtovfs(bdp); + + return iget_locked(vfsp->vfs_super, ino); +} + +/*ARGSUSED*/ +int +xfs_blkdev_get( + xfs_mount_t *mp, + const char *name, + struct block_device **bdevp) +{ + struct nameidata nd; + int error; + + error = path_lookup(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd); + if (error) { + printk("XFS: Invalid device [%s], error=%d\n", name, error); + return -error; + } + + /* I think we actually want bd_acquire here.. --hch */ + *bdevp = bdget(kdev_t_to_nr(nd.dentry->d_inode->i_rdev)); + if (*bdevp) + error = blkdev_get(*bdevp, FMODE_READ|FMODE_WRITE, 0, BDEV_FS); + else + error = -ENOMEM; + + path_release(&nd); + return -error; +} + +void +xfs_blkdev_put( + struct block_device *bdev) +{ + if (bdev) + blkdev_put(bdev, BDEV_FS); +} + +void +xfs_free_buftarg( + xfs_buftarg_t *btp) +{ + pagebuf_delwri_flush(btp, PBDF_WAIT, NULL); + kmem_free(btp, sizeof(*btp)); +} + +void +xfs_relse_buftarg( + xfs_buftarg_t *btp) +{ + destroy_buffers(btp->pbr_kdev); + truncate_inode_pages(btp->pbr_mapping, 0LL); +} + +unsigned int +xfs_getsize_buftarg( + xfs_buftarg_t *btp) +{ + return block_size(btp->pbr_kdev); +} + +void +xfs_setsize_buftarg( + xfs_buftarg_t *btp, + unsigned int blocksize, + unsigned int sectorsize) +{ + btp->pbr_bsize = blocksize; + btp->pbr_sshift = ffs(sectorsize) - 1; + btp->pbr_smask = sectorsize - 1; + + if (set_blocksize(btp->pbr_kdev, sectorsize)) { + printk(KERN_WARNING + "XFS: Cannot set_blocksize to %u on device 0x%x\n", + sectorsize, kdev_t_to_nr(btp->pbr_kdev)); + } +} + +xfs_buftarg_t * +xfs_alloc_buftarg( + struct block_device *bdev) +{ + xfs_buftarg_t *btp; + + btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); + + btp->pbr_dev = bdev->bd_dev; + btp->pbr_kdev = to_kdev_t(btp->pbr_dev); + btp->pbr_bdev = bdev; + btp->pbr_mapping = bdev->bd_inode->i_mapping; + xfs_setsize_buftarg(btp, PAGE_CACHE_SIZE, + get_hardsect_size(btp->pbr_kdev)); + + switch (MAJOR(btp->pbr_dev)) { + case MD_MAJOR: + case EVMS_MAJOR: + btp->pbr_flags = PBR_ALIGNED_ONLY; + break; + case LVM_BLK_MAJOR: + btp->pbr_flags = PBR_SECTOR_ONLY; + break; + } + + return btp; +} + +STATIC __inline__ unsigned int gfp_mask(void) +{ + /* If we're not in a transaction, FS activity is ok */ + if (current->flags & PF_FSTRANS) return GFP_NOFS; + return GFP_KERNEL; +} + +STATIC struct inode * +linvfs_alloc_inode( + struct super_block *sb) +{ + vnode_t *vp; + + vp = (vnode_t *)kmem_cache_alloc(linvfs_inode_cachep, gfp_mask()); + if (!vp) + return NULL; + return LINVFS_GET_IP(vp); +} + +STATIC void +linvfs_destroy_inode( + struct inode *inode) +{ + kmem_cache_free(linvfs_inode_cachep, LINVFS_GET_VP(inode)); +} + +#define VNODE_SIZE \ + (sizeof(vnode_t) - sizeof(struct inode) + offsetof(struct inode, u)) + +STATIC void +init_once( + void *data, + kmem_cache_t *cachep, + unsigned long flags) +{ 
+ vnode_t *vp = (vnode_t *)data; + + if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == + SLAB_CTOR_CONSTRUCTOR) { + struct inode *inode = LINVFS_GET_IP(vp); + + memset(vp, 0, VNODE_SIZE); + init_waitqueue_head(&inode->i_wait); + INIT_LIST_HEAD(&inode->i_hash); + INIT_LIST_HEAD(&inode->i_data.clean_pages); + INIT_LIST_HEAD(&inode->i_data.dirty_pages); + INIT_LIST_HEAD(&inode->i_data.locked_pages); + INIT_LIST_HEAD(&inode->i_dentry); + INIT_LIST_HEAD(&inode->i_dirty_buffers); + INIT_LIST_HEAD(&inode->i_dirty_data_buffers); + INIT_LIST_HEAD(&inode->i_devices); + sema_init(&inode->i_sem, 1); + sema_init(&inode->i_zombie, 1); + spin_lock_init(&inode->i_data.i_shared_lock); + } +} + +STATIC int +init_inodecache( void ) +{ + linvfs_inode_cachep = kmem_cache_create("linvfs_icache", + VNODE_SIZE, 0, SLAB_HWCACHE_ALIGN, + init_once, NULL); + + if (linvfs_inode_cachep == NULL) + return -ENOMEM; + return 0; +} + +STATIC void +destroy_inodecache( void ) +{ + if (kmem_cache_destroy(linvfs_inode_cachep)) + printk(KERN_WARNING "%s: cache still in use!\n", __FUNCTION__); +} + +/* + * Attempt to flush the inode, this will actually fail + * if the inode is pinned, but we dirty the inode again + * at the point when it is unpinned after a log write, + * since this is when the inode itself becomes flushable. + */ +STATIC void +linvfs_write_inode( + struct inode *inode, + int sync) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + int error, flags = FLUSH_INODE; + + if (vp) { + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + if (sync) + flags |= FLUSH_SYNC; + VOP_IFLUSH(vp, flags, error); + } +} + +STATIC void +linvfs_clear_inode( + struct inode *inode) +{ + vnode_t *vp = LINVFS_GET_VP(inode); + + if (vp) { + vn_rele(vp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + /* + * Do all our cleanup, and remove this vnode. 
+ */ + vn_remove(vp); + } +} + +STATIC void +linvfs_put_super( + struct super_block *sb) +{ + vfs_t *vfsp = LINVFS_GET_VFS(sb); + int error; + + linvfs_stop_syncd(vfsp); + VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error); + if (error == 0) { + VFS_UNMOUNT(vfsp, 0, NULL, error); + } + if (error) { + printk("XFS unmount got error %d\n", error); + printk("%s: vfsp/0x%p left dangling!\n", __FUNCTION__, vfsp); + return; + } + + vfs_deallocate(vfsp); +} + +STATIC void +linvfs_write_super( + struct super_block *sb) +{ + vfs_t *vfsp = LINVFS_GET_VFS(sb); + int error; + + if (sb->s_flags & MS_RDONLY) { + sb->s_dirt = 0; /* paranoia */ + return; + } + /* Push the log and superblock a little */ + VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error); + sb->s_dirt = 0; +} + +STATIC int +linvfs_statfs( + struct super_block *sb, + struct statfs *statp) +{ + vfs_t *vfsp = LINVFS_GET_VFS(sb); + int error; + + VFS_STATVFS(vfsp, statp, NULL, error); + return error; +} + +STATIC int +linvfs_remount( + struct super_block *sb, + int *flags, + char *options) +{ + vfs_t *vfsp = LINVFS_GET_VFS(sb); + struct xfs_mount_args *args = args_allocate(sb); + int error; + + VFS_PARSEARGS(vfsp, options, args, 1, error); + if (error) + goto out; + + VFS_MNTUPDATE(vfsp, flags, args, error); + +out: + kmem_free(args, sizeof(*args)); + return error; +} + +STATIC void +linvfs_freeze_fs( + struct super_block *sb) +{ + vfs_t *vfsp; + vnode_t *vp; + int error; + + vfsp = LINVFS_GET_VFS(sb); + if (sb->s_flags & MS_RDONLY) + return; + VFS_ROOT(vfsp, &vp, error); + VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, XFS_IOC_FREEZE, 0, error); + VN_RELE(vp); +} + +STATIC void +linvfs_unfreeze_fs( + struct super_block *sb) +{ + vfs_t *vfsp; + vnode_t *vp; + int error; + + vfsp = LINVFS_GET_VFS(sb); + VFS_ROOT(vfsp, &vp, error); + VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, XFS_IOC_THAW, 0, error); + VN_RELE(vp); +} + +STATIC int +linvfs_dentry_to_fh( + struct dentry *dentry, + __u32 *data, + int *lenp, + int need_parent) +{ + struct inode *inode = dentry->d_inode ; + vnode_t *vp = LINVFS_GET_VP(inode); + int maxlen = *lenp; + xfs_fid2_t fid; + int error; + + if (maxlen < 3) + return 255 ; + + VOP_FID2(vp, (struct fid *)&fid, error); + data[0] = (__u32)fid.fid_ino; /* 32 bits of inode is OK */ + data[1] = fid.fid_gen; + + *lenp = 2 ; + if (maxlen < 4 || ! need_parent) + return 2 ; + + inode = dentry->d_parent->d_inode ; + vp = LINVFS_GET_VP(inode); + + VOP_FID2(vp, (struct fid *)&fid, error); + data[2] = (__u32)fid.fid_ino; /* 32 bits of inode is OK */ + *lenp = 3 ; + if (maxlen < 4) + return 3 ; + data[3] = fid.fid_gen; + *lenp = 4 ; + return 4 ; +} + +STATIC struct dentry * +linvfs_fh_to_dentry( + struct super_block *sb, + __u32 *data, + int len, + int fhtype, + int parent) +{ + vnode_t *vp; + struct inode *inode = NULL; + struct list_head *lp; + struct dentry *result; + xfs_fid2_t xfid; + vfs_t *vfsp = LINVFS_GET_VFS(sb); + int error; + + xfid.fid_len = sizeof(xfs_fid2_t) - sizeof(xfid.fid_len); + xfid.fid_pad = 0; + + if (!parent) { + xfid.fid_gen = data[1]; + xfid.fid_ino = (__u64)data[0]; + } else { + if (fhtype == 4) + xfid.fid_gen = data[3]; + else + xfid.fid_gen = 0; + xfid.fid_ino = (__u64)data[2]; + } + + VFS_VGET(vfsp, &vp, (fid_t *)&xfid, error); + if (error || vp == NULL) + return ERR_PTR(-ESTALE) ; + + inode = LINVFS_GET_IP(vp); + spin_lock(&dcache_lock); + for (lp = inode->i_dentry.next; lp != &inode->i_dentry ; lp=lp->next) { + result = list_entry(lp,struct dentry, d_alias); + if (! 
(result->d_flags & DCACHE_NFSD_DISCONNECTED)) { + dget_locked(result); + result->d_vfs_flags |= DCACHE_REFERENCED; + spin_unlock(&dcache_lock); + iput(inode); + return result; + } + } + spin_unlock(&dcache_lock); + result = d_alloc_root(inode); + if (result == NULL) { + iput(inode); + return ERR_PTR(-ENOMEM); + } + result->d_flags |= DCACHE_NFSD_DISCONNECTED; + return result; +} + +STATIC int +linvfs_show_options( + struct seq_file *m, + struct vfsmount *mnt) +{ + struct vfs *vfsp = LINVFS_GET_VFS(mnt->mnt_sb); + int error; + + VFS_SHOWARGS(vfsp, m, error); + return error; +} + +STATIC int +linvfs_getxstate( + struct super_block *sb, + struct fs_quota_stat *fqs) +{ + struct vfs *vfsp = LINVFS_GET_VFS(sb); + int error; + + VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error); + return -error; +} + +STATIC int +linvfs_setxstate( + struct super_block *sb, + unsigned int flags, + int op) +{ + struct vfs *vfsp = LINVFS_GET_VFS(sb); + int error; + + VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error); + return -error; +} + +STATIC int +linvfs_getxquota( + struct super_block *sb, + int type, + qid_t id, + struct fs_disk_quota *fdq) +{ + struct vfs *vfsp = LINVFS_GET_VFS(sb); + int error, getmode; + + getmode = (type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETQUOTA; + VFS_QUOTACTL(vfsp, getmode, id, (caddr_t)fdq, error); + return -error; +} + +STATIC int +linvfs_setxquota( + struct super_block *sb, + int type, + qid_t id, + struct fs_disk_quota *fdq) +{ + struct vfs *vfsp = LINVFS_GET_VFS(sb); + int error, setmode; + + setmode = (type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETQLIM; + VFS_QUOTACTL(vfsp, setmode, id, (caddr_t)fdq, error); + return -error; +} + +STATIC struct super_block * +linvfs_read_super( + struct super_block *sb, + void *data, + int silent) +{ + vnode_t *rootvp; + struct vfs *vfsp = vfs_allocate(); + struct xfs_mount_args *args = args_allocate(sb); + struct statfs statvfs; + int error; + + vfsp->vfs_super = sb; + LINVFS_SET_VFS(sb, vfsp); + if (sb->s_flags & MS_RDONLY) + vfsp->vfs_flag |= VFS_RDONLY; + bhv_insert_all_vfsops(vfsp); + + VFS_PARSEARGS(vfsp, (char *)data, args, 0, error); + if (error) { + bhv_remove_all_vfsops(vfsp, 1); + goto fail_vfsop; + } + + sb_min_blocksize(sb, BBSIZE); + sb->s_maxbytes = XFS_MAX_FILE_OFFSET; + sb->s_qcop = &linvfs_qops; + sb->s_op = &linvfs_sops; + + VFS_MOUNT(vfsp, args, NULL, error); + if (error) { + bhv_remove_all_vfsops(vfsp, 1); + goto fail_vfsop; + } + + VFS_STATVFS(vfsp, &statvfs, NULL, error); + if (error) + goto fail_unmount; + + sb->s_dirt = 1; + sb->s_magic = XFS_SB_MAGIC; + sb->s_blocksize = statvfs.f_bsize; + sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1; + set_posix_acl_flag(sb); + + VFS_ROOT(vfsp, &rootvp, error); + if (error) + goto fail_unmount; + + sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp)); + if (!sb->s_root) + goto fail_vnrele; + if (is_bad_inode(sb->s_root->d_inode)) + goto fail_vnrele; + if (linvfs_start_syncd(vfsp)) + goto fail_vnrele; + vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address); + + kmem_free(args, sizeof(*args)); + return sb; + +fail_vnrele: + if (sb->s_root) { + dput(sb->s_root); + sb->s_root = NULL; + } else { + VN_RELE(rootvp); + } + +fail_unmount: + VFS_UNMOUNT(vfsp, 0, NULL, error); + +fail_vfsop: + vfs_deallocate(vfsp); + kmem_free(args, sizeof(*args)); + return NULL; +} + + +STATIC struct super_operations linvfs_sops = { + .alloc_inode = linvfs_alloc_inode, + .destroy_inode = linvfs_destroy_inode, + .write_inode = linvfs_write_inode, + .clear_inode = linvfs_clear_inode, + .put_super = 
linvfs_put_super, + .write_super = linvfs_write_super, + .write_super_lockfs = linvfs_freeze_fs, + .unlockfs = linvfs_unfreeze_fs, + .statfs = linvfs_statfs, + .remount_fs = linvfs_remount, + .fh_to_dentry = linvfs_fh_to_dentry, + .dentry_to_fh = linvfs_dentry_to_fh, + .show_options = linvfs_show_options, +}; + +STATIC struct quotactl_ops linvfs_qops = { + .get_xstate = linvfs_getxstate, + .set_xstate = linvfs_setxstate, + .get_xquota = linvfs_getxquota, + .set_xquota = linvfs_setxquota, +}; + +STATIC struct file_system_type xfs_fs_type = { + .owner = THIS_MODULE, + .name = "xfs", + .read_super = linvfs_read_super, + .fs_flags = FS_REQUIRES_DEV, +}; + + +STATIC int __init +init_xfs_fs( void ) +{ + int error; + struct sysinfo si; + static char message[] __initdata = KERN_INFO \ + XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n"; + + printk(message); + + si_meminfo(&si); + xfs_physmem = si.totalram; + + error = init_inodecache(); + if (error < 0) + goto undo_inodecache; + + error = pagebuf_init(); + if (error < 0) + goto undo_pagebuf; + + vn_init(); + xfs_init(); + uuid_init(); + vfs_initdmapi(); + vfs_initquota(); + + error = register_filesystem(&xfs_fs_type); + if (error) + goto undo_register; + return 0; + +undo_register: + pagebuf_terminate(); + +undo_pagebuf: + destroy_inodecache(); + +undo_inodecache: + return error; +} + +STATIC void __exit +exit_xfs_fs( void ) +{ + unregister_filesystem(&xfs_fs_type); + xfs_cleanup(); + vfs_exitquota(); + vfs_exitdmapi(); + pagebuf_terminate(); + destroy_inodecache(); +} + +module_init(init_xfs_fs); +module_exit(exit_xfs_fs); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled"); +MODULE_LICENSE("GPL"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_super.h linux.22-ac2/fs/xfs/linux/xfs_super.h --- linux.vanilla/fs/xfs/linux/xfs_super.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_super.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPER_H__ +#define __XFS_SUPER_H__ + +#ifdef CONFIG_XFS_DMAPI +# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops) +# define vfs_initdmapi() dmapi_init() +# define vfs_exitdmapi() dmapi_uninit() +#else +# define vfs_insertdmapi(vfs) do { } while (0) +# define vfs_initdmapi() do { } while (0) +# define vfs_exitdmapi() do { } while (0) +#endif + +#ifdef CONFIG_XFS_QUOTA +# define vfs_insertquota(vfs) vfs_insertops(vfsp, &xfs_qmops) +# define vfs_initquota() xfs_qm_init() +# define vfs_exitquota() xfs_qm_exit() +#else +# define vfs_insertquota(vfs) do { } while (0) +# define vfs_initquota() do { } while (0) +# define vfs_exitquota() do { } while (0) +#endif + +#ifdef CONFIG_XFS_POSIX_ACL +# define XFS_ACL_STRING "ACLs, " +# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL) +#else +# define XFS_ACL_STRING +# define set_posix_acl_flag(sb) do { } while (0) +#endif + +#ifdef CONFIG_XFS_RT +# define XFS_REALTIME_STRING "realtime, " +#else +# define XFS_REALTIME_STRING +#endif + +#ifdef CONFIG_XFS_VNODE_TRACING +# define XFS_VNTRACE_STRING "VN-trace, " +#else +# define XFS_VNTRACE_STRING +#endif + +#ifdef XFSDEBUG +# define XFS_DBG_STRING "debug" +#else +# define XFS_DBG_STRING "no debug" +#endif + +#define XFS_BUILD_OPTIONS XFS_ACL_STRING \ + XFS_REALTIME_STRING \ + XFS_VNTRACE_STRING \ + XFS_DBG_STRING /* DBG must be last */ + +#define LINVFS_GET_VFS(s) \ + (vfs_t *)((s)->u.generic_sbp) +#define LINVFS_SET_VFS(s, vfsp) \ + ((s)->u.generic_sbp = vfsp) + +struct xfs_mount; +struct pb_target; +struct block_device; + +extern struct inode *xfs_get_inode(bhv_desc_t *, xfs_ino_t, int); +extern void xfs_initialize_vnode(bhv_desc_t *, vnode_t *, bhv_desc_t *, int); + +extern int xfs_blkdev_get(struct xfs_mount *, const char *, + struct block_device **); +extern void xfs_blkdev_put(struct block_device *); + +extern struct pb_target *xfs_alloc_buftarg(struct block_device *); +extern void xfs_relse_buftarg(struct pb_target *); +extern void xfs_free_buftarg(struct pb_target *); + +extern void xfs_setsize_buftarg(struct pb_target *, unsigned int, unsigned int); +extern unsigned int xfs_getsize_buftarg(struct pb_target *); +extern int linvfs_start_syncd(vfs_t *); +extern void linvfs_stop_syncd(vfs_t *); + +#endif /* __XFS_SUPER_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_syncd.c linux.22-ac2/fs/xfs/linux/xfs_syncd.c --- linux.vanilla/fs/xfs/linux/xfs_syncd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_syncd.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. 
Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include + +static void sync_timeout(unsigned long __data) +{ + struct task_struct * p = (struct task_struct *) __data; + + wake_up_process(p); +} + +#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR|SYNC_REFCACHE) + +int syncd(void *arg) +{ + vfs_t *vfsp = (vfs_t *) arg; + int error; + struct timer_list timer; + + daemonize(); + reparent_to_init(); + spin_lock_irq(¤t->sigmask_lock); + sigfillset(¤t->blocked); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + + sprintf(current->comm, "xfs_syncd"); + + vfsp->vfs_sync_task = current; + wmb(); + wake_up(&vfsp->vfs_wait_sync_task); + + init_timer(&timer); + timer.data = (unsigned long)current; + timer.function = sync_timeout; + + do { + mod_timer(&timer, jiffies + xfs_params.sync_interval); + interruptible_sleep_on(&vfsp->vfs_sync); + + if (!(vfsp->vfs_flag & VFS_RDONLY)) + VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error); + } while (!(vfsp->vfs_flag & VFS_UMOUNT)); + + del_timer_sync(&timer); + vfsp->vfs_sync_task = NULL; + wmb(); + wake_up(&vfsp->vfs_wait_sync_task); + return 0; +} + +int +linvfs_start_syncd(vfs_t *vfsp) +{ + int pid; + + pid = kernel_thread(syncd, (void *) vfsp, + CLONE_VM | CLONE_FS | CLONE_FILES); + if (pid < 0) + return pid; + wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task); + return 0; +} + +void +linvfs_stop_syncd(vfs_t *vfsp) +{ + vfsp->vfs_flag |= VFS_UMOUNT; + wmb(); + + wake_up(&vfsp->vfs_sync); + wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_sysctl.c linux.22-ac2/fs/xfs/linux/xfs_sysctl.c --- linux.vanilla/fs/xfs/linux/xfs_sysctl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_sysctl.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_rw.h" +#include +#include + + +STATIC ulong xfs_min[XFS_PARAM] = { \ + 0, 0, 0, 0, 0, 0, 0, 0, HZ }; +STATIC ulong xfs_max[XFS_PARAM] = { \ + XFS_REFCACHE_SIZE_MAX, XFS_REFCACHE_SIZE_MAX, + 1, 1, 1, 1, 127, 11, HZ * 60 }; + +static struct ctl_table_header *xfs_table_header; + + +/* Custom proc handlers */ + +STATIC int +xfs_refcache_resize_proc_handler( + ctl_table *ctl, + int write, + struct file *filp, + void *buffer, + size_t *lenp) +{ + int ret, *valp = ctl->data; + int xfs_refcache_new_size; + int xfs_refcache_old_size = *valp; + + ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp); + xfs_refcache_new_size = *valp; + + if (!ret && write && xfs_refcache_new_size != xfs_refcache_old_size) { + xfs_refcache_resize(xfs_refcache_new_size); + /* Don't purge more than size of the cache */ + if (xfs_refcache_new_size < xfs_params.refcache_purge) + xfs_params.refcache_purge = xfs_refcache_new_size; + } + + return ret; +} + +STATIC int +xfs_stats_clear_proc_handler( + ctl_table *ctl, + int write, + struct file *filp, + void *buffer, + size_t *lenp) +{ + int ret, *valp = ctl->data; + __uint32_t vn_active; + + ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp); + + if (!ret && write && *valp) { + printk("XFS Clearing xfsstats\n"); + /* save vn_active, it's a universal truth! 
*/ + vn_active = xfsstats.vn_active; + memset(&xfsstats, 0, sizeof(xfsstats)); + xfsstats.vn_active = vn_active; + xfs_params.stats_clear = 0; + } + + return ret; +} + +STATIC ctl_table xfs_table[] = { + {XFS_REFCACHE_SIZE, "refcache_size", &xfs_params.refcache_size, + sizeof(ulong), 0644, NULL, &xfs_refcache_resize_proc_handler, + &sysctl_intvec, NULL, &xfs_min[0], &xfs_max[0]}, + + {XFS_REFCACHE_PURGE, "refcache_purge", &xfs_params.refcache_purge, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[1], &xfs_params.refcache_size}, + + {XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear, + sizeof(ulong), 0644, NULL, &xfs_stats_clear_proc_handler, + &sysctl_intvec, NULL, &xfs_min[2], &xfs_max[2]}, + + {XFS_RESTRICT_CHOWN, "restrict_chown", &xfs_params.restrict_chown, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[3], &xfs_max[3]}, + + {XFS_SGID_INHERIT, "irix_sgid_inherit", &xfs_params.sgid_inherit, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[4], &xfs_max[4]}, + + {XFS_SYMLINK_MODE, "irix_symlink_mode", &xfs_params.symlink_mode, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[5], &xfs_max[5]}, + + {XFS_PANIC_MASK, "panic_mask", &xfs_params.panic_mask, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[6], &xfs_max[6]}, + + {XFS_ERRLEVEL, "error_level", &xfs_params.error_level, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[7], &xfs_max[7]}, + + {XFS_SYNC_INTERVAL, "sync_interval", &xfs_params.sync_interval, + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &xfs_min[8], &xfs_max[8]}, + + {0} +}; + +STATIC ctl_table xfs_dir_table[] = { + {FS_XFS, "xfs", NULL, 0, 0555, xfs_table}, + {0} +}; + +STATIC ctl_table xfs_root_table[] = { + {CTL_FS, "fs", NULL, 0, 0555, xfs_dir_table}, + {0} +}; + +void +xfs_sysctl_register(void) +{ + xfs_table_header = register_sysctl_table(xfs_root_table, 1); +} + +void +xfs_sysctl_unregister(void) +{ + if (xfs_table_header) + unregister_sysctl_table(xfs_table_header); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_sysctl.h linux.22-ac2/fs/xfs/linux/xfs_sysctl.h --- linux.vanilla/fs/xfs/linux/xfs_sysctl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_sysctl.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#ifndef __XFS_SYSCTL_H__ +#define __XFS_SYSCTL_H__ + +#include + +/* + * Tunable xfs parameters + */ + +#define XFS_PARAM (sizeof(struct xfs_param) / sizeof(ulong)) + +typedef struct xfs_param { + ulong refcache_size; /* Size of NFS reference cache. */ + ulong refcache_purge; /* # of entries to purge each time. */ + ulong stats_clear; /* Reset all XFS statistics to zero. */ + ulong restrict_chown; /* Root/non-root can give away files. */ + ulong sgid_inherit; /* Inherit ISGID bit if process' GID is */ + /* not a member of the parent dir GID. */ + ulong symlink_mode; /* Symlink creat mode affected by umask. */ + ulong panic_mask; /* bitmask to specify panics on errors. */ + ulong error_level; /* Degree of reporting for internal probs*/ + ulong sync_interval; /* time between sync calls */ +} xfs_param_t; + +/* + * xfs_error_level: + * + * How much error reporting will be done when internal problems are + * encountered. These problems normally return an EFSCORRUPTED to their + * caller, with no other information reported. + * + * 0 No error reports + * 1 Report EFSCORRUPTED errors that will cause a filesystem shutdown + * 5 Report all EFSCORRUPTED errors (all of the above errors, plus any + * additional errors that are known to not cause shutdowns) + * + * xfs_panic_mask bit 0x8 turns the error reports into panics + */ + +enum { + XFS_REFCACHE_SIZE = 1, + XFS_REFCACHE_PURGE = 2, + XFS_STATS_CLEAR = 3, + XFS_RESTRICT_CHOWN = 4, + XFS_SGID_INHERIT = 5, + XFS_SYMLINK_MODE = 6, + XFS_PANIC_MASK = 7, + XFS_ERRLEVEL = 8, + XFS_SYNC_INTERVAL = 9, +}; + +extern xfs_param_t xfs_params; + +#ifdef CONFIG_SYSCTL +extern void xfs_sysctl_register(void); +extern void xfs_sysctl_unregister(void); +#else +# define xfs_sysctl_register() do { } while (0) +# define xfs_sysctl_unregister() do { } while (0) +#endif /* CONFIG_SYSCTL */ + +#endif /* __XFS_SYSCTL_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_version.h linux.22-ac2/fs/xfs/linux/xfs_version.h --- linux.vanilla/fs/xfs/linux/xfs_version.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_version.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Dummy file that can contain a timestamp to put into the + * XFS init string, to help users keep track of what they're + * running + */ + +#ifndef __XFS_VERSION_H__ +#define __XFS_VERSION_H__ + +#define XFS_VERSION_STRING "SGI XFS" + +#endif /* __XFS_VERSION_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_vfs.c linux.22-ac2/fs/xfs/linux/xfs_vfs.c --- linux.vanilla/fs/xfs/linux/xfs_vfs.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_vfs.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_macros.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_clnt.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_imap.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_quota.h" + +int +vfs_mount( + struct bhv_desc *bdp, + struct xfs_mount_args *args, + struct cred *cr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_mount) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_mount)(next, args, cr)); +} + +int +vfs_parseargs( + struct bhv_desc *bdp, + char *s, + struct xfs_mount_args *args, + int f) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! 
(bhvtovfsops(next))->vfs_parseargs) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_parseargs)(next, s, args, f)); +} + +int +vfs_showargs( + struct bhv_desc *bdp, + struct seq_file *m) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_showargs) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_showargs)(next, m)); +} + +int +vfs_unmount( + struct bhv_desc *bdp, + int fl, + struct cred *cr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_unmount) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_unmount)(next, fl, cr)); +} + +int +vfs_mntupdate( + struct bhv_desc *bdp, + int *fl, + struct xfs_mount_args *args) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_mntupdate) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_mntupdate)(next, fl, args)); +} + + +int +vfs_root( + struct bhv_desc *bdp, + struct vnode **vpp) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_root) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_root)(next, vpp)); +} + +int +vfs_statvfs( + struct bhv_desc *bdp, + struct statfs *sp, + struct vnode *vp) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_statvfs) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_statvfs)(next, sp, vp)); +} + +int +vfs_sync( + struct bhv_desc *bdp, + int fl, + struct cred *cr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_sync) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_sync)(next, fl, cr)); +} + +int +vfs_vget( + struct bhv_desc *bdp, + struct vnode **vpp, + struct fid *fidp) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_vget) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_vget)(next, vpp, fidp)); +} + +int +vfs_dmapiops( + struct bhv_desc *bdp, + caddr_t addr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_dmapiops) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_dmapiops)(next, addr)); +} + +int +vfs_quotactl( + struct bhv_desc *bdp, + int cmd, + int id, + caddr_t addr) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_quotactl) + next = BHV_NEXT(next); + return ((*bhvtovfsops(next)->vfs_quotactl)(next, cmd, id, addr)); +} + +struct inode * +vfs_get_inode( + struct bhv_desc *bdp, + xfs_ino_t ino, + int fl) +{ + struct bhv_desc *next = bdp; + + while (! (bhvtovfsops(next))->vfs_get_inode) + next = BHV_NEXTNULL(next); + return ((*bhvtovfsops(next)->vfs_get_inode)(next, ino, fl)); +} + +void +vfs_init_vnode( + struct bhv_desc *bdp, + struct vnode *vp, + struct bhv_desc *bp, + int unlock) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! (bhvtovfsops(next))->vfs_init_vnode) + next = BHV_NEXT(next); + ((*bhvtovfsops(next)->vfs_init_vnode)(next, vp, bp, unlock)); +} + +void +vfs_force_shutdown( + struct bhv_desc *bdp, + int fl, + char *file, + int line) +{ + struct bhv_desc *next = bdp; + + ASSERT(next); + while (! 
(bhvtovfsops(next))->vfs_force_shutdown) + next = BHV_NEXT(next); + ((*bhvtovfsops(next)->vfs_force_shutdown)(next, fl, file, line)); +} + +vfs_t * +vfs_allocate( void ) +{ + struct vfs *vfsp; + + vfsp = kmem_zalloc(sizeof(vfs_t), KM_SLEEP); + bhv_head_init(VFS_BHVHEAD(vfsp), "vfs"); + init_waitqueue_head(&vfsp->vfs_wait_sync_task); + init_waitqueue_head(&vfsp->vfs_sync); + return vfsp; +} + +void +vfs_deallocate( + struct vfs *vfsp) +{ + bhv_head_destroy(VFS_BHVHEAD(vfsp)); + kmem_free(vfsp, sizeof(vfs_t)); +} + +void +vfs_insertops( + struct vfs *vfsp, + struct bhv_vfsops *vfsops) +{ + struct bhv_desc *bdp; + + bdp = kmem_alloc(sizeof(struct bhv_desc), KM_SLEEP); + bhv_desc_init(bdp, NULL, vfsp, vfsops); + bhv_insert(&vfsp->vfs_bh, bdp); +} + +void +vfs_insertbhv( + struct vfs *vfsp, + struct bhv_desc *bdp, + struct vfsops *vfsops, + void *mount) +{ + bhv_desc_init(bdp, mount, vfsp, vfsops); + bhv_insert_initial(&vfsp->vfs_bh, bdp); +} + +void +bhv_remove_vfsops( + struct vfs *vfsp, + int pos) +{ + struct bhv_desc *bhv; + + bhv = bhv_lookup_range(&vfsp->vfs_bh, pos, pos); + if (!bhv) + return; + bhv_remove(&vfsp->vfs_bh, bhv); + kmem_free(bhv, sizeof(*bhv)); +} + +void +bhv_remove_all_vfsops( + struct vfs *vfsp, + int freebase) +{ + struct xfs_mount *mp; + + bhv_remove_vfsops(vfsp, VFS_POSITION_QM); + bhv_remove_vfsops(vfsp, VFS_POSITION_DM); + if (!freebase) + return; + mp = XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfsp), &xfs_vfsops)); + VFS_REMOVEBHV(vfsp, &mp->m_bhv); + xfs_mount_free(mp, 0); +} + +void +bhv_insert_all_vfsops( + struct vfs *vfsp) +{ + struct xfs_mount *mp; + + mp = xfs_mount_init(); + vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp); + vfs_insertdmapi(vfsp); + vfs_insertquota(vfsp); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_vfs.h linux.22-ac2/fs/xfs/linux/xfs_vfs.h --- linux.vanilla/fs/xfs/linux/xfs_vfs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_vfs.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_VFS_H__ +#define __XFS_VFS_H__ + +#include + +struct fid; +struct cred; +struct vnode; +struct statfs; +struct seq_file; +struct super_block; +struct xfs_mount_args; + +typedef struct vfs { + u_int vfs_flag; /* flags */ + fsid_t vfs_fsid; /* file system ID */ + fsid_t *vfs_altfsid; /* An ID fixed for life of FS */ + bhv_head_t vfs_bh; /* head of vfs behavior chain */ + struct super_block *vfs_super; /* Linux superblock structure */ + struct task_struct *vfs_sync_task; + wait_queue_head_t vfs_sync; + wait_queue_head_t vfs_wait_sync_task; +} vfs_t; + +#define vfs_fbhv vfs_bh.bh_first /* 1st on vfs behavior chain */ + +#define bhvtovfs(bdp) ( (struct vfs *)BHV_VOBJ(bdp) ) +#define bhvtovfsops(bdp) ( (struct vfsops *)BHV_OPS(bdp) ) +#define VFS_BHVHEAD(vfs) ( &(vfs)->vfs_bh ) +#define VFS_REMOVEBHV(vfs, bdp) ( bhv_remove(VFS_BHVHEAD(vfs), bdp) ) + +#define VFS_POSITION_BASE BHV_POSITION_BASE /* chain bottom */ +#define VFS_POSITION_TOP BHV_POSITION_TOP /* chain top */ +#define VFS_POSITION_INVALID BHV_POSITION_INVALID /* invalid pos. num */ + +typedef enum { + VFS_BHV_UNKNOWN, /* not specified */ + VFS_BHV_XFS, /* xfs */ + VFS_BHV_DM, /* data migration */ + VFS_BHV_QM, /* quota manager */ + VFS_BHV_IO, /* IO path */ + VFS_BHV_END /* housekeeping end-of-range */ +} vfs_bhv_t; + +#define VFS_POSITION_XFS (BHV_POSITION_BASE) +#define VFS_POSITION_DM (VFS_POSITION_BASE+10) +#define VFS_POSITION_QM (VFS_POSITION_BASE+20) +#define VFS_POSITION_IO (VFS_POSITION_BASE+30) + +#define VFS_RDONLY 0x0001 /* read-only vfs */ +#define VFS_GRPID 0x0002 /* group-ID assigned from directory */ +#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */ +#define VFS_UMOUNT 0x0008 /* unmount in progress */ +#define VFS_END 0x0008 /* max flag */ + +#define SYNC_ATTR 0x0001 /* sync attributes */ +#define SYNC_CLOSE 0x0002 /* close file system down */ +#define SYNC_DELWRI 0x0004 /* look at delayed writes */ +#define SYNC_WAIT 0x0008 /* wait for i/o to complete */ +#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. 
superblocks) */ +#define SYNC_BDFLUSH 0x0010 /* BDFLUSH is calling -- don't block */ +#define SYNC_REFCACHE 0x0020 /* prune some of the nfs ref cache */ + + +#define IGET_NOALLOC 0x0001 /* vfs_get_inode may return NULL */ + +typedef int (*vfs_mount_t)(bhv_desc_t *, + struct xfs_mount_args *, struct cred *); +typedef int (*vfs_parseargs_t)(bhv_desc_t *, char *, + struct xfs_mount_args *, int); +typedef int (*vfs_showargs_t)(bhv_desc_t *, struct seq_file *); +typedef int (*vfs_unmount_t)(bhv_desc_t *, int, struct cred *); +typedef int (*vfs_mntupdate_t)(bhv_desc_t *, int *, + struct xfs_mount_args *); +typedef int (*vfs_root_t)(bhv_desc_t *, struct vnode **); +typedef int (*vfs_statvfs_t)(bhv_desc_t *, struct statfs *, struct vnode *); +typedef int (*vfs_sync_t)(bhv_desc_t *, int, struct cred *); +typedef int (*vfs_vget_t)(bhv_desc_t *, struct vnode **, struct fid *); +typedef int (*vfs_dmapiops_t)(bhv_desc_t *, caddr_t); +typedef int (*vfs_quotactl_t)(bhv_desc_t *, int, int, caddr_t); +typedef void (*vfs_init_vnode_t)(bhv_desc_t *, + struct vnode *, bhv_desc_t *, int); +typedef void (*vfs_force_shutdown_t)(bhv_desc_t *, int, char *, int); +typedef struct inode * (*vfs_get_inode_t)(bhv_desc_t *, xfs_ino_t, int); + +typedef struct vfsops { + bhv_position_t vf_position; /* behavior chain position */ + vfs_mount_t vfs_mount; /* mount file system */ + vfs_parseargs_t vfs_parseargs; /* parse mount options */ + vfs_showargs_t vfs_showargs; /* unparse mount options */ + vfs_unmount_t vfs_unmount; /* unmount file system */ + vfs_mntupdate_t vfs_mntupdate; /* update file system options */ + vfs_root_t vfs_root; /* get root vnode */ + vfs_statvfs_t vfs_statvfs; /* file system statistics */ + vfs_sync_t vfs_sync; /* flush files */ + vfs_vget_t vfs_vget; /* get vnode from fid */ + vfs_dmapiops_t vfs_dmapiops; /* data migration */ + vfs_quotactl_t vfs_quotactl; /* disk quota */ + vfs_get_inode_t vfs_get_inode; /* bhv specific iget */ + vfs_init_vnode_t vfs_init_vnode; /* initialize a new vnode */ + vfs_force_shutdown_t vfs_force_shutdown; /* crash and burn */ +} vfsops_t; + +/* + * VFS's. Operates on vfs structure pointers (starts at bhv head). + */ +#define VHEAD(v) ((v)->vfs_fbhv) +#define VFS_MOUNT(v, ma,cr, rv) ((rv) = vfs_mount(VHEAD(v), ma,cr)) +#define VFS_PARSEARGS(v, o,ma,f, rv) ((rv) = vfs_parseargs(VHEAD(v), o,ma,f)) +#define VFS_SHOWARGS(v, m, rv) ((rv) = vfs_showargs(VHEAD(v), m)) +#define VFS_UNMOUNT(v, f, cr, rv) ((rv) = vfs_unmount(VHEAD(v), f,cr)) +#define VFS_MNTUPDATE(v, fl, args, rv) ((rv) = vfs_mntupdate(VHEAD(v), fl, args)) +#define VFS_ROOT(v, vpp, rv) ((rv) = vfs_root(VHEAD(v), vpp)) +#define VFS_STATVFS(v, sp,vp, rv) ((rv) = vfs_statvfs(VHEAD(v), sp,vp)) +#define VFS_SYNC(v, flag,cr, rv) ((rv) = vfs_sync(VHEAD(v), flag,cr)) +#define VFS_VGET(v, vpp,fidp, rv) ((rv) = vfs_vget(VHEAD(v), vpp,fidp)) +#define VFS_DMAPIOPS(v, p, rv) ((rv) = vfs_dmapiops(VHEAD(v), p)) +#define VFS_QUOTACTL(v, c,id,p, rv) ((rv) = vfs_quotactl(VHEAD(v), c,id,p)) +#define VFS_GET_INODE(v, ino, fl) ( vfs_get_inode(VHEAD(v), ino,fl) ) +#define VFS_INIT_VNODE(v, vp,b,ul) ( vfs_init_vnode(VHEAD(v), vp,b,ul) ) +#define VFS_FORCE_SHUTDOWN(v, fl,f,l) ( vfs_force_shutdown(VHEAD(v), fl,f,l) ) + +/* + * PVFS's. Operates on behavior descriptor pointers. 
+ */ +#define PVFS_MOUNT(b, ma,cr, rv) ((rv) = vfs_mount(b, ma,cr)) +#define PVFS_PARSEARGS(b, o,ma,f, rv) ((rv) = vfs_parseargs(b, o,ma,f)) +#define PVFS_SHOWARGS(b, m, rv) ((rv) = vfs_showargs(b, m)) +#define PVFS_UNMOUNT(b, f,cr, rv) ((rv) = vfs_unmount(b, f,cr)) +#define PVFS_MNTUPDATE(b, fl, args, rv) ((rv) = vfs_mntupdate(b, fl, args)) +#define PVFS_ROOT(b, vpp, rv) ((rv) = vfs_root(b, vpp)) +#define PVFS_STATVFS(b, sp,vp, rv) ((rv) = vfs_statvfs(b, sp,vp)) +#define PVFS_SYNC(b, flag,cr, rv) ((rv) = vfs_sync(b, flag,cr)) +#define PVFS_VGET(b, vpp,fidp, rv) ((rv) = vfs_vget(b, vpp,fidp)) +#define PVFS_DMAPIOPS(b, p, rv) ((rv) = vfs_dmapiops(b, p)) +#define PVFS_QUOTACTL(b, c,id,p, rv) ((rv) = vfs_quotactl(b, c,id,p)) +#define PVFS_GET_INODE(b, ino,fl) ( vfs_get_inode(b, ino,fl) ) +#define PVFS_INIT_VNODE(b, vp,b2,ul) ( vfs_init_vnode(b, vp,b2,ul) ) +#define PVFS_FORCE_SHUTDOWN(b, fl,f,l) ( vfs_force_shutdown(b, fl,f,l) ) + +extern int vfs_mount(bhv_desc_t *, struct xfs_mount_args *, struct cred *); +extern int vfs_parseargs(bhv_desc_t *, char *, struct xfs_mount_args *, int); +extern int vfs_showargs(bhv_desc_t *, struct seq_file *); +extern int vfs_unmount(bhv_desc_t *, int, struct cred *); +extern int vfs_mntupdate(bhv_desc_t *, int *, struct xfs_mount_args *); +extern int vfs_root(bhv_desc_t *, struct vnode **); +extern int vfs_statvfs(bhv_desc_t *, struct statfs *, struct vnode *); +extern int vfs_sync(bhv_desc_t *, int, struct cred *); +extern int vfs_vget(bhv_desc_t *, struct vnode **, struct fid *); +extern int vfs_dmapiops(bhv_desc_t *, caddr_t); +extern int vfs_quotactl(bhv_desc_t *, int, int, caddr_t); +extern struct inode *vfs_get_inode(bhv_desc_t *, xfs_ino_t, int); +extern void vfs_init_vnode(bhv_desc_t *, struct vnode *, bhv_desc_t *, int); +extern void vfs_force_shutdown(bhv_desc_t *, int, char *, int); + +typedef struct bhv_vfsops { + struct vfsops bhv_common; + void * bhv_custom; +} bhv_vfsops_t; + +#define vfs_bhv_lookup(v, id) ( bhv_lookup_range(&(v)->vfs_bh, (id), (id)) ) +#define vfs_bhv_custom(b) ( ((bhv_vfsops_t *)BHV_OPS(b))->bhv_custom ) +#define vfs_bhv_set_custom(b,o) ( (b)->bhv_custom = (void *)(o)) +#define vfs_bhv_clr_custom(b) ( (b)->bhv_custom = NULL ) + +extern vfs_t *vfs_allocate(void); +extern void vfs_deallocate(vfs_t *); +extern void vfs_insertops(vfs_t *, bhv_vfsops_t *); +extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *); + +extern void bhv_insert_all_vfsops(struct vfs *); +extern void bhv_remove_all_vfsops(struct vfs *, int); +extern void bhv_remove_vfsops(struct vfs *, int); + +#endif /* __XFS_VFS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_vnode.c linux.22-ac2/fs/xfs/linux/xfs_vnode.c --- linux.vanilla/fs/xfs/linux/xfs_vnode.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_vnode.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + + +uint64_t vn_generation; /* vnode generation number */ +spinlock_t vnumber_lock = SPIN_LOCK_UNLOCKED; + +/* + * Dedicated vnode inactive/reclaim sync semaphores. + * Prime number of hash buckets since address is used as the key. + */ +#define NVSYNC 37 +#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC]) +sv_t vsync[NVSYNC]; + +/* + * Translate stat(2) file types to vnode types and vice versa. + * Aware of numeric order of S_IFMT and vnode type values. + */ +enum vtype iftovt_tab[] = { + VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, + VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON +}; + +u_short vttoif_tab[] = { + 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK +}; + + +void +vn_init(void) +{ + register sv_t *svp; + register int i; + + for (svp = vsync, i = 0; i < NVSYNC; i++, svp++) + init_sv(svp, SV_DEFAULT, "vsy", i); +} + +/* + * Clean a vnode of filesystem-specific data and prepare it for reuse. + */ +STATIC int +vn_reclaim( + struct vnode *vp) +{ + int error; + + XFS_STATS_INC(xfsstats.vn_reclaim); + vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address); + + /* + * Only make the VOP_RECLAIM call if there are behaviors + * to call. + */ + if (vp->v_fbhv) { + VOP_RECLAIM(vp, error); + if (error) + return -error; + } + ASSERT(vp->v_fbhv == NULL); + + VN_LOCK(vp); + vp->v_flag &= (VRECLM|VWAIT); + VN_UNLOCK(vp, 0); + + vp->v_type = VNON; + vp->v_fbhv = NULL; + +#ifdef CONFIG_XFS_VNODE_TRACING + ktrace_free(vp->v_trace); + vp->v_trace = NULL; +#endif + + return 0; +} + +STATIC void +vn_wakeup( + struct vnode *vp) +{ + VN_LOCK(vp); + if (vp->v_flag & VWAIT) + sv_broadcast(vptosync(vp)); + vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED); + VN_UNLOCK(vp, 0); +} + +int +vn_wait( + struct vnode *vp) +{ + VN_LOCK(vp); + if (vp->v_flag & (VINACT | VRECLM)) { + vp->v_flag |= VWAIT; + sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0); + return 1; + } + VN_UNLOCK(vp, 0); + return 0; +} + +struct vnode * +vn_initialize( + struct inode *inode) +{ + struct vnode *vp = LINVFS_GET_VP(inode); + + XFS_STATS_INC(xfsstats.vn_active); + XFS_STATS_INC(xfsstats.vn_alloc); + + vp->v_flag = VMODIFIED; + spinlock_init(&vp->v_lock, "v_lock"); + + spin_lock(&vnumber_lock); + if (!++vn_generation) /* v_number shouldn't be zero */ + vn_generation++; + vp->v_number = vn_generation; + spin_unlock(&vnumber_lock); + + ASSERT(VN_CACHED(vp) == 0); + + /* Initialize the first behavior and the behavior chain head. 
*/ + vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode"); + +#ifdef CONFIG_XFS_VNODE_TRACING + vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); +#endif /* CONFIG_XFS_VNODE_TRACING */ + + vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address); + return vp; +} + +/* + * Get a reference on a vnode. + */ +vnode_t * +vn_get( + struct vnode *vp, + vmap_t *vmap) +{ + struct inode *inode; + + XFS_STATS_INC(xfsstats.vn_get); + inode = LINVFS_GET_IP(vp); + if (inode->i_state & I_FREEING) + return NULL; + + inode = VFS_GET_INODE(vmap->v_vfsp, vmap->v_ino, IGET_NOALLOC); + if (!inode) /* Inode not present */ + return NULL; + + /* We do not want to create new inodes via vn_get, + * returning NULL here is OK. + */ + if (inode->i_state & I_NEW) { + remove_inode_hash(inode); + make_bad_inode(inode); + unlock_new_inode(inode); + iput(inode); + return NULL; + } + + vn_trace_exit(vp, "vn_get", (inst_t *)__return_address); + + return vp; +} + +/* + * Revalidate the Linux inode from the vnode. + */ +int +vn_revalidate( + struct vnode *vp) +{ + struct inode *inode; + vattr_t va; + int error; + + vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address); + ASSERT(vp->v_fbhv != NULL); + + va.va_mask = XFS_AT_STAT|XFS_AT_GENCOUNT; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (!error) { + inode = LINVFS_GET_IP(vp); + inode->i_mode = VTTOIF(va.va_type) | va.va_mode; + inode->i_nlink = va.va_nlink; + inode->i_uid = va.va_uid; + inode->i_gid = va.va_gid; + inode->i_size = va.va_size; + inode->i_blocks = va.va_nblocks; + inode->i_mtime = va.va_mtime.tv_sec; + inode->i_ctime = va.va_ctime.tv_sec; + inode->i_atime = va.va_atime.tv_sec; + VUNMODIFY(vp); + } + return -error; +} + +/* + * purge a vnode from the cache + * At this point the vnode is guaranteed to have no references (vn_count == 0) + * The caller has to make sure that there are no ways someone could + * get a handle (via vn_get) on the vnode (usually done via a mount/vfs lock). + */ +void +vn_purge( + struct vnode *vp, + vmap_t *vmap) +{ + vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address); + +again: + /* + * Check whether vp has already been reclaimed since our caller + * sampled its version while holding a filesystem cache lock that + * its VOP_RECLAIM function acquires. + */ + VN_LOCK(vp); + if (vp->v_number != vmap->v_number) { + VN_UNLOCK(vp, 0); + return; + } + + /* + * If vp is being reclaimed or inactivated, wait until it is inert, + * then proceed. Can't assume that vnode is actually reclaimed + * just because the reclaimed flag is asserted -- a vn_alloc + * reclaim can fail. + */ + if (vp->v_flag & (VINACT | VRECLM)) { + ASSERT(vn_count(vp) == 0); + vp->v_flag |= VWAIT; + sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0); + goto again; + } + + /* + * Another process could have raced in and gotten this vnode... + */ + if (vn_count(vp) > 0) { + VN_UNLOCK(vp, 0); + return; + } + + XFS_STATS_DEC(xfsstats.vn_active); + vp->v_flag |= VRECLM; + VN_UNLOCK(vp, 0); + + /* + * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells + * vp's filesystem to flush and invalidate all cached resources. + * When vn_reclaim returns, vp should have no private data, + * either in a system cache or attached to v_data. + */ + if (vn_reclaim(vp) != 0) + panic("vn_purge: cannot reclaim"); + + /* + * Wakeup anyone waiting for vp to be reclaimed. + */ + vn_wakeup(vp); +} + +/* + * Add a reference to a referenced vnode. 
+ */ +struct vnode * +vn_hold( + struct vnode *vp) +{ + struct inode *inode; + + XFS_STATS_INC(xfsstats.vn_hold); + + VN_LOCK(vp); + inode = igrab(LINVFS_GET_IP(vp)); + ASSERT(inode); + VN_UNLOCK(vp, 0); + + return vp; +} + +/* + * Call VOP_INACTIVE on last reference. + */ +void +vn_rele( + struct vnode *vp) +{ + int vcnt; + int cache; + + XFS_STATS_INC(xfsstats.vn_rele); + + VN_LOCK(vp); + + vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address); + vcnt = vn_count(vp); + + /* + * Since we always get called from put_inode we know + * that i_count won't be decremented after we + * return. + */ + if (!vcnt) { + /* + * As soon as we turn this on, noone can find us in vn_get + * until we turn off VINACT or VRECLM + */ + vp->v_flag |= VINACT; + VN_UNLOCK(vp, 0); + + /* + * Do not make the VOP_INACTIVE call if there + * are no behaviors attached to the vnode to call. + */ + if (vp->v_fbhv) + VOP_INACTIVE(vp, NULL, cache); + + VN_LOCK(vp); + if (vp->v_flag & VWAIT) + sv_broadcast(vptosync(vp)); + + vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED); + } + + VN_UNLOCK(vp, 0); + + vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address); +} + +/* + * Finish the removal of a vnode. + */ +void +vn_remove( + struct vnode *vp) +{ + vmap_t vmap; + + /* Make sure we don't do this to the same vnode twice */ + if (!(vp->v_fbhv)) + return; + + XFS_STATS_INC(xfsstats.vn_remove); + vn_trace_exit(vp, "vn_remove", (inst_t *)__return_address); + + /* + * After the following purge the vnode + * will no longer exist. + */ + VMAP(vp, vmap); + vn_purge(vp, &vmap); +} + + +#ifdef CONFIG_XFS_VNODE_TRACING + +#define KTRACE_ENTER(vp, vk, s, line, ra) \ + ktrace_enter( (vp)->v_trace, \ +/* 0 */ (void *)(__psint_t)(vk), \ +/* 1 */ (void *)(s), \ +/* 2 */ (void *)(__psint_t) line, \ +/* 3 */ (void *)(vn_count(vp)), \ +/* 4 */ (void *)(ra), \ +/* 5 */ (void *)(__psunsigned_t)(vp)->v_flag, \ +/* 6 */ (void *)(__psint_t)smp_processor_id(), \ +/* 7 */ (void *)(__psint_t)(current->pid), \ +/* 8 */ (void *)__return_address, \ +/* 9 */ 0, 0, 0, 0, 0, 0, 0) + +/* + * Vnode tracing code. + */ +void +vn_trace_entry(vnode_t *vp, char *func, inst_t *ra) +{ + KTRACE_ENTER(vp, VNODE_KTRACE_ENTRY, func, 0, ra); +} + +void +vn_trace_exit(vnode_t *vp, char *func, inst_t *ra) +{ + KTRACE_ENTER(vp, VNODE_KTRACE_EXIT, func, 0, ra); +} + +void +vn_trace_hold(vnode_t *vp, char *file, int line, inst_t *ra) +{ + KTRACE_ENTER(vp, VNODE_KTRACE_HOLD, file, line, ra); +} + +void +vn_trace_ref(vnode_t *vp, char *file, int line, inst_t *ra) +{ + KTRACE_ENTER(vp, VNODE_KTRACE_REF, file, line, ra); +} + +void +vn_trace_rele(vnode_t *vp, char *file, int line, inst_t *ra) +{ + KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra); +} +#endif /* CONFIG_XFS_VNODE_TRACING */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/linux/xfs_vnode.h linux.22-ac2/fs/xfs/linux/xfs_vnode.h --- linux.vanilla/fs/xfs/linux/xfs_vnode.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/linux/xfs_vnode.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_VNODE_H__ +#define __XFS_VNODE_H__ + +struct uio; +struct file; +struct vattr; +struct page_buf_bmap_s; +struct attrlist_cursor_kern; + +/* + * Vnode types (unrelated to on-disk inodes). VNON means no type. + */ +typedef enum vtype { + VNON = 0, + VREG = 1, + VDIR = 2, + VBLK = 3, + VCHR = 4, + VLNK = 5, + VFIFO = 6, + VBAD = 7, + VSOCK = 8 +} vtype_t; + +typedef xfs_ino_t vnumber_t; +typedef struct dentry vname_t; +typedef bhv_head_t vn_bhv_head_t; + +/* + * MP locking protocols: + * v_flag, v_vfsp VN_LOCK/VN_UNLOCK + * v_type read-only or fs-dependent + */ +typedef struct vnode { + __u32 v_flag; /* vnode flags (see below) */ + enum vtype v_type; /* vnode type */ + struct vfs *v_vfsp; /* ptr to containing VFS */ + vnumber_t v_number; /* in-core vnode number */ + vn_bhv_head_t v_bh; /* behavior head */ + spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */ + struct inode v_inode; /* Linux inode */ +#ifdef CONFIG_XFS_VNODE_TRACING + struct ktrace *v_trace; /* trace header structure */ +#endif +} vnode_t; + +#define v_fbhv v_bh.bh_first /* first behavior */ +#define v_fops v_bh.bh_first->bd_ops /* first behavior ops */ + +#define VNODE_POSITION_BASE BHV_POSITION_BASE /* chain bottom */ +#define VNODE_POSITION_TOP BHV_POSITION_TOP /* chain top */ +#define VNODE_POSITION_INVALID BHV_POSITION_INVALID /* invalid pos. num */ + +typedef enum { + VN_BHV_UNKNOWN, /* not specified */ + VN_BHV_XFS, /* xfs */ + VN_BHV_DM, /* data migration */ + VN_BHV_QM, /* quota manager */ + VN_BHV_IO, /* IO path */ + VN_BHV_END /* housekeeping end-of-range */ +} vn_bhv_t; + +#define VNODE_POSITION_XFS (VNODE_POSITION_BASE) +#define VNODE_POSITION_DM (VNODE_POSITION_BASE+10) +#define VNODE_POSITION_QM (VNODE_POSITION_BASE+20) +#define VNODE_POSITION_IO (VNODE_POSITION_BASE+30) + +/* + * Macros for dealing with the behavior descriptor inside of the vnode. + */ +#define BHV_TO_VNODE(bdp) ((vnode_t *)BHV_VOBJ(bdp)) +#define BHV_TO_VNODE_NULL(bdp) ((vnode_t *)BHV_VOBJNULL(bdp)) + +#define VN_BHV_HEAD(vp) ((bhv_head_t *)(&((vp)->v_bh))) +#define vn_bhv_head_init(bhp,name) bhv_head_init(bhp,name) +#define vn_bhv_remove(bhp,bdp) bhv_remove(bhp,bdp) +#define vn_bhv_lookup(bhp,ops) bhv_lookup(bhp,ops) +#define vn_bhv_lookup_unlocked(bhp,ops) bhv_lookup_unlocked(bhp,ops) + +/* + * Vnode to Linux inode mapping. + */ +#define LINVFS_GET_VP(inode) ((vnode_t *)list_entry(inode, vnode_t, v_inode)) +#define LINVFS_GET_IP(vp) (&(vp)->v_inode) + +/* + * Conversion between vnode types/modes and encoded type/mode as + * seen by stat(2) and mknod(2). 
+ */ +extern enum vtype iftovt_tab[]; +extern ushort vttoif_tab[]; +#define IFTOVT(M) (iftovt_tab[((M) & S_IFMT) >> 12]) +#define VTTOIF(T) (vttoif_tab[(int)(T)]) +#define MAKEIMODE(T, M) (VTTOIF(T) | ((M) & ~S_IFMT)) + +/* + * Vnode flags. + */ +#define VINACT 0x1 /* vnode is being inactivated */ +#define VRECLM 0x2 /* vnode is being reclaimed */ +#define VWAIT 0x4 /* waiting for VINACT/VRECLM to end */ +#define VMODIFIED 0x8 /* XFS inode state possibly differs */ + /* to the Linux inode state. */ + +typedef enum vrwlock { VRWLOCK_NONE, VRWLOCK_READ, + VRWLOCK_WRITE, VRWLOCK_WRITE_DIRECT, + VRWLOCK_TRY_READ, VRWLOCK_TRY_WRITE } vrwlock_t; + +/* + * Return values for VOP_INACTIVE. A return value of + * VN_INACTIVE_NOCACHE implies that the file system behavior + * has disassociated its state and bhv_desc_t from the vnode. + */ +#define VN_INACTIVE_CACHE 0 +#define VN_INACTIVE_NOCACHE 1 + +/* + * Values for the cmd code given to VOP_VNODE_CHANGE. + */ +typedef enum vchange { + VCHANGE_FLAGS_FRLOCKS = 0, + VCHANGE_FLAGS_ENF_LOCKING = 1, + VCHANGE_FLAGS_TRUNCATED = 2, + VCHANGE_FLAGS_PAGE_DIRTY = 3, + VCHANGE_FLAGS_IOEXCL_COUNT = 4 +} vchange_t; + + +typedef int (*vop_open_t)(bhv_desc_t *, struct cred *); +typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct file *, char *, + size_t, loff_t *, int, struct cred *); +typedef ssize_t (*vop_write_t)(bhv_desc_t *, struct file *, const char *, + size_t, loff_t *, int, struct cred *); +typedef int (*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *, + unsigned int, unsigned long); +typedef int (*vop_getattr_t)(bhv_desc_t *, struct vattr *, int, + struct cred *); +typedef int (*vop_setattr_t)(bhv_desc_t *, struct vattr *, int, + struct cred *); +typedef int (*vop_access_t)(bhv_desc_t *, int, struct cred *); +typedef int (*vop_lookup_t)(bhv_desc_t *, vname_t *, vnode_t **, + int, vnode_t *, struct cred *); +typedef int (*vop_create_t)(bhv_desc_t *, vname_t *, struct vattr *, + vnode_t **, struct cred *); +typedef int (*vop_remove_t)(bhv_desc_t *, vname_t *, struct cred *); +typedef int (*vop_link_t)(bhv_desc_t *, vnode_t *, vname_t *, + struct cred *); +typedef int (*vop_rename_t)(bhv_desc_t *, vname_t *, vnode_t *, vname_t *, + struct cred *); +typedef int (*vop_mkdir_t)(bhv_desc_t *, vname_t *, struct vattr *, + vnode_t **, struct cred *); +typedef int (*vop_rmdir_t)(bhv_desc_t *, vname_t *, struct cred *); +typedef int (*vop_readdir_t)(bhv_desc_t *, struct uio *, struct cred *, + int *); +typedef int (*vop_symlink_t)(bhv_desc_t *, vname_t *, struct vattr *, + char *, vnode_t **, struct cred *); +typedef int (*vop_readlink_t)(bhv_desc_t *, struct uio *, struct cred *); +typedef int (*vop_fsync_t)(bhv_desc_t *, int, struct cred *, + xfs_off_t, xfs_off_t); +typedef int (*vop_inactive_t)(bhv_desc_t *, struct cred *); +typedef int (*vop_fid2_t)(bhv_desc_t *, struct fid *); +typedef int (*vop_release_t)(bhv_desc_t *); +typedef int (*vop_rwlock_t)(bhv_desc_t *, vrwlock_t); +typedef void (*vop_rwunlock_t)(bhv_desc_t *, vrwlock_t); +typedef int (*vop_frlock_t)(bhv_desc_t *, int, struct file_lock *,int, + xfs_off_t, struct cred *); +typedef int (*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int, + struct page_buf_bmap_s *, int *); +typedef int (*vop_reclaim_t)(bhv_desc_t *); +typedef int (*vop_attr_get_t)(bhv_desc_t *, char *, char *, int *, int, + struct cred *); +typedef int (*vop_attr_set_t)(bhv_desc_t *, char *, char *, int, int, + struct cred *); +typedef int (*vop_attr_remove_t)(bhv_desc_t *, char *, int, struct cred *); +typedef int 
(*vop_attr_list_t)(bhv_desc_t *, char *, int, int, + struct attrlist_cursor_kern *, struct cred *); +typedef void (*vop_link_removed_t)(bhv_desc_t *, vnode_t *, int); +typedef void (*vop_vnode_change_t)(bhv_desc_t *, vchange_t, __psint_t); +typedef void (*vop_ptossvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int); +typedef void (*vop_pflushinvalvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int); +typedef int (*vop_pflushvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, + uint64_t, int); +typedef int (*vop_iflush_t)(bhv_desc_t *, int); + + +typedef struct vnodeops { + bhv_position_t vn_position; /* position within behavior chain */ + vop_open_t vop_open; + vop_read_t vop_read; + vop_write_t vop_write; + vop_ioctl_t vop_ioctl; + vop_getattr_t vop_getattr; + vop_setattr_t vop_setattr; + vop_access_t vop_access; + vop_lookup_t vop_lookup; + vop_create_t vop_create; + vop_remove_t vop_remove; + vop_link_t vop_link; + vop_rename_t vop_rename; + vop_mkdir_t vop_mkdir; + vop_rmdir_t vop_rmdir; + vop_readdir_t vop_readdir; + vop_symlink_t vop_symlink; + vop_readlink_t vop_readlink; + vop_fsync_t vop_fsync; + vop_inactive_t vop_inactive; + vop_fid2_t vop_fid2; + vop_rwlock_t vop_rwlock; + vop_rwunlock_t vop_rwunlock; + vop_frlock_t vop_frlock; + vop_bmap_t vop_bmap; + vop_reclaim_t vop_reclaim; + vop_attr_get_t vop_attr_get; + vop_attr_set_t vop_attr_set; + vop_attr_remove_t vop_attr_remove; + vop_attr_list_t vop_attr_list; + vop_link_removed_t vop_link_removed; + vop_vnode_change_t vop_vnode_change; + vop_ptossvp_t vop_tosspages; + vop_pflushinvalvp_t vop_flushinval_pages; + vop_pflushvp_t vop_flush_pages; + vop_release_t vop_release; + vop_iflush_t vop_iflush; +} vnodeops_t; + +/* + * VOP's. + */ +#define _VOP_(op, vp) (*((vnodeops_t *)(vp)->v_fops)->op) + +#define VOP_READ(vp,file,buf,size,offset,ioflags,cr,rv) \ + rv = _VOP_(vop_read, vp)((vp)->v_fbhv,file,buf,size,offset,ioflags,cr) +#define VOP_WRITE(vp,file,buf,size,offset,ioflags,cr,rv) \ + rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,buf,size,offset,ioflags,cr) +#define VOP_BMAP(vp,of,sz,rw,b,n,rv) \ + rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n) +#define VOP_OPEN(vp, cr, rv) \ + rv = _VOP_(vop_open, vp)((vp)->v_fbhv, cr) +#define VOP_GETATTR(vp, vap, f, cr, rv) \ + rv = _VOP_(vop_getattr, vp)((vp)->v_fbhv, vap, f, cr) +#define VOP_SETATTR(vp, vap, f, cr, rv) \ + rv = _VOP_(vop_setattr, vp)((vp)->v_fbhv, vap, f, cr) +#define VOP_ACCESS(vp, mode, cr, rv) \ + rv = _VOP_(vop_access, vp)((vp)->v_fbhv, mode, cr) +#define VOP_LOOKUP(vp,d,vpp,f,rdir,cr,rv) \ + rv = _VOP_(vop_lookup, vp)((vp)->v_fbhv,d,vpp,f,rdir,cr) +#define VOP_CREATE(dvp,d,vap,vpp,cr,rv) \ + rv = _VOP_(vop_create, dvp)((dvp)->v_fbhv,d,vap,vpp,cr) +#define VOP_REMOVE(dvp,d,cr,rv) \ + rv = _VOP_(vop_remove, dvp)((dvp)->v_fbhv,d,cr) +#define VOP_LINK(tdvp,fvp,d,cr,rv) \ + rv = _VOP_(vop_link, tdvp)((tdvp)->v_fbhv,fvp,d,cr) +#define VOP_RENAME(fvp,fnm,tdvp,tnm,cr,rv) \ + rv = _VOP_(vop_rename, fvp)((fvp)->v_fbhv,fnm,tdvp,tnm,cr) +#define VOP_MKDIR(dp,d,vap,vpp,cr,rv) \ + rv = _VOP_(vop_mkdir, dp)((dp)->v_fbhv,d,vap,vpp,cr) +#define VOP_RMDIR(dp,d,cr,rv) \ + rv = _VOP_(vop_rmdir, dp)((dp)->v_fbhv,d,cr) +#define VOP_READDIR(vp,uiop,cr,eofp,rv) \ + rv = _VOP_(vop_readdir, vp)((vp)->v_fbhv,uiop,cr,eofp) +#define VOP_SYMLINK(dvp,d,vap,tnm,vpp,cr,rv) \ + rv = _VOP_(vop_symlink, dvp) ((dvp)->v_fbhv,d,vap,tnm,vpp,cr) +#define VOP_READLINK(vp,uiop,cr,rv) \ + rv = _VOP_(vop_readlink, vp)((vp)->v_fbhv,uiop,cr) +#define VOP_FSYNC(vp,f,cr,b,e,rv) \ + rv = _VOP_(vop_fsync, vp)((vp)->v_fbhv,f,cr,b,e) 
+#define VOP_INACTIVE(vp, cr, rv) \ + rv = _VOP_(vop_inactive, vp)((vp)->v_fbhv, cr) +#define VOP_RELEASE(vp, rv) \ + rv = _VOP_(vop_release, vp)((vp)->v_fbhv) +#define VOP_FID2(vp, fidp, rv) \ + rv = _VOP_(vop_fid2, vp)((vp)->v_fbhv, fidp) +#define VOP_RWLOCK(vp,i) \ + (void)_VOP_(vop_rwlock, vp)((vp)->v_fbhv, i) +#define VOP_RWLOCK_TRY(vp,i) \ + _VOP_(vop_rwlock, vp)((vp)->v_fbhv, i) +#define VOP_RWUNLOCK(vp,i) \ + (void)_VOP_(vop_rwunlock, vp)((vp)->v_fbhv, i) +#define VOP_FRLOCK(vp,c,fl,flags,offset,fr,rv) \ + rv = _VOP_(vop_frlock, vp)((vp)->v_fbhv,c,fl,flags,offset,fr) +#define VOP_RECLAIM(vp, rv) \ + rv = _VOP_(vop_reclaim, vp)((vp)->v_fbhv) +#define VOP_ATTR_GET(vp, name, val, vallenp, fl, cred, rv) \ + rv = _VOP_(vop_attr_get, vp)((vp)->v_fbhv,name,val,vallenp,fl,cred) +#define VOP_ATTR_SET(vp, name, val, vallen, fl, cred, rv) \ + rv = _VOP_(vop_attr_set, vp)((vp)->v_fbhv,name,val,vallen,fl,cred) +#define VOP_ATTR_REMOVE(vp, name, flags, cred, rv) \ + rv = _VOP_(vop_attr_remove, vp)((vp)->v_fbhv,name,flags,cred) +#define VOP_ATTR_LIST(vp, buf, buflen, fl, cursor, cred, rv) \ + rv = _VOP_(vop_attr_list, vp)((vp)->v_fbhv,buf,buflen,fl,cursor,cred) +#define VOP_LINK_REMOVED(vp, dvp, linkzero) \ + (void)_VOP_(vop_link_removed, vp)((vp)->v_fbhv, dvp, linkzero) +#define VOP_VNODE_CHANGE(vp, cmd, val) \ + (void)_VOP_(vop_vnode_change, vp)((vp)->v_fbhv,cmd,val) +/* + * These are page cache functions that now go thru VOPs. + * 'last' parameter is unused and left in for IRIX compatibility + */ +#define VOP_TOSS_PAGES(vp, first, last, fiopt) \ + _VOP_(vop_tosspages, vp)((vp)->v_fbhv,first, last, fiopt) +/* + * 'last' parameter is unused and left in for IRIX compatibility + */ +#define VOP_FLUSHINVAL_PAGES(vp, first, last, fiopt) \ + _VOP_(vop_flushinval_pages, vp)((vp)->v_fbhv,first,last,fiopt) +/* + * 'last' parameter is unused and left in for IRIX compatibility + */ +#define VOP_FLUSH_PAGES(vp, first, last, flags, fiopt, rv) \ + rv = _VOP_(vop_flush_pages, vp)((vp)->v_fbhv,first,last,flags,fiopt) +#define VOP_IOCTL(vp, inode, filp, cmd, arg, rv) \ + rv = _VOP_(vop_ioctl, vp)((vp)->v_fbhv,inode,filp,cmd,arg) +#define VOP_IFLUSH(vp, flags, rv) \ + rv = _VOP_(vop_iflush, vp)((vp)->v_fbhv, flags) + +/* + * Flags for read/write calls - same values as IRIX + */ + +#define IO_NFS 0x00100 +#define IO_ISLOCKED 0x00800 +#define IO_NFS3 0x02000 + +/* + * Flags for VOP_IFLUSH call + */ + +#define FLUSH_SYNC 1 /* wait for flush to complete */ +#define FLUSH_INODE 2 /* flush the inode itself */ +#define FLUSH_LOG 4 /* force the last log entry for + * this inode out to disk */ + +/* + * Flush/Invalidate options for VOP_TOSS_PAGES, VOP_FLUSHINVAL_PAGES and + * VOP_FLUSH_PAGES. + */ +#define FI_NONE 0 /* none */ +#define FI_REMAPF 1 /* Do a remapf prior to the operation */ +#define FI_REMAPF_LOCKED 2 /* Do a remapf prior to the operation. + Prevent VM access to the pages until + the operation completes. */ + +/* + * Vnode attributes. va_mask indicates those attributes the caller + * wants to set (setattr) or extract (getattr). 
+ */ +typedef struct vattr { + int va_mask; /* bit-mask of attributes */ + vtype_t va_type; /* vnode type (for create) */ + mode_t va_mode; /* file access mode */ + uid_t va_uid; /* owner user id */ + gid_t va_gid; /* owner group id */ + xfs_dev_t va_fsid; /* file system id (dev for now) */ + xfs_ino_t va_nodeid; /* node id */ + nlink_t va_nlink; /* number of references to file */ + xfs_off_t va_size; /* file size in bytes */ + timespec_t va_atime; /* time of last access */ + timespec_t va_mtime; /* time of last modification */ + timespec_t va_ctime; /* time file ``created'' */ + xfs_dev_t va_rdev; /* device the file represents */ + u_long va_blksize; /* fundamental block size */ + __int64_t va_nblocks; /* # of blocks allocated */ + u_long va_vcode; /* version code */ + u_long va_xflags; /* random extended file flags */ + u_long va_extsize; /* file extent size */ + u_long va_nextents; /* number of extents in file */ + u_long va_anextents; /* number of attr extents in file */ + int va_projid; /* project id */ + u_int va_gencount; /* object generation count */ +} vattr_t; + +/* + * setattr or getattr attributes + */ +#define XFS_AT_TYPE 0x00000001 +#define XFS_AT_MODE 0x00000002 +#define XFS_AT_UID 0x00000004 +#define XFS_AT_GID 0x00000008 +#define XFS_AT_FSID 0x00000010 +#define XFS_AT_NODEID 0x00000020 +#define XFS_AT_NLINK 0x00000040 +#define XFS_AT_SIZE 0x00000080 +#define XFS_AT_ATIME 0x00000100 +#define XFS_AT_MTIME 0x00000200 +#define XFS_AT_CTIME 0x00000400 +#define XFS_AT_RDEV 0x00000800 +#define XFS_AT_BLKSIZE 0x00001000 +#define XFS_AT_NBLOCKS 0x00002000 +#define XFS_AT_VCODE 0x00004000 +#define XFS_AT_MAC 0x00008000 +#define XFS_AT_UPDATIME 0x00010000 +#define XFS_AT_UPDMTIME 0x00020000 +#define XFS_AT_UPDCTIME 0x00040000 +#define XFS_AT_ACL 0x00080000 +#define XFS_AT_CAP 0x00100000 +#define XFS_AT_INF 0x00200000 +#define XFS_AT_XFLAGS 0x00400000 +#define XFS_AT_EXTSIZE 0x00800000 +#define XFS_AT_NEXTENTS 0x01000000 +#define XFS_AT_ANEXTENTS 0x02000000 +#define XFS_AT_PROJID 0x04000000 +#define XFS_AT_SIZE_NOPERM 0x08000000 +#define XFS_AT_GENCOUNT 0x10000000 + +#define XFS_AT_ALL (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\ + XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\ + XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\ + XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|XFS_AT_MAC|\ + XFS_AT_ACL|XFS_AT_CAP|XFS_AT_INF|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|\ + XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_PROJID|XFS_AT_GENCOUNT) + +#define XFS_AT_STAT (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\ + XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\ + XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\ + XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_PROJID) + +#define XFS_AT_TIMES (XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME) + +#define XFS_AT_UPDTIMES (XFS_AT_UPDATIME|XFS_AT_UPDMTIME|XFS_AT_UPDCTIME) + +#define XFS_AT_NOSET (XFS_AT_NLINK|XFS_AT_RDEV|XFS_AT_FSID|XFS_AT_NODEID|\ + XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\ + XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT) + +#define VREAD 00400 +#define VWRITE 00200 +#define VEXEC 00100 +#define VSGID 02000 /* set group id on execution */ +#define MODEMASK 07777 /* mode bits plus permission bits */ + +/* + * Check whether mandatory file locking is enabled. 
+ */ +#define MANDLOCK(vp, mode) \ + ((vp)->v_type == VREG && ((mode) & (VSGID|(VEXEC>>3))) == VSGID) + +extern void vn_init(void); +extern int vn_wait(struct vnode *); +extern vnode_t *vn_initialize(struct inode *); + +/* + * Acquiring and invalidating vnodes: + * + * if (vn_get(vp, version, 0)) + * ...; + * vn_purge(vp, version); + * + * vn_get and vn_purge must be called with vmap_t arguments, sampled + * while a lock that the vnode's VOP_RECLAIM function acquires is + * held, to ensure that the vnode sampled with the lock held isn't + * recycled (VOP_RECLAIMed) or deallocated between the release of the lock + * and the subsequent vn_get or vn_purge. + */ + +/* + * vnode_map structures _must_ match vn_epoch and vnode structure sizes. + */ +typedef struct vnode_map { + vfs_t *v_vfsp; + vnumber_t v_number; /* in-core vnode number */ + xfs_ino_t v_ino; /* inode # */ +} vmap_t; + +#define VMAP(vp, vmap) {(vmap).v_vfsp = (vp)->v_vfsp, \ + (vmap).v_number = (vp)->v_number, \ + (vmap).v_ino = (vp)->v_inode.i_ino; } + +extern void vn_purge(struct vnode *, vmap_t *); +extern vnode_t *vn_get(struct vnode *, vmap_t *); +extern int vn_revalidate(struct vnode *); +extern void vn_remove(struct vnode *); + +static inline int vn_count(struct vnode *vp) +{ + return atomic_read(&LINVFS_GET_IP(vp)->i_count); +} + +/* + * Vnode reference counting functions (and macros for compatibility). + */ +extern vnode_t *vn_hold(struct vnode *); +extern void vn_rele(struct vnode *); + +#if defined(CONFIG_XFS_VNODE_TRACING) + +#define VN_HOLD(vp) \ + ((void)vn_hold(vp), \ + vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address)) +#define VN_RELE(vp) \ + (vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \ + iput(LINVFS_GET_IP(vp))) + +#else /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */ + +#define VN_HOLD(vp) ((void)vn_hold(vp)) +#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp))) + +#endif /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */ + +/* + * Vname handling macros. + */ +#define VNAME(dentry) ((char *) (dentry)->d_name.name) +#define VNAMELEN(dentry) ((dentry)->d_name.len) +#define VNAME_TO_VNODE(dentry) (LINVFS_GET_VP((dentry)->d_inode)) + +/* + * Vnode spinlock manipulation. + */ +#define VN_LOCK(vp) mutex_spinlock(&(vp)->v_lock) +#define VN_UNLOCK(vp, s) mutex_spinunlock(&(vp)->v_lock, s) +#define VN_FLAGSET(vp,b) vn_flagset(vp,b) +#define VN_FLAGCLR(vp,b) vn_flagclr(vp,b) + +static __inline__ void vn_flagset(struct vnode *vp, uint flag) +{ + spin_lock(&vp->v_lock); + vp->v_flag |= flag; + spin_unlock(&vp->v_lock); +} + +static __inline__ void vn_flagclr(struct vnode *vp, uint flag) +{ + spin_lock(&vp->v_lock); + vp->v_flag &= ~flag; + spin_unlock(&vp->v_lock); +} + +/* + * Some useful predicates. + */ +#define VN_MAPPED(vp) ((LINVFS_GET_IP(vp)->i_mapping->i_mmap != NULL) || \ + (LINVFS_GET_IP(vp)->i_mapping->i_mmap_shared != NULL)) +#define VN_CACHED(vp) (LINVFS_GET_IP(vp)->i_mapping->nrpages) +#define VN_DIRTY(vp) (!list_empty(&(LINVFS_GET_IP(vp)->i_dirty_data_buffers))) +#define VMODIFY(vp) VN_FLAGSET(vp, VMODIFIED) +#define VUNMODIFY(vp) VN_FLAGCLR(vp, VMODIFIED) + +/* + * Flags to VOP_SETATTR/VOP_GETATTR. 
+ */ +#define ATTR_UTIME 0x01 /* non-default utime(2) request */ +#define ATTR_EXEC 0x02 /* invocation from exec(2) */ +#define ATTR_COMM 0x04 /* yield common vp attributes */ +#define ATTR_DMI 0x08 /* invocation from a DMI function */ +#define ATTR_LAZY 0x80 /* set/get attributes lazily */ +#define ATTR_NONBLOCK 0x100 /* return EAGAIN if operation would block */ +#define ATTR_NOLOCK 0x200 /* Don't grab any conflicting locks */ +#define ATTR_NOSIZETOK 0x400 /* Don't get the DVN_SIZE_READ token */ + +/* + * Flags to VOP_FSYNC and VOP_RECLAIM. + */ +#define FSYNC_NOWAIT 0 /* asynchronous flush */ +#define FSYNC_WAIT 0x1 /* synchronous fsync or forced reclaim */ +#define FSYNC_INVAL 0x2 /* flush and invalidate cached data */ +#define FSYNC_DATA 0x4 /* synchronous fsync of data only */ + +#if (defined(CONFIG_XFS_VNODE_TRACING)) + +#define VNODE_TRACE_SIZE 16 /* number of trace entries */ + +/* + * Tracing entries. + */ +#define VNODE_KTRACE_ENTRY 1 +#define VNODE_KTRACE_EXIT 2 +#define VNODE_KTRACE_HOLD 3 +#define VNODE_KTRACE_REF 4 +#define VNODE_KTRACE_RELE 5 + +extern void vn_trace_entry(struct vnode *, char *, inst_t *); +extern void vn_trace_exit(struct vnode *, char *, inst_t *); +extern void vn_trace_hold(struct vnode *, char *, int, inst_t *); +extern void vn_trace_ref(struct vnode *, char *, int, inst_t *); +extern void vn_trace_rele(struct vnode *, char *, int, inst_t *); +#define VN_TRACE(vp) \ + vn_trace_ref(vp, __FILE__, __LINE__, (inst_t *)__return_address) + +#else /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */ + +#define vn_trace_entry(a,b,c) +#define vn_trace_exit(a,b,c) +#define vn_trace_hold(a,b,c,d) +#define vn_trace_ref(a,b,c,d) +#define vn_trace_rele(a,b,c,d) +#define VN_TRACE(vp) + +#endif /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */ + +#endif /* __XFS_VNODE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/Makefile linux.22-ac2/fs/xfs/Makefile --- linux.vanilla/fs/xfs/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/Makefile 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,146 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. +# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# + +EXTRA_CFLAGS += -I. 
-funsigned-char + +ifeq ($(CONFIG_XFS_DEBUG),y) + EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG -DXFSDEBUG +endif +ifeq ($(CONFIG_PAGEBUF_DEBUG),y) + EXTRA_CFLAGS += -DPAGEBUF_TRACE +endif + +# fs/Makefile enters fs/xfs twice if CONFIG_XFS_FS is y, once for kernel and +# once for modules. This is necessary because xfsidbg can be built as a module +# even if xfs is in kernel. Alas the shorthand form +# O_TARGET := xfs.o +# obj-m := $(O_TARGET) +# fails when the makefile is run more than once, code gets compiled as both +# kernel and as module, which one gets linked depends on the phase of the moon. +# I just love these layer violations where a makefile behaves differently +# depending on changes to its parent. Work around by only setting obj-m when +# xfs is selected as a module. Keith Owens. + +O_TARGET := xfs.o +ifeq ($(CONFIG_XFS_FS),m) + obj-m := $(O_TARGET) +endif + +obj-$(CONFIG_XFS_RT) += xfs_rtalloc.o +obj-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o +obj-$(CONFIG_XFS_POSIX_CAP) += xfs_cap.o +obj-$(CONFIG_XFS_POSIX_MAC) += xfs_mac.o + +obj-y += xfs_alloc.o \ + xfs_alloc_btree.o \ + xfs_attr.o \ + xfs_attr_fetch.o \ + xfs_attr_leaf.o \ + xfs_bit.o \ + xfs_bmap.o \ + xfs_bmap_btree.o \ + xfs_btree.o \ + xfs_buf_item.o \ + xfs_da_btree.o \ + xfs_dir.o \ + xfs_dir2.o \ + xfs_dir2_block.o \ + xfs_dir2_data.o \ + xfs_dir2_leaf.o \ + xfs_dir2_node.o \ + xfs_dir2_sf.o \ + xfs_dir2_trace.o \ + xfs_dir_leaf.o \ + xfs_error.o \ + xfs_extfree_item.o \ + xfs_fsops.o \ + xfs_ialloc.o \ + xfs_ialloc_btree.o \ + xfs_iget.o \ + xfs_inode.o \ + xfs_inode_item.o \ + xfs_iocore.o \ + xfs_itable.o \ + xfs_dfrag.o \ + xfs_log.o \ + xfs_log_recover.o \ + xfs_macros.o \ + xfs_mount.o \ + xfs_rename.o \ + xfs_trans.o \ + xfs_trans_ail.o \ + xfs_trans_buf.o \ + xfs_trans_extfree.o \ + xfs_trans_inode.o \ + xfs_trans_item.o \ + xfs_utils.o \ + xfs_vfsops.o \ + xfs_vnodeops.o \ + xfs_rw.o + +# Objects not built in this directory +obj-y += pagebuf/pagebuf.o \ + linux/linux_xfs.o \ + support/support_xfs.o + +subdir-$(CONFIG_XFS_FS) += pagebuf linux support + +ifeq ($(CONFIG_XFS_QUOTA),y) + subdir-$(CONFIG_XFS_FS) += quota + obj-y += quota/xfs_quota.o +endif + +# Quota and DMAPI stubs +obj-y += xfs_dmops.o \ + xfs_qmops.o + +# If both xfs and kdb modules are built in then xfsidbg is built in. If xfs is +# a module and kdb modules are being compiled then xfsidbg must be a module, to +# follow xfs. If xfs is built in then xfsidbg tracks the kdb module state. +# This must come after the main xfs code so xfs initialises before xfsidbg. +# KAO +ifneq ($(CONFIG_KDB_MODULES),) + ifeq ($(CONFIG_XFS_FS),y) + obj-$(CONFIG_KDB_MODULES) += xfsidbg.o + else + obj-$(CONFIG_XFS_FS) += xfsidbg.o + endif +endif + +CFLAGS_xfsidbg.o += -I $(TOPDIR)/arch/$(ARCH)/kdb + +include $(TOPDIR)/Rules.make + +# This is really nasty, but Rules.make was never designed for multi directory +# modules. Keith Owens. + +xfs.o: $(patsubst %,_modsubdir_%,$(subdir-m)) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/pagebuf/Makefile linux.22-ac2/fs/xfs/pagebuf/Makefile --- linux.vanilla/fs/xfs/pagebuf/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/pagebuf/Makefile 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,50 @@ +# +# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. +# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# +# Makefile for the linux pagebuf routines. +# + +ifeq ($(CONFIG_PAGEBUF_DEBUG),y) + EXTRA_CFLAGS += -g -DDEBUG -DSTATIC="" -DPAGEBUF_TRACE +endif +EXTRA_CFLAGS += -I.. + +O_TARGET := pagebuf.o + +ifneq ($(MAKECMDGOALS),modules_install) + obj-m := $(O_TARGET) +endif + +export-objs += page_buf.o +obj-y += page_buf.o \ + page_buf_locking.o + +include $(TOPDIR)/Rules.make diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/pagebuf/page_buf.c linux.22-ac2/fs/xfs/pagebuf/page_buf.c --- linux.vanilla/fs/xfs/pagebuf/page_buf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/pagebuf/page_buf.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,2421 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * page_buf.c + * + * The page_buf module provides an abstract buffer cache model on top of + * the Linux page cache. Cached metadata blocks for a file system are + * hashed to the inode for the block device. The page_buf module + * assembles buffer (page_buf_t) objects on demand to aggregate such + * cached pages for I/O. 
+ * + * + * Written by Steve Lord, Jim Mostek, Russell Cattelan + * and Rajagopal Ananthanarayanan ("ananth") at SGI. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "page_buf_internal.h" + +#define NBBY 8 +#define BBSHIFT 9 +#define BN_ALIGN_MASK ((1 << (PAGE_CACHE_SHIFT - BBSHIFT)) - 1) + +#ifndef GFP_READAHEAD +#define GFP_READAHEAD 0 +#endif + +/* + * A backport of the 2.5 scheduler is used by many vendors of 2.4-based + * distributions. + * We can only guess it's presences by the lack of the SCHED_YIELD flag. + * If the heuristic doesn't work, change this define by hand. + */ +#ifndef SCHED_YIELD +#define __HAVE_NEW_SCHEDULER 1 +#endif + +/* + * cpumask_t is used for supporting NR_CPUS > BITS_PER_LONG. + * If support for this is present, migrate_to_cpu exists and provides + * a wrapper around the set_cpus_allowed routine. + */ +#ifdef copy_cpumask +#define __HAVE_CPUMASK_T 1 +#endif + +#ifndef __HAVE_CPUMASK_T +# ifndef __HAVE_NEW_SCHEDULER +# define migrate_to_cpu(cpu) \ + do { current->cpus_allowed = 1UL << (cpu); } while (0) +# else +# define migrate_to_cpu(cpu) \ + set_cpus_allowed(current, 1UL << (cpu)) +# endif +#endif + +/* + * Debug code + */ + +#ifdef PAGEBUF_TRACE +static spinlock_t pb_trace_lock = SPIN_LOCK_UNLOCKED; +struct pagebuf_trace_buf pb_trace; +EXPORT_SYMBOL(pb_trace); +EXPORT_SYMBOL(pb_trace_func); +#define CIRC_INC(i) (((i) + 1) & (PB_TRACE_BUFSIZE - 1)) + +void +pb_trace_func( + page_buf_t *pb, + int event, + void *misc, + void *ra) +{ + int j; + unsigned long flags; + + if (!pb_params.p_un.debug) return; + + if (ra == NULL) ra = (void *)__builtin_return_address(0); + + spin_lock_irqsave(&pb_trace_lock, flags); + j = pb_trace.start; + pb_trace.start = CIRC_INC(j); + spin_unlock_irqrestore(&pb_trace_lock, flags); + + pb_trace.buf[j].pb = (unsigned long) pb; + pb_trace.buf[j].event = event; + pb_trace.buf[j].flags = pb->pb_flags; + pb_trace.buf[j].hold = pb->pb_hold.counter; + pb_trace.buf[j].lock_value = pb->pb_sema.count.counter; + pb_trace.buf[j].task = (void *)current; + pb_trace.buf[j].misc = misc; + pb_trace.buf[j].ra = ra; + pb_trace.buf[j].offset = pb->pb_file_offset; + pb_trace.buf[j].size = pb->pb_buffer_length; +} +#endif /* PAGEBUF_TRACE */ + +/* + * File wide globals + */ + +STATIC kmem_cache_t *pagebuf_cache; + +#define MAX_IO_DAEMONS NR_CPUS +#define CPU_TO_DAEMON(cpu) (cpu) +STATIC int pb_logio_daemons[MAX_IO_DAEMONS]; +STATIC struct list_head pagebuf_logiodone_tq[MAX_IO_DAEMONS]; +STATIC wait_queue_head_t pagebuf_logiodone_wait[MAX_IO_DAEMONS]; +STATIC int pb_dataio_daemons[MAX_IO_DAEMONS]; +STATIC struct list_head pagebuf_dataiodone_tq[MAX_IO_DAEMONS]; +STATIC wait_queue_head_t pagebuf_dataiodone_wait[MAX_IO_DAEMONS]; + +/* + * For pre-allocated buffer head pool + */ + +#define NR_RESERVED_BH 64 +static wait_queue_head_t pb_resv_bh_wait; +static spinlock_t pb_resv_bh_lock = SPIN_LOCK_UNLOCKED; +struct buffer_head *pb_resv_bh = NULL; /* list of bh */ +int pb_resv_bh_cnt = 0; /* # of bh available */ + +STATIC void pagebuf_daemon_wakeup(int); +STATIC void _pagebuf_ioapply(page_buf_t *); +STATIC void pagebuf_delwri_queue(page_buf_t *, int); + +/* + * Pagebuf module configuration parameters, exported via + * /proc/sys/vm/pagebuf + */ + +unsigned long pagebuf_min[P_PARAM] = { HZ/2, 1*HZ, 0, 0 }; +unsigned long pagebuf_max[P_PARAM] = { HZ*30, HZ*300, 1, 1 }; + +pagebuf_param_t pb_params = {{ HZ, 15 * HZ, 0, 0 }}; + +/* + * Pagebuf statistics 
variables + */ + +struct pbstats pbstats; + +/* + * Pagebuf allocation / freeing. + */ + +#define pb_to_gfp(flags) \ + (((flags) & PBF_READ_AHEAD) ? GFP_READAHEAD : \ + ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) + +#define pagebuf_allocate(flags) \ + kmem_cache_alloc(pagebuf_cache, pb_to_gfp(flags)) +#define pagebuf_deallocate(pb) \ + kmem_cache_free(pagebuf_cache, (pb)); + +/* + * Pagebuf hashing + */ + +#define NBITS 8 +#define NHASH (1<pb_hash_index] + +STATIC int +_bhash( + dev_t dev, + loff_t base) +{ + int bit, hval; + + base >>= 9; + /* + * dev_t is 16 bits, loff_t is always 64 bits + */ + base ^= dev; + for (bit = hval = 0; base != 0 && bit < sizeof(base) * 8; bit += NBITS) { + hval ^= (int)base & (NHASH-1); + base >>= NBITS; + } + return hval; +} + +/* + * Mapping of multi-page buffers into contingous virtual space + */ + +STATIC void *pagebuf_mapout_locked(page_buf_t *); + +STATIC spinlock_t as_lock = SPIN_LOCK_UNLOCKED; +typedef struct a_list { + void *vm_addr; + struct a_list *next; +} a_list_t; +STATIC a_list_t *as_free_head; +STATIC int as_list_len; + + +/* + * Try to batch vunmaps because they are costly. + */ +STATIC void +free_address( + void *addr) +{ + a_list_t *aentry; + + aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC); + if (aentry) { + spin_lock(&as_lock); + aentry->next = as_free_head; + aentry->vm_addr = addr; + as_free_head = aentry; + as_list_len++; + spin_unlock(&as_lock); + } else { + vunmap(addr); + } +} + +STATIC void +purge_addresses(void) +{ + a_list_t *aentry, *old; + + if (as_free_head == NULL) + return; + + spin_lock(&as_lock); + aentry = as_free_head; + as_free_head = NULL; + as_list_len = 0; + spin_unlock(&as_lock); + + while ((old = aentry) != NULL) { + vunmap(aentry->vm_addr); + aentry = aentry->next; + kfree(old); + } +} + +/* + * Locking model: + * + * Buffers associated with inodes for which buffer locking + * is not enabled are not protected by semaphores, and are + * assumed to be exclusively owned by the caller. There is + * spinlock in the buffer, for use by the caller when concurrent + * access is possible. + */ + +/* + * Internal pagebuf object manipulation + */ + +STATIC void +_pagebuf_initialize( + page_buf_t *pb, + pb_target_t *target, + loff_t range_base, + size_t range_length, + page_buf_flags_t flags) +{ + /* + * We don't want certain flags to appear in pb->pb_flags. + */ + flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD); + + memset(pb, 0, sizeof(page_buf_t)); + atomic_set(&pb->pb_hold, 1); + init_MUTEX_LOCKED(&pb->pb_iodonesema); + INIT_LIST_HEAD(&pb->pb_list); + INIT_LIST_HEAD(&pb->pb_hash_list); + init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */ + PB_SET_OWNER(pb); + pb->pb_target = target; + pb->pb_file_offset = range_base; + /* + * Set buffer_length and count_desired to the same value initially. + * IO routines should use count_desired, which will be the same in + * most cases but may be reset (e.g. XFS recovery). + */ + pb->pb_buffer_length = pb->pb_count_desired = range_length; + pb->pb_flags = flags | PBF_NONE; + pb->pb_bn = PAGE_BUF_DADDR_NULL; + atomic_set(&pb->pb_pin_count, 0); + init_waitqueue_head(&pb->pb_waiters); + + PB_STATS_INC(pbstats.pb_create); + PB_TRACE(pb, PB_TRACE_REC(get), target); +} + +/* + * Allocate a page array capable of holding a specified number + * of pages, and point the page buf at it. 
+ */ +STATIC int +_pagebuf_get_pages( + page_buf_t *pb, + int page_count, + page_buf_flags_t flags) +{ + int gpf_mask = pb_to_gfp(flags); + + /* Make sure that we have a page list */ + if (pb->pb_pages == NULL) { + pb->pb_offset = page_buf_poff(pb->pb_file_offset); + pb->pb_page_count = page_count; + if (page_count <= PB_PAGES) { + pb->pb_pages = pb->pb_page_array; + } else { + pb->pb_pages = kmalloc(sizeof(struct page *) * + page_count, gpf_mask); + if (pb->pb_pages == NULL) + return -ENOMEM; + } + memset(pb->pb_pages, 0, sizeof(struct page *) * page_count); + } + return 0; +} + +/* + * Walk a pagebuf releasing all the pages contained within it. + */ +STATIC inline void +_pagebuf_freepages( + page_buf_t *pb) +{ + int buf_index; + + for (buf_index = 0; buf_index < pb->pb_page_count; buf_index++) { + struct page *page = pb->pb_pages[buf_index]; + + if (page) { + pb->pb_pages[buf_index] = NULL; + page_cache_release(page); + } + } + + if (pb->pb_pages != pb->pb_page_array) + kfree(pb->pb_pages); +} + +/* + * _pagebuf_free_object + * + * _pagebuf_free_object releases the contents specified buffer. + * The modification state of any associated pages is left unchanged. + */ +void +_pagebuf_free_object( + pb_hash_t *hash, /* hash bucket for buffer */ + page_buf_t *pb) /* buffer to deallocate */ +{ + page_buf_flags_t pb_flags = pb->pb_flags; + + PB_TRACE(pb, PB_TRACE_REC(free_obj), 0); + pb->pb_flags |= PBF_FREED; + + if (hash) { + if (!list_empty(&pb->pb_hash_list)) { + hash->pb_count--; + list_del_init(&pb->pb_hash_list); + } + spin_unlock(&hash->pb_hash_lock); + } + + if (!(pb_flags & PBF_FREED)) { + /* release any virtual mapping */ ; + if (pb->pb_flags & _PBF_ADDR_ALLOCATED) { + void *vaddr = pagebuf_mapout_locked(pb); + if (vaddr) { + free_address(vaddr); + } + } + + if (pb->pb_flags & _PBF_MEM_ALLOCATED) { + if (pb->pb_pages) { + /* release the pages in the address list */ + if (pb->pb_pages[0] && + PageSlab(pb->pb_pages[0])) { + /* + * This came from the slab + * allocator free it as such + */ + kfree(pb->pb_addr); + } else { + _pagebuf_freepages(pb); + } + + pb->pb_pages = NULL; + } + pb->pb_flags &= ~_PBF_MEM_ALLOCATED; + } + } + + pagebuf_deallocate(pb); +} + +/* + * _pagebuf_lookup_pages + * + * _pagebuf_lookup_pages finds all pages which match the buffer + * in question and the range of file offsets supplied, + * and builds the page list for the buffer, if the + * page list is not already formed or if not all of the pages are + * already in the list. Invalid pages (pages which have not yet been + * read in from disk) are assigned for any pages which are not found. + */ +STATIC int +_pagebuf_lookup_pages( + page_buf_t *pb, + struct address_space *aspace, + page_buf_flags_t flags) +{ + loff_t next_buffer_offset; + unsigned long page_count, pi, index; + struct page *page; + int gfp_mask, retry_count = 5, rval = 0; + int all_mapped, good_pages; + size_t blocksize; + + /* For pagebufs where we want to map an address, do not use + * highmem pages - so that we do not need to use kmap resources + * to access the data. + * + * For pages where the caller has indicated there may be resource + * contention (e.g. called from a transaction) do not flush + * delalloc pages to obtain memory. 
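
A little further down in this function, a failed find_or_create_page() does not return -ENOMEM straight away: for ordinary requests the code wakes the delayed-write daemon so dirty pagebufs can be flushed, sleeps briefly, and retries a handful of times, while readahead gets no retries at all. A userspace model of that kick-the-flusher back-off loop, with the daemon reduced to a stub (all names here are illustrative):

/* Userspace model of the bounded retry-after-kicking-the-flusher loop
 * used when page allocation fails (illustrative, not kernel code). */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int failures_left = 3;       /* pretend the first 3 attempts fail */

static void *try_alloc_page(void)
{
	if (failures_left > 0) {
		failures_left--;
		return NULL;
	}
	return malloc(4096);
}

static void wake_flush_daemon(void)
{
	/* In the patch this is pagebuf_daemon_wakeup(1): force a delwri flush. */
	fprintf(stderr, "kicking flusher\n");
}

static void *alloc_with_backoff(int retries)
{
	void *p;

	while ((p = try_alloc_page()) == NULL) {
		if (retries-- <= 0)
			return NULL;        /* caller sees -ENOMEM */
		wake_flush_daemon();
		usleep(10000);              /* stand-in for schedule_timeout(10) */
	}
	return p;
}

int main(void)
{
	void *p = alloc_with_backoff(5);

	printf("%s\n", p ? "allocated after retries" : "ENOMEM");
	free(p);
	return 0;
}
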
+ */ + + if (flags & PBF_READ_AHEAD) { + gfp_mask = GFP_READAHEAD; + retry_count = 0; + } else if (flags & PBF_DONT_BLOCK) { + gfp_mask = GFP_NOFS; + } else if (flags & PBF_MAPPABLE) { + gfp_mask = GFP_KERNEL; + } else { + gfp_mask = GFP_HIGHUSER; + } + + next_buffer_offset = pb->pb_file_offset + pb->pb_buffer_length; + + good_pages = page_count = (page_buf_btoc(next_buffer_offset) - + page_buf_btoct(pb->pb_file_offset)); + + if (pb->pb_flags & _PBF_ALL_PAGES_MAPPED) { + /* Bring pages forward in cache */ + for (pi = 0; pi < page_count; pi++) { + mark_page_accessed(pb->pb_pages[pi]); + } + if ((flags & PBF_MAPPED) && !(pb->pb_flags & PBF_MAPPED)) { + all_mapped = 1; + goto mapit; + } + return 0; + } + + /* Ensure pb_pages field has been initialised */ + rval = _pagebuf_get_pages(pb, page_count, flags); + if (rval) + return rval; + + rval = pi = 0; + blocksize = pb->pb_target->pbr_bsize; + + /* Enter the pages in the page list */ + index = (pb->pb_file_offset - pb->pb_offset) >> PAGE_CACHE_SHIFT; + for (all_mapped = 1; pi < page_count; pi++, index++) { + if (pb->pb_pages[pi] == 0) { + retry: + page = find_or_create_page(aspace, index, gfp_mask); + if (!page) { + if (--retry_count > 0) { + PB_STATS_INC(pbstats.pb_page_retries); + pagebuf_daemon_wakeup(1); + current->state = TASK_UNINTERRUPTIBLE; + schedule_timeout(10); + goto retry; + } + rval = -ENOMEM; + all_mapped = 0; + continue; + } + PB_STATS_INC(pbstats.pb_page_found); + mark_page_accessed(page); + pb->pb_pages[pi] = page; + } else { + page = pb->pb_pages[pi]; + lock_page(page); + } + + /* If we need to do I/O on a page record the fact */ + if (!Page_Uptodate(page)) { + good_pages--; + if ((blocksize == PAGE_CACHE_SIZE) && + (flags & PBF_READ)) + pb->pb_locked = 1; + } + } + + if (!pb->pb_locked) { + for (pi = 0; pi < page_count; pi++) { + if (pb->pb_pages[pi]) + unlock_page(pb->pb_pages[pi]); + } + } + +mapit: + pb->pb_flags |= _PBF_MEM_ALLOCATED; + if (all_mapped) { + pb->pb_flags |= _PBF_ALL_PAGES_MAPPED; + + /* A single page buffer is always mappable */ + if (page_count == 1) { + pb->pb_addr = (caddr_t) + page_address(pb->pb_pages[0]) + pb->pb_offset; + pb->pb_flags |= PBF_MAPPED; + } else if (flags & PBF_MAPPED) { + if (as_list_len > 64) + purge_addresses(); + pb->pb_addr = vmap(pb->pb_pages, page_count, + VM_ALLOC, PAGE_KERNEL); + if (pb->pb_addr == NULL) + return -ENOMEM; + pb->pb_addr += pb->pb_offset; + pb->pb_flags |= PBF_MAPPED | _PBF_ADDR_ALLOCATED; + } + } + /* If some pages were found with data in them + * we are not in PBF_NONE state. + */ + if (good_pages != 0) { + pb->pb_flags &= ~(PBF_NONE); + if (good_pages != page_count) { + pb->pb_flags |= PBF_PARTIAL; + } + } + + PB_TRACE(pb, PB_TRACE_REC(look_pg), good_pages); + + return rval; +} + + +/* + * Pre-allocation of a pool of buffer heads for use in + * low-memory situations. + */ + +/* + * _pagebuf_prealloc_bh + * + * Pre-allocate a pool of "count" buffer heads at startup. + * Puts them on a list at "pb_resv_bh" + * Returns number of bh actually allocated to pool. + */ +STATIC int +_pagebuf_prealloc_bh( + int count) +{ + struct buffer_head *bh; + int i; + + for (i = 0; i < count; i++) { + bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL); + if (!bh) + break; + bh->b_pprev = &pb_resv_bh; + bh->b_next = pb_resv_bh; + pb_resv_bh = bh; + pb_resv_bh_cnt++; + } + return i; +} + +/* + * _pagebuf_get_prealloc_bh + * + * Get one buffer head from our pre-allocated pool. + * If pool is empty, sleep 'til one comes back in. + * Returns aforementioned buffer head. 
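
The reserve described above (and implemented just below) is a classic bounded emergency cache: a fixed number of buffer heads allocated at startup, a spinlock-protected singly linked list, getters that sleep on a wait queue when the list is empty, and freers that either refill the list and wake a sleeper or hand the object back to the slab once the list is full. A userspace sketch of the same shape, with a pthread mutex and condition variable standing in for the spinlock and wait queue (illustrative only):

/* Userspace sketch of the NR_RESERVED_BH-style emergency pool. */
#include <pthread.h>
#include <stdlib.h>

#define POOL_MAX 64

struct node { struct node *next; };

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pool_wait = PTHREAD_COND_INITIALIZER;
static struct node *pool_head;
static int pool_cnt;

static void pool_prefill(int count)
{
	while (count-- > 0) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			break;
		n->next = pool_head;
		pool_head = n;
		pool_cnt++;
	}
}

static struct node *pool_get(void)
{
	struct node *n;

	pthread_mutex_lock(&pool_lock);
	while (pool_cnt < 1)                /* sleep until a node comes back */
		pthread_cond_wait(&pool_wait, &pool_lock);
	n = pool_head;
	pool_head = n->next;
	pool_cnt--;
	pthread_mutex_unlock(&pool_lock);
	return n;
}

static void pool_put(struct node *n)
{
	pthread_mutex_lock(&pool_lock);
	if (pool_cnt >= POOL_MAX) {         /* pool full: really free it */
		pthread_mutex_unlock(&pool_lock);
		free(n);
		return;
	}
	n->next = pool_head;
	pool_head = n;
	pool_cnt++;
	pthread_cond_signal(&pool_wait);    /* wake one waiter, if any */
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct node *n;

	pool_prefill(POOL_MAX);
	n = pool_get();
	pool_put(n);
	return 0;
}
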
+ */ +STATIC struct buffer_head * +_pagebuf_get_prealloc_bh(void) +{ + unsigned long flags; + struct buffer_head *bh; + DECLARE_WAITQUEUE (wait, current); + + spin_lock_irqsave(&pb_resv_bh_lock, flags); + + if (pb_resv_bh_cnt < 1) { + add_wait_queue(&pb_resv_bh_wait, &wait); + do { + set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock_irqrestore(&pb_resv_bh_lock, flags); + pagebuf_run_queues(NULL); + schedule(); + spin_lock_irqsave(&pb_resv_bh_lock, flags); + } while (pb_resv_bh_cnt < 1); + __set_current_state(TASK_RUNNING); + remove_wait_queue(&pb_resv_bh_wait, &wait); + } + + BUG_ON(pb_resv_bh_cnt < 1); + BUG_ON(!pb_resv_bh); + + bh = pb_resv_bh; + pb_resv_bh = bh->b_next; + pb_resv_bh_cnt--; + + spin_unlock_irqrestore(&pb_resv_bh_lock, flags); + return bh; +} + +/* + * _pagebuf_free_bh + * + * Take care of buffer heads that we're finished with. + * Call this instead of just kmem_cache_free(bh_cachep, bh) + * when you're done with a bh. + * + * If our pre-allocated pool is full, just free the buffer head. + * Otherwise, put it back in the pool, and wake up anybody + * waiting for one. + */ +STATIC inline void +_pagebuf_free_bh( + struct buffer_head *bh) +{ + unsigned long flags; + int free; + + if (! (free = pb_resv_bh_cnt >= NR_RESERVED_BH)) { + spin_lock_irqsave(&pb_resv_bh_lock, flags); + + if (! (free = pb_resv_bh_cnt >= NR_RESERVED_BH)) { + bh->b_pprev = &pb_resv_bh; + bh->b_next = pb_resv_bh; + pb_resv_bh = bh; + pb_resv_bh_cnt++; + + if (waitqueue_active(&pb_resv_bh_wait)) { + wake_up(&pb_resv_bh_wait); + } + } + + spin_unlock_irqrestore(&pb_resv_bh_lock, flags); + } + if (free) { + kmem_cache_free(bh_cachep, bh); + } +} + +/* + * Finding and Reading Buffers + */ + +/* + * _pagebuf_find + * + * Looks up, and creates if absent, a lockable buffer for + * a given range of an inode. The buffer is returned + * locked. If other overlapping buffers exist, they are + * released before the new buffer is created and locked, + * which may imply that this call will block until those buffers + * are unlocked. No I/O is implied by this call. 
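
The lookup below resolves lock contention in two steps: it takes a hold on the buffer, drops the hash spinlock, and tries the buffer semaphore without sleeping; only when that fails and the caller did not pass PBF_TRYLOCK does it fall back to a blocking lock. A userspace model of that trylock-first pattern, with a pthread mutex standing in for pb_sema (illustrative only):

/* Userspace model of the trylock-first, block-only-if-allowed pattern
 * used by _pagebuf_find() (illustrative, not kernel code). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_sema = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if the buffer was locked, 0 if the caller asked not to wait
 * and somebody else holds it. */
static int lock_buffer(int trylock_only)
{
	if (pthread_mutex_trylock(&buf_sema) == 0)
		return 1;                   /* fast path: nobody held it */
	if (trylock_only)
		return 0;                   /* PBF_TRYLOCK case: report "busy" */
	pthread_mutex_lock(&buf_sema);      /* otherwise wait for the owner */
	return 1;
}

int main(void)
{
	if (lock_buffer(0))
		printf("locked\n");
	/* A second non-blocking attempt while the lock is held reports busy. */
	printf("trylock again: %d\n", lock_buffer(1));
	pthread_mutex_unlock(&buf_sema);
	return 0;
}
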
+ */ +STATIC page_buf_t * +_pagebuf_find( /* find buffer for block */ + pb_target_t *target,/* target for block */ + loff_t ioff, /* starting offset of range */ + size_t isize, /* length of range */ + page_buf_flags_t flags, /* PBF_TRYLOCK */ + page_buf_t *new_pb)/* newly allocated buffer */ +{ + loff_t range_base; + size_t range_length; + int hval; + pb_hash_t *h; + struct list_head *p; + page_buf_t *pb; + int not_locked; + + range_base = (ioff << BBSHIFT); + range_length = (isize << BBSHIFT); + + /* Ensure we never do IOs smaller than the sector size */ + BUG_ON(range_length < (1 << target->pbr_sshift)); + + /* Ensure we never do IOs that are not sector aligned */ + BUG_ON(range_base & (loff_t)target->pbr_smask); + + hval = _bhash(target->pbr_bdev->bd_dev, range_base); + h = &pbhash[hval]; + + spin_lock(&h->pb_hash_lock); + list_for_each(p, &h->pb_hash) { + pb = list_entry(p, page_buf_t, pb_hash_list); + + if ((target == pb->pb_target) && + (pb->pb_file_offset == range_base) && + (pb->pb_buffer_length == range_length)) { + if (pb->pb_flags & PBF_FREED) + break; + /* If we look at something bring it to the + * front of the list for next time + */ + list_del(&pb->pb_hash_list); + list_add(&pb->pb_hash_list, &h->pb_hash); + goto found; + } + } + + /* No match found */ + if (new_pb) { + _pagebuf_initialize(new_pb, target, range_base, + range_length, flags | _PBF_LOCKABLE); + new_pb->pb_hash_index = hval; + h->pb_count++; + list_add(&new_pb->pb_hash_list, &h->pb_hash); + } else { + PB_STATS_INC(pbstats.pb_miss_locked); + } + + spin_unlock(&h->pb_hash_lock); + return (new_pb); + +found: + atomic_inc(&pb->pb_hold); + spin_unlock(&h->pb_hash_lock); + + /* Attempt to get the semaphore without sleeping, + * if this does not work then we need to drop the + * spinlock and do a hard attempt on the semaphore. + */ + not_locked = down_trylock(&pb->pb_sema); + if (not_locked) { + if (!(flags & PBF_TRYLOCK)) { + /* wait for buffer ownership */ + PB_TRACE(pb, PB_TRACE_REC(get_lk), 0); + pagebuf_lock(pb); + PB_STATS_INC(pbstats.pb_get_locked_waited); + } else { + /* We asked for a trylock and failed, no need + * to look at file offset and length here, we + * know that this pagebuf at least overlaps our + * pagebuf and is locked, therefore our buffer + * either does not exist, or is this buffer + */ + + pagebuf_rele(pb); + PB_STATS_INC(pbstats.pb_busy_locked); + return (NULL); + } + } else { + /* trylock worked */ + PB_SET_OWNER(pb); + } + + if (pb->pb_flags & PBF_STALE) + pb->pb_flags &= PBF_MAPPABLE | \ + PBF_MAPPED | \ + _PBF_LOCKABLE | \ + _PBF_ALL_PAGES_MAPPED | \ + _PBF_ADDR_ALLOCATED | \ + _PBF_MEM_ALLOCATED; + PB_TRACE(pb, PB_TRACE_REC(got_lk), 0); + PB_STATS_INC(pbstats.pb_get_locked); + return (pb); +} + + +/* + * pagebuf_find + * + * pagebuf_find returns a buffer matching the specified range of + * data for the specified target, if any of the relevant blocks + * are in memory. The buffer may have unallocated holes, if + * some, but not all, of the blocks are in memory. Even where + * pages are present in the buffer, not all of every page may be + * valid. + */ +page_buf_t * +pagebuf_find( /* find buffer for block */ + /* if the block is in memory */ + pb_target_t *target,/* target for block */ + loff_t ioff, /* starting offset of range */ + size_t isize, /* length of range */ + page_buf_flags_t flags) /* PBF_TRYLOCK */ +{ + return _pagebuf_find(target, ioff, isize, flags, NULL); +} + +/* + * pagebuf_get + * + * pagebuf_get assembles a buffer covering the specified range. 
+ * Some or all of the blocks in the range may be valid. Storage + * in memory for all portions of the buffer will be allocated, + * although backing storage may not be. If PBF_READ is set in + * flags, pagebuf_iostart is called also. + */ +page_buf_t * +pagebuf_get( /* allocate a buffer */ + pb_target_t *target,/* target for buffer */ + loff_t ioff, /* starting offset of range */ + size_t isize, /* length of range */ + page_buf_flags_t flags) /* PBF_TRYLOCK */ +{ + page_buf_t *pb, *new_pb; + int error; + + new_pb = pagebuf_allocate(flags); + if (unlikely(!new_pb)) + return (NULL); + + pb = _pagebuf_find(target, ioff, isize, flags, new_pb); + if (pb != new_pb) { + pagebuf_deallocate(new_pb); + if (unlikely(!pb)) + return (NULL); + } + + PB_STATS_INC(pbstats.pb_get); + + /* fill in any missing pages */ + error = _pagebuf_lookup_pages(pb, pb->pb_target->pbr_mapping, flags); + if (unlikely(error)) { + pagebuf_free(pb); + return (NULL); + } + + /* + * Always fill in the block number now, the mapped cases can do + * their own overlay of this later. + */ + pb->pb_bn = ioff; + pb->pb_count_desired = pb->pb_buffer_length; + + if (flags & PBF_READ) { + if (PBF_NOT_DONE(pb)) { + PB_TRACE(pb, PB_TRACE_REC(get_read), flags); + PB_STATS_INC(pbstats.pb_get_read); + pagebuf_iostart(pb, flags); + } else if (flags & PBF_ASYNC) { + /* + * Read ahead call which is already satisfied, + * drop the buffer + */ + if (flags & (PBF_LOCK | PBF_TRYLOCK)) + pagebuf_unlock(pb); + pagebuf_rele(pb); + return NULL; + } else { + /* We do not want read in the flags */ + pb->pb_flags &= ~PBF_READ; + } + } + + PB_TRACE(pb, PB_TRACE_REC(get_obj), flags); + return (pb); +} + +/* + * Create a skeletal pagebuf (no pages associated with it). + */ +page_buf_t * +pagebuf_lookup( + struct pb_target *target, + loff_t ioff, + size_t isize, + page_buf_flags_t flags) +{ + page_buf_t *pb; + + flags |= _PBF_PRIVATE_BH; + pb = pagebuf_allocate(flags); + if (pb) { + _pagebuf_initialize(pb, target, ioff, isize, flags); + } + return pb; +} + +/* + * If we are not low on memory then do the readahead in a deadlock + * safe manner. 
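
pagebuf_get() above ends with a three-way decision for PBF_READ callers: issue the read if the buffer still has missing data, quietly drop the buffer if this was a readahead that is already satisfied, or simply clear PBF_READ on a plain cache hit. A compact userspace model of that decision (names are descriptive, not from the patch):

/* Userspace model of the PBF_READ branch at the end of pagebuf_get(). */
#include <stdio.h>

enum read_action { ISSUE_READ, DROP_READAHEAD, CLEAR_READ };

static enum read_action read_decision(int buffer_not_done, int async_request)
{
	if (buffer_not_done)
		return ISSUE_READ;          /* PBF_NOT_DONE(pb): call pagebuf_iostart */
	if (async_request)
		return DROP_READAHEAD;      /* satisfied readahead: unlock and release */
	return CLEAR_READ;                  /* cache hit: strip PBF_READ, return pb */
}

int main(void)
{
	printf("%d %d %d\n",
	       read_decision(1, 1),         /* 0: data missing, read it */
	       read_decision(0, 1),         /* 1: readahead already done, drop */
	       read_decision(0, 0));        /* 2: synchronous cache hit */
	return 0;
}
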
+ */ +void +pagebuf_readahead( + pb_target_t *target, + loff_t ioff, + size_t isize, + page_buf_flags_t flags) +{ + flags |= (PBF_TRYLOCK|PBF_READ|PBF_ASYNC|PBF_MAPPABLE|PBF_READ_AHEAD); + pagebuf_get(target, ioff, isize, flags); +} + +page_buf_t * +pagebuf_get_empty( + pb_target_t *target) +{ + page_buf_t *pb; + + pb = pagebuf_allocate(_PBF_LOCKABLE); + if (pb) + _pagebuf_initialize(pb, target, 0, 0, _PBF_LOCKABLE); + return pb; +} + +static inline struct page * +mem_to_page( + void *addr) +{ + if (((unsigned long)addr < VMALLOC_START) || + ((unsigned long)addr >= VMALLOC_END)) { + return virt_to_page(addr); + } else { + return vmalloc_to_page(addr); + } +} + +int +pagebuf_associate_memory( + page_buf_t *pb, + void *mem, + size_t len) +{ + int rval; + int i = 0; + size_t ptr; + size_t end, end_cur; + off_t offset; + int page_count; + + page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT; + offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK); + if (offset && (len > PAGE_CACHE_SIZE)) + page_count++; + + /* Free any previous set of page pointers */ + if (pb->pb_pages && (pb->pb_pages != pb->pb_page_array)) { + kfree(pb->pb_pages); + } + pb->pb_pages = NULL; + pb->pb_addr = mem; + + rval = _pagebuf_get_pages(pb, page_count, 0); + if (rval) + return rval; + + pb->pb_offset = offset; + ptr = (size_t) mem & PAGE_CACHE_MASK; + end = PAGE_CACHE_ALIGN((size_t) mem + len); + end_cur = end; + /* set up first page */ + pb->pb_pages[0] = mem_to_page(mem); + + ptr += PAGE_CACHE_SIZE; + pb->pb_page_count = ++i; + while (ptr < end) { + pb->pb_pages[i] = mem_to_page((void *)ptr); + pb->pb_page_count = ++i; + ptr += PAGE_CACHE_SIZE; + } + pb->pb_locked = 0; + + pb->pb_count_desired = pb->pb_buffer_length = len; + pb->pb_flags |= PBF_MAPPED | _PBF_PRIVATE_BH; + + return 0; +} + +page_buf_t * +pagebuf_get_no_daddr( + size_t len, + pb_target_t *target) +{ + int rval; + void *rmem = NULL; + page_buf_flags_t flags = _PBF_LOCKABLE | PBF_FORCEIO; + page_buf_t *pb; + size_t tlen = 0; + + if (len > 0x20000) + return(NULL); + + pb = pagebuf_allocate(flags); + if (!pb) + return NULL; + + _pagebuf_initialize(pb, target, 0, len, flags); + + do { + if (tlen == 0) { + tlen = len; /* first time */ + } else { + kfree(rmem); /* free the mem from the previous try */ + tlen <<= 1; /* double the size and try again */ + } + if ((rmem = kmalloc(tlen, GFP_KERNEL)) == 0) { + pagebuf_free(pb); + return NULL; + } + } while ((size_t)rmem != ((size_t)rmem & ~target->pbr_smask)); + + if ((rval = pagebuf_associate_memory(pb, rmem, len)) != 0) { + kfree(rmem); + pagebuf_free(pb); + return NULL; + } + /* otherwise pagebuf_free just ignores it */ + pb->pb_flags |= _PBF_MEM_ALLOCATED; + PB_CLEAR_OWNER(pb); + up(&pb->pb_sema); /* Return unlocked pagebuf */ + + PB_TRACE(pb, PB_TRACE_REC(no_daddr), rmem); + + return pb; +} + + +/* + * pagebuf_hold + * + * Increment reference count on buffer, to hold the buffer concurrently + * with another thread which may release (free) the buffer asynchronously. + * + * Must hold the buffer already to call this function. + */ +void +pagebuf_hold( + page_buf_t *pb) +{ + atomic_inc(&pb->pb_hold); + PB_TRACE(pb, PB_TRACE_REC(hold), 0); +} + +/* + * pagebuf_free + * + * pagebuf_free releases the specified buffer. The modification + * state of any associated pages is left unchanged. 
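
pagebuf_get_no_daddr() above (which also refuses requests above 128KB) keeps doubling its kmalloc request until the slab allocator hands back memory aligned to the sector mask, relying on kmalloc's power-of-two caches being naturally aligned. A userspace model of that control flow; bucket_alloc() below is a stand-in that mimics slab alignment with aligned_alloc, since plain malloc() gives no such guarantee:

/* Userspace model of the "double the request until it comes back sector
 * aligned" loop in pagebuf_get_no_daddr() (illustrative only). */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static size_t roundup_pow2(size_t n)
{
	size_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static void *bucket_alloc(size_t len)       /* stand-in for kmalloc */
{
	size_t bucket = roundup_pow2(len);

	return aligned_alloc(bucket, bucket);   /* slab-like natural alignment */
}

static void *alloc_sector_aligned(size_t len, size_t smask)
{
	size_t tlen = 0;
	void *mem = NULL;

	do {
		if (tlen == 0) {
			tlen = len;             /* first attempt: requested size */
		} else {
			free(mem);              /* misaligned: try the next bucket */
			tlen <<= 1;
		}
		mem = bucket_alloc(tlen);
		if (!mem)
			return NULL;
	} while ((uintptr_t)mem & smask);

	return mem;
}

int main(void)
{
	void *p = alloc_sector_aligned(128, 511);   /* wants 512-byte alignment */

	printf("%p aligned=%d\n", p, ((uintptr_t)p & 511) == 0);
	free(p);
	return 0;
}
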
+ */ +void +pagebuf_free( + page_buf_t *pb) +{ + if (pb->pb_flags & _PBF_LOCKABLE) { + pb_hash_t *h = pb_hash(pb); + + spin_lock(&h->pb_hash_lock); + _pagebuf_free_object(h, pb); + } else { + _pagebuf_free_object(NULL, pb); + } +} + +/* + * pagebuf_rele + * + * pagebuf_rele releases a hold on the specified buffer. If the + * the hold count is 1, pagebuf_rele calls pagebuf_free. + */ +void +pagebuf_rele( + page_buf_t *pb) +{ + pb_hash_t *h; + + PB_TRACE(pb, PB_TRACE_REC(rele), pb->pb_relse); + if (pb->pb_flags & _PBF_LOCKABLE) { + h = pb_hash(pb); + spin_lock(&h->pb_hash_lock); + } else { + h = NULL; + } + + if (atomic_dec_and_test(&pb->pb_hold)) { + int do_free = 1; + + if (pb->pb_relse) { + atomic_inc(&pb->pb_hold); + if (h) + spin_unlock(&h->pb_hash_lock); + (*(pb->pb_relse)) (pb); + do_free = 0; + } + if (pb->pb_flags & PBF_DELWRI) { + pb->pb_flags |= PBF_ASYNC; + atomic_inc(&pb->pb_hold); + if (h && do_free) + spin_unlock(&h->pb_hash_lock); + pagebuf_delwri_queue(pb, 0); + do_free = 0; + } else if (pb->pb_flags & PBF_FS_MANAGED) { + if (h) + spin_unlock(&h->pb_hash_lock); + do_free = 0; + } + + if (do_free) { + _pagebuf_free_object(h, pb); + } + } else if (h) { + spin_unlock(&h->pb_hash_lock); + } +} + + +/* + * Pinning Buffer Storage in Memory + */ + +/* + * pagebuf_pin + * + * pagebuf_pin locks all of the memory represented by a buffer in + * memory. Multiple calls to pagebuf_pin and pagebuf_unpin, for + * the same or different buffers affecting a given page, will + * properly count the number of outstanding "pin" requests. The + * buffer may be released after the pagebuf_pin and a different + * buffer used when calling pagebuf_unpin, if desired. + * pagebuf_pin should be used by the file system when it wants be + * assured that no attempt will be made to force the affected + * memory to disk. It does not assure that a given logical page + * will not be moved to a different physical page. + */ +void +pagebuf_pin( + page_buf_t *pb) +{ + atomic_inc(&pb->pb_pin_count); + PB_TRACE(pb, PB_TRACE_REC(pin), pb->pb_pin_count.counter); +} + +/* + * pagebuf_unpin + * + * pagebuf_unpin reverses the locking of memory performed by + * pagebuf_pin. Note that both functions affected the logical + * pages associated with the buffer, not the buffer itself. + */ +void +pagebuf_unpin( + page_buf_t *pb) +{ + if (atomic_dec_and_test(&pb->pb_pin_count)) { + wake_up_all(&pb->pb_waiters); + } + PB_TRACE(pb, PB_TRACE_REC(unpin), pb->pb_pin_count.counter); +} + +int +pagebuf_ispin( + page_buf_t *pb) +{ + return atomic_read(&pb->pb_pin_count); +} + +/* + * pagebuf_wait_unpin + * + * pagebuf_wait_unpin waits until all of the memory associated + * with the buffer is not longer locked in memory. It returns + * immediately if none of the affected pages are locked. + */ +static inline void +_pagebuf_wait_unpin( + page_buf_t *pb) +{ + DECLARE_WAITQUEUE (wait, current); + + if (atomic_read(&pb->pb_pin_count) == 0) + return; + + add_wait_queue(&pb->pb_waiters, &wait); + for (;;) { + current->state = TASK_UNINTERRUPTIBLE; + if (atomic_read(&pb->pb_pin_count) == 0) { + break; + } + pagebuf_run_queues(pb); + schedule(); + } + remove_wait_queue(&pb->pb_waiters, &wait); + current->state = TASK_RUNNING; +} + + +/* + * Buffer Utility Routines + */ + +/* + * pagebuf_iodone + * + * pagebuf_iodone marks a buffer for which I/O is in progress + * done with respect to that I/O. The pb_iodone routine, if + * present, will be called as a side-effect. 
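
_pagebuf_wait_unpin() above is the usual wait-queue idiom: register on the queue, set the task state, re-check the pin count, and schedule until it reaches zero, while pagebuf_unpin() wakes all waiters when it drops the count to zero. A userspace model of the same handshake using a condition variable (illustrative only):

/* Userspace model of pagebuf_pin/pagebuf_unpin and the wait-for-unpin loop. */
#include <pthread.h>

static pthread_mutex_t pin_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pin_zero = PTHREAD_COND_INITIALIZER;
static int pin_count;

static void model_pin(void)
{
	pthread_mutex_lock(&pin_lock);
	pin_count++;
	pthread_mutex_unlock(&pin_lock);
}

static void model_unpin(void)
{
	pthread_mutex_lock(&pin_lock);
	if (--pin_count == 0)
		pthread_cond_broadcast(&pin_zero);  /* wake_up_all(&pb->pb_waiters) */
	pthread_mutex_unlock(&pin_lock);
}

static void model_wait_unpin(void)
{
	pthread_mutex_lock(&pin_lock);
	while (pin_count != 0)                  /* writer blocks until unpinned */
		pthread_cond_wait(&pin_zero, &pin_lock);
	pthread_mutex_unlock(&pin_lock);
}

static void *unpin_later(void *arg)
{
	(void)arg;
	model_unpin();
	return NULL;
}

int main(void)
{
	pthread_t t;

	model_pin();
	pthread_create(&t, NULL, unpin_later, NULL);
	model_wait_unpin();                     /* returns once the thread unpins */
	pthread_join(t, NULL);
	return 0;
}
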
+ */ +void +pagebuf_iodone_sched( + void *v) +{ + page_buf_t *pb = (page_buf_t *)v; + + if (pb->pb_iodone) { + (*(pb->pb_iodone)) (pb); + return; + } + + if (pb->pb_flags & PBF_ASYNC) { + if ((pb->pb_flags & _PBF_LOCKABLE) && !pb->pb_relse) + pagebuf_unlock(pb); + pagebuf_rele(pb); + } +} + +void +pagebuf_iodone( + page_buf_t *pb, + int dataio, + int schedule) +{ + pb->pb_flags &= ~(PBF_READ | PBF_WRITE); + if (pb->pb_error == 0) { + pb->pb_flags &= ~(PBF_PARTIAL | PBF_NONE); + } + + PB_TRACE(pb, PB_TRACE_REC(done), pb->pb_iodone); + + if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) { + if (schedule) { + int daemon = CPU_TO_DAEMON(smp_processor_id()); + + INIT_TQUEUE(&pb->pb_iodone_sched, + pagebuf_iodone_sched, (void *)pb); + queue_task(&pb->pb_iodone_sched, dataio ? + &pagebuf_dataiodone_tq[daemon] : + &pagebuf_logiodone_tq[daemon]); + wake_up(dataio ? + &pagebuf_dataiodone_wait[daemon] : + &pagebuf_logiodone_wait[daemon]); + } else { + pagebuf_iodone_sched(pb); + } + } else { + up(&pb->pb_iodonesema); + } +} + +/* + * pagebuf_ioerror + * + * pagebuf_ioerror sets the error code for a buffer. + */ +void +pagebuf_ioerror( /* mark/clear buffer error flag */ + page_buf_t *pb, /* buffer to mark */ + unsigned int error) /* error to store (0 if none) */ +{ + pb->pb_error = error; + PB_TRACE(pb, PB_TRACE_REC(ioerror), error); +} + +/* + * pagebuf_iostart + * + * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied. + * If necessary, it will arrange for any disk space allocation required, + * and it will break up the request if the block mappings require it. + * The pb_iodone routine in the buffer supplied will only be called + * when all of the subsidiary I/O requests, if any, have been completed. + * pagebuf_iostart calls the pagebuf_ioinitiate routine or + * pagebuf_iorequest, if the former routine is not defined, to start + * the I/O on a given low-level request. + */ +int +pagebuf_iostart( /* start I/O on a buffer */ + page_buf_t *pb, /* buffer to start */ + page_buf_flags_t flags) /* PBF_LOCK, PBF_ASYNC, PBF_READ, */ + /* PBF_WRITE, PBF_DELWRI, */ + /* PBF_SYNC, PBF_DONT_BLOCK */ +{ + int status = 0; + + PB_TRACE(pb, PB_TRACE_REC(iostart), flags); + + if (flags & PBF_DELWRI) { + pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC); + pb->pb_flags |= flags & + (PBF_DELWRI | PBF_ASYNC | PBF_SYNC); + pagebuf_delwri_queue(pb, 1); + return status; + } + + pb->pb_flags &= + ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI|PBF_READ_AHEAD); + pb->pb_flags |= flags & + (PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_READ_AHEAD); + + BUG_ON(pb->pb_bn == PAGE_BUF_DADDR_NULL); + + /* For writes call internal function which checks for + * filesystem specific callout function and execute it. 
+ */ + if (flags & PBF_WRITE) { + status = __pagebuf_iorequest(pb); + } else { + status = pagebuf_iorequest(pb); + } + + /* Wait for I/O if we are not an async request */ + if ((status == 0) && (flags & PBF_ASYNC) == 0) { + status = pagebuf_iowait(pb); + } + + return status; +} + + +/* + * Helper routines for pagebuf_iorequest (pagebuf I/O completion) + */ + +STATIC __inline__ int +_pagebuf_iolocked( + page_buf_t *pb) +{ + ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE)); + if (pb->pb_flags & PBF_READ) + return pb->pb_locked; + return ((pb->pb_flags & _PBF_LOCKABLE) == 0); +} + +STATIC void +_pagebuf_iodone( + page_buf_t *pb, + int fullpage, + int schedule) +{ + if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { + struct page *page; + int i; + + for (i = 0; i < pb->pb_page_count; i++) { + page = pb->pb_pages[i]; + if (fullpage && !PageError(page)) + SetPageUptodate(page); + if (_pagebuf_iolocked(pb)) + unlock_page(page); + } + pb->pb_locked = 0; + pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), schedule); + } +} + +STATIC void +_end_io_pagebuf( + struct buffer_head *bh, + int uptodate, + int fullpage) +{ + struct page *page = bh->b_page; + page_buf_t *pb = (page_buf_t *)bh->b_private; + + mark_buffer_uptodate(bh, uptodate); + put_bh(bh); + + if (!uptodate) { + SetPageError(page); + pb->pb_error = EIO; + } + + if (fullpage) { + unlock_buffer(bh); + _pagebuf_free_bh(bh); + } else { + static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED; + struct buffer_head *bp; + unsigned long flags; + + spin_lock_irqsave(&page_uptodate_lock, flags); + clear_bit(BH_Async, &bh->b_state); + unlock_buffer(bh); + for (bp = bh->b_this_page; bp != bh; bp = bp->b_this_page) { + if (buffer_locked(bp)) { + if (buffer_async(bp)) + break; + } else if (!buffer_uptodate(bp)) + break; + } + spin_unlock_irqrestore(&page_uptodate_lock, flags); + if (bp == bh && !PageError(page)) + SetPageUptodate(page); + } + + _pagebuf_iodone(pb, fullpage, 1); +} + +STATIC void +_pagebuf_end_io_complete_pages( + struct buffer_head *bh, + int uptodate) +{ + _end_io_pagebuf(bh, uptodate, 1); +} + +STATIC void +_pagebuf_end_io_partial_pages( + struct buffer_head *bh, + int uptodate) +{ + _end_io_pagebuf(bh, uptodate, 0); +} + + +/* + * Initiate I/O on part of a page we are interested in + */ +STATIC void +_pagebuf_page_io( + struct page *page, /* Page structure we are dealing with */ + pb_target_t *pbr, /* device parameters (bsz, ssz, dev) */ + page_buf_t *pb, /* pagebuf holding it, can be NULL */ + page_buf_daddr_t bn, /* starting block number */ + size_t pg_offset, /* starting offset in page */ + size_t pg_length, /* count of data to process */ + int locking, /* page locking in use */ + int rw, /* read/write operation */ + int flush) +{ + size_t sector; + size_t blk_length = 0; + struct buffer_head *bh, *head, *bufferlist[MAX_BUF_PER_PAGE]; + int sector_shift = pbr->pbr_sshift; + int i = 0, cnt = 0; + int public_bh = 0; + int multi_ok; + + if ((pbr->pbr_bsize < PAGE_CACHE_SIZE) && + !(pb->pb_flags & _PBF_PRIVATE_BH)) { + int cache_ok; + + cache_ok = !((pb->pb_flags & PBF_FORCEIO) || (rw == WRITE)); + public_bh = multi_ok = 1; + + if (!page_has_buffers(page)) { + if (!locking) { + lock_page(page); + if (!page_has_buffers(page)) + create_empty_buffers(page, + pbr->pbr_kdev, + 1 << sector_shift); + unlock_page(page); + } else { + create_empty_buffers(page, pbr->pbr_kdev, + 1 << sector_shift); + } + } + + /* Find buffer_heads belonging to just this pagebuf */ + bh = head = page_buffers(page); + do { + if (buffer_uptodate(bh) && 
cache_ok) + continue; + blk_length = i << sector_shift; + if (blk_length < pg_offset) + continue; + if (blk_length >= pg_offset + pg_length) + break; + + lock_buffer(bh); + get_bh(bh); + bh->b_size = 1 << sector_shift; + bh->b_blocknr = bn + (i - (pg_offset >> sector_shift)); + bufferlist[cnt++] = bh; + } while (i++, (bh = bh->b_this_page) != head); + + goto request; + } + + /* Calculate the block offsets and length we will be using */ + if (pg_offset) { + size_t block_offset; + + block_offset = pg_offset >> sector_shift; + block_offset = pg_offset - (block_offset << sector_shift); + blk_length = (pg_length + block_offset + pbr->pbr_smask) >> + sector_shift; + } else { + blk_length = (pg_length + pbr->pbr_smask) >> sector_shift; + } + + /* This will attempt to make a request bigger than the sector + * size if we are well aligned. + */ + switch (pb->pb_target->pbr_flags) { + case 0: + sector = blk_length << sector_shift; + blk_length = 1; + break; + case PBR_ALIGNED_ONLY: + if ((pg_offset == 0) && (pg_length == PAGE_CACHE_SIZE) && + (((unsigned int) bn) & BN_ALIGN_MASK) == 0) { + sector = blk_length << sector_shift; + blk_length = 1; + break; + } + case PBR_SECTOR_ONLY: + /* Fallthrough, same as default */ + default: + sector = 1 << sector_shift; + } + + /* If we are doing I/O larger than the bh->b_size field then + * we need to split this request up. + */ + while (sector > ((1UL << NBBY * sizeof(bh->b_size)) - 1)) { + sector >>= 1; + blk_length++; + } + + multi_ok = (blk_length != 1); + + for (; blk_length > 0; blk_length--, pg_offset += sector) { + bh = kmem_cache_alloc(bh_cachep, SLAB_NOFS); + if (!bh) + bh = _pagebuf_get_prealloc_bh(); + memset(bh, 0, sizeof(*bh)); + bh->b_size = sector; + bh->b_blocknr = bn++; + bh->b_dev = pbr->pbr_kdev; + set_bit(BH_Lock, &bh->b_state); + set_bh_page(bh, page, pg_offset); + init_waitqueue_head(&bh->b_wait); + atomic_set(&bh->b_count, 1); + bufferlist[cnt++] = bh; + } + +request: + if (cnt) { + void (*callback)(struct buffer_head *, int); + + callback = (multi_ok && public_bh) ? 
+ _pagebuf_end_io_partial_pages : + _pagebuf_end_io_complete_pages; + + /* Account for additional buffers in progress */ + atomic_add(cnt, &pb->pb_io_remaining); + +#ifdef RQ_WRITE_ORDERED + if (flush) + set_bit(BH_Ordered_Flush, &bufferlist[cnt-1]->b_state); +#endif + + for (i = 0; i < cnt; i++) { + bh = bufferlist[i]; + init_buffer(bh, callback, pb); + bh->b_rdev = bh->b_dev; + bh->b_rsector = bh->b_blocknr; + set_bit(BH_Mapped, &bh->b_state); + set_bit(BH_Async, &bh->b_state); + set_bit(BH_Req, &bh->b_state); + if (rw == WRITE) + set_bit(BH_Uptodate, &bh->b_state); + generic_make_request(rw, bh); + } + } else { + if (locking) + unlock_page(page); + } +} + +STATIC void +_pagebuf_page_apply( + page_buf_t *pb, + loff_t offset, + struct page *page, + size_t pg_offset, + size_t pg_length, + int last) +{ + page_buf_daddr_t bn = pb->pb_bn; + pb_target_t *pbr = pb->pb_target; + loff_t pb_offset; + int locking; + + ASSERT(page); + ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE)); + + if ((pbr->pbr_bsize == PAGE_CACHE_SIZE) && + (pb->pb_buffer_length < PAGE_CACHE_SIZE) && + (pb->pb_flags & PBF_READ) && pb->pb_locked) { + bn -= (pb->pb_offset >> pbr->pbr_sshift); + pg_offset = 0; + pg_length = PAGE_CACHE_SIZE; + } else { + pb_offset = offset - pb->pb_file_offset; + if (pb_offset) { + bn += (pb_offset + pbr->pbr_smask) >> pbr->pbr_sshift; + } + } + + locking = _pagebuf_iolocked(pb); + if (pb->pb_flags & PBF_WRITE) { + if (locking && (pb->pb_locked == 0)) + lock_page(page); + _pagebuf_page_io(page, pbr, pb, bn, + pg_offset, pg_length, locking, WRITE, + last && (pb->pb_flags & PBF_FLUSH)); + } else { + _pagebuf_page_io(page, pbr, pb, bn, + pg_offset, pg_length, locking, READ, 0); + } +} + +/* + * pagebuf_iorequest + * + * pagebuf_iorequest is the core I/O request routine. + * It assumes that the buffer is well-formed and + * mapped and ready for physical I/O, unlike + * pagebuf_iostart() and pagebuf_iophysio(). Those + * routines call the pagebuf_ioinitiate routine to start I/O, + * if it is present, or else call pagebuf_iorequest() + * directly if the pagebuf_ioinitiate routine is not present. + * + * This function will be responsible for ensuring access to the + * pages is restricted whilst I/O is in progress - for locking + * pagebufs the pagebuf lock is the mediator, for non-locking + * pagebufs the pages will be locked. In the locking case we + * need to use the pagebuf lock as multiple meta-data buffers + * will reference the same page. + */ +int +pagebuf_iorequest( /* start real I/O */ + page_buf_t *pb) /* buffer to convey to device */ +{ + PB_TRACE(pb, PB_TRACE_REC(ioreq), 0); + + if (pb->pb_flags & PBF_DELWRI) { + pagebuf_delwri_queue(pb, 1); + return 0; + } + + if (pb->pb_flags & PBF_WRITE) { + _pagebuf_wait_unpin(pb); + } + + /* Set the count to 1 initially, this will stop an I/O + * completion callout which happens before we have started + * all the I/O from calling pagebuf_iodone too early. + */ + atomic_set(&pb->pb_io_remaining, 1); + _pagebuf_ioapply(pb); + _pagebuf_iodone(pb, 0, 0); + return 0; +} + +/* + * pagebuf_iowait + * + * pagebuf_iowait waits for I/O to complete on the buffer supplied. + * It returns immediately if no I/O is pending. In any case, it returns + * the error code, if any, or 0 if there is no error. 
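
The comment in pagebuf_iorequest() above describes a common submission idiom: pb_io_remaining is primed with one extra count before any sub-I/O is issued, so completion callbacks that fire while submission is still in progress can never see the counter hit zero; the submitter drops its own count last. A userspace model of that bias-by-one counter (illustrative only, single threaded for clarity):

/* Userspace model of the pb_io_remaining "+1 while submitting" bias. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int io_remaining;
static int buffer_done;

static void complete_one(void)
{
	/* Called once per finished sub-I/O, and once by the submitter. */
	if (atomic_fetch_sub(&io_remaining, 1) == 1)
		buffer_done = 1;            /* last count gone: whole buffer done */
}

static void submit_buffer(int nr_sub_ios)
{
	int i;

	atomic_store(&io_remaining, 1);     /* bias: hold completion off */
	for (i = 0; i < nr_sub_ios; i++) {
		atomic_fetch_add(&io_remaining, 1);
		complete_one();             /* pretend the sub-I/O finished at once */
	}
	complete_one();                     /* drop the bias; may finish the buffer */
}

int main(void)
{
	submit_buffer(4);
	printf("done=%d\n", buffer_done);   /* prints done=1, and only now */
	return 0;
}
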
+ */ +int +pagebuf_iowait( + page_buf_t *pb) +{ + PB_TRACE(pb, PB_TRACE_REC(iowait), 0); + pagebuf_run_queues(pb); + down(&pb->pb_iodonesema); + PB_TRACE(pb, PB_TRACE_REC(iowaited), (int)pb->pb_error); + return pb->pb_error; +} + +STATIC void * +pagebuf_mapout_locked( + page_buf_t *pb) +{ + void *old_addr = NULL; + + if (pb->pb_flags & PBF_MAPPED) { + if (pb->pb_flags & _PBF_ADDR_ALLOCATED) + old_addr = pb->pb_addr - pb->pb_offset; + pb->pb_addr = NULL; + pb->pb_flags &= ~(PBF_MAPPED | _PBF_ADDR_ALLOCATED); + } + + return old_addr; /* Caller must free the address space, + * we are under a spin lock, probably + * not safe to do vfree here + */ +} + +caddr_t +pagebuf_offset( + page_buf_t *pb, + size_t offset) +{ + struct page *page; + + offset += pb->pb_offset; + + page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT]; + return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1)); +} + +/* + * pagebuf_iomove + * + * Move data into or out of a buffer. + */ +void +pagebuf_iomove( + page_buf_t *pb, /* buffer to process */ + size_t boff, /* starting buffer offset */ + size_t bsize, /* length to copy */ + caddr_t data, /* data address */ + page_buf_rw_t mode) /* read/write flag */ +{ + size_t bend, cpoff, csize; + struct page *page; + + bend = boff + bsize; + while (boff < bend) { + page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)]; + cpoff = page_buf_poff(boff + pb->pb_offset); + csize = min_t(size_t, + PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff); + + ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); + + switch (mode) { + case PBRW_ZERO: + memset(page_address(page) + cpoff, 0, csize); + break; + case PBRW_READ: + memcpy(data, page_address(page) + cpoff, csize); + break; + case PBRW_WRITE: + memcpy(page_address(page) + cpoff, data, csize); + } + + boff += csize; + data += csize; + } +} + +/* + * _pagebuf_ioapply + * + * Applies _pagebuf_page_apply to each page of the page_buf_t. 
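
pagebuf_iomove() above copies through the buffer one page at a time: each step locates the page covering the current offset, clamps the chunk to the end of that page, and then zeroes or memcpys it. A userspace model of the page-walking write case, showing a copy that straddles a page boundary (illustrative only):

/* Userspace model of the page-by-page copy in pagebuf_iomove(). */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define PAGE_SZ 4096

static void iomove_write(char **pages, size_t pg_offset, size_t boff,
			 size_t bsize, const char *data)
{
	size_t bend = boff + bsize, cpoff, csize;

	while (boff < bend) {
		char *page = pages[(boff + pg_offset) / PAGE_SZ];

		cpoff = (boff + pg_offset) % PAGE_SZ;
		csize = PAGE_SZ - cpoff;        /* clamp to end of this page */
		if (csize > bend - boff)
			csize = bend - boff;    /* and to the remaining request */
		memcpy(page + cpoff, data, csize);  /* PBRW_WRITE case */
		boff += csize;
		data += csize;
	}
}

int main(void)
{
	char *pages[2] = { calloc(1, PAGE_SZ), calloc(1, PAGE_SZ) };
	char msg[] = "spans the page boundary";

	/* Start 10 bytes before the end of the first page so the copy splits. */
	iomove_write(pages, 0, PAGE_SZ - 10, sizeof(msg), msg);
	printf("%.10s|%s\n", pages[0] + PAGE_SZ - 10, pages[1]);
	free(pages[0]);
	free(pages[1]);
	return 0;
}
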
+ */ +STATIC void +_pagebuf_ioapply( /* apply function to pages */ + page_buf_t *pb) /* buffer to examine */ +{ + int buf_index; + loff_t buffer_offset = pb->pb_file_offset; + size_t buffer_len = pb->pb_count_desired; + size_t page_offset, len; + size_t cur_offset, cur_len; + + pagebuf_hold(pb); + + cur_offset = pb->pb_offset; + cur_len = buffer_len; + + for (buf_index = 0; buf_index < pb->pb_page_count; buf_index++) { + if (cur_len == 0) + break; + if (cur_offset >= PAGE_CACHE_SIZE) { + cur_offset -= PAGE_CACHE_SIZE; + continue; + } + + page_offset = cur_offset; + cur_offset = 0; + + len = PAGE_CACHE_SIZE - page_offset; + if (len > cur_len) + len = cur_len; + cur_len -= len; + + _pagebuf_page_apply(pb, buffer_offset, + pb->pb_pages[buf_index], page_offset, len, + buf_index+1 == pb->pb_page_count); + buffer_offset += len; + buffer_len -= len; + } + + pagebuf_rele(pb); +} + + +/* + * Pagebuf delayed write buffer handling + */ + +STATIC int pbd_active = 1; +LIST_HEAD(pbd_delwrite_queue); +STATIC spinlock_t pbd_delwrite_lock = SPIN_LOCK_UNLOCKED; + +STATIC void +pagebuf_delwri_queue( + page_buf_t *pb, + int unlock) +{ + PB_TRACE(pb, PB_TRACE_REC(delwri_q), unlock); + spin_lock(&pbd_delwrite_lock); + /* If already in the queue, dequeue and place at tail */ + if (!list_empty(&pb->pb_list)) { + if (unlock) { + atomic_dec(&pb->pb_hold); + } + list_del(&pb->pb_list); + } + + list_add_tail(&pb->pb_list, &pbd_delwrite_queue); + pb->pb_flushtime = jiffies + pb_params.p_un.age_buffer; + spin_unlock(&pbd_delwrite_lock); + + if (unlock && (pb->pb_flags & _PBF_LOCKABLE)) { + pagebuf_unlock(pb); + } +} + +void +pagebuf_delwri_dequeue( + page_buf_t *pb) +{ + PB_TRACE(pb, PB_TRACE_REC(delwri_uq), 0); + spin_lock(&pbd_delwrite_lock); + list_del_init(&pb->pb_list); + pb->pb_flags &= ~PBF_DELWRI; + spin_unlock(&pbd_delwrite_lock); +} + + +/* + * The pagebuf iodone daemons + */ + +STATIC int +pagebuf_iodone_daemon( + void *__bind_cpu, + const char *name, + int pagebuf_daemons[], + struct list_head pagebuf_iodone_tq[], + wait_queue_head_t pagebuf_iodone_wait[]) +{ + int bind_cpu, cpu; + DECLARE_WAITQUEUE (wait, current); + + bind_cpu = (int) (long)__bind_cpu; + cpu = CPU_TO_DAEMON(cpu_logical_map(bind_cpu)); + + /* Set up the thread */ + daemonize(); + + /* Avoid signals */ + spin_lock_irq(¤t->sigmask_lock); + sigfillset(¤t->blocked); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + + /* Migrate to the right CPU */ + migrate_to_cpu(cpu); +#ifdef __HAVE_NEW_SCHEDULER + if (smp_processor_id() != cpu) + BUG(); +#else + while (smp_processor_id() != cpu) + schedule(); +#endif + + sprintf(current->comm, "%s/%d", name, bind_cpu); + INIT_LIST_HEAD(&pagebuf_iodone_tq[cpu]); + init_waitqueue_head(&pagebuf_iodone_wait[cpu]); + __set_current_state(TASK_INTERRUPTIBLE); + mb(); + + pagebuf_daemons[cpu] = 1; + + for (;;) { + add_wait_queue(&pagebuf_iodone_wait[cpu], &wait); + + if (TQ_ACTIVE(pagebuf_iodone_tq[cpu])) + __set_task_state(current, TASK_RUNNING); + schedule(); + remove_wait_queue(&pagebuf_iodone_wait[cpu], &wait); + run_task_queue(&pagebuf_iodone_tq[cpu]); + if (pagebuf_daemons[cpu] == 0) + break; + __set_current_state(TASK_INTERRUPTIBLE); + } + + pagebuf_daemons[cpu] = -1; + wake_up_interruptible(&pagebuf_iodone_wait[cpu]); + return 0; +} + +STATIC void +pagebuf_runall_queues( + struct list_head pagebuf_iodone_tq[]) +{ + int pcpu, cpu; + + for (cpu = 0; cpu < min(smp_num_cpus, MAX_IO_DAEMONS); cpu++) { + pcpu = CPU_TO_DAEMON(cpu_logical_map(cpu)); + + run_task_queue(&pagebuf_iodone_tq[pcpu]); + 
} +} + +STATIC int +pagebuf_logiodone_daemon( + void *__bind_cpu) +{ + return pagebuf_iodone_daemon(__bind_cpu, "xfslogd", pb_logio_daemons, + pagebuf_logiodone_tq, pagebuf_logiodone_wait); +} + +STATIC int +pagebuf_dataiodone_daemon( + void *__bind_cpu) +{ + return pagebuf_iodone_daemon(__bind_cpu, "xfsdatad", pb_dataio_daemons, + pagebuf_dataiodone_tq, pagebuf_dataiodone_wait); +} + + +/* Defines for pagebuf daemon */ +DECLARE_WAIT_QUEUE_HEAD(pbd_waitq); +STATIC int force_flush; + +STATIC void +pagebuf_daemon_wakeup( + int flag) +{ + force_flush = flag; + if (waitqueue_active(&pbd_waitq)) { + wake_up_interruptible(&pbd_waitq); + } +} + +typedef void (*timeout_fn)(unsigned long); + +STATIC int +pagebuf_daemon( + void *data) +{ + int count; + page_buf_t *pb; + struct list_head *curr, *next, tmp; + struct timer_list pb_daemon_timer = + { {NULL, NULL}, 0, 0, (timeout_fn)pagebuf_daemon_wakeup }; + + /* Set up the thread */ + daemonize(); + + /* Avoid signals */ + spin_lock_irq(¤t->sigmask_lock); + sigfillset(¤t->blocked); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + + strcpy(current->comm, "pagebufd"); + current->flags |= PF_MEMALLOC; + + INIT_LIST_HEAD(&tmp); + do { + if (pbd_active == 1) { + mod_timer(&pb_daemon_timer, + jiffies + pb_params.p_un.flush_interval); + interruptible_sleep_on(&pbd_waitq); + } + + if (pbd_active == 0) { + del_timer_sync(&pb_daemon_timer); + } + + spin_lock(&pbd_delwrite_lock); + + count = 0; + list_for_each_safe(curr, next, &pbd_delwrite_queue) { + pb = list_entry(curr, page_buf_t, pb_list); + + PB_TRACE(pb, PB_TRACE_REC(walkq1), pagebuf_ispin(pb)); + + if ((pb->pb_flags & PBF_DELWRI) && !pagebuf_ispin(pb) && + (((pb->pb_flags & _PBF_LOCKABLE) == 0) || + !pagebuf_cond_lock(pb))) { + + if (!force_flush && + time_before(jiffies, pb->pb_flushtime)) { + pagebuf_unlock(pb); + break; + } + + list_del(&pb->pb_list); + list_add(&pb->pb_list, &tmp); + + count++; + } + } + + spin_unlock(&pbd_delwrite_lock); + while (!list_empty(&tmp)) { + pb = list_entry(tmp.next, page_buf_t, pb_list); + list_del_init(&pb->pb_list); + pb->pb_flags &= ~PBF_DELWRI; + pb->pb_flags |= PBF_WRITE; + + __pagebuf_iorequest(pb); + } + + if (as_list_len > 0) + purge_addresses(); + if (count) + pagebuf_run_queues(NULL); + + force_flush = 0; + } while (pbd_active == 1); + + pbd_active = -1; + wake_up_interruptible(&pbd_waitq); + + return 0; +} + +void +pagebuf_delwri_flush( + pb_target_t *target, + u_long flags, + int *pinptr) +{ + page_buf_t *pb; + struct list_head *curr, *next, tmp; + int pincount = 0; + int flush_cnt = 0; + + spin_lock(&pbd_delwrite_lock); + INIT_LIST_HEAD(&tmp); + + pagebuf_runall_queues(pagebuf_dataiodone_tq); + + list_for_each_safe(curr, next, &pbd_delwrite_queue) { + pb = list_entry(curr, page_buf_t, pb_list); + + /* + * Skip other targets, markers and in progress buffers + */ + + if ((pb->pb_flags == 0) || (pb->pb_target != target) || + !(pb->pb_flags & PBF_DELWRI)) { + continue; + } + + PB_TRACE(pb, PB_TRACE_REC(walkq2), pagebuf_ispin(pb)); + if (pagebuf_ispin(pb)) { + pincount++; + continue; + } + + if (flags & PBDF_TRYLOCK) { + if (!pagebuf_cond_lock(pb)) { + pincount++; + continue; + } + } + + list_del_init(&pb->pb_list); + if (flags & PBDF_WAIT) { + list_add(&pb->pb_list, &tmp); + pb->pb_flags &= ~PBF_ASYNC; + } + + spin_unlock(&pbd_delwrite_lock); + + if ((flags & PBDF_TRYLOCK) == 0) { + pagebuf_lock(pb); + } + + pb->pb_flags &= ~PBF_DELWRI; + pb->pb_flags |= PBF_WRITE; + + __pagebuf_iorequest(pb); + if (++flush_cnt > 32) { + 
pagebuf_run_queues(NULL); + flush_cnt = 0; + } + + spin_lock(&pbd_delwrite_lock); + } + + spin_unlock(&pbd_delwrite_lock); + + pagebuf_run_queues(NULL); + + if (pinptr) + *pinptr = pincount; + + if ((flags & PBDF_WAIT) == 0) + return; + + while (!list_empty(&tmp)) { + pb = list_entry(tmp.next, page_buf_t, pb_list); + + list_del_init(&pb->pb_list); + pagebuf_iowait(pb); + if (!pb->pb_relse) + pagebuf_unlock(pb); + pagebuf_rele(pb); + } +} + +STATIC int +pagebuf_daemon_start(void) +{ + int cpu, pcpu; + + kernel_thread(pagebuf_daemon, NULL, CLONE_FS|CLONE_FILES|CLONE_VM); + + for (cpu = 0; cpu < min(smp_num_cpus, MAX_IO_DAEMONS); cpu++) { + pcpu = CPU_TO_DAEMON(cpu_logical_map(cpu)); + + if (kernel_thread(pagebuf_logiodone_daemon, + (void *)(long) cpu, + CLONE_FS|CLONE_FILES|CLONE_VM) < 0) { + printk("pagebuf_logiodone daemon failed to start\n"); + } else { + while (!pb_logio_daemons[pcpu]) + yield(); + } + } + for (cpu = 0; cpu < min(smp_num_cpus, MAX_IO_DAEMONS); cpu++) { + pcpu = CPU_TO_DAEMON(cpu_logical_map(cpu)); + + if (kernel_thread(pagebuf_dataiodone_daemon, + (void *)(long) cpu, + CLONE_FS|CLONE_FILES|CLONE_VM) < 0) { + printk("pagebuf_dataiodone daemon failed to start\n"); + } else { + while (!pb_dataio_daemons[pcpu]) + yield(); + } + } + return 0; +} + +/* + * pagebuf_daemon_stop + * + * Note: do not mark as __exit, it is called from pagebuf_terminate. + */ +STATIC void +pagebuf_daemon_stop(void) +{ + int cpu, pcpu; + + pbd_active = 0; + + wake_up_interruptible(&pbd_waitq); + wait_event_interruptible(pbd_waitq, pbd_active); + + for (pcpu = 0; pcpu < min(smp_num_cpus, MAX_IO_DAEMONS); pcpu++) { + cpu = CPU_TO_DAEMON(cpu_logical_map(pcpu)); + + pb_logio_daemons[cpu] = 0; + wake_up(&pagebuf_logiodone_wait[cpu]); + wait_event_interruptible(pagebuf_logiodone_wait[cpu], + pb_logio_daemons[cpu] == -1); + + pb_dataio_daemons[cpu] = 0; + wake_up(&pagebuf_dataiodone_wait[cpu]); + wait_event_interruptible(pagebuf_dataiodone_wait[cpu], + pb_dataio_daemons[cpu] == -1); + } +} + + +/* + * Pagebuf sysctl interface + */ + +STATIC int +pb_stats_clear_handler( + ctl_table *ctl, + int write, + struct file *filp, + void *buffer, + size_t *lenp) +{ + int ret; + int *valp = ctl->data; + + ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp); + + if (!ret && write && *valp) { + printk("XFS Clearing pbstats\n"); + memset(&pbstats, 0, sizeof(pbstats)); + pb_params.p_un.stats_clear = 0; + } + + return ret; +} + +STATIC struct ctl_table_header *pagebuf_table_header; + +STATIC ctl_table pagebuf_table[] = { + {PB_FLUSH_INT, "flush_int", &pb_params.data[0], + sizeof(ulong), 0644, NULL, &proc_doulongvec_ms_jiffies_minmax, + &sysctl_intvec, NULL, &pagebuf_min[0], &pagebuf_max[0]}, + + {PB_FLUSH_AGE, "flush_age", &pb_params.data[1], + sizeof(ulong), 0644, NULL, &proc_doulongvec_ms_jiffies_minmax, + &sysctl_intvec, NULL, &pagebuf_min[1], &pagebuf_max[1]}, + + {PB_STATS_CLEAR, "stats_clear", &pb_params.data[2], + sizeof(ulong), 0644, NULL, &pb_stats_clear_handler, + &sysctl_intvec, NULL, &pagebuf_min[2], &pagebuf_max[2]}, + +#ifdef PAGEBUF_TRACE + {PB_DEBUG, "debug", &pb_params.data[3], + sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax, + &sysctl_intvec, NULL, &pagebuf_min[3], &pagebuf_max[3]}, +#endif + {0} +}; + +STATIC ctl_table pagebuf_dir_table[] = { + {VM_PAGEBUF, "pagebuf", NULL, 0, 0555, pagebuf_table}, + {0} +}; + +STATIC ctl_table pagebuf_root_table[] = { + {CTL_VM, "vm", NULL, 0, 0555, pagebuf_dir_table}, + {0} +}; + +#ifdef CONFIG_PROC_FS +STATIC int +pagebuf_readstats( + char *buffer, + char 
**start, + off_t offset, + int count, + int *eof, + void *data) +{ + int i, len; + + len = 0; + len += sprintf(buffer + len, "pagebuf"); + for (i = 0; i < sizeof(pbstats) / sizeof(u_int32_t); i++) { + len += sprintf(buffer + len, " %u", + *(((u_int32_t*)&pbstats) + i)); + } + buffer[len++] = '\n'; + + if (offset >= len) { + *start = buffer; + *eof = 1; + return 0; + } + *start = buffer + offset; + if ((len -= offset) > count) + return count; + *eof = 1; + + return len; +} +#endif /* CONFIG_PROC_FS */ + +STATIC void +pagebuf_shaker(void) +{ + pagebuf_daemon_wakeup(1); +} + + +/* + * Initialization and Termination + */ + +int __init +pagebuf_init(void) +{ + int i; + + pagebuf_table_header = register_sysctl_table(pagebuf_root_table, 1); + +#ifdef CONFIG_PROC_FS + if (proc_mkdir("fs/pagebuf", 0)) + create_proc_read_entry( + "fs/pagebuf/stat", 0, 0, pagebuf_readstats, NULL); +#endif + + pagebuf_cache = kmem_cache_create("page_buf_t", sizeof(page_buf_t), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (pagebuf_cache == NULL) { + printk("pagebuf: couldn't init pagebuf cache\n"); + pagebuf_terminate(); + return -ENOMEM; + } + + if (_pagebuf_prealloc_bh(NR_RESERVED_BH) < NR_RESERVED_BH) { + printk("pagebuf: couldn't pre-allocate %d buffer heads\n", + NR_RESERVED_BH); + pagebuf_terminate(); + return -ENOMEM; + } + + init_waitqueue_head(&pb_resv_bh_wait); + + for (i = 0; i < NHASH; i++) { + spin_lock_init(&pbhash[i].pb_hash_lock); + INIT_LIST_HEAD(&pbhash[i].pb_hash); + } + +#ifdef PAGEBUF_TRACE + pb_trace.buf = (pagebuf_trace_t *)kmalloc( + PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t), GFP_KERNEL); + memset(pb_trace.buf, 0, PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t)); + pb_trace.start = 0; + pb_trace.end = PB_TRACE_BUFSIZE - 1; +#endif + + pagebuf_daemon_start(); + kmem_shake_register(pagebuf_shaker); + return 0; +} + +/* + * pagebuf_terminate. + * + * Note: do not mark as __exit, this is also called from the __init code. + */ +void +pagebuf_terminate(void) +{ + pagebuf_daemon_stop(); + + kmem_cache_destroy(pagebuf_cache); + kmem_shake_deregister(pagebuf_shaker); + + unregister_sysctl_table(pagebuf_table_header); +#ifdef CONFIG_PROC_FS + remove_proc_entry("fs/pagebuf/stat", NULL); + remove_proc_entry("fs/pagebuf", NULL); +#endif +} + + +/* + * Module management (for kernel debugger module) + */ +EXPORT_SYMBOL(pagebuf_offset); +EXPORT_SYMBOL(pbd_delwrite_queue); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/pagebuf/page_buf.h linux.22-ac2/fs/xfs/pagebuf/page_buf.h --- linux.vanilla/fs/xfs/pagebuf/page_buf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/pagebuf/page_buf.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Written by Steve Lord, Jim Mostek, Russell Cattelan at SGI + */ + +#ifndef __PAGE_BUF_H__ +#define __PAGE_BUF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * Turn this on to get pagebuf lock ownership +#define PAGEBUF_LOCK_TRACKING +*/ + +/* + * Base types + */ + +/* daddr must be signed since -1 is used for bmaps that are not yet allocated */ +typedef loff_t page_buf_daddr_t; + +#define PAGE_BUF_DADDR_NULL ((page_buf_daddr_t) (-1LL)) + +typedef size_t page_buf_dsize_t; /* size of buffer in blocks */ + +#define page_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE) +#define page_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) +#define page_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT) +#define page_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK) + +typedef enum page_buf_rw_e { + PBRW_READ = 1, /* transfer into target memory */ + PBRW_WRITE = 2, /* transfer from target memory */ + PBRW_ZERO = 3 /* Zero target memory */ +} page_buf_rw_t; + +typedef enum { /* pbm_flags values */ + PBMF_EOF = 0x01, /* mapping contains EOF */ + PBMF_HOLE = 0x02, /* mapping covers a hole */ + PBMF_DELAY = 0x04, /* mapping covers delalloc region */ + PBMF_UNWRITTEN = 0x20 /* mapping covers allocated */ + /* but uninitialized file data */ +} bmap_flags_t; + +typedef enum { + /* base extent manipulation calls */ + BMAP_READ = (1 << 0), /* read extents */ + BMAP_WRITE = (1 << 1), /* create extents */ + BMAP_ALLOCATE = (1 << 2), /* delayed allocate to real extents */ + BMAP_UNWRITTEN = (1 << 3), /* unwritten extents to real extents */ + /* modifiers */ + BMAP_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ + BMAP_DIRECT = (1 << 5), /* direct instead of buffered write */ + BMAP_MMAP = (1 << 6), /* allocate for mmap write */ + BMAP_SYNC = (1 << 7), /* sync write */ + BMAP_TRYLOCK = (1 << 8), /* non-blocking request */ +} bmapi_flags_t; + +typedef enum page_buf_flags_e { /* pb_flags values */ + PBF_READ = (1 << 0), /* buffer intended for reading from device */ + PBF_WRITE = (1 << 1), /* buffer intended for writing to device */ + PBF_MAPPED = (1 << 2), /* buffer mapped (pb_addr valid) */ + PBF_PARTIAL = (1 << 3), /* buffer partially read */ + PBF_ASYNC = (1 << 4), /* initiator will not wait for completion */ + PBF_NONE = (1 << 5), /* buffer not read at all */ + PBF_DELWRI = (1 << 6), /* buffer has dirty pages */ + PBF_FREED = (1 << 7), /* buffer has been freed and is invalid */ + PBF_SYNC = (1 << 8), /* force updates to disk */ + PBF_MAPPABLE = (1 << 9),/* use directly-addressable pages */ + PBF_STALE = (1 << 10), /* buffer has been staled, do not find it */ + PBF_FS_MANAGED = (1 << 11), /* filesystem controls freeing memory */ + PBF_FS_DATAIOD = (1 << 12), /* schedule IO completion on fs datad */ + + /* flags used only as arguments to access routines */ + PBF_LOCK = (1 << 13), /* lock requested */ + PBF_TRYLOCK = (1 << 14), /* lock 
requested, but do not wait */ + PBF_DONT_BLOCK = (1 << 15), /* do not block in current thread */ + + /* flags used only internally */ + _PBF_LOCKABLE = (1 << 16), /* page_buf_t may be locked */ + _PBF_PRIVATE_BH = (1 << 17), /* do not use public buffer heads */ + _PBF_ALL_PAGES_MAPPED = (1 << 18), /* all pages in range mapped */ + _PBF_ADDR_ALLOCATED = (1 << 19), /* pb_addr space was allocated */ + _PBF_MEM_ALLOCATED = (1 << 20), /* pb_mem+underlying pages alloc'd */ + + PBF_FORCEIO = (1 << 21), + PBF_FLUSH = (1 << 22), /* flush disk write cache */ + PBF_READ_AHEAD = (1 << 23), + +} page_buf_flags_t; + +#define PBF_UPDATE (PBF_READ | PBF_WRITE) +#define PBF_NOT_DONE(pb) (((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) != 0) +#define PBF_DONE(pb) (((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) == 0) + +#define PBR_SECTOR_ONLY 1 /* only use sector size buffer heads */ +#define PBR_ALIGNED_ONLY 2 /* only use aligned I/O */ + +typedef struct pb_target { + int pbr_flags; + dev_t pbr_dev; + kdev_t pbr_kdev; + struct block_device *pbr_bdev; + struct address_space *pbr_mapping; + unsigned int pbr_bsize; + unsigned int pbr_sshift; + size_t pbr_smask; +} pb_target_t; + +/* + * page_buf_bmap_t: File system I/O map + * + * The pbm_bn, pbm_offset and pbm_length fields are expressed in disk blocks. + * The pbm_length field specifies the size of the underlying backing store + * for the particular mapping. + * + * The pbm_bsize, pbm_size and pbm_delta fields are in bytes and indicate + * the size of the mapping, the number of bytes that are valid to access + * (read or write), and the offset into the mapping, given the offset + * supplied to the file I/O map routine. pbm_delta is the offset of the + * desired data from the beginning of the mapping. + * + * When a request is made to read beyond the logical end of the object, + * pbm_size may be set to 0, but pbm_offset and pbm_length should be set to + * the actual amount of underlying storage that has been allocated, if any. + */ + +typedef struct page_buf_bmap_s { + page_buf_daddr_t pbm_bn; /* block number in file system */ + pb_target_t *pbm_target; /* device to do I/O to */ + loff_t pbm_offset; /* byte offset of mapping in file */ + size_t pbm_delta; /* offset of request into bmap */ + size_t pbm_bsize; /* size of this mapping in bytes */ + bmap_flags_t pbm_flags; /* options flags for mapping */ +} page_buf_bmap_t; + +typedef page_buf_bmap_t pb_bmap_t; + + +/* + * page_buf_t: Buffer structure for page cache-based buffers + * + * This buffer structure is used by the page cache buffer management routines + * to refer to an assembly of pages forming a logical buffer. The actual + * I/O is performed with buffer_head or bio structures, as required by drivers, + * for drivers which do not understand this structure. The buffer structure is + * used on temporary basis only, and discarded when released. + * + * The real data storage is recorded in the page cache. Metadata is + * hashed to the inode for the block device on which the file system resides. + * File data is hashed to the inode for the file. Pages which are only + * partially filled with data have bits set in their block_map entry + * to indicate which disk blocks in the page are not valid. 
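
pb_target_t above carries the device's sector geometry as a shift and a mask (pbr_sshift, pbr_smask), and _pagebuf_find() earlier in the patch BUGs out on any request smaller than one sector or not sector aligned. A small userspace illustration of how the pair is derived from a sector size and used in those checks (sample values are hypothetical):

/* Userspace illustration of pbr_sshift/pbr_smask and the alignment tests. */
#include <stdio.h>

int main(void)
{
	unsigned int sector = 512;
	unsigned int sshift = 0, smask;
	long long range_base = 3 * 512;     /* sector aligned: passes the BUG_ON */
	long long bad_base = 100;           /* not aligned: would trip the BUG_ON */

	while ((1u << sshift) < sector)     /* 512 -> shift of 9 */
		sshift++;
	smask = (1u << sshift) - 1;         /* 512 -> mask 0x1ff */

	printf("sshift=%u smask=%#x\n", sshift, smask);
	printf("aligned=%d misaligned=%d\n",
	       (range_base & smask) == 0, (bad_base & smask) == 0);
	return 0;
}
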
+ */ + +struct page_buf_s; +typedef void (*page_buf_iodone_t)(struct page_buf_s *); + /* call-back function on I/O completion */ +typedef void (*page_buf_relse_t)(struct page_buf_s *); + /* call-back function on I/O completion */ +typedef int (*page_buf_bdstrat_t)(struct page_buf_s *); + +#define PB_PAGES 4 + +typedef struct page_buf_s { + struct semaphore pb_sema; /* semaphore for lockables */ + unsigned long pb_flushtime; /* time to flush pagebuf */ + atomic_t pb_pin_count; /* pin count */ + wait_queue_head_t pb_waiters; /* unpin waiters */ + struct list_head pb_list; + page_buf_flags_t pb_flags; /* status flags */ + struct list_head pb_hash_list; + struct pb_target *pb_target; /* logical object */ + atomic_t pb_hold; /* reference count */ + page_buf_daddr_t pb_bn; /* block number for I/O */ + loff_t pb_file_offset; /* offset in file */ + size_t pb_buffer_length; /* size of buffer in bytes */ + size_t pb_count_desired; /* desired transfer size */ + void *pb_addr; /* virtual address of buffer */ + struct tq_struct pb_iodone_sched; + atomic_t pb_io_remaining;/* #outstanding I/O requests */ + page_buf_iodone_t pb_iodone; /* I/O completion function */ + page_buf_relse_t pb_relse; /* releasing function */ + page_buf_bdstrat_t pb_strat; /* pre-write function */ + struct semaphore pb_iodonesema; /* Semaphore for I/O waiters */ + void *pb_fspriv; + void *pb_fspriv2; + void *pb_fspriv3; + unsigned short pb_error; /* error code on I/O */ + unsigned short pb_page_count; /* size of page array */ + unsigned short pb_offset; /* page offset in first page */ + unsigned char pb_locked; /* page array is locked */ + unsigned char pb_hash_index; /* hash table index */ + struct page **pb_pages; /* array of page pointers */ + struct page *pb_page_array[PB_PAGES]; /* inline pages */ +#ifdef PAGEBUF_LOCK_TRACKING + int pb_last_holder; +#endif +} page_buf_t; + + +/* + * page_buf module entry points + */ + +/* Finding and Reading Buffers */ + +extern page_buf_t *pagebuf_find( /* find buffer for block if */ + /* the block is in memory */ + struct pb_target *, /* inode for block */ + loff_t, /* starting offset of range */ + size_t, /* length of range */ + page_buf_flags_t); /* PBF_LOCK */ + +extern page_buf_t *pagebuf_get( /* allocate a buffer */ + struct pb_target *, /* inode for buffer */ + loff_t, /* starting offset of range */ + size_t, /* length of range */ + page_buf_flags_t); /* PBF_LOCK, PBF_READ, */ + /* PBF_ASYNC */ + +extern page_buf_t *pagebuf_lookup( + struct pb_target *, + loff_t, /* starting offset of range */ + size_t, /* length of range */ + page_buf_flags_t); /* PBF_READ, PBF_WRITE, */ + /* PBF_FORCEIO, _PBF_LOCKABLE */ + +extern page_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */ + /* no memory or disk address */ + struct pb_target *); /* mount point "fake" inode */ + +extern page_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct */ + /* without disk address */ + size_t len, + struct pb_target *); /* mount point "fake" inode */ + +extern int pagebuf_associate_memory( + page_buf_t *, + void *, + size_t); + + +extern void pagebuf_hold( /* increment reference count */ + page_buf_t *); /* buffer to hold */ + +extern void pagebuf_readahead( /* read ahead into cache */ + struct pb_target *, /* target for buffer (or NULL) */ + loff_t, /* starting offset of range */ + size_t, /* length of range */ + page_buf_flags_t); /* additional read flags */ + +/* Writing and Releasing Buffers */ + +extern void pagebuf_free( /* deallocate a buffer */ + page_buf_t *); /* buffer to deallocate */ + 
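
Taken together, the declarations above and the locking and utility calls just below make up the metadata read path a filesystem sees. The following is a hedged usage sketch only, for kernel context and not standalone: read_meta_block, my_target and my_daddr are hypothetical names, and the offset and length passed to pagebuf_get() are in 512-byte basic blocks, since _pagebuf_find() shifts both by BBSHIFT.

/*
 * Sketch: read one 4KB metadata buffer, locked and mapped, then release it.
 * Assumes only the interfaces declared in this header.
 */
#include "page_buf.h"

static int read_meta_block(pb_target_t *my_target, loff_t my_daddr)
{
	page_buf_t *pb;
	caddr_t data;
	int error;

	pb = pagebuf_get(my_target, my_daddr, 8,    /* 8 basic blocks == 4KB */
			 PBF_LOCK | PBF_READ | PBF_MAPPED);
	if (!pb)
		return -ENOMEM;

	error = pagebuf_geterror(pb);               /* pb_error is positive */
	if (error) {
		pagebuf_unlock(pb);
		pagebuf_rele(pb);                   /* drop our hold */
		return -error;
	}

	data = pagebuf_offset(pb, 0);               /* first byte of the buffer */
	/* ... examine or modify the metadata through 'data' ... */

	pagebuf_unlock(pb);                         /* pairs with PBF_LOCK */
	pagebuf_rele(pb);                           /* release the reference */
	return 0;
}
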
+extern void pagebuf_rele( /* release hold on a buffer */ + page_buf_t *); /* buffer to release */ + +/* Locking and Unlocking Buffers */ + +extern int pagebuf_cond_lock( /* lock buffer, if not locked */ + /* (returns -EBUSY if locked) */ + page_buf_t *); /* buffer to lock */ + +extern int pagebuf_lock_value( /* return count on lock */ + page_buf_t *); /* buffer to check */ + +extern int pagebuf_lock( /* lock buffer */ + page_buf_t *); /* buffer to lock */ + +extern void pagebuf_unlock( /* unlock buffer */ + page_buf_t *); /* buffer to unlock */ + +/* Buffer Utility Routines */ +static inline int pagebuf_geterror(page_buf_t *pb) +{ + return (pb ? pb->pb_error : ENOMEM); +} + +extern void pagebuf_iodone( /* mark buffer I/O complete */ + page_buf_t *, /* buffer to mark */ + int, /* use data/log helper thread. */ + int); /* run completion locally, or in + * a helper thread. */ + +extern void pagebuf_ioerror( /* mark buffer in error (or not) */ + page_buf_t *, /* buffer to mark */ + unsigned int); /* error to store (0 if none) */ + +extern int pagebuf_iostart( /* start I/O on a buffer */ + page_buf_t *, /* buffer to start */ + page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC, */ + /* PBF_READ, PBF_WRITE, */ + /* PBF_DELWRI, PBF_SYNC */ + +extern int pagebuf_iorequest( /* start real I/O */ + page_buf_t *); /* buffer to convey to device */ + + /* + * pagebuf_iorequest is the core I/O request routine. + * It assumes that the buffer is well-formed and + * mapped and ready for physical I/O, unlike + * pagebuf_iostart() and pagebuf_iophysio(). Those + * routines call the inode pagebuf_ioinitiate routine to start I/O, + * if it is present, or else call pagebuf_iorequest() + * directly if the inode pagebuf_ioinitiate routine is not present. + */ + +extern int pagebuf_iowait( /* wait for buffer I/O done */ + page_buf_t *); /* buffer to wait on */ + +extern caddr_t pagebuf_offset(page_buf_t *, size_t); + +extern void pagebuf_iomove( /* move data in/out of pagebuf */ + page_buf_t *, /* buffer to manipulate */ + size_t, /* starting buffer offset */ + size_t, /* length in buffer */ + caddr_t, /* data pointer */ + page_buf_rw_t); /* direction */ + +/* Pinning Buffer Storage in Memory */ + +extern void pagebuf_pin( /* pin buffer in memory */ + page_buf_t *); /* buffer to pin */ + +extern void pagebuf_unpin( /* unpin buffered data */ + page_buf_t *); /* buffer to unpin */ + +extern int pagebuf_ispin( page_buf_t *); /* check if pagebuf is pinned */ + +/* Reading and writing pages */ + +extern void pagebuf_delwri_dequeue(page_buf_t *); + +#define PBDF_WAIT 0x01 +#define PBDF_TRYLOCK 0x02 +extern void pagebuf_delwri_flush( + struct pb_target *, + unsigned long, + int *); + +extern int pagebuf_init(void); +extern void pagebuf_terminate(void); + +static __inline__ int __pagebuf_iorequest(page_buf_t *pb) +{ + if (pb->pb_strat) + return pb->pb_strat(pb); + return pagebuf_iorequest(pb); +} + +static __inline__ void pagebuf_run_queues(page_buf_t *pb) +{ + if (!pb || atomic_read(&pb->pb_io_remaining)) + run_task_queue(&tq_disk); +} + +#endif /* __PAGE_BUF_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/pagebuf/page_buf_internal.h linux.22-ac2/fs/xfs/pagebuf/page_buf_internal.h --- linux.vanilla/fs/xfs/pagebuf/page_buf_internal.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/pagebuf/page_buf_internal.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Written by Steve Lord at SGI + */ + +#ifndef __PAGE_BUF_PRIVATE_H__ +#define __PAGE_BUF_PRIVATE_H__ + +#include "page_buf.h" + +#define _PAGE_BUF_INTERNAL_ +#define PB_DEFINE_TRACES +#include "page_buf_trace.h" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,9) +#define page_buffers(page) ((page)->buffers) +#define page_has_buffers(page) ((page)->buffers) +#endif + +#ifdef PAGEBUF_LOCK_TRACKING +#define PB_SET_OWNER(pb) (pb->pb_last_holder = current->pid) +#define PB_CLEAR_OWNER(pb) (pb->pb_last_holder = -1) +#define PB_GET_OWNER(pb) (pb->pb_last_holder) +#else +#define PB_SET_OWNER(pb) +#define PB_CLEAR_OWNER(pb) +#define PB_GET_OWNER(pb) +#endif /* PAGEBUF_LOCK_TRACKING */ + +/* Tracing utilities for pagebuf */ +typedef struct { + int event; + unsigned long pb; + page_buf_flags_t flags; + unsigned short hold; + unsigned short lock_value; + void *task; + void *misc; + void *ra; + loff_t offset; + size_t size; +} pagebuf_trace_t; + +struct pagebuf_trace_buf { + pagebuf_trace_t *buf; + volatile int start; + volatile int end; +}; + +#define PB_TRACE_BUFSIZE 1024 +#define CIRC_INC(i) (((i) + 1) & (PB_TRACE_BUFSIZE - 1)) + +/* + * Tunable pagebuf parameters + */ + +#define P_PARAM 4 + +typedef union pagebuf_param { + struct { + ulong flush_interval; /* interval between runs of the + * delwri flush daemon. */ + ulong age_buffer; /* time for buffer to age before + * we flush it. 
*/ + ulong debug; /* debug tracing on or off */ + ulong stats_clear; /* clear the pagebuf stats */ + } p_un; + ulong data[P_PARAM]; +} pagebuf_param_t; + +enum { + PB_FLUSH_INT = 1, + PB_FLUSH_AGE = 2, + PB_STATS_CLEAR = 3, + PB_DEBUG = 4 +}; + +extern pagebuf_param_t pb_params; + +/* + * Pagebuf statistics + */ + +struct pbstats { + u_int32_t pb_get; + u_int32_t pb_create; + u_int32_t pb_get_locked; + u_int32_t pb_get_locked_waited; + u_int32_t pb_busy_locked; + u_int32_t pb_miss_locked; + u_int32_t pb_page_retries; + u_int32_t pb_page_found; + u_int32_t pb_get_read; +}; + +extern struct pbstats pbstats; + +#define PB_STATS_INC(count) ( count ++ ) + +#ifndef STATIC +# define STATIC static +#endif + +#endif /* __PAGE_BUF_PRIVATE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/pagebuf/page_buf_locking.c linux.22-ac2/fs/xfs/pagebuf/page_buf_locking.c --- linux.vanilla/fs/xfs/pagebuf/page_buf_locking.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/pagebuf/page_buf_locking.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * page_buf_locking.c + * + * The page_buf module provides an abstract buffer cache model on top of + * the Linux page cache. Cached metadata blocks for a file system are + * hashed to the inode for the block device. The page_buf module + * assembles buffer (page_buf_t) objects on demand to aggregate such + * cached pages for I/O. The page_buf_locking module adds support for + * locking such page buffers. + * + * Written by Steve Lord at SGI + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "page_buf_internal.h" + +/* + * pagebuf_cond_lock + * + * pagebuf_cond_lock locks a buffer object, if it is not already locked. + * Note that this in no way + * locks the underlying pages, so it is only useful for synchronizing + * concurrent use of page buffer objects, not for synchronizing independent + * access to the underlying pages. 
+ */ +int +pagebuf_cond_lock( /* lock buffer, if not locked */ + /* returns -EBUSY if locked) */ + page_buf_t *pb) +{ + int locked; + + ASSERT(pb->pb_flags & _PBF_LOCKABLE); + + locked = down_trylock(&pb->pb_sema) == 0; + if (locked) { + PB_SET_OWNER(pb); + } + + PB_TRACE(pb, PB_TRACE_REC(condlck), locked); + + return(locked ? 0 : -EBUSY); +} + +/* + * pagebuf_lock_value + * + * Return lock value for a pagebuf + */ +int +pagebuf_lock_value( + page_buf_t *pb) +{ + ASSERT(pb->pb_flags & _PBF_LOCKABLE); + return(atomic_read(&pb->pb_sema.count)); +} + +/* + * pagebuf_lock + * + * pagebuf_lock locks a buffer object. Note that this in no way + * locks the underlying pages, so it is only useful for synchronizing + * concurrent use of page buffer objects, not for synchronizing independent + * access to the underlying pages. + */ +int +pagebuf_lock( + page_buf_t *pb) +{ + ASSERT(pb->pb_flags & _PBF_LOCKABLE); + + PB_TRACE(pb, PB_TRACE_REC(lock), 0); + pagebuf_run_queues(pb); + down(&pb->pb_sema); + PB_SET_OWNER(pb); + PB_TRACE(pb, PB_TRACE_REC(locked), 0); + return 0; +} + +/* + * pagebuf_unlock + * + * pagebuf_unlock releases the lock on the buffer object created by + * pagebuf_lock or pagebuf_cond_lock (not any + * pinning of underlying pages created by pagebuf_pin). + */ +void +pagebuf_unlock( /* unlock buffer */ + page_buf_t *pb) /* buffer to unlock */ +{ + ASSERT(pb->pb_flags & _PBF_LOCKABLE); + PB_CLEAR_OWNER(pb); + up(&pb->pb_sema); + PB_TRACE(pb, PB_TRACE_REC(unlock), 0); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/pagebuf/page_buf_trace.h linux.22-ac2/fs/xfs/pagebuf/page_buf_trace.h --- linux.vanilla/fs/xfs/pagebuf/page_buf_trace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/pagebuf/page_buf_trace.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#ifndef __PAGEBUF_TRACE__ +#define __PAGEBUF_TRACE__ + +#ifdef PB_DEFINE_TRACES +#define PB_TRACE_START typedef enum { +#define PB_TRACE_REC(x) pb_trace_point_##x +#define PB_TRACE_END } pb_trace_var_t; +#else +#define PB_TRACE_START static char *event_names[] = { +#define PB_TRACE_REC(x) #x +#define PB_TRACE_END }; +#endif + +PB_TRACE_START +PB_TRACE_REC(get), +PB_TRACE_REC(get_obj), +PB_TRACE_REC(free_obj), +PB_TRACE_REC(look_pg), +PB_TRACE_REC(get_read), +PB_TRACE_REC(no_daddr), +PB_TRACE_REC(hold), +PB_TRACE_REC(rele), +PB_TRACE_REC(done), +PB_TRACE_REC(ioerror), +PB_TRACE_REC(iostart), +PB_TRACE_REC(end_io), +PB_TRACE_REC(do_io), +PB_TRACE_REC(ioreq), +PB_TRACE_REC(iowait), +PB_TRACE_REC(iowaited), +PB_TRACE_REC(free_lk), +PB_TRACE_REC(freed_l), +PB_TRACE_REC(cmp), +PB_TRACE_REC(get_lk), +PB_TRACE_REC(got_lk), +PB_TRACE_REC(skip), +PB_TRACE_REC(lock), +PB_TRACE_REC(locked), +PB_TRACE_REC(unlock), +PB_TRACE_REC(avl_ret), +PB_TRACE_REC(condlck), +PB_TRACE_REC(avl_ins), +PB_TRACE_REC(walkq1), +PB_TRACE_REC(walkq2), +PB_TRACE_REC(walkq3), +PB_TRACE_REC(delwri_q), +PB_TRACE_REC(delwri_uq), +PB_TRACE_REC(pin), +PB_TRACE_REC(unpin), +PB_TRACE_REC(file_write), +PB_TRACE_REC(external), +PB_TRACE_END + +extern void pb_trace_func(page_buf_t *, int, void *, void *); +#ifdef PAGEBUF_TRACE +# define PB_TRACE(pb, event, misc) \ + pb_trace_func(pb, event, (void *) misc, \ + (void *)__builtin_return_address(0)) +#else +# define PB_TRACE(pb, event, misc) do { } while (0) +#endif + +#endif /* __PAGEBUF_TRACE__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/Makefile linux.22-ac2/fs/xfs/quota/Makefile --- linux.vanilla/fs/xfs/quota/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/Makefile 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,56 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# + +EXTRA_CFLAGS += -I $(TOPDIR)/fs/xfs + +ifeq ($(CONFIG_XFS_DEBUG),y) + EXTRA_CFLAGS += -g -DDEBUG -DXFSDEBUG + #EXTRA_CFLAGS += -DQUOTADEBUG +endif + +O_TARGET := xfs_quota.o +ifneq ($(MAKECMDGOALS),modules_install) + obj-m := $(O_TARGET) +endif + +obj-$(CONFIG_PROC_FS) += xfs_qm_stats.o + +obj-y += xfs_dquot.o \ + xfs_dquot_item.o \ + xfs_trans_dquot.o \ + xfs_qm_syscalls.o \ + xfs_qm_bhv.o \ + xfs_qm.o + +export-objs += xfs_qm.o + +include $(TOPDIR)/Rules.make diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_dquot.c linux.22-ac2/fs/xfs/quota/xfs_dquot.c --- linux.vanilla/fs/xfs/quota/xfs_dquot.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_dquot.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,1588 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_space.h" +#include "xfs_trans_priv.h" + +#include "xfs_qm.h" + + +/* + LOCK ORDER + + inode lock (ilock) + dquot hash-chain lock (hashlock) + xqm dquot freelist lock (freelistlock + mount's dquot list lock (mplistlock) + user dquot lock - lock ordering among dquots is based on the uid or gid + group dquot lock - similar to udquots. Between the two dquots, the udquot + has to be locked first. + pin lock - the dquot lock must be held to take this lock. + flush lock - ditto. +*/ + +STATIC void xfs_qm_dqflush_done(xfs_buf_t *, xfs_dq_logitem_t *); + +#ifdef DEBUG +dev_t xfs_dqerror_dev = 0; +int xfs_do_dqerror = 0; +int xfs_dqreq_num = 0; +int xfs_dqerror_mod = 33; +#endif + +/* + * Allocate and initialize a dquot. We don't always allocate fresh memory; + * we try to reclaim a free dquot if the number of incore dquots are above + * a threshold. + * The only field inside the core that gets initialized at this point + * is the d_id field. The idea is to fill in the entire q_core + * when we read in the on disk dquot. + */ +xfs_dquot_t * +xfs_qm_dqinit( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type) +{ + xfs_dquot_t *dqp; + boolean_t brandnewdquot; + + brandnewdquot = xfs_qm_dqalloc_incore(&dqp); + dqp->dq_flags = type; + INT_SET(dqp->q_core.d_id, ARCH_CONVERT, id); + dqp->q_mount = mp; + + /* + * No need to re-initialize these if this is a reclaimed dquot. + */ + if (brandnewdquot) { + dqp->dq_flnext = dqp->dq_flprev = dqp; + mutex_init(&dqp->q_qlock, MUTEX_DEFAULT, "xdq"); + initnsema(&dqp->q_flock, 1, "fdq"); + sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq"); + +#ifdef DQUOT_TRACING + dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP); + xfs_dqtrace_entry(dqp, "DQINIT"); +#endif + } else { + /* + * Only the q_core portion was zeroed in dqreclaim_one(). + * So, we need to reset others. 
+ */ + dqp->q_nrefs = 0; + dqp->q_blkno = 0; + dqp->MPL_NEXT = dqp->HL_NEXT = NULL; + dqp->HL_PREVP = dqp->MPL_PREVP = NULL; + dqp->q_bufoffset = 0; + dqp->q_fileoffset = 0; + dqp->q_transp = NULL; + dqp->q_gdquot = NULL; + dqp->q_res_bcount = 0; + dqp->q_res_icount = 0; + dqp->q_res_rtbcount = 0; + dqp->q_pincount = 0; + dqp->q_hash = 0; + ASSERT(dqp->dq_flnext == dqp->dq_flprev); + +#ifdef DQUOT_TRACING + ASSERT(dqp->q_trace); + xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT"); +#endif + } + + /* + * log item gets initialized later + */ + return (dqp); +} + +/* + * This is called to free all the memory associated with a dquot + */ +void +xfs_qm_dqdestroy( + xfs_dquot_t *dqp) +{ + ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp)); + + mutex_destroy(&dqp->q_qlock); + freesema(&dqp->q_flock); + sv_destroy(&dqp->q_pinwait); + +#ifdef DQUOT_TRACING + if (dqp->q_trace) + ktrace_free(dqp->q_trace); + dqp->q_trace = NULL; +#endif + kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); + atomic_dec(&xfs_Gqm->qm_totaldquots); +} + +/* + * This is what a 'fresh' dquot inside a dquot chunk looks like on disk. + */ +STATIC void +xfs_qm_dqinit_core( + xfs_dqid_t id, + uint type, + xfs_dqblk_t *d) +{ + /* + * Caller has zero'd the entire dquot 'chunk' already. + */ + INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC); + INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION); + INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id); + INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type); +} + + +#ifdef DQUOT_TRACING +/* + * Dquot tracing for debugging. + */ +/* ARGSUSED */ +void +xfs_dqtrace_entry__( + xfs_dquot_t *dqp, + char *func, + void *retaddr, + xfs_inode_t *ip) +{ + xfs_dquot_t *udqp = NULL; + int ino; + + ASSERT(dqp->q_trace); + if (ip) { + ino = ip->i_ino; + udqp = ip->i_udquot; + } + ktrace_enter(dqp->q_trace, + (void *)(__psint_t)DQUOT_KTRACE_ENTRY, + (void *)func, + (void *)(__psint_t)dqp->q_nrefs, + (void *)(__psint_t)dqp->dq_flags, + (void *)(__psint_t)dqp->q_res_bcount, + (void *)(__psint_t)INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT), + (void *)(__psint_t)INT_GET(dqp->q_core.d_id, ARCH_CONVERT), /* 11 */ + (void *)(__psint_t)current_pid(), + (void *)(__psint_t)ino, + (void *)(__psint_t)retaddr, + (void *)(__psint_t)udqp); + return; +} +#endif + + +/* + * Check the limits and timers of a dquot and start or reset timers + * if necessary. + * This gets called even when quota enforcement is OFF, which makes our + * life a little less complicated. (We just don't reject any quota + * reservations in that case, when enforcement is off). + * We also return 0 as the values of the timers in Q_GETQUOTA calls, when + * enforcement's off. + * In contrast, warnings are a little different in that they don't + * 'automatically' get started when limits get exceeded. + */ +void +xfs_qm_adjust_dqtimers( + xfs_mount_t *mp, + xfs_disk_dquot_t *d) +{ + /* + * The dquot had better be locked. We are modifying it here. + */ + + /* + * root's limits are not real limits. 
+ */ + if (INT_ISZERO(d->d_id, ARCH_CONVERT)) + return; + +#ifdef QUOTADEBUG + if (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)) + ASSERT(INT_GET(d->d_blk_softlimit, ARCH_CONVERT) <= INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)); + if (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)) + ASSERT(INT_GET(d->d_ino_softlimit, ARCH_CONVERT) <= INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)); +#endif + if (INT_ISZERO(d->d_btimer, ARCH_CONVERT)) { + if ((INT_GET(d->d_blk_softlimit, ARCH_CONVERT) && + (INT_GET(d->d_bcount, ARCH_CONVERT) >= INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) || + (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT) && + (INT_GET(d->d_bcount, ARCH_CONVERT) >= INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) { + INT_SET(d->d_btimer, ARCH_CONVERT, CURRENT_TIME + XFS_QI_BTIMELIMIT(mp)); + } + } else { + if ((INT_ISZERO(d->d_blk_softlimit, ARCH_CONVERT) || + (INT_GET(d->d_bcount, ARCH_CONVERT) < INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) && + (INT_ISZERO(d->d_blk_hardlimit, ARCH_CONVERT) || + (INT_GET(d->d_bcount, ARCH_CONVERT) < INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) { + INT_ZERO(d->d_btimer, ARCH_CONVERT); + } + } + + if (INT_ISZERO(d->d_itimer, ARCH_CONVERT)) { + if ((INT_GET(d->d_ino_softlimit, ARCH_CONVERT) && + (INT_GET(d->d_icount, ARCH_CONVERT) >= INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) || + (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT) && + (INT_GET(d->d_icount, ARCH_CONVERT) >= INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) { + INT_SET(d->d_itimer, ARCH_CONVERT, CURRENT_TIME + XFS_QI_ITIMELIMIT(mp)); + } + } else { + if ((INT_ISZERO(d->d_ino_softlimit, ARCH_CONVERT) || + (INT_GET(d->d_icount, ARCH_CONVERT) < INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) && + (INT_ISZERO(d->d_ino_hardlimit, ARCH_CONVERT) || + (INT_GET(d->d_icount, ARCH_CONVERT) < INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) { + INT_ZERO(d->d_itimer, ARCH_CONVERT); + } + } +} + +/* + * Increment or reset warnings of a given dquot. + */ +int +xfs_qm_dqwarn( + xfs_disk_dquot_t *d, + uint flags) +{ + int warned; + + /* + * root's limits are not real limits. 
+ */ + if (INT_ISZERO(d->d_id, ARCH_CONVERT)) + return (0); + + warned = 0; + if (INT_GET(d->d_blk_softlimit, ARCH_CONVERT) && + (INT_GET(d->d_bcount, ARCH_CONVERT) >= + INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) { + if (flags & XFS_QMOPT_DOWARN) { + INT_MOD(d->d_bwarns, ARCH_CONVERT, +1); + warned++; + } + } else { + if (INT_ISZERO(d->d_blk_softlimit, ARCH_CONVERT) || + (INT_GET(d->d_bcount, ARCH_CONVERT) < + INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) { + INT_ZERO(d->d_bwarns, ARCH_CONVERT); + } + } + + if (INT_GET(d->d_ino_softlimit, ARCH_CONVERT) > 0 && + (INT_GET(d->d_icount, ARCH_CONVERT) >= + INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) { + if (flags & XFS_QMOPT_DOWARN) { + INT_MOD(d->d_iwarns, ARCH_CONVERT, +1); + warned++; + } + } else { + if ((INT_ISZERO(d->d_ino_softlimit, ARCH_CONVERT)) || + (INT_GET(d->d_icount, ARCH_CONVERT) < + INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) { + INT_ZERO(d->d_iwarns, ARCH_CONVERT); + } + } +#ifdef QUOTADEBUG + if (INT_GET(d->d_iwarns, ARCH_CONVERT)) + cmn_err(CE_DEBUG, + "--------@@Inode warnings running : %Lu >= %Lu", + INT_GET(d->d_icount, ARCH_CONVERT), + INT_GET(d->d_ino_softlimit, ARCH_CONVERT)); + if (INT_GET(d->d_bwarns, ARCH_CONVERT)) + cmn_err(CE_DEBUG, + "--------@@Blks warnings running : %Lu >= %Lu", + INT_GET(d->d_bcount, ARCH_CONVERT), + INT_GET(d->d_blk_softlimit, ARCH_CONVERT)); +#endif + return (warned); +} + + +/* + * initialize a buffer full of dquots and log the whole thing + */ +STATIC void +xfs_qm_init_dquot_blk( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + xfs_buf_t *bp) +{ + xfs_dqblk_t *d; + int curid, i; + + ASSERT(tp); + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + + d = (xfs_dqblk_t *)XFS_BUF_PTR(bp); + + /* + * ID of the first dquot in the block - id's are zero based. + */ + curid = id - (id % XFS_QM_DQPERBLK(mp)); + ASSERT(curid >= 0); + memset(d, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp))); + for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++) + xfs_qm_dqinit_core(curid, type, d); + xfs_trans_dquot_buf(tp, bp, + type & XFS_DQ_USER ? + XFS_BLI_UDQUOT_BUF : + XFS_BLI_GDQUOT_BUF); + xfs_trans_log_buf(tp, bp, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1); +} + + + +/* + * Allocate a block and fill it with dquots. + * This is called when the bmapi finds a hole. + */ +STATIC int +xfs_qm_dqalloc( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_dquot_t *dqp, + xfs_inode_t *quotip, + xfs_fileoff_t offset_fsb, + xfs_buf_t **O_bpp) +{ + xfs_fsblock_t firstblock; + xfs_bmap_free_t flist; + xfs_bmbt_irec_t map; + int nmaps, error, committed; + xfs_buf_t *bp; + + ASSERT(tp != NULL); + xfs_dqtrace_entry(dqp, "DQALLOC"); + + /* + * Initialize the bmap freelist prior to calling bmapi code. + */ + XFS_BMAP_INIT(&flist, &firstblock); + xfs_ilock(quotip, XFS_ILOCK_EXCL); + /* + * Return if this type of quotas is turned off while we didn't + * have an inode lock + */ + if (XFS_IS_THIS_QUOTA_OFF(dqp)) { + xfs_iunlock(quotip, XFS_ILOCK_EXCL); + return (ESRCH); + } + + /* + * xfs_trans_commit normally decrements the vnode ref count + * when it unlocks the inode. Since we want to keep the quota + * inode around, we bump the vnode ref count now. 
+ */ + VN_HOLD(XFS_ITOV(quotip)); + + xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); + nmaps = 1; + if ((error = xfs_bmapi(tp, quotip, + offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB, + XFS_BMAPI_METADATA | XFS_BMAPI_WRITE, + &firstblock, + XFS_QM_DQALLOC_SPACE_RES(mp), + &map, &nmaps, &flist))) { + goto error0; + } + ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); + ASSERT(nmaps == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + + /* + * Keep track of the blkno to save a lookup later + */ + dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); + + /* now we can just get the buffer (there's nothing to read yet) */ + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, + dqp->q_blkno, + XFS_QI_DQCHUNKLEN(mp), + 0); + if (!bp || (error = XFS_BUF_GETERROR(bp))) + goto error1; + /* + * Make a chunk of dquots out of this buffer and log + * the entire thing. + */ + xfs_qm_init_dquot_blk(tp, mp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT), + dqp->dq_flags & (XFS_DQ_USER|XFS_DQ_GROUP), + bp); + + if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed))) { + goto error1; + } + + *O_bpp = bp; + return 0; + + error1: + xfs_bmap_cancel(&flist); + error0: + xfs_iunlock(quotip, XFS_ILOCK_EXCL); + + return (error); +} + +/* + * Maps a dquot to the buffer containing its on-disk version. + * This returns a ptr to the buffer containing the on-disk dquot + * in the bpp param, and a ptr to the on-disk dquot within that buffer + */ +STATIC int +xfs_qm_dqtobp( + xfs_trans_t *tp, + xfs_dquot_t *dqp, + xfs_disk_dquot_t **O_ddpp, + xfs_buf_t **O_bpp, + uint flags) +{ + xfs_bmbt_irec_t map; + int nmaps, error; + xfs_buf_t *bp; + xfs_inode_t *quotip; + xfs_mount_t *mp; + xfs_disk_dquot_t *ddq; + xfs_dqid_t id; + boolean_t newdquot; + + mp = dqp->q_mount; + id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT); + nmaps = 1; + newdquot = B_FALSE; + + /* + * If we don't know where the dquot lives, find out. + */ + if (dqp->q_blkno == (xfs_daddr_t) 0) { + /* We use the id as an index */ + dqp->q_fileoffset = (xfs_fileoff_t) ((uint)id / + XFS_QM_DQPERBLK(mp)); + nmaps = 1; + quotip = XFS_DQ_TO_QIP(dqp); + xfs_ilock(quotip, XFS_ILOCK_SHARED); + /* + * Return if this type of quotas is turned off while we didn't + * have an inode lock + */ + if (XFS_IS_THIS_QUOTA_OFF(dqp)) { + xfs_iunlock(quotip, XFS_ILOCK_SHARED); + return (ESRCH); + } + /* + * Find the block map; no allocations yet + */ + error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, + XFS_DQUOT_CLUSTER_SIZE_FSB, + XFS_BMAPI_METADATA, + NULL, 0, &map, &nmaps, NULL); + + xfs_iunlock(quotip, XFS_ILOCK_SHARED); + if (error) + return (error); + ASSERT(nmaps == 1); + ASSERT(map.br_blockcount == 1); + + /* + * offset of dquot in the (fixed sized) dquot chunk. + */ + dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) * + sizeof(xfs_dqblk_t); + if (map.br_startblock == HOLESTARTBLOCK) { + /* + * We don't allocate unless we're asked to + */ + if (!(flags & XFS_QMOPT_DQALLOC)) + return (ENOENT); + + ASSERT(tp); + if ((error = xfs_qm_dqalloc(tp, mp, dqp, quotip, + dqp->q_fileoffset, &bp))) + return (error); + newdquot = B_TRUE; + } else { + /* + * store the blkno etc so that we don't have to do the + * mapping all the time + */ + dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); + } + } + ASSERT(dqp->q_blkno != DELAYSTARTBLOCK); + ASSERT(dqp->q_blkno != HOLESTARTBLOCK); + + /* + * Read in the buffer, unless we've just done the allocation + * (in which case we already have the buf). + */ + if (! 
newdquot) { + xfs_dqtrace_entry(dqp, "DQTOBP READBUF"); + if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, + dqp->q_blkno, + XFS_QI_DQCHUNKLEN(mp), + 0, &bp))) { + return (error); + } + if (error || !bp) + return XFS_ERROR(error); + } + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + + /* + * calculate the location of the dquot inside the buffer. + */ + ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset); + + /* + * A simple sanity check in case we got a corrupted dquot... + */ + if (xfs_qm_dqcheck(ddq, id, + dqp->dq_flags & (XFS_DQ_USER|XFS_DQ_GROUP), + flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN), + "dqtobp")) { + if (!(flags & XFS_QMOPT_DQREPAIR)) { + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EIO); + } + XFS_BUF_BUSY(bp); /* We dirtied this */ + } + + *O_bpp = bp; + *O_ddpp = ddq; + + return (0); +} + + +/* + * Read in the ondisk dquot using dqtobp() then copy it to an incore version, + * and release the buffer immediately. + * + */ +/* ARGSUSED */ +STATIC int +xfs_qm_dqread( + xfs_trans_t *tp, + xfs_dqid_t id, + xfs_dquot_t *dqp, /* dquot to get filled in */ + uint flags) +{ + xfs_disk_dquot_t *ddqp; + xfs_buf_t *bp; + int error; + + /* + * get a pointer to the on-disk dquot and the buffer containing it + * dqp already knows its own type (GROUP/USER). + */ + xfs_dqtrace_entry(dqp, "DQREAD"); + if ((error = xfs_qm_dqtobp(tp, dqp, &ddqp, &bp, flags))) { + return (error); + } + + /* copy everything from disk dquot to the incore dquot */ + memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t)); + ASSERT(INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id); + xfs_qm_dquot_logitem_init(dqp); + + /* + * Reservation counters are defined as reservation plus current usage + * to avoid having to add everytime. + */ + dqp->q_res_bcount = INT_GET(ddqp->d_bcount, ARCH_CONVERT); + dqp->q_res_icount = INT_GET(ddqp->d_icount, ARCH_CONVERT); + dqp->q_res_rtbcount = INT_GET(ddqp->d_rtbcount, ARCH_CONVERT); + + /* Mark the buf so that this will stay incore a little longer */ + XFS_BUF_SET_VTYPE_REF(bp, B_FS_DQUOT, XFS_DQUOT_REF); + + /* + * We got the buffer with a xfs_trans_read_buf() (in dqtobp()) + * So we need to release with xfs_trans_brelse(). + * The strategy here is identical to that of inodes; we lock + * the dquot in xfs_qm_dqget() before making it accessible to + * others. This is because dquots, like inodes, need a good level of + * concurrency, and we don't want to take locks on the entire buffers + * for dquot accesses. + * Note also that the dquot buffer may even be dirty at this point, if + * this particular dquot was repaired. We still aren't afraid to + * brelse it because we have the changes incore. + */ + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + xfs_trans_brelse(tp, bp); + + return (error); +} + + +/* + * allocate an incore dquot from the kernel heap, + * and fill its core with quota information kept on disk. + * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk + * if it wasn't already allocated. 
+ */ +STATIC int +xfs_qm_idtodq( + xfs_mount_t *mp, + xfs_dqid_t id, /* gid or uid, depending on type */ + uint type, /* UDQUOT or GDQUOT */ + uint flags, /* DQALLOC, DQREPAIR */ + xfs_dquot_t **O_dqpp)/* OUT : incore dquot, not locked */ +{ + xfs_dquot_t *dqp; + int error; + xfs_trans_t *tp; + int cancelflags=0; + + dqp = xfs_qm_dqinit(mp, id, type); + tp = NULL; + if (flags & XFS_QMOPT_DQALLOC) { + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC); + if ((error = xfs_trans_reserve(tp, + XFS_QM_DQALLOC_SPACE_RES(mp), + XFS_WRITE_LOG_RES(mp) + + BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1 + + 128, + 0, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT))) { + cancelflags = 0; + goto error0; + } + cancelflags = XFS_TRANS_RELEASE_LOG_RES; + } + + /* + * Read it from disk; xfs_dqread() takes care of + * all the necessary initialization of dquot's fields (locks, etc) + */ + if ((error = xfs_qm_dqread(tp, id, dqp, flags))) { + /* + * This can happen if quotas got turned off (ESRCH), + * or if the dquot didn't exist on disk and we ask to + * allocate (ENOENT). + */ + xfs_dqtrace_entry(dqp, "DQREAD FAIL"); + cancelflags |= XFS_TRANS_ABORT; + goto error0; + } + if (tp) { + if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, + NULL))) + goto error1; + } + + *O_dqpp = dqp; + ASSERT(! XFS_DQ_IS_LOCKED(dqp)); + return (0); + + error0: + ASSERT(error); + if (tp) + xfs_trans_cancel(tp, cancelflags); + error1: + xfs_qm_dqdestroy(dqp); + *O_dqpp = NULL; + return (error); +} + +/* + * Lookup a dquot in the incore dquot hashtable. We keep two separate + * hashtables for user and group dquots; and, these are global tables + * inside the XQM, not per-filesystem tables. + * The hash chain must be locked by caller, and it is left locked + * on return. Returning dquot is locked. + */ +STATIC int +xfs_qm_dqlookup( + xfs_mount_t *mp, + xfs_dqid_t id, + xfs_dqhash_t *qh, + xfs_dquot_t **O_dqpp) +{ + xfs_dquot_t *dqp; + uint flist_locked; + xfs_dquot_t *d; + + ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); + + flist_locked = B_FALSE; + + /* + * Traverse the hashchain looking for a match + */ + for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) { + /* + * We already have the hashlock. We don't need the + * dqlock to look at the id field of the dquot, since the + * id can't be modified without the hashlock anyway. + */ + if (INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id && dqp->q_mount == mp) { + xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP"); + /* + * All in core dquots must be on the dqlist of mp + */ + ASSERT(dqp->MPL_PREVP != NULL); + + xfs_dqlock(dqp); + if (dqp->q_nrefs == 0) { + ASSERT (XFS_DQ_IS_ON_FREELIST(dqp)); + if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) { + xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT"); + + /* + * We may have raced with dqreclaim_one() + * (and lost). So, flag that we don't + * want the dquot to be reclaimed. 
+ */ + dqp->dq_flags |= XFS_DQ_WANT; + xfs_dqunlock(dqp); + xfs_qm_freelist_lock(xfs_Gqm); + xfs_dqlock(dqp); + dqp->dq_flags &= ~(XFS_DQ_WANT); + } + flist_locked = B_TRUE; + } + + /* + * id couldn't have changed; we had the hashlock all + * along + */ + ASSERT(INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id); + + if (flist_locked) { + if (dqp->q_nrefs != 0) { + xfs_qm_freelist_unlock(xfs_Gqm); + flist_locked = B_FALSE; + } else { + /* + * take it off the freelist + */ + xfs_dqtrace_entry(dqp, + "DQLOOKUP: TAKEOFF FL"); + XQM_FREELIST_REMOVE(dqp); + /* xfs_qm_freelist_print(&(xfs_Gqm-> + qm_dqfreelist), + "after removal"); */ + } + } + + /* + * grab a reference + */ + XFS_DQHOLD(dqp); + + if (flist_locked) + xfs_qm_freelist_unlock(xfs_Gqm); + /* + * move the dquot to the front of the hashchain + */ + ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); + if (dqp->HL_PREVP != &qh->qh_next) { + xfs_dqtrace_entry(dqp, + "DQLOOKUP: HASH MOVETOFRONT"); + if ((d = dqp->HL_NEXT)) + d->HL_PREVP = dqp->HL_PREVP; + *(dqp->HL_PREVP) = d; + d = qh->qh_next; + d->HL_PREVP = &dqp->HL_NEXT; + dqp->HL_NEXT = d; + dqp->HL_PREVP = &qh->qh_next; + qh->qh_next = dqp; + } + xfs_dqtrace_entry(dqp, "LOOKUP END"); + *O_dqpp = dqp; + ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); + return (0); + } + } + + *O_dqpp = NULL; + ASSERT(XFS_DQ_IS_HASH_LOCKED(qh)); + return (1); +} + +/* + * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a + * a locked dquot, doing an allocation (if requested) as needed. + * When both an inode and an id are given, the inode's id takes precedence. + * That is, if the id changes while we don't hold the ilock inside this + * function, the new dquot is returned, not necessarily the one requested + * in the id argument. + */ +int +xfs_qm_dqget( + xfs_mount_t *mp, + xfs_inode_t *ip, /* locked inode (optional) */ + xfs_dqid_t id, /* gid or uid, depending on type */ + uint type, /* UDQUOT or GDQUOT */ + uint flags, /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */ + xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */ +{ + xfs_dquot_t *dqp; + xfs_dqhash_t *h; + uint version; + int error; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) || + (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) { + return (ESRCH); + } + h = XFS_DQ_HASH(mp, id, type); + +#ifdef DEBUG + if (xfs_do_dqerror) { + if ((xfs_dqerror_dev == mp->m_dev) && + (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { + cmn_err(CE_DEBUG, "Returning error in dqget"); + return (EIO); + } + } +#endif + + again: + +#ifdef DEBUG + ASSERT(type == XFS_DQ_USER || type == XFS_DQ_GROUP); + if (ip) { + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + if (type == XFS_DQ_USER) + ASSERT(ip->i_udquot == NULL); + else + ASSERT(ip->i_gdquot == NULL); + } +#endif + XFS_DQ_HASH_LOCK(h); + + /* + * Look in the cache (hashtable). + * The chain is kept locked during lookup. + */ + if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) { + XQM_STATS_INC(xqmstats.xs_qm_dqcachehits); + /* + * The dquot was found, moved to the front of the chain, + * taken off the freelist if it was on it, and locked + * at this point. Just unlock the hashchain and return. + */ + ASSERT(*O_dqpp); + ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp)); + XFS_DQ_HASH_UNLOCK(h); + xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)"); + return (0); /* success */ + } + XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses); + + /* + * Dquot cache miss. We don't want to keep the inode lock across + * a (potential) disk read. Also we don't want to deal with the lock + * ordering between quotainode and this inode. 
OTOH, dropping the inode + * lock here means dealing with a chown that can happen before + * we re-acquire the lock. + */ + if (ip) + xfs_iunlock(ip, XFS_ILOCK_EXCL); + /* + * Save the hashchain version stamp, and unlock the chain, so that + * we don't keep the lock across a disk read + */ + version = h->qh_version; + XFS_DQ_HASH_UNLOCK(h); + + /* + * Allocate the dquot on the kernel heap, and read the ondisk + * portion off the disk. Also, do all the necessary initialization + * This can return ENOENT if dquot didn't exist on disk and we didn't + * ask it to allocate; ESRCH if quotas got turned off suddenly. + */ + if ((error = xfs_qm_idtodq(mp, id, type, + flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR| + XFS_QMOPT_DOWARN), + &dqp))) { + if (ip) + xfs_ilock(ip, XFS_ILOCK_EXCL); + return (error); + } + + /* + * See if this is mount code calling to look at the overall quota limits + * which are stored in the id == 0 user or group's dquot. + * Since we may not have done a quotacheck by this point, just return + * the dquot without attaching it to any hashtables, lists, etc, or even + * taking a reference. + * The caller must dqdestroy this once done. + */ + if (flags & XFS_QMOPT_DQSUSER) { + ASSERT(id == 0); + ASSERT(! ip); + goto dqret; + } + + /* + * Dquot lock comes after hashlock in the lock ordering + */ + ASSERT(! XFS_DQ_IS_LOCKED(dqp)); + if (ip) { + xfs_ilock(ip, XFS_ILOCK_EXCL); + if (! XFS_IS_DQTYPE_ON(mp, type)) { + /* inode stays locked on return */ + xfs_qm_dqdestroy(dqp); + return XFS_ERROR(ESRCH); + } + /* + * A dquot could be attached to this inode by now, since + * we had dropped the ilock. + */ + if (type == XFS_DQ_USER) { + if (ip->i_udquot) { + xfs_qm_dqdestroy(dqp); + dqp = ip->i_udquot; + xfs_dqlock(dqp); + goto dqret; + } + } else { + if (ip->i_gdquot) { + xfs_qm_dqdestroy(dqp); + dqp = ip->i_gdquot; + xfs_dqlock(dqp); + goto dqret; + } + } + } + + /* + * Hashlock comes after ilock in lock order + */ + XFS_DQ_HASH_LOCK(h); + if (version != h->qh_version) { + xfs_dquot_t *tmpdqp; + /* + * Now, see if somebody else put the dquot in the + * hashtable before us. This can happen because we didn't + * keep the hashchain lock. We don't have to worry about + * lock order between the two dquots here since dqp isn't + * on any findable lists yet. + */ + if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) { + /* + * Duplicate found. Just throw away the new dquot + * and start over. + */ + xfs_qm_dqput(tmpdqp); + XFS_DQ_HASH_UNLOCK(h); + xfs_qm_dqdestroy(dqp); + XQM_STATS_INC(xqmstats.xs_qm_dquot_dups); + goto again; + } + } + + /* + * Put the dquot at the beginning of the hash-chain and mp's list + * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock .. + */ + ASSERT(XFS_DQ_IS_HASH_LOCKED(h)); + dqp->q_hash = h; + XQM_HASHLIST_INSERT(h, dqp); + + /* + * Attach this dquot to this filesystem's list of all dquots, + * kept inside the mount structure in m_quotainfo field + */ + xfs_qm_mplist_lock(mp); + + /* + * We return a locked dquot to the caller, with a reference taken + */ + xfs_dqlock(dqp); + dqp->q_nrefs = 1; + + XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp); + + xfs_qm_mplist_unlock(mp); + XFS_DQ_HASH_UNLOCK(h); + dqret: + ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip)); + xfs_dqtrace_entry(dqp, "DQGET DONE"); + *O_dqpp = dqp; + return (0); +} + + +/* + * Release a reference to the dquot (decrement ref-count) + * and unlock it. If there is a group quota attached to this + * dquot, carefully release that too without tripping over + * deadlocks'n'stuff. 
+ */ +void +xfs_qm_dqput( + xfs_dquot_t *dqp) +{ + xfs_dquot_t *gdqp; + + ASSERT(dqp->q_nrefs > 0); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + xfs_dqtrace_entry(dqp, "DQPUT"); + + if (dqp->q_nrefs != 1) { + dqp->q_nrefs--; + xfs_dqunlock(dqp); + return; + } + + /* + * drop the dqlock and acquire the freelist and dqlock + * in the right order; but try to get it out-of-order first + */ + if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) { + xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT"); + xfs_dqunlock(dqp); + xfs_qm_freelist_lock(xfs_Gqm); + xfs_dqlock(dqp); + } + + while (1) { + gdqp = NULL; + + /* We can't depend on nrefs being == 1 here */ + if (--dqp->q_nrefs == 0) { + xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST"); + /* + * insert at end of the freelist. + */ + XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp); + + /* + * If we just added a udquot to the freelist, then + * we want to release the gdquot reference that + * it (probably) has. Otherwise it'll keep the + * gdquot from getting reclaimed. + */ + if ((gdqp = dqp->q_gdquot)) { + /* + * Avoid a recursive dqput call + */ + xfs_dqlock(gdqp); + dqp->q_gdquot = NULL; + } + + /* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist), + "@@@@@++ Free list (after append) @@@@@+"); + */ + } + xfs_dqunlock(dqp); + + /* + * If we had a group quota inside the user quota as a hint, + * release it now. + */ + if (! gdqp) + break; + dqp = gdqp; + } + xfs_qm_freelist_unlock(xfs_Gqm); +} + +/* + * Release a dquot. Flush it if dirty, then dqput() it. + * dquot must not be locked. + */ +void +xfs_qm_dqrele( + xfs_dquot_t *dqp) +{ + ASSERT(dqp); + xfs_dqtrace_entry(dqp, "DQRELE"); + + xfs_dqlock(dqp); + /* + * We don't care to flush it if the dquot is dirty here. + * That will create stutters that we want to avoid. + * Instead we do a delayed write when we try to reclaim + * a dirty dquot. Also xfs_sync will take part of the burden... + */ + xfs_qm_dqput(dqp); +} + + +/* + * Write a modified dquot to disk. + * The dquot must be locked and the flush lock too taken by caller. + * The flush lock will not be unlocked until the dquot reaches the disk, + * but the dquot is free to be unlocked and modified by the caller + * in the interim. Dquot is still locked on return. This behavior is + * identical to that of inodes. + */ +int +xfs_qm_dqflush( + xfs_dquot_t *dqp, + uint flags) +{ + xfs_mount_t *mp; + xfs_buf_t *bp; + xfs_disk_dquot_t *ddqp; + int error; + SPLDECL(s); + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); + xfs_dqtrace_entry(dqp, "DQFLUSH"); + + /* + * If not dirty, nada. + */ + if (!XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqfunlock(dqp); + return (0); + } + + /* + * Cant flush a pinned dquot. Wait for it. + */ + xfs_qm_dqunpin_wait(dqp); + + /* + * This may have been unpinned because the filesystem is shutting + * down forcibly. If that's the case we must not write this dquot + * to disk, because the log record didn't make it to disk! + */ + if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) { + dqp->dq_flags &= ~(XFS_DQ_DIRTY); + xfs_dqfunlock(dqp); + return XFS_ERROR(EIO); + } + + /* + * Get the buffer containing the on-disk dquot + * We don't need a transaction envelope because we know that the + * the ondisk-dquot has already been allocated for. 
+ */ + if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) { + xfs_dqtrace_entry(dqp, "DQTOBP FAIL"); + ASSERT(error != ENOENT); + /* + * Quotas could have gotten turned off (ESRCH) + */ + xfs_dqfunlock(dqp); + return (error); + } + + if (xfs_qm_dqcheck(&dqp->q_core, INT_GET(ddqp->d_id, ARCH_CONVERT), 0, XFS_QMOPT_DOWARN, + "dqflush (incore copy)")) { + xfs_force_shutdown(dqp->q_mount, XFS_CORRUPT_INCORE); + return XFS_ERROR(EIO); + } + + /* This is the only portion of data that needs to persist */ + memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t)); + + /* + * Clear the dirty field and remember the flush lsn for later use. + */ + dqp->dq_flags &= ~(XFS_DQ_DIRTY); + mp = dqp->q_mount; + + /* lsn is 64 bits */ + AIL_LOCK(mp, s); + dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn; + AIL_UNLOCK(mp, s); + + /* + * Attach an iodone routine so that we can remove this dquot from the + * AIL and release the flush lock once the dquot is synced to disk. + */ + xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *)) + xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item)); + /* + * If the buffer is pinned then push on the log so we won't + * get stuck waiting in the write for too long. + */ + if (XFS_BUF_ISPINNED(bp)) { + xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE"); + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + } + + if (flags & XFS_QMOPT_DELWRI) { + xfs_bdwrite(mp, bp); + } else if (flags & XFS_QMOPT_ASYNC) { + xfs_bawrite(mp, bp); + } else { + error = xfs_bwrite(mp, bp); + } + xfs_dqtrace_entry(dqp, "DQFLUSH END"); + /* + * dqp is still locked, but caller is free to unlock it now. + */ + return (error); + +} + +/* + * This is the dquot flushing I/O completion routine. It is called + * from interrupt level when the buffer containing the dquot is + * flushed to disk. It is responsible for removing the dquot logitem + * from the AIL if it has not been re-logged, and unlocking the dquot's + * flush lock. This behavior is very similar to that of inodes.. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_dqflush_done( + xfs_buf_t *bp, + xfs_dq_logitem_t *qip) +{ + xfs_dquot_t *dqp; + SPLDECL(s); + + dqp = qip->qli_dquot; + + /* + * We only want to pull the item from the AIL if its + * location in the log has not changed since we started the flush. + * Thus, we only bother if the dquot's lsn has + * not changed. First we check the lsn outside the lock + * since it's cheaper, and then we recheck while + * holding the lock before removing the dquot from the AIL. + */ + if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && + qip->qli_item.li_lsn == qip->qli_flush_lsn) { + + AIL_LOCK(dqp->q_mount, s); + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + if (qip->qli_item.li_lsn == qip->qli_flush_lsn) + xfs_trans_delete_ail(dqp->q_mount, + (xfs_log_item_t*)qip, s); + else + AIL_UNLOCK(dqp->q_mount, s); + } + + /* + * Release the dq's flush lock since we're done with it. 
+ */ + xfs_dqfunlock(dqp); +} + + +int +xfs_qm_dqflock_nowait( + xfs_dquot_t *dqp) +{ + int locked; + + locked = cpsema(&((dqp)->q_flock)); + + /* XXX ifdef these out */ + if (locked) + (dqp)->dq_flags |= XFS_DQ_FLOCKED; + return (locked); +} + + +int +xfs_qm_dqlock_nowait( + xfs_dquot_t *dqp) +{ + return (mutex_trylock(&((dqp)->q_qlock))); +} + +void +xfs_dqlock( + xfs_dquot_t *dqp) +{ + mutex_lock(&(dqp->q_qlock), PINOD); +} + +void +xfs_dqunlock( + xfs_dquot_t *dqp) +{ + mutex_unlock(&(dqp->q_qlock)); + if (dqp->q_logitem.qli_dquot == dqp) { + /* Once was dqp->q_mount, but might just have been cleared */ + xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_mountp, + (xfs_log_item_t*)&(dqp->q_logitem)); + } +} + + +void +xfs_dqunlock_nonotify( + xfs_dquot_t *dqp) +{ + mutex_unlock(&(dqp->q_qlock)); +} + +void +xfs_dqlock2( + xfs_dquot_t *d1, + xfs_dquot_t *d2) +{ + if (d1 && d2) { + ASSERT(d1 != d2); + if (INT_GET(d1->q_core.d_id, ARCH_CONVERT) > INT_GET(d2->q_core.d_id, ARCH_CONVERT)) { + xfs_dqlock(d2); + xfs_dqlock(d1); + } else { + xfs_dqlock(d1); + xfs_dqlock(d2); + } + } else { + if (d1) { + xfs_dqlock(d1); + } else if (d2) { + xfs_dqlock(d2); + } + } +} + + +/* + * Take a dquot out of the mount's dqlist as well as the hashlist. + * This is called via unmount as well as quotaoff, and the purge + * will always succeed unless there are soft (temp) references + * outstanding. + * + * This returns 0 if it was purged, 1 if it wasn't. It's not an error code + * that we're returning! XXXsup - not cool. + */ +/* ARGSUSED */ +int +xfs_qm_dqpurge( + xfs_dquot_t *dqp, + uint flags) +{ + xfs_dqhash_t *thishash; + xfs_mount_t *mp; + + mp = dqp->q_mount; + + ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); + ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash)); + + xfs_dqlock(dqp); + /* + * We really can't afford to purge a dquot that is + * referenced, because these are hard refs. + * It shouldn't happen in general because we went thru _all_ inodes in + * dqrele_all_inodes before calling this and didn't let the mountlock go. + * However it is possible that we have dquots with temporary + * references that are not attached to an inode. e.g. see xfs_setattr(). + */ + if (dqp->q_nrefs != 0) { + xfs_dqunlock(dqp); + XFS_DQ_HASH_UNLOCK(dqp->q_hash); + return (1); + } + + ASSERT(XFS_DQ_IS_ON_FREELIST(dqp)); + + /* + * If we're turning off quotas, we have to make sure that, for + * example, we don't delete quota disk blocks while dquots are + * in the process of getting written to those disk blocks. + * This dquot might well be on AIL, and we can't leave it there + * if we're turning off quotas. Basically, we need this flush + * lock, and are willing to block on it. + */ + if (! xfs_qm_dqflock_nowait(dqp)) { + /* + * Block on the flush lock after nudging dquot buffer, + * if it is incore. + */ + xfs_qm_dqflock_pushbuf_wait(dqp); + } + + /* + * XXXIf we're turning this type of quotas off, we don't care + * about the dirty metadata sitting in this dquot. OTOH, if + * we're unmounting, we do care, so we flush it and wait. + */ + if (XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY"); + /* dqflush unlocks dqflock */ + /* + * Given that dqpurge is a very rare occurrence, it is OK + * that we're holding the hashlist and mplist locks + * across the disk write. But, ... XXXsup + * + * We don't care about getting disk errors here. We need + * to purge this dquot anyway, so we go ahead regardless. 
+ */ + (void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC); + xfs_dqflock(dqp); + } + ASSERT(dqp->q_pincount == 0); + ASSERT(XFS_FORCED_SHUTDOWN(mp) || + !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); + + thishash = dqp->q_hash; + XQM_HASHLIST_REMOVE(thishash, dqp); + XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp); + /* + * XXX Move this to the front of the freelist, if we can get the + * freelist lock. + */ + ASSERT(XFS_DQ_IS_ON_FREELIST(dqp)); + + dqp->q_mount = NULL;; + dqp->q_hash = NULL; + dqp->dq_flags = XFS_DQ_INACTIVE; + memset(&dqp->q_core, 0, sizeof(dqp->q_core)); + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + XFS_DQ_HASH_UNLOCK(thishash); + return (0); +} + + +#ifdef QUOTADEBUG +void +xfs_qm_dqprint(xfs_dquot_t *dqp) +{ + cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------"); + cmn_err(CE_DEBUG, "---- dquotID = %d", + (int)INT_GET(dqp->q_core.d_id, ARCH_CONVERT)); + cmn_err(CE_DEBUG, "---- type = %s", + XFS_QM_ISUDQ(dqp) ? "USR" : "GRP"); + cmn_err(CE_DEBUG, "---- fs = 0x%p", dqp->q_mount); + cmn_err(CE_DEBUG, "---- blkno = 0x%x", (int) dqp->q_blkno); + cmn_err(CE_DEBUG, "---- boffset = 0x%x", (int) dqp->q_bufoffset); + cmn_err(CE_DEBUG, "---- blkhlimit = %Lu (0x%x)", + INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT), + (int) INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT)); + cmn_err(CE_DEBUG, "---- blkslimit = %Lu (0x%x)", + INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT), + (int)INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT)); + cmn_err(CE_DEBUG, "---- inohlimit = %Lu (0x%x)", + INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT), + (int)INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT)); + cmn_err(CE_DEBUG, "---- inoslimit = %Lu (0x%x)", + INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT), + (int)INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT)); + cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)", + INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), + (int)INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT)); + cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)", + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), + (int)INT_GET(dqp->q_core.d_icount, ARCH_CONVERT)); + cmn_err(CE_DEBUG, "---- btimer = %d", + (int)INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT)); + cmn_err(CE_DEBUG, "---- itimer = %d", + (int)INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT)); + cmn_err(CE_DEBUG, "---------------------------"); +} +#endif + +/* + * Give the buffer a little push if it is incore and + * wait on the flush lock. + */ +void +xfs_qm_dqflock_pushbuf_wait( + xfs_dquot_t *dqp) +{ + xfs_buf_t *bp; + + /* + * Check to see if the dquot has been flushed delayed + * write. If so, grab its buffer and send it + * out immediately. We'll be able to acquire + * the flush lock when the I/O completes. + */ + bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno, + XFS_QI_DQCHUNKLEN(dqp->q_mount), + XFS_INCORE_TRYLOCK); + if (bp != NULL) { + if (XFS_BUF_ISDELAYWRITE(bp)) { + if (XFS_BUF_ISPINNED(bp)) { + xfs_log_force(dqp->q_mount, + (xfs_lsn_t)0, + XFS_LOG_FORCE); + } + xfs_bawrite(dqp->q_mount, bp); + } else { + xfs_buf_relse(bp); + } + } + xfs_dqflock(dqp); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_dquot.h linux.22-ac2/fs/xfs/quota/xfs_dquot.h --- linux.vanilla/fs/xfs/quota/xfs_dquot.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_dquot.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DQUOT_H__ +#define __XFS_DQUOT_H__ + +/* + * Dquots are structures that hold quota information about a user or a group, + * much like inodes are for files. In fact, dquots share many characteristics + * with inodes. However, dquots can also be a centralized resource, relative + * to a collection of inodes. In this respect, dquots share some characteristics + * of the superblock. + * XFS dquots exploit both those in its algorithms. They make every attempt + * to not be a bottleneck when quotas are on and have minimal impact, if any, + * when quotas are off. + */ + +/* + * The hash chain headers (hash buckets) + */ +typedef struct xfs_dqhash { + struct xfs_dquot *qh_next; + mutex_t qh_lock; + uint qh_version; /* ever increasing version */ + uint qh_nelems; /* number of dquots on the list */ +} xfs_dqhash_t; + +typedef struct xfs_dqlink { + struct xfs_dquot *ql_next; /* forward link */ + struct xfs_dquot **ql_prevp; /* pointer to prev ql_next */ +} xfs_dqlink_t; + +struct xfs_mount; +struct xfs_trans; + +/* + * This is the marker which is designed to occupy the first few + * bytes of the xfs_dquot_t structure. Even inside this, the freelist pointers + * must come first. + * This serves as the marker ("sentinel") when we have to restart list + * iterations because of locking considerations. 
+ */ +typedef struct xfs_dqmarker { + struct xfs_dquot*dqm_flnext; /* link to freelist: must be first */ + struct xfs_dquot*dqm_flprev; + xfs_dqlink_t dqm_mplist; /* link to mount's list of dquots */ + xfs_dqlink_t dqm_hashlist; /* link to the hash chain */ + uint dqm_flags; /* various flags (XFS_DQ_*) */ +} xfs_dqmarker_t; + +/* + * The incore dquot structure + */ +typedef struct xfs_dquot { + xfs_dqmarker_t q_lists; /* list ptrs, q_flags (marker) */ + xfs_dqhash_t *q_hash; /* the hashchain header */ + struct xfs_mount*q_mount; /* filesystem this relates to */ + struct xfs_trans*q_transp; /* trans this belongs to currently */ + uint q_nrefs; /* # active refs from inodes */ + xfs_daddr_t q_blkno; /* blkno of dquot buffer */ + int q_bufoffset; /* off of dq in buffer (# dquots) */ + xfs_fileoff_t q_fileoffset; /* offset in quotas file */ + + struct xfs_dquot*q_gdquot; /* group dquot, hint only */ + xfs_disk_dquot_t q_core; /* actual usage & quotas */ + xfs_dq_logitem_t q_logitem; /* dquot log item */ + xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */ + xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */ + xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */ + mutex_t q_qlock; /* quota lock */ + sema_t q_flock; /* flush lock */ + uint q_pincount; /* pin count for this dquot */ + sv_t q_pinwait; /* sync var for pinning */ +#ifdef DQUOT_TRACING + struct ktrace *q_trace; /* trace header structure */ +#endif +} xfs_dquot_t; + + +#define dq_flnext q_lists.dqm_flnext +#define dq_flprev q_lists.dqm_flprev +#define dq_mplist q_lists.dqm_mplist +#define dq_hashlist q_lists.dqm_hashlist +#define dq_flags q_lists.dqm_flags + +#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++) + +/* + * Quota Accounting flags + */ +#define XFS_ALL_QUOTA_ACCT (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT) +#define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_GQUOTA_ENFD) +#define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_GQUOTA_CHKD) +#define XFS_ALL_QUOTA_ACTV (XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE) +#define XFS_ALL_QUOTA_ACCT_ENFD (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ + XFS_GQUOTA_ACCT|XFS_GQUOTA_ENFD) + +#define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT) +#define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT) +#define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT) + +/* + * Quota Limit Enforcement flags + */ +#define XFS_IS_QUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ENFD) +#define XFS_IS_UQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_UQUOTA_ENFD) +#define XFS_IS_GQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_GQUOTA_ENFD) + +#ifdef DEBUG +static inline int +XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp) +{ + if (mutex_trylock(&dqp->q_qlock)) { + mutex_unlock(&dqp->q_qlock); + return 0; + } + return 1; +} +#endif + + +/* + * The following three routines simply manage the q_flock + * semaphore embedded in the dquot. This semaphore synchronizes + * processes attempting to flush the in-core dquot back to disk. 
+ */ +#define xfs_dqflock(dqp) { psema(&((dqp)->q_flock), PINOD | PRECALC);\ + (dqp)->dq_flags |= XFS_DQ_FLOCKED; } +#define xfs_dqfunlock(dqp) { ASSERT(valusema(&((dqp)->q_flock)) <= 0); \ + vsema(&((dqp)->q_flock)); \ + (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); } + +#define XFS_DQ_PINLOCK(dqp) mutex_spinlock( \ + &(XFS_DQ_TO_QINF(dqp)->qi_pinlock)) +#define XFS_DQ_PINUNLOCK(dqp, s) mutex_spinunlock( \ + &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s) + +#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (valusema(&((dqp)->q_flock)) <= 0) +#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) +#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) +#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) +#define XFS_DQ_TO_QINF(dqp) ((dqp)->q_mount->m_quotainfo) +#define XFS_DQ_TO_QIP(dqp) (XFS_QM_ISUDQ(dqp) ? \ + XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \ + XFS_DQ_TO_QINF(dqp)->qi_gquotaip) + +#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \ + (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ + (XFS_IS_GQUOTA_ON((d)->q_mount)))) +#ifdef DQUOT_TRACING +/* + * Dquot Tracing stuff. + */ +#define DQUOT_TRACE_SIZE 64 +#define DQUOT_KTRACE_ENTRY 1 + +#define xfs_dqtrace_entry_ino(a,b,ip) \ +xfs_dqtrace_entry__((a), (b), (void*)__return_address, (ip)) +#define xfs_dqtrace_entry(a,b) \ +xfs_dqtrace_entry__((a), (b), (void*)__return_address, NULL) +extern void xfs_dqtrace_entry__(xfs_dquot_t *dqp, char *func, + void *, xfs_inode_t *); +#else +#define xfs_dqtrace_entry(a,b) +#define xfs_dqtrace_entry_ino(a,b,ip) +#endif +#ifdef QUOTADEBUG +extern void xfs_qm_dqprint(xfs_dquot_t *); +#else +#define xfs_qm_dqprint(a) +#endif + +extern void xfs_qm_dqdestroy(xfs_dquot_t *); +extern int xfs_qm_dqflush(xfs_dquot_t *, uint); +extern int xfs_qm_dqpurge(xfs_dquot_t *, uint); +extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); +extern int xfs_qm_dqlock_nowait(xfs_dquot_t *); +extern int xfs_qm_dqflock_nowait(xfs_dquot_t *); +extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp); +extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, + xfs_disk_dquot_t *); +extern int xfs_qm_dqwarn(xfs_disk_dquot_t *, uint); +extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, + xfs_dqid_t, uint, uint, xfs_dquot_t **); +extern void xfs_qm_dqput(xfs_dquot_t *); +extern void xfs_qm_dqrele(xfs_dquot_t *); +extern void xfs_dqlock(xfs_dquot_t *); +extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *); +extern void xfs_dqunlock(xfs_dquot_t *); +extern void xfs_dqunlock_nonotify(xfs_dquot_t *); + +#endif /* __XFS_DQUOT_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_dquot_item.c linux.22-ac2/fs/xfs/quota/xfs_dquot_item.c --- linux.vanilla/fs/xfs/quota/xfs_dquot_item.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_dquot_item.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,715 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. 
Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_priv.h" + +#include "xfs_qm.h" + + +/* + * returns the number of iovecs needed to log the given dquot item. + */ +/* ARGSUSED */ +STATIC uint +xfs_qm_dquot_logitem_size( + xfs_dq_logitem_t *logitem) +{ + /* + * we need only two iovecs, one for the format, one for the real thing + */ + return (2); +} + +/* + * fills in the vector of log iovecs for the given dquot log item. + */ +STATIC void +xfs_qm_dquot_logitem_format( + xfs_dq_logitem_t *logitem, + xfs_log_iovec_t *logvec) +{ + ASSERT(logitem); + ASSERT(logitem->qli_dquot); + + logvec->i_addr = (xfs_caddr_t)&logitem->qli_format; + logvec->i_len = sizeof(xfs_dq_logformat_t); + logvec++; + logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core; + logvec->i_len = sizeof(xfs_disk_dquot_t); + + ASSERT(2 == logitem->qli_item.li_desc->lid_size); + logitem->qli_format.qlf_size = 2; + +} + +/* + * Increment the pin count of the given dquot. + * This value is protected by pinlock spinlock in the xQM structure. + */ +STATIC void +xfs_qm_dquot_logitem_pin( + xfs_dq_logitem_t *logitem) +{ + unsigned long s; + xfs_dquot_t *dqp; + + dqp = logitem->qli_dquot; + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + s = XFS_DQ_PINLOCK(dqp); + dqp->q_pincount++; + XFS_DQ_PINUNLOCK(dqp, s); +} + +/* + * Decrement the pin count of the given dquot, and wake up + * anyone in xfs_dqwait_unpin() if the count goes to 0. The + * dquot must have been previously pinned with a call to xfs_dqpin(). 
+ */ +/* ARGSUSED */ +STATIC void +xfs_qm_dquot_logitem_unpin( + xfs_dq_logitem_t *logitem, + int stale) +{ + unsigned long s; + xfs_dquot_t *dqp; + + dqp = logitem->qli_dquot; + ASSERT(dqp->q_pincount > 0); + s = XFS_DQ_PINLOCK(dqp); + dqp->q_pincount--; + if (dqp->q_pincount == 0) { + sv_broadcast(&dqp->q_pinwait); + } + XFS_DQ_PINUNLOCK(dqp, s); +} + +/* ARGSUSED */ +STATIC void +xfs_qm_dquot_logitem_unpin_remove( + xfs_dq_logitem_t *logitem, + xfs_trans_t *tp) +{ + xfs_qm_dquot_logitem_unpin(logitem, 0); +} + +/* + * Given the logitem, this writes the corresponding dquot entry to disk + * asynchronously. This is called with the dquot entry securely locked; + * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot + * at the end. + */ +STATIC void +xfs_qm_dquot_logitem_push( + xfs_dq_logitem_t *logitem) +{ + xfs_dquot_t *dqp; + + dqp = logitem->qli_dquot; + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); + + /* + * Since we were able to lock the dquot's flush lock and + * we found it on the AIL, the dquot must be dirty. This + * is because the dquot is removed from the AIL while still + * holding the flush lock in xfs_dqflush_done(). Thus, if + * we found it in the AIL and were able to obtain the flush + * lock without sleeping, then there must not have been + * anyone in the process of flushing the dquot. + */ + xfs_qm_dqflush(dqp, XFS_B_DELWRI); + xfs_dqunlock(dqp); +} + +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_qm_dquot_logitem_committed( + xfs_dq_logitem_t *l, + xfs_lsn_t lsn) +{ + /* + * We always re-log the entire dquot when it becomes dirty, + * so, the latest copy _is_ the only one that matters. + */ + return (lsn); +} + + +/* + * This is called to wait for the given dquot to be unpinned. + * Most of these pin/unpin routines are plagiarized from inode code. + */ +void +xfs_qm_dqunpin_wait( + xfs_dquot_t *dqp) +{ + SPLDECL(s); + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + if (dqp->q_pincount == 0) { + return; + } + + /* + * Give the log a push so we don't wait here too long. + */ + xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE); + s = XFS_DQ_PINLOCK(dqp); + if (dqp->q_pincount == 0) { + XFS_DQ_PINUNLOCK(dqp, s); + return; + } + sv_wait(&(dqp->q_pinwait), PINOD, + &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s); +} + +/* + * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that + * the dquot is locked by us, but the flush lock isn't. So, here we are + * going to see if the relevant dquot buffer is incore, waiting on DELWRI. + * If so, we want to push it out to help us take this item off the AIL as soon + * as possible. + * + * We must not be holding the AIL_LOCK at this point. Calling incore() to + * search the buffercache can be a time consuming thing, and AIL_LOCK is a + * spinlock. + */ +STATIC void +xfs_qm_dquot_logitem_pushbuf( + xfs_dq_logitem_t *qip) +{ + xfs_dquot_t *dqp; + xfs_mount_t *mp; + xfs_buf_t *bp; + uint dopush; + + dqp = qip->qli_dquot; + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + + /* + * The qli_pushbuf_flag keeps others from + * trying to duplicate our effort. + */ + ASSERT(qip->qli_pushbuf_flag != 0); + ASSERT(qip->qli_push_owner == get_thread_id()); + + /* + * If flushlock isn't locked anymore, chances are that the + * inode flush completed and the inode was taken off the AIL. + * So, just get out. 
+ */ + if ((valusema(&(dqp->q_flock)) > 0) || + ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) { + qip->qli_pushbuf_flag = 0; + xfs_dqunlock(dqp); + return; + } + mp = dqp->q_mount; + bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno, + XFS_QI_DQCHUNKLEN(mp), + XFS_INCORE_TRYLOCK); + if (bp != NULL) { + if (XFS_BUF_ISDELAYWRITE(bp)) { + dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && + (valusema(&(dqp->q_flock)) <= 0)); + qip->qli_pushbuf_flag = 0; + xfs_dqunlock(dqp); + + if (XFS_BUF_ISPINNED(bp)) { + xfs_log_force(mp, (xfs_lsn_t)0, + XFS_LOG_FORCE); + } + if (dopush) { +#ifdef XFSRACEDEBUG + delay_for_intr(); + delay(300); +#endif + xfs_bawrite(mp, bp); + } else { + xfs_buf_relse(bp); + } + } else { + qip->qli_pushbuf_flag = 0; + xfs_dqunlock(dqp); + xfs_buf_relse(bp); + } + return; + } + + qip->qli_pushbuf_flag = 0; + xfs_dqunlock(dqp); +} + +/* + * This is called to attempt to lock the dquot associated with this + * dquot log item. Don't sleep on the dquot lock or the flush lock. + * If the flush lock is already held, indicating that the dquot has + * been or is in the process of being flushed, then see if we can + * find the dquot's buffer in the buffer cache without sleeping. If + * we can and it is marked delayed write, then we want to send it out. + * We delay doing so until the push routine, though, to avoid sleeping + * in any device strategy routines. + */ +STATIC uint +xfs_qm_dquot_logitem_trylock( + xfs_dq_logitem_t *qip) +{ + xfs_dquot_t *dqp; + uint retval; + + dqp = qip->qli_dquot; + if (dqp->q_pincount > 0) + return (XFS_ITEM_PINNED); + + if (! xfs_qm_dqlock_nowait(dqp)) + return (XFS_ITEM_LOCKED); + + retval = XFS_ITEM_SUCCESS; + if (! xfs_qm_dqflock_nowait(dqp)) { + /* + * The dquot is already being flushed. It may have been + * flushed delayed write, however, and we don't want to + * get stuck waiting for that to complete. So, we want to check + * to see if we can lock the dquot's buffer without sleeping. + * If we can and it is marked for delayed write, then we + * hold it and send it out from the push routine. We don't + * want to do that now since we might sleep in the device + * strategy routine. We also don't want to grab the buffer lock + * here because we'd like not to call into the buffer cache + * while holding the AIL_LOCK. + * Make sure to only return PUSHBUF if we set pushbuf_flag + * ourselves. If someone else is doing it then we don't + * want to go to the push routine and duplicate their efforts. + */ + if (qip->qli_pushbuf_flag == 0) { + qip->qli_pushbuf_flag = 1; + ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno); +#ifdef DEBUG + qip->qli_push_owner = get_thread_id(); +#endif + /* + * The dquot is left locked. + */ + retval = XFS_ITEM_PUSHBUF; + } else { + retval = XFS_ITEM_FLUSHING; + xfs_dqunlock_nonotify(dqp); + } + } + + ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL); + return (retval); +} + + +/* + * Unlock the dquot associated with the log item. + * Clear the fields of the dquot and dquot log item that + * are specific to the current transaction. If the + * hold flags is set, do not unlock the dquot. + */ +STATIC void +xfs_qm_dquot_logitem_unlock( + xfs_dq_logitem_t *ql) +{ + xfs_dquot_t *dqp; + + ASSERT(ql != NULL); + dqp = ql->qli_dquot; + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + + /* + * Clear the transaction pointer in the dquot + */ + dqp->q_transp = NULL; + + /* + * dquots are never 'held' from getting unlocked at the end of + * a transaction. 
Their locking and unlocking is hidden inside the + * transaction layer, within trans_commit. Hence, no LI_HOLD flag + * for the logitem. + */ + xfs_dqunlock(dqp); +} + + +/* + * The transaction with the dquot locked has aborted. The dquot + * must not be dirty within the transaction. We simply unlock just + * as if the transaction had been cancelled. + */ +STATIC void +xfs_qm_dquot_logitem_abort( + xfs_dq_logitem_t *ql) +{ + xfs_qm_dquot_logitem_unlock(ql); +} + +/* + * this needs to stamp an lsn into the dquot, I think. + * rpc's that look at user dquot's would then have to + * push on the dependency recorded in the dquot + */ +/* ARGSUSED */ +STATIC void +xfs_qm_dquot_logitem_committing( + xfs_dq_logitem_t *l, + xfs_lsn_t lsn) +{ + return; +} + + +/* + * This is the ops vector for dquots + */ +struct xfs_item_ops xfs_dquot_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_qm_dquot_logitem_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int)) + xfs_qm_dquot_logitem_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) + xfs_qm_dquot_logitem_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*)) + xfs_qm_dquot_logitem_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_dquot_logitem_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_abort, + .iop_pushbuf = (void(*)(xfs_log_item_t*)) + xfs_qm_dquot_logitem_pushbuf, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_dquot_logitem_committing +}; + +/* + * Initialize the dquot log item for a newly allocated dquot. + * The dquot isn't locked at this point, but it isn't on any of the lists + * either, so we don't care. + */ +void +xfs_qm_dquot_logitem_init( + struct xfs_dquot *dqp) +{ + xfs_dq_logitem_t *lp; + lp = &dqp->q_logitem; + + lp->qli_item.li_type = XFS_LI_DQUOT; + lp->qli_item.li_ops = &xfs_dquot_item_ops; + lp->qli_item.li_mountp = dqp->q_mount; + lp->qli_dquot = dqp; + lp->qli_format.qlf_type = XFS_LI_DQUOT; + lp->qli_format.qlf_id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT); + lp->qli_format.qlf_blkno = dqp->q_blkno; + lp->qli_format.qlf_len = 1; + /* + * This is just the offset of this dquot within its buffer + * (which is currently 1 FSB and probably won't change). + * Hence 32 bits for this offset should be just fine. + * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t)) + * here, and recompute it at recovery time. + */ + lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset; +} + +/*------------------ QUOTAOFF LOG ITEMS -------------------*/ + +/* + * This returns the number of iovecs needed to log the given quotaoff item. + * We only need 1 iovec for an quotaoff item. It just logs the + * quotaoff_log_format structure. + */ +/*ARGSUSED*/ +STATIC uint +xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf) +{ + return (1); +} + +/* + * This is called to fill in the vector of log iovecs for the + * given quotaoff log item. We use only 1 iovec, and we point that + * at the quotaoff_log_format structure embedded in the quotaoff item. + * It is at this point that we assert that all of the extent + * slots in the quotaoff item have been filled. 
+ */ +STATIC void +xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf, + xfs_log_iovec_t *log_vector) +{ + ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF); + + log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format); + log_vector->i_len = sizeof(xfs_qoff_logitem_t); + qf->qql_format.qf_size = 1; +} + + +/* + * Pinning has no meaning for an quotaoff item, so just return. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf) +{ + return; +} + + +/* + * Since pinning has no meaning for an quotaoff item, unpinning does + * not either. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale) +{ + return; +} + +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp) +{ + return; +} + +/* + * Quotaoff items have no locking, so just return success. + */ +/*ARGSUSED*/ +STATIC uint +xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf) +{ + return XFS_ITEM_LOCKED; +} + +/* + * Quotaoff items have no locking or pushing, so return failure + * so that the caller doesn't bother with us. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf) +{ + return; +} + +/* + * The quotaoff-start-item is logged only once and cannot be moved in the log, + * so simply return the lsn at which it's been logged. + */ +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn) +{ + return (lsn); +} + +/* + * The transaction of which this QUOTAOFF is a part has been aborted. + * Just clean up after ourselves. + * Shouldn't this never happen in the case of qoffend logitems? XXX + */ +STATIC void +xfs_qm_qoff_logitem_abort(xfs_qoff_logitem_t *qf) +{ + kmem_free(qf, sizeof(xfs_qoff_logitem_t)); +} + +/* + * There isn't much you can do to push on an quotaoff item. It is simply + * stuck waiting for the log to be flushed to disk. + */ +/*ARGSUSED*/ +STATIC void +xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf) +{ + return; +} + + +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_qm_qoffend_logitem_committed( + xfs_qoff_logitem_t *qfe, + xfs_lsn_t lsn) +{ + xfs_qoff_logitem_t *qfs; + SPLDECL(s); + + qfs = qfe->qql_start_lip; + AIL_LOCK(qfs->qql_item.li_mountp,s); + /* + * Delete the qoff-start logitem from the AIL. + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs, s); + kmem_free(qfs, sizeof(xfs_qoff_logitem_t)); + kmem_free(qfe, sizeof(xfs_qoff_logitem_t)); + return (xfs_lsn_t)-1; +} + +/* + * XXX rcc - don't know quite what to do with this. I think we can + * just ignore it. The only time that isn't the case is if we allow + * the client to somehow see that quotas have been turned off in which + * we can't allow that to get back until the quotaoff hits the disk. + * So how would that happen? Also, do we need different routines for + * quotaoff start and quotaoff end? I suspect the answer is yes but + * to be sure, I need to look at the recovery code and see how quota off + * recovery is handled (do we roll forward or back or do something else). + * If we roll forwards or backwards, then we need two separate routines, + * one that does nothing and one that stamps in the lsn that matters + * (truly makes the quotaoff irrevocable). If we do something else, + * then maybe we don't need two. 
+ */ +/* ARGSUSED */ +STATIC void +xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn) +{ + return; +} + +/* ARGSUSED */ +STATIC void +xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn) +{ + return; +} + +struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_qm_qoff_logitem_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, + .iop_unpin = (void(*)(xfs_log_item_t* ,int)) + xfs_qm_qoff_logitem_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) + xfs_qm_qoff_logitem_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_qoffend_logitem_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_qoffend_logitem_committing +}; + +/* + * This is the ops vector shared by all quotaoff-start log items. + */ +struct xfs_item_ops xfs_qm_qoff_logitem_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_qm_qoff_logitem_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int)) + xfs_qm_qoff_logitem_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) + xfs_qm_qoff_logitem_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_qoff_logitem_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_qm_qoff_logitem_committing +}; + +/* + * Allocate and initialize an quotaoff item of the correct quota type(s). + */ +xfs_qoff_logitem_t * +xfs_qm_qoff_logitem_init( + struct xfs_mount *mp, + xfs_qoff_logitem_t *start, + uint flags) +{ + xfs_qoff_logitem_t *qf; + + qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP); + + qf->qql_item.li_type = XFS_LI_QUOTAOFF; + if (start) + qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops; + else + qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops; + qf->qql_item.li_mountp = mp; + qf->qql_format.qf_type = XFS_LI_QUOTAOFF; + qf->qql_format.qf_flags = flags; + qf->qql_start_lip = start; + return (qf); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_dquot_item.h linux.22-ac2/fs/xfs/quota/xfs_dquot_item.h --- linux.vanilla/fs/xfs/quota/xfs_dquot_item.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_dquot_item.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DQUOT_ITEM_H__ +#define __XFS_DQUOT_ITEM_H__ + +struct xfs_dquot; +struct xfs_trans; +struct xfs_mount; +struct xfs_qoff_logitem; + +typedef struct xfs_dq_logitem { + xfs_log_item_t qli_item; /* common portion */ + struct xfs_dquot *qli_dquot; /* dquot ptr */ + xfs_lsn_t qli_flush_lsn; /* lsn at last flush */ + unsigned short qli_pushbuf_flag; /* 1 bit used in push_ail */ +#ifdef DEBUG + uint64_t qli_push_owner; +#endif + xfs_dq_logformat_t qli_format; /* logged structure */ +} xfs_dq_logitem_t; + +typedef struct xfs_qoff_logitem { + xfs_log_item_t qql_item; /* common portion */ + struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */ + xfs_qoff_logformat_t qql_format; /* logged structure */ +} xfs_qoff_logitem_t; + + +extern void xfs_qm_dquot_logitem_init(struct xfs_dquot *); +extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *, + struct xfs_qoff_logitem *, uint); +extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *, + struct xfs_qoff_logitem *, uint); +extern void xfs_trans_log_quotaoff_item(struct xfs_trans *, + struct xfs_qoff_logitem *); + +#endif /* __XFS_DQUOT_ITEM_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_qm_bhv.c linux.22-ac2/fs/xfs/quota/xfs_qm_bhv.c --- linux.vanilla/fs/xfs/quota/xfs_qm_bhv.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_qm_bhv.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_clnt.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" + +#include "xfs_qm.h" + +#define MNTOPT_QUOTA "quota" /* disk quotas (user) */ +#define MNTOPT_NOQUOTA "noquota" /* no quotas */ +#define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ +#define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ +#define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ +#define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ +#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ +#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ +#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ + +STATIC int +xfs_qm_parseargs( + struct bhv_desc *bhv, + char *options, + struct xfs_mount_args *args, + int update) +{ + size_t length; + char *local_options = options; + char *this_char; + int error; + int referenced = update; + + while ((this_char = strsep(&local_options, ",")) != NULL) { + length = strlen(this_char); + if (local_options) + length++; + + if (!strcmp(this_char, MNTOPT_NOQUOTA)) { + args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA); + args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA); + referenced = update; + } else if (!strcmp(this_char, MNTOPT_QUOTA) || + !strcmp(this_char, MNTOPT_UQUOTA) || + !strcmp(this_char, MNTOPT_USRQUOTA)) { + args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF; + referenced = 1; + } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || + !strcmp(this_char, MNTOPT_UQUOTANOENF)) { + args->flags |= XFSMNT_UQUOTA; + args->flags &= ~XFSMNT_UQUOTAENF; + referenced = 1; + } else if (!strcmp(this_char, MNTOPT_GQUOTA) || + !strcmp(this_char, MNTOPT_GRPQUOTA)) { + args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF; + referenced = 1; + } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { + args->flags |= XFSMNT_GQUOTA; + args->flags &= ~XFSMNT_GQUOTAENF; + referenced = 1; + } else { + if (local_options) + *(local_options-1) = ','; + continue; + } + + while (length--) + *this_char++ = ','; + } + + PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error); + if (!error && !referenced) + bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_QM); + return error; +} + +STATIC int +xfs_qm_showargs( + struct bhv_desc *bhv, + struct seq_file *m) +{ + struct vfs *vfsp = bhvtovfs(bhv); + struct xfs_mount *mp = 
XFS_VFSTOM(vfsp); + int error; + + if (mp->m_qflags & XFS_UQUOTA_ACCT) { + (mp->m_qflags & XFS_UQUOTA_ENFD) ? + seq_puts(m, "," MNTOPT_USRQUOTA) : + seq_puts(m, "," MNTOPT_UQUOTANOENF); + } + + if (mp->m_qflags & XFS_GQUOTA_ACCT) { + (mp->m_qflags & XFS_GQUOTA_ENFD) ? + seq_puts(m, "," MNTOPT_GRPQUOTA) : + seq_puts(m, "," MNTOPT_GQUOTANOENF); + } + + if (!(mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT))) + seq_puts(m, "," MNTOPT_NOQUOTA); + + PVFS_SHOWARGS(BHV_NEXT(bhv), m, error); + return error; +} + +STATIC int +xfs_qm_mount( + struct bhv_desc *bhv, + struct xfs_mount_args *args, + struct cred *cr) +{ + struct vfs *vfsp = bhvtovfs(bhv); + struct xfs_mount *mp = XFS_VFSTOM(vfsp); + int error; + + if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA)) + xfs_qm_mount_quotainit(mp, args->flags); + PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error); + return error; +} + +STATIC int +xfs_qm_syncall( + struct bhv_desc *bhv, + int flags, + cred_t *credp) +{ + struct vfs *vfsp = bhvtovfs(bhv); + struct xfs_mount *mp = XFS_VFSTOM(vfsp); + int error; + + /* + * Get the Quota Manager to flush the dquots. + */ + if (XFS_IS_QUOTA_ON(mp)) { + if ((error = xfs_qm_sync(mp, flags))) { + /* + * If we got an IO error, we will be shutting down. + * So, there's nothing more for us to do here. + */ + ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp)); + if (XFS_FORCED_SHUTDOWN(mp)) { + return XFS_ERROR(error); + } + } + } + PVFS_SYNC(BHV_NEXT(bhv), flags, credp, error); + return error; +} + +/* + * When xfsquotas isn't installed and the superblock had quotas, we need to + * clear the quotaflags from superblock. + */ +STATIC void +xfs_mount_reset_sbqflags( + xfs_mount_t *mp) +{ + xfs_trans_t *tp; + unsigned long s; + + mp->m_qflags = 0; + /* + * It is OK to look at sb_qflags here in mount path, + * without SB_LOCK. + */ + if (mp->m_sb.sb_qflags == 0) + return; + s = XFS_SB_LOCK(mp); + mp->m_sb.sb_qflags = 0; + XFS_SB_UNLOCK(mp, s); + + /* + * if the fs is readonly, let the incore superblock run + * with quotas off but don't flush the update out to disk + */ + if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY) + return; +#ifdef QUOTADEBUG + xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes"); +#endif + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); + if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, + XFS_DEFAULT_LOG_COUNT)) { + xfs_trans_cancel(tp, 0); + return; + } + xfs_mod_sb(tp, XFS_SB_QFLAGS); + xfs_trans_commit(tp, 0, NULL); +} + +STATIC int +xfs_qm_newmount( + xfs_mount_t *mp, + uint *needquotamount, + uint *quotaflags) +{ + uint quotaondisk; + uint uquotaondisk = 0, gquotaondisk = 0; + + *quotaflags = 0; + *needquotamount = B_FALSE; + + quotaondisk = XFS_SB_VERSION_HASQUOTA(&mp->m_sb) && + mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT); + + if (quotaondisk) { + uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT; + gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT; + } + + /* + * If the device itself is read-only, we can't allow + * the user to change the state of quota on the mount - + * this would generate a transaction on the ro device, + * which would lead to an I/O error and shutdown + */ + + if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) || + (!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) || + (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) || + (!gquotaondisk && XFS_IS_GQUOTA_ON(mp))) && + xfs_dev_is_read_only(mp, "changing quota state")) { + cmn_err(CE_WARN, + "XFS: please mount with%s%s%s.", + (!quotaondisk ? "out quota" : ""), + (uquotaondisk ? " usrquota" : ""), + (gquotaondisk ? 
" grpquota" : "")); + return XFS_ERROR(EPERM); + } + + if (XFS_IS_QUOTA_ON(mp) || quotaondisk) { + /* + * Call mount_quotas at this point only if we won't have to do + * a quotacheck. + */ + if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) { + /* + * If the xfs quota code isn't installed, + * we have to reset the quotachk'd bit. + * If an error occured, qm_mount_quotas code + * has already disabled quotas. So, just finish + * mounting, and get on with the boring life + * without disk quotas. + */ + if (xfs_qm_mount_quotas(mp)) + xfs_mount_reset_sbqflags(mp); + } else { + /* + * Clear the quota flags, but remember them. This + * is so that the quota code doesn't get invoked + * before we're ready. This can happen when an + * inode goes inactive and wants to free blocks, + * or via xfs_log_mount_finish. + */ + *needquotamount = B_TRUE; + *quotaflags = mp->m_qflags; + mp->m_qflags = 0; + } + } + + return 0; +} + +STATIC int +xfs_qm_endmount( + xfs_mount_t *mp, + uint needquotamount, + uint quotaflags) +{ + if (needquotamount) { + ASSERT(mp->m_qflags == 0); + mp->m_qflags = quotaflags; + if (xfs_qm_mount_quotas(mp)) + xfs_mount_reset_sbqflags(mp); + } + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + if (! (XFS_IS_QUOTA_ON(mp))) + xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on"); + else + xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on"); +#endif + +#ifdef QUOTADEBUG + if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp)) + cmn_err(CE_WARN, "XFS: mount internalqcheck failed"); +#endif + + return 0; +} + +STATIC void +xfs_qm_dqrele_null( + xfs_dquot_t *dq) +{ + /* + * Called from XFS, where we always check first for a NULL dquot. + */ + if (!dq) + return; + xfs_qm_dqrele(dq); +} + + +struct xfs_qmops xfs_qmcore_xfs = { + .xfs_qminit = xfs_qm_newmount, + .xfs_qmdone = xfs_qm_unmount_quotadestroy, + .xfs_qmmount = xfs_qm_endmount, + .xfs_qmunmount = xfs_qm_unmount_quotas, + .xfs_dqrele = xfs_qm_dqrele_null, + .xfs_dqattach = xfs_qm_dqattach, + .xfs_dqdetach = xfs_qm_dqdetach, + .xfs_dqpurgeall = xfs_qm_dqpurge_all, + .xfs_dqvopalloc = xfs_qm_vop_dqalloc, + .xfs_dqvopcreate = xfs_qm_vop_dqattach_and_dqmod_newinode, + .xfs_dqvoprename = xfs_qm_vop_rename_dqattach, + .xfs_dqvopchown = xfs_qm_vop_chown, + .xfs_dqvopchownresv = xfs_qm_vop_chown_reserve, + .xfs_dqtrxops = &xfs_trans_dquot_ops, +}; + +struct bhv_vfsops xfs_qmops = { { + BHV_IDENTITY_INIT(VFS_BHV_QM, VFS_POSITION_QM), + .vfs_parseargs = xfs_qm_parseargs, + .vfs_showargs = xfs_qm_showargs, + .vfs_mount = xfs_qm_mount, + .vfs_sync = xfs_qm_syncall, + .vfs_quotactl = xfs_qm_quotactl, }, +}; + + +void __init +xfs_qm_init(void) +{ + static char message[] __initdata = + KERN_INFO "SGI XFS Quota Management subsystem\n"; + + printk(message); + mutex_init(&xfs_Gqm_lock, MUTEX_DEFAULT, "xfs_qmlock"); + vfs_bhv_set_custom(&xfs_qmops, &xfs_qmcore_xfs); + xfs_qm_init_procfs(); +} + +void __exit +xfs_qm_exit(void) +{ + vfs_bhv_clr_custom(&xfs_qmops); + xfs_qm_cleanup_procfs(); + if (qm_dqzone) + kmem_cache_destroy(qm_dqzone); + if (qm_dqtrxzone) + kmem_cache_destroy(qm_dqtrxzone); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_qm.c linux.22-ac2/fs/xfs/quota/xfs_qm.c --- linux.vanilla/fs/xfs/quota/xfs_qm.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_qm.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,2831 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_clnt.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_space.h" +#include "xfs_utils.h" + +#include "xfs_qm.h" + +/* + * The global quota manager. There is only one of these for the entire + * system, _not_ one per file system. XQM keeps track of the overall + * quota functionality, including maintaining the freelist and hash + * tables of dquots. + */ +mutex_t xfs_Gqm_lock; +struct xfs_qm *xfs_Gqm; +EXPORT_SYMBOL(xfs_Gqm); /* used by xfsidbg */ + +kmem_zone_t *qm_dqzone; +kmem_zone_t *qm_dqtrxzone; + +STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); +STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); +STATIC int xfs_qm_quotacheck(xfs_mount_t *); + +STATIC int xfs_qm_init_quotainos(xfs_mount_t *); +STATIC void xfs_qm_shake(void); + +#ifdef DEBUG +extern mutex_t qcheck_lock; +#endif + +#ifdef QUOTADEBUG +#define XQM_LIST_PRINT(l, NXT, title) \ +{ \ + xfs_dquot_t *dqp; int i = 0; \ + cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \ + for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \ + cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " \ + "bcnt = %d, icnt = %d, refs = %d", \ + ++i, (int) INT_GET(dqp->q_core.d_id, ARCH_CONVERT), \ + DQFLAGTO_TYPESTR(dqp), \ + (int) INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), \ + (int) INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), \ + (int) dqp->q_nrefs); } \ +} +#else +#define XQM_LIST_PRINT(l, NXT, title) do { } while (0) +#endif + +/* + * Initialize the XQM structure. 
+ * Note that there is not one quota manager per file system. + */ +STATIC struct xfs_qm * +xfs_Gqm_init(void) +{ + xfs_qm_t *xqm; + int hsize, i; + + xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); + ASSERT(xqm); + + /* + * Initialize the dquot hash tables. + */ + hsize = (DQUOT_HASH_HEURISTIC < XFS_QM_NCSIZE_THRESHOLD) ? + XFS_QM_HASHSIZE_LOW : XFS_QM_HASHSIZE_HIGH; + xqm->qm_dqhashmask = hsize - 1; + + xqm->qm_usr_dqhtable = (xfs_dqhash_t *)kmem_zalloc(hsize * + sizeof(xfs_dqhash_t), + KM_SLEEP); + xqm->qm_grp_dqhtable = (xfs_dqhash_t *)kmem_zalloc(hsize * + sizeof(xfs_dqhash_t), + KM_SLEEP); + ASSERT(xqm->qm_usr_dqhtable != NULL); + ASSERT(xqm->qm_grp_dqhtable != NULL); + + for (i = 0; i < hsize; i++) { + xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i); + xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i); + } + + /* + * Freelist of all dquots of all file systems + */ + xfs_qm_freelist_init(&(xqm->qm_dqfreelist)); + + /* + * dquot zone. we register our own low-memory callback. + */ + if (!qm_dqzone) { + xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t), + "xfs_dquots"); + qm_dqzone = xqm->qm_dqzone; + } else + xqm->qm_dqzone = qm_dqzone; + + kmem_shake_register(xfs_qm_shake); + + /* + * The t_dqinfo portion of transactions. + */ + if (!qm_dqtrxzone) { + xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t), + "xfs_dqtrx"); + qm_dqtrxzone = xqm->qm_dqtrxzone; + } else + xqm->qm_dqtrxzone = qm_dqtrxzone; + + atomic_set(&xqm->qm_totaldquots, 0); + xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO; + xqm->qm_nrefs = 0; +#ifdef DEBUG + mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk"); +#endif + return xqm; +} + +/* + * Destroy the global quota manager when its reference count goes to zero. + */ +void +xfs_qm_destroy( + struct xfs_qm *xqm) +{ + int hsize, i; + + ASSERT(xqm != NULL); + ASSERT(xqm->qm_nrefs == 0); + kmem_shake_deregister(xfs_qm_shake); + hsize = xqm->qm_dqhashmask + 1; + for (i = 0; i < hsize; i++) { + xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); + xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i])); + } + kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t)); + kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t)); + xqm->qm_usr_dqhtable = NULL; + xqm->qm_grp_dqhtable = NULL; + xqm->qm_dqhashmask = 0; + xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist)); +#ifdef DEBUG + mutex_destroy(&qcheck_lock); +#endif + kmem_free(xqm, sizeof(xfs_qm_t)); +} + +/* + * Called at mount time to let XQM know that another file system is + * starting quotas. This isn't crucial information as the individual mount + * structures are pretty independent, but it helps the XQM keep a + * global view of what's going on. + */ +/* ARGSUSED */ +STATIC int +xfs_qm_hold_quotafs_ref( + struct xfs_mount *mp) +{ + /* + * Need to lock the xfs_Gqm structure for things like this. For example, + * the structure could disappear between the entry to this routine and + * a HOLD operation if not locked. + */ + XFS_QM_LOCK(xfs_Gqm); + + if (xfs_Gqm == NULL) { + if ((xfs_Gqm = xfs_Gqm_init()) == NULL) { + return (XFS_ERROR(EINVAL)); + } + } + /* + * We can keep a list of all filesystems with quotas mounted for + * debugging and statistical purposes, but ... + * Just take a reference and get out. + */ + XFS_QM_HOLD(xfs_Gqm); + XFS_QM_UNLOCK(xfs_Gqm); + + return 0; +} + + +/* + * Release the reference that a filesystem took at mount time, + * so that we know when we need to destroy the entire quota manager. 
+ */ +/* ARGSUSED */ +STATIC void +xfs_qm_rele_quotafs_ref( + struct xfs_mount *mp) +{ + xfs_dquot_t *dqp, *nextdqp; + + ASSERT(xfs_Gqm); + ASSERT(xfs_Gqm->qm_nrefs > 0); + + /* + * Go thru the freelist and destroy all inactive dquots. + */ + xfs_qm_freelist_lock(xfs_Gqm); + + for (dqp = xfs_Gqm->qm_dqfreelist.qh_next; + dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) { + xfs_dqlock(dqp); + nextdqp = dqp->dq_flnext; + if (dqp->dq_flags & XFS_DQ_INACTIVE) { + ASSERT(dqp->q_mount == NULL); + ASSERT(! XFS_DQ_IS_DIRTY(dqp)); + ASSERT(dqp->HL_PREVP == NULL); + ASSERT(dqp->MPL_PREVP == NULL); + XQM_FREELIST_REMOVE(dqp); + xfs_dqunlock(dqp); + xfs_qm_dqdestroy(dqp); + } else { + xfs_dqunlock(dqp); + } + dqp = nextdqp; + } + xfs_qm_freelist_unlock(xfs_Gqm); + + /* + * Destroy the entire XQM. If somebody mounts with quotaon, this'll + * be restarted. + */ + XFS_QM_LOCK(xfs_Gqm); + XFS_QM_RELE(xfs_Gqm); + if (xfs_Gqm->qm_nrefs == 0) { + xfs_qm_destroy(xfs_Gqm); + xfs_Gqm = NULL; + } + XFS_QM_UNLOCK(xfs_Gqm); +} + +/* + * This is called at mount time from xfs_mountfs to initialize the quotainfo + * structure and start the global quotamanager (xfs_Gqm) if it hasn't done + * so already. Note that the superblock has not been read in yet. + */ +void +xfs_qm_mount_quotainit( + xfs_mount_t *mp, + uint flags) +{ + /* + * User or group quotas has to be on. + */ + ASSERT(flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA)); + + /* + * Initialize the flags in the mount structure. From this point + * onwards we look at m_qflags to figure out if quotas's ON/OFF, etc. + * Note that we enforce nothing if accounting is off. + * ie. XFSMNT_*QUOTA must be ON for XFSMNT_*QUOTAENF. + * It isn't necessary to take the quotaoff lock to do this; this is + * called from mount. + */ + if (flags & XFSMNT_UQUOTA) { + mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); + if (flags & XFSMNT_UQUOTAENF) + mp->m_qflags |= XFS_UQUOTA_ENFD; + } + if (flags & XFSMNT_GQUOTA) { + mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); + if (flags & XFSMNT_GQUOTAENF) + mp->m_qflags |= XFS_GQUOTA_ENFD; + } +} + +/* + * Just destroy the quotainfo structure. + */ +void +xfs_qm_unmount_quotadestroy( + xfs_mount_t *mp) +{ + if (mp->m_quotainfo) + xfs_qm_destroy_quotainfo(mp); +} + + +/* + * This is called from xfs_mountfs to start quotas and initialize all + * necessary data structures like quotainfo. This is also responsible for + * running a quotacheck as necessary. We are guaranteed that the superblock + * is consistently read in at this point. + */ +int +xfs_qm_mount_quotas( + xfs_mount_t *mp) +{ + unsigned long s; + int error = 0; + uint sbf; + + /* + * If a file system had quotas running earlier, but decided to + * mount without -o quota/uquota/gquota options, revoke the + * quotachecked license, and bail out. + */ + if (! XFS_IS_QUOTA_ON(mp) && + (mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT))) { + mp->m_qflags = 0; + goto write_changes; + } + + /* + * If quotas on realtime volumes is not supported, we disable + * quotas immediately. + */ + if (mp->m_sb.sb_rextents) { + cmn_err(CE_NOTE, + "Cannot turn on quotas for realtime filesystem %s", + mp->m_fsname); + mp->m_qflags = 0; + goto write_changes; + } + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + cmn_err(CE_NOTE, "Attempting to turn on disk quotas."); +#endif + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + /* + * Allocate the quotainfo structure inside the mount struct, and + * create quotainode(s), and change/rev superblock if necessary. 
+ */ + if ((error = xfs_qm_init_quotainfo(mp))) { + /* + * We must turn off quotas. + */ + ASSERT(mp->m_quotainfo == NULL); + mp->m_qflags = 0; + goto write_changes; + } + /* + * If any of the quotas are not consistent, do a quotacheck. + */ + if (XFS_QM_NEED_QUOTACHECK(mp)) { +#ifdef DEBUG + cmn_err(CE_NOTE, "Doing a quotacheck. Please wait."); +#endif + if ((error = xfs_qm_quotacheck(mp))) { + cmn_err(CE_WARN, "Quotacheck unsuccessful (Error %d): " + "Disabling quotas.", + error); + /* + * We must turn off quotas. + */ + ASSERT(mp->m_quotainfo != NULL); + ASSERT(xfs_Gqm != NULL); + xfs_qm_destroy_quotainfo(mp); + mp->m_qflags = 0; + goto write_changes; + } +#ifdef DEBUG + cmn_err(CE_NOTE, "Done quotacheck."); +#endif + } + write_changes: + /* + * We actually don't have to acquire the SB_LOCK at all. + * This can only be called from mount, and that's single threaded. XXX + */ + s = XFS_SB_LOCK(mp); + sbf = mp->m_sb.sb_qflags; + mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; + XFS_SB_UNLOCK(mp, s); + + if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { + if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { + /* + * We could only have been turning quotas off. + * We aren't in very good shape actually because + * the incore structures are convinced that quotas are + * off, but the on disk superblock doesn't know that ! + */ + ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); + xfs_fs_cmn_err(CE_ALERT, mp, + "XFS mount_quotas: Superblock update failed!"); + } + } + + if (error) { + xfs_fs_cmn_err(CE_WARN, mp, + "Failed to initialize disk quotas."); + } + return XFS_ERROR(error); +} + +/* + * Called from the vfsops layer. + */ +int +xfs_qm_unmount_quotas( + xfs_mount_t *mp) +{ + xfs_inode_t *uqp, *gqp; + int error = 0; + + /* + * Release the dquots that root inode, et al might be holding, + * before we flush quotas and blow away the quotainfo structure. + */ + ASSERT(mp->m_rootip); + xfs_qm_dqdetach(mp->m_rootip); + if (mp->m_rbmip) + xfs_qm_dqdetach(mp->m_rbmip); + if (mp->m_rsumip) + xfs_qm_dqdetach(mp->m_rsumip); + + /* + * Flush out the quota inodes. + */ + uqp = gqp = NULL; + if (mp->m_quotainfo) { + if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) { + xfs_ilock(uqp, XFS_ILOCK_EXCL); + xfs_iflock(uqp); + error = xfs_iflush(uqp, XFS_IFLUSH_SYNC); + xfs_iunlock(uqp, XFS_ILOCK_EXCL); + if (unlikely(error == EFSCORRUPTED)) { + XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)", + XFS_ERRLEVEL_LOW, mp); + goto out; + } + } + if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) { + xfs_ilock(gqp, XFS_ILOCK_EXCL); + xfs_iflock(gqp); + error = xfs_iflush(gqp, XFS_IFLUSH_SYNC); + xfs_iunlock(gqp, XFS_ILOCK_EXCL); + if (unlikely(error == EFSCORRUPTED)) { + XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)", + XFS_ERRLEVEL_LOW, mp); + goto out; + } + } + } + if (uqp) { + XFS_PURGE_INODE(uqp); + mp->m_quotainfo->qi_uquotaip = NULL; + } + if (gqp) { + XFS_PURGE_INODE(gqp); + mp->m_quotainfo->qi_gquotaip = NULL; + } +out: + return XFS_ERROR(error); +} + +/* + * Flush all dquots of the given file system to disk. The dquots are + * _not_ purged from memory here, just their data written to disk. + */ +int +xfs_qm_dqflush_all( + xfs_mount_t *mp, + int flags) +{ + int recl; + xfs_dquot_t *dqp; + int niters; + int error; + + if (mp->m_quotainfo == NULL) + return (0); + niters = 0; +again: + xfs_qm_mplist_lock(mp); + FOREACH_DQUOT_IN_MP(dqp, mp) { + xfs_dqlock(dqp); + if (! 
XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqunlock(dqp); + continue; + } + xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY"); + /* XXX a sentinel would be better */ + recl = XFS_QI_MPLRECLAIMS(mp); + if (! xfs_qm_dqflock_nowait(dqp)) { + /* + * If we can't grab the flush lock then check + * to see if the dquot has been flushed delayed + * write. If so, grab its buffer and send it + * out immediately. We'll be able to acquire + * the flush lock when the I/O completes. + */ + xfs_qm_dqflock_pushbuf_wait(dqp); + } + /* + * Let go of the mplist lock. We don't want to hold it + * across a disk write. + */ + xfs_qm_mplist_unlock(mp); + error = xfs_qm_dqflush(dqp, flags); + xfs_dqunlock(dqp); + if (error) + return (error); + + xfs_qm_mplist_lock(mp); + if (recl != XFS_QI_MPLRECLAIMS(mp)) { + xfs_qm_mplist_unlock(mp); + /* XXX restart limit */ + goto again; + } + } + + xfs_qm_mplist_unlock(mp); + /* return ! busy */ + return (0); +} +/* + * Release the group dquot pointers the user dquots may be + * carrying around as a hint. mplist is locked on entry and exit. + */ +STATIC void +xfs_qm_detach_gdquots( + xfs_mount_t *mp) +{ + xfs_dquot_t *dqp, *gdqp; + int nrecl; + + again: + ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); + dqp = XFS_QI_MPLNEXT(mp); + while (dqp) { + xfs_dqlock(dqp); + if ((gdqp = dqp->q_gdquot)) { + xfs_dqlock(gdqp); + dqp->q_gdquot = NULL; + } + xfs_dqunlock(dqp); + + if (gdqp) { + /* + * Can't hold the mplist lock across a dqput. + * XXXmust convert to marker based iterations here. + */ + nrecl = XFS_QI_MPLRECLAIMS(mp); + xfs_qm_mplist_unlock(mp); + xfs_qm_dqput(gdqp); + + xfs_qm_mplist_lock(mp); + if (nrecl != XFS_QI_MPLRECLAIMS(mp)) + goto again; + } + dqp = dqp->MPL_NEXT; + } +} + +/* + * Go through all the incore dquots of this file system and take them + * off the mplist and hashlist, if the dquot type matches the dqtype + * parameter. This is used when turning off quota accounting for + * users and/or groups, as well as when the filesystem is unmounting. + */ +STATIC int +xfs_qm_dqpurge_int( + xfs_mount_t *mp, + uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/GQUOTA */ +{ + xfs_dquot_t *dqp; + uint dqtype; + int nrecl; + xfs_dquot_t *nextdqp; + int nmisses; + + if (mp->m_quotainfo == NULL) + return (0); + + dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; + dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0; + + xfs_qm_mplist_lock(mp); + + /* + * In the first pass through all incore dquots of this filesystem, + * we release the group dquot pointers the user dquots may be + * carrying around as a hint. We need to do this irrespective of + * what's being turned off. + */ + xfs_qm_detach_gdquots(mp); + + again: + nmisses = 0; + ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); + /* + * Try to get rid of all of the unwanted dquots. The idea is to + * get them off mplist and hashlist, but leave them on freelist. + */ + dqp = XFS_QI_MPLNEXT(mp); + while (dqp) { + /* + * It's OK to look at the type without taking dqlock here. + * We're holding the mplist lock here, and that's needed for + * a dqreclaim. + */ + if ((dqp->dq_flags & dqtype) == 0) { + dqp = dqp->MPL_NEXT; + continue; + } + + if (! xfs_qm_dqhashlock_nowait(dqp)) { + nrecl = XFS_QI_MPLRECLAIMS(mp); + xfs_qm_mplist_unlock(mp); + XFS_DQ_HASH_LOCK(dqp->q_hash); + xfs_qm_mplist_lock(mp); + + /* + * XXXTheoretically, we can get into a very long + * ping pong game here. + * No one can be adding dquots to the mplist at + * this point, but somebody might be taking things off. 
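+ * XFS_QI_MPLRECLAIMS(mp) serves as a generation count: it changes
+ * whenever a dquot is reclaimed off the mplist. Sampling it before
+ * dropping the mplist lock and comparing again after re-acquiring the
+ * lock tells us whether the list changed underneath us; if it did, the
+ * only safe option is to restart the walk from the beginning.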
+ */ + if (nrecl != XFS_QI_MPLRECLAIMS(mp)) { + XFS_DQ_HASH_UNLOCK(dqp->q_hash); + goto again; + } + } + + /* + * Take the dquot off the mplist and hashlist. It may remain on + * freelist in INACTIVE state. + */ + nextdqp = dqp->MPL_NEXT; + nmisses += xfs_qm_dqpurge(dqp, flags); + dqp = nextdqp; + } + xfs_qm_mplist_unlock(mp); + return nmisses; +} + +int +xfs_qm_dqpurge_all( + xfs_mount_t *mp, + uint flags) +{ + int ndquots; + + /* + * Purge the dquot cache. + * None of the dquots should really be busy at this point. + */ + if (mp->m_quotainfo) { + while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) { + delay(ndquots * 10); + } + } + return 0; +} + +STATIC int +xfs_qm_dqattach_one( + xfs_inode_t *ip, + xfs_dqid_t id, + uint type, + uint doalloc, + uint dolock, + xfs_dquot_t *udqhint, /* hint */ + xfs_dquot_t **IO_idqpp) +{ + xfs_dquot_t *dqp; + int error; + + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + error = 0; + /* + * See if we already have it in the inode itself. IO_idqpp is + * &i_udquot or &i_gdquot. This made the code look weird, but + * made the logic a lot simpler. + */ + if ((dqp = *IO_idqpp)) { + if (dolock) + xfs_dqlock(dqp); + xfs_dqtrace_entry(dqp, "DQATTACH: found in ip"); + goto done; + } + + /* + * udqhint is the i_udquot field in inode, and is non-NULL only + * when the type arg is XFS_DQ_GROUP. Its purpose is to save a + * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside + * the user dquot. + */ + ASSERT(!udqhint || type == XFS_DQ_GROUP); + if (udqhint && !dolock) + xfs_dqlock(udqhint); + + /* + * No need to take dqlock to look at the id. + * The ID can't change until it gets reclaimed, and it won't + * be reclaimed as long as we have a ref from inode and we hold + * the ilock. + */ + if (udqhint && + (dqp = udqhint->q_gdquot) && + (INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id)) { + ASSERT(XFS_DQ_IS_LOCKED(udqhint)); + xfs_dqlock(dqp); + XFS_DQHOLD(dqp); + ASSERT(*IO_idqpp == NULL); + *IO_idqpp = dqp; + if (!dolock) { + xfs_dqunlock(dqp); + xfs_dqunlock(udqhint); + } + goto done; + } + /* + * We can't hold a dquot lock when we call the dqget code. + * We'll deadlock in no time, because of (not conforming to) + * lock ordering - the inodelock comes before any dquot lock, + * and we may drop and reacquire the ilock in xfs_qm_dqget(). + */ + if (udqhint) + xfs_dqunlock(udqhint); + /* + * Find the dquot from somewhere. This bumps the + * reference count of dquot and returns it locked. + * This can return ENOENT if dquot didn't exist on + * disk and we didn't ask it to allocate; + * ESRCH if quotas got turned off suddenly. + */ + if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type, + doalloc|XFS_QMOPT_DOWARN, &dqp))) { + if (udqhint && dolock) + xfs_dqlock(udqhint); + goto done; + } + + xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget"); + /* + * dqget may have dropped and re-acquired the ilock, but it guarantees + * that the dquot returned is the one that should go in the inode. + */ + *IO_idqpp = dqp; + ASSERT(dqp); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + if (! dolock) { + xfs_dqunlock(dqp); + ASSERT(!udqhint || !XFS_DQ_IS_LOCKED(udqhint)); + goto done; + } + if (! udqhint) + goto done; + + ASSERT(udqhint); + ASSERT(dolock); + ASSERT(! XFS_DQ_IS_LOCKED(udqhint)); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + if (! xfs_qm_dqlock_nowait(udqhint)) { + xfs_dqunlock(dqp); + xfs_dqlock(udqhint); + xfs_dqlock(dqp); + } + done: +#ifdef QUOTADEBUG + if (udqhint) { + if (dolock) + ASSERT(XFS_DQ_IS_LOCKED(udqhint)); + else + ASSERT(! XFS_DQ_IS_LOCKED(udqhint)); + } + if (! 
error) { + if (dolock) + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + else + ASSERT(! XFS_DQ_IS_LOCKED(dqp)); + } +#endif + return (error); +} + + +/* + * Given a udquot and gdquot, attach a ptr to the group dquot in the + * udquot as a hint for future lookups. The idea sounds simple, but the + * execution isn't, because the udquot might have a group dquot attached + * already and getting rid of that gets us into lock ordering contraints. + * The process is complicated more by the fact that the dquots may or may not + * be locked on entry. + */ +STATIC void +xfs_qm_dqattach_grouphint( + xfs_dquot_t *udq, + xfs_dquot_t *gdq, + uint locked) +{ + xfs_dquot_t *tmp; + +#ifdef QUOTADEBUG + if (locked) { + ASSERT(XFS_DQ_IS_LOCKED(udq)); + ASSERT(XFS_DQ_IS_LOCKED(gdq)); + } else { + ASSERT(! XFS_DQ_IS_LOCKED(udq)); + ASSERT(! XFS_DQ_IS_LOCKED(gdq)); + } +#endif + if (! locked) + xfs_dqlock(udq); + + if ((tmp = udq->q_gdquot)) { + if (tmp == gdq) { + if (! locked) + xfs_dqunlock(udq); + return; + } + + udq->q_gdquot = NULL; + /* + * We can't keep any dqlocks when calling dqrele, + * because the freelist lock comes before dqlocks. + */ + xfs_dqunlock(udq); + if (locked) + xfs_dqunlock(gdq); + /* + * we took a hard reference once upon a time in dqget, + * so give it back when the udquot no longer points at it + * dqput() does the unlocking of the dquot. + */ + xfs_qm_dqrele(tmp); + + ASSERT(! XFS_DQ_IS_LOCKED(udq)); + ASSERT(! XFS_DQ_IS_LOCKED(gdq)); + xfs_dqlock(udq); + xfs_dqlock(gdq); + + } else { + ASSERT(XFS_DQ_IS_LOCKED(udq)); + if (! locked) { + ASSERT(! XFS_DQ_IS_LOCKED(gdq)); + xfs_dqlock(gdq); + } + } + + ASSERT(XFS_DQ_IS_LOCKED(udq)); + ASSERT(XFS_DQ_IS_LOCKED(gdq)); + /* + * Somebody could have attached a gdquot here, + * when we dropped the uqlock. If so, just do nothing. + */ + if (udq->q_gdquot == NULL) { + XFS_DQHOLD(gdq); + udq->q_gdquot = gdq; + } + if (! locked) { + xfs_dqunlock(gdq); + xfs_dqunlock(udq); + } +} + + +/* + * Given a locked inode, attach dquot(s) to it, taking UQUOTAON / GQUOTAON + * in to account. + * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed. + * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty + * much made this code a complete mess, but it has been pretty useful. + * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL. + * Inode may get unlocked and relocked in here, and the caller must deal with + * the consequences. + */ +int +xfs_qm_dqattach( + xfs_inode_t *ip, + uint flags) +{ + xfs_mount_t *mp = ip->i_mount; + uint nquotas = 0; + int error = 0; + + if ((! XFS_IS_QUOTA_ON(mp)) || + (! XFS_NOT_DQATTACHED(mp, ip)) || + (ip->i_ino == mp->m_sb.sb_uquotino) || + (ip->i_ino == mp->m_sb.sb_gquotino)) + return (0); + + ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 || + XFS_ISLOCKED_INODE_EXCL(ip)); + + if (! (flags & XFS_QMOPT_ILOCKED)) + xfs_ilock(ip, XFS_ILOCK_EXCL); + + if (XFS_IS_UQUOTA_ON(mp)) { + error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, + flags & XFS_QMOPT_DQALLOC, + flags & XFS_QMOPT_DQLOCK, + NULL, &ip->i_udquot); + if (error) + goto done; + nquotas++; + } + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + if (XFS_IS_GQUOTA_ON(mp)) { + error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, + flags & XFS_QMOPT_DQALLOC, + flags & XFS_QMOPT_DQLOCK, + ip->i_udquot, &ip->i_gdquot); + /* + * Don't worry about the udquot that we may have + * attached above. It'll get detached, if not already. + */ + if (error) + goto done; + nquotas++; + } + + /* + * Attach this group quota to the user quota as a hint. 
+ * This WON'T, in general, result in a thrash. + */ + if (nquotas == 2) { + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(ip->i_udquot); + ASSERT(ip->i_gdquot); + + /* + * We may or may not have the i_udquot locked at this point, + * but this check is OK since we don't depend on the i_gdquot to + * be accurate 100% all the time. It is just a hint, and this + * will succeed in general. + */ + if (ip->i_udquot->q_gdquot == ip->i_gdquot) + goto done; + /* + * Attach i_gdquot to the gdquot hint inside the i_udquot. + */ + xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot, + flags & XFS_QMOPT_DQLOCK); + } + + done: + +#ifdef QUOTADEBUG + if (! error) { + if (ip->i_udquot) { + if (flags & XFS_QMOPT_DQLOCK) + ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot)); + else + ASSERT(! XFS_DQ_IS_LOCKED(ip->i_udquot)); + } + if (ip->i_gdquot) { + if (flags & XFS_QMOPT_DQLOCK) + ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot)); + else + ASSERT(! XFS_DQ_IS_LOCKED(ip->i_gdquot)); + } + if (XFS_IS_UQUOTA_ON(mp)) + ASSERT(ip->i_udquot); + if (XFS_IS_GQUOTA_ON(mp)) + ASSERT(ip->i_gdquot); + } +#endif + + if (! (flags & XFS_QMOPT_ILOCKED)) + xfs_iunlock(ip, XFS_ILOCK_EXCL); + +#ifdef QUOTADEBUG + else + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); +#endif + return (error); +} + +/* + * Release dquots (and their references) if any. + * The inode should be locked EXCL except when this's called by + * xfs_ireclaim. + */ +void +xfs_qm_dqdetach( + xfs_inode_t *ip) +{ + if (!(ip->i_udquot || ip->i_gdquot)) + return; + + ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino); + ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino); + if (ip->i_udquot) + xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip); + if (ip->i_udquot) { + xfs_qm_dqrele(ip->i_udquot); + ip->i_udquot = NULL; + } + if (ip->i_gdquot) { + xfs_qm_dqrele(ip->i_gdquot); + ip->i_gdquot = NULL; + } +} + +/* + * This is called by VFS_SYNC and flags arg determines the caller, + * and its motives, as done in xfs_sync. + * + * vfs_sync: SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31 + * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI 0x25 + * umountroot : SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA + */ + +int +xfs_qm_sync( + xfs_mount_t *mp, + short flags) +{ + int recl, restarts; + xfs_dquot_t *dqp; + uint flush_flags; + boolean_t nowait; + int error; + + restarts = 0; + /* + * We won't block unless we are asked to. + */ + nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0); + + again: + xfs_qm_mplist_lock(mp); + /* + * dqpurge_all() also takes the mplist lock and iterate thru all dquots + * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared + * when we have the mplist lock, we know that dquots will be consistent + * as long as we have it locked. + */ + if (! XFS_IS_QUOTA_ON(mp)) { + xfs_qm_mplist_unlock(mp); + return (0); + } + FOREACH_DQUOT_IN_MP(dqp, mp) { + /* + * If this is vfs_sync calling, then skip the dquots that + * don't 'seem' to be dirty. ie. don't acquire dqlock. + * This is very similar to what xfs_sync does with inodes. + */ + if (flags & SYNC_BDFLUSH) { + if (! XFS_DQ_IS_DIRTY(dqp)) + continue; + } + + if (nowait) { + /* + * Try to acquire the dquot lock. We are NOT out of + * lock order, but we just don't want to wait for this + * lock, unless somebody wanted us to. + */ + if (! xfs_qm_dqlock_nowait(dqp)) + continue; + } else { + xfs_dqlock(dqp); + } + + /* + * Now, find out for sure if this dquot is dirty or not. + */ + if (! 
XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqunlock(dqp); + continue; + } + + /* XXX a sentinel would be better */ + recl = XFS_QI_MPLRECLAIMS(mp); + if (! xfs_qm_dqflock_nowait(dqp)) { + if (nowait) { + xfs_dqunlock(dqp); + continue; + } + /* + * If we can't grab the flush lock then if the caller + * really wanted us to give this our best shot, + * see if we can give a push to the buffer before we wait + * on the flush lock. At this point, we know that + * eventhough the dquot is being flushed, + * it has (new) dirty data. + */ + xfs_qm_dqflock_pushbuf_wait(dqp); + } + /* + * Let go of the mplist lock. We don't want to hold it + * across a disk write + */ + flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC; + xfs_qm_mplist_unlock(mp); + xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH"); + error = xfs_qm_dqflush(dqp, flush_flags); + xfs_dqunlock(dqp); + if (error && XFS_FORCED_SHUTDOWN(mp)) + return(0); /* Need to prevent umount failure */ + else if (error) + return (error); + + xfs_qm_mplist_lock(mp); + if (recl != XFS_QI_MPLRECLAIMS(mp)) { + if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS) + break; + + xfs_qm_mplist_unlock(mp); + goto again; + } + } + + xfs_qm_mplist_unlock(mp); + return (0); +} + + +/* + * This initializes all the quota information that's kept in the + * mount structure + */ +int +xfs_qm_init_quotainfo( + xfs_mount_t *mp) +{ + xfs_quotainfo_t *qinf; + int error; + xfs_dquot_t *dqp; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + /* + * Tell XQM that we exist as soon as possible. + */ + if ((error = xfs_qm_hold_quotafs_ref(mp))) { + return (error); + } + + qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); + + /* + * See if quotainodes are setup, and if not, allocate them, + * and change the superblock accordingly. + */ + if ((error = xfs_qm_init_quotainos(mp))) { + kmem_free(qinf, sizeof(xfs_quotainfo_t)); + mp->m_quotainfo = NULL; + return (error); + } + + spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin"); + xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0); + qinf->qi_dqreclaims = 0; + + /* mutex used to serialize quotaoffs */ + mutex_init(&qinf->qi_quotaofflock, MUTEX_DEFAULT, "qoff"); + + /* Precalc some constants */ + qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); + ASSERT(qinf->qi_dqchunklen); + qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen); + do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t)); + + mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD); + + /* + * We try to get the limits from the superuser's limits fields. + * This is quite hacky, but it is standard quota practice. + * We look at the USR dquot with id == 0 first, but if user quotas + * are not enabled we goto the GRP dquot with id == 0. + * We don't really care to keep separate default limits for user + * and group quotas, at least not at this point. + */ + error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0, + (XFS_IS_UQUOTA_RUNNING(mp)) ? + XFS_DQ_USER : XFS_DQ_GROUP, + XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN, + &dqp); + if (! error) { + /* + * The warnings and timers set the grace period given to + * a user or group before he or she can not perform any + * more writing. If it is zero, a default is used. + */ + qinf->qi_btimelimit = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT) ? + INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT) : XFS_QM_BTIMELIMIT; + qinf->qi_itimelimit = INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT) ? + INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT) : XFS_QM_ITIMELIMIT; + qinf->qi_rtbtimelimit = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT) ? 
+ INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT) : XFS_QM_RTBTIMELIMIT; + qinf->qi_bwarnlimit = INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) ? + INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) : XFS_QM_BWARNLIMIT; + qinf->qi_iwarnlimit = INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) ? + INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) : XFS_QM_IWARNLIMIT; + + /* + * We sent the XFS_QMOPT_DQSUSER flag to dqget because + * we don't want this dquot cached. We haven't done a + * quotacheck yet, and quotacheck doesn't like incore dquots. + */ + xfs_qm_dqdestroy(dqp); + } else { + qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; + qinf->qi_itimelimit = XFS_QM_ITIMELIMIT; + qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT; + qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT; + qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT; + } + + return (0); +} + + +/* + * Gets called when unmounting a filesystem or when all quotas get + * turned off. + * This purges the quota inodes, destroys locks and frees itself. + */ +void +xfs_qm_destroy_quotainfo( + xfs_mount_t *mp) +{ + xfs_quotainfo_t *qi; + + qi = mp->m_quotainfo; + ASSERT(qi != NULL); + ASSERT(xfs_Gqm != NULL); + + /* + * Release the reference that XQM kept, so that we know + * when the XQM structure should be freed. We cannot assume + * that xfs_Gqm is non-null after this point. + */ + xfs_qm_rele_quotafs_ref(mp); + + spinlock_destroy(&qi->qi_pinlock); + xfs_qm_list_destroy(&qi->qi_dqlist); + + if (qi->qi_uquotaip) { + XFS_PURGE_INODE(qi->qi_uquotaip); + qi->qi_uquotaip = NULL; /* paranoia */ + } + if (qi->qi_gquotaip) { + XFS_PURGE_INODE(qi->qi_gquotaip); + qi->qi_gquotaip = NULL; + } + mutex_destroy(&qi->qi_quotaofflock); + kmem_free(qi, sizeof(xfs_quotainfo_t)); + mp->m_quotainfo = NULL; +} + + + +/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */ + +/* ARGSUSED */ +STATIC void +xfs_qm_list_init( + xfs_dqlist_t *list, + char *str, + int n) +{ + mutex_init(&list->qh_lock, MUTEX_DEFAULT, str); + list->qh_next = NULL; + list->qh_version = 0; + list->qh_nelems = 0; +} + +STATIC void +xfs_qm_list_destroy( + xfs_dqlist_t *list) +{ + mutex_destroy(&(list->qh_lock)); +} + + +/* + * Stripped down version of dqattach. This doesn't attach, or even look at the + * dquots attached to the inode. The rationale is that there won't be any + * attached at the time this is called from quotacheck. + */ +STATIC int +xfs_qm_dqget_noattach( + xfs_inode_t *ip, + xfs_dquot_t **O_udqpp, + xfs_dquot_t **O_gdqpp) +{ + int error; + xfs_mount_t *mp; + xfs_dquot_t *udqp, *gdqp; + + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + mp = ip->i_mount; + udqp = NULL; + gdqp = NULL; + + if (XFS_IS_UQUOTA_ON(mp)) { + ASSERT(ip->i_udquot == NULL); + /* + * We want the dquot allocated if it doesn't exist. + */ + if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER, + XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, + &udqp))) { + /* + * Shouldn't be able to turn off quotas here. + */ + ASSERT(error != ESRCH); + ASSERT(error != ENOENT); + return (error); + } + ASSERT(udqp); + } + + if (XFS_IS_GQUOTA_ON(mp)) { + ASSERT(ip->i_gdquot == NULL); + if (udqp) + xfs_dqunlock(udqp); + if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_gid, XFS_DQ_GROUP, + XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN, + &gdqp))) { + if (udqp) + xfs_qm_dqrele(udqp); + ASSERT(error != ESRCH); + ASSERT(error != ENOENT); + return (error); + } + ASSERT(gdqp); + + /* Reacquire the locks in the right order */ + if (udqp) { + if (! 
xfs_qm_dqlock_nowait(udqp)) { + xfs_dqunlock(gdqp); + xfs_dqlock(udqp); + xfs_dqlock(gdqp); + } + } + } + + *O_udqpp = udqp; + *O_gdqpp = gdqp; + +#ifdef QUOTADEBUG + if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp)); + if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp)); +#endif + return (0); +} + +/* + * Create an inode and return with a reference already taken, but unlocked + * This is how we create quota inodes + */ +STATIC int +xfs_qm_qino_alloc( + xfs_mount_t *mp, + xfs_inode_t **ip, + __int64_t sbfields, + uint flags) +{ + xfs_trans_t *tp; + int error; + unsigned long s; + cred_t zerocr; + int committed; + + tp = xfs_trans_alloc(mp,XFS_TRANS_QM_QINOCREATE); + if ((error = xfs_trans_reserve(tp, + XFS_QM_QINOCREATE_SPACE_RES(mp), + XFS_CREATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_CREATE_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + memset(&zerocr, 0, sizeof(zerocr)); + + if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, IFREG, 1, 0, + &zerocr, 0, 1, ip, &committed))) { + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT); + return (error); + } + + /* + * Keep an extra reference to this quota inode. This inode is + * locked exclusively and joined to the transaction already. + */ + ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip)); + VN_HOLD(XFS_ITOV((*ip))); + + /* + * Make the changes in the superblock, and log those too. + * sbfields arg may contain fields other than *QUOTINO; + * VERSIONNUM for example. + */ + s = XFS_SB_LOCK(mp); + if (flags & XFS_QMOPT_SBVERSION) { +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + unsigned oldv = mp->m_sb.sb_versionnum; +#endif + ASSERT(!XFS_SB_VERSION_HASQUOTA(&mp->m_sb)); + ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | + XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) == + (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | + XFS_SB_GQUOTINO | XFS_SB_QFLAGS)); + + XFS_SB_VERSION_ADDQUOTA(&mp->m_sb); + mp->m_sb.sb_uquotino = NULLFSINO; + mp->m_sb.sb_gquotino = NULLFSINO; + + /* qflags will get updated _after_ quotacheck */ + mp->m_sb.sb_qflags = 0; +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + cmn_err(CE_NOTE, + "Old superblock version %x, converting to %x.", + oldv, mp->m_sb.sb_versionnum); +#endif + } + if (flags & XFS_QMOPT_UQUOTA) + mp->m_sb.sb_uquotino = (*ip)->i_ino; + else + mp->m_sb.sb_gquotino = (*ip)->i_ino; + XFS_SB_UNLOCK(mp, s); + xfs_mod_sb(tp, sbfields); + + if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, + NULL))) { + xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!"); + return (error); + } + return (0); +} + + +STATIC int +xfs_qm_reset_dqcounts( + xfs_mount_t *mp, + xfs_buf_t *bp, + xfs_dqid_t id, + uint type) +{ + xfs_disk_dquot_t *ddq; + int j; + + xfs_buftrace("RESET DQUOTS", bp); + /* + * Reset all counters and timers. They'll be + * started afresh by xfs_qm_quotacheck. + */ +#ifdef DEBUG + j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); + do_div(j, sizeof(xfs_dqblk_t)); + ASSERT(XFS_QM_DQPERBLK(mp) == j); +#endif + ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp); + for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) { + /* + * Do a sanity check, and if needed, repair the dqblk. Don't + * output any warnings because it's perfectly possible to + * find unitialized dquot blks. See comment in xfs_qm_dqcheck. 
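+ * Note that only the usage counters (d_bcount, d_icount, d_rtbcount)
+ * and the timers/warning counts are zeroed below; the hard and soft
+ * limits stored in each dquot are left alone, so a quotacheck does not
+ * wipe out administrator-configured limits.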
+ */ + (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR, + "xfs_quotacheck"); + INT_SET(ddq->d_bcount, ARCH_CONVERT, 0ULL); + INT_SET(ddq->d_icount, ARCH_CONVERT, 0ULL); + INT_SET(ddq->d_rtbcount, ARCH_CONVERT, 0ULL); + INT_SET(ddq->d_btimer, ARCH_CONVERT, (time_t)0); + INT_SET(ddq->d_itimer, ARCH_CONVERT, (time_t)0); + INT_SET(ddq->d_bwarns, ARCH_CONVERT, 0UL); + INT_SET(ddq->d_iwarns, ARCH_CONVERT, 0UL); + ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); + } + + return (0); +} + +STATIC int +xfs_qm_dqiter_bufs( + xfs_mount_t *mp, + xfs_dqid_t firstid, + xfs_fsblock_t bno, + xfs_filblks_t blkcnt, + uint flags) +{ + xfs_buf_t *bp; + int error; + int notcommitted; + int incr; + + ASSERT(blkcnt > 0); + notcommitted = 0; + incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ? + XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt; + error = 0; + + /* + * Blkcnt arg can be a very big number, and might even be + * larger than the log itself. So, we have to break it up into + * manageable-sized transactions. + * Note that we don't start a permanent transaction here; we might + * not be able to get a log reservation for the whole thing up front, + * and we don't really care to either, because we just discard + * everything if we were to crash in the middle of this loop. + */ + while (blkcnt--) { + error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, bno), + (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp); + if (error) + break; + + (void) xfs_qm_reset_dqcounts(mp, bp, firstid, + flags & XFS_QMOPT_UQUOTA ? + XFS_DQ_USER : XFS_DQ_GROUP); + xfs_bdwrite(mp, bp); + /* + * goto the next block. + */ + bno++; + firstid += XFS_QM_DQPERBLK(mp); + } + return (error); +} + +/* + * Iterate over all allocated USR/GRP dquots in the system, calling a + * caller supplied function for every chunk of dquots that we find. + */ +STATIC int +xfs_qm_dqiterate( + xfs_mount_t *mp, + xfs_inode_t *qip, + uint flags) +{ + xfs_bmbt_irec_t *map; + int i, nmaps; /* number of map entries */ + int error; /* return value */ + xfs_fileoff_t lblkno; + xfs_filblks_t maxlblkcnt; + xfs_dqid_t firstid; + xfs_fsblock_t rablkno; + xfs_filblks_t rablkcnt; + + error = 0; + /* + * This looks racey, but we can't keep an inode lock across a + * trans_reserve. But, this gets called during quotacheck, and that + * happens only at mount time which is single threaded. + */ + if (qip->i_d.di_nblocks == 0) + return (0); + + map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); + + lblkno = 0; + maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAX_FILE_OFFSET); + do { + nmaps = XFS_DQITER_MAP_SIZE; + /* + * We aren't changing the inode itself. Just changing + * some of its data. No new blocks are added here, and + * the inode is never added to the transaction. + */ + xfs_ilock(qip, XFS_ILOCK_SHARED); + error = xfs_bmapi(NULL, qip, lblkno, + maxlblkcnt - lblkno, + XFS_BMAPI_METADATA, + NULL, + 0, map, &nmaps, NULL); + xfs_iunlock(qip, XFS_ILOCK_SHARED); + if (error) + break; + + ASSERT(nmaps <= XFS_DQITER_MAP_SIZE); + for (i = 0; i < nmaps; i++) { + ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); + ASSERT(map[i].br_blockcount); + + + lblkno += map[i].br_blockcount; + + if (map[i].br_startblock == HOLESTARTBLOCK) + continue; + + firstid = (xfs_dqid_t) map[i].br_startoff * + XFS_QM_DQPERBLK(mp); + /* + * Do a read-ahead on the next extent. 
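+ * The read-ahead is purely an optimization: xfs_qm_dqiter_bufs() reads
+ * the current extent synchronously, so we issue asynchronous reads
+ * (xfs_baread) for every block of the following extent in the hope that
+ * those buffers are already cached by the time we get to them.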
+ */ + if ((i+1 < nmaps) && + (map[i+1].br_startblock != HOLESTARTBLOCK)) { + rablkcnt = map[i+1].br_blockcount; + rablkno = map[i+1].br_startblock; + while (rablkcnt--) { + xfs_baread(mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, rablkno), + (int)XFS_QI_DQCHUNKLEN(mp)); + rablkno++; + } + } + /* + * Iterate thru all the blks in the extent and + * reset the counters of all the dquots inside them. + */ + if ((error = xfs_qm_dqiter_bufs(mp, + firstid, + map[i].br_startblock, + map[i].br_blockcount, + flags))) { + break; + } + } + + if (error) + break; + } while (nmaps > 0); + + kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map)); + + return (error); +} + +/* + * Called by dqusage_adjust in doing a quotacheck. + * Given the inode, and a dquot (either USR or GRP, doesn't matter), + * this updates its incore copy as well as the buffer copy. This is + * so that once the quotacheck is done, we can just log all the buffers, + * as opposed to logging numerous updates to individual dquots. + */ +STATIC void +xfs_qm_quotacheck_dqadjust( + xfs_dquot_t *dqp, + xfs_qcnt_t nblks, + xfs_qcnt_t rtblks) +{ + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + xfs_dqtrace_entry(dqp, "QCHECK DQADJUST"); + /* + * Adjust the inode count and the block count to reflect this inode's + * resource usage. + */ + INT_MOD(dqp->q_core.d_icount, ARCH_CONVERT, +1); + dqp->q_res_icount++; + if (nblks) { + INT_MOD(dqp->q_core.d_bcount, ARCH_CONVERT, nblks); + dqp->q_res_bcount += nblks; + } + if (rtblks) { + INT_MOD(dqp->q_core.d_rtbcount, ARCH_CONVERT, rtblks); + dqp->q_res_rtbcount += rtblks; + } + + /* + * Adjust the timers since we just changed usages + */ + if (! XFS_IS_SUSER_DQUOT(dqp)) + xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core); + + dqp->dq_flags |= XFS_DQ_DIRTY; +} + +STATIC int +xfs_qm_get_rtblks( + xfs_inode_t *ip, + xfs_qcnt_t *O_rtblks) +{ + xfs_filblks_t rtblks; /* total rt blks */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extent entries */ + xfs_bmbt_rec_t *base; /* base of extent array */ + xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ + int error; + + ASSERT(XFS_IS_REALTIME_INODE(ip)); + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + if (!(ifp->if_flags & XFS_IFEXTENTS)) { + if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) + return (error); + } + rtblks = 0; + nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; + for (ep = base; ep < &base[nextents]; ep++) + rtblks += xfs_bmbt_get_blockcount(ep); + *O_rtblks = (xfs_qcnt_t)rtblks; + return (0); +} + +/* + * callback routine supplied to bulkstat(). Given an inumber, find its + * dquots and update them to account for resources taken by that inode. + */ +/* ARGSUSED */ +STATIC int +xfs_qm_dqusage_adjust( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer - NULL */ + xfs_ino_t ino, /* inode number to get data for */ + void *buffer, /* not used */ + xfs_daddr_t bno, /* starting block of inode cluster */ + void *dip, /* on-disk inode pointer (not used) */ + int *res) /* result code value */ +{ + xfs_inode_t *ip; + xfs_dquot_t *udqp, *gdqp; + xfs_qcnt_t nblks, rtblks; + int error; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + /* + * rootino must have its resources accounted for, not so with the quota + * inodes. + */ + if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { + *res = BULKSTAT_RV_NOTHING; + return XFS_ERROR(EINVAL); + } + + /* + * We don't _need_ to take the ilock EXCL. 
However, the xfs_qm_dqget + * interface expects the inode to be exclusively locked because that's + * the case in all other instances. It's OK that we do this because + * quotacheck is done only at mount time. + */ + if ((error = xfs_iget(mp, tp, ino, XFS_ILOCK_EXCL, &ip, bno))) { + *res = BULKSTAT_RV_NOTHING; + return (error); + } + + if (ip->i_d.di_mode == 0) { + xfs_iput_new(ip, XFS_ILOCK_EXCL); + *res = BULKSTAT_RV_NOTHING; + return XFS_ERROR(ENOENT); + } + + /* + * Obtain the locked dquots. In case of an error (eg. allocation + * fails for ENOSPC), we return the negative of the error number + * to bulkstat, so that it can get propagated to quotacheck() and + * making us disable quotas for the file system. + */ + if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) { + xfs_iput(ip, XFS_ILOCK_EXCL); + *res = BULKSTAT_RV_GIVEUP; + return (error); + } + + rtblks = 0; + if (! XFS_IS_REALTIME_INODE(ip)) { + nblks = (xfs_qcnt_t)ip->i_d.di_nblocks; + } else { + /* + * Walk thru the extent list and count the realtime blocks. + */ + if ((error = xfs_qm_get_rtblks(ip, &rtblks))) { + xfs_iput(ip, XFS_ILOCK_EXCL); + if (udqp) + xfs_qm_dqput(udqp); + if (gdqp) + xfs_qm_dqput(gdqp); + *res = BULKSTAT_RV_GIVEUP; + return (error); + } + nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; + } + ASSERT(ip->i_delayed_blks == 0); + + /* + * We can't release the inode while holding its dquot locks. + * The inode can go into inactive and might try to acquire the dquotlocks. + * So, just unlock here and do a vn_rele at the end. + */ + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + /* + * Add the (disk blocks and inode) resources occupied by this + * inode to its dquots. We do this adjustment in the incore dquot, + * and also copy the changes to its buffer. + * We don't care about putting these changes in a transaction + * envelope because if we crash in the middle of a 'quotacheck' + * we have to start from the beginning anyway. + * Once we're done, we'll log all the dquot bufs. + * + * The *QUOTA_ON checks below may look pretty racey, but quotachecks + * and quotaoffs don't race. (Quotachecks happen at mount time only). + */ + if (XFS_IS_UQUOTA_ON(mp)) { + ASSERT(udqp); + xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks); + xfs_qm_dqput(udqp); + } + if (XFS_IS_GQUOTA_ON(mp)) { + ASSERT(gdqp); + xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks); + xfs_qm_dqput(gdqp); + } + /* + * Now release the inode. This will send it to 'inactive', and + * possibly even free blocks. + */ + VN_RELE(XFS_ITOV(ip)); + + /* + * Goto next inode. + */ + *res = BULKSTAT_RV_DIDONE; + return (0); +} + +/* + * Walk thru all the filesystem inodes and construct a consistent view + * of the disk quota world. + */ +STATIC int +xfs_qm_quotacheck( + xfs_mount_t *mp) +{ + int done, count, error; + xfs_ino_t lastino; + size_t structsz; + xfs_inode_t *uip, *gip; + uint flags; + + count = INT_MAX; + structsz = 1; + lastino = 0; + flags = 0; + + ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp)); + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + /* + * There should be no cached dquots. The (simplistic) quotacheck + * algorithm doesn't like that. + */ + ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0); + + cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname); + + /* + * First we go thru all the dquots on disk, USR and GRP, and reset + * their counters to zero. We need a clean slate. + * We don't log our changes till later. 
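+ *
+ * The overall quotacheck algorithm, in sketch form:
+ *
+ *	for each on-disk dquot buffer (xfs_qm_dqiterate):
+ *		zero its usage counters (xfs_qm_reset_dqcounts);
+ *	for each inode in the filesystem (xfs_bulkstat):
+ *		xfs_qm_dqusage_adjust() adds the inode's block and inode
+ *		usage to the incore user and/or group dquot;
+ *	xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI) then pushes the dquots
+ *	out, followed by a synchronous flush of the data device before
+ *	the CHKD flags are set in m_qflags.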
+ */ + if ((uip = XFS_QI_UQIP(mp))) { + if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA))) + goto error_return; + flags |= XFS_UQUOTA_CHKD; + } + + if ((gip = XFS_QI_GQIP(mp))) { + if ((error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA))) + goto error_return; + flags |= XFS_GQUOTA_CHKD; + } + + do { + /* + * Iterate thru all the inodes in the file system, + * adjusting the corresponding dquot counters in core. + */ + if ((error = xfs_bulkstat(mp, NULL, &lastino, &count, + xfs_qm_dqusage_adjust, + structsz, NULL, + BULKSTAT_FG_IGET|BULKSTAT_FG_VFSLOCKED, + &done))) + break; + + } while (! done); + + /* + * We can get this error if we couldn't do a dquot allocation inside + * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the + * dirty dquots that might be cached, we just want to get rid of them + * and turn quotaoff. The dquots won't be attached to any of the inodes + * at this point (because we intentionally didn't in dqget_noattach). + */ + if (error) { + xfs_qm_dqpurge_all(mp, + XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA| + XFS_QMOPT_QUOTAOFF); + goto error_return; + } + /* + * We've made all the changes that we need to make incore. + * Now flush_them down to disk buffers. + */ + xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI); + + /* + * We didn't log anything, because if we crashed, we'll have to + * start the quotacheck from scratch anyway. However, we must make + * sure that our dquot changes are secure before we put the + * quotacheck'd stamp on the superblock. So, here we do a synchronous + * flush. + */ + XFS_bflush(mp->m_ddev_targp); + + /* + * If one type of quotas is off, then it will lose its + * quotachecked status, since we won't be doing accounting for + * that type anymore. + */ + mp->m_qflags &= ~(XFS_GQUOTA_CHKD | XFS_UQUOTA_CHKD); + mp->m_qflags |= flags; + + XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++"); + + error_return: + cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname); + return (error); +} + +/* + * This is called after the superblock has been read in and we're ready to + * iget the quota inodes. + */ +STATIC int +xfs_qm_init_quotainos( + xfs_mount_t *mp) +{ + xfs_inode_t *uip, *gip; + int error; + __int64_t sbflags; + uint flags; + + ASSERT(mp->m_quotainfo); + uip = gip = NULL; + sbflags = 0; + flags = 0; + + /* + * Get the uquota and gquota inodes + */ + if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) { + if (XFS_IS_UQUOTA_ON(mp) && + mp->m_sb.sb_uquotino != NULLFSINO) { + ASSERT(mp->m_sb.sb_uquotino > 0); + if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, + 0, &uip, 0))) + return XFS_ERROR(error); + } + if (XFS_IS_GQUOTA_ON(mp) && + mp->m_sb.sb_gquotino != NULLFSINO) { + ASSERT(mp->m_sb.sb_gquotino > 0); + if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, + 0, &gip, 0))) { + if (uip) + VN_RELE(XFS_ITOV(uip)); + return XFS_ERROR(error); + } + } + } else { + flags |= XFS_QMOPT_SBVERSION; + sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | + XFS_SB_GQUOTINO | XFS_SB_QFLAGS); + } + + /* + * Create the two inodes, if they don't exist already. The changes + * made above will get added to a transaction and logged in one of + * the qino_alloc calls below. If the device is readonly, + * temporarily switch to read-write to do this. 
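+ * XFS_QMOPT_SBVERSION is passed to at most one of the xfs_qm_qino_alloc()
+ * calls below: the first allocation on a superblock that predates quota
+ * support also revs the superblock version and initializes the quota
+ * inode/flags fields, after which the flag is cleared so the second
+ * allocation only records its own inode number.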
+ */ + if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { + if ((error = xfs_qm_qino_alloc(mp, &uip, + sbflags | XFS_SB_UQUOTINO, + flags | XFS_QMOPT_UQUOTA))) + return XFS_ERROR(error); + + flags &= ~XFS_QMOPT_SBVERSION; + } + if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) { + if ((error = xfs_qm_qino_alloc(mp, &gip, + sbflags | XFS_SB_GQUOTINO, + flags | XFS_QMOPT_GQUOTA))) { + if (uip) + VN_RELE(XFS_ITOV(uip)); + + return XFS_ERROR(error); + } + } + + XFS_QI_UQIP(mp) = uip; + XFS_QI_GQIP(mp) = gip; + + return (0); +} + + +/* + * Traverse the freelist of dquots and attempt to reclaim a maximum of + * 'howmany' dquots. This operation races with dqlookup(), and attempts to + * favor the lookup function ... + * XXXsup merge this with qm_reclaim_one(). + */ +STATIC int +xfs_qm_shake_freelist( + int howmany) +{ + int nreclaimed; + xfs_dqhash_t *hash; + xfs_dquot_t *dqp, *nextdqp; + int restarts; + int nflushes; + + if (howmany <= 0) + return (0); + + nreclaimed = 0; + restarts = 0; + nflushes = 0; + +#ifdef QUOTADEBUG + cmn_err(CE_DEBUG, "Shake free 0x%x", howmany); +#endif + /* lock order is : hashchainlock, freelistlock, mplistlock */ + tryagain: + xfs_qm_freelist_lock(xfs_Gqm); + + for (dqp = xfs_Gqm->qm_dqfreelist.qh_next; + ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) && + nreclaimed < howmany); ) { + xfs_dqlock(dqp); + + /* + * We are racing with dqlookup here. Naturally we don't + * want to reclaim a dquot that lookup wants. + */ + if (dqp->dq_flags & XFS_DQ_WANT) { + xfs_dqunlock(dqp); + xfs_qm_freelist_unlock(xfs_Gqm); + if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) + return (nreclaimed != howmany); + XQM_STATS_INC(xqmstats.xs_qm_dqwants); + goto tryagain; + } + + /* + * If the dquot is inactive, we are assured that it is + * not on the mplist or the hashlist, and that makes our + * life easier. + */ + if (dqp->dq_flags & XFS_DQ_INACTIVE) { + ASSERT(dqp->q_mount == NULL); + ASSERT(! XFS_DQ_IS_DIRTY(dqp)); + ASSERT(dqp->HL_PREVP == NULL); + ASSERT(dqp->MPL_PREVP == NULL); + XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims); + nextdqp = dqp->dq_flnext; + goto off_freelist; + } + + ASSERT(dqp->MPL_PREVP); + /* + * Try to grab the flush lock. If this dquot is in the process of + * getting flushed to disk, we don't want to reclaim it. + */ + if (! xfs_qm_dqflock_nowait(dqp)) { + xfs_dqunlock(dqp); + dqp = dqp->dq_flnext; + continue; + } + + /* + * We have the flush lock so we know that this is not in the + * process of being flushed. So, if this is dirty, flush it + * DELWRI so that we don't get a freelist infested with + * dirty dquots. + */ + if (XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY"); + /* + * We flush it delayed write, so don't bother + * releasing the mplock. + */ + (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); + xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ + dqp = dqp->dq_flnext; + continue; + } + /* + * We're trying to get the hashlock out of order. This races + * with dqlookup; so, we giveup and goto the next dquot if + * we couldn't get the hashlock. This way, we won't starve + * a dqlookup process that holds the hashlock that is + * waiting for the freelist lock. + */ + if (! xfs_qm_dqhashlock_nowait(dqp)) { + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + dqp = dqp->dq_flnext; + continue; + } + /* + * This races with dquot allocation code as well as dqflush_all + * and reclaim code. So, if we failed to grab the mplist lock, + * giveup everything and start over. + */ + hash = dqp->q_hash; + ASSERT(hash); + if (! 
xfs_qm_mplist_nowait(dqp->q_mount)) { + /* XXX put a sentinel so that we can come back here */ + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + XFS_DQ_HASH_UNLOCK(hash); + xfs_qm_freelist_unlock(xfs_Gqm); + if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) + return (nreclaimed != howmany); + goto tryagain; + } + xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING"); +#ifdef QUOTADEBUG + cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n", + dqp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT)); +#endif + ASSERT(dqp->q_nrefs == 0); + nextdqp = dqp->dq_flnext; + XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp); + XQM_HASHLIST_REMOVE(hash, dqp); + xfs_dqfunlock(dqp); + xfs_qm_mplist_unlock(dqp->q_mount); + XFS_DQ_HASH_UNLOCK(hash); + + off_freelist: + XQM_FREELIST_REMOVE(dqp); + xfs_dqunlock(dqp); + nreclaimed++; + XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims); + xfs_qm_dqdestroy(dqp); + dqp = nextdqp; + } + xfs_qm_freelist_unlock(xfs_Gqm); + return (nreclaimed != howmany); +} + + +/* + * The shake manager routine called by shaked() when memory is + * running low. + */ +/* ARGSUSED */ +STATIC void +xfs_qm_shake(void) +{ + int ndqused, nfree, n; + + if (!xfs_Gqm) + return; + + nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */ + /* incore dquots in all f/s's */ + ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree; + + ASSERT(ndqused >= 0); + + if (nfree <= ndqused && nfree < ndquot) + return; + + ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ + n = nfree - ndqused - ndquot; /* # over target */ + + (void) xfs_qm_shake_freelist(MAX(nfree, n)); +} + + +/* + * Just pop the least recently used dquot off the freelist and + * recycle it. The returned dquot is locked. + */ +STATIC xfs_dquot_t * +xfs_qm_dqreclaim_one(void) +{ + xfs_dquot_t *dqpout; + xfs_dquot_t *dqp; + int restarts; + int nflushes; + + restarts = 0; + dqpout = NULL; + nflushes = 0; + + /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */ + startagain: + xfs_qm_freelist_lock(xfs_Gqm); + + FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) { + xfs_dqlock(dqp); + + /* + * We are racing with dqlookup here. Naturally we don't + * want to reclaim a dquot that lookup wants. We release the + * freelist lock and start over, so that lookup will grab + * both the dquot and the freelistlock. + */ + if (dqp->dq_flags & XFS_DQ_WANT) { + ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE)); + xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT"); + xfs_dqunlock(dqp); + xfs_qm_freelist_unlock(xfs_Gqm); + if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) + return (NULL); + XQM_STATS_INC(xqmstats.xs_qm_dqwants); + goto startagain; + } + + /* + * If the dquot is inactive, we are assured that it is + * not on the mplist or the hashlist, and that makes our + * life easier. + */ + if (dqp->dq_flags & XFS_DQ_INACTIVE) { + ASSERT(dqp->q_mount == NULL); + ASSERT(! XFS_DQ_IS_DIRTY(dqp)); + ASSERT(dqp->HL_PREVP == NULL); + ASSERT(dqp->MPL_PREVP == NULL); + XQM_FREELIST_REMOVE(dqp); + xfs_dqunlock(dqp); + dqpout = dqp; + XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims); + break; + } + + ASSERT(dqp->q_hash); + ASSERT(dqp->MPL_PREVP); + + /* + * Try to grab the flush lock. If this dquot is in the process of + * getting flushed to disk, we don't want to reclaim it. + */ + if (! xfs_qm_dqflock_nowait(dqp)) { + xfs_dqunlock(dqp); + continue; + } + + /* + * We have the flush lock so we know that this is not in the + * process of being flushed. So, if this is dirty, flush it + * DELWRI so that we don't get a freelist infested with + * dirty dquots. 
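+ * DELWRI means delayed write: xfs_qm_dqflush() copies the dirty core
+ * into the backing buffer and marks that buffer for delayed write-out,
+ * so we never wait for I/O on this path. The dquot is skipped for now
+ * and becomes a reclaim candidate again once the buffer has been
+ * written.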
+ */ + if (XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY"); + /* + * We flush it delayed write, so don't bother + * releasing the freelist lock. + */ + (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); + xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ + continue; + } + + if (! xfs_qm_mplist_nowait(dqp->q_mount)) { + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + continue; + } + + if (! xfs_qm_dqhashlock_nowait(dqp)) + goto mplistunlock; + + ASSERT(dqp->q_nrefs == 0); + xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING"); + XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp); + XQM_HASHLIST_REMOVE(dqp->q_hash, dqp); + XQM_FREELIST_REMOVE(dqp); + dqpout = dqp; + XFS_DQ_HASH_UNLOCK(dqp->q_hash); + mplistunlock: + xfs_qm_mplist_unlock(dqp->q_mount); + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + if (dqpout) + break; + } + + xfs_qm_freelist_unlock(xfs_Gqm); + return (dqpout); +} + + +/*------------------------------------------------------------------*/ + +/* + * Return a new incore dquot. Depending on the number of + * dquots in the system, we either allocate a new one on the kernel heap, + * or reclaim a free one. + * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed + * to reclaim an existing one from the freelist. + */ +boolean_t +xfs_qm_dqalloc_incore( + xfs_dquot_t **O_dqpp) +{ + xfs_dquot_t *dqp; + + /* + * Check against high water mark to see if we want to pop + * a nincompoop dquot off the freelist. + */ + if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) { + /* + * Try to recycle a dquot from the freelist. + */ + if ((dqp = xfs_qm_dqreclaim_one())) { + XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); + /* + * Just zero the core here. The rest will get + * reinitialized by caller. XXX we shouldn't even + * do this zero ... + */ + memset(&dqp->q_core, 0, sizeof(dqp->q_core)); + *O_dqpp = dqp; + return (B_FALSE); + } + XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); + } + + /* + * Allocate a brand new dquot on the kernel heap and return it + * to the caller to initialize. + */ + ASSERT(xfs_Gqm->qm_dqzone != NULL); + *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); + atomic_inc(&xfs_Gqm->qm_totaldquots); + + return (B_TRUE); +} + + +/* + * Start a transaction and write the incore superblock changes to + * disk. flags parameter indicates which fields have changed. + */ +int +xfs_qm_write_sb_changes( + xfs_mount_t *mp, + __int64_t flags) +{ + xfs_trans_t *tp; + int error; + +#ifdef QUOTADEBUG + cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname); +#endif + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); + if ((error = xfs_trans_reserve(tp, 0, + mp->m_sb.sb_sectsize + 128, 0, + 0, + XFS_DEFAULT_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + + xfs_mod_sb(tp, flags); + (void) xfs_trans_commit(tp, 0, NULL); + + return (0); +} + + +/* --------------- utility functions for vnodeops ---------------- */ + + +/* + * Given an inode, a uid and gid (from cred_t) make sure that we have + * allocated relevant dquot(s) on disk, and that we won't exceed inode + * quotas by creating this file. + * This also attaches dquot(s) to the given inode after locking it, + * and returns the dquots corresponding to the uid and/or gid. 
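+ * A typical create-path caller looks roughly like this (a sketch only;
+ * the identity and flag arguments are illustrative and belong to the
+ * caller):
+ *
+ *	error = xfs_qm_vop_dqalloc(mp, dp, current->fsuid, current->fsgid,
+ *			XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA |
+ *			XFS_QMOPT_INHERIT, &udqp, &gdqp);
+ *	...
+ *	xfs_qm_dqrele(udqp);
+ *	xfs_qm_dqrele(gdqp);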
+ * + * in : inode (unlocked) + * out : udquot, gdquot with references taken and unlocked + */ +int +xfs_qm_vop_dqalloc( + xfs_mount_t *mp, + xfs_inode_t *ip, + uid_t uid, + gid_t gid, + uint flags, + xfs_dquot_t **O_udqpp, + xfs_dquot_t **O_gdqpp) +{ + int error; + xfs_dquot_t *uq, *gq; + uint lockflags; + + if (!XFS_IS_QUOTA_ON(mp)) + return 0; + + lockflags = XFS_ILOCK_EXCL; + xfs_ilock(ip, lockflags); + + if ((flags & XFS_QMOPT_INHERIT) && + XFS_INHERIT_GID(ip, XFS_MTOVFS(mp))) + gid = ip->i_d.di_gid; + + /* + * Attach the dquot(s) to this inode, doing a dquot allocation + * if necessary. The dquot(s) will not be locked. + */ + if (XFS_NOT_DQATTACHED(mp, ip)) { + if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC | + XFS_QMOPT_ILOCKED))) { + xfs_iunlock(ip, lockflags); + return (error); + } + } + + uq = gq = NULL; + if ((flags & XFS_QMOPT_UQUOTA) && + XFS_IS_UQUOTA_ON(mp)) { + if (ip->i_d.di_uid != uid) { + /* + * What we need is the dquot that has this uid, and + * if we send the inode to dqget, the uid of the inode + * takes priority over what's sent in the uid argument. + * We must unlock inode here before calling dqget if + * we're not sending the inode, because otherwise + * we'll deadlock by doing trans_reserve while + * holding ilock. + */ + xfs_iunlock(ip, lockflags); + if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid, + XFS_DQ_USER, + XFS_QMOPT_DQALLOC | + XFS_QMOPT_DOWARN, + &uq))) { + ASSERT(error != ENOENT); + return (error); + } + /* + * Get the ilock in the right order. + */ + xfs_dqunlock(uq); + lockflags = XFS_ILOCK_SHARED; + xfs_ilock(ip, lockflags); + } else { + /* + * Take an extra reference, because we'll return + * this to caller + */ + ASSERT(ip->i_udquot); + uq = ip->i_udquot; + xfs_dqlock(uq); + XFS_DQHOLD(uq); + xfs_dqunlock(uq); + } + } + if ((flags & XFS_QMOPT_GQUOTA) && + XFS_IS_GQUOTA_ON(mp)) { + if (ip->i_d.di_gid != gid) { + xfs_iunlock(ip, lockflags); + if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid, + XFS_DQ_GROUP, + XFS_QMOPT_DQALLOC | + XFS_QMOPT_DOWARN, + &gq))) { + if (uq) + xfs_qm_dqrele(uq); + ASSERT(error != ENOENT); + return (error); + } + xfs_dqunlock(gq); + lockflags = XFS_ILOCK_SHARED; + xfs_ilock(ip, lockflags); + } else { + ASSERT(ip->i_gdquot); + gq = ip->i_gdquot; + xfs_dqlock(gq); + XFS_DQHOLD(gq); + xfs_dqunlock(gq); + } + } + if (uq) + xfs_dqtrace_entry_ino(uq, "DQALLOC", ip); + + xfs_iunlock(ip, lockflags); + if (O_udqpp) + *O_udqpp = uq; + else if (uq) + xfs_qm_dqrele(uq); + if (O_gdqpp) + *O_gdqpp = gq; + else if (gq) + xfs_qm_dqrele(gq); + return (0); +} + +/* + * Actually transfer ownership, and do dquot modifications. + * These were already reserved. + */ +xfs_dquot_t * +xfs_qm_vop_chown( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dquot_t **IO_olddq, + xfs_dquot_t *newdq) +{ + xfs_dquot_t *prevdq; + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); + + /* old dquot */ + prevdq = *IO_olddq; + ASSERT(prevdq); + ASSERT(prevdq != newdq); + + xfs_trans_mod_dquot(tp, prevdq, + XFS_TRANS_DQ_BCOUNT, + -(ip->i_d.di_nblocks)); + xfs_trans_mod_dquot(tp, prevdq, + XFS_TRANS_DQ_ICOUNT, + -1); + + /* the sparkling new dquot */ + xfs_trans_mod_dquot(tp, newdq, + XFS_TRANS_DQ_BCOUNT, + ip->i_d.di_nblocks); + xfs_trans_mod_dquot(tp, newdq, + XFS_TRANS_DQ_ICOUNT, + 1); + + /* + * Take an extra reference, because the inode + * is going to keep this dquot pointer even + * after the trans_commit. 
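+ * The previous dquot is handed back still carrying the reference the
+ * inode used to own, so the expected caller pattern is, in sketch:
+ *
+ *	olddq = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, newdq);
+ *	... commit the transaction ...
+ *	xfs_qm_dqrele(olddq);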
+ */ + xfs_dqlock(newdq); + XFS_DQHOLD(newdq); + xfs_dqunlock(newdq); + *IO_olddq = newdq; + + return (prevdq); +} + +/* + * Quota reservations for setattr(AT_UID|AT_GID). + */ +int +xfs_qm_vop_chown_reserve( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dquot_t *udqp, + xfs_dquot_t *gdqp, + uint flags) +{ + int error; + xfs_mount_t *mp; + uint delblks; + xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; + + ASSERT(XFS_ISLOCKED_INODE(ip)); + mp = ip->i_mount; + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + delblks = ip->i_delayed_blks; + delblksudq = delblksgdq = unresudq = unresgdq = NULL; + + if (XFS_IS_UQUOTA_ON(mp) && udqp && + ip->i_d.di_uid != (uid_t)INT_GET(udqp->q_core.d_id, ARCH_CONVERT)) { + delblksudq = udqp; + /* + * If there are delayed allocation blocks, then we have to + * unreserve those from the old dquot, and add them to the + * new dquot. + */ + if (delblks) { + ASSERT(ip->i_udquot); + unresudq = ip->i_udquot; + } + } + if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp && + ip->i_d.di_gid != INT_GET(gdqp->q_core.d_id, ARCH_CONVERT)) { + delblksgdq = gdqp; + if (delblks) { + ASSERT(ip->i_gdquot); + unresgdq = ip->i_gdquot; + } + } + + if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, + delblksudq, delblksgdq, ip->i_d.di_nblocks, 1, + flags | XFS_QMOPT_RES_REGBLKS))) + return (error); + + /* + * Do the delayed blks reservations/unreservations now. Since, these + * are done without the help of a transaction, if a reservation fails + * its previous reservations won't be automatically undone by trans + * code. So, we have to do it manually here. + */ + if (delblks) { + /* + * Do the reservations first. Unreservation can't fail. + */ + ASSERT(delblksudq || delblksgdq); + ASSERT(unresudq || unresgdq); + if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, + delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0, + flags | XFS_QMOPT_RES_REGBLKS))) + return (error); + xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, + unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0, + XFS_QMOPT_RES_REGBLKS); + } + + return (0); +} + +int +xfs_qm_vop_rename_dqattach( + xfs_inode_t **i_tab) +{ + xfs_inode_t *ip; + int i; + int error; + + ip = i_tab[0]; + + if (! XFS_IS_QUOTA_ON(ip->i_mount)) + return (0); + + if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { + error = xfs_qm_dqattach(ip, 0); + if (error) + return (error); + } + for (i = 1; (i < 4 && i_tab[i]); i++) { + /* + * Watch out for duplicate entries in the table. 
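+ * Comparing against i_tab[i-1] only is enough because the caller hands
+ * us the rename inodes sorted (that is how rename orders its locks), so
+ * any duplicate entry is adjacent to its twin.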
+ */ + if ((ip = i_tab[i]) != i_tab[i-1]) { + if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { + error = xfs_qm_dqattach(ip, 0); + if (error) + return (error); + } + } + } + return (0); +} + +void +xfs_qm_vop_dqattach_and_dqmod_newinode( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dquot_t *udqp, + xfs_dquot_t *gdqp) +{ + if (!XFS_IS_QUOTA_ON(tp->t_mountp)) + return; + + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp)); + + if (udqp) { + xfs_dqlock(udqp); + XFS_DQHOLD(udqp); + xfs_dqunlock(udqp); + ASSERT(ip->i_udquot == NULL); + ip->i_udquot = udqp; + ASSERT(ip->i_d.di_uid == INT_GET(udqp->q_core.d_id, ARCH_CONVERT)); + xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); + } + if (gdqp) { + xfs_dqlock(gdqp); + XFS_DQHOLD(gdqp); + xfs_dqunlock(gdqp); + ASSERT(ip->i_gdquot == NULL); + ip->i_gdquot = gdqp; + ASSERT(ip->i_d.di_gid == INT_GET(gdqp->q_core.d_id, ARCH_CONVERT)); + xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); + } +} + +/* ------------- list stuff -----------------*/ +void +xfs_qm_freelist_init(xfs_frlist_t *ql) +{ + ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql; + mutex_init(&ql->qh_lock, MUTEX_DEFAULT, "dqf"); + ql->qh_version = 0; + ql->qh_nelems = 0; +} + +void +xfs_qm_freelist_destroy(xfs_frlist_t *ql) +{ + xfs_dquot_t *dqp, *nextdqp; + + mutex_lock(&ql->qh_lock, PINOD); + for (dqp = ql->qh_next; + dqp != (xfs_dquot_t *)ql; ) { + xfs_dqlock(dqp); + nextdqp = dqp->dq_flnext; +#ifdef QUOTADEBUG + cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp); +#endif + XQM_FREELIST_REMOVE(dqp); + xfs_dqunlock(dqp); + xfs_qm_dqdestroy(dqp); + dqp = nextdqp; + } + /* + * Don't bother about unlocking. + */ + mutex_destroy(&ql->qh_lock); + + ASSERT(ql->qh_nelems == 0); +} + +void +xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq) +{ + dq->dq_flnext = ql->qh_next; + dq->dq_flprev = (xfs_dquot_t *)ql; + ql->qh_next = dq; + dq->dq_flnext->dq_flprev = dq; + xfs_Gqm->qm_dqfreelist.qh_nelems++; + xfs_Gqm->qm_dqfreelist.qh_version++; +} + +void +xfs_qm_freelist_unlink(xfs_dquot_t *dq) +{ + xfs_dquot_t *next = dq->dq_flnext; + xfs_dquot_t *prev = dq->dq_flprev; + + next->dq_flprev = prev; + prev->dq_flnext = next; + dq->dq_flnext = dq->dq_flprev = dq; + xfs_Gqm->qm_dqfreelist.qh_nelems--; + xfs_Gqm->qm_dqfreelist.qh_version++; +} + +void +xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq) +{ + xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq); +} + +int +xfs_qm_dqhashlock_nowait( + xfs_dquot_t *dqp) +{ + int locked; + + locked = mutex_trylock(&((dqp)->q_hash->qh_lock)); + return (locked); +} + +int +xfs_qm_freelist_lock_nowait( + xfs_qm_t *xqm) +{ + int locked; + + locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock)); + return (locked); +} + +int +xfs_qm_mplist_nowait( + xfs_mount_t *mp) +{ + int locked; + + ASSERT(mp->m_quotainfo); + locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp))); + return (locked); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_qm.h linux.22-ac2/fs/xfs/quota/xfs_qm.h --- linux.vanilla/fs/xfs/quota/xfs_qm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_qm.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_QM_H__ +#define __XFS_QM_H__ + +#include "xfs_dquot_item.h" +#include "xfs_dquot.h" +#include "xfs_quota_priv.h" +#include "xfs_qm_stats.h" + +struct xfs_qm; +struct xfs_inode; + +extern mutex_t xfs_Gqm_lock; +extern struct xfs_qm *xfs_Gqm; +extern kmem_zone_t *qm_dqzone; +extern kmem_zone_t *qm_dqtrxzone; + +/* + * Used in xfs_qm_sync called by xfs_sync to count the max times that it can + * iterate over the mountpt's dquot list in one call. + */ +#define XFS_QM_SYNC_MAX_RESTARTS 7 + +/* + * Ditto, for xfs_qm_dqreclaim_one. + */ +#define XFS_QM_RECLAIM_MAX_RESTARTS 4 + +/* + * Ideal ratio of free to in use dquots. Quota manager makes an attempt + * to keep this balance. + */ +#define XFS_QM_DQFREE_RATIO 2 + +/* + * Dquot hashtable constants/threshold values. + */ +#define XFS_QM_NCSIZE_THRESHOLD 5000 +#define XFS_QM_HASHSIZE_LOW 32 +#define XFS_QM_HASHSIZE_HIGH 64 + +/* + * We output a cmn_err when quotachecking a quota file with more than + * this many fsbs. + */ +#define XFS_QM_BIG_QCHECK_NBLKS 500 + +/* + * This defines the unit of allocation of dquots. + * Currently, it is just one file system block, and a 4K blk contains 30 + * (136 * 30 = 4080) dquots. It's probably not worth trying to make + * this more dynamic. + * XXXsup However, if this number is changed, we have to make sure that we don't + * implicitly assume that we do allocations in chunks of a single filesystem + * block in the dquot/xqm code. + */ +#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 +/* + * When doing a quotacheck, we log dquot clusters of this many FSBs at most + * in a single transaction. We don't want to ask for too huge a log reservation. + */ +#define XFS_QM_MAX_DQCLUSTER_LOGSZ 3 + +typedef xfs_dqhash_t xfs_dqlist_t; +/* + * The freelist head. The first two fields match the first two in the + * xfs_dquot_t structure (in xfs_dqmarker_t) + */ +typedef struct xfs_frlist { + struct xfs_dquot *qh_next; + struct xfs_dquot *qh_prev; + mutex_t qh_lock; + uint qh_version; + uint qh_nelems; +} xfs_frlist_t; + +/* + * Quota Manager (global) structure. Lives only in core. 
+ */ +typedef struct xfs_qm { + xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */ + xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */ + uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */ + xfs_frlist_t qm_dqfreelist; /* freelist of dquots */ + atomic_t qm_totaldquots; /* total incore dquots */ + uint qm_nrefs; /* file systems with quota on */ + int qm_dqfree_ratio;/* ratio of free to inuse dquots */ + kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ + kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ +} xfs_qm_t; + +/* + * Various quota information for individual filesystems. + * The mount structure keeps a pointer to this. + */ +typedef struct xfs_quotainfo { + xfs_inode_t *qi_uquotaip; /* user quota inode */ + xfs_inode_t *qi_gquotaip; /* group quota inode */ + lock_t qi_pinlock; /* dquot pinning mutex */ + xfs_dqlist_t qi_dqlist; /* all dquots in filesys */ + int qi_dqreclaims; /* a change here indicates + a removal in the dqlist */ + time_t qi_btimelimit; /* limit for blks timer */ + time_t qi_itimelimit; /* limit for inodes timer */ + time_t qi_rtbtimelimit;/* limit for rt blks timer */ + xfs_qwarncnt_t qi_bwarnlimit; /* limit for num warnings */ + xfs_qwarncnt_t qi_iwarnlimit; /* limit for num warnings */ + mutex_t qi_quotaofflock;/* to serialize quotaoff */ + /* Some useful precalculated constants */ + xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */ + uint qi_dqperchunk; /* # ondisk dqs in above chunk */ +} xfs_quotainfo_t; + + +extern xfs_dqtrxops_t xfs_trans_dquot_ops; + +extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long); +extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *, + xfs_dquot_t *, xfs_dquot_t *, long, long, uint); +extern void xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *); +extern void xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *); + +/* + * We keep the usr and grp dquots separately so that locking will be easier + * to do at commit time. All transactions that we know of at this point + * affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value. + */ +#define XFS_QM_TRANS_MAXDQS 2 +typedef struct xfs_dquot_acct { + xfs_dqtrx_t dqa_usrdquots[XFS_QM_TRANS_MAXDQS]; + xfs_dqtrx_t dqa_grpdquots[XFS_QM_TRANS_MAXDQS]; +} xfs_dquot_acct_t; + +/* + * Users are allowed to have a usage exceeding their softlimit for + * a period this long. 
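The xfs_dquot_acct_t arrays defined above are only XFS_QM_TRANS_MAXDQS (two) entries deep because no supported transaction touches more than two distinct owners of one quota type (a chown, for instance, involves the old and the new owner). A hypothetical slot-lookup helper, with invented names, shows how such a fixed-size table is typically scanned:

#define MAXDQS 2

struct dqtrx_slot {
        void *dquot;    /* which dquot this slot tracks, NULL if free */
        long  delta;    /* blocks/inodes this transaction adds for it */
};

/* Return the slot already tracking 'dqp', or the first free slot. */
static struct dqtrx_slot *find_slot(struct dqtrx_slot tab[MAXDQS], void *dqp)
{
        int i;

        for (i = 0; i < MAXDQS; i++) {
                if (tab[i].dquot == dqp || tab[i].dquot == NULL)
                        return &tab[i];
        }
        return 0;       /* the real code would ASSERT: a third owner is a bug */
}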
+ */ +#define XFS_QM_BTIMELIMIT DQ_BTIMELIMIT +#define XFS_QM_RTBTIMELIMIT DQ_BTIMELIMIT +#define XFS_QM_ITIMELIMIT DQ_FTIMELIMIT + +#define XFS_QM_BWARNLIMIT 5 +#define XFS_QM_IWARNLIMIT 5 + +#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock, PINOD)) +#define XFS_QM_UNLOCK(xqm) (mutex_unlock(&xqm##_lock)) +#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++) +#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--) + +extern int xfs_qm_init_quotainfo(xfs_mount_t *); +extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); +extern int xfs_qm_mount_quotas(xfs_mount_t *); +extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint); +extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); +extern int xfs_qm_unmount_quotas(xfs_mount_t *); +extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); +extern int xfs_qm_sync(xfs_mount_t *, short); + +/* dquot stuff */ +extern void xfs_qm_dqunlink(xfs_dquot_t *); +extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **); +extern int xfs_qm_dqattach(xfs_inode_t *, uint); +extern void xfs_qm_dqdetach(xfs_inode_t *); +extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); +extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); + +/* vop stuff */ +extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *, + uid_t, gid_t, uint, + xfs_dquot_t **, xfs_dquot_t **); +extern void xfs_qm_vop_dqattach_and_dqmod_newinode( + xfs_trans_t *, xfs_inode_t *, + xfs_dquot_t *, xfs_dquot_t *); +extern int xfs_qm_vop_rename_dqattach(xfs_inode_t **); +extern xfs_dquot_t * xfs_qm_vop_chown(xfs_trans_t *, xfs_inode_t *, + xfs_dquot_t **, xfs_dquot_t *); +extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *, + xfs_dquot_t *, xfs_dquot_t *, uint); + +/* list stuff */ +extern void xfs_qm_freelist_init(xfs_frlist_t *); +extern void xfs_qm_freelist_destroy(xfs_frlist_t *); +extern void xfs_qm_freelist_insert(xfs_frlist_t *, xfs_dquot_t *); +extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *); +extern void xfs_qm_freelist_unlink(xfs_dquot_t *); +extern int xfs_qm_freelist_lock_nowait(xfs_qm_t *); +extern int xfs_qm_mplist_nowait(xfs_mount_t *); +extern int xfs_qm_dqhashlock_nowait(xfs_dquot_t *); + +/* system call interface */ +extern int xfs_qm_quotactl(bhv_desc_t *, int, int, xfs_caddr_t); + +#ifdef DEBUG +extern int xfs_qm_internalqcheck(xfs_mount_t *); +#else +#define xfs_qm_internalqcheck(mp) (0) +#endif + +#endif /* __XFS_QM_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_qm_stats.c linux.22-ac2/fs/xfs/quota/xfs_qm_stats.c --- linux.vanilla/fs/xfs/quota/xfs_qm_stats.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_qm_stats.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
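Note that XFS_QM_LOCK() and XFS_QM_UNLOCK() above do not take a field out of the xfs_qm structure: the ## operator pastes the argument onto _lock, so they operate on the separate global mutex declared earlier in this header. Roughly:

/* #define XFS_QM_LOCK(xqm)    (mutex_lock(&xqm##_lock, PINOD)) */

XFS_QM_LOCK(xfs_Gqm);    /* preprocesses to: mutex_lock(&xfs_Gqm_lock, PINOD); */
XFS_QM_UNLOCK(xfs_Gqm);  /* preprocesses to: mutex_unlock(&xfs_Gqm_lock);      */

so the lock protecting the global quota manager pointer exists even while xfs_Gqm itself is still NULL.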
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" + +#include "xfs_qm.h" + +struct xqmstats xqmstats; + +STATIC int +xfs_qm_read_xfsquota( + char *buffer, + char **start, + off_t offset, + int count, + int *eof, + void *data) +{ + int len; + + /* maximum; incore; ratio free to inuse; freelist */ + len = sprintf(buffer, "%d\t%d\t%d\t%u\n", + ndquot, + xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, + xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0, + xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0); + + if (offset >= len) { + *start = buffer; + *eof = 1; + return 0; + } + *start = buffer + offset; + if ((len -= offset) > count) + return count; + *eof = 1; + + return len; +} + +STATIC int +xfs_qm_read_stats( + char *buffer, + char **start, + off_t offset, + int count, + int *eof, + void *data) +{ + int len; + + /* quota performance statistics */ + len = sprintf(buffer, "qm %u %u %u %u %u %u %u %u\n", + xqmstats.xs_qm_dqreclaims, + xqmstats.xs_qm_dqreclaim_misses, + xqmstats.xs_qm_dquot_dups, + xqmstats.xs_qm_dqcachemisses, + xqmstats.xs_qm_dqcachehits, + xqmstats.xs_qm_dqwants, + xqmstats.xs_qm_dqshake_reclaims, + xqmstats.xs_qm_dqinact_reclaims); + + if (offset >= len) { + *start = buffer; + *eof = 1; + return 0; + } + *start = buffer + offset; + if ((len -= offset) > count) + return count; + *eof = 1; + + return len; +} + +void +xfs_qm_init_procfs(void) +{ + create_proc_read_entry("fs/xfs/xqmstat", 0, 0, xfs_qm_read_stats, NULL); + create_proc_read_entry("fs/xfs/xqm", 0, 0, xfs_qm_read_xfsquota, NULL); +} + +void +xfs_qm_cleanup_procfs(void) +{ + remove_proc_entry("fs/xfs/xqm", NULL); + remove_proc_entry("fs/xfs/xqmstat", NULL); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_qm_stats.h linux.22-ac2/fs/xfs/quota/xfs_qm_stats.h --- linux.vanilla/fs/xfs/quota/xfs_qm_stats.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_qm_stats.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved. 
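The two /proc handlers in xfs_qm_stats.c above (xfs_qm_read_xfsquota and xfs_qm_read_stats) both follow the usual 2.4-era read_proc convention: format the whole report into the supplied buffer on every call, then use offset, count, *start and *eof to return only the slice the reader asked for. A condensed sketch of that return protocol, under the same assumptions:

static int example_read_proc(char *buffer, char **start, off_t offset,
                             int count, int *eof, void *data)
{
        int len = sprintf(buffer, "whole report\n");    /* full text each time */

        if (offset >= len) {            /* reader is already past the end */
                *start = buffer;
                *eof = 1;
                return 0;
        }
        *start = buffer + offset;       /* hand back the unread tail */
        if ((len -= offset) > count)
                return count;           /* more remains; the reader will call again */
        *eof = 1;                       /* this call delivers the final piece */
        return len;
}

For the short single-line reports produced here the offset path is rarely taken, but implementing it keeps the files well behaved for readers that use small buffers.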
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_QM_STATS_H__ +#define __XFS_QM_STATS_H__ + + +#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF) + +/* + * XQM global statistics + */ +struct xqmstats { + __uint32_t xs_qm_dqreclaims; + __uint32_t xs_qm_dqreclaim_misses; + __uint32_t xs_qm_dquot_dups; + __uint32_t xs_qm_dqcachemisses; + __uint32_t xs_qm_dqcachehits; + __uint32_t xs_qm_dqwants; + __uint32_t xs_qm_dqshake_reclaims; + __uint32_t xs_qm_dqinact_reclaims; +}; + +extern struct xqmstats xqmstats; + +# define XQM_STATS_INC(count) ( (count)++ ) + +extern void xfs_qm_init_procfs(void); +extern void xfs_qm_cleanup_procfs(void); + +#else + +# define XQM_STATS_INC(count) do { } while (0) + +static __inline void xfs_qm_init_procfs(void) { }; +static __inline void xfs_qm_cleanup_procfs(void) { }; + +#endif + +#endif /* __XFS_QM_STATS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_qm_syscalls.c linux.22-ac2/fs/xfs/quota/xfs_qm_syscalls.c --- linux.vanilla/fs/xfs/quota/xfs_qm_syscalls.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_qm_syscalls.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,1444 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
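When CONFIG_PROC_FS is off (or XFS_STATS_OFF is set), the header above compiles the statistics hooks away: XQM_STATS_INC() becomes do { } while (0) and the procfs setup functions become empty inlines. The do/while(0) form is the standard idiom for a compiled-out statement macro, because the expansion is still a single statement that wants its trailing semicolon, so call sites compile identically whether the statistics are built in or not:

#define STATS_INC_REAL(c)       ((c)++)
#define STATS_INC_NOOP(c)       do { } while (0)

static void example(int hit, int *counter)
{
        if (hit)
                STATS_INC_NOOP(*counter);      /* still one well-formed statement */
        else
                (*counter)--;
}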
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" + +#include "xfs_qm.h" + +#ifdef DEBUG +# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args) +#else +# define qdprintk(s, args...) do { } while (0) +#endif + +STATIC int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint); +STATIC int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint, + fs_disk_quota_t *); +STATIC int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *); +STATIC int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint, + fs_disk_quota_t *); +STATIC int xfs_qm_scall_quotaon(xfs_mount_t *, uint); +STATIC int xfs_qm_scall_quotaoff(xfs_mount_t *, uint, boolean_t); +STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); +STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, + uint); +STATIC uint xfs_qm_import_flags(uint); +STATIC uint xfs_qm_export_flags(uint); +STATIC uint xfs_qm_import_qtype_flags(uint); +STATIC uint xfs_qm_export_qtype_flags(uint); +STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *, + fs_disk_quota_t *); + + +/* + * The main distribution switch of all XFS quotactl system calls. + */ +int +xfs_qm_quotactl( + struct bhv_desc *bdp, + int cmd, + int id, + xfs_caddr_t addr) +{ + xfs_mount_t *mp; + int error; + struct vfs *vfsp; + + vfsp = bhvtovfs(bdp); + mp = XFS_VFSTOM(vfsp); + + if (addr == NULL && cmd != Q_SYNC) + return XFS_ERROR(EINVAL); + if (id < 0 && cmd != Q_SYNC) + return XFS_ERROR(EINVAL); + + /* + * The following commands are valid even when quotaoff. + */ + switch (cmd) { + /* + * truncate quota files. quota must be off. + */ + case Q_XQUOTARM: + if (XFS_IS_QUOTA_ON(mp) || addr == NULL) + return XFS_ERROR(EINVAL); + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + return (xfs_qm_scall_trunc_qfiles(mp, + xfs_qm_import_qtype_flags(*(uint *)addr))); + /* + * Get quota status information. + */ + case Q_XGETQSTAT: + return (xfs_qm_scall_getqstat(mp, (fs_quota_stat_t *)addr)); + + /* + * QUOTAON for root f/s and quota enforcement on others.. + * Quota accounting for non-root f/s's must be turned on + * at mount time. + */ + case Q_XQUOTAON: + if (addr == NULL) + return XFS_ERROR(EINVAL); + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + return (xfs_qm_scall_quotaon(mp, + xfs_qm_import_flags(*(uint *)addr))); + case Q_XQUOTAOFF: + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + break; + + default: + break; + } + + if (! 
XFS_IS_QUOTA_ON(mp)) + return XFS_ERROR(ESRCH); + + switch (cmd) { + case Q_XQUOTAOFF: + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + error = xfs_qm_scall_quotaoff(mp, + xfs_qm_import_flags(*(uint *)addr), + B_FALSE); + break; + + /* + * Defaults to XFS_GETUQUOTA. + */ + case Q_XGETQUOTA: + error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_USER, + (fs_disk_quota_t *)addr); + break; + /* + * Set limits, both hard and soft. Defaults to Q_SETUQLIM. + */ + case Q_XSETQLIM: + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_USER, + (fs_disk_quota_t *)addr); + break; + + case Q_XSETGQLIM: + if (vfsp->vfs_flag & VFS_RDONLY) + return XFS_ERROR(EROFS); + error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_GROUP, + (fs_disk_quota_t *)addr); + break; + + + case Q_XGETGQUOTA: + error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP, + (fs_disk_quota_t *)addr); + break; + + /* + * Quotas are entirely undefined after quotaoff in XFS quotas. + * For instance, there's no way to set limits when quotaoff. + */ + + default: + error = XFS_ERROR(EINVAL); + break; + } + + return (error); +} + +/* + * Turn off quota accounting and/or enforcement for all udquots and/or + * gdquots. Called only at unmount time. + * + * This assumes that there are no dquots of this file system cached + * incore, and modifies the ondisk dquot directly. Therefore, for example, + * it is an error to call this twice, without purging the cache. + */ +STATIC int +xfs_qm_scall_quotaoff( + xfs_mount_t *mp, + uint flags, + boolean_t force) +{ + uint dqtype; + unsigned long s; + int error; + uint inactivate_flags; + xfs_qoff_logitem_t *qoffstart; + int nculprits; + + if (!force && !capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + /* + * No file system can have quotas enabled on disk but not in core. + * Note that quota utilities (like quotaoff) _expect_ + * errno == EEXIST here. + */ + if ((mp->m_qflags & flags) == 0) + return XFS_ERROR(EEXIST); + error = 0; + + flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); + + /* + * We don't want to deal with two quotaoffs messing up each other, + * so we're going to serialize it. quotaoff isn't exactly a performance + * critical thing. + * If quotaoff, then we must be dealing with the root filesystem. + */ + ASSERT(mp->m_quotainfo); + if (mp->m_quotainfo) + mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD); + + ASSERT(mp->m_quotainfo); + + /* + * If we're just turning off quota enforcement, change mp and go. + */ + if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { + mp->m_qflags &= ~(flags); + + s = XFS_SB_LOCK(mp); + mp->m_sb.sb_qflags = mp->m_qflags; + XFS_SB_UNLOCK(mp, s); + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + + /* XXX what to do if error ? Revert back to old vals incore ? */ + error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); + return (error); + } + + dqtype = 0; + inactivate_flags = 0; + /* + * If accounting is off, we must turn enforcement off, clear the + * quota 'CHKD' certificate to make it known that we have to + * do a quotacheck the next time this quota is turned on. + */ + if (flags & XFS_UQUOTA_ACCT) { + dqtype |= XFS_QMOPT_UQUOTA; + flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD); + inactivate_flags |= XFS_UQUOTA_ACTIVE; + } + if (flags & XFS_GQUOTA_ACCT) { + dqtype |= XFS_QMOPT_GQUOTA; + flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD); + inactivate_flags |= XFS_GQUOTA_ACTIVE; + } + + /* + * Nothing to do? Don't complain. This happens when we're just + * turning off quota enforcement. 
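xfs_qm_quotactl above is effectively two switches separated by a gate: the first handles commands that must work even with quotas off (Q_XQUOTARM, Q_XGETQSTAT, Q_XQUOTAON), then XFS_IS_QUOTA_ON() is checked, and only then are the running-quota commands dispatched. A stripped-down sketch of that control flow, with all names invented for illustration:

enum { CMD_GETQSTAT, CMD_QUOTARM, CMD_QUOTAON, CMD_GETQUOTA, CMD_SETQLIM };

static int quotactl_shape(int cmd, int quota_is_on)
{
        switch (cmd) {                  /* stage 1: legal even with quotas off */
        case CMD_GETQSTAT:
        case CMD_QUOTARM:
        case CMD_QUOTAON:
                return 0;               /* handled and returned before the gate */
        default:
                break;
        }

        if (!quota_is_on)               /* the gate: ESRCH in the real code */
                return -1;

        return 0;                       /* stage 2: get/set limits, quotaoff, ... */
}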
+ */ + if ((mp->m_qflags & flags) == 0) { + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + return (0); + } + + /* + * Write the LI_QUOTAOFF log record, and do SB changes atomically, + * and synchronously. + */ + xfs_qm_log_quotaoff(mp, &qoffstart, flags); + + /* + * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct + * to take care of the race between dqget and quotaoff. We don't take + * any special locks to reset these bits. All processes need to check + * these bits *after* taking inode lock(s) to see if the particular + * quota type is in the process of being turned off. If *ACTIVE, it is + * guaranteed that all dquot structures and all quotainode ptrs will all + * stay valid as long as that inode is kept locked. + * + * There is no turning back after this. + */ + mp->m_qflags &= ~inactivate_flags; + + /* + * Give back all the dquot reference(s) held by inodes. + * Here we go thru every single incore inode in this file system, and + * do a dqrele on the i_udquot/i_gdquot that it may have. + * Essentially, as long as somebody has an inode locked, this guarantees + * that quotas will not be turned off. This is handy because in a + * transaction once we lock the inode(s) and check for quotaon, we can + * depend on the quota inodes (and other things) being valid as long as + * we keep the lock(s). + */ + xfs_qm_dqrele_all_inodes(mp, flags); + + /* + * Next we make the changes in the quota flag in the mount struct. + * This isn't protected by a particular lock directly, because we + * don't want to take a mrlock everytime we depend on quotas being on. + */ + mp->m_qflags &= ~(flags); + + /* + * Go through all the dquots of this file system and purge them, + * according to what was turned off. We may not be able to get rid + * of all dquots, because dquots can have temporary references that + * are not attached to inodes. eg. xfs_setattr, xfs_create. + * So, if we couldn't purge all the dquots from the filesystem, + * we can't get rid of the incore data structures. + */ + while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF))) + delay(10 * nculprits); + + /* + * Transactions that had started before ACTIVE state bit was cleared + * could have logged many dquots, so they'd have higher LSNs than + * the first QUOTAOFF log record does. If we happen to crash when + * the tail of the log has gone past the QUOTAOFF record, but + * before the last dquot modification, those dquots __will__ + * recover, and that's not good. + * + * So, we have QUOTAOFF start and end logitems; the start + * logitem won't get overwritten until the end logitem appears... + */ + xfs_qm_log_quotaoff_end(mp, qoffstart, flags); + + /* + * If quotas is completely disabled, close shop. + */ + if ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_ALL) { + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + xfs_qm_destroy_quotainfo(mp); + return (0); + } + + /* + * Release our quotainode references, and vn_purge them, + * if we don't need them anymore. 
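The purge loop in xfs_qm_scall_quotaoff cannot assume it wins on the first pass, because dquots can hold temporary references that are not attached to any inode (xfs_setattr, xfs_create); it simply retries, sleeping longer the more culprits remain. The shape of that retry loop in a self-contained toy form (usleep stands in for the kernel's delay()):

#include <unistd.h>

/* Pretend each pass manages to release one pinned entry. */
static int purge_all_toy(int *pinned)
{
        if (*pinned > 0)
                (*pinned)--;
        return *pinned;                 /* how many are still unreleasable */
}

static void drain(int pinned)
{
        int nculprits;

        while ((nculprits = purge_all_toy(&pinned)) != 0)
                usleep(10 * nculprits); /* back off proportionally, then retry */
}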
+ */ + if ((dqtype & XFS_QMOPT_UQUOTA) && XFS_QI_UQIP(mp)) { + XFS_PURGE_INODE(XFS_QI_UQIP(mp)); + XFS_QI_UQIP(mp) = NULL; + } + if ((dqtype & XFS_QMOPT_GQUOTA) && XFS_QI_GQIP(mp)) { + XFS_PURGE_INODE(XFS_QI_GQIP(mp)); + XFS_QI_GQIP(mp) = NULL; + } + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + + return (error); +} + +STATIC int +xfs_qm_scall_trunc_qfiles( + xfs_mount_t *mp, + uint flags) +{ + int error; + xfs_inode_t *qip; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + error = 0; + if (!XFS_SB_VERSION_HASQUOTA(&mp->m_sb) || flags == 0) { + qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); + return XFS_ERROR(EINVAL); + } + + if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { + error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, &qip, 0); + if (! error) { + (void) xfs_truncate_file(mp, qip); + VN_RELE(XFS_ITOV(qip)); + } + } + + if ((flags & XFS_DQ_GROUP) && mp->m_sb.sb_gquotino != NULLFSINO) { + error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, &qip, 0); + if (! error) { + (void) xfs_truncate_file(mp, qip); + VN_RELE(XFS_ITOV(qip)); + } + } + + return (error); +} + + +/* + * Switch on (a given) quota enforcement for a filesystem. This takes + * effect immediately. + * (Switching on quota accounting must be done at mount time.) + */ +STATIC int +xfs_qm_scall_quotaon( + xfs_mount_t *mp, + uint flags) +{ + int error; + unsigned long s; + uint qf; + uint accflags; + __int64_t sbflags; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + + flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); + /* + * Switching on quota accounting must be done at mount time. + */ + accflags = flags & XFS_ALL_QUOTA_ACCT; + flags &= ~(XFS_ALL_QUOTA_ACCT); + + sbflags = 0; + + if (flags == 0) { + qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags); + return XFS_ERROR(EINVAL); + } + + /* No fs can turn on quotas with a delayed effect */ + ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0); + + /* + * Can't enforce without accounting. We check the superblock + * qflags here instead of m_qflags because rootfs can have + * quota acct on ondisk without m_qflags' knowing. + */ + if (((flags & XFS_UQUOTA_ACCT) == 0 && + (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && + (flags & XFS_UQUOTA_ENFD)) + || + ((flags & XFS_GQUOTA_ACCT) == 0 && + (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && + (flags & XFS_GQUOTA_ENFD))) { + qdprintk("Can't enforce without acct, flags=%x sbflags=%x\n", + flags, mp->m_sb.sb_qflags); + return XFS_ERROR(EINVAL); + } + /* + * If everything's upto-date incore, then don't waste time. + */ + if ((mp->m_qflags & flags) == flags) + return XFS_ERROR(EEXIST); + + /* + * Change sb_qflags on disk but not incore mp->qflags + * if this is the root filesystem. + */ + s = XFS_SB_LOCK(mp); + qf = mp->m_sb.sb_qflags; + mp->m_sb.sb_qflags = qf | flags; + XFS_SB_UNLOCK(mp, s); + + /* + * There's nothing to change if it's the same. + */ + if ((qf & flags) == flags && sbflags == 0) + return XFS_ERROR(EEXIST); + sbflags |= XFS_SB_QFLAGS; + + if ((error = xfs_qm_write_sb_changes(mp, sbflags))) + return (error); + /* + * If we aren't trying to switch on quota enforcement, we are done. + */ + if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) != + (mp->m_qflags & XFS_UQUOTA_ACCT)) || + (flags & XFS_ALL_QUOTA_ENFD) == 0) + return (0); + + if (! XFS_IS_QUOTA_RUNNING(mp)) + return XFS_ERROR(ESRCH); + + /* + * Switch on quota enforcement in core. 
+ */ + mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD); + mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + + return (0); +} + + + +/* + * Return quota status information, such as uquota-off, enforcements, etc. + */ +STATIC int +xfs_qm_scall_getqstat( + xfs_mount_t *mp, + fs_quota_stat_t *out) +{ + xfs_inode_t *uip, *gip; + boolean_t tempuqip, tempgqip; + + uip = gip = NULL; + tempuqip = tempgqip = B_FALSE; + memset(out, 0, sizeof(fs_quota_stat_t)); + + out->qs_version = FS_QSTAT_VERSION; + if (! XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) { + out->qs_uquota.qfs_ino = NULLFSINO; + out->qs_gquota.qfs_ino = NULLFSINO; + return (0); + } + out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & + (XFS_ALL_QUOTA_ACCT| + XFS_ALL_QUOTA_ENFD)); + out->qs_pad = 0; + out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; + out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; + + if (mp->m_quotainfo) { + uip = mp->m_quotainfo->qi_uquotaip; + gip = mp->m_quotainfo->qi_gquotaip; + } + if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { + if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, &uip, 0) == 0) + tempuqip = B_TRUE; + } + if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { + if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, &gip, 0) == 0) + tempgqip = B_TRUE; + } + if (uip) { + out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; + out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; + if (tempuqip) + VN_RELE(XFS_ITOV(uip)); + } + if (gip) { + out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; + out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; + if (tempgqip) + VN_RELE(XFS_ITOV(gip)); + } + if (mp->m_quotainfo) { + out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp); + out->qs_btimelimit = XFS_QI_BTIMELIMIT(mp); + out->qs_itimelimit = XFS_QI_ITIMELIMIT(mp); + out->qs_rtbtimelimit = XFS_QI_RTBTIMELIMIT(mp); + out->qs_bwarnlimit = XFS_QI_BWARNLIMIT(mp); + out->qs_iwarnlimit = XFS_QI_IWARNLIMIT(mp); + } + return (0); +} + +/* + * Adjust quota limits, and start/stop timers accordingly. + */ +STATIC int +xfs_qm_scall_setqlim( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + fs_disk_quota_t *newlim) +{ + xfs_disk_dquot_t *ddq; + xfs_dquot_t *dqp; + xfs_trans_t *tp; + int error; + xfs_qcnt_t hard, soft; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + + if ((newlim->d_fieldmask & (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK)) == 0) + return (0); + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); + if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128, + 0, 0, XFS_DEFAULT_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + + /* + * We don't want to race with a quotaoff so take the quotaoff lock. + * (We don't hold an inode lock, so there's nothing else to stop + * a quotaoff from happening). (XXXThis doesn't currently happen + * because we take the vfslock before calling xfs_qm_sysent). + */ + mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD); + + /* + * Get the dquot (locked), and join it to the transaction. + * Allocate the dquot if this doesn't exist. + */ + if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) { + xfs_trans_cancel(tp, XFS_TRANS_ABORT); + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + ASSERT(error != ENOENT); + return (error); + } + xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET"); + xfs_trans_dqjoin(tp, dqp); + ddq = &dqp->q_core; + + /* + * Make sure that hardlimits are >= soft limits before changing. + */ + hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? 
+ (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : + INT_GET(ddq->d_blk_hardlimit, ARCH_CONVERT); + soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? + (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : + INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT); + if (hard == 0 || hard >= soft) { + INT_SET(ddq->d_blk_hardlimit, ARCH_CONVERT, hard); + INT_SET(ddq->d_blk_softlimit, ARCH_CONVERT, soft); + } + else { + qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft); + } + hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? + (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : + INT_GET(ddq->d_rtb_hardlimit, ARCH_CONVERT); + soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? + (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : + INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT); + if (hard == 0 || hard >= soft) { + INT_SET(ddq->d_rtb_hardlimit, ARCH_CONVERT, hard); + INT_SET(ddq->d_rtb_softlimit, ARCH_CONVERT, soft); + } + else + qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft); + + hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? + (xfs_qcnt_t) newlim->d_ino_hardlimit : + INT_GET(ddq->d_ino_hardlimit, ARCH_CONVERT); + soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? + (xfs_qcnt_t) newlim->d_ino_softlimit : + INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT); + if (hard == 0 || hard >= soft) { + INT_SET(ddq->d_ino_hardlimit, ARCH_CONVERT, hard); + INT_SET(ddq->d_ino_softlimit, ARCH_CONVERT, soft); + } + else + qdprintk("ihard %Ld < isoft %Ld\n", hard, soft); + + if (id == 0) { + /* + * Timelimits for the super user set the relative time + * the other users can be over quota for this file system. + * If it is zero a default is used. + */ + if (newlim->d_fieldmask & FS_DQ_BTIMER) { + mp->m_quotainfo->qi_btimelimit = newlim->d_btimer; + INT_SET(dqp->q_core.d_btimer, ARCH_CONVERT, newlim->d_btimer); + } + if (newlim->d_fieldmask & FS_DQ_ITIMER) { + mp->m_quotainfo->qi_itimelimit = newlim->d_itimer; + INT_SET(dqp->q_core.d_itimer, ARCH_CONVERT, newlim->d_itimer); + } + if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { + mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer; + INT_SET(dqp->q_core.d_rtbtimer, ARCH_CONVERT, newlim->d_rtbtimer); + } + } else /* if (XFS_IS_QUOTA_ENFORCED(mp)) */ { + /* + * If the user is now over quota, start the timelimit. + * The user will not be 'warned'. + * Note that we keep the timers ticking, whether enforcement + * is on or off. We don't really want to bother with iterating + * over all ondisk dquots and turning the timers on/off. + */ + xfs_qm_adjust_dqtimers(mp, ddq); + } + dqp->dq_flags |= XFS_DQ_DIRTY; + xfs_trans_log_dquot(tp, dqp); + + xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT"); + xfs_trans_commit(tp, 0, NULL); + xfs_qm_dqprint(dqp); + xfs_qm_dqrele(dqp); + mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + + return (0); +} + +STATIC int +xfs_qm_scall_getquota( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + fs_disk_quota_t *out) +{ + xfs_dquot_t *dqp; + int error; + + /* + * Try to get the dquot. We don't want it allocated on disk, so + * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't + * exist, we'll get ENOENT back. + */ + if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) { + return (error); + } + + xfs_dqtrace_entry(dqp, "Q_GETQUOTA SUCCESS"); + /* + * If everything's NULL, this dquot doesn't quite exist as far as + * our utility programs are concerned. 
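Each limit pair in xfs_qm_scall_setqlim is handled with the same pattern: take the new value only if its FS_DQ_* bit is set in d_fieldmask, otherwise keep what is already on disk, and accept the pair only when the hard limit is zero (meaning unlimited) or at least the soft limit. Isolated from the on-disk byte-order handling, the pattern is:

#define FLD_HARD 0x1
#define FLD_SOFT 0x2

static void set_limit_pair(unsigned int fieldmask,
                           unsigned long long new_hard, unsigned long long new_soft,
                           unsigned long long *hardp, unsigned long long *softp)
{
        unsigned long long hard, soft;

        hard = (fieldmask & FLD_HARD) ? new_hard : *hardp;  /* keep old if bit clear */
        soft = (fieldmask & FLD_SOFT) ? new_soft : *softp;

        if (hard == 0 || hard >= soft) {
                *hardp = hard;
                *softp = soft;
        }
        /* else: the pair is rejected, matching the qdprintk-only branch above */
}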
+ */ + if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { + xfs_qm_dqput(dqp); + return XFS_ERROR(ENOENT); + } + /* xfs_qm_dqprint(dqp); */ + /* + * Convert the disk dquot to the exportable format + */ + xfs_qm_export_dquot(mp, &dqp->q_core, out); + xfs_qm_dqput(dqp); + return (error ? XFS_ERROR(EFAULT) : 0); +} + + +STATIC int +xfs_qm_log_quotaoff_end( + xfs_mount_t *mp, + xfs_qoff_logitem_t *startqoff, + uint flags) +{ + xfs_trans_t *tp; + int error; + xfs_qoff_logitem_t *qoffi; + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END); + + if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2, + 0, 0, XFS_DEFAULT_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + + qoffi = xfs_trans_get_qoff_item(tp, startqoff, + flags & XFS_ALL_QUOTA_ACCT); + xfs_trans_log_quotaoff_item(tp, qoffi); + + /* + * We have to make sure that the transaction is secure on disk before we + * return and actually stop quota accounting. So, make it synchronous. + * We don't care about quotoff's performance. + */ + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0, NULL); + return (error); +} + + +STATIC int +xfs_qm_log_quotaoff( + xfs_mount_t *mp, + xfs_qoff_logitem_t **qoffstartp, + uint flags) +{ + xfs_trans_t *tp; + int error; + unsigned long s; + xfs_qoff_logitem_t *qoffi=NULL; + uint oldsbqflag=0; + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF); + if ((error = xfs_trans_reserve(tp, 0, + sizeof(xfs_qoff_logitem_t) * 2 + + mp->m_sb.sb_sectsize + 128, + 0, + 0, + XFS_DEFAULT_LOG_COUNT))) { + goto error0; + } + + qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); + xfs_trans_log_quotaoff_item(tp, qoffi); + + s = XFS_SB_LOCK(mp); + oldsbqflag = mp->m_sb.sb_qflags; + mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; + XFS_SB_UNLOCK(mp, s); + + xfs_mod_sb(tp, XFS_SB_QFLAGS); + + /* + * We have to make sure that the transaction is secure on disk before we + * return and actually stop quota accounting. So, make it synchronous. + * We don't care about quotoff's performance. + */ + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0, NULL); + +error0: + if (error) { + xfs_trans_cancel(tp, 0); + /* + * No one else is modifying sb_qflags, so this is OK. + * We still hold the quotaofflock. + */ + s = XFS_SB_LOCK(mp); + mp->m_sb.sb_qflags = oldsbqflag; + XFS_SB_UNLOCK(mp, s); + } + *qoffstartp = qoffi; + return (error); +} + + +/* + * Translate an internal style on-disk-dquot to the exportable format. + * The main differences are that the counters/limits are all in Basic + * Blocks (BBs) instead of the internal FSBs, and all on-disk data has + * to be converted to the native endianness. 
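xfs_qm_export_dquot below also changes units: on disk the counters and limits are kept in filesystem blocks (FSBs), while fs_disk_quota_t expects 512-byte basic blocks (BBs), so every block field goes through XFS_FSB_TO_BB (and the setqlim path earlier used XFS_BB_TO_FSB for the reverse trip). Assuming a fixed 4 KiB filesystem block size purely for illustration, the conversion is a factor of eight:

/* Simplified stand-in for XFS_FSB_TO_BB with a fixed 4 KiB block size. */
#define BLOCKSIZE       4096
#define BBSIZE          512
#define FSB_TO_BB(fsb)  ((fsb) * (BLOCKSIZE / BBSIZE))

/* A 100-FSB block soft limit is exported to user space as FSB_TO_BB(100) == 800 BBs. */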
+ */ +STATIC void +xfs_qm_export_dquot( + xfs_mount_t *mp, + xfs_disk_dquot_t *src, + struct fs_disk_quota *dst) +{ + memset(dst, 0, sizeof(*dst)); + dst->d_version = FS_DQUOT_VERSION; /* different from src->d_version */ + dst->d_flags = + xfs_qm_export_qtype_flags(INT_GET(src->d_flags, ARCH_CONVERT)); + dst->d_id = INT_GET(src->d_id, ARCH_CONVERT); + dst->d_blk_hardlimit = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_blk_hardlimit, ARCH_CONVERT)); + dst->d_blk_softlimit = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_blk_softlimit, ARCH_CONVERT)); + dst->d_ino_hardlimit = (__uint64_t) + INT_GET(src->d_ino_hardlimit, ARCH_CONVERT); + dst->d_ino_softlimit = (__uint64_t) + INT_GET(src->d_ino_softlimit, ARCH_CONVERT); + dst->d_bcount = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_bcount, ARCH_CONVERT)); + dst->d_icount = (__uint64_t) INT_GET(src->d_icount, ARCH_CONVERT); + dst->d_btimer = (__uint32_t) INT_GET(src->d_btimer, ARCH_CONVERT); + dst->d_itimer = (__uint32_t) INT_GET(src->d_itimer, ARCH_CONVERT); + dst->d_iwarns = INT_GET(src->d_iwarns, ARCH_CONVERT); + dst->d_bwarns = INT_GET(src->d_bwarns, ARCH_CONVERT); + + dst->d_rtb_hardlimit = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_rtb_hardlimit, ARCH_CONVERT)); + dst->d_rtb_softlimit = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_rtb_softlimit, ARCH_CONVERT)); + dst->d_rtbcount = (__uint64_t) + XFS_FSB_TO_BB(mp, INT_GET(src->d_rtbcount, ARCH_CONVERT)); + dst->d_rtbtimer = (__uint32_t) INT_GET(src->d_rtbtimer, ARCH_CONVERT); + dst->d_rtbwarns = INT_GET(src->d_rtbwarns, ARCH_CONVERT); + + /* + * Internally, we don't reset all the timers when quota enforcement + * gets turned off. No need to confuse the userlevel code, + * so return zeroes in that case. + */ + if (! XFS_IS_QUOTA_ENFORCED(mp)) { + dst->d_btimer = 0; + dst->d_itimer = 0; + dst->d_rtbtimer = 0; + } + +#ifdef DEBUG + if (XFS_IS_QUOTA_ENFORCED(mp) && dst->d_id != 0) { + if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) && + (dst->d_blk_softlimit > 0)) { + ASSERT(dst->d_btimer != 0); + } + if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) && + (dst->d_ino_softlimit > 0)) { + ASSERT(dst->d_itimer != 0); + } + } +#endif +} + +STATIC uint +xfs_qm_import_qtype_flags( + uint uflags) +{ + /* + * Can't be both at the same time. + */ + if (((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) == + (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) || + ((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) == 0)) + return (0); + + return (uflags & XFS_USER_QUOTA) ? + XFS_DQ_USER : XFS_DQ_GROUP; +} + +STATIC uint +xfs_qm_export_qtype_flags( + uint flags) +{ + /* + * Can't be both at the same time. + */ + ASSERT((flags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) != + (XFS_GROUP_QUOTA | XFS_USER_QUOTA)); + ASSERT((flags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) != 0); + + return (flags & XFS_DQ_USER) ? 
+ XFS_USER_QUOTA : XFS_GROUP_QUOTA; +} + +STATIC uint +xfs_qm_import_flags( + uint uflags) +{ + uint flags = 0; + + if (uflags & XFS_QUOTA_UDQ_ACCT) + flags |= XFS_UQUOTA_ACCT; + if (uflags & XFS_QUOTA_GDQ_ACCT) + flags |= XFS_GQUOTA_ACCT; + if (uflags & XFS_QUOTA_UDQ_ENFD) + flags |= XFS_UQUOTA_ENFD; + if (uflags & XFS_QUOTA_GDQ_ENFD) + flags |= XFS_GQUOTA_ENFD; + return (flags); +} + + +STATIC uint +xfs_qm_export_flags( + uint flags) +{ + uint uflags; + + uflags = 0; + if (flags & XFS_UQUOTA_ACCT) + uflags |= XFS_QUOTA_UDQ_ACCT; + if (flags & XFS_GQUOTA_ACCT) + uflags |= XFS_QUOTA_GDQ_ACCT; + if (flags & XFS_UQUOTA_ENFD) + uflags |= XFS_QUOTA_UDQ_ENFD; + if (flags & XFS_GQUOTA_ENFD) + uflags |= XFS_QUOTA_GDQ_ENFD; + return (uflags); +} + + +/* + * Go thru all the inodes in the file system, releasing their dquots. + * Note that the mount structure gets modified to indicate that quotas are off + * AFTER this, in the case of quotaoff. This also gets called from + * xfs_rootumount. + */ +void +xfs_qm_dqrele_all_inodes( + struct xfs_mount *mp, + uint flags) +{ + vmap_t vmap; + xfs_inode_t *ip, *topino; + uint ireclaims; + vnode_t *vp; + boolean_t vnode_refd; + + ASSERT(mp->m_quotainfo); + +again: + XFS_MOUNT_ILOCK(mp); + ip = mp->m_inodes; + if (ip == NULL) { + XFS_MOUNT_IUNLOCK(mp); + return; + } + do { + /* Skip markers inserted by xfs_sync */ + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + /* Root inode, rbmip and rsumip have associated blocks */ + if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { + ASSERT(ip->i_udquot == NULL); + ASSERT(ip->i_gdquot == NULL); + ip = ip->i_mnext; + continue; + } + vp = XFS_ITOV_NULL(ip); + if (!vp) { + ASSERT(ip->i_udquot == NULL); + ASSERT(ip->i_gdquot == NULL); + ip = ip->i_mnext; + continue; + } + vnode_refd = B_FALSE; + if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { + /* + * Sample vp mapping while holding the mplock, lest + * we come across a non-existent vnode. + */ + VMAP(vp, vmap); + ireclaims = mp->m_ireclaims; + topino = mp->m_inodes; + XFS_MOUNT_IUNLOCK(mp); + + /* XXX restart limit ? */ + if ( ! (vp = vn_get(vp, &vmap))) + goto again; + xfs_ilock(ip, XFS_ILOCK_EXCL); + vnode_refd = B_TRUE; + } else { + ireclaims = mp->m_ireclaims; + topino = mp->m_inodes; + XFS_MOUNT_IUNLOCK(mp); + } + + /* + * We don't keep the mountlock across the dqrele() call, + * since it can take a while.. + */ + if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { + xfs_qm_dqrele(ip->i_udquot); + ip->i_udquot = NULL; + } + if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) { + xfs_qm_dqrele(ip->i_gdquot); + ip->i_gdquot = NULL; + } + xfs_iunlock(ip, XFS_ILOCK_EXCL); + /* + * Wait until we've dropped the ilock and mountlock to + * do the vn_rele. Or be condemned to an eternity in the + * inactive code in hell. + */ + if (vnode_refd) + VN_RELE(vp); + XFS_MOUNT_ILOCK(mp); + /* + * If an inode was inserted or removed, we gotta + * start over again. + */ + if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) { + /* XXX use a sentinel */ + XFS_MOUNT_IUNLOCK(mp); + goto again; + } + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + + XFS_MOUNT_IUNLOCK(mp); +} + +/*------------------------------------------------------------------------*/ +#ifdef DEBUG +/* + * This contains all the test functions for XFS disk quotas. + * Currently it does a quota accounting check. ie. it walks through + * all inodes in the file system, calculating the dquot accounting fields, + * and prints out any inconsistencies. 
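xfs_qm_dqrele_all_inodes above cannot hold the mount lock across the dqrele work, so it samples the list head and the reclaim counter, drops the lock, and restarts the whole walk from scratch if either changed in the meantime. The control flow reduces to an optimistic walk like this toy version (single-threaded here, so it only shows the shape):

struct toy_inode { struct toy_inode *next; };   /* circular, like mp->m_inodes */

static void walk_all(struct toy_inode **headp, int *generationp)
{
        struct toy_inode *ip, *seen_head;
        int seen_gen;

again:
        ip = *headp;
        if (ip == NULL)
                return;
        do {
                seen_head = *headp;             /* sampled while holding the lock */
                seen_gen  = *generationp;
                /* ... drop the lock and do the slow per-inode work here ... */
                if (seen_head != *headp || seen_gen != *generationp)
                        goto again;             /* list changed underneath us: restart */
                ip = ip->next;
        } while (ip != *headp);
}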
+ */ +xfs_dqhash_t *qmtest_udqtab; +xfs_dqhash_t *qmtest_gdqtab; +int qmtest_hashmask; +int qmtest_nfails; +mutex_t qcheck_lock; + +#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \ + (__psunsigned_t)(id)) & \ + (qmtest_hashmask - 1)) + +#define DQTEST_HASH(mp, id, type) ((type & XFS_DQ_USER) ? \ + (qmtest_udqtab + \ + DQTEST_HASHVAL(mp, id)) : \ + (qmtest_gdqtab + \ + DQTEST_HASHVAL(mp, id))) + +#define DQTEST_LIST_PRINT(l, NXT, title) \ +{ \ + xfs_dqtest_t *dqp; int i = 0;\ + cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \ + for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \ + dqp = (xfs_dqtest_t *)dqp->NXT) { \ + cmn_err(CE_DEBUG, " %d\. \"%d (%s)\" bcnt = %d, icnt = %d", \ + ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \ + dqp->d_bcount, dqp->d_icount); } \ +} + +typedef struct dqtest { + xfs_dqmarker_t q_lists; + xfs_dqhash_t *q_hash; /* the hashchain header */ + xfs_mount_t *q_mount; /* filesystem this relates to */ + xfs_dqid_t d_id; /* user id or group id */ + xfs_qcnt_t d_bcount; /* # disk blocks owned by the user */ + xfs_qcnt_t d_icount; /* # inodes owned by the user */ +} xfs_dqtest_t; + +STATIC void +xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp) +{ + xfs_dquot_t *d; + if (((d) = (h)->qh_next)) + (d)->HL_PREVP = &((dqp)->HL_NEXT); + (dqp)->HL_NEXT = d; + (dqp)->HL_PREVP = &((h)->qh_next); + (h)->qh_next = (xfs_dquot_t *)dqp; + (h)->qh_version++; + (h)->qh_nelems++; +} +STATIC void +xfs_qm_dqtest_print( + xfs_dqtest_t *d) +{ + cmn_err(CE_DEBUG, "-----------DQTEST DQUOT----------------"); + cmn_err(CE_DEBUG, "---- dquot ID = %d", d->d_id); + cmn_err(CE_DEBUG, "---- type = %s", XFS_QM_ISUDQ(d)? "USR" : "GRP"); + cmn_err(CE_DEBUG, "---- fs = 0x%p", d->q_mount); + cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)", + d->d_bcount, (int)d->d_bcount); + cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)", + d->d_icount, (int)d->d_icount); + cmn_err(CE_DEBUG, "---------------------------"); +} + +STATIC void +xfs_qm_dqtest_failed( + xfs_dqtest_t *d, + xfs_dquot_t *dqp, + char *reason, + xfs_qcnt_t a, + xfs_qcnt_t b, + int error) +{ + qmtest_nfails++; + if (error) + cmn_err(CE_DEBUG, "quotacheck failed id=%d, err=%d\nreason: %s", + INT_GET(d->d_id, ARCH_CONVERT), error, reason); + else + cmn_err(CE_DEBUG, "quotacheck failed id=%d (%s) [%d != %d]", + INT_GET(d->d_id, ARCH_CONVERT), reason, (int)a, (int)b); + xfs_qm_dqtest_print(d); + if (dqp) + xfs_qm_dqprint(dqp); +} + +STATIC int +xfs_dqtest_cmp2( + xfs_dqtest_t *d, + xfs_dquot_t *dqp) +{ + int err = 0; + if (INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) != d->d_icount) { + xfs_qm_dqtest_failed(d, dqp, "icount mismatch", + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), + d->d_icount, 0); + err++; + } + if (INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT) != d->d_bcount) { + xfs_qm_dqtest_failed(d, dqp, "bcount mismatch", + INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), + d->d_bcount, 0); + err++; + } + if (INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT) && + INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT) >= + INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT)) { + if (INT_ISZERO(dqp->q_core.d_btimer, ARCH_CONVERT) && + !INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT)) { + cmn_err(CE_DEBUG, + "%d [%s] [0x%p] BLK TIMER NOT STARTED", + d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount); + err++; + } + } + if (INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) && + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= + INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT)) { + if (INT_ISZERO(dqp->q_core.d_itimer, ARCH_CONVERT) && + 
!INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT)) { + cmn_err(CE_DEBUG, + "%d [%s] [0x%p] INO TIMER NOT STARTED", + d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount); + err++; + } + } +#ifdef QUOTADEBUG + if (!err) { + cmn_err(CE_DEBUG, "%d [%s] [0x%p] qchecked", + d->d_id, XFS_QM_ISUDQ(d) ? "USR" : "GRP", d->q_mount); + } +#endif + return (err); +} + +STATIC void +xfs_dqtest_cmp( + xfs_dqtest_t *d) +{ + xfs_dquot_t *dqp; + int error; + + /* xfs_qm_dqtest_print(d); */ + if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0, + &dqp))) { + xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error); + return; + } + xfs_dqtest_cmp2(d, dqp); + xfs_qm_dqput(dqp); +} + +STATIC int +xfs_qm_internalqcheck_dqget( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + xfs_dqtest_t **O_dq) +{ + xfs_dqtest_t *d; + xfs_dqhash_t *h; + + h = DQTEST_HASH(mp, id, type); + for (d = (xfs_dqtest_t *) h->qh_next; d != NULL; + d = (xfs_dqtest_t *) d->HL_NEXT) { + /* DQTEST_LIST_PRINT(h, HL_NEXT, "@@@@@ dqtestlist @@@@@"); */ + if (d->d_id == id && mp == d->q_mount) { + *O_dq = d; + return (0); + } + } + d = kmem_zalloc(sizeof(xfs_dqtest_t), KM_SLEEP); + d->dq_flags = type; + d->d_id = id; + d->q_mount = mp; + d->q_hash = h; + xfs_qm_hashinsert(h, d); + *O_dq = d; + return (0); +} + +STATIC void +xfs_qm_internalqcheck_get_dquots( + xfs_mount_t *mp, + xfs_dqid_t uid, + xfs_dqid_t gid, + xfs_dqtest_t **ud, + xfs_dqtest_t **gd) +{ + if (XFS_IS_UQUOTA_ON(mp)) + xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud); + if (XFS_IS_GQUOTA_ON(mp)) + xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd); +} + + +STATIC void +xfs_qm_internalqcheck_dqadjust( + xfs_inode_t *ip, + xfs_dqtest_t *d) +{ + d->d_icount++; + d->d_bcount += (xfs_qcnt_t)ip->i_d.di_nblocks; +} + +STATIC int +xfs_qm_internalqcheck_adjust( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t ino, /* inode number to get data for */ + void *buffer, /* not used */ + xfs_daddr_t bno, /* starting block of inode cluster */ + void *dip, /* not used */ + int *res) /* bulkstat result code */ +{ + xfs_inode_t *ip; + xfs_dqtest_t *ud, *gd; + uint lock_flags; + boolean_t ipreleased; + int error; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { + *res = BULKSTAT_RV_NOTHING; + qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n", + (unsigned long long) ino, + (unsigned long long) mp->m_sb.sb_uquotino, + (unsigned long long) mp->m_sb.sb_gquotino); + return XFS_ERROR(EINVAL); + } + ipreleased = B_FALSE; + again: + lock_flags = XFS_ILOCK_SHARED; + if ((error = xfs_iget(mp, tp, ino, lock_flags, &ip, bno))) { + *res = BULKSTAT_RV_NOTHING; + return (error); + } + + if (ip->i_d.di_mode == 0) { + xfs_iput_new(ip, lock_flags); + *res = BULKSTAT_RV_NOTHING; + return XFS_ERROR(ENOENT); + } + + /* + * This inode can have blocks after eof which can get released + * when we send it to inactive. Since we don't check the dquot + * until the after all our calculations are done, we must get rid + * of those now. + */ + if (! 
ipreleased) { + xfs_iput(ip, lock_flags); + ipreleased = B_TRUE; + goto again; + } + xfs_qm_internalqcheck_get_dquots(mp, + (xfs_dqid_t) ip->i_d.di_uid, + (xfs_dqid_t) ip->i_d.di_gid, + &ud, &gd); + if (XFS_IS_UQUOTA_ON(mp)) { + ASSERT(ud); + xfs_qm_internalqcheck_dqadjust(ip, ud); + } + if (XFS_IS_GQUOTA_ON(mp)) { + ASSERT(gd); + xfs_qm_internalqcheck_dqadjust(ip, gd); + } + xfs_iput(ip, lock_flags); + *res = BULKSTAT_RV_DIDONE; + return (0); +} + + +/* PRIVATE, debugging */ +int +xfs_qm_internalqcheck( + xfs_mount_t *mp) +{ + xfs_ino_t lastino; + int done, count; + int i; + xfs_dqtest_t *d, *e; + xfs_dqhash_t *h1; + int error; + + lastino = 0; + qmtest_hashmask = 32; + count = 5; + done = 0; + qmtest_nfails = 0; + + if (! XFS_IS_QUOTA_ON(mp)) + return XFS_ERROR(ESRCH); + + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); + XFS_bflush(mp->m_ddev_targp); + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); + XFS_bflush(mp->m_ddev_targp); + + mutex_lock(&qcheck_lock, PINOD); + /* There should be absolutely no quota activity while this + is going on. */ + qmtest_udqtab = kmem_zalloc(qmtest_hashmask * + sizeof(xfs_dqhash_t), KM_SLEEP); + qmtest_gdqtab = kmem_zalloc(qmtest_hashmask * + sizeof(xfs_dqhash_t), KM_SLEEP); + do { + /* + * Iterate thru all the inodes in the file system, + * adjusting the corresponding dquot counters + */ + if ((error = xfs_bulkstat(mp, NULL, &lastino, &count, + xfs_qm_internalqcheck_adjust, + 0, NULL, BULKSTAT_FG_IGET, &done))) { + break; + } + } while (! done); + if (error) { + cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); + } + cmn_err(CE_DEBUG, "Checking results against system dquots"); + for (i = 0; i < qmtest_hashmask; i++) { + h1 = &qmtest_udqtab[i]; + for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { + xfs_dqtest_cmp(d); + e = (xfs_dqtest_t *) d->HL_NEXT; + kmem_free(d, sizeof(xfs_dqtest_t)); + d = e; + } + h1 = &qmtest_gdqtab[i]; + for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { + xfs_dqtest_cmp(d); + e = (xfs_dqtest_t *) d->HL_NEXT; + kmem_free(d, sizeof(xfs_dqtest_t)); + d = e; + } + } + + if (qmtest_nfails) { + cmn_err(CE_DEBUG, "******** quotacheck failed ********"); + cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails); + } else { + cmn_err(CE_DEBUG, "******** quotacheck successful! ********"); + } + kmem_free(qmtest_udqtab, qmtest_hashmask * sizeof(xfs_dqhash_t)); + kmem_free(qmtest_gdqtab, qmtest_hashmask * sizeof(xfs_dqhash_t)); + mutex_unlock(&qcheck_lock); + return (qmtest_nfails); +} + +#endif /* DEBUG */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_quota_priv.h linux.22-ac2/fs/xfs/quota/xfs_quota_priv.h --- linux.vanilla/fs/xfs/quota/xfs_quota_priv.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_quota_priv.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. 
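The debug quotacheck above is an independent recount: every inode reported by the xfs_bulkstat walk bumps a private (id -> inode count, block count) record in a small hash table, and at the end each record is compared against the live dquot. A condensed, userspace-style version of the accumulation step (error handling omitted):

#include <stdlib.h>

#define NBUCKETS 32

struct qtest {
        struct qtest *next;
        unsigned int id;
        unsigned long long icount, bcount;
};

static struct qtest *table[NBUCKETS];

/* Find or create the counter record for this owner id. */
static struct qtest *lookup(unsigned int id)
{
        struct qtest *d, **bucket = &table[id & (NBUCKETS - 1)];

        for (d = *bucket; d != NULL; d = d->next)
                if (d->id == id)
                        return d;
        d = calloc(1, sizeof(*d));
        d->id = id;
        d->next = *bucket;
        *bucket = d;
        return d;
}

/* Called once per inode that the walk reports. */
static void adjust(unsigned int uid, unsigned long long nblocks)
{
        struct qtest *d = lookup(uid);

        d->icount++;            /* one more inode owned by this id */
        d->bcount += nblocks;   /* plus the blocks it has allocated */
}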
Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_QUOTA_PRIV_H__ +#define __XFS_QUOTA_PRIV_H__ + +/* + * Number of bmaps that we ask from bmapi when doing a quotacheck. + * We make this restriction to keep the memory usage to a minimum. + */ +#define XFS_DQITER_MAP_SIZE 10 + +/* Number of dquots that fit in to a dquot block */ +#define XFS_QM_DQPERBLK(mp) ((mp)->m_quotainfo->qi_dqperchunk) + +#define XFS_ISLOCKED_INODE(ip) (ismrlocked(&(ip)->i_lock, \ + MR_UPDATE | MR_ACCESS) != 0) +#define XFS_ISLOCKED_INODE_EXCL(ip) (ismrlocked(&(ip)->i_lock, \ + MR_UPDATE) != 0) + +#define XFS_DQ_IS_ADDEDTO_TRX(t, d) ((d)->q_transp == (t)) + +#define XFS_QI_MPLRECLAIMS(mp) ((mp)->m_quotainfo->qi_dqreclaims) +#define XFS_QI_UQIP(mp) ((mp)->m_quotainfo->qi_uquotaip) +#define XFS_QI_GQIP(mp) ((mp)->m_quotainfo->qi_gquotaip) +#define XFS_QI_DQCHUNKLEN(mp) ((mp)->m_quotainfo->qi_dqchunklen) +#define XFS_QI_BTIMELIMIT(mp) ((mp)->m_quotainfo->qi_btimelimit) +#define XFS_QI_RTBTIMELIMIT(mp) ((mp)->m_quotainfo->qi_rtbtimelimit) +#define XFS_QI_ITIMELIMIT(mp) ((mp)->m_quotainfo->qi_itimelimit) +#define XFS_QI_BWARNLIMIT(mp) ((mp)->m_quotainfo->qi_bwarnlimit) +#define XFS_QI_IWARNLIMIT(mp) ((mp)->m_quotainfo->qi_iwarnlimit) +#define XFS_QI_QOFFLOCK(mp) ((mp)->m_quotainfo->qi_quotaofflock) + +#define XFS_QI_MPL_LIST(mp) ((mp)->m_quotainfo->qi_dqlist) +#define XFS_QI_MPLLOCK(mp) ((mp)->m_quotainfo->qi_dqlist.qh_lock) +#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next) +#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems) + +#define XQMLCK(h) (mutex_lock(&((h)->qh_lock), PINOD)) +#define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock))) +#ifdef DEBUG +struct xfs_dqhash; +static inline int XQMISLCKD(struct xfs_dqhash *h) +{ + if (mutex_trylock(&h->qh_lock)) { + mutex_unlock(&h->qh_lock); + return 0; + } + return 1; +} +#endif + +#define XFS_DQ_HASH_LOCK(h) XQMLCK(h) +#define XFS_DQ_HASH_UNLOCK(h) XQMUNLCK(h) +#define XFS_DQ_IS_HASH_LOCKED(h) XQMISLCKD(h) + +#define xfs_qm_mplist_lock(mp) XQMLCK(&(XFS_QI_MPL_LIST(mp))) +#define xfs_qm_mplist_unlock(mp) XQMUNLCK(&(XFS_QI_MPL_LIST(mp))) +#define XFS_QM_IS_MPLIST_LOCKED(mp) XQMISLCKD(&(XFS_QI_MPL_LIST(mp))) + +#define xfs_qm_freelist_lock(qm) XQMLCK(&((qm)->qm_dqfreelist)) +#define xfs_qm_freelist_unlock(qm) XQMUNLCK(&((qm)->qm_dqfreelist)) +#define XFS_QM_IS_FREELIST_LOCKED(qm) XQMISLCKD(&((qm)->qm_dqfreelist)) + +/* + * Hash into a bucket in the dquot hash table, based on . + */ +#define XFS_DQ_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \ + (__psunsigned_t)(id)) & \ + (xfs_Gqm->qm_dqhashmask - 1)) +#define XFS_DQ_HASH(mp, id, type) (type == XFS_DQ_USER ? \ + (xfs_Gqm->qm_usr_dqhtable + \ + XFS_DQ_HASHVAL(mp, id)) : \ + (xfs_Gqm->qm_grp_dqhtable + \ + XFS_DQ_HASHVAL(mp, id))) +#define XFS_IS_DQTYPE_ON(mp, type) (type == XFS_DQ_USER ? 
\ + XFS_IS_UQUOTA_ON(mp):XFS_IS_GQUOTA_ON(mp)) +#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \ + INT_ISZERO(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_blk_softlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_rtb_hardlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_bcount, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_rtbcount, ARCH_CONVERT) && \ + INT_ISZERO(dqp->q_core.d_icount, ARCH_CONVERT)) + +#define HL_PREVP dq_hashlist.ql_prevp +#define HL_NEXT dq_hashlist.ql_next +#define MPL_PREVP dq_mplist.ql_prevp +#define MPL_NEXT dq_mplist.ql_next + + +#define _LIST_REMOVE(h, dqp, PVP, NXT) \ + { \ + xfs_dquot_t *d; \ + if (((d) = (dqp)->NXT)) \ + (d)->PVP = (dqp)->PVP; \ + *((dqp)->PVP) = d; \ + (dqp)->NXT = NULL; \ + (dqp)->PVP = NULL; \ + (h)->qh_version++; \ + (h)->qh_nelems--; \ + } + +#define _LIST_INSERT(h, dqp, PVP, NXT) \ + { \ + xfs_dquot_t *d; \ + if (((d) = (h)->qh_next)) \ + (d)->PVP = &((dqp)->NXT); \ + (dqp)->NXT = d; \ + (dqp)->PVP = &((h)->qh_next); \ + (h)->qh_next = dqp; \ + (h)->qh_version++; \ + (h)->qh_nelems++; \ + } + +#define FOREACH_DQUOT_IN_MP(dqp, mp) \ + for ((dqp) = XFS_QI_MPLNEXT(mp); (dqp) != NULL; (dqp) = (dqp)->MPL_NEXT) + +#define FOREACH_DQUOT_IN_FREELIST(dqp, qlist) \ +for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \ + (dqp) = (dqp)->dq_flnext) + +#define XQM_HASHLIST_INSERT(h, dqp) \ + _LIST_INSERT(h, dqp, HL_PREVP, HL_NEXT) + +#define XQM_FREELIST_INSERT(h, dqp) \ + xfs_qm_freelist_append(h, dqp) + +#define XQM_MPLIST_INSERT(h, dqp) \ + _LIST_INSERT(h, dqp, MPL_PREVP, MPL_NEXT) + +#define XQM_HASHLIST_REMOVE(h, dqp) \ + _LIST_REMOVE(h, dqp, HL_PREVP, HL_NEXT) +#define XQM_FREELIST_REMOVE(dqp) \ + xfs_qm_freelist_unlink(dqp) +#define XQM_MPLIST_REMOVE(h, dqp) \ + { _LIST_REMOVE(h, dqp, MPL_PREVP, MPL_NEXT); \ + XFS_QI_MPLRECLAIMS((dqp)->q_mount)++; } + +#define XFS_DQ_IS_LOGITEM_INITD(dqp) ((dqp)->q_logitem.qli_dquot == (dqp)) + +#define XFS_QM_DQP_TO_DQACCT(tp, dqp) (XFS_QM_ISUDQ(dqp) ? \ + (tp)->t_dqinfo->dqa_usrdquots : \ + (tp)->t_dqinfo->dqa_grpdquots) +#define XFS_IS_SUSER_DQUOT(dqp) \ + (INT_ISZERO((dqp)->q_core.d_id, ARCH_CONVERT)) + +#define XFS_PURGE_INODE(ip) \ + { \ + vmap_t dqvmap; \ + vnode_t *dqvp; \ + dqvp = XFS_ITOV(ip); \ + VMAP(dqvp, dqvmap); \ + VN_RELE(dqvp); \ + } + +#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \ + (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : "???")) +#define DQFLAGTO_DIRTYSTR(d) (XFS_DQ_IS_DIRTY(d) ? "DIRTY" : "NOTDIRTY") + +#endif /* __XFS_QUOTA_PRIV_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/quota/xfs_trans_dquot.c linux.22-ac2/fs/xfs/quota/xfs_trans_dquot.c --- linux.vanilla/fs/xfs/quota/xfs_trans_dquot.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/quota/xfs_trans_dquot.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,929 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc.h" +#include "xfs_dmapi.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_acl.h" +#include "xfs_cap.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_priv.h" + +#include "xfs_qm.h" + +STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *); + +/* + * Add the locked dquot to the transaction. + * The dquot must be locked, and it cannot be associated with any + * transaction. + */ +void +xfs_trans_dqjoin( + xfs_trans_t *tp, + xfs_dquot_t *dqp) +{ + xfs_dq_logitem_t *lp; + + ASSERT(! XFS_DQ_IS_ADDEDTO_TRX(tp, dqp)); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp)); + lp = &dqp->q_logitem; + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp)); + + /* + * Initialize i_transp so we can later determine if this dquot is + * associated with this transaction. + */ + dqp->q_transp = tp; +} + + +/* + * This is called to mark the dquot as needing + * to be logged when the transaction is committed. The dquot must + * already be associated with the given transaction. + * Note that it marks the entire transaction as dirty. In the ordinary + * case, this gets called via xfs_trans_commit, after the transaction + * is already dirty. However, there's nothing stop this from getting + * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY + * flag. 
+ */ +void +xfs_trans_log_dquot( + xfs_trans_t *tp, + xfs_dquot_t *dqp) +{ + xfs_log_item_desc_t *lidp; + + ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp)); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem)); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; +} + +/* + * Carry forward whatever is left of the quota blk reservation to + * the spanky new transaction + */ +STATIC void +xfs_trans_dup_dqinfo( + xfs_trans_t *otp, + xfs_trans_t *ntp) +{ + xfs_dqtrx_t *oq, *nq; + int i,j; + xfs_dqtrx_t *oqa, *nqa; + + if (!otp->t_dqinfo) + return; + + xfs_trans_alloc_dqinfo(ntp); + oqa = otp->t_dqinfo->dqa_usrdquots; + nqa = ntp->t_dqinfo->dqa_usrdquots; + + /* + * Because the quota blk reservation is carried forward, + * it is also necessary to carry forward the DQ_DIRTY flag. + */ + if(otp->t_flags & XFS_TRANS_DQ_DIRTY) + ntp->t_flags |= XFS_TRANS_DQ_DIRTY; + + for (j = 0; j < 2; j++) { + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + if (oqa[i].qt_dquot == NULL) + break; + oq = &oqa[i]; + nq = &nqa[i]; + + nq->qt_dquot = oq->qt_dquot; + nq->qt_bcount_delta = nq->qt_icount_delta = 0; + nq->qt_rtbcount_delta = 0; + + /* + * Transfer whatever is left of the reservations. + */ + nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used; + oq->qt_blk_res = oq->qt_blk_res_used; + + nq->qt_rtblk_res = oq->qt_rtblk_res - + oq->qt_rtblk_res_used; + oq->qt_rtblk_res = oq->qt_rtblk_res_used; + + nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used; + oq->qt_ino_res = oq->qt_ino_res_used; + + } + oqa = otp->t_dqinfo->dqa_grpdquots; + nqa = ntp->t_dqinfo->dqa_grpdquots; + } +} + +/* + * Wrap around mod_dquot to account for both user and group quotas. + */ +void +xfs_trans_mod_dquot_byino( + xfs_trans_t *tp, + xfs_inode_t *ip, + uint field, + long delta) +{ + xfs_mount_t *mp; + + ASSERT(tp); + mp = tp->t_mountp; + + if (!XFS_IS_QUOTA_ON(mp) || + ip->i_ino == mp->m_sb.sb_uquotino || + ip->i_ino == mp->m_sb.sb_gquotino) + return; + + if (tp->t_dqinfo == NULL) + xfs_trans_alloc_dqinfo(tp); + + if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) { + (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta); + } + if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) { + (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta); + } +} + +STATIC xfs_dqtrx_t * +xfs_trans_get_dqtrx( + xfs_trans_t *tp, + xfs_dquot_t *dqp) +{ + int i; + xfs_dqtrx_t *qa; + + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + qa = XFS_QM_DQP_TO_DQACCT(tp, dqp); + + if (qa[i].qt_dquot == NULL || + qa[i].qt_dquot == dqp) { + return (&qa[i]); + } + } + + return (NULL); +} + +/* + * Make the changes in the transaction structure. + * The moral equivalent to xfs_trans_mod_sb(). + * We don't touch any fields in the dquot, so we don't care + * if it's locked or not (most of the time it won't be). + */ +void +xfs_trans_mod_dquot( + xfs_trans_t *tp, + xfs_dquot_t *dqp, + uint field, + long delta) +{ + xfs_dqtrx_t *qtrx; + + ASSERT(tp); + qtrx = NULL; + + if (tp->t_dqinfo == NULL) + xfs_trans_alloc_dqinfo(tp); + /* + * Find either the first free slot or the slot that belongs + * to this dquot. + */ + qtrx = xfs_trans_get_dqtrx(tp, dqp); + ASSERT(qtrx); + if (qtrx->qt_dquot == NULL) + qtrx->qt_dquot = dqp; + + switch (field) { + + /* + * regular disk blk reservation + */ + case XFS_TRANS_DQ_RES_BLKS: + qtrx->qt_blk_res += (ulong)delta; + break; + + /* + * inode reservation + */ + case XFS_TRANS_DQ_RES_INOS: + qtrx->qt_ino_res += (ulong)delta; + break; + + /* + * disk blocks used. 
+ */ + case XFS_TRANS_DQ_BCOUNT: + if (qtrx->qt_blk_res && delta > 0) { + qtrx->qt_blk_res_used += (ulong)delta; + ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used); + } + qtrx->qt_bcount_delta += delta; + break; + + case XFS_TRANS_DQ_DELBCOUNT: + qtrx->qt_delbcnt_delta += delta; + break; + + /* + * Inode Count + */ + case XFS_TRANS_DQ_ICOUNT: + if (qtrx->qt_ino_res && delta > 0) { + qtrx->qt_ino_res_used += (ulong)delta; + ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used); + } + qtrx->qt_icount_delta += delta; + break; + + /* + * rtblk reservation + */ + case XFS_TRANS_DQ_RES_RTBLKS: + qtrx->qt_rtblk_res += (ulong)delta; + break; + + /* + * rtblk count + */ + case XFS_TRANS_DQ_RTBCOUNT: + if (qtrx->qt_rtblk_res && delta > 0) { + qtrx->qt_rtblk_res_used += (ulong)delta; + ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used); + } + qtrx->qt_rtbcount_delta += delta; + break; + + case XFS_TRANS_DQ_DELRTBCOUNT: + qtrx->qt_delrtb_delta += delta; + break; + + default: + ASSERT(0); + } + tp->t_flags |= XFS_TRANS_DQ_DIRTY; +} + + +/* + * Given an array of dqtrx structures, lock all the dquots associated + * and join them to the transaction, provided they have been modified. + * We know that the highest number of dquots (of one type - usr OR grp), + * involved in a transaction is 2 and that both usr and grp combined - 3. + * So, we don't attempt to make this very generic. + */ +STATIC void +xfs_trans_dqlockedjoin( + xfs_trans_t *tp, + xfs_dqtrx_t *q) +{ + ASSERT(q[0].qt_dquot != NULL); + if (q[1].qt_dquot == NULL) { + xfs_dqlock(q[0].qt_dquot); + xfs_trans_dqjoin(tp, q[0].qt_dquot); + } else { + ASSERT(XFS_QM_TRANS_MAXDQS == 2); + xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot); + xfs_trans_dqjoin(tp, q[0].qt_dquot); + xfs_trans_dqjoin(tp, q[1].qt_dquot); + } +} + + +/* + * Called by xfs_trans_commit() and similar in spirit to + * xfs_trans_apply_sb_deltas(). + * Go thru all the dquots belonging to this transaction and modify the + * INCORE dquot to reflect the actual usages. + * Unreserve just the reservations done by this transaction. + * dquot is still left locked at exit. + */ +void +xfs_trans_apply_dquot_deltas( + xfs_trans_t *tp) +{ + int i, j; + xfs_dquot_t *dqp; + xfs_dqtrx_t *qtrx, *qa; + xfs_disk_dquot_t *d; + long totalbdelta; + long totalrtbdelta; + + if (! (tp->t_flags & XFS_TRANS_DQ_DIRTY)) + return; + + ASSERT(tp->t_dqinfo); + qa = tp->t_dqinfo->dqa_usrdquots; + for (j = 0; j < 2; j++) { + if (qa[0].qt_dquot == NULL) { + qa = tp->t_dqinfo->dqa_grpdquots; + continue; + } + + /* + * Lock all of the dquots and join them to the transaction. + */ + xfs_trans_dqlockedjoin(tp, qa); + + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + qtrx = &qa[i]; + /* + * The array of dquots is filled + * sequentially, not sparsely. + */ + if ((dqp = qtrx->qt_dquot) == NULL) + break; + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp)); + + /* + * adjust the actual number of blocks used + */ + d = &dqp->q_core; + + /* + * The issue here is - sometimes we don't make a blkquota + * reservation intentionally to be fair to users + * (when the amount is small). On the other hand, + * delayed allocs do make reservations, but that's + * outside of a transaction, so we have no + * idea how much was really reserved. + * So, here we've accumulated delayed allocation blks and + * non-delay blks. The assumption is that the + * delayed ones are always reserved (outside of a + * transaction), and the others may or may not have + * quota reservations. 
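(Illustrative aside, not part of the patch.) The comment above is the heart of xfs_trans_apply_dquot_deltas(): the block-count delta made inside the transaction and the delayed-allocation delta are summed into one adjustment, and any up-front reservation that was not consumed is handed back. The standalone C sketch below restates just that arithmetic; the types qtrx_sketch and dquot_sketch and the function apply_block_deltas() are invented, simplified stand-ins, not the XFS structures.

#include <assert.h>
#include <stdio.h>

/* Invented, cut-down stand-ins for the per-transaction quota record. */
struct qtrx_sketch {
	long blk_res;		/* blocks reserved up front by this transaction */
	long blk_res_used;	/* portion of that reservation actually consumed */
	long bcount_delta;	/* block-count change made inside the transaction */
	long delbcnt_delta;	/* block-count change from delayed allocation */
};

struct dquot_sketch {
	long d_bcount;		/* accounted block usage */
	long q_res_bcount;	/* usage plus outstanding reservations */
};

/* Fold both kinds of delta into the dquot, then trim the unused reservation. */
static void apply_block_deltas(struct dquot_sketch *dq, const struct qtrx_sketch *q)
{
	long total = q->bcount_delta + q->delbcnt_delta;

	dq->d_bcount += total;
	if (q->blk_res) {
		/* reserved up front: give back whatever was not used */
		dq->q_res_bcount -= q->blk_res - q->blk_res_used;
	} else {
		/* no up-front reservation: fold the plain delta into the reserved count too */
		dq->q_res_bcount += q->bcount_delta;
	}
	assert(dq->q_res_bcount >= dq->d_bcount);
}

int main(void)
{
	struct dquot_sketch dq = { .d_bcount = 100, .q_res_bcount = 110 };
	struct qtrx_sketch q = { .blk_res = 10, .blk_res_used = 4,
				 .bcount_delta = 4, .delbcnt_delta = 0 };

	apply_block_deltas(&dq, &q);
	printf("bcount=%ld reserved=%ld\n", dq.d_bcount, dq.q_res_bcount);
	return 0;
}

On abort the picture is simpler: xfs_trans_unreserve_and_mod_dquots() further below throws the deltas away and only subtracts the reservations this transaction had taken.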
+ */ + totalbdelta = qtrx->qt_bcount_delta + + qtrx->qt_delbcnt_delta; + totalrtbdelta = qtrx->qt_rtbcount_delta + + qtrx->qt_delrtb_delta; +#ifdef QUOTADEBUG + if (totalbdelta < 0) + ASSERT(INT_GET(d->d_bcount, ARCH_CONVERT) >= + (xfs_qcnt_t) -totalbdelta); + + if (totalrtbdelta < 0) + ASSERT(INT_GET(d->d_rtbcount, ARCH_CONVERT) >= + (xfs_qcnt_t) -totalrtbdelta); + + if (qtrx->qt_icount_delta < 0) + ASSERT(INT_GET(d->d_icount, ARCH_CONVERT) >= + (xfs_qcnt_t) -qtrx->qt_icount_delta); +#endif + if (totalbdelta) + INT_MOD(d->d_bcount, ARCH_CONVERT, (xfs_qcnt_t)totalbdelta); + + if (qtrx->qt_icount_delta) + INT_MOD(d->d_icount, ARCH_CONVERT, (xfs_qcnt_t)qtrx->qt_icount_delta); + + if (totalrtbdelta) + INT_MOD(d->d_rtbcount, ARCH_CONVERT, (xfs_qcnt_t)totalrtbdelta); + + /* + * Start/reset the timer(s) if needed. + */ + xfs_qm_adjust_dqtimers(tp->t_mountp, d); + + dqp->dq_flags |= XFS_DQ_DIRTY; + /* + * add this to the list of items to get logged + */ + xfs_trans_log_dquot(tp, dqp); + /* + * Take off what's left of the original reservation. + * In case of delayed allocations, there's no + * reservation that a transaction structure knows of. + */ + if (qtrx->qt_blk_res != 0) { + if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) { + if (qtrx->qt_blk_res > + qtrx->qt_blk_res_used) + dqp->q_res_bcount -= (xfs_qcnt_t) + (qtrx->qt_blk_res - + qtrx->qt_blk_res_used); + else + dqp->q_res_bcount -= (xfs_qcnt_t) + (qtrx->qt_blk_res_used - + qtrx->qt_blk_res); + } + } else { + /* + * These blks were never reserved, either inside + * a transaction or outside one (in a delayed + * allocation). Also, this isn't always a + * negative number since we sometimes + * deliberately skip quota reservations. + */ + if (qtrx->qt_bcount_delta) { + dqp->q_res_bcount += + (xfs_qcnt_t)qtrx->qt_bcount_delta; + } + } + /* + * Adjust the RT reservation. + */ + if (qtrx->qt_rtblk_res != 0) { + if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) { + if (qtrx->qt_rtblk_res > + qtrx->qt_rtblk_res_used) + dqp->q_res_rtbcount -= (xfs_qcnt_t) + (qtrx->qt_rtblk_res - + qtrx->qt_rtblk_res_used); + else + dqp->q_res_rtbcount -= (xfs_qcnt_t) + (qtrx->qt_rtblk_res_used - + qtrx->qt_rtblk_res); + } + } else { + if (qtrx->qt_rtbcount_delta) + dqp->q_res_rtbcount += + (xfs_qcnt_t)qtrx->qt_rtbcount_delta; + } + + /* + * Adjust the inode reservation. + */ + if (qtrx->qt_ino_res != 0) { + ASSERT(qtrx->qt_ino_res >= + qtrx->qt_ino_res_used); + if (qtrx->qt_ino_res > qtrx->qt_ino_res_used) + dqp->q_res_icount -= (xfs_qcnt_t) + (qtrx->qt_ino_res - + qtrx->qt_ino_res_used); + } else { + if (qtrx->qt_icount_delta) + dqp->q_res_icount += + (xfs_qcnt_t)qtrx->qt_icount_delta; + } + + +#ifdef QUOTADEBUG + if (qtrx->qt_rtblk_res != 0) + cmn_err(CE_DEBUG, "RT res %d for 0x%p\n", + (int) qtrx->qt_rtblk_res, dqp); +#endif + ASSERT(dqp->q_res_bcount >= + INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT)); + ASSERT(dqp->q_res_icount >= + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT)); + ASSERT(dqp->q_res_rtbcount >= + INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT)); + } + /* + * Do the group quotas next + */ + qa = tp->t_dqinfo->dqa_grpdquots; + } +} + +/* + * Release the reservations, and adjust the dquots accordingly. + * This is called only when the transaction is being aborted. If by + * any chance we have done dquot modifications incore (ie. deltas) already, + * we simply throw those away, since that's the expected behavior + * when a transaction is curtailed without a commit. 
+ */ +STATIC void +xfs_trans_unreserve_and_mod_dquots( + xfs_trans_t *tp) +{ + int i, j; + xfs_dquot_t *dqp; + xfs_dqtrx_t *qtrx, *qa; + boolean_t locked; + + if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY)) + return; + + qa = tp->t_dqinfo->dqa_usrdquots; + + for (j = 0; j < 2; j++) { + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + qtrx = &qa[i]; + /* + * We assume that the array of dquots is filled + * sequentially, not sparsely. + */ + if ((dqp = qtrx->qt_dquot) == NULL) + break; + /* + * Unreserve the original reservation. We don't care + * about the number of blocks used field, or deltas. + * Also we don't bother to zero the fields. + */ + locked = B_FALSE; + if (qtrx->qt_blk_res) { + xfs_dqlock(dqp); + locked = B_TRUE; + dqp->q_res_bcount -= + (xfs_qcnt_t)qtrx->qt_blk_res; + } + if (qtrx->qt_ino_res) { + if (!locked) { + xfs_dqlock(dqp); + locked = B_TRUE; + } + dqp->q_res_icount -= + (xfs_qcnt_t)qtrx->qt_ino_res; + } + + if (qtrx->qt_rtblk_res) { + if (!locked) { + xfs_dqlock(dqp); + locked = B_TRUE; + } + dqp->q_res_rtbcount -= + (xfs_qcnt_t)qtrx->qt_rtblk_res; + } + if (locked) + xfs_dqunlock(dqp); + + } + qa = tp->t_dqinfo->dqa_grpdquots; + } +} + +/* + * This reserves disk blocks and inodes against a dquot. + * Flags indicate if the dquot is to be locked here and also + * if the blk reservation is for RT or regular blocks. + * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check. + * Returns EDQUOT if quota is exceeded. + */ +STATIC int +xfs_trans_dqresv( + xfs_trans_t *tp, + xfs_dquot_t *dqp, + long nblks, + long ninos, + uint flags) +{ + int error; + xfs_qcnt_t hardlimit; + xfs_qcnt_t softlimit; + time_t btimer; + xfs_qcnt_t *resbcountp; + + if (! (flags & XFS_QMOPT_DQLOCK)) { + xfs_dqlock(dqp); + } + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + if (flags & XFS_TRANS_DQ_RES_BLKS) { + hardlimit = INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT); + softlimit = INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT); + btimer = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT); + resbcountp = &dqp->q_res_bcount; + } else { + ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS); + hardlimit = INT_GET(dqp->q_core.d_rtb_hardlimit, ARCH_CONVERT); + softlimit = INT_GET(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT); + btimer = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT); + resbcountp = &dqp->q_res_rtbcount; + } + error = 0; + + if ((flags & XFS_QMOPT_FORCE_RES) == 0 && + !INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT) && + XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) { +#ifdef QUOTADEBUG + cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld" + " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit); +#endif + if (nblks > 0) { + /* + * dquot is locked already. See if we'd go over the + * hardlimit or exceed the timelimit if we allocate + * nblks. 
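(Illustrative aside, not part of the patch.) Stripped of the INT_GET()/ARCH_CONVERT byte-order macros, the enforcement rule the following code applies is: a hard limit always refuses the allocation once it would be reached, while a soft limit refuses it only after the grace timer has expired or the warning budget is exhausted. The helper below is an invented, self-contained restatement of that rule; would_exceed_block_limits() and its plain-counter parameters are assumptions for the example, and time(NULL) stands in for the kernel's CURRENT_TIME.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/*
 * Invented restatement of the block-limit check: returns true when the request
 * should be refused with EDQUOT.  A limit of 0 means "no limit"; a timer of 0
 * means the soft-limit grace period has not started.
 */
static bool would_exceed_block_limits(long nblks, long reserved,
				      long softlimit, long hardlimit,
				      time_t btimer, int bwarns, int bwarnlimit)
{
	if (nblks <= 0)
		return false;				/* freeing blocks is always allowed */

	if (hardlimit > 0 && reserved + nblks >= hardlimit)
		return true;				/* hard limit is absolute */

	if (softlimit > 0 && reserved + nblks >= softlimit) {
		/* soft limit bites only once the timer or warning budget runs out */
		if (btimer != 0 && time(NULL) > btimer)
			return true;
		if (bwarns != 0 && bwarns >= bwarnlimit)
			return true;
	}
	return false;
}

int main(void)
{
	/* 90 blocks reserved, soft limit 100, hard limit 120, grace timer long expired */
	printf("%d\n", would_exceed_block_limits(15, 90, 100, 120, 1, 0, 5));
	return 0;
}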
+ */ + if (hardlimit > 0ULL && + (hardlimit <= nblks + *resbcountp)) { + error = EDQUOT; + goto error_return; + } + + if (softlimit > 0ULL && + (softlimit <= nblks + *resbcountp)) { + /* + * If timer or warnings has expired, + * return EDQUOT + */ + if ((btimer != 0 && CURRENT_TIME > btimer) || + (!INT_ISZERO(dqp->q_core.d_bwarns, ARCH_CONVERT) && + INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) >= + XFS_QI_BWARNLIMIT(dqp->q_mount))) { + error = EDQUOT; + goto error_return; + } + } + } + if (ninos > 0) { + if (INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT) > 0ULL && + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= + INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT)) { + error = EDQUOT; + goto error_return; + } else if (INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) > 0ULL && + INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= + INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT)) { + /* + * If timer or warnings has expired, + * return EDQUOT + */ + if ((!INT_ISZERO(dqp->q_core.d_itimer, ARCH_CONVERT) && + CURRENT_TIME > INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT)) || + (!INT_ISZERO(dqp->q_core.d_iwarns, ARCH_CONVERT) && + INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) >= + XFS_QI_IWARNLIMIT(dqp->q_mount))) { + error = EDQUOT; + goto error_return; + } + } + } + } + + /* + * Change the reservation, but not the actual usage. + * Note that q_res_bcount = q_core.d_bcount + resv + */ + (*resbcountp) += (xfs_qcnt_t)nblks; + if (ninos != 0) + dqp->q_res_icount += (xfs_qcnt_t)ninos; + + /* + * note the reservation amt in the trans struct too, + * so that the transaction knows how much was reserved by + * it against this particular dquot. + * We don't do this when we are reserving for a delayed allocation, + * because we don't have the luxury of a transaction envelope then. + */ + if (tp) { + ASSERT(tp->t_dqinfo); + ASSERT(flags & XFS_QMOPT_RESBLK_MASK); + if (nblks != 0) + xfs_trans_mod_dquot(tp, dqp, + flags & XFS_QMOPT_RESBLK_MASK, + nblks); + if (ninos != 0) + xfs_trans_mod_dquot(tp, dqp, + XFS_TRANS_DQ_RES_INOS, + ninos); + } + ASSERT(dqp->q_res_bcount >= INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT)); + ASSERT(dqp->q_res_rtbcount >= INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT)); + ASSERT(dqp->q_res_icount >= INT_GET(dqp->q_core.d_icount, ARCH_CONVERT)); + +error_return: + if (! (flags & XFS_QMOPT_DQLOCK)) { + xfs_dqunlock(dqp); + } + return (error); +} + + +/* + * Given a dquot(s), make disk block and/or inode reservations against them. + * The fact that this does the reservation against both the usr and + * grp quotas is important, because this follows a both-or-nothing + * approach. + * + * flags = XFS_QMOPT_DQLOCK indicate if dquot(s) need to be locked. + * XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown. + * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks + * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks + * dquots are unlocked on return, if they were not locked by caller. + */ +int +xfs_trans_reserve_quota_bydquots( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_dquot_t *udqp, + xfs_dquot_t *gdqp, + long nblks, + long ninos, + uint flags) +{ + int resvd; + + if (! 
XFS_IS_QUOTA_ON(mp)) + return (0); + + if (tp && tp->t_dqinfo == NULL) + xfs_trans_alloc_dqinfo(tp); + + ASSERT(flags & XFS_QMOPT_RESBLK_MASK); + resvd = 0; + + if (udqp) { + if (xfs_trans_dqresv(tp, udqp, nblks, ninos, flags)) + return (EDQUOT); + resvd = 1; + } + + if (gdqp) { + if (xfs_trans_dqresv(tp, gdqp, nblks, ninos, flags)) { + /* + * can't do it, so backout previous reservation + */ + if (resvd) { + xfs_trans_dqresv(tp, udqp, -nblks, -ninos, + flags); + } + return (EDQUOT); + } + } + + /* + * Didnt change anything critical, so, no need to log + */ + return (0); +} + + +/* + * Lock the dquot and change the reservation if we can. + * This doesn't change the actual usage, just the reservation. + * The inode sent in is locked. + * + * Returns 0 on success, EDQUOT or other errors otherwise + */ +STATIC int +xfs_trans_reserve_quota_nblks( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_inode_t *ip, + long nblks, + long ninos, + uint type) +{ + int error; + + if (!XFS_IS_QUOTA_ON(mp)) + return (0); + + ASSERT(ip->i_ino != mp->m_sb.sb_uquotino); + ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); + +#ifdef QUOTADEBUG + if (ip->i_udquot) + ASSERT(! XFS_DQ_IS_LOCKED(ip->i_udquot)); + if (ip->i_gdquot) + ASSERT(! XFS_DQ_IS_LOCKED(ip->i_gdquot)); +#endif + + ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); + ASSERT((type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_RTBLKS || + (type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_BLKS); + + /* + * Reserve nblks against these dquots, with trans as the mediator. + */ + error = xfs_trans_reserve_quota_bydquots(tp, mp, + ip->i_udquot, ip->i_gdquot, + nblks, ninos, + type); + return (error); +} + +/* + * This routine is called to allocate a quotaoff log item. + */ +xfs_qoff_logitem_t * +xfs_trans_get_qoff_item( + xfs_trans_t *tp, + xfs_qoff_logitem_t *startqoff, + uint flags) +{ + xfs_qoff_logitem_t *q; + + ASSERT(tp != NULL); + + q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags); + ASSERT(q != NULL); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)q); + + return (q); +} + + +/* + * This is called to mark the quotaoff logitem as needing + * to be logged when the transaction is committed. The logitem must + * already be associated with the given transaction. 
+ */ +void +xfs_trans_log_quotaoff_item( + xfs_trans_t *tp, + xfs_qoff_logitem_t *qlp) +{ + xfs_log_item_desc_t *lidp; + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; +} + +STATIC void +xfs_trans_alloc_dqinfo( + xfs_trans_t *tp) +{ + (tp)->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP); +} + +STATIC void +xfs_trans_free_dqinfo( + xfs_trans_t *tp) +{ + if (!tp->t_dqinfo) + return; + kmem_zone_free(xfs_Gqm->qm_dqtrxzone, (tp)->t_dqinfo); + (tp)->t_dqinfo = NULL; +} + +xfs_dqtrxops_t xfs_trans_dquot_ops = { + .qo_dup_dqinfo = xfs_trans_dup_dqinfo, + .qo_free_dqinfo = xfs_trans_free_dqinfo, + .qo_mod_dquot_byino = xfs_trans_mod_dquot_byino, + .qo_apply_dquot_deltas = xfs_trans_apply_dquot_deltas, + .qo_reserve_quota_nblks = xfs_trans_reserve_quota_nblks, + .qo_reserve_quota_bydquots = xfs_trans_reserve_quota_bydquots, + .qo_unreserve_and_mod_dquots = xfs_trans_unreserve_and_mod_dquots, +}; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/debug.c linux.22-ac2/fs/xfs/support/debug.c --- linux.vanilla/fs/xfs/support/debug.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/debug.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "debug.h" + +#include +#include +#include + +int doass = 1; +static char message[256]; /* keep it off the stack */ +static spinlock_t xfs_err_lock = SPIN_LOCK_UNLOCKED; + +/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */ +#define XFS_MAX_ERR_LEVEL 7 +#define XFS_ERR_MASK ((1 << 3) - 1) +static char *err_level[XFS_MAX_ERR_LEVEL+1] = + {KERN_EMERG, KERN_ALERT, KERN_CRIT, + KERN_ERR, KERN_WARNING, KERN_NOTICE, + KERN_INFO, KERN_DEBUG}; + +void +assfail(char *a, char *f, int l) +{ + printk("XFS assertion failed: %s, file: %s, line: %d\n", a, f, l); + BUG(); +} + +#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM)) + +unsigned long +random(void) +{ + static unsigned long RandomValue = 1; + /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ + register long rv = RandomValue; + register long lo; + register long hi; + + hi = rv / 127773; + lo = rv % 127773; + rv = 16807 * lo - 2836 * hi; + if( rv <= 0 ) rv += 2147483647; + return( RandomValue = rv ); +} + +int +get_thread_id(void) +{ + return current->pid; +} + +#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */ + +void +cmn_err(register int level, char *fmt, ...) +{ + char *fp = fmt; + int len; + ulong flags; + va_list ap; + + level &= XFS_ERR_MASK; + if (level > XFS_MAX_ERR_LEVEL) + level = XFS_MAX_ERR_LEVEL; + spin_lock_irqsave(&xfs_err_lock,flags); + va_start(ap, fmt); + if (*fmt == '!') fp++; + len = vsprintf(message, fp, ap); + if (message[len-1] != '\n') + strcat(message, "\n"); + printk("%s%s", err_level[level], message); + va_end(ap); + spin_unlock_irqrestore(&xfs_err_lock,flags); + + if (level == CE_PANIC) + BUG(); +} + + +void +icmn_err(register int level, char *fmt, va_list ap) +{ + ulong flags; + int len; + + level &= XFS_ERR_MASK; + if(level > XFS_MAX_ERR_LEVEL) + level = XFS_MAX_ERR_LEVEL; + spin_lock_irqsave(&xfs_err_lock,flags); + len = vsprintf(message, fmt, ap); + if (message[len-1] != '\n') + strcat(message, "\n"); + spin_unlock_irqrestore(&xfs_err_lock,flags); + printk("%s%s", err_level[level], message); + if (level == CE_PANIC) + BUG(); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/debug.h linux.22-ac2/fs/xfs/support/debug.h --- linux.vanilla/fs/xfs/support/debug.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/debug.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_DEBUG_H__ +#define __XFS_SUPPORT_DEBUG_H__ + +#include + +#define CE_DEBUG 7 /* debug */ +#define CE_CONT 6 /* continuation */ +#define CE_NOTE 5 /* notice */ +#define CE_WARN 4 /* warning */ +#define CE_ALERT 1 /* alert */ +#define CE_PANIC 0 /* panic */ + +extern void icmn_err(int, char *, va_list); +extern void cmn_err(int, char *, ...); + +#ifdef DEBUG +# ifdef lint +# define ASSERT(EX) ((void)0) /* avoid "constant in conditional" babble */ +# else +# define ASSERT(EX) ((!doass||(EX))?((void)0):assfail(#EX, __FILE__, __LINE__)) +# endif /* lint */ +#else +# define ASSERT(x) ((void)0) +#endif + +extern int doass; /* dynamically turn off asserts */ +extern void assfail(char *, char *, int); +#ifdef DEBUG +extern unsigned long random(void); +extern int get_thread_id(void); +#endif + +#define ASSERT_ALWAYS(EX) ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__)) +#define debug_stop_all_cpus(param) /* param is "cpumask_t *" */ + +#endif /* __XFS_SUPPORT_DEBUG_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/kmem.c linux.22-ac2/fs/xfs/support/kmem.c --- linux.vanilla/fs/xfs/support/kmem.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/kmem.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include +#include +#include +#include + +#include "time.h" +#include "kmem.h" + +#define DEF_PRIORITY (6) +#define MAX_SLAB_SIZE 0x10000 + +static __inline unsigned int flag_convert(int flags) +{ +#if DEBUG + if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS))) { + printk(KERN_WARNING + "XFS: memory allocation with wrong flags (%x)\n", flags); + BUG(); + } +#endif + + if (flags & KM_NOSLEEP) + return GFP_ATOMIC; + /* If we're in a transaction, FS activity is not ok */ + else if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS)) + return GFP_NOFS; + else + return GFP_KERNEL; +} + +#define MAX_SHAKE 8 + +static kmem_shake_func_t shake_list[MAX_SHAKE]; +static DECLARE_MUTEX(shake_sem); + +void kmem_shake_register(kmem_shake_func_t sfunc) +{ + int i; + + down(&shake_sem); + for (i = 0; i < MAX_SHAKE; i++) { + if (shake_list[i] == NULL) { + shake_list[i] = sfunc; + break; + } + } + if (i == MAX_SHAKE) + BUG(); + up(&shake_sem); +} + +void kmem_shake_deregister(kmem_shake_func_t sfunc) +{ + int i; + + down(&shake_sem); + for (i = 0; i < MAX_SHAKE; i++) { + if (shake_list[i] == sfunc) + break; + } + if (i == MAX_SHAKE) + BUG(); + for (; i < MAX_SHAKE - 1; i++) { + shake_list[i] = shake_list[i+1]; + } + shake_list[i] = NULL; + up(&shake_sem); +} + +static __inline__ void kmem_shake(void) +{ + int i; + + down(&shake_sem); + for (i = 0; i < MAX_SHAKE && shake_list[i]; i++) + (*shake_list[i])(); + up(&shake_sem); + delay(10); +} + +void * +kmem_alloc(size_t size, int flags) +{ + int shrink = DEF_PRIORITY; /* # times to try to shrink cache */ + void *rval; + +repeat: + if (MAX_SLAB_SIZE < size) { + /* Avoid doing filesystem sensitive stuff to get this */ + rval = __vmalloc(size, flag_convert(flags), PAGE_KERNEL); + } else { + rval = kmalloc(size, flag_convert(flags)); + } + + if (rval || (flags & KM_NOSLEEP)) + return rval; + + /* + * KM_SLEEP callers don't expect a failure + */ + if (shrink) { + kmem_shake(); + + shrink--; + goto repeat; + } + + rval = __vmalloc(size, flag_convert(flags), PAGE_KERNEL); + if (!rval && (flags & KM_SLEEP)) + panic("kmem_alloc: NULL memory on KM_SLEEP request!"); + + return rval; +} + +void * +kmem_zalloc(size_t size, int flags) +{ + void *ptr; + + ptr = kmem_alloc(size, flags); + + if (ptr) + memset((char *)ptr, 0, (int)size); + + return (ptr); +} + +void +kmem_free(void *ptr, size_t size) +{ + if (((unsigned long)ptr < VMALLOC_START) || + ((unsigned long)ptr >= VMALLOC_END)) { + kfree(ptr); + } else { + vfree(ptr); + } +} + +void * +kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags) +{ + void *new; + + new = kmem_alloc(newsize, flags); + if (ptr) { + if (new) + memcpy(new, ptr, + ((oldsize < newsize) ? 
oldsize : newsize)); + kmem_free(ptr, oldsize); + } + + return new; +} + +kmem_zone_t * +kmem_zone_init(int size, char *zone_name) +{ + return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL); +} + +void * +kmem_zone_alloc(kmem_zone_t *zone, int flags) +{ + int shrink = DEF_PRIORITY; /* # times to try to shrink cache */ + void *ptr = NULL; + +repeat: + ptr = kmem_cache_alloc(zone, flag_convert(flags)); + + if (ptr || (flags & KM_NOSLEEP)) + return ptr; + + /* + * KM_SLEEP callers don't expect a failure + */ + if (shrink) { + kmem_shake(); + + shrink--; + goto repeat; + } + + if (flags & KM_SLEEP) + panic("kmem_zone_alloc: NULL memory on KM_SLEEP request!"); + + return NULL; +} + +void * +kmem_zone_zalloc(kmem_zone_t *zone, int flags) +{ + int shrink = DEF_PRIORITY; /* # times to try to shrink cache */ + void *ptr = NULL; + +repeat: + ptr = kmem_cache_alloc(zone, flag_convert(flags)); + + if (ptr) { + memset(ptr, 0, kmem_cache_size(zone)); + return ptr; + } + + if (flags & KM_NOSLEEP) + return ptr; + + /* + * KM_SLEEP callers don't expect a failure + */ + if (shrink) { + kmem_shake(); + + shrink--; + goto repeat; + } + + if (flags & KM_SLEEP) + panic("kmem_zone_zalloc: NULL memory on KM_SLEEP request!"); + + return NULL; +} + +void +kmem_zone_free(kmem_zone_t *zone, void *ptr) +{ + kmem_cache_free(zone, ptr); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/kmem.h linux.22-ac2/fs/xfs/support/kmem.h --- linux.vanilla/fs/xfs/support/kmem.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/kmem.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_KMEM_H__ +#define __XFS_SUPPORT_KMEM_H__ + +#include + +/* + * memory management routines + */ +#define KM_SLEEP 0x0001 +#define KM_NOSLEEP 0x0002 +#define KM_NOFS 0x0004 + +#define kmem_zone kmem_cache_s +#define kmem_zone_t kmem_cache_t + +extern kmem_zone_t *kmem_zone_init(int, char *); +extern void *kmem_zone_zalloc(kmem_zone_t *, int); +extern void *kmem_zone_alloc(kmem_zone_t *, int); +extern void kmem_zone_free(kmem_zone_t *, void *); + +extern void *kmem_alloc(size_t, int); +extern void *kmem_realloc(void *, size_t, size_t, int); +extern void *kmem_zalloc(size_t, int); +extern void kmem_free(void *, size_t); + +typedef void (*kmem_shake_func_t)(void); + +extern void kmem_shake_register(kmem_shake_func_t); +extern void kmem_shake_deregister(kmem_shake_func_t); + +#endif /* __XFS_SUPPORT_KMEM_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/ktrace.c linux.22-ac2/fs/xfs/support/ktrace.c --- linux.vanilla/fs/xfs/support/ktrace.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/ktrace.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,378 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
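(Illustrative aside, not part of the patch.) kmem_alloc() and kmem_zone_alloc() above never return NULL to a KM_SLEEP caller if they can help it: on failure they invoke every registered "shake" callback so caches can give memory back, then retry a fixed number of times before falling back or panicking. The userspace sketch below shows only that retry-and-shake pattern; shake_register(), alloc_retry() and the other names are invented for the example and malloc() stands in for the kernel allocators.

#include <stdio.h>
#include <stdlib.h>

#define MAX_SHAKERS	8
#define MAX_RETRIES	6

typedef void (*shake_fn)(void);

static shake_fn shakers[MAX_SHAKERS];

/* Register a callback that can release cached memory under pressure. */
static void shake_register(shake_fn fn)
{
	for (int i = 0; i < MAX_SHAKERS; i++) {
		if (!shakers[i]) {
			shakers[i] = fn;
			return;
		}
	}
	abort();			/* table full: mirrors the BUG() in the patch */
}

static void shake_all(void)
{
	for (int i = 0; i < MAX_SHAKERS && shakers[i]; i++)
		shakers[i]();
}

/* Try the allocation; if it fails and the caller may sleep, shake and retry. */
static void *alloc_retry(size_t size, int may_sleep)
{
	for (int tries = 0; tries <= MAX_RETRIES; tries++) {
		void *p = malloc(size);
		if (p || !may_sleep)
			return p;	/* NOSLEEP callers just get the failure back */
		shake_all();
	}
	return NULL;
}

static void drop_some_cache(void)
{
	/* a real callback would free cached objects here */
}

int main(void)
{
	shake_register(drop_some_cache);
	void *p = alloc_retry(64, 1);
	printf("%s\n", p ? "allocated" : "failed");
	free(p);
	return 0;
}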
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include +#include + +#include +#include "kmem.h" +#include "spin.h" +#include "debug.h" +#include "ktrace.h" + +#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING)) + +static kmem_zone_t *ktrace_hdr_zone; +static kmem_zone_t *ktrace_ent_zone; +static int ktrace_zentries; + +void +ktrace_init(int zentries) +{ + ktrace_zentries = zentries; + + ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t), + "ktrace_hdr"); + ASSERT(ktrace_hdr_zone); + + ktrace_ent_zone = kmem_zone_init(ktrace_zentries + * sizeof(ktrace_entry_t), + "ktrace_ent"); + ASSERT(ktrace_ent_zone); +} + +void +ktrace_uninit(void) +{ + kmem_cache_destroy(ktrace_hdr_zone); + kmem_cache_destroy(ktrace_ent_zone); +} + +/* + * ktrace_alloc() + * + * Allocate a ktrace header and enough buffering for the given + * number of entries. + */ +ktrace_t * +ktrace_alloc(int nentries, int sleep) +{ + ktrace_t *ktp; + ktrace_entry_t *ktep; + + ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep); + + if (ktp == (ktrace_t*)NULL) { + /* + * KM_SLEEP callers don't expect failure. + */ + if (sleep & KM_SLEEP) + panic("ktrace_alloc: NULL memory on KM_SLEEP request!"); + + return NULL; + } + + /* + * Special treatment for buffers with the ktrace_zentries entries + */ + if (nentries == ktrace_zentries) { + ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone, + sleep); + } else { + ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)), + sleep); + } + + if (ktep == NULL) { + /* + * KM_SLEEP callers don't expect failure. + */ + if (sleep & KM_SLEEP) + panic("ktrace_alloc: NULL memory on KM_SLEEP request!"); + + kmem_free(ktp, sizeof(*ktp)); + + return NULL; + } + + spinlock_init(&(ktp->kt_lock), "kt_lock"); + + ktp->kt_entries = ktep; + ktp->kt_nentries = nentries; + ktp->kt_index = 0; + ktp->kt_rollover = 0; + + return ktp; +} + + +/* + * ktrace_free() + * + * Free up the ktrace header and buffer. It is up to the caller + * to ensure that no-one is referencing it. + */ +void +ktrace_free(ktrace_t *ktp) +{ + int entries_size; + + if (ktp == (ktrace_t *)NULL) + return; + + spinlock_destroy(&ktp->kt_lock); + + /* + * Special treatment for the Vnode trace buffer. + */ + if (ktp->kt_nentries == ktrace_zentries) { + kmem_zone_free(ktrace_ent_zone, ktp->kt_entries); + } else { + entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t)); + + kmem_free(ktp->kt_entries, entries_size); + } + + kmem_zone_free(ktrace_hdr_zone, ktp); +} + + +/* + * Enter the given values into the "next" entry in the trace buffer. + * kt_index is always the index of the next entry to be filled. + */ +void +ktrace_enter( + ktrace_t *ktp, + void *val0, + void *val1, + void *val2, + void *val3, + void *val4, + void *val5, + void *val6, + void *val7, + void *val8, + void *val9, + void *val10, + void *val11, + void *val12, + void *val13, + void *val14, + void *val15) +{ + static lock_t wrap_lock = SPIN_LOCK_UNLOCKED; + int index; + ktrace_entry_t *ktep; + + ASSERT(ktp != NULL); + + /* + * Grab an entry by pushing the index up to the next one. 
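(Illustrative aside, not part of the patch.) ktrace_enter() treats kt_entries as a fixed-size ring: the next slot index is claimed under a small lock, wraps back to zero at kt_nentries, and kt_rollover records that the buffer has wrapped at least once so readers know whether the oldest entry sits at kt_index or at 0. The standalone sketch below shows only that index arithmetic, with invented names (ring_sketch, ring_claim) and no locking.

#include <stdio.h>

#define NENTRIES 4

struct ring_sketch {
	int index;		/* next slot to fill */
	int rollover;		/* set once the buffer has wrapped */
	int slots[NENTRIES];
};

/* Claim the next slot, wrapping at the end and remembering that we wrapped. */
static int ring_claim(struct ring_sketch *r)
{
	int slot = r->index;

	if (++r->index == NENTRIES)
		r->index = 0;
	if (!r->rollover && slot == NENTRIES - 1)
		r->rollover = 1;
	return slot;
}

int main(void)
{
	struct ring_sketch r = { 0, 0, { 0 } };

	for (int v = 1; v <= 6; v++)
		r.slots[ring_claim(&r)] = v;

	/* oldest entry first: start at r.index if we have wrapped, else at 0 */
	int j = r.rollover ? r.index : 0;
	for (int i = 0; i < NENTRIES; i++, j = (j + 1) % NENTRIES)
		printf("%d ", r.slots[j]);
	printf("\n");		/* prints: 3 4 5 6 */
	return 0;
}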
+ */ + spin_lock(&wrap_lock); + index = ktp->kt_index; + if (++ktp->kt_index == ktp->kt_nentries) + ktp->kt_index = 0; + spin_unlock(&wrap_lock); + + if (!ktp->kt_rollover && index == ktp->kt_nentries - 1) + ktp->kt_rollover = 1; + + ASSERT((index >= 0) && (index < ktp->kt_nentries)); + + ktep = &(ktp->kt_entries[index]); + + ktep->val[0] = val0; + ktep->val[1] = val1; + ktep->val[2] = val2; + ktep->val[3] = val3; + ktep->val[4] = val4; + ktep->val[5] = val5; + ktep->val[6] = val6; + ktep->val[7] = val7; + ktep->val[8] = val8; + ktep->val[9] = val9; + ktep->val[10] = val10; + ktep->val[11] = val11; + ktep->val[12] = val12; + ktep->val[13] = val13; + ktep->val[14] = val14; + ktep->val[15] = val15; +} + +/* + * Return the number of entries in the trace buffer. + */ +int +ktrace_nentries( + ktrace_t *ktp) +{ + if (ktp == NULL) { + return 0; + } + + return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index); +} + + +/* + * ktrace_first() + * + * This is used to find the start of the trace buffer. + * In conjunction with ktrace_next() it can be used to + * iterate through the entire trace buffer. This code does + * not do any locking because it is assumed that it is called + * from the debugger. + * + * The caller must pass in a pointer to a ktrace_snap + * structure in which we will keep some state used to + * iterate through the buffer. This state must not touched + * by any code outside of this module. + */ +ktrace_entry_t * +ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp) +{ + ktrace_entry_t *ktep; + int index; + int nentries; + + if (ktp->kt_rollover) + index = ktp->kt_index; + else + index = 0; + + ktsp->ks_start = index; + ktep = &(ktp->kt_entries[index]); + + nentries = ktrace_nentries(ktp); + index++; + if (index < nentries) { + ktsp->ks_index = index; + } else { + ktsp->ks_index = 0; + if (index > nentries) + ktep = NULL; + } + return ktep; +} + + +/* + * ktrace_next() + * + * This is used to iterate through the entries of the given + * trace buffer. The caller must pass in the ktrace_snap_t + * structure initialized by ktrace_first(). The return value + * will be either a pointer to the next ktrace_entry or NULL + * if all of the entries have been traversed. + */ +ktrace_entry_t * +ktrace_next( + ktrace_t *ktp, + ktrace_snap_t *ktsp) +{ + int index; + ktrace_entry_t *ktep; + + index = ktsp->ks_index; + if (index == ktsp->ks_start) { + ktep = NULL; + } else { + ktep = &ktp->kt_entries[index]; + } + + index++; + if (index == ktrace_nentries(ktp)) { + ktsp->ks_index = 0; + } else { + ktsp->ks_index = index; + } + + return ktep; +} + +#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING)) +EXPORT_SYMBOL(ktrace_first); +EXPORT_SYMBOL(ktrace_next); +#endif + +/* + * ktrace_skip() + * + * Skip the next "count" entries and return the entry after that. + * Return NULL if this causes us to iterate past the beginning again. + */ + +ktrace_entry_t * +ktrace_skip( + ktrace_t *ktp, + int count, + ktrace_snap_t *ktsp) +{ + int index; + int new_index; + ktrace_entry_t *ktep; + int nentries = ktrace_nentries(ktp); + + index = ktsp->ks_index; + new_index = index + count; + while (new_index >= nentries) { + new_index -= nentries; + } + if (index == ktsp->ks_start) { + /* + * We've iterated around to the start, so we're done. + */ + ktep = NULL; + } else if ((new_index < index) && (index < ktsp->ks_index)) { + /* + * We've skipped past the start again, so we're done. 
+ */ + ktep = NULL; + ktsp->ks_index = ktsp->ks_start; + } else { + ktep = &(ktp->kt_entries[new_index]); + new_index++; + if (new_index == nentries) { + ktsp->ks_index = 0; + } else { + ktsp->ks_index = new_index; + } + } + return ktep; +} + +#else + +ktrace_t * +ktrace_alloc(int nentries, int sleep) +{ + /* + * KM_SLEEP callers don't expect failure. + */ + if (sleep & KM_SLEEP) + panic("ktrace_alloc: NULL memory on KM_SLEEP request!"); + + return NULL; +} +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/ktrace.h linux.22-ac2/fs/xfs/support/ktrace.h --- linux.vanilla/fs/xfs/support/ktrace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/ktrace.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_KTRACE_H__ +#define __XFS_SUPPORT_KTRACE_H__ + + +/* + * Trace buffer entry structure. + */ +typedef struct ktrace_entry { + void *val[16]; +} ktrace_entry_t; + +/* + * Trace buffer header structure. + */ +typedef struct ktrace { + lock_t kt_lock; /* mutex to guard counters */ + int kt_nentries; /* number of entries in trace buf */ + int kt_index; /* current index in entries */ + int kt_rollover; + ktrace_entry_t *kt_entries; /* buffer of entries */ +} ktrace_t; + +/* + * Trace buffer snapshot structure. + */ +typedef struct ktrace_snap { + int ks_start; /* kt_index at time of snap */ + int ks_index; /* current index */ +} ktrace_snap_t; + +/* + * Exported interfaces. 
+ */ +extern ktrace_t *ktrace_alloc(int, int); + +#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING)) + +extern void ktrace_init(int zentries); +extern void ktrace_uninit(void); + +extern void ktrace_free(ktrace_t *); + +extern void ktrace_enter( + ktrace_t *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *, + void *); + +extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *); +extern int ktrace_nentries(ktrace_t *); +extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *); +extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *); + +#else + +#define ktrace_free(ktp) +#define ktrace_enter(ktp,v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15) + +#endif + +#endif /* __XFS_SUPPORT_KTRACE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/Makefile linux.22-ac2/fs/xfs/support/Makefile --- linux.vanilla/fs/xfs/support/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/Makefile 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,54 @@ +# +# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. +# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# + +EXTRA_CFLAGS += -I.. + +ifeq ($(CONFIG_XFS_DEBUG),y) + EXTRA_CFLAGS += -DDEBUG +endif + +O_TARGET := support_xfs.o +ifneq ($(MAKECMDGOALS),modules_install) + obj-m := $(O_TARGET) +endif + +export-objs := ktrace.o + +obj-y := debug.o \ + kmem.o \ + ktrace.o \ + move.o \ + mrlock.o \ + qsort.o \ + uuid.o + +include $(TOPDIR)/Rules.make diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/move.c linux.22-ac2/fs/xfs/support/move.c --- linux.vanilla/fs/xfs/support/move.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/move.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include +#include +#include + +#include +#include "debug.h" +#include "move.h" + +/* + * Move "n" bytes at byte address "cp"; "rw" indicates the direction + * of the move, and the I/O parameters are provided in "uio", which is + * update to reflect the data which was moved. Returns 0 on success or + * a non-zero errno on failure. + */ +int +uiomove(void *cp, size_t n, enum uio_rw rw, struct uio *uio) +{ + register struct iovec *iov; + u_int cnt; + int error; + + while (n > 0 && uio->uio_resid) { + iov = uio->uio_iov; + cnt = (u_int)iov->iov_len; + if (cnt == 0) { + uio->uio_iov++; + uio->uio_iovcnt--; + continue; + } + if (cnt > n) + cnt = (u_int)n; + switch (uio->uio_segflg) { + case UIO_USERSPACE: + if (rw == UIO_READ) + error = copy_to_user(iov->iov_base, cp, cnt); + else + error = copy_from_user(cp, iov->iov_base, cnt); + if (error) + return EFAULT; + break; + + + case UIO_SYSSPACE: + if (rw == UIO_READ) + memcpy(iov->iov_base, cp, cnt); + else + memcpy(cp, iov->iov_base, cnt); + break; + + default: + ASSERT(0); + break; + } + iov->iov_base = (void *)((char *)iov->iov_base + cnt); + iov->iov_len -= cnt; + uio->uio_resid -= cnt; + uio->uio_offset += cnt; + cp = (void *)((char *)cp + cnt); + n -= cnt; + } + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/move.h linux.22-ac2/fs/xfs/support/move.h --- linux.vanilla/fs/xfs/support/move.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/move.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
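(Illustrative aside, not part of the patch.) uiomove() above walks an array of iovecs, copying min(iov_len, n) bytes at a time and advancing the base pointer, residual count and offset so the caller can feed data in arbitrary chunks. The userspace sketch below reproduces only the UIO_SYSSPACE copy loop; uiomove_sketch() is an invented name and the plain struct iovec from <sys/uio.h> stands in for the uio bookkeeping.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy n bytes from cp into a list of iovecs, consuming each one in turn. */
static void uiomove_sketch(const char *cp, size_t n, struct iovec *iov, int iovcnt)
{
	while (n > 0 && iovcnt > 0) {
		size_t cnt = iov->iov_len;

		if (cnt == 0) {			/* this iovec is exhausted, move on */
			iov++;
			iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		memcpy(iov->iov_base, cp, cnt);
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		cp += cnt;
		n -= cnt;
	}
}

int main(void)
{
	char a[4] = "", b[8] = "";
	struct iovec iov[2] = { { a, 3 }, { b, 7 } };

	uiomove_sketch("hello you", 9, iov, 2);
	printf("%s|%s\n", a, b);	/* prints: hel|lo you */
	return 0;
}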
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#ifndef __XFS_SUPPORT_MOVE_H__ +#define __XFS_SUPPORT_MOVE_H__ + +#include +#include + +typedef struct iovec iovec_t; + +typedef struct uio { + iovec_t *uio_iov; /* pointer to array of iovecs */ + int uio_iovcnt; /* number of iovecs */ + int uio_fmode; /* file mode flags */ + xfs_off_t uio_offset; /* file offset */ + short uio_segflg; /* address space (kernel or user) */ + ssize_t uio_resid; /* residual count */ +} uio_t; + +/* + * I/O direction. + */ +typedef enum uio_rw { UIO_READ, UIO_WRITE } uio_rw_t; + +/* + * Segment flag values. + */ +typedef enum uio_seg { + UIO_USERSPACE, /* uio_iov describes user space */ + UIO_SYSSPACE, /* uio_iov describes system space */ +} uio_seg_t; + + +extern int uiomove (void *, size_t, uio_rw_t, uio_t *); + +#endif /* __XFS_SUPPORT_MOVE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/mrlock.c linux.22-ac2/fs/xfs/support/mrlock.c --- linux.vanilla/fs/xfs/support/mrlock.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/mrlock.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include +#include +#include +#include +#include + +#include "mrlock.h" + + +#if USE_RW_WAIT_QUEUE_SPINLOCK +# define wq_write_lock write_lock +#else +# define wq_write_lock spin_lock +#endif + +/* + * We don't seem to need lock_type (only one supported), name, or + * sequence. But, XFS will pass it so let's leave them here for now. 
+ */ +/* ARGSUSED */ +void +mrlock_init(mrlock_t *mrp, int lock_type, char *name, long sequence) +{ + mrp->mr_count = 0; + mrp->mr_reads_waiting = 0; + mrp->mr_writes_waiting = 0; + init_waitqueue_head(&mrp->mr_readerq); + init_waitqueue_head(&mrp->mr_writerq); + mrp->mr_lock = SPIN_LOCK_UNLOCKED; +} + +/* + * Macros to lock/unlock the mrlock_t. + */ + +#define MRLOCK(m) spin_lock(&(m)->mr_lock); +#define MRUNLOCK(m) spin_unlock(&(m)->mr_lock); + + +/* + * lock_wait should never be called in an interrupt thread. + * + * mrlocks can sleep (i.e. call schedule) and so they can't ever + * be called from an interrupt thread. + * + * threads that wake-up should also never be invoked from interrupt threads. + * + * But, waitqueue_lock is locked from interrupt threads - and we are + * called with interrupts disabled, so it is all OK. + */ + +/* ARGSUSED */ +void +lock_wait(wait_queue_head_t *q, spinlock_t *lock, int rw) +{ + DECLARE_WAITQUEUE( wait, current ); + + __set_current_state(TASK_UNINTERRUPTIBLE); + + wq_write_lock(&q->lock); + if (rw) { + __add_wait_queue_tail(q, &wait); + } else { + __add_wait_queue(q, &wait); + } + + wq_write_unlock(&q->lock); + spin_unlock(lock); + + schedule(); + + wq_write_lock(&q->lock); + __remove_wait_queue(q, &wait); + wq_write_unlock(&q->lock); + + spin_lock(lock); + + /* return with lock held */ +} + +/* ARGSUSED */ +void +mrfree(mrlock_t *mrp) +{ +} + +/* ARGSUSED */ +void +mrlock(mrlock_t *mrp, int type, int flags) +{ + if (type == MR_ACCESS) + mraccess(mrp); + else + mrupdate(mrp); +} + +/* ARGSUSED */ +void +mraccessf(mrlock_t *mrp, int flags) +{ + MRLOCK(mrp); + if(mrp->mr_writes_waiting > 0) { + mrp->mr_reads_waiting++; + lock_wait(&mrp->mr_readerq, &mrp->mr_lock, 0); + mrp->mr_reads_waiting--; + } + while (mrp->mr_count < 0) { + mrp->mr_reads_waiting++; + lock_wait(&mrp->mr_readerq, &mrp->mr_lock, 0); + mrp->mr_reads_waiting--; + } + mrp->mr_count++; + MRUNLOCK(mrp); +} + +/* ARGSUSED */ +void +mrupdatef(mrlock_t *mrp, int flags) +{ + MRLOCK(mrp); + while(mrp->mr_count) { + mrp->mr_writes_waiting++; + lock_wait(&mrp->mr_writerq, &mrp->mr_lock, 1); + mrp->mr_writes_waiting--; + } + + mrp->mr_count = -1; /* writer on it */ + MRUNLOCK(mrp); +} + +int +mrtryaccess(mrlock_t *mrp) +{ + MRLOCK(mrp); + /* + * If anyone is waiting for update access or the lock is held for update + * fail the request. + */ + if(mrp->mr_writes_waiting > 0 || mrp->mr_count < 0) { + MRUNLOCK(mrp); + return 0; + } + mrp->mr_count++; + MRUNLOCK(mrp); + return 1; +} + +int +mrtrypromote(mrlock_t *mrp) +{ + MRLOCK(mrp); + + if(mrp->mr_count == 1) { /* We are the only thread with the lock */ + mrp->mr_count = -1; /* writer on it */ + MRUNLOCK(mrp); + return 1; + } + + MRUNLOCK(mrp); + return 0; +} + +int +mrtryupdate(mrlock_t *mrp) +{ + MRLOCK(mrp); + + if(mrp->mr_count) { + MRUNLOCK(mrp); + return 0; + } + + mrp->mr_count = -1; /* writer on it */ + MRUNLOCK(mrp); + return 1; +} + +static __inline__ void mrwake(mrlock_t *mrp) +{ + /* + * First, if the count is now 0, we need to wake-up anyone waiting. 
+ */ + if (!mrp->mr_count) { + if (mrp->mr_writes_waiting) { /* Wake-up first writer waiting */ + wake_up(&mrp->mr_writerq); + } else if (mrp->mr_reads_waiting) { /* Wakeup any readers waiting */ + wake_up(&mrp->mr_readerq); + } + } +} + +void +mraccunlock(mrlock_t *mrp) +{ + MRLOCK(mrp); + mrp->mr_count--; + mrwake(mrp); + MRUNLOCK(mrp); +} + +void +mrunlock(mrlock_t *mrp) +{ + MRLOCK(mrp); + if (mrp->mr_count < 0) { + mrp->mr_count = 0; + } else { + mrp->mr_count--; + } + mrwake(mrp); + MRUNLOCK(mrp); +} + +int +ismrlocked(mrlock_t *mrp, int type) /* No need to lock since info can change */ +{ + if (type == MR_ACCESS) + return (mrp->mr_count > 0); /* Read lock */ + else if (type == MR_UPDATE) + return (mrp->mr_count < 0); /* Write lock */ + else if (type == (MR_UPDATE | MR_ACCESS)) + return (mrp->mr_count); /* Any type of lock held */ + else /* Any waiters */ + return (mrp->mr_reads_waiting | mrp->mr_writes_waiting); +} + +/* + * Demote from update to access. We better be the only thread with the + * lock in update mode so it should be easy to set to 1. + * Wake-up any readers waiting. + */ + +void +mrdemote(mrlock_t *mrp) +{ + MRLOCK(mrp); + mrp->mr_count = 1; + if (mrp->mr_reads_waiting) { /* Wakeup all readers waiting */ + wake_up(&mrp->mr_readerq); + } + MRUNLOCK(mrp); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/mrlock.h linux.22-ac2/fs/xfs/support/mrlock.h --- linux.vanilla/fs/xfs/support/mrlock.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/mrlock.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_MRLOCK_H__ +#define __XFS_SUPPORT_MRLOCK_H__ + +#include +#include +#include +#include +#include + +/* + * Implement mrlocks on Linux that work for XFS. + * + * These are sleep locks and not spinlocks. If one wants read/write spinlocks, + * use read_lock, write_lock, ... see spinlock.h. 
+ */ + +typedef struct mrlock_s { + int mr_count; + unsigned short mr_reads_waiting; + unsigned short mr_writes_waiting; + wait_queue_head_t mr_readerq; + wait_queue_head_t mr_writerq; + spinlock_t mr_lock; +} mrlock_t; + +#define MR_ACCESS 1 +#define MR_UPDATE 2 + +#define MRLOCK_BARRIER 0x1 +#define MRLOCK_ALLOW_EQUAL_PRI 0x8 + +/* + * mraccessf/mrupdatef take flags to be passed in while sleeping; + * only PLTWAIT is currently supported. + */ + +extern void mraccessf(mrlock_t *, int); +extern void mrupdatef(mrlock_t *, int); +extern void mrlock(mrlock_t *, int, int); +extern void mrunlock(mrlock_t *); +extern void mraccunlock(mrlock_t *); +extern int mrtryupdate(mrlock_t *); +extern int mrtryaccess(mrlock_t *); +extern int mrtrypromote(mrlock_t *); +extern void mrdemote(mrlock_t *); + +extern int ismrlocked(mrlock_t *, int); +extern void mrlock_init(mrlock_t *, int type, char *name, long sequence); +extern void mrfree(mrlock_t *); + +#define mrinit(mrp, name) mrlock_init(mrp, MRLOCK_BARRIER, name, -1) +#define mraccess(mrp) mraccessf(mrp, 0) /* grab for READ/ACCESS */ +#define mrupdate(mrp) mrupdatef(mrp, 0) /* grab for WRITE/UPDATE */ +#define mrislocked_access(mrp) ((mrp)->mr_count > 0) +#define mrislocked_update(mrp) ((mrp)->mr_count < 0) + +#endif /* __XFS_SUPPORT_MRLOCK_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/mutex.h linux.22-ac2/fs/xfs/support/mutex.h --- linux.vanilla/fs/xfs/support/mutex.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/mutex.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * Portions Copyright (c) 2002 Christoph Hellwig. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_MUTEX_H__ +#define __XFS_SUPPORT_MUTEX_H__ + +#include +#include + +/* + * Map the mutex'es from IRIX to Linux semaphores. + * + * Destroy just simply initializes to -99 which should block all other + * callers. + */ +#define MUTEX_DEFAULT 0x0 +typedef struct semaphore mutex_t; + +#define mutex_init(lock, type, name) sema_init(lock, 1) +#define mutex_destroy(lock) sema_init(lock, -99) +#define mutex_lock(lock, num) down(lock) +#define mutex_trylock(lock) (down_trylock(lock) ? 
0 : 1) +#define mutex_unlock(lock) up(lock) + +#endif /* __XFS_SUPPORT_MUTEX_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/qsort.c linux.22-ac2/fs/xfs/support/qsort.c --- linux.vanilla/fs/xfs/support/qsort.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/qsort.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,243 @@ +/* Copyright (C) 1991, 1992, 1996, 1997, 1999 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Written by Douglas C. Schmidt (schmidt@ics.uci.edu). + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +/* If you consider tuning this algorithm, you should consult first: + Engineering a sort function; Jon Bentley and M. Douglas McIlroy; + Software - Practice and Experience; Vol. 23 (11), 1249-1265, 1993. */ + +#include +#include + +/* Byte-wise swap two items of size SIZE. */ +#define SWAP(a, b, size) \ + do \ + { \ + register size_t __size = (size); \ + register char *__a = (a), *__b = (b); \ + do \ + { \ + char __tmp = *__a; \ + *__a++ = *__b; \ + *__b++ = __tmp; \ + } while (--__size > 0); \ + } while (0) + +/* Discontinue quicksort algorithm when partition gets below this size. + This particular magic number was chosen to work best on a Sun 4/260. */ +#define MAX_THRESH 4 + +/* Stack node declarations used to store unfulfilled partition obligations. */ +typedef struct + { + char *lo; + char *hi; + } stack_node; + +/* The next 4 #defines implement a very fast in-line stack abstraction. */ +/* The stack needs log (total_elements) entries (we could even subtract + log(MAX_THRESH)). Since total_elements has type size_t, we get as + upper bound for log (total_elements): + bits per byte (CHAR_BIT) * sizeof(size_t). */ +#define STACK_SIZE (8 * sizeof(unsigned long int)) +#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top)) +#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi))) +#define STACK_NOT_EMPTY (stack < top) + + +/* Order size using quicksort. This implementation incorporates + four optimizations discussed in Sedgewick: + + 1. Non-recursive, using an explicit stack of pointer that store the + next array partition to sort. To save time, this maximum amount + of space required to store an array of SIZE_MAX is allocated on the + stack. Assuming a 32-bit (64 bit) integer for size_t, this needs + only 32 * sizeof(stack_node) == 256 bytes (for 64 bit: 1024 bytes). + Pretty cheap, actually. + + 2. Chose the pivot element using a median-of-three decision tree. + This reduces the probability of selecting a bad pivot value and + eliminates certain extraneous comparisons. + + 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving + insertion sort to order the MAX_THRESH items within each partition. 
+ This is a big win, since insertion sort is faster for small, mostly + sorted array segments. + + 4. The larger of the two sub-partitions is always pushed onto the + stack first, with the algorithm then concentrating on the + smaller partition. This *guarantees* no more than log (total_elems) + stack size is needed (actually O(1) in this case)! */ + +void +qsort (void *const pbase, size_t total_elems, size_t size, + int (*cmp)(const void *, const void *)) +{ + register char *base_ptr = (char *) pbase; + + const size_t max_thresh = MAX_THRESH * size; + + if (total_elems == 0) + /* Avoid lossage with unsigned arithmetic below. */ + return; + + if (total_elems > MAX_THRESH) + { + char *lo = base_ptr; + char *hi = &lo[size * (total_elems - 1)]; + stack_node stack[STACK_SIZE]; + stack_node *top = stack + 1; + + while (STACK_NOT_EMPTY) + { + char *left_ptr; + char *right_ptr; + + /* Select median value from among LO, MID, and HI. Rearrange + LO and HI so the three values are sorted. This lowers the + probability of picking a pathological pivot value and + skips a comparison for both the LEFT_PTR and RIGHT_PTR in + the while loops. */ + + char *mid = lo + size * ((hi - lo) / size >> 1); + + if ((*cmp) ((void *) mid, (void *) lo) < 0) + SWAP (mid, lo, size); + if ((*cmp) ((void *) hi, (void *) mid) < 0) + SWAP (mid, hi, size); + else + goto jump_over; + if ((*cmp) ((void *) mid, (void *) lo) < 0) + SWAP (mid, lo, size); + jump_over:; + + left_ptr = lo + size; + right_ptr = hi - size; + + /* Here's the famous ``collapse the walls'' section of quicksort. + Gotta like those tight inner loops! They are the main reason + that this algorithm runs much faster than others. */ + do + { + while ((*cmp) ((void *) left_ptr, (void *) mid) < 0) + left_ptr += size; + + while ((*cmp) ((void *) mid, (void *) right_ptr) < 0) + right_ptr -= size; + + if (left_ptr < right_ptr) + { + SWAP (left_ptr, right_ptr, size); + if (mid == left_ptr) + mid = right_ptr; + else if (mid == right_ptr) + mid = left_ptr; + left_ptr += size; + right_ptr -= size; + } + else if (left_ptr == right_ptr) + { + left_ptr += size; + right_ptr -= size; + break; + } + } + while (left_ptr <= right_ptr); + + /* Set up pointers for next iteration. First determine whether + left and right partitions are below the threshold size. If so, + ignore one or both. Otherwise, push the larger partition's + bounds on the stack and continue sorting the smaller one. */ + + if ((size_t) (right_ptr - lo) <= max_thresh) + { + if ((size_t) (hi - left_ptr) <= max_thresh) + /* Ignore both small partitions. */ + POP (lo, hi); + else + /* Ignore small left partition. */ + lo = left_ptr; + } + else if ((size_t) (hi - left_ptr) <= max_thresh) + /* Ignore small right partition. */ + hi = right_ptr; + else if ((right_ptr - lo) > (hi - left_ptr)) + { + /* Push larger left partition indices. */ + PUSH (lo, right_ptr); + lo = left_ptr; + } + else + { + /* Push larger right partition indices. */ + PUSH (left_ptr, hi); + hi = right_ptr; + } + } + } + + /* Once the BASE_PTR array is partially sorted by quicksort the rest + is completely sorted using insertion sort, since this is efficient + for partitions below MAX_THRESH size. BASE_PTR points to the beginning + of the array to sort, and END_PTR points at the very last element in + the array (*not* one beyond it!). 
*/ + { + char *const end_ptr = &base_ptr[size * (total_elems - 1)]; + char *tmp_ptr = base_ptr; + char *thresh = min(end_ptr, base_ptr + max_thresh); + register char *run_ptr; + + /* Find smallest element in first threshold and place it at the + array's beginning. This is the smallest array element, + and the operation speeds up insertion sort's inner loop. */ + + for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size) + if ((*cmp) ((void *) run_ptr, (void *) tmp_ptr) < 0) + tmp_ptr = run_ptr; + + if (tmp_ptr != base_ptr) + SWAP (tmp_ptr, base_ptr, size); + + /* Insertion sort, running from left-hand-side up to right-hand-side. */ + + run_ptr = base_ptr + size; + while ((run_ptr += size) <= end_ptr) + { + tmp_ptr = run_ptr - size; + while ((*cmp) ((void *) run_ptr, (void *) tmp_ptr) < 0) + tmp_ptr -= size; + + tmp_ptr += size; + if (tmp_ptr != run_ptr) + { + char *trav; + + trav = run_ptr + size; + while (--trav >= run_ptr) + { + char c = *trav; + char *hi, *lo; + + for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo) + *hi = *lo; + *hi = c; + } + } + } + } +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/qsort.h linux.22-ac2/fs/xfs/support/qsort.h --- linux.vanilla/fs/xfs/support/qsort.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/qsort.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#ifndef QSORT_H +#define QSORT_H + +extern void qsort (void *const pbase, + size_t total_elems, + size_t size, + int (*cmp)(const void *, const void *)); + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/sema.h linux.22-ac2/fs/xfs/support/sema.h --- linux.vanilla/fs/xfs/support/sema.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/sema.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
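
The qsort.h header above exports the glibc-derived sort with the familiar four-argument signature, and the ACL code later in this patch uses it to order ACL entries by (ae_tag, ae_id) before converting them to the xattr representation. A throwaway userspace example of the same comparator pattern (hypothetical struct and field names; the standard libc qsort stands in for the in-kernel copy):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for an ACL entry: primary key "tag", secondary key "id". */
    struct entry { int tag; int id; };

    static int entry_compare(const void *va, const void *vb)
    {
            const struct entry *a = va, *b = vb;

            if (a->tag == b->tag)
                    return a->id - b->id;
            return a->tag - b->tag;
    }

    int main(void)
    {
            struct entry e[] = { {4, 0}, {2, 7}, {2, 3}, {1, 0} };
            size_t i, n = sizeof(e) / sizeof(e[0]);

            qsort(e, n, sizeof(e[0]), entry_compare);
            for (i = 0; i < n; i++)
                    printf("(%d,%d) ", e[i].tag, e[i].id);
            printf("\n");
            return 0;
    }

This prints (1,0) (2,3) (2,7) (4,0): entries sorted by tag first, then by id within equal tags.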
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_SEMA_H__ +#define __XFS_SUPPORT_SEMA_H__ + +#include +#include +#include +#include +#include + +/* + * sema_t structure just maps to struct semaphore in Linux kernel. + */ + +typedef struct semaphore sema_t; + +#define init_sema(sp, val, c, d) sema_init(sp, val) +#define initsema(sp, val) sema_init(sp, val) +#define initnsema(sp, val, name) sema_init(sp, val) +#define psema(sp, b) down(sp) +#define vsema(sp) up(sp) +#define valusema(sp) (atomic_read(&(sp)->count)) +#define freesema(sema) + +/* + * Map cpsema (try to get the sema) to down_trylock. We need to switch + * the return values since cpsema returns 1 (acquired) 0 (failed) and + * down_trylock returns the reverse 0 (acquired) 1 (failed). + */ + +#define cpsema(sp) (down_trylock(sp) ? 0 : 1) + +/* + * Didn't do cvsema(sp). Not sure how to map this to up/down/... + * It does a vsema if the values is < 0 other wise nothing. + */ + +#endif /* __XFS_SUPPORT_SEMA_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/spin.h linux.22-ac2/fs/xfs/support/spin.h --- linux.vanilla/fs/xfs/support/spin.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/spin.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * Portions Copyright (c) 2002 Christoph Hellwig. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
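
sema.h above maps the IRIX semaphore calls onto Linux struct semaphore, and the one wrinkle it documents is cpsema(): IRIX reports 1 for "got it" and 0 for "didn't", while down_trylock() returns 0 on success, so the macro inverts the result. A userspace sketch of that inversion with a mocked-up trylock (mock_down_trylock and struct mock_sema are invented for the demo; they are not kernel calls):

    #include <stdio.h>

    /* Pretend semaphore: count > 0 means the semaphore is free. */
    struct mock_sema { int count; };

    /* Mimics down_trylock(): 0 on success, 1 if it would have blocked. */
    static int mock_down_trylock(struct mock_sema *sp)
    {
            if (sp->count > 0) {
                    sp->count--;
                    return 0;
            }
            return 1;
    }

    /* Same shape as the cpsema() macro in sema.h: invert the result so
     * 1 means "acquired" and 0 means "busy". */
    #define cpsema(sp)      (mock_down_trylock(sp) ? 0 : 1)

    int main(void)
    {
            struct mock_sema s = { 1 };

            printf("first try:  %d\n", cpsema(&s));   /* 1: acquired */
            printf("second try: %d\n", cpsema(&s));   /* 0: busy     */
            return 0;
    }

The mutex.h wrapper earlier in the patch plays the same trick for mutex_trylock(), for the same reason.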
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_SPIN_H__ +#define __XFS_SUPPORT_SPIN_H__ + +#include /* preempt needs this */ +#include + +/* + * Map lock_t from IRIX to Linux spinlocks. + * + * Note that linux turns on/off spinlocks depending on CONFIG_SMP. + * We don't need to worry about SMP or not here. + */ + +typedef spinlock_t lock_t; + +#define spinlock_init(lock, name) spin_lock_init(lock) +#define spinlock_destroy(lock) + +static inline unsigned long mutex_spinlock(lock_t *lock) +{ + spin_lock(lock); + return 0; +} + +/*ARGSUSED*/ +static inline void mutex_spinunlock(lock_t *lock, unsigned long s) +{ + spin_unlock(lock); +} + +static inline void nested_spinlock(lock_t *lock) +{ + spin_lock(lock); +} + +static inline void nested_spinunlock(lock_t *lock) +{ + spin_unlock(lock); +} + +#endif /* __XFS_SUPPORT_SPIN_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/sv.h linux.22-ac2/fs/xfs/support/sv.h --- linux.vanilla/fs/xfs/support/sv.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/sv.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * Portions Copyright (c) 2002 Christoph Hellwig. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_SV_H__ +#define __XFS_SUPPORT_SV_H__ + +#include +#include +#include + +/* + * Synchronisation variables. 
+ * + * (Parameters "pri", "svf" and "rts" are not implemented) + */ + +typedef struct sv_s { + wait_queue_head_t waiters; +} sv_t; + +#define SV_FIFO 0x0 /* sv_t is FIFO type */ +#define SV_LIFO 0x2 /* sv_t is LIFO type */ +#define SV_PRIO 0x4 /* sv_t is PRIO type */ +#define SV_KEYED 0x6 /* sv_t is KEYED type */ +#define SV_DEFAULT SV_FIFO + + +static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state, + unsigned long timeout) +{ + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue_exclusive(&sv->waiters, &wait); + __set_current_state(state); + spin_unlock(lock); + + schedule_timeout(timeout); + + remove_wait_queue(&sv->waiters, &wait); +} + +#define init_sv(sv,type,name,flag) \ + init_waitqueue_head(&(sv)->waiters) +#define sv_init(sv,flag,name) \ + init_waitqueue_head(&(sv)->waiters) +#define sv_destroy(sv) \ + /*NOTHING*/ +#define sv_wait(sv, pri, lock, s) \ + _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) +#define sv_wait_sig(sv, pri, lock, s) \ + _sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) +#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \ + _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts)) +#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \ + _sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts)) +#define sv_signal(sv) \ + wake_up(&(sv)->waiters) +#define sv_broadcast(sv) \ + wake_up_all(&(sv)->waiters) + +#endif /* __XFS_SUPPORT_SV_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/time.h linux.22-ac2/fs/xfs/support/time.h --- linux.vanilla/fs/xfs/support/time.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/time.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
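
The sv_t wrappers above turn IRIX "synchronisation variables" into plain Linux wait queues, and they carry one trap worth spelling out: _sv_wait() drops the spinlock passed in and returns without re-acquiring it, so a caller that wants to recheck its condition must retake the lock itself (unlike pthread_cond_wait(), which hands the mutex back). The pthread sketch below shows the analogous wait/signal shape in userspace; it is an analogy only, not code from the patch, and the explicit re-lock after waking mirrors what an sv_wait() caller has to do.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
    static int ready;

    /* Userspace analogue of sv_wait(): called with 'lock' held, sleeps,
     * and returns with 'lock' RELEASED, as _sv_wait() does in the patch. */
    static void my_sv_wait(void)
    {
            pthread_cond_wait(&cv, &lock);  /* re-acquires the lock on wakeup... */
            pthread_mutex_unlock(&lock);    /* ...so drop it again to match */
    }

    static void *waiter(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            while (!ready) {
                    my_sv_wait();                   /* returns unlocked */
                    pthread_mutex_lock(&lock);      /* retake before recheck */
            }
            pthread_mutex_unlock(&lock);
            printf("waiter: condition seen\n");
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, waiter, NULL);
            sleep(1);

            pthread_mutex_lock(&lock);      /* like signalling under the lock */
            ready = 1;
            pthread_cond_signal(&cv);
            pthread_mutex_unlock(&lock);

            pthread_join(t, NULL);
            return 0;
    }

sv_signal() maps to wake_up() and sv_broadcast() to wake_up_all(), and because _sv_wait() uses an exclusive wait queue entry, a single wake_up() rouses only one sleeper.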
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_TIME_H__ +#define __XFS_SUPPORT_TIME_H__ + +#include +#include + +typedef struct timespec timespec_t; + +static inline void delay(long ticks) +{ + current->state = TASK_UNINTERRUPTIBLE; + schedule_timeout(ticks); +} + +static inline void nanotime(struct timespec *tvp) +{ + struct timeval tv; + + do_gettimeofday(&tv); + tvp->tv_sec = tv.tv_sec; + tvp->tv_nsec = tv.tv_usec * 1000; +} + +#endif /* __XFS_SUPPORT_TIME_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/uuid.c linux.22-ac2/fs/xfs/support/uuid.c --- linux.vanilla/fs/xfs/support/uuid.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/uuid.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include +#include +#include +#include "time.h" +#include "uuid.h" +#include "kmem.h" +#include "debug.h" +#include "mutex.h" + +static mutex_t uuid_monitor; +static int uuid_table_size; +static uuid_t *uuid_table; + +void +uuid_init(void) +{ + mutex_init(&uuid_monitor, MUTEX_DEFAULT, "uuid_monitor"); +} + +/* + * uuid_getnodeuniq - obtain the node unique fields of a UUID. + * + * This is not in any way a standard or condoned UUID function; + * it just something that's needed for user-level file handles. + */ +void +uuid_getnodeuniq(uuid_t *uuid, int fsid [2]) +{ + char *uu = (char *)uuid; + + /* on IRIX, this function assumes big-endian fields within + * the uuid, so we use INT_GET to get the same result on + * little-endian systems + */ + + fsid[0] = (INT_GET(*(u_int16_t*)(uu+8), ARCH_CONVERT) << 16) + + INT_GET(*(u_int16_t*)(uu+4), ARCH_CONVERT); + fsid[1] = INT_GET(*(u_int32_t*)(uu ), ARCH_CONVERT); +} + +void +uuid_create_nil(uuid_t *uuid) +{ + memset(uuid, 0, sizeof(*uuid)); +} + +int +uuid_is_nil(uuid_t *uuid) +{ + int i; + char *cp = (char *)uuid; + + if (uuid == NULL) + return 0; + /* implied check of version number here... 
*/ + for (i = 0; i < sizeof *uuid; i++) + if (*cp++) return 0; /* not nil */ + return 1; /* is nil */ +} + +int +uuid_equal(uuid_t *uuid1, uuid_t *uuid2) +{ + return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1; +} + +/* + * Given a 128-bit uuid, return a 64-bit value by adding the top and bottom + * 64-bit words. NOTE: This function can not be changed EVER. Although + * brain-dead, some applications depend on this 64-bit value remaining + * persistent. Specifically, DMI vendors store the value as a persistent + * filehandle. + */ +__uint64_t +uuid_hash64(uuid_t *uuid) +{ + __uint64_t *sp = (__uint64_t *)uuid; + + return sp[0] + sp[1]; +} + +int +uuid_table_insert(uuid_t *uuid) +{ + int i, hole; + + mutex_lock(&uuid_monitor, PVFS); + for (i = 0, hole = -1; i < uuid_table_size; i++) { + if (uuid_is_nil(&uuid_table[i])) { + hole = i; + continue; + } + if (uuid_equal(uuid, &uuid_table[i])) { + mutex_unlock(&uuid_monitor); + return 0; + } + } + if (hole < 0) { + uuid_table = kmem_realloc(uuid_table, + (uuid_table_size + 1) * sizeof(*uuid_table), + uuid_table_size * sizeof(*uuid_table), + KM_SLEEP); + hole = uuid_table_size++; + } + uuid_table[hole] = *uuid; + mutex_unlock(&uuid_monitor); + return 1; +} + +void +uuid_table_remove(uuid_t *uuid) +{ + int i; + + mutex_lock(&uuid_monitor, PVFS); + for (i = 0; i < uuid_table_size; i++) { + if (uuid_is_nil(&uuid_table[i])) + continue; + if (!uuid_equal(uuid, &uuid_table[i])) + continue; + uuid_create_nil(&uuid_table[i]); + break; + } + ASSERT(i < uuid_table_size); + mutex_unlock(&uuid_monitor); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/support/uuid.h linux.22-ac2/fs/xfs/support/uuid.h --- linux.vanilla/fs/xfs/support/uuid.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/support/uuid.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
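
uuid_table_insert() and uuid_table_remove() above keep a small, mutex-protected table of filesystem UUIDs: insert returns 0 when an equal UUID is already present (which lets the caller refuse to mount the same filesystem twice) and reuses nil slots left behind by removals before growing the array with kmem_realloc(). The toy below models just that bookkeeping in userspace, with plain ints standing in for UUIDs and a fixed-size table instead of reallocation; the function names are made up, there is no locking, and it only illustrates the hole-reuse and duplicate check.

    #include <stdio.h>

    #define TABLE_SIZE 8
    #define NIL        0            /* 0 plays the role of the nil UUID */

    static int table[TABLE_SIZE];

    /* Returns 1 if inserted, 0 if an equal id was already present. */
    static int id_table_insert(int id)
    {
            int i, hole = -1;

            for (i = 0; i < TABLE_SIZE; i++) {
                    if (table[i] == NIL) {
                            if (hole < 0)
                                    hole = i;       /* remember first free slot */
                            continue;
                    }
                    if (table[i] == id)
                            return 0;               /* duplicate: reject */
            }
            if (hole < 0)
                    return 0;                       /* table full (model only) */
            table[hole] = id;
            return 1;
    }

    static void id_table_remove(int id)
    {
            int i;

            for (i = 0; i < TABLE_SIZE; i++)
                    if (table[i] == id)
                            table[i] = NIL;         /* leave a reusable hole */
    }

    int main(void)
    {
            printf("insert 42: %d\n", id_table_insert(42));   /* 1 */
            printf("insert 42: %d\n", id_table_insert(42));   /* 0, duplicate */
            id_table_remove(42);
            printf("insert 42: %d\n", id_table_insert(42));   /* 1 again */
            return 0;
    }
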
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SUPPORT_UUID_H__ +#define __XFS_SUPPORT_UUID_H__ + +typedef struct { + unsigned char __u_bits[16]; +} uuid_t; + +void uuid_init(void); +void uuid_create_nil(uuid_t *uuid); +int uuid_is_nil(uuid_t *uuid); +int uuid_equal(uuid_t *uuid1, uuid_t *uuid2); +void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]); +__uint64_t uuid_hash64(uuid_t *uuid); +int uuid_table_insert(uuid_t *uuid); +void uuid_table_remove(uuid_t *uuid); + +#endif /* __XFS_SUPPORT_UUID_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_acl.c linux.22-ac2/fs/xfs/xfs_acl.c --- linux.vanilla/fs/xfs/xfs_acl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_acl.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,975 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_inum.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_acl.h" +#include "xfs_mac.h" +#include "xfs_attr.h" + +#include + +STATIC int xfs_acl_setmode(vnode_t *, xfs_acl_t *, int *); +STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *); +STATIC void xfs_acl_get_endian(xfs_acl_t *); +STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *); +STATIC int xfs_acl_invalid(xfs_acl_t *); +STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *); +STATIC void xfs_acl_get_attr(vnode_t *, xfs_acl_t *, int, int, int *); +STATIC void xfs_acl_set_attr(vnode_t *, xfs_acl_t *, int, int *); +STATIC int xfs_acl_allow_set(vnode_t *, int); + +kmem_zone_t *xfs_acl_zone; + + +/* + * Test for existence of access ACL attribute as efficiently as possible. 
+ */ +int +xfs_acl_vhasacl_access( + vnode_t *vp) +{ + int error; + + xfs_acl_get_attr(vp, NULL, _ACL_TYPE_ACCESS, ATTR_KERNOVAL, &error); + return (error == 0); +} + +/* + * Test for existence of default ACL attribute as efficiently as possible. + */ +int +xfs_acl_vhasacl_default( + vnode_t *vp) +{ + int error; + + if (vp->v_type != VDIR) + return 0; + xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error); + return (error == 0); +} + +/* + * Convert from extended attribute representation to in-memory for XFS. + */ +STATIC int +posix_acl_xattr_to_xfs( + posix_acl_xattr_header *src, + size_t size, + xfs_acl_t *dest) +{ + posix_acl_xattr_entry *src_entry; + xfs_acl_entry_t *dest_entry; + int n; + + if (!src || !dest) + return EINVAL; + + if (size < sizeof(posix_acl_xattr_header)) + return EINVAL; + + if (src->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION)) + return EINVAL; + + memset(dest, 0, sizeof(xfs_acl_t)); + dest->acl_cnt = posix_acl_xattr_count(size); + if (dest->acl_cnt < 0 || dest->acl_cnt > XFS_ACL_MAX_ENTRIES) + return EINVAL; + + /* + * acl_set_file(3) may request that we set default ACLs with + * zero length -- defend (gracefully) against that here. + */ + if (!dest->acl_cnt) + return 0; + + src_entry = (posix_acl_xattr_entry *)((char *)src + sizeof(*src)); + dest_entry = &dest->acl_entry[0]; + + for (n = 0; n < dest->acl_cnt; n++, src_entry++, dest_entry++) { + dest_entry->ae_perm = le16_to_cpu(src_entry->e_perm); + if (_ACL_PERM_INVALID(dest_entry->ae_perm)) + return EINVAL; + dest_entry->ae_tag = le16_to_cpu(src_entry->e_tag); + switch(dest_entry->ae_tag) { + case ACL_USER: + case ACL_GROUP: + dest_entry->ae_id = le32_to_cpu(src_entry->e_id); + break; + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: + case ACL_MASK: + case ACL_OTHER: + dest_entry->ae_id = ACL_UNDEFINED_ID; + break; + default: + return EINVAL; + } + } + if (xfs_acl_invalid(dest)) + return EINVAL; + + return 0; +} + +/* + * Comparison function called from qsort(). + * Primary key is ae_tag, secondary key is ae_id. + */ +STATIC int +xfs_acl_entry_compare( + const void *va, + const void *vb) +{ + xfs_acl_entry_t *a = (xfs_acl_entry_t *)va, + *b = (xfs_acl_entry_t *)vb; + + if (a->ae_tag == b->ae_tag) + return (a->ae_id - b->ae_id); + return (a->ae_tag - b->ae_tag); +} + +/* + * Convert from in-memory XFS to extended attribute representation. 
+ */ +STATIC int +posix_acl_xfs_to_xattr( + xfs_acl_t *src, + posix_acl_xattr_header *dest, + size_t size) +{ + int n; + size_t new_size = posix_acl_xattr_size(src->acl_cnt); + posix_acl_xattr_entry *dest_entry; + xfs_acl_entry_t *src_entry; + + if (size < new_size) + return -ERANGE; + + /* Need to sort src XFS ACL by */ + qsort(src->acl_entry, src->acl_cnt, sizeof(src->acl_entry[0]), + xfs_acl_entry_compare); + + dest->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION); + dest_entry = &dest->a_entries[0]; + src_entry = &src->acl_entry[0]; + for (n = 0; n < src->acl_cnt; n++, dest_entry++, src_entry++) { + dest_entry->e_perm = cpu_to_le16(src_entry->ae_perm); + if (_ACL_PERM_INVALID(src_entry->ae_perm)) + return -EINVAL; + dest_entry->e_tag = cpu_to_le16(src_entry->ae_tag); + switch (src_entry->ae_tag) { + case ACL_USER: + case ACL_GROUP: + dest_entry->e_id = cpu_to_le32(src_entry->ae_id); + break; + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: + case ACL_MASK: + case ACL_OTHER: + dest_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID); + break; + default: + return -EINVAL; + } + } + return new_size; +} + +int +xfs_acl_vget( + vnode_t *vp, + void *acl, + size_t size, + int kind) +{ + int error; + xfs_acl_t *xfs_acl = NULL; + posix_acl_xattr_header *ext_acl = acl; + int flags = 0; + + VN_HOLD(vp); + if ((error = _MAC_VACCESS(vp, NULL, VREAD))) + goto out; + if(size) { + if (!(_ACL_ALLOC(xfs_acl))) { + error = ENOMEM; + goto out; + } + memset(xfs_acl, 0, sizeof(xfs_acl_t)); + } else + flags = ATTR_KERNOVAL; + + xfs_acl_get_attr(vp, xfs_acl, kind, flags, &error); + if (error) + goto out; + + if (!size) { + error = -posix_acl_xattr_size(XFS_ACL_MAX_ENTRIES); + } else { + if (xfs_acl_invalid(xfs_acl)) { + error = EINVAL; + goto out; + } + if (kind == _ACL_TYPE_ACCESS) { + vattr_t va; + + va.va_mask = XFS_AT_MODE; + VOP_GETATTR(vp, &va, 0, sys_cred, error); + if (error) + goto out; + xfs_acl_sync_mode(va.va_mode, xfs_acl); + } + error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size); + } +out: + VN_RELE(vp); + if(xfs_acl) + _ACL_FREE(xfs_acl); + return -error; +} + +int +xfs_acl_vremove( + vnode_t *vp, + int kind) +{ + int error; + + VN_HOLD(vp); + error = xfs_acl_allow_set(vp, kind); + if (!error) { + VOP_ATTR_REMOVE(vp, kind == _ACL_TYPE_DEFAULT? + SGI_ACL_DEFAULT: SGI_ACL_FILE, + ATTR_ROOT, sys_cred, error); + if (error == ENOATTR) + error = 0; /* 'scool */ + } + VN_RELE(vp); + return -error; +} + +int +xfs_acl_vset( + vnode_t *vp, + void *acl, + size_t size, + int kind) +{ + posix_acl_xattr_header *ext_acl = acl; + xfs_acl_t *xfs_acl; + int error; + int basicperms = 0; /* more than std unix perms? */ + + if (!acl) + return -EINVAL; + + if (!(_ACL_ALLOC(xfs_acl))) + return -ENOMEM; + + error = posix_acl_xattr_to_xfs(ext_acl, size, xfs_acl); + if (error) { + _ACL_FREE(xfs_acl); + return -error; + } + if (!xfs_acl->acl_cnt) { + _ACL_FREE(xfs_acl); + return 0; + } + + VN_HOLD(vp); + error = xfs_acl_allow_set(vp, kind); + if (error) + goto out; + + /* Incoming ACL exists, set file mode based on its value */ + if (kind == _ACL_TYPE_ACCESS) + xfs_acl_setmode(vp, xfs_acl, &basicperms); + + /* + * If we have more than std unix permissions, set up the actual attr. + * Otherwise, delete any existing attr. This prevents us from + * having actual attrs for permissions that can be stored in the + * standard permission bits. 
+ */ + if (!basicperms) { + xfs_acl_set_attr(vp, xfs_acl, kind, &error); + } else { + xfs_acl_vremove(vp, _ACL_TYPE_ACCESS); + } + + +out: + VN_RELE(vp); + _ACL_FREE(xfs_acl); + return -error; +} + +int +xfs_acl_iaccess( + xfs_inode_t *ip, + mode_t mode, + cred_t *cr) +{ + xfs_acl_t *acl; + int error; + + if (!(_ACL_ALLOC(acl))) + return -1; + + /* If the file has no ACL return -1. */ + if (xfs_attr_fetch(ip, SGI_ACL_FILE, (char *)acl, sizeof(xfs_acl_t))) { + _ACL_FREE(acl); + return -1; + } + xfs_acl_get_endian(acl); + + /* If the file has an empty ACL return -1. */ + if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) { + _ACL_FREE(acl); + return -1; + } + + /* Synchronize ACL with mode bits */ + xfs_acl_sync_mode(ip->i_d.di_mode, acl); + + error = xfs_acl_access(ip->i_d.di_uid, ip->i_d.di_gid, acl, mode, cr); + _ACL_FREE(acl); + return error; +} + +STATIC int +xfs_acl_allow_set( + vnode_t *vp, + int kind) +{ + vattr_t va; + int error; + + if (kind == _ACL_TYPE_DEFAULT && vp->v_type != VDIR) + return ENOTDIR; + if (vp->v_vfsp->vfs_flag & VFS_RDONLY) + return EROFS; + if ((error = _MAC_VACCESS(vp, NULL, VWRITE))) + return error; + va.va_mask = XFS_AT_UID; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return error; + if (va.va_uid != current->fsuid && !capable(CAP_FOWNER)) + return EPERM; + return error; +} + +/* + * Look for any effective exec access, to allow CAP_DAC_OVERRIDE for exec. + * Ignore checking for exec in USER_OBJ when there is no mask, because + * in this "minimal acl" case we don't have any actual acls, and we + * won't even be here. + */ +STATIC int +xfs_acl_find_any_exec( + xfs_acl_t *fap) +{ + int i; + int masked_aces = 0; + int mask = 0; + + for (i = 0; i < fap->acl_cnt; i++) { + if (fap->acl_entry[i].ae_perm & ACL_EXECUTE) { + if (fap->acl_entry[i].ae_tag & (ACL_USER_OBJ|ACL_OTHER)) + return 1; + + if (fap->acl_entry[i].ae_tag == ACL_MASK) + mask = fap->acl_entry[i].ae_perm; + else + masked_aces |= fap->acl_entry[i].ae_perm; + + if ((mask & masked_aces) & ACL_EXECUTE) + return 1; + } + } + + return 0; +} + +/* + * The access control process to determine the access permission: + * if uid == file owner id, use the file owner bits. + * if gid == file owner group id, use the file group bits. + * scan ACL for a maching user or group, and use matched entry + * permission. Use total permissions of all matching group entries, + * until all acl entries are exhausted. The final permission produced + * by matching acl entry or entries needs to be & with group permission. + * if not owner, owning group, or matching entry in ACL, use file + * other bits. Don't allow CAP_DAC_OVERRIDE on exec access unless + * there is some effective exec access somewhere. + */ +STATIC int +xfs_acl_capability_check( + mode_t mode, + cred_t *cr, + xfs_acl_t *fap) +{ + if ((mode & ACL_READ) && !capable_cred(cr, CAP_DAC_READ_SEARCH)) + return EACCES; + if ((mode & ACL_WRITE) && !capable_cred(cr, CAP_DAC_OVERRIDE)) + return EACCES; + if ((mode & ACL_EXECUTE) && + (!capable_cred(cr, CAP_DAC_OVERRIDE) || + !xfs_acl_find_any_exec(fap))) { + return EACCES; + } + + return 0; +} + +/* + * Note: cr is only used here for the capability check if the ACL test fails. + * It is not used to find out the credentials uid or groups etc, as was + * done in IRIX. It is assumed that the uid and groups for the current + * thread are taken from "current" instead of the cr parameter. 
+ */ +STATIC int +xfs_acl_access( + uid_t fuid, + gid_t fgid, + xfs_acl_t *fap, + mode_t md, + cred_t *cr) +{ + xfs_acl_entry_t matched; + int i, allows; + int maskallows = -1; /* true, but not 1, either */ + int seen_userobj = 0; + + matched.ae_tag = 0; /* Invalid type */ + md >>= 6; /* Normalize the bits for comparison */ + + for (i = 0; i < fap->acl_cnt; i++) { + /* + * Break out if we've got a user_obj entry or + * a user entry and the mask (and have processed USER_OBJ) + */ + if (matched.ae_tag == ACL_USER_OBJ) + break; + if (matched.ae_tag == ACL_USER) { + if (maskallows != -1 && seen_userobj) + break; + if (fap->acl_entry[i].ae_tag != ACL_MASK && + fap->acl_entry[i].ae_tag != ACL_USER_OBJ) + continue; + } + /* True if this entry allows the requested access */ + allows = ((fap->acl_entry[i].ae_perm & md) == md); + + switch (fap->acl_entry[i].ae_tag) { + case ACL_USER_OBJ: + seen_userobj = 1; + if (fuid != current->fsuid) + continue; + matched.ae_tag = ACL_USER_OBJ; + matched.ae_perm = allows; + break; + case ACL_USER: + if (fap->acl_entry[i].ae_id != current->fsuid) + continue; + matched.ae_tag = ACL_USER; + matched.ae_perm = allows; + break; + case ACL_GROUP_OBJ: + if ((matched.ae_tag == ACL_GROUP_OBJ || + matched.ae_tag == ACL_GROUP) && !allows) + continue; + if (!in_group_p(fgid)) + continue; + matched.ae_tag = ACL_GROUP_OBJ; + matched.ae_perm = allows; + break; + case ACL_GROUP: + if ((matched.ae_tag == ACL_GROUP_OBJ || + matched.ae_tag == ACL_GROUP) && !allows) + continue; + if (!in_group_p(fap->acl_entry[i].ae_id)) + continue; + matched.ae_tag = ACL_GROUP; + matched.ae_perm = allows; + break; + case ACL_MASK: + maskallows = allows; + break; + case ACL_OTHER: + if (matched.ae_tag != 0) + continue; + matched.ae_tag = ACL_OTHER; + matched.ae_perm = allows; + break; + } + } + /* + * First possibility is that no matched entry allows access. + * The capability to override DAC may exist, so check for it. + */ + switch (matched.ae_tag) { + case ACL_OTHER: + case ACL_USER_OBJ: + if (matched.ae_perm) + return 0; + break; + case ACL_USER: + case ACL_GROUP_OBJ: + case ACL_GROUP: + if (maskallows && matched.ae_perm) + return 0; + break; + case 0: + break; + } + + return xfs_acl_capability_check(md, cr, fap); +} + +/* + * ACL validity checker. + * This acl validation routine checks each ACL entry read in makes sense. + */ +STATIC int +xfs_acl_invalid( + xfs_acl_t *aclp) +{ + xfs_acl_entry_t *entry, *e; + int user = 0, group = 0, other = 0, mask = 0; + int mask_required = 0; + int i, j; + + if (!aclp) + goto acl_invalid; + + if (aclp->acl_cnt > XFS_ACL_MAX_ENTRIES) + goto acl_invalid; + + for (i = 0; i < aclp->acl_cnt; i++) { + entry = &aclp->acl_entry[i]; + switch (entry->ae_tag) { + case ACL_USER_OBJ: + if (user++) + goto acl_invalid; + break; + case ACL_GROUP_OBJ: + if (group++) + goto acl_invalid; + break; + case ACL_OTHER: + if (other++) + goto acl_invalid; + break; + case ACL_USER: + case ACL_GROUP: + for (j = i + 1; j < aclp->acl_cnt; j++) { + e = &aclp->acl_entry[j]; + if (e->ae_id == entry->ae_id && + e->ae_tag == entry->ae_tag) + goto acl_invalid; + } + mask_required++; + break; + case ACL_MASK: + if (mask++) + goto acl_invalid; + break; + default: + goto acl_invalid; + } + } + if (!user || !group || !other || (mask_required && !mask)) + goto acl_invalid; + else + return 0; +acl_invalid: + return EINVAL; +} + +/* + * Do ACL endian conversion. 
+ */ +STATIC void +xfs_acl_get_endian( + xfs_acl_t *aclp) +{ + xfs_acl_entry_t *ace, *end; + + INT_SET(aclp->acl_cnt, ARCH_CONVERT, aclp->acl_cnt); + end = &aclp->acl_entry[0]+aclp->acl_cnt; + for (ace = &aclp->acl_entry[0]; ace < end; ace++) { + INT_SET(ace->ae_tag, ARCH_CONVERT, ace->ae_tag); + INT_SET(ace->ae_id, ARCH_CONVERT, ace->ae_id); + INT_SET(ace->ae_perm, ARCH_CONVERT, ace->ae_perm); + } +} + +/* + * Get the ACL from the EA and do endian conversion. + */ +STATIC void +xfs_acl_get_attr( + vnode_t *vp, + xfs_acl_t *aclp, + int kind, + int flags, + int *error) +{ + int len = sizeof(xfs_acl_t); + + ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1); + flags |= ATTR_ROOT; + VOP_ATTR_GET(vp, + kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE : SGI_ACL_DEFAULT, + (char *)aclp, &len, flags, sys_cred, *error); + if (*error || (flags & ATTR_KERNOVAL)) + return; + xfs_acl_get_endian(aclp); +} + +/* + * Set the EA with the ACL and do endian conversion. + */ +STATIC void +xfs_acl_set_attr( + vnode_t *vp, + xfs_acl_t *aclp, + int kind, + int *error) +{ + xfs_acl_entry_t *ace, *newace, *end; + xfs_acl_t *newacl; + int len; + + if (!(_ACL_ALLOC(newacl))) { + *error = ENOMEM; + return; + } + + len = sizeof(xfs_acl_t) - + (sizeof(xfs_acl_entry_t) * (XFS_ACL_MAX_ENTRIES - aclp->acl_cnt)); + end = &aclp->acl_entry[0]+aclp->acl_cnt; + for (ace = &aclp->acl_entry[0], newace = &newacl->acl_entry[0]; + ace < end; + ace++, newace++) { + INT_SET(newace->ae_tag, ARCH_CONVERT, ace->ae_tag); + INT_SET(newace->ae_id, ARCH_CONVERT, ace->ae_id); + INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm); + } + INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt); + VOP_ATTR_SET(vp, + kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE: SGI_ACL_DEFAULT, + (char *)newacl, len, ATTR_ROOT, sys_cred, *error); + _ACL_FREE(newacl); +} + +int +xfs_acl_vtoacl( + vnode_t *vp, + xfs_acl_t *access_acl, + xfs_acl_t *default_acl) +{ + vattr_t va; + int error = 0; + + if (access_acl) { + /* + * Get the Access ACL and the mode. If either cannot + * be obtained for some reason, invalidate the access ACL. + */ + xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error); + if (!error) { + /* Got the ACL, need the mode... */ + va.va_mask = XFS_AT_MODE; + VOP_GETATTR(vp, &va, 0, sys_cred, error); + } + + if (error) + access_acl->acl_cnt = XFS_ACL_NOT_PRESENT; + else /* We have a good ACL and the file mode, synchronize. */ + xfs_acl_sync_mode(va.va_mode, access_acl); + } + + if (default_acl) { + xfs_acl_get_attr(vp, default_acl, _ACL_TYPE_DEFAULT, 0, &error); + if (error) + default_acl->acl_cnt = XFS_ACL_NOT_PRESENT; + } + return error; +} + +/* + * This function retrieves the parent directory's acl, processes it + * and lets the child inherit the acl(s) that it should. + */ +int +xfs_acl_inherit( + vnode_t *vp, + vattr_t *vap, + xfs_acl_t *pdaclp) +{ + xfs_acl_t *cacl; + int error = 0; + int basicperms = 0; + + /* + * If the parent does not have a default ACL, or it's an + * invalid ACL, we're done. + */ + if (!vp) + return 0; + if (!pdaclp || xfs_acl_invalid(pdaclp)) + return 0; + + /* + * Copy the default ACL of the containing directory to + * the access ACL of the new file and use the mode that + * was passed in to set up the correct initial values for + * the u::,g::[m::], and o:: entries. This is what makes + * umask() "work" with ACL's. 
+ */ + + if (!(_ACL_ALLOC(cacl))) + return ENOMEM; + + memcpy(cacl, pdaclp, sizeof(xfs_acl_t)); + xfs_acl_filter_mode(vap->va_mode, cacl); + xfs_acl_setmode(vp, cacl, &basicperms); + + /* + * Set the Default and Access ACL on the file. The mode is already + * set on the file, so we don't need to worry about that. + * + * If the new file is a directory, its default ACL is a copy of + * the containing directory's default ACL. + */ + if (vp->v_type == VDIR) + xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error); + if (!error && !basicperms) + xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error); + _ACL_FREE(cacl); + return error; +} + +/* + * Set up the correct mode on the file based on the supplied ACL. This + * makes sure that the mode on the file reflects the state of the + * u::,g::[m::], and o:: entries in the ACL. Since the mode is where + * the ACL is going to get the permissions for these entries, we must + * synchronize the mode whenever we set the ACL on a file. + */ +STATIC int +xfs_acl_setmode( + vnode_t *vp, + xfs_acl_t *acl, + int *basicperms) +{ + vattr_t va; + xfs_acl_entry_t *ap; + xfs_acl_entry_t *gap = NULL; + int i, error, nomask = 1; + + *basicperms = 1; + + if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) + return 0; + + /* + * Copy the u::, g::, o::, and m:: bits from the ACL into the + * mode. The m:: bits take precedence over the g:: bits. + */ + va.va_mask = XFS_AT_MODE; + VOP_GETATTR(vp, &va, 0, sys_cred, error); + if (error) + return error; + + va.va_mask = XFS_AT_MODE; + va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); + ap = acl->acl_entry; + for (i = 0; i < acl->acl_cnt; ++i) { + switch (ap->ae_tag) { + case ACL_USER_OBJ: + va.va_mode |= ap->ae_perm << 6; + break; + case ACL_GROUP_OBJ: + gap = ap; + break; + case ACL_MASK: /* more than just standard modes */ + nomask = 0; + va.va_mode |= ap->ae_perm << 3; + *basicperms = 0; + break; + case ACL_OTHER: + va.va_mode |= ap->ae_perm; + break; + default: /* more than just standard modes */ + *basicperms = 0; + break; + } + ap++; + } + + /* Set the group bits from ACL_GROUP_OBJ if there's no ACL_MASK */ + if (gap && nomask) + va.va_mode |= gap->ae_perm << 3; + + VOP_SETATTR(vp, &va, 0, sys_cred, error); + return error; +} + +/* + * The permissions for the special ACL entries (u::, g::[m::], o::) are + * actually stored in the file mode (if there is both a group and a mask, + * the group is stored in the ACL entry and the mask is stored on the file). + * This allows the mode to remain automatically in sync with the ACL without + * the need for a call-back to the ACL system at every point where the mode + * could change. This function takes the permissions from the specified mode + * and places it in the supplied ACL. + * + * This implementation draws its validity from the fact that, when the ACL + * was assigned, the mode was copied from the ACL. + * If the mode did not change, therefore, the mode remains exactly what was + * taken from the special ACL entries at assignment. + * If a subsequent chmod() was done, the POSIX spec says that the change in + * mode must cause an update to the ACL seen at user level and used for + * access checks. Before and after a mode change, therefore, the file mode + * most accurately reflects what the special ACL entries should permit/deny. + * + * CAVEAT: If someone sets the SGI_ACL_FILE attribute directly, + * the existing mode bits will override whatever is in the + * ACL. 
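xfs_acl_setmode() above rebuilds the nine permission bits of the file mode from the special ACL entries: u:: supplies the owner bits, m:: (or g:: when no mask exists) supplies the group bits, and o:: supplies the other bits. xfs_acl_filter_mode(), further down, does the complementary job at inherit time by intersecting the inherited entries with the mode from the create call, which is how umask() takes effect. A compact illustration of both directions, with simplified types and hypothetical helper names:

enum { E_USER_OBJ, E_USER, E_GROUP_OBJ, E_GROUP, E_MASK, E_OTHER };

struct ace { int tag; unsigned int id; unsigned int perm; };	/* perm: rwx in low 3 bits */

/* Derive the 0777 part of a file mode from the special ACL entries. */
static unsigned int acl_to_mode(const struct ace *e, int cnt)
{
	unsigned int mode = 0, group = 0;
	int i, have_mask = 0;

	for (i = 0; i < cnt; i++) {
		switch (e[i].tag) {
		case E_USER_OBJ:
			mode |= (e[i].perm & 7) << 6;
			break;
		case E_GROUP_OBJ:
			group = e[i].perm & 7;
			break;
		case E_MASK:
			have_mask = 1;
			mode |= (e[i].perm & 7) << 3;	/* mask takes precedence */
			break;
		case E_OTHER:
			mode |= e[i].perm & 7;
			break;
		}
	}
	if (!have_mask)
		mode |= group << 3;	/* no mask: group bits come from g:: */
	return mode;
}

/* At inherit time, clamp the inherited entries by the requested create mode. */
static void acl_filter_by_mode(struct ace *e, int cnt, unsigned int mode)
{
	int i, have_mask = 0, gidx = -1;

	for (i = 0; i < cnt; i++) {
		switch (e[i].tag) {
		case E_USER_OBJ:
			e[i].perm &= (mode >> 6) & 7;
			break;
		case E_GROUP_OBJ:
			gidx = i;	/* filtered below only if no mask */
			break;
		case E_MASK:
			have_mask = 1;
			e[i].perm &= (mode >> 3) & 7;
			break;
		case E_OTHER:
			e[i].perm &= mode & 7;
			break;
		}
	}
	if (!have_mask && gidx >= 0)
		e[gidx].perm &= (mode >> 3) & 7;
}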
Similarly, if there is a pre-existing ACL that was + * never in sync with its mode (owing to a bug in 6.5 and + * before), it will now magically (or mystically) be + * synchronized. This could cause slight astonishment, but + * it is better than inconsistent permissions. + * + * The supplied ACL is a template that may contain any combination + * of special entries. These are treated as place holders when we fill + * out the ACL. This routine does not add or remove special entries, it + * simply unites each special entry with its associated set of permissions. + */ +STATIC void +xfs_acl_sync_mode( + mode_t mode, + xfs_acl_t *acl) +{ + int i, nomask = 1; + xfs_acl_entry_t *ap; + xfs_acl_entry_t *gap = NULL; + + /* + * Set ACL entries. POSIX1003.1eD16 requires that the MASK + * be set instead of the GROUP entry, if there is a MASK. + */ + for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) { + switch (ap->ae_tag) { + case ACL_USER_OBJ: + ap->ae_perm = (mode >> 6) & 0x7; + break; + case ACL_GROUP_OBJ: + gap = ap; + break; + case ACL_MASK: + nomask = 0; + ap->ae_perm = (mode >> 3) & 0x7; + break; + case ACL_OTHER: + ap->ae_perm = mode & 0x7; + break; + default: + break; + } + } + /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */ + if (gap && nomask) + gap->ae_perm = (mode >> 3) & 0x7; +} + +/* + * When inheriting an Access ACL from a directory Default ACL, + * the ACL bits are set to the intersection of the ACL default + * permission bits and the file permission bits in mode. If there + * are no permission bits on the file then we must not give them + * the ACL. This is what what makes umask() work with ACLs. + */ +STATIC void +xfs_acl_filter_mode( + mode_t mode, + xfs_acl_t *acl) +{ + int i, nomask = 1; + xfs_acl_entry_t *ap; + xfs_acl_entry_t *gap = NULL; + + /* + * Set ACL entries. POSIX1003.1eD16 requires that the MASK + * be merged with GROUP entry, if there is a MASK. + */ + for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) { + switch (ap->ae_tag) { + case ACL_USER_OBJ: + ap->ae_perm &= (mode >> 6) & 0x7; + break; + case ACL_GROUP_OBJ: + gap = ap; + break; + case ACL_MASK: + nomask = 0; + ap->ae_perm &= (mode >> 3) & 0x7; + break; + case ACL_OTHER: + ap->ae_perm &= mode & 0x7; + break; + default: + break; + } + } + /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */ + if (gap && nomask) + gap->ae_perm &= (mode >> 3) & 0x7; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_acl.h linux.22-ac2/fs/xfs/xfs_acl.h --- linux.vanilla/fs/xfs/xfs_acl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_acl.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ACL_H__ +#define __XFS_ACL_H__ + +/* + * Access Control Lists + */ +typedef __uint16_t xfs_acl_perm_t; +typedef __int32_t xfs_acl_type_t; +typedef __int32_t xfs_acl_tag_t; +typedef __int32_t xfs_acl_id_t; + +#define XFS_ACL_MAX_ENTRIES 25 +#define XFS_ACL_NOT_PRESENT (-1) + +typedef struct xfs_acl_entry { + xfs_acl_tag_t ae_tag; + xfs_acl_id_t ae_id; + xfs_acl_perm_t ae_perm; +} xfs_acl_entry_t; + +typedef struct xfs_acl { + __int32_t acl_cnt; + xfs_acl_entry_t acl_entry[XFS_ACL_MAX_ENTRIES]; +} xfs_acl_t; + +/* On-disk XFS extended attribute names */ +#define SGI_ACL_FILE "SGI_ACL_FILE" +#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT" +#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1) +#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) + + +#ifdef __KERNEL__ + +#ifdef CONFIG_XFS_POSIX_ACL + +struct vattr; +struct vnode; +struct xfs_inode; + +extern int xfs_acl_inherit(struct vnode *, struct vattr *, xfs_acl_t *); +extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *); +extern int xfs_acl_get(struct vnode *, xfs_acl_t *, xfs_acl_t *); +extern int xfs_acl_set(struct vnode *, xfs_acl_t *, xfs_acl_t *); +extern int xfs_acl_vtoacl(struct vnode *, xfs_acl_t *, xfs_acl_t *); +extern int xfs_acl_vhasacl_access(struct vnode *); +extern int xfs_acl_vhasacl_default(struct vnode *); +extern int xfs_acl_vset(struct vnode *, void *, size_t, int); +extern int xfs_acl_vget(struct vnode *, void *, size_t, int); +extern int xfs_acl_vremove(struct vnode *vp, int); + +extern struct kmem_zone *xfs_acl_zone; + +#define _ACL_TYPE_ACCESS 1 +#define _ACL_TYPE_DEFAULT 2 +#define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE)) + +#define _ACL_DECL(a) xfs_acl_t *(a) = NULL +#define _ACL_ALLOC(a) ((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP)) +#define _ACL_FREE(a) ((a)? kmem_zone_free(xfs_acl_zone, (a)) : 0) +#define _ACL_ZONE_INIT(z,name) ((z) = kmem_zone_init(sizeof(xfs_acl_t), name)) +#define _ACL_ZONE_DESTROY(z) (kmem_cache_destroy(z)) +#define _ACL_INHERIT(c,v,d) (xfs_acl_inherit(c,v,d)) +#define _ACL_GET_ACCESS(pv,pa) (xfs_acl_vtoacl(pv,pa,NULL) == 0) +#define _ACL_GET_DEFAULT(pv,pd) (xfs_acl_vtoacl(pv,NULL,pd) == 0) +#define _ACL_ACCESS_EXISTS xfs_acl_vhasacl_access +#define _ACL_DEFAULT_EXISTS xfs_acl_vhasacl_default +#define _ACL_XFS_IACCESS(i,m,c) (XFS_IFORK_Q(i) ? 
xfs_acl_iaccess(i,m,c) : -1) + +#else +#define xfs_acl_vset(v,p,sz,t) (-EOPNOTSUPP) +#define xfs_acl_vget(v,p,sz,t) (-EOPNOTSUPP) +#define xfs_acl_vremove(v,t) (-EOPNOTSUPP) +#define _ACL_DECL(a) ((void)0) +#define _ACL_ALLOC(a) (1) /* successfully allocate nothing */ +#define _ACL_FREE(a) ((void)0) +#define _ACL_ZONE_INIT(z,name) ((void)0) +#define _ACL_ZONE_DESTROY(z) ((void)0) +#define _ACL_INHERIT(c,v,d) (0) +#define _ACL_GET_ACCESS(pv,pa) (0) +#define _ACL_GET_DEFAULT(pv,pd) (0) +#define _ACL_ACCESS_EXISTS (NULL) +#define _ACL_DEFAULT_EXISTS (NULL) +#define _ACL_XFS_IACCESS(i,m,c) (-1) +#endif + +#endif /* __KERNEL__ */ + +#endif /* __XFS_ACL_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_ag.h linux.22-ac2/fs/xfs/xfs_ag.h --- linux.vanilla/fs/xfs/xfs_ag.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_ag.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_AG_H__ +#define __XFS_AG_H__ + +/* + * Allocation group header + * This is divided into three structures, placed in sequential 512-byte + * buffers after a copy of the superblock (also in a 512-byte buffer). + */ + +struct xfs_buf; +struct xfs_mount; +struct xfs_trans; + +#define XFS_AGF_MAGIC 0x58414746 /* 'XAGF' */ +#define XFS_AGI_MAGIC 0x58414749 /* 'XAGI' */ +#define XFS_AGF_VERSION 1 +#define XFS_AGI_VERSION 1 +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGF_GOOD_VERSION) +int xfs_agf_good_version(unsigned v); +#define XFS_AGF_GOOD_VERSION(v) xfs_agf_good_version(v) +#else +#define XFS_AGF_GOOD_VERSION(v) ((v) == XFS_AGF_VERSION) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGI_GOOD_VERSION) +int xfs_agi_good_version(unsigned v); +#define XFS_AGI_GOOD_VERSION(v) xfs_agi_good_version(v) +#else +#define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION) +#endif + +/* + * Btree number 0 is bno, 1 is cnt. This value gives the size of the + * arrays below. + */ +#define XFS_BTNUM_AGF ((int)XFS_BTNUM_CNTi + 1) + +/* + * The second word of agf_levels in the first a.g. overlaps the EFS + * superblock's magic number. 
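The XFS_WANT_FUNCS / XFS_WANT_SPACE conditionals used for XFS_AGF_GOOD_VERSION() just above, and for most of the accessors that follow in this header, allow the same interface to be built either as an inline macro or as a real out-of-line function (typically one translation unit is compiled with the function form so the accessor exists as a symbol). A stripped-down illustration of the idiom; the XFS_EXAMPLE_GOOD / xfs_example_good / XFSSO_XFS_EXAMPLE_GOOD names are made up for the sketch:

/* hypothetical accessor built either way, following the xfs_ag.h pattern */
#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXAMPLE_GOOD)
int xfs_example_good(unsigned v);			/* out-of-line form */
#define XFS_EXAMPLE_GOOD(v)	xfs_example_good(v)
#else
#define XFS_EXAMPLE_GOOD(v)	((v) == 1)		/* inline macro form */
#endif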
Since the magic numbers valid for EFS + * are > 64k, our value cannot be confused for an EFS superblock's. + */ + +typedef struct xfs_agf +{ + /* + * Common allocation group header information + */ + __uint32_t agf_magicnum; /* magic number == XFS_AGF_MAGIC */ + __uint32_t agf_versionnum; /* header version == XFS_AGF_VERSION */ + xfs_agnumber_t agf_seqno; /* sequence # starting from 0 */ + xfs_agblock_t agf_length; /* size in blocks of a.g. */ + /* + * Freespace information + */ + xfs_agblock_t agf_roots[XFS_BTNUM_AGF]; /* root blocks */ + __uint32_t agf_spare0; /* spare field */ + __uint32_t agf_levels[XFS_BTNUM_AGF]; /* btree levels */ + __uint32_t agf_spare1; /* spare field */ + __uint32_t agf_flfirst; /* first freelist block's index */ + __uint32_t agf_fllast; /* last freelist block's index */ + __uint32_t agf_flcount; /* count of blocks in freelist */ + xfs_extlen_t agf_freeblks; /* total free blocks */ + xfs_extlen_t agf_longest; /* longest free space */ +} xfs_agf_t; + +#define XFS_AGF_MAGICNUM 0x00000001 +#define XFS_AGF_VERSIONNUM 0x00000002 +#define XFS_AGF_SEQNO 0x00000004 +#define XFS_AGF_LENGTH 0x00000008 +#define XFS_AGF_ROOTS 0x00000010 +#define XFS_AGF_LEVELS 0x00000020 +#define XFS_AGF_FLFIRST 0x00000040 +#define XFS_AGF_FLLAST 0x00000080 +#define XFS_AGF_FLCOUNT 0x00000100 +#define XFS_AGF_FREEBLKS 0x00000200 +#define XFS_AGF_LONGEST 0x00000400 +#define XFS_AGF_NUM_BITS 11 +#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) + +/* disk block (xfs_daddr_t) in the AG */ +#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGF_BLOCK) +xfs_agblock_t xfs_agf_block(struct xfs_mount *mp); +#define XFS_AGF_BLOCK(mp) xfs_agf_block(mp) +#else +#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) +#endif + +/* + * Size of the unlinked inode hash table in the agi. + */ +#define XFS_AGI_UNLINKED_BUCKETS 64 + +typedef struct xfs_agi +{ + /* + * Common allocation group header information + */ + __uint32_t agi_magicnum; /* magic number == XFS_AGI_MAGIC */ + __uint32_t agi_versionnum; /* header version == XFS_AGI_VERSION */ + xfs_agnumber_t agi_seqno; /* sequence # starting from 0 */ + xfs_agblock_t agi_length; /* size in blocks of a.g. */ + /* + * Inode information + * Inodes are mapped by interpreting the inode number, so no + * mapping data is needed here. + */ + xfs_agino_t agi_count; /* count of allocated inodes */ + xfs_agblock_t agi_root; /* root of inode btree */ + __uint32_t agi_level; /* levels in inode btree */ + xfs_agino_t agi_freecount; /* number of free inodes */ + xfs_agino_t agi_newino; /* new inode just allocated */ + xfs_agino_t agi_dirino; /* last directory inode chunk */ + /* + * Hash table of inodes which have been unlinked but are + * still being referenced. 
+ */ + xfs_agino_t agi_unlinked[XFS_AGI_UNLINKED_BUCKETS]; +} xfs_agi_t; + +#define XFS_AGI_MAGICNUM 0x00000001 +#define XFS_AGI_VERSIONNUM 0x00000002 +#define XFS_AGI_SEQNO 0x00000004 +#define XFS_AGI_LENGTH 0x00000008 +#define XFS_AGI_COUNT 0x00000010 +#define XFS_AGI_ROOT 0x00000020 +#define XFS_AGI_LEVEL 0x00000040 +#define XFS_AGI_FREECOUNT 0x00000080 +#define XFS_AGI_NEWINO 0x00000100 +#define XFS_AGI_DIRINO 0x00000200 +#define XFS_AGI_UNLINKED 0x00000400 +#define XFS_AGI_NUM_BITS 11 +#define XFS_AGI_ALL_BITS ((1 << XFS_AGI_NUM_BITS) - 1) + +/* disk block (xfs_daddr_t) in the AG */ +#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log)) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGI_BLOCK) +xfs_agblock_t xfs_agi_block(struct xfs_mount *mp); +#define XFS_AGI_BLOCK(mp) xfs_agi_block(mp) +#else +#define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp)) +#endif + +/* + * The third a.g. block contains the a.g. freelist, an array + * of block pointers to blocks owned by the allocation btree code. + */ +#define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log)) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGFL_BLOCK) +xfs_agblock_t xfs_agfl_block(struct xfs_mount *mp); +#define XFS_AGFL_BLOCK(mp) xfs_agfl_block(mp) +#else +#define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp)) +#endif +#define XFS_AGFL_SIZE(mp) ((mp)->m_sb.sb_sectsize / sizeof(xfs_agblock_t)) + +/* -- nathans TODO ... use of BBSIZE here - should be sector size -- */ +typedef struct xfs_agfl { + xfs_agblock_t agfl_bno[BBSIZE/sizeof(xfs_agblock_t)]; +} xfs_agfl_t; + +/* + * Busy block/extent entry. Used in perag to mark blocks that have been freed + * but whose transactions aren't committed to disk yet. + */ +typedef struct xfs_perag_busy { + xfs_agblock_t busy_start; + xfs_extlen_t busy_length; + struct xfs_trans *busy_tp; /* transaction that did the free */ +} xfs_perag_busy_t; + +/* + * Per-ag incore structure, copies of information in agf and agi, + * to improve the performance of allocation group selection. 
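Putting the XFS_AGF_DADDR, XFS_AGI_DADDR and XFS_AGFL_DADDR definitions above together: each allocation group begins with four sector-sized headers, the superblock copy followed by the AGF, AGI and AGFL, at sector offsets 0 through 3 from the start of the group. A small sketch of the disk-address arithmetic; sectbb_log and ag_start_daddr are stand-ins for the mount-structure fields, not the real names:

typedef long long xdaddr;	/* stand-in for xfs_daddr_t (512-byte basic blocks) */

struct ag_hdr_daddrs { xdaddr sb, agf, agi, agfl; };

/* Disk addresses of the four per-AG header sectors. */
static struct ag_hdr_daddrs ag_header_daddrs(xdaddr ag_start_daddr, int sectbb_log)
{
	struct ag_hdr_daddrs d;

	d.sb   = ag_start_daddr + (0 << sectbb_log);	/* superblock copy */
	d.agf  = ag_start_daddr + (1 << sectbb_log);	/* free space header */
	d.agi  = ag_start_daddr + (2 << sectbb_log);	/* inode header */
	d.agfl = ag_start_daddr + (3 << sectbb_log);	/* free list */
	return d;
}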
+ * + * pick sizes which fit in allocation buckets well + */ +#if (BITS_PER_LONG == 32) +#define XFS_PAGB_NUM_SLOTS 84 +#elif (BITS_PER_LONG == 64) +#define XFS_PAGB_NUM_SLOTS 128 +#endif + +typedef struct xfs_perag +{ + char pagf_init; /* this agf's entry is initialized */ + char pagi_init; /* this agi's entry is initialized */ + char pagf_metadata; /* the agf is prefered to be metadata */ + char pagi_inodeok; /* The agi is ok for inodes */ + __uint8_t pagf_levels[XFS_BTNUM_AGF]; + /* # of levels in bno & cnt btree */ + __uint32_t pagf_flcount; /* count of blocks in freelist */ + xfs_extlen_t pagf_freeblks; /* total free blocks */ + xfs_extlen_t pagf_longest; /* longest free space */ + xfs_agino_t pagi_freecount; /* number of free inodes */ +#ifdef __KERNEL__ + lock_t pagb_lock; /* lock for pagb_list */ +#endif + int pagb_count; /* pagb slots in use */ + xfs_perag_busy_t *pagb_list; /* unstable blocks */ +} xfs_perag_t; + +#define XFS_AG_MIN_BYTES (1LL << 24) /* 16 MB */ +#define XFS_AG_BEST_BYTES (1LL << 30) /* 1 GB */ +#define XFS_AG_MAX_BYTES (1LL << 32) /* 4 GB */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_MIN_BLOCKS) +xfs_extlen_t xfs_ag_min_blocks(int bl); +#define XFS_AG_MIN_BLOCKS(bl) xfs_ag_min_blocks(bl) +#else +#define XFS_AG_MIN_BLOCKS(bl) ((xfs_extlen_t)(XFS_AG_MIN_BYTES >> bl)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_BEST_BLOCKS) +xfs_extlen_t xfs_ag_best_blocks(int bl, xfs_drfsbno_t blks); +#define XFS_AG_BEST_BLOCKS(bl,blks) xfs_ag_best_blocks(bl,blks) +#else +/*--#define XFS_AG_BEST_BLOCKS(bl) ((xfs_extlen_t)(XFS_AG_BEST_BYTES >> bl))*/ +/* + * Best is XFS_AG_BEST_BLOCKS at and below 64 Gigabyte filesystems, and + * XFS_AG_MAX_BLOCKS above 64 Gigabytes. + */ +#define XFS_AG_BEST_BLOCKS(bl,blks) \ + ((xfs_extlen_t)((1LL << (36 - bl)) >= blks) ? 
\ + ((xfs_extlen_t)(XFS_AG_BEST_BYTES >> bl)) : \ + XFS_AG_MAX_BLOCKS(bl)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_MAX_BLOCKS) +xfs_extlen_t xfs_ag_max_blocks(int bl); +#define XFS_AG_MAX_BLOCKS(bl) xfs_ag_max_blocks(bl) +#else +#define XFS_AG_MAX_BLOCKS(bl) ((xfs_extlen_t)(XFS_AG_MAX_BYTES >> bl)) +#endif + +#define XFS_MAX_AGNUMBER ((xfs_agnumber_t)(NULLAGNUMBER - 1)) + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_MAXLEVELS) +int xfs_ag_maxlevels(struct xfs_mount *mp); +#define XFS_AG_MAXLEVELS(mp) xfs_ag_maxlevels(mp) +#else +#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MIN_FREELIST) +int xfs_min_freelist(xfs_agf_t *a, struct xfs_mount *mp); +#define XFS_MIN_FREELIST(a,mp) xfs_min_freelist(a,mp) +#else +#define XFS_MIN_FREELIST(a,mp) \ + XFS_MIN_FREELIST_RAW( \ + INT_GET((a)->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT), \ + INT_GET((a)->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT), mp) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MIN_FREELIST_PAG) +int xfs_min_freelist_pag(xfs_perag_t *pag, struct xfs_mount *mp); +#define XFS_MIN_FREELIST_PAG(pag,mp) xfs_min_freelist_pag(pag,mp) +#else +#define XFS_MIN_FREELIST_PAG(pag,mp) \ + XFS_MIN_FREELIST_RAW((uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \ + (uint_t)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MIN_FREELIST_RAW) +int xfs_min_freelist_raw(int bl, int cl, struct xfs_mount *mp); +#define XFS_MIN_FREELIST_RAW(bl,cl,mp) xfs_min_freelist_raw(bl,cl,mp) +#else +#define XFS_MIN_FREELIST_RAW(bl,cl,mp) \ + (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + \ + MIN(cl + 1, XFS_AG_MAXLEVELS(mp))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGB_TO_FSB) +xfs_fsblock_t xfs_agb_to_fsb(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t agbno); +#define XFS_AGB_TO_FSB(mp,agno,agbno) xfs_agb_to_fsb(mp,agno,agbno) +#else +#define XFS_AGB_TO_FSB(mp,agno,agbno) \ + (((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_AGNO) +xfs_agnumber_t xfs_fsb_to_agno(struct xfs_mount *mp, xfs_fsblock_t fsbno); +#define XFS_FSB_TO_AGNO(mp,fsbno) xfs_fsb_to_agno(mp,fsbno) +#else +#define XFS_FSB_TO_AGNO(mp,fsbno) \ + ((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_AGBNO) +xfs_agblock_t xfs_fsb_to_agbno(struct xfs_mount *mp, xfs_fsblock_t fsbno); +#define XFS_FSB_TO_AGBNO(mp,fsbno) xfs_fsb_to_agbno(mp,fsbno) +#else +#define XFS_FSB_TO_AGBNO(mp,fsbno) \ + ((xfs_agblock_t)((fsbno) & XFS_MASK32LO((mp)->m_sb.sb_agblklog))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGB_TO_DADDR) +xfs_daddr_t xfs_agb_to_daddr(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t agbno); +#define XFS_AGB_TO_DADDR(mp,agno,agbno) xfs_agb_to_daddr(mp,agno,agbno) +#else +#define XFS_AGB_TO_DADDR(mp,agno,agbno) \ + ((xfs_daddr_t)(XFS_FSB_TO_BB(mp, \ + (xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))) +#endif +/* + * XFS_DADDR_TO_AGNO and XFS_DADDR_TO_AGBNO moved to xfs_mount.h + * to avoid header file ordering change + */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_DADDR) +xfs_daddr_t xfs_ag_daddr(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_daddr_t d); +#define XFS_AG_DADDR(mp,agno,d) xfs_ag_daddr(mp,agno,d) +#else +#define XFS_AG_DADDR(mp,agno,d) (XFS_AGB_TO_DADDR(mp, agno, 0) + (d)) +#endif + +#if XFS_WANT_FUNCS || 
(XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_AGF) +xfs_agf_t *xfs_buf_to_agf(struct xfs_buf *bp); +#define XFS_BUF_TO_AGF(bp) xfs_buf_to_agf(bp) +#else +#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)XFS_BUF_PTR(bp)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_AGI) +xfs_agi_t *xfs_buf_to_agi(struct xfs_buf *bp); +#define XFS_BUF_TO_AGI(bp) xfs_buf_to_agi(bp) +#else +#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)XFS_BUF_PTR(bp)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_AGFL) +xfs_agfl_t *xfs_buf_to_agfl(struct xfs_buf *bp); +#define XFS_BUF_TO_AGFL(bp) xfs_buf_to_agfl(bp) +#else +#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)XFS_BUF_PTR(bp)) +#endif + +/* + * For checking for bad ranges of xfs_daddr_t's, covering multiple + * allocation groups or a single xfs_daddr_t that's a superblock copy. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AG_CHECK_DADDR) +void xfs_ag_check_daddr(struct xfs_mount *mp, xfs_daddr_t d, xfs_extlen_t len); +#define XFS_AG_CHECK_DADDR(mp,d,len) xfs_ag_check_daddr(mp,d,len) +#else +#define XFS_AG_CHECK_DADDR(mp,d,len) \ + ((len) == 1 ? \ + ASSERT((d) == XFS_SB_DADDR || \ + XFS_DADDR_TO_AGBNO(mp, d) != XFS_SB_DADDR) : \ + ASSERT(XFS_DADDR_TO_AGNO(mp, d) == \ + XFS_DADDR_TO_AGNO(mp, (d) + (len) - 1))) +#endif + +#endif /* __XFS_AG_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_alloc_btree.c linux.22-ac2/fs/xfs/xfs_alloc_btree.c --- linux.vanilla/fs/xfs/xfs_alloc_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_alloc_btree.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,2204 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Free space allocation for XFS. + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_error.h" + +/* + * Prototypes for internal functions. 
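Back in xfs_ag.h above, the XFS_AGB_TO_FSB / XFS_FSB_TO_AGNO / XFS_FSB_TO_AGBNO macros encode a filesystem block number as (agno << sb_agblklog) | agbno, a power-of-two field width, while XFS_AGB_TO_DADDR multiplies by the real AG length when computing a disk address. A compact sketch of those conversions with plain integer types; the geometry field names are stand-ins for the superblock/mount fields:

#include <stdint.h>

struct geom {
	unsigned int	agblklog;	/* log2 (rounded up) of blocks per AG */
	uint32_t	agblocks;	/* real blocks per AG, need not be 2^agblklog */
	unsigned int	blkbb_log;	/* log2 of block size in 512-byte basic blocks */
};

static uint64_t agb_to_fsb(const struct geom *g, uint32_t agno, uint32_t agbno)
{
	return ((uint64_t)agno << g->agblklog) | agbno;
}

static uint32_t fsb_to_agno(const struct geom *g, uint64_t fsbno)
{
	return (uint32_t)(fsbno >> g->agblklog);
}

static uint32_t fsb_to_agbno(const struct geom *g, uint64_t fsbno)
{
	return (uint32_t)(fsbno & ((1ULL << g->agblklog) - 1));
}

/* Disk address: here the actual AG size is used, not the rounded-up field width. */
static uint64_t agb_to_daddr(const struct geom *g, uint32_t agno, uint32_t agbno)
{
	return ((uint64_t)agno * g->agblocks + agbno) << g->blkbb_log;
}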
+ */ + +STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int); +STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC int xfs_alloc_lshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *); +STATIC int xfs_alloc_rshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *, + xfs_alloc_key_t *, xfs_btree_cur_t **, int *); +STATIC int xfs_alloc_updkey(xfs_btree_cur_t *, xfs_alloc_key_t *, int); + +/* + * Internal functions. + */ + +/* + * Single level of the xfs_alloc_delete record deletion routine. + * Delete record pointed to by cur/level. + * Remove the record from its block then rebalance the tree. + * Return 0 for error, 1 for done, 2 to go on to the next level. + */ +STATIC int /* error */ +xfs_alloc_delrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level removing record from */ + int *stat) /* fail/done/go-on */ +{ + xfs_agf_t *agf; /* allocation group freelist header */ + xfs_alloc_block_t *block; /* btree block record/key lives in */ + xfs_agblock_t bno; /* btree block number */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ + int i; /* loop index */ + xfs_alloc_key_t key; /* kp points here if block is level 0 */ + xfs_agblock_t lbno; /* left block's block number */ + xfs_buf_t *lbp; /* left block's buffer pointer */ + xfs_alloc_block_t *left; /* left btree block */ + xfs_alloc_key_t *lkp=NULL; /* left block key pointer */ + xfs_alloc_ptr_t *lpp=NULL; /* left block address pointer */ + int lrecs=0; /* number of records in left block */ + xfs_alloc_rec_t *lrp; /* left block record pointer */ + xfs_mount_t *mp; /* mount structure */ + int ptr; /* index in btree block for this rec */ + xfs_agblock_t rbno; /* right block's block number */ + xfs_buf_t *rbp; /* right block's buffer pointer */ + xfs_alloc_block_t *right; /* right btree block */ + xfs_alloc_key_t *rkp; /* right block key pointer */ + xfs_alloc_ptr_t *rpp; /* right block address pointer */ + int rrecs=0; /* number of records in right block */ + xfs_alloc_rec_t *rrp; /* right block record pointer */ + xfs_btree_cur_t *tcur; /* temporary btree cursor */ + + /* + * Get the index of the entry being deleted, check for nothing there. + */ + ptr = cur->bc_ptrs[level]; + if (ptr == 0) { + *stat = 0; + return 0; + } + /* + * Get the buffer & block containing the record or key/ptr. + */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; +#endif + /* + * Fail if we're off the end of the block. + */ + if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + *stat = 0; + return 0; + } + XFS_STATS_INC(xfsstats.xs_abt_delrec); + /* + * It's a nonleaf. Excise the key and ptr being deleted, by + * sliding the entries past them down one. + * Log the changed areas of the block. 
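The removal step described above, excising an entry by sliding the entries past it down one, is plain array deletion with memmove(); the code below applies it to the key, pointer or record arrays inside the btree block. A free-standing illustration on a simple record array (not the on-disk layout), keeping the kernel's 1-based index convention:

#include <string.h>

struct rec { unsigned int startblock, blockcount; };

/*
 * Remove the 1-based entry 'ptr' from an array of 'numrecs' records,
 * returning the new record count.
 */
static int excise_entry(struct rec *rp, int numrecs, int ptr)
{
	if (ptr < numrecs)
		memmove(&rp[ptr - 1], &rp[ptr],
			(numrecs - ptr) * sizeof(*rp));
	return numrecs - 1;
}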
+ */ + if (level > 0) { + lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); + lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); +#ifdef DEBUG + for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + memmove(&lkp[ptr - 1], &lkp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lkp)); /* INT_: mem copy */ + memmove(&lpp[ptr - 1], &lpp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lpp)); /* INT_: mem copy */ + xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + } + } + /* + * It's a leaf. Excise the record being deleted, by sliding the + * entries past it down one. Log the changed areas of the block. + */ + else { + lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + memmove(&lrp[ptr - 1], &lrp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lrp)); + xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + } + /* + * If it's the first record in the block, we'll need a key + * structure to pass up to the next level (updkey). + */ + if (ptr == 1) { + key.ar_startblock = lrp->ar_startblock; /* INT_: direct copy */ + key.ar_blockcount = lrp->ar_blockcount; /* INT_: direct copy */ + lkp = &key; + } + } + /* + * Decrement and log the number of entries in the block. + */ + INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1); + xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); + /* + * See if the longest free extent in the allocation group was + * changed by this operation. True if it's the by-size btree, and + * this is the leaf level, and there is no right sibling block, + * and this was the last record. + */ + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + mp = cur->bc_mp; + + if (level == 0 && + cur->bc_btnum == XFS_BTNUM_CNT && + INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && + ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + ASSERT(ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT) + 1); + /* + * There are still records in the block. Grab the size + * from the last one. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + rrp = XFS_ALLOC_REC_ADDR(block, INT_GET(block->bb_numrecs, ARCH_CONVERT), cur); + INT_COPY(agf->agf_longest, rrp->ar_blockcount, ARCH_CONVERT); + } + /* + * No free extents left. + */ + else + INT_ZERO(agf->agf_longest, ARCH_CONVERT); + mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest = + INT_GET(agf->agf_longest, ARCH_CONVERT); + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_LONGEST); + } + /* + * Is this the root level? If so, we're almost done. + */ + if (level == cur->bc_nlevels - 1) { + /* + * If this is the root level, + * and there's only one entry left, + * and it's NOT the leaf level, + * then we can get rid of this level. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) { + /* + * lpp is still set to the first pointer in the block. + * Make it the new root of the btree. + */ + bno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); + INT_COPY(agf->agf_roots[cur->bc_btnum], *lpp, ARCH_CONVERT); + INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, -1); + mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_levels[cur->bc_btnum]--; + /* + * Put this buffer/block on the ag's freelist. 
+ */ + if ((error = xfs_alloc_put_freelist(cur->bc_tp, + cur->bc_private.a.agbp, NULL, bno))) + return error; + /* + * Since blocks move to the free list without the + * coordination used in xfs_bmap_finish, we can't allow + * block to be available for reallocation and + * non-transaction writing (user data) until we know + * that the transaction that moved it to the free list + * is permanently on disk. We track the blocks by + * declaring these blocks as "busy"; the busy list is + * maintained on a per-ag basis and each transaction + * records which entries should be removed when the + * iclog commits to disk. If a busy block is + * allocated, the iclog is pushed up to the LSN + * that freed the block. + */ + xfs_alloc_mark_busy(cur->bc_tp, + INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); + + xfs_trans_agbtree_delta(cur->bc_tp, -1); + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_ROOTS | XFS_AGF_LEVELS); + /* + * Update the cursor so there's one fewer level. + */ + xfs_btree_setbuf(cur, level, 0); + cur->bc_nlevels--; + } else if (level > 0 && + (error = xfs_alloc_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * If we deleted the leftmost entry in the block, update the + * key values above us in the tree. + */ + if (ptr == 1 && (error = xfs_alloc_updkey(cur, lkp, level + 1))) + return error; + /* + * If the number of records remaining in the block is at least + * the minimum, we're done. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { + if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * Otherwise, we have to move some records around to keep the + * tree balanced. Look at the left and right sibling blocks to + * see if we can re-balance by moving only one record. + */ + rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); + bno = NULLAGBLOCK; + ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); + /* + * Duplicate the cursor so our btree manipulations here won't + * disrupt the next level up. + */ + if ((error = xfs_btree_dup_cursor(cur, &tcur))) + return error; + /* + * If there's a right sibling, see if it's ok to shift an entry + * out of it. + */ + if (rbno != NULLAGBLOCK) { + /* + * Move the temp cursor to the last entry in the next block. + * Actually any entry but the first would suffice. + */ + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_increment(tcur, level, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Grab a pointer to the block. + */ + rbp = tcur->bc_bufs[level]; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + goto error0; +#endif + /* + * Grab the current block number, for future use. + */ + bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + /* + * If right block is full enough so that removing one entry + * won't make it too empty, and left-shifting an entry out + * of right to us works, we're done. 
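The "busy" bookkeeping described in the long comment earlier in this function (and again after the join below) keeps a just-freed block from being handed out for non-transactional user-data writes before the freeing transaction is on stable storage: each AG tracks a small set of (start, length) extents tied to the freeing transaction, and an allocation that overlaps one must first push the log to that transaction's commit LSN. A toy sketch of the lookup side of that idea; the types, the in_use flag and the stored commit LSN are simplifications, not the kernel's interfaces:

typedef unsigned long long xlsn;

struct busy_extent {
	unsigned int	start;		/* AG-relative start block */
	unsigned int	len;		/* length in blocks */
	xlsn		commit_lsn;	/* LSN the freeing transaction committed at */
	int		in_use;
};

/*
 * If [bno, bno+len) overlaps a busy extent, return the LSN the caller must
 * force the log to before reusing the blocks; return 0 if nothing overlaps.
 */
static xlsn busy_overlap_lsn(const struct busy_extent *tbl, int slots,
			     unsigned int bno, unsigned int len)
{
	int i;

	for (i = 0; i < slots; i++) {
		if (!tbl[i].in_use)
			continue;
		if (bno < tbl[i].start + tbl[i].len &&
		    tbl[i].start < bno + len)
			return tbl[i].commit_lsn;
	}
	return 0;
}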
+ */ + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_ALLOC_BLOCK_MINRECS(level, cur)) { + if ((error = xfs_alloc_lshift(tcur, level, &i))) + goto error0; + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_ALLOC_BLOCK_MINRECS(level, cur)); + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + if (level > 0 && + (error = xfs_alloc_decrement(cur, level, + &i))) + return error; + *stat = 1; + return 0; + } + } + /* + * Otherwise, grab the number of records in right for + * future reference, and fix up the temp cursor to point + * to our block again (last record). + */ + rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + if (lbno != NULLAGBLOCK) { + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_decrement(tcur, level, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + } + /* + * If there's a left sibling, see if it's ok to shift an entry + * out of it. + */ + if (lbno != NULLAGBLOCK) { + /* + * Move the temp cursor to the first entry in the + * previous block. + */ + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_decrement(tcur, level, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + xfs_btree_firstrec(tcur, level); + /* + * Grab a pointer to the block. + */ + lbp = tcur->bc_bufs[level]; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + goto error0; +#endif + /* + * Grab the current block number, for future use. + */ + bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + /* + * If left block is full enough so that removing one entry + * won't make it too empty, and right-shifting an entry out + * of left to us works, we're done. + */ + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_ALLOC_BLOCK_MINRECS(level, cur)) { + if ((error = xfs_alloc_rshift(tcur, level, &i))) + goto error0; + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_ALLOC_BLOCK_MINRECS(level, cur)); + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + if (level == 0) + cur->bc_ptrs[0]++; + *stat = 1; + return 0; + } + } + /* + * Otherwise, grab the number of records in right for + * future reference. + */ + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * Delete the temp cursor, we're done with it. + */ + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + /* + * If here, we need to do a join to keep the tree balanced. + */ + ASSERT(bno != NULLAGBLOCK); + /* + * See if we can join with the left neighbor block. + */ + if (lbno != NULLAGBLOCK && + lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + /* + * Set "right" to be the starting block, + * "left" to be the left neighbor. + */ + rbno = bno; + right = block; + rbp = bp; + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, lbno, 0, &lbp, + XFS_ALLOC_BTREE_REF))) + return error; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; + } + /* + * If that won't work, see if we can join with the right neighbor block. + */ + else if (rbno != NULLAGBLOCK && + rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + /* + * Set "left" to be the starting block, + * "right" to be the right neighbor. 
+ */ + lbno = bno; + left = block; + lbp = bp; + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, rbno, 0, &rbp, + XFS_ALLOC_BTREE_REF))) + return error; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; + } + /* + * Otherwise, we can't fix the imbalance. + * Just return. This is probably a logic error, but it's not fatal. + */ + else { + if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * We're now going to join "left" and "right" by moving all the stuff + * in "right" to "left" and deleting "right". + */ + if (level > 0) { + /* + * It's a non-leaf. Move keys and pointers. + */ + lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); + rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */ + memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */ + xfs_alloc_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_alloc_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } else { + /* + * It's a leaf. Move records. + */ + lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); + memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp)); + xfs_alloc_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } + /* + * If we joined with the left neighbor, set the buffer in the + * cursor to the left block, and fix up the index. + */ + if (bp != lbp) { + xfs_btree_setbuf(cur, level, lbp); + cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * If we joined with the right neighbor and there's a level above + * us, increment the cursor at that level. + */ + else if (level + 1 < cur->bc_nlevels && + (error = xfs_alloc_increment(cur, level + 1, &i))) + return error; + /* + * Fix up the number of records in the surviving block. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + /* + * Fix up the right block pointer in the surviving block, and log it. + */ + left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ + xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + /* + * If there is a right sibling now, make it point to the + * remaining block. 
+ */ + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_alloc_block_t *rrblock; + xfs_buf_t *rrbp; + + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, + &rrbp, XFS_ALLOC_BTREE_REF))) + return error; + rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); + if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) + return error; + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); + xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); + } + /* + * Free the deleting block by putting it on the freelist. + */ + if ((error = xfs_alloc_put_freelist(cur->bc_tp, cur->bc_private.a.agbp, + NULL, rbno))) + return error; + /* + * Since blocks move to the free list without the coordination + * used in xfs_bmap_finish, we can't allow block to be available + * for reallocation and non-transaction writing (user data) + * until we know that the transaction that moved it to the free + * list is permanently on disk. We track the blocks by declaring + * these blocks as "busy"; the busy list is maintained on a + * per-ag basis and each transaction records which entries + * should be removed when the iclog commits to disk. If a + * busy block is allocated, the iclog is pushed up to the + * LSN that freed the block. + */ + xfs_alloc_mark_busy(cur->bc_tp, + INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); + + xfs_trans_agbtree_delta(cur->bc_tp, -1); + /* + * Adjust the current level's cursor so that we're left referring + * to the right node, after we're done. + * If this leaves the ptr value 0 our caller will fix it up. + */ + if (level > 0) + cur->bc_ptrs[level]--; + /* + * Return value means the next level up has something to do. + */ + *stat = 2; + return 0; + +error0: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} + +/* + * Insert one record/level. Return information to the caller + * allowing the next level up to proceed if necessary. + */ +STATIC int /* error */ +xfs_alloc_insrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to insert record at */ + xfs_agblock_t *bnop, /* i/o: block number inserted */ + xfs_alloc_rec_t *recp, /* i/o: record data inserted */ + xfs_btree_cur_t **curp, /* output: new cursor replacing cur */ + int *stat) /* output: success/failure */ +{ + xfs_agf_t *agf; /* allocation group freelist header */ + xfs_alloc_block_t *block; /* btree block record/key lives in */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ + int i; /* loop index */ + xfs_alloc_key_t key; /* key value being inserted */ + xfs_alloc_key_t *kp; /* pointer to btree keys */ + xfs_agblock_t nbno; /* block number of allocated block */ + xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */ + xfs_alloc_key_t nkey; /* new key value, from split */ + xfs_alloc_rec_t nrec; /* new record value, for caller */ + int optr; /* old ptr value */ + xfs_alloc_ptr_t *pp; /* pointer to btree addresses */ + int ptr; /* index in btree block for this rec */ + xfs_alloc_rec_t *rp; /* pointer to btree records */ + + ASSERT(INT_GET(recp->ar_blockcount, ARCH_CONVERT) > 0); + /* + * If we made it to the root level, allocate a new root block + * and we're done. + */ + if (level >= cur->bc_nlevels) { + XFS_STATS_INC(xfsstats.xs_abt_insrec); + if ((error = xfs_alloc_newroot(cur, &i))) + return error; + *bnop = NULLAGBLOCK; + *stat = i; + return 0; + } + /* + * Make a key out of the record data to be inserted, and save it. 
+ */ + key.ar_startblock = recp->ar_startblock; /* INT_: direct copy */ + key.ar_blockcount = recp->ar_blockcount; /* INT_: direct copy */ + optr = ptr = cur->bc_ptrs[level]; + /* + * If we're off the left edge, return failure. + */ + if (ptr == 0) { + *stat = 0; + return 0; + } + XFS_STATS_INC(xfsstats.xs_abt_insrec); + /* + * Get pointers to the btree buffer and block. + */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; + /* + * Check that the new entry is being inserted in the right place. + */ + if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (level == 0) { + rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); + xfs_btree_check_rec(cur->bc_btnum, recp, rp); + } else { + kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur); + xfs_btree_check_key(cur->bc_btnum, &key, kp); + } + } +#endif + nbno = NULLAGBLOCK; + ncur = (xfs_btree_cur_t *)0; + /* + * If the block is full, we can't insert the new entry until we + * make the block un-full. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + /* + * First, try shifting an entry to the right neighbor. + */ + if ((error = xfs_alloc_rshift(cur, level, &i))) + return error; + if (i) { + /* nothing */ + } + /* + * Next, try shifting an entry to the left neighbor. + */ + else { + if ((error = xfs_alloc_lshift(cur, level, &i))) + return error; + if (i) + optr = ptr = cur->bc_ptrs[level]; + else { + /* + * Next, try splitting the current block in + * half. If this works we have to re-set our + * variables because we could be in a + * different block now. + */ + if ((error = xfs_alloc_split(cur, level, &nbno, + &nkey, &ncur, &i))) + return error; + if (i) { + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = + xfs_btree_check_sblock(cur, + block, level, bp))) + return error; +#endif + ptr = cur->bc_ptrs[level]; + nrec.ar_startblock = nkey.ar_startblock; /* INT_: direct copy */ + nrec.ar_blockcount = nkey.ar_blockcount; /* INT_: direct copy */ + } + /* + * Otherwise the insert fails. + */ + else { + *stat = 0; + return 0; + } + } + } + } + /* + * At this point we know there's room for our new entry in the block + * we're pointing at. + */ + if (level > 0) { + /* + * It's a non-leaf entry. Make a hole for the new data + * in the key and ptr regions of the block. + */ + kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); + pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); +#ifdef DEBUG + for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) + return error; + } +#endif + memmove(&kp[ptr], &kp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); /* INT_: copy */ + memmove(&pp[ptr], &pp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); /* INT_: copy */ +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, *bnop, level))) + return error; +#endif + /* + * Now stuff the new data in, bump numrecs and log the new data. 
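When the target block is full, the code above tries, in order, shifting an entry into the right sibling, shifting one into the left sibling, and finally splitting the block; only the split hands a new block and key back up a level. The insertion itself, once room exists, is the mirror image of the deletion sketched earlier: memmove() opens a one-entry hole at the 1-based target index and the new record is dropped in. As a free-standing sketch on a plain record array, assuming capacity for at least one more element:

#include <string.h>

struct rec { unsigned int startblock, blockcount; };

/*
 * Insert 'nrec' at 1-based position 'ptr' in an array currently holding
 * 'numrecs' records, returning the new record count.
 */
static int insert_entry(struct rec *rp, int numrecs, int ptr,
			const struct rec *nrec)
{
	memmove(&rp[ptr], &rp[ptr - 1],
		(numrecs - ptr + 1) * sizeof(*rp));
	rp[ptr - 1] = *nrec;
	return numrecs + 1;
}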
+ */ + kp[ptr - 1] = key; + INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); + INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); + xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); +#ifdef DEBUG + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) + xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, + kp + ptr); +#endif + } else { + /* + * It's a leaf entry. Make a hole for the new record. + */ + rp = XFS_ALLOC_REC_ADDR(block, 1, cur); + memmove(&rp[ptr], &rp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp)); + /* + * Now stuff the new record in, bump numrecs + * and log the new data. + */ + rp[ptr - 1] = *recp; /* INT_: struct copy */ + INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); + xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); +#ifdef DEBUG + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) + xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, + rp + ptr); +#endif + } + /* + * Log the new number of records in the btree header. + */ + xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); + /* + * If we inserted at the start of a block, update the parents' keys. + */ + if (optr == 1 && (error = xfs_alloc_updkey(cur, &key, level + 1))) + return error; + /* + * Look to see if the longest extent in the allocation group + * needs to be updated. + */ + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + if (level == 0 && + cur->bc_btnum == XFS_BTNUM_CNT && + INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && + INT_GET(recp->ar_blockcount, ARCH_CONVERT) > INT_GET(agf->agf_longest, ARCH_CONVERT)) { + /* + * If this is a leaf in the by-size btree and there + * is no right sibling block and this block is bigger + * than the previous longest block, update it. + */ + INT_COPY(agf->agf_longest, recp->ar_blockcount, ARCH_CONVERT); + cur->bc_mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest + = INT_GET(recp->ar_blockcount, ARCH_CONVERT); + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_LONGEST); + } + /* + * Return the new block number, if any. + * If there is one, give back a record value and a cursor too. + */ + *bnop = nbno; + if (nbno != NULLAGBLOCK) { + *recp = nrec; /* INT_: struct copy */ + *curp = ncur; /* INT_: struct copy */ + } + *stat = 1; + return 0; +} + +/* + * Log header fields from a btree block. + */ +STATIC void +xfs_alloc_log_block( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* buffer containing btree block */ + int fields) /* mask of fields: XFS_BB_... */ +{ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + static const short offsets[] = { /* table of offsets */ + offsetof(xfs_alloc_block_t, bb_magic), + offsetof(xfs_alloc_block_t, bb_level), + offsetof(xfs_alloc_block_t, bb_numrecs), + offsetof(xfs_alloc_block_t, bb_leftsib), + offsetof(xfs_alloc_block_t, bb_rightsib), + sizeof(xfs_alloc_block_t) + }; + + xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last); + xfs_trans_log_buf(tp, bp, first, last); +} + +/* + * Log keys from a btree block (nonleaf). 
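xfs_alloc_log_block() just above shows the usual XFS pattern for logging only the modified parts of a structure: a bitmask of changed fields plus a table of offsetof() values (with the structure size as the final entry) is collapsed into one [first, last] byte range for xfs_trans_log_buf(). A stand-alone sketch of that mask-to-range computation, illustrating what xfs_btree_offsets() is used for here rather than reproducing the kernel helper:

/*
 * fields:   bitmask of changed fields, bit i for field i (must be nonzero).
 * offsets:  offsets[i] is the byte offset of field i; offsets[nbits] is the
 *           total structure size.
 */
static void fields_to_range(unsigned int fields, const short *offsets,
			    int nbits, int *firstp, int *lastp)
{
	int i, first = 0, last = 0, seen = 0;

	for (i = 0; i < nbits; i++) {
		if (!(fields & (1U << i)))
			continue;
		if (!seen) {
			first = offsets[i];
			seen = 1;
		}
		last = offsets[i + 1] - 1;	/* up to the start of the next field */
	}
	*firstp = first;
	*lastp = last;
}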
+ */ +STATIC void +xfs_alloc_log_keys( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int kfirst, /* index of first key to log */ + int klast) /* index of last key to log */ +{ + xfs_alloc_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + xfs_alloc_key_t *kp; /* key pointer in btree block */ + int last; /* last byte offset logged */ + + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Log block pointer fields from a btree block (nonleaf). + */ +STATIC void +xfs_alloc_log_ptrs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int pfirst, /* index of first pointer to log */ + int plast) /* index of last pointer to log */ +{ + xfs_alloc_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + xfs_alloc_ptr_t *pp; /* block-pointer pointer in btree blk */ + + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Log records from a btree block (leaf). + */ +STATIC void +xfs_alloc_log_recs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int rfirst, /* index of first record to log */ + int rlast) /* index of last record to log */ +{ + xfs_alloc_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + xfs_alloc_rec_t *rp; /* record pointer for btree block */ + + + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + rp = XFS_ALLOC_REC_ADDR(block, 1, cur); +#ifdef DEBUG + { + xfs_agf_t *agf; + xfs_alloc_rec_t *p; + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) + ASSERT(INT_GET(p->ar_startblock, ARCH_CONVERT) + INT_GET(p->ar_blockcount, ARCH_CONVERT) <= + INT_GET(agf->agf_length, ARCH_CONVERT)); + } +#endif + first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Lookup the record. The cursor is made to point to it, based on dir. + * Return 0 if can't find any such record, 1 for success. + */ +STATIC int /* error */ +xfs_alloc_lookup( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_lookup_t dir, /* <=, ==, or >= */ + int *stat) /* success/failure */ +{ + xfs_agblock_t agbno; /* a.g. relative btree block number */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_alloc_block_t *block=NULL; /* current btree block */ + int diff; /* difference for the current key */ + int error; /* error return value */ + int keyno=0; /* current key number */ + int level; /* level in the btree */ + xfs_mount_t *mp; /* file system mount point */ + + XFS_STATS_INC(xfsstats.xs_abt_lookup); + /* + * Get the allocation group header, and the root block number. + */ + mp = cur->bc_mp; + + { + xfs_agf_t *agf; /* a.g. 
freespace header */ + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + agno = INT_GET(agf->agf_seqno, ARCH_CONVERT); + agbno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); + } + /* + * Iterate over each level in the btree, starting at the root. + * For each level above the leaves, find the key we need, based + * on the lookup record, then follow the corresponding block + * pointer down to the next level. + */ + for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { + xfs_buf_t *bp; /* buffer pointer for btree block */ + xfs_daddr_t d; /* disk address of btree block */ + + /* + * Get the disk address we're looking for. + */ + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + /* + * If the old buffer at this level is for a different block, + * throw it away, otherwise just use it. + */ + bp = cur->bc_bufs[level]; + if (bp && XFS_BUF_ADDR(bp) != d) + bp = (xfs_buf_t *)0; + if (!bp) { + /* + * Need to get a new buffer. Read it, then + * set it in the cursor, releasing the old one. + */ + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, agno, + agbno, 0, &bp, XFS_ALLOC_BTREE_REF))) + return error; + xfs_btree_setbuf(cur, level, bp); + /* + * Point to the btree block, now that we have the buffer + */ + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, level, + bp))) + return error; + } else + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + /* + * If we already had a key match at a higher level, we know + * we need to use the first entry in this block. + */ + if (diff == 0) + keyno = 1; + /* + * Otherwise we need to search this block. Do a binary search. + */ + else { + int high; /* high entry number */ + xfs_alloc_key_t *kkbase=NULL;/* base of keys in block */ + xfs_alloc_rec_t *krbase=NULL;/* base of records in block */ + int low; /* low entry number */ + + /* + * Get a pointer to keys or records. + */ + if (level > 0) + kkbase = XFS_ALLOC_KEY_ADDR(block, 1, cur); + else + krbase = XFS_ALLOC_REC_ADDR(block, 1, cur); + /* + * Set low and high entry numbers, 1-based. + */ + low = 1; + if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { + /* + * If the block is empty, the tree must + * be an empty leaf. + */ + ASSERT(level == 0 && cur->bc_nlevels == 1); + cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; + *stat = 0; + return 0; + } + /* + * Binary search the block. + */ + while (low <= high) { + xfs_extlen_t blockcount; /* key value */ + xfs_agblock_t startblock; /* key value */ + + XFS_STATS_INC(xfsstats.xs_abt_compare); + /* + * keyno is average of low and high. + */ + keyno = (low + high) >> 1; + /* + * Get startblock & blockcount. + */ + if (level > 0) { + xfs_alloc_key_t *kkp; + + kkp = kkbase + keyno - 1; + startblock = INT_GET(kkp->ar_startblock, ARCH_CONVERT); + blockcount = INT_GET(kkp->ar_blockcount, ARCH_CONVERT); + } else { + xfs_alloc_rec_t *krp; + + krp = krbase + keyno - 1; + startblock = INT_GET(krp->ar_startblock, ARCH_CONVERT); + blockcount = INT_GET(krp->ar_blockcount, ARCH_CONVERT); + } + /* + * Compute difference to get next direction. + */ + if (cur->bc_btnum == XFS_BTNUM_BNO) + diff = (int)startblock - + (int)cur->bc_rec.a.ar_startblock; + else if (!(diff = (int)blockcount - + (int)cur->bc_rec.a.ar_blockcount)) + diff = (int)startblock - + (int)cur->bc_rec.a.ar_startblock; + /* + * Less than, move right. + */ + if (diff < 0) + low = keyno + 1; + /* + * Greater than, move left. + */ + else if (diff > 0) + high = keyno - 1; + /* + * Equal, we're done. 
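The binary search above compares on a single key (startblock) for the by-block btree, but on a compound key for the by-size btree: blockcount first, startblock as the tie-breaker. The same comparison, pulled out into a free-standing exact-match search over a sorted record array with simplified types; it returns the 1-based index of the match, or 0 if none:

struct arec { unsigned int startblock, blockcount; };

/* Compound compare for the by-size tree: length first, start block second. */
static int cnt_cmp(const struct arec *a, const struct arec *b)
{
	if (a->blockcount != b->blockcount)
		return (int)a->blockcount - (int)b->blockcount;
	return (int)a->startblock - (int)b->startblock;
}

/* Binary search for an exact match; records are sorted by cnt_cmp(). */
static int cnt_lookup_eq(const struct arec *recs, int numrecs,
			 const struct arec *want)
{
	int low = 1, high = numrecs;

	while (low <= high) {
		int keyno = (low + high) >> 1;		/* 1-based midpoint */
		int diff = cnt_cmp(&recs[keyno - 1], want);

		if (diff < 0)
			low = keyno + 1;		/* stored key too small, go right */
		else if (diff > 0)
			high = keyno - 1;		/* stored key too big, go left */
		else
			return keyno;			/* exact match */
	}
	return 0;
}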
+ */ + else + break; + } + } + /* + * If there are more levels, set up for the next level + * by getting the block number and filling in the cursor. + */ + if (level > 0) { + /* + * If we moved left, need the previous key number, + * unless there isn't one. + */ + if (diff > 0 && --keyno < 1) + keyno = 1; + agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, agbno, level))) + return error; +#endif + cur->bc_ptrs[level] = keyno; + } + } + /* + * Done with the search. + * See if we need to adjust the results. + */ + if (dir != XFS_LOOKUP_LE && diff < 0) { + keyno++; + /* + * If ge search and we went off the end of the block, but it's + * not the last block, we're in the wrong block. + */ + if (dir == XFS_LOOKUP_GE && + keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && + INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + int i; + + cur->bc_ptrs[0] = keyno; + if ((error = xfs_alloc_increment(cur, 0, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + *stat = 1; + return 0; + } + } + else if (dir == XFS_LOOKUP_LE && diff > 0) + keyno--; + cur->bc_ptrs[0] = keyno; + /* + * Return if we succeeded or not. + */ + if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) + *stat = 0; + else + *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); + return 0; +} + +/* + * Move 1 record left from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_alloc_lshift( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to shift record on */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef DEBUG + int i; /* loop index */ +#endif + xfs_alloc_key_t key; /* key value for leaf level upward */ + xfs_buf_t *lbp; /* buffer for left neighbor block */ + xfs_alloc_block_t *left; /* left neighbor btree block */ + int nrec; /* new number of left block entries */ + xfs_buf_t *rbp; /* buffer for right (current) block */ + xfs_alloc_block_t *right; /* right (current) btree block */ + xfs_alloc_key_t *rkp=NULL; /* key pointer for right block */ + xfs_alloc_ptr_t *rpp=NULL; /* address pointer for right block */ + xfs_alloc_rec_t *rrp=NULL; /* record pointer for right block */ + + /* + * Set up variables for this block as "right". + */ + rbp = cur->bc_bufs[level]; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; +#endif + /* + * If we've got no left sibling then we can't shift an entry left. + */ + if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + if (cur->bc_ptrs[level] <= 1) { + *stat = 0; + return 0; + } + /* + * Set up the left neighbor as "left". + */ + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, + XFS_ALLOC_BTREE_REF))) + return error; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; + /* + * If it's full, it can't take another entry. + */ + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + *stat = 0; + return 0; + } + nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; + /* + * If non-leaf, copy a key and a ptr to the left block. 
+ */ + if (level > 0) { + xfs_alloc_key_t *lkp; /* key pointer for left block */ + xfs_alloc_ptr_t *lpp; /* address pointer for left block */ + + lkp = XFS_ALLOC_KEY_ADDR(left, nrec, cur); + rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); + *lkp = *rkp; + xfs_alloc_log_keys(cur, lbp, nrec, nrec); + lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); + rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) + return error; +#endif + *lpp = *rpp; /* INT_: copy */ + xfs_alloc_log_ptrs(cur, lbp, nrec, nrec); + xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp); + } + /* + * If leaf, copy a record to the left block. + */ + else { + xfs_alloc_rec_t *lrp; /* record pointer for left block */ + + lrp = XFS_ALLOC_REC_ADDR(left, nrec, cur); + rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); + *lrp = *rrp; + xfs_alloc_log_recs(cur, lbp, nrec, nrec); + xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); + } + /* + * Bump and log left's numrecs, decrement and log right's numrecs. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); + xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); + INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); + xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); + /* + * Slide the contents of right down one entry. + */ + if (level > 0) { +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), + level))) + return error; + } +#endif + memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } else { + memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ + key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ + rkp = &key; + } + /* + * Update the parent key values of right. + */ + if ((error = xfs_alloc_updkey(cur, rkp, level + 1))) + return error; + /* + * Slide the cursor value left one. + */ + cur->bc_ptrs[level]--; + *stat = 1; + return 0; +} + +/* + * Allocate a new root block, fill it in. + */ +STATIC int /* error */ +xfs_alloc_newroot( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + xfs_agblock_t lbno; /* left block number */ + xfs_buf_t *lbp; /* left btree buffer */ + xfs_alloc_block_t *left; /* left btree block */ + xfs_mount_t *mp; /* mount structure */ + xfs_agblock_t nbno; /* new block number */ + xfs_buf_t *nbp; /* new (root) buffer */ + xfs_alloc_block_t *new; /* new (root) btree block */ + int nptr; /* new value for key index, 1 or 2 */ + xfs_agblock_t rbno; /* right block number */ + xfs_buf_t *rbp; /* right btree buffer */ + xfs_alloc_block_t *right; /* right btree block */ + + mp = cur->bc_mp; + + ASSERT(cur->bc_nlevels < XFS_AG_MAXLEVELS(mp)); + /* + * Get a buffer from the freelist blocks, for the new root. + */ + if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp, + &nbno))) + return error; + /* + * None available, we fail. 
+ */ + if (nbno == NULLAGBLOCK) { + *stat = 0; + return 0; + } + xfs_trans_agbtree_delta(cur->bc_tp, 1); + nbp = xfs_btree_get_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, nbno, + 0); + new = XFS_BUF_TO_ALLOC_BLOCK(nbp); + /* + * Set the root data in the a.g. freespace structure. + */ + { + xfs_agf_t *agf; /* a.g. freespace header */ + xfs_agnumber_t seqno; + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + INT_SET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT, nbno); + INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, 1); + seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); + mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_ROOTS | XFS_AGF_LEVELS); + } + /* + * At the previous root level there are now two blocks: the old + * root, and the new block generated when it was split. + * We don't know which one the cursor is pointing at, so we + * set up variables "left" and "right" for each case. + */ + lbp = cur->bc_bufs[cur->bc_nlevels - 1]; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) + return error; +#endif + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + /* + * Our block is left, pick up the right block. + */ + lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); + rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, rbno, 0, &rbp, + XFS_ALLOC_BTREE_REF))) + return error; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, + cur->bc_nlevels - 1, rbp))) + return error; + nptr = 1; + } else { + /* + * Our block is right, pick up the left block. + */ + rbp = lbp; + right = left; + rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); + lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + cur->bc_private.a.agno, lbno, 0, &lbp, + XFS_ALLOC_BTREE_REF))) + return error; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, + cur->bc_nlevels - 1, lbp))) + return error; + nptr = 2; + } + /* + * Fill in the new block's btree header and log it. + */ + INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); + INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels); + INT_SET(new->bb_numrecs, ARCH_CONVERT, 2); + INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); + ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); + /* + * Fill in the key data in the new root. + */ + { + xfs_alloc_key_t *kp; /* btree key pointer */ + + kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); + if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) { + kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ + kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ + } else { + xfs_alloc_rec_t *rp; /* btree record pointer */ + + rp = XFS_ALLOC_REC_ADDR(left, 1, cur); + kp[0].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ + kp[0].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ + rp = XFS_ALLOC_REC_ADDR(right, 1, cur); + kp[1].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ + kp[1].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ + } + } + xfs_alloc_log_keys(cur, nbp, 1, 2); + /* + * Fill in the pointer data in the new root. 
+ */ + { + xfs_alloc_ptr_t *pp; /* btree address pointer */ + + pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); + INT_SET(pp[0], ARCH_CONVERT, lbno); + INT_SET(pp[1], ARCH_CONVERT, rbno); + } + xfs_alloc_log_ptrs(cur, nbp, 1, 2); + /* + * Fix up the cursor. + */ + xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); + cur->bc_ptrs[cur->bc_nlevels] = nptr; + cur->bc_nlevels++; + *stat = 1; + return 0; +} + +/* + * Move 1 record right from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_alloc_rshift( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to shift record on */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* loop index */ + xfs_alloc_key_t key; /* key value for leaf level upward */ + xfs_buf_t *lbp; /* buffer for left (current) block */ + xfs_alloc_block_t *left; /* left (current) btree block */ + xfs_buf_t *rbp; /* buffer for right neighbor block */ + xfs_alloc_block_t *right; /* right neighbor btree block */ + xfs_alloc_key_t *rkp; /* key pointer for right block */ + xfs_btree_cur_t *tcur; /* temporary cursor */ + + /* + * Set up variables for this block as "left". + */ + lbp = cur->bc_bufs[level]; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; +#endif + /* + * If we've got no right sibling then we can't shift an entry right. + */ + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { + *stat = 0; + return 0; + } + /* + * Set up the right neighbor as "right". + */ + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp, + XFS_ALLOC_BTREE_REF))) + return error; + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; + /* + * If it's full, it can't take another entry. + */ + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { + *stat = 0; + return 0; + } + /* + * Make a hole at the start of the right neighbor block, then + * copy the last left block entry to the hole. 
+ */ + if (level > 0) { + xfs_alloc_key_t *lkp; /* key pointer for left block */ + xfs_alloc_ptr_t *lpp; /* address pointer for left block */ + xfs_alloc_ptr_t *rpp; /* address pointer for right block */ + + lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); + rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) + return error; +#endif + *rkp = *lkp; /* INT_: copy */ + *rpp = *lpp; /* INT_: copy */ + xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); + } else { + xfs_alloc_rec_t *lrp; /* record pointer for left block */ + xfs_alloc_rec_t *rrp; /* record pointer for right block */ + + lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); + memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + *rrp = *lrp; + xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ + key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ + rkp = &key; + xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); + } + /* + * Decrement and log left's numrecs, bump and log right's numrecs. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); + xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); + /* + * Using a temporary cursor, update the parent key values of the + * block on the right. + */ + if ((error = xfs_btree_dup_cursor(cur, &tcur))) + return error; + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_increment(tcur, level, &i)) || + (error = xfs_alloc_updkey(tcur, rkp, level + 1))) + goto error0; + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + *stat = 1; + return 0; +error0: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} + +/* + * Split cur/level block in half. + * Return new block number and its first record (to be inserted into parent). + */ +STATIC int /* error */ +xfs_alloc_split( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to split */ + xfs_agblock_t *bnop, /* output: block number allocated */ + xfs_alloc_key_t *keyp, /* output: first key of new block */ + xfs_btree_cur_t **curp, /* output: new cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* loop index/record number */ + xfs_agblock_t lbno; /* left (current) block number */ + xfs_buf_t *lbp; /* buffer for left block */ + xfs_alloc_block_t *left; /* left (current) btree block */ + xfs_agblock_t rbno; /* right (new) block number */ + xfs_buf_t *rbp; /* buffer for right block */ + xfs_alloc_block_t *right; /* right (new) btree block */ + + /* + * Allocate the new block from the freelist. 
+ * If we can't do it, we're toast. Give up. + */ + if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp, + &rbno))) + return error; + if (rbno == NULLAGBLOCK) { + *stat = 0; + return 0; + } + xfs_trans_agbtree_delta(cur->bc_tp, 1); + rbp = xfs_btree_get_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno, + rbno, 0); + /* + * Set up the new block as "right". + */ + right = XFS_BUF_TO_ALLOC_BLOCK(rbp); + /* + * "Left" is the current (according to the cursor) block. + */ + lbp = cur->bc_bufs[level]; + left = XFS_BUF_TO_ALLOC_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; +#endif + /* + * Fill in the btree header for the new block. + */ + INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); + right->bb_level = left->bb_level; /* INT_: direct copy */ + INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); + /* + * Make sure that if there's an odd number of entries now, that + * each new block will have the same number of entries. + */ + if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && + cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; + /* + * For non-leaf blocks, copy keys and addresses over to the new block. + */ + if (level > 0) { + xfs_alloc_key_t *lkp; /* left btree key pointer */ + xfs_alloc_ptr_t *lpp; /* left btree address pointer */ + xfs_alloc_key_t *rkp; /* right btree key pointer */ + xfs_alloc_ptr_t *rpp; /* right btree address pointer */ + + lkp = XFS_ALLOC_KEY_ADDR(left, i, cur); + lpp = XFS_ALLOC_PTR_ADDR(left, i, cur); + rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); + rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */ + memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); /* INT_: copy */ + xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + *keyp = *rkp; + } + /* + * For leaf blocks, copy records over to the new block. + */ + else { + xfs_alloc_rec_t *lrp; /* left btree record pointer */ + xfs_alloc_rec_t *rrp; /* right btree record pointer */ + + lrp = XFS_ALLOC_REC_ADDR(left, i, cur); + rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); + memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + keyp->ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ + keyp->ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ + } + /* + * Find the left block number by looking in the buffer. + * Adjust numrecs, sibling pointers. 
+ */ + lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); + right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ + INT_SET(left->bb_rightsib, ARCH_CONVERT, rbno); + INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); + xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); + xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + /* + * If there's a block to the new block's right, make that block + * point back to right instead of to left. + */ + if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_alloc_block_t *rrblock; /* rr btree block */ + xfs_buf_t *rrbp; /* buffer for rrblock */ + + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, + &rrbp, XFS_ALLOC_BTREE_REF))) + return error; + rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); + if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) + return error; + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, rbno); + xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); + } + /* + * If the cursor is really in the right block, move it there. + * If it's just pointing past the last entry in left, then we'll + * insert there, so don't change anything in that case. + */ + if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { + xfs_btree_setbuf(cur, level, rbp); + cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * If there are more levels, we'll need another cursor which refers to + * the right block, no matter where this cursor was. + */ + if (level + 1 < cur->bc_nlevels) { + if ((error = xfs_btree_dup_cursor(cur, curp))) + return error; + (*curp)->bc_ptrs[level + 1]++; + } + *bnop = rbno; + *stat = 1; + return 0; +} + +/* + * Update keys at all levels from here to the root along the cursor's path. + */ +STATIC int /* error */ +xfs_alloc_updkey( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_alloc_key_t *keyp, /* new key value to update to */ + int level) /* starting level for update */ +{ + int ptr; /* index of key in block */ + + /* + * Go up the tree from this level toward the root. + * At each level, update the key value to the value input. + * Stop when we reach a level where the cursor isn't pointing + * at the first entry in the block. + */ + for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { + xfs_alloc_block_t *block; /* btree block */ + xfs_buf_t *bp; /* buffer for block */ +#ifdef DEBUG + int error; /* error return value */ +#endif + xfs_alloc_key_t *kp; /* ptr to btree block keys */ + + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; +#endif + ptr = cur->bc_ptrs[level]; + kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur); + *kp = *keyp; + xfs_alloc_log_keys(cur, bp, ptr, ptr); + } + return 0; +} + +/* + * Externally visible routines. + */ + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_alloc_decrement( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat) /* success/failure */ +{ + xfs_alloc_block_t *block; /* btree block */ + int error; /* error return value */ + int lev; /* btree level */ + + ASSERT(level < cur->bc_nlevels); + /* + * Read-ahead to the left at this level. 
+ */ + xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); + /* + * Decrement the ptr at this level. If we're still in the block + * then we're done. + */ + if (--cur->bc_ptrs[level] > 0) { + *stat = 1; + return 0; + } + /* + * Get a pointer to the btree block. + */ + block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[level]); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, + cur->bc_bufs[level]))) + return error; +#endif + /* + * If we just went off the left edge of the tree, return failure. + */ + if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * March up the tree decrementing pointers. + * Stop when we don't go off the left edge of a block. + */ + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + if (--cur->bc_ptrs[lev] > 0) + break; + /* + * Read-ahead the left block, we're going to read it + * in the next loop. + */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); + } + /* + * If we went off the root then we are seriously confused. + */ + ASSERT(lev < cur->bc_nlevels); + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. + */ + for (block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); lev > level; ) { + xfs_agblock_t agbno; /* block number of btree block */ + xfs_buf_t *bp; /* buffer pointer for block */ + + agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, agbno, 0, &bp, + XFS_ALLOC_BTREE_REF))) + return error; + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; + cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); + } + *stat = 1; + return 0; +} + +/* + * Delete the record pointed to by cur. + * The cursor refers to the place where the record was (could be inserted) + * when the operation returns. + */ +int /* error */ +xfs_alloc_delete( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* result code */ + int level; /* btree level */ + + /* + * Go up the tree, starting at leaf level. + * If 2 is returned then a join was done; go to the next level. + * Otherwise we are done. + */ + for (level = 0, i = 2; i == 2; level++) { + if ((error = xfs_alloc_delrec(cur, level, &i))) + return error; + } + if (i == 0) { + for (level = 1; level < cur->bc_nlevels; level++) { + if (cur->bc_ptrs[level] == 0) { + if ((error = xfs_alloc_decrement(cur, level, &i))) + return error; + break; + } + } + } + *stat = i; + return 0; +} + +/* + * Get the data from the pointed-to record. + */ +int /* error */ +xfs_alloc_get_rec( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t *bno, /* output: starting block of extent */ + xfs_extlen_t *len, /* output: length of extent */ + int *stat) /* output: success/failure */ +{ + xfs_alloc_block_t *block; /* btree block */ +#ifdef DEBUG + int error; /* error return value */ +#endif + int ptr; /* record number */ + + ptr = cur->bc_ptrs[0]; + block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0]))) + return error; +#endif + /* + * Off the right end or left end, return failure. + */ + if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { + *stat = 0; + return 0; + } + /* + * Point to the record and extract its data. 
+ */ + { + xfs_alloc_rec_t *rec; /* record data */ + + rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); + *bno = INT_GET(rec->ar_startblock, ARCH_CONVERT); + *len = INT_GET(rec->ar_blockcount, ARCH_CONVERT); + } + *stat = 1; + return 0; +} + +/* + * Increment cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_alloc_increment( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat) /* success/failure */ +{ + xfs_alloc_block_t *block; /* btree block */ + xfs_buf_t *bp; /* tree block buffer */ + int error; /* error return value */ + int lev; /* btree level */ + + ASSERT(level < cur->bc_nlevels); + /* + * Read-ahead to the right at this level. + */ + xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); + /* + * Get a pointer to the btree block. + */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; +#endif + /* + * Increment the ptr at this level. If we're still in the block + * then we're done. + */ + if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + *stat = 1; + return 0; + } + /* + * If we just went off the right edge of the tree, return failure. + */ + if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * March up the tree incrementing pointers. + * Stop when we don't go off the right edge of a block. + */ + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + bp = cur->bc_bufs[lev]; + block = XFS_BUF_TO_ALLOC_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; +#endif + if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) + break; + /* + * Read-ahead the right block, we're going to read it + * in the next loop. + */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); + } + /* + * If we went off the root then we are seriously confused. + */ + ASSERT(lev < cur->bc_nlevels); + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. + */ + for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_ALLOC_BLOCK(bp); + lev > level; ) { + xfs_agblock_t agbno; /* block number of btree block */ + + agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agno, agbno, 0, &bp, + XFS_ALLOC_BTREE_REF))) + return error; + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_ALLOC_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; + cur->bc_ptrs[lev] = 1; + } + *stat = 1; + return 0; +} + +/* + * Insert the current record at the point referenced by cur. + * The cursor may be inconsistent on return if splits have been done. 
+ */ +int /* error */ +xfs_alloc_insert( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* result value, 0 for failure */ + int level; /* current level number in btree */ + xfs_agblock_t nbno; /* new block number (split result) */ + xfs_btree_cur_t *ncur; /* new cursor (split result) */ + xfs_alloc_rec_t nrec; /* record being inserted this level */ + xfs_btree_cur_t *pcur; /* previous level's cursor */ + + level = 0; + nbno = NULLAGBLOCK; + INT_SET(nrec.ar_startblock, ARCH_CONVERT, cur->bc_rec.a.ar_startblock); + INT_SET(nrec.ar_blockcount, ARCH_CONVERT, cur->bc_rec.a.ar_blockcount); + ncur = (xfs_btree_cur_t *)0; + pcur = cur; + /* + * Loop going up the tree, starting at the leaf level. + * Stop when we don't get a split block, that must mean that + * the insert is finished with this level. + */ + do { + /* + * Insert nrec/nbno into this level of the tree. + * Note if we fail, nbno will be null. + */ + if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur, + &i))) { + if (pcur != cur) + xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); + return error; + } + /* + * See if the cursor we just used is trash. + * Can't trash the caller's cursor, but otherwise we should + * if ncur is a new cursor or we're about to be done. + */ + if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) { + cur->bc_nlevels = pcur->bc_nlevels; + xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); + } + /* + * If we got a new cursor, switch to it. + */ + if (ncur) { + pcur = ncur; + ncur = (xfs_btree_cur_t *)0; + } + } while (nbno != NULLAGBLOCK); + *stat = i; + return 0; +} + +/* + * Lookup the record equal to [bno, len] in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_eq( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* success/failure */ +{ + cur->bc_rec.a.ar_startblock = bno; + cur->bc_rec.a.ar_blockcount = len; + return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, stat); +} + +/* + * Lookup the first record greater than or equal to [bno, len] + * in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_ge( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* success/failure */ +{ + cur->bc_rec.a.ar_startblock = bno; + cur->bc_rec.a.ar_blockcount = len; + return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, stat); +} + +/* + * Lookup the first record less than or equal to [bno, len] + * in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_le( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* success/failure */ +{ + cur->bc_rec.a.ar_startblock = bno; + cur->bc_rec.a.ar_blockcount = len; + return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, stat); +} + +/* + * Update the record referred to by cur, to the value given by [bno, len]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +int /* error */ +xfs_alloc_update( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len) /* length of extent */ +{ + xfs_alloc_block_t *block; /* btree block to update */ + int error; /* error return value */ + int ptr; /* current record number (updating) */ + + ASSERT(len > 0); + /* + * Pick up the a.g. freelist struct and the current block. 
+ */ + block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0]))) + return error; +#endif + /* + * Get the address of the rec to be updated. + */ + ptr = cur->bc_ptrs[0]; + { + xfs_alloc_rec_t *rp; /* pointer to updated record */ + + rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); + /* + * Fill in the new contents and log them. + */ + INT_SET(rp->ar_startblock, ARCH_CONVERT, bno); + INT_SET(rp->ar_blockcount, ARCH_CONVERT, len); + xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); + } + /* + * If it's the by-size btree and it's the last leaf block and + * it's the last record... then update the size of the longest + * extent in the a.g., which we cache in the a.g. freelist header. + */ + if (cur->bc_btnum == XFS_BTNUM_CNT && + INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && + ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + xfs_agf_t *agf; /* a.g. freespace header */ + xfs_agnumber_t seqno; + + agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); + cur->bc_mp->m_perag[seqno].pagf_longest = len; + INT_SET(agf->agf_longest, ARCH_CONVERT, len); + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, + XFS_AGF_LONGEST); + } + /* + * Updating first record in leaf. Pass new key value up to our parent. + */ + if (ptr == 1) { + xfs_alloc_key_t key; /* key containing [bno, len] */ + + INT_SET(key.ar_startblock, ARCH_CONVERT, bno); + INT_SET(key.ar_blockcount, ARCH_CONVERT, len); + if ((error = xfs_alloc_updkey(cur, &key, 1))) + return error; + } + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_alloc_btree.h linux.22-ac2/fs/xfs/xfs_alloc_btree.h --- linux.vanilla/fs/xfs/xfs_alloc_btree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_alloc_btree.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ALLOC_BTREE_H__ +#define __XFS_ALLOC_BTREE_H__ + +/* + * Freespace on-disk structures + */ + +struct xfs_buf; +struct xfs_btree_cur; +struct xfs_btree_sblock; +struct xfs_mount; + +/* + * There are two on-disk btrees, one sorted by blockno and one sorted + * by blockcount and blockno. All blocks look the same to make the code + * simpler; if we have time later, we'll make the optimizations. + */ +#define XFS_ABTB_MAGIC 0x41425442 /* 'ABTB' for bno tree */ +#define XFS_ABTC_MAGIC 0x41425443 /* 'ABTC' for cnt tree */ + +/* + * Data record/key structure + */ +typedef struct xfs_alloc_rec +{ + xfs_agblock_t ar_startblock; /* starting block number */ + xfs_extlen_t ar_blockcount; /* count of free blocks */ +} xfs_alloc_rec_t, xfs_alloc_key_t; + +typedef xfs_agblock_t xfs_alloc_ptr_t; /* btree pointer type */ + /* btree block header type */ +typedef struct xfs_btree_sblock xfs_alloc_block_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_ALLOC_BLOCK) +xfs_alloc_block_t *xfs_buf_to_alloc_block(struct xfs_buf *bp); +#define XFS_BUF_TO_ALLOC_BLOCK(bp) xfs_buf_to_alloc_block(bp) +#else +#define XFS_BUF_TO_ALLOC_BLOCK(bp) ((xfs_alloc_block_t *)(XFS_BUF_PTR(bp))) +#endif + +/* + * Real block structures have a size equal to the disk block size. + */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_BLOCK_SIZE) +int xfs_alloc_block_size(int lev, struct xfs_btree_cur *cur); +#define XFS_ALLOC_BLOCK_SIZE(lev,cur) xfs_alloc_block_size(lev,cur) +#else +#define XFS_ALLOC_BLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_BLOCK_MAXRECS) +int xfs_alloc_block_maxrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_ALLOC_BLOCK_MAXRECS(lev,cur) xfs_alloc_block_maxrecs(lev,cur) +#else +#define XFS_ALLOC_BLOCK_MAXRECS(lev,cur) \ + ((cur)->bc_mp->m_alloc_mxr[lev != 0]) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_BLOCK_MINRECS) +int xfs_alloc_block_minrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_ALLOC_BLOCK_MINRECS(lev,cur) xfs_alloc_block_minrecs(lev,cur) +#else +#define XFS_ALLOC_BLOCK_MINRECS(lev,cur) \ + ((cur)->bc_mp->m_alloc_mnr[lev != 0]) +#endif + +/* + * Minimum and maximum blocksize and sectorsize. + * The blocksize upper limit is pretty much arbitrary. + * The sectorsize upper limit is due to sizeof(sb_sectsize). + */ +#define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */ +#define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */ +#define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG) +#define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG) +#define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */ +#define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */ +#define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG) +#define XFS_MAX_SECTORSIZE (1 << XFS_MAX_SECTORSIZE_LOG) + +/* + * Block numbers in the AG: + * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3. 
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BNO_BLOCK) +xfs_agblock_t xfs_bno_block(struct xfs_mount *mp); +#define XFS_BNO_BLOCK(mp) xfs_bno_block(mp) +#else +#define XFS_BNO_BLOCK(mp) ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CNT_BLOCK) +xfs_agblock_t xfs_cnt_block(struct xfs_mount *mp); +#define XFS_CNT_BLOCK(mp) xfs_cnt_block(mp) +#else +#define XFS_CNT_BLOCK(mp) ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1)) +#endif + +/* + * Record, key, and pointer address macros for btree blocks. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_REC_ADDR) +xfs_alloc_rec_t *xfs_alloc_rec_addr(xfs_alloc_block_t *bb, int i, + struct xfs_btree_cur *cur); +#define XFS_ALLOC_REC_ADDR(bb,i,cur) xfs_alloc_rec_addr(bb,i,cur) +#else +#define XFS_ALLOC_REC_ADDR(bb,i,cur) \ + XFS_BTREE_REC_ADDR(XFS_ALLOC_BLOCK_SIZE(0,cur), xfs_alloc, bb, i, \ + XFS_ALLOC_BLOCK_MAXRECS(0, cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_KEY_ADDR) +xfs_alloc_key_t *xfs_alloc_key_addr(xfs_alloc_block_t *bb, int i, + struct xfs_btree_cur *cur); +#define XFS_ALLOC_KEY_ADDR(bb,i,cur) xfs_alloc_key_addr(bb,i,cur) +#else +#define XFS_ALLOC_KEY_ADDR(bb,i,cur) \ + XFS_BTREE_KEY_ADDR(XFS_ALLOC_BLOCK_SIZE(1,cur), xfs_alloc, bb, i, \ + XFS_ALLOC_BLOCK_MAXRECS(1, cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ALLOC_PTR_ADDR) +xfs_alloc_ptr_t *xfs_alloc_ptr_addr(xfs_alloc_block_t *bb, int i, + struct xfs_btree_cur *cur); +#define XFS_ALLOC_PTR_ADDR(bb,i,cur) xfs_alloc_ptr_addr(bb,i,cur) +#else +#define XFS_ALLOC_PTR_ADDR(bb,i,cur) \ + XFS_BTREE_PTR_ADDR(XFS_ALLOC_BLOCK_SIZE(1,cur), xfs_alloc, bb, i, \ + XFS_ALLOC_BLOCK_MAXRECS(1, cur)) +#endif + +/* + * Prototypes for externally visible routines. + */ + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_alloc_decrement( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat); /* success/failure */ + +/* + * Delete the record pointed to by cur. + * The cursor refers to the place where the record was (could be inserted) + * when the operation returns. + */ +int /* error */ +xfs_alloc_delete( + struct xfs_btree_cur *cur, /* btree cursor */ + int *stat); /* success/failure */ + +/* + * Get the data from the pointed-to record. + */ +int /* error */ +xfs_alloc_get_rec( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t *bno, /* output: starting block of extent */ + xfs_extlen_t *len, /* output: length of extent */ + int *stat); /* output: success/failure */ + +/* + * Increment cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_alloc_increment( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat); /* success/failure */ + +/* + * Insert the current record at the point referenced by cur. + * The cursor may be inconsistent on return if splits have been done. + */ +int /* error */ +xfs_alloc_insert( + struct xfs_btree_cur *cur, /* btree cursor */ + int *stat); /* success/failure */ + +/* + * Lookup the record equal to [bno, len] in the btree given by cur. 
+ */ +int /* error */ +xfs_alloc_lookup_eq( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat); /* success/failure */ + +/* + * Lookup the first record greater than or equal to [bno, len] + * in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_ge( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat); /* success/failure */ + +/* + * Lookup the first record less than or equal to [bno, len] + * in the btree given by cur. + */ +int /* error */ +xfs_alloc_lookup_le( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat); /* success/failure */ + +/* + * Update the record referred to by cur, to the value given by [bno, len]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +int /* error */ +xfs_alloc_update( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len); /* length of extent */ + +#endif /* __XFS_ALLOC_BTREE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_alloc.c linux.22-ac2/fs/xfs/xfs_alloc.c --- linux.vanilla/fs/xfs/xfs_alloc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_alloc.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,2626 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Free space allocation for XFS. + */ +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_bit.h" +#include "xfs_error.h" + + +#if defined(DEBUG) +/* + * Allocation tracing. + */ +ktrace_t *xfs_alloc_trace_buf; +#endif + +#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? 
((b) - (a)) : ((a) - (b))) + +#define XFSA_FIXUP_BNO_OK 1 +#define XFSA_FIXUP_CNT_OK 2 + +int +xfs_alloc_search_busy(xfs_trans_t *tp, + xfs_agnumber_t agno, + xfs_agblock_t bno, + xfs_extlen_t len); + +#if defined(XFS_ALLOC_TRACE) +#define TRACE_ALLOC(s,a) \ + xfs_alloc_trace_alloc(fname, s, a, __LINE__) +#define TRACE_FREE(s,a,b,x,f) \ + xfs_alloc_trace_free(fname, s, mp, a, b, x, f, __LINE__) +#define TRACE_MODAGF(s,a,f) \ + xfs_alloc_trace_modagf(fname, s, mp, a, f, __LINE__) +#define TRACE_BUSY(fname,s,ag,agb,l,sl,tp) \ + xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__) +#define TRACE_UNBUSY(fname,s,ag,sl,tp) \ + xfs_alloc_trace_busy(fname, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__) +#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp) \ + xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__) + + +#else +#define TRACE_ALLOC(s,a) +#define TRACE_FREE(s,a,b,x,f) +#define TRACE_MODAGF(s,a,f) +#define TRACE_BUSY(s,a,ag,agb,l,sl,tp) +#define TRACE_UNBUSY(fname,s,ag,sl,tp) +#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp) +#endif /* XFS_ALLOC_TRACE */ + +/* + * Prototypes for per-ag allocation routines + */ + +STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *); +STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *); +STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *); +STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *, + xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *); + +/* + * Internal functions. + */ + +/* + * Compute aligned version of the found extent. + * Takes alignment and min length into account. + */ +STATIC int /* success (>= minlen) */ +xfs_alloc_compute_aligned( + xfs_agblock_t foundbno, /* starting block in found extent */ + xfs_extlen_t foundlen, /* length in found extent */ + xfs_extlen_t alignment, /* alignment for allocation */ + xfs_extlen_t minlen, /* minimum length for allocation */ + xfs_agblock_t *resbno, /* result block number */ + xfs_extlen_t *reslen) /* result length */ +{ + xfs_agblock_t bno; + xfs_extlen_t diff; + xfs_extlen_t len; + + if (alignment > 1 && foundlen >= minlen) { + bno = roundup(foundbno, alignment); + diff = bno - foundbno; + len = diff >= foundlen ? 0 : foundlen - diff; + } else { + bno = foundbno; + len = foundlen; + } + *resbno = bno; + *reslen = len; + return len >= minlen; +} + +/* + * Compute best start block and diff for "near" allocations. + * freelen >= wantlen already checked by caller. 
+ */ +STATIC xfs_extlen_t /* difference value (absolute) */ +xfs_alloc_compute_diff( + xfs_agblock_t wantbno, /* target starting block */ + xfs_extlen_t wantlen, /* target length */ + xfs_extlen_t alignment, /* target alignment */ + xfs_agblock_t freebno, /* freespace's starting block */ + xfs_extlen_t freelen, /* freespace's length */ + xfs_agblock_t *newbnop) /* result: best start block from free */ +{ + xfs_agblock_t freeend; /* end of freespace extent */ + xfs_agblock_t newbno1; /* return block number */ + xfs_agblock_t newbno2; /* other new block number */ + xfs_extlen_t newlen1=0; /* length with newbno1 */ + xfs_extlen_t newlen2=0; /* length with newbno2 */ + xfs_agblock_t wantend; /* end of target extent */ + + ASSERT(freelen >= wantlen); + freeend = freebno + freelen; + wantend = wantbno + wantlen; + if (freebno >= wantbno) { + if ((newbno1 = roundup(freebno, alignment)) >= freeend) + newbno1 = NULLAGBLOCK; + } else if (freeend >= wantend && alignment > 1) { + newbno1 = roundup(wantbno, alignment); + newbno2 = newbno1 - alignment; + if (newbno1 >= freeend) + newbno1 = NULLAGBLOCK; + else + newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1); + if (newbno2 < freebno) + newbno2 = NULLAGBLOCK; + else + newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2); + if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) { + if (newlen1 < newlen2 || + (newlen1 == newlen2 && + XFS_ABSDIFF(newbno1, wantbno) > + XFS_ABSDIFF(newbno2, wantbno))) + newbno1 = newbno2; + } else if (newbno2 != NULLAGBLOCK) + newbno1 = newbno2; + } else if (freeend >= wantend) { + newbno1 = wantbno; + } else if (alignment > 1) { + newbno1 = roundup(freeend - wantlen, alignment); + if (newbno1 > freeend - wantlen && + newbno1 - alignment >= freebno) + newbno1 -= alignment; + else if (newbno1 >= freeend) + newbno1 = NULLAGBLOCK; + } else + newbno1 = freeend - wantlen; + *newbnop = newbno1; + return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno); +} + +/* + * Fix up the length, based on mod and prod. + * len should be k * prod + mod for some k. + * If len is too small it is returned unchanged. + * If len hits maxlen it is left alone. + */ +STATIC void +xfs_alloc_fix_len( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_extlen_t k; + xfs_extlen_t rlen; + + ASSERT(args->mod < args->prod); + rlen = args->len; + ASSERT(rlen >= args->minlen); + ASSERT(rlen <= args->maxlen); + if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen || + (args->mod == 0 && rlen < args->prod)) + return; + k = rlen % args->prod; + if (k == args->mod) + return; + if (k > args->mod) { + if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen) + return; + } else { + if ((int)(rlen = rlen - args->prod - (args->mod - k)) < + (int)args->minlen) + return; + } + ASSERT(rlen >= args->minlen); + ASSERT(rlen <= args->maxlen); + args->len = rlen; +} + +/* + * Fix up length if there is too little space left in the a.g. + * Return 1 if ok, 0 if too little, should give up. + */ +STATIC int +xfs_alloc_fix_minleft( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_agf_t *agf; /* a.g. 
freelist header */ + int diff; /* free space difference */ + + if (args->minleft == 0) + return 1; + agf = XFS_BUF_TO_AGF(args->agbp); + diff = INT_GET(agf->agf_freeblks, ARCH_CONVERT) + + INT_GET(agf->agf_flcount, ARCH_CONVERT) + - args->len - args->minleft; + if (diff >= 0) + return 1; + args->len += diff; /* shrink the allocated space */ + if (args->len >= args->minlen) + return 1; + args->agbno = NULLAGBLOCK; + return 0; +} + +/* + * Update the two btrees, logically removing from freespace the extent + * starting at rbno, rlen blocks. The extent is contained within the + * actual (current) free extent fbno for flen blocks. + * Flags are passed in indicating whether the cursors are set to the + * relevant records. + */ +STATIC int /* error code */ +xfs_alloc_fixup_trees( + xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */ + xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */ + xfs_agblock_t fbno, /* starting block of free extent */ + xfs_extlen_t flen, /* length of free extent */ + xfs_agblock_t rbno, /* starting block of returned extent */ + xfs_extlen_t rlen, /* length of returned extent */ + int flags) /* flags, XFSA_FIXUP_... */ +{ + int error; /* error code */ + int i; /* operation results */ + xfs_agblock_t nfbno1; /* first new free startblock */ + xfs_agblock_t nfbno2; /* second new free startblock */ + xfs_extlen_t nflen1=0; /* first new free length */ + xfs_extlen_t nflen2=0; /* second new free length */ + + /* + * Look up the record in the by-size tree if necessary. + */ + if (flags & XFSA_FIXUP_CNT_OK) { +#ifdef DEBUG + if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN( + i == 1 && nfbno1 == fbno && nflen1 == flen); +#endif + } else { + if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + /* + * Look up the record in the by-block tree if necessary. + */ + if (flags & XFSA_FIXUP_BNO_OK) { +#ifdef DEBUG + if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN( + i == 1 && nfbno1 == fbno && nflen1 == flen); +#endif + } else { + if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } +#ifdef DEBUG + { + xfs_alloc_block_t *bnoblock; + xfs_alloc_block_t *cntblock; + + if (bno_cur->bc_nlevels == 1 && + cnt_cur->bc_nlevels == 1) { + bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]); + cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]); + XFS_WANT_CORRUPTED_RETURN( + INT_GET(bnoblock->bb_numrecs, ARCH_CONVERT) == INT_GET(cntblock->bb_numrecs, ARCH_CONVERT)); + } + } +#endif + /* + * Deal with all four cases: the allocated record is contained + * within the freespace record, so we can have new freespace + * at either (or both) end, or no freespace remaining. + */ + if (rbno == fbno && rlen == flen) + nfbno1 = nfbno2 = NULLAGBLOCK; + else if (rbno == fbno) { + nfbno1 = rbno + rlen; + nflen1 = flen - rlen; + nfbno2 = NULLAGBLOCK; + } else if (rbno + rlen == fbno + flen) { + nfbno1 = fbno; + nflen1 = flen - rlen; + nfbno2 = NULLAGBLOCK; + } else { + nfbno1 = fbno; + nflen1 = rbno - fbno; + nfbno2 = rbno + rlen; + nflen2 = (fbno + flen) - nfbno2; + } + /* + * Delete the entry from the by-size btree. + */ + if ((error = xfs_alloc_delete(cnt_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + /* + * Add new by-size btree entry(s). 
+ */ + if (nfbno1 != NULLAGBLOCK) { + if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 0); + if ((error = xfs_alloc_insert(cnt_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + if (nfbno2 != NULLAGBLOCK) { + if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 0); + if ((error = xfs_alloc_insert(cnt_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + /* + * Fix up the by-block btree entry(s). + */ + if (nfbno1 == NULLAGBLOCK) { + /* + * No remaining freespace, just delete the by-block tree entry. + */ + if ((error = xfs_alloc_delete(bno_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } else { + /* + * Update the by-block entry to start later|be shorter. + */ + if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1))) + return error; + } + if (nfbno2 != NULLAGBLOCK) { + /* + * 2 resulting free entries, need to add one. + */ + if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 0); + if ((error = xfs_alloc_insert(bno_cur, &i))) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + return 0; +} + +/* + * Read in the allocation group free block array. + */ +STATIC int /* error */ +xfs_alloc_read_agfl( + xfs_mount_t *mp, /* mount point structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_buf_t **bpp) /* buffer for the ag free block array */ +{ + xfs_buf_t *bp; /* return value */ + int error; + + ASSERT(agno != NULLAGNUMBER); + error = xfs_trans_read_buf( + mp, tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (error) + return error; + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF); + *bpp = bp; + return 0; +} + +#if defined(XFS_ALLOC_TRACE) +/* + * Add an allocation trace entry for an alloc call. + */ +STATIC void +xfs_alloc_trace_alloc( + char *name, /* function tag string */ + char *str, /* additional string */ + xfs_alloc_arg_t *args, /* allocation argument structure */ + int line) /* source line number */ +{ + ktrace_enter(xfs_alloc_trace_buf, + (void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)), + (void *)name, + (void *)str, + (void *)args->mp, + (void *)(__psunsigned_t)args->agno, + (void *)(__psunsigned_t)args->agbno, + (void *)(__psunsigned_t)args->minlen, + (void *)(__psunsigned_t)args->maxlen, + (void *)(__psunsigned_t)args->mod, + (void *)(__psunsigned_t)args->prod, + (void *)(__psunsigned_t)args->minleft, + (void *)(__psunsigned_t)args->total, + (void *)(__psunsigned_t)args->alignment, + (void *)(__psunsigned_t)args->len, + (void *)((((__psint_t)args->type) << 16) | + (__psint_t)args->otype), + (void *)(__psint_t)((args->wasdel << 3) | + (args->wasfromfl << 2) | + (args->isfl << 1) | + (args->userdata << 0))); +} + +/* + * Add an allocation trace entry for a free call. + */ +STATIC void +xfs_alloc_trace_free( + char *name, /* function tag string */ + char *str, /* additional string */ + xfs_mount_t *mp, /* file system mount point */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* a.g. 
relative block number */ + xfs_extlen_t len, /* length of extent */ + int isfl, /* set if is freelist allocation/free */ + int line) /* source line number */ +{ + ktrace_enter(xfs_alloc_trace_buf, + (void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)), + (void *)name, + (void *)str, + (void *)mp, + (void *)(__psunsigned_t)agno, + (void *)(__psunsigned_t)agbno, + (void *)(__psunsigned_t)len, + (void *)(__psint_t)isfl, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add an allocation trace entry for modifying an agf. + */ +STATIC void +xfs_alloc_trace_modagf( + char *name, /* function tag string */ + char *str, /* additional string */ + xfs_mount_t *mp, /* file system mount point */ + xfs_agf_t *agf, /* new agf value */ + int flags, /* logging flags for agf */ + int line) /* source line number */ +{ + ktrace_enter(xfs_alloc_trace_buf, + (void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)), + (void *)name, + (void *)str, + (void *)mp, + (void *)(__psint_t)flags, + (void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO], + ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT], + ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO], + ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT], + ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_freeblks, ARCH_CONVERT), + (void *)(__psunsigned_t)INT_GET(agf->agf_longest, ARCH_CONVERT)); +} + +STATIC void +xfs_alloc_trace_busy( + char *name, /* function tag string */ + char *str, /* additional string */ + xfs_mount_t *mp, /* file system mount poing */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* a.g. relative block number */ + xfs_extlen_t len, /* length of extent */ + int slot, /* perag Busy slot */ + xfs_trans_t *tp, + int trtype, /* type: add, delete, search */ + int line) /* source line number */ +{ + ktrace_enter(xfs_alloc_trace_buf, + (void *)(__psint_t)(trtype | (line << 16)), + (void *)name, + (void *)str, + (void *)mp, + (void *)(__psunsigned_t)agno, + (void *)(__psunsigned_t)agbno, + (void *)(__psunsigned_t)len, + (void *)(__psint_t)slot, + (void *)tp, + NULL, NULL, NULL, NULL, NULL, NULL, NULL); +} +#endif /* XFS_ALLOC_TRACE */ + +/* + * Allocation group level functions. + */ + +/* + * Allocate a variable extent in the allocation group agno. + * Type and bno are used to determine where in the allocation group the + * extent will start. + * Extent's length (returned in *len) will be between minlen and maxlen, + * and of the form k * prod + mod unless there's nothing that large. + * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. + */ +STATIC int /* error */ +xfs_alloc_ag_vextent( + xfs_alloc_arg_t *args) /* argument structure for allocation */ +{ + int error=0; +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent"; +#endif + + ASSERT(args->minlen > 0); + ASSERT(args->maxlen > 0); + ASSERT(args->minlen <= args->maxlen); + ASSERT(args->mod < args->prod); + ASSERT(args->alignment > 0); + /* + * Branch to correct routine based on the type. 
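/*
 * Illustrative sketch, not part of the patch: the ktrace_enter() calls
 * above append a fixed number of word-sized values to an in-memory ring
 * of trace entries.  A minimal standalone analogue looks like this; the
 * sizes and names are assumptions, and the real ktrace code differs.
 */
#define TRACE_SLOTS     16      /* values per entry, as packed above */
#define TRACE_ENTRIES   64      /* hypothetical ring depth */

struct trace_entry {
        void    *val[TRACE_SLOTS];
};

static struct trace_entry trace_ring[TRACE_ENTRIES];
static unsigned int trace_next;         /* index of the next entry to fill */

static void
trace_enter4(void *v0, void *v1, void *v2, void *v3)
{
        struct trace_entry *te = &trace_ring[trace_next++ % TRACE_ENTRIES];

        /* only four slots shown; the callers above fill up to sixteen */
        te->val[0] = v0;
        te->val[1] = v1;
        te->val[2] = v2;
        te->val[3] = v3;
}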
+ */ + args->wasfromfl = 0; + switch (args->type) { + case XFS_ALLOCTYPE_THIS_AG: + error = xfs_alloc_ag_vextent_size(args); + break; + case XFS_ALLOCTYPE_NEAR_BNO: + error = xfs_alloc_ag_vextent_near(args); + break; + case XFS_ALLOCTYPE_THIS_BNO: + error = xfs_alloc_ag_vextent_exact(args); + break; + default: + ASSERT(0); + /* NOTREACHED */ + } + if (error) + return error; + /* + * If the allocation worked, need to change the agf structure + * (and log it), and the superblock. + */ + if (args->agbno != NULLAGBLOCK) { + xfs_agf_t *agf; /* allocation group freelist header */ +#ifdef XFS_ALLOC_TRACE + xfs_mount_t *mp = args->mp; +#endif + long slen = (long)args->len; + + ASSERT(args->len >= args->minlen && args->len <= args->maxlen); + ASSERT(!(args->wasfromfl) || !args->isfl); + ASSERT(args->agbno % args->alignment == 0); + if (!(args->wasfromfl)) { + + agf = XFS_BUF_TO_AGF(args->agbp); + INT_MOD(agf->agf_freeblks, ARCH_CONVERT, -(args->len)); + xfs_trans_agblocks_delta(args->tp, + -((long)(args->len))); + args->pag->pagf_freeblks -= args->len; + ASSERT(INT_GET(agf->agf_freeblks, ARCH_CONVERT) + <= INT_GET(agf->agf_length, ARCH_CONVERT)); + TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); + xfs_alloc_log_agf(args->tp, args->agbp, + XFS_AGF_FREEBLKS); + /* search the busylist for these blocks */ + xfs_alloc_search_busy(args->tp, args->agno, + args->agbno, args->len); + } + if (!args->isfl) + xfs_trans_mod_sb(args->tp, + args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS : + XFS_TRANS_SB_FDBLOCKS, -slen); + XFS_STATS_INC(xfsstats.xs_allocx); + XFS_STATS_ADD(xfsstats.xs_allocb, args->len); + } + return 0; +} + +/* + * Allocate a variable extent at exactly agno/bno. + * Extent's length (returned in *len) will be between minlen and maxlen, + * and of the form k * prod + mod unless there's nothing that large. + * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it. + */ +STATIC int /* error */ +xfs_alloc_ag_vextent_exact( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */ + xfs_btree_cur_t *cnt_cur;/* by count btree cursor */ + xfs_agblock_t end; /* end of allocated extent */ + int error; + xfs_agblock_t fbno; /* start block of found extent */ + xfs_agblock_t fend; /* end block of found extent */ + xfs_extlen_t flen; /* length of found extent */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent_exact"; +#endif + int i; /* success/failure of operation */ + xfs_agblock_t maxend; /* end of maximal extent */ + xfs_agblock_t minend; /* end of minimal extent */ + xfs_extlen_t rlen; /* length of returned extent */ + + ASSERT(args->alignment == 1); + /* + * Allocate/initialize a cursor for the by-number freespace btree. + */ + bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_BNO, 0, 0); + /* + * Lookup bno and minlen in the btree (minlen is irrelevant, really). + * Look for the closest free block <= bno, it must contain bno + * if any free block does. + */ + if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i))) + goto error0; + if (!i) { + /* + * Didn't find it, return null. + */ + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + args->agbno = NULLAGBLOCK; + return 0; + } + /* + * Grab the freespace record. 
+ */ + if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + ASSERT(fbno <= args->agbno); + minend = args->agbno + args->minlen; + maxend = args->agbno + args->maxlen; + fend = fbno + flen; + /* + * Give up if the freespace isn't long enough for the minimum request. + */ + if (fend < minend) { + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + args->agbno = NULLAGBLOCK; + return 0; + } + /* + * End of extent will be smaller of the freespace end and the + * maximal requested end. + */ + end = XFS_AGBLOCK_MIN(fend, maxend); + /* + * Fix the length according to mod and prod if given. + */ + args->len = end - args->agbno; + xfs_alloc_fix_len(args); + if (!xfs_alloc_fix_minleft(args)) { + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + return 0; + } + rlen = args->len; + ASSERT(args->agbno + rlen <= fend); + end = args->agbno + rlen; + /* + * We are allocating agbno for rlen [agbno .. end] + * Allocate/initialize a cursor for the by-size btree. + */ + cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_CNT, 0, 0); + ASSERT(args->agbno + args->len <= + INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT)); + if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, + args->agbno, args->len, XFSA_FIXUP_BNO_OK))) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); + goto error0; + } + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + TRACE_ALLOC("normal", args); + args->wasfromfl = 0; + return 0; + +error0: + xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); + TRACE_ALLOC("error", args); + return error; +} + +/* + * Allocate a variable extent near bno in the allocation group agno. + * Extent's length (returned in len) will be between minlen and maxlen, + * and of the form k * prod + mod unless there's nothing that large. + * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. + */ +STATIC int /* error */ +xfs_alloc_ag_vextent_near( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */ + xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */ + xfs_btree_cur_t *cnt_cur; /* cursor for count btree */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent_near"; +#endif + xfs_agblock_t gtbno; /* start bno of right side entry */ + xfs_agblock_t gtbnoa; /* aligned ... */ + xfs_extlen_t gtdiff; /* difference to right side entry */ + xfs_extlen_t gtlen; /* length of right side entry */ + xfs_extlen_t gtlena; /* aligned ... */ + xfs_agblock_t gtnew; /* useful start bno of right side */ + int error; /* error code */ + int i; /* result code, temporary */ + int j; /* result code, temporary */ + xfs_agblock_t ltbno; /* start bno of left side entry */ + xfs_agblock_t ltbnoa; /* aligned ... */ + xfs_extlen_t ltdiff; /* difference to left side entry */ + /*REFERENCED*/ + xfs_agblock_t ltend; /* end bno of left side entry */ + xfs_extlen_t ltlen; /* length of left side entry */ + xfs_extlen_t ltlena; /* aligned ... */ + xfs_agblock_t ltnew; /* useful start bno of left side */ + xfs_extlen_t rlen; /* length of returned extent */ +#if defined(DEBUG) && defined(__KERNEL__) + /* + * Randomly don't execute the first algorithm. 
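/*
 * Illustrative sketch, not part of the patch: the length decision made by
 * xfs_alloc_ag_vextent_exact above.  The found free extent must start at
 * or before the wanted block and reach at least minlen blocks past it;
 * the allocation is then capped at maxlen or at the end of the free
 * extent, whichever comes first.  Plain unsigned ints stand in for the
 * xfs_agblock_t/xfs_extlen_t types.
 */
static unsigned int                     /* allocated length, 0 if impossible */
exact_fit_len(
        unsigned int fbno,      /* start of the found free extent */
        unsigned int flen,      /* length of the found free extent */
        unsigned int agbno,     /* wanted starting block */
        unsigned int minlen,    /* minimum acceptable length */
        unsigned int maxlen)    /* maximum wanted length */
{
        unsigned int fend = fbno + flen;        /* end of the free extent */
        unsigned int minend = agbno + minlen;   /* smallest acceptable end */
        unsigned int maxend = agbno + maxlen;   /* largest wanted end */

        if (fbno > agbno || fend < minend)
                return 0;                       /* cannot start exactly here */
        return (fend < maxend ? fend : maxend) - agbno;
}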
+ */ + static int seed; /* randomizing seed value */ + int dofirst; /* set to do first algorithm */ + timespec_t now; /* current time */ + + if (!seed) { + nanotime(&now); + seed = (int)now.tv_sec ^ (int)now.tv_nsec; + } + dofirst = random() & 1; +#endif + /* + * Get a cursor for the by-size btree. + */ + cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_CNT, 0, 0); + ltlen = 0; + bno_cur_lt = bno_cur_gt = NULL; + /* + * See if there are any free extents as big as maxlen. + */ + if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i))) + goto error0; + /* + * If none, then pick up the last entry in the tree unless the + * tree is empty. + */ + if (!i) { + if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, <bno, + <len, &i))) + goto error0; + if (i == 0 || ltlen == 0) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + return 0; + } + ASSERT(i == 1); + } + args->wasfromfl = 0; + /* + * First algorithm. + * If the requested extent is large wrt the freespaces available + * in this a.g., then the cursor will be pointing to a btree entry + * near the right edge of the tree. If it's in the last btree leaf + * block, then we just examine all the entries in that block + * that are big enough, and pick the best one. + * This is written as a while loop so we can break out of it, + * but we never loop back to the top. + */ + while (xfs_btree_islastblock(cnt_cur, 0)) { + xfs_extlen_t bdiff; + int besti=0; + xfs_extlen_t blen=0; + xfs_agblock_t bnew=0; + +#if defined(DEBUG) && defined(__KERNEL__) + if (!dofirst) + break; +#endif + /* + * Start from the entry that lookup found, sequence through + * all larger free blocks. If we're actually pointing at a + * record smaller than maxlen, go to the start of this block, + * and skip all those smaller than minlen. + */ + if (ltlen || args->alignment > 1) { + cnt_cur->bc_ptrs[0] = 1; + do { + if ((error = xfs_alloc_get_rec(cnt_cur, <bno, + <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (ltlen >= args->minlen) + break; + if ((error = xfs_alloc_increment(cnt_cur, 0, &i))) + goto error0; + } while (i); + ASSERT(ltlen >= args->minlen); + if (!i) + break; + } + i = cnt_cur->bc_ptrs[0]; + for (j = 1, blen = 0, bdiff = 0; + !error && j && (blen < args->maxlen || bdiff > 0); + error = xfs_alloc_increment(cnt_cur, 0, &j)) { + /* + * For each entry, decide if it's better than + * the previous best entry. + */ + if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (!xfs_alloc_compute_aligned(ltbno, ltlen, + args->alignment, args->minlen, + <bnoa, <lena)) + continue; + args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); + xfs_alloc_fix_len(args); + ASSERT(args->len >= args->minlen); + if (args->len < blen) + continue; + ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, + args->alignment, ltbno, ltlen, <new); + if (ltnew != NULLAGBLOCK && + (args->len > blen || ltdiff < bdiff)) { + bdiff = ltdiff; + bnew = ltnew; + blen = args->len; + besti = cnt_cur->bc_ptrs[0]; + } + } + /* + * It didn't work. We COULD be in a case where + * there's a good record somewhere, so try again. + */ + if (blen == 0) + break; + /* + * Point at the best entry, and retrieve it again. 
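/*
 * Illustrative sketch, not part of the patch: the alignment trimming that
 * the loop above depends on.  It models how the results of
 * xfs_alloc_compute_aligned are used here (an aligned start plus the usable
 * length that survives rounding the start up); the real helper is defined
 * elsewhere in xfs_alloc.c and may differ in detail.
 */
static int                              /* 1 if the trimmed extent is usable */
align_extent(
        unsigned int bno,       /* start of the raw free extent */
        unsigned int len,       /* length of the raw free extent */
        unsigned int alignment, /* required start alignment, non-zero */
        unsigned int minlen,    /* minimum usable length */
        unsigned int *abno,     /* out: aligned start */
        unsigned int *alen)     /* out: usable length after alignment */
{
        unsigned int skew = (alignment - bno % alignment) % alignment;

        *abno = bno + skew;
        *alen = len > skew ? len - skew : 0;
        return *alen >= minlen;
}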
+ */ + cnt_cur->bc_ptrs[0] = besti; + if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + ltend = ltbno + ltlen; + ASSERT(ltend <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT)); + args->len = blen; + if (!xfs_alloc_fix_minleft(args)) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + TRACE_ALLOC("nominleft", args); + return 0; + } + blen = args->len; + /* + * We are allocating starting at bnew for blen blocks. + */ + args->agbno = bnew; + ASSERT(bnew >= ltbno); + ASSERT(bnew + blen <= ltend); + /* + * Set up a cursor for the by-bno tree. + */ + bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, + args->agbp, args->agno, XFS_BTNUM_BNO, 0, 0); + /* + * Fix up the btree entries. + */ + if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, + ltlen, bnew, blen, XFSA_FIXUP_CNT_OK))) + goto error0; + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); + TRACE_ALLOC("first", args); + return 0; + } + /* + * Second algorithm. + * Search in the by-bno tree to the left and to the right + * simultaneously, until in each case we find a space big enough, + * or run into the edge of the tree. When we run into the edge, + * we deallocate that cursor. + * If both searches succeed, we compare the two spaces and pick + * the better one. + * With alignment, it's possible for both to fail; the upper + * level algorithm that picks allocation groups for allocations + * is not supposed to do this. + */ + /* + * Allocate and initialize the cursor for the leftward search. + */ + bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_BNO, 0, 0); + /* + * Lookup <= bno to find the leftward search's starting point. + */ + if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i))) + goto error0; + if (!i) { + /* + * Didn't find anything; use this cursor for the rightward + * search. + */ + bno_cur_gt = bno_cur_lt; + bno_cur_lt = 0; + } + /* + * Found something. Duplicate the cursor for the rightward search. + */ + else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt))) + goto error0; + /* + * Increment the cursor, so we will point at the entry just right + * of the leftward entry if any, or to the leftmost entry. + */ + if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i))) + goto error0; + if (!i) { + /* + * It failed, there are no rightward entries. + */ + xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + /* + * Loop going left with the leftward cursor, right with the + * rightward cursor, until either both directions give up or + * we find an entry at least as big as minlen. 
+ */ + do { + if (bno_cur_lt) { + if ((error = xfs_alloc_get_rec(bno_cur_lt, <bno, <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (xfs_alloc_compute_aligned(ltbno, ltlen, + args->alignment, args->minlen, + <bnoa, <lena)) + break; + if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i))) + goto error0; + if (!i) { + xfs_btree_del_cursor(bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + } + } + if (bno_cur_gt) { + if ((error = xfs_alloc_get_rec(bno_cur_gt, >bno, >len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (xfs_alloc_compute_aligned(gtbno, gtlen, + args->alignment, args->minlen, + >bnoa, >lena)) + break; + if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i))) + goto error0; + if (!i) { + xfs_btree_del_cursor(bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + } + } while (bno_cur_lt || bno_cur_gt); + /* + * Got both cursors still active, need to find better entry. + */ + if (bno_cur_lt && bno_cur_gt) { + /* + * Left side is long enough, look for a right side entry. + */ + if (ltlena >= args->minlen) { + /* + * Fix up the length. + */ + args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); + xfs_alloc_fix_len(args); + rlen = args->len; + ltdiff = xfs_alloc_compute_diff(args->agbno, rlen, + args->alignment, ltbno, ltlen, <new); + /* + * Not perfect. + */ + if (ltdiff) { + /* + * Look until we find a better one, run out of + * space, or run off the end. + */ + while (bno_cur_lt && bno_cur_gt) { + if ((error = xfs_alloc_get_rec( + bno_cur_gt, >bno, + >len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + xfs_alloc_compute_aligned(gtbno, gtlen, + args->alignment, args->minlen, + >bnoa, >lena); + /* + * The left one is clearly better. + */ + if (gtbnoa >= args->agbno + ltdiff) { + xfs_btree_del_cursor( + bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + break; + } + /* + * If we reach a big enough entry, + * compare the two and pick the best. + */ + if (gtlena >= args->minlen) { + args->len = + XFS_EXTLEN_MIN(gtlena, + args->maxlen); + xfs_alloc_fix_len(args); + rlen = args->len; + gtdiff = xfs_alloc_compute_diff( + args->agbno, rlen, + args->alignment, + gtbno, gtlen, >new); + /* + * Right side is better. + */ + if (gtdiff < ltdiff) { + xfs_btree_del_cursor( + bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + } + /* + * Left side is better. + */ + else { + xfs_btree_del_cursor( + bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + break; + } + /* + * Fell off the right end. + */ + if ((error = xfs_alloc_increment( + bno_cur_gt, 0, &i))) + goto error0; + if (!i) { + xfs_btree_del_cursor( + bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + break; + } + } + } + /* + * The left side is perfect, trash the right side. + */ + else { + xfs_btree_del_cursor(bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + } + /* + * It's the right side that was found first, look left. + */ + else { + /* + * Fix up the length. + */ + args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen); + xfs_alloc_fix_len(args); + rlen = args->len; + gtdiff = xfs_alloc_compute_diff(args->agbno, rlen, + args->alignment, gtbno, gtlen, >new); + /* + * Right side entry isn't perfect. + */ + if (gtdiff) { + /* + * Look until we find a better one, run out of + * space, or run off the end. 
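/*
 * Illustrative sketch, not part of the patch: the second algorithm above,
 * reduced to an array.  Walk outwards from the wanted block in a table of
 * free extents sorted by starting block, stop on each side at the first
 * extent of at least minlen blocks, and keep whichever side lands nearer
 * the wanted block.  The kernel code interleaves the two walks with btree
 * cursors and also honours alignment; struct free_rec is a hypothetical
 * in-memory stand-in.
 */
struct free_rec {
        unsigned int    bno;    /* starting block */
        unsigned int    len;    /* length in blocks */
};

static int                              /* index of chosen record, -1 if none */
near_search(
        const struct free_rec *recs,    /* records sorted by bno */
        int nrecs,              /* number of records */
        int start,              /* greatest index with recs[start].bno <= wanted */
        unsigned int wanted,    /* block to allocate near */
        unsigned int minlen)    /* minimum acceptable length */
{
        int lt = start;         /* leftward cursor */
        int gt = start + 1;     /* rightward cursor */

        while (lt >= 0 && recs[lt].len < minlen)
                lt--;                   /* keep looking further left */
        while (gt < nrecs && recs[gt].len < minlen)
                gt++;                   /* keep looking further right */
        if (lt < 0 && gt >= nrecs)
                return -1;              /* neither direction found anything */
        if (lt < 0)
                return gt;
        if (gt >= nrecs)
                return lt;
        /* both sides found something: the smaller distance wins */
        return (recs[gt].bno - wanted) < (wanted - recs[lt].bno) ? gt : lt;
}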
+ */ + while (bno_cur_lt && bno_cur_gt) { + if ((error = xfs_alloc_get_rec( + bno_cur_lt, <bno, + <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + xfs_alloc_compute_aligned(ltbno, ltlen, + args->alignment, args->minlen, + <bnoa, <lena); + /* + * The right one is clearly better. + */ + if (ltbnoa <= args->agbno - gtdiff) { + xfs_btree_del_cursor( + bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + break; + } + /* + * If we reach a big enough entry, + * compare the two and pick the best. + */ + if (ltlena >= args->minlen) { + args->len = XFS_EXTLEN_MIN( + ltlena, args->maxlen); + xfs_alloc_fix_len(args); + rlen = args->len; + ltdiff = xfs_alloc_compute_diff( + args->agbno, rlen, + args->alignment, + ltbno, ltlen, <new); + /* + * Left side is better. + */ + if (ltdiff < gtdiff) { + xfs_btree_del_cursor( + bno_cur_gt, + XFS_BTREE_NOERROR); + bno_cur_gt = NULL; + } + /* + * Right side is better. + */ + else { + xfs_btree_del_cursor( + bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + } + break; + } + /* + * Fell off the left end. + */ + if ((error = xfs_alloc_decrement( + bno_cur_lt, 0, &i))) + goto error0; + if (!i) { + xfs_btree_del_cursor(bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + break; + } + } + } + /* + * The right side is perfect, trash the left side. + */ + else { + xfs_btree_del_cursor(bno_cur_lt, + XFS_BTREE_NOERROR); + bno_cur_lt = NULL; + } + } + } + /* + * If we couldn't get anything, give up. + */ + if (bno_cur_lt == NULL && bno_cur_gt == NULL) { + TRACE_ALLOC("neither", args); + args->agbno = NULLAGBLOCK; + return 0; + } + /* + * At this point we have selected a freespace entry, either to the + * left or to the right. If it's on the right, copy all the + * useful variables to the "left" set so we only have one + * copy of this code. + */ + if (bno_cur_gt) { + bno_cur_lt = bno_cur_gt; + bno_cur_gt = NULL; + ltbno = gtbno; + ltbnoa = gtbnoa; + ltlen = gtlen; + ltlena = gtlena; + j = 1; + } else + j = 0; + /* + * Fix up the length and compute the useful address. + */ + ltend = ltbno + ltlen; + args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); + xfs_alloc_fix_len(args); + if (!xfs_alloc_fix_minleft(args)) { + TRACE_ALLOC("nominleft", args); + xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + return 0; + } + rlen = args->len; + (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno, + ltlen, <new); + ASSERT(ltnew >= ltbno); + ASSERT(ltnew + rlen <= ltend); + ASSERT(ltnew + rlen <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT)); + args->agbno = ltnew; + if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, + ltnew, rlen, XFSA_FIXUP_BNO_OK))) + goto error0; + TRACE_ALLOC(j ? "gt" : "lt", args); + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); + return 0; + + error0: + TRACE_ALLOC("error", args); + if (cnt_cur != NULL) + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); + if (bno_cur_lt != NULL) + xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR); + if (bno_cur_gt != NULL) + xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR); + return error; +} + +/* + * Allocate a variable extent anywhere in the allocation group agno. + * Extent's length (returned in len) will be between minlen and maxlen, + * and of the form k * prod + mod unless there's nothing that large. + * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. 
+ */ +STATIC int /* error */ +xfs_alloc_ag_vextent_size( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_btree_cur_t *bno_cur; /* cursor for bno btree */ + xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */ + int error; /* error result */ + xfs_agblock_t fbno; /* start of found freespace */ + xfs_extlen_t flen; /* length of found freespace */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent_size"; +#endif + int i; /* temp status variable */ + xfs_agblock_t rbno; /* returned block number */ + xfs_extlen_t rlen; /* length of returned extent */ + + /* + * Allocate and initialize a cursor for the by-size btree. + */ + cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_CNT, 0, 0); + bno_cur = NULL; + /* + * Look for an entry >= maxlen+alignment-1 blocks. + */ + if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, + args->maxlen + args->alignment - 1, &i))) + goto error0; + /* + * If none, then pick up the last entry in the tree unless the + * tree is empty. + */ + if (!i) { + if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno, + &flen, &i))) + goto error0; + if (i == 0 || flen == 0) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + TRACE_ALLOC("noentry", args); + return 0; + } + ASSERT(i == 1); + } + /* + * There's a freespace as big as maxlen+alignment-1, get it. + */ + else { + if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + /* + * In the first case above, we got the last entry in the + * by-size btree. Now we check to see if the space hits maxlen + * once aligned; if not, we search left for something better. + * This can't happen in the second case above. + */ + xfs_alloc_compute_aligned(fbno, flen, args->alignment, args->minlen, + &rbno, &rlen); + rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); + XFS_WANT_CORRUPTED_GOTO(rlen == 0 || + (rlen <= flen && rbno + rlen <= fbno + flen), error0); + if (rlen < args->maxlen) { + xfs_agblock_t bestfbno; + xfs_extlen_t bestflen; + xfs_agblock_t bestrbno; + xfs_extlen_t bestrlen; + + bestrlen = rlen; + bestrbno = rbno; + bestflen = flen; + bestfbno = fbno; + for (;;) { + if ((error = xfs_alloc_decrement(cnt_cur, 0, &i))) + goto error0; + if (i == 0) + break; + if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, + &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (flen < bestrlen) + break; + xfs_alloc_compute_aligned(fbno, flen, args->alignment, + args->minlen, &rbno, &rlen); + rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); + XFS_WANT_CORRUPTED_GOTO(rlen == 0 || + (rlen <= flen && rbno + rlen <= fbno + flen), + error0); + if (rlen > bestrlen) { + bestrlen = rlen; + bestrbno = rbno; + bestflen = flen; + bestfbno = fbno; + if (rlen == args->maxlen) + break; + } + } + if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen, + &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + rlen = bestrlen; + rbno = bestrbno; + flen = bestflen; + fbno = bestfbno; + } + args->wasfromfl = 0; + /* + * Fix up the length. + */ + args->len = rlen; + xfs_alloc_fix_len(args); + if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) { + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + TRACE_ALLOC("nominleft", args); + args->agbno = NULLAGBLOCK; + return 0; + } + rlen = args->len; + XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0); + /* + * Allocate and initialize a cursor for the by-block tree. 
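/*
 * Illustrative sketch, not part of the patch: the by-size strategy above,
 * over a plain array of extent lengths sorted ascending.  Prefer the
 * smallest extent that is already big enough; failing that, settle for the
 * largest one.  The kernel code asks for maxlen + alignment - 1 to leave
 * room for alignment and rescans leftwards for the best aligned fit, which
 * this simplified version omits.
 */
static int                              /* index of chosen length, -1 if none */
size_search(
        const unsigned int *lens,       /* free-extent lengths, ascending */
        int nlens,              /* number of entries */
        unsigned int maxlen)    /* length we would like to get */
{
        int i;

        if (nlens == 0)
                return -1;
        for (i = 0; i < nlens; i++)
                if (lens[i] >= maxlen)
                        return i;       /* smallest extent that is big enough */
        return nlens - 1;               /* nothing big enough: take the largest */
}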
+ */ + bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_BNO, 0, 0); + if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, + rbno, rlen, XFSA_FIXUP_CNT_OK))) + goto error0; + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + cnt_cur = bno_cur = NULL; + args->len = rlen; + args->agbno = rbno; + XFS_WANT_CORRUPTED_GOTO( + args->agbno + args->len <= + INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT), + error0); + TRACE_ALLOC("normal", args); + return 0; + +error0: + TRACE_ALLOC("error", args); + if (cnt_cur) + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); + if (bno_cur) + xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Deal with the case where only small freespaces remain. + * Either return the contents of the last freespace record, + * or allocate space from the freelist if there is nothing in the tree. + */ +STATIC int /* error */ +xfs_alloc_ag_vextent_small( + xfs_alloc_arg_t *args, /* allocation argument structure */ + xfs_btree_cur_t *ccur, /* by-size cursor */ + xfs_agblock_t *fbnop, /* result block number */ + xfs_extlen_t *flenp, /* result length */ + int *stat) /* status: 0-freelist, 1-normal/none */ +{ + int error; + xfs_agblock_t fbno; + xfs_extlen_t flen; +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_ag_vextent_small"; +#endif + int i; + + if ((error = xfs_alloc_decrement(ccur, 0, &i))) + goto error0; + if (i) { + if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + /* + * Nothing in the btree, try the freelist. Make sure + * to respect minleft even when pulling from the + * freelist. + */ + else if (args->minlen == 1 && args->alignment == 1 && !args->isfl && + (INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_flcount, + ARCH_CONVERT) > args->minleft)) { + if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno))) + goto error0; + if (fbno != NULLAGBLOCK) { + if (args->userdata) { + xfs_buf_t *bp; + + bp = xfs_btree_get_bufs(args->mp, args->tp, + args->agno, fbno, 0); + xfs_trans_binval(args->tp, bp); + } + args->len = 1; + args->agbno = fbno; + XFS_WANT_CORRUPTED_GOTO( + args->agbno + args->len <= + INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, + ARCH_CONVERT), + error0); + args->wasfromfl = 1; + TRACE_ALLOC("freelist", args); + *stat = 0; + return 0; + } + /* + * Nothing in the freelist. + */ + else + flen = 0; + } + /* + * Can't allocate from the freelist for some reason. + */ + else + flen = 0; + /* + * Can't do the allocation, give up. + */ + if (flen < args->minlen) { + args->agbno = NULLAGBLOCK; + TRACE_ALLOC("notenough", args); + flen = 0; + } + *fbnop = fbno; + *flenp = flen; + *stat = 1; + TRACE_ALLOC("normal", args); + return 0; + +error0: + TRACE_ALLOC("error", args); + return error; +} + +/* + * Free the extent starting at agno/bno for length. + */ +STATIC int /* error */ +xfs_free_ag_extent( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* buffer for a.g. 
freelist header */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t bno, /* starting block number */ + xfs_extlen_t len, /* length of extent */ + int isfl) /* set if is freelist blocks - no sb acctg */ +{ + xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */ + xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */ + int error; /* error return value */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_free_ag_extent"; +#endif + xfs_agblock_t gtbno; /* start of right neighbor block */ + xfs_extlen_t gtlen; /* length of right neighbor block */ + int haveleft; /* have a left neighbor block */ + int haveright; /* have a right neighbor block */ + int i; /* temp, result code */ + xfs_agblock_t ltbno; /* start of left neighbor block */ + xfs_extlen_t ltlen; /* length of left neighbor block */ + xfs_mount_t *mp; /* mount point struct for filesystem */ + xfs_agblock_t nbno; /* new starting block of freespace */ + xfs_extlen_t nlen; /* new length of freespace */ + + mp = tp->t_mountp; + /* + * Allocate and initialize a cursor for the by-block btree. + */ + bno_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO, 0, + 0); + cnt_cur = NULL; + /* + * Look for a neighboring block on the left (lower block numbers) + * that is contiguous with this space. + */ + if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft))) + goto error0; + if (haveleft) { + /* + * There is a block to our left. + */ + if ((error = xfs_alloc_get_rec(bno_cur, <bno, <len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * It's not contiguous, though. + */ + if (ltbno + ltlen < bno) + haveleft = 0; + else { + /* + * If this failure happens the request to free this + * space was invalid, it's (partly) already free. + * Very bad. + */ + XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0); + } + } + /* + * Look for a neighboring block on the right (higher block numbers) + * that is contiguous with this space. + */ + if ((error = xfs_alloc_increment(bno_cur, 0, &haveright))) + goto error0; + if (haveright) { + /* + * There is a block to our right. + */ + if ((error = xfs_alloc_get_rec(bno_cur, >bno, >len, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * It's not contiguous, though. + */ + if (bno + len < gtbno) + haveright = 0; + else { + /* + * If this failure happens the request to free this + * space was invalid, it's (partly) already free. + * Very bad. + */ + XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0); + } + } + /* + * Now allocate and initialize a cursor for the by-size tree. + */ + cnt_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT, 0, + 0); + /* + * Have both left and right contiguous neighbors. + * Merge all three into a single free block. + */ + if (haveleft && haveright) { + /* + * Delete the old by-size entry on the left. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_delete(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Delete the old by-size entry on the right. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_delete(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Delete the old by-block entry for the right block. 
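/*
 * Illustrative sketch, not part of the patch: the coalescing arithmetic in
 * xfs_free_ag_extent.  Once the code above has decided whether the freed
 * extent [bno, bno+len) touches a contiguous left and/or right neighbour,
 * the surviving by-block record (nbno, nlen) is simply the union of the
 * contiguous pieces.
 */
static void
coalesce_free(
        unsigned int bno, unsigned int len,     /* extent being freed */
        int haveleft,                           /* left neighbour is contiguous */
        unsigned int ltbno, unsigned int ltlen, /* left neighbour, if any */
        int haveright,                          /* right neighbour is contiguous */
        unsigned int gtlen,                     /* right neighbour length, if any */
        unsigned int *nbno, unsigned int *nlen) /* resulting merged record */
{
        *nbno = haveleft ? ltbno : bno;
        *nlen = len + (haveleft ? ltlen : 0) + (haveright ? gtlen : 0);
}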
+ */ + if ((error = xfs_alloc_delete(bno_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Move the by-block cursor back to the left neighbor. + */ + if ((error = xfs_alloc_decrement(bno_cur, 0, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); +#ifdef DEBUG + /* + * Check that this is the right record: delete didn't + * mangle the cursor. + */ + { + xfs_agblock_t xxbno; + xfs_extlen_t xxlen; + + if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen, + &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO( + i == 1 && xxbno == ltbno && xxlen == ltlen, + error0); + } +#endif + /* + * Update remaining by-block entry to the new, joined block. + */ + nbno = ltbno; + nlen = len + ltlen + gtlen; + if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) + goto error0; + } + /* + * Have only a left contiguous neighbor. + * Merge it together with the new freespace. + */ + else if (haveleft) { + /* + * Delete the old by-size entry on the left. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_delete(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Back up the by-block cursor to the left neighbor, and + * update its length. + */ + if ((error = xfs_alloc_decrement(bno_cur, 0, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + nbno = ltbno; + nlen = len + ltlen; + if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) + goto error0; + } + /* + * Have only a right contiguous neighbor. + * Merge it together with the new freespace. + */ + else if (haveright) { + /* + * Delete the old by-size entry on the right. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_alloc_delete(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Update the starting block and length of the right + * neighbor in the by-block tree. + */ + nbno = bno; + nlen = len + gtlen; + if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) + goto error0; + } + /* + * No contiguous neighbors. + * Insert the new freespace into the by-block tree. + */ + else { + nbno = bno; + nlen = len; + if ((error = xfs_alloc_insert(bno_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + bno_cur = NULL; + /* + * In all cases we need to insert the new freespace in the by-size tree. + */ + if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 0, error0); + if ((error = xfs_alloc_insert(cnt_cur, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); + cnt_cur = NULL; + /* + * Update the freespace totals in the ag and superblock. + */ + { + xfs_agf_t *agf; + xfs_perag_t *pag; /* per allocation group data */ + + agf = XFS_BUF_TO_AGF(agbp); + pag = &mp->m_perag[agno]; + INT_MOD(agf->agf_freeblks, ARCH_CONVERT, len); + xfs_trans_agblocks_delta(tp, len); + pag->pagf_freeblks += len; + XFS_WANT_CORRUPTED_GOTO( + INT_GET(agf->agf_freeblks, ARCH_CONVERT) + <= INT_GET(agf->agf_length, ARCH_CONVERT), + error0); + TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); + if (!isfl) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len); + XFS_STATS_INC(xfsstats.xs_freex); + XFS_STATS_ADD(xfsstats.xs_freeb, len); + } + TRACE_FREE(haveleft ? 
+ (haveright ? "both" : "left") : + (haveright ? "right" : "none"), + agno, bno, len, isfl); + + /* + * Since blocks move to the free list without the coordination + * used in xfs_bmap_finish, we can't allow block to be available + * for reallocation and non-transaction writing (user data) + * until we know that the transaction that moved it to the free + * list is permanently on disk. We track the blocks by declaring + * these blocks as "busy"; the busy list is maintained on a per-ag + * basis and each transaction records which entries should be removed + * when the iclog commits to disk. If a busy block is allocated, + * the iclog is pushed up to the LSN that freed the block. + */ + xfs_alloc_mark_busy(tp, agno, bno, len); + return 0; + + error0: + TRACE_FREE("error", agno, bno, len, isfl); + if (bno_cur) + xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); + if (cnt_cur) + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Visible (exported) allocation/free functions. + * Some of these are used just by xfs_alloc_btree.c and this file. + */ + +/* + * Compute and fill in value of m_ag_maxlevels. + */ +void +xfs_alloc_compute_maxlevels( + xfs_mount_t *mp) /* file system mount structure */ +{ + int level; + uint maxblocks; + uint maxleafents; + int minleafrecs; + int minnoderecs; + + maxleafents = (mp->m_sb.sb_agblocks + 1) / 2; + minleafrecs = mp->m_alloc_mnr[0]; + minnoderecs = mp->m_alloc_mnr[1]; + maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; + for (level = 1; maxblocks > 1; level++) + maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; + mp->m_ag_maxlevels = level; +} + +/* + * Decide whether to use this allocation group for this allocation. + * If so, fix up the btree freelist's size. + */ +STATIC int /* error */ +xfs_alloc_fix_freelist( + xfs_alloc_arg_t *args, /* allocation argument structure */ + int flags) /* XFS_ALLOC_FLAG_... */ +{ + xfs_buf_t *agbp; /* agf buffer pointer */ + xfs_agf_t *agf; /* a.g. freespace structure pointer */ + xfs_buf_t *agflbp;/* agfl buffer pointer */ + xfs_agblock_t bno; /* freelist block */ + xfs_extlen_t delta; /* new blocks needed in freelist */ + int error; /* error result code */ + xfs_extlen_t longest;/* longest extent in allocation group */ + xfs_mount_t *mp; /* file system mount point structure */ + xfs_extlen_t need; /* total blocks needed in freelist */ + xfs_perag_t *pag; /* per-ag information structure */ + xfs_alloc_arg_t targs; /* local allocation arguments */ + xfs_trans_t *tp; /* transaction pointer */ + + mp = args->mp; + + pag = args->pag; + tp = args->tp; + if (!pag->pagf_init) { + if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags, + &agbp))) + return error; + if (!pag->pagf_init) { + args->agbp = NULL; + return 0; + } + } else + agbp = NULL; + + /* If this is a metadata prefered pag and we are user data + * then try somewhere else if we are not being asked to + * try harder at this point + */ + if (pag->pagf_metadata && args->userdata && flags) { + args->agbp = NULL; + return 0; + } + + need = XFS_MIN_FREELIST_PAG(pag, mp); + delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; + /* + * If it looks like there isn't a long enough extent, or enough + * total blocks, reject it. + */ + longest = (pag->pagf_longest > delta) ? 
+ (pag->pagf_longest - delta) : + (pag->pagf_flcount > 0 || pag->pagf_longest > 0); + if (args->minlen + args->alignment + args->minalignslop - 1 > longest || + (args->minleft && + (int)(pag->pagf_freeblks + pag->pagf_flcount - + need - args->total) < + (int)args->minleft)) { + if (agbp) + xfs_trans_brelse(tp, agbp); + args->agbp = NULL; + return 0; + } + /* + * Get the a.g. freespace buffer. + * Can fail if we're not blocking on locks, and it's held. + */ + if (agbp == NULL) { + if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags, + &agbp))) + return error; + if (agbp == NULL) { + args->agbp = NULL; + return 0; + } + } + /* + * Figure out how many blocks we should have in the freelist. + */ + agf = XFS_BUF_TO_AGF(agbp); + need = XFS_MIN_FREELIST(agf, mp); + delta = need > INT_GET(agf->agf_flcount, ARCH_CONVERT) ? + (need - INT_GET(agf->agf_flcount, ARCH_CONVERT)) : 0; + /* + * If there isn't enough total or single-extent, reject it. + */ + longest = INT_GET(agf->agf_longest, ARCH_CONVERT); + longest = (longest > delta) ? (longest - delta) : + (INT_GET(agf->agf_flcount, ARCH_CONVERT) > 0 || longest > 0); + if (args->minlen + args->alignment + args->minalignslop - 1 > longest || + (args->minleft && + (int)(INT_GET(agf->agf_freeblks, ARCH_CONVERT) + + INT_GET(agf->agf_flcount, ARCH_CONVERT) - need - args->total) < + (int)args->minleft)) { + xfs_trans_brelse(tp, agbp); + args->agbp = NULL; + return 0; + } + /* + * Make the freelist shorter if it's too long. + */ + while (INT_GET(agf->agf_flcount, ARCH_CONVERT) > need) { + xfs_buf_t *bp; + + if ((error = xfs_alloc_get_freelist(tp, agbp, &bno))) + return error; + if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1))) + return error; + bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); + xfs_trans_binval(tp, bp); + } + /* + * Initialize the args structure. + */ + targs.tp = tp; + targs.mp = mp; + targs.agbp = agbp; + targs.agno = args->agno; + targs.mod = targs.minleft = targs.wasdel = targs.userdata = + targs.minalignslop = 0; + targs.alignment = targs.minlen = targs.prod = targs.isfl = 1; + targs.type = XFS_ALLOCTYPE_THIS_AG; + targs.pag = pag; + if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp))) + return error; + /* + * Make the freelist longer if it's too short. + */ + while (INT_GET(agf->agf_flcount, ARCH_CONVERT) < need) { + targs.agbno = 0; + targs.maxlen = need - INT_GET(agf->agf_flcount, ARCH_CONVERT); + /* + * Allocate as many blocks as possible at once. + */ + if ((error = xfs_alloc_ag_vextent(&targs))) + return error; + /* + * Stop if we run out. Won't happen if callers are obeying + * the restrictions correctly. Can happen for free calls + * on a completely full ag. + */ + if (targs.agbno == NULLAGBLOCK) + break; + /* + * Put each allocated block on the list. + */ + for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) { + if ((error = xfs_alloc_put_freelist(tp, agbp, agflbp, + bno))) + return error; + } + } + args->agbp = agbp; + return 0; +} + +/* + * Get a block from the freelist. + * Returns with the buffer for the block gotten. + */ +int /* error */ +xfs_alloc_get_freelist( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* buffer containing the agf structure */ + xfs_agblock_t *bnop) /* block address retrieved from freelist */ +{ + xfs_agf_t *agf; /* a.g. freespace structure */ + xfs_agfl_t *agfl; /* a.g. freelist structure */ + xfs_buf_t *agflbp;/* buffer for a.g. 
freelist structure */ + xfs_agblock_t bno; /* block number returned */ + int error; +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_get_freelist"; +#endif + xfs_mount_t *mp; /* mount structure */ + xfs_perag_t *pag; /* per allocation group data */ + + agf = XFS_BUF_TO_AGF(agbp); + /* + * Freelist is empty, give up. + */ + if (INT_ISZERO(agf->agf_flcount, ARCH_CONVERT)) { + *bnop = NULLAGBLOCK; + return 0; + } + /* + * Read the array of free blocks. + */ + mp = tp->t_mountp; + if ((error = xfs_alloc_read_agfl(mp, tp, + INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp))) + return error; + agfl = XFS_BUF_TO_AGFL(agflbp); + /* + * Get the block number and update the data structures. + */ + bno = INT_GET(agfl->agfl_bno[INT_GET(agf->agf_flfirst, ARCH_CONVERT)], ARCH_CONVERT); + INT_MOD(agf->agf_flfirst, ARCH_CONVERT, 1); + xfs_trans_brelse(tp, agflbp); + if (INT_GET(agf->agf_flfirst, ARCH_CONVERT) == XFS_AGFL_SIZE(mp)) + INT_ZERO(agf->agf_flfirst, ARCH_CONVERT); + pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)]; + INT_MOD(agf->agf_flcount, ARCH_CONVERT, -1); + xfs_trans_agflist_delta(tp, -1); + pag->pagf_flcount--; + TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT); + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT); + *bnop = bno; + + /* + * As blocks are freed, they are added to the per-ag busy list + * and remain there until the freeing transaction is committed to + * disk. Now that we have allocated blocks, this list must be + * searched to see if a block is being reused. If one is, then + * the freeing transaction must be pushed to disk NOW by forcing + * to disk all iclogs up that transaction's LSN. + */ + xfs_alloc_search_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); + return 0; +} + +/* + * Log the given fields from the agf structure. + */ +void +xfs_alloc_log_agf( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* buffer for a.g. freelist header */ + int fields) /* mask of fields to be logged (XFS_AGF_...) */ +{ + int first; /* first byte offset */ + int last; /* last byte offset */ + static const short offsets[] = { + offsetof(xfs_agf_t, agf_magicnum), + offsetof(xfs_agf_t, agf_versionnum), + offsetof(xfs_agf_t, agf_seqno), + offsetof(xfs_agf_t, agf_length), + offsetof(xfs_agf_t, agf_roots[0]), + offsetof(xfs_agf_t, agf_levels[0]), + offsetof(xfs_agf_t, agf_flfirst), + offsetof(xfs_agf_t, agf_fllast), + offsetof(xfs_agf_t, agf_flcount), + offsetof(xfs_agf_t, agf_freeblks), + offsetof(xfs_agf_t, agf_longest), + sizeof(xfs_agf_t) + }; + + xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); + xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); +} + +/* + * Interface for inode allocation to force the pag data to be initialized. + */ +int /* error */ +xfs_alloc_pagf_init( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + int flags) /* XFS_ALLOC_FLAGS_... */ +{ + xfs_buf_t *bp; + int error; + + if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp))) + return error; + if (bp) + xfs_trans_brelse(tp, bp); + return 0; +} + +/* + * Put the block on the freelist for the allocation group. + */ +int /* error */ +xfs_alloc_put_freelist( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* buffer for a.g. freelist header */ + xfs_buf_t *agflbp,/* buffer for a.g. free block array */ + xfs_agblock_t bno) /* block being freed */ +{ + xfs_agf_t *agf; /* a.g. 
freespace structure */ + xfs_agfl_t *agfl; /* a.g. free block array */ + xfs_agblock_t *blockp;/* pointer to array entry */ + int error; +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_put_freelist"; +#endif + xfs_mount_t *mp; /* mount structure */ + xfs_perag_t *pag; /* per allocation group data */ + + agf = XFS_BUF_TO_AGF(agbp); + mp = tp->t_mountp; + + if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp, + INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp))) + return error; + agfl = XFS_BUF_TO_AGFL(agflbp); + INT_MOD(agf->agf_fllast, ARCH_CONVERT, 1); + if (INT_GET(agf->agf_fllast, ARCH_CONVERT) == XFS_AGFL_SIZE(mp)) + INT_ZERO(agf->agf_fllast, ARCH_CONVERT); + pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)]; + INT_MOD(agf->agf_flcount, ARCH_CONVERT, 1); + xfs_trans_agflist_delta(tp, 1); + pag->pagf_flcount++; + ASSERT(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp)); + blockp = &agfl->agfl_bno[INT_GET(agf->agf_fllast, ARCH_CONVERT)]; + INT_SET(*blockp, ARCH_CONVERT, bno); + TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); + xfs_trans_log_buf(tp, agflbp, + (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl), + (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl + + sizeof(xfs_agblock_t) - 1)); + return 0; +} + +/* + * Read in the allocation group header (free/alloc section). + */ +int /* error */ +xfs_alloc_read_agf( + xfs_mount_t *mp, /* mount point structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + int flags, /* XFS_ALLOC_FLAG_... */ + xfs_buf_t **bpp) /* buffer for the ag freelist header */ +{ + xfs_agf_t *agf; /* ag freelist header */ + int agf_ok; /* set if agf is consistent */ + xfs_buf_t *bp; /* return value */ + xfs_perag_t *pag; /* per allocation group data */ + int error; + + ASSERT(agno != NULLAGNUMBER); + error = xfs_trans_read_buf( + mp, tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), + (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0U, + &bp); + if (error) + return error; + ASSERT(!bp || !XFS_BUF_GETERROR(bp)); + if (!bp) { + *bpp = NULL; + return 0; + } + /* + * Validate the magic number of the agf block. 
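/*
 * Illustrative sketch, not part of the patch: the circular free-block array
 * driven by xfs_alloc_get_freelist and xfs_alloc_put_freelist above.
 * flfirst is the next slot to hand out, fllast the slot most recently
 * filled, flcount the population, and both indices wrap at the array size.
 * AGFL_SIZE is a stand-in for XFS_AGFL_SIZE(mp).
 */
#define AGFL_SIZE       128             /* hypothetical array size */

struct agfl_sketch {
        unsigned int    bno[AGFL_SIZE]; /* the stored free block numbers */
        unsigned int    flfirst;        /* next block to take */
        unsigned int    fllast;         /* last block added */
        unsigned int    flcount;        /* blocks currently held */
};

static int                              /* 0 on success, -1 if empty */
agfl_get(struct agfl_sketch *fl, unsigned int *bnop)
{
        if (fl->flcount == 0)
                return -1;
        *bnop = fl->bno[fl->flfirst];
        fl->flfirst = (fl->flfirst + 1) % AGFL_SIZE;
        fl->flcount--;
        return 0;
}

static int                              /* 0 on success, -1 if full */
agfl_put(struct agfl_sketch *fl, unsigned int bno)
{
        if (fl->flcount == AGFL_SIZE)
                return -1;
        fl->fllast = (fl->fllast + 1) % AGFL_SIZE;
        fl->bno[fl->fllast] = bno;
        fl->flcount++;
        return 0;
}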
+ */ + agf = XFS_BUF_TO_AGF(bp); + agf_ok = + INT_GET(agf->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC && + XFS_AGF_GOOD_VERSION( + INT_GET(agf->agf_versionnum, ARCH_CONVERT)) && + INT_GET(agf->agf_freeblks, ARCH_CONVERT) <= + INT_GET(agf->agf_length, ARCH_CONVERT) && + INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) && + INT_GET(agf->agf_fllast, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) && + INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp); + if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF, + XFS_RANDOM_ALLOC_READ_AGF))) { + XFS_CORRUPTION_ERROR("xfs_alloc_read_agf", + XFS_ERRLEVEL_LOW, mp, agf); + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); + } + pag = &mp->m_perag[agno]; + if (!pag->pagf_init) { + pag->pagf_freeblks = INT_GET(agf->agf_freeblks, ARCH_CONVERT); + pag->pagf_flcount = INT_GET(agf->agf_flcount, ARCH_CONVERT); + pag->pagf_longest = INT_GET(agf->agf_longest, ARCH_CONVERT); + pag->pagf_levels[XFS_BTNUM_BNOi] = + INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT); + pag->pagf_levels[XFS_BTNUM_CNTi] = + INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT); + spinlock_init(&pag->pagb_lock, "xfspagb"); + pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS * + sizeof(xfs_perag_busy_t), KM_SLEEP); + pag->pagf_init = 1; + } +#ifdef DEBUG + else if (!XFS_FORCED_SHUTDOWN(mp)) { + ASSERT(pag->pagf_freeblks == INT_GET(agf->agf_freeblks, ARCH_CONVERT)); + ASSERT(pag->pagf_flcount == INT_GET(agf->agf_flcount, ARCH_CONVERT)); + ASSERT(pag->pagf_longest == INT_GET(agf->agf_longest, ARCH_CONVERT)); + ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] == + INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT)); + ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] == + INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT)); + } +#endif + XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF); + *bpp = bp; + return 0; +} + +/* + * Allocate an extent (variable-size). + * Depending on the allocation type, we either look in a single allocation + * group or loop over the allocation groups to find the result. + */ +int /* error */ +xfs_alloc_vextent( + xfs_alloc_arg_t *args) /* allocation argument structure */ +{ + xfs_agblock_t agsize; /* allocation group size */ + int error; + int flags; /* XFS_ALLOC_FLAG_... locking flags */ +#ifdef XFS_ALLOC_TRACE + static char fname[] = "xfs_alloc_vextent"; +#endif + xfs_extlen_t minleft;/* minimum left value, temp copy */ + xfs_mount_t *mp; /* mount structure pointer */ + xfs_agnumber_t sagno; /* starting allocation group number */ + xfs_alloctype_t type; /* input allocation type */ + int bump_rotor = 0; + int no_min = 0; + + mp = args->mp; + type = args->otype = args->type; + args->agbno = NULLAGBLOCK; + /* + * Just fix this up, for the case where the last a.g. is shorter + * (or there's only one a.g.) and the caller couldn't easily figure + * that out (xfs_bmap_alloc). 
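/*
 * Illustrative sketch, not part of the patch: the shape of the sanity
 * checks applied when the AGF header is read in above.  The buffer is
 * rejected unless the magic number matches and the counters are internally
 * consistent.  The struct below is a simplified stand-in for the on-disk
 * xfs_agf_t; AGF_MAGIC mirrors the real "XAGF" magic value.
 */
#define AGF_MAGIC       0x58414746      /* "XAGF" */

struct agf_sketch {
        unsigned int    magicnum;       /* magic number */
        unsigned int    length;         /* size of the a.g. in blocks */
        unsigned int    freeblks;       /* free blocks tracked by the btrees */
        unsigned int    flfirst;        /* freelist head index */
        unsigned int    fllast;         /* freelist tail index */
        unsigned int    flcount;        /* freelist population */
};

static int                              /* 1 if the header looks sane */
agf_sane(const struct agf_sketch *agf, unsigned int agfl_size)
{
        return agf->magicnum == AGF_MAGIC &&
               agf->freeblks <= agf->length &&
               agf->flfirst < agfl_size &&
               agf->fllast < agfl_size &&
               agf->flcount <= agfl_size;
}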
+ */ + agsize = mp->m_sb.sb_agblocks; + if (args->maxlen > agsize) + args->maxlen = agsize; + if (args->alignment == 0) + args->alignment = 1; + ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount); + ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize); + ASSERT(args->minlen <= args->maxlen); + ASSERT(args->minlen <= agsize); + ASSERT(args->mod < args->prod); + if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount || + XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize || + args->minlen > args->maxlen || args->minlen > agsize || + args->mod >= args->prod) { + args->fsbno = NULLFSBLOCK; + TRACE_ALLOC("badargs", args); + return 0; + } + minleft = args->minleft; + + switch (type) { + case XFS_ALLOCTYPE_THIS_AG: + case XFS_ALLOCTYPE_NEAR_BNO: + case XFS_ALLOCTYPE_THIS_BNO: + /* + * These three force us into a single a.g. + */ + args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); + down_read(&mp->m_peraglock); + args->pag = &mp->m_perag[args->agno]; + args->minleft = 0; + error = xfs_alloc_fix_freelist(args, 0); + args->minleft = minleft; + if (error) { + TRACE_ALLOC("nofix", args); + goto error0; + } + if (!args->agbp) { + up_read(&mp->m_peraglock); + TRACE_ALLOC("noagbp", args); + break; + } + args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); + if ((error = xfs_alloc_ag_vextent(args))) + goto error0; + up_read(&mp->m_peraglock); + break; + case XFS_ALLOCTYPE_START_BNO: + /* + * Try near allocation first, then anywhere-in-ag after + * the first a.g. fails. + */ + if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) && + (mp->m_flags & XFS_MOUNT_32BITINODES)) { + args->fsbno = XFS_AGB_TO_FSB(mp, mp->m_agfrotor, 0); + bump_rotor = 1; + } + args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); + args->type = XFS_ALLOCTYPE_NEAR_BNO; + /* FALLTHROUGH */ + case XFS_ALLOCTYPE_ANY_AG: + case XFS_ALLOCTYPE_START_AG: + case XFS_ALLOCTYPE_FIRST_AG: + /* + * Rotate through the allocation groups looking for a winner. + */ + if (type == XFS_ALLOCTYPE_ANY_AG) { + /* + * Start with the last place we left off. + */ + args->agno = sagno = mp->m_agfrotor; + args->type = XFS_ALLOCTYPE_THIS_AG; + flags = XFS_ALLOC_FLAG_TRYLOCK; + } else if (type == XFS_ALLOCTYPE_FIRST_AG) { + /* + * Start with allocation group given by bno. + */ + args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); + args->type = XFS_ALLOCTYPE_THIS_AG; + sagno = 0; + flags = 0; + } else { + if (type == XFS_ALLOCTYPE_START_AG) + args->type = XFS_ALLOCTYPE_THIS_AG; + /* + * Start with the given allocation group. + */ + args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno); + flags = XFS_ALLOC_FLAG_TRYLOCK; + } + /* + * Loop over allocation groups twice; first time with + * trylock set, second time without. + */ + down_read(&mp->m_peraglock); + for (;;) { + args->pag = &mp->m_perag[args->agno]; + if (no_min) args->minleft = 0; + error = xfs_alloc_fix_freelist(args, flags); + args->minleft = minleft; + if (error) { + TRACE_ALLOC("nofix", args); + goto error0; + } + /* + * If we get a buffer back then the allocation will fly. + */ + if (args->agbp) { + if ((error = xfs_alloc_ag_vextent(args))) + goto error0; + break; + } + TRACE_ALLOC("loopfailed", args); + /* + * Didn't work, figure out the next iteration. + */ + if (args->agno == sagno && + type == XFS_ALLOCTYPE_START_BNO) + args->type = XFS_ALLOCTYPE_THIS_AG; + if (++(args->agno) == mp->m_sb.sb_agcount) + args->agno = 0; + /* + * Reached the starting a.g., must either be done + * or switch to non-trylock mode. 
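/*
 * Illustrative sketch, not part of the patch: the retry policy of the
 * rotor loop above, unrolled into three explicit sweeps.  Every allocation
 * group is tried starting from a remembered position; the first sweep may
 * skip busy groups (trylock), the second blocks for the locks, and the
 * last one also drops the minleft reservation.  try_ag is a hypothetical
 * stand-in for the fix-freelist plus per-ag allocation sequence.
 */
static int                              /* chosen a.g. number, -1 if all failed */
rotor_alloc(
        unsigned int agcount,   /* number of allocation groups */
        unsigned int rotor,     /* a.g. to start the sweep at */
        int (*try_ag)(unsigned int agno, int trylock, int relax_minleft))
{
        int pass;
        unsigned int i, agno;

        for (pass = 0; pass < 3; pass++) {
                int trylock = (pass == 0);      /* only the first sweep skips */
                int relax = (pass == 2);        /* last sweep ignores minleft */

                for (i = 0; i < agcount; i++) {
                        agno = (rotor + i) % agcount;
                        if (try_ag(agno, trylock, relax))
                                return (int)agno;
                }
        }
        return -1;
}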
+ */ + if (args->agno == sagno) { + if (no_min == 1) { + args->agbno = NULLAGBLOCK; + TRACE_ALLOC("allfailed", args); + break; + } + if (flags == 0) { + no_min = 1; + } else { + flags = 0; + if (type == XFS_ALLOCTYPE_START_BNO) { + args->agbno = XFS_FSB_TO_AGBNO(mp, + args->fsbno); + args->type = XFS_ALLOCTYPE_NEAR_BNO; + } + } + } + } + up_read(&mp->m_peraglock); + if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) + mp->m_agfrotor = (args->agno + 1) % mp->m_sb.sb_agcount; + break; + default: + ASSERT(0); + /* NOTREACHED */ + } + if (args->agbno == NULLAGBLOCK) + args->fsbno = NULLFSBLOCK; + else { + args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno); +#ifdef DEBUG + ASSERT(args->len >= args->minlen); + ASSERT(args->len <= args->maxlen); + ASSERT(args->agbno % args->alignment == 0); + XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), + args->len); +#endif + } + return 0; +error0: + up_read(&mp->m_peraglock); + return error; +} + +/* + * Free an extent. + * Just break up the extent address and hand off to xfs_free_ag_extent + * after fixing up the freelist. + */ +int /* error */ +xfs_free_extent( + xfs_trans_t *tp, /* transaction pointer */ + xfs_fsblock_t bno, /* starting block number of extent */ + xfs_extlen_t len) /* length of extent */ +{ +#ifdef DEBUG + xfs_agf_t *agf; /* a.g. freespace header */ +#endif + xfs_alloc_arg_t args; /* allocation argument structure */ + int error; + + ASSERT(len != 0); + args.tp = tp; + args.mp = tp->t_mountp; + args.agno = XFS_FSB_TO_AGNO(args.mp, bno); + ASSERT(args.agno < args.mp->m_sb.sb_agcount); + args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); + args.alignment = 1; + args.minlen = args.minleft = args.minalignslop = 0; + down_read(&args.mp->m_peraglock); + args.pag = &args.mp->m_perag[args.agno]; + if ((error = xfs_alloc_fix_freelist(&args, 0))) + goto error0; +#ifdef DEBUG + ASSERT(args.agbp != NULL); + agf = XFS_BUF_TO_AGF(args.agbp); + ASSERT(args.agbno + len <= INT_GET(agf->agf_length, ARCH_CONVERT)); +#endif + error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, + len, 0); +error0: + up_read(&args.mp->m_peraglock); + return error; +} + + +/* + * AG Busy list management + * The busy list contains block ranges that have been freed but whose + * transacations have not yet hit disk. If any block listed in a busy + * list is reused, the transaction that freed it must be forced to disk + * before continuing to use the block. + * + * xfs_alloc_mark_busy - add to the per-ag busy list + * xfs_alloc_clear_busy - remove an item from the per-ag busy list + */ +void +xfs_alloc_mark_busy(xfs_trans_t *tp, + xfs_agnumber_t agno, + xfs_agblock_t bno, + xfs_extlen_t len) +{ + xfs_mount_t *mp; + xfs_perag_busy_t *bsy; + int n; + SPLDECL(s); + + mp = tp->t_mountp; + s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); + + /* search pagb_list for an open slot */ + for (bsy = mp->m_perag[agno].pagb_list, n = 0; + n < XFS_PAGB_NUM_SLOTS; + bsy++, n++) { + if (bsy->busy_tp == NULL) { + break; + } + } + + if (n < XFS_PAGB_NUM_SLOTS) { + bsy = &mp->m_perag[agno].pagb_list[n]; + mp->m_perag[agno].pagb_count++; + TRACE_BUSY("xfs_alloc_mark_busy", "got", agno, bno, len, n, tp); + bsy->busy_start = bno; + bsy->busy_length = len; + bsy->busy_tp = tp; + xfs_trans_add_busy(tp, agno, n); + } else { + TRACE_BUSY("xfs_alloc_mark_busy", "FULL", agno, bno, len, -1, tp); + /* + * The busy list is full! 
Since it is now not possible to + * track the free block, make this a synchronous transaction + * to insure that the block is not reused before this + * transaction commits. + */ + xfs_trans_set_sync(tp); + } + + mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); +} + +void +xfs_alloc_clear_busy(xfs_trans_t *tp, + xfs_agnumber_t agno, + int idx) +{ + xfs_mount_t *mp; + xfs_perag_busy_t *list; + SPLDECL(s); + + mp = tp->t_mountp; + + s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); + list = mp->m_perag[agno].pagb_list; + + ASSERT(idx < XFS_PAGB_NUM_SLOTS); + if (list[idx].busy_tp == tp) { + TRACE_UNBUSY("xfs_alloc_clear_busy", "found", agno, idx, tp); + list[idx].busy_tp = NULL; + mp->m_perag[agno].pagb_count--; + } else { + TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp); + } + + mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); +} + + +/* + * returns non-zero if any of (agno,bno):len is in a busy list + */ +int +xfs_alloc_search_busy(xfs_trans_t *tp, + xfs_agnumber_t agno, + xfs_agblock_t bno, + xfs_extlen_t len) +{ + xfs_mount_t *mp; + xfs_perag_busy_t *bsy; + int n; + xfs_agblock_t uend, bend; + xfs_lsn_t lsn; + int cnt; + SPLDECL(s); + + mp = tp->t_mountp; + + s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); + cnt = mp->m_perag[agno].pagb_count; + + uend = bno + len - 1; + + /* search pagb_list for this slot, skipping open slots */ + for (bsy = mp->m_perag[agno].pagb_list, n = 0; + cnt; bsy++, n++) { + + /* + * (start1,length1) within (start2, length2) + */ + if (bsy->busy_tp != NULL) { + bend = bsy->busy_start + bsy->busy_length - 1; + if ((bno > bend) || + (uend < bsy->busy_start)) { + cnt--; + } else { + TRACE_BUSYSEARCH("xfs_alloc_search_busy", + "found1", agno, bno, len, n, + tp); + break; + } + } + } + + /* + * If a block was found, force the log through the LSN of the + * transaction that freed the block + */ + if (cnt) { + TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, n, tp); + lsn = bsy->busy_tp->t_commit_lsn; + mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); + xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); + } else { + TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, n, tp); + n = -1; + mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); + } + + return n; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_alloc.h linux.22-ac2/fs/xfs/xfs_alloc.h --- linux.vanilla/fs/xfs/xfs_alloc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_alloc.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
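/*
 * Illustrative sketch, not part of the patch above.  It shows the intended
 * use of the busy-list routines just added, assuming a transaction "ftp"
 * that has freed the extent (agno, bno, len) and a later allocator
 * transaction "atp" that wants to hand the same blocks out again.  The
 * routine and field names come from the code above; the caller itself is
 * hypothetical.
 */
static void example_busy_list_protocol(xfs_trans_t *ftp, xfs_trans_t *atp,
					xfs_agnumber_t agno,
					xfs_agblock_t bno, xfs_extlen_t len)
{
	/*
	 * Record the freed extent as busy.  The slot index is remembered in
	 * the freeing transaction via xfs_trans_add_busy(), and
	 * xfs_alloc_clear_busy() drops it once that transaction commits.
	 */
	xfs_alloc_mark_busy(ftp, agno, bno, len);

	/*
	 * Before reusing blocks, an allocator checks for overlap with the
	 * busy list.  If the extent is still busy, xfs_alloc_search_busy()
	 * forces the log up to the freeing transaction's commit LSN, so the
	 * free is safely on disk before the blocks are reallocated.
	 */
	(void)xfs_alloc_search_busy(atp, agno, bno, len);
}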
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ALLOC_H__ +#define __XFS_ALLOC_H__ + +struct xfs_buf; +struct xfs_mount; +struct xfs_perag; +struct xfs_trans; + +/* + * Freespace allocation types. Argument to xfs_alloc_[v]extent. + */ +typedef enum xfs_alloctype +{ + XFS_ALLOCTYPE_ANY_AG, /* allocate anywhere, use rotor */ + XFS_ALLOCTYPE_FIRST_AG, /* ... start at ag 0 */ + XFS_ALLOCTYPE_START_AG, /* anywhere, start in this a.g. */ + XFS_ALLOCTYPE_THIS_AG, /* anywhere in this a.g. */ + XFS_ALLOCTYPE_START_BNO, /* near this block else anywhere */ + XFS_ALLOCTYPE_NEAR_BNO, /* in this a.g. and near this block */ + XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */ +} xfs_alloctype_t; + +/* + * Flags for xfs_alloc_fix_freelist. + */ +#define XFS_ALLOC_FLAG_TRYLOCK 0x00000001 /* use trylock for buffer locking */ + +/* + * Argument structure for xfs_alloc routines. + * This is turned into a structure to avoid having 20 arguments passed + * down several levels of the stack. + */ +typedef struct xfs_alloc_arg { + struct xfs_trans *tp; /* transaction pointer */ + struct xfs_mount *mp; /* file system mount point */ + struct xfs_buf *agbp; /* buffer for a.g. freelist header */ + struct xfs_perag *pag; /* per-ag struct for this agno */ + xfs_fsblock_t fsbno; /* file system block number */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_agblock_t agbno; /* allocation group-relative block # */ + xfs_extlen_t minlen; /* minimum size of extent */ + xfs_extlen_t maxlen; /* maximum size of extent */ + xfs_extlen_t mod; /* mod value for extent size */ + xfs_extlen_t prod; /* prod value for extent size */ + xfs_extlen_t minleft; /* min blocks must be left after us */ + xfs_extlen_t total; /* total blocks needed in xaction */ + xfs_extlen_t alignment; /* align answer to multiple of this */ + xfs_extlen_t minalignslop; /* slop for minlen+alignment calcs */ + xfs_extlen_t len; /* output: actual size of extent */ + xfs_alloctype_t type; /* allocation type XFS_ALLOCTYPE_... */ + xfs_alloctype_t otype; /* original allocation type */ + char wasdel; /* set if allocation was prev delayed */ + char wasfromfl; /* set if allocation is from freelist */ + char isfl; /* set if is freelist blocks - !actg */ + char userdata; /* set if this is user data */ +} xfs_alloc_arg_t; + +/* + * Defines for userdata + */ +#define XFS_ALLOC_USERDATA 1 /* allocation is for user data*/ +#define XFS_ALLOC_INITIAL_USER_DATA 2 /* special case start of file */ + + +#ifdef __KERNEL__ + +/* + * Types for alloc tracing. + */ +#define XFS_ALLOC_KTRACE_ALLOC 1 +#define XFS_ALLOC_KTRACE_FREE 2 +#define XFS_ALLOC_KTRACE_MODAGF 3 +#define XFS_ALLOC_KTRACE_BUSY 4 +#define XFS_ALLOC_KTRACE_UNBUSY 5 +#define XFS_ALLOC_KTRACE_BUSYSEARCH 6 + + +/* + * Allocation tracing buffer size. + */ +#define XFS_ALLOC_TRACE_SIZE 4096 + +#ifdef XFS_ALL_TRACE +#define XFS_ALLOC_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_ALLOC_TRACE +#endif + +/* + * Prototypes for visible xfs_alloc.c routines + */ + +/* + * Compute and fill in value of m_ag_maxlevels. 
+ */ +void +xfs_alloc_compute_maxlevels( + struct xfs_mount *mp); /* file system mount structure */ + +/* + * Get a block from the freelist. + * Returns with the buffer for the block gotten. + */ +int /* error */ +xfs_alloc_get_freelist( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_buf *agbp, /* buffer containing the agf structure */ + xfs_agblock_t *bnop); /* block address retrieved from freelist */ + +/* + * Log the given fields from the agf structure. + */ +void +xfs_alloc_log_agf( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_buf *bp, /* buffer for a.g. freelist header */ + int fields);/* mask of fields to be logged (XFS_AGF_...) */ + +/* + * Interface for inode allocation to force the pag data to be initialized. + */ +int /* error */ +xfs_alloc_pagf_init( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + int flags); /* XFS_ALLOC_FLAGS_... */ + +/* + * Put the block on the freelist for the allocation group. + */ +int /* error */ +xfs_alloc_put_freelist( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_buf *agbp, /* buffer for a.g. freelist header */ + struct xfs_buf *agflbp,/* buffer for a.g. free block array */ + xfs_agblock_t bno); /* block being freed */ + +/* + * Read in the allocation group header (free/alloc section). + */ +int /* error */ +xfs_alloc_read_agf( + struct xfs_mount *mp, /* mount point structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + int flags, /* XFS_ALLOC_FLAG_... */ + struct xfs_buf **bpp); /* buffer for the ag freelist header */ + +/* + * Allocate an extent (variable-size). + */ +int /* error */ +xfs_alloc_vextent( + xfs_alloc_arg_t *args); /* allocation argument structure */ + +/* + * Free an extent. + */ +int /* error */ +xfs_free_extent( + struct xfs_trans *tp, /* transaction pointer */ + xfs_fsblock_t bno, /* starting block number of extent */ + xfs_extlen_t len); /* length of extent */ + +void +xfs_alloc_mark_busy(xfs_trans_t *tp, + xfs_agnumber_t agno, + xfs_agblock_t bno, + xfs_extlen_t len); + +void +xfs_alloc_clear_busy(xfs_trans_t *tp, + xfs_agnumber_t ag, + int idx); + + +#endif /* __KERNEL__ */ + +#endif /* __XFS_ALLOC_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_arch.h linux.22-ac2/fs/xfs/xfs_arch.h --- linux.vanilla/fs/xfs/xfs_arch.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_arch.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
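/*
 * Illustrative sketch, not part of the patch above.  A minimal,
 * hypothetical caller of the interfaces declared in xfs_alloc.h: fill in
 * an xfs_alloc_arg_t for a "start near this block" allocation, call
 * xfs_alloc_vextent(), and later return the extent with xfs_free_extent().
 * The particular field values are examples only; xfs_alloc_vextent()
 * rejects the request unless mod < prod and minlen <= maxlen, and hint_bno
 * is assumed to be a valid filesystem block number.
 */
static int example_alloc_and_free(xfs_trans_t *tp, xfs_mount_t *mp,
				  xfs_fsblock_t hint_bno)
{
	xfs_alloc_arg_t	args;
	int		error;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	args.fsbno = hint_bno;			/* allocate near this block */
	args.type = XFS_ALLOCTYPE_START_BNO;	/* fall back to other a.g.s */
	args.minlen = 1;
	args.maxlen = 16;			/* example length, in blocks */
	args.prod = 1;				/* keep mod (0) < prod */
	args.alignment = 1;
	args.total = args.maxlen;		/* blocks needed in the trans */

	if ((error = xfs_alloc_vextent(&args)))
		return error;
	if (args.fsbno == NULLFSBLOCK)		/* nothing suitable found */
		return ENOSPC;

	/* ... use the extent [args.fsbno, args.fsbno + args.len) ... */

	/* Typically done from a later transaction; shown here for shape. */
	return xfs_free_extent(tp, args.fsbno, args.len);
}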
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ARCH_H__ +#define __XFS_ARCH_H__ + +#ifndef XFS_BIG_FILESYSTEMS +#error XFS_BIG_FILESYSTEMS must be defined true or false +#endif + +#ifdef __KERNEL__ + +#include + +#ifdef __LITTLE_ENDIAN +#define __BYTE_ORDER __LITTLE_ENDIAN +#endif +#ifdef __BIG_ENDIAN +#define __BYTE_ORDER __BIG_ENDIAN +#endif + +#endif /* __KERNEL__ */ + +/* do we need conversion? */ + +#define ARCH_NOCONVERT 1 +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define ARCH_CONVERT 0 +#else +#define ARCH_CONVERT ARCH_NOCONVERT +#endif + +/* generic swapping macros */ + +#ifndef HAVE_SWABMACROS +#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var)))) +#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var)))) +#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var)))) +#endif + +#define INT_SWAP(type, var) \ + ((sizeof(type) == 8) ? INT_SWAP64(type,var) : \ + ((sizeof(type) == 4) ? INT_SWAP32(type,var) : \ + ((sizeof(type) == 2) ? INT_SWAP16(type,var) : \ + (var)))) + +#define INT_SWAP_UNALIGNED_32(from,to) \ + { \ + ((__u8*)(to))[0] = ((__u8*)(from))[3]; \ + ((__u8*)(to))[1] = ((__u8*)(from))[2]; \ + ((__u8*)(to))[2] = ((__u8*)(from))[1]; \ + ((__u8*)(to))[3] = ((__u8*)(from))[0]; \ + } + +#define INT_SWAP_UNALIGNED_64(from,to) \ + { \ + INT_SWAP_UNALIGNED_32( ((__u8*)(from)) + 4, ((__u8*)(to))); \ + INT_SWAP_UNALIGNED_32( ((__u8*)(from)), ((__u8*)(to)) + 4); \ + } + +/* + * get and set integers from potentially unaligned locations + */ + +#define INT_GET_UNALIGNED_16_LE(pointer) \ + ((__u16)((((__u8*)(pointer))[0] ) | (((__u8*)(pointer))[1] << 8 ))) +#define INT_GET_UNALIGNED_16_BE(pointer) \ + ((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1]))) +#define INT_SET_UNALIGNED_16_LE(pointer,value) \ + { \ + ((__u8*)(pointer))[0] = (((value) ) & 0xff); \ + ((__u8*)(pointer))[1] = (((value) >> 8) & 0xff); \ + } +#define INT_SET_UNALIGNED_16_BE(pointer,value) \ + { \ + ((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \ + ((__u8*)(pointer))[1] = (((value) ) & 0xff); \ + } + +#define INT_GET_UNALIGNED_32_LE(pointer) \ + ((__u32)((((__u8*)(pointer))[0] ) | (((__u8*)(pointer))[1] << 8 ) \ + |(((__u8*)(pointer))[2] << 16) | (((__u8*)(pointer))[3] << 24))) +#define INT_GET_UNALIGNED_32_BE(pointer) \ + ((__u32)((((__u8*)(pointer))[0] << 24) | (((__u8*)(pointer))[1] << 16) \ + |(((__u8*)(pointer))[2] << 8) | (((__u8*)(pointer))[3] ))) + +#define INT_GET_UNALIGNED_64_LE(pointer) \ + (((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer))+4)) << 32 ) \ + |((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer)) )) )) +#define INT_GET_UNALIGNED_64_BE(pointer) \ + (((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer)) )) << 32 ) \ + |((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer))+4)) )) + +/* + * now pick the right ones for our MACHINE ARCHITECTURE + */ + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define INT_GET_UNALIGNED_16(pointer) INT_GET_UNALIGNED_16_LE(pointer) +#define INT_SET_UNALIGNED_16(pointer,value) INT_SET_UNALIGNED_16_LE(pointer,value) +#define INT_GET_UNALIGNED_32(pointer) INT_GET_UNALIGNED_32_LE(pointer) 
+#define INT_GET_UNALIGNED_64(pointer) INT_GET_UNALIGNED_64_LE(pointer) +#else +#define INT_GET_UNALIGNED_16(pointer) INT_GET_UNALIGNED_16_BE(pointer) +#define INT_SET_UNALIGNED_16(pointer,value) INT_SET_UNALIGNED_16_BE(pointer,value) +#define INT_GET_UNALIGNED_32(pointer) INT_GET_UNALIGNED_32_BE(pointer) +#define INT_GET_UNALIGNED_64(pointer) INT_GET_UNALIGNED_64_BE(pointer) +#endif + +/* define generic INT_ macros */ + +#define INT_GET(reference,arch) \ + (((arch) == ARCH_NOCONVERT) \ + ? \ + (reference) \ + : \ + INT_SWAP((reference),(reference)) \ + ) + +/* does not return a value */ +#define INT_SET(reference,arch,valueref) \ + (__builtin_constant_p(valueref) ? \ + (void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \ + (void)( \ + ((reference) = (valueref)), \ + ( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \ + ) \ + ) + +/* does not return a value */ +#define INT_MOD_EXPR(reference,arch,code) \ + (void)(((arch) == ARCH_NOCONVERT) \ + ? \ + ((reference) code) \ + : \ + ( \ + (reference) = INT_GET((reference),arch) , \ + ((reference) code), \ + INT_SET(reference, arch, reference) \ + ) \ + ) + +/* does not return a value */ +#define INT_MOD(reference,arch,delta) \ + (void)( \ + INT_MOD_EXPR(reference,arch,+=(delta)) \ + ) + +/* + * INT_COPY - copy a value between two locations with the + * _same architecture_ but _potentially different sizes_ + * + * if the types of the two parameters are equal or they are + * in native architecture, a simple copy is done + * + * otherwise, architecture conversions are done + * + */ + +/* does not return a value */ +#define INT_COPY(dst,src,arch) \ + (void)( \ + ((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \ + ? \ + ((dst) = (src)) \ + : \ + INT_SET(dst, arch, INT_GET(src, arch)) \ + ) + +/* + * INT_XLATE - copy a value in either direction between two locations + * with different architectures + * + * dir < 0 - copy from memory to buffer (native to arch) + * dir > 0 - copy from buffer to memory (arch to native) + */ + +/* does not return a value */ +#define INT_XLATE(buf,mem,dir,arch) {\ + ASSERT(dir); \ + if (dir>0) { \ + (mem)=INT_GET(buf, arch); \ + } else { \ + INT_SET(buf, arch, mem); \ + } \ +} + +#define INT_ISZERO(reference,arch) \ + ((reference) == 0) + +#define INT_ZERO(reference,arch) \ + ((reference) = 0) + +#define INT_GET_UNALIGNED_16_ARCH(pointer,arch) \ + ( ((arch) == ARCH_NOCONVERT) \ + ? \ + (INT_GET_UNALIGNED_16(pointer)) \ + : \ + (INT_GET_UNALIGNED_16_BE(pointer)) \ + ) +#define INT_SET_UNALIGNED_16_ARCH(pointer,value,arch) \ + if ((arch) == ARCH_NOCONVERT) { \ + INT_SET_UNALIGNED_16(pointer,value); \ + } else { \ + INT_SET_UNALIGNED_16_BE(pointer,value); \ + } + +#define DIRINO4_GET_ARCH(pointer,arch) \ + ( ((arch) == ARCH_NOCONVERT) \ + ? \ + (INT_GET_UNALIGNED_32(pointer)) \ + : \ + (INT_GET_UNALIGNED_32_BE(pointer)) \ + ) + +#if XFS_BIG_FILESYSTEMS +#define DIRINO_GET_ARCH(pointer,arch) \ + ( ((arch) == ARCH_NOCONVERT) \ + ? 
\ + (INT_GET_UNALIGNED_64(pointer)) \ + : \ + (INT_GET_UNALIGNED_64_BE(pointer)) \ + ) +#else +/* MACHINE ARCHITECTURE dependent */ +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define DIRINO_GET_ARCH(pointer,arch) \ + DIRINO4_GET_ARCH((((__u8*)pointer)+4),arch) +#else +#define DIRINO_GET_ARCH(pointer,arch) \ + DIRINO4_GET_ARCH(pointer,arch) +#endif +#endif + +#define DIRINO_COPY_ARCH(from,to,arch) \ + if ((arch) == ARCH_NOCONVERT) { \ + memcpy(to,from,sizeof(xfs_ino_t)); \ + } else { \ + INT_SWAP_UNALIGNED_64(from,to); \ + } +#define DIRINO4_COPY_ARCH(from,to,arch) \ + if ((arch) == ARCH_NOCONVERT) { \ + memcpy(to,(((__u8*)from+4)),sizeof(xfs_dir2_ino4_t)); \ + } else { \ + INT_SWAP_UNALIGNED_32(from,to); \ + } + +#endif /* __XFS_ARCH_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_attr.c linux.22-ac2/fs/xfs/xfs_attr.c --- linux.vanilla/fs/xfs/xfs_attr.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_attr.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,2336 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "xfs_error.h" +#include "xfs_bit.h" +#include "xfs_quota.h" +#include "xfs_rw.h" +#include "xfs_trans_space.h" + +/* + * xfs_attr.c + * + * Provide the external interfaces to manage attribute lists. + */ + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Internal routines when attribute list fits inside the inode. 
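/*
 * Illustrative sketch, not part of the patch.  A stand-alone userspace
 * program restating what the INT_GET_UNALIGNED_16/32_{LE,BE} macros in
 * xfs_arch.h above do: assemble an integer byte by byte, so the result is
 * the same regardless of host byte order or pointer alignment.  uint8_t
 * and friends stand in for the kernel's __u8/__u16/__u32 types.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_unaligned_16_le(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint16_t get_unaligned_16_be(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_unaligned_32_be(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* 5 bytes, so the 32-bit read at offset 1 is deliberately unaligned */
	uint8_t buf[5] = { 0x12, 0x34, 0x56, 0x78, 0x9a };

	printf("16 LE at 0: 0x%04x\n", get_unaligned_16_le(buf));	/* 0x3412 */
	printf("16 BE at 0: 0x%04x\n", get_unaligned_16_be(buf));	/* 0x1234 */
	printf("32 BE at 1: 0x%08x\n",
	       (unsigned)get_unaligned_32_be(buf + 1));			/* 0x3456789a */
	return 0;
}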
+ */ +STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args); + +/* + * Internal routines when attribute list is one block. + */ +STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args); +STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args); +STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context); + +/* + * Internal routines when attribute list is more than one block. + */ +STATIC int xfs_attr_node_addname(xfs_da_args_t *args); +STATIC int xfs_attr_node_removename(xfs_da_args_t *args); +STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context); +STATIC int xfs_attr_fillstate(xfs_da_state_t *state); +STATIC int xfs_attr_refillstate(xfs_da_state_t *state); + +/* + * Routines to manipulate out-of-line attribute values. + */ +STATIC int xfs_attr_rmtval_get(xfs_da_args_t *args); +STATIC int xfs_attr_rmtval_set(xfs_da_args_t *args); +STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args); + +#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */ +#define ATTR_RMTVALUE_TRANSBLKS 8 /* max # of blks in a transaction */ + +#if defined(DEBUG) +ktrace_t *xfs_attr_trace_buf; +#endif + + + +/*======================================================================== + * Overall external interface routines. + *========================================================================*/ + +/*ARGSUSED*/ +int /* error */ +xfs_attr_get(bhv_desc_t *bdp, char *name, char *value, int *valuelenp, + int flags, struct cred *cred) +{ + xfs_da_args_t args; + int error; + int namelen; + xfs_inode_t *ip = XFS_BHVTOI(bdp); + + if (!name) + return EINVAL; + ASSERT(MAXNAMELEN-1 <= 0xff); /* length is stored in uint8 */ + namelen = strlen(name); + if (namelen >= MAXNAMELEN) + return EFAULT; /* match IRIX behaviour */ + XFS_STATS_INC(xfsstats.xs_attr_get); + + if (XFS_IFORK_Q(ip) == 0) + return ENOATTR; + + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return (EIO); + + /* + * Do we answer them, or ignore them? + */ + xfs_ilock(ip, XFS_ILOCK_SHARED); + if ((error = xfs_iaccess(XFS_BHVTOI(bdp), IREAD, cred))) { + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return(XFS_ERROR(error)); + } + + /* + * Fill in the arg structure for this request. + */ + memset((char *)&args, 0, sizeof(args)); + args.name = name; + args.namelen = namelen; + args.value = value; + args.valuelen = *valuelenp; + args.flags = flags; + args.hashval = xfs_da_hashname(args.name, args.namelen); + args.dp = ip; + args.whichfork = XFS_ATTR_FORK; + args.trans = NULL; + + /* + * Decide on what work routines to call based on the inode size. + */ + if (XFS_IFORK_Q(ip) == 0 || + (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_anextents == 0)) { + error = XFS_ERROR(ENOATTR); + } else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { + error = xfs_attr_shortform_getvalue(&args); + } else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) { + error = xfs_attr_leaf_get(&args); + } else { + error = xfs_attr_node_get(&args); + } + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + /* + * Return the number of bytes in the value to the caller. 
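/*
 * Illustrative sketch, not part of the patch.  The three-way dispatch that
 * xfs_attr_get() above performs, pulled out into a hypothetical helper for
 * clarity.  The same shape recurs in the set, remove and list paths below:
 * no attribute fork at all, shortform data held inside the inode, a single
 * leaf block, or a full B-tree of leaves.
 */
static int example_attr_fork_dispatch(xfs_inode_t *ip, xfs_da_args_t *args)
{
	if (XFS_IFORK_Q(ip) == 0 ||
	    (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
	     ip->i_d.di_anextents == 0))
		return XFS_ERROR(ENOATTR);		/* no attributes at all */
	if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
		return xfs_attr_shortform_getvalue(args); /* inline in inode */
	if (xfs_bmap_one_block(ip, XFS_ATTR_FORK))
		return xfs_attr_leaf_get(args);		/* one leaf block */
	return xfs_attr_node_get(args);			/* B-tree of leaves */
}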
+ */ + *valuelenp = args.valuelen; + + if (error == EEXIST) + error = 0; + return(error); +} + +/*ARGSUSED*/ +int /* error */ +xfs_attr_set(bhv_desc_t *bdp, char *name, char *value, int valuelen, int flags, + struct cred *cred) +{ + xfs_da_args_t args; + xfs_inode_t *dp; + xfs_fsblock_t firstblock; + xfs_bmap_free_t flist; + int error, err2, committed; + int local, size; + uint nblks; + xfs_mount_t *mp; + int rsvd = (flags & ATTR_ROOT) != 0; + int namelen; + + ASSERT(MAXNAMELEN-1 <= 0xff); /* length is stored in uint8 */ + namelen = strlen(name); + if (namelen >= MAXNAMELEN) + return EFAULT; /* match irix behaviour */ + + XFS_STATS_INC(xfsstats.xs_attr_set); + /* + * Do we answer them, or ignore them? + */ + dp = XFS_BHVTOI(bdp); + mp = dp->i_mount; + if (XFS_FORCED_SHUTDOWN(mp)) + return (EIO); + + xfs_ilock(dp, XFS_ILOCK_SHARED); + if ((error = xfs_iaccess(dp, IWRITE, cred))) { + xfs_iunlock(dp, XFS_ILOCK_SHARED); + return(XFS_ERROR(error)); + } + xfs_iunlock(dp, XFS_ILOCK_SHARED); + + /* + * Attach the dquots to the inode. + */ + if ((error = XFS_QM_DQATTACH(mp, dp, 0))) + return (error); + + /* + * If the inode doesn't have an attribute fork, add one. + * (inode must not be locked when we call this routine) + */ + if (XFS_IFORK_Q(dp) == 0) { + error = xfs_bmap_add_attrfork(dp, rsvd); + if (error) + return(error); + } + + /* + * Fill in the arg structure for this request. + */ + memset((char *)&args, 0, sizeof(args)); + args.name = name; + args.namelen = namelen; + args.value = value; + args.valuelen = valuelen; + args.flags = flags; + args.hashval = xfs_da_hashname(args.name, args.namelen); + args.dp = dp; + args.firstblock = &firstblock; + args.flist = &flist; + args.whichfork = XFS_ATTR_FORK; + args.oknoent = 1; + + /* Determine space new attribute will use, and if it will be inline + * or out of line. + */ + size = xfs_attr_leaf_newentsize(&args, mp->m_sb.sb_blocksize, &local); + + nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK); + if (local) { + if (size > (mp->m_sb.sb_blocksize >> 1)) { + /* Double split possible */ + nblks <<= 1; + } + } else { + uint dblocks = XFS_B_TO_FSB(mp, valuelen); + /* Out of line attribute, cannot double split, but make + * room for the attribute value itself. + */ + nblks += dblocks; + nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK); + } + + /* Size is now blocks for attribute data */ + args.total = nblks; + + /* + * Start our first transaction of the day. + * + * All future transactions during this code must be "chained" off + * this one via the trans_dup() call. All transactions will contain + * the inode, and the inode will always be marked with trans_ihold(). + * Since the inode will be locked in all transactions, we must log + * the inode in every transaction to let it float upward through + * the log. + */ + args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_SET); + + /* + * Root fork attributes can use reserved data blocks for this + * operation if necessary + */ + + if (rsvd) + args.trans->t_flags |= XFS_TRANS_RESERVE; + + if ((error = xfs_trans_reserve(args.trans, (uint) nblks, + XFS_ATTRSET_LOG_RES(mp, nblks), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_ATTRSET_LOG_COUNT))) { + xfs_trans_cancel(args.trans, 0); + return(error); + } + xfs_ilock(dp, XFS_ILOCK_EXCL); + + error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0, + rsvd ? 
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : + XFS_QMOPT_RES_REGBLKS); + if (error) { + xfs_iunlock(dp, XFS_ILOCK_EXCL); + xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES); + return (error); + } + + xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args.trans, dp); + + /* + * If the attribute list is non-existant or a shortform list, + * upgrade it to a single-leaf-block attribute list. + */ + if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) || + ((dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) && + (dp->i_d.di_anextents == 0))) { + + /* + * Build initial attribute list (if required). + */ + if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) + (void)xfs_attr_shortform_create(&args); + + /* + * Try to add the attr to the attribute list in + * the inode. + */ + error = xfs_attr_shortform_addname(&args); + if (error != ENOSPC) { + /* + * Commit the shortform mods, and we're done. + * NOTE: this is also the error path (EEXIST, etc). + */ + ASSERT(args.trans != NULL); + + /* + * If this is a synchronous mount, make sure that + * the transaction goes to disk before returning + * to the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(args.trans); + } + err2 = xfs_trans_commit(args.trans, + XFS_TRANS_RELEASE_LOG_RES, + NULL); + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + /* + * Hit the inode change time. + */ + if (!error && (flags & ATTR_KERNOTIME) == 0) { + xfs_ichgtime(dp, XFS_ICHGTIME_CHG); + } + return(error == 0 ? err2 : error); + } + + /* + * It won't fit in the shortform, transform to a leaf block. + * GROT: another possible req'mt for a double-split btree op. + */ + XFS_BMAP_INIT(args.flist, args.firstblock); + error = xfs_attr_shortform_to_leaf(&args); + if (!error) { + error = xfs_bmap_finish(&args.trans, args.flist, + *args.firstblock, &committed); + } + if (error) { + ASSERT(committed); + args.trans = NULL; + xfs_bmap_cancel(&flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args.trans, dp); + } + + /* + * Commit the leaf transformation. We'll need another (linked) + * transaction to add the new attribute to the leaf. + */ + if ((error = xfs_attr_rolltrans(&args.trans, dp))) + goto out; + + } + + if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { + error = xfs_attr_leaf_addname(&args); + } else { + error = xfs_attr_node_addname(&args); + } + if (error) { + goto out; + } + + /* + * If this is a synchronous mount, make sure that the + * transaction goes to disk before returning to the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(args.trans); + } + + /* + * Commit the last in the sequence of transactions. + */ + xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE); + error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES, + NULL); + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + /* + * Hit the inode change time. + */ + if (!error && (flags & ATTR_KERNOTIME) == 0) { + xfs_ichgtime(dp, XFS_ICHGTIME_CHG); + } + + return(error); + +out: + if (args.trans) + xfs_trans_cancel(args.trans, + XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); + xfs_iunlock(dp, XFS_ILOCK_EXCL); + return(error); +} + +/* + * Generic handler routine to remove a name from an attribute list. + * Transitions attribute list from Btree to shortform as necessary. 
+ */ +/*ARGSUSED*/ +int /* error */ +xfs_attr_remove(bhv_desc_t *bdp, char *name, int flags, struct cred *cred) +{ + xfs_da_args_t args; + xfs_inode_t *dp; + xfs_fsblock_t firstblock; + xfs_bmap_free_t flist; + int error; + xfs_mount_t *mp; + int namelen; + + ASSERT(MAXNAMELEN-1<=0xff); /* length is stored in uint8 */ + namelen = strlen(name); + if (namelen>=MAXNAMELEN) + return EFAULT; /* match irix behaviour */ + + XFS_STATS_INC(xfsstats.xs_attr_remove); + + /* + * Do we answer them, or ignore them? + */ + dp = XFS_BHVTOI(bdp); + mp = dp->i_mount; + if (XFS_FORCED_SHUTDOWN(mp)) + return (EIO); + + xfs_ilock(dp, XFS_ILOCK_SHARED); + if ((error = xfs_iaccess(dp, IWRITE, cred))) { + xfs_iunlock(dp, XFS_ILOCK_SHARED); + return(XFS_ERROR(error)); + } else if (XFS_IFORK_Q(dp) == 0 || + (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && + dp->i_d.di_anextents == 0)) { + xfs_iunlock(dp, XFS_ILOCK_SHARED); + return(XFS_ERROR(ENOATTR)); + } + xfs_iunlock(dp, XFS_ILOCK_SHARED); + + /* + * Fill in the arg structure for this request. + */ + memset((char *)&args, 0, sizeof(args)); + args.name = name; + args.namelen = namelen; + args.flags = flags; + args.hashval = xfs_da_hashname(args.name, args.namelen); + args.dp = dp; + args.firstblock = &firstblock; + args.flist = &flist; + args.total = 0; + args.whichfork = XFS_ATTR_FORK; + + /* + * Attach the dquots to the inode. + */ + if ((error = XFS_QM_DQATTACH(mp, dp, 0))) + return (error); + + /* + * Start our first transaction of the day. + * + * All future transactions during this code must be "chained" off + * this one via the trans_dup() call. All transactions will contain + * the inode, and the inode will always be marked with trans_ihold(). + * Since the inode will be locked in all transactions, we must log + * the inode in every transaction to let it float upward through + * the log. + */ + args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM); + + /* + * Root fork attributes can use reserved data blocks for this + * operation if necessary + */ + + if (flags & ATTR_ROOT) + args.trans->t_flags |= XFS_TRANS_RESERVE; + + if ((error = xfs_trans_reserve(args.trans, + XFS_ATTRRM_SPACE_RES(mp), + XFS_ATTRRM_LOG_RES(mp), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_ATTRRM_LOG_COUNT))) { + xfs_trans_cancel(args.trans, 0); + return(error); + + } + + xfs_ilock(dp, XFS_ILOCK_EXCL); + /* + * No need to make quota reservations here. We expect to release some + * blocks not allocate in the common case. + */ + xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args.trans, dp); + + /* + * Decide on what work routines to call based on the inode size. + */ + if (XFS_IFORK_Q(dp) == 0 || + (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && + dp->i_d.di_anextents == 0)) { + error = XFS_ERROR(ENOATTR); + goto out; + } + if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { + ASSERT(dp->i_afp->if_flags & XFS_IFINLINE); + error = xfs_attr_shortform_remove(&args); + if (error) { + goto out; + } + } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { + error = xfs_attr_leaf_removename(&args); + } else { + error = xfs_attr_node_removename(&args); + } + if (error) { + goto out; + } + + /* + * If this is a synchronous mount, make sure that the + * transaction goes to disk before returning to the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(args.trans); + } + + /* + * Commit the last in the sequence of transactions. 
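/*
 * Illustrative sketch, not part of the patch.  The block reservation worked
 * out by xfs_attr_set() above, restated as a hypothetical helper.  "size"
 * and "local" are the outputs of xfs_attr_leaf_newentsize(): an inline
 * value larger than half a block may force a double leaf split, while a
 * remote value needs room for the value blocks themselves plus the
 * worst-case bmap extent insert.
 */
static uint example_attrset_block_reservation(xfs_mount_t *mp, int valuelen,
					      int size, int local)
{
	uint nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);

	if (local) {
		if (size > (mp->m_sb.sb_blocksize >> 1))
			nblks <<= 1;		/* possible double split */
	} else {
		uint dblocks = XFS_B_TO_FSB(mp, valuelen);

		nblks += dblocks;		/* the remote value itself */
		nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
	}
	return nblks;
}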
+ */ + xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE); + error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES, + NULL); + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + /* + * Hit the inode change time. + */ + if (!error && (flags & ATTR_KERNOTIME) == 0) { + xfs_ichgtime(dp, XFS_ICHGTIME_CHG); + } + + return(error); + +out: + if (args.trans) + xfs_trans_cancel(args.trans, + XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); + xfs_iunlock(dp, XFS_ILOCK_EXCL); + return(error); +} + +/* + * Generate a list of extended attribute names and optionally + * also value lengths. Positive return value follows the XFS + * convention of being an error, zero or negative return code + * is the length of the buffer returned (negated), indicating + * success. + */ +int +xfs_attr_list(bhv_desc_t *bdp, char *buffer, int bufsize, int flags, + attrlist_cursor_kern_t *cursor, struct cred *cred) +{ + xfs_attr_list_context_t context; + xfs_inode_t *dp; + int error; + + XFS_STATS_INC(xfsstats.xs_attr_list); + + /* + * Validate the cursor. + */ + if (cursor->pad1 || cursor->pad2) + return(XFS_ERROR(EINVAL)); + if ((cursor->initted == 0) && + (cursor->hashval || cursor->blkno || cursor->offset)) + return(XFS_ERROR(EINVAL)); + + /* + * Check for a properly aligned buffer. + */ + if (((long)buffer) & (sizeof(int)-1)) + return(XFS_ERROR(EFAULT)); + if (flags & ATTR_KERNOVAL) + bufsize = 0; + + /* + * Initialize the output buffer. + */ + context.dp = dp = XFS_BHVTOI(bdp); + context.cursor = cursor; + context.count = 0; + context.dupcnt = 0; + context.resynch = 1; + context.flags = flags; + if (!(flags & ATTR_KERNAMELS)) { + context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */ + context.firstu = context.bufsize; + context.alist = (attrlist_t *)buffer; + context.alist->al_count = 0; + context.alist->al_more = 0; + context.alist->al_offset[0] = context.bufsize; + } + else { + context.bufsize = bufsize; + context.firstu = context.bufsize; + context.alist = (attrlist_t *)buffer; + } + + if (XFS_FORCED_SHUTDOWN(dp->i_mount)) + return (EIO); + /* + * Do they have permission? + */ + xfs_ilock(dp, XFS_ILOCK_SHARED); + if ((error = xfs_iaccess(dp, IREAD, cred))) { + xfs_iunlock(dp, XFS_ILOCK_SHARED); + return(XFS_ERROR(error)); + } + + /* + * Decide on what work routines to call based on the inode size. + */ + xfs_attr_trace_l_c("syscall start", &context); + if (XFS_IFORK_Q(dp) == 0 || + (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && + dp->i_d.di_anextents == 0)) { + error = 0; + } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { + error = xfs_attr_shortform_list(&context); + } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { + error = xfs_attr_leaf_list(&context); + } else { + error = xfs_attr_node_list(&context); + } + xfs_iunlock(dp, XFS_ILOCK_SHARED); + xfs_attr_trace_l_c("syscall end", &context); + + if (!(context.flags & (ATTR_KERNOVAL|ATTR_KERNAMELS))) { + ASSERT(error >= 0); + } + else { /* must return negated buffer size or the error */ + if (context.count < 0) + error = XFS_ERROR(ERANGE); + else + error = -context.count; + } + + return(error); +} + +int /* error */ +xfs_attr_inactive(xfs_inode_t *dp) +{ + xfs_trans_t *trans; + xfs_mount_t *mp; + int error; + + mp = dp->i_mount; + ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); + + /* XXXsup - why on earth are we taking ILOCK_EXCL here??? 
*/ + xfs_ilock(dp, XFS_ILOCK_EXCL); + if ((XFS_IFORK_Q(dp) == 0) || + (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) || + (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && + dp->i_d.di_anextents == 0)) { + xfs_iunlock(dp, XFS_ILOCK_EXCL); + return(0); + } + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + /* + * Start our first transaction of the day. + * + * All future transactions during this code must be "chained" off + * this one via the trans_dup() call. All transactions will contain + * the inode, and the inode will always be marked with trans_ihold(). + * Since the inode will be locked in all transactions, we must log + * the inode in every transaction to let it float upward through + * the log. + */ + trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL); + if ((error = xfs_trans_reserve(trans, 0, XFS_ATTRINVAL_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ATTRINVAL_LOG_COUNT))) { + xfs_trans_cancel(trans, 0); + return(error); + } + xfs_ilock(dp, XFS_ILOCK_EXCL); + + /* + * No need to make quota reservations here. We expect to release some + * blocks, not allocate, in the common case. + */ + xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(trans, dp); + + /* + * Decide on what work routines to call based on the inode size. + */ + if ((XFS_IFORK_Q(dp) == 0) || + (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) || + (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && + dp->i_d.di_anextents == 0)) { + error = 0; + goto out; + } + error = xfs_attr_root_inactive(&trans, dp); + if (error) + goto out; + /* + * signal synchronous inactive transactions unless this + * is a synchronous mount filesystem in which case we + * know that we're here because we've been called out of + * xfs_inactive which means that the last reference is gone + * and the unlink transaction has already hit the disk so + * async inactive transactions are safe. + */ + if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK, + (!(mp->m_flags & XFS_MOUNT_WSYNC) + ? 1 : 0)))) + goto out; + + /* + * Commit the last in the sequence of transactions. + */ + xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE); + error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES, + NULL); + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + return(error); + +out: + xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); + xfs_iunlock(dp, XFS_ILOCK_EXCL); + return(error); +} + + + +/*======================================================================== + * External routines when attribute list is inside the inode + *========================================================================*/ + +/* + * Add a name to the shortform attribute list structure + * This is the external routine. 
+ */ +STATIC int +xfs_attr_shortform_addname(xfs_da_args_t *args) +{ + int newsize, retval; + + retval = xfs_attr_shortform_lookup(args); + if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { + return(retval); + } else if (retval == EEXIST) { + if (args->flags & ATTR_CREATE) + return(retval); + retval = xfs_attr_shortform_remove(args); + ASSERT(retval == 0); + } + + newsize = XFS_ATTR_SF_TOTSIZE(args->dp); + newsize += XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen); + if ((newsize <= XFS_IFORK_ASIZE(args->dp)) && + (args->namelen < XFS_ATTR_SF_ENTSIZE_MAX) && + (args->valuelen < XFS_ATTR_SF_ENTSIZE_MAX)) { + retval = xfs_attr_shortform_add(args); + ASSERT(retval == 0); + } else { + return(XFS_ERROR(ENOSPC)); + } + return(0); +} + + +/*======================================================================== + * External routines when attribute list is one block + *========================================================================*/ + +/* + * Add a name to the leaf attribute list structure + * + * This leaf block cannot have a "remote" value, we only call this routine + * if bmap_one_block() says there is only one block (ie: no remote blks). + */ +int +xfs_attr_leaf_addname(xfs_da_args_t *args) +{ + xfs_inode_t *dp; + xfs_dabuf_t *bp; + int retval, error, committed; + + /* + * Read the (only) block in the attribute list in. + */ + dp = args->dp; + args->blkno = 0; + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + + /* + * Look up the given attribute in the leaf block. Figure out if + * the given flags produce an error or call for an atomic rename. + */ + retval = xfs_attr_leaf_lookup_int(bp, args); + if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { + xfs_da_brelse(args->trans, bp); + return(retval); + } else if (retval == EEXIST) { + if (args->flags & ATTR_CREATE) { /* pure create op */ + xfs_da_brelse(args->trans, bp); + return(retval); + } + args->rename = 1; /* an atomic rename */ + args->blkno2 = args->blkno; /* set 2nd entry info*/ + args->index2 = args->index; + args->rmtblkno2 = args->rmtblkno; + args->rmtblkcnt2 = args->rmtblkcnt; + } + + /* + * Add the attribute to the leaf block, transitioning to a Btree + * if required. + */ + retval = xfs_attr_leaf_add(bp, args); + xfs_da_buf_done(bp); + if (retval == ENOSPC) { + /* + * Promote the attribute list to the Btree format, then + * Commit that transaction so that the node_addname() call + * can manage its own transactions. + */ + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_attr_leaf_to_node(args); + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + return(error); + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + + /* + * Commit the current trans (including the inode) and start + * a new one. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + return (error); + + /* + * Fob the whole rest of the problem off on the Btree code. + */ + error = xfs_attr_node_addname(args); + return(error); + } + + /* + * Commit the transaction that added the attr name so that + * later routines can manage their own transactions. 
+ */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + return (error); + + /* + * If there was an out-of-line value, allocate the blocks we + * identified for its storage and copy the value. This is done + * after we create the attribute so that we don't overflow the + * maximum size of a transaction and/or hit a deadlock. + */ + if (args->rmtblkno > 0) { + error = xfs_attr_rmtval_set(args); + if (error) + return(error); + } + + /* + * If this is an atomic rename operation, we must "flip" the + * incomplete flags on the "new" and "old" attribute/value pairs + * so that one disappears and one appears atomically. Then we + * must remove the "old" attribute/value pair. + */ + if (args->rename) { + /* + * In a separate transaction, set the incomplete flag on the + * "old" attr and clear the incomplete flag on the "new" attr. + */ + error = xfs_attr_leaf_flipflags(args); + if (error) + return(error); + + /* + * Dismantle the "old" attribute/value pair by removing + * a "remote" value (if it exists). + */ + args->index = args->index2; + args->blkno = args->blkno2; + args->rmtblkno = args->rmtblkno2; + args->rmtblkcnt = args->rmtblkcnt2; + if (args->rmtblkno) { + error = xfs_attr_rmtval_remove(args); + if (error) + return(error); + } + + /* + * Read in the block containing the "old" attr, then + * remove the "old" attr from that block (neat, huh!) + */ + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, + &bp, XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + (void)xfs_attr_leaf_remove(bp, args); + + /* + * If the result is small enough, shrink it all into the inode. + */ + if (xfs_attr_shortform_allfit(bp, dp)) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_attr_leaf_to_shortform(bp, args); + /* bp is gone due to xfs_da_shrink_inode */ + if (!error) { + error = xfs_bmap_finish(&args->trans, + args->flist, + *args->firstblock, + &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + return(error); + } + + /* + * bmap_finish() may have committed the last trans + * and started a new one. We need the inode to be + * in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + } else + xfs_da_buf_done(bp); + + /* + * Commit the remove and start the next trans in series. + */ + error = xfs_attr_rolltrans(&args->trans, dp); + + } else if (args->rmtblkno > 0) { + /* + * Added a "remote" value, just clear the incomplete flag. + */ + error = xfs_attr_leaf_clearflag(args); + } + return(error); +} + +/* + * Remove a name from the leaf attribute list structure + * + * This leaf block cannot have a "remote" value, we only call this routine + * if bmap_one_block() says there is only one block (ie: no remote blks). + */ +STATIC int +xfs_attr_leaf_removename(xfs_da_args_t *args) +{ + xfs_inode_t *dp; + xfs_dabuf_t *bp; + int committed; + int error; + + /* + * Remove the attribute. + */ + dp = args->dp; + args->blkno = 0; + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) { + return(error); + } + + ASSERT(bp != NULL); + error = xfs_attr_leaf_lookup_int(bp, args); + if (error == ENOATTR) { + xfs_da_brelse(args->trans, bp); + return(error); + } + + (void)xfs_attr_leaf_remove(bp, args); + + /* + * If the result is small enough, shrink it all into the inode. 
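/*
 * Illustrative sketch, not part of the patch.  The xfs_bmap_finish() idiom
 * that the leaf and node routines above keep repeating: finishing the
 * block-map work may commit the current transaction and start a new one,
 * in which case the inode must be re-joined to (and held in) the fresh
 * transaction so it stays locked across the boundary.  The helper name is
 * hypothetical; the body mirrors the code above.
 */
static int example_finish_bmap_work(xfs_da_args_t *args, xfs_inode_t *dp)
{
	int committed;
	int error;

	error = xfs_bmap_finish(&args->trans, args->flist,
				*args->firstblock, &committed);
	if (error) {
		args->trans = NULL;	/* caller must not cancel it again */
		xfs_bmap_cancel(args->flist);
		return error;
	}
	if (committed) {
		xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
		xfs_trans_ihold(args->trans, dp);
	}
	return 0;
}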
+ */ + if (xfs_attr_shortform_allfit(bp, dp)) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_attr_leaf_to_shortform(bp, args); + /* bp is gone due to xfs_da_shrink_inode */ + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + return(error); + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + } else + xfs_da_buf_done(bp); + return(0); +} + +/* + * Look up a name in a leaf attribute list structure. + * + * This leaf block cannot have a "remote" value, we only call this routine + * if bmap_one_block() says there is only one block (ie: no remote blks). + */ +int +xfs_attr_leaf_get(xfs_da_args_t *args) +{ + xfs_dabuf_t *bp; + int error; + + args->blkno = 0; + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + + error = xfs_attr_leaf_lookup_int(bp, args); + if (error != EEXIST) { + xfs_da_brelse(args->trans, bp); + return(error); + } + error = xfs_attr_leaf_getvalue(bp, args); + xfs_da_brelse(args->trans, bp); + if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) { + error = xfs_attr_rmtval_get(args); + } + return(error); +} + +/* + * Copy out attribute entries for attr_list(), for leaf attribute lists. + */ +STATIC int +xfs_attr_leaf_list(xfs_attr_list_context_t *context) +{ + xfs_attr_leafblock_t *leaf; + int error; + xfs_dabuf_t *bp; + + context->cursor->blkno = 0; + error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + leaf = bp->data; + if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + != XFS_ATTR_LEAF_MAGIC)) { + XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW, + context->dp->i_mount, leaf); + xfs_da_brelse(NULL, bp); + return(XFS_ERROR(EFSCORRUPTED)); + } + + (void)xfs_attr_leaf_list_int(bp, context); + xfs_da_brelse(NULL, bp); + return(0); +} + + +/*======================================================================== + * External routines when attribute list size > XFS_LBSIZE(mp). + *========================================================================*/ + +/* + * Add a name to a Btree-format attribute list. + * + * This will involve walking down the Btree, and may involve splitting + * leaf nodes and even splitting intermediate nodes up to and including + * the root node (a special case of an intermediate node). + * + * "Remote" attribute values confuse the issue and atomic rename operations + * add a whole extra layer of confusion on top of that. + */ +STATIC int +xfs_attr_node_addname(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + xfs_inode_t *dp; + xfs_mount_t *mp; + int committed, retval, error; + + /* + * Fill in bucket of arguments/results/context to carry around. + */ + dp = args->dp; + mp = dp->i_mount; +restart: + state = xfs_da_state_alloc(); + state->args = args; + state->mp = mp; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_attr_node_ents; + + /* + * Search to see if name already exists, and get back a pointer + * to where it should go. 
+ */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) + goto out; + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { + goto out; + } else if (retval == EEXIST) { + if (args->flags & ATTR_CREATE) + goto out; + args->rename = 1; /* atomic rename op */ + args->blkno2 = args->blkno; /* set 2nd entry info*/ + args->index2 = args->index; + args->rmtblkno2 = args->rmtblkno; + args->rmtblkcnt2 = args->rmtblkcnt; + args->rmtblkno = 0; + args->rmtblkcnt = 0; + } + + retval = xfs_attr_leaf_add(blk->bp, state->args); + if (retval == ENOSPC) { + if (state->path.active == 1) { + /* + * Its really a single leaf node, but it had + * out-of-line values so it looked like it *might* + * have been a b-tree. + */ + xfs_da_state_free(state); + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_attr_leaf_to_node(args); + if (!error) { + error = xfs_bmap_finish(&args->trans, + args->flist, + *args->firstblock, + &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans + * and started a new one. We need the inode to be + * in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + + /* + * Commit the node conversion and start the next + * trans in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + goto out; + + goto restart; + } + + /* + * Split as many Btree elements as required. + * This code tracks the new and old attr's location + * in the index/blkno/rmtblkno/rmtblkcnt fields and + * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields. + */ + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_da_split(state); + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + } else { + /* + * Addition succeeded, update Btree hashvals. + */ + xfs_da_fixhashpath(state, &state->path); + } + + /* + * Kill the state structure, we're done with it and need to + * allow the buffers to come back later. + */ + xfs_da_state_free(state); + state = NULL; + + /* + * Commit the leaf addition or btree split and start the next + * trans in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + goto out; + + /* + * If there was an out-of-line value, allocate the blocks we + * identified for its storage and copy the value. This is done + * after we create the attribute so that we don't overflow the + * maximum size of a transaction and/or hit a deadlock. + */ + if (args->rmtblkno > 0) { + error = xfs_attr_rmtval_set(args); + if (error) + return(error); + } + + /* + * If this is an atomic rename operation, we must "flip" the + * incomplete flags on the "new" and "old" attribute/value pairs + * so that one disappears and one appears atomically. Then we + * must remove the "old" attribute/value pair. + */ + if (args->rename) { + /* + * In a separate transaction, set the incomplete flag on the + * "old" attr and clear the incomplete flag on the "new" attr. 
+ */ + error = xfs_attr_leaf_flipflags(args); + if (error) + goto out; + + /* + * Dismantle the "old" attribute/value pair by removing + * a "remote" value (if it exists). + */ + args->index = args->index2; + args->blkno = args->blkno2; + args->rmtblkno = args->rmtblkno2; + args->rmtblkcnt = args->rmtblkcnt2; + if (args->rmtblkno) { + error = xfs_attr_rmtval_remove(args); + if (error) + return(error); + } + + /* + * Re-find the "old" attribute entry after any split ops. + * The INCOMPLETE flag means that we will find the "old" + * attr, not the "new" one. + */ + args->flags |= XFS_ATTR_INCOMPLETE; + state = xfs_da_state_alloc(); + state->args = args; + state->mp = mp; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_attr_node_ents; + state->inleaf = 0; + error = xfs_da_node_lookup_int(state, &retval); + if (error) + goto out; + + /* + * Remove the name and update the hashvals in the tree. + */ + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + error = xfs_attr_leaf_remove(blk->bp, args); + xfs_da_fixhashpath(state, &state->path); + + /* + * Check to see if the tree needs to be collapsed. + */ + if (retval && (state->path.active > 1)) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_da_join(state); + if (!error) { + error = xfs_bmap_finish(&args->trans, + args->flist, + *args->firstblock, + &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans + * and started a new one. We need the inode to be + * in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + } + + /* + * Commit and start the next trans in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + goto out; + + } else if (args->rmtblkno > 0) { + /* + * Added a "remote" value, just clear the incomplete flag. + */ + error = xfs_attr_leaf_clearflag(args); + if (error) + goto out; + } + retval = error = 0; + +out: + if (state) + xfs_da_state_free(state); + if (error) + return(error); + return(retval); +} + +/* + * Remove a name from a B-tree attribute list. + * + * This will involve walking down the Btree, and may involve joining + * leaf nodes and even joining intermediate nodes up to and including + * the root node (a special case of an intermediate node). + */ +STATIC int +xfs_attr_node_removename(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + xfs_inode_t *dp; + xfs_dabuf_t *bp; + int retval, error, committed; + + /* + * Tie a string around our finger to remind us where we are. + */ + dp = args->dp; + state = xfs_da_state_alloc(); + state->args = args; + state->mp = dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_attr_node_ents; + + /* + * Search to see if name exists, and get back a pointer to it. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error || (retval != EEXIST)) { + if (error == 0) + error = retval; + goto out; + } + + /* + * If there is an out-of-line value, de-allocate the blocks. + * This is done before we remove the attribute so that we don't + * overflow the maximum size of a transaction and/or hit a deadlock. 
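/*
 * Illustrative sketch, not part of the patch.  The atomic-replace
 * ("rename") sequence used by xfs_attr_leaf_addname() and
 * xfs_attr_node_addname() above, condensed into one hypothetical helper:
 * flipping the INCOMPLETE flags makes the old pair disappear and the new
 * pair appear atomically, as the comments above put it, before the old
 * entry is dismantled.
 */
static int example_attr_atomic_replace(xfs_da_args_t *args)
{
	int error;

	/* 1: old attr becomes INCOMPLETE, new attr becomes visible */
	if ((error = xfs_attr_leaf_flipflags(args)))
		return error;

	/* 2: retarget args at the "old" entry saved in the *2 fields */
	args->index = args->index2;
	args->blkno = args->blkno2;
	args->rmtblkno = args->rmtblkno2;
	args->rmtblkcnt = args->rmtblkcnt2;

	/* 3: free the old remote value blocks first, if there are any */
	if (args->rmtblkno && (error = xfs_attr_rmtval_remove(args)))
		return error;

	/*
	 * 4: finally remove the old name itself, via xfs_attr_leaf_remove()
	 * or the node-format path, exactly as the code above does.
	 */
	return 0;
}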
+ */ + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->bp != NULL); + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + if (args->rmtblkno > 0) { + /* + * Fill in disk block numbers in the state structure + * so that we can get the buffers back after we commit + * several transactions in the following calls. + */ + error = xfs_attr_fillstate(state); + if (error) + goto out; + + /* + * Mark the attribute as INCOMPLETE, then bunmapi() the + * remote value. + */ + error = xfs_attr_leaf_setflag(args); + if (error) + goto out; + error = xfs_attr_rmtval_remove(args); + if (error) + goto out; + + /* + * Refill the state structure with buffers, the prior calls + * released our buffers. + */ + error = xfs_attr_refillstate(state); + if (error) + goto out; + } + + /* + * Remove the name and update the hashvals in the tree. + */ + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + retval = xfs_attr_leaf_remove(blk->bp, args); + xfs_da_fixhashpath(state, &state->path); + + /* + * Check to see if the tree needs to be collapsed. + */ + if (retval && (state->path.active > 1)) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_da_join(state); + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + + /* + * Commit the Btree join operation and start a new trans. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + goto out; + } + + /* + * If the result is small enough, push it all into the inode. + */ + if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { + /* + * Have to get rid of the copy of this dabuf in the state. + */ + ASSERT(state->path.active == 1); + ASSERT(state->path.blk[0].bp); + xfs_da_buf_done(state->path.blk[0].bp); + state->path.blk[0].bp = NULL; + + error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_ATTR_FORK); + if (error) + goto out; + ASSERT(INT_GET(((xfs_attr_leafblock_t *) + bp->data)->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + + if (xfs_attr_shortform_allfit(bp, dp)) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_attr_leaf_to_shortform(bp, args); + /* bp is gone due to xfs_da_shrink_inode */ + if (!error) { + error = xfs_bmap_finish(&args->trans, + args->flist, + *args->firstblock, + &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + goto out; + } + + /* + * bmap_finish() may have committed the last trans + * and started a new one. We need the inode to be + * in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + } else + xfs_da_brelse(args->trans, bp); + } + error = 0; + +out: + xfs_da_state_free(state); + return(error); +} + +/* + * Fill in the disk block numbers in the state structure for the buffers + * that are attached to the state structure. + * This is done so that we can quickly reattach ourselves to those buffers + * after some set of transaction commit's has released these buffers. 
+ */ +STATIC int +xfs_attr_fillstate(xfs_da_state_t *state) +{ + xfs_da_state_path_t *path; + xfs_da_state_blk_t *blk; + int level; + + /* + * Roll down the "path" in the state structure, storing the on-disk + * block number for those buffers in the "path". + */ + path = &state->path; + ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); + for (blk = path->blk, level = 0; level < path->active; blk++, level++) { + if (blk->bp) { + blk->disk_blkno = xfs_da_blkno(blk->bp); + xfs_da_buf_done(blk->bp); + blk->bp = NULL; + } else { + blk->disk_blkno = 0; + } + } + + /* + * Roll down the "altpath" in the state structure, storing the on-disk + * block number for those buffers in the "altpath". + */ + path = &state->altpath; + ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); + for (blk = path->blk, level = 0; level < path->active; blk++, level++) { + if (blk->bp) { + blk->disk_blkno = xfs_da_blkno(blk->bp); + xfs_da_buf_done(blk->bp); + blk->bp = NULL; + } else { + blk->disk_blkno = 0; + } + } + + return(0); +} + +/* + * Reattach the buffers to the state structure based on the disk block + * numbers stored in the state structure. + * This is done after some set of transaction commit's has released those + * buffers from our grip. + */ +STATIC int +xfs_attr_refillstate(xfs_da_state_t *state) +{ + xfs_da_state_path_t *path; + xfs_da_state_blk_t *blk; + int level, error; + + /* + * Roll down the "path" in the state structure, storing the on-disk + * block number for those buffers in the "path". + */ + path = &state->path; + ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); + for (blk = path->blk, level = 0; level < path->active; blk++, level++) { + if (blk->disk_blkno) { + error = xfs_da_read_buf(state->args->trans, + state->args->dp, + blk->blkno, blk->disk_blkno, + &blk->bp, XFS_ATTR_FORK); + if (error) + return(error); + } else { + blk->bp = NULL; + } + } + + /* + * Roll down the "altpath" in the state structure, storing the on-disk + * block number for those buffers in the "altpath". + */ + path = &state->altpath; + ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); + for (blk = path->blk, level = 0; level < path->active; blk++, level++) { + if (blk->disk_blkno) { + error = xfs_da_read_buf(state->args->trans, + state->args->dp, + blk->blkno, blk->disk_blkno, + &blk->bp, XFS_ATTR_FORK); + if (error) + return(error); + } else { + blk->bp = NULL; + } + } + + return(0); +} + +/* + * Look up a filename in a node attribute list. + * + * This routine gets called for any attribute fork that has more than one + * block, ie: both true Btree attr lists and for single-leaf-blocks with + * "remote" values taking up more blocks. + */ +int +xfs_attr_node_get(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + int error, retval; + int i; + + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_attr_node_ents; + + /* + * Search to see if name exists, and get back a pointer to it. 
+ */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) { + retval = error; + } else if (retval == EEXIST) { + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->bp != NULL); + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + + /* + * Get the value, local or "remote" + */ + retval = xfs_attr_leaf_getvalue(blk->bp, args); + if (!retval && (args->rmtblkno > 0) + && !(args->flags & ATTR_KERNOVAL)) { + retval = xfs_attr_rmtval_get(args); + } + } + + /* + * If not in a transaction, we have to release all the buffers. + */ + for (i = 0; i < state->path.active; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + + xfs_da_state_free(state); + return(retval); +} + +STATIC int /* error */ +xfs_attr_node_list(xfs_attr_list_context_t *context) +{ + attrlist_cursor_kern_t *cursor; + xfs_attr_leafblock_t *leaf; + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + int error, i; + xfs_dabuf_t *bp; + + cursor = context->cursor; + cursor->initted = 1; + + /* + * Do all sorts of validation on the passed-in cursor structure. + * If anything is amiss, ignore the cursor and look up the hashval + * starting from the btree root. + */ + bp = NULL; + if (cursor->blkno > 0) { + error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1, + &bp, XFS_ATTR_FORK); + if ((error != 0) && (error != EFSCORRUPTED)) + return(error); + if (bp) { + node = bp->data; + switch (INT_GET(node->hdr.info.magic, ARCH_CONVERT)) { + case XFS_DA_NODE_MAGIC: + xfs_attr_trace_l_cn("wrong blk", context, node); + xfs_da_brelse(NULL, bp); + bp = NULL; + break; + case XFS_ATTR_LEAF_MAGIC: + leaf = bp->data; + if (cursor->hashval > + INT_GET(leaf->entries[ + INT_GET(leaf->hdr.count, + ARCH_CONVERT)-1].hashval, + ARCH_CONVERT)) { + xfs_attr_trace_l_cl("wrong blk", + context, leaf); + xfs_da_brelse(NULL, bp); + bp = NULL; + } else if (cursor->hashval <= + INT_GET(leaf->entries[0].hashval, + ARCH_CONVERT)) { + xfs_attr_trace_l_cl("maybe wrong blk", + context, leaf); + xfs_da_brelse(NULL, bp); + bp = NULL; + } + break; + default: + xfs_attr_trace_l_c("wrong blk - ??", context); + xfs_da_brelse(NULL, bp); + bp = NULL; + } + } + } + + /* + * We did not find what we expected given the cursor's contents, + * so we start from the top and work down based on the hash value. + * Note that start of node block is same as start of leaf block. 
+ */ + if (bp == NULL) { + cursor->blkno = 0; + for (;;) { + error = xfs_da_read_buf(NULL, context->dp, + cursor->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) + return(error); + if (unlikely(bp == NULL)) { + XFS_ERROR_REPORT("xfs_attr_node_list(2)", + XFS_ERRLEVEL_LOW, + context->dp->i_mount); + return(XFS_ERROR(EFSCORRUPTED)); + } + node = bp->data; + if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC) + break; + if (unlikely(INT_GET(node->hdr.info.magic, ARCH_CONVERT) + != XFS_DA_NODE_MAGIC)) { + XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", + XFS_ERRLEVEL_LOW, + context->dp->i_mount, + node); + xfs_da_brelse(NULL, bp); + return(XFS_ERROR(EFSCORRUPTED)); + } + btree = node->btree; + for (i = 0; + i < INT_GET(node->hdr.count, ARCH_CONVERT); + btree++, i++) { + if (cursor->hashval + <= INT_GET(btree->hashval, + ARCH_CONVERT)) { + cursor->blkno = INT_GET(btree->before, ARCH_CONVERT); + xfs_attr_trace_l_cb("descending", + context, btree); + break; + } + } + if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { + xfs_da_brelse(NULL, bp); + return(0); + } + xfs_da_brelse(NULL, bp); + } + } + ASSERT(bp != NULL); + + /* + * Roll upward through the blocks, processing each leaf block in + * order. As long as there is space in the result buffer, keep + * adding the information. + */ + for (;;) { + leaf = bp->data; + if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + != XFS_ATTR_LEAF_MAGIC)) { + XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)", + XFS_ERRLEVEL_LOW, + context->dp->i_mount, leaf); + xfs_da_brelse(NULL, bp); + return(XFS_ERROR(EFSCORRUPTED)); + } + error = xfs_attr_leaf_list_int(bp, context); + if (error || (INT_ISZERO(leaf->hdr.info.forw, ARCH_CONVERT))) + break; /* not really an error, buffer full or EOF */ + cursor->blkno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT); + xfs_da_brelse(NULL, bp); + error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1, + &bp, XFS_ATTR_FORK); + if (error) + return(error); + if (unlikely((bp == NULL))) { + XFS_ERROR_REPORT("xfs_attr_node_list(5)", + XFS_ERRLEVEL_LOW, + context->dp->i_mount); + return(XFS_ERROR(EFSCORRUPTED)); + } + } + xfs_da_brelse(NULL, bp); + return(0); +} + + +/*======================================================================== + * External routines for manipulating out-of-line attribute values. + *========================================================================*/ + +/* + * Read the value associated with an attribute from the out-of-line buffer + * that we stored it in. 
+ */ +STATIC int +xfs_attr_rmtval_get(xfs_da_args_t *args) +{ + xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE]; + xfs_mount_t *mp; + xfs_daddr_t dblkno; + xfs_caddr_t dst; + xfs_buf_t *bp; + int nmap, error, tmp, valuelen, blkcnt, i; + xfs_dablk_t lblkno; + + ASSERT(!(args->flags & ATTR_KERNOVAL)); + + mp = args->dp->i_mount; + dst = args->value; + valuelen = args->valuelen; + lblkno = args->rmtblkno; + while (valuelen > 0) { + nmap = ATTR_RMTVALUE_MAPSIZE; + error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno, + args->rmtblkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, + NULL, 0, map, &nmap, NULL); + if (error) + return(error); + ASSERT(nmap >= 1); + + for (i = 0; (i < nmap) && (valuelen > 0); i++) { + ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) && + (map[i].br_startblock != HOLESTARTBLOCK)); + dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); + blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); + error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno, + blkcnt, XFS_BUF_LOCK, &bp); + if (error) + return(error); + + tmp = (valuelen < XFS_BUF_SIZE(bp)) + ? valuelen : XFS_BUF_SIZE(bp); + xfs_biomove(bp, 0, tmp, dst, XFS_B_READ); + xfs_buf_relse(bp); + dst += tmp; + valuelen -= tmp; + + lblkno += map[i].br_blockcount; + } + } + ASSERT(valuelen == 0); + return(0); +} + +/* + * Write the value associated with an attribute into the out-of-line buffer + * that we have defined for it. + */ +STATIC int +xfs_attr_rmtval_set(xfs_da_args_t *args) +{ + xfs_mount_t *mp; + xfs_fileoff_t lfileoff; + xfs_inode_t *dp; + xfs_bmbt_irec_t map; + xfs_daddr_t dblkno; + xfs_caddr_t src; + xfs_buf_t *bp; + xfs_dablk_t lblkno; + int blkcnt, valuelen, nmap, error, tmp, committed; + + dp = args->dp; + mp = dp->i_mount; + src = args->value; + + /* + * Find a "hole" in the attribute address space large enough for + * us to drop the new attribute's value into. + */ + blkcnt = XFS_B_TO_FSB(mp, args->valuelen); + lfileoff = 0; + error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, + XFS_ATTR_FORK); + if (error) { + return(error); + } + args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff; + args->rmtblkcnt = blkcnt; + + /* + * Roll through the "value", allocating blocks on disk as required. + */ + while (blkcnt > 0) { + /* + * Allocate a single extent, up to the size of the value. + */ + XFS_BMAP_INIT(args->flist, args->firstblock); + nmap = 1; + error = xfs_bmapi(args->trans, dp, (xfs_fileoff_t)lblkno, + blkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA | + XFS_BMAPI_WRITE, + args->firstblock, args->total, &map, &nmap, + args->flist); + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + return(error); + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. + */ + if (committed) { + xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, dp); + } + + ASSERT(nmap == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + lblkno += map.br_blockcount; + blkcnt -= map.br_blockcount; + + /* + * Start the next trans in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, dp))) + return (error); + } + + /* + * Roll through the "value", copying the attribute value to the + * already-allocated blocks. 
Blocks are written synchronously + * so that we can know they are all on disk before we turn off + * the INCOMPLETE flag. + */ + lblkno = args->rmtblkno; + valuelen = args->valuelen; + while (valuelen > 0) { + /* + * Try to remember where we decided to put the value. + */ + XFS_BMAP_INIT(args->flist, args->firstblock); + nmap = 1; + error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno, + args->rmtblkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, + args->firstblock, 0, &map, &nmap, NULL); + if (error) { + return(error); + } + ASSERT(nmap == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + + dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), + blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); + + bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, + blkcnt, XFS_BUF_LOCK); + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + + tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : + XFS_BUF_SIZE(bp); + xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE); + if (tmp < XFS_BUF_SIZE(bp)) + xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp); + if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */ + return (error); + } + src += tmp; + valuelen -= tmp; + + lblkno += map.br_blockcount; + } + ASSERT(valuelen == 0); + return(0); +} + +/* + * Remove the value associated with an attribute by deleting the + * out-of-line buffer that it is stored on. + */ +STATIC int +xfs_attr_rmtval_remove(xfs_da_args_t *args) +{ + xfs_mount_t *mp; + xfs_bmbt_irec_t map; + xfs_buf_t *bp; + xfs_daddr_t dblkno; + xfs_dablk_t lblkno; + int valuelen, blkcnt, nmap, error, done, committed; + + mp = args->dp->i_mount; + + /* + * Roll through the "value", invalidating the attribute value's + * blocks. + */ + lblkno = args->rmtblkno; + valuelen = args->rmtblkcnt; + while (valuelen > 0) { + /* + * Try to remember where we decided to put the value. + */ + XFS_BMAP_INIT(args->flist, args->firstblock); + nmap = 1; + error = xfs_bmapi(NULL, args->dp, (xfs_fileoff_t)lblkno, + args->rmtblkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, + args->firstblock, 0, &map, &nmap, + args->flist); + if (error) { + return(error); + } + ASSERT(nmap == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + + dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), + blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); + + /* + * If the "remote" value is in the cache, remove it. + */ + /* bp = incore(mp->m_dev, dblkno, blkcnt, 1); */ + bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, 1); + if (bp) { + XFS_BUF_STALE(bp); + XFS_BUF_UNDELAYWRITE(bp); + xfs_buf_relse(bp); + bp = NULL; + } + + valuelen -= map.br_blockcount; + + lblkno += map.br_blockcount; + } + + /* + * Keep de-allocating extents until the remote-value region is gone. + */ + lblkno = args->rmtblkno; + blkcnt = args->rmtblkcnt; + done = 0; + while (!done) { + XFS_BMAP_INIT(args->flist, args->firstblock); + error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, + 1, args->firstblock, args->flist, &done); + if (!error) { + error = xfs_bmap_finish(&args->trans, args->flist, + *args->firstblock, &committed); + } + if (error) { + ASSERT(committed); + args->trans = NULL; + xfs_bmap_cancel(args->flist); + return(error); + } + + /* + * bmap_finish() may have committed the last trans and started + * a new one. We need the inode to be in all transactions. 
+ */ + if (committed) { + xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(args->trans, args->dp); + } + + /* + * Close out trans and start the next one in the chain. + */ + if ((error = xfs_attr_rolltrans(&args->trans, args->dp))) + return (error); + } + return(0); +} + +#if defined(XFS_ATTR_TRACE) +/* + * Add a trace buffer entry for an attr_list context structure. + */ +void +xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context) +{ + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, + (__psunsigned_t)context->dp, + (__psunsigned_t)context->cursor->hashval, + (__psunsigned_t)context->cursor->blkno, + (__psunsigned_t)context->cursor->offset, + (__psunsigned_t)context->alist, + (__psunsigned_t)context->bufsize, + (__psunsigned_t)context->count, + (__psunsigned_t)context->firstu, + (__psunsigned_t) + (context->count > 0) + ? (ATTR_ENTRY(context->alist, + context->count-1)->a_valuelen) + : 0, + (__psunsigned_t)context->dupcnt, + (__psunsigned_t)context->flags, + (__psunsigned_t)NULL, + (__psunsigned_t)NULL, + (__psunsigned_t)NULL); +} + +/* + * Add a trace buffer entry for a context structure and a Btree node. + */ +void +xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, + struct xfs_da_intnode *node) +{ + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, + (__psunsigned_t)context->dp, + (__psunsigned_t)context->cursor->hashval, + (__psunsigned_t)context->cursor->blkno, + (__psunsigned_t)context->cursor->offset, + (__psunsigned_t)context->alist, + (__psunsigned_t)context->bufsize, + (__psunsigned_t)context->count, + (__psunsigned_t)context->firstu, + (__psunsigned_t) + (context->count > 0) + ? (ATTR_ENTRY(context->alist, + context->count-1)->a_valuelen) + : 0, + (__psunsigned_t)context->dupcnt, + (__psunsigned_t)context->flags, + (__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); +} + +/* + * Add a trace buffer entry for a context structure and a Btree element. + */ +void +xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, + struct xfs_da_node_entry *btree) +{ + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, + (__psunsigned_t)context->dp, + (__psunsigned_t)context->cursor->hashval, + (__psunsigned_t)context->cursor->blkno, + (__psunsigned_t)context->cursor->offset, + (__psunsigned_t)context->alist, + (__psunsigned_t)context->bufsize, + (__psunsigned_t)context->count, + (__psunsigned_t)context->firstu, + (__psunsigned_t) + (context->count > 0) + ? (ATTR_ENTRY(context->alist, + context->count-1)->a_valuelen) + : 0, + (__psunsigned_t)context->dupcnt, + (__psunsigned_t)context->flags, + (__psunsigned_t)INT_GET(btree->hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(btree->before, ARCH_CONVERT), + (__psunsigned_t)NULL); +} + +/* + * Add a trace buffer entry for a context structure and a leaf block. + */ +void +xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, + struct xfs_attr_leafblock *leaf) +{ + xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, + (__psunsigned_t)context->dp, + (__psunsigned_t)context->cursor->hashval, + (__psunsigned_t)context->cursor->blkno, + (__psunsigned_t)context->cursor->offset, + (__psunsigned_t)context->alist, + (__psunsigned_t)context->bufsize, + (__psunsigned_t)context->count, + (__psunsigned_t)context->firstu, + (__psunsigned_t) + (context->count > 0) + ? 
(ATTR_ENTRY(context->alist, + context->count-1)->a_valuelen) + : 0, + (__psunsigned_t)context->dupcnt, + (__psunsigned_t)context->flags, + (__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); +} + +/* + * Add a trace buffer entry for the arguments given to the routine, + * generic form. + */ +void +xfs_attr_trace_enter(int type, char *where, + __psunsigned_t a2, __psunsigned_t a3, + __psunsigned_t a4, __psunsigned_t a5, + __psunsigned_t a6, __psunsigned_t a7, + __psunsigned_t a8, __psunsigned_t a9, + __psunsigned_t a10, __psunsigned_t a11, + __psunsigned_t a12, __psunsigned_t a13, + __psunsigned_t a14, __psunsigned_t a15) +{ + ASSERT(xfs_attr_trace_buf); + ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type), + (void *)where, + (void *)a2, (void *)a3, (void *)a4, + (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10, + (void *)a11, (void *)a12, (void *)a13, + (void *)a14, (void *)a15); +} +#endif /* XFS_ATTR_TRACE */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_attr_fetch.c linux.22-ac2/fs/xfs/xfs_attr_fetch.c --- linux.vanilla/fs/xfs/xfs_attr_fetch.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_attr_fetch.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_itable.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" + +int +xfs_attr_fetch(xfs_inode_t *ip, char *name, char *value, int valuelen) +{ + xfs_da_args_t args; + int error; + + if (XFS_IFORK_Q(ip) == 0) + return ENOATTR; + /* + * Do the argument setup for the xfs_attr routines. + */ + memset((char *)&args, 0, sizeof(args)); + args.dp = ip; + args.flags = ATTR_ROOT; + args.whichfork = XFS_ATTR_FORK; + args.name = name; + args.namelen = strlen(name); + args.value = value; + args.valuelen = valuelen; + args.hashval = xfs_da_hashname(args.name, args.namelen); + args.oknoent = 1; + + /* + * Decide on what work routines to call based on the inode size. + */ + if (args.dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) + error = xfs_attr_shortform_getvalue(&args); + else if (xfs_bmap_one_block(args.dp, XFS_ATTR_FORK)) + error = xfs_attr_leaf_get(&args); + else + error = xfs_attr_node_get(&args); + + if (error == EEXIST) + error = 0; + + return(error); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_attr.h linux.22-ac2/fs/xfs/xfs_attr.h --- linux.vanilla/fs/xfs/xfs_attr.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_attr.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ATTR_H__ +#define __XFS_ATTR_H__ + +/* + * xfs_attr.h + * + * Large attribute lists are structured around Btrees where all the data + * elements are in the leaf nodes. Attribute names are hashed into an int, + * then that int is used as the index into the Btree. Since the hashval + * of an attribute name may not be unique, we may have duplicate keys. + * The internal links in the Btree are logical block offsets into the file. + * + * Small attribute lists use a different format and are packed as tightly + * as possible so as to fit into the literal area of the inode. + */ + +#ifdef XFS_ALL_TRACE +#define XFS_ATTR_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_ATTR_TRACE +#endif + + +/*======================================================================== + * External interfaces + *========================================================================*/ + +#define ATTR_ROOT 0x0002 /* use attrs in root namespace, not user */ +#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */ +#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */ +#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */ +#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */ +#define ATTR_KERNAMELS 0x4000 /* [kernel] list attr names (simple list) */ +#define ATTR_KERNFULLS 0x8000 /* [kernel] full attr list, ie. root+user */ + +/* + * The maximum size (into the kernel or returned from the kernel) of an + * attribute value or the buffer used for an attr_list() call. Larger + * sizes will result in an ERANGE return code. + */ +#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */ + +/* + * Define how lists of attribute names are returned to the user from + * the attr_list() call. A large, 32bit aligned, buffer is passed in + * along with its size. We put an array of offsets at the top that each + * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom. + */ +typedef struct attrlist { + __s32 al_count; /* number of entries in attrlist */ + __s32 al_more; /* T/F: more attrs (do call again) */ + __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */ +} attrlist_t; + +/* + * Show the interesting info about one attribute. This is what the + * al_offset[i] entry points to. + */ +typedef struct attrlist_ent { /* data from attr_list() */ + __u32 a_valuelen; /* number bytes in value of attr */ + char a_name[1]; /* attr name (NULL terminated) */ +} attrlist_ent_t; + +/* + * Given a pointer to the (char*) buffer containing the attr_list() result, + * and an index, return a pointer to the indicated attribute in the buffer. + */ +#define ATTR_ENTRY(buffer, index) \ + ((attrlist_ent_t *) \ + &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ]) + +/* + * Multi-attribute operation vector. + */ +typedef struct attr_multiop { + int am_opcode; /* operation to perform (ATTR_OP_GET, etc.) 
*/ + int am_error; /* [out arg] result of this sub-op (an errno) */ + char *am_attrname; /* attribute name to work with */ + char *am_attrvalue; /* [in/out arg] attribute value (raw bytes) */ + int am_length; /* [in/out arg] length of value */ + int am_flags; /* bitwise OR of attr API flags defined above */ +} attr_multiop_t; + +#define ATTR_OP_GET 1 /* return the indicated attr's value */ +#define ATTR_OP_SET 2 /* set/create the indicated attr/value pair */ +#define ATTR_OP_REMOVE 3 /* remove the indicated attr */ + +/* + * Kernel-internal version of the attrlist cursor. + */ +typedef struct attrlist_cursor_kern { + __u32 hashval; /* hash value of next entry to add */ + __u32 blkno; /* block containing entry (suggestion) */ + __u32 offset; /* offset in list of equal-hashvals */ + __u16 pad1; /* padding to match user-level */ + __u8 pad2; /* padding to match user-level */ + __u8 initted; /* T/F: cursor has been initialized */ +} attrlist_cursor_kern_t; + + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +struct cred; +struct vnode; +struct xfs_inode; +struct attrlist_cursor_kern; +struct xfs_ext_attr; +struct xfs_da_args; + +/* + * Overall external interface routines. + */ +int xfs_attr_get(bhv_desc_t *, char *, char *, int *, int, struct cred *); +int xfs_attr_set(bhv_desc_t *, char *, char *, int, int, struct cred *); +int xfs_attr_remove(bhv_desc_t *, char *, int, struct cred *); +int xfs_attr_list(bhv_desc_t *, char *, int, int, + struct attrlist_cursor_kern *, struct cred *); +int xfs_attr_inactive(struct xfs_inode *dp); + +int xfs_attr_node_get(struct xfs_da_args *); +int xfs_attr_leaf_get(struct xfs_da_args *); +int xfs_attr_shortform_getvalue(struct xfs_da_args *); +int xfs_attr_fetch(struct xfs_inode *, char *, char *, int); + +#endif /* __XFS_ATTR_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_attr_leaf.c linux.22-ac2/fs/xfs/xfs_attr_leaf.c --- linux.vanilla/fs/xfs/xfs_attr_leaf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_attr_leaf.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,3004 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +/* + * xfs_attr_leaf.c + * + * GROT: figure out how to recover gracefully when bmap returns ENOSPC. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "xfs_error.h" +#include "xfs_bit.h" + +/* + * xfs_attr_leaf.c + * + * Routines to implement leaf blocks of attributes as Btrees of hashed names. + */ + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Routines used for growing the Btree. + */ +STATIC int xfs_attr_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args, + int freemap_index); +STATIC void xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer); +STATIC void xfs_attr_leaf_rebalance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2); +STATIC int xfs_attr_leaf_figure_balance(xfs_da_state_t *state, + xfs_da_state_blk_t *leaf_blk_1, + xfs_da_state_blk_t *leaf_blk_2, + int *number_entries_in_blk1, + int *number_usedbytes_in_blk1); + +/* + * Utility routines. + */ +STATIC void xfs_attr_leaf_moveents(xfs_attr_leafblock_t *src_leaf, + int src_start, + xfs_attr_leafblock_t *dst_leaf, + int dst_start, int move_count, + xfs_mount_t *mp); + + +/*======================================================================== + * External routines when dirsize < XFS_LITINO(mp). + *========================================================================*/ + +/* + * Create the initial contents of a shortform attribute list. + */ +int +xfs_attr_shortform_create(xfs_da_args_t *args) +{ + xfs_attr_sf_hdr_t *hdr; + xfs_inode_t *dp; + xfs_ifork_t *ifp; + + dp = args->dp; + ASSERT(dp != NULL); + ifp = dp->i_afp; + ASSERT(ifp != NULL); + ASSERT(ifp->if_bytes == 0); + if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) { + ifp->if_flags &= ~XFS_IFEXTENTS; /* just in case */ + dp->i_d.di_aformat = XFS_DINODE_FMT_LOCAL; + ifp->if_flags |= XFS_IFINLINE; + } else { + ASSERT(ifp->if_flags & XFS_IFINLINE); + } + xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); + hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; + INT_ZERO(hdr->count, ARCH_CONVERT); + INT_SET(hdr->totsize, ARCH_CONVERT, sizeof(*hdr)); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); + return(0); +} + +/* + * Add a name/value pair to the shortform attribute list. + * Overflow from the inode has already been checked for. 
+ */ +int +xfs_attr_shortform_add(xfs_da_args_t *args) +{ + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + int i, offset, size; + xfs_inode_t *dp; + xfs_ifork_t *ifp; + + dp = args->dp; + ifp = dp->i_afp; + ASSERT(ifp->if_flags & XFS_IFINLINE); + sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { + if (sfe->namelen != args->namelen) + continue; + if (memcmp(args->name, sfe->nameval, args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0)) + continue; + return(XFS_ERROR(EEXIST)); + } + + offset = (char *)sfe - (char *)sf; + size = XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen); + xfs_idata_realloc(dp, size, XFS_ATTR_FORK); + sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; + sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset); + + sfe->namelen = args->namelen; + INT_SET(sfe->valuelen, ARCH_CONVERT, args->valuelen); + sfe->flags = (args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0; + memcpy(sfe->nameval, args->name, args->namelen); + memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen); + INT_MOD(sf->hdr.count, ARCH_CONVERT, 1); + INT_MOD(sf->hdr.totsize, ARCH_CONVERT, size); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); + + return(0); +} + +/* + * Remove a name from the shortform attribute list structure. + */ +int +xfs_attr_shortform_remove(xfs_da_args_t *args) +{ + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + int base, size=0, end, totsize, i; + xfs_inode_t *dp; + + /* + * Remove the attribute. + */ + dp = args->dp; + base = sizeof(xfs_attr_sf_hdr_t); + sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe), + base += size, i++) { + size = XFS_ATTR_SF_ENTSIZE(sfe); + if (sfe->namelen != args->namelen) + continue; + if (memcmp(sfe->nameval, args->name, args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0)) + continue; + break; + } + if (i == INT_GET(sf->hdr.count, ARCH_CONVERT)) + return(XFS_ERROR(ENOATTR)); + + end = base + size; + totsize = INT_GET(sf->hdr.totsize, ARCH_CONVERT); + if (end != totsize) { + memmove(&((char *)sf)[base], &((char *)sf)[end], + totsize - end); + } + INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); + INT_MOD(sf->hdr.totsize, ARCH_CONVERT, -size); + xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); + + return(0); +} + +/* + * Look up a name in a shortform attribute list structure. + */ +/*ARGSUSED*/ +int +xfs_attr_shortform_lookup(xfs_da_args_t *args) +{ + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + int i; + xfs_ifork_t *ifp; + + ifp = args->dp->i_afp; + ASSERT(ifp->if_flags & XFS_IFINLINE); + sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { + if (sfe->namelen != args->namelen) + continue; + if (memcmp(args->name, sfe->nameval, args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0)) + continue; + return(XFS_ERROR(EEXIST)); + } + return(XFS_ERROR(ENOATTR)); +} + +/* + * Look up a name in a shortform attribute list structure. 
+ */ +/*ARGSUSED*/ +int +xfs_attr_shortform_getvalue(xfs_da_args_t *args) +{ + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + int i; + + ASSERT(args->dp->i_d.di_aformat == XFS_IFINLINE); + sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data; + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { + if (sfe->namelen != args->namelen) + continue; + if (memcmp(args->name, sfe->nameval, args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0)) + continue; + if (args->flags & ATTR_KERNOVAL) { + args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + return(XFS_ERROR(EEXIST)); + } + if (args->valuelen < INT_GET(sfe->valuelen, ARCH_CONVERT)) { + args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + return(XFS_ERROR(ERANGE)); + } + args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + memcpy(args->value, &sfe->nameval[args->namelen], + args->valuelen); + return(XFS_ERROR(EEXIST)); + } + return(XFS_ERROR(ENOATTR)); +} + +/* + * Convert from using the shortform to the leaf. + */ +int +xfs_attr_shortform_to_leaf(xfs_da_args_t *args) +{ + xfs_inode_t *dp; + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + xfs_da_args_t nargs; + char *tmpbuffer; + int error, i, size; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + xfs_ifork_t *ifp; + + dp = args->dp; + ifp = dp->i_afp; + sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; + size = INT_GET(sf->hdr.totsize, ARCH_CONVERT); + tmpbuffer = kmem_alloc(size, KM_SLEEP); + ASSERT(tmpbuffer != NULL); + memcpy(tmpbuffer, ifp->if_u1.if_data, size); + sf = (xfs_attr_shortform_t *)tmpbuffer; + + xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); + bp = NULL; + error = xfs_da_grow_inode(args, &blkno); + if (error) { + /* + * If we hit an IO error middle of the transaction inside + * grow_inode(), we may have inconsistent data. Bail out. + */ + if (error == EIO) + goto out; + xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ + memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ + goto out; + } + + ASSERT(blkno == 0); + error = xfs_attr_leaf_create(args, blkno, &bp); + if (error) { + error = xfs_da_shrink_inode(args, 0, bp); + bp = NULL; + if (error) + goto out; + xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ + memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ + goto out; + } + + memset((char *)&nargs, 0, sizeof(nargs)); + nargs.dp = dp; + nargs.firstblock = args->firstblock; + nargs.flist = args->flist; + nargs.total = args->total; + nargs.whichfork = XFS_ATTR_FORK; + nargs.trans = args->trans; + nargs.oknoent = 1; + + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + nargs.name = (char *)sfe->nameval; + nargs.namelen = sfe->namelen; + nargs.value = (char *)&sfe->nameval[nargs.namelen]; + nargs.valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + nargs.hashval = xfs_da_hashname((char *)sfe->nameval, + sfe->namelen); + nargs.flags = (sfe->flags & XFS_ATTR_ROOT) ? 
ATTR_ROOT : 0; + error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */ + ASSERT(error == ENOATTR); + error = xfs_attr_leaf_add(bp, &nargs); + ASSERT(error != ENOSPC); + if (error) + goto out; + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + } + error = 0; + +out: + if(bp) + xfs_da_buf_done(bp); + kmem_free(tmpbuffer, size); + return(error); +} + +STATIC int +xfs_attr_shortform_compare(const void *a, const void *b) +{ + xfs_attr_sf_sort_t *sa, *sb; + + sa = (xfs_attr_sf_sort_t *)a; + sb = (xfs_attr_sf_sort_t *)b; + if (INT_GET(sa->hash, ARCH_CONVERT) + < INT_GET(sb->hash, ARCH_CONVERT)) { + return(-1); + } else if (INT_GET(sa->hash, ARCH_CONVERT) + > INT_GET(sb->hash, ARCH_CONVERT)) { + return(1); + } else { + return(sa->entno - sb->entno); + } +} + +/* + * Copy out entries of shortform attribute lists for attr_list(). + * Shortform atrtribute lists are not stored in hashval sorted order. + * If the output buffer is not large enough to hold them all, then we + * we have to calculate each entries' hashvalue and sort them before + * we can begin returning them to the user. + */ +/*ARGSUSED*/ +int +xfs_attr_shortform_list(xfs_attr_list_context_t *context) +{ + attrlist_cursor_kern_t *cursor; + xfs_attr_sf_sort_t *sbuf, *sbp; + xfs_attr_shortform_t *sf; + xfs_attr_sf_entry_t *sfe; + xfs_inode_t *dp; + int sbsize, nsbuf, count, i; + + ASSERT(context != NULL); + dp = context->dp; + ASSERT(dp != NULL); + ASSERT(dp->i_afp != NULL); + sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; + ASSERT(sf != NULL); + if (INT_ISZERO(sf->hdr.count, ARCH_CONVERT)) + return(0); + cursor = context->cursor; + ASSERT(cursor != NULL); + + xfs_attr_trace_l_c("sf start", context); + + /* + * If the buffer is large enough, do not bother with sorting. + * Note the generous fudge factor of 16 overhead bytes per entry. + */ + if ((dp->i_afp->if_bytes + INT_GET(sf->hdr.count, ARCH_CONVERT) * 16) + < context->bufsize) { + for (i = 0, sfe = &sf->list[0]; + i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + int ns = (sfe->flags & XFS_ATTR_ROOT)? + ROOT_NAMES : USER_NAMES; + if (((context->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0) && + !(context->flags & ATTR_KERNFULLS)) { + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + continue; + } + if (context->flags & ATTR_KERNOVAL) { + ASSERT(context->flags & ATTR_KERNAMELS); + context->count += xfs_namespaces[ns].namelen + + INT_GET(sfe->namelen, ARCH_CONVERT) + 1; + } + else { + if (xfs_attr_put_listent(context, ns, + (char *)sfe->nameval, + (int)sfe->namelen, + (int)INT_GET(sfe->valuelen, + ARCH_CONVERT))) + break; + } + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + } + xfs_attr_trace_l_c("sf big-gulp", context); + return(0); + } + + /* + * It didn't all fit, so we have to sort everything on hashval. + */ + sbsize = INT_GET(sf->hdr.count, ARCH_CONVERT) * sizeof(*sbuf); + sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); + + /* + * Scan the attribute list for the rest of the entries, storing + * the relevant info from only those that match into a buffer. 
+ */ + nsbuf = 0; + for (i = 0, sfe = &sf->list[0]; + i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + if (unlikely( + ((char *)sfe < (char *)sf) || + ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)) || + (sfe->namelen >= MAXNAMELEN))) { + XFS_CORRUPTION_ERROR("xfs_attr_shortform_list", + XFS_ERRLEVEL_LOW, + context->dp->i_mount, sfe); + xfs_attr_trace_l_c("sf corrupted", context); + kmem_free(sbuf, sbsize); + return XFS_ERROR(EFSCORRUPTED); + } + if (((context->flags & ATTR_ROOT) != 0) != + ((sfe->flags & XFS_ATTR_ROOT) != 0) && + !(context->flags & ATTR_KERNFULLS)) { + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + continue; + } + sbp->entno = i; + INT_SET(sbp->hash, ARCH_CONVERT, + xfs_da_hashname((char *)sfe->nameval, sfe->namelen)); + sbp->name = (char *)sfe->nameval; + sbp->namelen = sfe->namelen; + /* These are bytes, and both on-disk, don't endian-flip */ + sbp->valuelen = sfe->valuelen; + sbp->flags = sfe->flags; + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + sbp++; + nsbuf++; + } + + /* + * Sort the entries on hash then entno. + */ + qsort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare); + + /* + * Re-find our place IN THE SORTED LIST. + */ + count = 0; + cursor->initted = 1; + cursor->blkno = 0; + for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { + if (INT_GET(sbp->hash, ARCH_CONVERT) == cursor->hashval) { + if (cursor->offset == count) { + break; + } + count++; + } else if (INT_GET(sbp->hash, ARCH_CONVERT) > cursor->hashval) { + break; + } + } + if (i == nsbuf) { + kmem_free(sbuf, sbsize); + xfs_attr_trace_l_c("blk end", context); + return(0); + } + + /* + * Loop putting entries into the user buffer. + */ + for ( ; i < nsbuf; i++, sbp++) { + int ns = (sbp->flags & XFS_ATTR_ROOT)? ROOT_NAMES:USER_NAMES; + if (cursor->hashval != INT_GET(sbp->hash, ARCH_CONVERT)) { + cursor->hashval = INT_GET(sbp->hash, ARCH_CONVERT); + cursor->offset = 0; + } + if (context->flags & ATTR_KERNOVAL) { + ASSERT(context->flags & ATTR_KERNAMELS); + context->count += xfs_namespaces[ns].namelen + + sbp->namelen + 1; + } + else { + if (xfs_attr_put_listent(context, ns, + sbp->name, sbp->namelen, + INT_GET(sbp->valuelen, ARCH_CONVERT))) + break; + } + cursor->offset++; + } + + kmem_free(sbuf, sbsize); + xfs_attr_trace_l_c("sf E-O-F", context); + return(0); +} + +/* + * Check a leaf attribute block to see if all the entries would fit into + * a shortform attribute list. 
+ */ +int +xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + int bytes, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + + entry = &leaf->entries[0]; + bytes = sizeof(struct xfs_attr_sf_hdr); + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if (entry->flags & XFS_ATTR_INCOMPLETE) + continue; /* don't copy partial entries */ + if (!(entry->flags & XFS_ATTR_LOCAL)) + return(0); + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); + if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) + return(0); + if (INT_GET(name_loc->valuelen, ARCH_CONVERT) >= XFS_ATTR_SF_ENTSIZE_MAX) + return(0); + bytes += sizeof(struct xfs_attr_sf_entry)-1 + + name_loc->namelen + + INT_GET(name_loc->valuelen, ARCH_CONVERT); + } + return( bytes < XFS_IFORK_ASIZE(dp) ); +} + +/* + * Convert a leaf attribute list to shortform attribute list + */ +int +xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_da_args_t nargs; + xfs_inode_t *dp; + char *tmpbuffer; + int error, i; + + dp = args->dp; + tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP); + ASSERT(tmpbuffer != NULL); + + ASSERT(bp != NULL); + memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); + leaf = (xfs_attr_leafblock_t *)tmpbuffer; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); + + /* + * Clean out the prior contents of the attribute list. + */ + error = xfs_da_shrink_inode(args, 0, bp); + if (error) + goto out; + error = xfs_attr_shortform_create(args); + if (error) + goto out; + + /* + * Copy the attributes + */ + memset((char *)&nargs, 0, sizeof(nargs)); + nargs.dp = dp; + nargs.firstblock = args->firstblock; + nargs.flist = args->flist; + nargs.total = args->total; + nargs.whichfork = XFS_ATTR_FORK; + nargs.trans = args->trans; + nargs.oknoent = 1; + entry = &leaf->entries[0]; + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if (entry->flags & XFS_ATTR_INCOMPLETE) + continue; /* don't copy partial entries */ + if (INT_ISZERO(entry->nameidx, ARCH_CONVERT)) + continue; + ASSERT(entry->flags & XFS_ATTR_LOCAL); + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); + nargs.name = (char *)name_loc->nameval; + nargs.namelen = name_loc->namelen; + nargs.value = (char *)&name_loc->nameval[nargs.namelen]; + nargs.valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); + nargs.hashval = INT_GET(entry->hashval, ARCH_CONVERT); + nargs.flags = (entry->flags & XFS_ATTR_ROOT) ? ATTR_ROOT : 0; + xfs_attr_shortform_add(&nargs); + } + error = 0; + +out: + kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount)); + return(error); +} + +/* + * Convert from using a single leaf to a root node and a leaf. 
+ */ +int +xfs_attr_leaf_to_node(xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_da_intnode_t *node; + xfs_inode_t *dp; + xfs_dabuf_t *bp1, *bp2; + xfs_dablk_t blkno; + int error; + + dp = args->dp; + bp1 = bp2 = NULL; + error = xfs_da_grow_inode(args, &blkno); + if (error) + goto out; + error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1, + XFS_ATTR_FORK); + if (error) + goto out; + ASSERT(bp1 != NULL); + bp2 = NULL; + error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp2, + XFS_ATTR_FORK); + if (error) + goto out; + ASSERT(bp2 != NULL); + memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount)); + xfs_da_buf_done(bp1); + bp1 = NULL; + xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1); + + /* + * Set up the new root node. + */ + error = xfs_da_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK); + if (error) + goto out; + node = bp1->data; + leaf = bp2->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + /* both on-disk, don't endian-flip twice */ + node->btree[0].hashval = + leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval; + INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); + INT_SET(node->hdr.count, ARCH_CONVERT, 1); + xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1); + error = 0; +out: + if (bp1) + xfs_da_buf_done(bp1); + if (bp2) + xfs_da_buf_done(bp2); + return(error); +} + + +/*======================================================================== + * Routines used for growing the Btree. + *========================================================================*/ + +/* + * Create the initial contents of a leaf attribute list + * or a leaf in a node attribute list. + */ +int +xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_hdr_t *hdr; + xfs_inode_t *dp; + xfs_dabuf_t *bp; + int error; + + dp = args->dp; + ASSERT(dp != NULL); + error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + leaf = bp->data; + memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); + hdr = &leaf->hdr; + INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_ATTR_LEAF_MAGIC); + INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); + if (INT_ISZERO(hdr->firstused, ARCH_CONVERT)) { + INT_SET(hdr->firstused, ARCH_CONVERT, + XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN); + } + + INT_SET(hdr->freemap[0].base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_hdr_t)); + INT_SET(hdr->freemap[0].size, ARCH_CONVERT, + INT_GET(hdr->firstused, ARCH_CONVERT) + - INT_GET(hdr->freemap[0].base, + ARCH_CONVERT)); + + xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); + + *bpp = bp; + return(0); +} + +/* + * Split the leaf node, rebalance, then add the new entry. + */ +int +xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, + xfs_da_state_blk_t *newblk) +{ + xfs_dablk_t blkno; + int error; + + /* + * Allocate space for a new leaf node. + */ + ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC); + error = xfs_da_grow_inode(state->args, &blkno); + if (error) + return(error); + error = xfs_attr_leaf_create(state->args, blkno, &newblk->bp); + if (error) + return(error); + newblk->blkno = blkno; + newblk->magic = XFS_ATTR_LEAF_MAGIC; + + /* + * Rebalance the entries across the two leaves. + * NOTE: rebalance() currently depends on the 2nd block being empty. 
+ */ + xfs_attr_leaf_rebalance(state, oldblk, newblk); + error = xfs_da_blk_link(state, oldblk, newblk); + if (error) + return(error); + + /* + * Save info on "old" attribute for "atomic rename" ops, leaf_add() + * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the + * "new" attrs info. Will need the "old" info to remove it later. + * + * Insert the "new" entry in the correct block. + */ + if (state->inleaf) + error = xfs_attr_leaf_add(oldblk->bp, state->args); + else + error = xfs_attr_leaf_add(newblk->bp, state->args); + + /* + * Update last hashval in each block since we added the name. + */ + oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL); + newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL); + return(error); +} + +/* + * Add a name to the leaf attribute list structure. + */ +int +xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_hdr_t *hdr; + xfs_attr_leaf_map_t *map; + int tablesize, entsize, sum, tmp, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT((args->index >= 0) + && (args->index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); + hdr = &leaf->hdr; + entsize = xfs_attr_leaf_newentsize(args, + args->trans->t_mountp->m_sb.sb_blocksize, NULL); + + /* + * Search through freemap for first-fit on new name length. + * (may need to figure in size of entry struct too) + */ + tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) + * sizeof(xfs_attr_leaf_entry_t) + + sizeof(xfs_attr_leaf_hdr_t); + map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1]; + for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) { + if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) { + sum += INT_GET(map->size, ARCH_CONVERT); + continue; + } + if (INT_ISZERO(map->size, ARCH_CONVERT)) + continue; /* no space in this map */ + tmp = entsize; + if (INT_GET(map->base, ARCH_CONVERT) + < INT_GET(hdr->firstused, ARCH_CONVERT)) + tmp += sizeof(xfs_attr_leaf_entry_t); + if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { + tmp = xfs_attr_leaf_add_work(bp, args, i); + return(tmp); + } + sum += INT_GET(map->size, ARCH_CONVERT); + } + + /* + * If there are no holes in the address space of the block, + * and we don't have enough freespace, then compaction will do us + * no good and we should just give up. + */ + if (!hdr->holes && (sum < entsize)) + return(XFS_ERROR(ENOSPC)); + + /* + * Compact the entries to coalesce free space. + * This may change the hdr->count via dropping INCOMPLETE entries. + */ + xfs_attr_leaf_compact(args->trans, bp); + + /* + * After compaction, the block is guaranteed to have only one + * free region, in freemap[0]. If it is not big enough, give up. + */ + if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) + < (entsize + sizeof(xfs_attr_leaf_entry_t))) + return(XFS_ERROR(ENOSPC)); + + return(xfs_attr_leaf_add_work(bp, args, 0)); +} + +/* + * Add a name to a leaf attribute list structure. 
+ */ +STATIC int +xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_hdr_t *hdr; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_attr_leaf_map_t *map; + xfs_mount_t *mp; + int tmp, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + hdr = &leaf->hdr; + ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE)); + ASSERT((args->index >= 0) + && (args->index <= INT_GET(hdr->count, ARCH_CONVERT))); + + /* + * Force open some space in the entry array and fill it in. + */ + entry = &leaf->entries[args->index]; + if (args->index < INT_GET(hdr->count, ARCH_CONVERT)) { + tmp = INT_GET(hdr->count, ARCH_CONVERT) - args->index; + tmp *= sizeof(xfs_attr_leaf_entry_t); + memmove((char *)(entry+1), (char *)entry, tmp); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); + } + INT_MOD(hdr->count, ARCH_CONVERT, 1); + + /* + * Allocate space for the new string (at the end of the run). + */ + map = &hdr->freemap[mapindex]; + mp = args->trans->t_mountp; + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT((INT_GET(map->base, ARCH_CONVERT) & 0x3) == 0); + ASSERT(INT_GET(map->size, ARCH_CONVERT) + >= xfs_attr_leaf_newentsize(args, + mp->m_sb.sb_blocksize, NULL)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT((INT_GET(map->size, ARCH_CONVERT) & 0x3) == 0); + INT_MOD(map->size, ARCH_CONVERT, + -xfs_attr_leaf_newentsize(args, mp->m_sb.sb_blocksize, &tmp)); + INT_SET(entry->nameidx, ARCH_CONVERT, + INT_GET(map->base, ARCH_CONVERT) + + INT_GET(map->size, ARCH_CONVERT)); + INT_SET(entry->hashval, ARCH_CONVERT, args->hashval); + entry->flags = tmp ? XFS_ATTR_LOCAL : 0; + entry->flags |= (args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0; + if (args->rename) { + entry->flags |= XFS_ATTR_INCOMPLETE; + if ((args->blkno2 == args->blkno) && + (args->index2 <= args->index)) { + args->index2++; + } + } + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); + ASSERT((args->index == 0) || (INT_GET(entry->hashval, ARCH_CONVERT) + >= INT_GET((entry-1)->hashval, + ARCH_CONVERT))); + ASSERT((args->index == INT_GET(hdr->count, ARCH_CONVERT)-1) || + (INT_GET(entry->hashval, ARCH_CONVERT) + <= (INT_GET((entry+1)->hashval, ARCH_CONVERT)))); + + /* + * Copy the attribute name and value into the new space. + * + * For "remote" attribute values, simply note that we need to + * allocate space for the "remote" value. We can't actually + * allocate the extents in this transaction, and we can't decide + * which blocks they should be as we might allocate more blocks + * as part of this transaction (a split operation for example). 
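+ * So, for the remote case, the code below records only the name,
+ * zeroes the value block/length fields and marks the entry
+ * INCOMPLETE; a later step in the transaction series allocates the
+ * value blocks, fills them in, and clears the INCOMPLETE flag.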
+ */ + if (entry->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); + name_loc->namelen = args->namelen; + INT_SET(name_loc->valuelen, ARCH_CONVERT, args->valuelen); + memcpy((char *)name_loc->nameval, args->name, args->namelen); + memcpy((char *)&name_loc->nameval[args->namelen], args->value, + INT_GET(name_loc->valuelen, ARCH_CONVERT)); + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); + name_rmt->namelen = args->namelen; + memcpy((char *)name_rmt->name, args->name, args->namelen); + entry->flags |= XFS_ATTR_INCOMPLETE; + /* just in case */ + INT_ZERO(name_rmt->valuelen, ARCH_CONVERT); + INT_ZERO(name_rmt->valueblk, ARCH_CONVERT); + args->rmtblkno = 1; + args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen); + } + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), + xfs_attr_leaf_entsize(leaf, args->index))); + + /* + * Update the control info for this leaf node + */ + if (INT_GET(entry->nameidx, ARCH_CONVERT) + < INT_GET(hdr->firstused, ARCH_CONVERT)) { + /* both on-disk, don't endian-flip twice */ + hdr->firstused = entry->nameidx; + } + ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) + >= ((INT_GET(hdr->count, ARCH_CONVERT) + * sizeof(*entry))+sizeof(*hdr))); + tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1) + * sizeof(xfs_attr_leaf_entry_t) + + sizeof(xfs_attr_leaf_hdr_t); + map = &hdr->freemap[0]; + for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { + if (INT_GET(map->base, ARCH_CONVERT) == tmp) { + INT_MOD(map->base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_entry_t)); + INT_MOD(map->size, ARCH_CONVERT, + -sizeof(xfs_attr_leaf_entry_t)); + } + } + INT_MOD(hdr->usedbytes, ARCH_CONVERT, + xfs_attr_leaf_entsize(leaf, args->index)); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); + return(0); +} + +/* + * Garbage collect a leaf attribute list block by copying it to a new buffer. + */ +STATIC void +xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp) +{ + xfs_attr_leafblock_t *leaf_s, *leaf_d; + xfs_attr_leaf_hdr_t *hdr_s, *hdr_d; + xfs_mount_t *mp; + char *tmpbuffer; + + mp = trans->t_mountp; + tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP); + ASSERT(tmpbuffer != NULL); + memcpy(tmpbuffer, bp->data, XFS_LBSIZE(mp)); + memset(bp->data, 0, XFS_LBSIZE(mp)); + + /* + * Copy basic information + */ + leaf_s = (xfs_attr_leafblock_t *)tmpbuffer; + leaf_d = bp->data; + hdr_s = &leaf_s->hdr; + hdr_d = &leaf_d->hdr; + hdr_d->info = hdr_s->info; /* struct copy */ + INT_SET(hdr_d->firstused, ARCH_CONVERT, XFS_LBSIZE(mp)); + /* handle truncation gracefully */ + if (INT_ISZERO(hdr_d->firstused, ARCH_CONVERT)) { + INT_SET(hdr_d->firstused, ARCH_CONVERT, + XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN); + } + INT_ZERO(hdr_d->usedbytes, ARCH_CONVERT); + INT_ZERO(hdr_d->count, ARCH_CONVERT); + hdr_d->holes = 0; + INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_hdr_t)); + INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, + INT_GET(hdr_d->firstused, ARCH_CONVERT) + - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); + + /* + * Copy all entry's in the same (sorted) order, + * but allocate name/value pairs packed and in sequence. + */ + xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0, + (int)INT_GET(hdr_s->count, ARCH_CONVERT), mp); + + xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1); + + kmem_free(tmpbuffer, XFS_LBSIZE(mp)); +} + +/* + * Redistribute the attribute list entries between two leaf nodes, + * taking into account the size of the new entry. 
+ * + * NOTE: if new block is empty, then it will get the upper half of the + * old block. At present, all (one) callers pass in an empty second block. + * + * This code adjusts the args->index/blkno and args->index2/blkno2 fields + * to match what it is doing in splitting the attribute leaf block. Those + * values are used in "atomic rename" operations on attributes. Note that + * the "new" and "old" values can end up in different blocks. + */ +STATIC void +xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2) +{ + xfs_da_args_t *args; + xfs_da_state_blk_t *tmp_blk; + xfs_attr_leafblock_t *leaf1, *leaf2; + xfs_attr_leaf_hdr_t *hdr1, *hdr2; + int count, totallen, max, space, swap; + + /* + * Set up environment. + */ + ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC); + ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC); + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + args = state->args; + + /* + * Check ordering of blocks, reverse if it makes things simpler. + * + * NOTE: Given that all (current) callers pass in an empty + * second block, this code should never set "swap". + */ + swap = 0; + if (xfs_attr_leaf_order(blk1->bp, blk2->bp)) { + tmp_blk = blk1; + blk1 = blk2; + blk2 = tmp_blk; + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + swap = 1; + } + hdr1 = &leaf1->hdr; + hdr2 = &leaf2->hdr; + + /* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. Then get + * the direction to copy and the number of elements to move. + * + * "inleaf" is true if the new entry should be inserted into blk1. + * If "swap" is also true, then reverse the sense of "inleaf". + */ + state->inleaf = xfs_attr_leaf_figure_balance(state, blk1, blk2, + &count, &totallen); + if (swap) + state->inleaf = !state->inleaf; + + /* + * Move any entries required from leaf to leaf: + */ + if (count < INT_GET(hdr1->count, ARCH_CONVERT)) { + /* + * Figure the total bytes to be added to the destination leaf. + */ + /* number entries being moved */ + count = INT_GET(hdr1->count, ARCH_CONVERT) - count; + space = INT_GET(hdr1->usedbytes, ARCH_CONVERT) - totallen; + space += count * sizeof(xfs_attr_leaf_entry_t); + + /* + * leaf2 is the destination, compact it if it looks tight. + */ + max = INT_GET(hdr2->firstused, ARCH_CONVERT) + - sizeof(xfs_attr_leaf_hdr_t); + max -= INT_GET(hdr2->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t); + if (space > max) { + xfs_attr_leaf_compact(args->trans, blk2->bp); + } + + /* + * Move high entries from leaf1 to low end of leaf2. + */ + xfs_attr_leaf_moveents(leaf1, + INT_GET(hdr1->count, ARCH_CONVERT)-count, + leaf2, 0, count, state->mp); + + xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); + xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); + } else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) { + /* + * I assert that since all callers pass in an empty + * second buffer, this code should never execute. + */ + + /* + * Figure the total bytes to be added to the destination leaf. + */ + /* number entries being moved */ + count -= INT_GET(hdr1->count, ARCH_CONVERT); + space = totallen - INT_GET(hdr1->usedbytes, ARCH_CONVERT); + space += count * sizeof(xfs_attr_leaf_entry_t); + + /* + * leaf1 is the destination, compact it if it looks tight. 
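+ * ("Tight" here means the entries being moved would not fit in the
+ * contiguous gap between the end of the entry table and firstused,
+ * even though the block may have enough total free space once
+ * compacted.)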
+ */ + max = INT_GET(hdr1->firstused, ARCH_CONVERT) + - sizeof(xfs_attr_leaf_hdr_t); + max -= INT_GET(hdr1->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t); + if (space > max) { + xfs_attr_leaf_compact(args->trans, blk1->bp); + } + + /* + * Move low entries from leaf2 to high end of leaf1. + */ + xfs_attr_leaf_moveents(leaf2, 0, leaf1, + (int)INT_GET(hdr1->count, ARCH_CONVERT), count, + state->mp); + + xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); + xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); + } + + /* + * Copy out last hashval in each block for B-tree code. + */ + blk1->hashval = + INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT); + blk2->hashval = + INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT); + + /* + * Adjust the expected index for insertion. + * NOTE: this code depends on the (current) situation that the + * second block was originally empty. + * + * If the insertion point moved to the 2nd block, we must adjust + * the index. We must also track the entry just following the + * new entry for use in an "atomic rename" operation, that entry + * is always the "old" entry and the "new" entry is what we are + * inserting. The index/blkno fields refer to the "old" entry, + * while the index2/blkno2 fields refer to the "new" entry. + */ + if (blk1->index > INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { + ASSERT(state->inleaf == 0); + blk2->index = blk1->index + - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + args->index = args->index2 = blk2->index; + args->blkno = args->blkno2 = blk2->blkno; + } else if (blk1->index == INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { + if (state->inleaf) { + args->index = blk1->index; + args->blkno = blk1->blkno; + args->index2 = 0; + args->blkno2 = blk2->blkno; + } else { + blk2->index = blk1->index + - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + args->index = args->index2 = blk2->index; + args->blkno = args->blkno2 = blk2->blkno; + } + } else { + ASSERT(state->inleaf == 1); + args->index = args->index2 = blk1->index; + args->blkno = args->blkno2 = blk1->blkno; + } +} + +/* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. + * GROT: Is this really necessary? With other than a 512 byte blocksize, + * GROT: there will always be enough room in either block for a new entry. + * GROT: Do a double-split for this case? + */ +STATIC int +xfs_attr_leaf_figure_balance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2, + int *countarg, int *usedbytesarg) +{ + xfs_attr_leafblock_t *leaf1, *leaf2; + xfs_attr_leaf_hdr_t *hdr1, *hdr2; + xfs_attr_leaf_entry_t *entry; + int count, max, index, totallen, half; + int lastdelta, foundit, tmp; + + /* + * Set up environment. + */ + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + hdr1 = &leaf1->hdr; + hdr2 = &leaf2->hdr; + foundit = 0; + totallen = 0; + + /* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. 
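+ * "half" below is half of the total bytes the two blocks must hold
+ * between them, counting entry table slots, name/value data, and the
+ * new entry being inserted; we keep walking entries while doing so
+ * brings us closer to that midpoint (tracked in "lastdelta").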
+ */ + max = INT_GET(hdr1->count, ARCH_CONVERT) + + INT_GET(hdr2->count, ARCH_CONVERT); + half = (max+1) * sizeof(*entry); + half += INT_GET(hdr1->usedbytes, ARCH_CONVERT) + + INT_GET(hdr2->usedbytes, ARCH_CONVERT) + + xfs_attr_leaf_newentsize(state->args, + state->blocksize, NULL); + half /= 2; + lastdelta = state->blocksize; + entry = &leaf1->entries[0]; + for (count = index = 0; count < max; entry++, index++, count++) { + +#define XFS_ATTR_ABS(A) (((A) < 0) ? -(A) : (A)) + /* + * The new entry is in the first block, account for it. + */ + if (count == blk1->index) { + tmp = totallen + sizeof(*entry) + + xfs_attr_leaf_newentsize(state->args, + state->blocksize, + NULL); + if (XFS_ATTR_ABS(half - tmp) > lastdelta) + break; + lastdelta = XFS_ATTR_ABS(half - tmp); + totallen = tmp; + foundit = 1; + } + + /* + * Wrap around into the second block if necessary. + */ + if (count == INT_GET(hdr1->count, ARCH_CONVERT)) { + leaf1 = leaf2; + entry = &leaf1->entries[0]; + index = 0; + } + + /* + * Figure out if next leaf entry would be too much. + */ + tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1, + index); + if (XFS_ATTR_ABS(half - tmp) > lastdelta) + break; + lastdelta = XFS_ATTR_ABS(half - tmp); + totallen = tmp; +#undef XFS_ATTR_ABS + } + + /* + * Calculate the number of usedbytes that will end up in lower block. + * If new entry not in lower block, fix up the count. + */ + totallen -= count * sizeof(*entry); + if (foundit) { + totallen -= sizeof(*entry) + + xfs_attr_leaf_newentsize(state->args, + state->blocksize, + NULL); + } + + *countarg = count; + *usedbytesarg = totallen; + return(foundit); +} + +/*======================================================================== + * Routines used for shrinking the Btree. + *========================================================================*/ + +/* + * Check a leaf block and its neighbors to see if the block should be + * collapsed into one or the other neighbor. Always keep the block + * with the smaller block number. + * If the current block is over 50% full, don't try to join it, return 0. + * If the block is empty, fill in the state structure and return 2. + * If it can be collapsed, fill in the state structure and return 1. + * If nothing can be done, return 0. + * + * GROT: allow for INCOMPLETE entries in calculation. + */ +int +xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) +{ + xfs_attr_leafblock_t *leaf; + xfs_da_state_blk_t *blk; + xfs_da_blkinfo_t *info; + int count, bytes, forward, error, retval, i; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + + /* + * Check for the degenerate case of the block being over 50% full. + * If so, it's not worth even looking to see if we might be able + * to coalesce with a sibling. + */ + blk = &state->path.blk[ state->path.active-1 ]; + info = blk->bp->data; + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + leaf = (xfs_attr_leafblock_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes = sizeof(xfs_attr_leaf_hdr_t) + + count * sizeof(xfs_attr_leaf_entry_t) + + INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + if (bytes > (state->blocksize >> 1)) { + *action = 0; /* blk over 50%, don't try to join */ + return(0); + } + + /* + * Check for the degenerate case of the block being empty. + * If the block is empty, we'll simply delete it, no need to + * coalesce it with a sibling block. We choose (aribtrarily) + * to merge with the forward block unless it is NULL. 
+ */ + if (count == 0) { + /* + * Make altpath point to the block we want to keep and + * path point to the block we want to drop (this one). + */ + forward = (!INT_ISZERO(info->forw, ARCH_CONVERT)); + memcpy(&state->altpath, &state->path, sizeof(state->path)); + error = xfs_da_path_shift(state, &state->altpath, forward, + 0, &retval); + if (error) + return(error); + if (retval) { + *action = 0; + } else { + *action = 2; + } + return(0); + } + + /* + * Examine each sibling block to see if we can coalesce with + * at least 25% free space to spare. We need to figure out + * whether to merge with the forward or the backward block. + * We prefer coalescing with the lower numbered sibling so as + * to shrink an attribute list over time. + */ + /* start with smaller blk num */ + forward = (INT_GET(info->forw, ARCH_CONVERT) + < INT_GET(info->back, ARCH_CONVERT)); + for (i = 0; i < 2; forward = !forward, i++) { + if (forward) + blkno = INT_GET(info->forw, ARCH_CONVERT); + else + blkno = INT_GET(info->back, ARCH_CONVERT); + if (blkno == 0) + continue; + error = xfs_da_read_buf(state->args->trans, state->args->dp, + blkno, -1, &bp, XFS_ATTR_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + + leaf = (xfs_attr_leafblock_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes = state->blocksize - (state->blocksize>>2); + bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + count += INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + bytes -= count * sizeof(xfs_attr_leaf_entry_t); + bytes -= sizeof(xfs_attr_leaf_hdr_t); + xfs_da_brelse(state->args->trans, bp); + if (bytes >= 0) + break; /* fits with at least 25% to spare */ + } + if (i >= 2) { + *action = 0; + return(0); + } + + /* + * Make altpath point to the block we want to keep (the lower + * numbered block) and path point to the block we want to drop. + */ + memcpy(&state->altpath, &state->path, sizeof(state->path)); + if (blkno < blk->blkno) { + error = xfs_da_path_shift(state, &state->altpath, forward, + 0, &retval); + } else { + error = xfs_da_path_shift(state, &state->path, forward, + 0, &retval); + } + if (error) + return(error); + if (retval) { + *action = 0; + } else { + *action = 1; + } + return(0); +} + +/* + * Remove a name from the leaf attribute list structure. + * + * Return 1 if leaf is less than 37% full, 0 if >= 37% full. + * If two leaves are 37% full, when combined they will leave 25% free. 
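+ * (Roughly: two blocks at just under 37% usage merge into one block
+ * about 75% full, leaving the desired 25% of slack in the survivor.)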
+ */ +int +xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_hdr_t *hdr; + xfs_attr_leaf_map_t *map; + xfs_attr_leaf_entry_t *entry; + int before, after, smallest, entsize; + int tablesize, tmp, i; + xfs_mount_t *mp; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + hdr = &leaf->hdr; + mp = args->trans->t_mountp; + ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) + && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); + ASSERT((args->index >= 0) + && (args->index < INT_GET(hdr->count, ARCH_CONVERT))); + ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) + >= ((INT_GET(hdr->count, ARCH_CONVERT) + * sizeof(*entry))+sizeof(*hdr))); + entry = &leaf->entries[args->index]; + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) + >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp)); + + /* + * Scan through free region table: + * check for adjacency of free'd entry with an existing one, + * find smallest free region in case we need to replace it, + * adjust any map that borders the entry table, + */ + tablesize = INT_GET(hdr->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t) + + sizeof(xfs_attr_leaf_hdr_t); + map = &hdr->freemap[0]; + tmp = INT_GET(map->size, ARCH_CONVERT); + before = after = -1; + smallest = XFS_ATTR_LEAF_MAPSIZE - 1; + entsize = xfs_attr_leaf_entsize(leaf, args->index); + for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + if (INT_GET(map->base, ARCH_CONVERT) == tablesize) { + INT_MOD(map->base, ARCH_CONVERT, + -sizeof(xfs_attr_leaf_entry_t)); + INT_MOD(map->size, ARCH_CONVERT, + sizeof(xfs_attr_leaf_entry_t)); + } + + if ((INT_GET(map->base, ARCH_CONVERT) + + INT_GET(map->size, ARCH_CONVERT)) + == INT_GET(entry->nameidx, ARCH_CONVERT)) { + before = i; + } else if (INT_GET(map->base, ARCH_CONVERT) + == (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) { + after = i; + } else if (INT_GET(map->size, ARCH_CONVERT) < tmp) { + tmp = INT_GET(map->size, ARCH_CONVERT); + smallest = i; + } + } + + /* + * Coalesce adjacent freemap regions, + * or replace the smallest region. + */ + if ((before >= 0) || (after >= 0)) { + if ((before >= 0) && (after >= 0)) { + map = &hdr->freemap[before]; + INT_MOD(map->size, ARCH_CONVERT, entsize); + INT_MOD(map->size, ARCH_CONVERT, + INT_GET(hdr->freemap[after].size, + ARCH_CONVERT)); + INT_ZERO(hdr->freemap[after].base, ARCH_CONVERT); + INT_ZERO(hdr->freemap[after].size, ARCH_CONVERT); + } else if (before >= 0) { + map = &hdr->freemap[before]; + INT_MOD(map->size, ARCH_CONVERT, entsize); + } else { + map = &hdr->freemap[after]; + /* both on-disk, don't endian flip twice */ + map->base = entry->nameidx; + INT_MOD(map->size, ARCH_CONVERT, entsize); + } + } else { + /* + * Replace smallest region (if it is smaller than free'd entry) + */ + map = &hdr->freemap[smallest]; + if (INT_GET(map->size, ARCH_CONVERT) < entsize) { + INT_SET(map->base, ARCH_CONVERT, + INT_GET(entry->nameidx, ARCH_CONVERT)); + INT_SET(map->size, ARCH_CONVERT, entsize); + } + } + + /* + * Did we remove the first entry? + */ + if (INT_GET(entry->nameidx, ARCH_CONVERT) + == INT_GET(hdr->firstused, ARCH_CONVERT)) + smallest = 1; + else + smallest = 0; + + /* + * Compress the remaining entries and zero out the removed stuff. 
+ */ + memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize); + INT_MOD(hdr->usedbytes, ARCH_CONVERT, -entsize); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), + entsize)); + + tmp = (INT_GET(hdr->count, ARCH_CONVERT) - args->index) + * sizeof(xfs_attr_leaf_entry_t); + memmove((char *)entry, (char *)(entry+1), tmp); + INT_MOD(hdr->count, ARCH_CONVERT, -1); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); + entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)]; + memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t)); + + /* + * If we removed the first entry, re-find the first used byte + * in the name area. Note that if the entry was the "firstused", + * then we don't have a "hole" in our block resulting from + * removing the name. + */ + if (smallest) { + tmp = XFS_LBSIZE(mp); + entry = &leaf->entries[0]; + for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; + i >= 0; entry++, i--) { + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) + >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) + < XFS_LBSIZE(mp)); + if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp) + tmp = INT_GET(entry->nameidx, ARCH_CONVERT); + } + INT_SET(hdr->firstused, ARCH_CONVERT, tmp); + if (INT_ISZERO(hdr->firstused, ARCH_CONVERT)) { + INT_SET(hdr->firstused, ARCH_CONVERT, + tmp - XFS_ATTR_LEAF_NAME_ALIGN); + } + } else { + hdr->holes = 1; /* mark as needing compaction */ + } + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); + + /* + * Check if leaf is less than 50% full, caller may want to + * "join" the leaf with a sibling if so. + */ + tmp = sizeof(xfs_attr_leaf_hdr_t); + tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t); + tmp += INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */ +} + +/* + * Move all the attribute list entries from drop_leaf into save_leaf. + */ +void +xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk) +{ + xfs_attr_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf; + xfs_attr_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr; + xfs_mount_t *mp; + char *tmpbuffer; + + /* + * Set up environment. + */ + mp = state->mp; + ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC); + ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC); + drop_leaf = drop_blk->bp->data; + save_leaf = save_blk->bp->data; + ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + drop_hdr = &drop_leaf->hdr; + save_hdr = &save_leaf->hdr; + + /* + * Save last hashval from dying block for later Btree fixup. + */ + drop_blk->hashval = + INT_GET(drop_leaf->entries[INT_GET(drop_leaf->hdr.count, + ARCH_CONVERT)-1].hashval, + ARCH_CONVERT); + + /* + * Check if we need a temp buffer, or can we do it in place. + * Note that we don't check "leaf" for holes because we will + * always be dropping it, toosmall() decided that for us already. + */ + if (save_hdr->holes == 0) { + /* + * dest leaf has no holes, so we add there. May need + * to make some room in the entry array. 
+ */ + if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { + xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0, + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + } else { + xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, + INT_GET(save_hdr->count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), + mp); + } + } else { + /* + * Destination has holes, so we make a temporary copy + * of the leaf and add them both to that. + */ + tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP); + ASSERT(tmpbuffer != NULL); + memset(tmpbuffer, 0, state->blocksize); + tmp_leaf = (xfs_attr_leafblock_t *)tmpbuffer; + tmp_hdr = &tmp_leaf->hdr; + tmp_hdr->info = save_hdr->info; /* struct copy */ + INT_ZERO(tmp_hdr->count, ARCH_CONVERT); + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize); + if (INT_ISZERO(tmp_hdr->firstused, ARCH_CONVERT)) { + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, + state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN); + } + INT_ZERO(tmp_hdr->usedbytes, ARCH_CONVERT); + if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { + xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0, + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), + mp); + xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, + INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(save_hdr->count, ARCH_CONVERT), + mp); + } else { + xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0, + (int)INT_GET(save_hdr->count, ARCH_CONVERT), + mp); + xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, + INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), + mp); + } + memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize); + kmem_free(tmpbuffer, state->blocksize); + } + + xfs_da_log_buf(state->args->trans, save_blk->bp, 0, + state->blocksize - 1); + + /* + * Copy out last hashval in each block for B-tree code. + */ + save_blk->hashval = + INT_GET(save_leaf->entries[INT_GET(save_leaf->hdr.count, + ARCH_CONVERT)-1].hashval, + ARCH_CONVERT); +} + +/*======================================================================== + * Routines used for finding things in the Btree. + *========================================================================*/ + +/* + * Look up a name in a leaf attribute list structure. + * This is the internal routine, it uses the caller's buffer. + * + * Note that duplicate keys are allowed, but only check within the + * current leaf node. The Btree code must check in adjacent leaf nodes. + * + * Return in args->index the index into the entry[] array of either + * the found entry, or where the entry should have been (insert before + * that entry). + * + * Don't change the args->value unless we find the attribute. + */ +int +xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + int probe, span; + xfs_dahash_t hashval; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) + < (XFS_LBSIZE(args->dp->i_mount)/8)); + + /* + * Binary search. 
(note: small blocks will skip this loop) + */ + hashval = args->hashval; + probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2; + for (entry = &leaf->entries[probe]; span > 4; + entry = &leaf->entries[probe]) { + span /= 2; + if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval) + probe += span; + else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval) + probe -= span; + else + break; + } + ASSERT((probe >= 0) && \ + ((INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)))); + ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT) + == hashval)); + + /* + * Since we may have duplicate hashval's, find the first matching + * hashval in the leaf. + */ + while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT) + >= hashval)) { + entry--; + probe--; + } + while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + && (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) { + entry++; + probe++; + } + if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT)) + || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) { + args->index = probe; + return(XFS_ERROR(ENOATTR)); + } + + /* + * Duplicate keys may be present, so search all of them for a match. + */ + for ( ; (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + && (INT_GET(entry->hashval, ARCH_CONVERT) == hashval); + entry++, probe++) { +/* + * GROT: Add code to remove incomplete entries. + */ + /* + * If we are looking for INCOMPLETE entries, show only those. + * If we are looking for complete entries, show only those. + */ + if ((args->flags & XFS_ATTR_INCOMPLETE) != + (entry->flags & XFS_ATTR_INCOMPLETE)) { + continue; + } + if (entry->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, probe); + if (name_loc->namelen != args->namelen) + continue; + if (memcmp(args->name, (char *)name_loc->nameval, + args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((entry->flags & XFS_ATTR_ROOT) != 0)) + continue; + args->index = probe; + return(XFS_ERROR(EEXIST)); + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, probe); + if (name_rmt->namelen != args->namelen) + continue; + if (memcmp(args->name, (char *)name_rmt->name, + args->namelen) != 0) + continue; + if (((args->flags & ATTR_ROOT) != 0) != + ((entry->flags & XFS_ATTR_ROOT) != 0)) + continue; + args->index = probe; + args->rmtblkno + = INT_GET(name_rmt->valueblk, ARCH_CONVERT); + args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, + INT_GET(name_rmt->valuelen, + ARCH_CONVERT)); + return(XFS_ERROR(EEXIST)); + } + } + args->index = probe; + return(XFS_ERROR(ENOATTR)); +} + +/* + * Get the value associated with an attribute name from a leaf attribute + * list structure. 
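+ * Local values are copied straight out of the leaf block. For remote
+ * values only args->rmtblkno/rmtblkcnt are filled in here; the caller
+ * must read the value blocks itself. If the caller's buffer is too
+ * small, args->valuelen is set to the required size and ERANGE is
+ * returned (ATTR_KERNOVAL callers just get the length back).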
+ */ +int +xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) +{ + int valuelen; + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) + < (XFS_LBSIZE(args->dp->i_mount)/8)); + ASSERT(args->index < ((int)INT_GET(leaf->hdr.count, ARCH_CONVERT))); + + entry = &leaf->entries[args->index]; + if (entry->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); + ASSERT(name_loc->namelen == args->namelen); + ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0); + valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); + if (args->flags & ATTR_KERNOVAL) { + args->valuelen = valuelen; + return(0); + } + if (args->valuelen < valuelen) { + args->valuelen = valuelen; + return(XFS_ERROR(ERANGE)); + } + args->valuelen = valuelen; + memcpy(args->value, &name_loc->nameval[args->namelen], valuelen); + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); + ASSERT(name_rmt->namelen == args->namelen); + ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); + valuelen = INT_GET(name_rmt->valuelen, ARCH_CONVERT); + args->rmtblkno = INT_GET(name_rmt->valueblk, ARCH_CONVERT); + args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen); + if (args->flags & ATTR_KERNOVAL) { + args->valuelen = valuelen; + return(0); + } + if (args->valuelen < valuelen) { + args->valuelen = valuelen; + return(XFS_ERROR(ERANGE)); + } + args->valuelen = valuelen; + } + return(0); +} + +/*======================================================================== + * Utility routines. + *========================================================================*/ + +/* + * Move the indicated entries from one leaf to another. + * NOTE: this routine modifies both source and destination leaves. + */ +/*ARGSUSED*/ +STATIC void +xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, + xfs_attr_leafblock_t *leaf_d, int start_d, + int count, xfs_mount_t *mp) +{ + xfs_attr_leaf_hdr_t *hdr_s, *hdr_d; + xfs_attr_leaf_entry_t *entry_s, *entry_d; + int desti, tmp, i; + + /* + * Check for nothing to do. + */ + if (count == 0) + return; + + /* + * Set up environment. + */ + ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + hdr_s = &leaf_s->hdr; + hdr_d = &leaf_d->hdr; + ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) + && (INT_GET(hdr_s->count, ARCH_CONVERT) + < (XFS_LBSIZE(mp)/8))); + ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_s->count, ARCH_CONVERT) + * sizeof(*entry_s))+sizeof(*hdr_s))); + ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)); + ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_d->count, ARCH_CONVERT) + * sizeof(*entry_d))+sizeof(*hdr_d))); + + ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT)); + ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT)); + ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT)); + + /* + * Move the entries in the destination leaf up to make a hole? 
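+ * (Only needed when inserting into the middle of the destination's
+ * entry table; appending at the end needs no memmove.)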
+ */ + if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) { + tmp = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d; + tmp *= sizeof(xfs_attr_leaf_entry_t); + entry_s = &leaf_d->entries[start_d]; + entry_d = &leaf_d->entries[start_d + count]; + memmove((char *)entry_d, (char *)entry_s, tmp); + } + + /* + * Copy all entry's in the same (sorted) order, + * but allocate attribute info packed and in sequence. + */ + entry_s = &leaf_s->entries[start_s]; + entry_d = &leaf_d->entries[start_d]; + desti = start_d; + for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) { + ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + >= INT_GET(hdr_s->firstused, ARCH_CONVERT)); + tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i); +#ifdef GROT + /* + * Code to drop INCOMPLETE entries. Difficult to use as we + * may also need to change the insertion index. Code turned + * off for 6.2, should be revisited later. + */ + if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */ + memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); + INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp); + INT_MOD(hdr_s->count, ARCH_CONVERT, -1); + entry_d--; /* to compensate for ++ in loop hdr */ + desti--; + if ((start_s + i) < offset) + result++; /* insertion index adjustment */ + } else { +#endif /* GROT */ + INT_MOD(hdr_d->firstused, ARCH_CONVERT, -tmp); + /* both on-disk, don't endian flip twice */ + entry_d->hashval = entry_s->hashval; + /* both on-disk, don't endian flip twice */ + entry_d->nameidx = hdr_d->firstused; + entry_d->flags = entry_s->flags; + ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp + <= XFS_LBSIZE(mp)); + memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti), + XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp); + ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp + <= XFS_LBSIZE(mp)); + memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); + INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp); + INT_MOD(hdr_d->usedbytes, ARCH_CONVERT, tmp); + INT_MOD(hdr_s->count, ARCH_CONVERT, -1); + INT_MOD(hdr_d->count, ARCH_CONVERT, 1); + tmp = INT_GET(hdr_d->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t) + + sizeof(xfs_attr_leaf_hdr_t); + ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp); +#ifdef GROT + } +#endif /* GROT */ + } + + /* + * Zero out the entries we just copied. + */ + if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) { + tmp = count * sizeof(xfs_attr_leaf_entry_t); + entry_s = &leaf_s->entries[start_s]; + ASSERT(((char *)entry_s + tmp) <= + ((char *)leaf_s + XFS_LBSIZE(mp))); + memset((char *)entry_s, 0, tmp); + } else { + /* + * Move the remaining entries down to fill the hole, + * then zero the entries at the top. 
+ */ + tmp = INT_GET(hdr_s->count, ARCH_CONVERT) - count; + tmp *= sizeof(xfs_attr_leaf_entry_t); + entry_s = &leaf_s->entries[start_s + count]; + entry_d = &leaf_s->entries[start_s]; + memmove((char *)entry_d, (char *)entry_s, tmp); + + tmp = count * sizeof(xfs_attr_leaf_entry_t); + entry_s = &leaf_s->entries[INT_GET(hdr_s->count, + ARCH_CONVERT)]; + ASSERT(((char *)entry_s + tmp) <= + ((char *)leaf_s + XFS_LBSIZE(mp))); + memset((char *)entry_s, 0, tmp); + } + + /* + * Fill in the freemap information + */ + INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_hdr_t)); + INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT, + INT_GET(hdr_d->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t)); + INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, + INT_GET(hdr_d->firstused, ARCH_CONVERT) + - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); + INT_ZERO(hdr_d->freemap[1].base, ARCH_CONVERT); + INT_ZERO(hdr_d->freemap[2].base, ARCH_CONVERT); + INT_ZERO(hdr_d->freemap[1].size, ARCH_CONVERT); + INT_ZERO(hdr_d->freemap[2].size, ARCH_CONVERT); + hdr_s->holes = 1; /* leaf may not be compact */ +} + +/* + * Compare two leaf blocks "order". + * Return 0 unless leaf2 should go before leaf1. + */ +int +xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) +{ + xfs_attr_leafblock_t *leaf1, *leaf2; + + leaf1 = leaf1_bp->data; + leaf2 = leaf2_bp->data; + ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC) && + (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC)); + if ( (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) + && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) + && ( (INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) + || (INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT))) ) { + return(1); + } + return(0); +} + +/* + * Pick up the last hashvalue from a leaf block. + */ +xfs_dahash_t +xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count) +{ + xfs_attr_leafblock_t *leaf; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + if (count) + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + if (INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + return(0); + return(INT_GET(leaf->entries[INT_GET(leaf->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); +} + +/* + * Calculate the number of bytes used to store the indicated attribute + * (whether local or remote only calculate bytes in this block). + */ +int +xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index) +{ + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + int size; + + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + if (leaf->entries[index].flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index); + size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen, + INT_GET(name_loc->valuelen, + ARCH_CONVERT)); + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index); + size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen); + } + return(size); +} + +/* + * Calculate the number of bytes that would be required to store the new + * attribute (whether local or remote only calculate bytes in this block). + * This routine decides as a side effect whether the attribute will be + * a "local" or a "remote" attribute. 
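+ * If the complete name/value pair fits under the "local" size limit
+ * for this blocksize, the value is stored in the leaf itself;
+ * otherwise only the name lives in the leaf and the value goes out
+ * to separate "remote" blocks.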
+ */ +int +xfs_attr_leaf_newentsize(xfs_da_args_t *args, int blocksize, int *local) +{ + int size; + + size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(args->namelen, args->valuelen); + if (size < XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(blocksize)) { + if (local) { + *local = 1; + } + } else { + size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(args->namelen); + if (local) { + *local = 0; + } + } + return(size); +} + +/* + * Copy out attribute list entries for attr_list(), for leaf attribute lists. + */ +int +xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) +{ + attrlist_cursor_kern_t *cursor; + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_local_t *name_loc; + xfs_attr_leaf_name_remote_t *name_rmt; + int retval, i; + + ASSERT(bp != NULL); + leaf = bp->data; + cursor = context->cursor; + cursor->initted = 1; + + xfs_attr_trace_l_cl("blk start", context, leaf); + + /* + * Re-find our place in the leaf block if this is a new syscall. + */ + if (context->resynch) { + entry = &leaf->entries[0]; + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); + entry++, i++) { + if (INT_GET(entry->hashval, ARCH_CONVERT) + == cursor->hashval) { + if (cursor->offset == context->dupcnt) { + context->dupcnt = 0; + break; + } + context->dupcnt++; + } else if (INT_GET(entry->hashval, ARCH_CONVERT) + > cursor->hashval) { + context->dupcnt = 0; + break; + } + } + if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { + xfs_attr_trace_l_c("not found", context); + return(0); + } + } else { + entry = &leaf->entries[0]; + i = 0; + } + context->resynch = 0; + + /* + * We have found our place, start copying out the new attributes. + */ + retval = 0; + for ( ; (i < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + && (retval == 0); entry++, i++) { + int ns = (entry->flags & XFS_ATTR_ROOT)? ROOT_NAMES:USER_NAMES; + + if (INT_GET(entry->hashval, ARCH_CONVERT) != cursor->hashval) { + cursor->hashval = INT_GET(entry->hashval, ARCH_CONVERT); + cursor->offset = 0; + } + + if (entry->flags & XFS_ATTR_INCOMPLETE) + continue; /* skip incomplete entries */ + if (((context->flags & ATTR_ROOT) != 0) != + ((entry->flags & XFS_ATTR_ROOT) != 0) && + !(context->flags & ATTR_KERNFULLS)) + continue; /* skip non-matching entries */ + + if (entry->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); + if (context->flags & ATTR_KERNOVAL) { + ASSERT(context->flags & ATTR_KERNAMELS); + context->count += xfs_namespaces[ns].namelen + + (int)name_loc->namelen + 1; + } else { + retval = xfs_attr_put_listent(context, ns, + (char *)name_loc->nameval, + (int)name_loc->namelen, + (int)INT_GET(name_loc->valuelen, + ARCH_CONVERT)); + } + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); + if (context->flags & ATTR_KERNOVAL) { + ASSERT(context->flags & ATTR_KERNAMELS); + context->count += xfs_namespaces[ns].namelen + + (int)name_rmt->namelen + 1; + } else { + retval = xfs_attr_put_listent(context, ns, + (char *)name_rmt->name, + (int)name_rmt->namelen, + (int)INT_GET(name_rmt->valuelen, + ARCH_CONVERT)); + } + } + if (retval == 0) { + cursor->offset++; + } + } + xfs_attr_trace_l_cl("blk end", context, leaf); + return(retval); +} + +#define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \ + (((struct attrlist_ent *) 0)->a_name - (char *) 0) +#define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \ + ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \ + & ~(sizeof(u_int32_t)-1)) + +/* + * Format an attribute and copy it out to the user's buffer. 
+ * Take care to check values and protect against them changing later, + * we may be reading them directly out of a user buffer. + */ +/*ARGSUSED*/ +int +xfs_attr_put_listent(xfs_attr_list_context_t *context, + int ns, char *name, int namelen, int valuelen) +{ + attrlist_ent_t *aep; + int arraytop; + + ASSERT(!(context->flags & ATTR_KERNOVAL)); + if (context->flags & ATTR_KERNAMELS) { + char *offset; + xattr_namespace_t *nsp; + + ASSERT(context->count >= 0); + + nsp = &xfs_namespaces[ns]; + arraytop = context->count + nsp->namelen + namelen+1; + if (arraytop > context->firstu) { + context->count = -1; /* insufficient space */ + return(1); + } + offset = (char *)context->alist + context->count; + strncpy(offset, nsp->name, nsp->namelen); /* namespace */ + offset += nsp->namelen; + strncpy(offset, name, namelen); /* real name */ + offset += namelen; + *offset = '\0'; + context->count += nsp->namelen + namelen + 1; + return(0); + } + + ASSERT(context->count >= 0); + ASSERT(context->count < (ATTR_MAX_VALUELEN/8)); + ASSERT(context->firstu >= sizeof(*context->alist)); + ASSERT(context->firstu <= context->bufsize); + + arraytop = sizeof(*context->alist) + + context->count * sizeof(context->alist->al_offset[0]); + context->firstu -= ATTR_ENTSIZE(namelen); + if (context->firstu < arraytop) { + xfs_attr_trace_l_c("buffer full", context); + context->alist->al_more = 1; + return(1); + } + + aep = (attrlist_ent_t *)&(((char *)context->alist)[ context->firstu ]); + aep->a_valuelen = valuelen; + memcpy(aep->a_name, name, namelen); + aep->a_name[ namelen ] = 0; + context->alist->al_offset[ context->count++ ] = context->firstu; + context->alist->al_count = context->count; + xfs_attr_trace_l_c("add", context); + return(0); +} + +/*======================================================================== + * Manage the INCOMPLETE flag in a leaf entry + *========================================================================*/ + +/* + * Clear the INCOMPLETE flag on an entry in a leaf block. + */ +int +xfs_attr_leaf_clearflag(xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_dabuf_t *bp; + int error; +#ifdef DEBUG + xfs_attr_leaf_name_local_t *name_loc; + int namelen; + char *name; +#endif /* DEBUG */ + + /* + * Set up the operation. 
+ */ + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) { + return(error); + } + ASSERT(bp != NULL); + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); + ASSERT(args->index >= 0); + entry = &leaf->entries[ args->index ]; + ASSERT(entry->flags & XFS_ATTR_INCOMPLETE); + +#ifdef DEBUG + if (entry->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); + namelen = name_loc->namelen; + name = (char *)name_loc->nameval; + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); + namelen = name_rmt->namelen; + name = (char *)name_rmt->name; + } + ASSERT(INT_GET(entry->hashval, ARCH_CONVERT) == args->hashval); + ASSERT(namelen == args->namelen); + ASSERT(memcmp(name, args->name, namelen) == 0); +#endif /* DEBUG */ + + entry->flags &= ~XFS_ATTR_INCOMPLETE; + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); + + if (args->rmtblkno) { + ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); + INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); + INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); + } + xfs_da_buf_done(bp); + + /* + * Commit the flag value change and start the next trans in series. + */ + error = xfs_attr_rolltrans(&args->trans, args->dp); + + return(error); +} + +/* + * Set the INCOMPLETE flag on an entry in a leaf block. + */ +int +xfs_attr_leaf_setflag(xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_dabuf_t *bp; + int error; + + /* + * Set up the operation. + */ + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp, + XFS_ATTR_FORK); + if (error) { + return(error); + } + ASSERT(bp != NULL); + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); + ASSERT(args->index >= 0); + entry = &leaf->entries[ args->index ]; + + ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0); + entry->flags |= XFS_ATTR_INCOMPLETE; + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); + if ((entry->flags & XFS_ATTR_LOCAL) == 0) { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); + INT_ZERO(name_rmt->valueblk, ARCH_CONVERT); + INT_ZERO(name_rmt->valuelen, ARCH_CONVERT); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); + } + xfs_da_buf_done(bp); + + /* + * Commit the flag value change and start the next trans in series. + */ + error = xfs_attr_rolltrans(&args->trans, args->dp); + + return(error); +} + +/* + * In a single transaction, clear the INCOMPLETE flag on the leaf entry + * given by args->blkno/index and set the INCOMPLETE flag on the leaf + * entry given by args->blkno2/index2. + * + * Note that they could be in different blocks, or in the same block. 
+ */ +int +xfs_attr_leaf_flipflags(xfs_da_args_t *args) +{ + xfs_attr_leafblock_t *leaf1, *leaf2; + xfs_attr_leaf_entry_t *entry1, *entry2; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_dabuf_t *bp1, *bp2; + int error; +#ifdef DEBUG + xfs_attr_leaf_name_local_t *name_loc; + int namelen1, namelen2; + char *name1, *name2; +#endif /* DEBUG */ + + /* + * Read the block containing the "old" attr + */ + error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp1, + XFS_ATTR_FORK); + if (error) { + return(error); + } + ASSERT(bp1 != NULL); + + /* + * Read the block containing the "new" attr, if it is different + */ + if (args->blkno2 != args->blkno) { + error = xfs_da_read_buf(args->trans, args->dp, args->blkno2, + -1, &bp2, XFS_ATTR_FORK); + if (error) { + return(error); + } + ASSERT(bp2 != NULL); + } else { + bp2 = bp1; + } + + leaf1 = bp1->data; + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < INT_GET(leaf1->hdr.count, ARCH_CONVERT)); + ASSERT(args->index >= 0); + entry1 = &leaf1->entries[ args->index ]; + + leaf2 = bp2->data; + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index2 < INT_GET(leaf2->hdr.count, ARCH_CONVERT)); + ASSERT(args->index2 >= 0); + entry2 = &leaf2->entries[ args->index2 ]; + +#ifdef DEBUG + if (entry1->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf1, args->index); + namelen1 = name_loc->namelen; + name1 = (char *)name_loc->nameval; + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); + namelen1 = name_rmt->namelen; + name1 = (char *)name_rmt->name; + } + if (entry2->flags & XFS_ATTR_LOCAL) { + name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf2, args->index2); + namelen2 = name_loc->namelen; + name2 = (char *)name_loc->nameval; + } else { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2); + namelen2 = name_rmt->namelen; + name2 = (char *)name_rmt->name; + } + ASSERT(INT_GET(entry1->hashval, ARCH_CONVERT) == INT_GET(entry2->hashval, ARCH_CONVERT)); + ASSERT(namelen1 == namelen2); + ASSERT(memcmp(name1, name2, namelen1) == 0); +#endif /* DEBUG */ + + ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE); + ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0); + + entry1->flags &= ~XFS_ATTR_INCOMPLETE; + xfs_da_log_buf(args->trans, bp1, + XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1))); + if (args->rmtblkno) { + ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); + INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); + INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); + xfs_da_log_buf(args->trans, bp1, + XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt))); + } + + entry2->flags |= XFS_ATTR_INCOMPLETE; + xfs_da_log_buf(args->trans, bp2, + XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2))); + if ((entry2->flags & XFS_ATTR_LOCAL) == 0) { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2); + INT_ZERO(name_rmt->valueblk, ARCH_CONVERT); + INT_ZERO(name_rmt->valuelen, ARCH_CONVERT); + xfs_da_log_buf(args->trans, bp2, + XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt))); + } + xfs_da_buf_done(bp1); + if (bp1 != bp2) + xfs_da_buf_done(bp2); + + /* + * Commit the flag value change and start the next trans in series. 
+ */ + error = xfs_attr_rolltrans(&args->trans, args->dp); + + return(error); +} + +/*======================================================================== + * Indiscriminately delete the entire attribute fork + *========================================================================*/ + +/* + * Recurse (gasp!) through the attribute nodes until we find leaves. + * We're doing a depth-first traversal in order to invalidate everything. + */ +int +xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp) +{ + xfs_da_blkinfo_t *info; + xfs_daddr_t blkno; + xfs_dabuf_t *bp; + int error; + + /* + * Read block 0 to see what we have to work with. + * We only get here if we have extents, since we remove + * the extents in reverse order the extent containing + * block 0 must still be there. + */ + error = xfs_da_read_buf(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK); + if (error) + return(error); + blkno = xfs_da_blkno(bp); + + /* + * Invalidate the tree, even if the "tree" is only a single leaf block. + * This is a depth-first traversal! + */ + info = bp->data; + if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + error = xfs_attr_node_inactive(trans, dp, bp, 1); + } else if (INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + error = xfs_attr_leaf_inactive(trans, dp, bp); + } else { + error = XFS_ERROR(EIO); + xfs_da_brelse(*trans, bp); + } + if (error) + return(error); + + /* + * Invalidate the incore copy of the root block. + */ + error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK); + if (error) + return(error); + xfs_da_binval(*trans, bp); /* remove from cache */ + /* + * Commit the invalidate and start the next transaction. + */ + error = xfs_attr_rolltrans(trans, dp); + + return (error); +} + +/* + * Recurse (gasp!) through the attribute nodes until we find leaves. + * We're doing a depth-first traversal in order to invalidate everything. + */ +int +xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, + int level) +{ + xfs_da_blkinfo_t *info; + xfs_da_intnode_t *node; + xfs_dablk_t child_fsb; + xfs_daddr_t parent_blkno, child_blkno; + int error, count, i; + xfs_dabuf_t *child_bp; + + /* + * Since this code is recursive (gasp!) we must protect ourselves. + */ + if (level > XFS_DA_NODE_MAXDEPTH) { + xfs_da_brelse(*trans, bp); /* no locks for later trans */ + return(XFS_ERROR(EIO)); + } + + node = bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) + == XFS_DA_NODE_MAGIC); + parent_blkno = xfs_da_blkno(bp); /* save for re-read later */ + count = INT_GET(node->hdr.count, ARCH_CONVERT); + if (!count) { + xfs_da_brelse(*trans, bp); + return(0); + } + child_fsb = INT_GET(node->btree[0].before, ARCH_CONVERT); + xfs_da_brelse(*trans, bp); /* no locks for later trans */ + + /* + * If this is the node level just above the leaves, simply loop + * over the leaves removing all of them. If this is higher up + * in the tree, recurse downward. + */ + for (i = 0; i < count; i++) { + /* + * Read the subsidiary block to see what we have to work with. + * Don't do this in a transaction. This is a depth-first + * traversal of the tree so we may deal with many blocks + * before we come back to this one. + */ + error = xfs_da_read_buf(*trans, dp, child_fsb, -2, &child_bp, + XFS_ATTR_FORK); + if (error) + return(error); + if (child_bp) { + /* save for re-read later */ + child_blkno = xfs_da_blkno(child_bp); + + /* + * Invalidate the subtree, however we have to. 
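+ * (Recurse if the child is another node; for a leaf, invalidate any
+ * buffers for its remote values and then the leaf block itself.)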
+ */ + info = child_bp->data; + if (INT_GET(info->magic, ARCH_CONVERT) + == XFS_DA_NODE_MAGIC) { + error = xfs_attr_node_inactive(trans, dp, + child_bp, level+1); + } else if (INT_GET(info->magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC) { + error = xfs_attr_leaf_inactive(trans, dp, + child_bp); + } else { + error = XFS_ERROR(EIO); + xfs_da_brelse(*trans, child_bp); + } + if (error) + return(error); + + /* + * Remove the subsidiary block from the cache + * and from the log. + */ + error = xfs_da_get_buf(*trans, dp, 0, child_blkno, + &child_bp, XFS_ATTR_FORK); + if (error) + return(error); + xfs_da_binval(*trans, child_bp); + } + + /* + * If we're not done, re-read the parent to get the next + * child block number. + */ + if ((i+1) < count) { + error = xfs_da_read_buf(*trans, dp, 0, parent_blkno, + &bp, XFS_ATTR_FORK); + if (error) + return(error); + child_fsb = INT_GET(node->btree[i+1].before, ARCH_CONVERT); + xfs_da_brelse(*trans, bp); + } + /* + * Atomically commit the whole invalidate stuff. + */ + if ((error = xfs_attr_rolltrans(trans, dp))) + return (error); + } + + return(0); +} + +/* + * Invalidate all of the "remote" value regions pointed to by a particular + * leaf block. + * Note that we must release the lock on the buffer so that we are not + * caught holding something that the logging code wants to flush to disk. + */ +int +xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) +{ + xfs_attr_leafblock_t *leaf; + xfs_attr_leaf_entry_t *entry; + xfs_attr_leaf_name_remote_t *name_rmt; + xfs_attr_inactive_list_t *list, *lp; + int error, count, size, tmp, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + + /* + * Count the number of "remote" value extents. + */ + count = 0; + entry = &leaf->entries[0]; + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if ( INT_GET(entry->nameidx, ARCH_CONVERT) + && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); + if (!INT_ISZERO(name_rmt->valueblk, ARCH_CONVERT)) + count++; + } + } + + /* + * If there are no "remote" values, we're done. + */ + if (count == 0) { + xfs_da_brelse(*trans, bp); + return(0); + } + + /* + * Allocate storage for a list of all the "remote" value extents. + */ + size = count * sizeof(xfs_attr_inactive_list_t); + list = (xfs_attr_inactive_list_t *)kmem_alloc(size, KM_SLEEP); + + /* + * Identify each of the "remote" value extents. + */ + lp = list; + entry = &leaf->entries[0]; + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if ( INT_GET(entry->nameidx, ARCH_CONVERT) + && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); + if (!INT_ISZERO(name_rmt->valueblk, ARCH_CONVERT)) { + /* both on-disk, don't endian flip twice */ + lp->valueblk = name_rmt->valueblk; + INT_SET(lp->valuelen, ARCH_CONVERT, + XFS_B_TO_FSB(dp->i_mount, + INT_GET(name_rmt->valuelen, + ARCH_CONVERT))); + lp++; + } + } + } + xfs_da_brelse(*trans, bp); /* unlock for trans. in freextent() */ + + /* + * Invalidate each of the "remote" value extents. 
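+ * xfs_attr_leaf_freextent() maps each extent back to disk blocks and
+ * invalidates any incore/transaction buffers covering them. Only the
+ * first error encountered is reported back to the caller.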
+ */ + error = 0; + for (lp = list, i = 0; i < count; i++, lp++) { + tmp = xfs_attr_leaf_freextent(trans, dp, + INT_GET(lp->valueblk, + ARCH_CONVERT), + INT_GET(lp->valuelen, + ARCH_CONVERT)); + if (error == 0) + error = tmp; /* save only the 1st errno */ + } + + kmem_free((xfs_caddr_t)list, size); + return(error); +} + +/* + * Look at all the extents for this logical region, + * invalidate any buffers that are incore/in transactions. + */ +int +xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp, + xfs_dablk_t blkno, int blkcnt) +{ + xfs_bmbt_irec_t map; + xfs_dablk_t tblkno; + int tblkcnt, dblkcnt, nmap, error; + xfs_daddr_t dblkno; + xfs_buf_t *bp; + + /* + * Roll through the "value", invalidating the attribute value's + * blocks. + */ + tblkno = blkno; + tblkcnt = blkcnt; + while (tblkcnt > 0) { + /* + * Try to remember where we decided to put the value. + */ + nmap = 1; + error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt, + XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, + NULL, 0, &map, &nmap, NULL); + if (error) { + return(error); + } + ASSERT(nmap == 1); + ASSERT(map.br_startblock != DELAYSTARTBLOCK); + + /* + * If it's a hole, these are already unmapped + * so there's nothing to invalidate. + */ + if (map.br_startblock != HOLESTARTBLOCK) { + + dblkno = XFS_FSB_TO_DADDR(dp->i_mount, + map.br_startblock); + dblkcnt = XFS_FSB_TO_BB(dp->i_mount, + map.br_blockcount); + bp = xfs_trans_get_buf(*trans, + dp->i_mount->m_ddev_targp, + dblkno, dblkcnt, XFS_BUF_LOCK); + xfs_trans_binval(*trans, bp); + /* + * Roll to next transaction. + */ + if ((error = xfs_attr_rolltrans(trans, dp))) + return (error); + } + + tblkno += map.br_blockcount; + tblkcnt -= map.br_blockcount; + } + + return(0); +} + + +/* + * Roll from one trans in the sequence of PERMANENT transactions to the next. + */ +int +xfs_attr_rolltrans(xfs_trans_t **transp, xfs_inode_t *dp) +{ + xfs_trans_t *trans; + unsigned int logres, count; + int error; + + /* + * Ensure that the inode is always logged. + */ + trans = *transp; + xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE); + + /* + * Copy the critical parameters from one trans to the next. + */ + logres = trans->t_log_res; + count = trans->t_log_count; + *transp = xfs_trans_dup(trans); + + /* + * Commit the current transaction. + * If this commit failed, then it'd just unlock those items that + * are not marked ihold. That also means that a filesystem shutdown + * is in progress. The caller takes the responsibility to cancel + * the duplicate transaction that gets returned. + */ + if ((error = xfs_trans_commit(trans, 0, NULL))) + return (error); + + trans = *transp; + + /* + * Reserve space in the log for th next transaction. + * This also pushes items in the "AIL", the list of logged items, + * out to disk if they are taking up space at the tail of the log + * that we want to use. This requires that either nothing be locked + * across this call, or that anything that is locked be logged in + * the prior and the next transactions. + */ + error = xfs_trans_reserve(trans, 0, logres, 0, + XFS_TRANS_PERM_LOG_RES, count); + /* + * Ensure that the inode is in the new transaction and locked. 
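[Illustrative aside, not part of the patch.] The roll performed by xfs_attr_rolltrans() follows a strict order: save the log reservation parameters, duplicate the transaction, commit the old one, then reserve space in the duplicate and re-join the inode (the re-join appears just below). A minimal user-space sketch of that ordering follows; struct txn and the txn_* helpers are stand-ins invented for the example, not the real xfs_trans interfaces.

/* Standalone sketch (not the kernel API): the ordering used when rolling
 * from one permanent-log-reservation transaction to the next. */
#include <stdio.h>

struct txn { unsigned int logres, count; };

static struct txn txn_dup(const struct txn *t) { return *t; }
static int txn_commit(struct txn *t) { (void)t; puts("commit"); return 0; }
static int txn_reserve(struct txn *t, unsigned int logres, unsigned int count)
{
        (void)t;
        printf("reserve logres=%u count=%u\n", logres, count);
        return 0;
}

/* Roll from *tp to a fresh transaction, keeping the same reservation. */
static int txn_roll(struct txn *tp)
{
        unsigned int logres = tp->logres;     /* 1. save the critical parameters */
        unsigned int count  = tp->count;
        struct txn next = txn_dup(tp);        /* 2. duplicate before committing  */
        int error = txn_commit(tp);           /* 3. commit the old transaction   */

        if (error)
                return error;
        *tp = next;
        /* 4. reserve log space in the new transaction (may push the AIL);
         * 5. the caller then re-joins and re-holds the inode. */
        return txn_reserve(tp, logres, count);
}

int main(void)
{
        struct txn t = { 128, 3 };
        return txn_roll(&t);
}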
+ */ + if (!error) { + xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); + xfs_trans_ihold(trans, dp); + } + return (error); + +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_attr_leaf.h linux.22-ac2/fs/xfs/xfs_attr_leaf.h --- linux.vanilla/fs/xfs/xfs_attr_leaf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_attr_leaf.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ATTR_LEAF_H__ +#define __XFS_ATTR_LEAF_H__ + +/* + * Attribute storage layout, internal structure, access macros, etc. + * + * Attribute lists are structured around Btrees where all the data + * elements are in the leaf nodes. Attribute names are hashed into an int, + * then that int is used as the index into the Btree. Since the hashval + * of an attribute name may not be unique, we may have duplicate keys. The + * internal links in the Btree are logical block offsets into the file. + */ + +struct attrlist; +struct attrlist_cursor_kern; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_da_state; +struct xfs_da_state_blk; +struct xfs_inode; +struct xfs_trans; + +/*======================================================================== + * Attribute structure when equal to XFS_LBSIZE(mp) bytes. + *========================================================================*/ + +/* + * This is the structure of the leaf nodes in the Btree. + * + * Struct leaf_entry's are packed from the top. Name/values grow from the + * bottom but are not packed. The freemap contains run-length-encoded entries + * for the free bytes after the leaf_entry's, but only the N largest such, + * smaller runs are dropped. When the freemap doesn't show enough space + * for an allocation, we compact the name/value area and try again. If we + * still don't have enough space, then we have to split the block. The + * name/value structs (both local and remote versions) must be 32bit aligned. + * + * Since we have duplicate hash keys, for each key that matches, compare + * the actual name string. The root and intermediate node search always + * takes the first-in-the-block key match found, so we should only have + * to work "forw"ard. 
If none matches, continue with the "forw"ard leaf + * nodes until the hash key changes or the attribute name is found. + * + * We store the fact that an attribute is a ROOT versus USER attribute in + * the leaf_entry. The namespaces are independent only because we also look + * at the root/user bit when we are looking for a matching attribute name. + * + * We also store a "incomplete" bit in the leaf_entry. It shows that an + * attribute is in the middle of being created and should not be shown to + * the user if we crash during the time that the bit is set. We clear the + * bit when we have finished setting up the attribute. We do this because + * we cannot create some large attributes inside a single transaction, and we + * need some indication that we weren't finished if we crash in the middle. + */ +#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */ + +typedef struct xfs_attr_leafblock { + struct xfs_attr_leaf_hdr { /* constant-structure header block */ + xfs_da_blkinfo_t info; /* block type, links, etc. */ + __uint16_t count; /* count of active leaf_entry's */ + __uint16_t usedbytes; /* num bytes of names/values stored */ + __uint16_t firstused; /* first used byte in name area */ + __uint8_t holes; /* != 0 if blk needs compaction */ + __uint8_t pad1; + struct xfs_attr_leaf_map { /* RLE map of free bytes */ + __uint16_t base; /* base of free region */ + __uint16_t size; /* length of free region */ + } freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */ + } hdr; + struct xfs_attr_leaf_entry { /* sorted on key, not name */ + xfs_dahash_t hashval; /* hash value of name */ + __uint16_t nameidx; /* index into buffer of name/value */ + __uint8_t flags; /* LOCAL, ROOT and INCOMPLETE flags */ + __uint8_t pad2; /* unused pad byte */ + } entries[1]; /* variable sized array */ + struct xfs_attr_leaf_name_local { + __uint16_t valuelen; /* number of bytes in value */ + __uint8_t namelen; /* length of name bytes */ + __uint8_t nameval[1]; /* name/value bytes */ + } namelist; /* grows from bottom of buf */ + struct xfs_attr_leaf_name_remote { + xfs_dablk_t valueblk; /* block number of value bytes */ + __uint32_t valuelen; /* number of bytes in value */ + __uint8_t namelen; /* length of name bytes */ + __uint8_t name[1]; /* name bytes */ + } valuelist; /* grows from bottom of buf */ +} xfs_attr_leafblock_t; +typedef struct xfs_attr_leaf_hdr xfs_attr_leaf_hdr_t; +typedef struct xfs_attr_leaf_map xfs_attr_leaf_map_t; +typedef struct xfs_attr_leaf_entry xfs_attr_leaf_entry_t; +typedef struct xfs_attr_leaf_name_local xfs_attr_leaf_name_local_t; +typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t; + +/* + * Flags used in the leaf_entry[i].flags field. + * NOTE: the INCOMPLETE bit must not collide with the flags bits specified + * on the system call, they are "or"ed together for various operations. + */ +#define XFS_ATTR_LOCAL_BIT 0 /* attr is stored locally */ +#define XFS_ATTR_ROOT_BIT 1 /* limit access to attr to userid 0 */ +#define XFS_ATTR_INCOMPLETE_BIT 7 /* attr in middle of create/delete */ +#define XFS_ATTR_LOCAL (1 << XFS_ATTR_LOCAL_BIT) +#define XFS_ATTR_ROOT (1 << XFS_ATTR_ROOT_BIT) +#define XFS_ATTR_INCOMPLETE (1 << XFS_ATTR_INCOMPLETE_BIT) + +/* + * Alignment for namelist and valuelist entries (since they are mixed + * there can be only one alignment value) + */ +#define XFS_ATTR_LEAF_NAME_ALIGN ((uint)sizeof(xfs_dablk_t)) + +/* + * Cast typed pointers for "local" and "remote" name/value structs. 
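[Illustrative aside, not part of the patch.] Because attribute names are looked up by a non-unique hash, the layout described above implies a two-step lookup: locate the first entry with the wanted hash value, then walk forward comparing full names until the hash changes. A small standalone sketch of that forward scan, with an invented entry type and a plain linear pre-scan standing in for the real btree descent:

/* Standalone sketch (not the on-disk format): entries sorted by name
 * hash, with possible duplicate hash values. Lookup finds the first
 * entry with a matching hash, then scans forward comparing names. */
#include <stdio.h>
#include <string.h>

struct entry { unsigned int hashval; const char *name; };

static int lookup(const struct entry *e, int count,
                  unsigned int hash, const char *name)
{
        int i = 0;

        while (i < count && e[i].hashval < hash)     /* a real tree would binary-search */
                i++;
        for (; i < count && e[i].hashval == hash; i++)
                if (strcmp(e[i].name, name) == 0)
                        return i;                    /* found */
        return -1;                                   /* hash ran out: not present */
}

int main(void)
{
        /* two colliding entries under hash 7 */
        struct entry leaf[] = {
                { 3, "user.a" }, { 7, "user.b" }, { 7, "user.c" }, { 9, "user.d" },
        };

        printf("user.c at index %d\n", lookup(leaf, 4, 7, "user.c"));
        return 0;
}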
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_NAME_REMOTE) +xfs_attr_leaf_name_remote_t * +xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx); +#define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx) \ + xfs_attr_leaf_name_remote(leafp,idx) +#else +#define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx) /* remote name struct ptr */ \ + ((xfs_attr_leaf_name_remote_t *) \ + &((char *)(leafp))[ INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT) ]) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_NAME_LOCAL) +xfs_attr_leaf_name_local_t * +xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx); +#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \ + xfs_attr_leaf_name_local(leafp,idx) +#else +#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) /* local name struct ptr */ \ + ((xfs_attr_leaf_name_local_t *) \ + &((char *)(leafp))[ INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT) ]) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_NAME) +char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx); +#define XFS_ATTR_LEAF_NAME(leafp,idx) xfs_attr_leaf_name(leafp,idx) +#else +#define XFS_ATTR_LEAF_NAME(leafp,idx) /* generic name struct ptr */ \ + (&((char *)(leafp))[ INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT) ]) +#endif + +/* + * Calculate total bytes used (including trailing pad for alignment) for + * a "local" name/value structure, a "remote" name/value structure, and + * a pointer which might be either. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_ENTSIZE_REMOTE) +int xfs_attr_leaf_entsize_remote(int nlen); +#define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen) \ + xfs_attr_leaf_entsize_remote(nlen) +#else +#define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen) /* space for remote struct */ \ + (((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \ + XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_ENTSIZE_LOCAL) +int xfs_attr_leaf_entsize_local(int nlen, int vlen); +#define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen) \ + xfs_attr_leaf_entsize_local(nlen,vlen) +#else +#define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen) /* space for local struct */ \ + (((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) + \ + XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX) +int xfs_attr_leaf_entsize_local_max(int bsize); +#define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize) \ + xfs_attr_leaf_entsize_local_max(bsize) +#else +#define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize) /* max local struct size */ \ + (((bsize) >> 1) + ((bsize) >> 2)) +#endif + + +/*======================================================================== + * Structure used to pass context around among the routines. + *========================================================================*/ + +typedef struct xfs_attr_list_context { + struct xfs_inode *dp; /* inode */ + struct attrlist_cursor_kern *cursor;/* position in list */ + struct attrlist *alist; /* output buffer */ + int count; /* num used entries */ + int dupcnt; /* count dup hashvals seen */ + int bufsize;/* total buffer size */ + int firstu; /* first used byte in buffer */ + int flags; /* from VOP call */ + int resynch;/* T/F: resynch with cursor */ +} xfs_attr_list_context_t; + +/* + * Used to keep a list of "remote value" extents when unlinking an inode. 
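[Illustrative aside, not part of the patch.] The ENTSIZE macros above all use the same round-up-to-alignment arithmetic: the fixed header size minus its one-byte placeholder, plus the name and value lengths, rounded up to XFS_ATTR_LEAF_NAME_ALIGN. A tiny sketch of that formula with invented header sizes, just to make the rounding concrete:

/* Standalone sketch: the rounding used by the XFS_ATTR_LEAF_ENTSIZE_*
 * macros. The header size in the example is invented; only the
 * round-up arithmetic itself is the point. */
#include <stdio.h>

#define ALIGN 4U    /* plays the role of XFS_ATTR_LEAF_NAME_ALIGN */

/* Header plus payload, rounded up to the next multiple of ALIGN.
 * "hdr - 1" mirrors subtracting the one-byte nameval[1] placeholder. */
static unsigned int entsize(unsigned int hdr, unsigned int nlen, unsigned int vlen)
{
        return (hdr - 1 + nlen + vlen + ALIGN - 1) & ~(ALIGN - 1);
}

int main(void)
{
        /* e.g. a 4-byte local header, 5-byte name, 6-byte value:
         * 4 - 1 + 5 + 6 = 14, rounded up to 16. */
        printf("%u\n", entsize(4, 5, 6));
        return 0;
}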
+ */ +typedef struct xfs_attr_inactive_list { + xfs_dablk_t valueblk; /* block number of value bytes */ + int valuelen; /* number of bytes in value */ +} xfs_attr_inactive_list_t; + + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Internal routines when dirsize < XFS_LITINO(mp). + */ +int xfs_attr_shortform_create(struct xfs_da_args *args); +int xfs_attr_shortform_add(struct xfs_da_args *add); +int xfs_attr_shortform_lookup(struct xfs_da_args *args); +int xfs_attr_shortform_getvalue(struct xfs_da_args *args); +int xfs_attr_shortform_to_leaf(struct xfs_da_args *args); +int xfs_attr_shortform_remove(struct xfs_da_args *remove); +int xfs_attr_shortform_list(struct xfs_attr_list_context *context); +int xfs_attr_shortform_replace(struct xfs_da_args *args); +int xfs_attr_shortform_allfit(struct xfs_dabuf *bp, struct xfs_inode *dp); + +/* + * Internal routines when dirsize == XFS_LBSIZE(mp). + */ +int xfs_attr_leaf_to_node(struct xfs_da_args *args); +int xfs_attr_leaf_to_shortform(struct xfs_dabuf *bp, + struct xfs_da_args *args); +int xfs_attr_leaf_clearflag(struct xfs_da_args *args); +int xfs_attr_leaf_setflag(struct xfs_da_args *args); +int xfs_attr_leaf_flipflags(xfs_da_args_t *args); + +/* + * Routines used for growing the Btree. + */ +int xfs_attr_leaf_create(struct xfs_da_args *args, xfs_dablk_t which_block, + struct xfs_dabuf **bpp); +int xfs_attr_leaf_split(struct xfs_da_state *state, + struct xfs_da_state_blk *oldblk, + struct xfs_da_state_blk *newblk); +int xfs_attr_leaf_lookup_int(struct xfs_dabuf *leaf, + struct xfs_da_args *args); +int xfs_attr_leaf_getvalue(struct xfs_dabuf *bp, struct xfs_da_args *args); +int xfs_attr_leaf_add(struct xfs_dabuf *leaf_buffer, + struct xfs_da_args *args); +int xfs_attr_leaf_remove(struct xfs_dabuf *leaf_buffer, + struct xfs_da_args *args); +int xfs_attr_leaf_list_int(struct xfs_dabuf *bp, + struct xfs_attr_list_context *context); + +/* + * Routines used for shrinking the Btree. + */ +int xfs_attr_leaf_toosmall(struct xfs_da_state *state, int *retval); +void xfs_attr_leaf_unbalance(struct xfs_da_state *state, + struct xfs_da_state_blk *drop_blk, + struct xfs_da_state_blk *save_blk); +int xfs_attr_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp); +int xfs_attr_node_inactive(struct xfs_trans **trans, struct xfs_inode *dp, + struct xfs_dabuf *bp, int level); +int xfs_attr_leaf_inactive(struct xfs_trans **trans, struct xfs_inode *dp, + struct xfs_dabuf *bp); +int xfs_attr_leaf_freextent(struct xfs_trans **trans, struct xfs_inode *dp, + xfs_dablk_t blkno, int blkcnt); + +/* + * Utility routines. 
+ */ +xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_dabuf *bp, int *count); +int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp, + struct xfs_dabuf *leaf2_bp); +int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int blocksize, + int *local); +int xfs_attr_leaf_entsize(struct xfs_attr_leafblock *leaf, int index); +int xfs_attr_put_listent(struct xfs_attr_list_context *context, + int ns, char *name, int namelen, int valuelen); +int xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp); + +#endif /* __XFS_ATTR_LEAF_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_attr_sf.h linux.22-ac2/fs/xfs/xfs_attr_sf.h --- linux.vanilla/fs/xfs/xfs_attr_sf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_attr_sf.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ATTR_SF_H__ +#define __XFS_ATTR_SF_H__ + +/* + * Attribute storage when stored inside the inode. + * + * Small attribute lists are packed as tightly as possible so as + * to fit into the literal area of the inode. + */ + +struct xfs_inode; + +/* + * Entries are packed toward the top as tight as possible. + */ +typedef struct xfs_attr_shortform { + struct xfs_attr_sf_hdr { /* constant-structure header block */ + __uint16_t totsize; /* total bytes in shortform list */ + __uint8_t count; /* count of active entries */ + } hdr; + struct xfs_attr_sf_entry { + __uint8_t namelen; /* actual length of name (no NULL) */ + __uint8_t valuelen; /* actual length of value (no NULL) */ + __uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */ + __uint8_t nameval[1]; /* name & value bytes concatenated */ + } list[1]; /* variable sized array */ +} xfs_attr_shortform_t; +typedef struct xfs_attr_sf_hdr xfs_attr_sf_hdr_t; +typedef struct xfs_attr_sf_entry xfs_attr_sf_entry_t; + +/* + * We generate this then sort it, attr_list() must return things in hash-order. 
+ */ +typedef struct xfs_attr_sf_sort { + __uint8_t entno; /* entry number in original list */ + __uint8_t namelen; /* length of name value (no null) */ + __uint8_t valuelen; /* length of value */ + __uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */ + xfs_dahash_t hash; /* this entry's hash value */ + char *name; /* name value, pointer into buffer */ +} xfs_attr_sf_sort_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_ENTSIZE_BYNAME) +int xfs_attr_sf_entsize_byname(int nlen, int vlen); +#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) \ + xfs_attr_sf_entsize_byname(nlen,vlen) +#else +#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) /* space name/value uses */ \ + ((int)sizeof(xfs_attr_sf_entry_t)-1 + (nlen)+(vlen)) +#endif +#define XFS_ATTR_SF_ENTSIZE_MAX /* max space for name&value */ \ + ((1 << (NBBY*(int)sizeof(__uint8_t))) - 1) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_ENTSIZE) +int xfs_attr_sf_entsize(xfs_attr_sf_entry_t *sfep); +#define XFS_ATTR_SF_ENTSIZE(sfep) xfs_attr_sf_entsize(sfep) +#else +#define XFS_ATTR_SF_ENTSIZE(sfep) /* space an entry uses */ \ + ((int)sizeof(xfs_attr_sf_entry_t)-1 + (sfep)->namelen+(sfep)->valuelen) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_NEXTENTRY) +xfs_attr_sf_entry_t *xfs_attr_sf_nextentry(xfs_attr_sf_entry_t *sfep); +#define XFS_ATTR_SF_NEXTENTRY(sfep) xfs_attr_sf_nextentry(sfep) +#else +#define XFS_ATTR_SF_NEXTENTRY(sfep) /* next entry in struct */ \ + ((xfs_attr_sf_entry_t *) \ + ((char *)(sfep) + XFS_ATTR_SF_ENTSIZE(sfep))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ATTR_SF_TOTSIZE) +int xfs_attr_sf_totsize(struct xfs_inode *dp); +#define XFS_ATTR_SF_TOTSIZE(dp) xfs_attr_sf_totsize(dp) +#else +#define XFS_ATTR_SF_TOTSIZE(dp) /* total space in use */ \ + (INT_GET(((xfs_attr_shortform_t *)((dp)->i_afp->if_u1.if_data))->hdr.totsize, ARCH_CONVERT)) +#endif + +#ifdef XFS_ALL_TRACE +#define XFS_ATTR_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_ATTR_TRACE +#endif + +/* + * Kernel tracing support for attribute lists + */ +struct xfs_attr_list_context; +struct xfs_da_intnode; +struct xfs_da_node_entry; +struct xfs_attr_leafblock; + +#define XFS_ATTR_TRACE_SIZE 4096 /* size of global trace buffer */ + +/* + * Trace record types. 
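[Illustrative aside, not part of the patch.] Shortform entries are variable-length records laid end to end, so XFS_ATTR_SF_ENTSIZE and XFS_ATTR_SF_NEXTENTRY are the whole iteration protocol: an entry's size is its fixed header minus the placeholder byte plus namelen plus valuelen, and the next entry starts immediately after. The sketch below packs and walks two such records with a simplified entry type (no flags field, not the on-disk layout):

/* Standalone sketch: variable-length entries packed back to back, each
 * advanced over by its own size, in the spirit of XFS_ATTR_SF_ENTSIZE
 * and XFS_ATTR_SF_NEXTENTRY. */
#include <stdio.h>
#include <string.h>

struct sf_entry {
        unsigned char namelen;
        unsigned char valuelen;
        unsigned char nameval[1];        /* name bytes then value bytes */
};

#define SF_ENTSIZE(e) \
        ((size_t)sizeof(struct sf_entry) - 1 + (e)->namelen + (e)->valuelen)
#define SF_NEXT(e) \
        ((struct sf_entry *)((char *)(e) + SF_ENTSIZE(e)))

static struct sf_entry *pack(struct sf_entry *e, const char *name, const char *val)
{
        e->namelen = (unsigned char)strlen(name);
        e->valuelen = (unsigned char)strlen(val);
        memcpy(e->nameval, name, e->namelen);
        memcpy(e->nameval + e->namelen, val, e->valuelen);
        return SF_NEXT(e);               /* where the next entry starts */
}

int main(void)
{
        unsigned char buf[64];
        struct sf_entry *e = (struct sf_entry *)buf;
        int i;

        pack(pack(e, "mode", "0644"), "owner", "root");

        for (i = 0; i < 2; i++, e = SF_NEXT(e))
                printf("entry %d: namelen=%d valuelen=%d size=%zu\n",
                       i, e->namelen, e->valuelen, SF_ENTSIZE(e));
        return 0;
}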
+ */ +#define XFS_ATTR_KTRACE_L_C 1 /* context */ +#define XFS_ATTR_KTRACE_L_CN 2 /* context, node */ +#define XFS_ATTR_KTRACE_L_CB 3 /* context, btree */ +#define XFS_ATTR_KTRACE_L_CL 4 /* context, leaf */ + +#if defined(XFS_ATTR_TRACE) + +void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context); +void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, + struct xfs_da_intnode *node); +void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, + struct xfs_da_node_entry *btree); +void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, + struct xfs_attr_leafblock *leaf); +void xfs_attr_trace_enter(int type, char *where, + __psunsigned_t a2, __psunsigned_t a3, + __psunsigned_t a4, __psunsigned_t a5, + __psunsigned_t a6, __psunsigned_t a7, + __psunsigned_t a8, __psunsigned_t a9, + __psunsigned_t a10, __psunsigned_t a11, + __psunsigned_t a12, __psunsigned_t a13, + __psunsigned_t a14, __psunsigned_t a15); +#else +#define xfs_attr_trace_l_c(w,c) +#define xfs_attr_trace_l_cn(w,c,n) +#define xfs_attr_trace_l_cb(w,c,b) +#define xfs_attr_trace_l_cl(w,c,l) +#endif /* XFS_ATTR_TRACE */ + +#endif /* __XFS_ATTR_SF_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_bit.c linux.22-ac2/fs/xfs/xfs_bit.c --- linux.vanilla/fs/xfs/xfs_bit.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_bit.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * XFS bit manipulation routines, used in non-realtime code. + */ + +#include "xfs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" + + +#ifndef HAVE_ARCH_HIGHBIT +/* + * Index of high bit number in byte, -1 for none set, 0..7 otherwise. + */ +const char xfs_highbit[256] = { + -1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */ + 3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */ + 4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */ + 4, 4, 4, 4, 4, 4, 4, 4, /* 18 .. 1f */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 20 .. 27 */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 28 .. 2f */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 30 .. 37 */ + 5, 5, 5, 5, 5, 5, 5, 5, /* 38 .. 3f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 40 .. 
47 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 48 .. 4f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 50 .. 57 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 58 .. 5f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 60 .. 67 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 68 .. 6f */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 70 .. 77 */ + 6, 6, 6, 6, 6, 6, 6, 6, /* 78 .. 7f */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 80 .. 87 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 88 .. 8f */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 90 .. 97 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* 98 .. 9f */ + 7, 7, 7, 7, 7, 7, 7, 7, /* a0 .. a7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* a8 .. af */ + 7, 7, 7, 7, 7, 7, 7, 7, /* b0 .. b7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* b8 .. bf */ + 7, 7, 7, 7, 7, 7, 7, 7, /* c0 .. c7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* c8 .. cf */ + 7, 7, 7, 7, 7, 7, 7, 7, /* d0 .. d7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* d8 .. df */ + 7, 7, 7, 7, 7, 7, 7, 7, /* e0 .. e7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* e8 .. ef */ + 7, 7, 7, 7, 7, 7, 7, 7, /* f0 .. f7 */ + 7, 7, 7, 7, 7, 7, 7, 7, /* f8 .. ff */ +}; +#endif + +/* + * Count of bits set in byte, 0..8. + */ +static const char xfs_countbit[256] = { + 0, 1, 1, 2, 1, 2, 2, 3, /* 00 .. 07 */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 08 .. 0f */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 10 .. 17 */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 18 .. 1f */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 20 .. 27 */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 28 .. 2f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 30 .. 37 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 38 .. 3f */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 40 .. 47 */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 48 .. 4f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 50 .. 57 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 58 .. 5f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 60 .. 67 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 68 .. 6f */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 70 .. 77 */ + 4, 5, 5, 6, 5, 6, 6, 7, /* 78 .. 7f */ + 1, 2, 2, 3, 2, 3, 3, 4, /* 80 .. 87 */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 88 .. 8f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* 90 .. 97 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* 98 .. 9f */ + 2, 3, 3, 4, 3, 4, 4, 5, /* a0 .. a7 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* a8 .. af */ + 3, 4, 4, 5, 4, 5, 5, 6, /* b0 .. b7 */ + 4, 5, 5, 6, 5, 6, 6, 7, /* b8 .. bf */ + 2, 3, 3, 4, 3, 4, 4, 5, /* c0 .. c7 */ + 3, 4, 4, 5, 4, 5, 5, 6, /* c8 .. cf */ + 3, 4, 4, 5, 4, 5, 5, 6, /* d0 .. d7 */ + 4, 5, 5, 6, 5, 6, 6, 7, /* d8 .. df */ + 3, 4, 4, 5, 4, 5, 5, 6, /* e0 .. e7 */ + 4, 5, 5, 6, 5, 6, 6, 7, /* e8 .. ef */ + 4, 5, 5, 6, 5, 6, 6, 7, /* f0 .. f7 */ + 5, 6, 6, 7, 6, 7, 7, 8, /* f8 .. ff */ +}; + +/* + * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set. + */ +int inline +xfs_highbit32( + __uint32_t v) +{ +#ifdef HAVE_ARCH_HIGHBIT + return highbit32(v); +#else + int i; + + if (v & 0xffff0000) + if (v & 0xff000000) + i = 24; + else + i = 16; + else if (v & 0x0000ffff) + if (v & 0x0000ff00) + i = 8; + else + i = 0; + else + return -1; + return i + xfs_highbit[(v >> i) & 0xff]; +#endif +} + +/* + * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set. + */ +int +xfs_lowbit64( + __uint64_t v) +{ + int n; + n = ffs((unsigned)v); + if (n == 0) { + n = ffs(v >> 32); + if (n >= 0) + n+=32; + } + return n-1; +} + +/* + * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set. + */ +int +xfs_highbit64( + __uint64_t v) +{ + __uint32_t h = v >> 32; + if (h) + return xfs_highbit32(h) + 32; + return xfs_highbit32((__u32)v); +} + + +/* + * Count the number of bits set in the bitmap starting with bit + * start_bit. Size is the size of the bitmap in words. + * + * Do the counting by mapping a byte value to the number of set + * bits for that value using the xfs_countbit array, i.e. 
+ * xfs_countbit[0] == 0, xfs_countbit[1] == 1, xfs_countbit[2] == 1, + * xfs_countbit[3] == 2, etc. + */ +int +xfs_count_bits(uint *map, uint size, uint start_bit) +{ + register int bits; + register unsigned char *bytep; + register unsigned char *end_map; + int byte_bit; + + bits = 0; + end_map = (char*)(map + size); + bytep = (char*)(map + (start_bit & ~0x7)); + byte_bit = start_bit & 0x7; + + /* + * If the caller fell off the end of the map, return 0. + */ + if (bytep >= end_map) { + return (0); + } + + /* + * If start_bit is not byte aligned, then process the + * first byte separately. + */ + if (byte_bit != 0) { + /* + * Shift off the bits we don't want to look at, + * before indexing into xfs_countbit. + */ + bits += xfs_countbit[(*bytep >> byte_bit)]; + bytep++; + } + + /* + * Count the bits in each byte until the end of the bitmap. + */ + while (bytep < end_map) { + bits += xfs_countbit[*bytep]; + bytep++; + } + + return (bits); +} + +/* + * Count the number of contiguous bits set in the bitmap starting with bit + * start_bit. Size is the size of the bitmap in words. + */ +int +xfs_contig_bits(uint *map, uint size, uint start_bit) +{ +#if BITS_PER_LONG == 32 + return find_next_zero_bit(map,size*sizeof(uint)*8,start_bit) - start_bit; +#else + /* + * The first argument to find_next_zero_bit needs to be aligned, + * but this is coming from the xfs_buf_log_format_t on-disk + * struct, which can't be padded or otherwise modified w/o breaking + * on-disk compatibility... so create a temporary, aligned + * variable, copy over the bitmap, and send that to find_next_zero_bit + * This only happens in recovery, so it's ugly but not too bad. + */ + void * addr; + int bit; + size_t bitmap_size = size * sizeof(uint); + + addr = (void *)kmem_alloc(bitmap_size, KM_SLEEP); + memcpy(addr, map, size * sizeof(uint)); + + bit = find_next_zero_bit(addr,size*sizeof(uint)*8,start_bit) - start_bit; + + kmem_free(addr, bitmap_size); + + return bit; +#endif +} + +/* + * This takes the bit number to start looking from and + * returns the next set bit from there. It returns -1 + * if there are no more bits set or the start bit is + * beyond the end of the bitmap. + * + * Size is the number of words, not bytes, in the bitmap. + */ +int xfs_next_bit(uint *map, uint size, uint start_bit) +{ + uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT); + uint result = start_bit & ~(NBWORD - 1); + uint tmp; + + size <<= BIT_TO_WORD_SHIFT; + + if (start_bit >= size) + return -1; + size -= result; + start_bit &= (NBWORD - 1); + if (start_bit) { + tmp = *p++; + /* set to zero first offset bits */ + tmp &= (~0U << start_bit); + if (size < NBWORD) + goto found_first; + if (tmp != 0U) + goto found_middle; + size -= NBWORD; + result += NBWORD; + } + while (size >= NBWORD) { + if ((tmp = *p++) != 0U) + goto found_middle; + result += NBWORD; + size -= NBWORD; + } + if (!size) + return -1; + tmp = *p; +found_first: +found_middle: + return result + ffs(tmp) - 1; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_bit.h linux.22-ac2/fs/xfs/xfs_bit.h --- linux.vanilla/fs/xfs/xfs_bit.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_bit.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
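[Illustrative aside, not part of the patch.] The xfs_bit.c routines above lean on byte-wide lookup tables: xfs_highbit32() narrows the word down to one byte with a few range tests and finishes with a single table lookup. The standalone sketch below rebuilds such a table at startup and cross-checks the composed 32-bit result against a naive bit scan; it is a model of the approach, not the kernel table itself.

/* Standalone sketch: table-per-byte highbit, checked against a loop. */
#include <assert.h>
#include <stdio.h>

static signed char highbit_byte[256];    /* -1 for 0, else index of top set bit */

static int highbit32(unsigned int v)
{
        if (v & 0xffff0000u)
                return (v & 0xff000000u) ? 24 + highbit_byte[v >> 24]
                                         : 16 + highbit_byte[(v >> 16) & 0xff];
        if (v & 0x0000ff00u)
                return 8 + highbit_byte[(v >> 8) & 0xff];
        return v ? highbit_byte[v & 0xff] : -1;
}

int main(void)
{
        unsigned int v;
        int i, b;

        highbit_byte[0] = -1;
        for (i = 1; i < 256; i++)
                highbit_byte[i] = (signed char)(highbit_byte[i / 2] + 1);

        /* spot-check against a plain downward scan */
        for (v = 0; v < 100000u; v += 7) {
                for (b = 31; b >= 0 && !(v & (1u << b)); b--)
                        ;
                assert(highbit32(v) == b);
        }
        printf("highbit32(0x80000001) = %d\n", highbit32(0x80000001u));
        return 0;
}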
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BIT_H__ +#define __XFS_BIT_H__ + +/* + * XFS bit manipulation routines. + */ + +/* + * masks with n high/low bits set, 32-bit values & 64-bit values + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK32HI) +__uint32_t xfs_mask32hi(int n); +#define XFS_MASK32HI(n) xfs_mask32hi(n) +#else +#define XFS_MASK32HI(n) ((__uint32_t)-1 << (32 - (n))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK64HI) +__uint64_t xfs_mask64hi(int n); +#define XFS_MASK64HI(n) xfs_mask64hi(n) +#else +#define XFS_MASK64HI(n) ((__uint64_t)-1 << (64 - (n))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK32LO) +__uint32_t xfs_mask32lo(int n); +#define XFS_MASK32LO(n) xfs_mask32lo(n) +#else +#define XFS_MASK32LO(n) (((__uint32_t)1 << (n)) - 1) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MASK64LO) +__uint64_t xfs_mask64lo(int n); +#define XFS_MASK64LO(n) xfs_mask64lo(n) +#else +#define XFS_MASK64LO(n) (((__uint64_t)1 << (n)) - 1) +#endif + +/* Get high bit set out of 32-bit argument, -1 if none set */ +extern int xfs_highbit32(__uint32_t v); + +/* Get low bit set out of 64-bit argument, -1 if none set */ +extern int xfs_lowbit64(__uint64_t v); + +/* Get high bit set out of 64-bit argument, -1 if none set */ +extern int xfs_highbit64(__uint64_t); + +/* Count set bits in map starting with start_bit */ +extern int xfs_count_bits(uint *map, uint size, uint start_bit); + +/* Count continuous one bits in map starting with start_bit */ +extern int xfs_contig_bits(uint *map, uint size, uint start_bit); + +/* Find next set bit in map */ +extern int xfs_next_bit(uint *map, uint size, uint start_bit); + +#endif /* __XFS_BIT_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_bmap_btree.c linux.22-ac2/fs/xfs/xfs_bmap_btree.c --- linux.vanilla/fs/xfs/xfs_bmap_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_bmap_btree.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,2815 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_itable.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_bit.h" +#include "xfs_bmap.h" +#include "xfs_error.h" +#include "xfs_quota.h" + +#ifdef DEBUG +ktrace_t *xfs_bmbt_trace_buf; +#endif + +/* + * Prototypes for internal btree functions. + */ + + +STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *); +STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *, + xfs_bmbt_key_t *, xfs_btree_cur_t **, int *); +STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int); + + +#if defined(XFS_BMBT_TRACE) +/* + * Add a trace buffer entry for the arguments given to the routine, + * generic form. 
+ */ +STATIC void +xfs_bmbt_trace_enter( + char *func, + xfs_btree_cur_t *cur, + char *s, + int type, + int line, + __psunsigned_t a0, + __psunsigned_t a1, + __psunsigned_t a2, + __psunsigned_t a3, + __psunsigned_t a4, + __psunsigned_t a5, + __psunsigned_t a6, + __psunsigned_t a7, + __psunsigned_t a8, + __psunsigned_t a9, + __psunsigned_t a10) +{ + xfs_inode_t *ip; + int whichfork; + + ip = cur->bc_private.b.ip; + whichfork = cur->bc_private.b.whichfork; + ktrace_enter(xfs_bmbt_trace_buf, + (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), + (void *)func, (void *)s, (void *)ip, (void *)cur, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10); + ASSERT(ip->i_btrace); + ktrace_enter(ip->i_btrace, + (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), + (void *)func, (void *)s, (void *)ip, (void *)cur, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10); +} +/* + * Add a trace buffer entry for arguments, for a buffer & 1 integer arg. + */ +STATIC void +xfs_bmbt_trace_argbi( + char *func, + xfs_btree_cur_t *cur, + xfs_buf_t *b, + int i, + int line) +{ + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBI, line, + (__psunsigned_t)b, i, 0, 0, + 0, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for a buffer & 2 integer args. + */ +STATIC void +xfs_bmbt_trace_argbii( + char *func, + xfs_btree_cur_t *cur, + xfs_buf_t *b, + int i0, + int i1, + int line) +{ + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBII, line, + (__psunsigned_t)b, i0, i1, 0, + 0, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for 3 block-length args + * and an integer arg. + */ +STATIC void +xfs_bmbt_trace_argfffi( + char *func, + xfs_btree_cur_t *cur, + xfs_dfiloff_t o, + xfs_dfsbno_t b, + xfs_dfilblks_t i, + int j, + int line) +{ + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGFFFI, line, + o >> 32, (int)o, b >> 32, (int)b, + i >> 32, (int)i, (int)j, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for one integer arg. + */ +STATIC void +xfs_bmbt_trace_argi( + char *func, + xfs_btree_cur_t *cur, + int i, + int line) +{ + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGI, line, + i, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for int, fsblock, key. + */ +STATIC void +xfs_bmbt_trace_argifk( + char *func, + xfs_btree_cur_t *cur, + int i, + xfs_fsblock_t f, + xfs_bmbt_key_t *k, + int line) +{ + xfs_dfsbno_t d; + xfs_dfiloff_t o; + + d = (xfs_dfsbno_t)f; + o = INT_GET(k->br_startoff, ARCH_CONVERT); + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line, + i, d >> 32, (int)d, o >> 32, + (int)o, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for int, fsblock, rec. 
+ */ +STATIC void +xfs_bmbt_trace_argifr( + char *func, + xfs_btree_cur_t *cur, + int i, + xfs_fsblock_t f, + xfs_bmbt_rec_t *r, + int line) +{ + xfs_dfsbno_t b; + xfs_dfilblks_t c; + xfs_dfsbno_t d; + xfs_dfiloff_t o; + xfs_bmbt_irec_t s; + + d = (xfs_dfsbno_t)f; + xfs_bmbt_disk_get_all(r, &s); + o = (xfs_dfiloff_t)s.br_startoff; + b = (xfs_dfsbno_t)s.br_startblock; + c = s.br_blockcount; + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFR, line, + i, d >> 32, (int)d, o >> 32, + (int)o, b >> 32, (int)b, c >> 32, + (int)c, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for int, key. + */ +STATIC void +xfs_bmbt_trace_argik( + char *func, + xfs_btree_cur_t *cur, + int i, + xfs_bmbt_key_t *k, + int line) +{ + xfs_dfiloff_t o; + + o = INT_GET(k->br_startoff, ARCH_CONVERT); + xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line, + i, o >> 32, (int)o, 0, + 0, 0, 0, 0, + 0, 0, 0); +} + +/* + * Add a trace buffer entry for the cursor/operation. + */ +STATIC void +xfs_bmbt_trace_cursor( + char *func, + xfs_btree_cur_t *cur, + char *s, + int line) +{ + xfs_bmbt_rec_t r; + + xfs_bmbt_set_all(&r, &cur->bc_rec.b); + xfs_bmbt_trace_enter(func, cur, s, XFS_BMBT_KTRACE_CUR, line, + (cur->bc_nlevels << 24) | (cur->bc_private.b.flags << 16) | + cur->bc_private.b.allocated, + INT_GET(r.l0, ARCH_CONVERT) >> 32, (int)INT_GET(r.l0, ARCH_CONVERT), INT_GET(r.l1, ARCH_CONVERT) >> 32, (int)INT_GET(r.l1, ARCH_CONVERT), + (unsigned long)cur->bc_bufs[0], (unsigned long)cur->bc_bufs[1], + (unsigned long)cur->bc_bufs[2], (unsigned long)cur->bc_bufs[3], + (cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1], + (cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]); +} + +#define XFS_BMBT_TRACE_ARGBI(c,b,i) \ + xfs_bmbt_trace_argbi(fname, c, b, i, __LINE__) +#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \ + xfs_bmbt_trace_argbii(fname, c, b, i, j, __LINE__) +#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \ + xfs_bmbt_trace_argfffi(fname, c, o, b, i, j, __LINE__) +#define XFS_BMBT_TRACE_ARGI(c,i) \ + xfs_bmbt_trace_argi(fname, c, i, __LINE__) +#define XFS_BMBT_TRACE_ARGIFK(c,i,f,k) \ + xfs_bmbt_trace_argifk(fname, c, i, f, k, __LINE__) +#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ + xfs_bmbt_trace_argifr(fname, c, i, f, r, __LINE__) +#define XFS_BMBT_TRACE_ARGIK(c,i,k) \ + xfs_bmbt_trace_argik(fname, c, i, k, __LINE__) +#define XFS_BMBT_TRACE_CURSOR(c,s) \ + xfs_bmbt_trace_cursor(fname, c, s, __LINE__) +static char ARGS[] = "args"; +static char ENTRY[] = "entry"; +static char ERROR[] = "error"; +#undef EXIT +static char EXIT[] = "exit"; +#else +#define XFS_BMBT_TRACE_ARGBI(c,b,i) +#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) +#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) +#define XFS_BMBT_TRACE_ARGI(c,i) +#define XFS_BMBT_TRACE_ARGIFK(c,i,f,k) +#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) +#define XFS_BMBT_TRACE_ARGIK(c,i,k) +#define XFS_BMBT_TRACE_CURSOR(c,s) +#endif /* XFS_BMBT_TRACE */ + + +/* + * Internal functions. + */ + +/* + * Delete record pointed to by cur/level. 
+ */ +STATIC int /* error */ +xfs_bmbt_delrec( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + xfs_bmbt_block_t *block; /* bmap btree block */ + xfs_fsblock_t bno; /* fs-relative block number */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_delrec"; +#endif + int i; /* loop counter */ + int j; /* temp state */ + xfs_bmbt_key_t key; /* bmap btree key */ + xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */ + xfs_fsblock_t lbno; /* left sibling block number */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_bmbt_block_t *left; /* left btree block */ + xfs_bmbt_key_t *lkp; /* left btree key */ + xfs_bmbt_ptr_t *lpp; /* left address pointer */ + int lrecs=0; /* left record count */ + xfs_bmbt_rec_t *lrp; /* left record pointer */ + xfs_mount_t *mp; /* file system mount point */ + xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ + int ptr; /* key/record index */ + xfs_fsblock_t rbno; /* right sibling block number */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_bmbt_block_t *right; /* right btree block */ + xfs_bmbt_key_t *rkp; /* right btree key */ + xfs_bmbt_rec_t *rp; /* pointer to bmap btree rec */ + xfs_bmbt_ptr_t *rpp; /* right address pointer */ + xfs_bmbt_block_t *rrblock; /* right-right btree block */ + xfs_buf_t *rrbp; /* right-right buffer pointer */ + int rrecs=0; /* right record count */ + xfs_bmbt_rec_t *rrp; /* right record pointer */ + xfs_btree_cur_t *tcur; /* temporary btree cursor */ + int numrecs; /* temporary numrec count */ + int numlrecs, numrrecs; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + ptr = cur->bc_ptrs[level]; + tcur = (xfs_btree_cur_t *)0; + if (ptr == 0) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + block = xfs_bmbt_get_block(cur, level, &bp); + numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } +#endif + if (ptr > numrecs) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + XFS_STATS_INC(xfsstats.xs_bmbt_delrec); + if (level > 0) { + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); +#ifdef DEBUG + for (i = ptr; i < numrecs; i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + } +#endif + if (ptr < numrecs) { + memmove(&kp[ptr - 1], &kp[ptr], + (numrecs - ptr) * sizeof(*kp)); + memmove(&pp[ptr - 1], &pp[ptr], /* INT_: direct copy */ + (numrecs - ptr) * sizeof(*pp)); + xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1); + xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1); + } + } else { + rp = XFS_BMAP_REC_IADDR(block, 1, cur); + if (ptr < numrecs) { + memmove(&rp[ptr - 1], &rp[ptr], + (numrecs - ptr) * sizeof(*rp)); + xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1); + } + if (ptr == 1) { + INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(rp)); + kp = &key; + } + } + numrecs--; + INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); + /* + * We're at the root level. + * First, shrink the root block in-memory. + * Try to get rid of the next level down. + * If we can't then there's nothing left to do. 
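[Illustrative aside, not part of the patch.] The remainder of xfs_bmbt_delrec() below deals with the block dropping under its minimum record count: prefer borrowing a record from a sibling that has spares, otherwise merge with a sibling and free a block, and leave the parent level to be fixed up afterwards. As a preview, the policy in isolation, with invented min/max values and no real btree:

/* Standalone sketch (counts only, no real btree): the rebalancing policy
 * applied after a record is removed. The numbers are invented. */
#include <stdio.h>

#define MINRECS 4
#define MAXRECS 8

/* Decide what to do with a block that just lost a record.
 * left/right < 0 means "no such sibling". */
static const char *after_delete(int numrecs, int left, int right)
{
        if (numrecs >= MINRECS)
                return "done: still at or above the minimum";
        if (right > MINRECS || left > MINRECS)
                return "borrow: shift one record over from the richer sibling";
        if (right >= 0 && right + numrecs <= MAXRECS)
                return "merge with the right sibling, free one block";
        if (left >= 0 && left + numrecs <= MAXRECS)
                return "merge with the left sibling, free one block";
        return "no siblings: this is the root, handled separately";
}

int main(void)
{
        printf("%s\n", after_delete(5, 4, 4));   /* no rebalance needed     */
        printf("%s\n", after_delete(3, 7, 4));   /* left sibling has spares */
        printf("%s\n", after_delete(3, 4, 4));   /* both at the minimum     */
        return 0;
}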
+ */ + if (level == cur->bc_nlevels - 1) { + xfs_iroot_realloc(cur->bc_private.b.ip, -1, + cur->bc_private.b.whichfork); + if ((error = xfs_bmbt_killroot(cur))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + if (ptr == 1 && (error = xfs_bmbt_updkey(cur, kp, level + 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) { + if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); + /* + * One child of root, need to get a chance to copy its contents + * into the root and delete it. Can't go up to next level, + * there's nothing to delete there. + */ + if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK && + level == cur->bc_nlevels - 2) { + if ((error = xfs_bmbt_killroot(cur))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK); + if ((error = xfs_btree_dup_cursor(cur, &tcur))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + bno = NULLFSBLOCK; + if (rbno != NULLFSBLOCK) { + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_bmbt_increment(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + rbp = tcur->bc_bufs[level]; + right = XFS_BUF_TO_BMBT_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } +#endif + bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_BMAP_BLOCK_IMINRECS(level, cur)) { + if ((error = xfs_bmbt_lshift(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_BMAP_BLOCK_IMINRECS(level, tcur)); + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + tcur = NULL; + if (level > 0) { + if ((error = xfs_bmbt_decrement(cur, + level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, + ERROR); + goto error0; + } + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + } + rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + if (lbno != NULLFSBLOCK) { + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_bmbt_decrement(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + } + if (lbno != NULLFSBLOCK) { + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * decrement to last in block + */ + if ((error = xfs_bmbt_decrement(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + lbp = tcur->bc_bufs[level]; + left = XFS_BUF_TO_BMBT_BLOCK(lbp); +#ifdef DEBUG + if ((error = 
xfs_btree_check_lblock(cur, left, level, lbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } +#endif + bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_BMAP_BLOCK_IMINRECS(level, cur)) { + if ((error = xfs_bmbt_rshift(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_BMAP_BLOCK_IMINRECS(level, tcur)); + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + tcur = NULL; + if (level == 0) + cur->bc_ptrs[0]++; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + } + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + tcur = NULL; + mp = cur->bc_mp; + ASSERT(bno != NULLFSBLOCK); + if (lbno != NULLFSBLOCK && + lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + rbno = bno; + right = block; + rbp = bp; + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + left = XFS_BUF_TO_BMBT_BLOCK(lbp); + if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + } else if (rbno != NULLFSBLOCK && + rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + lbno = bno; + left = block; + lbp = bp; + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + right = XFS_BUF_TO_BMBT_BLOCK(rbp); + if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + } else { + if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + numlrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + numrrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + if (level > 0) { + lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur); + lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur); + rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); + rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < numrrecs; i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + } +#endif + memcpy(lkp, rkp, numrrecs * sizeof(*lkp)); + memcpy(lpp, rpp, numrrecs * sizeof(*lpp)); + xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs); + xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); + } else { + lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur); + rrp = XFS_BMAP_REC_IADDR(right, 1, cur); + memcpy(lrp, rrp, numrrecs * sizeof(*lrp)); + xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); + } + INT_MOD(left->bb_numrecs, ARCH_CONVERT, numrrecs); + left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ + xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS); + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, + INT_GET(left->bb_rightsib, ARCH_CONVERT), + 0, &rrbp, XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp); + if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) { + XFS_BMBT_TRACE_CURSOR(cur, 
ERROR); + goto error0; + } + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); + xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); + } + xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1, + cur->bc_private.b.flist, mp); + cur->bc_private.b.ip->i_d.di_nblocks--; + xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE); + XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip, + XFS_TRANS_DQ_BCOUNT, -1L); + xfs_trans_binval(cur->bc_tp, rbp); + if (bp != lbp) { + cur->bc_bufs[level] = lbp; + cur->bc_ptrs[level] += lrecs; + cur->bc_ra[level] = 0; + } else if ((error = xfs_bmbt_increment(cur, level + 1, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + goto error0; + } + if (level > 0) + cur->bc_ptrs[level]--; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 2; + return 0; + +error0: + if (tcur) + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} + +#ifdef XFSDEBUG +/* + * Get the data from the pointed-to record. + */ +int +xfs_bmbt_get_rec( + xfs_btree_cur_t *cur, + xfs_fileoff_t *off, + xfs_fsblock_t *bno, + xfs_filblks_t *len, + xfs_exntst_t *state, + int *stat) +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; +#ifdef DEBUG + int error; +#endif + int ptr; + xfs_bmbt_rec_t *rp; + + block = xfs_bmbt_get_block(cur, 0, &bp); + ptr = cur->bc_ptrs[0]; +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) + return error; +#endif + if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { + *stat = 0; + return 0; + } + rp = XFS_BMAP_REC_IADDR(block, ptr, cur); + *off = xfs_bmbt_disk_get_startoff(rp); + *bno = xfs_bmbt_disk_get_startblock(rp); + *len = xfs_bmbt_disk_get_blockcount(rp); + *state = xfs_bmbt_disk_get_state(rp); + *stat = 1; + return 0; +} +#endif + +/* + * Insert one record/level. Return information to the caller + * allowing the next level up to proceed if necessary. 
+ */ +STATIC int /* error */ +xfs_bmbt_insrec( + xfs_btree_cur_t *cur, + int level, + xfs_fsblock_t *bnop, + xfs_bmbt_rec_t *recp, + xfs_btree_cur_t **curp, + int *stat) /* no-go/done/continue */ +{ + xfs_bmbt_block_t *block; /* bmap btree block */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_insrec"; +#endif + int i; /* loop index */ + xfs_bmbt_key_t key; /* bmap btree key */ + xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */ + int logflags; /* inode logging flags */ + xfs_fsblock_t nbno; /* new block number */ + struct xfs_btree_cur *ncur; /* new btree cursor */ + xfs_bmbt_key_t nkey; /* new btree key value */ + xfs_bmbt_rec_t nrec; /* new record count */ + int optr; /* old key/record index */ + xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ + int ptr; /* key/record index */ + xfs_bmbt_rec_t *rp=NULL; /* pointer to bmap btree rec */ + int numrecs; + + ASSERT(level < cur->bc_nlevels); + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp); + ncur = (xfs_btree_cur_t *)0; + INT_SET(key.br_startoff, ARCH_CONVERT, + xfs_bmbt_disk_get_startoff(recp)); + optr = ptr = cur->bc_ptrs[level]; + if (ptr == 0) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + XFS_STATS_INC(xfsstats.xs_bmbt_insrec); + block = xfs_bmbt_get_block(cur, level, &bp); + numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (ptr <= numrecs) { + if (level == 0) { + rp = XFS_BMAP_REC_IADDR(block, ptr, cur); + xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp); + } else { + kp = XFS_BMAP_KEY_IADDR(block, ptr, cur); + xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp); + } + } +#endif + nbno = NULLFSBLOCK; + if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) { + /* + * A root block, that can be made bigger. 
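[Illustrative aside, not part of the patch.] When the target block is already full, xfs_bmbt_insrec() makes room in a fixed order: grow the in-inode root if it still fits, otherwise add a new root level, otherwise shift records into the right or left sibling, and only then split (the shift and split branches follow below). A counts-only sketch of that cascade, with invented field names:

/* Standalone sketch: the order in which room is made in a full block.
 * Pure decision logic, no real tree. */
#include <stdio.h>

struct full_block {
        int is_inode_root;      /* root kept in the inode fork        */
        int root_can_grow;      /* inode literal area still has space */
        int right_has_room;     /* right sibling below max            */
        int left_has_room;      /* left sibling below max             */
};

static const char *make_room(const struct full_block *b)
{
        if (b->is_inode_root && b->root_can_grow)
                return "grow the in-inode root block";
        if (b->is_inode_root)
                return "allocate a new root: the tree gets one level taller";
        if (b->right_has_room)
                return "shift records right into the right sibling";
        if (b->left_has_room)
                return "shift records left into the left sibling";
        return "split the block; the caller inserts the new key upstairs";
}

int main(void)
{
        struct full_block leaf = { 0, 0, 0, 1 };

        printf("%s\n", make_room(&leaf));   /* -> shift records left */
        return 0;
}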
+ */ + xfs_iroot_realloc(cur->bc_private.b.ip, 1, + cur->bc_private.b.whichfork); + block = xfs_bmbt_get_block(cur, level, &bp); + } else if (level == cur->bc_nlevels - 1) { + if ((error = xfs_bmbt_newroot(cur, &logflags, stat)) || + *stat == 0) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, + logflags); + block = xfs_bmbt_get_block(cur, level, &bp); + } else { + if ((error = xfs_bmbt_rshift(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (i) { + /* nothing */ + } else { + if ((error = xfs_bmbt_lshift(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (i) { + optr = ptr = cur->bc_ptrs[level]; + } else { + if ((error = xfs_bmbt_split(cur, level, + &nbno, &nkey, &ncur, + &i))) { + XFS_BMBT_TRACE_CURSOR(cur, + ERROR); + return error; + } + if (i) { + block = xfs_bmbt_get_block( + cur, level, &bp); +#ifdef DEBUG + if ((error = + xfs_btree_check_lblock(cur, + block, level, bp))) { + XFS_BMBT_TRACE_CURSOR( + cur, ERROR); + return error; + } +#endif + ptr = cur->bc_ptrs[level]; + xfs_bmbt_disk_set_allf(&nrec, + nkey.br_startoff, 0, 0, + XFS_EXT_NORM); + } else { + XFS_BMBT_TRACE_CURSOR(cur, + EXIT); + *stat = 0; + return 0; + } + } + } + } + } + numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + if (level > 0) { + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); +#ifdef DEBUG + for (i = numrecs; i >= ptr; i--) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), + level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memmove(&kp[ptr], &kp[ptr - 1], + (numrecs - ptr + 1) * sizeof(*kp)); + memmove(&pp[ptr], &pp[ptr - 1], /* INT_: direct copy */ + (numrecs - ptr + 1) * sizeof(*pp)); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)*bnop, + level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + kp[ptr - 1] = key; + INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); + numrecs++; + INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + xfs_bmbt_log_keys(cur, bp, ptr, numrecs); + xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs); + } else { + rp = XFS_BMAP_REC_IADDR(block, 1, cur); + memmove(&rp[ptr], &rp[ptr - 1], + (numrecs - ptr + 1) * sizeof(*rp)); + rp[ptr - 1] = *recp; + numrecs++; + INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); + xfs_bmbt_log_recs(cur, bp, ptr, numrecs); + } + xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); +#ifdef DEBUG + if (ptr < numrecs) { + if (level == 0) + xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1, + rp + ptr); + else + xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1, + kp + ptr); + } +#endif + if (optr == 1 && (error = xfs_bmbt_updkey(cur, &key, level + 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + *bnop = nbno; + if (nbno != NULLFSBLOCK) { + *recp = nrec; + *curp = ncur; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + +STATIC int +xfs_bmbt_killroot( + xfs_btree_cur_t *cur) +{ + xfs_bmbt_block_t *block; + xfs_bmbt_block_t *cblock; + xfs_buf_t *cbp; + xfs_bmbt_key_t *ckp; + xfs_bmbt_ptr_t *cpp; +#ifdef DEBUG + int error; +#endif +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_killroot"; +#endif + int i; + xfs_bmbt_key_t *kp; + xfs_inode_t *ip; + xfs_ifork_t *ifp; + int level; + xfs_bmbt_ptr_t *pp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + level = cur->bc_nlevels - 1; + ASSERT(level >= 1); + /* + * Don't deal with the root block needs to be a leaf case. 
+ * We're just going to turn the thing back into extents anyway. + */ + if (level == 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; + } + block = xfs_bmbt_get_block(cur, level, &cbp); + /* + * Give up if the root has multiple children. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) != 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; + } + /* + * Only do this if the next level will fit. + * Then the data must be copied up to the inode, + * instead of freeing the root you free the next level. + */ + cbp = cur->bc_bufs[level - 1]; + cblock = XFS_BUF_TO_BMBT_BLOCK(cbp); + if (INT_GET(cblock->bb_numrecs, ARCH_CONVERT) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; + } + ASSERT(INT_GET(cblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO); + ASSERT(INT_GET(cblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO); + ip = cur->bc_private.b.ip; + ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork); + ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) == + XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes)); + i = (int)(INT_GET(cblock->bb_numrecs, ARCH_CONVERT) - XFS_BMAP_BLOCK_IMAXRECS(level, cur)); + if (i) { + xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork); + block = ifp->if_broot; + } + INT_MOD(block->bb_numrecs, ARCH_CONVERT, i); + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) == INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); + memcpy(kp, ckp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*kp)); + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); + cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(cpp[i], ARCH_CONVERT), level - 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memcpy(pp, cpp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*pp)); + xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1, + cur->bc_private.b.flist, cur->bc_mp); + ip->i_d.di_nblocks--; + XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip, + XFS_TRANS_DQ_BCOUNT, -1L); + xfs_trans_binval(cur->bc_tp, cbp); + cur->bc_bufs[level - 1] = NULL; + INT_MOD(block->bb_level, ARCH_CONVERT, -1); + xfs_trans_log_inode(cur->bc_tp, ip, + XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); + cur->bc_nlevels--; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; +} + +/* + * Log key values from the btree block. + */ +STATIC void +xfs_bmbt_log_keys( + xfs_btree_cur_t *cur, + xfs_buf_t *bp, + int kfirst, + int klast) +{ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_log_keys"; +#endif + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast); + tp = cur->bc_tp; + if (bp) { + xfs_bmbt_block_t *block; + int first; + xfs_bmbt_key_t *kp; + int last; + + block = XFS_BUF_TO_BMBT_BLOCK(bp); + kp = XFS_BMAP_KEY_DADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(tp, bp, first, last); + } else { + xfs_inode_t *ip; + + ip = cur->bc_private.b.ip; + xfs_trans_log_inode(tp, ip, + XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); +} + +/* + * Log pointer values from the btree block. 
+ */ +STATIC void +xfs_bmbt_log_ptrs( + xfs_btree_cur_t *cur, + xfs_buf_t *bp, + int pfirst, + int plast) +{ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_log_ptrs"; +#endif + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast); + tp = cur->bc_tp; + if (bp) { + xfs_bmbt_block_t *block; + int first; + int last; + xfs_bmbt_ptr_t *pp; + + block = XFS_BUF_TO_BMBT_BLOCK(bp); + pp = XFS_BMAP_PTR_DADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(tp, bp, first, last); + } else { + xfs_inode_t *ip; + + ip = cur->bc_private.b.ip; + xfs_trans_log_inode(tp, ip, + XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); +} + +/* + * Lookup the record. The cursor is made to point to it, based on dir. + */ +STATIC int /* error */ +xfs_bmbt_lookup( + xfs_btree_cur_t *cur, + xfs_lookup_t dir, + int *stat) /* success/failure */ +{ + xfs_bmbt_block_t *block=NULL; + xfs_buf_t *bp; + xfs_daddr_t d; + xfs_sfiloff_t diff; + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_lookup"; +#endif + xfs_fsblock_t fsbno=0; + int high; + int i; + int keyno=0; + xfs_bmbt_key_t *kkbase=NULL; + xfs_bmbt_key_t *kkp; + xfs_bmbt_rec_t *krbase=NULL; + xfs_bmbt_rec_t *krp; + int level; + int low; + xfs_mount_t *mp; + xfs_bmbt_ptr_t *pp; + xfs_bmbt_irec_t *rp; + xfs_fileoff_t startoff; + xfs_trans_t *tp; + + XFS_STATS_INC(xfsstats.xs_bmbt_lookup); + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, (int)dir); + tp = cur->bc_tp; + mp = cur->bc_mp; + rp = &cur->bc_rec.b; + for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { + if (level < cur->bc_nlevels - 1) { + d = XFS_FSB_TO_DADDR(mp, fsbno); + bp = cur->bc_bufs[level]; + if (bp && XFS_BUF_ADDR(bp) != d) + bp = (xfs_buf_t *)0; + if (!bp) { + if ((error = xfs_btree_read_bufl(mp, tp, fsbno, + 0, &bp, XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + xfs_btree_setbuf(cur, level, bp); + block = XFS_BUF_TO_BMBT_BLOCK(bp); + if ((error = xfs_btree_check_lblock(cur, block, + level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } else + block = XFS_BUF_TO_BMBT_BLOCK(bp); + } else + block = xfs_bmbt_get_block(cur, level, &bp); + if (diff == 0) + keyno = 1; + else { + if (level > 0) + kkbase = XFS_BMAP_KEY_IADDR(block, 1, cur); + else + krbase = XFS_BMAP_REC_IADDR(block, 1, cur); + low = 1; + if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { + ASSERT(level == 0); + cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + while (low <= high) { + XFS_STATS_INC(xfsstats.xs_bmbt_compare); + keyno = (low + high) >> 1; + if (level > 0) { + kkp = kkbase + keyno - 1; + startoff = INT_GET(kkp->br_startoff, ARCH_CONVERT); + } else { + krp = krbase + keyno - 1; + startoff = xfs_bmbt_disk_get_startoff(krp); + } + diff = (xfs_sfiloff_t) + (startoff - rp->br_startoff); + if (diff < 0) + low = keyno + 1; + else if (diff > 0) + high = keyno - 1; + else + break; + } + } + if (level > 0) { + if (diff > 0 && --keyno < 1) + keyno = 1; + pp = XFS_BMAP_PTR_IADDR(block, keyno, cur); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + fsbno = INT_GET(*pp, ARCH_CONVERT); + cur->bc_ptrs[level] = keyno; + } + } + 
if (dir != XFS_LOOKUP_LE && diff < 0) { + keyno++; + /* + * If ge search and we went off the end of the block, but it's + * not the last block, we're in the wrong block. + */ + if (dir == XFS_LOOKUP_GE && keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && + INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + cur->bc_ptrs[0] = keyno; + if ((error = xfs_bmbt_increment(cur, 0, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + } + else if (dir == XFS_LOOKUP_LE && diff > 0) + keyno--; + cur->bc_ptrs[0] = keyno; + if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + } else { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); + } + return 0; +} + +/* + * Move 1 record left from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_bmbt_lshift( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_lshift"; +#endif +#ifdef DEBUG + int i; /* loop counter */ +#endif + xfs_bmbt_key_t key; /* bmap btree key */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_bmbt_block_t *left; /* left btree block */ + xfs_bmbt_key_t *lkp=NULL; /* left btree key */ + xfs_bmbt_ptr_t *lpp; /* left address pointer */ + int lrecs; /* left record count */ + xfs_bmbt_rec_t *lrp=NULL; /* left record pointer */ + xfs_mount_t *mp; /* file system mount point */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_bmbt_block_t *right; /* right btree block */ + xfs_bmbt_key_t *rkp=NULL; /* right btree key */ + xfs_bmbt_ptr_t *rpp=NULL; /* right address pointer */ + xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */ + int rrecs; /* right record count */ + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + if (level == cur->bc_nlevels - 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + rbp = cur->bc_bufs[level]; + right = XFS_BUF_TO_BMBT_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + if (cur->bc_ptrs[level] <= 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + mp = cur->bc_mp; + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, + &lbp, XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + left = XFS_BUF_TO_BMBT_BLOCK(lbp); + if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; + if (level > 0) { + lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur); + rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); + *lkp = *rkp; + xfs_bmbt_log_keys(cur, lbp, lrecs, lrecs); + lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur); + rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + *lpp = *rpp; /* 
INT_: direct copy */ + xfs_bmbt_log_ptrs(cur, lbp, lrecs, lrecs); + } else { + lrp = XFS_BMAP_REC_IADDR(left, lrecs, cur); + rrp = XFS_BMAP_REC_IADDR(right, 1, cur); + *lrp = *rrp; + xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs); + } + INT_SET(left->bb_numrecs, ARCH_CONVERT, lrecs); + xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); +#ifdef DEBUG + if (level > 0) + xfs_btree_check_key(XFS_BTNUM_BMAP, lkp - 1, lkp); + else + xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp); +#endif + rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; + INT_SET(right->bb_numrecs, ARCH_CONVERT, rrecs); + xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS); + if (level > 0) { +#ifdef DEBUG + for (i = 0; i < rrecs; i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), + level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memmove(rkp, rkp + 1, rrecs * sizeof(*rkp)); + memmove(rpp, rpp + 1, rrecs * sizeof(*rpp)); + xfs_bmbt_log_keys(cur, rbp, 1, rrecs); + xfs_bmbt_log_ptrs(cur, rbp, 1, rrecs); + } else { + memmove(rrp, rrp + 1, rrecs * sizeof(*rrp)); + xfs_bmbt_log_recs(cur, rbp, 1, rrecs); + INT_SET(key.br_startoff, ARCH_CONVERT, + xfs_bmbt_disk_get_startoff(rrp)); + rkp = &key; + } + if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + cur->bc_ptrs[level]--; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + +/* + * Move 1 record right from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_bmbt_rshift( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_rshift"; +#endif + int i; /* loop counter */ + xfs_bmbt_key_t key; /* bmap btree key */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_bmbt_block_t *left; /* left btree block */ + xfs_bmbt_key_t *lkp; /* left btree key */ + xfs_bmbt_ptr_t *lpp; /* left address pointer */ + xfs_bmbt_rec_t *lrp; /* left record pointer */ + xfs_mount_t *mp; /* file system mount point */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_bmbt_block_t *right; /* right btree block */ + xfs_bmbt_key_t *rkp; /* right btree key */ + xfs_bmbt_ptr_t *rpp; /* right address pointer */ + xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */ + struct xfs_btree_cur *tcur; /* temporary btree cursor */ + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + if (level == cur->bc_nlevels - 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + lbp = cur->bc_bufs[level]; + left = XFS_BUF_TO_BMBT_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + mp = cur->bc_mp; + if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, + &rbp, XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + right = XFS_BUF_TO_BMBT_BLOCK(rbp); + if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { + 
XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + if (level > 0) { + lkp = XFS_BMAP_KEY_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lpp = XFS_BMAP_PTR_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); + rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); +#ifdef DEBUG + for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + *rkp = *lkp; + *rpp = *lpp; /* INT_: direct copy */ + xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + } else { + lrp = XFS_BMAP_REC_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rrp = XFS_BMAP_REC_IADDR(right, 1, cur); + memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + *rrp = *lrp; + xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + INT_SET(key.br_startoff, ARCH_CONVERT, + xfs_bmbt_disk_get_startoff(rrp)); + rkp = &key; + } + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); + xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); +#ifdef DEBUG + if (level > 0) + xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1); + else + xfs_btree_check_rec(XFS_BTNUM_BMAP, rrp, rrp + 1); +#endif + xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS); + if ((error = xfs_btree_dup_cursor(cur, &tcur))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_bmbt_increment(tcur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(tcur, ERROR); + goto error1; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_bmbt_updkey(tcur, rkp, level + 1))) { + XFS_BMBT_TRACE_CURSOR(tcur, ERROR); + goto error1; + } + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +error0: + XFS_BMBT_TRACE_CURSOR(cur, ERROR); +error1: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} + +/* + * Determine the extent state. + */ +/* ARGSUSED */ +STATIC xfs_exntst_t +xfs_extent_state( + xfs_filblks_t blks, + int extent_flag) +{ + if (extent_flag) { + ASSERT(blks != 0); /* saved for DMIG */ + return XFS_EXT_UNWRITTEN; + } + return XFS_EXT_NORM; +} + + +/* + * Split cur/level block in half. + * Return new block number and its first record (to be inserted into parent). 
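+ *
+ * [Editorial note, not part of the original patch] The split is not an exact
+ * halving: the new right block starts with numrecs/2 records and, when the
+ * count is odd, the spare record is left on the side the cursor is not in,
+ * so the pending insert lands in the emptier block.  Worked example from the
+ * arithmetic below: with 9 records and bc_ptrs[level] == 3, the right block
+ * receives old records 5..9 and the cursor stays at slot 3 of the left block
+ * (now 4 records); with bc_ptrs[level] == 8 the right block receives records
+ * 6..9 and the cursor moves onto it as slot 3.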
+ */ +STATIC int /* error */ +xfs_bmbt_split( + xfs_btree_cur_t *cur, + int level, + xfs_fsblock_t *bnop, + xfs_bmbt_key_t *keyp, + xfs_btree_cur_t **curp, + int *stat) /* success/failure */ +{ + xfs_alloc_arg_t args; /* block allocation args */ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_split"; +#endif + int i; /* loop counter */ + xfs_fsblock_t lbno; /* left sibling block number */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_bmbt_block_t *left; /* left btree block */ + xfs_bmbt_key_t *lkp; /* left btree key */ + xfs_bmbt_ptr_t *lpp; /* left address pointer */ + xfs_bmbt_rec_t *lrp; /* left record pointer */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_bmbt_block_t *right; /* right btree block */ + xfs_bmbt_key_t *rkp; /* right btree key */ + xfs_bmbt_ptr_t *rpp; /* right address pointer */ + xfs_bmbt_block_t *rrblock; /* right-right btree block */ + xfs_buf_t *rrbp; /* right-right buffer pointer */ + xfs_bmbt_rec_t *rrp; /* right record pointer */ + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, keyp); + args.tp = cur->bc_tp; + args.mp = cur->bc_mp; + lbp = cur->bc_bufs[level]; + lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp)); + left = XFS_BUF_TO_BMBT_BLOCK(lbp); + args.fsbno = cur->bc_private.b.firstblock; + if (args.fsbno == NULLFSBLOCK) { + args.fsbno = lbno; + args.type = XFS_ALLOCTYPE_START_BNO; + } else if (cur->bc_private.b.flist->xbf_low) + args.type = XFS_ALLOCTYPE_FIRST_AG; + else + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.mod = args.minleft = args.alignment = args.total = args.isfl = + args.userdata = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; + if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return XFS_ERROR(ENOSPC); + } + if ((error = xfs_alloc_vextent(&args))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (args.fsbno == NULLFSBLOCK) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + cur->bc_private.b.firstblock = args.fsbno; + cur->bc_private.b.allocated++; + cur->bc_private.b.ip->i_d.di_nblocks++; + xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE); + XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip, + XFS_TRANS_DQ_BCOUNT, 1L); + rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0); + right = XFS_BUF_TO_BMBT_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, left, level, rbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + INT_SET(right->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); + right->bb_level = left->bb_level; /* INT_: direct copy */ + INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); + if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && + cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; + if (level > 0) { + lkp = XFS_BMAP_KEY_IADDR(left, i, cur); + lpp = XFS_BMAP_PTR_IADDR(left, i, cur); + rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); + rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return 
error; + } + } +#endif + memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + keyp->br_startoff = INT_GET(rkp->br_startoff, ARCH_CONVERT); + } else { + lrp = XFS_BMAP_REC_IADDR(left, i, cur); + rrp = XFS_BMAP_REC_IADDR(right, 1, cur); + memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + keyp->br_startoff = xfs_bmbt_disk_get_startoff(rrp); + } + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); + right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ + INT_SET(left->bb_rightsib, ARCH_CONVERT, args.fsbno); + INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); + xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS); + xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + if ((error = xfs_btree_read_bufl(args.mp, args.tp, + INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp); + if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.fsbno); + xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); + } + if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { + xfs_btree_setbuf(cur, level, rbp); + cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + if (level + 1 < cur->bc_nlevels) { + if ((error = xfs_btree_dup_cursor(cur, curp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + (*curp)->bc_ptrs[level + 1]++; + } + *bnop = args.fsbno; + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + + +/* + * Update keys for the record. + */ +STATIC int +xfs_bmbt_updkey( + xfs_btree_cur_t *cur, + xfs_bmbt_key_t *keyp, /* on-disk format */ + int level) +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; +#ifdef DEBUG + int error; +#endif +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_updkey"; +#endif + xfs_bmbt_key_t *kp; + int ptr; + + ASSERT(level >= 1); + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGIK(cur, level, keyp); + for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { + block = xfs_bmbt_get_block(cur, level, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + ptr = cur->bc_ptrs[level]; + kp = XFS_BMAP_KEY_IADDR(block, ptr, cur); + *kp = *keyp; + xfs_bmbt_log_keys(cur, bp, ptr, ptr); + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; +} + +/* + * Convert on-disk form of btree root to in-memory form. 
+ */ +void +xfs_bmdr_to_bmbt( + xfs_bmdr_block_t *dblock, + int dblocklen, + xfs_bmbt_block_t *rblock, + int rblocklen) +{ + int dmxr; + xfs_bmbt_key_t *fkp; + xfs_bmbt_ptr_t *fpp; + xfs_bmbt_key_t *tkp; + xfs_bmbt_ptr_t *tpp; + + INT_SET(rblock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); + rblock->bb_level = dblock->bb_level; /* both in on-disk format */ + ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0); + rblock->bb_numrecs = dblock->bb_numrecs;/* both in on-disk format */ + INT_SET(rblock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); + INT_SET(rblock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); + dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); + fkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); + tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); + fpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); + tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); + dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT); + memcpy(tkp, fkp, sizeof(*fkp) * dmxr); + memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ +} + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_bmbt_decrement( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_decrement"; +#endif + xfs_fsblock_t fsbno; + int lev; + xfs_mount_t *mp; + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + ASSERT(level < cur->bc_nlevels); + if (level < cur->bc_nlevels - 1) + xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); + if (--cur->bc_ptrs[level] > 0) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + block = xfs_bmbt_get_block(cur, level, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + if (--cur->bc_ptrs[lev] > 0) + break; + if (lev < cur->bc_nlevels - 1) + xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); + } + if (lev == cur->bc_nlevels) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + tp = cur->bc_tp; + mp = cur->bc_mp; + for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) { + fsbno = INT_GET(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_BMBT_BLOCK(bp); + if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + +/* + * Delete the record pointed to by cur. 
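+ *
+ * [Editorial note, not part of the original patch] The per-level worker is
+ * xfs_bmbt_delrec() earlier in this file; its *stat value of 2 means the
+ * record was removed and a whole block was freed by the join, so the
+ * separating key/pointer one level up has to go as well.  The loop below
+ * simply calls delrec at successive levels while it keeps answering 2, and
+ * if the final answer is 0 it steps the first level left pointing at slot 0
+ * back by one entry with xfs_bmbt_decrement().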
+ */ +int /* error */ +xfs_bmbt_delete( + xfs_btree_cur_t *cur, + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_delete"; +#endif + int i; + int level; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + for (level = 0, i = 2; i == 2; level++) { + if ((error = xfs_bmbt_delrec(cur, level, &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } + if (i == 0) { + for (level = 1; level < cur->bc_nlevels; level++) { + if (cur->bc_ptrs[level] == 0) { + if ((error = xfs_bmbt_decrement(cur, level, + &i))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + break; + } + } + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = i; + return 0; +} + +/* + * Convert a compressed bmap extent record to an uncompressed form. + * This code must be in sync with the routines xfs_bmbt_get_startoff, + * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state. + */ + +STATIC __inline__ void +__xfs_bmbt_get_all( + __uint64_t l0, + __uint64_t l1, + xfs_bmbt_irec_t *s) +{ + int ext_flag; + xfs_exntst_t st; + + ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN)); + s->br_startoff = ((xfs_fileoff_t)l0 & + XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; +#if XFS_BIG_FILESYSTEMS + s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) | + (((xfs_fsblock_t)l1) >> 21); +#else +#ifdef DEBUG + { + xfs_dfsbno_t b; + + b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) | + (((xfs_dfsbno_t)l1) >> 21); + ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); + s->br_startblock = (xfs_fsblock_t)b; + } +#else /* !DEBUG */ + s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21); +#endif /* DEBUG */ +#endif /* XFS_BIG_FILESYSTEMS */ + s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21)); + /* This is xfs_extent_state() in-line */ + if (ext_flag) { + ASSERT(s->br_blockcount != 0); /* saved for DMIG */ + st = XFS_EXT_UNWRITTEN; + } else + st = XFS_EXT_NORM; + s->br_state = st; +} + +void +xfs_bmbt_get_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s) +{ + __xfs_bmbt_get_all(r->l0, r->l1, s); +} + +/* + * Get the block pointer for the given level of the cursor. + * Fill in the buffer pointer, if applicable. + */ +xfs_bmbt_block_t * +xfs_bmbt_get_block( + xfs_btree_cur_t *cur, + int level, + xfs_buf_t **bpp) +{ + xfs_ifork_t *ifp; + xfs_bmbt_block_t *rval; + + if (level < cur->bc_nlevels - 1) { + *bpp = cur->bc_bufs[level]; + rval = XFS_BUF_TO_BMBT_BLOCK(*bpp); + } else { + *bpp = 0; + ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, + cur->bc_private.b.whichfork); + rval = ifp->if_broot; + } + return rval; +} + +/* + * Extract the blockcount field from an in memory bmap extent record. + */ +xfs_filblks_t +xfs_bmbt_get_blockcount( + xfs_bmbt_rec_t *r) +{ + return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21)); +} + +/* + * Extract the startblock field from an in memory bmap extent record. + */ +xfs_fsblock_t +xfs_bmbt_get_startblock( + xfs_bmbt_rec_t *r) +{ +#if XFS_BIG_FILESYSTEMS + return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) | + (((xfs_fsblock_t)r->l1) >> 21); +#else +#ifdef DEBUG + xfs_dfsbno_t b; + + b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) | + (((xfs_dfsbno_t)r->l1) >> 21); + ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); + return (xfs_fsblock_t)b; +#else /* !DEBUG */ + return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21); +#endif /* DEBUG */ +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Extract the startoff field from an in memory bmap extent record. 
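+ *
+ * [Editorial note, not part of the original patch] Layout reminder for the
+ * 64-bit record form these getters decode (the full description is in
+ * xfs_bmap_btree.h later in this patch): bit 63 of l0 is the extent-state
+ * flag, bits 9..62 of l0 hold the 54-bit startoff, bits 0..8 of l0 plus bits
+ * 21..63 of l1 hold the 52-bit startblock, and bits 0..20 of l1 hold the
+ * 21-bit blockcount -- which is why the code above and below shifts by 9,
+ * by 43 and 21, and masks with XFS_MASK64LO(21).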
+ */ +xfs_fileoff_t +xfs_bmbt_get_startoff( + xfs_bmbt_rec_t *r) +{ + return ((xfs_fileoff_t)r->l0 & + XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; +} + +xfs_exntst_t +xfs_bmbt_get_state( + xfs_bmbt_rec_t *r) +{ + int ext_flag; + + ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN)); + return xfs_extent_state(xfs_bmbt_get_blockcount(r), + ext_flag); +} + +#if ARCH_CONVERT != ARCH_NOCONVERT +/* Endian flipping versions of the bmbt extraction functions */ +void +xfs_bmbt_disk_get_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s) +{ + __uint64_t l0, l1; + + l0 = INT_GET(r->l0, ARCH_CONVERT); + l1 = INT_GET(r->l1, ARCH_CONVERT); + + __xfs_bmbt_get_all(l0, l1, s); +} + +/* + * Extract the blockcount field from an on disk bmap extent record. + */ +xfs_filblks_t +xfs_bmbt_disk_get_blockcount( + xfs_bmbt_rec_t *r) +{ + return (xfs_filblks_t)(INT_GET(r->l1, ARCH_CONVERT) & XFS_MASK64LO(21)); +} + +/* + * Extract the startblock field from an on disk bmap extent record. + */ +xfs_fsblock_t +xfs_bmbt_disk_get_startblock( + xfs_bmbt_rec_t *r) +{ +#if XFS_BIG_FILESYSTEMS + return (((xfs_fsblock_t)INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(9)) << 43) | + (((xfs_fsblock_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21); +#else +#ifdef DEBUG + xfs_dfsbno_t b; + + b = (((xfs_dfsbno_t)INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(9)) << 43) | + (((xfs_dfsbno_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21); + ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); + return (xfs_fsblock_t)b; +#else /* !DEBUG */ + return (xfs_fsblock_t)(((xfs_dfsbno_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21); +#endif /* DEBUG */ +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Extract the startoff field from a disk format bmap extent record. + */ +xfs_fileoff_t +xfs_bmbt_disk_get_startoff( + xfs_bmbt_rec_t *r) +{ + return ((xfs_fileoff_t)INT_GET(r->l0, ARCH_CONVERT) & + XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; +} + +xfs_exntst_t +xfs_bmbt_disk_get_state( + xfs_bmbt_rec_t *r) +{ + int ext_flag; + + ext_flag = (int)((INT_GET(r->l0, ARCH_CONVERT)) >> (64 - BMBT_EXNTFLAG_BITLEN)); + return xfs_extent_state(xfs_bmbt_disk_get_blockcount(r), + ext_flag); +} +#endif + + +/* + * Increment cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. 
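+ *
+ * [Editorial note, not part of the original patch] In outline: bump
+ * bc_ptrs[level]; if that runs past bb_numrecs and the block has a right
+ * sibling, walk up until an ancestor still has a following entry (failing if
+ * even the root is exhausted), then walk back down the new path, reading
+ * each child block and resetting bc_ptrs to 1.  xfs_bmbt_decrement() above
+ * is the mirror image, resetting bc_ptrs to bb_numrecs on the way back down.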
+ */ +int /* error */ +xfs_bmbt_increment( + xfs_btree_cur_t *cur, + int level, + int *stat) /* success/failure */ +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_increment"; +#endif + xfs_fsblock_t fsbno; + int lev; + xfs_mount_t *mp; + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGI(cur, level); + ASSERT(level < cur->bc_nlevels); + if (level < cur->bc_nlevels - 1) + xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); + block = xfs_bmbt_get_block(cur, level, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; + } + if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + block = xfs_bmbt_get_block(cur, lev, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) + break; + if (lev < cur->bc_nlevels - 1) + xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); + } + if (lev == cur->bc_nlevels) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + tp = cur->bc_tp; + mp = cur->bc_mp; + for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) { + fsbno = INT_GET(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp, + XFS_BMAP_BTREE_REF))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_BMBT_BLOCK(bp); + if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + cur->bc_ptrs[lev] = 1; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 1; + return 0; +} + +/* + * Insert the current record at the point referenced by cur. 
+ */ +int /* error */ +xfs_bmbt_insert( + xfs_btree_cur_t *cur, + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_insert"; +#endif + int i; + int level; + xfs_fsblock_t nbno; + xfs_btree_cur_t *ncur; + xfs_bmbt_rec_t nrec; + xfs_btree_cur_t *pcur; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + level = 0; + nbno = NULLFSBLOCK; + xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b); + ncur = (xfs_btree_cur_t *)0; + pcur = cur; + do { + if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur, + &i))) { + if (pcur != cur) + xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) { + cur->bc_nlevels = pcur->bc_nlevels; + cur->bc_private.b.allocated += + pcur->bc_private.b.allocated; + pcur->bc_private.b.allocated = 0; + ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) || + (cur->bc_private.b.ip->i_d.di_flags & + XFS_DIFLAG_REALTIME)); + cur->bc_private.b.firstblock = + pcur->bc_private.b.firstblock; + ASSERT(cur->bc_private.b.flist == + pcur->bc_private.b.flist); + xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); + } + if (ncur) { + pcur = ncur; + ncur = (xfs_btree_cur_t *)0; + } + } while (nbno != NULLFSBLOCK); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = i; + return 0; +error0: + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; +} + +/* + * Log fields from the btree block header. + */ +void +xfs_bmbt_log_block( + xfs_btree_cur_t *cur, + xfs_buf_t *bp, + int fields) +{ + int first; +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_log_block"; +#endif + int last; + xfs_trans_t *tp; + static const short offsets[] = { + offsetof(xfs_bmbt_block_t, bb_magic), + offsetof(xfs_bmbt_block_t, bb_level), + offsetof(xfs_bmbt_block_t, bb_numrecs), + offsetof(xfs_bmbt_block_t, bb_leftsib), + offsetof(xfs_bmbt_block_t, bb_rightsib), + sizeof(xfs_bmbt_block_t) + }; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGBI(cur, bp, fields); + tp = cur->bc_tp; + if (bp) { + xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, + &last); + xfs_trans_log_buf(tp, bp, first, last); + } else + xfs_trans_log_inode(tp, cur->bc_private.b.ip, + XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); +} + +/* + * Log record values from the btree block. 
+ */ +void +xfs_bmbt_log_recs( + xfs_btree_cur_t *cur, + xfs_buf_t *bp, + int rfirst, + int rlast) +{ + xfs_bmbt_block_t *block; + int first; +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_log_recs"; +#endif + int last; + xfs_bmbt_rec_t *rp; + xfs_trans_t *tp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast); + ASSERT(bp); + tp = cur->bc_tp; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + rp = XFS_BMAP_REC_DADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(tp, bp, first, last); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); +} + +int /* error */ +xfs_bmbt_lookup_eq( + xfs_btree_cur_t *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + int *stat) /* success/failure */ +{ + cur->bc_rec.b.br_startoff = off; + cur->bc_rec.b.br_startblock = bno; + cur->bc_rec.b.br_blockcount = len; + return xfs_bmbt_lookup(cur, XFS_LOOKUP_EQ, stat); +} + +int /* error */ +xfs_bmbt_lookup_ge( + xfs_btree_cur_t *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + int *stat) /* success/failure */ +{ + cur->bc_rec.b.br_startoff = off; + cur->bc_rec.b.br_startblock = bno; + cur->bc_rec.b.br_blockcount = len; + return xfs_bmbt_lookup(cur, XFS_LOOKUP_GE, stat); +} + +int /* error */ +xfs_bmbt_lookup_le( + xfs_btree_cur_t *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + int *stat) /* success/failure */ +{ + cur->bc_rec.b.br_startoff = off; + cur->bc_rec.b.br_startblock = bno; + cur->bc_rec.b.br_blockcount = len; + return xfs_bmbt_lookup(cur, XFS_LOOKUP_LE, stat); +} + +/* + * Give the bmap btree a new root block. Copy the old broot contents + * down into a real block and make the broot point to it. + */ +int /* error */ +xfs_bmbt_newroot( + xfs_btree_cur_t *cur, /* btree cursor */ + int *logflags, /* logging flags for inode */ + int *stat) /* return status - 0 fail */ +{ + xfs_alloc_arg_t args; /* allocation arguments */ + xfs_bmbt_block_t *block; /* bmap btree block */ + xfs_buf_t *bp; /* buffer for block */ + xfs_bmbt_block_t *cblock; /* child btree block */ + xfs_bmbt_key_t *ckp; /* child key pointer */ + xfs_bmbt_ptr_t *cpp; /* child ptr pointer */ + int error; /* error return code */ +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_newroot"; +#endif +#ifdef DEBUG + int i; /* loop counter */ +#endif + xfs_bmbt_key_t *kp; /* pointer to bmap btree key */ + int level; /* btree level */ + xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + level = cur->bc_nlevels - 1; + block = xfs_bmbt_get_block(cur, level, &bp); + /* + * Copy the root into a real block. 
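+ *
+ * [Editorial note, not part of the original patch] In outline,
+ * xfs_bmbt_newroot() allocates one new btree block, copies the current
+ * inode-fork root (broot) into it wholesale, then shrinks the broot to a
+ * single key/pointer referring to that block, bumps bb_level and
+ * bc_nlevels, and does all the logging once the tree is consistent.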
+ */ + args.mp = cur->bc_mp; + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); + args.tp = cur->bc_tp; + args.fsbno = cur->bc_private.b.firstblock; + args.mod = args.minleft = args.alignment = args.total = args.isfl = + args.userdata = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; + if (args.fsbno == NULLFSBLOCK) { +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + args.fsbno = INT_GET(*pp, ARCH_CONVERT); + args.type = XFS_ALLOCTYPE_START_BNO; + } else if (args.wasdel) + args.type = XFS_ALLOCTYPE_FIRST_AG; + else + args.type = XFS_ALLOCTYPE_NEAR_BNO; + if ((error = xfs_alloc_vextent(&args))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + if (args.fsbno == NULLFSBLOCK) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + cur->bc_private.b.firstblock = args.fsbno; + cur->bc_private.b.allocated++; + cur->bc_private.b.ip->i_d.di_nblocks++; + XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip, + XFS_TRANS_DQ_BCOUNT, 1L); + bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0); + cblock = XFS_BUF_TO_BMBT_BLOCK(bp); + *cblock = *block; + INT_MOD(block->bb_level, ARCH_CONVERT, +1); + INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); + cur->bc_nlevels++; + cur->bc_ptrs[level + 1] = 1; + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); + memcpy(ckp, kp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*kp)); + cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + } +#endif + memcpy(cpp, pp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*pp)); +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)args.fsbno, + level))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + INT_SET(*pp, ARCH_CONVERT, args.fsbno); + xfs_iroot_realloc(cur->bc_private.b.ip, 1 - INT_GET(cblock->bb_numrecs, ARCH_CONVERT), + cur->bc_private.b.whichfork); + xfs_btree_setbuf(cur, level, bp); + /* + * Do all this logging at the end so that + * the root is at the right level. + */ + xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS); + xfs_bmbt_log_keys(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); + xfs_bmbt_log_ptrs(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + *logflags |= + XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork); + *stat = 1; + return 0; +} + +/* + * Set all the fields in a bmap extent record from the uncompressed form. + */ +void +xfs_bmbt_set_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s) +{ + int extent_flag; + + ASSERT((s->br_state == XFS_EXT_NORM) || + (s->br_state == XFS_EXT_UNWRITTEN)); + extent_flag = (s->br_state == XFS_EXT_NORM) ? 
0 : 1; + ASSERT((s->br_startoff & XFS_MASK64HI(9)) == 0); + ASSERT((s->br_blockcount & XFS_MASK64HI(43)) == 0); +#if XFS_BIG_FILESYSTEMS + ASSERT((s->br_startblock & XFS_MASK64HI(12)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9) | + ((xfs_bmbt_rec_base_t)s->br_startblock >> 43); + r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(s->br_startblock)) { + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9) | + (xfs_bmbt_rec_base_t)XFS_MASK64LO(9); + r->l1 = XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } else { + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9); + r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Set all the fields in a bmap extent record from the arguments. + */ +void +xfs_bmbt_set_allf( + xfs_bmbt_rec_t *r, + xfs_fileoff_t o, + xfs_fsblock_t b, + xfs_filblks_t c, + xfs_exntst_t v) +{ + int extent_flag; + + ASSERT((v == XFS_EXT_NORM) || (v == XFS_EXT_UNWRITTEN)); + extent_flag = (v == XFS_EXT_NORM) ? 0 : 1; + ASSERT((o & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0); + ASSERT((c & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); +#if XFS_BIG_FILESYSTEMS + ASSERT((b & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9) | + ((xfs_bmbt_rec_base_t)b >> 43); + r->l1 = ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(b)) { + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9) | + (xfs_bmbt_rec_base_t)XFS_MASK64LO(9); + r->l1 = XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } else { + r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9); + r->l1 = ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} + +#if ARCH_CONVERT != ARCH_NOCONVERT +/* + * Set all the fields in a bmap extent record from the uncompressed form. + */ +void +xfs_bmbt_disk_set_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s) +{ + int extent_flag; + + ASSERT((s->br_state == XFS_EXT_NORM) || + (s->br_state == XFS_EXT_UNWRITTEN)); + extent_flag = (s->br_state == XFS_EXT_NORM) ? 
0 : 1; + ASSERT((s->br_startoff & XFS_MASK64HI(9)) == 0); + ASSERT((s->br_blockcount & XFS_MASK64HI(43)) == 0); +#if XFS_BIG_FILESYSTEMS + ASSERT((s->br_startblock & XFS_MASK64HI(12)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9) | + ((xfs_bmbt_rec_base_t)s->br_startblock >> 43)); + INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(s->br_startblock)) { + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9) | + (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)); + INT_SET(r->l1, ARCH_CONVERT, XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); + } else { + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)s->br_startoff << 9)); + INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)s->br_startblock << 21) | + ((xfs_bmbt_rec_base_t)s->br_blockcount & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Set all the fields in a disk format bmap extent record from the arguments. + */ +void +xfs_bmbt_disk_set_allf( + xfs_bmbt_rec_t *r, + xfs_fileoff_t o, + xfs_fsblock_t b, + xfs_filblks_t c, + xfs_exntst_t v) +{ + int extent_flag; + + ASSERT((v == XFS_EXT_NORM) || (v == XFS_EXT_UNWRITTEN)); + extent_flag = (v == XFS_EXT_NORM) ? 0 : 1; + ASSERT((o & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0); + ASSERT((c & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); +#if XFS_BIG_FILESYSTEMS + ASSERT((b & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9) | + ((xfs_bmbt_rec_base_t)b >> 43)); + INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(b)) { + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9) | + (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)); + INT_SET(r->l1, ARCH_CONVERT, XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); + } else { + INT_SET(r->l0, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)extent_flag << 63) | + ((xfs_bmbt_rec_base_t)o << 9)); + INT_SET(r->l1, ARCH_CONVERT, ((xfs_bmbt_rec_base_t)b << 21) | + ((xfs_bmbt_rec_base_t)c & + (xfs_bmbt_rec_base_t)XFS_MASK64LO(21))); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} +#endif + +/* + * Set the blockcount field in a bmap extent record. + */ +void +xfs_bmbt_set_blockcount( + xfs_bmbt_rec_t *r, + xfs_filblks_t v) +{ + ASSERT((v & XFS_MASK64HI(43)) == 0); + r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) | + (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21)); +} + +/* + * Set the startblock field in a bmap extent record. 
+ */ +void +xfs_bmbt_set_startblock( + xfs_bmbt_rec_t *r, + xfs_fsblock_t v) +{ +#if XFS_BIG_FILESYSTEMS + ASSERT((v & XFS_MASK64HI(12)) == 0); +#endif /* XFS_BIG_FILESYSTEMS */ +#if XFS_BIG_FILESYSTEMS + r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) | + (xfs_bmbt_rec_base_t)(v >> 43); + r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) | + (xfs_bmbt_rec_base_t)(v << 21); +#else /* !XFS_BIG_FILESYSTEMS */ + if (ISNULLSTARTBLOCK(v)) { + r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9); + r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) | + ((xfs_bmbt_rec_base_t)v << 21) | + (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } else { + r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9); + r->l1 = ((xfs_bmbt_rec_base_t)v << 21) | + (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)); + } +#endif /* XFS_BIG_FILESYSTEMS */ +} + +/* + * Set the startoff field in a bmap extent record. + */ +void +xfs_bmbt_set_startoff( + xfs_bmbt_rec_t *r, + xfs_fileoff_t v) +{ + ASSERT((v & XFS_MASK64HI(9)) == 0); + r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) | + ((xfs_bmbt_rec_base_t)v << 9) | + (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9)); +} + +/* + * Set the extent state field in a bmap extent record. + */ +void +xfs_bmbt_set_state( + xfs_bmbt_rec_t *r, + xfs_exntst_t v) +{ + ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN); + if (v == XFS_EXT_NORM) + r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN); + else + r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN); +} + +/* + * Convert in-memory form of btree root to on-disk form. + */ +void +xfs_bmbt_to_bmdr( + xfs_bmbt_block_t *rblock, + int rblocklen, + xfs_bmdr_block_t *dblock, + int dblocklen) +{ + int dmxr; + xfs_bmbt_key_t *fkp; + xfs_bmbt_ptr_t *fpp; + xfs_bmbt_key_t *tkp; + xfs_bmbt_ptr_t *tpp; + + ASSERT(INT_GET(rblock->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC); + ASSERT(INT_GET(rblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO); + ASSERT(INT_GET(rblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO); + ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0); + dblock->bb_level = rblock->bb_level; /* both in on-disk format */ + dblock->bb_numrecs = rblock->bb_numrecs;/* both in on-disk format */ + dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); + fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); + tkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); + fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); + tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); + dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT); + memcpy(tkp, fkp, sizeof(*fkp) * dmxr); + memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ +} + +/* + * Update the record to the passed values. 
+ */ +int +xfs_bmbt_update( + xfs_btree_cur_t *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + xfs_exntst_t state) +{ + xfs_bmbt_block_t *block; + xfs_buf_t *bp; + int error; +#ifdef XFS_BMBT_TRACE + static char fname[] = "xfs_bmbt_update"; +#endif + xfs_bmbt_key_t key; + int ptr; + xfs_bmbt_rec_t *rp; + + XFS_BMBT_TRACE_CURSOR(cur, ENTRY); + XFS_BMBT_TRACE_ARGFFFI(cur, (xfs_dfiloff_t)off, (xfs_dfsbno_t)bno, + (xfs_dfilblks_t)len, (int)state); + block = xfs_bmbt_get_block(cur, 0, &bp); +#ifdef DEBUG + if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } +#endif + ptr = cur->bc_ptrs[0]; + rp = XFS_BMAP_REC_IADDR(block, ptr, cur); + xfs_bmbt_disk_set_allf(rp, off, bno, len, state); + xfs_bmbt_log_recs(cur, bp, ptr, ptr); + if (ptr > 1) { + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; + } + INT_SET(key.br_startoff, ARCH_CONVERT, off); + if ((error = xfs_bmbt_updkey(cur, &key, 1))) { + XFS_BMBT_TRACE_CURSOR(cur, ERROR); + return error; + } + XFS_BMBT_TRACE_CURSOR(cur, EXIT); + return 0; +} + +/* + * Check an extent list, which has just been read, for + * any bit in the extent flag field. ASSERT on debug + * kernels, as this condition should not occur. + * Return an error condition (1) if any flags found, + * otherwise return 0. + */ + +int +xfs_check_nostate_extents( + xfs_bmbt_rec_t *ep, + xfs_extnum_t num) +{ + for (; num > 0; num--, ep++) { + if ((ep->l0 >> + (64 - BMBT_EXNTFLAG_BITLEN)) != 0) { + ASSERT(0); + return 1; + } + } + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_bmap_btree.h linux.22-ac2/fs/xfs/xfs_bmap_btree.h --- linux.vanilla/fs/xfs/xfs_bmap_btree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_bmap_btree.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,713 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BMAP_BTREE_H__ +#define __XFS_BMAP_BTREE_H__ + +#define XFS_BMAP_MAGIC 0x424d4150 /* 'BMAP' */ + +struct xfs_btree_cur; +struct xfs_btree_lblock; +struct xfs_mount; +struct xfs_inode; + +/* + * Bmap root header, on-disk form only. 
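+ *
+ * [Editorial note, not part of the original patch] This root lives inside
+ * the inode's data/attr fork rather than in its own disk block, so only
+ * bb_level and bb_numrecs are stored; the magic number and the left/right
+ * sibling pointers of a full xfs_bmbt_block_t are filled back in (as
+ * XFS_BMAP_MAGIC and NULLDFSBNO) by xfs_bmdr_to_bmbt() earlier in this
+ * patch, and xfs_bmbt_to_bmdr() asserts exactly those values on the way
+ * back out.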
+ */ +typedef struct xfs_bmdr_block +{ + __uint16_t bb_level; /* 0 is a leaf */ + __uint16_t bb_numrecs; /* current # of data records */ +} xfs_bmdr_block_t; + +/* + * Bmap btree record and extent descriptor. + * For 32-bit kernels, + * l0:31 is an extent flag (value 1 indicates non-normal). + * l0:0-30 and l1:9-31 are startoff. + * l1:0-8, l2:0-31, and l3:21-31 are startblock. + * l3:0-20 are blockcount. + * For 64-bit kernels, + * l0:63 is an extent flag (value 1 indicates non-normal). + * l0:9-62 are startoff. + * l0:0-8 and l1:21-63 are startblock. + * l1:0-20 are blockcount. + */ + +#if __BYTE_ORDER == __LITTLE_ENDIAN + +#define BMBT_TOTAL_BITLEN 128 /* 128 bits, 16 bytes */ +#define BMBT_EXNTFLAG_BITOFF 0 +#define BMBT_EXNTFLAG_BITLEN 1 +#define BMBT_STARTOFF_BITOFF (BMBT_EXNTFLAG_BITOFF + BMBT_EXNTFLAG_BITLEN) +#define BMBT_STARTOFF_BITLEN 54 +#define BMBT_STARTBLOCK_BITOFF (BMBT_STARTOFF_BITOFF + BMBT_STARTOFF_BITLEN) +#define BMBT_STARTBLOCK_BITLEN 52 +#define BMBT_BLOCKCOUNT_BITOFF \ + (BMBT_STARTBLOCK_BITOFF + BMBT_STARTBLOCK_BITLEN) +#define BMBT_BLOCKCOUNT_BITLEN (BMBT_TOTAL_BITLEN - BMBT_BLOCKCOUNT_BITOFF) + +#else + +#define BMBT_TOTAL_BITLEN 128 /* 128 bits, 16 bytes */ +#define BMBT_EXNTFLAG_BITOFF 63 +#define BMBT_EXNTFLAG_BITLEN 1 +#define BMBT_STARTOFF_BITOFF (BMBT_EXNTFLAG_BITOFF - BMBT_STARTOFF_BITLEN) +#define BMBT_STARTOFF_BITLEN 54 +#define BMBT_STARTBLOCK_BITOFF 85 /* 128 - 43 (other 9 is in first word) */ +#define BMBT_STARTBLOCK_BITLEN 52 +#define BMBT_BLOCKCOUNT_BITOFF 64 /* Start of second 64 bit container */ +#define BMBT_BLOCKCOUNT_BITLEN 21 + +#endif + + +#define BMBT_USE_64 1 + +typedef struct xfs_bmbt_rec_32 +{ + __uint32_t l0, l1, l2, l3; +} xfs_bmbt_rec_32_t; +typedef struct xfs_bmbt_rec_64 +{ + __uint64_t l0, l1; +} xfs_bmbt_rec_64_t; + +typedef __uint64_t xfs_bmbt_rec_base_t; /* use this for casts */ +typedef xfs_bmbt_rec_64_t xfs_bmbt_rec_t, xfs_bmdr_rec_t; + +/* + * Values and macros for delayed-allocation startblock fields. + */ +#define STARTBLOCKVALBITS 17 +#define STARTBLOCKMASKBITS (15 + XFS_BIG_FILESYSTEMS * 20) +#define DSTARTBLOCKMASKBITS (15 + 20) +#define STARTBLOCKMASK \ + (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) +#define DSTARTBLOCKMASK \ + (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_ISNULLSTARTBLOCK) +int isnullstartblock(xfs_fsblock_t x); +#define ISNULLSTARTBLOCK(x) isnullstartblock(x) +#else +#define ISNULLSTARTBLOCK(x) (((x) & STARTBLOCKMASK) == STARTBLOCKMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_ISNULLDSTARTBLOCK) +int isnulldstartblock(xfs_dfsbno_t x); +#define ISNULLDSTARTBLOCK(x) isnulldstartblock(x) +#else +#define ISNULLDSTARTBLOCK(x) (((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_NULLSTARTBLOCK) +xfs_fsblock_t nullstartblock(int k); +#define NULLSTARTBLOCK(k) nullstartblock(k) +#else +#define NULLSTARTBLOCK(k) \ + ((ASSERT(k < (1 << STARTBLOCKVALBITS))), (STARTBLOCKMASK | (k))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_STARTBLOCKVAL) +xfs_filblks_t startblockval(xfs_fsblock_t x); +#define STARTBLOCKVAL(x) startblockval(x) +#else +#define STARTBLOCKVAL(x) ((xfs_filblks_t)((x) & ~STARTBLOCKMASK)) +#endif + +/* + * Possible extent formats. + */ +typedef enum { + XFS_EXTFMT_NOSTATE = 0, + XFS_EXTFMT_HASSTATE +} xfs_exntfmt_t; + +/* + * Possible extent states. 
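+ *
+ * Only XFS_EXT_NORM and XFS_EXT_UNWRITTEN can be recorded in the single
+ * on-disk extent-flag bit described above; xfs_bmbt_set_state() asserts
+ * exactly that.  Flag value 0 means a normal extent, 1 means unwritten.
+ * A sketch of recovering the state from a 64-bit record r, mirroring
+ * the shift used by xfs_check_nostate_extents():
+ *
+ *	state = (r->l0 >> (64 - BMBT_EXNTFLAG_BITLEN)) ?
+ *			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;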
+ */ +typedef enum { + XFS_EXT_NORM, XFS_EXT_UNWRITTEN, + XFS_EXT_DMAPI_OFFLINE +} xfs_exntst_t; + +/* + * Extent state and extent format macros. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXTFMT_INODE ) +xfs_exntfmt_t xfs_extfmt_inode(struct xfs_inode *ip); +#define XFS_EXTFMT_INODE(x) xfs_extfmt_inode(x) +#else +#define XFS_EXTFMT_INODE(x) \ + (XFS_SB_VERSION_HASEXTFLGBIT(&((x)->i_mount->m_sb)) ? \ + XFS_EXTFMT_HASSTATE : XFS_EXTFMT_NOSTATE) +#endif +#define ISUNWRITTEN(x) ((x)->br_state == XFS_EXT_UNWRITTEN) + +/* + * Incore version of above. + */ +typedef struct xfs_bmbt_irec +{ + xfs_fileoff_t br_startoff; /* starting file offset */ + xfs_fsblock_t br_startblock; /* starting block number */ + xfs_filblks_t br_blockcount; /* number of blocks */ + xfs_exntst_t br_state; /* extent state */ +} xfs_bmbt_irec_t; + +/* + * Key structure for non-leaf levels of the tree. + */ +typedef struct xfs_bmbt_key +{ + xfs_dfiloff_t br_startoff; /* starting file offset */ +} xfs_bmbt_key_t, xfs_bmdr_key_t; + +typedef xfs_dfsbno_t xfs_bmbt_ptr_t, xfs_bmdr_ptr_t; /* btree pointer type */ + /* btree block header type */ +typedef struct xfs_btree_lblock xfs_bmbt_block_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_BMBT_BLOCK) +xfs_bmbt_block_t *xfs_buf_to_bmbt_block(struct xfs_buf *bp); +#define XFS_BUF_TO_BMBT_BLOCK(bp) xfs_buf_to_bmbt_block(bp) +#else +#define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)(XFS_BUF_PTR(bp))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_RBLOCK_DSIZE) +int xfs_bmap_rblock_dsize(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_RBLOCK_DSIZE(lev,cur) xfs_bmap_rblock_dsize(lev,cur) +#else +#define XFS_BMAP_RBLOCK_DSIZE(lev,cur) ((cur)->bc_private.b.forksize) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_RBLOCK_ISIZE) +int xfs_bmap_rblock_isize(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_RBLOCK_ISIZE(lev,cur) xfs_bmap_rblock_isize(lev,cur) +#else +#define XFS_BMAP_RBLOCK_ISIZE(lev,cur) \ + ((int)XFS_IFORK_PTR((cur)->bc_private.b.ip, \ + (cur)->bc_private.b.whichfork)->if_broot_bytes) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_IBLOCK_SIZE) +int xfs_bmap_iblock_size(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_IBLOCK_SIZE(lev,cur) xfs_bmap_iblock_size(lev,cur) +#else +#define XFS_BMAP_IBLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_DSIZE) +int xfs_bmap_block_dsize(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_DSIZE(lev,cur) xfs_bmap_block_dsize(lev,cur) +#else +#define XFS_BMAP_BLOCK_DSIZE(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BMAP_RBLOCK_DSIZE(lev,cur) : \ + XFS_BMAP_IBLOCK_SIZE(lev,cur)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_ISIZE) +int xfs_bmap_block_isize(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_ISIZE(lev,cur) xfs_bmap_block_isize(lev,cur) +#else +#define XFS_BMAP_BLOCK_ISIZE(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BMAP_RBLOCK_ISIZE(lev,cur) : \ + XFS_BMAP_IBLOCK_SIZE(lev,cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_DMAXRECS) +int xfs_bmap_block_dmaxrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) xfs_bmap_block_dmaxrecs(lev,cur) +#else +#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? 
\ + XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \ + xfs_bmdr, (lev) == 0) : \ + ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_IMAXRECS) +int xfs_bmap_block_imaxrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur) xfs_bmap_block_imaxrecs(lev,cur) +#else +#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur), \ + xfs_bmbt, (lev) == 0) : \ + ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_DMINRECS) +int xfs_bmap_block_dminrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_DMINRECS(lev,cur) xfs_bmap_block_dminrecs(lev,cur) +#else +#define XFS_BMAP_BLOCK_DMINRECS(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \ + xfs_bmdr, (lev) == 0) : \ + ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BLOCK_IMINRECS) +int xfs_bmap_block_iminrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_BMAP_BLOCK_IMINRECS(lev,cur) xfs_bmap_block_iminrecs(lev,cur) +#else +#define XFS_BMAP_BLOCK_IMINRECS(lev,cur) \ + ((lev) == (cur)->bc_nlevels - 1 ? \ + XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur), \ + xfs_bmbt, (lev) == 0) : \ + ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_REC_DADDR) +xfs_bmbt_rec_t * +xfs_bmap_rec_daddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_REC_DADDR(bb,i,cur) xfs_bmap_rec_daddr(bb,i,cur) +#else +#define XFS_BMAP_REC_DADDR(bb,i,cur) \ + XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_DSIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_REC_IADDR) +xfs_bmbt_rec_t * +xfs_bmap_rec_iaddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_REC_IADDR(bb,i,cur) xfs_bmap_rec_iaddr(bb,i,cur) +#else +#define XFS_BMAP_REC_IADDR(bb,i,cur) \ + XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_ISIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_KEY_DADDR) +xfs_bmbt_key_t * +xfs_bmap_key_daddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_KEY_DADDR(bb,i,cur) xfs_bmap_key_daddr(bb,i,cur) +#else +#define XFS_BMAP_KEY_DADDR(bb,i,cur) \ + XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_DSIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_KEY_IADDR) +xfs_bmbt_key_t * +xfs_bmap_key_iaddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_KEY_IADDR(bb,i,cur) xfs_bmap_key_iaddr(bb,i,cur) +#else +#define XFS_BMAP_KEY_IADDR(bb,i,cur) \ + XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_ISIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_PTR_DADDR) +xfs_bmbt_ptr_t * +xfs_bmap_ptr_daddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define 
XFS_BMAP_PTR_DADDR(bb,i,cur) xfs_bmap_ptr_daddr(bb,i,cur) +#else +#define XFS_BMAP_PTR_DADDR(bb,i,cur) \ + XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_DSIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_PTR_IADDR) +xfs_bmbt_ptr_t * +xfs_bmap_ptr_iaddr(xfs_bmbt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_BMAP_PTR_IADDR(bb,i,cur) xfs_bmap_ptr_iaddr(bb,i,cur) +#else +#define XFS_BMAP_PTR_IADDR(bb,i,cur) \ + XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_ISIZE( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ + xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ + INT_GET((bb)->bb_level, ARCH_CONVERT), cur)) +#endif + +/* + * These are to be used when we know the size of the block and + * we don't have a cursor. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_REC_ADDR) +xfs_bmbt_rec_t *xfs_bmap_broot_rec_addr(xfs_bmbt_block_t *bb, int i, int sz); +#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) xfs_bmap_broot_rec_addr(bb,i,sz) +#else +#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) \ + XFS_BTREE_REC_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_KEY_ADDR) +xfs_bmbt_key_t *xfs_bmap_broot_key_addr(xfs_bmbt_block_t *bb, int i, int sz); +#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) xfs_bmap_broot_key_addr(bb,i,sz) +#else +#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) \ + XFS_BTREE_KEY_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_PTR_ADDR) +xfs_bmbt_ptr_t *xfs_bmap_broot_ptr_addr(xfs_bmbt_block_t *bb, int i, int sz); +#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) xfs_bmap_broot_ptr_addr(bb,i,sz) +#else +#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \ + XFS_BTREE_PTR_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_NUMRECS) +int xfs_bmap_broot_numrecs(xfs_bmdr_block_t *bb); +#define XFS_BMAP_BROOT_NUMRECS(bb) xfs_bmap_broot_numrecs(bb) +#else +#define XFS_BMAP_BROOT_NUMRECS(bb) (INT_GET((bb)->bb_numrecs, ARCH_CONVERT)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_MAXRECS) +int xfs_bmap_broot_maxrecs(int sz); +#define XFS_BMAP_BROOT_MAXRECS(sz) xfs_bmap_broot_maxrecs(sz) +#else +#define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_SPACE_CALC) +int xfs_bmap_broot_space_calc(int nrecs); +#define XFS_BMAP_BROOT_SPACE_CALC(nrecs) xfs_bmap_broot_space_calc(nrecs) +#else +#define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \ + ((int)(sizeof(xfs_bmbt_block_t) + \ + ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_BROOT_SPACE) +int xfs_bmap_broot_space(xfs_bmdr_block_t *bb); +#define XFS_BMAP_BROOT_SPACE(bb) xfs_bmap_broot_space(bb) +#else +#define XFS_BMAP_BROOT_SPACE(bb) \ + XFS_BMAP_BROOT_SPACE_CALC(INT_GET((bb)->bb_numrecs, ARCH_CONVERT)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMDR_SPACE_CALC) +int xfs_bmdr_space_calc(int nrecs); +#define XFS_BMDR_SPACE_CALC(nrecs) xfs_bmdr_space_calc(nrecs) +#else +#define XFS_BMDR_SPACE_CALC(nrecs) \ + ((int)(sizeof(xfs_bmdr_block_t) + \ + ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))) +#endif + +/* + * Maximum number of bmap btree levels. 
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BM_MAXLEVELS) +int xfs_bm_maxlevels(struct xfs_mount *mp, int w); +#define XFS_BM_MAXLEVELS(mp,w) xfs_bm_maxlevels(mp,w) +#else +#define XFS_BM_MAXLEVELS(mp,w) ((mp)->m_bm_maxlevels[w]) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_SANITY_CHECK) +int xfs_bmap_sanity_check(struct xfs_mount *mp, xfs_bmbt_block_t *bb, + int level); +#define XFS_BMAP_SANITY_CHECK(mp,bb,level) \ + xfs_bmap_sanity_check(mp,bb,level) +#else +#define XFS_BMAP_SANITY_CHECK(mp,bb,level) \ + (INT_GET((bb)->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC && \ + INT_GET((bb)->bb_level, ARCH_CONVERT) == level && \ + INT_GET((bb)->bb_numrecs, ARCH_CONVERT) > 0 && \ + INT_GET((bb)->bb_numrecs, ARCH_CONVERT) <= (mp)->m_bmap_dmxr[(level) != 0]) +#endif + +/* + * Trace buffer entry types. + */ +#define XFS_BMBT_KTRACE_ARGBI 1 +#define XFS_BMBT_KTRACE_ARGBII 2 +#define XFS_BMBT_KTRACE_ARGFFFI 3 +#define XFS_BMBT_KTRACE_ARGI 4 +#define XFS_BMBT_KTRACE_ARGIFK 5 +#define XFS_BMBT_KTRACE_ARGIFR 6 +#define XFS_BMBT_KTRACE_ARGIK 7 +#define XFS_BMBT_KTRACE_CUR 8 + +#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */ +#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */ + +#if defined(XFS_ALL_TRACE) +#define XFS_BMBT_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_BMBT_TRACE +#endif + + +#ifdef __KERNEL__ + +/* + * Prototypes for xfs_bmap.c to call. + */ + +void +xfs_bmdr_to_bmbt( + xfs_bmdr_block_t *, + int, + xfs_bmbt_block_t *, + int); + +int +xfs_bmbt_decrement( + struct xfs_btree_cur *, + int, + int *); + +int +xfs_bmbt_delete( + struct xfs_btree_cur *, + int *); + +void +xfs_bmbt_get_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s); + +xfs_bmbt_block_t * +xfs_bmbt_get_block( + struct xfs_btree_cur *cur, + int level, + struct xfs_buf **bpp); + +xfs_filblks_t +xfs_bmbt_get_blockcount( + xfs_bmbt_rec_t *r); + +xfs_fsblock_t +xfs_bmbt_get_startblock( + xfs_bmbt_rec_t *r); + +xfs_fileoff_t +xfs_bmbt_get_startoff( + xfs_bmbt_rec_t *r); + +xfs_exntst_t +xfs_bmbt_get_state( + xfs_bmbt_rec_t *r); + +#if ARCH_CONVERT != ARCH_NOCONVERT +void +xfs_bmbt_disk_get_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s); + +xfs_exntst_t +xfs_bmbt_disk_get_state( + xfs_bmbt_rec_t *r); + +xfs_filblks_t +xfs_bmbt_disk_get_blockcount( + xfs_bmbt_rec_t *r); + +xfs_fsblock_t +xfs_bmbt_disk_get_startblock( + xfs_bmbt_rec_t *r); + +xfs_fileoff_t +xfs_bmbt_disk_get_startoff( + xfs_bmbt_rec_t *r); + +#else +#define xfs_bmbt_disk_get_all(r, s) \ + xfs_bmbt_get_all(r, s) +#define xfs_bmbt_disk_get_state(r) \ + xfs_bmbt_get_state(r) +#define xfs_bmbt_disk_get_blockcount(r) \ + xfs_bmbt_get_blockcount(r) +#define xfs_bmbt_disk_get_startblock(r) \ + xfs_bmbt_get_blockcount(r) +#define xfs_bmbt_disk_get_startoff(r) \ + xfs_bmbt_get_startoff(r) +#endif + +int +xfs_bmbt_increment( + struct xfs_btree_cur *, + int, + int *); + +int +xfs_bmbt_insert( + struct xfs_btree_cur *, + int *); + +int +xfs_bmbt_insert_many( + struct xfs_btree_cur *, + int, + xfs_bmbt_rec_t *, + int *); + +void +xfs_bmbt_log_block( + struct xfs_btree_cur *, + struct xfs_buf *, + int); + +void +xfs_bmbt_log_recs( + struct xfs_btree_cur *, + struct xfs_buf *, + int, + int); + +int +xfs_bmbt_lookup_eq( + struct xfs_btree_cur *, + xfs_fileoff_t, + xfs_fsblock_t, + xfs_filblks_t, + int *); + +int +xfs_bmbt_lookup_ge( + struct xfs_btree_cur *, + xfs_fileoff_t, + xfs_fsblock_t, + xfs_filblks_t, + int *); + +int +xfs_bmbt_lookup_le( + struct xfs_btree_cur *, + xfs_fileoff_t, + xfs_fsblock_t, + 
xfs_filblks_t, + int *); + +/* + * Give the bmap btree a new root block. Copy the old broot contents + * down into a real block and make the broot point to it. + */ +int /* error */ +xfs_bmbt_newroot( + struct xfs_btree_cur *cur, /* btree cursor */ + int *logflags, /* logging flags for inode */ + int *stat); /* return status - 0 fail */ + +void +xfs_bmbt_set_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s); + +void +xfs_bmbt_set_allf( + xfs_bmbt_rec_t *r, + xfs_fileoff_t o, + xfs_fsblock_t b, + xfs_filblks_t c, + xfs_exntst_t v); + +void +xfs_bmbt_set_blockcount( + xfs_bmbt_rec_t *r, + xfs_filblks_t v); + +void +xfs_bmbt_set_startblock( + xfs_bmbt_rec_t *r, + xfs_fsblock_t v); + +void +xfs_bmbt_set_startoff( + xfs_bmbt_rec_t *r, + xfs_fileoff_t v); + +void +xfs_bmbt_set_state( + xfs_bmbt_rec_t *r, + xfs_exntst_t v); + +#if ARCH_CONVERT != ARCH_NOCONVERT +void +xfs_bmbt_disk_set_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s); + +void +xfs_bmbt_disk_set_allf( + xfs_bmbt_rec_t *r, + xfs_fileoff_t o, + xfs_fsblock_t b, + xfs_filblks_t c, + xfs_exntst_t v); +#else +#define xfs_bmbt_disk_set_all(r, s) \ + xfs_bmbt_set_all(r, s) +#define xfs_bmbt_disk_set_allf(r, o, b, c, v) \ + xfs_bmbt_set_allf(r, o, b, c, v) +#endif + +void +xfs_bmbt_to_bmdr( + xfs_bmbt_block_t *, + int, + xfs_bmdr_block_t *, + int); + +int +xfs_bmbt_update( + struct xfs_btree_cur *, + xfs_fileoff_t, + xfs_fsblock_t, + xfs_filblks_t, + xfs_exntst_t); + +#ifdef XFSDEBUG +/* + * Get the data from the pointed-to record. + */ +int +xfs_bmbt_get_rec( + struct xfs_btree_cur *, + xfs_fileoff_t *, + xfs_fsblock_t *, + xfs_filblks_t *, + xfs_exntst_t *, + int *); +#endif + + +/* + * Search an extent list for the extent which includes block + * bno. + */ +xfs_bmbt_rec_t * +xfs_bmap_do_search_extents( + xfs_bmbt_rec_t *, + xfs_extnum_t, + xfs_extnum_t, + xfs_fileoff_t, + int *, + xfs_extnum_t *, + xfs_bmbt_irec_t *, + xfs_bmbt_irec_t *); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_BMAP_BTREE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_bmap.c linux.22-ac2/fs/xfs/xfs_bmap.c --- linux.vanilla/fs/xfs/xfs_bmap.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_bmap.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,6247 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_dmapi.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_itable.h" +#include "xfs_extfree_item.h" +#include "xfs_alloc.h" +#include "xfs_bmap.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_bit.h" +#include "xfs_rw.h" +#include "xfs_quota.h" +#include "xfs_trans_space.h" +#include "xfs_buf_item.h" + +#ifdef DEBUG +ktrace_t *xfs_bmap_trace_buf; +#endif + +#ifdef XFSDEBUG +STATIC void +xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork); +#endif + +kmem_zone_t *xfs_bmap_free_item_zone; + +/* + * Prototypes for internal bmap routines. + */ + + +/* + * Called from xfs_bmap_add_attrfork to handle extents format files. + */ +STATIC int /* error */ +xfs_bmap_add_attrfork_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags); /* inode logging flags */ + +/* + * Called from xfs_bmap_add_attrfork to handle local format files. + */ +STATIC int /* error */ +xfs_bmap_add_attrfork_local( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags); /* inode logging flags */ + +/* + * Called by xfs_bmapi to update extent list structure and the btree + * after allocating space (or doing a delayed allocation). + */ +STATIC int /* error */ +xfs_bmap_add_extent( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_fsblock_t *first, /* pointer to firstblock variable */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + int *logflagsp, /* inode logging flags */ + int whichfork, /* data or attr fork */ + int rsvd); /* OK to allocate reserved blocks */ + +/* + * Called by xfs_bmap_add_extent to handle cases converting a delayed + * allocation to a real allocation. 
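+ *
+ * A delayed allocation is an extent whose startblock is not yet a real
+ * disk block: it is a NULLSTARTBLOCK() encoding whose low bits hold the
+ * worst-case number of indirect (bmap btree) blocks reserved for the
+ * extent.  A minimal sketch of that encoding, using only the macros
+ * from xfs_bmap_btree.h:
+ *
+ *	xfs_fsblock_t sb = NULLSTARTBLOCK(7);	(reserve 7 indirect blocks)
+ *	ASSERT(ISNULLSTARTBLOCK(sb));
+ *	ASSERT(STARTBLOCKVAL(sb) == 7);
+ *
+ * This routine replaces such an encoded startblock with the real one
+ * carried in "new" and returns the remaining indirect-block reservation
+ * through *dnew.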
+ */ +STATIC int /* error */ +xfs_bmap_add_extent_delay_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ + xfs_fsblock_t *first, /* pointer to firstblock variable */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + int *logflagsp, /* inode logging flags */ + int rsvd); /* OK to allocate reserved blocks */ + +/* + * Called by xfs_bmap_add_extent to handle cases converting a hole + * to a delayed allocation. + */ +STATIC int /* error */ +xfs_bmap_add_extent_hole_delay( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp,/* inode logging flags */ + int rsvd); /* OK to allocate reserved blocks */ + +/* + * Called by xfs_bmap_add_extent to handle cases converting a hole + * to a real allocation. + */ +STATIC int /* error */ +xfs_bmap_add_extent_hole_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp, /* inode logging flags */ + int whichfork); /* data or attr fork */ + +/* + * Called by xfs_bmap_add_extent to handle cases converting an unwritten + * allocation to a real allocation or vice versa. + */ +STATIC int /* error */ +xfs_bmap_add_extent_unwritten_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp); /* inode logging flags */ + +/* + * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. + * It figures out where to ask the underlying allocator to put the new extent. + */ +STATIC int /* error */ +xfs_bmap_alloc( + xfs_bmalloca_t *ap); /* bmap alloc argument struct */ + +/* + * Transform a btree format file with only one leaf node, where the + * extents list will fit in the inode, into an extents format file. + * Since the extent list is already in-core, all we have to do is + * give up the space for the btree root and pitch the leaf block. + */ +STATIC int /* error */ +xfs_bmap_btree_to_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_btree_cur_t *cur, /* btree cursor */ + int *logflagsp, /* inode logging flags */ + int whichfork); /* data or attr fork */ + +#ifdef XFSDEBUG +/* + * Check that the extents list for the inode ip is in the right order. + */ +STATIC void +xfs_bmap_check_extents( + xfs_inode_t *ip, /* incore inode pointer */ + int whichfork); /* data or attr fork */ +#else +#define xfs_bmap_check_extents(ip,w) +#endif + +/* + * Called by xfs_bmapi to update extent list structure and the btree + * after removing space (or undoing a delayed allocation). 
+ */ +STATIC int /* error */ +xfs_bmap_del_extent( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_trans_t *tp, /* current trans pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp,/* inode logging flags */ + int whichfork, /* data or attr fork */ + int rsvd); /* OK to allocate reserved blocks */ + +/* + * Remove the entry "free" from the free item list. Prev points to the + * previous entry, unless "free" is the head of the list. + */ +STATIC void +xfs_bmap_del_free( + xfs_bmap_free_t *flist, /* free item list header */ + xfs_bmap_free_item_t *prev, /* previous item on list, if any */ + xfs_bmap_free_item_t *free); /* list item to be freed */ + +/* + * Remove count entries from the extents array for inode "ip", starting + * at index "idx". Copies the remaining items down over the deleted ones, + * and gives back the excess memory. + */ +STATIC void +xfs_bmap_delete_exlist( + xfs_inode_t *ip, /* incode inode pointer */ + xfs_extnum_t idx, /* starting delete index */ + xfs_extnum_t count, /* count of items to delete */ + int whichfork); /* data or attr fork */ + +/* + * Convert an extents-format file into a btree-format file. + * The new file will have a root block (in the inode) and a single child block. + */ +STATIC int /* error */ +xfs_bmap_extents_to_btree( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first-block-allocated */ + xfs_bmap_free_t *flist, /* blocks freed in xaction */ + xfs_btree_cur_t **curp, /* cursor returned to caller */ + int wasdel, /* converting a delayed alloc */ + int *logflagsp, /* inode logging flags */ + int whichfork); /* data or attr fork */ + +/* + * Insert new item(s) in the extent list for inode "ip". + * Count new items are inserted at offset idx. + */ +STATIC void +xfs_bmap_insert_exlist( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* starting index of new items */ + xfs_extnum_t count, /* number of inserted items */ + xfs_bmbt_irec_t *new, /* items to insert */ + int whichfork); /* data or attr fork */ + +/* + * Convert a local file to an extents file. + * This code is sort of bogus, since the file data needs to get + * logged so it won't be lost. The bmap-level manipulations are ok, though. + */ +STATIC int /* error */ +xfs_bmap_local_to_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated in xaction */ + xfs_extlen_t total, /* total blocks needed by transaction */ + int *logflagsp, /* inode logging flags */ + int whichfork); /* data or attr fork */ + +/* + * Search the extents list for the inode, for the extent containing bno. + * If bno lies in a hole, point to the next entry. If bno lies past eof, + * *eofp will be set, and *prevp will contain the last entry (null if none). + * Else, *lastxp will be set to the index of the found + * entry; *gotp will contain the entry. 
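+ *
+ * A minimal sketch of the usual calling pattern (all names other than
+ * the routine itself are illustrative):
+ *
+ *	xfs_bmbt_irec_t	got, prev;
+ *	xfs_extnum_t	lastx;
+ *	int		eof;
+ *
+ *	ep = xfs_bmap_search_extents(ip, bno, whichfork,
+ *				     &eof, &lastx, &got, &prev);
+ *	if (eof)
+ *		... bno is past the last extent; prev holds the last entry
+ *	else if (got.br_startoff > bno)
+ *		... bno sits in a hole; got is the next extent after it
+ *	else
+ *		... bno falls inside got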
+ */ +STATIC xfs_bmbt_rec_t * /* pointer to found extent entry */ +xfs_bmap_search_extents( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fileoff_t bno, /* block number searched for */ + int whichfork, /* data or attr fork */ + int *eofp, /* out: end of file found */ + xfs_extnum_t *lastxp, /* out: last extent index */ + xfs_bmbt_irec_t *gotp, /* out: extent entry found */ + xfs_bmbt_irec_t *prevp); /* out: previous extent entry found */ + +#ifdef XFS_BMAP_TRACE +/* + * Add a bmap trace buffer entry. Base routine for the others. + */ +STATIC void +xfs_bmap_trace_addentry( + int opcode, /* operation */ + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(ies) */ + xfs_extnum_t cnt, /* count of entries, 1 or 2 */ + xfs_bmbt_rec_t *r1, /* first record */ + xfs_bmbt_rec_t *r2, /* second record or null */ + int whichfork); /* data or attr fork */ + +/* + * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. + */ +STATIC void +xfs_bmap_trace_delete( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(entries) deleted */ + xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */ + int whichfork); /* data or attr fork */ + +/* + * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or + * reading in the extents list from the disk (in the btree). + */ +STATIC void +xfs_bmap_trace_insert( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(entries) inserted */ + xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */ + xfs_bmbt_irec_t *r1, /* inserted record 1 */ + xfs_bmbt_irec_t *r2, /* inserted record 2 or null */ + int whichfork); /* data or attr fork */ + +/* + * Add bmap trace entry after updating an extent list entry in place. + */ +STATIC void +xfs_bmap_trace_post_update( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry updated */ + int whichfork); /* data or attr fork */ + +/* + * Add bmap trace entry prior to updating an extent list entry in place. + */ +STATIC void +xfs_bmap_trace_pre_update( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry to be updated */ + int whichfork); /* data or attr fork */ + +#else +#define xfs_bmap_trace_delete(f,d,ip,i,c,w) +#define xfs_bmap_trace_insert(f,d,ip,i,c,r1,r2,w) +#define xfs_bmap_trace_post_update(f,d,ip,i,w) +#define xfs_bmap_trace_pre_update(f,d,ip,i,w) +#endif /* XFS_BMAP_TRACE */ + +/* + * Compute the worst-case number of indirect blocks that will be used + * for ip's delayed extent of length "len". + */ +STATIC xfs_filblks_t +xfs_bmap_worst_indlen( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_filblks_t len); /* delayed extent length */ + +#ifdef DEBUG +/* + * Perform various validation checks on the values being returned + * from xfs_bmapi(). 
+ */ +STATIC void +xfs_bmap_validate_ret( + xfs_fileoff_t bno, + xfs_filblks_t len, + int flags, + xfs_bmbt_irec_t *mval, + int nmap, + int ret_nmap); +#else +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) +#endif /* DEBUG */ + +#if defined(DEBUG) && defined(XFS_RW_TRACE) +STATIC void +xfs_bunmap_trace( + xfs_inode_t *ip, + xfs_fileoff_t bno, + xfs_filblks_t len, + int flags, + inst_t *ra); +#else +#define xfs_bunmap_trace(ip, bno, len, flags, ra) +#endif /* DEBUG && XFS_RW_TRACE */ + +STATIC int +xfs_bmap_count_tree( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_fsblock_t blockno, + int levelin, + int *count); + +STATIC int +xfs_bmap_count_leaves( + xfs_bmbt_rec_t *frp, + int numrecs, + int *count); + +/* + * Bmap internal routines. + */ + +/* + * Called from xfs_bmap_add_attrfork to handle btree format files. + */ +STATIC int /* error */ +xfs_bmap_add_attrfork_btree( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags) /* inode logging flags */ +{ + xfs_btree_cur_t *cur; /* btree cursor */ + int error; /* error return value */ + xfs_mount_t *mp; /* file system mount struct */ + int stat; /* newroot status */ + + mp = ip->i_mount; + if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip)) + *flags |= XFS_ILOG_DBROOT; + else { + cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip, + XFS_DATA_FORK); + cur->bc_private.b.flist = flist; + cur->bc_private.b.firstblock = *firstblock; + if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) + goto error0; + ASSERT(stat == 1); /* must be at least one entry */ + if ((error = xfs_bmbt_newroot(cur, flags, &stat))) + goto error0; + if (stat == 0) { + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + return XFS_ERROR(ENOSPC); + } + *firstblock = cur->bc_private.b.firstblock; + cur->bc_private.b.allocated = 0; + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + } + return 0; +error0: + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Called from xfs_bmap_add_attrfork to handle extents format files. + */ +STATIC int /* error */ +xfs_bmap_add_attrfork_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags) /* inode logging flags */ +{ + xfs_btree_cur_t *cur; /* bmap btree cursor */ + int error; /* error return value */ + + if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip)) + return 0; + cur = NULL; + error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0, + flags, XFS_DATA_FORK); + if (cur) { + cur->bc_private.b.allocated = 0; + xfs_btree_del_cursor(cur, + error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + } + return error; +} + +/* + * Called from xfs_bmap_add_attrfork to handle local format files. 
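+ *
+ * "Local" format means the fork's data (a short-form directory or an
+ * inline symlink target) is stored directly in the inode.  If that data
+ * still fits in the data fork once room is made for the new attribute
+ * fork, there is nothing to do; otherwise a directory is converted to
+ * its single-block form and anything else is pushed out into a real
+ * extent by the code below.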
+ */ +STATIC int /* error */ +xfs_bmap_add_attrfork_local( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated */ + xfs_bmap_free_t *flist, /* blocks to free at commit */ + int *flags) /* inode logging flags */ +{ + xfs_da_args_t dargs; /* args for dir/attr code */ + int error; /* error return value */ + xfs_mount_t *mp; /* mount structure pointer */ + + if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) + return 0; + if ((ip->i_d.di_mode & IFMT) == IFDIR) { + mp = ip->i_mount; + memset(&dargs, 0, sizeof(dargs)); + dargs.dp = ip; + dargs.firstblock = firstblock; + dargs.flist = flist; + dargs.total = mp->m_dirblkfsbs; + dargs.whichfork = XFS_DATA_FORK; + dargs.trans = tp; + error = XFS_DIR_SHORTFORM_TO_SINGLE(mp, &dargs); + } else + error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags, + XFS_DATA_FORK); + return error; +} + +/* + * Called by xfs_bmapi to update extent list structure and the btree + * after allocating space (or doing a delayed allocation). + */ +STATIC int /* error */ +xfs_bmap_add_extent( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_fsblock_t *first, /* pointer to firstblock variable */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + int *logflagsp, /* inode logging flags */ + int whichfork, /* data or attr fork */ + int rsvd) /* OK to use reserved data blocks */ +{ + xfs_btree_cur_t *cur; /* btree cursor or null */ + xfs_filblks_t da_new; /* new count del alloc blocks used */ + xfs_filblks_t da_old; /* old count del alloc blocks used */ + int error; /* error return value */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent"; +#endif + xfs_ifork_t *ifp; /* inode fork ptr */ + int logflags; /* returned value */ + xfs_extnum_t nextents; /* number of extents in file now */ + + XFS_STATS_INC(xfsstats.xs_add_exlist); + cur = *curp; + ifp = XFS_IFORK_PTR(ip, whichfork); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(idx <= nextents); + da_old = da_new = 0; + error = 0; + /* + * This is the first extent added to a new/empty file. + * Special case this one, so other routines get to assume there are + * already extents in the list. + */ + if (nextents == 0) { + xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new, + NULL, whichfork); + xfs_bmap_insert_exlist(ip, 0, 1, new, whichfork); + ASSERT(cur == NULL); + ifp->if_lastex = 0; + if (!ISNULLSTARTBLOCK(new->br_startblock)) { + XFS_IFORK_NEXT_SET(ip, whichfork, 1); + logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); + } else + logflags = 0; + } + /* + * Any kind of new delayed allocation goes here. + */ + else if (ISNULLSTARTBLOCK(new->br_startblock)) { + if (cur) + ASSERT((cur->bc_private.b.flags & + XFS_BTCUR_BPRV_WASDEL) == 0); + if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, cur, new, + &logflags, rsvd))) + goto done; + } + /* + * Real allocation off the end of the file. + */ + else if (idx == nextents) { + if (cur) + ASSERT((cur->bc_private.b.flags & + XFS_BTCUR_BPRV_WASDEL) == 0); + if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new, + &logflags, whichfork))) + goto done; + } else { + xfs_bmbt_irec_t prev; /* old extent at offset idx */ + + /* + * Get the record referred to by idx. 
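+ * (xfs_bmbt_get_all() unpacks the packed incore extent record into the
+ * xfs_bmbt_irec_t form, giving separate startoff, startblock,
+ * blockcount and state fields to test against.)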
+ */ + xfs_bmbt_get_all(&ifp->if_u1.if_extents[idx], &prev); + /* + * If it's a real allocation record, and the new allocation ends + * after the start of the referred to record, then we're filling + * in a delayed or unwritten allocation with a real one, or + * converting real back to unwritten. + */ + if (!ISNULLSTARTBLOCK(new->br_startblock) && + new->br_startoff + new->br_blockcount > prev.br_startoff) { + if (prev.br_state != XFS_EXT_UNWRITTEN && + ISNULLSTARTBLOCK(prev.br_startblock)) { + da_old = STARTBLOCKVAL(prev.br_startblock); + if (cur) + ASSERT(cur->bc_private.b.flags & + XFS_BTCUR_BPRV_WASDEL); + if ((error = xfs_bmap_add_extent_delay_real(ip, + idx, &cur, new, &da_new, first, flist, + &logflags, rsvd))) + goto done; + } else if (new->br_state == XFS_EXT_NORM) { + ASSERT(new->br_state == XFS_EXT_NORM); + if ((error = xfs_bmap_add_extent_unwritten_real( + ip, idx, &cur, new, &logflags))) + goto done; + } else { + ASSERT(new->br_state == XFS_EXT_UNWRITTEN); + if ((error = xfs_bmap_add_extent_unwritten_real( + ip, idx, &cur, new, &logflags))) + goto done; + } + ASSERT(*curp == cur || *curp == NULL); + } + /* + * Otherwise we're filling in a hole with an allocation. + */ + else { + if (cur) + ASSERT((cur->bc_private.b.flags & + XFS_BTCUR_BPRV_WASDEL) == 0); + if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, + new, &logflags, whichfork))) + goto done; + } + } + + ASSERT(*curp == cur || *curp == NULL); + /* + * Convert to a btree if necessary. + */ + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) { + int tmp_logflags; /* partial log flag return val */ + + ASSERT(cur == NULL); + error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first, + flist, &cur, da_old > 0, &tmp_logflags, whichfork); + logflags |= tmp_logflags; + if (error) + goto done; + } + /* + * Adjust for changes in reserved delayed indirect blocks. + * Nothing to do for disk quotas here. + */ + if (da_old || da_new) { + xfs_filblks_t nblks; + + nblks = da_new; + if (cur) + nblks += cur->bc_private.b.allocated; + ASSERT(nblks <= da_old); + if (nblks < da_old) + xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, + (int)(da_old - nblks), rsvd); + } + /* + * Clear out the allocated field, done with it now in any case. + */ + if (cur) { + cur->bc_private.b.allocated = 0; + *curp = cur; + } +done: +#ifdef XFSDEBUG + if (!error) + xfs_bmap_check_leaf_extents(*curp, ip, whichfork); +#endif + *logflagsp = logflags; + return error; +} + +/* + * Called by xfs_bmap_add_extent to handle cases converting a delayed + * allocation to a real allocation. 
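+ *
+ * The conversion is classified by four state bits: LEFT_FILLING and
+ * RIGHT_FILLING say whether the new real extent reaches the start and
+ * the end of the old delayed extent, while LEFT_CONTIG and RIGHT_CONTIG
+ * say whether it can be merged with the neighboring extents.  The body
+ * is then a switch on that combination.  For example, when the new
+ * extent exactly replaces the delayed extent and only the left neighbor
+ * is mergeable, SWITCH_STATE is
+ * MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG) and the left neighbor
+ * simply absorbs the old extent's blockcount.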
+ */ +STATIC int /* error */ +xfs_bmap_add_extent_delay_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ + xfs_fsblock_t *first, /* pointer to firstblock variable */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + int *logflagsp, /* inode logging flags */ + int rsvd) /* OK to use reserved data block allocation */ +{ + xfs_bmbt_rec_t *base; /* base of extent entry list */ + xfs_btree_cur_t *cur; /* btree cursor */ + int diff; /* temp value */ + xfs_bmbt_rec_t *ep; /* extent entry for idx */ + int error; /* error return value */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent_delay_real"; +#endif + int i; /* temp state */ + xfs_fileoff_t new_endoff; /* end offset of new entry */ + xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ + /* left is 0, right is 1, prev is 2 */ + int rval=0; /* return value (logging flags) */ + int state = 0;/* state bits, accessed thru macros */ + xfs_filblks_t temp; /* value for dnew calculations */ + xfs_filblks_t temp2; /* value for dnew calculations */ + int tmp_rval; /* partial logging flags */ + enum { /* bit number definitions for state */ + LEFT_CONTIG, RIGHT_CONTIG, + LEFT_FILLING, RIGHT_FILLING, + LEFT_DELAY, RIGHT_DELAY, + LEFT_VALID, RIGHT_VALID + }; + +#define LEFT r[0] +#define RIGHT r[1] +#define PREV r[2] +#define MASK(b) (1 << (b)) +#define MASK2(a,b) (MASK(a) | MASK(b)) +#define MASK3(a,b,c) (MASK2(a,b) | MASK(c)) +#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d)) +#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b))) +#define STATE_TEST(b) (state & MASK(b)) +#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \ + ((state &= ~MASK(b)), 0)) +#define SWITCH_STATE \ + (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG)) + + /* + * Set up a bunch of variables to make the tests simpler. + */ + cur = *curp; + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + xfs_bmbt_get_all(ep, &PREV); + new_endoff = new->br_startoff + new->br_blockcount; + ASSERT(PREV.br_startoff <= new->br_startoff); + ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); + /* + * Set flags determining what part of the previous delayed allocation + * extent is being replaced by a real allocation. + */ + STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); + STATE_SET(RIGHT_FILLING, + PREV.br_startoff + PREV.br_blockcount == new_endoff); + /* + * Check and set flags if this segment has a left neighbor. + * Don't set contiguous if the combined extent would be too large. + */ + if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { + xfs_bmbt_get_all(ep - 1, &LEFT); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); + } + STATE_SET(LEFT_CONTIG, + STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && + LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && + LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && + LEFT.br_state == new->br_state && + LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); + /* + * Check and set flags if this segment has a right neighbor. + * Don't set contiguous if the combined extent would be too large. + * Also check for all-three-contiguous being too large. 
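+ * (If both neighbors are contiguous and the new extent fills the whole
+ * delayed extent, all three extents collapse into a single one, so the
+ * sum of their lengths must also stay within MAXEXTLEN.)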
+ */ + if (STATE_SET_TEST(RIGHT_VALID, + idx < + ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { + xfs_bmbt_get_all(ep + 1, &RIGHT); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); + } + STATE_SET(RIGHT_CONTIG, + STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && + new_endoff == RIGHT.br_startoff && + new->br_startblock + new->br_blockcount == + RIGHT.br_startblock && + new->br_state == RIGHT.br_state && + new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && + ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) != + MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) || + LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount + <= MAXEXTLEN)); + error = 0; + /* + * Switch out based on the FILLING and CONTIG state bits. + */ + switch (SWITCH_STATE) { + + case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + /* + * Filling in all of a previously delayed allocation extent. + * The left and right neighbors are both contiguous with new. + */ + xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + PREV.br_blockcount + + RIGHT.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + ip->i_d.di_nextents--; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, + RIGHT.br_startblock, + RIGHT.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + + PREV.br_blockcount + + RIGHT.br_blockcount, LEFT.br_state))) + goto done; + } + *dnew = 0; + break; + + case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG): + /* + * Filling in all of a previously delayed allocation extent. + * The left neighbor is contiguous, the right is not. + */ + xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + PREV.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff, + LEFT.br_startblock, LEFT.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + + PREV.br_blockcount, LEFT.br_state))) + goto done; + } + *dnew = 0; + break; + + case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG): + /* + * Filling in all of a previously delayed allocation extent. + * The right neighbor is contiguous, the left is not. 
+ */ + xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_startblock(ep, new->br_startblock); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount + RIGHT.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, + RIGHT.br_startblock, + RIGHT.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, PREV.br_startoff, + new->br_startblock, + PREV.br_blockcount + + RIGHT.br_blockcount, PREV.br_state))) + goto done; + } + *dnew = 0; + break; + + case MASK2(LEFT_FILLING, RIGHT_FILLING): + /* + * Filling in all of a previously delayed allocation extent. + * Neither the left nor right neighbors are contiguous with + * the new one. + */ + xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_startblock(ep, new->br_startblock); + xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + *dnew = 0; + break; + + case MASK2(LEFT_FILLING, LEFT_CONTIG): + /* + * Filling in the first part of a previous delayed allocation. + * The left neighbor is contiguous. + */ + xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + new->br_blockcount); + xfs_bmbt_set_startoff(ep, + PREV.br_startoff + new->br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1, + XFS_DATA_FORK); + temp = PREV.br_blockcount - new->br_blockcount; + xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, temp); + ip->i_df.if_lastex = idx - 1; + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff, + LEFT.br_startblock, LEFT.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + + new->br_blockcount, + LEFT.br_state))) + goto done; + } + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + STARTBLOCKVAL(PREV.br_startblock)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx, + XFS_DATA_FORK); + *dnew = temp; + break; + + case MASK(LEFT_FILLING): + /* + * Filling in the first part of a previous delayed allocation. + * The left neighbor is not contiguous. 
+ */ + xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_startoff(ep, new_endoff); + temp = PREV.br_blockcount - new->br_blockcount; + xfs_bmbt_set_blockcount(ep, temp); + xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_nextents > ip->i_df.if_ext_max) { + error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + first, flist, &cur, 1, &tmp_rval, + XFS_DATA_FORK); + rval |= tmp_rval; + if (error) + goto done; + } + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + STARTBLOCKVAL(PREV.br_startblock) - + (cur ? cur->bc_private.b.allocated : 0)); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx + 1]; + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1, + XFS_DATA_FORK); + *dnew = temp; + break; + + case MASK2(RIGHT_FILLING, RIGHT_CONTIG): + /* + * Filling in the last part of a previous delayed allocation. + * The right neighbor is contiguous with the new allocation. + */ + temp = PREV.br_blockcount - new->br_blockcount; + xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx, + XFS_DATA_FORK); + xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, temp); + xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, + new->br_blockcount + RIGHT.br_blockcount, + RIGHT.br_state); + xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, + RIGHT.br_startblock, + RIGHT.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, new->br_startoff, + new->br_startblock, + new->br_blockcount + + RIGHT.br_blockcount, + RIGHT.br_state))) + goto done; + } + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + STARTBLOCKVAL(PREV.br_startblock)); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx, + XFS_DATA_FORK); + *dnew = temp; + break; + + case MASK(RIGHT_FILLING): + /* + * Filling in the last part of a previous delayed allocation. + * The right neighbor is not contiguous. 
+ */ + temp = PREV.br_blockcount - new->br_blockcount; + xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, temp); + xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, + new, NULL, XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_nextents > ip->i_df.if_ext_max) { + error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + first, flist, &cur, 1, &tmp_rval, + XFS_DATA_FORK); + rval |= tmp_rval; + if (error) + goto done; + } + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + STARTBLOCKVAL(PREV.br_startblock) - + (cur ? cur->bc_private.b.allocated : 0)); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); + *dnew = temp; + break; + + case 0: + /* + * Filling in the middle part of a previous delayed allocation. + * Contiguity is impossible here. + * This case is avoided almost all the time. + */ + temp = new->br_startoff - PREV.br_startoff; + xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, temp); + r[0] = *new; + r[1].br_startoff = new_endoff; + temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; + r[1].br_blockcount = temp2; + xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_nextents > ip->i_df.if_ext_max) { + error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + first, flist, &cur, 1, &tmp_rval, + XFS_DATA_FORK); + rval |= tmp_rval; + if (error) + goto done; + } + temp = xfs_bmap_worst_indlen(ip, temp); + temp2 = xfs_bmap_worst_indlen(ip, temp2); + diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) - + (cur ? cur->bc_private.b.allocated : 0)); + if (diff > 0 && + xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -diff, rsvd)) { + /* + * Ick gross gag me with a spoon. + */ + ASSERT(0); /* want to see if this ever happens! 
*/ + while (diff > 0) { + if (temp) { + temp--; + diff--; + if (!diff || + !xfs_mod_incore_sb(ip->i_mount, + XFS_SBS_FDBLOCKS, -diff, rsvd)) + break; + } + if (temp2) { + temp2--; + diff--; + if (!diff || + !xfs_mod_incore_sb(ip->i_mount, + XFS_SBS_FDBLOCKS, -diff, rsvd)) + break; + } + } + } + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK); + xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2, + XFS_DATA_FORK); + xfs_bmbt_set_startblock(ep + 2, NULLSTARTBLOCK((int)temp2)); + xfs_bmap_trace_post_update(fname, "0", ip, idx + 2, + XFS_DATA_FORK); + *dnew = temp + temp2; + break; + + case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + case MASK2(LEFT_FILLING, RIGHT_CONTIG): + case MASK2(RIGHT_FILLING, LEFT_CONTIG): + case MASK2(LEFT_CONTIG, RIGHT_CONTIG): + case MASK(LEFT_CONTIG): + case MASK(RIGHT_CONTIG): + /* + * These cases are all impossible. + */ + ASSERT(0); + } + *curp = cur; +done: + *logflagsp = rval; + return error; +#undef LEFT +#undef RIGHT +#undef PREV +#undef MASK +#undef MASK2 +#undef MASK3 +#undef MASK4 +#undef STATE_SET +#undef STATE_TEST +#undef STATE_SET_TEST +#undef SWITCH_STATE +} + +/* + * Called by xfs_bmap_add_extent to handle cases converting an unwritten + * allocation to a real allocation or vice versa. + */ +STATIC int /* error */ +xfs_bmap_add_extent_unwritten_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp) /* inode logging flags */ +{ + xfs_bmbt_rec_t *base; /* base of extent entry list */ + xfs_btree_cur_t *cur; /* btree cursor */ + xfs_bmbt_rec_t *ep; /* extent entry for idx */ + int error; /* error return value */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent_unwritten_real"; +#endif + int i; /* temp state */ + xfs_fileoff_t new_endoff; /* end offset of new entry */ + xfs_exntst_t newext; /* new extent state */ + xfs_exntst_t oldext; /* old extent state */ + xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ + /* left is 0, right is 1, prev is 2 */ + int rval=0; /* return value (logging flags) */ + int state = 0;/* state bits, accessed thru macros */ + enum { /* bit number definitions for state */ + LEFT_CONTIG, RIGHT_CONTIG, + LEFT_FILLING, RIGHT_FILLING, + LEFT_DELAY, RIGHT_DELAY, + LEFT_VALID, RIGHT_VALID + }; + +#define LEFT r[0] +#define RIGHT r[1] +#define PREV r[2] +#define MASK(b) (1 << (b)) +#define MASK2(a,b) (MASK(a) | MASK(b)) +#define MASK3(a,b,c) (MASK2(a,b) | MASK(c)) +#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d)) +#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b))) +#define STATE_TEST(b) (state & MASK(b)) +#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \ + ((state &= ~MASK(b)), 0)) +#define SWITCH_STATE \ + (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG)) + + /* + * Set up a bunch of variables to make the tests simpler. + */ + error = 0; + cur = *curp; + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + xfs_bmbt_get_all(ep, &PREV); + newext = new->br_state; + oldext = (newext == XFS_EXT_UNWRITTEN) ? 
+ XFS_EXT_NORM : XFS_EXT_UNWRITTEN; + ASSERT(PREV.br_state == oldext); + new_endoff = new->br_startoff + new->br_blockcount; + ASSERT(PREV.br_startoff <= new->br_startoff); + ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); + /* + * Set flags determining what part of the previous oldext allocation + * extent is being replaced by a newext allocation. + */ + STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); + STATE_SET(RIGHT_FILLING, + PREV.br_startoff + PREV.br_blockcount == new_endoff); + /* + * Check and set flags if this segment has a left neighbor. + * Don't set contiguous if the combined extent would be too large. + */ + if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { + xfs_bmbt_get_all(ep - 1, &LEFT); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); + } + STATE_SET(LEFT_CONTIG, + STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && + LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && + LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && + LEFT.br_state == newext && + LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); + /* + * Check and set flags if this segment has a right neighbor. + * Don't set contiguous if the combined extent would be too large. + * Also check for all-three-contiguous being too large. + */ + if (STATE_SET_TEST(RIGHT_VALID, + idx < + ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { + xfs_bmbt_get_all(ep + 1, &RIGHT); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); + } + STATE_SET(RIGHT_CONTIG, + STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && + new_endoff == RIGHT.br_startoff && + new->br_startblock + new->br_blockcount == + RIGHT.br_startblock && + newext == RIGHT.br_state && + new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && + ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) != + MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) || + LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount + <= MAXEXTLEN)); + /* + * Switch out based on the FILLING and CONTIG state bits. + */ + switch (SWITCH_STATE) { + + case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + /* + * Setting all of a previous oldext extent to newext. + * The left and right neighbors are both contiguous with new. 
+ */ + xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + PREV.br_blockcount + + RIGHT.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + ip->i_d.di_nextents -= 2; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, + RIGHT.br_startblock, + RIGHT.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + PREV.br_blockcount + + RIGHT.br_blockcount, LEFT.br_state))) + goto done; + } + break; + + case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG): + /* + * Setting all of a previous oldext extent to newext. + * The left neighbor is contiguous, the right is not. + */ + xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + PREV.br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + ip->i_d.di_nextents--; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, PREV.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + PREV.br_blockcount, + LEFT.br_state))) + goto done; + } + break; + + case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG): + /* + * Setting all of a previous oldext extent to newext. + * The right neighbor is contiguous, the left is not. 
+ */ + xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount + RIGHT.br_blockcount); + xfs_bmbt_set_state(ep, newext); + xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); + ip->i_d.di_nextents--; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff, + RIGHT.br_startblock, + RIGHT.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, new->br_startoff, + new->br_startblock, + new->br_blockcount + RIGHT.br_blockcount, + newext))) + goto done; + } + break; + + case MASK2(LEFT_FILLING, RIGHT_FILLING): + /* + * Setting all of a previous oldext extent to newext. + * Neither the left nor right neighbors are contiguous with + * the new one. + */ + xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_state(ep, newext); + xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + newext))) + goto done; + } + break; + + case MASK2(LEFT_FILLING, LEFT_CONTIG): + /* + * Setting the first part of a previous oldext extent to newext. + * The left neighbor is contiguous. + */ + xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, + LEFT.br_blockcount + new->br_blockcount); + xfs_bmbt_set_startoff(ep, + PREV.br_startoff + new->br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_startblock(ep, + new->br_startblock + new->br_blockcount); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount - new->br_blockcount); + xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, PREV.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, + PREV.br_startoff + new->br_blockcount, + PREV.br_startblock + new->br_blockcount, + PREV.br_blockcount - new->br_blockcount, + oldext))) + goto done; + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + goto done; + if (xfs_bmbt_update(cur, LEFT.br_startoff, + LEFT.br_startblock, + LEFT.br_blockcount + new->br_blockcount, + LEFT.br_state)) + goto done; + } + break; + + case MASK(LEFT_FILLING): + /* + * Setting the first part of a previous oldext extent to newext. + * The left neighbor is not contiguous. 
+ */ + xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK); + ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); + xfs_bmbt_set_startoff(ep, new_endoff); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount - new->br_blockcount); + xfs_bmbt_set_startblock(ep, + new->br_startblock + new->br_blockcount); + xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK); + xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, PREV.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, + PREV.br_startoff + new->br_blockcount, + PREV.br_startblock + new->br_blockcount, + PREV.br_blockcount - new->br_blockcount, + oldext))) + goto done; + cur->bc_rec.b = *new; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + break; + + case MASK2(RIGHT_FILLING, RIGHT_CONTIG): + /* + * Setting the last part of a previous oldext extent to newext. + * The right neighbor is contiguous with the new allocation. + */ + xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx, + XFS_DATA_FORK); + xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount - new->br_blockcount); + xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx, + XFS_DATA_FORK); + xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, + new->br_blockcount + RIGHT.br_blockcount, newext); + xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + if (cur == NULL) + rval = XFS_ILOG_DEXT; + else { + rval = 0; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, + PREV.br_blockcount, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, PREV.br_startoff, + PREV.br_startblock, + PREV.br_blockcount - new->br_blockcount, + oldext))) + goto done; + if ((error = xfs_bmbt_increment(cur, 0, &i))) + goto done; + if ((error = xfs_bmbt_update(cur, new->br_startoff, + new->br_startblock, + new->br_blockcount + RIGHT.br_blockcount, + newext))) + goto done; + } + break; + + case MASK(RIGHT_FILLING): + /* + * Setting the last part of a previous oldext extent to newext. + * The right neighbor is not contiguous. 
+ */ + xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, + PREV.br_blockcount - new->br_blockcount); + xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); + xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, + new, NULL, XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + ip->i_d.di_nextents++; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, PREV.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_update(cur, PREV.br_startoff, + PREV.br_startblock, + PREV.br_blockcount - new->br_blockcount, + oldext))) + goto done; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, + &i))) + goto done; + ASSERT(i == 0); + cur->bc_rec.b.br_state = XFS_EXT_NORM; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + break; + + case 0: + /* + * Setting the middle part of a previous oldext extent to + * newext. Contiguity is impossible here. + * One extent becomes three extents. + */ + xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep, + new->br_startoff - PREV.br_startoff); + xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK); + r[0] = *new; + r[1].br_startoff = new_endoff; + r[1].br_blockcount = + PREV.br_startoff + PREV.br_blockcount - new_endoff; + r[1].br_startblock = new->br_startblock + new->br_blockcount; + r[1].br_state = oldext; + xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); + ip->i_df.if_lastex = idx + 1; + ip->i_d.di_nextents += 2; + if (cur == NULL) + rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; + else { + rval = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff, + PREV.br_startblock, PREV.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + /* new right extent - oldext */ + if ((error = xfs_bmbt_update(cur, r[1].br_startoff, + r[1].br_startblock, r[1].br_blockcount, + r[1].br_state))) + goto done; + /* new left extent - oldext */ + PREV.br_blockcount = + new->br_startoff - PREV.br_startoff; + cur->bc_rec.b = PREV; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + if ((error = xfs_bmbt_increment(cur, 0, &i))) + goto done; + ASSERT(i == 1); + /* new middle extent - newext */ + cur->bc_rec.b = *new; + if ((error = xfs_bmbt_insert(cur, &i))) + goto done; + ASSERT(i == 1); + } + break; + + case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): + case MASK2(LEFT_FILLING, RIGHT_CONTIG): + case MASK2(RIGHT_FILLING, LEFT_CONTIG): + case MASK2(LEFT_CONTIG, RIGHT_CONTIG): + case MASK(LEFT_CONTIG): + case MASK(RIGHT_CONTIG): + /* + * These cases are all impossible. + */ + ASSERT(0); + } + *curp = cur; +done: + *logflagsp = rval; + return error; +#undef LEFT +#undef RIGHT +#undef PREV +#undef MASK +#undef MASK2 +#undef MASK3 +#undef MASK4 +#undef STATE_SET +#undef STATE_TEST +#undef STATE_SET_TEST +#undef SWITCH_STATE +} + +/* + * Called by xfs_bmap_add_extent to handle cases converting a hole + * to a delayed allocation. 
+ */ +/*ARGSUSED*/ +STATIC int /* error */ +xfs_bmap_add_extent_hole_delay( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp, /* inode logging flags */ + int rsvd) /* OK to allocate reserved blocks */ +{ + xfs_bmbt_rec_t *base; /* base of extent entry list */ + xfs_bmbt_rec_t *ep; /* extent list entry for idx */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent_hole_delay"; +#endif + xfs_bmbt_irec_t left; /* left neighbor extent entry */ + xfs_filblks_t newlen=0; /* new indirect size */ + xfs_filblks_t oldlen=0; /* old indirect size */ + xfs_bmbt_irec_t right; /* right neighbor extent entry */ + int state; /* state bits, accessed thru macros */ + xfs_filblks_t temp; /* temp for indirect calculations */ + enum { /* bit number definitions for state */ + LEFT_CONTIG, RIGHT_CONTIG, + LEFT_DELAY, RIGHT_DELAY, + LEFT_VALID, RIGHT_VALID + }; + +#define MASK(b) (1 << (b)) +#define MASK2(a,b) (MASK(a) | MASK(b)) +#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b))) +#define STATE_TEST(b) (state & MASK(b)) +#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \ + ((state &= ~MASK(b)), 0)) +#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG)) + + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; + state = 0; + ASSERT(ISNULLSTARTBLOCK(new->br_startblock)); + /* + * Check and set flags if this segment has a left neighbor + */ + if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { + xfs_bmbt_get_all(ep - 1, &left); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); + } + /* + * Check and set flags if the current (right) segment exists. + * If it doesn't exist, we're converting the hole at end-of-file. + */ + if (STATE_SET_TEST(RIGHT_VALID, + idx < + ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { + xfs_bmbt_get_all(ep, &right); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock)); + } + /* + * Set contiguity flags on the left and right neighbors. + * Don't let extents get too large, even if the pieces are contiguous. + */ + STATE_SET(LEFT_CONTIG, + STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) && + left.br_startoff + left.br_blockcount == new->br_startoff && + left.br_blockcount + new->br_blockcount <= MAXEXTLEN); + STATE_SET(RIGHT_CONTIG, + STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) && + new->br_startoff + new->br_blockcount == right.br_startoff && + new->br_blockcount + right.br_blockcount <= MAXEXTLEN && + (!STATE_TEST(LEFT_CONTIG) || + (left.br_blockcount + new->br_blockcount + + right.br_blockcount <= MAXEXTLEN))); + /* + * Switch out based on the contiguity flags. + */ + switch (SWITCH_STATE) { + + case MASK2(LEFT_CONTIG, RIGHT_CONTIG): + /* + * New allocation is contiguous with delayed allocations + * on the left and on the right. + * Merge all three into a single extent list entry. 
+ */ + temp = left.br_blockcount + new->br_blockcount + + right.br_blockcount; + xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, temp); + oldlen = STARTBLOCKVAL(left.br_startblock) + + STARTBLOCKVAL(new->br_startblock) + + STARTBLOCKVAL(right.br_startblock); + newlen = xfs_bmap_worst_indlen(ip, temp); + xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); + xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, + XFS_DATA_FORK); + xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + break; + + case MASK(LEFT_CONTIG): + /* + * New allocation is contiguous with a delayed allocation + * on the left. + * Merge the new allocation with the left neighbor. + */ + temp = left.br_blockcount + new->br_blockcount; + xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, + XFS_DATA_FORK); + xfs_bmbt_set_blockcount(ep - 1, temp); + oldlen = STARTBLOCKVAL(left.br_startblock) + + STARTBLOCKVAL(new->br_startblock); + newlen = xfs_bmap_worst_indlen(ip, temp); + xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); + xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, + XFS_DATA_FORK); + ip->i_df.if_lastex = idx - 1; + break; + + case MASK(RIGHT_CONTIG): + /* + * New allocation is contiguous with a delayed allocation + * on the right. + * Merge the new allocation with the right neighbor. + */ + xfs_bmap_trace_pre_update(fname, "RC", ip, idx, XFS_DATA_FORK); + temp = new->br_blockcount + right.br_blockcount; + oldlen = STARTBLOCKVAL(new->br_startblock) + + STARTBLOCKVAL(right.br_startblock); + newlen = xfs_bmap_worst_indlen(ip, temp); + xfs_bmbt_set_allf(ep, new->br_startoff, + NULLSTARTBLOCK((int)newlen), temp, right.br_state); + xfs_bmap_trace_post_update(fname, "RC", ip, idx, XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + break; + + case 0: + /* + * New allocation is not contiguous with another + * delayed allocation. + * Insert a new entry. + */ + oldlen = newlen = 0; + xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, + XFS_DATA_FORK); + xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + ip->i_df.if_lastex = idx; + break; + } + if (oldlen != newlen) { + ASSERT(oldlen > newlen); + xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, + (int)(oldlen - newlen), rsvd); + /* + * Nothing to do for disk quota accounting here. + */ + } + *logflagsp = 0; + return 0; +#undef MASK +#undef MASK2 +#undef STATE_SET +#undef STATE_TEST +#undef STATE_SET_TEST +#undef SWITCH_STATE +} + +/* + * Called by xfs_bmap_add_extent to handle cases converting a hole + * to a real allocation. + */ +STATIC int /* error */ +xfs_bmap_add_extent_hole_real( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* extent number to update/insert */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ + int *logflagsp, /* inode logging flags */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *ep; /* pointer to extent entry ins. 
point */ + int error; /* error return value */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_add_extent_hole_real"; +#endif + int i; /* temp state */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_irec_t left; /* left neighbor extent entry */ + xfs_bmbt_irec_t right; /* right neighbor extent entry */ + int state; /* state bits, accessed thru macros */ + enum { /* bit number definitions for state */ + LEFT_CONTIG, RIGHT_CONTIG, + LEFT_DELAY, RIGHT_DELAY, + LEFT_VALID, RIGHT_VALID + }; + +#define MASK(b) (1 << (b)) +#define MASK2(a,b) (MASK(a) | MASK(b)) +#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b))) +#define STATE_TEST(b) (state & MASK(b)) +#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \ + ((state &= ~MASK(b)), 0)) +#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG)) + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); + ep = &ifp->if_u1.if_extents[idx]; + state = 0; + /* + * Check and set flags if this segment has a left neighbor. + */ + if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { + xfs_bmbt_get_all(ep - 1, &left); + STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); + } + /* + * Check and set flags if this segment has a current value. + * Not true if we're inserting into the "hole" at eof. + */ + if (STATE_SET_TEST(RIGHT_VALID, + idx < + ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { + xfs_bmbt_get_all(ep, &right); + STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock)); + } + /* + * We're inserting a real allocation between "left" and "right". + * Set the contiguity flags. Don't let extents get too large. + */ + STATE_SET(LEFT_CONTIG, + STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && + left.br_startoff + left.br_blockcount == new->br_startoff && + left.br_startblock + left.br_blockcount == new->br_startblock && + left.br_state == new->br_state && + left.br_blockcount + new->br_blockcount <= MAXEXTLEN); + STATE_SET(RIGHT_CONTIG, + STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && + new->br_startoff + new->br_blockcount == right.br_startoff && + new->br_startblock + new->br_blockcount == + right.br_startblock && + new->br_state == right.br_state && + new->br_blockcount + right.br_blockcount <= MAXEXTLEN && + (!STATE_TEST(LEFT_CONTIG) || + left.br_blockcount + new->br_blockcount + + right.br_blockcount <= MAXEXTLEN)); + + /* + * Select which case we're in here, and implement it. + */ + switch (SWITCH_STATE) { + + case MASK2(LEFT_CONTIG, RIGHT_CONTIG): + /* + * New allocation is contiguous with real allocations on the + * left and on the right. + * Merge all three into a single extent list entry. 
+ */ + xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, + whichfork); + xfs_bmbt_set_blockcount(ep - 1, + left.br_blockcount + new->br_blockcount + + right.br_blockcount); + xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, + whichfork); + xfs_bmap_trace_delete(fname, "LC|RC", ip, + idx, 1, whichfork); + xfs_bmap_delete_exlist(ip, idx, 1, whichfork); + ifp->if_lastex = idx - 1; + XFS_IFORK_NEXT_SET(ip, whichfork, + XFS_IFORK_NEXTENTS(ip, whichfork) - 1); + if (cur == NULL) { + *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); + return 0; + } + *logflagsp = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff, + right.br_startblock, right.br_blockcount, &i))) + return error; + ASSERT(i == 1); + if ((error = xfs_bmbt_delete(cur, &i))) + return error; + ASSERT(i == 1); + if ((error = xfs_bmbt_decrement(cur, 0, &i))) + return error; + ASSERT(i == 1); + error = xfs_bmbt_update(cur, left.br_startoff, + left.br_startblock, + left.br_blockcount + new->br_blockcount + + right.br_blockcount, left.br_state); + return error; + + case MASK(LEFT_CONTIG): + /* + * New allocation is contiguous with a real allocation + * on the left. + * Merge the new allocation with the left neighbor. + */ + xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork); + xfs_bmbt_set_blockcount(ep - 1, + left.br_blockcount + new->br_blockcount); + xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork); + ifp->if_lastex = idx - 1; + if (cur == NULL) { + *logflagsp = XFS_ILOG_FEXT(whichfork); + return 0; + } + *logflagsp = 0; + if ((error = xfs_bmbt_lookup_eq(cur, left.br_startoff, + left.br_startblock, left.br_blockcount, &i))) + return error; + ASSERT(i == 1); + error = xfs_bmbt_update(cur, left.br_startoff, + left.br_startblock, + left.br_blockcount + new->br_blockcount, + left.br_state); + return error; + + case MASK(RIGHT_CONTIG): + /* + * New allocation is contiguous with a real allocation + * on the right. + * Merge the new allocation with the right neighbor. + */ + xfs_bmap_trace_pre_update(fname, "RC", ip, idx, whichfork); + xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock, + new->br_blockcount + right.br_blockcount, + right.br_state); + xfs_bmap_trace_post_update(fname, "RC", ip, idx, whichfork); + ifp->if_lastex = idx; + if (cur == NULL) { + *logflagsp = XFS_ILOG_FEXT(whichfork); + return 0; + } + *logflagsp = 0; + if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff, + right.br_startblock, right.br_blockcount, &i))) + return error; + ASSERT(i == 1); + error = xfs_bmbt_update(cur, new->br_startoff, + new->br_startblock, + new->br_blockcount + right.br_blockcount, + right.br_state); + return error; + + case 0: + /* + * New allocation is not contiguous with another + * real allocation. + * Insert a new entry. 
+ */ + xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, + whichfork); + xfs_bmap_insert_exlist(ip, idx, 1, new, whichfork); + ifp->if_lastex = idx; + XFS_IFORK_NEXT_SET(ip, whichfork, + XFS_IFORK_NEXTENTS(ip, whichfork) + 1); + if (cur == NULL) { + *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); + return 0; + } + *logflagsp = XFS_ILOG_CORE; + if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, + new->br_startblock, new->br_blockcount, &i))) + return error; + ASSERT(i == 0); + cur->bc_rec.b.br_state = new->br_state; + if ((error = xfs_bmbt_insert(cur, &i))) + return error; + ASSERT(i == 1); + return 0; + } +#undef MASK +#undef MASK2 +#undef STATE_SET +#undef STATE_TEST +#undef STATE_SET_TEST +#undef SWITCH_STATE + /* NOTREACHED */ + ASSERT(0); + return 0; /* keep gcc quite */ +} + +#define XFS_ALLOC_GAP_UNITS 4 + +/* + * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. + * It figures out where to ask the underlying allocator to put the new extent. + */ +STATIC int /* error */ +xfs_bmap_alloc( + xfs_bmalloca_t *ap) /* bmap alloc argument struct */ +{ + xfs_fsblock_t adjust; /* adjustment to block numbers */ + xfs_alloctype_t atype=0; /* type for allocation routines */ + int error; /* error return value */ + xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ + xfs_mount_t *mp; /* mount point structure */ + int nullfb; /* true if ap->firstblock isn't set */ + int rt; /* true if inode is realtime */ +#ifdef __KERNEL__ + xfs_extlen_t prod=0; /* product factor for allocators */ + xfs_extlen_t ralen=0; /* realtime allocation length */ +#endif + +#define ISLEGAL(x,y) \ + (rt ? \ + (x) < mp->m_sb.sb_rblocks : \ + XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ + XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ + XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) + + /* + * Set up variables. + */ + mp = ap->ip->i_mount; + nullfb = ap->firstblock == NULLFSBLOCK; + rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; + fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); +#ifdef __KERNEL__ + if (rt) { + xfs_extlen_t extsz; /* file extent size for rt */ + xfs_fileoff_t nexto; /* next file offset */ + xfs_extlen_t orig_alen; /* original ap->alen */ + xfs_fileoff_t orig_end; /* original off+len */ + xfs_fileoff_t orig_off; /* original ap->off */ + xfs_extlen_t mod_off; /* modulus calculations */ + xfs_fileoff_t prevo; /* previous file offset */ + xfs_rtblock_t rtx; /* realtime extent number */ + xfs_extlen_t temp; /* temp for rt calculations */ + + /* + * Set prod to match the realtime extent size. + */ + if (!(extsz = ap->ip->i_d.di_extsize)) + extsz = mp->m_sb.sb_rextsize; + prod = extsz / mp->m_sb.sb_rextsize; + orig_off = ap->off; + orig_alen = ap->alen; + orig_end = orig_off + orig_alen; + /* + * If the file offset is unaligned vs. the extent size + * we need to align it. This will be possible unless + * the file was previously written with a kernel that didn't + * perform this alignment. + */ + mod_off = do_mod(orig_off, extsz); + if (mod_off) { + ap->alen += mod_off; + ap->off -= mod_off; + } + /* + * Same adjustment for the end of the requested area. + */ + if ((temp = (ap->alen % extsz))) + ap->alen += extsz - temp; + /* + * If the previous block overlaps with this proposed allocation + * then move the start forward without adjusting the length. + */ + prevo = + ap->prevp->br_startoff == NULLFILEOFF ? 
+ 0 : + (ap->prevp->br_startoff + + ap->prevp->br_blockcount); + if (ap->off != orig_off && ap->off < prevo) + ap->off = prevo; + /* + * If the next block overlaps with this proposed allocation + * then move the start back without adjusting the length, + * but not before offset 0. + * This may of course make the start overlap previous block, + * and if we hit the offset 0 limit then the next block + * can still overlap too. + */ + nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ? + NULLFILEOFF : ap->gotp->br_startoff; + if (!ap->eof && + ap->off + ap->alen != orig_end && + ap->off + ap->alen > nexto) + ap->off = nexto > ap->alen ? nexto - ap->alen : 0; + /* + * If we're now overlapping the next or previous extent that + * means we can't fit an extsz piece in this hole. Just move + * the start forward to the first legal spot and set + * the length so we hit the end. + */ + if ((ap->off != orig_off && ap->off < prevo) || + (ap->off + ap->alen != orig_end && + ap->off + ap->alen > nexto)) { + ap->off = prevo; + ap->alen = nexto - prevo; + } + /* + * If the result isn't a multiple of rtextents we need to + * remove blocks until it is. + */ + if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) { + /* + * We're not covering the original request, or + * we won't be able to once we fix the length. + */ + if (orig_off < ap->off || + orig_end > ap->off + ap->alen || + ap->alen - temp < orig_alen) + return XFS_ERROR(EINVAL); + /* + * Try to fix it by moving the start up. + */ + if (ap->off + temp <= orig_off) { + ap->alen -= temp; + ap->off += temp; + } + /* + * Try to fix it by moving the end in. + */ + else if (ap->off + ap->alen - temp >= orig_end) + ap->alen -= temp; + /* + * Set the start to the minimum then trim the length. + */ + else { + ap->alen -= orig_off - ap->off; + ap->off = orig_off; + ap->alen -= ap->alen % mp->m_sb.sb_rextsize; + } + /* + * Result doesn't cover the request, fail it. + */ + if (orig_off < ap->off || orig_end > ap->off + ap->alen) + return XFS_ERROR(EINVAL); + } + ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); + /* + * If the offset & length are not perfectly aligned + * then kill prod, it will just get us in trouble. + */ + if (do_mod(ap->off, extsz) || ap->alen % extsz) + prod = 1; + /* + * Set ralen to be the actual requested length in rtextents. + */ + ralen = ap->alen / mp->m_sb.sb_rextsize; + /* + * If the old value was close enough to MAXEXTLEN that + * we rounded up to it, cut it back so it's legal again. + * Note that if it's a really large request (bigger than + * MAXEXTLEN), we don't hear about that number, and can't + * adjust the starting point to match it. + */ + if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) + ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; + /* + * If it's an allocation to an empty file at offset 0, + * pick an extent that will space things out in the rt area. + */ + if (ap->eof && ap->off == 0) { + error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); + if (error) + return error; + ap->rval = rtx * mp->m_sb.sb_rextsize; + } else + ap->rval = 0; + } +#else + if (rt) + ap->rval = 0; +#endif /* __KERNEL__ */ + else if (nullfb) + ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); + else + ap->rval = ap->firstblock; + /* + * If allocating at eof, and there's a previous real block, + * try to use it's last block as our starting point. 
+ */ + if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF && + !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && + ISLEGAL(ap->prevp->br_startblock + ap->prevp->br_blockcount, + ap->prevp->br_startblock)) { + ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount; + /* + * Adjust for the gap between prevp and us. + */ + adjust = ap->off - + (ap->prevp->br_startoff + ap->prevp->br_blockcount); + if (adjust && + ISLEGAL(ap->rval + adjust, ap->prevp->br_startblock)) + ap->rval += adjust; + } + /* + * If not at eof, then compare the two neighbor blocks. + * Figure out whether either one gives us a good starting point, + * and pick the better one. + */ + else if (!ap->eof) { + xfs_fsblock_t gotbno; /* right side block number */ + xfs_fsblock_t gotdiff=0; /* right side difference */ + xfs_fsblock_t prevbno; /* left side block number */ + xfs_fsblock_t prevdiff=0; /* left side difference */ + + /* + * If there's a previous (left) block, select a requested + * start block based on it. + */ + if (ap->prevp->br_startoff != NULLFILEOFF && + !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && + (prevbno = ap->prevp->br_startblock + + ap->prevp->br_blockcount) && + ISLEGAL(prevbno, ap->prevp->br_startblock)) { + /* + * Calculate gap to end of previous block. + */ + adjust = prevdiff = ap->off - + (ap->prevp->br_startoff + + ap->prevp->br_blockcount); + /* + * Figure the startblock based on the previous block's + * end and the gap size. + * Heuristic! + * If the gap is large relative to the piece we're + * allocating, or using it gives us an illegal block + * number, then just use the end of the previous block. + */ + if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen && + ISLEGAL(prevbno + prevdiff, + ap->prevp->br_startblock)) + prevbno += adjust; + else + prevdiff += adjust; + /* + * If the firstblock forbids it, can't use it, + * must use default. + */ + if (!rt && !nullfb && + XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) + prevbno = NULLFSBLOCK; + } + /* + * No previous block or can't follow it, just default. + */ + else + prevbno = NULLFSBLOCK; + /* + * If there's a following (right) block, select a requested + * start block based on it. + */ + if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) { + /* + * Calculate gap to start of next block. + */ + adjust = gotdiff = ap->gotp->br_startoff - ap->off; + /* + * Figure the startblock based on the next block's + * start and the gap size. + */ + gotbno = ap->gotp->br_startblock; + /* + * Heuristic! + * If the gap is large relative to the piece we're + * allocating, or using it gives us an illegal block + * number, then just use the start of the next block + * offset by our length. + */ + if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen && + ISLEGAL(gotbno - gotdiff, gotbno)) + gotbno -= adjust; + else if (ISLEGAL(gotbno - ap->alen, gotbno)) { + gotbno -= ap->alen; + gotdiff += adjust - ap->alen; + } else + gotdiff += adjust; + /* + * If the firstblock forbids it, can't use it, + * must use default. + */ + if (!rt && !nullfb && + XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) + gotbno = NULLFSBLOCK; + } + /* + * No next block, just default. + */ + else + gotbno = NULLFSBLOCK; + /* + * If both valid, pick the better one, else the only good + * one, else ap->rval is already set (to 0 or the inode block). + */ + if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) + ap->rval = prevdiff <= gotdiff ? 
prevbno : gotbno; + else if (prevbno != NULLFSBLOCK) + ap->rval = prevbno; + else if (gotbno != NULLFSBLOCK) + ap->rval = gotbno; + } + /* + * If allowed, use ap->rval; otherwise must use firstblock since + * it's in the right allocation group. + */ + if (nullfb || rt || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno) + ; + else + ap->rval = ap->firstblock; + /* + * Realtime allocation, done through xfs_rtallocate_extent. + */ + if (rt) { +#ifndef __KERNEL__ + ASSERT(0); +#else + xfs_rtblock_t rtb; + + atype = ap->rval == 0 ? + XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; + do_div(ap->rval, mp->m_sb.sb_rextsize); + rtb = ap->rval; + ap->alen = ralen; + if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen, + &ralen, atype, ap->wasdel, prod, &rtb))) + return error; + if (rtb == NULLFSBLOCK && prod > 1 && + (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, + ap->alen, &ralen, atype, + ap->wasdel, 1, &rtb))) + return error; + ap->rval = rtb; + if (ap->rval != NULLFSBLOCK) { + ap->rval *= mp->m_sb.sb_rextsize; + ralen *= mp->m_sb.sb_rextsize; + ap->alen = ralen; + ap->ip->i_d.di_nblocks += ralen; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= ralen; + /* + * Adjust the disk quota also. This was reserved + * earlier. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : + XFS_TRANS_DQ_RTBCOUNT, + (long) ralen); + } else + ap->alen = 0; +#endif /* __KERNEL__ */ + } + /* + * Normal allocation, done through xfs_alloc_vextent. + */ + else { + xfs_agnumber_t ag; + xfs_alloc_arg_t args; + xfs_extlen_t blen; + xfs_extlen_t delta; + int isaligned; + xfs_extlen_t longest; + xfs_extlen_t need; + xfs_extlen_t nextminlen=0; + int notinit; + xfs_perag_t *pag; + xfs_agnumber_t startag; + int tryagain; + + tryagain = isaligned = 0; + args.tp = ap->tp; + args.mp = mp; + args.fsbno = ap->rval; + args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks); + blen = 0; + if (nullfb) { + args.type = XFS_ALLOCTYPE_START_BNO; + args.total = ap->total; + /* + * Find the longest available space. + * We're going to try for the whole allocation at once. + */ + startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); + notinit = 0; + down_read(&mp->m_peraglock); + while (blen < ap->alen) { + pag = &mp->m_perag[ag]; + if (!pag->pagf_init && + (error = xfs_alloc_pagf_init(mp, args.tp, + ag, XFS_ALLOC_FLAG_TRYLOCK))) { + up_read(&mp->m_peraglock); + return error; + } + /* + * See xfs_alloc_fix_freelist... + */ + if (pag->pagf_init) { + need = XFS_MIN_FREELIST_PAG(pag, mp); + delta = need > pag->pagf_flcount ? + need - pag->pagf_flcount : 0; + longest = (pag->pagf_longest > delta) ? + (pag->pagf_longest - delta) : + (pag->pagf_flcount > 0 || + pag->pagf_longest > 0); + if (blen < longest) + blen = longest; + } else + notinit = 1; + if (++ag == mp->m_sb.sb_agcount) + ag = 0; + if (ag == startag) + break; + } + up_read(&mp->m_peraglock); + /* + * Since the above loop did a BUF_TRYLOCK, it is + * possible that there is space for this request. + */ + if (notinit || blen < ap->minlen) + args.minlen = ap->minlen; + /* + * If the best seen length is less than the request + * length, use the best as the minimum. + */ + else if (blen < ap->alen) + args.minlen = blen; + /* + * Otherwise we've seen an extent as big as alen, + * use that as the minimum. 
+ */ + else + args.minlen = ap->alen; + } else if (ap->low) { + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = args.minlen = ap->minlen; + } else { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.total = ap->total; + args.minlen = ap->minlen; + } + if (ap->ip->i_d.di_extsize) { + args.prod = ap->ip->i_d.di_extsize; + if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); + } else if (mp->m_sb.sb_blocksize >= NBPP) { + args.prod = 1; + args.mod = 0; + } else { + args.prod = NBPP >> mp->m_sb.sb_blocklog; + if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); + } + /* + * If we are not low on available data blocks, and the + * underlying logical volume manager is a stripe, and + * the file offset is zero then try to allocate data + * blocks on stripe unit boundary. + * NOTE: ap->aeof is only set if the allocation length + * is >= the stripe unit and the allocation offset is + * at the end of file. + */ + if (!ap->low && ap->aeof) { + if (!ap->off) { + args.alignment = mp->m_dalign; + atype = args.type; + isaligned = 1; + /* + * Adjust for alignment + */ + if (blen > args.alignment && blen <= ap->alen) + args.minlen = blen - args.alignment; + args.minalignslop = 0; + } else { + /* + * First try an exact bno allocation. + * If it fails then do a near or start bno + * allocation with alignment turned on. + */ + atype = args.type; + tryagain = 1; + args.type = XFS_ALLOCTYPE_THIS_BNO; + args.alignment = 1; + /* + * Compute the minlen+alignment for the + * next case. Set slop so that the value + * of minlen+alignment+slop doesn't go up + * between the calls. + */ + if (blen > mp->m_dalign && blen <= ap->alen) + nextminlen = blen - mp->m_dalign; + else + nextminlen = args.minlen; + if (nextminlen + mp->m_dalign > args.minlen + 1) + args.minalignslop = + nextminlen + mp->m_dalign - + args.minlen - 1; + else + args.minalignslop = 0; + } + } else { + args.alignment = 1; + args.minalignslop = 0; + } + args.minleft = ap->minleft; + args.wasdel = ap->wasdel; + args.isfl = 0; + args.userdata = ap->userdata; + if ((error = xfs_alloc_vextent(&args))) + return error; + if (tryagain && args.fsbno == NULLFSBLOCK) { + /* + * Exact allocation failed. Now try with alignment + * turned on. + */ + args.type = atype; + args.fsbno = ap->rval; + args.alignment = mp->m_dalign; + args.minlen = nextminlen; + args.minalignslop = 0; + isaligned = 1; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (isaligned && args.fsbno == NULLFSBLOCK) { + /* + * allocation failed, so turn off alignment and + * try again. 
+ */ + args.type = atype; + args.fsbno = ap->rval; + args.alignment = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (args.fsbno == NULLFSBLOCK && nullfb && + args.minlen > ap->minlen) { + args.minlen = ap->minlen; + args.type = XFS_ALLOCTYPE_START_BNO; + args.fsbno = ap->rval; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (args.fsbno == NULLFSBLOCK && nullfb) { + args.fsbno = 0; + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = ap->minlen; + args.minleft = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + ap->low = 1; + } + if (args.fsbno != NULLFSBLOCK) { + ap->firstblock = ap->rval = args.fsbno; + ASSERT(nullfb || fb_agno == args.agno || + (ap->low && fb_agno < args.agno)); + ap->alen = args.len; + ap->ip->i_d.di_nblocks += args.len; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= args.len; + /* + * Adjust the disk quota also. This was reserved + * earlier. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : + XFS_TRANS_DQ_BCOUNT, + (long) args.len); + } else { + ap->rval = NULLFSBLOCK; + ap->alen = 0; + } + } + return 0; +#undef ISLEGAL +} + +/* + * Transform a btree format file with only one leaf node, where the + * extents list will fit in the inode, into an extents format file. + * Since the extent list is already in-core, all we have to do is + * give up the space for the btree root and pitch the leaf block. + */ +STATIC int /* error */ +xfs_bmap_btree_to_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_btree_cur_t *cur, /* btree cursor */ + int *logflagsp, /* inode logging flags */ + int whichfork) /* data or attr fork */ +{ + /* REFERENCED */ + xfs_bmbt_block_t *cblock;/* child btree block */ + xfs_fsblock_t cbno; /* child block number */ + xfs_buf_t *cbp; /* child block's buffer */ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork data */ + xfs_mount_t *mp; /* mount point structure */ + xfs_bmbt_ptr_t *pp; /* ptr to block address */ + xfs_bmbt_block_t *rblock;/* root btree block */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); + rblock = ifp->if_broot; + ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) == 1); + ASSERT(INT_GET(rblock->bb_numrecs, ARCH_CONVERT) == 1); + ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1); + mp = ip->i_mount; + pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes); + *logflagsp = 0; +#ifdef DEBUG + if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), 1))) + return error; +#endif + cbno = INT_GET(*pp, ARCH_CONVERT); + if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, + XFS_BMAP_BTREE_REF))) + return error; + cblock = XFS_BUF_TO_BMBT_BLOCK(cbp); + if ((error = xfs_btree_check_lblock(cur, cblock, 0, cbp))) + return error; + xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp); + ip->i_d.di_nblocks--; + XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); + xfs_trans_binval(tp, cbp); + if (cur->bc_bufs[0] == cbp) + cur->bc_bufs[0] = NULL; + xfs_iroot_realloc(ip, -1, whichfork); + ASSERT(ifp->if_broot == NULL); + ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); + *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); + return 0; +} + +/* + * Called by xfs_bmapi to update extent list structure and the btree + * after removing 
space (or undoing a delayed allocation). + */ +STATIC int /* error */ +xfs_bmap_del_extent( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_trans_t *tp, /* current transaction pointer */ + xfs_extnum_t idx, /* extent number to update/delete */ + xfs_bmap_free_t *flist, /* list of extents to be freed */ + xfs_btree_cur_t *cur, /* if null, not a btree */ + xfs_bmbt_irec_t *del, /* data to remove from extent list */ + int *logflagsp, /* inode logging flags */ + int whichfork, /* data or attr fork */ + int rsvd) /* OK to allocate reserved blocks */ +{ + xfs_filblks_t da_new; /* new delay-alloc indirect blocks */ + xfs_filblks_t da_old; /* old delay-alloc indirect blocks */ + xfs_fsblock_t del_endblock=0; /* first block past del */ + xfs_fileoff_t del_endoff; /* first offset past del */ + int delay; /* current block is delayed allocated */ + int do_fx; /* free extent at end of routine */ + xfs_bmbt_rec_t *ep; /* current extent entry pointer */ + int error; /* error return value */ + int flags; /* inode logging flags */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_del_extent"; +#endif + xfs_bmbt_irec_t got; /* current extent entry */ + xfs_fileoff_t got_endoff; /* first offset past got */ + int i; /* temp state */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_mount_t *mp; /* mount structure */ + xfs_filblks_t nblks; /* quota/sb block count */ + xfs_bmbt_irec_t new; /* new record to be inserted */ + /* REFERENCED */ + xfs_extnum_t nextents; /* number of extents in list */ + uint qfield; /* quota field to update */ + xfs_filblks_t temp; /* for indirect length calculations */ + xfs_filblks_t temp2; /* for indirect length calculations */ + + XFS_STATS_INC(xfsstats.xs_del_exlist); + mp = ip->i_mount; + ifp = XFS_IFORK_PTR(ip, whichfork); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(idx >= 0 && idx < nextents); + ASSERT(del->br_blockcount > 0); + ep = &ifp->if_u1.if_extents[idx]; + xfs_bmbt_get_all(ep, &got); + ASSERT(got.br_startoff <= del->br_startoff); + del_endoff = del->br_startoff + del->br_blockcount; + got_endoff = got.br_startoff + got.br_blockcount; + ASSERT(got_endoff >= del_endoff); + delay = ISNULLSTARTBLOCK(got.br_startblock); + ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay); + flags = 0; + qfield = 0; + error = 0; + /* + * If deleting a real allocation, must free up the disk space. + */ + if (!delay) { + flags = XFS_ILOG_CORE; + /* + * Realtime allocation. Free it and record di_nblocks update. + */ + if (whichfork == XFS_DATA_FORK && + (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { + xfs_fsblock_t bno; + xfs_filblks_t len; + + ASSERT(do_mod(del->br_blockcount, + mp->m_sb.sb_rextsize) == 0); + ASSERT(do_mod(del->br_startblock, + mp->m_sb.sb_rextsize) == 0); + bno = del->br_startblock; + len = del->br_blockcount; + do_div(bno, mp->m_sb.sb_rextsize); + do_div(len, mp->m_sb.sb_rextsize); + if ((error = xfs_rtfree_extent(ip->i_transp, bno, + (xfs_extlen_t)len))) + goto done; + do_fx = 0; + nblks = len * mp->m_sb.sb_rextsize; + qfield = XFS_TRANS_DQ_RTBCOUNT; + } + /* + * Ordinary allocation. + */ + else { + do_fx = 1; + nblks = del->br_blockcount; + qfield = XFS_TRANS_DQ_BCOUNT; + } + /* + * Set up del_endblock and cur for later. 
+ */ + del_endblock = del->br_startblock + del->br_blockcount; + if (cur) { + if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff, + got.br_startblock, got.br_blockcount, + &i))) + goto done; + ASSERT(i == 1); + } + da_old = da_new = 0; + } else { + da_old = STARTBLOCKVAL(got.br_startblock); + da_new = 0; + nblks = 0; + do_fx = 0; + } + /* + * Set flag value to use in switch statement. + * Left-contig is 2, right-contig is 1. + */ + switch (((got.br_startoff == del->br_startoff) << 1) | + (got_endoff == del_endoff)) { + case 3: + /* + * Matches the whole extent. Delete the entry. + */ + xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork); + xfs_bmap_delete_exlist(ip, idx, 1, whichfork); + ifp->if_lastex = idx; + if (delay) + break; + XFS_IFORK_NEXT_SET(ip, whichfork, + XFS_IFORK_NEXTENTS(ip, whichfork) - 1); + flags |= XFS_ILOG_CORE; + if (!cur) { + flags |= XFS_ILOG_FEXT(whichfork); + break; + } + if ((error = xfs_bmbt_delete(cur, &i))) + goto done; + ASSERT(i == 1); + break; + + case 2: + /* + * Deleting the first part of the extent. + */ + xfs_bmap_trace_pre_update(fname, "2", ip, idx, whichfork); + xfs_bmbt_set_startoff(ep, del_endoff); + temp = got.br_blockcount - del->br_blockcount; + xfs_bmbt_set_blockcount(ep, temp); + ifp->if_lastex = idx; + if (delay) { + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + da_old); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "2", ip, idx, + whichfork); + da_new = temp; + break; + } + xfs_bmbt_set_startblock(ep, del_endblock); + xfs_bmap_trace_post_update(fname, "2", ip, idx, whichfork); + if (!cur) { + flags |= XFS_ILOG_FEXT(whichfork); + break; + } + if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, + got.br_blockcount - del->br_blockcount, + got.br_state))) + goto done; + break; + + case 1: + /* + * Deleting the last part of the extent. + */ + temp = got.br_blockcount - del->br_blockcount; + xfs_bmap_trace_pre_update(fname, "1", ip, idx, whichfork); + xfs_bmbt_set_blockcount(ep, temp); + ifp->if_lastex = idx; + if (delay) { + temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), + da_old); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + xfs_bmap_trace_post_update(fname, "1", ip, idx, + whichfork); + da_new = temp; + break; + } + xfs_bmap_trace_post_update(fname, "1", ip, idx, whichfork); + if (!cur) { + flags |= XFS_ILOG_FEXT(whichfork); + break; + } + if ((error = xfs_bmbt_update(cur, got.br_startoff, + got.br_startblock, + got.br_blockcount - del->br_blockcount, + got.br_state))) + goto done; + break; + + case 0: + /* + * Deleting the middle of the extent. + */ + temp = del->br_startoff - got.br_startoff; + xfs_bmap_trace_pre_update(fname, "0", ip, idx, whichfork); + xfs_bmbt_set_blockcount(ep, temp); + new.br_startoff = del_endoff; + temp2 = got_endoff - del_endoff; + new.br_blockcount = temp2; + new.br_state = got.br_state; + if (!delay) { + new.br_startblock = del_endblock; + flags |= XFS_ILOG_CORE; + if (cur) { + if ((error = xfs_bmbt_update(cur, + got.br_startoff, + got.br_startblock, temp, + got.br_state))) + goto done; + if ((error = xfs_bmbt_increment(cur, 0, &i))) + goto done; + cur->bc_rec.b = new; + error = xfs_bmbt_insert(cur, &i); + if (error && error != ENOSPC) + goto done; + /* + * If get no-space back from btree insert, + * it tried a split, and we have a zero + * block reservation. + * Fix up our state and return the error. + */ + if (error == ENOSPC) { + /* + * Reset the cursor, don't trust + * it after any insert operation. 
+ */ + if ((error = xfs_bmbt_lookup_eq(cur, + got.br_startoff, + got.br_startblock, + temp, &i))) + goto done; + ASSERT(i == 1); + /* + * Update the btree record back + * to the original value. + */ + if ((error = xfs_bmbt_update(cur, + got.br_startoff, + got.br_startblock, + got.br_blockcount, + got.br_state))) + goto done; + /* + * Reset the extent record back + * to the original value. + */ + xfs_bmbt_set_blockcount(ep, + got.br_blockcount); + flags = 0; + error = XFS_ERROR(ENOSPC); + goto done; + } + ASSERT(i == 1); + } else + flags |= XFS_ILOG_FEXT(whichfork); + XFS_IFORK_NEXT_SET(ip, whichfork, + XFS_IFORK_NEXTENTS(ip, whichfork) + 1); + } else { + ASSERT(whichfork == XFS_DATA_FORK); + temp = xfs_bmap_worst_indlen(ip, temp); + xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); + temp2 = xfs_bmap_worst_indlen(ip, temp2); + new.br_startblock = NULLSTARTBLOCK((int)temp2); + da_new = temp + temp2; + while (da_new > da_old) { + if (temp) { + temp--; + da_new--; + xfs_bmbt_set_startblock(ep, + NULLSTARTBLOCK((int)temp)); + } + if (da_new == da_old) + break; + if (temp2) { + temp2--; + da_new--; + new.br_startblock = + NULLSTARTBLOCK((int)temp2); + } + } + } + xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork); + xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL, + whichfork); + xfs_bmap_insert_exlist(ip, idx + 1, 1, &new, whichfork); + ifp->if_lastex = idx + 1; + break; + } + /* + * If we need to, add to list of extents to delete. + */ + if (do_fx) + xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist, + mp); + /* + * Adjust inode # blocks in the file. + */ + if (nblks) + ip->i_d.di_nblocks -= nblks; + /* + * Adjust quota data. + */ + if (qfield) + XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks); + + /* + * Account for change in delayed indirect blocks. + * Nothing to do for disk quota accounting here. + */ + ASSERT(da_old >= da_new); + if (da_old > da_new) + xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int)(da_old - da_new), + rsvd); +done: + *logflagsp = flags; + return error; +} + +/* + * Remove the entry "free" from the free item list. Prev points to the + * previous entry, unless "free" is the head of the list. + */ +STATIC void +xfs_bmap_del_free( + xfs_bmap_free_t *flist, /* free item list header */ + xfs_bmap_free_item_t *prev, /* previous item on list, if any */ + xfs_bmap_free_item_t *free) /* list item to be freed */ +{ + if (prev) + prev->xbfi_next = free->xbfi_next; + else + flist->xbf_first = free->xbfi_next; + flist->xbf_count--; + kmem_zone_free(xfs_bmap_free_item_zone, free); +} + +/* + * Remove count entries from the extents array for inode "ip", starting + * at index "idx". Copies the remaining items down over the deleted ones, + * and gives back the excess memory. + */ +STATIC void +xfs_bmap_delete_exlist( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* starting delete index */ + xfs_extnum_t count, /* count of items to delete */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extent list */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extents in list after */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + base = ifp->if_u1.if_extents; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count; + memmove(&base[idx], &base[idx + count], + (nextents - idx) * sizeof(*base)); + xfs_iext_realloc(ip, -count, whichfork); +} + +/* + * Convert an extents-format file into a btree-format file. 
+ * The new file will have a root block (in the inode) and a single child block. + */ +STATIC int /* error */ +xfs_bmap_extents_to_btree( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first-block-allocated */ + xfs_bmap_free_t *flist, /* blocks freed in xaction */ + xfs_btree_cur_t **curp, /* cursor returned to caller */ + int wasdel, /* converting a delayed alloc */ + int *logflagsp, /* inode logging flags */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_block_t *ablock; /* allocated (child) bt block */ + xfs_buf_t *abp; /* buffer for ablock */ + xfs_alloc_arg_t args; /* allocation arguments */ + xfs_bmbt_rec_t *arp; /* child record pointer */ + xfs_bmbt_block_t *block; /* btree root block */ + xfs_btree_cur_t *cur; /* bmap btree cursor */ + xfs_bmbt_rec_t *ep; /* extent list pointer */ + int error; /* error return value */ + xfs_extnum_t i, cnt; /* extent list index */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_key_t *kp; /* root block key pointer */ + xfs_mount_t *mp; /* mount structure */ + xfs_extnum_t nextents; /* extent list size */ + xfs_bmbt_ptr_t *pp; /* root block address pointer */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS); + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + /* + * Make space in the inode incore. + */ + xfs_iroot_realloc(ip, 1, whichfork); + ifp->if_flags |= XFS_IFBROOT; + /* + * Fill in the root. + */ + block = ifp->if_broot; + INT_SET(block->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); + INT_SET(block->bb_level, ARCH_CONVERT, 1); + INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); + INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); + INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); + /* + * Need a cursor. Can't allocate until bb_level is filled in. + */ + mp = ip->i_mount; + cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip, + whichfork); + cur->bc_private.b.firstblock = *firstblock; + cur->bc_private.b.flist = flist; + cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; + /* + * Convert to a btree with two levels, one record in root. + */ + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE); + args.tp = tp; + args.mp = mp; + if (*firstblock == NULLFSBLOCK) { + args.type = XFS_ALLOCTYPE_START_BNO; + args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); + } else if (flist->xbf_low) { + args.type = XFS_ALLOCTYPE_START_BNO; + args.fsbno = *firstblock; + } else { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.fsbno = *firstblock; + } + args.minlen = args.maxlen = args.prod = 1; + args.total = args.minleft = args.alignment = args.mod = args.isfl = + args.minalignslop = 0; + args.wasdel = wasdel; + *logflagsp = 0; + if ((error = xfs_alloc_vextent(&args))) { + xfs_iroot_realloc(ip, -1, whichfork); + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; + } + /* + * Allocation can't fail, the space was reserved. + */ + ASSERT(args.fsbno != NULLFSBLOCK); + ASSERT(*firstblock == NULLFSBLOCK || + args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) || + (flist->xbf_low && + args.agno > XFS_FSB_TO_AGNO(mp, *firstblock))); + *firstblock = cur->bc_private.b.firstblock = args.fsbno; + cur->bc_private.b.allocated++; + ip->i_d.di_nblocks++; + XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); + abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0); + /* + * Fill in the child block. 
+ */ + ablock = XFS_BUF_TO_BMBT_BLOCK(abp); + INT_SET(ablock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); + INT_ZERO(ablock->bb_level, ARCH_CONVERT); + INT_SET(ablock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); + INT_SET(ablock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); + arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) { + if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) { + arp->l0 = INT_GET(ep->l0, ARCH_CONVERT); + arp->l1 = INT_GET(ep->l1, ARCH_CONVERT); + arp++; cnt++; + } + } + INT_SET(ablock->bb_numrecs, ARCH_CONVERT, cnt); + ASSERT(INT_GET(ablock->bb_numrecs, ARCH_CONVERT) == XFS_IFORK_NEXTENTS(ip, whichfork)); + /* + * Fill in the root key and pointer. + */ + kp = XFS_BMAP_KEY_IADDR(block, 1, cur); + arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); + INT_SET(kp->br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(arp)); + pp = XFS_BMAP_PTR_IADDR(block, 1, cur); + INT_SET(*pp, ARCH_CONVERT, args.fsbno); + /* + * Do all this logging at the end so that + * the root is at the right level. + */ + xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS); + xfs_bmbt_log_recs(cur, abp, 1, INT_GET(ablock->bb_numrecs, ARCH_CONVERT)); + ASSERT(*curp == NULL); + *curp = cur; + *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork); + return 0; +} + +/* + * Insert new item(s) in the extent list for inode "ip". + * Count new items are inserted at offset idx. + */ +STATIC void +xfs_bmap_insert_exlist( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* starting index of new items */ + xfs_extnum_t count, /* number of inserted items */ + xfs_bmbt_irec_t *new, /* items to insert */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* extent list base */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* extent list size */ + xfs_extnum_t to; /* extent list index */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + xfs_iext_realloc(ip, count, whichfork); + base = ifp->if_u1.if_extents; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + memmove(&base[idx + count], &base[idx], + (nextents - (idx + count)) * sizeof(*base)); + for (to = idx; to < idx + count; to++, new++) + xfs_bmbt_set_all(&base[to], new); +} + +/* + * Convert a local file to an extents file. + * This code is out of bounds for data forks of regular files, + * since the file data needs to get logged so things will stay consistent. + * (The bmap-level manipulations are ok, though). + */ +STATIC int /* error */ +xfs_bmap_local_to_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fsblock_t *firstblock, /* first block allocated in xaction */ + xfs_extlen_t total, /* total blocks needed by transaction */ + int *logflagsp, /* inode logging flags */ + int whichfork) /* data or attr fork */ +{ + int error; /* error return value */ + int flags; /* logging flags returned */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_local_to_extents"; +#endif + xfs_ifork_t *ifp; /* inode fork pointer */ + + /* + * We don't want to deal with the case of keeping inode data inline yet. + * So sending the data fork of a regular inode is illegal. 
+ */ + ASSERT(!((ip->i_d.di_mode & IFMT) == IFREG && + whichfork == XFS_DATA_FORK)); + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); + flags = 0; + error = 0; + if (ifp->if_bytes) { + xfs_alloc_arg_t args; /* allocation arguments */ + xfs_buf_t *bp; /* buffer for extent list block */ + xfs_bmbt_rec_t *ep; /* extent list pointer */ + + args.tp = tp; + args.mp = ip->i_mount; + ASSERT(ifp->if_flags & XFS_IFINLINE); + /* + * Allocate a block. We know we need only one, since the + * file currently fits in an inode. + */ + if (*firstblock == NULLFSBLOCK) { + args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino); + args.type = XFS_ALLOCTYPE_START_BNO; + } else { + args.fsbno = *firstblock; + args.type = XFS_ALLOCTYPE_NEAR_BNO; + } + args.total = total; + args.mod = args.minleft = args.alignment = args.wasdel = + args.isfl = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + if ((error = xfs_alloc_vextent(&args))) + goto done; + /* + * Can't fail, the space was reserved. + */ + ASSERT(args.fsbno != NULLFSBLOCK); + ASSERT(args.len == 1); + *firstblock = args.fsbno; + bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); + memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data, + ifp->if_bytes); + xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); + xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); + xfs_iext_realloc(ip, 1, whichfork); + ep = ifp->if_u1.if_extents; + xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); + xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork); + XFS_IFORK_NEXT_SET(ip, whichfork, 1); + ip->i_d.di_nblocks = 1; + XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip, + XFS_TRANS_DQ_BCOUNT, 1L); + flags |= XFS_ILOG_FEXT(whichfork); + } else + ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); + ifp->if_flags &= ~XFS_IFINLINE; + ifp->if_flags |= XFS_IFEXTENTS; + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); + flags |= XFS_ILOG_CORE; +done: + *logflagsp = flags; + return error; +} + +xfs_bmbt_rec_t * /* pointer to found extent entry */ +xfs_bmap_do_search_extents( + xfs_bmbt_rec_t *base, /* base of extent list */ + xfs_extnum_t lastx, /* last extent index used */ + xfs_extnum_t nextents, /* extent list size */ + xfs_fileoff_t bno, /* block number searched for */ + int *eofp, /* out: end of file found */ + xfs_extnum_t *lastxp, /* out: last extent index */ + xfs_bmbt_irec_t *gotp, /* out: extent entry found */ + xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ +{ + xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + xfs_bmbt_irec_t got; /* extent list entry, decoded */ + int high; /* high index of binary search */ + int low; /* low index of binary search */ + + if (lastx != NULLEXTNUM && lastx < nextents) + ep = base + lastx; + else + ep = NULL; + prevp->br_startoff = NULLFILEOFF; + if (ep && bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep)) && + bno < got.br_startoff + + (got.br_blockcount = xfs_bmbt_get_blockcount(ep))) + *eofp = 0; + else if (ep && lastx < nextents - 1 && + bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep + 1)) && + bno < got.br_startoff + + (got.br_blockcount = xfs_bmbt_get_blockcount(ep + 1))) { + lastx++; + ep++; + *eofp = 0; + } else if (nextents == 0) + *eofp = 1; + else if (bno == 0 && + (got.br_startoff = xfs_bmbt_get_startoff(base)) == 0) { + ep = base; + lastx = 0; + got.br_blockcount = xfs_bmbt_get_blockcount(ep); + *eofp = 0; + } else { + /* binary search the extents array */ + low = 0; + high = nextents - 1; + while (low <= high) { + 
XFS_STATS_INC(xfsstats.xs_cmp_exlist); + lastx = (low + high) >> 1; + ep = base + lastx; + got.br_startoff = xfs_bmbt_get_startoff(ep); + got.br_blockcount = xfs_bmbt_get_blockcount(ep); + if (bno < got.br_startoff) + high = lastx - 1; + else if (bno >= got.br_startoff + got.br_blockcount) + low = lastx + 1; + else { + got.br_startblock = xfs_bmbt_get_startblock(ep); + got.br_state = xfs_bmbt_get_state(ep); + *eofp = 0; + *lastxp = lastx; + *gotp = got; + return ep; + } + } + if (bno >= got.br_startoff + got.br_blockcount) { + lastx++; + if (lastx == nextents) { + *eofp = 1; + got.br_startblock = xfs_bmbt_get_startblock(ep); + got.br_state = xfs_bmbt_get_state(ep); + *prevp = got; + ep = NULL; + } else { + *eofp = 0; + xfs_bmbt_get_all(ep, prevp); + ep++; + got.br_startoff = xfs_bmbt_get_startoff(ep); + got.br_blockcount = xfs_bmbt_get_blockcount(ep); + } + } else { + *eofp = 0; + if (ep > base) + xfs_bmbt_get_all(ep - 1, prevp); + } + } + if (ep) { + got.br_startblock = xfs_bmbt_get_startblock(ep); + got.br_state = xfs_bmbt_get_state(ep); + } + *lastxp = lastx; + *gotp = got; + return ep; +} + +/* + * Search the extents list for the inode, for the extent containing bno. + * If bno lies in a hole, point to the next entry. If bno lies past eof, + * *eofp will be set, and *prevp will contain the last entry (null if none). + * Else, *lastxp will be set to the index of the found + * entry; *gotp will contain the entry. + */ +STATIC xfs_bmbt_rec_t * /* pointer to found extent entry */ +xfs_bmap_search_extents( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fileoff_t bno, /* block number searched for */ + int whichfork, /* data or attr fork */ + int *eofp, /* out: end of file found */ + xfs_extnum_t *lastxp, /* out: last extent index */ + xfs_bmbt_irec_t *gotp, /* out: extent entry found */ + xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ +{ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_rec_t *base; /* base of extent list */ + xfs_extnum_t lastx; /* last extent index used */ + xfs_extnum_t nextents; /* extent list size */ + + XFS_STATS_INC(xfsstats.xs_look_exlist); + ifp = XFS_IFORK_PTR(ip, whichfork); + lastx = ifp->if_lastex; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; + + return xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp, + lastxp, gotp, prevp); +} + + +#ifdef XFS_BMAP_TRACE +/* + * Add a bmap trace buffer entry. Base routine for the others. 
+ */ +STATIC void +xfs_bmap_trace_addentry( + int opcode, /* operation */ + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(ies) */ + xfs_extnum_t cnt, /* count of entries, 1 or 2 */ + xfs_bmbt_rec_t *r1, /* first record */ + xfs_bmbt_rec_t *r2, /* second record or null */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t tr2; + + ASSERT(cnt == 1 || cnt == 2); + ASSERT(r1 != NULL); + if (cnt == 1) { + ASSERT(r2 == NULL); + r2 = &tr2; + memset(&tr2, 0, sizeof(tr2)); + } else + ASSERT(r2 != NULL); + ktrace_enter(xfs_bmap_trace_buf, + (void *)(__psint_t)(opcode | (whichfork << 16)), + (void *)fname, (void *)desc, (void *)ip, + (void *)(__psint_t)idx, + (void *)(__psint_t)cnt, + (void *)(__psunsigned_t)(ip->i_ino >> 32), + (void *)(__psunsigned_t)(unsigned)ip->i_ino, + (void *)(__psunsigned_t)(INT_GET(r1->l0, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l0, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r1->l1, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l1, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r2->l0, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l0, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r2->l1, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l1, ARCH_CONVERT)) + ); + ASSERT(ip->i_xtrace); + ktrace_enter(ip->i_xtrace, + (void *)(__psint_t)(opcode | (whichfork << 16)), + (void *)fname, (void *)desc, (void *)ip, + (void *)(__psint_t)idx, + (void *)(__psint_t)cnt, + (void *)(__psunsigned_t)(ip->i_ino >> 32), + (void *)(__psunsigned_t)(unsigned)ip->i_ino, + (void *)(__psunsigned_t)(INT_GET(r1->l0, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l0, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r1->l1, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l1, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r2->l0, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l0, ARCH_CONVERT)), + (void *)(__psunsigned_t)(INT_GET(r2->l1, ARCH_CONVERT) >> 32), + (void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l1, ARCH_CONVERT)) + ); +} + +/* + * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. + */ +STATIC void +xfs_bmap_trace_delete( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(entries) deleted */ + xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */ + int whichfork) /* data or attr fork */ +{ + xfs_ifork_t *ifp; /* inode fork pointer */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx, + cnt, &ifp->if_u1.if_extents[idx], + cnt == 2 ? &ifp->if_u1.if_extents[idx + 1] : NULL, + whichfork); +} + +/* + * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or + * reading in the extents list from the disk (in the btree). 
+ */ +STATIC void +xfs_bmap_trace_insert( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry(entries) inserted */ + xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */ + xfs_bmbt_irec_t *r1, /* inserted record 1 */ + xfs_bmbt_irec_t *r2, /* inserted record 2 or null */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t tr1; /* compressed record 1 */ + xfs_bmbt_rec_t tr2; /* compressed record 2 if needed */ + + xfs_bmbt_set_all(&tr1, r1); + if (cnt == 2) { + ASSERT(r2 != NULL); + xfs_bmbt_set_all(&tr2, r2); + } else { + ASSERT(cnt == 1); + ASSERT(r2 == NULL); + } + xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx, + cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork); +} + +/* + * Add bmap trace entry after updating an extent list entry in place. + */ +STATIC void +xfs_bmap_trace_post_update( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry updated */ + int whichfork) /* data or attr fork */ +{ + xfs_ifork_t *ifp; /* inode fork pointer */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx, + 1, &ifp->if_u1.if_extents[idx], NULL, whichfork); +} + +/* + * Add bmap trace entry prior to updating an extent list entry in place. + */ +STATIC void +xfs_bmap_trace_pre_update( + char *fname, /* function name */ + char *desc, /* operation description */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* index of entry to be updated */ + int whichfork) /* data or attr fork */ +{ + xfs_ifork_t *ifp; /* inode fork pointer */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1, + &ifp->if_u1.if_extents[idx], NULL, whichfork); +} +#endif /* XFS_BMAP_TRACE */ + +/* + * Compute the worst-case number of indirect blocks that will be used + * for ip's delayed extent of length "len". + */ +STATIC xfs_filblks_t +xfs_bmap_worst_indlen( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_filblks_t len) /* delayed extent length */ +{ + int level; /* btree level number */ + int maxrecs; /* maximum record count at this level */ + xfs_mount_t *mp; /* mount structure */ + xfs_filblks_t rval; /* return value */ + + mp = ip->i_mount; + maxrecs = mp->m_bmap_dmxr[0]; + for (level = 0, rval = 0; + level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); + level++) { + len += maxrecs - 1; + do_div(len, maxrecs); + rval += len; + if (len == 1) + return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - + level - 1; + if (level == 0) + maxrecs = mp->m_bmap_dmxr[1]; + } + return rval; +} + +#if defined(DEBUG) && defined(XFS_RW_TRACE) +STATIC void +xfs_bunmap_trace( + xfs_inode_t *ip, + xfs_fileoff_t bno, + xfs_filblks_t len, + int flags, + inst_t *ra) +{ + if (ip->i_rwtrace == NULL) + return; + ktrace_enter(ip->i_rwtrace, + (void *)(__psint_t)XFS_BUNMAPI, + (void *)ip, + (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff), + (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff), + (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff), + (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff), + (void *)(__psint_t)len, + (void *)(__psint_t)flags, + (void *)(__psint_t)private.p_cpuid, + (void *)ra, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0); +} +#endif + +/* + * Convert inode from non-attributed to attributed. 
+ * Must not be in a transaction, ip must not be locked. + */ +int /* error code */ +xfs_bmap_add_attrfork( + xfs_inode_t *ip, /* incore inode pointer */ + int rsvd) /* OK to allocated reserved blocks in trans */ +{ + int blks; /* space reservation */ + int committed; /* xaction was committed */ + int error; /* error return value */ + xfs_fsblock_t firstblock; /* 1st block/ag allocated */ + xfs_bmap_free_t flist; /* freed extent list */ + int logflags; /* logging flags */ + xfs_mount_t *mp; /* mount structure */ + unsigned long s; /* spinlock spl value */ + xfs_trans_t *tp; /* transaction pointer */ + + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t)); + if (XFS_IFORK_Q(ip)) + return 0; + mp = ip->i_mount; + ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); + tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK); + blks = XFS_ADDAFORK_SPACE_RES(mp); + if (rsvd) + tp->t_flags |= XFS_TRANS_RESERVE; + if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT))) + goto error0; + xfs_ilock(ip, XFS_ILOCK_EXCL); + error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ? + XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : + XFS_QMOPT_RES_REGBLKS); + if (error) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES); + return error; + } + if (XFS_IFORK_Q(ip)) + goto error1; + if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { + /* + * For inodes coming from pre-6.2 filesystems. + */ + ASSERT(ip->i_d.di_aformat == 0); + ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; + } + ASSERT(ip->i_d.di_anextents == 0); + VN_HOLD(XFS_ITOV(ip)); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + switch (ip->i_d.di_format) { + case XFS_DINODE_FMT_DEV: + ip->i_d.di_forkoff = roundup(sizeof(dev_t), 8) >> 3; + break; + case XFS_DINODE_FMT_UUID: + ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3; + break; + case XFS_DINODE_FMT_LOCAL: + case XFS_DINODE_FMT_EXTENTS: + case XFS_DINODE_FMT_BTREE: + ip->i_d.di_forkoff = mp->m_attroffset >> 3; + break; + default: + ASSERT(0); + error = XFS_ERROR(EINVAL); + goto error1; + } + ip->i_df.if_ext_max = + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(ip->i_afp == NULL); + ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); + ip->i_afp->if_ext_max = + XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + ip->i_afp->if_flags = XFS_IFEXTENTS; + logflags = 0; + XFS_BMAP_INIT(&flist, &firstblock); + switch (ip->i_d.di_format) { + case XFS_DINODE_FMT_LOCAL: + error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist, + &logflags); + break; + case XFS_DINODE_FMT_EXTENTS: + error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock, + &flist, &logflags); + break; + case XFS_DINODE_FMT_BTREE: + error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist, + &logflags); + break; + default: + error = 0; + break; + } + if (logflags) + xfs_trans_log_inode(tp, ip, logflags); + if (error) + goto error2; + if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) { + s = XFS_SB_LOCK(mp); + if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) { + XFS_SB_VERSION_ADDATTR(&mp->m_sb); + XFS_SB_UNLOCK(mp, s); + xfs_mod_sb(tp, XFS_SB_VERSIONNUM); + } else + XFS_SB_UNLOCK(mp, s); + } + if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed))) + goto error2; + error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES, NULL); + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t)); + return error; +error2: + 
xfs_bmap_cancel(&flist); +error1: + ASSERT(ismrlocked(&ip->i_lock,MR_UPDATE)); + xfs_iunlock(ip, XFS_ILOCK_EXCL); +error0: + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t)); + return error; +} + +/* + * Add the extent to the list of extents to be free at transaction end. + * The list is maintained sorted (by block number). + */ +/* ARGSUSED */ +void +xfs_bmap_add_free( + xfs_fsblock_t bno, /* fs block number of extent */ + xfs_filblks_t len, /* length of extent */ + xfs_bmap_free_t *flist, /* list of extents */ + xfs_mount_t *mp) /* mount point structure */ +{ + xfs_bmap_free_item_t *cur; /* current (next) element */ + xfs_bmap_free_item_t *new; /* new element */ + xfs_bmap_free_item_t *prev; /* previous element */ +#ifdef DEBUG + xfs_agnumber_t agno; + xfs_agblock_t agbno; + + ASSERT(bno != NULLFSBLOCK); + ASSERT(len > 0); + ASSERT(len <= MAXEXTLEN); + ASSERT(!ISNULLSTARTBLOCK(bno)); + agno = XFS_FSB_TO_AGNO(mp, bno); + agbno = XFS_FSB_TO_AGBNO(mp, bno); + ASSERT(agno < mp->m_sb.sb_agcount); + ASSERT(agbno < mp->m_sb.sb_agblocks); + ASSERT(len < mp->m_sb.sb_agblocks); + ASSERT(agbno + len <= mp->m_sb.sb_agblocks); +#endif + ASSERT(xfs_bmap_free_item_zone != NULL); + new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP); + new->xbfi_startblock = bno; + new->xbfi_blockcount = (xfs_extlen_t)len; + for (prev = NULL, cur = flist->xbf_first; + cur != NULL; + prev = cur, cur = cur->xbfi_next) { + if (cur->xbfi_startblock >= bno) + break; + } + if (prev) + prev->xbfi_next = new; + else + flist->xbf_first = new; + new->xbfi_next = cur; + flist->xbf_count++; +} + +/* + * Compute and fill in the value of the maximum depth of a bmap btree + * in this filesystem. Done once, during mount. + */ +void +xfs_bmap_compute_maxlevels( + xfs_mount_t *mp, /* file system mount structure */ + int whichfork) /* data or attr fork */ +{ + int level; /* btree level */ + uint maxblocks; /* max blocks at this level */ + uint maxleafents; /* max leaf entries possible */ + int maxrootrecs; /* max records in root block */ + int minleafrecs; /* min records in leaf block */ + int minnoderecs; /* min records in node block */ + int sz; /* root block size */ + + /* + * The maximum number of extents in a file, hence the maximum + * number of leaf entries, is controlled by the type of di_nextents + * (a signed 32-bit number, xfs_extnum_t), or by di_anextents + * (a signed 16-bit number, xfs_aextnum_t). + */ + maxleafents = (whichfork == XFS_DATA_FORK) ? MAXEXTNUM : MAXAEXTNUM; + minleafrecs = mp->m_bmap_dmnr[0]; + minnoderecs = mp->m_bmap_dmnr[1]; + sz = (whichfork == XFS_DATA_FORK) ? + mp->m_attroffset : + mp->m_sb.sb_inodesize - mp->m_attroffset; + maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0); + maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; + for (level = 1; maxblocks > 1; level++) { + if (maxblocks <= maxrootrecs) + maxblocks = 1; + else + maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; + } + mp->m_bm_maxlevels[whichfork] = level; +} + +/* + * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi + * caller. Frees all the extents that need freeing, which must be done + * last due to locking considerations. We never free any extents in + * the first transaction. This is to allow the caller to make the first + * transaction a synchronous one so that the pointers to the data being + * broken in this transaction will be permanent before the data is actually + * freed. 
This is necessary to prevent blocks from being reallocated + * and written to before the free and reallocation are actually permanent. + * We do not just make the first transaction synchronous here, because + * there are more efficient ways to gain the same protection in some cases + * (see the file truncation code). + * + * Return 1 if the given transaction was committed and a new one + * started, and 0 otherwise in the committed parameter. + */ +/*ARGSUSED*/ +int /* error */ +xfs_bmap_finish( + xfs_trans_t **tp, /* transaction pointer addr */ + xfs_bmap_free_t *flist, /* i/o: list extents to free */ + xfs_fsblock_t firstblock, /* controlled ag for allocs */ + int *committed) /* xact committed or not */ +{ + xfs_efd_log_item_t *efd; /* extent free data */ + xfs_efi_log_item_t *efi; /* extent free intention */ + int error; /* error return value */ + xfs_bmap_free_item_t *free; /* free extent list item */ + unsigned int logres; /* new log reservation */ + unsigned int logcount; /* new log count */ + xfs_mount_t *mp; /* filesystem mount structure */ + xfs_bmap_free_item_t *next; /* next item on free list */ + xfs_trans_t *ntp; /* new transaction pointer */ + + ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); + if (flist->xbf_count == 0) { + *committed = 0; + return 0; + } + ntp = *tp; + efi = xfs_trans_get_efi(ntp, flist->xbf_count); + for (free = flist->xbf_first; free; free = free->xbfi_next) + xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock, + free->xbfi_blockcount); + logres = ntp->t_log_res; + logcount = ntp->t_log_count; + ntp = xfs_trans_dup(*tp); + error = xfs_trans_commit(*tp, 0, NULL); + *tp = ntp; + *committed = 1; + /* + * We have a new transaction, so we should return committed=1, + * even though we're returning an error. + */ + if (error) { + return error; + } + if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES, + logcount))) + return error; + efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count); + for (free = flist->xbf_first; free != NULL; free = next) { + next = free->xbfi_next; + if ((error = xfs_free_extent(ntp, free->xbfi_startblock, + free->xbfi_blockcount))) { + /* + * The bmap free list will be cleaned up at a + * higher level. The EFI will be canceled when + * this transaction is aborted. + * Need to force shutdown here to make sure it + * happens, since this transaction may not be + * dirty yet. + */ + mp = ntp->t_mountp; + if (!XFS_FORCED_SHUTDOWN(mp)) + xfs_force_shutdown(mp, + (error == EFSCORRUPTED) ? + XFS_CORRUPT_INCORE : + XFS_METADATA_IO_ERROR); + return error; + } + xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock, + free->xbfi_blockcount); + xfs_bmap_del_free(flist, NULL, free); + } + return 0; +} + +/* + * Free up any items left in the list. + */ +void +xfs_bmap_cancel( + xfs_bmap_free_t *flist) /* list of bmap_free_items */ +{ + xfs_bmap_free_item_t *free; /* free list item */ + xfs_bmap_free_item_t *next; + + if (flist->xbf_count == 0) + return; + ASSERT(flist->xbf_first != NULL); + for (free = flist->xbf_first; free; free = next) { + next = free->xbfi_next; + xfs_bmap_del_free(flist, NULL, free); + } + ASSERT(flist->xbf_count == 0); +} + +/* + * Returns EINVAL if the specified file is not swappable. 
+ */ +int /* error */ +xfs_bmap_check_swappable( + xfs_inode_t *ip) /* incore inode */ +{ + xfs_bmbt_rec_t *base; /* base of extent array */ + xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ + xfs_fileoff_t end_fsb; /* last block of file within size */ + xfs_bmbt_irec_t ext; /* extent list entry, decoded */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_fileoff_t lastaddr; /* last block number seen */ + xfs_extnum_t nextents; /* number of extent entries */ + int retval = 0; /* return value */ + + xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + + /* + * Check for a zero length file. + */ + if (ip->i_d.di_size == 0) + goto check_done; + + ASSERT(XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) == XFS_DINODE_FMT_BTREE || + XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS); + + ifp = &ip->i_df; + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (retval = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) + goto check_done; + /* + * Scan extents until the file size is reached. Look for + * holes or unwritten extents, since I/O to these would cause + * a transaction. + */ + end_fsb = XFS_B_TO_FSB(ip->i_mount, ip->i_d.di_size); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; + for (lastaddr = 0, ep = base; ep < &base[nextents]; ep++) { + xfs_bmbt_get_all(ep, &ext); + if (lastaddr < ext.br_startoff || + ext.br_state != XFS_EXT_NORM) { + goto error_done; + } + if (end_fsb <= (lastaddr = ext.br_startoff + + ext.br_blockcount)) + goto check_done; + } +error_done: + retval = XFS_ERROR(EINVAL); + + +check_done: + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + return retval; +} + +/* + * Returns the file-relative block number of the first unused block(s) + * in the file with at least "len" logically contiguous blocks free. + * This is the lowest-address hole if the file has holes, else the first block + * past the end of file. + * Return 0 if the file is currently local (in-inode). + */ +int /* error */ +xfs_bmap_first_unused( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + xfs_extlen_t len, /* size of hole to find */ + xfs_fileoff_t *first_unused, /* unused block */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extent array */ + xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_fileoff_t lastaddr; /* last block number seen */ + xfs_fileoff_t lowest; /* lowest useful block */ + xfs_fileoff_t max; /* starting useful block */ + xfs_fileoff_t off; /* offset for this block */ + xfs_extnum_t nextents; /* number of extent entries */ + + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE || + XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS || + XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { + *first_unused = 0; + return 0; + } + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + lowest = *first_unused; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; + for (lastaddr = 0, max = lowest, ep = base; + ep < &base[nextents]; + ep++) { + off = xfs_bmbt_get_startoff(ep); + /* + * See if the hole before this extent will work. 
+ */ + if (off >= lowest + len && off - max >= len) { + *first_unused = max; + return 0; + } + lastaddr = off + xfs_bmbt_get_blockcount(ep); + max = XFS_FILEOFF_MAX(lastaddr, lowest); + } + *first_unused = max; + return 0; +} + +/* + * Returns the file-relative block number of the last block + 1 before + * last_block (input value) in the file. + * This is not based on i_size, it is based on the extent list. + * Returns 0 for local files, as they do not have an extent list. + */ +int /* error */ +xfs_bmap_last_before( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + xfs_fileoff_t *last_block, /* last block */ + int whichfork) /* data or attr fork */ +{ + xfs_fileoff_t bno; /* input file offset */ + int eof; /* hit end of file */ + xfs_bmbt_rec_t *ep; /* pointer to last extent */ + int error; /* error return value */ + xfs_bmbt_irec_t got; /* current extent value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t lastx; /* last extent used */ + xfs_bmbt_irec_t prev; /* previous extent value */ + + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) + return XFS_ERROR(EIO); + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { + *last_block = 0; + return 0; + } + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + bno = *last_block - 1; + ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, + &prev); + if (eof || xfs_bmbt_get_startoff(ep) > bno) { + if (prev.br_startoff == NULLFILEOFF) + *last_block = 0; + else + *last_block = prev.br_startoff + prev.br_blockcount; + } + /* + * Otherwise *last_block is already the right answer. + */ + return 0; +} + +/* + * Returns the file-relative block number of the first block past eof in + * the file. This is not based on i_size, it is based on the extent list. + * Returns 0 for local files, as they do not have an extent list. + */ +int /* error */ +xfs_bmap_last_offset( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + xfs_fileoff_t *last_block, /* last block */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extent array */ + xfs_bmbt_rec_t *ep; /* pointer to last extent */ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extent entries */ + + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) + return XFS_ERROR(EIO); + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { + *last_block = 0; + return 0; + } + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (!nextents) { + *last_block = 0; + return 0; + } + base = &ifp->if_u1.if_extents[0]; + ASSERT(base != NULL); + ep = &base[nextents - 1]; + *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep); + return 0; +} + +/* + * Returns whether the selected fork of the inode has exactly one + * block or not. For the data fork we check this matches di_size, + * implying the file's range is 0..bsize-1. 
+ */ +int /* 1=>1 block, 0=>otherwise */ +xfs_bmap_one_block( + xfs_inode_t *ip, /* incore inode */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *ep; /* ptr to fork's extent */ + xfs_ifork_t *ifp; /* inode fork pointer */ + int rval; /* return value */ + xfs_bmbt_irec_t s; /* internal version of extent */ + +#ifndef DEBUG + if (whichfork == XFS_DATA_FORK) + return ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize; +#endif /* !DEBUG */ + if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1) + return 0; + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) + return 0; + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + ep = ifp->if_u1.if_extents; + xfs_bmbt_get_all(ep, &s); + rval = s.br_startoff == 0 && s.br_blockcount == 1; + if (rval && whichfork == XFS_DATA_FORK) + ASSERT(ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize); + return rval; +} + +/* + * Read in the extents to if_extents. + * All inode fields are set up by caller, we just traverse the btree + * and copy the records in. If the file system cannot contain unwritten + * extents, the records are checked for no "state" flags. + */ +int /* error */ +xfs_bmap_read_extents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_block_t *block; /* current btree block */ + xfs_fsblock_t bno; /* block # of "block" */ + xfs_buf_t *bp; /* buffer for "block" */ + int error; /* error return value */ + xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */ +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_bmap_read_extents"; +#endif + xfs_extnum_t i, j; /* index into the extents list */ + xfs_ifork_t *ifp; /* fork structure */ + int level; /* btree level, for checking */ + xfs_mount_t *mp; /* file system mount structure */ + xfs_bmbt_ptr_t *pp; /* pointer to block address */ + /* REFERENCED */ + xfs_extnum_t room; /* number of entries there's room for */ + xfs_bmbt_rec_t *trp; /* target record pointer */ + + bno = NULLFSBLOCK; + mp = ip->i_mount; + ifp = XFS_IFORK_PTR(ip, whichfork); + exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE : + XFS_EXTFMT_INODE(ip); + block = ifp->if_broot; + /* + * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. + */ + ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); + level = INT_GET(block->bb_level, ARCH_CONVERT); + pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); + ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); + ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); + ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); + bno = INT_GET(*pp, ARCH_CONVERT); + /* + * Go down the tree until leaf level is reached, following the first + * pointer (leftmost) at each level. + */ + while (level-- > 0) { + if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, + XFS_BMAP_BTREE_REF))) + return error; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + XFS_WANT_CORRUPTED_GOTO( + XFS_BMAP_SANITY_CHECK(mp, block, level), + error0); + if (level == 0) + break; + pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, + 1, mp->m_bmap_dmxr[1]); + XFS_WANT_CORRUPTED_GOTO( + XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)), + error0); + bno = INT_GET(*pp, ARCH_CONVERT); + xfs_trans_brelse(tp, bp); + } + /* + * Here with bp and block set to the leftmost leaf node in the tree. + */ + room = ifp->if_bytes / (uint)sizeof(*trp); + trp = ifp->if_u1.if_extents; + i = 0; + /* + * Loop over all leaf nodes. 
Copy information to the extent list. + */ + for (;;) { + xfs_bmbt_rec_t *frp, *temp; + xfs_fsblock_t nextbno; + xfs_extnum_t num_recs; + + + num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + if (unlikely(i + num_recs > room)) { + ASSERT(i + num_recs <= room); + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt dinode %Lu, (btree extents). Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino); + XFS_ERROR_REPORT("xfs_bmap_read_extents(1)", + XFS_ERRLEVEL_LOW, + ip->i_mount); + goto error0; + } + XFS_WANT_CORRUPTED_GOTO( + XFS_BMAP_SANITY_CHECK(mp, block, 0), + error0); + /* + * Read-ahead the next leaf block, if any. + */ + nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + if (nextbno != NULLFSBLOCK) + xfs_btree_reada_bufl(mp, nextbno, 1); + /* + * Copy records into the extent list. + */ + frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, + block, 1, mp->m_bmap_dmxr[0]); + temp = trp; + for (j = 0; j < num_recs; j++, frp++, trp++) { + trp->l0 = INT_GET(frp->l0, ARCH_CONVERT); + trp->l1 = INT_GET(frp->l1, ARCH_CONVERT); + } + if (exntf == XFS_EXTFMT_NOSTATE) { + /* + * Check all attribute bmap btree records and + * any "older" data bmap btree records for a + * set bit in the "extent flag" position. + */ + if (unlikely(xfs_check_nostate_extents(temp, num_recs))) { + XFS_ERROR_REPORT("xfs_bmap_read_extents(2)", + XFS_ERRLEVEL_LOW, + ip->i_mount); + goto error0; + } + } + i += num_recs; + xfs_trans_brelse(tp, bp); + bno = nextbno; + /* + * If we've reached the end, stop. + */ + if (bno == NULLFSBLOCK) + break; + if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, + XFS_BMAP_BTREE_REF))) + return error; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + } + ASSERT(i == ifp->if_bytes / (uint)sizeof(*trp)); + ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); + xfs_bmap_trace_exlist(fname, ip, i, whichfork); + return 0; +error0: + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); +} + +#ifdef XFS_BMAP_TRACE +/* + * Add bmap trace insert entries for all the contents of the extent list. + */ +void +xfs_bmap_trace_exlist( + char *fname, /* function name */ + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t cnt, /* count of entries in the list */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extent list */ + xfs_bmbt_rec_t *ep; /* current entry in extent list */ + xfs_extnum_t idx; /* extent list entry number */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_irec_t s; /* extent list record */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(cnt == ifp->if_bytes / (uint)sizeof(*base)); + base = ifp->if_u1.if_extents; + for (idx = 0, ep = base; idx < cnt; idx++, ep++) { + xfs_bmbt_get_all(ep, &s); + xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL, + whichfork); + } +} +#endif + +#ifdef DEBUG +/* + * Validate that the bmbt_irecs being returned from bmapi are valid + * given the callers original parameters. Specifically check the + * ranges of the returned irecs to ensure that they only extent beyond + * the given parameters if the XFS_BMAPI_ENTIRE flag was set. 
+ */ +STATIC void +xfs_bmap_validate_ret( + xfs_fileoff_t bno, + xfs_filblks_t len, + int flags, + xfs_bmbt_irec_t *mval, + int nmap, + int ret_nmap) +{ + int i; /* index to map values */ + + ASSERT(ret_nmap <= nmap); + + for (i = 0; i < ret_nmap; i++) { + ASSERT(mval[i].br_blockcount > 0); + if (!(flags & XFS_BMAPI_ENTIRE)) { + ASSERT(mval[i].br_startoff >= bno); + ASSERT(mval[i].br_blockcount <= len); + ASSERT(mval[i].br_startoff + mval[i].br_blockcount <= + bno + len); + } else { + ASSERT(mval[i].br_startoff < bno + len); + ASSERT(mval[i].br_startoff + mval[i].br_blockcount > + bno); + } + ASSERT(i == 0 || + mval[i - 1].br_startoff + mval[i - 1].br_blockcount == + mval[i].br_startoff); + if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY)) + ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK && + mval[i].br_startblock != HOLESTARTBLOCK); + ASSERT(mval[i].br_state == XFS_EXT_NORM || + mval[i].br_state == XFS_EXT_UNWRITTEN); + } +} +#endif /* DEBUG */ + + +/* + * Map file blocks to filesystem blocks. + * File range is given by the bno/len pair. + * Adds blocks to file if a write ("flags & XFS_BMAPI_WRITE" set) + * into a hole or past eof. + * Only allocates blocks from a single allocation group, + * to avoid locking problems. + * The returned value in "firstblock" from the first call in a transaction + * must be remembered and presented to subsequent calls in "firstblock". + * An upper bound for the number of blocks to be allocated is supplied to + * the first call in "total"; if no allocation group has that many free + * blocks then the call will fail (return NULLFSBLOCK in "firstblock"). + */ +int /* error */ +xfs_bmapi( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + xfs_fileoff_t bno, /* starting file offs. mapped */ + xfs_filblks_t len, /* length to map in file */ + int flags, /* XFS_BMAPI_... */ + xfs_fsblock_t *firstblock, /* first allocated block + controls a.g. 
for allocs */ + xfs_extlen_t total, /* total blocks needed */ + xfs_bmbt_irec_t *mval, /* output: map values */ + int *nmap, /* i/o: mval size/count */ + xfs_bmap_free_t *flist) /* i/o: list extents to free */ +{ + xfs_fsblock_t abno; /* allocated block number */ + xfs_extlen_t alen; /* allocated extent length */ + xfs_fileoff_t aoff; /* allocated file offset */ + xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */ + char contig; /* allocation must be one extent */ + xfs_btree_cur_t *cur; /* bmap btree cursor */ + char delay; /* this request is for delayed alloc */ + xfs_fileoff_t end; /* end of mapped file region */ + int eof; /* we've hit the end of extent list */ + xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + int error; /* error return */ + char exact; /* don't do all of wasdelayed extent */ + xfs_bmbt_irec_t got; /* current extent list record */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extlen_t indlen; /* indirect blocks length */ + char inhole; /* current location is hole in file */ + xfs_extnum_t lastx; /* last useful extent number */ + int logflags; /* flags for transaction logging */ + xfs_extlen_t minleft; /* min blocks left after allocation */ + xfs_extlen_t minlen; /* min allocation size */ + xfs_mount_t *mp; /* xfs mount structure */ + int n; /* current extent index */ + int nallocs; /* number of extents alloc\'d */ + xfs_extnum_t nextents; /* number of extents in file */ + xfs_fileoff_t obno; /* old block number (offset) */ + xfs_bmbt_irec_t prev; /* previous extent list record */ + char stateless; /* ignore state flag set */ + int tmp_logflags; /* temp flags holder */ + char trim; /* output trimmed to match range */ + char userdata; /* allocating non-metadata */ + char wasdelay; /* old extent was delayed */ + int whichfork; /* data or attr fork */ + char wr; /* this is a write request */ + char rsvd; /* OK to allocate reserved blocks */ +#ifdef DEBUG + xfs_fileoff_t orig_bno; /* original block number value */ + int orig_flags; /* original flags arg value */ + xfs_filblks_t orig_len; /* original value of len arg */ + xfs_bmbt_irec_t *orig_mval; /* original value of mval */ + int orig_nmap; /* original value of *nmap */ + + orig_bno = bno; + orig_len = len; + orig_flags = flags; + orig_mval = mval; + orig_nmap = *nmap; +#endif + ASSERT(*nmap >= 1); + ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE)); + whichfork = (flags & XFS_BMAPI_ATTRFORK) ? + XFS_ATTR_FORK : XFS_DATA_FORK; + mp = ip->i_mount; + if (unlikely(XFS_TEST_ERROR( + (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL), + mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { + XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + if ((wr = (flags & XFS_BMAPI_WRITE)) != 0) + XFS_STATS_INC(xfsstats.xs_blk_mapw); + else + XFS_STATS_INC(xfsstats.xs_blk_mapr); + delay = (flags & XFS_BMAPI_DELAY) != 0; + trim = (flags & XFS_BMAPI_ENTIRE) == 0; + userdata = (flags & XFS_BMAPI_METADATA) == 0; + exact = (flags & XFS_BMAPI_EXACT) != 0; + rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; + contig = (flags & XFS_BMAPI_CONTIG) != 0; + /* + * stateless is used to combine extents which + * differ only due to the state of the extents. 
+ * This technique is used from xfs_getbmap() + * when the caller does not wish to see the + * separation (which is the default). + * + * This technique is also used when writing a + * buffer which has been partially written, + * (usually by being flushed during a chunkread), + * to ensure one write takes place. This also + * prevents a change in the xfs inode extents at + * this time, intentionally. This change occurs + * on completion of the write operation, in + * xfs_strat_comp(), where the xfs_bmapi() call + * is transactioned, and the extents combined. + */ + stateless = (flags & XFS_BMAPI_IGSTATE) != 0; + if (stateless && wr) /* if writing unwritten space, no */ + wr = 0; /* allocations are allowed */ + ASSERT(wr || !delay); + logflags = 0; + nallocs = 0; + cur = NULL; + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { + ASSERT(wr && tp); + if ((error = xfs_bmap_local_to_extents(tp, ip, + firstblock, total, &logflags, whichfork))) + goto error0; + } + if (wr && *firstblock == NULLFSBLOCK) { + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) + minleft = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1; + else + minleft = 1; + } else + minleft = 0; + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + goto error0; + ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, + &prev); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + n = 0; + end = bno + len; + obno = bno; + bma.ip = NULL; + while (bno < end && n < *nmap) { + /* + * Reading past eof, act as though there's a hole + * up to end. + */ + if (eof && !wr) + got.br_startoff = end; + inhole = eof || got.br_startoff > bno; + wasdelay = wr && !inhole && !delay && + ISNULLSTARTBLOCK(got.br_startblock); + /* + * First, deal with the hole before the allocated space + * that we found, if any. + */ + if (wr && (inhole || wasdelay)) { + /* + * For the wasdelay case, we could also just + * allocate the stuff asked for in this bmap call + * but that wouldn't be as good. + */ + if (wasdelay && !exact) { + alen = (xfs_extlen_t)got.br_blockcount; + aoff = got.br_startoff; + if (lastx != NULLEXTNUM && lastx) { + ep = &ifp->if_u1.if_extents[lastx - 1]; + xfs_bmbt_get_all(ep, &prev); + } + } else if (wasdelay) { + alen = (xfs_extlen_t) + XFS_FILBLKS_MIN(len, + (got.br_startoff + + got.br_blockcount) - bno); + aoff = bno; + } else { + alen = (xfs_extlen_t) + XFS_FILBLKS_MIN(len, MAXEXTLEN); + if (!eof) + alen = (xfs_extlen_t) + XFS_FILBLKS_MIN(alen, + got.br_startoff - bno); + aoff = bno; + } + minlen = contig ? alen : 1; + if (delay) { + indlen = (xfs_extlen_t) + xfs_bmap_worst_indlen(ip, alen); + ASSERT(indlen > 0); + /* + * Make a transaction-less quota reservation for + * delayed allocation blocks. This number gets + * adjusted later. + * We return EDQUOT if we haven't allocated + * blks already inside this loop; + */ + if (XFS_TRANS_RESERVE_BLKQUOTA( + mp, NULL, ip, (long)alen)) { + if (n == 0) { + *nmap = 0; + ASSERT(cur == NULL); + return XFS_ERROR(EDQUOT); + } + break; + } + if (xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, + -(alen + indlen), rsvd)) { + XFS_TRANS_UNRESERVE_BLKQUOTA( + mp, NULL, ip, (long)alen); + break; + } + ip->i_delayed_blks += alen; + abno = NULLSTARTBLOCK(indlen); + } else { + /* + * If first time, allocate and fill in + * once-only bma fields. 
+ */ + if (bma.ip == NULL) { + bma.tp = tp; + bma.ip = ip; + bma.prevp = &prev; + bma.gotp = &got; + bma.total = total; + bma.userdata = 0; + } + /* Indicate if this is the first user data + * in the file, or just any user data. + */ + if (userdata) { + bma.userdata = (aoff == 0) ? + XFS_ALLOC_INITIAL_USER_DATA : + XFS_ALLOC_USERDATA; + } + /* + * Fill in changeable bma fields. + */ + bma.eof = eof; + bma.firstblock = *firstblock; + bma.alen = alen; + bma.off = aoff; + bma.wasdel = wasdelay; + bma.minlen = minlen; + bma.low = flist->xbf_low; + bma.minleft = minleft; + /* + * Only want to do the alignment at the + * eof if it is userdata and allocation length + * is larger than a stripe unit. + */ + if (mp->m_dalign && alen >= mp->m_dalign && + userdata && whichfork == XFS_DATA_FORK) { + if ((error = xfs_bmap_isaeof(ip, aoff, + whichfork, &bma.aeof))) + goto error0; + } else + bma.aeof = 0; + /* + * Call allocator. + */ + if ((error = xfs_bmap_alloc(&bma))) + goto error0; + /* + * Copy out result fields. + */ + abno = bma.rval; + if ((flist->xbf_low = bma.low)) + minleft = 0; + alen = bma.alen; + aoff = bma.off; + ASSERT(*firstblock == NULLFSBLOCK || + XFS_FSB_TO_AGNO(mp, *firstblock) == + XFS_FSB_TO_AGNO(mp, bma.firstblock) || + (flist->xbf_low && + XFS_FSB_TO_AGNO(mp, *firstblock) < + XFS_FSB_TO_AGNO(mp, bma.firstblock))); + *firstblock = bma.firstblock; + if (cur) + cur->bc_private.b.firstblock = + *firstblock; + if (abno == NULLFSBLOCK) + break; + if ((ifp->if_flags & XFS_IFBROOT) && !cur) { + cur = xfs_btree_init_cursor(mp, + tp, NULL, 0, XFS_BTNUM_BMAP, + ip, whichfork); + cur->bc_private.b.firstblock = + *firstblock; + cur->bc_private.b.flist = flist; + } + /* + * Bump the number of extents we've allocated + * in this call. + */ + nallocs++; + } + if (cur) + cur->bc_private.b.flags = + wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0; + got.br_startoff = aoff; + got.br_startblock = abno; + got.br_blockcount = alen; + got.br_state = XFS_EXT_NORM; /* assume normal */ + /* + * Determine state of extent, and the filesystem. + * A wasdelay extent has been initialized, so + * shouldn't be flagged as unwritten. + */ + if (wr && XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { + if (!wasdelay && (flags & XFS_BMAPI_PREALLOC)) + got.br_state = XFS_EXT_UNWRITTEN; + } + error = xfs_bmap_add_extent(ip, lastx, &cur, &got, + firstblock, flist, &tmp_logflags, whichfork, + rsvd); + logflags |= tmp_logflags; + if (error) + goto error0; + lastx = ifp->if_lastex; + ep = &ifp->if_u1.if_extents[lastx]; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + xfs_bmbt_get_all(ep, &got); + ASSERT(got.br_startoff <= aoff); + ASSERT(got.br_startoff + got.br_blockcount >= + aoff + alen); +#ifdef DEBUG + if (delay) { + ASSERT(ISNULLSTARTBLOCK(got.br_startblock)); + ASSERT(STARTBLOCKVAL(got.br_startblock) > 0); + } + ASSERT(got.br_state == XFS_EXT_NORM || + got.br_state == XFS_EXT_UNWRITTEN); +#endif + /* + * Fall down into the found allocated space case. + */ + } else if (inhole) { + /* + * Reading in a hole. + */ + mval->br_startoff = bno; + mval->br_startblock = HOLESTARTBLOCK; + mval->br_blockcount = + XFS_FILBLKS_MIN(len, got.br_startoff - bno); + mval->br_state = XFS_EXT_NORM; + bno += mval->br_blockcount; + len -= mval->br_blockcount; + mval++; + n++; + continue; + } + /* + * Then deal with the allocated space we found. 
+ */ + ASSERT(ep != NULL); + if (trim && (got.br_startoff + got.br_blockcount > obno)) { + if (obno > bno) + bno = obno; + ASSERT((bno >= obno) || (n == 0)); + ASSERT(bno < end); + mval->br_startoff = bno; + if (ISNULLSTARTBLOCK(got.br_startblock)) { + ASSERT(!wr || delay); + mval->br_startblock = DELAYSTARTBLOCK; + } else + mval->br_startblock = + got.br_startblock + + (bno - got.br_startoff); + /* + * Return the minimum of what we got and what we + * asked for for the length. We can use the len + * variable here because it is modified below + * and we could have been there before coming + * here if the first part of the allocation + * didn't overlap what was asked for. + */ + mval->br_blockcount = + XFS_FILBLKS_MIN(end - bno, got.br_blockcount - + (bno - got.br_startoff)); + mval->br_state = got.br_state; + ASSERT(mval->br_blockcount <= len); + } else { + *mval = got; + if (ISNULLSTARTBLOCK(mval->br_startblock)) { + ASSERT(!wr || delay); + mval->br_startblock = DELAYSTARTBLOCK; + } + } + + /* + * Check if writing previously allocated but + * unwritten extents. + */ + if (wr && mval->br_state == XFS_EXT_UNWRITTEN && + ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) { + /* + * Modify (by adding) the state flag, if writing. + */ + ASSERT(mval->br_blockcount <= len); + if ((ifp->if_flags & XFS_IFBROOT) && !cur) { + cur = xfs_btree_init_cursor(mp, + tp, NULL, 0, XFS_BTNUM_BMAP, + ip, whichfork); + cur->bc_private.b.firstblock = + *firstblock; + cur->bc_private.b.flist = flist; + } + mval->br_state = XFS_EXT_NORM; + error = xfs_bmap_add_extent(ip, lastx, &cur, mval, + firstblock, flist, &tmp_logflags, whichfork, + rsvd); + logflags |= tmp_logflags; + if (error) + goto error0; + lastx = ifp->if_lastex; + ep = &ifp->if_u1.if_extents[lastx]; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + xfs_bmbt_get_all(ep, &got); + /* + * We may have combined previously unwritten + * space with written space, so generate + * another request. + */ + if (mval->br_blockcount < len) + continue; + } + + ASSERT(!trim || + ((mval->br_startoff + mval->br_blockcount) <= end)); + ASSERT(!trim || (mval->br_blockcount <= len) || + (mval->br_startoff < obno)); + bno = mval->br_startoff + mval->br_blockcount; + len = end - bno; + if (n > 0 && mval->br_startoff == mval[-1].br_startoff) { + ASSERT(mval->br_startblock == mval[-1].br_startblock); + ASSERT(mval->br_blockcount > mval[-1].br_blockcount); + ASSERT(mval->br_state == mval[-1].br_state); + mval[-1].br_blockcount = mval->br_blockcount; + mval[-1].br_state = mval->br_state; + } else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK && + mval[-1].br_startblock != DELAYSTARTBLOCK && + mval[-1].br_startblock != HOLESTARTBLOCK && + mval->br_startblock == + mval[-1].br_startblock + mval[-1].br_blockcount && + (stateless || mval[-1].br_state == mval->br_state)) { + ASSERT(mval->br_startoff == + mval[-1].br_startoff + mval[-1].br_blockcount); + mval[-1].br_blockcount += mval->br_blockcount; + } else if (n > 0 && + mval->br_startblock == DELAYSTARTBLOCK && + mval[-1].br_startblock == DELAYSTARTBLOCK && + mval->br_startoff == + mval[-1].br_startoff + mval[-1].br_blockcount) { + mval[-1].br_blockcount += mval->br_blockcount; + mval[-1].br_state = mval->br_state; + } else if (!((n == 0) && + ((mval->br_startoff + mval->br_blockcount) <= + obno))) { + mval++; + n++; + } + /* + * If we're done, stop now. Stop when we've allocated + * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise + * the transaction may get too big. 
+ */ + if (bno >= end || n >= *nmap || nallocs >= *nmap) + break; + /* + * Else go on to the next record. + */ + ep++; + lastx++; + if (lastx >= nextents) { + eof = 1; + prev = got; + } else + xfs_bmbt_get_all(ep, &got); + } + ifp->if_lastex = lastx; + *nmap = n; + /* + * Transform from btree to extents, give it cur. + */ + if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && + XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) { + ASSERT(wr && cur); + error = xfs_bmap_btree_to_extents(tp, ip, cur, + &tmp_logflags, whichfork); + logflags |= tmp_logflags; + if (error) + goto error0; + } + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || + XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max); + error = 0; + +error0: + /* + * Log everything. Do this after conversion, there's no point in + * logging the extent list if we've converted to btree format. + */ + if ((logflags & XFS_ILOG_FEXT(whichfork)) && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) + logflags &= ~XFS_ILOG_FEXT(whichfork); + else if ((logflags & XFS_ILOG_FBROOT(whichfork)) && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) + logflags &= ~XFS_ILOG_FBROOT(whichfork); + /* + * Log whatever the flags say, even if error. Otherwise we might miss + * detecting a case where the data is changed, there's an error, + * and it's not logged so we don't shutdown when we should. + */ + if (logflags) { + ASSERT(tp && wr); + xfs_trans_log_inode(tp, ip, logflags); + } + if (cur) { + if (!error) { + ASSERT(*firstblock == NULLFSBLOCK || + XFS_FSB_TO_AGNO(mp, *firstblock) == + XFS_FSB_TO_AGNO(mp, + cur->bc_private.b.firstblock) || + (flist->xbf_low && + XFS_FSB_TO_AGNO(mp, *firstblock) < + XFS_FSB_TO_AGNO(mp, + cur->bc_private.b.firstblock))); + *firstblock = cur->bc_private.b.firstblock; + } + xfs_btree_del_cursor(cur, + error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + } + if (!error) + xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, + orig_nmap, *nmap); + return error; +} + +/* + * Map file blocks to filesystem blocks, simple version. + * One block (extent) only, read-only. + * For flags, only the XFS_BMAPI_ATTRFORK flag is examined. + * For the other flag values, the effect is as if XFS_BMAPI_METADATA + * was set and all the others were clear. + */ +int /* error */ +xfs_bmapi_single( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + int whichfork, /* data or attr fork */ + xfs_fsblock_t *fsb, /* output: mapped block */ + xfs_fileoff_t bno) /* starting file offs. 
mapped */ +{ + int eof; /* we've hit the end of extent list */ + int error; /* error return */ + xfs_bmbt_irec_t got; /* current extent list record */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t lastx; /* last useful extent number */ + xfs_bmbt_irec_t prev; /* previous extent list record */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + if (unlikely( + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) { + XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return XFS_ERROR(EIO); + XFS_STATS_INC(xfsstats.xs_blk_mapr); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + (void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, + &prev); + /* + * Reading past eof, act as though there's a hole + * up to end. + */ + if (eof || got.br_startoff > bno) { + *fsb = NULLFSBLOCK; + return 0; + } + ASSERT(!ISNULLSTARTBLOCK(got.br_startblock)); + ASSERT(bno < got.br_startoff + got.br_blockcount); + *fsb = got.br_startblock + (bno - got.br_startoff); + ifp->if_lastex = lastx; + return 0; +} + +/* + * Unmap (remove) blocks from a file. + * If nexts is nonzero then the number of extents to remove is limited to + * that value. If not all extents in the block range can be removed then + * *done is set. + */ +int /* error */ +xfs_bunmapi( + xfs_trans_t *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t bno, /* starting offset to unmap */ + xfs_filblks_t len, /* length to unmap in file */ + int flags, /* misc flags */ + xfs_extnum_t nexts, /* number of extents max */ + xfs_fsblock_t *firstblock, /* first allocated block + controls a.g. for allocs */ + xfs_bmap_free_t *flist, /* i/o: list extents to free */ + int *done) /* set if not done yet */ +{ + xfs_btree_cur_t *cur; /* bmap btree cursor */ + xfs_bmbt_irec_t del; /* extent being deleted */ + int eof; /* is deleting at eof */ + xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + int error; /* error return value */ + xfs_extnum_t extno; /* extent number in list */ + xfs_bmbt_irec_t got; /* current extent list entry */ + xfs_ifork_t *ifp; /* inode fork pointer */ + int isrt; /* freeing in rt area */ + xfs_extnum_t lastx; /* last extent index used */ + int logflags; /* transaction logging flags */ + xfs_extlen_t mod; /* rt extent offset */ + xfs_mount_t *mp; /* mount structure */ + xfs_extnum_t nextents; /* size of extent list */ + xfs_bmbt_irec_t prev; /* previous extent list entry */ + xfs_fileoff_t start; /* first file offset deleted */ + int tmp_logflags; /* partial logging flags */ + int wasdel; /* was a delayed alloc extent */ + int whichfork; /* data or attribute fork */ + int rsvd; /* OK to allocate reserved blocks */ + xfs_fsblock_t sum; + + xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address); + whichfork = (flags & XFS_BMAPI_ATTRFORK) ? 
+ XFS_ATTR_FORK : XFS_DATA_FORK; + ifp = XFS_IFORK_PTR(ip, whichfork); + if (unlikely( + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { + XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + mp = ip->i_mount; + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; + ASSERT(len > 0); + ASSERT(nexts >= 0); + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(tp, ip, whichfork))) + return error; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (nextents == 0) { + *done = 1; + return 0; + } + XFS_STATS_INC(xfsstats.xs_blk_unmap); + isrt = (whichfork == XFS_DATA_FORK) && + (ip->i_d.di_flags & XFS_DIFLAG_REALTIME); + start = bno; + bno = start + len - 1; + ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, + &prev); + /* + * Check to see if the given block number is past the end of the + * file, back up to the last block if so... + */ + if (eof) { + ep = &ifp->if_u1.if_extents[--lastx]; + xfs_bmbt_get_all(ep, &got); + bno = got.br_startoff + got.br_blockcount - 1; + } + logflags = 0; + if (ifp->if_flags & XFS_IFBROOT) { + ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); + cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip, + whichfork); + cur->bc_private.b.firstblock = *firstblock; + cur->bc_private.b.flist = flist; + cur->bc_private.b.flags = 0; + } else + cur = NULL; + extno = 0; + while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 && + (nexts == 0 || extno < nexts)) { + /* + * Is the found extent after a hole in which bno lives? + * Just back up to the previous extent, if so. + */ + if (got.br_startoff > bno) { + if (--lastx < 0) + break; + ep--; + xfs_bmbt_get_all(ep, &got); + } + /* + * Is the last block of this extent before the range + * we're supposed to delete? If so, we're done. + */ + bno = XFS_FILEOFF_MIN(bno, + got.br_startoff + got.br_blockcount - 1); + if (bno < start) + break; + /* + * Then deal with the (possibly delayed) allocated space + * we found. + */ + ASSERT(ep != NULL); + del = got; + wasdel = ISNULLSTARTBLOCK(del.br_startblock); + if (got.br_startoff < start) { + del.br_startoff = start; + del.br_blockcount -= start - got.br_startoff; + if (!wasdel) + del.br_startblock += start - got.br_startoff; + } + if (del.br_startoff + del.br_blockcount > bno + 1) + del.br_blockcount = bno + 1 - del.br_startoff; + sum = del.br_startblock + del.br_blockcount; + if (isrt && + (mod = do_mod(sum, mp->m_sb.sb_rextsize))) { + /* + * Realtime extent not lined up at the end. + * The extent could have been split into written + * and unwritten pieces, or we could just be + * unmapping part of it. But we can't really + * get rid of part of a realtime extent. + */ + if (del.br_state == XFS_EXT_UNWRITTEN || + !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { + /* + * This piece is unwritten, or we're not + * using unwritten extents. Skip over it. + */ + ASSERT(bno >= mod); + bno -= mod > del.br_blockcount ? + del.br_blockcount : mod; + if (bno < got.br_startoff) { + if (--lastx >= 0) + xfs_bmbt_get_all(--ep, &got); + } + continue; + } + /* + * It's written, turn it unwritten. + * This is better than zeroing it. 
+ */ + ASSERT(del.br_state == XFS_EXT_NORM); + ASSERT(xfs_trans_get_block_res(tp) > 0); + /* + * If this spans a realtime extent boundary, + * chop it back to the start of the one we end at. + */ + if (del.br_blockcount > mod) { + del.br_startoff += del.br_blockcount - mod; + del.br_startblock += del.br_blockcount - mod; + del.br_blockcount = mod; + } + del.br_state = XFS_EXT_UNWRITTEN; + error = xfs_bmap_add_extent(ip, lastx, &cur, &del, + firstblock, flist, &logflags, XFS_DATA_FORK, 0); + if (error) + goto error0; + goto nodelete; + } + if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) { + /* + * Realtime extent is lined up at the end but not + * at the front. We'll get rid of full extents if + * we can. + */ + mod = mp->m_sb.sb_rextsize - mod; + if (del.br_blockcount > mod) { + del.br_blockcount -= mod; + del.br_startoff += mod; + del.br_startblock += mod; + } else if ((del.br_startoff == start && + (del.br_state == XFS_EXT_UNWRITTEN || + xfs_trans_get_block_res(tp) == 0)) || + !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { + /* + * Can't make it unwritten. There isn't + * a full extent here so just skip it. + */ + ASSERT(bno >= del.br_blockcount); + bno -= del.br_blockcount; + if (bno < got.br_startoff) { + if (--lastx >= 0) + xfs_bmbt_get_all(--ep, &got); + } + continue; + } else if (del.br_state == XFS_EXT_UNWRITTEN) { + /* + * This one is already unwritten. + * It must have a written left neighbor. + * Unwrite the killed part of that one and + * try again. + */ + ASSERT(lastx > 0); + xfs_bmbt_get_all(ep - 1, &prev); + ASSERT(prev.br_state == XFS_EXT_NORM); + ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock)); + ASSERT(del.br_startblock == + prev.br_startblock + prev.br_blockcount); + if (prev.br_startoff < start) { + mod = start - prev.br_startoff; + prev.br_blockcount -= mod; + prev.br_startblock += mod; + prev.br_startoff = start; + } + prev.br_state = XFS_EXT_UNWRITTEN; + error = xfs_bmap_add_extent(ip, lastx - 1, &cur, + &prev, firstblock, flist, &logflags, + XFS_DATA_FORK, 0); + if (error) + goto error0; + goto nodelete; + } else { + ASSERT(del.br_state == XFS_EXT_NORM); + del.br_state = XFS_EXT_UNWRITTEN; + error = xfs_bmap_add_extent(ip, lastx, &cur, + &del, firstblock, flist, &logflags, + XFS_DATA_FORK, 0); + if (error) + goto error0; + goto nodelete; + } + } + if (wasdel) { + ASSERT(STARTBLOCKVAL(del.br_startblock) > 0); + xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, + (int)del.br_blockcount, rsvd); + /* Unreserve our quota space */ + XFS_TRANS_RESERVE_QUOTA_NBLKS( + mp, NULL, ip, -((long)del.br_blockcount), 0, + isrt ? XFS_QMOPT_RES_RTBLKS : + XFS_QMOPT_RES_REGBLKS); + ip->i_delayed_blks -= del.br_blockcount; + if (cur) + cur->bc_private.b.flags |= + XFS_BTCUR_BPRV_WASDEL; + } else if (cur) + cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL; + /* + * If it's the case where the directory code is running + * with no block reservation, and the deleted block is in + * the middle of its extent, and the resulting insert + * of an extent would cause transformation to btree format, + * then reject it. The calling code will then swap + * blocks around instead. + * We have to do this now, rather than waiting for the + * conversion to btree format, since the transaction + * will be dirty. 
+ */ + if (!wasdel && xfs_trans_get_block_res(tp) == 0 && + XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max && + del.br_startoff > got.br_startoff && + del.br_startoff + del.br_blockcount < + got.br_startoff + got.br_blockcount) { + error = XFS_ERROR(ENOSPC); + goto error0; + } + error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del, + &tmp_logflags, whichfork, rsvd); + logflags |= tmp_logflags; + if (error) + goto error0; + bno = del.br_startoff - 1; +nodelete: + lastx = ifp->if_lastex; + /* + * If not done go on to the next (previous) record. + * Reset ep in case the extents array was re-alloced. + */ + ep = &ifp->if_u1.if_extents[lastx]; + if (bno != (xfs_fileoff_t)-1 && bno >= start) { + if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) || + xfs_bmbt_get_startoff(ep) > bno) { + lastx--; + ep--; + } + if (lastx >= 0) + xfs_bmbt_get_all(ep, &got); + extno++; + } + } + ifp->if_lastex = lastx; + *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0; + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + /* + * Convert to a btree if necessary. + */ + if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) { + ASSERT(cur == NULL); + error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, + &cur, 0, &tmp_logflags, whichfork); + logflags |= tmp_logflags; + if (error) + goto error0; + } + /* + * transform from btree to extents, give it cur + */ + else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && + XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) { + ASSERT(cur != NULL); + error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, + whichfork); + logflags |= tmp_logflags; + if (error) + goto error0; + } + /* + * transform from extents to local? + */ + ASSERT(ifp->if_ext_max == + XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); + error = 0; +error0: + /* + * Log everything. Do this after conversion, there's no point in + * logging the extent list if we've converted to btree format. + */ + if ((logflags & XFS_ILOG_FEXT(whichfork)) && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) + logflags &= ~XFS_ILOG_FEXT(whichfork); + else if ((logflags & XFS_ILOG_FBROOT(whichfork)) && + XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) + logflags &= ~XFS_ILOG_FBROOT(whichfork); + /* + * Log inode even in the error case, if the transaction + * is dirty we'll need to shut down the filesystem. + */ + if (logflags) + xfs_trans_log_inode(tp, ip, logflags); + if (cur) { + if (!error) { + *firstblock = cur->bc_private.b.firstblock; + cur->bc_private.b.allocated = 0; + } + xfs_btree_del_cursor(cur, + error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + } + return error; +} + +/* + * Fcntl interface to xfs_bmapi. 
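+ *
+ * Broadly, this is the backend for the XFS_IOC_GETBMAP (and the extended
+ * XFS_IOC_GETBMAPX) ioctl path: it maps the caller's range with a
+ * read-only xfs_bmapi() call (tp == NULL, XFS_BMAPI_IGSTATE unless
+ * BMV_IF_PREALLOC asked for unwritten extents to be reported separately)
+ * and copies getbmap or getbmapx records out to the user's array.
+ * Note that bmv_offset, bmv_length and bmv_block are expressed in
+ * 512-byte basic blocks, not filesystem blocks.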
+ */ +int /* error code */ +xfs_getbmap( + bhv_desc_t *bdp, /* XFS behavior descriptor*/ + struct getbmap *bmv, /* user bmap structure */ + void *ap, /* pointer to user's array */ + int interface) /* interface flags */ +{ + __int64_t bmvend; /* last block requested */ + int error; /* return value */ + __int64_t fixlen; /* length for -1 case */ + int i; /* extent number */ + xfs_inode_t *ip; /* xfs incore inode pointer */ + vnode_t *vp; /* corresponding vnode */ + int lock; /* lock state */ + xfs_bmbt_irec_t *map; /* buffer for user's data */ + xfs_mount_t *mp; /* file system mount point */ + int nex; /* # of user extents can do */ + int nexleft; /* # of user extents left */ + int subnex; /* # of bmapi's can do */ + int nmap; /* number of map entries */ + struct getbmap out; /* output structure */ + int whichfork; /* data or attr fork */ + int prealloced; /* this is a file with + * preallocated data space */ + int sh_unwritten; /* true, if unwritten */ + /* extents listed separately */ + int bmapi_flags; /* flags for xfs_bmapi */ + __int32_t oflags; /* getbmapx bmv_oflags field */ + + vp = BHV_TO_VNODE(bdp); + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; + sh_unwritten = (interface & BMV_IF_PREALLOC) != 0; + + /* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not + * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ + * bit is set for the file, generate a read event in order + * that the DMAPI application may do its thing before we return + * the extents. Usually this means restoring user file data to + * regions of the file that look like holes. + * + * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify + * BMV_IF_NO_DMAPI_READ so that read events are generated. + * If this were not true, callers of ioctl( XFS_IOC_GETBMAP ) + * could misinterpret holes in a DMAPI file as true holes, + * when in fact they may represent offline user data. 
+ */ + if ( (interface & BMV_IF_NO_DMAPI_READ) == 0 + && DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) + && whichfork == XFS_DATA_FORK) { + + error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, 0, 0, 0, NULL); + if (error) + return XFS_ERROR(error); + } + + if (whichfork == XFS_ATTR_FORK) { + if (XFS_IFORK_Q(ip)) { + if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE && + ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) + return XFS_ERROR(EINVAL); + } else if (unlikely( + ip->i_d.di_aformat != 0 && + ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) { + XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + } else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && + ip->i_d.di_format != XFS_DINODE_FMT_BTREE && + ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) + return XFS_ERROR(EINVAL); + if (whichfork == XFS_DATA_FORK) { + if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) { + prealloced = 1; + fixlen = XFS_MAX_FILE_OFFSET; + } else { + prealloced = 0; + fixlen = ip->i_d.di_size; + } + } else { + prealloced = 0; + fixlen = 1LL << 32; + } + + if (bmv->bmv_length == -1) { + fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen)); + bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset), + (__int64_t)0); + } else if (bmv->bmv_length < 0) + return XFS_ERROR(EINVAL); + if (bmv->bmv_length == 0) { + bmv->bmv_entries = 0; + return 0; + } + nex = bmv->bmv_count - 1; + if (nex <= 0) + return XFS_ERROR(EINVAL); + bmvend = bmv->bmv_offset + bmv->bmv_length; + + xfs_ilock(ip, XFS_IOLOCK_SHARED); + + if (whichfork == XFS_DATA_FORK && ip->i_delayed_blks) { + /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */ + VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error); + } + + ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0); + + lock = xfs_ilock_map_shared(ip); + + /* + * Don't let nex be bigger than the number of extents + * we can have assuming alternating holes and real extents. + */ + if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) + nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; + + bmapi_flags = XFS_BMAPI_AFLAG(whichfork) | + ((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE); + + /* + * Allocate enough space to handle "subnex" maps at a time. + */ + subnex = 16; + map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP); + + bmv->bmv_entries = 0; + + if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0) { + error = 0; + goto unlock_and_return; + } + + nexleft = nex; + + do { + nmap = (nexleft > subnex) ? subnex : nexleft; + error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), + XFS_BB_TO_FSB(mp, bmv->bmv_length), + bmapi_flags, NULL, 0, map, &nmap, NULL); + if (error) + goto unlock_and_return; + ASSERT(nmap <= subnex); + + for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) { + nexleft--; + oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ? + BMV_OF_PREALLOC : 0; + out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff); + out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount); + ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); + if (prealloced && + map[i].br_startblock == HOLESTARTBLOCK && + out.bmv_offset + out.bmv_length == bmvend) { + /* + * came to hole at end of file + */ + goto unlock_and_return; + } else { + out.bmv_block = + (map[i].br_startblock == HOLESTARTBLOCK) ? + -1 : + XFS_FSB_TO_DB(ip, map[i].br_startblock); + + /* return either getbmap/getbmapx structure. 
*/ + if (interface & BMV_IF_EXTENDED) { + struct getbmapx outx; + + GETBMAP_CONVERT(out,outx); + outx.bmv_oflags = oflags; + outx.bmv_unused1 = outx.bmv_unused2 = 0; + if (copy_to_user(ap, &outx, + sizeof(outx))) { + error = XFS_ERROR(EFAULT); + goto unlock_and_return; + } + } else { + if (copy_to_user(ap, &out, + sizeof(out))) { + error = XFS_ERROR(EFAULT); + goto unlock_and_return; + } + } + bmv->bmv_offset = + out.bmv_offset + out.bmv_length; + bmv->bmv_length = MAX((__int64_t)0, + (__int64_t)(bmvend - bmv->bmv_offset)); + bmv->bmv_entries++; + ap = (interface & BMV_IF_EXTENDED) ? + (void *)((struct getbmapx *)ap + 1) : + (void *)((struct getbmap *)ap + 1); + } + } + } while (nmap && nexleft && bmv->bmv_length); + +unlock_and_return: + xfs_iunlock_map_shared(ip, lock); + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + + kmem_free(map, subnex * sizeof(*map)); + + return error; +} + +/* + * Check the last inode extent to determine whether this allocation will result + * in blocks being allocated at the end of the file. When we allocate new data + * blocks at the end of the file which do not start at the previous data block, + * we will try to align the new blocks at stripe unit boundaries. + */ +int /* error */ +xfs_bmap_isaeof( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fileoff_t off, /* file offset in fsblocks */ + int whichfork, /* data or attribute fork */ + char *aeof) /* return value */ +{ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ + xfs_extnum_t nextents; /* size of extent list */ + xfs_bmbt_irec_t s; /* expanded extent list entry */ + + ASSERT(whichfork == XFS_DATA_FORK); + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(NULL, ip, whichfork))) + return error; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (nextents == 0) { + *aeof = 1; + return 0; + } + /* + * Go to the last extent + */ + lastrec = &ifp->if_u1.if_extents[nextents - 1]; + xfs_bmbt_get_all(lastrec, &s); + /* + * Check we are allocating in the last extent (for delayed allocations) + * or past the last extent for non-delayed allocations. + */ + *aeof = (off >= s.br_startoff && + off < s.br_startoff + s.br_blockcount && + ISNULLSTARTBLOCK(s.br_startblock)) || + off >= s.br_startoff + s.br_blockcount; + return 0; +} + +/* + * Check if the endoff is outside the last extent. If so the caller will grow + * the allocation to a stripe unit boundary. 
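+ *
+ * That is, *eof is set when endoff >= startoff + blockcount of the last
+ * extent in the fork (or when the fork has no extents at all).  For
+ * example, if the last extent maps file blocks [0, 8), an endoff of 8 or
+ * more reports eof while an endoff of 7 does not.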
+ */ +int /* error */ +xfs_bmap_eof( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_fileoff_t endoff, /* file offset in fsblocks */ + int whichfork, /* data or attribute fork */ + int *eof) /* result value */ +{ + xfs_fsblock_t blockcount; /* extent block count */ + int error; /* error return value */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ + xfs_extnum_t nextents; /* size of extent list */ + xfs_fileoff_t startoff; /* extent starting file offset */ + + ASSERT(whichfork == XFS_DATA_FORK); + ifp = XFS_IFORK_PTR(ip, whichfork); + if (!(ifp->if_flags & XFS_IFEXTENTS) && + (error = xfs_iread_extents(NULL, ip, whichfork))) + return error; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (nextents == 0) { + *eof = 1; + return 0; + } + /* + * Go to the last extent + */ + lastrec = &ifp->if_u1.if_extents[nextents - 1]; + startoff = xfs_bmbt_get_startoff(lastrec); + blockcount = xfs_bmbt_get_blockcount(lastrec); + *eof = endoff >= startoff + blockcount; + return 0; +} + +#ifdef XFSDEBUG +/* + * Check that the extents list for the inode ip is in the right order. + */ +STATIC void +xfs_bmap_check_extents( + xfs_inode_t *ip, /* incore inode pointer */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extents list */ + xfs_bmbt_rec_t *ep; /* current extent entry */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extents in list */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + base = ifp->if_u1.if_extents; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + for (ep = base; ep < &base[nextents - 1]; ep++) { + xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, + (void *)(ep + 1)); + } +} + +STATIC +xfs_buf_t * +xfs_bmap_get_bp( + xfs_btree_cur_t *cur, + xfs_fsblock_t bno) +{ + int i; + xfs_buf_t *bp; + + if (!cur) + return(NULL); + + bp = NULL; + for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) { + bp = cur->bc_bufs[i]; + if (!bp) break; + if (XFS_BUF_ADDR(bp) == bno) + break; /* Found it */ + } + if (i == XFS_BTREE_MAXLEVELS) + bp = NULL; + + if (!bp) { /* Chase down all the log items to see if the bp is there */ + xfs_log_item_chunk_t *licp; + xfs_trans_t *tp; + + tp = cur->bc_tp; + licp = &tp->t_items; + while (!bp && licp != NULL) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { + licp = licp->lic_next; + continue; + } + for (i = 0; i < licp->lic_unused; i++) { + xfs_log_item_desc_t *lidp; + xfs_log_item_t *lip; + xfs_buf_log_item_t *bip; + xfs_buf_t *lbp; + + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lidp = XFS_LIC_SLOT(licp, i); + lip = lidp->lid_item; + if (lip->li_type != XFS_LI_BUF) + continue; + + bip = (xfs_buf_log_item_t *)lip; + lbp = bip->bli_buf; + + if (XFS_BUF_ADDR(lbp) == bno) { + bp = lbp; + break; /* Found it */ + } + } + licp = licp->lic_next; + } + } + return(bp); +} + +void +xfs_check_block( + xfs_bmbt_block_t *block, + xfs_mount_t *mp, + int root, + short sz) +{ + int i, j, dmxr; + xfs_bmbt_ptr_t *pp, *thispa; /* pointer to block address */ + xfs_bmbt_key_t *prevp, *keyp; + + ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); + + prevp = NULL; + for( i = 1; i <= INT_GET(block->bb_numrecs, ARCH_CONVERT);i++) { + dmxr = mp->m_bmap_dmxr[0]; + + if (root) { + keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz); + } else { + keyp = XFS_BTREE_KEY_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, i, dmxr); + } + + if (prevp) { + xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp); + } + prevp = keyp; + + /* + * Compare the 
block numbers to see if there are dups. + */ + + if (root) { + pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz); + } else { + pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, i, dmxr); + } + for (j = i+1; j <= INT_GET(block->bb_numrecs, ARCH_CONVERT); j++) { + if (root) { + thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz); + } else { + thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, j, dmxr); + } + if (INT_GET(*thispa, ARCH_CONVERT) == + INT_GET(*pp, ARCH_CONVERT)) { + cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld", + __FUNCTION__, j, i, + INT_GET(*thispa, ARCH_CONVERT)); + panic("%s: ptrs are equal in node\n", + __FUNCTION__); + } + } + } +} + +/* + * Check that the extents for the inode ip are in the right order in all + * btree leaves. + */ + +STATIC void +xfs_bmap_check_leaf_extents( + xfs_btree_cur_t *cur, /* btree cursor or null */ + xfs_inode_t *ip, /* incore inode pointer */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_block_t *block; /* current btree block */ + xfs_fsblock_t bno; /* block # of "block" */ + xfs_buf_t *bp; /* buffer for "block" */ + int error; /* error return value */ + xfs_extnum_t i=0; /* index into the extents list */ + xfs_ifork_t *ifp; /* fork structure */ + int level; /* btree level, for checking */ + xfs_mount_t *mp; /* file system mount structure */ + xfs_bmbt_ptr_t *pp; /* pointer to block address */ + xfs_bmbt_rec_t *ep, *lastp; /* extent pointers in block entry */ + int bp_release = 0; + + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) { + return; + } + + bno = NULLFSBLOCK; + mp = ip->i_mount; + ifp = XFS_IFORK_PTR(ip, whichfork); + block = ifp->if_broot; + /* + * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. + */ + ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); + level = INT_GET(block->bb_level, ARCH_CONVERT); + xfs_check_block(block, mp, 1, ifp->if_broot_bytes); + pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); + ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); + ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); + ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); + bno = INT_GET(*pp, ARCH_CONVERT); + /* + * Go down the tree until leaf level is reached, following the first + * pointer (leftmost) at each level. + */ + while (level-- > 0) { + /* See if buf is in cur first */ + bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); + if (bp) { + bp_release = 0; + } else { + bp_release = 1; + } + if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, + XFS_BMAP_BTREE_REF))) + goto error_norelse; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + XFS_WANT_CORRUPTED_GOTO( + XFS_BMAP_SANITY_CHECK(mp, block, level), + error0); + if (level == 0) + break; + + /* + * Check this block for basic sanity (increasing keys and + * no duplicate blocks). + */ + + xfs_check_block(block, mp, 0, 0); + pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, + 1, mp->m_bmap_dmxr[1]); + XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)), error0); + bno = INT_GET(*pp, ARCH_CONVERT); + if (bp_release) { + bp_release = 0; + xfs_trans_brelse(NULL, bp); + } + } + + /* + * Here with bp and block set to the leftmost leaf node in the tree. + */ + i = 0; + + /* + * Loop over all leaf nodes checking that all extents are in the right order. 
+ */ + lastp = NULL; + for (;;) { + xfs_bmbt_rec_t *frp; + xfs_fsblock_t nextbno; + xfs_extnum_t num_recs; + + + num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + + /* + * Read-ahead the next leaf block, if any. + */ + + nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + + /* + * Check all the extents to make sure they are OK. + * If we had a previous block, the last entry should + * conform with the first entry in this one. + */ + + frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, + block, 1, mp->m_bmap_dmxr[0]); + + for (ep = frp;ep < frp + (num_recs - 1); ep++) { + if (lastp) { + xfs_btree_check_rec(XFS_BTNUM_BMAP, + (void *)lastp, (void *)ep); + } + xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, + (void *)(ep + 1)); + } + lastp = frp + num_recs - 1; /* For the next iteration */ + + i += num_recs; + if (bp_release) { + bp_release = 0; + xfs_trans_brelse(NULL, bp); + } + bno = nextbno; + /* + * If we've reached the end, stop. + */ + if (bno == NULLFSBLOCK) + break; + + bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); + if (bp) { + bp_release = 0; + } else { + bp_release = 1; + } + if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, + XFS_BMAP_BTREE_REF))) + goto error_norelse; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + } + if (bp_release) { + bp_release = 0; + xfs_trans_brelse(NULL, bp); + } + return; + +error0: + cmn_err(CE_WARN, "%s: at error0", __FUNCTION__); + if (bp_release) + xfs_trans_brelse(NULL, bp); +error_norelse: + cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents", + i, __FUNCTION__); + panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__); + return; +} +#endif + +/* + * Count fsblocks of the given fork. + */ +int /* error */ +xfs_bmap_count_blocks( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *ip, /* incore inode */ + int whichfork, /* data or attr fork */ + int *count) /* out: count of blocks */ +{ + xfs_bmbt_block_t *block; /* current btree block */ + xfs_fsblock_t bno; /* block # of "block" */ + xfs_ifork_t *ifp; /* fork structure */ + int level; /* btree level, for checking */ + xfs_mount_t *mp; /* file system mount structure */ + xfs_bmbt_ptr_t *pp; /* pointer to block address */ + + bno = NULLFSBLOCK; + mp = ip->i_mount; + ifp = XFS_IFORK_PTR(ip, whichfork); + if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) { + if (unlikely(xfs_bmap_count_leaves(ifp->if_u1.if_extents, + ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), + count) < 0)) { + XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + return 0; + } + + /* + * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. + */ + block = ifp->if_broot; + ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); + level = INT_GET(block->bb_level, ARCH_CONVERT); + pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); + ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); + ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); + ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); + bno = INT_GET(*pp, ARCH_CONVERT); + + if (unlikely(xfs_bmap_count_tree(mp, tp, bno, level, count) < 0)) { + XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW, + mp); + return XFS_ERROR(EFSCORRUPTED); + } + + return 0; +} + +/* + * Recursively walks each level of a btree + * to count total fsblocks is use. 
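+ * The resulting count includes the btree blocks themselves (one per
+ * block visited) plus, at the leaves, the blocks mapped by each extent
+ * record via xfs_bmap_count_leaves().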
+ */ +int /* error */ +xfs_bmap_count_tree( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_fsblock_t blockno, /* file system block number */ + int levelin, /* level in btree */ + int *count) /* Count of blocks */ +{ + int error; + xfs_buf_t *bp, *nbp; + int level = levelin; + xfs_bmbt_ptr_t *pp; + xfs_fsblock_t bno = blockno; + xfs_fsblock_t nextbno; + xfs_bmbt_block_t *block, *nextblock; + int numrecs; + xfs_bmbt_rec_t *frp; + + if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF))) + return error; + *count += 1; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + + if (--level) { + /* Not at node above leafs, count this level of nodes */ + nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + while (nextbno != NULLFSBLOCK) { + if ((error = xfs_btree_read_bufl(mp, tp, nextbno, + 0, &nbp, XFS_BMAP_BTREE_REF))) + return error; + *count += 1; + nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp); + nextbno = INT_GET(nextblock->bb_rightsib, ARCH_CONVERT); + xfs_trans_brelse(tp, nbp); + } + + /* Dive to the next level */ + pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]); + bno = INT_GET(*pp, ARCH_CONVERT); + if (unlikely((error = + xfs_bmap_count_tree(mp, tp, bno, level, count)) < 0)) { + xfs_trans_brelse(tp, bp); + XFS_ERROR_REPORT("xfs_bmap_count_tree(1)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + xfs_trans_brelse(tp, bp); + } else { + /* count all level 1 nodes and their leaves */ + for (;;) { + nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); + frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); + if (unlikely(xfs_bmap_count_leaves(frp, numrecs, count) < 0)) { + xfs_trans_brelse(tp, bp); + XFS_ERROR_REPORT("xfs_bmap_count_tree(2)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + xfs_trans_brelse(tp, bp); + if (nextbno == NULLFSBLOCK) + break; + bno = nextbno; + if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, + XFS_BMAP_BTREE_REF))) + return error; + *count += 1; + block = XFS_BUF_TO_BMBT_BLOCK(bp); + } + } + return 0; +} + +/* + * Count leaf blocks given a pointer to an extent list. + */ +int +xfs_bmap_count_leaves( + xfs_bmbt_rec_t *frp, + int numrecs, + int *count) +{ + int b; + + for ( b = 1; b <= numrecs; b++, frp++) + *count += xfs_bmbt_disk_get_blockcount(frp); + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_bmap.h linux.22-ac2/fs/xfs/xfs_bmap.h --- linux.vanilla/fs/xfs/xfs_bmap.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_bmap.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,393 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BMAP_H__ +#define __XFS_BMAP_H__ + +struct getbmap; +struct xfs_bmbt_irec; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * List of extents to be free "later". + * The list is kept sorted on xbf_startblock. + */ +typedef struct xfs_bmap_free_item +{ + xfs_fsblock_t xbfi_startblock;/* starting fs block number */ + xfs_extlen_t xbfi_blockcount;/* number of blocks in extent */ + struct xfs_bmap_free_item *xbfi_next; /* link to next entry */ +} xfs_bmap_free_item_t; + +/* + * Header for free extent list. + */ +typedef struct xfs_bmap_free +{ + xfs_bmap_free_item_t *xbf_first; /* list of to-be-free extents */ + int xbf_count; /* count of items on list */ + int xbf_low; /* kludge: alloc in low mode */ +} xfs_bmap_free_t; + +#define XFS_BMAP_MAX_NMAP 4 + +/* + * Flags for xfs_bmapi + */ +#define XFS_BMAPI_WRITE 0x001 /* write operation: allocate space */ +#define XFS_BMAPI_DELAY 0x002 /* delayed write operation */ +#define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */ +#define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */ +#define XFS_BMAPI_EXACT 0x010 /* allocate only to spec'd bounds */ +#define XFS_BMAPI_ATTRFORK 0x020 /* use attribute fork not data */ +#define XFS_BMAPI_ASYNC 0x040 /* bunmapi xactions can be async */ +#define XFS_BMAPI_RSVBLOCKS 0x080 /* OK to alloc. reserved data blocks */ +#define XFS_BMAPI_PREALLOC 0x100 /* preallocation op: unwritten space */ +#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */ + /* combine contig. space */ +#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAPI_AFLAG) +int xfs_bmapi_aflag(int w); +#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w) +#else +#define XFS_BMAPI_AFLAG(w) ((w) == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0) +#endif + +/* + * Special values for xfs_bmbt_irec_t br_startblock field. + */ +#define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL) +#define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL) + +/* + * Trace operations for bmap extent tracing + */ +#define XFS_BMAP_KTRACE_DELETE 1 +#define XFS_BMAP_KTRACE_INSERT 2 +#define XFS_BMAP_KTRACE_PRE_UP 3 +#define XFS_BMAP_KTRACE_POST_UP 4 + +#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */ +#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */ + +#if defined(XFS_ALL_TRACE) +#define XFS_BMAP_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_BMAP_TRACE +#endif + + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_INIT) +void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp); +#define XFS_BMAP_INIT(flp,fbp) xfs_bmap_init(flp,fbp) +#else +#define XFS_BMAP_INIT(flp,fbp) \ + ((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \ + (flp)->xbf_low = 0, *(fbp) = NULLFSBLOCK) +#endif + +/* + * Argument structure for xfs_bmap_alloc. 
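+ *
+ * Roughly: xfs_bmapi() sets tp/ip/prevp/gotp/total once per call, then
+ * refreshes the per-allocation fields (eof, firstblock, alen, off,
+ * wasdel, minlen, low, minleft, aeof, userdata) before each
+ * xfs_bmap_alloc() call, and reads rval, alen, off, low and firstblock
+ * back as the result.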
+ */ +typedef struct xfs_bmalloca { + xfs_fsblock_t firstblock; /* i/o first block allocated */ + xfs_fsblock_t rval; /* starting block of new extent */ + xfs_fileoff_t off; /* offset in file filling in */ + struct xfs_trans *tp; /* transaction pointer */ + struct xfs_inode *ip; /* incore inode pointer */ + struct xfs_bmbt_irec *prevp; /* extent before the new one */ + struct xfs_bmbt_irec *gotp; /* extent after, or delayed */ + xfs_extlen_t alen; /* i/o length asked/allocated */ + xfs_extlen_t total; /* total blocks needed for xaction */ + xfs_extlen_t minlen; /* mininum allocation size (blocks) */ + xfs_extlen_t minleft; /* amount must be left after alloc */ + char eof; /* set if allocating past last extent */ + char wasdel; /* replacing a delayed allocation */ + char userdata;/* set if is user data */ + char low; /* low on space, using seq'l ags */ + char aeof; /* allocated space at eof */ +} xfs_bmalloca_t; + +#ifdef __KERNEL__ +/* + * Convert inode from non-attributed to attributed. + * Must not be in a transaction, ip must not be locked. + */ +int /* error code */ +xfs_bmap_add_attrfork( + struct xfs_inode *ip, /* incore inode pointer */ + int rsvd); /* flag for reserved block allocation */ + +/* + * Add the extent to the list of extents to be free at transaction end. + * The list is maintained sorted (by block number). + */ +void +xfs_bmap_add_free( + xfs_fsblock_t bno, /* fs block number of extent */ + xfs_filblks_t len, /* length of extent */ + xfs_bmap_free_t *flist, /* list of extents */ + struct xfs_mount *mp); /* mount point structure */ + +/* + * Routine to clean up the free list data structure when + * an error occurs during a transaction. + */ +void +xfs_bmap_cancel( + xfs_bmap_free_t *flist); /* free list to clean up */ + +/* + * Routine to check if a specified inode is swap capable. + */ +int +xfs_bmap_check_swappable( + struct xfs_inode *ip); /* incore inode */ + +/* + * Compute and fill in the value of the maximum depth of a bmap btree + * in this filesystem. Done once, during mount. + */ +void +xfs_bmap_compute_maxlevels( + struct xfs_mount *mp, /* file system mount structure */ + int whichfork); /* data or attr fork */ + +/* + * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi + * caller. Frees all the extents that need freeing, which must be done + * last due to locking considerations. + * + * Return 1 if the given transaction was committed and a new one allocated, + * and 0 otherwise. + */ +int /* error */ +xfs_bmap_finish( + struct xfs_trans **tp, /* transaction pointer addr */ + xfs_bmap_free_t *flist, /* i/o: list extents to free */ + xfs_fsblock_t firstblock, /* controlled a.g. for allocs */ + int *committed); /* xact committed or not */ + +/* + * Returns the file-relative block number of the first unused block in the file. + * This is the lowest-address hole if the file has holes, else the first block + * past the end of file. + */ +int /* error */ +xfs_bmap_first_unused( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_extlen_t len, /* size of hole to find */ + xfs_fileoff_t *unused, /* unused block num */ + int whichfork); /* data or attr fork */ + +/* + * Returns the file-relative block number of the last block + 1 before + * last_block (input value) in the file. + * This is not based on i_size, it is based on the extent list. + * Returns 0 for local files, as they do not have an extent list. 
+ */ +int /* error */ +xfs_bmap_last_before( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t *last_block, /* last block */ + int whichfork); /* data or attr fork */ + +/* + * Returns the file-relative block number of the first block past eof in + * the file. This is not based on i_size, it is based on the extent list. + * Returns 0 for local files, as they do not have an extent list. + */ +int /* error */ +xfs_bmap_last_offset( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t *unused, /* last block num */ + int whichfork); /* data or attr fork */ + +/* + * Returns whether the selected fork of the inode has exactly one + * block or not. For the data fork we check this matches di_size, + * implying the file's range is 0..bsize-1. + */ +int +xfs_bmap_one_block( + struct xfs_inode *ip, /* incore inode */ + int whichfork); /* data or attr fork */ + +/* + * Read in the extents to iu_extents. + * All inode fields are set up by caller, we just traverse the btree + * and copy the records in. + */ +int /* error */ +xfs_bmap_read_extents( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + int whichfork); /* data or attr fork */ + +#if defined(XFS_BMAP_TRACE) +/* + * Add bmap trace insert entries for all the contents of the extent list. + */ +void +xfs_bmap_trace_exlist( + char *fname, /* function name */ + struct xfs_inode *ip, /* incore inode pointer */ + xfs_extnum_t cnt, /* count of entries in list */ + int whichfork); /* data or attr fork */ +#else +#define xfs_bmap_trace_exlist(f,ip,c,w) +#endif + +/* + * Map file blocks to filesystem blocks. + * File range is given by the bno/len pair. + * Adds blocks to file if a write ("flags & XFS_BMAPI_WRITE" set) + * into a hole or past eof. + * Only allocates blocks from a single allocation group, + * to avoid locking problems. + * The returned value in "firstblock" from the first call in a transaction + * must be remembered and presented to subsequent calls in "firstblock". + * An upper bound for the number of blocks to be allocated is supplied to + * the first call in "total"; if no allocation group has that many free + * blocks then the call will fail (return NULLFSBLOCK in "firstblock"). + */ +int /* error */ +xfs_bmapi( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t bno, /* starting file offs. mapped */ + xfs_filblks_t len, /* length to map in file */ + int flags, /* XFS_BMAPI_... */ + xfs_fsblock_t *firstblock, /* first allocated block + controls a.g. for allocs */ + xfs_extlen_t total, /* total blocks needed */ + struct xfs_bmbt_irec *mval, /* output: map values */ + int *nmap, /* i/o: mval size/count */ + xfs_bmap_free_t *flist); /* i/o: list extents to free */ + +/* + * Map file blocks to filesystem blocks, simple version. + * One block only, read-only. + * For flags, only the XFS_BMAPI_ATTRFORK flag is examined. + * For the other flag values, the effect is as if XFS_BMAPI_METADATA + * was set and all the others were clear. + */ +int /* error */ +xfs_bmapi_single( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + int whichfork, /* data or attr fork */ + xfs_fsblock_t *fsb, /* output: mapped block */ + xfs_fileoff_t bno); /* starting file offs. mapped */ + +/* + * Unmap (remove) blocks from a file. 
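+ *
+ * A rough sketch of the usual calling pattern, with a hypothetical
+ * caller; transaction setup, inode locking and space reservation are
+ * elided, and the nexts/*done semantics are described below:
+ *
+ *	xfs_fsblock_t	first_block;
+ *	xfs_bmap_free_t	free_list;
+ *	int		committed, done;
+ *
+ *	XFS_BMAP_INIT(&free_list, &first_block);
+ *	error = xfs_bunmapi(tp, ip, start_fsb, length_fsb, 0, 0,
+ *			    &first_block, &free_list, &done);
+ *	if (!error)
+ *		error = xfs_bmap_finish(&tp, &free_list, first_block,
+ *					&committed);
+ *	if (error)
+ *		xfs_bmap_cancel(&free_list);
+ *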
+ * If nexts is nonzero then the number of extents to remove is limited to + * that value. If not all extents in the block range can be removed then + * *done is set. + */ +int /* error */ +xfs_bunmapi( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* incore inode */ + xfs_fileoff_t bno, /* starting offset to unmap */ + xfs_filblks_t len, /* length to unmap in file */ + int flags, /* XFS_BMAPI_... */ + xfs_extnum_t nexts, /* number of extents max */ + xfs_fsblock_t *firstblock, /* first allocated block + controls a.g. for allocs */ + xfs_bmap_free_t *flist, /* i/o: list extents to free */ + int *done); /* set if not done yet */ + +/* + * Fcntl interface to xfs_bmapi. + */ +int /* error code */ +xfs_getbmap( + bhv_desc_t *bdp, /* XFS behavior descriptor*/ + struct getbmap *bmv, /* user bmap structure */ + void *ap, /* pointer to user's array */ + int iflags); /* interface flags */ + +/* + * Check the last inode extent to determine whether this allocation will result + * in blocks being allocated at the end of the file. When we allocate new data + * blocks at the end of the file which do not start at the previous data block, + * we will try to align the new blocks at stripe unit boundaries. + */ +int +xfs_bmap_isaeof( + struct xfs_inode *ip, + xfs_fileoff_t off, + int whichfork, + char *aeof); + +/* + * Check if the endoff is outside the last extent. If so the caller will grow + * the allocation to a stripe unit boundary + */ +int +xfs_bmap_eof( + struct xfs_inode *ip, + xfs_fileoff_t endoff, + int whichfork, + int *eof); + +/* + * Count fsblocks of the given fork. + */ +int +xfs_bmap_count_blocks( + xfs_trans_t *tp, + xfs_inode_t *ip, + int whichfork, + int *count); + +/* + * Check an extent list, which has just been read, for + * any bit in the extent flag field. + */ +int +xfs_check_nostate_extents( + xfs_bmbt_rec_t *ep, + xfs_extnum_t num); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_BMAP_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_btree.c linux.22-ac2/fs/xfs/xfs_btree.c --- linux.vanilla/fs/xfs/xfs_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_btree.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,949 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * This file contains common code for the space manager's btree implementations. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bit.h" +#include "xfs_error.h" + +/* + * Cursor allocation zone. + */ +kmem_zone_t *xfs_btree_cur_zone; + +/* + * Btree magic numbers. + */ +const __uint32_t xfs_magics[XFS_BTNUM_MAX] = +{ + XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC +}; + +/* + * Prototypes for internal routines. + */ + +/* + * Checking routine: return maxrecs for the block. + */ +STATIC int /* number of records fitting in block */ +xfs_btree_maxrecs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_block_t *block);/* generic btree block pointer */ + +/* + * Internal routines. + */ + +/* + * Checking routine: return maxrecs for the block. + */ +STATIC int /* number of records fitting in block */ +xfs_btree_maxrecs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_block_t *block) /* generic btree block pointer */ +{ + switch (cur->bc_btnum) { + case XFS_BTNUM_BNO: + case XFS_BTNUM_CNT: + return (int)XFS_ALLOC_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); + case XFS_BTNUM_BMAP: + return (int)XFS_BMAP_BLOCK_IMAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); + case XFS_BTNUM_INO: + return (int)XFS_INOBT_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); + default: + ASSERT(0); + return 0; + } +} + +/* + * External routines. + */ + +#ifdef DEBUG +/* + * Debug routine: check that block header is ok. + */ +void +xfs_btree_check_block( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_block_t *block, /* generic btree block pointer */ + int level, /* level of the btree block */ + xfs_buf_t *bp) /* buffer containing block, if any */ +{ + if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) + xfs_btree_check_lblock(cur, (xfs_btree_lblock_t *)block, level, + bp); + else + xfs_btree_check_sblock(cur, (xfs_btree_sblock_t *)block, level, + bp); +} + +/* + * Debug routine: check that keys are in the right order. 
+ */ +void +xfs_btree_check_key( + xfs_btnum_t btnum, /* btree identifier */ + void *ak1, /* pointer to left (lower) key */ + void *ak2) /* pointer to right (higher) key */ +{ + switch (btnum) { + case XFS_BTNUM_BNO: { + xfs_alloc_key_t *k1; + xfs_alloc_key_t *k2; + + k1 = ak1; + k2 = ak2; + ASSERT(INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT)); + break; + } + case XFS_BTNUM_CNT: { + xfs_alloc_key_t *k1; + xfs_alloc_key_t *k2; + + k1 = ak1; + k2 = ak2; + ASSERT(INT_GET(k1->ar_blockcount, ARCH_CONVERT) < INT_GET(k2->ar_blockcount, ARCH_CONVERT) || + (INT_GET(k1->ar_blockcount, ARCH_CONVERT) == INT_GET(k2->ar_blockcount, ARCH_CONVERT) && + INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT))); + break; + } + case XFS_BTNUM_BMAP: { + xfs_bmbt_key_t *k1; + xfs_bmbt_key_t *k2; + + k1 = ak1; + k2 = ak2; + ASSERT(INT_GET(k1->br_startoff, ARCH_CONVERT) < INT_GET(k2->br_startoff, ARCH_CONVERT)); + break; + } + case XFS_BTNUM_INO: { + xfs_inobt_key_t *k1; + xfs_inobt_key_t *k2; + + k1 = ak1; + k2 = ak2; + ASSERT(INT_GET(k1->ir_startino, ARCH_CONVERT) < INT_GET(k2->ir_startino, ARCH_CONVERT)); + break; + } + default: + ASSERT(0); + } +} +#endif /* DEBUG */ + +/* + * Checking routine: check that long form block header is ok. + */ +/* ARGSUSED */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lblock( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_lblock_t *block, /* btree long form block pointer */ + int level, /* level of the btree block */ + xfs_buf_t *bp) /* buffer for block, if any */ +{ + int lblock_ok; /* block passes checks */ + xfs_mount_t *mp; /* file system mount point */ + + mp = cur->bc_mp; + lblock_ok = + INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] && + INT_GET(block->bb_level, ARCH_CONVERT) == level && + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && + !INT_ISZERO(block->bb_leftsib, ARCH_CONVERT) && + (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_leftsib, ARCH_CONVERT))) && + !INT_ISZERO(block->bb_rightsib, ARCH_CONVERT) && + (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_rightsib, ARCH_CONVERT))); + if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK, + XFS_RANDOM_BTREE_CHECK_LBLOCK))) { + if (bp) + xfs_buftrace("LBTREE ERROR", bp); + XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW, + mp); + return XFS_ERROR(EFSCORRUPTED); + } + return 0; +} + +/* + * Checking routine: check that (long) pointer is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lptr( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_dfsbno_t ptr, /* btree block disk address */ + int level) /* btree block level */ +{ + xfs_mount_t *mp; /* file system mount point */ + + mp = cur->bc_mp; + XFS_WANT_CORRUPTED_RETURN( + level > 0 && + ptr != NULLDFSBNO && + XFS_FSB_SANITY_CHECK(mp, ptr)); + return 0; +} + +#ifdef DEBUG +/* + * Debug routine: check that records are in the right order. 
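+ * For the by-block (BNO), bmap and inode btrees "in order" also means
+ * non-overlapping: the left record must end at or before the start of
+ * the right one.  The by-count (CNT) btree is instead ordered by
+ * (blockcount, startblock).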
+ */ +void +xfs_btree_check_rec( + xfs_btnum_t btnum, /* btree identifier */ + void *ar1, /* pointer to left (lower) record */ + void *ar2) /* pointer to right (higher) record */ +{ + switch (btnum) { + case XFS_BTNUM_BNO: { + xfs_alloc_rec_t *r1; + xfs_alloc_rec_t *r2; + + r1 = ar1; + r2 = ar2; + ASSERT(INT_GET(r1->ar_startblock, ARCH_CONVERT) + INT_GET(r1->ar_blockcount, ARCH_CONVERT) <= + INT_GET(r2->ar_startblock, ARCH_CONVERT)); + break; + } + case XFS_BTNUM_CNT: { + xfs_alloc_rec_t *r1; + xfs_alloc_rec_t *r2; + + r1 = ar1; + r2 = ar2; + ASSERT(INT_GET(r1->ar_blockcount, ARCH_CONVERT) < INT_GET(r2->ar_blockcount, ARCH_CONVERT) || + (INT_GET(r1->ar_blockcount, ARCH_CONVERT) == INT_GET(r2->ar_blockcount, ARCH_CONVERT) && + INT_GET(r1->ar_startblock, ARCH_CONVERT) < INT_GET(r2->ar_startblock, ARCH_CONVERT))); + break; + } + case XFS_BTNUM_BMAP: { + xfs_bmbt_rec_t *r1; + xfs_bmbt_rec_t *r2; + + r1 = ar1; + r2 = ar2; + ASSERT(xfs_bmbt_disk_get_startoff(r1) + + xfs_bmbt_disk_get_blockcount(r1) <= + xfs_bmbt_disk_get_startoff(r2)); + break; + } + case XFS_BTNUM_INO: { + xfs_inobt_rec_t *r1; + xfs_inobt_rec_t *r2; + + r1 = ar1; + r2 = ar2; + ASSERT(INT_GET(r1->ir_startino, ARCH_CONVERT) + XFS_INODES_PER_CHUNK <= + INT_GET(r2->ir_startino, ARCH_CONVERT)); + break; + } + default: + ASSERT(0); + } +} +#endif /* DEBUG */ + +/* + * Checking routine: check that block header is ok. + */ +/* ARGSUSED */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_sblock( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_sblock_t *block, /* btree short form block pointer */ + int level, /* level of the btree block */ + xfs_buf_t *bp) /* buffer containing block */ +{ + xfs_buf_t *agbp; /* buffer for ag. freespace struct */ + xfs_agf_t *agf; /* ag. freespace structure */ + xfs_agblock_t agflen; /* native ag. freespace length */ + int sblock_ok; /* block passes checks */ + + agbp = cur->bc_private.a.agbp; + agf = XFS_BUF_TO_AGF(agbp); + agflen = INT_GET(agf->agf_length, ARCH_CONVERT); + sblock_ok = + INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] && + INT_GET(block->bb_level, ARCH_CONVERT) == level && + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && + (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK || + INT_GET(block->bb_leftsib, ARCH_CONVERT) < agflen) && + !INT_ISZERO(block->bb_leftsib, ARCH_CONVERT) && + (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK || + INT_GET(block->bb_rightsib, ARCH_CONVERT) < agflen) && + !INT_ISZERO(block->bb_rightsib, ARCH_CONVERT); + if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp, + XFS_ERRTAG_BTREE_CHECK_SBLOCK, + XFS_RANDOM_BTREE_CHECK_SBLOCK))) { + if (bp) + xfs_buftrace("SBTREE ERROR", bp); + XFS_ERROR_REPORT("xfs_btree_check_sblock", XFS_ERRLEVEL_LOW, + cur->bc_mp); + return XFS_ERROR(EFSCORRUPTED); + } + return 0; +} + +/* + * Checking routine: check that (short) pointer is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_sptr( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t ptr, /* btree block disk address */ + int level) /* btree block level */ +{ + xfs_buf_t *agbp; /* buffer for ag. freespace struct */ + xfs_agf_t *agf; /* ag. freespace structure */ + + agbp = cur->bc_private.a.agbp; + agf = XFS_BUF_TO_AGF(agbp); + XFS_WANT_CORRUPTED_RETURN( + level > 0 && + ptr != NULLAGBLOCK && ptr != 0 && + ptr < INT_GET(agf->agf_length, ARCH_CONVERT)); + return 0; +} + +/* + * Delete the btree cursor. 
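+ * The error argument matters here: when deleting because of an error,
+ * every bc_bufs[] slot is inspected so that a partially built cursor
+ * releases all the buffers it holds, while on the normal path the scan
+ * stops at the first empty slot.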
+ */ +void +xfs_btree_del_cursor( + xfs_btree_cur_t *cur, /* btree cursor */ + int error) /* del because of error */ +{ + int i; /* btree level */ + + /* + * Clear the buffer pointers, and release the buffers. + * If we're doing this in the face of an error, we + * need to make sure to inspect all of the entries + * in the bc_bufs array for buffers to be unlocked. + * This is because some of the btree code works from + * level n down to 0, and if we get an error along + * the way we won't have initialized all the entries + * down to 0. + */ + for (i = 0; i < cur->bc_nlevels; i++) { + if (cur->bc_bufs[i]) + xfs_btree_setbuf(cur, i, NULL); + else if (!error) + break; + } + /* + * Can't free a bmap cursor without having dealt with the + * allocated indirect blocks' accounting. + */ + ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || + cur->bc_private.b.allocated == 0); + /* + * Free the cursor. + */ + kmem_zone_free(xfs_btree_cur_zone, cur); +} + +/* + * Duplicate the btree cursor. + * Allocate a new one, copy the record, re-get the buffers. + */ +int /* error */ +xfs_btree_dup_cursor( + xfs_btree_cur_t *cur, /* input cursor */ + xfs_btree_cur_t **ncur) /* output cursor */ +{ + xfs_buf_t *bp; /* btree block's buffer pointer */ + int error; /* error return value */ + int i; /* level number of btree block */ + xfs_mount_t *mp; /* mount structure for filesystem */ + xfs_btree_cur_t *new; /* new cursor value */ + xfs_trans_t *tp; /* transaction pointer, can be NULL */ + + tp = cur->bc_tp; + mp = cur->bc_mp; + /* + * Allocate a new cursor like the old one. + */ + new = xfs_btree_init_cursor(mp, tp, cur->bc_private.a.agbp, + cur->bc_private.a.agno, cur->bc_btnum, cur->bc_private.b.ip, + cur->bc_private.b.whichfork); + /* + * Copy the record currently in the cursor. + */ + new->bc_rec = cur->bc_rec; + /* + * For each level current, re-get the buffer and copy the ptr value. + */ + for (i = 0; i < new->bc_nlevels; i++) { + new->bc_ptrs[i] = cur->bc_ptrs[i]; + new->bc_ra[i] = cur->bc_ra[i]; + if ((bp = cur->bc_bufs[i])) { + if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, + XFS_BUF_ADDR(bp), mp->m_bsize, 0, &bp))) { + xfs_btree_del_cursor(new, error); + *ncur = NULL; + return error; + } + new->bc_bufs[i] = bp; + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + } else + new->bc_bufs[i] = NULL; + } + /* + * For bmap btrees, copy the firstblock, flist, and flags values, + * since init cursor doesn't get them. + */ + if (new->bc_btnum == XFS_BTNUM_BMAP) { + new->bc_private.b.firstblock = cur->bc_private.b.firstblock; + new->bc_private.b.flist = cur->bc_private.b.flist; + new->bc_private.b.flags = cur->bc_private.b.flags; + } + *ncur = new; + return 0; +} + +/* + * Change the cursor to point to the first record at the given level. + * Other levels are unaffected. + */ +int /* success=1, failure=0 */ +xfs_btree_firstrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level) /* level to change */ +{ + xfs_btree_block_t *block; /* generic btree block pointer */ + xfs_buf_t *bp; /* buffer containing block */ + + /* + * Get the block pointer for this level. + */ + block = xfs_btree_get_block(cur, level, &bp); + xfs_btree_check_block(cur, block, level, bp); + /* + * It's empty, there is no such record. + */ + if (INT_ISZERO(block->bb_h.bb_numrecs, ARCH_CONVERT)) + return 0; + /* + * Set the ptr value to 1, that's the first record/key. + */ + cur->bc_ptrs[level] = 1; + return 1; +} + +/* + * Retrieve the block pointer from the cursor at the given level. + * This may be a bmap btree root or from a buffer. 
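+ *
+ * Typical use, as in xfs_btree_firstrec() and xfs_btree_lastrec():
+ *
+ *	block = xfs_btree_get_block(cur, level, &bp);
+ *	xfs_btree_check_block(cur, block, level, bp);
+ *
+ * bp comes back NULL when the block is the in-inode bmap root.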
+ */ +xfs_btree_block_t * /* generic btree block pointer */ +xfs_btree_get_block( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree */ + xfs_buf_t **bpp) /* buffer containing the block */ +{ + xfs_btree_block_t *block; /* return value */ + xfs_buf_t *bp; /* return buffer */ + xfs_ifork_t *ifp; /* inode fork pointer */ + int whichfork; /* data or attr fork */ + + if (cur->bc_btnum == XFS_BTNUM_BMAP && level == cur->bc_nlevels - 1) { + whichfork = cur->bc_private.b.whichfork; + ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, whichfork); + block = (xfs_btree_block_t *)ifp->if_broot; + bp = NULL; + } else { + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_BLOCK(bp); + } + ASSERT(block != NULL); + *bpp = bp; + return block; +} + +/* + * Get a buffer for the block, return it with no data read. + * Long-form addressing. + */ +xfs_buf_t * /* buffer for fsbno */ +xfs_btree_get_bufl( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_fsblock_t fsbno, /* file system block number */ + uint lock) /* lock flags for get_buf */ +{ + xfs_buf_t *bp; /* buffer pointer (return value) */ + xfs_daddr_t d; /* real disk block address */ + + ASSERT(fsbno != NULLFSBLOCK); + d = XFS_FSB_TO_DADDR(mp, fsbno); + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + return bp; +} + +/* + * Get a buffer for the block, return it with no data read. + * Short-form addressing. + */ +xfs_buf_t * /* buffer for agno/agbno */ +xfs_btree_get_bufs( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + uint lock) /* lock flags for get_buf */ +{ + xfs_buf_t *bp; /* buffer pointer (return value) */ + xfs_daddr_t d; /* real disk block address */ + + ASSERT(agno != NULLAGNUMBER); + ASSERT(agbno != NULLAGBLOCK); + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); + ASSERT(bp); + ASSERT(!XFS_BUF_GETERROR(bp)); + return bp; +} + +/* + * Allocate a new btree cursor. + * The cursor is either for allocation (A) or bmap (B) or inodes (I). + */ +xfs_btree_cur_t * /* new btree cursor */ +xfs_btree_init_cursor( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* (A only) buffer for agf structure */ + /* (I only) buffer for agi structure */ + xfs_agnumber_t agno, /* (AI only) allocation group number */ + xfs_btnum_t btnum, /* btree identifier */ + xfs_inode_t *ip, /* (B only) inode owning the btree */ + int whichfork) /* (B only) data or attr fork */ +{ + xfs_agf_t *agf; /* (A) allocation group freespace */ + xfs_agi_t *agi; /* (I) allocation group inodespace */ + xfs_btree_cur_t *cur; /* return value */ + xfs_ifork_t *ifp; /* (I) inode fork pointer */ + int nlevels=0; /* number of levels in the btree */ + + ASSERT(xfs_btree_cur_zone != NULL); + /* + * Allocate a new cursor. + */ + cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); + /* + * Deduce the number of btree levels from the arguments. 
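+ * (For example, a bmap root with bb_level == 1 yields nlevels == 2, the
+ * extra level being the root kept in the inode fork; BNO/CNT and INO
+ * cursors read the count directly from agf_levels[] or agi_level.)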
+ */ + switch (btnum) { + case XFS_BTNUM_BNO: + case XFS_BTNUM_CNT: + agf = XFS_BUF_TO_AGF(agbp); + nlevels = INT_GET(agf->agf_levels[btnum], ARCH_CONVERT); + break; + case XFS_BTNUM_BMAP: + ifp = XFS_IFORK_PTR(ip, whichfork); + nlevels = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1; + break; + case XFS_BTNUM_INO: + agi = XFS_BUF_TO_AGI(agbp); + nlevels = INT_GET(agi->agi_level, ARCH_CONVERT); + break; + default: + ASSERT(0); + } + /* + * Fill in the common fields. + */ + cur->bc_tp = tp; + cur->bc_mp = mp; + cur->bc_nlevels = nlevels; + cur->bc_btnum = btnum; + cur->bc_blocklog = mp->m_sb.sb_blocklog; + /* + * Fill in private fields. + */ + switch (btnum) { + case XFS_BTNUM_BNO: + case XFS_BTNUM_CNT: + /* + * Allocation btree fields. + */ + cur->bc_private.a.agbp = agbp; + cur->bc_private.a.agno = agno; + break; + case XFS_BTNUM_BMAP: + /* + * Bmap btree fields. + */ + cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork); + cur->bc_private.b.ip = ip; + cur->bc_private.b.firstblock = NULLFSBLOCK; + cur->bc_private.b.flist = NULL; + cur->bc_private.b.allocated = 0; + cur->bc_private.b.flags = 0; + cur->bc_private.b.whichfork = whichfork; + break; + case XFS_BTNUM_INO: + /* + * Inode allocation btree fields. + */ + cur->bc_private.i.agbp = agbp; + cur->bc_private.i.agno = agno; + break; + default: + ASSERT(0); + } + return cur; +} + +/* + * Check for the cursor referring to the last block at the given level. + */ +int /* 1=is last block, 0=not last block */ +xfs_btree_islastblock( + xfs_btree_cur_t *cur, /* btree cursor */ + int level) /* level to check */ +{ + xfs_btree_block_t *block; /* generic btree block pointer */ + xfs_buf_t *bp; /* buffer containing block */ + + block = xfs_btree_get_block(cur, level, &bp); + xfs_btree_check_block(cur, block, level, bp); + if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) + return INT_GET(block->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO; + else + return INT_GET(block->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK; +} + +/* + * Change the cursor to point to the last record in the current block + * at the given level. Other levels are unaffected. + */ +int /* success=1, failure=0 */ +xfs_btree_lastrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level) /* level to change */ +{ + xfs_btree_block_t *block; /* generic btree block pointer */ + xfs_buf_t *bp; /* buffer containing block */ + + /* + * Get the block pointer for this level. + */ + block = xfs_btree_get_block(cur, level, &bp); + xfs_btree_check_block(cur, block, level, bp); + /* + * It's empty, there is no such record. + */ + if (INT_ISZERO(block->bb_h.bb_numrecs, ARCH_CONVERT)) + return 0; + /* + * Set the ptr value to numrecs, that's the last record/key. + */ + cur->bc_ptrs[level] = INT_GET(block->bb_h.bb_numrecs, ARCH_CONVERT); + return 1; +} + +/* + * Compute first and last byte offsets for the fields given. + * Interprets the offsets table, which contains struct field offsets. + */ +void +xfs_btree_offsets( + __int64_t fields, /* bitmask of fields */ + const short *offsets, /* table of field offsets */ + int nbits, /* number of bits to inspect */ + int *first, /* output: first byte offset */ + int *last) /* output: last byte offset */ +{ + int i; /* current bit number */ + __int64_t imask; /* mask for current bit number */ + + ASSERT(fields != 0); + /* + * Find the lowest bit, so the first byte offset. + */ + for (i = 0, imask = 1LL; ; i++, imask <<= 1) { + if (imask & fields) { + *first = offsets[i]; + break; + } + } + /* + * Find the highest bit, so the last byte offset. 
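+ * (Worked example: fields == (XFS_BB_LEVEL | XFS_BB_NUMRECS) makes the
+ * loop above set *first = offsets[1]; this loop stops at bit 2 and sets
+ * *last = offsets[3] - 1, i.e. the byte just before the next field.)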
+ */ + for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) { + if (imask & fields) { + *last = offsets[i + 1] - 1; + break; + } + } +} + +/* + * Get a buffer for the block, return it read in. + * Long-form addressing. + */ +int /* error */ +xfs_btree_read_bufl( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_fsblock_t fsbno, /* file system block number */ + uint lock, /* lock flags for read_buf */ + xfs_buf_t **bpp, /* buffer for fsbno */ + int refval) /* ref count value for buffer */ +{ + xfs_buf_t *bp; /* return value */ + xfs_daddr_t d; /* real disk block address */ + int error; + + ASSERT(fsbno != NULLFSBLOCK); + d = XFS_FSB_TO_DADDR(mp, fsbno); + if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, + mp->m_bsize, lock, &bp))) { + return error; + } + ASSERT(!bp || !XFS_BUF_GETERROR(bp)); + if (bp != NULL) { + XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); + } + *bpp = bp; + return 0; +} + +/* + * Get a buffer for the block, return it read in. + * Short-form addressing. + */ +int /* error */ +xfs_btree_read_bufs( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + uint lock, /* lock flags for read_buf */ + xfs_buf_t **bpp, /* buffer for agno/agbno */ + int refval) /* ref count value for buffer */ +{ + xfs_buf_t *bp; /* return value */ + xfs_daddr_t d; /* real disk block address */ + int error; + + ASSERT(agno != NULLAGNUMBER); + ASSERT(agbno != NULLAGBLOCK); + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, + mp->m_bsize, lock, &bp))) { + return error; + } + ASSERT(!bp || !XFS_BUF_GETERROR(bp)); + if (bp != NULL) { + switch (refval) { + case XFS_ALLOC_BTREE_REF: + XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); + break; + case XFS_INO_BTREE_REF: + XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, refval); + break; + } + } + *bpp = bp; + return 0; +} + +/* + * Read-ahead the block, don't wait for it, don't return a buffer. + * Long-form addressing. + */ +/* ARGSUSED */ +void +xfs_btree_reada_bufl( + xfs_mount_t *mp, /* file system mount point */ + xfs_fsblock_t fsbno, /* file system block number */ + xfs_extlen_t count) /* count of filesystem blocks */ +{ + xfs_daddr_t d; + + ASSERT(fsbno != NULLFSBLOCK); + d = XFS_FSB_TO_DADDR(mp, fsbno); + xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); +} + +/* + * Read-ahead the block, don't wait for it, don't return a buffer. + * Short-form addressing. + */ +/* ARGSUSED */ +void +xfs_btree_reada_bufs( + xfs_mount_t *mp, /* file system mount point */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + xfs_extlen_t count) /* count of filesystem blocks */ +{ + xfs_daddr_t d; + + ASSERT(agno != NULLAGNUMBER); + ASSERT(agbno != NULLAGBLOCK); + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); +} + +/* + * Read-ahead btree blocks, at the given level. + * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. 
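+ *
+ * Callers normally go through the xfs_btree_readahead() wrapper, e.g.
+ * (illustrative only):
+ *
+ *	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+ *
+ * before walking to a right sibling; the wrapper is a no-op when those
+ * bits are already set in bc_ra[level].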
+ */ +int +xfs_btree_readahead_core( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + int lr) /* left/right bits */ +{ + xfs_alloc_block_t *a; + xfs_bmbt_block_t *b; + xfs_inobt_block_t *i; + int rval = 0; + + ASSERT(cur->bc_bufs[lev] != NULL); + cur->bc_ra[lev] |= lr; + switch (cur->bc_btnum) { + case XFS_BTNUM_BNO: + case XFS_BTNUM_CNT: + a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); + if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(a->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, + INT_GET(a->bb_leftsib, ARCH_CONVERT), 1); + rval++; + } + if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(a->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, + INT_GET(a->bb_rightsib, ARCH_CONVERT), 1); + rval++; + } + break; + case XFS_BTNUM_BMAP: + b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]); + if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(b->bb_leftsib, ARCH_CONVERT) != NULLDFSBNO) { + xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_leftsib, ARCH_CONVERT), 1); + rval++; + } + if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(b->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { + xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_rightsib, ARCH_CONVERT), 1); + rval++; + } + break; + case XFS_BTNUM_INO: + i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); + if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(i->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, + INT_GET(i->bb_leftsib, ARCH_CONVERT), 1); + rval++; + } + if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(i->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, + INT_GET(i->bb_rightsib, ARCH_CONVERT), 1); + rval++; + } + break; + default: + ASSERT(0); + } + return rval; +} + +/* + * Set the buffer for level "lev" in the cursor to bp, releasing + * any previous buffer. + */ +void +xfs_btree_setbuf( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + xfs_buf_t *bp) /* new buffer to set */ +{ + xfs_btree_block_t *b; /* btree block */ + xfs_buf_t *obp; /* old buffer pointer */ + + obp = cur->bc_bufs[lev]; + if (obp) + xfs_trans_brelse(cur->bc_tp, obp); + cur->bc_bufs[lev] = bp; + cur->bc_ra[lev] = 0; + if (!bp) + return; + b = XFS_BUF_TO_BLOCK(bp); + if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) { + if (INT_GET(b->bb_u.l.bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) + cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; + if (INT_GET(b->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) + cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; + } else { + if (INT_GET(b->bb_u.s.bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) + cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; + if (INT_GET(b->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) + cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; + } +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_btree.h linux.22-ac2/fs/xfs/xfs_btree.h --- linux.vanilla/fs/xfs/xfs_btree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_btree.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,592 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BTREE_H__ +#define __XFS_BTREE_H__ + +struct xfs_buf; +struct xfs_bmap_free; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * This nonsense is to make -wlint happy. + */ +#define XFS_LOOKUP_EQ ((xfs_lookup_t)XFS_LOOKUP_EQi) +#define XFS_LOOKUP_LE ((xfs_lookup_t)XFS_LOOKUP_LEi) +#define XFS_LOOKUP_GE ((xfs_lookup_t)XFS_LOOKUP_GEi) + +#define XFS_BTNUM_BNO ((xfs_btnum_t)XFS_BTNUM_BNOi) +#define XFS_BTNUM_CNT ((xfs_btnum_t)XFS_BTNUM_CNTi) +#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi) +#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi) + +/* + * Short form header: space allocation btrees. + */ +typedef struct xfs_btree_sblock +{ + __uint32_t bb_magic; /* magic number for block type */ + __uint16_t bb_level; /* 0 is a leaf */ + __uint16_t bb_numrecs; /* current # of data records */ + xfs_agblock_t bb_leftsib; /* left sibling block or NULLAGBLOCK */ + xfs_agblock_t bb_rightsib; /* right sibling block or NULLAGBLOCK */ +} xfs_btree_sblock_t; + +/* + * Long form header: bmap btrees. + */ +typedef struct xfs_btree_lblock +{ + __uint32_t bb_magic; /* magic number for block type */ + __uint16_t bb_level; /* 0 is a leaf */ + __uint16_t bb_numrecs; /* current # of data records */ + xfs_dfsbno_t bb_leftsib; /* left sibling block or NULLDFSBNO */ + xfs_dfsbno_t bb_rightsib; /* right sibling block or NULLDFSBNO */ +} xfs_btree_lblock_t; + +/* + * Combined header and structure, used by common code. + */ +typedef struct xfs_btree_hdr +{ + __uint32_t bb_magic; /* magic number for block type */ + __uint16_t bb_level; /* 0 is a leaf */ + __uint16_t bb_numrecs; /* current # of data records */ +} xfs_btree_hdr_t; + +typedef struct xfs_btree_block +{ + xfs_btree_hdr_t bb_h; /* header */ + union { + struct { + xfs_agblock_t bb_leftsib; + xfs_agblock_t bb_rightsib; + } s; /* short form pointers */ + struct { + xfs_dfsbno_t bb_leftsib; + xfs_dfsbno_t bb_rightsib; + } l; /* long form pointers */ + } bb_u; /* rest */ +} xfs_btree_block_t; + +/* + * For logging record fields. + */ +#define XFS_BB_MAGIC 0x01 +#define XFS_BB_LEVEL 0x02 +#define XFS_BB_NUMRECS 0x04 +#define XFS_BB_LEFTSIB 0x08 +#define XFS_BB_RIGHTSIB 0x10 +#define XFS_BB_NUM_BITS 5 +#define XFS_BB_ALL_BITS ((1 << XFS_BB_NUM_BITS) - 1) + +/* + * Boolean to select which form of xfs_btree_block_t.bb_u to use. 
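+ * E.g. xfs_btree_islastblock() tests bb_u.l.bb_rightsib against
+ * NULLDFSBNO when this is true (bmap cursors) and bb_u.s.bb_rightsib
+ * against NULLAGBLOCK otherwise.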
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BTREE_LONG_PTRS) +int xfs_btree_long_ptrs(xfs_btnum_t btnum); +#define XFS_BTREE_LONG_PTRS(btnum) ((btnum) == XFS_BTNUM_BMAP) +#else +#define XFS_BTREE_LONG_PTRS(btnum) ((btnum) == XFS_BTNUM_BMAP) +#endif + +/* + * Magic numbers for btree blocks. + */ +extern const __uint32_t xfs_magics[]; + +/* + * Maximum and minimum records in a btree block. + * Given block size, type prefix, and leaf flag (0 or 1). + * The divisor below is equivalent to lf ? (e1) : (e2) but that produces + * compiler warnings. + */ +#define XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) \ + ((int)(((bsz) - (uint)sizeof(t ## _block_t)) / \ + (((lf) * (uint)sizeof(t ## _rec_t)) + \ + ((1 - (lf)) * \ + ((uint)sizeof(t ## _key_t) + (uint)sizeof(t ## _ptr_t)))))) +#define XFS_BTREE_BLOCK_MINRECS(bsz,t,lf) \ + (XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) / 2) + +/* + * Record, key, and pointer address calculation macros. + * Given block size, type prefix, block pointer, and index of requested entry + * (first entry numbered 1). + */ +#define XFS_BTREE_REC_ADDR(bsz,t,bb,i,mxr) \ + ((t ## _rec_t *)((char *)(bb) + sizeof(t ## _block_t) + \ + ((i) - 1) * sizeof(t ## _rec_t))) +#define XFS_BTREE_KEY_ADDR(bsz,t,bb,i,mxr) \ + ((t ## _key_t *)((char *)(bb) + sizeof(t ## _block_t) + \ + ((i) - 1) * sizeof(t ## _key_t))) +#define XFS_BTREE_PTR_ADDR(bsz,t,bb,i,mxr) \ + ((t ## _ptr_t *)((char *)(bb) + sizeof(t ## _block_t) + \ + (mxr) * sizeof(t ## _key_t) + ((i) - 1) * sizeof(t ## _ptr_t))) + +#define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */ + +/* + * Btree cursor structure. + * This collects all information needed by the btree code in one place. + */ +typedef struct xfs_btree_cur +{ + struct xfs_trans *bc_tp; /* transaction we're in, if any */ + struct xfs_mount *bc_mp; /* file system mount struct */ + union { + xfs_alloc_rec_t a; + xfs_bmbt_irec_t b; + xfs_inobt_rec_t i; + } bc_rec; /* current insert/search record value */ + struct xfs_buf *bc_bufs[XFS_BTREE_MAXLEVELS]; /* buf ptr per level */ + int bc_ptrs[XFS_BTREE_MAXLEVELS]; /* key/record # */ + __uint8_t bc_ra[XFS_BTREE_MAXLEVELS]; /* readahead bits */ +#define XFS_BTCUR_LEFTRA 1 /* left sibling has been read-ahead */ +#define XFS_BTCUR_RIGHTRA 2 /* right sibling has been read-ahead */ + __uint8_t bc_nlevels; /* number of levels in the tree */ + __uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */ + xfs_btnum_t bc_btnum; /* identifies which btree type */ + union { + struct { /* needed for BNO, CNT */ + struct xfs_buf *agbp; /* agf buffer pointer */ + xfs_agnumber_t agno; /* ag number */ + } a; + struct { /* needed for BMAP */ + struct xfs_inode *ip; /* pointer to our inode */ + struct xfs_bmap_free *flist; /* list to free after */ + xfs_fsblock_t firstblock; /* 1st blk allocated */ + int allocated; /* count of alloced */ + short forksize; /* fork's inode space */ + char whichfork; /* data or attr fork */ + char flags; /* flags */ +#define XFS_BTCUR_BPRV_WASDEL 1 /* was delayed */ + } b; + struct { /* needed for INO */ + struct xfs_buf *agbp; /* agi buffer pointer */ + xfs_agnumber_t agno; /* ag number */ + } i; + } bc_private; /* per-btree type data */ +} xfs_btree_cur_t; + +#define XFS_BTREE_NOERROR 0 +#define XFS_BTREE_ERROR 1 + +/* + * Convert from buffer to btree block header. 
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_BLOCK) +xfs_btree_block_t *xfs_buf_to_block(struct xfs_buf *bp); +#define XFS_BUF_TO_BLOCK(bp) xfs_buf_to_block(bp) +#else +#define XFS_BUF_TO_BLOCK(bp) ((xfs_btree_block_t *)(XFS_BUF_PTR(bp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_LBLOCK) +xfs_btree_lblock_t *xfs_buf_to_lblock(struct xfs_buf *bp); +#define XFS_BUF_TO_LBLOCK(bp) xfs_buf_to_lblock(bp) +#else +#define XFS_BUF_TO_LBLOCK(bp) ((xfs_btree_lblock_t *)(XFS_BUF_PTR(bp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBLOCK) +xfs_btree_sblock_t *xfs_buf_to_sblock(struct xfs_buf *bp); +#define XFS_BUF_TO_SBLOCK(bp) xfs_buf_to_sblock(bp) +#else +#define XFS_BUF_TO_SBLOCK(bp) ((xfs_btree_sblock_t *)(XFS_BUF_PTR(bp))) +#endif + +#ifdef __KERNEL__ + +#ifdef DEBUG +/* + * Debug routine: check that block header is ok. + */ +void +xfs_btree_check_block( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_block_t *block, /* generic btree block pointer */ + int level, /* level of the btree block */ + struct xfs_buf *bp); /* buffer containing block, if any */ + +/* + * Debug routine: check that keys are in the right order. + */ +void +xfs_btree_check_key( + xfs_btnum_t btnum, /* btree identifier */ + void *ak1, /* pointer to left (lower) key */ + void *ak2); /* pointer to right (higher) key */ + +/* + * Debug routine: check that records are in the right order. + */ +void +xfs_btree_check_rec( + xfs_btnum_t btnum, /* btree identifier */ + void *ar1, /* pointer to left (lower) record */ + void *ar2); /* pointer to right (higher) record */ +#else +#define xfs_btree_check_block(a,b,c,d) +#define xfs_btree_check_key(a,b,c) +#define xfs_btree_check_rec(a,b,c) +#endif /* DEBUG */ + +/* + * Checking routine: check that long form block header is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lblock( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_lblock_t *block, /* btree long form block pointer */ + int level, /* level of the btree block */ + struct xfs_buf *bp); /* buffer containing block, if any */ + +/* + * Checking routine: check that (long) pointer is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lptr( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_dfsbno_t ptr, /* btree block disk address */ + int level); /* btree block level */ + +/* + * Checking routine: check that short form block header is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_sblock( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_btree_sblock_t *block, /* btree short form block pointer */ + int level, /* level of the btree block */ + struct xfs_buf *bp); /* buffer containing block */ + +/* + * Checking routine: check that (short) pointer is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_sptr( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agblock_t ptr, /* btree block disk address */ + int level); /* btree block level */ + +/* + * Delete the btree cursor. + */ +void +xfs_btree_del_cursor( + xfs_btree_cur_t *cur, /* btree cursor */ + int error); /* del because of error */ + +/* + * Duplicate the btree cursor. + * Allocate a new one, copy the record, re-get the buffers. + */ +int /* error */ +xfs_btree_dup_cursor( + xfs_btree_cur_t *cur, /* input cursor */ + xfs_btree_cur_t **ncur);/* output cursor */ + +/* + * Change the cursor to point to the first record in the current block + * at the given level. Other levels are unaffected. 
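+ * Returns 0 if the block at that level is empty; otherwise sets
+ * bc_ptrs[level] to 1 and returns 1.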
+ */ +int /* success=1, failure=0 */ +xfs_btree_firstrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level); /* level to change */ + +/* + * Retrieve the block pointer from the cursor at the given level. + * This may be a bmap btree root or from a buffer. + */ +xfs_btree_block_t * /* generic btree block pointer */ +xfs_btree_get_block( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree */ + struct xfs_buf **bpp); /* buffer containing the block */ + +/* + * Get a buffer for the block, return it with no data read. + * Long-form addressing. + */ +struct xfs_buf * /* buffer for fsbno */ +xfs_btree_get_bufl( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_fsblock_t fsbno, /* file system block number */ + uint lock); /* lock flags for get_buf */ + +/* + * Get a buffer for the block, return it with no data read. + * Short-form addressing. + */ +struct xfs_buf * /* buffer for agno/agbno */ +xfs_btree_get_bufs( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + uint lock); /* lock flags for get_buf */ + +/* + * Allocate a new btree cursor. + * The cursor is either for allocation (A) or bmap (B). + */ +xfs_btree_cur_t * /* new btree cursor */ +xfs_btree_init_cursor( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_buf *agbp, /* (A only) buffer for agf structure */ + xfs_agnumber_t agno, /* (A only) allocation group number */ + xfs_btnum_t btnum, /* btree identifier */ + struct xfs_inode *ip, /* (B only) inode owning the btree */ + int whichfork); /* (B only) data/attr fork */ + +/* + * Check for the cursor referring to the last block at the given level. + */ +int /* 1=is last block, 0=not last block */ +xfs_btree_islastblock( + xfs_btree_cur_t *cur, /* btree cursor */ + int level); /* level to check */ + +/* + * Change the cursor to point to the last record in the current block + * at the given level. Other levels are unaffected. + */ +int /* success=1, failure=0 */ +xfs_btree_lastrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level); /* level to change */ + +/* + * Compute first and last byte offsets for the fields given. + * Interprets the offsets table, which contains struct field offsets. + */ +void +xfs_btree_offsets( + __int64_t fields, /* bitmask of fields */ + const short *offsets,/* table of field offsets */ + int nbits, /* number of bits to inspect */ + int *first, /* output: first byte offset */ + int *last); /* output: last byte offset */ + +/* + * Get a buffer for the block, return it read in. + * Long-form addressing. + */ +int /* error */ +xfs_btree_read_bufl( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_fsblock_t fsbno, /* file system block number */ + uint lock, /* lock flags for read_buf */ + struct xfs_buf **bpp, /* buffer for fsbno */ + int refval);/* ref count value for buffer */ + +/* + * Get a buffer for the block, return it read in. + * Short-form addressing. 
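+ *
+ * A hypothetical caller fetching an alloc-btree block might do, roughly:
+ *
+ *	if ((error = xfs_btree_read_bufs(mp, tp, agno, agbno, 0, &bp,
+ *			XFS_ALLOC_BTREE_REF)))
+ *		return error;
+ *
+ * (variable names and the 0 lock flags are illustrative only).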
+ */ +int /* error */ +xfs_btree_read_bufs( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + uint lock, /* lock flags for read_buf */ + struct xfs_buf **bpp, /* buffer for agno/agbno */ + int refval);/* ref count value for buffer */ + +/* + * Read-ahead the block, don't wait for it, don't return a buffer. + * Long-form addressing. + */ +void /* error */ +xfs_btree_reada_bufl( + struct xfs_mount *mp, /* file system mount point */ + xfs_fsblock_t fsbno, /* file system block number */ + xfs_extlen_t count); /* count of filesystem blocks */ + +/* + * Read-ahead the block, don't wait for it, don't return a buffer. + * Short-form addressing. + */ +void /* error */ +xfs_btree_reada_bufs( + struct xfs_mount *mp, /* file system mount point */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_agblock_t agbno, /* allocation group block number */ + xfs_extlen_t count); /* count of filesystem blocks */ + +/* + * Read-ahead btree blocks, at the given level. + * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. + */ +int /* readahead block count */ +xfs_btree_readahead_core( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + int lr); /* left/right bits */ + +static inline int /* readahead block count */ +xfs_btree_readahead( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + int lr) /* left/right bits */ +{ + if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev]) + return 0; + + return xfs_btree_readahead_core(cur, lev, lr); +} + + +/* + * Set the buffer for level "lev" in the cursor to bp, releasing + * any previous buffer. + */ +void +xfs_btree_setbuf( + xfs_btree_cur_t *cur, /* btree cursor */ + int lev, /* level in btree */ + struct xfs_buf *bp); /* new buffer to set */ + +#endif /* __KERNEL__ */ + + +/* + * Min and max functions for extlen, agblock, fileoff, and filblks types. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXTLEN_MIN) +xfs_extlen_t xfs_extlen_min(xfs_extlen_t a, xfs_extlen_t b); +#define XFS_EXTLEN_MIN(a,b) xfs_extlen_min(a,b) +#else +#define XFS_EXTLEN_MIN(a,b) \ + ((xfs_extlen_t)(a) < (xfs_extlen_t)(b) ? \ + (xfs_extlen_t)(a) : (xfs_extlen_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_EXTLEN_MAX) +xfs_extlen_t xfs_extlen_max(xfs_extlen_t a, xfs_extlen_t b); +#define XFS_EXTLEN_MAX(a,b) xfs_extlen_max(a,b) +#else +#define XFS_EXTLEN_MAX(a,b) \ + ((xfs_extlen_t)(a) > (xfs_extlen_t)(b) ? \ + (xfs_extlen_t)(a) : (xfs_extlen_t)(b)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGBLOCK_MIN) +xfs_agblock_t xfs_agblock_min(xfs_agblock_t a, xfs_agblock_t b); +#define XFS_AGBLOCK_MIN(a,b) xfs_agblock_min(a,b) +#else +#define XFS_AGBLOCK_MIN(a,b) \ + ((xfs_agblock_t)(a) < (xfs_agblock_t)(b) ? \ + (xfs_agblock_t)(a) : (xfs_agblock_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGBLOCK_MAX) +xfs_agblock_t xfs_agblock_max(xfs_agblock_t a, xfs_agblock_t b); +#define XFS_AGBLOCK_MAX(a,b) xfs_agblock_max(a,b) +#else +#define XFS_AGBLOCK_MAX(a,b) \ + ((xfs_agblock_t)(a) > (xfs_agblock_t)(b) ? \ + (xfs_agblock_t)(a) : (xfs_agblock_t)(b)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILEOFF_MIN) +xfs_fileoff_t xfs_fileoff_min(xfs_fileoff_t a, xfs_fileoff_t b); +#define XFS_FILEOFF_MIN(a,b) xfs_fileoff_min(a,b) +#else +#define XFS_FILEOFF_MIN(a,b) \ + ((xfs_fileoff_t)(a) < (xfs_fileoff_t)(b) ? 
\ + (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILEOFF_MAX) +xfs_fileoff_t xfs_fileoff_max(xfs_fileoff_t a, xfs_fileoff_t b); +#define XFS_FILEOFF_MAX(a,b) xfs_fileoff_max(a,b) +#else +#define XFS_FILEOFF_MAX(a,b) \ + ((xfs_fileoff_t)(a) > (xfs_fileoff_t)(b) ? \ + (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILBLKS_MIN) +xfs_filblks_t xfs_filblks_min(xfs_filblks_t a, xfs_filblks_t b); +#define XFS_FILBLKS_MIN(a,b) xfs_filblks_min(a,b) +#else +#define XFS_FILBLKS_MIN(a,b) \ + ((xfs_filblks_t)(a) < (xfs_filblks_t)(b) ? \ + (xfs_filblks_t)(a) : (xfs_filblks_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FILBLKS_MAX) +xfs_filblks_t xfs_filblks_max(xfs_filblks_t a, xfs_filblks_t b); +#define XFS_FILBLKS_MAX(a,b) xfs_filblks_max(a,b) +#else +#define XFS_FILBLKS_MAX(a,b) \ + ((xfs_filblks_t)(a) > (xfs_filblks_t)(b) ? \ + (xfs_filblks_t)(a) : (xfs_filblks_t)(b)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_SANITY_CHECK) +int xfs_fsb_sanity_check(struct xfs_mount *mp, xfs_fsblock_t fsb); +#define XFS_FSB_SANITY_CHECK(mp,fsb) xfs_fsb_sanity_check(mp,fsb) +#else +#define XFS_FSB_SANITY_CHECK(mp,fsb) \ + (XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \ + XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks) +#endif + +/* + * Macros to set EFSCORRUPTED & return/branch. + */ +#define XFS_WANT_CORRUPTED_GOTO(x,l) \ + { \ + int fs_is_ok = (x); \ + ASSERT(fs_is_ok); \ + if (unlikely(!fs_is_ok)) { \ + XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \ + XFS_ERRLEVEL_LOW, NULL); \ + error = XFS_ERROR(EFSCORRUPTED); \ + goto l; \ + } \ + } + +#define XFS_WANT_CORRUPTED_RETURN(x) \ + { \ + int fs_is_ok = (x); \ + ASSERT(fs_is_ok); \ + if (unlikely(!fs_is_ok)) { \ + XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \ + XFS_ERRLEVEL_LOW, NULL); \ + return XFS_ERROR(EFSCORRUPTED); \ + } \ + } + +#endif /* __XFS_BTREE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_buf.h linux.22-ac2/fs/xfs/xfs_buf.h --- linux.vanilla/fs/xfs/xfs_buf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_buf.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BUF_H__ +#define __XFS_BUF_H__ + +/* These are just for xfs_syncsub... it sets an internal variable + * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t + */ +#define XFS_B_ASYNC PBF_ASYNC +#define XFS_B_DELWRI PBF_DELWRI +#define XFS_B_READ PBF_READ +#define XFS_B_WRITE PBF_WRITE +#define XFS_B_STALE PBF_STALE + +#define XFS_BUF_TRYLOCK PBF_TRYLOCK +#define XFS_INCORE_TRYLOCK PBF_TRYLOCK +#define XFS_BUF_LOCK PBF_LOCK +#define XFS_BUF_MAPPED PBF_MAPPED + +#define BUF_BUSY PBF_DONT_BLOCK + +#define XFS_BUF_BFLAGS(x) ((x)->pb_flags) +#define XFS_BUF_ZEROFLAGS(x) \ + ((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_DELWRI)) + +#define XFS_BUF_STALE(x) ((x)->pb_flags |= XFS_B_STALE) +#define XFS_BUF_UNSTALE(x) ((x)->pb_flags &= ~XFS_B_STALE) +#define XFS_BUF_ISSTALE(x) ((x)->pb_flags & XFS_B_STALE) +#define XFS_BUF_SUPER_STALE(x) do { \ + XFS_BUF_STALE(x); \ + xfs_buf_undelay(x); \ + XFS_BUF_DONE(x); \ + } while (0) + +#define XFS_BUF_MANAGE PBF_FS_MANAGED +#define XFS_BUF_UNMANAGE(x) ((x)->pb_flags &= ~PBF_FS_MANAGED) + +static inline void xfs_buf_undelay(page_buf_t *pb) +{ + if (pb->pb_flags & PBF_DELWRI) { + if (pb->pb_list.next != &pb->pb_list) { + pagebuf_delwri_dequeue(pb); + pagebuf_rele(pb); + } else { + pb->pb_flags &= ~PBF_DELWRI; + } + } +} + +#define XFS_BUF_DELAYWRITE(x) ((x)->pb_flags |= PBF_DELWRI) +#define XFS_BUF_UNDELAYWRITE(x) xfs_buf_undelay(x) +#define XFS_BUF_ISDELAYWRITE(x) ((x)->pb_flags & PBF_DELWRI) + +#define XFS_BUF_ERROR(x,no) pagebuf_ioerror(x,no) +#define XFS_BUF_GETERROR(x) pagebuf_geterror(x) +#define XFS_BUF_ISERROR(x) (pagebuf_geterror(x)?1:0) + +#define XFS_BUF_DONE(x) ((x)->pb_flags &= ~(PBF_PARTIAL|PBF_NONE)) +#define XFS_BUF_UNDONE(x) ((x)->pb_flags |= PBF_PARTIAL|PBF_NONE) +#define XFS_BUF_ISDONE(x) (!(PBF_NOT_DONE(x))) + +#define XFS_BUF_BUSY(x) ((x)->pb_flags |= PBF_FORCEIO) +#define XFS_BUF_UNBUSY(x) ((x)->pb_flags &= ~PBF_FORCEIO) +#define XFS_BUF_ISBUSY(x) (1) + +#define XFS_BUF_ASYNC(x) ((x)->pb_flags |= PBF_ASYNC) +#define XFS_BUF_UNASYNC(x) ((x)->pb_flags &= ~PBF_ASYNC) +#define XFS_BUF_ISASYNC(x) ((x)->pb_flags & PBF_ASYNC) + +#define XFS_BUF_FLUSH(x) ((x)->pb_flags |= PBF_FLUSH) +#define XFS_BUF_UNFLUSH(x) ((x)->pb_flags &= ~PBF_FLUSH) +#define XFS_BUF_ISFLUSH(x) ((x)->pb_flags & PBF_FLUSH) + +#define XFS_BUF_SHUT(x) printk("XFS_BUF_SHUT not implemented yet\n") +#define XFS_BUF_UNSHUT(x) printk("XFS_BUF_UNSHUT not implemented yet\n") +#define XFS_BUF_ISSHUT(x) (0) + +#define XFS_BUF_HOLD(x) pagebuf_hold(x) +#define XFS_BUF_READ(x) ((x)->pb_flags |= PBF_READ) +#define XFS_BUF_UNREAD(x) ((x)->pb_flags &= ~PBF_READ) +#define XFS_BUF_ISREAD(x) ((x)->pb_flags & PBF_READ) + +#define XFS_BUF_WRITE(x) ((x)->pb_flags |= PBF_WRITE) +#define XFS_BUF_UNWRITE(x) ((x)->pb_flags &= ~PBF_WRITE) +#define XFS_BUF_ISWRITE(x) ((x)->pb_flags & PBF_WRITE) + +#define XFS_BUF_ISUNINITIAL(x) (0) +#define XFS_BUF_UNUNINITIAL(x) (0) + +#define XFS_BUF_BP_ISMAPPED(bp) 1 + +typedef struct page_buf_s xfs_buf_t; +#define xfs_buf page_buf_s + +typedef struct pb_target xfs_buftarg_t; +#define xfs_buftarg pb_target + +#define XFS_BUF_DATAIO(x) ((x)->pb_flags |= PBF_FS_DATAIOD) +#define XFS_BUF_UNDATAIO(x) ((x)->pb_flags &= ~PBF_FS_DATAIOD) + +#define 
XFS_BUF_IODONE_FUNC(buf) (buf)->pb_iodone +#define XFS_BUF_SET_IODONE_FUNC(buf, func) \ + (buf)->pb_iodone = (func) +#define XFS_BUF_CLR_IODONE_FUNC(buf) \ + (buf)->pb_iodone = NULL +#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func) \ + (buf)->pb_strat = (func) +#define XFS_BUF_CLR_BDSTRAT_FUNC(buf) \ + (buf)->pb_strat = NULL + +#define XFS_BUF_FSPRIVATE(buf, type) \ + ((type)(buf)->pb_fspriv) +#define XFS_BUF_SET_FSPRIVATE(buf, value) \ + (buf)->pb_fspriv = (void *)(value) +#define XFS_BUF_FSPRIVATE2(buf, type) \ + ((type)(buf)->pb_fspriv2) +#define XFS_BUF_SET_FSPRIVATE2(buf, value) \ + (buf)->pb_fspriv2 = (void *)(value) +#define XFS_BUF_FSPRIVATE3(buf, type) \ + ((type)(buf)->pb_fspriv3) +#define XFS_BUF_SET_FSPRIVATE3(buf, value) \ + (buf)->pb_fspriv3 = (void *)(value) +#define XFS_BUF_SET_START(buf) + +#define XFS_BUF_SET_BRELSE_FUNC(buf, value) \ + (buf)->pb_relse = (value) + +#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr) + +extern inline xfs_caddr_t xfs_buf_offset(page_buf_t *bp, size_t offset) +{ + if (bp->pb_flags & PBF_MAPPED) + return XFS_BUF_PTR(bp) + offset; + return (xfs_caddr_t) pagebuf_offset(bp, offset); +} + +#define XFS_BUF_SET_PTR(bp, val, count) \ + pagebuf_associate_memory(bp, val, count) +#define XFS_BUF_ADDR(bp) ((bp)->pb_bn) +#define XFS_BUF_SET_ADDR(bp, blk) \ + ((bp)->pb_bn = (page_buf_daddr_t)(blk)) +#define XFS_BUF_OFFSET(bp) ((bp)->pb_file_offset) +#define XFS_BUF_SET_OFFSET(bp, off) \ + ((bp)->pb_file_offset = (off)) +#define XFS_BUF_COUNT(bp) ((bp)->pb_count_desired) +#define XFS_BUF_SET_COUNT(bp, cnt) \ + ((bp)->pb_count_desired = (cnt)) +#define XFS_BUF_SIZE(bp) ((bp)->pb_buffer_length) +#define XFS_BUF_SET_SIZE(bp, cnt) \ + ((bp)->pb_buffer_length = (cnt)) +#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) +#define XFS_BUF_SET_VTYPE(bp, type) +#define XFS_BUF_SET_REF(bp, ref) + +#define XFS_BUF_ISPINNED(bp) pagebuf_ispin(bp) + +#define XFS_BUF_VALUSEMA(bp) pagebuf_lock_value(bp) +#define XFS_BUF_CPSEMA(bp) (pagebuf_cond_lock(bp) == 0) +#define XFS_BUF_VSEMA(bp) pagebuf_unlock(bp) +#define XFS_BUF_PSEMA(bp,x) pagebuf_lock(bp) +#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema); + +/* setup the buffer target from a buftarg structure */ +#define XFS_BUF_SET_TARGET(bp, target) \ + (bp)->pb_target = (target) + +#define XFS_BUF_TARGET(bp) ((bp)->pb_target) +#define XFS_BUF_TARGET_DEV(bp) ((bp)->pb_target->pbr_dev) +#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) +#define XFS_BUF_SET_VTYPE(bp, type) +#define XFS_BUF_SET_REF(bp, ref) + +#define xfs_buf_read(target, blkno, len, flags) \ + pagebuf_get((target), (blkno), (len), \ + PBF_LOCK | PBF_READ | PBF_MAPPED | PBF_MAPPABLE) +#define xfs_buf_get(target, blkno, len, flags) \ + pagebuf_get((target), (blkno), (len), \ + PBF_LOCK | PBF_MAPPED | PBF_MAPPABLE) + +#define xfs_buf_read_flags(target, blkno, len, flags) \ + pagebuf_get((target), (blkno), (len), \ + PBF_READ | PBF_MAPPABLE | flags) +#define xfs_buf_get_flags(target, blkno, len, flags) \ + pagebuf_get((target), (blkno), (len), \ + PBF_MAPPABLE | flags) + +static inline int xfs_bawrite(void *mp, page_buf_t *bp) +{ + int ret; + + bp->pb_fspriv3 = mp; + bp->pb_strat = xfs_bdstrat_cb; + xfs_buf_undelay(bp); + if ((ret = pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC)) == 0) + pagebuf_run_queues(bp); + return ret; +} + +static inline void xfs_buf_relse(page_buf_t *bp) +{ + if ((bp->pb_flags & _PBF_LOCKABLE) && !bp->pb_relse) + pagebuf_unlock(bp); + + pagebuf_rele(bp); +} + + +#define xfs_bpin(bp) pagebuf_pin(bp) +#define xfs_bunpin(bp) pagebuf_unpin(bp) + +#ifdef 
PAGEBUF_TRACE +# define PB_DEFINE_TRACES +# include +# define xfs_buftrace(id, bp) PB_TRACE(bp, PB_TRACE_REC(external), (void *)id) +#else +# define xfs_buftrace(id, bp) do { } while (0) +#endif + + +#define xfs_biodone(pb) \ + pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), 0) + +#define xfs_incore(buftarg,blkno,len,lockit) \ + pagebuf_find(buftarg, blkno ,len, lockit) + + +#define xfs_biomove(pb, off, len, data, rw) \ + pagebuf_iomove((pb), (off), (len), (data), \ + ((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ) + +#define xfs_biozero(pb, off, len) \ + pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO) + + +static inline int XFS_bwrite(page_buf_t *pb) +{ + int sync = (pb->pb_flags & PBF_ASYNC) == 0; + int error; + + pb->pb_flags |= PBF_SYNC; + + xfs_buf_undelay(pb); + + __pagebuf_iorequest(pb); + + if (sync) { + error = pagebuf_iowait(pb); + xfs_buf_relse(pb); + } else { + pagebuf_run_queues(pb); + error = 0; + } + + return error; +} + + +#define XFS_bdwrite(pb) \ + pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC) + +static inline int xfs_bdwrite(void *mp, page_buf_t *bp) +{ + bp->pb_strat = xfs_bdstrat_cb; + bp->pb_fspriv3 = mp; + + return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC); +} + +#define XFS_bdstrat(bp) pagebuf_iorequest(bp) + +#define xfs_iowait(pb) pagebuf_iowait(pb) + + +/* + * Go through all incore buffers, and release buffers + * if they belong to the given device. This is used in + * filesystem error handling to preserve the consistency + * of its metadata. + */ + +extern void XFS_bflush(xfs_buftarg_t *); +#define xfs_binval(buftarg) XFS_bflush(buftarg) + +#define xfs_incore_relse(buftarg,delwri_only,wait) \ + xfs_relse_buftarg(buftarg) + +#define xfs_baread(target, rablkno, ralen) \ + pagebuf_readahead((target), (rablkno), \ + (ralen), PBF_DONT_BLOCK) + +#define XFS_getrbuf(sleep,mp) \ + pagebuf_get_empty((mp)->m_ddev_targp) +#define XFS_ngetrbuf(len,mp) \ + pagebuf_get_no_daddr(len,(mp)->m_ddev_targp) +#define XFS_freerbuf(bp) pagebuf_free(bp) +#define XFS_nfreerbuf(bp) pagebuf_free(bp) + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_buf_item.c linux.22-ac2/fs/xfs/xfs_buf_item.c --- linux.vanilla/fs/xfs/xfs_buf_item.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_buf_item.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1218 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * This file contains the implementation of the xfs_buf_log_item. + * It contains the item operations used to manipulate the buf log + * items as well as utility routines used by the buffer specific + * transaction routines. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_rw.h" +#include "xfs_bit.h" +#include "xfs_error.h" + + +#define ROUNDUPNBWORD(x) (((x) + (NBWORD - 1)) & ~(NBWORD - 1)) + +kmem_zone_t *xfs_buf_item_zone; + +#ifdef XFS_TRANS_DEBUG +/* + * This function uses an alternate strategy for tracking the bytes + * that the user requests to be logged. This can then be used + * in conjunction with the bli_orig array in the buf log item to + * catch bugs in our callers' code. + * + * We also double check the bits set in xfs_buf_item_log using a + * simple algorithm to check that every byte is accounted for. + */ +STATIC void +xfs_buf_item_log_debug( + xfs_buf_log_item_t *bip, + uint first, + uint last) +{ + uint x; + uint byte; + uint nbytes; + uint chunk_num; + uint word_num; + uint bit_num; + uint bit_set; + uint *wordp; + + ASSERT(bip->bli_logged != NULL); + byte = first; + nbytes = last - first + 1; + bfset(bip->bli_logged, first, nbytes); + for (x = 0; x < nbytes; x++) { + chunk_num = byte >> XFS_BLI_SHIFT; + word_num = chunk_num >> BIT_TO_WORD_SHIFT; + bit_num = chunk_num & (NBWORD - 1); + wordp = &(bip->bli_format.blf_data_map[word_num]); + bit_set = *wordp & (1 << bit_num); + ASSERT(bit_set); + byte++; + } +} + +/* + * This function is called when we flush something into a buffer without + * logging it. This happens for things like inodes which are logged + * separately from the buffer. + */ +void +xfs_buf_item_flush_log_debug( + xfs_buf_t *bp, + uint first, + uint last) +{ + xfs_buf_log_item_t *bip; + uint nbytes; + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) { + return; + } + + ASSERT(bip->bli_logged != NULL); + nbytes = last - first + 1; + bfset(bip->bli_logged, first, nbytes); +} + +/* + * This function is called to verify that our caller's have logged + * all the bytes that they changed. + * + * It does this by comparing the original copy of the buffer stored in + * the buf log item's bli_orig array to the current copy of the buffer + * and ensuring that all bytes which miscompare are set in the bli_logged + * array of the buf log item. 
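+ *
+ * (All of this is built only under XFS_TRANS_DEBUG; other builds get the
+ * empty macro versions defined below.)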
+ */ +STATIC void +xfs_buf_item_log_check( + xfs_buf_log_item_t *bip) +{ + char *orig; + char *buffer; + int x; + xfs_buf_t *bp; + + ASSERT(bip->bli_orig != NULL); + ASSERT(bip->bli_logged != NULL); + + bp = bip->bli_buf; + ASSERT(XFS_BUF_COUNT(bp) > 0); + ASSERT(XFS_BUF_PTR(bp) != NULL); + orig = bip->bli_orig; + buffer = XFS_BUF_PTR(bp); + for (x = 0; x < XFS_BUF_COUNT(bp); x++) { + if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) + cmn_err(CE_PANIC, + "xfs_buf_item_log_check bip %x buffer %x orig %x index %d", + bip, bp, orig, x); + } +} +#else +#define xfs_buf_item_log_debug(x,y,z) +#define xfs_buf_item_log_check(x) +#endif + +STATIC void xfs_buf_error_relse(xfs_buf_t *bp); + +/* + * This returns the number of log iovecs needed to log the + * given buf log item. + * + * It calculates this as 1 iovec for the buf log format structure + * and 1 for each stretch of non-contiguous chunks to be logged. + * Contiguous chunks are logged in a single iovec. + * + * If the XFS_BLI_STALE flag has been set, then log nothing. + */ +uint +xfs_buf_item_size( + xfs_buf_log_item_t *bip) +{ + uint nvecs; + int next_bit; + int last_bit; + xfs_buf_t *bp; + + ASSERT(atomic_read(&bip->bli_refcount) > 0); + if (bip->bli_flags & XFS_BLI_STALE) { + /* + * The buffer is stale, so all we need to log + * is the buf log format structure with the + * cancel flag in it. + */ + xfs_buf_item_trace("SIZE STALE", bip); + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + return 1; + } + + bp = bip->bli_buf; + ASSERT(bip->bli_flags & XFS_BLI_LOGGED); + nvecs = 1; + last_bit = xfs_next_bit(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, 0); + ASSERT(last_bit != -1); + nvecs++; + while (last_bit != -1) { + /* + * This takes the bit number to start looking from and + * returns the next set bit from there. It returns -1 + * if there are no more bits set or the start bit is + * beyond the end of the bitmap. + */ + next_bit = xfs_next_bit(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, + last_bit + 1); + /* + * If we run out of bits, leave the loop, + * else if we find a new set of bits bump the number of vecs, + * else keep scanning the current set of bits. + */ + if (next_bit == -1) { + last_bit = -1; + } else if (next_bit != last_bit + 1) { + last_bit = next_bit; + nvecs++; + } else if (xfs_buf_offset(bp, next_bit * XFS_BLI_CHUNK) != + (xfs_buf_offset(bp, last_bit * XFS_BLI_CHUNK) + + XFS_BLI_CHUNK)) { + last_bit = next_bit; + nvecs++; + } else { + last_bit++; + } + } + + xfs_buf_item_trace("SIZE NORM", bip); + return nvecs; +} + +/* + * This is called to fill in the vector of log iovecs for the + * given log buf item. It fills the first entry with a buf log + * format structure, and the rest point to contiguous chunks + * within the buffer. + */ +void +xfs_buf_item_format( + xfs_buf_log_item_t *bip, + xfs_log_iovec_t *log_vector) +{ + uint base_size; + uint nvecs; + xfs_log_iovec_t *vecp; + xfs_buf_t *bp; + int first_bit; + int last_bit; + int next_bit; + uint nbits; + uint buffer_offset; + + ASSERT(atomic_read(&bip->bli_refcount) > 0); + ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || + (bip->bli_flags & XFS_BLI_STALE)); + bp = bip->bli_buf; + ASSERT(XFS_BUF_BP_ISMAPPED(bp)); + vecp = log_vector; + + /* + * The size of the base structure is the size of the + * declared structure plus the space for the extra words + * of the bitmap. We subtract one from the map size, because + * the first element of the bitmap is accounted for in the + * size of the base structure. 
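+ * (E.g. blf_map_size == 2 gives base_size = sizeof(xfs_buf_log_format_t)
+ * + sizeof(uint) for this first iovec.)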
+ */ + base_size = + (uint)(sizeof(xfs_buf_log_format_t) + + ((bip->bli_format.blf_map_size - 1) * sizeof(uint))); + vecp->i_addr = (xfs_caddr_t)&bip->bli_format; + vecp->i_len = base_size; + vecp++; + nvecs = 1; + + if (bip->bli_flags & XFS_BLI_STALE) { + /* + * The buffer is stale, so all we need to log + * is the buf log format structure with the + * cancel flag in it. + */ + xfs_buf_item_trace("FORMAT STALE", bip); + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + bip->bli_format.blf_size = nvecs; + return; + } + + /* + * Fill in an iovec for each set of contiguous chunks. + */ + first_bit = xfs_next_bit(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, 0); + ASSERT(first_bit != -1); + last_bit = first_bit; + nbits = 1; + for (;;) { + /* + * This takes the bit number to start looking from and + * returns the next set bit from there. It returns -1 + * if there are no more bits set or the start bit is + * beyond the end of the bitmap. + */ + next_bit = xfs_next_bit(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, + (uint)last_bit + 1); + /* + * If we run out of bits fill in the last iovec and get + * out of the loop. + * Else if we start a new set of bits then fill in the + * iovec for the series we were looking at and start + * counting the bits in the new one. + * Else we're still in the same set of bits so just + * keep counting and scanning. + */ + if (next_bit == -1) { + buffer_offset = first_bit * XFS_BLI_CHUNK; + vecp->i_addr = xfs_buf_offset(bp, buffer_offset); + vecp->i_len = nbits * XFS_BLI_CHUNK; + nvecs++; + break; + } else if (next_bit != last_bit + 1) { + buffer_offset = first_bit * XFS_BLI_CHUNK; + vecp->i_addr = xfs_buf_offset(bp, buffer_offset); + vecp->i_len = nbits * XFS_BLI_CHUNK; + nvecs++; + vecp++; + first_bit = next_bit; + last_bit = next_bit; + nbits = 1; + } else if (xfs_buf_offset(bp, next_bit << XFS_BLI_SHIFT) != + (xfs_buf_offset(bp, last_bit << XFS_BLI_SHIFT) + + XFS_BLI_CHUNK)) { + buffer_offset = first_bit * XFS_BLI_CHUNK; + vecp->i_addr = xfs_buf_offset(bp, buffer_offset); + vecp->i_len = nbits * XFS_BLI_CHUNK; +/* You would think we need to bump the nvecs here too, but we do not + * this number is used by recovery, and it gets confused by the boundary + * split here + * nvecs++; + */ + vecp++; + first_bit = next_bit; + last_bit = next_bit; + nbits = 1; + } else { + last_bit++; + nbits++; + } + } + bip->bli_format.blf_size = nvecs; + + /* + * Check to make sure everything is consistent. + */ + xfs_buf_item_trace("FORMAT NORM", bip); + xfs_buf_item_log_check(bip); +} + +/* + * This is called to pin the buffer associated with the buf log + * item in memory so it cannot be written out. Simply call bpin() + * on the buffer to do this. + */ +void +xfs_buf_item_pin( + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + + bp = bip->bli_buf; + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || + (bip->bli_flags & XFS_BLI_STALE)); + xfs_buf_item_trace("PIN", bip); + xfs_buftrace("XFS_PIN", bp); + xfs_bpin(bp); +} + + +/* + * This is called to unpin the buffer associated with the buf log + * item which was previously pinned with a call to xfs_buf_item_pin(). + * Just call bunpin() on the buffer to do this. + * + * Also drop the reference to the buf item for the current transaction. + * If the XFS_BLI_STALE flag is set and we are the last reference, + * then free up the buf log item and unlock the buffer. 
+ */ +void +xfs_buf_item_unpin( + xfs_buf_log_item_t *bip, + int stale) +{ + xfs_mount_t *mp; + xfs_buf_t *bp; + int freed; + SPLDECL(s); + + bp = bip->bli_buf; + ASSERT(bp != NULL); + ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + xfs_buf_item_trace("UNPIN", bip); + xfs_buftrace("XFS_UNPIN", bp); + + freed = atomic_dec_and_test(&bip->bli_refcount); + mp = bip->bli_item.li_mountp; + xfs_bunpin(bp); + if (freed && stale) { + ASSERT(bip->bli_flags & XFS_BLI_STALE); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); + ASSERT(XFS_BUF_ISSTALE(bp)); +/** + ASSERT(bp->b_pincount == 0); +**/ + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + xfs_buf_item_trace("UNPIN STALE", bip); + xfs_buftrace("XFS_UNPIN STALE", bp); + AIL_LOCK(mp,s); + /* + * If we get called here because of an IO error, we may + * or may not have the item on the AIL. xfs_trans_delete_ail() + * will take care of that situation. + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s); + xfs_buf_item_relse(bp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL); + xfs_buf_relse(bp); + } +} + +/* + * this is called from uncommit in the forced-shutdown path. + * we need to check to see if the reference count on the log item + * is going to drop to zero. If so, unpin will free the log item + * so we need to free the item's descriptor (that points to the item) + * in the transaction. + */ +void +xfs_buf_item_unpin_remove( + xfs_buf_log_item_t *bip, + xfs_trans_t *tp) +{ + xfs_buf_t *bp; + xfs_log_item_desc_t *lidp; + int stale = 0; + + bp = bip->bli_buf; + /* + * will xfs_buf_item_unpin() call xfs_buf_item_relse()? + */ + if ((atomic_read(&bip->bli_refcount) == 1) && + (bip->bli_flags & XFS_BLI_STALE)) { + ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0); + xfs_buf_item_trace("UNPIN REMOVE", bip); + xfs_buftrace("XFS_UNPIN_REMOVE", bp); + /* + * yes -- clear the xaction descriptor in-use flag + * and free the chunk if required. We can safely + * do some work here and then call buf_item_unpin + * to do the rest because if the if is true, then + * we are holding the buffer locked so no one else + * will be able to bump up the refcount. + */ + lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) bip); + stale = lidp->lid_flags & XFS_LID_BUF_STALE; + xfs_trans_free_item(tp, lidp); + /* + * Since the transaction no longer refers to the buffer, + * the buffer should no longer refer to the transaction. + */ + XFS_BUF_SET_FSPRIVATE2(bp, NULL); + } + + xfs_buf_item_unpin(bip, stale); + + return; +} + +/* + * This is called to attempt to lock the buffer associated with this + * buf log item. Don't sleep on the buffer lock. If we can't get + * the lock right away, return 0. If we can get the lock, pull the + * buffer from the free list, mark it busy, and return 1. + */ +uint +xfs_buf_item_trylock( + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + + bp = bip->bli_buf; + + if (XFS_BUF_ISPINNED(bp)) { + return XFS_ITEM_PINNED; + } + + if (!XFS_BUF_CPSEMA(bp)) { + return XFS_ITEM_LOCKED; + } + + /* + * Remove the buffer from the free list. Only do this + * if it's on the free list. Private buffers like the + * superblock buffer are not. + */ + XFS_BUF_HOLD(bp); + + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + xfs_buf_item_trace("TRYLOCK SUCCESS", bip); + return XFS_ITEM_SUCCESS; +} + +/* + * Release the buffer associated with the buf log item. 
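The unpin path above uses atomic_dec_and_test() so that exactly one caller observes the reference count reaching zero and, for a stale buffer, performs the teardown. A user-space sketch of that last-reference pattern, with C11 atomics standing in for the kernel's atomic_t and a hypothetical demo_item in place of the buf log item:

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct demo_item {
	atomic_int	refcount;
	int		stale;
};

/*
 * Drop one reference; only the caller that takes the count to zero on
 * a stale item performs the teardown.
 */
static void demo_unpin(struct demo_item *item)
{
	int freed = (atomic_fetch_sub(&item->refcount, 1) == 1);

	if (freed && item->stale) {
		printf("last reference dropped, freeing item\n");
		free(item);
	}
}

int main(void)
{
	struct demo_item *item = malloc(sizeof(*item));

	if (!item)
		return 1;
	atomic_init(&item->refcount, 2);
	item->stale = 1;
	demo_unpin(item);	/* 2 -> 1, nothing happens */
	demo_unpin(item);	/* 1 -> 0, item is freed  */
	return 0;
}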
+ * If there is no dirty logged data associated with the + * buffer recorded in the buf log item, then free the + * buf log item and remove the reference to it in the + * buffer. + * + * This call ignores the recursion count. It is only called + * when the buffer should REALLY be unlocked, regardless + * of the recursion count. + * + * If the XFS_BLI_HOLD flag is set in the buf log item, then + * free the log item if necessary but do not unlock the buffer. + * This is for support of xfs_trans_bhold(). Make sure the + * XFS_BLI_HOLD field is cleared if we don't free the item. + */ +void +xfs_buf_item_unlock( + xfs_buf_log_item_t *bip) +{ + int aborted; + xfs_buf_t *bp; + uint hold; + + bp = bip->bli_buf; + xfs_buftrace("XFS_UNLOCK", bp); + + /* + * Clear the buffer's association with this transaction. + */ + XFS_BUF_SET_FSPRIVATE2(bp, NULL); + + /* + * If this is a transaction abort, don't return early. + * Instead, allow the brelse to happen. + * Normally it would be done for stale (cancelled) buffers + * at unpin time, but we'll never go through the pin/unpin + * cycle if we abort inside commit. + */ + aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0; + + /* + * If the buf item is marked stale, then don't do anything. + * We'll unlock the buffer and free the buf item when the + * buffer is unpinned for the last time. + */ + if (bip->bli_flags & XFS_BLI_STALE) { + bip->bli_flags &= ~XFS_BLI_LOGGED; + xfs_buf_item_trace("UNLOCK STALE", bip); + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + if (!aborted) + return; + } + + /* + * Drop the transaction's reference to the log item if + * it was not logged as part of the transaction. Otherwise + * we'll drop the reference in xfs_buf_item_unpin() when + * the transaction is really through with the buffer. + */ + if (!(bip->bli_flags & XFS_BLI_LOGGED)) { + atomic_dec(&bip->bli_refcount); + } else { + /* + * Clear the logged flag since this is per + * transaction state. + */ + bip->bli_flags &= ~XFS_BLI_LOGGED; + } + + /* + * Before possibly freeing the buf item, determine if we should + * release the buffer at the end of this routine. + */ + hold = bip->bli_flags & XFS_BLI_HOLD; + xfs_buf_item_trace("UNLOCK", bip); + + /* + * If the buf item isn't tracking any data, free it. + * Otherwise, if XFS_BLI_HOLD is set clear it. + */ + if (xfs_count_bits(bip->bli_format.blf_data_map, + bip->bli_format.blf_map_size, 0) == 0) { + xfs_buf_item_relse(bp); + } else if (hold) { + bip->bli_flags &= ~XFS_BLI_HOLD; + } + + /* + * Release the buffer if XFS_BLI_HOLD was not set. + */ + if (!hold) { + xfs_buf_relse(bp); + } +} + +/* + * This is called to find out where the oldest active copy of the + * buf log item in the on disk log resides now that the last log + * write of it completed at the given lsn. + * We always re-log all the dirty data in a buffer, so usually the + * latest copy in the on disk log is the only one that matters. For + * those cases we simply return the given lsn. + * + * The one exception to this is for buffers full of newly allocated + * inodes. These buffers are only relogged with the XFS_BLI_INODE_BUF + * flag set, indicating that only the di_next_unlinked fields from the + * inodes in the buffers will be replayed during recovery. If the + * original newly allocated inode images have not yet been flushed + * when the buffer is so relogged, then we need to make sure that we + * keep the old images in the 'active' portion of the log. 
We do this + * by returning the original lsn of that transaction here rather than + * the current one. + */ +xfs_lsn_t +xfs_buf_item_committed( + xfs_buf_log_item_t *bip, + xfs_lsn_t lsn) +{ + xfs_buf_item_trace("COMMITTED", bip); + if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && + (bip->bli_item.li_lsn != 0)) { + return bip->bli_item.li_lsn; + } + return (lsn); +} + +/* + * This is called when the transaction holding the buffer is aborted. + * Just behave as if the transaction had been cancelled. If we're shutting down + * and have aborted this transaction, we'll trap this buffer when it tries to + * get written out. + */ +void +xfs_buf_item_abort( + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + + bp = bip->bli_buf; + xfs_buftrace("XFS_ABORT", bp); + XFS_BUF_SUPER_STALE(bp); + xfs_buf_item_unlock(bip); + return; +} + +/* + * This is called to asynchronously write the buffer associated with this + * buf log item out to disk. The buffer will already have been locked by + * a successful call to xfs_buf_item_trylock(). If the buffer still has + * B_DELWRI set, then get it going out to disk with a call to bawrite(). + * If not, then just release the buffer. + */ +void +xfs_buf_item_push( + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + xfs_buf_item_trace("PUSH", bip); + + bp = bip->bli_buf; + + if (XFS_BUF_ISDELAYWRITE(bp)) { + xfs_bawrite(bip->bli_item.li_mountp, bp); + } else { + xfs_buf_relse(bp); + } +} + +/* ARGSUSED */ +void +xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn) +{ +} + +/* + * This is the ops vector shared by all buf log items. + */ +struct xfs_item_ops xfs_buf_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_buf_item_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_buf_item_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *)) + xfs_buf_item_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_buf_item_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_buf_item_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_buf_item_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_buf_item_committing +}; + + +/* + * Allocate a new buf log item to go with the given buffer. + * Set the buffer's b_fsprivate field to point to the new + * buf log item. If there are other item's attached to the + * buffer (see xfs_buf_attach_iodone() below), then put the + * buf log item at the front. + */ +void +xfs_buf_item_init( + xfs_buf_t *bp, + xfs_mount_t *mp) +{ + xfs_log_item_t *lip; + xfs_buf_log_item_t *bip; + int chunks; + int map_size; + + /* + * Check to see if there is already a buf log item for + * this buffer. If there is, it is guaranteed to be + * the first. If we do already have one, there is + * nothing to do here so return. + */ + if (XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *) != mp) + XFS_BUF_SET_FSPRIVATE3(bp, mp); + XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb); + if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { + lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + if (lip->li_type == XFS_LI_BUF) { + return; + } + } + + /* + * chunks is the number of XFS_BLI_CHUNK size pieces + * the buffer can be divided into. Make sure not to + * truncate any pieces. 
map_size is the size of the + * bitmap needed to describe the chunks of the buffer. + */ + chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLI_CHUNK - 1)) >> XFS_BLI_SHIFT); + map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT); + + bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone, + KM_SLEEP); + bip->bli_item.li_type = XFS_LI_BUF; + bip->bli_item.li_ops = &xfs_buf_item_ops; + bip->bli_item.li_mountp = mp; + bip->bli_buf = bp; + bip->bli_format.blf_type = XFS_LI_BUF; + bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp); + bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp)); + bip->bli_format.blf_map_size = map_size; +#ifdef XFS_BLI_TRACE + bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP); +#endif + +#ifdef XFS_TRANS_DEBUG + /* + * Allocate the arrays for tracking what needs to be logged + * and what our callers request to be logged. bli_orig + * holds a copy of the original, clean buffer for comparison + * against, and bli_logged keeps a 1 bit flag per byte in + * the buffer to indicate which bytes the callers have asked + * to have logged. + */ + bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP); + memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp)); + bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP); +#endif + + /* + * Put the buf item into the list of items attached to the + * buffer at the front. + */ + if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { + bip->bli_item.li_bio_list = + XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + } + XFS_BUF_SET_FSPRIVATE(bp, bip); +} + + +/* + * Mark bytes first through last inclusive as dirty in the buf + * item's bitmap. + */ +void +xfs_buf_item_log( + xfs_buf_log_item_t *bip, + uint first, + uint last) +{ + uint first_bit; + uint last_bit; + uint bits_to_set; + uint bits_set; + uint word_num; + uint *wordp; + uint bit; + uint end_bit; + uint mask; + + /* + * Mark the item as having some dirty data for + * quick reference in xfs_buf_item_dirty. + */ + bip->bli_flags |= XFS_BLI_DIRTY; + + /* + * Convert byte offsets to bit numbers. + */ + first_bit = first >> XFS_BLI_SHIFT; + last_bit = last >> XFS_BLI_SHIFT; + + /* + * Calculate the total number of bits to be set. + */ + bits_to_set = last_bit - first_bit + 1; + + /* + * Get a pointer to the first word in the bitmap + * to set a bit in. + */ + word_num = first_bit >> BIT_TO_WORD_SHIFT; + wordp = &(bip->bli_format.blf_data_map[word_num]); + + /* + * Calculate the starting bit in the first word. + */ + bit = first_bit & (uint)(NBWORD - 1); + + /* + * First set any bits in the first word of our range. + * If it starts at bit 0 of the word, it will be + * set below rather than here. That is what the variable + * bit tells us. The variable bits_set tracks the number + * of bits that have been set so far. End_bit is the number + * of the last bit to be set in this word plus one. + */ + if (bit) { + end_bit = MIN(bit + bits_to_set, (uint)NBWORD); + mask = ((1 << (end_bit - bit)) - 1) << bit; + *wordp |= mask; + wordp++; + bits_set = end_bit - bit; + } else { + bits_set = 0; + } + + /* + * Now set bits a whole word at a time that are between + * first_bit and last_bit. + */ + while ((bits_to_set - bits_set) >= NBWORD) { + *wordp |= 0xffffffff; + bits_set += NBWORD; + wordp++; + } + + /* + * Finally, set any bits left to be set in one last partial word. 
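The logging routine above marks the requested byte range dirty in three phases: a partial leading word, a run of whole words, and a partial trailing word. The same bitmap operation as a stand-alone user-space sketch (demo_* names are mine; 32-bit map words as in the real code):

#include <stdio.h>

#define DEMO_NBWORD	32	/* bits per bitmap word */

/* Set bits [first_bit, last_bit] inclusive in a word-based bitmap. */
static void demo_set_range(unsigned int *map, int first_bit, int last_bit)
{
	int bits_to_set = last_bit - first_bit + 1;
	int bits_set = 0;
	unsigned int *wordp = &map[first_bit / DEMO_NBWORD];
	int bit = first_bit % DEMO_NBWORD;
	unsigned int mask;
	int end_bit;

	if (bit) {				/* partial first word */
		end_bit = bit + bits_to_set;
		if (end_bit > DEMO_NBWORD)
			end_bit = DEMO_NBWORD;
		mask = ((1u << (end_bit - bit)) - 1) << bit;
		*wordp++ |= mask;
		bits_set = end_bit - bit;
	}
	while (bits_to_set - bits_set >= DEMO_NBWORD) {	/* whole words */
		*wordp++ |= 0xffffffffu;
		bits_set += DEMO_NBWORD;
	}
	end_bit = bits_to_set - bits_set;		/* partial last word */
	if (end_bit) {
		mask = (1u << end_bit) - 1;
		*wordp |= mask;
	}
}

int main(void)
{
	unsigned int map[3] = { 0, 0, 0 };

	demo_set_range(map, 10, 70);	/* bits 10..70 span three words */
	printf("%08x %08x %08x\n", map[0], map[1], map[2]);
	return 0;
}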
+ */ + end_bit = bits_to_set - bits_set; + if (end_bit) { + mask = (1 << end_bit) - 1; + *wordp |= mask; + } + + xfs_buf_item_log_debug(bip, first, last); +} + + +/* + * Return 1 if the buffer has some data that has been logged (at any + * point, not just the current transaction) and 0 if not. + */ +uint +xfs_buf_item_dirty( + xfs_buf_log_item_t *bip) +{ + return (bip->bli_flags & XFS_BLI_DIRTY); +} + +/* + * This is called when the buf log item is no longer needed. It should + * free the buf log item associated with the given buffer and clear + * the buffer's pointer to the buf log item. If there are no more + * items in the list, clear the b_iodone field of the buffer (see + * xfs_buf_attach_iodone() below). + */ +void +xfs_buf_item_relse( + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + xfs_buftrace("XFS_RELSE", bp); + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list); + if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) && + (XFS_BUF_IODONE_FUNC(bp) != NULL)) { + ASSERT((XFS_BUF_ISUNINITIAL(bp)) == 0); + XFS_BUF_CLR_IODONE_FUNC(bp); + } + +#ifdef XFS_TRANS_DEBUG + kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp)); + bip->bli_orig = NULL; + kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY); + bip->bli_logged = NULL; +#endif /* XFS_TRANS_DEBUG */ + +#ifdef XFS_BLI_TRACE + ktrace_free(bip->bli_trace); +#endif + kmem_zone_free(xfs_buf_item_zone, bip); +} + + +/* + * Add the given log item with its callback to the list of callbacks + * to be called when the buffer's I/O completes. If it is not set + * already, set the buffer's b_iodone() routine to be + * xfs_buf_iodone_callbacks() and link the log item into the list of + * items rooted at b_fsprivate. Items are always added as the second + * entry in the list if there is a first, because the buf item code + * assumes that the buf log item is first. + */ +void +xfs_buf_attach_iodone( + xfs_buf_t *bp, + void (*cb)(xfs_buf_t *, xfs_log_item_t *), + xfs_log_item_t *lip) +{ + xfs_log_item_t *head_lip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + + lip->li_cb = cb; + if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { + head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + lip->li_bio_list = head_lip->li_bio_list; + head_lip->li_bio_list = lip; + } else { + XFS_BUF_SET_FSPRIVATE(bp, lip); + } + + ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) || + (XFS_BUF_IODONE_FUNC(bp) == NULL)); + XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); +} + +STATIC void +xfs_buf_do_callbacks( + xfs_buf_t *bp, + xfs_log_item_t *lip) +{ + xfs_log_item_t *nlip; + + while (lip != NULL) { + nlip = lip->li_bio_list; + ASSERT(lip->li_cb != NULL); + /* + * Clear the next pointer so we don't have any + * confusion if the item is added to another buf. + * Don't touch the log item after calling its + * callback, because it could have freed itself. + */ + lip->li_bio_list = NULL; + lip->li_cb(bp, lip); + lip = nlip; + } +} + +/* + * This is the iodone() function for buffers which have had callbacks + * attached to them by xfs_buf_attach_iodone(). It should remove each + * log item from the buffer's list and call the callback of each in turn. + * When done, the buffer's fsprivate field is set to NULL and the buffer + * is unlocked with a call to iodone(). 
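xfs_buf_do_callbacks() above walks a singly linked chain of log items and is careful to detach each item before invoking its callback, because the callback may free the item. A minimal sketch of that pattern with a hypothetical demo_item type:

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for a log item carrying an I/O-done callback. */
struct demo_item {
	struct demo_item *next;
	void (*cb)(struct demo_item *);
};

/*
 * Detach each item from the list before calling its callback, and
 * never touch the item afterwards, since the callback may free it.
 */
static void demo_run_callbacks(struct demo_item *item)
{
	struct demo_item *next;

	while (item != NULL) {
		next = item->next;
		item->next = NULL;	/* avoid confusion if re-queued */
		item->cb(item);		/* may free 'item' */
		item = next;
	}
}

static void demo_cb(struct demo_item *item)
{
	printf("callback for %p\n", (void *)item);
	free(item);
}

int main(void)
{
	struct demo_item *a = malloc(sizeof(*a));
	struct demo_item *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->next = b;	a->cb = demo_cb;
	b->next = NULL;	b->cb = demo_cb;
	demo_run_callbacks(a);
	return 0;
}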
+ */ +void +xfs_buf_iodone_callbacks( + xfs_buf_t *bp) +{ + xfs_log_item_t *lip; + static ulong lasttime; + static dev_t lastdev; + xfs_mount_t *mp; + + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + + if (XFS_BUF_GETERROR(bp) != 0) { + /* + * If we've already decided to shutdown the filesystem + * because of IO errors, there's no point in giving this + * a retry. + */ + mp = lip->li_mountp; + if (XFS_FORCED_SHUTDOWN(mp)) { + ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); + XFS_BUF_SUPER_STALE(bp); + xfs_buftrace("BUF_IODONE_CB", bp); + xfs_buf_do_callbacks(bp, lip); + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + + /* + * XFS_SHUT flag gets set when we go thru the + * entire buffer cache and deliberately start + * throwing away delayed write buffers. + * Since there's no biowait done on those, + * we should just brelse them. + */ + if (XFS_BUF_ISSHUT(bp)) { + XFS_BUF_UNSHUT(bp); + xfs_buf_relse(bp); + } else { + xfs_biodone(bp); + } + + return; + } + + if ((XFS_BUF_TARGET_DEV(bp) != lastdev) || + (time_after(jiffies, (lasttime + 5*HZ)))) { + lasttime = jiffies; + prdev("XFS write error in file system meta-data " + "block 0x%Lx in %s", + XFS_BUF_TARGET_DEV(bp), + XFS_BUF_ADDR(bp), mp->m_fsname); + } + lastdev = XFS_BUF_TARGET_DEV(bp); + + if (XFS_BUF_ISASYNC(bp)) { + /* + * If the write was asynchronous then noone will be + * looking for the error. Clear the error state + * and write the buffer out again delayed write. + * + * XXXsup This is OK, so long as we catch these + * before we start the umount; we don't want these + * DELWRI metadata bufs to be hanging around. + */ + XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */ + + if (!(XFS_BUF_ISSTALE(bp))) { + XFS_BUF_DELAYWRITE(bp); + XFS_BUF_DONE(bp); + XFS_BUF_SET_START(bp); + } + ASSERT(XFS_BUF_IODONE_FUNC(bp)); + xfs_buftrace("BUF_IODONE ASYNC", bp); + xfs_buf_relse(bp); + } else { + /* + * If the write of the buffer was not asynchronous, + * then we want to make sure to return the error + * to the caller of bwrite(). Because of this we + * cannot clear the B_ERROR state at this point. + * Instead we install a callback function that + * will be called when the buffer is released, and + * that routine will clear the error state and + * set the buffer to be written out again after + * some delay. + */ + /* We actually overwrite the existing b-relse + function at times, but we're gonna be shutting down + anyway. */ + XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse); + XFS_BUF_DONE(bp); + XFS_BUF_V_IODONESEMA(bp); + } + return; + } +#ifdef XFSERRORDEBUG + xfs_buftrace("XFS BUFCB NOERR", bp); +#endif + xfs_buf_do_callbacks(bp, lip); + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + xfs_biodone(bp); +} + +/* + * This is a callback routine attached to a buffer which gets an error + * when being written out synchronously. + */ +STATIC void +xfs_buf_error_relse( + xfs_buf_t *bp) +{ + xfs_log_item_t *lip; + xfs_mount_t *mp; + + lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + mp = (xfs_mount_t *)lip->li_mountp; + ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); + + XFS_BUF_STALE(bp); + XFS_BUF_DONE(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_ERROR(bp,0); + xfs_buftrace("BUF_ERROR_RELSE", bp); + if (! XFS_FORCED_SHUTDOWN(mp)) + xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR); + /* + * We have to unpin the pinned buffers so do the + * callbacks. 
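The metadata write-error report above is throttled: it is printed only when the failing device changes or at most once every five seconds. A rough user-space sketch of the same throttle, with time() standing in for jiffies and a plain integer for the device; the names and formatting are mine:

#include <stdio.h>
#include <time.h>

static void demo_report_error(int dev, long block)
{
	static time_t last_time;
	static int last_dev;
	time_t now = time(NULL);

	/* Print only on a device change or after a five second quiet period. */
	if (dev != last_dev || now > last_time + 5) {
		last_time = now;
		fprintf(stderr,
			"write error on dev %d, metadata block 0x%lx\n",
			dev, block);
	}
	last_dev = dev;
}

int main(void)
{
	int i;

	/* Only the first of this burst of identical errors is printed. */
	for (i = 0; i < 100; i++)
		demo_report_error(8, 1234);
	return 0;
}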
+ */ + xfs_buf_do_callbacks(bp, lip); + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + XFS_BUF_SET_BRELSE_FUNC(bp,NULL); + xfs_buf_relse(bp); +} + + +/* + * This is the iodone() function for buffers which have been + * logged. It is called when they are eventually flushed out. + * It should remove the buf item from the AIL, and free the buf item. + * It is called by xfs_buf_iodone_callbacks() above which will take + * care of cleaning up the buffer itself. + */ +/* ARGSUSED */ +void +xfs_buf_iodone( + xfs_buf_t *bp, + xfs_buf_log_item_t *bip) +{ + struct xfs_mount *mp; + SPLDECL(s); + + ASSERT(bip->bli_buf == bp); + + mp = bip->bli_item.li_mountp; + + /* + * If we are forcibly shutting down, this may well be + * off the AIL already. That's because we simulate the + * log-committed callbacks to unpin these buffers. Or we may never + * have put this item on AIL because of the transaction was + * aborted forcibly. xfs_trans_delete_ail() takes care of these. + * + * Either way, AIL is useless if we're forcing a shutdown. + */ + AIL_LOCK(mp,s); + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s); + +#ifdef XFS_TRANS_DEBUG + kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp)); + bip->bli_orig = NULL; + kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY); + bip->bli_logged = NULL; +#endif /* XFS_TRANS_DEBUG */ + +#ifdef XFS_BLI_TRACE + ktrace_free(bip->bli_trace); +#endif + kmem_zone_free(xfs_buf_item_zone, bip); +} + +#if defined(XFS_BLI_TRACE) +void +xfs_buf_item_trace( + char *id, + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + ASSERT(bip->bli_trace != NULL); + + bp = bip->bli_buf; + ktrace_enter(bip->bli_trace, + (void *)id, + (void *)bip->bli_buf, + (void *)((unsigned long)bip->bli_flags), + (void *)((unsigned long)bip->bli_recur), + (void *)((unsigned long)atomic_read(&bip->bli_refcount)), + (void *)XFS_BUF_ADDR(bp), + (void *)((unsigned long)XFS_BUF_COUNT(bp)), + (void *)((unsigned long)(0xFFFFFFFF & (XFS_BFLAGS(bp) >> 32))), + (void *)((unsigned long)(0xFFFFFFFF & XFS_BFLAGS(bp))), + XFS_BUF_FSPRIVATE(bp, void *), + XFS_BUF_FSPRIVATE2(bp, void *), + (void *)((unsigned long)bp->b_pincount), + (void *)XFS_BUF_IODONE_FUNC(bp), + (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))), + (void *)bip->bli_item.li_desc, + (void *)((unsigned long)bip->bli_item.li_flags)); +} +#endif /* XFS_BLI_TRACE */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_buf_item.h linux.22-ac2/fs/xfs/xfs_buf_item.h --- linux.vanilla/fs/xfs/xfs_buf_item.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_buf_item.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_BUF_ITEM_H__ +#define __XFS_BUF_ITEM_H__ + +/* + * This is the structure used to lay out a buf log item in the + * log. The data map describes which 128 byte chunks of the buffer + * have been logged. This structure works only on buffers that + * reside up to the first TB in the filesystem. These buffers are + * generated only by pre-6.2 systems and are known as XFS_LI_6_1_BUF. + */ +typedef struct xfs_buf_log_format_v1 { + unsigned short blf_type; /* buf log item type indicator */ + unsigned short blf_size; /* size of this item */ + __int32_t blf_blkno; /* starting blkno of this buf */ + ushort blf_flags; /* misc state */ + ushort blf_len; /* number of blocks in this buf */ + unsigned int blf_map_size; /* size of data bitmap in words */ + unsigned int blf_data_map[1];/* variable size bitmap of */ + /* regions of buffer in this item */ +} xfs_buf_log_format_v1_t; + +/* + * This is a form of the above structure with a 64 bit blkno field. + * For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything. + */ +typedef struct xfs_buf_log_format_t { + unsigned short blf_type; /* buf log item type indicator */ + unsigned short blf_size; /* size of this item */ + ushort blf_flags; /* misc state */ + ushort blf_len; /* number of blocks in this buf */ + __int64_t blf_blkno; /* starting blkno of this buf */ + unsigned int blf_map_size; /* size of data bitmap in words */ + unsigned int blf_data_map[1];/* variable size bitmap of */ + /* regions of buffer in this item */ +} xfs_buf_log_format_t; + +/* + * This flag indicates that the buffer contains on disk inodes + * and requires special recovery handling. + */ +#define XFS_BLI_INODE_BUF 0x1 +/* + * This flag indicates that the buffer should not be replayed + * during recovery because its blocks are being freed. + */ +#define XFS_BLI_CANCEL 0x2 +/* + * This flag indicates that the buffer contains on disk + * user or group dquots and may require special recovery handling. + */ +#define XFS_BLI_UDQUOT_BUF 0x4 +/* #define XFS_BLI_PDQUOT_BUF 0x8 */ +#define XFS_BLI_GDQUOT_BUF 0x10 + +#define XFS_BLI_CHUNK 128 +#define XFS_BLI_SHIFT 7 +#define BIT_TO_WORD_SHIFT 5 +#define NBWORD (NBBY * sizeof(unsigned int)) + +/* + * buf log item flags + */ +#define XFS_BLI_HOLD 0x01 +#define XFS_BLI_DIRTY 0x02 +#define XFS_BLI_STALE 0x04 +#define XFS_BLI_LOGGED 0x08 +#define XFS_BLI_INODE_ALLOC_BUF 0x10 + + +#ifdef __KERNEL__ + +struct xfs_buf; +struct ktrace; +struct xfs_mount; + +/* + * This is the in core log item structure used to track information + * needed to log buffers. It tracks how many times the lock has been + * locked, and which 128 byte chunks of the buffer are dirty. 
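The chunk constants defined in this header fix the bookkeeping granularity: one bitmap bit covers a 128-byte chunk and each map word holds 32 bits. As a quick illustration (user-space, demo_* names are mine), here is how a buffer size translates into chunks and bitmap words, using the same rounding as xfs_buf_item_init() earlier in this patch:

#include <stdio.h>

#define DEMO_CHUNK		128	/* bytes covered by one bitmap bit */
#define DEMO_SHIFT		7	/* log2(DEMO_CHUNK) */
#define DEMO_NBWORD		32	/* bits per bitmap word */
#define DEMO_BIT_TO_WORD_SHIFT	5	/* log2(DEMO_NBWORD) */

int main(void)
{
	int count = 4096;	/* buffer size in bytes */
	int chunks, map_size;

	/* Round the byte count up to whole 128-byte chunks. */
	chunks = (count + (DEMO_CHUNK - 1)) >> DEMO_SHIFT;
	/*
	 * Bitmap words, computed the way xfs_buf_item_init() does it:
	 * a plain round-up plus up to one word of slack.
	 */
	map_size = (chunks + DEMO_NBWORD) >> DEMO_BIT_TO_WORD_SHIFT;

	printf("%d-byte buffer -> %d chunks -> %d bitmap word(s)\n",
	       count, chunks, map_size);
	return 0;
}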
+ */ +typedef struct xfs_buf_log_item { + xfs_log_item_t bli_item; /* common item structure */ + struct xfs_buf *bli_buf; /* real buffer pointer */ + unsigned int bli_flags; /* misc flags */ + unsigned int bli_recur; /* lock recursion count */ + atomic_t bli_refcount; /* cnt of tp refs */ +#ifdef DEBUG + struct ktrace *bli_trace; /* event trace buf */ +#endif +#ifdef XFS_TRANS_DEBUG + char *bli_orig; /* original buffer copy */ + char *bli_logged; /* bytes logged (bitmap) */ +#endif + xfs_buf_log_format_t bli_format; /* in-log header */ +} xfs_buf_log_item_t; + +/* + * This structure is used during recovery to record the buf log + * items which have been canceled and should not be replayed. + */ +typedef struct xfs_buf_cancel { + xfs_daddr_t bc_blkno; + uint bc_len; + int bc_refcount; + struct xfs_buf_cancel *bc_next; +} xfs_buf_cancel_t; + +#define XFS_BLI_TRACE_SIZE 32 + + +#if defined(XFS_ALL_TRACE) +#define XFS_BLI_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_BLI_TRACE +#endif + +#if defined(XFS_BLI_TRACE) +void xfs_buf_item_trace(char *, xfs_buf_log_item_t *); +#else +#define xfs_buf_item_trace(id, bip) +#endif + +void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *); +void xfs_buf_item_relse(struct xfs_buf *); +void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint); +uint xfs_buf_item_dirty(xfs_buf_log_item_t *); +void xfs_buf_attach_iodone(struct xfs_buf *, + void(*)(struct xfs_buf *, xfs_log_item_t *), + xfs_log_item_t *); +void xfs_buf_iodone_callbacks(struct xfs_buf *); +void xfs_buf_iodone(struct xfs_buf *, xfs_buf_log_item_t *); + +#ifdef XFS_TRANS_DEBUG +void +xfs_buf_item_flush_log_debug( + struct xfs_buf *bp, + uint first, + uint last); +#else +#define xfs_buf_item_flush_log_debug(bp, first, last) +#endif + +#endif /* __KERNEL__ */ + +#endif /* __XFS_BUF_ITEM_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_cap.c linux.22-ac2/fs/xfs/xfs_cap.c --- linux.vanilla/fs/xfs/xfs_cap.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_cap.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +STATIC int xfs_cap_allow_set(vnode_t *); + + +/* + * Test for existence of capability attribute as efficiently as possible. + */ +int +xfs_cap_vhascap( + vnode_t *vp) +{ + int error; + int len = sizeof(xfs_cap_set_t); + int flags = ATTR_KERNOVAL|ATTR_ROOT; + + VOP_ATTR_GET(vp, SGI_CAP_LINUX, NULL, &len, flags, sys_cred, error); + return (error == 0); +} + +/* + * Convert from extended attribute representation to in-memory for XFS. + */ +STATIC int +posix_cap_xattr_to_xfs( + posix_cap_xattr *src, + size_t size, + xfs_cap_set_t *dest) +{ + if (!src || !dest) + return EINVAL; + + if (src->c_version != cpu_to_le32(POSIX_CAP_XATTR_VERSION)) + return EINVAL; + if (src->c_abiversion != cpu_to_le32(_LINUX_CAPABILITY_VERSION)) + return EINVAL; + + if (size < sizeof(posix_cap_xattr)) + return EINVAL; + + ASSERT(sizeof(dest->cap_effective) == sizeof(src->c_effective)); + + dest->cap_effective = src->c_effective; + dest->cap_permitted = src->c_permitted; + dest->cap_inheritable = src->c_inheritable; + + return 0; +} + +/* + * Convert from in-memory XFS to extended attribute representation. + */ +STATIC int +posix_cap_xfs_to_xattr( + xfs_cap_set_t *src, + posix_cap_xattr *xattr_cap, + size_t size) +{ + size_t new_size = posix_cap_xattr_size(); + + if (size < new_size) + return -ERANGE; + + ASSERT(sizeof(xattr_cap->c_effective) == sizeof(src->cap_effective)); + + xattr_cap->c_version = cpu_to_le32(POSIX_CAP_XATTR_VERSION); + xattr_cap->c_abiversion = cpu_to_le32(_LINUX_CAPABILITY_VERSION); + xattr_cap->c_effective = src->cap_effective; + xattr_cap->c_permitted = src->cap_permitted; + xattr_cap->c_inheritable= src->cap_inheritable; + + return new_size; +} + +int +xfs_cap_vget( + vnode_t *vp, + void *cap, + size_t size) +{ + int error; + int len = sizeof(xfs_cap_set_t); + int flags = ATTR_ROOT; + xfs_cap_set_t xfs_cap = { 0 }; + posix_cap_xattr *xattr_cap = cap; + char *data = (char *)&xfs_cap; + + VN_HOLD(vp); + if ((error = _MAC_VACCESS(vp, NULL, VREAD))) + goto out; + + if (!size) { + flags |= ATTR_KERNOVAL; + data = NULL; + } + VOP_ATTR_GET(vp, SGI_CAP_LINUX, data, &len, flags, sys_cred, error); + if (error) + goto out; + ASSERT(len == sizeof(xfs_cap_set_t)); + + error = (size)? 
-posix_cap_xattr_size() : + -posix_cap_xfs_to_xattr(&xfs_cap, xattr_cap, size); +out: + VN_RELE(vp); + return -error; +} + +int +xfs_cap_vremove( + vnode_t *vp) +{ + int error; + + VN_HOLD(vp); + error = xfs_cap_allow_set(vp); + if (!error) { + VOP_ATTR_REMOVE(vp, SGI_CAP_LINUX, ATTR_ROOT, sys_cred, error); + if (error == ENOATTR) + error = 0; /* 'scool */ + } + VN_RELE(vp); + return -error; +} + +int +xfs_cap_vset( + vnode_t *vp, + void *cap, + size_t size) +{ + posix_cap_xattr *xattr_cap = cap; + xfs_cap_set_t xfs_cap; + int error; + + if (!cap) + return -EINVAL; + + error = posix_cap_xattr_to_xfs(xattr_cap, size, &xfs_cap); + if (error) + return -error; + + VN_HOLD(vp); + error = xfs_cap_allow_set(vp); + if (error) + goto out; + + VOP_ATTR_SET(vp, SGI_CAP_LINUX, (char *)&xfs_cap, + sizeof(xfs_cap_set_t), ATTR_ROOT, sys_cred, error); +out: + VN_RELE(vp); + return -error; +} + +STATIC int +xfs_cap_allow_set( + vnode_t *vp) +{ + vattr_t va; + int error; + + if (vp->v_vfsp->vfs_flag & VFS_RDONLY) + return EROFS; + if ((error = _MAC_VACCESS(vp, NULL, VWRITE))) + return error; + va.va_mask = XFS_AT_UID; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return error; + if (va.va_uid != current->fsuid && !capable(CAP_FOWNER)) + return EPERM; + return error; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_cap.h linux.22-ac2/fs/xfs/xfs_cap.h --- linux.vanilla/fs/xfs/xfs_cap.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_cap.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_CAP_H__ +#define __XFS_CAP_H__ + +/* + * Capabilities + */ +typedef __uint64_t xfs_cap_value_t; + +typedef struct xfs_cap_set { + xfs_cap_value_t cap_effective; /* use in capability checks */ + xfs_cap_value_t cap_permitted; /* combined with file attrs */ + xfs_cap_value_t cap_inheritable;/* pass through exec */ +} xfs_cap_set_t; + +/* On-disk XFS extended attribute names */ +#define SGI_CAP_FILE "SGI_CAP_FILE" +#define SGI_CAP_FILE_SIZE (sizeof(SGI_CAP_FILE)-1) +#define SGI_CAP_LINUX "SGI_CAP_LINUX" +#define SGI_CAP_LINUX_SIZE (sizeof(SGI_CAP_LINUX)-1) + +/* + * For Linux, we take the bitfields directly from capability.h + * and no longer attempt to keep this attribute ondisk compatible + * with IRIX. Since this attribute is only set on exectuables, + * it just doesn't make much sense to try. We do use a different + * named attribute though, to avoid confusion. + */ + +#ifdef __KERNEL__ + +#ifdef CONFIG_FS_POSIX_CAP + +#include + +struct vnode; + +extern int xfs_cap_vhascap(struct vnode *); +extern int xfs_cap_vset(struct vnode *, void *, size_t); +extern int xfs_cap_vget(struct vnode *, void *, size_t); +extern int xfs_cap_vremove(struct vnode *vp); + +#define _CAP_EXISTS xfs_cap_vhascap + +#else +#define xfs_cap_vset(v,p,sz) (-EOPNOTSUPP) +#define xfs_cap_vget(v,p,sz) (-EOPNOTSUPP) +#define xfs_cap_vremove(v) (-EOPNOTSUPP) +#define _CAP_EXISTS (NULL) +#endif + +#endif /* __KERNEL__ */ + +#endif /* __XFS_CAP_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_clnt.h linux.22-ac2/fs/xfs/xfs_clnt.h --- linux.vanilla/fs/xfs/xfs_clnt.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_clnt.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_CLNT_H__ +#define __XFS_CLNT_H__ + +/* + * XFS arguments structure, constructed from the arguments we + * are passed via the mount system call. 
+ * + * NOTE: The mount system call is handled differently between + * Linux and IRIX. In IRIX we worked work with a binary data + * structure coming in across the syscall interface from user + * space (the mount userspace knows about each filesystem type + * and the set of valid options for it, and converts the users + * argument string into a binary structure _before_ making the + * system call), and the ABI issues that this implies. + * + * In Linux, we are passed a comma separated set of options; + * ie. a NULL terminated string of characters. Userspace mount + * code does not have any knowledge of mount options expected by + * each filesystem type and so each filesystem parses its mount + * options in kernel space. + * + * For the Linux port, we kept this structure pretty much intact + * and use it internally (because the existing code groks it). + */ +struct xfs_mount_args { + int flags; /* flags -> see XFSMNT_... macros below */ + int logbufs; /* Number of log buffers, -1 to default */ + int logbufsize; /* Size of log buffers, -1 to default */ + char fsname[MAXNAMELEN]; /* data device name */ + char rtname[MAXNAMELEN]; /* realtime device filename */ + char logname[MAXNAMELEN]; /* journal device filename */ + char mtpt[MAXNAMELEN]; /* filesystem mount point */ + int sunit; /* stripe unit (BBs) */ + int swidth; /* stripe width (BBs), multiple of sunit */ + uchar_t iosizelog; /* log2 of the preferred I/O size */ +}; + +/* + * XFS mount option flags + */ +#define XFSMNT_CHKLOG 0x00000001 /* check log */ +#define XFSMNT_WSYNC 0x00000002 /* safe mode nfs mount + * compatible */ +#define XFSMNT_INO64 0x00000004 /* move inode numbers up + * past 2^32 */ +#define XFSMNT_UQUOTA 0x00000008 /* user quota accounting */ +#define XFSMNT_PQUOTA 0x00000010 /* IRIX prj quota accounting */ +#define XFSMNT_UQUOTAENF 0x00000020 /* user quota limit + * enforcement */ +#define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit + * enforcement */ +#define XFSMNT_NOATIME 0x00000100 /* don't modify access + * times on reads */ +#define XFSMNT_NOALIGN 0x00000200 /* don't allocate at + * stripe boundaries*/ +#define XFSMNT_RETERR 0x00000400 /* return error to user */ +#define XFSMNT_NORECOVERY 0x00000800 /* no recovery, implies + * read-only mount */ +#define XFSMNT_SHARED 0x00001000 /* shared XFS mount */ +#define XFSMNT_IOSIZE 0x00002000 /* optimize for I/O size */ +#define XFSMNT_OSYNCISOSYNC 0x00004000 /* o_sync is REALLY o_sync */ + /* (osyncisdsync is now default) */ +#define XFSMNT_32BITINODES 0x00200000 /* restrict inodes to 32 + * bits of address space */ +#define XFSMNT_GQUOTA 0x00400000 /* group quota accounting */ +#define XFSMNT_GQUOTAENF 0x00800000 /* group quota limit + * enforcement */ +#define XFSMNT_NOUUID 0x01000000 /* Ignore fs uuid */ +#define XFSMNT_DMAPI 0x02000000 /* enable dmapi/xdsm */ +#define XFSMNT_NOLOGFLUSH 0x04000000 /* Don't flush for log blocks */ + +#endif /* __XFS_CLNT_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_da_btree.c linux.22-ac2/fs/xfs/xfs_da_btree.c --- linux.vanilla/fs/xfs/xfs_da_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_da_btree.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,2658 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "xfs_dir_leaf.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_node.h" +#include "xfs_error.h" +#include "xfs_bit.h" + +#if defined(XFSDEBUG) && defined(CONFIG_KDB) +#undef xfs_buftrace +#define xfs_buftrace(A,B) \ + printk(" xfs_buftrace : %s (0x%p)\n", A, B); \ + BUG(); +#endif + +/* + * xfs_da_btree.c + * + * Routines to implement directories as Btrees of hashed names. + */ + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Routines used for growing the Btree. + */ +STATIC int xfs_da_root_split(xfs_da_state_t *state, + xfs_da_state_blk_t *existing_root, + xfs_da_state_blk_t *new_child); +STATIC int xfs_da_node_split(xfs_da_state_t *state, + xfs_da_state_blk_t *existing_blk, + xfs_da_state_blk_t *split_blk, + xfs_da_state_blk_t *blk_to_add, + int treelevel, + int *result); +STATIC void xfs_da_node_rebalance(xfs_da_state_t *state, + xfs_da_state_blk_t *node_blk_1, + xfs_da_state_blk_t *node_blk_2); +STATIC void xfs_da_node_add(xfs_da_state_t *state, + xfs_da_state_blk_t *old_node_blk, + xfs_da_state_blk_t *new_node_blk); + +/* + * Routines used for shrinking the Btree. + */ +STATIC int xfs_da_root_join(xfs_da_state_t *state, + xfs_da_state_blk_t *root_blk); +STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval); +STATIC void xfs_da_node_remove(xfs_da_state_t *state, + xfs_da_state_blk_t *drop_blk); +STATIC void xfs_da_node_unbalance(xfs_da_state_t *state, + xfs_da_state_blk_t *src_node_blk, + xfs_da_state_blk_t *dst_node_blk); + +/* + * Utility routines. 
+ */ +STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count); +STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp); +STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra); + + +/*======================================================================== + * Routines used for growing the Btree. + *========================================================================*/ + +/* + * Create the initial contents of an intermediate node. + */ +int +xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, + xfs_dabuf_t **bpp, int whichfork) +{ + xfs_da_intnode_t *node; + xfs_dabuf_t *bp; + int error; + xfs_trans_t *tp; + + tp = args->trans; + error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + node = bp->data; + INT_ZERO(node->hdr.info.forw, ARCH_CONVERT); + INT_ZERO(node->hdr.info.back, ARCH_CONVERT); + INT_SET(node->hdr.info.magic, ARCH_CONVERT, XFS_DA_NODE_MAGIC); + INT_ZERO(node->hdr.info.pad, ARCH_CONVERT); + INT_ZERO(node->hdr.count, ARCH_CONVERT); + INT_SET(node->hdr.level, ARCH_CONVERT, level); + + xfs_da_log_buf(tp, bp, + XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); + + *bpp = bp; + return(0); +} + +/* + * Split a leaf node, rebalance, then possibly split + * intermediate nodes, rebalance, etc. + */ +int /* error */ +xfs_da_split(xfs_da_state_t *state) +{ + xfs_da_state_blk_t *oldblk, *newblk, *addblk; + xfs_da_intnode_t *node; + xfs_dabuf_t *bp; + int max, action, error, i; + + /* + * Walk back up the tree splitting/inserting/adjusting as necessary. + * If we need to insert and there isn't room, split the node, then + * decide which fragment to insert the new block from below into. + * Note that we may split the root this way, but we need more fixup. + */ + max = state->path.active - 1; + ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH)); + ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC || + state->path.blk[max].magic == XFS_DIRX_LEAF_MAGIC(state->mp)); + + addblk = &state->path.blk[max]; /* initial dummy value */ + for (i = max; (i >= 0) && addblk; state->path.active--, i--) { + oldblk = &state->path.blk[i]; + newblk = &state->altpath.blk[i]; + + /* + * If a leaf node then + * Allocate a new leaf node, then rebalance across them. + * else if an intermediate node then + * We split on the last layer, must we split the node? + */ + switch (oldblk->magic) { + case XFS_ATTR_LEAF_MAGIC: +#ifndef __KERNEL__ + return(ENOTTY); +#else + error = xfs_attr_leaf_split(state, oldblk, newblk); + if ((error != 0) && (error != ENOSPC)) { + return(error); /* GROT: attr is inconsistent */ + } + if (!error) { + addblk = newblk; + break; + } + /* + * Entry wouldn't fit, split the leaf again. + */ + state->extravalid = 1; + if (state->inleaf) { + state->extraafter = 0; /* before newblk */ + error = xfs_attr_leaf_split(state, oldblk, + &state->extrablk); + } else { + state->extraafter = 1; /* after newblk */ + error = xfs_attr_leaf_split(state, newblk, + &state->extrablk); + } + if (error) + return(error); /* GROT: attr inconsistent */ + addblk = newblk; + break; +#endif + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + error = xfs_dir_leaf_split(state, oldblk, newblk); + if ((error != 0) && (error != ENOSPC)) { + return(error); /* GROT: dir is inconsistent */ + } + if (!error) { + addblk = newblk; + break; + } + /* + * Entry wouldn't fit, split the leaf again. 
+ */ + state->extravalid = 1; + if (state->inleaf) { + state->extraafter = 0; /* before newblk */ + error = xfs_dir_leaf_split(state, oldblk, + &state->extrablk); + if (error) + return(error); /* GROT: dir incon. */ + addblk = newblk; + } else { + state->extraafter = 1; /* after newblk */ + error = xfs_dir_leaf_split(state, newblk, + &state->extrablk); + if (error) + return(error); /* GROT: dir incon. */ + addblk = newblk; + } + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + error = xfs_dir2_leafn_split(state, oldblk, newblk); + if (error) + return error; + addblk = newblk; + break; + case XFS_DA_NODE_MAGIC: + error = xfs_da_node_split(state, oldblk, newblk, addblk, + max - i, &action); + xfs_da_buf_done(addblk->bp); + addblk->bp = NULL; + if (error) + return(error); /* GROT: dir is inconsistent */ + /* + * Record the newly split block for the next time thru? + */ + if (action) + addblk = newblk; + else + addblk = NULL; + break; + } + + /* + * Update the btree to show the new hashval for this child. + */ + xfs_da_fixhashpath(state, &state->path); + /* + * If we won't need this block again, it's getting dropped + * from the active path by the loop control, so we need + * to mark it done now. + */ + if (i > 0 || !addblk) + xfs_da_buf_done(oldblk->bp); + } + if (!addblk) + return(0); + + /* + * Split the root node. + */ + ASSERT(state->path.active == 0); + oldblk = &state->path.blk[0]; + error = xfs_da_root_split(state, oldblk, addblk); + if (error) { + xfs_da_buf_done(oldblk->bp); + xfs_da_buf_done(addblk->bp); + addblk->bp = NULL; + return(error); /* GROT: dir is inconsistent */ + } + + /* + * Update pointers to the node which used to be block 0 and + * just got bumped because of the addition of a new root node. + * There might be three blocks involved if a double split occurred, + * and the original block 0 could be at any position in the list. + */ + + node = oldblk->bp->data; + if (!INT_ISZERO(node->hdr.info.forw, ARCH_CONVERT)) { + if (INT_GET(node->hdr.info.forw, ARCH_CONVERT) == addblk->blkno) { + bp = addblk->bp; + } else { + ASSERT(state->extravalid); + bp = state->extrablk.bp; + } + node = bp->data; + INT_SET(node->hdr.info.back, ARCH_CONVERT, oldblk->blkno); + xfs_da_log_buf(state->args->trans, bp, + XFS_DA_LOGRANGE(node, &node->hdr.info, + sizeof(node->hdr.info))); + } + node = oldblk->bp->data; + if (INT_GET(node->hdr.info.back, ARCH_CONVERT)) { + if (INT_GET(node->hdr.info.back, ARCH_CONVERT) == addblk->blkno) { + bp = addblk->bp; + } else { + ASSERT(state->extravalid); + bp = state->extrablk.bp; + } + node = bp->data; + INT_SET(node->hdr.info.forw, ARCH_CONVERT, oldblk->blkno); + xfs_da_log_buf(state->args->trans, bp, + XFS_DA_LOGRANGE(node, &node->hdr.info, + sizeof(node->hdr.info))); + } + xfs_da_buf_done(oldblk->bp); + xfs_da_buf_done(addblk->bp); + addblk->bp = NULL; + return(0); +} + +/* + * Split the root. We have to create a new root and point to the two + * parts (the split old root) that we just created. Copy block zero to + * the EOF, extending the inode in process. + */ +STATIC int /* error */ +xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2) +{ + xfs_da_intnode_t *node, *oldroot; + xfs_da_args_t *args; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + int error, size; + xfs_inode_t *dp; + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_dir2_leaf_t *leaf; + + /* + * Copy the existing (incorrect) block from the root node position + * to a free space somewhere. 
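The root split described above cannot move the root block itself, so the old root's contents are copied out to a newly allocated block at the end of the fork and the root is rebuilt with exactly two entries, one per half of the split. A toy in-memory model of that step follows; the demo_node layout, the block array, and the hash values are inventions for illustration only.

#include <stdio.h>
#include <string.h>

#define DEMO_MAXENT 4

/* Toy intermediate node: hashval/child pairs, loosely like xfs_da_intnode_t. */
struct demo_node {
	int		count;
	unsigned int	hashval[DEMO_MAXENT];
	int		before[DEMO_MAXENT];	/* child block numbers */
};

static void demo_root_split(struct demo_node *blocks, int *nblocks,
			    int root, int sibling)
{
	int copy = (*nblocks)++;		/* "grow the inode" */

	/* Move the old root's contents out of the fixed root block. */
	memcpy(&blocks[copy], &blocks[root], sizeof(struct demo_node));

	/* New root points at the copied block and at its new sibling. */
	blocks[root].count = 2;
	blocks[root].hashval[0] = blocks[copy].hashval[blocks[copy].count - 1];
	blocks[root].before[0] = copy;
	blocks[root].hashval[1] = blocks[sibling].hashval[blocks[sibling].count - 1];
	blocks[root].before[1] = sibling;
}

int main(void)
{
	struct demo_node blocks[8] = { { 0 } };
	int nblocks = 2;

	/* Block 0 is the root, block 1 is a freshly split-off sibling. */
	blocks[0].count = 2;
	blocks[0].hashval[0] = 10; blocks[0].hashval[1] = 20;
	blocks[1].count = 2;
	blocks[1].hashval[0] = 30; blocks[1].hashval[1] = 40;

	demo_root_split(blocks, &nblocks, 0, 1);
	printf("root: [%u -> blk %d] [%u -> blk %d]\n",
	       blocks[0].hashval[0], blocks[0].before[0],
	       blocks[0].hashval[1], blocks[0].before[1]);
	return 0;
}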
+ */ + args = state->args; + ASSERT(args != NULL); + error = xfs_da_grow_inode(args, &blkno); + if (error) + return(error); + dp = args->dp; + tp = args->trans; + mp = state->mp; + error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + node = bp->data; + oldroot = blk1->bp->data; + if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + size = (int)((char *)&oldroot->btree[INT_GET(oldroot->hdr.count, ARCH_CONVERT)] - + (char *)oldroot); + } else { + ASSERT(XFS_DIR_IS_V2(mp)); + ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + leaf = (xfs_dir2_leaf_t *)oldroot; + size = (int)((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] - + (char *)leaf); + } + memcpy(node, oldroot, size); + xfs_da_log_buf(tp, bp, 0, size - 1); + xfs_da_buf_done(blk1->bp); + blk1->bp = bp; + blk1->blkno = blkno; + + /* + * Set up the new root node. + */ + error = xfs_da_node_create(args, + args->whichfork == XFS_DATA_FORK && + XFS_DIR_IS_V2(mp) ? mp->m_dirleafblk : 0, + INT_GET(node->hdr.level, ARCH_CONVERT) + 1, &bp, args->whichfork); + if (error) + return(error); + node = bp->data; + INT_SET(node->btree[0].hashval, ARCH_CONVERT, blk1->hashval); + INT_SET(node->btree[0].before, ARCH_CONVERT, blk1->blkno); + INT_SET(node->btree[1].hashval, ARCH_CONVERT, blk2->hashval); + INT_SET(node->btree[1].before, ARCH_CONVERT, blk2->blkno); + INT_SET(node->hdr.count, ARCH_CONVERT, 2); + +#ifdef DEBUG + if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + ASSERT(blk1->blkno >= mp->m_dirleafblk && + blk1->blkno < mp->m_dirfreeblk); + ASSERT(blk2->blkno >= mp->m_dirleafblk && + blk2->blkno < mp->m_dirfreeblk); + } +#endif + + /* Header is already logged by xfs_da_node_create */ + xfs_da_log_buf(tp, bp, + XFS_DA_LOGRANGE(node, node->btree, + sizeof(xfs_da_node_entry_t) * 2)); + xfs_da_buf_done(bp); + + return(0); +} + +/* + * Split the node, rebalance, then add the new entry. + */ +STATIC int /* error */ +xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, + xfs_da_state_blk_t *newblk, + xfs_da_state_blk_t *addblk, + int treelevel, int *result) +{ + xfs_da_intnode_t *node; + xfs_dablk_t blkno; + int newcount, error; + int useextra; + + node = oldblk->bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + + /* + * With V2 the extra block is data or freespace. + */ + useextra = state->extravalid && XFS_DIR_IS_V1(state->mp); + newcount = 1 + useextra; + /* + * Do we have to split the node? + */ + if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > state->node_ents) { + /* + * Allocate a new node, add to the doubly linked chain of + * nodes, then move some of our excess entries into it. + */ + error = xfs_da_grow_inode(state->args, &blkno); + if (error) + return(error); /* GROT: dir is inconsistent */ + + error = xfs_da_node_create(state->args, blkno, treelevel, + &newblk->bp, state->args->whichfork); + if (error) + return(error); /* GROT: dir is inconsistent */ + newblk->blkno = blkno; + newblk->magic = XFS_DA_NODE_MAGIC; + xfs_da_node_rebalance(state, oldblk, newblk); + error = xfs_da_blk_link(state, oldblk, newblk); + if (error) + return(error); + *result = 1; + } else { + *result = 0; + } + + /* + * Insert the new entry(s) into the correct block + * (updating last hashval in the process). 
+ * + * xfs_da_node_add() inserts BEFORE the given index, + * and as a result of using node_lookup_int() we always + * point to a valid entry (not after one), but a split + * operation always results in a new block whose hashvals + * FOLLOW the current block. + * + * If we had double-split op below us, then add the extra block too. + */ + node = oldblk->bp->data; + if (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)) { + oldblk->index++; + xfs_da_node_add(state, oldblk, addblk); + if (useextra) { + if (state->extraafter) + oldblk->index++; + xfs_da_node_add(state, oldblk, &state->extrablk); + state->extravalid = 0; + } + } else { + newblk->index++; + xfs_da_node_add(state, newblk, addblk); + if (useextra) { + if (state->extraafter) + newblk->index++; + xfs_da_node_add(state, newblk, &state->extrablk); + state->extravalid = 0; + } + } + + return(0); +} + +/* + * Balance the btree elements between two intermediate nodes, + * usually one full and one empty. + * + * NOTE: if blk2 is empty, then it will get the upper half of blk1. + */ +STATIC void +xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2) +{ + xfs_da_intnode_t *node1, *node2, *tmpnode; + xfs_da_node_entry_t *btree_s, *btree_d; + int count, tmp; + xfs_trans_t *tp; + + node1 = blk1->bp->data; + node2 = blk2->bp->data; + /* + * Figure out how many entries need to move, and in which direction. + * Swap the nodes around if that makes it simpler. + */ + if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && + ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + tmpnode = node1; + node1 = node2; + node2 = tmpnode; + } + ASSERT(INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + count = (INT_GET(node1->hdr.count, ARCH_CONVERT) - INT_GET(node2->hdr.count, ARCH_CONVERT)) / 2; + if (count == 0) + return; + tp = state->args->trans; + /* + * Two cases: high-to-low and low-to-high. + */ + if (count > 0) { + /* + * Move elements in node2 up to make a hole. + */ + if ((tmp = INT_GET(node2->hdr.count, ARCH_CONVERT)) > 0) { + tmp *= (uint)sizeof(xfs_da_node_entry_t); + btree_s = &node2->btree[0]; + btree_d = &node2->btree[count]; + memmove(btree_d, btree_s, tmp); + } + + /* + * Move the req'd B-tree elements from high in node1 to + * low in node2. + */ + INT_MOD(node2->hdr.count, ARCH_CONVERT, count); + tmp = count * (uint)sizeof(xfs_da_node_entry_t); + btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count]; + btree_d = &node2->btree[0]; + memcpy(btree_d, btree_s, tmp); + INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count)); + + } else { + /* + * Move the req'd B-tree elements from low in node2 to + * high in node1. + */ + count = -count; + tmp = count * (uint)sizeof(xfs_da_node_entry_t); + btree_s = &node2->btree[0]; + btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)]; + memcpy(btree_d, btree_s, tmp); + INT_MOD(node1->hdr.count, ARCH_CONVERT, count); + xfs_da_log_buf(tp, blk1->bp, + XFS_DA_LOGRANGE(node1, btree_d, tmp)); + + /* + * Move elements in node2 down to fill the hole. 
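The rebalance above shifts roughly half of the surplus entries from the fuller node into its sibling, opening a hole with memmove() and filling it with memcpy(). A toy version of the simple high-to-low case (node2 initially empty) follows; the real routine also handles the reverse direction, swaps the nodes when their hash order requires it, and logs every range it touches. The demo_node layout is mine.

#include <stdio.h>
#include <string.h>

#define DEMO_MAXENT 8

struct demo_node {
	int		count;
	unsigned int	hashval[DEMO_MAXENT];
};

/*
 * Move half of the difference in entry counts from the fuller node1
 * into the (hash-wise later) node2, keeping both hash-ordered.
 */
static void demo_rebalance(struct demo_node *node1, struct demo_node *node2)
{
	int count = (node1->count - node2->count) / 2;

	if (count <= 0)
		return;
	/* Make a hole at the front of node2 ... */
	memmove(&node2->hashval[count], &node2->hashval[0],
		node2->count * sizeof(unsigned int));
	/* ... and fill it with the highest entries of node1. */
	memcpy(&node2->hashval[0], &node1->hashval[node1->count - count],
	       count * sizeof(unsigned int));
	node1->count -= count;
	node2->count += count;
}

int main(void)
{
	struct demo_node a = { 6, { 1, 2, 3, 4, 5, 6 } };
	struct demo_node b = { 0, { 0 } };
	int i;

	demo_rebalance(&a, &b);
	printf("node1:");
	for (i = 0; i < a.count; i++)
		printf(" %u", a.hashval[i]);
	printf("\nnode2:");
	for (i = 0; i < b.count; i++)
		printf(" %u", b.hashval[i]);
	printf("\n");
	return 0;
}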
+ */ + tmp = INT_GET(node2->hdr.count, ARCH_CONVERT) - count; + tmp *= (uint)sizeof(xfs_da_node_entry_t); + btree_s = &node2->btree[count]; + btree_d = &node2->btree[0]; + memmove(btree_d, btree_s, tmp); + INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count)); + } + + /* + * Log header of node 1 and all current bits of node 2. + */ + xfs_da_log_buf(tp, blk1->bp, + XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr))); + xfs_da_log_buf(tp, blk2->bp, + XFS_DA_LOGRANGE(node2, &node2->hdr, + sizeof(node2->hdr) + + sizeof(node2->btree[0]) * INT_GET(node2->hdr.count, ARCH_CONVERT))); + + /* + * Record the last hashval from each block for upward propagation. + * (note: don't use the swapped node pointers) + */ + node1 = blk1->bp->data; + node2 = blk2->bp->data; + blk1->hashval = INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk2->hashval = INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + + /* + * Adjust the expected index for insertion. + */ + if (blk1->index >= INT_GET(node1->hdr.count, ARCH_CONVERT)) { + blk2->index = blk1->index - INT_GET(node1->hdr.count, ARCH_CONVERT); + blk1->index = INT_GET(node1->hdr.count, ARCH_CONVERT) + 1; /* make it invalid */ + } +} + +/* + * Add a new entry to an intermediate node. + */ +STATIC void +xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, + xfs_da_state_blk_t *newblk) +{ + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + int tmp; + xfs_mount_t *mp; + + node = oldblk->bp->data; + mp = state->mp; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT((oldblk->index >= 0) && (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT))); + ASSERT(newblk->blkno != 0); + if (state->args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) + ASSERT(newblk->blkno >= mp->m_dirleafblk && + newblk->blkno < mp->m_dirfreeblk); + + /* + * We may need to make some room before we insert the new node. + */ + tmp = 0; + btree = &node->btree[ oldblk->index ]; + if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) { + tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree); + memmove(btree + 1, btree, tmp); + } + INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval); + INT_SET(btree->before, ARCH_CONVERT, newblk->blkno); + xfs_da_log_buf(state->args->trans, oldblk->bp, + XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); + INT_MOD(node->hdr.count, ARCH_CONVERT, +1); + xfs_da_log_buf(state->args->trans, oldblk->bp, + XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); + + /* + * Copy the last hash value from the oldblk to propagate upwards. + */ + oldblk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); +} + +/*======================================================================== + * Routines used for shrinking the Btree. + *========================================================================*/ + +/* + * Deallocate an empty leaf node, remove it from its parent, + * possibly deallocating that block, etc... 
+ */ +int +xfs_da_join(xfs_da_state_t *state) +{ + xfs_da_state_blk_t *drop_blk, *save_blk; + int action, error; + + action = 0; + drop_blk = &state->path.blk[ state->path.active-1 ]; + save_blk = &state->altpath.blk[ state->path.active-1 ]; + ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC); + ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC || + drop_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp)); + + /* + * Walk back up the tree joining/deallocating as necessary. + * When we stop dropping blocks, break out. + */ + for ( ; state->path.active >= 2; drop_blk--, save_blk--, + state->path.active--) { + /* + * See if we can combine the block with a neighbor. + * (action == 0) => no options, just leave + * (action == 1) => coalesce, then unlink + * (action == 2) => block empty, unlink it + */ + switch (drop_blk->magic) { + case XFS_ATTR_LEAF_MAGIC: +#ifndef __KERNEL__ + error = ENOTTY; +#else + error = xfs_attr_leaf_toosmall(state, &action); +#endif + if (error) + return(error); + if (action == 0) + return(0); +#ifdef __KERNEL__ + xfs_attr_leaf_unbalance(state, drop_blk, save_blk); +#endif + break; + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + error = xfs_dir_leaf_toosmall(state, &action); + if (error) + return(error); + if (action == 0) + return(0); + xfs_dir_leaf_unbalance(state, drop_blk, save_blk); + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + error = xfs_dir2_leafn_toosmall(state, &action); + if (error) + return error; + if (action == 0) + return 0; + xfs_dir2_leafn_unbalance(state, drop_blk, save_blk); + break; + case XFS_DA_NODE_MAGIC: + /* + * Remove the offending node, fixup hashvals, + * check for a toosmall neighbor. + */ + xfs_da_node_remove(state, drop_blk); + xfs_da_fixhashpath(state, &state->path); + error = xfs_da_node_toosmall(state, &action); + if (error) + return(error); + if (action == 0) + return 0; + xfs_da_node_unbalance(state, drop_blk, save_blk); + break; + } + xfs_da_fixhashpath(state, &state->altpath); + error = xfs_da_blk_unlink(state, drop_blk, save_blk); + xfs_da_state_kill_altpath(state); + if (error) + return(error); + error = xfs_da_shrink_inode(state->args, drop_blk->blkno, + drop_blk->bp); + drop_blk->bp = NULL; + if (error) + return(error); + } + /* + * We joined all the way to the top. If it turns out that + * we only have one entry in the root, make the child block + * the new root. + */ + xfs_da_node_remove(state, drop_blk); + xfs_da_fixhashpath(state, &state->path); + error = xfs_da_root_join(state, &state->path.blk[0]); + return(error); +} + +/* + * We have only one entry in the root. Copy the only remaining child of + * the old root to block 0 as the new root node. + */ +STATIC int +xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) +{ + xfs_da_intnode_t *oldroot; + /* REFERENCED */ + xfs_da_blkinfo_t *blkinfo; + xfs_da_args_t *args; + xfs_dablk_t child; + xfs_dabuf_t *bp; + int error; + + args = state->args; + ASSERT(args != NULL); + ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); + oldroot = root_blk->bp->data; + ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(INT_ISZERO(oldroot->hdr.info.forw, ARCH_CONVERT)); + ASSERT(INT_ISZERO(oldroot->hdr.info.back, ARCH_CONVERT)); + + /* + * If the root has more than one child, then don't do anything. + */ + if (INT_GET(oldroot->hdr.count, ARCH_CONVERT) > 1) + return(0); + + /* + * Read in the (only) child block, then copy those bytes into + * the root block's buffer and free the original child block. 
+ */ + child = INT_GET(oldroot->btree[ 0 ].before, ARCH_CONVERT); + ASSERT(child != 0); + error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp, + args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + blkinfo = bp->data; + if (INT_GET(oldroot->hdr.level, ARCH_CONVERT) == 1) { + ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || + INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + } else { + ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + } + ASSERT(INT_ISZERO(blkinfo->forw, ARCH_CONVERT)); + ASSERT(INT_ISZERO(blkinfo->back, ARCH_CONVERT)); + memcpy(root_blk->bp->data, bp->data, state->blocksize); + xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1); + error = xfs_da_shrink_inode(args, child, bp); + return(error); +} + +/* + * Check a node block and its neighbors to see if the block should be + * collapsed into one or the other neighbor. Always keep the block + * with the smaller block number. + * If the current block is over 50% full, don't try to join it, return 0. + * If the block is empty, fill in the state structure and return 2. + * If it can be collapsed, fill in the state structure and return 1. + * If nothing can be done, return 0. + */ +STATIC int +xfs_da_node_toosmall(xfs_da_state_t *state, int *action) +{ + xfs_da_intnode_t *node; + xfs_da_state_blk_t *blk; + xfs_da_blkinfo_t *info; + int count, forward, error, retval, i; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + + /* + * Check for the degenerate case of the block being over 50% full. + * If so, it's not worth even looking to see if we might be able + * to coalesce with a sibling. + */ + blk = &state->path.blk[ state->path.active-1 ]; + info = blk->bp->data; + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + node = (xfs_da_intnode_t *)info; + count = INT_GET(node->hdr.count, ARCH_CONVERT); + if (count > (state->node_ents >> 1)) { + *action = 0; /* blk over 50%, don't try to join */ + return(0); /* blk over 50%, don't try to join */ + } + + /* + * Check for the degenerate case of the block being empty. + * If the block is empty, we'll simply delete it, no need to + * coalesce it with a sibling block. We choose (aribtrarily) + * to merge with the forward block unless it is NULL. + */ + if (count == 0) { + /* + * Make altpath point to the block we want to keep and + * path point to the block we want to drop (this one). + */ + forward = (!INT_ISZERO(info->forw, ARCH_CONVERT)); + memcpy(&state->altpath, &state->path, sizeof(state->path)); + error = xfs_da_path_shift(state, &state->altpath, forward, + 0, &retval); + if (error) + return(error); + if (retval) { + *action = 0; + } else { + *action = 2; + } + return(0); + } + + /* + * Examine each sibling block to see if we can coalesce with + * at least 25% free space to spare. We need to figure out + * whether to merge with the forward or the backward block. + * We prefer coalescing with the lower numbered sibling so as + * to shrink a directory over time. 
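+ * Concretely, the test below only joins two blocks when
+ * count(this block) + count(sibling) <= node_ents - (node_ents >> 2),
+ * i.e. when the merged node would still be at least 25% empty.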
+ */ + /* start with smaller blk num */ + forward = (INT_GET(info->forw, ARCH_CONVERT) + < INT_GET(info->back, ARCH_CONVERT)); + for (i = 0; i < 2; forward = !forward, i++) { + if (forward) + blkno = INT_GET(info->forw, ARCH_CONVERT); + else + blkno = INT_GET(info->back, ARCH_CONVERT); + if (blkno == 0) + continue; + error = xfs_da_read_buf(state->args->trans, state->args->dp, + blkno, -1, &bp, state->args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + + node = (xfs_da_intnode_t *)info; + count = state->node_ents; + count -= state->node_ents >> 2; + count -= INT_GET(node->hdr.count, ARCH_CONVERT); + node = bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + count -= INT_GET(node->hdr.count, ARCH_CONVERT); + xfs_da_brelse(state->args->trans, bp); + if (count >= 0) + break; /* fits with at least 25% to spare */ + } + if (i >= 2) { + *action = 0; + return(0); + } + + /* + * Make altpath point to the block we want to keep (the lower + * numbered block) and path point to the block we want to drop. + */ + memcpy(&state->altpath, &state->path, sizeof(state->path)); + if (blkno < blk->blkno) { + error = xfs_da_path_shift(state, &state->altpath, forward, + 0, &retval); + if (error) { + return(error); + } + if (retval) { + *action = 0; + return(0); + } + } else { + error = xfs_da_path_shift(state, &state->path, forward, + 0, &retval); + if (error) { + return(error); + } + if (retval) { + *action = 0; + return(0); + } + } + *action = 1; + return(0); +} + +/* + * Walk back up the tree adjusting hash values as necessary, + * when we stop making changes, return. + */ +void +xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path) +{ + xfs_da_state_blk_t *blk; + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + xfs_dahash_t lasthash=0; + int level, count; + + level = path->active-1; + blk = &path->blk[ level ]; + switch (blk->magic) { +#ifdef __KERNEL__ + case XFS_ATTR_LEAF_MAGIC: + lasthash = xfs_attr_leaf_lasthash(blk->bp, &count); + if (count == 0) + return; + break; +#endif + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + lasthash = xfs_dir_leaf_lasthash(blk->bp, &count); + if (count == 0) + return; + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count); + if (count == 0) + return; + break; + case XFS_DA_NODE_MAGIC: + lasthash = xfs_da_node_lasthash(blk->bp, &count); + if (count == 0) + return; + break; + } + for (blk--, level--; level >= 0; blk--, level--) { + node = blk->bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + btree = &node->btree[ blk->index ]; + if (INT_GET(btree->hashval, ARCH_CONVERT) == lasthash) + break; + blk->hashval = lasthash; + INT_SET(btree->hashval, ARCH_CONVERT, lasthash); + xfs_da_log_buf(state->args->trans, blk->bp, + XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); + + lasthash = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + } +} + +/* + * Remove an entry from an intermediate node. + */ +STATIC void +xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk) +{ + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + int tmp; + + node = drop_blk->bp->data; + ASSERT(drop_blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)); + ASSERT(drop_blk->index >= 0); + + /* + * Copy over the offending entry, or just zero it out. 
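+ * Entries above the removed index slide down one slot; the stale
+ * last slot is then cleared and the count decremented.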
+ */ + btree = &node->btree[drop_blk->index]; + if (drop_blk->index < (INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { + tmp = INT_GET(node->hdr.count, ARCH_CONVERT) - drop_blk->index - 1; + tmp *= (uint)sizeof(xfs_da_node_entry_t); + memmove(btree, btree + 1, tmp); + xfs_da_log_buf(state->args->trans, drop_blk->bp, + XFS_DA_LOGRANGE(node, btree, tmp)); + btree = &node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ]; + } + memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); + xfs_da_log_buf(state->args->trans, drop_blk->bp, + XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); + INT_MOD(node->hdr.count, ARCH_CONVERT, -1); + xfs_da_log_buf(state->args->trans, drop_blk->bp, + XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); + + /* + * Copy the last hash value from the block to propagate upwards. + */ + btree--; + drop_blk->hashval = INT_GET(btree->hashval, ARCH_CONVERT); +} + +/* + * Unbalance the btree elements between two intermediate nodes, + * move all Btree elements from one node into another. + */ +STATIC void +xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk) +{ + xfs_da_intnode_t *drop_node, *save_node; + xfs_da_node_entry_t *btree; + int tmp; + xfs_trans_t *tp; + + drop_node = drop_blk->bp->data; + save_node = save_blk->bp->data; + ASSERT(INT_GET(drop_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(save_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + tp = state->args->trans; + + /* + * If the dying block has lower hashvals, then move all the + * elements in the remaining block up to make a hole. + */ + if ((INT_GET(drop_node->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(save_node->btree[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(drop_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT))) + { + btree = &save_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT) ]; + tmp = INT_GET(save_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); + memmove(btree, &save_node->btree[0], tmp); + btree = &save_node->btree[0]; + xfs_da_log_buf(tp, save_blk->bp, + XFS_DA_LOGRANGE(save_node, btree, + (INT_GET(save_node->hdr.count, ARCH_CONVERT) + INT_GET(drop_node->hdr.count, ARCH_CONVERT)) * + sizeof(xfs_da_node_entry_t))); + } else { + btree = &save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT) ]; + xfs_da_log_buf(tp, save_blk->bp, + XFS_DA_LOGRANGE(save_node, btree, + INT_GET(drop_node->hdr.count, ARCH_CONVERT) * + sizeof(xfs_da_node_entry_t))); + } + + /* + * Move all the B-tree elements from drop_blk to save_blk. + */ + tmp = INT_GET(drop_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); + memcpy(btree, &drop_node->btree[0], tmp); + INT_MOD(save_node->hdr.count, ARCH_CONVERT, INT_GET(drop_node->hdr.count, ARCH_CONVERT)); + + xfs_da_log_buf(tp, save_blk->bp, + XFS_DA_LOGRANGE(save_node, &save_node->hdr, + sizeof(save_node->hdr))); + + /* + * Save the last hashval in the remaining block for upward propagation. + */ + save_blk->hashval = INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); +} + +/*======================================================================== + * Routines used for finding things in the Btree. 
+ *========================================================================*/ + +/* + * Walk down the Btree looking for a particular filename, filling + * in the state structure as we go. + * + * We will set the state structure to point to each of the elements + * in each of the nodes where either the hashval is or should be. + * + * We support duplicate hashval's so for each entry in the current + * node that could contain the desired hashval, descend. This is a + * pruned depth-first tree search. + */ +int /* error */ +xfs_da_node_lookup_int(xfs_da_state_t *state, int *result) +{ + xfs_da_state_blk_t *blk; + xfs_da_blkinfo_t *curr; + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + xfs_dablk_t blkno; + int probe, span, max, error, retval; + xfs_dahash_t hashval; + xfs_da_args_t *args; + + args = state->args; + /* + * Descend thru the B-tree searching each level for the right + * node to use, until the right hashval is found. + */ + if (args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(state->mp)) + blkno = state->mp->m_dirleafblk; + else + blkno = 0; + for (blk = &state->path.blk[0], state->path.active = 1; + state->path.active <= XFS_DA_NODE_MAXDEPTH; + blk++, state->path.active++) { + /* + * Read the next node down in the tree. + */ + blk->blkno = blkno; + error = xfs_da_read_buf(state->args->trans, state->args->dp, + blkno, -1, &blk->bp, + state->args->whichfork); + if (error) { + blk->blkno = 0; + state->path.active--; + return(error); + } + ASSERT(blk->bp != NULL); + curr = blk->bp->data; + ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || + INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || + INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + + /* + * Search an intermediate node for a match. + */ + blk->magic = INT_GET(curr->magic, ARCH_CONVERT); + if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + node = blk->bp->data; + blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + + /* + * Binary search. (note: small blocks will skip loop) + */ + max = INT_GET(node->hdr.count, ARCH_CONVERT); + probe = span = max / 2; + hashval = state->args->hashval; + for (btree = &node->btree[probe]; span > 4; + btree = &node->btree[probe]) { + span /= 2; + if (INT_GET(btree->hashval, ARCH_CONVERT) < hashval) + probe += span; + else if (INT_GET(btree->hashval, ARCH_CONVERT) > hashval) + probe -= span; + else + break; + } + ASSERT((probe >= 0) && (probe < max)); + ASSERT((span <= 4) || (INT_GET(btree->hashval, ARCH_CONVERT) == hashval)); + + /* + * Since we may have duplicate hashval's, find the first + * matching hashval in the node. + */ + while ((probe > 0) && (INT_GET(btree->hashval, ARCH_CONVERT) >= hashval)) { + btree--; + probe--; + } + while ((probe < max) && (INT_GET(btree->hashval, ARCH_CONVERT) < hashval)) { + btree++; + probe++; + } + + /* + * Pick the right block to descend on. 
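+ * If the probe ran off the end of the entries (probe == max), descend
+ * through the last entry; otherwise descend through btree[probe].before.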
+ */ + if (probe == max) { + blk->index = max-1; + blkno = INT_GET(node->btree[ max-1 ].before, ARCH_CONVERT); + } else { + blk->index = probe; + blkno = INT_GET(btree->before, ARCH_CONVERT); + } + } +#ifdef __KERNEL__ + else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); + break; + } +#endif + else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL); + break; + } + else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL); + break; + } + } + + /* + * A leaf block that ends in the hashval that we are interested in + * (final hashval == search hashval) means that the next block may + * contain more entries with the same hashval, shift upward to the + * next leaf and keep searching. + */ + for (;;) { + if (blk->magic == XFS_DIR_LEAF_MAGIC) { + ASSERT(XFS_DIR_IS_V1(state->mp)); + retval = xfs_dir_leaf_lookup_int(blk->bp, state->args, + &blk->index); + } else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) { + ASSERT(XFS_DIR_IS_V2(state->mp)); + retval = xfs_dir2_leafn_lookup_int(blk->bp, state->args, + &blk->index, state); + } +#ifdef __KERNEL__ + else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { + retval = xfs_attr_leaf_lookup_int(blk->bp, state->args); + blk->index = state->args->index; + state->args->blkno = blk->blkno; + } +#endif + if (((retval == ENOENT) || (retval == ENOATTR)) && + (blk->hashval == state->args->hashval)) { + error = xfs_da_path_shift(state, &state->path, 1, 1, + &retval); + if (error) + return(error); + if (retval == 0) { + continue; + } +#ifdef __KERNEL__ + else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { + /* path_shift() gives ENOENT */ + retval = XFS_ERROR(ENOATTR); + } +#endif + } + break; + } + *result = retval; + return(0); +} + +/*======================================================================== + * Utility routines. + *========================================================================*/ + +/* + * Link a new block into a doubly linked list of blocks (of whatever type). + */ +int /* error */ +xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, + xfs_da_state_blk_t *new_blk) +{ + xfs_da_blkinfo_t *old_info, *new_info, *tmp_info; + xfs_da_args_t *args; + int before=0, error; + xfs_dabuf_t *bp; + + /* + * Set up environment. + */ + args = state->args; + ASSERT(args != NULL); + old_info = old_blk->bp->data; + new_info = new_blk->bp->data; + ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || + old_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || + old_blk->magic == XFS_ATTR_LEAF_MAGIC); + ASSERT(old_blk->magic == INT_GET(old_info->magic, ARCH_CONVERT)); + ASSERT(new_blk->magic == INT_GET(new_info->magic, ARCH_CONVERT)); + ASSERT(old_blk->magic == new_blk->magic); + + switch (old_blk->magic) { +#ifdef __KERNEL__ + case XFS_ATTR_LEAF_MAGIC: + before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp); + break; +#endif + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + before = xfs_dir_leaf_order(old_blk->bp, new_blk->bp); + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp); + break; + case XFS_DA_NODE_MAGIC: + before = xfs_da_node_order(old_blk->bp, new_blk->bp); + break; + } + + /* + * Link blocks in appropriate order. + */ + if (before) { + /* + * Link new block in before existing block. 
+ */ + INT_SET(new_info->forw, ARCH_CONVERT, old_blk->blkno); + new_info->back = old_info->back; /* INT_: direct copy */ + if (INT_GET(old_info->back, ARCH_CONVERT)) { + error = xfs_da_read_buf(args->trans, args->dp, + INT_GET(old_info->back, + ARCH_CONVERT), -1, &bp, + args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + tmp_info = bp->data; + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(old_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == old_blk->blkno); + INT_SET(tmp_info->forw, ARCH_CONVERT, new_blk->blkno); + xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); + xfs_da_buf_done(bp); + } + INT_SET(old_info->back, ARCH_CONVERT, new_blk->blkno); + } else { + /* + * Link new block in after existing block. + */ + new_info->forw = old_info->forw; /* INT_: direct copy */ + INT_SET(new_info->back, ARCH_CONVERT, old_blk->blkno); + if (INT_GET(old_info->forw, ARCH_CONVERT)) { + error = xfs_da_read_buf(args->trans, args->dp, + INT_GET(old_info->forw, ARCH_CONVERT), -1, &bp, + args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + tmp_info = bp->data; + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) + == INT_GET(old_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) + == old_blk->blkno); + INT_SET(tmp_info->back, ARCH_CONVERT, new_blk->blkno); + xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); + xfs_da_buf_done(bp); + } + INT_SET(old_info->forw, ARCH_CONVERT, new_blk->blkno); + } + + xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); + xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); + return(0); +} + +/* + * Compare two intermediate nodes for "order". + */ +STATIC int +xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp) +{ + xfs_da_intnode_t *node1, *node2; + + node1 = node1_bp->data; + node2 = node2_bp->data; + ASSERT((INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) && + (INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC)); + if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && + ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < + INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + return(1); + } + return(0); +} + +/* + * Pick up the last hashvalue from an intermediate node. + */ +STATIC uint +xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count) +{ + xfs_da_intnode_t *node; + + node = bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + if (count) + *count = INT_GET(node->hdr.count, ARCH_CONVERT); + if (INT_ISZERO(node->hdr.count, ARCH_CONVERT)) + return(0); + return(INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); +} + +/* + * Unlink a block from a doubly linked list of blocks. + */ +int /* error */ +xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk) +{ + xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info; + xfs_da_args_t *args; + xfs_dabuf_t *bp; + int error; + + /* + * Set up environment. 
+ */ + args = state->args; + ASSERT(args != NULL); + save_info = save_blk->bp->data; + drop_info = drop_blk->bp->data; + ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || + save_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || + save_blk->magic == XFS_ATTR_LEAF_MAGIC); + ASSERT(save_blk->magic == INT_GET(save_info->magic, ARCH_CONVERT)); + ASSERT(drop_blk->magic == INT_GET(drop_info->magic, ARCH_CONVERT)); + ASSERT(save_blk->magic == drop_blk->magic); + ASSERT((INT_GET(save_info->forw, ARCH_CONVERT) == drop_blk->blkno) || + (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno)); + ASSERT((INT_GET(drop_info->forw, ARCH_CONVERT) == save_blk->blkno) || + (INT_GET(drop_info->back, ARCH_CONVERT) == save_blk->blkno)); + + /* + * Unlink the leaf block from the doubly linked chain of leaves. + */ + if (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno) { + save_info->back = drop_info->back; /* INT_: direct copy */ + if (INT_GET(drop_info->back, ARCH_CONVERT)) { + error = xfs_da_read_buf(args->trans, args->dp, + INT_GET(drop_info->back, + ARCH_CONVERT), -1, &bp, + args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + tmp_info = bp->data; + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(save_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == drop_blk->blkno); + INT_SET(tmp_info->forw, ARCH_CONVERT, save_blk->blkno); + xfs_da_log_buf(args->trans, bp, 0, + sizeof(*tmp_info) - 1); + xfs_da_buf_done(bp); + } + } else { + save_info->forw = drop_info->forw; /* INT_: direct copy */ + if (INT_GET(drop_info->forw, ARCH_CONVERT)) { + error = xfs_da_read_buf(args->trans, args->dp, + INT_GET(drop_info->forw, ARCH_CONVERT), -1, &bp, + args->whichfork); + if (error) + return(error); + ASSERT(bp != NULL); + tmp_info = bp->data; + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) + == INT_GET(save_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) + == drop_blk->blkno); + INT_SET(tmp_info->back, ARCH_CONVERT, save_blk->blkno); + xfs_da_log_buf(args->trans, bp, 0, + sizeof(*tmp_info) - 1); + xfs_da_buf_done(bp); + } + } + + xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); + return(0); +} + +/* + * Move a path "forward" or "!forward" one block at the current level. + * + * This routine will adjust a "path" to point to the next block + * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the + * Btree, including updating pointers to the intermediate nodes between + * the new bottom and the root. + */ +int /* error */ +xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, + int forward, int release, int *result) +{ + xfs_da_state_blk_t *blk; + xfs_da_blkinfo_t *info; + xfs_da_intnode_t *node; + xfs_da_args_t *args; + xfs_dablk_t blkno=0; + int level, error; + + /* + * Roll up the Btree looking for the first block where our + * current index is not at the edge of the block. Note that + * we skip the bottom layer because we want the sibling block. 
+ */ + args = state->args; + ASSERT(args != NULL); + ASSERT(path != NULL); + ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); + level = (path->active-1) - 1; /* skip bottom layer in path */ + for (blk = &path->blk[level]; level >= 0; blk--, level--) { + ASSERT(blk->bp != NULL); + node = blk->bp->data; + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + if (forward && (blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { + blk->index++; + blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + break; + } else if (!forward && (blk->index > 0)) { + blk->index--; + blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + break; + } + } + if (level < 0) { + *result = XFS_ERROR(ENOENT); /* we're out of our tree */ + ASSERT(args->oknoent); + return(0); + } + + /* + * Roll down the edge of the subtree until we reach the + * same depth we were at originally. + */ + for (blk++, level++; level < path->active; blk++, level++) { + /* + * Release the old block. + * (if it's dirty, trans won't actually let go) + */ + if (release) + xfs_da_brelse(args->trans, blk->bp); + + /* + * Read the next child block. + */ + blk->blkno = blkno; + error = xfs_da_read_buf(args->trans, args->dp, blkno, -1, + &blk->bp, args->whichfork); + if (error) + return(error); + ASSERT(blk->bp != NULL); + info = blk->bp->data; + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || + INT_GET(info->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || + INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + blk->magic = INT_GET(info->magic, ARCH_CONVERT); + if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + node = (xfs_da_intnode_t *)info; + blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + if (forward) + blk->index = 0; + else + blk->index = INT_GET(node->hdr.count, ARCH_CONVERT)-1; + blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + } else { + ASSERT(level == path->active-1); + blk->index = 0; + switch(blk->magic) { +#ifdef __KERNEL__ + case XFS_ATTR_LEAF_MAGIC: + blk->hashval = xfs_attr_leaf_lasthash(blk->bp, + NULL); + break; +#endif + case XFS_DIR_LEAF_MAGIC: + ASSERT(XFS_DIR_IS_V1(state->mp)); + blk->hashval = xfs_dir_leaf_lasthash(blk->bp, + NULL); + break; + case XFS_DIR2_LEAFN_MAGIC: + ASSERT(XFS_DIR_IS_V2(state->mp)); + blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, + NULL); + break; + default: + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC || + blk->magic == + XFS_DIRX_LEAF_MAGIC(state->mp)); + break; + } + } + } + *result = 0; + return(0); +} + + +/*======================================================================== + * Utility routines. + *========================================================================*/ + +/* + * Implement a simple hash on a character string. + * Rotate the hash value by 7 bits, then XOR each character in. + * This is implemented with some source-level loop unrolling. + */ +xfs_dahash_t +xfs_da_hashname(uchar_t *name, int namelen) +{ + xfs_dahash_t hash; + +#define ROTL(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) +#ifdef SLOWVERSION + /* + * This is the old one-byte-at-a-time version. + */ + for (hash = 0; namelen > 0; namelen--) { + hash = *name++ ^ ROTL(hash, 7); + } + return(hash); +#else + /* + * Do four characters at a time as long as we can. 
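+ * Each round of the byte-at-a-time loop above is hash = c ^ ROTL(hash, 7),
+ * so four rounds expand, in effect, to
+ * (c0 << 21) ^ (c1 << 14) ^ (c2 << 7) ^ c3 ^ ROTL(hash, 28),
+ * which is what each pass of the unrolled loop below computes; the
+ * trailing switch handles a 1-3 character tail the same way.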
+ */
+ for (hash = 0; namelen >= 4; namelen -= 4, name += 4) {
+ hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
+ (name[3] << 0) ^ ROTL(hash, 7 * 4);
+ }
+ /*
+ * Now do the rest of the characters.
+ */
+ switch (namelen) {
+ case 3:
+ return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
+ ROTL(hash, 7 * 3);
+ case 2:
+ return (name[0] << 7) ^ (name[1] << 0) ^ ROTL(hash, 7 * 2);
+ case 1:
+ return (name[0] << 0) ^ ROTL(hash, 7 * 1);
+ case 0:
+ return hash;
+ }
+ /* NOTREACHED */
+#endif
+#undef ROTL
+ return 0; /* keep gcc happy */
+}
+
+/*
+ * Add a block to the btree ahead of the file.
+ * Return the new block number to the caller.
+ */
+int
+xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
+{
+ xfs_fileoff_t bno, b;
+ xfs_bmbt_irec_t map;
+ xfs_bmbt_irec_t *mapp;
+ xfs_inode_t *dp;
+ int nmap, error, w, count, c, got, i, mapi;
+ xfs_fsize_t size;
+ xfs_trans_t *tp;
+ xfs_mount_t *mp;
+
+ dp = args->dp;
+ mp = dp->i_mount;
+ w = args->whichfork;
+ tp = args->trans;
+ /*
+ * For new directories adjust the file offset and block count.
+ */
+ if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) {
+ bno = mp->m_dirleafblk;
+ count = mp->m_dirblkfsbs;
+ } else {
+ bno = 0;
+ count = 1;
+ }
+ /*
+ * Find a spot in the file space to put the new block.
+ */
+ if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, w))) {
+ return error;
+ }
+ if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
+ ASSERT(bno >= mp->m_dirleafblk && bno < mp->m_dirfreeblk);
+ /*
+ * Try mapping it in one filesystem block.
+ */
+ nmap = 1;
+ ASSERT(args->firstblock != NULL);
+ if ((error = xfs_bmapi(tp, dp, bno, count,
+ XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
+ XFS_BMAPI_CONTIG,
+ args->firstblock, args->total, &map, &nmap,
+ args->flist))) {
+ return error;
+ }
+ ASSERT(nmap <= 1);
+ if (nmap == 1) {
+ mapp = &map;
+ mapi = 1;
+ }
+ /*
+ * If we didn't get it and the block might work if fragmented,
+ * try without the CONTIG flag. Loop until we get it all.
+ */
+ else if (nmap == 0 && count > 1) {
+ mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
+ for (b = bno, mapi = 0; b < bno + count; ) {
+ nmap = MIN(XFS_BMAP_MAX_NMAP, count);
+ c = (int)(bno + count - b);
+ if ((error = xfs_bmapi(tp, dp, b, c,
+ XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|
+ XFS_BMAPI_METADATA,
+ args->firstblock, args->total,
+ &mapp[mapi], &nmap, args->flist))) {
+ kmem_free(mapp, sizeof(*mapp) * count);
+ return error;
+ }
+ if (nmap < 1)
+ break;
+ mapi += nmap;
+ b = mapp[mapi - 1].br_startoff +
+ mapp[mapi - 1].br_blockcount;
+ }
+ } else {
+ mapi = 0;
+ mapp = NULL;
+ }
+ /*
+ * Count the blocks we got, make sure it matches the total.
+ */
+ for (i = 0, got = 0; i < mapi; i++)
+ got += mapp[i].br_blockcount;
+ if (got != count || mapp[0].br_startoff != bno ||
+ mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
+ bno + count) {
+ if (mapp != &map)
+ kmem_free(mapp, sizeof(*mapp) * count);
+ return XFS_ERROR(ENOSPC);
+ }
+ if (mapp != &map)
+ kmem_free(mapp, sizeof(*mapp) * count);
+ *new_blkno = (xfs_dablk_t)bno;
+ /*
+ * For version 1 directories, adjust the file size if it changed.
+ */
+ if (w == XFS_DATA_FORK && XFS_DIR_IS_V1(mp)) {
+ ASSERT(mapi == 1);
+ if ((error = xfs_bmap_last_offset(tp, dp, &bno, w)))
+ return error;
+ size = XFS_FSB_TO_B(mp, bno);
+ if (size != dp->i_d.di_size) {
+ dp->i_d.di_size = size;
+ xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Ick.
We need to always be able to remove a btree block, even + * if there's no space reservation because the filesystem is full. + * This is called if xfs_bunmapi on a btree block fails due to ENOSPC. + * It swaps the target block with the last block in the file. The + * last block in the file can always be removed since it can't cause + * a bmap btree split to do that. + */ +STATIC int +xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, + xfs_dabuf_t **dead_bufp) +{ + xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno; + xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf; + xfs_fileoff_t lastoff; + xfs_inode_t *ip; + xfs_trans_t *tp; + xfs_mount_t *mp; + int error, w, entno, level, dead_level; + xfs_da_blkinfo_t *dead_info, *sib_info; + xfs_da_intnode_t *par_node, *dead_node; + xfs_dir_leafblock_t *dead_leaf; + xfs_dir2_leaf_t *dead_leaf2; + xfs_dahash_t dead_hash; + + dead_buf = *dead_bufp; + dead_blkno = *dead_blknop; + tp = args->trans; + ip = args->dp; + w = args->whichfork; + ASSERT(w == XFS_DATA_FORK); + mp = ip->i_mount; + if (XFS_DIR_IS_V2(mp)) { + lastoff = mp->m_dirfreeblk; + error = xfs_bmap_last_before(tp, ip, &lastoff, w); + } else + error = xfs_bmap_last_offset(tp, ip, &lastoff, w); + if (error) + return error; + if (unlikely(lastoff == 0)) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW, + mp); + return XFS_ERROR(EFSCORRUPTED); + } + /* + * Read the last block in the btree space. + */ + last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs; + if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w))) + return error; + /* + * Copy the last block into the dead buffer and log it. + */ + memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize); + xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1); + dead_info = dead_buf->data; + /* + * Get values from the moved block. + */ + if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + ASSERT(XFS_DIR_IS_V1(mp)); + dead_leaf = (xfs_dir_leafblock_t *)dead_info; + dead_level = 0; + dead_hash = + INT_GET(dead_leaf->entries[INT_GET(dead_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + } else if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + ASSERT(XFS_DIR_IS_V2(mp)); + dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; + dead_level = 0; + dead_hash = INT_GET(dead_leaf2->ents[INT_GET(dead_leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + } else { + ASSERT(INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + dead_node = (xfs_da_intnode_t *)dead_info; + dead_level = INT_GET(dead_node->hdr.level, ARCH_CONVERT); + dead_hash = INT_GET(dead_node->btree[INT_GET(dead_node->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + } + sib_buf = par_buf = NULL; + /* + * If the moved block has a left sibling, fix up the pointers. 
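+ * The old last block's contents now live at dead_blkno, so the left
+ * sibling's forw pointer must be redirected there.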
+ */ + if ((sib_blkno = INT_GET(dead_info->back, ARCH_CONVERT))) { + if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) + goto done; + sib_info = sib_buf->data; + if (unlikely( + INT_GET(sib_info->forw, ARCH_CONVERT) != last_blkno || + INT_GET(sib_info->magic, ARCH_CONVERT) != INT_GET(dead_info->magic, ARCH_CONVERT))) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + INT_SET(sib_info->forw, ARCH_CONVERT, dead_blkno); + xfs_da_log_buf(tp, sib_buf, + XFS_DA_LOGRANGE(sib_info, &sib_info->forw, + sizeof(sib_info->forw))); + xfs_da_buf_done(sib_buf); + sib_buf = NULL; + } + /* + * If the moved block has a right sibling, fix up the pointers. + */ + if ((sib_blkno = INT_GET(dead_info->forw, ARCH_CONVERT))) { + if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) + goto done; + sib_info = sib_buf->data; + if (unlikely( + INT_GET(sib_info->back, ARCH_CONVERT) != last_blkno + || INT_GET(sib_info->magic, ARCH_CONVERT) + != INT_GET(dead_info->magic, ARCH_CONVERT))) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + INT_SET(sib_info->back, ARCH_CONVERT, dead_blkno); + xfs_da_log_buf(tp, sib_buf, + XFS_DA_LOGRANGE(sib_info, &sib_info->back, + sizeof(sib_info->back))); + xfs_da_buf_done(sib_buf); + sib_buf = NULL; + } + par_blkno = XFS_DIR_IS_V1(mp) ? 0 : mp->m_dirleafblk; + level = -1; + /* + * Walk down the tree looking for the parent of the moved block. + */ + for (;;) { + if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) + goto done; + par_node = par_buf->data; + if (unlikely( + INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC || + (level >= 0 && level != INT_GET(par_node->hdr.level, ARCH_CONVERT) + 1))) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + level = INT_GET(par_node->hdr.level, ARCH_CONVERT); + for (entno = 0; + entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && + INT_GET(par_node->btree[entno].hashval, ARCH_CONVERT) < dead_hash; + entno++) + continue; + if (unlikely(entno == INT_GET(par_node->hdr.count, ARCH_CONVERT))) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + par_blkno = INT_GET(par_node->btree[entno].before, ARCH_CONVERT); + if (level == dead_level + 1) + break; + xfs_da_brelse(tp, par_buf); + par_buf = NULL; + } + /* + * We're in the right parent block. + * Look for the right entry. 
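+ * Scan this node's entries for the one whose 'before' pointer still
+ * names last_blkno, moving on to the next sibling node if it is not
+ * found here.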
+ */ + for (;;) { + for (; + entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && + INT_GET(par_node->btree[entno].before, ARCH_CONVERT) != last_blkno; + entno++) + continue; + if (entno < INT_GET(par_node->hdr.count, ARCH_CONVERT)) + break; + par_blkno = INT_GET(par_node->hdr.info.forw, ARCH_CONVERT); + xfs_da_brelse(tp, par_buf); + par_buf = NULL; + if (unlikely(par_blkno == 0)) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) + goto done; + par_node = par_buf->data; + if (unlikely( + INT_GET(par_node->hdr.level, ARCH_CONVERT) != level || + INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)) { + XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", + XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto done; + } + entno = 0; + } + /* + * Update the parent entry pointing to the moved block. + */ + INT_SET(par_node->btree[entno].before, ARCH_CONVERT, dead_blkno); + xfs_da_log_buf(tp, par_buf, + XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before, + sizeof(par_node->btree[entno].before))); + xfs_da_buf_done(par_buf); + xfs_da_buf_done(dead_buf); + *dead_blknop = last_blkno; + *dead_bufp = last_buf; + return 0; +done: + if (par_buf) + xfs_da_brelse(tp, par_buf); + if (sib_buf) + xfs_da_brelse(tp, sib_buf); + xfs_da_brelse(tp, last_buf); + return error; +} + +/* + * Remove a btree block from a directory or attribute. + */ +int +xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, + xfs_dabuf_t *dead_buf) +{ + xfs_inode_t *dp; + int done, error, w, count; + xfs_fileoff_t bno; + xfs_fsize_t size; + xfs_trans_t *tp; + xfs_mount_t *mp; + + dp = args->dp; + w = args->whichfork; + tp = args->trans; + mp = dp->i_mount; + if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) + count = mp->m_dirblkfsbs; + else + count = 1; + for (;;) { + /* + * Remove extents. If we get ENOSPC for a dir we have to move + * the last block to the place we want to kill. + */ + if ((error = xfs_bunmapi(tp, dp, dead_blkno, count, + XFS_BMAPI_AFLAG(w)|XFS_BMAPI_METADATA, + 0, args->firstblock, args->flist, + &done)) == ENOSPC) { + if (w != XFS_DATA_FORK) + goto done; + if ((error = xfs_da_swap_lastblock(args, &dead_blkno, + &dead_buf))) + goto done; + } else if (error) + goto done; + else + break; + } + ASSERT(done); + xfs_da_binval(tp, dead_buf); + /* + * Adjust the directory size for version 1. + */ + if (w == XFS_DATA_FORK && XFS_DIR_IS_V1(mp)) { + if ((error = xfs_bmap_last_offset(tp, dp, &bno, w))) + return error; + size = XFS_FSB_TO_B(dp->i_mount, bno); + if (size != dp->i_d.di_size) { + dp->i_d.di_size = size; + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + } + } + return 0; +done: + xfs_da_binval(tp, dead_buf); + return error; +} + +/* + * See if the mapping(s) for this btree block are valid, i.e. + * don't contain holes, are logically contiguous, and cover the whole range. + */ +STATIC int +xfs_da_map_covers_blocks( + int nmap, + xfs_bmbt_irec_t *mapp, + xfs_dablk_t bno, + int count) +{ + int i; + xfs_fileoff_t off; + + for (i = 0, off = bno; i < nmap; i++) { + if (mapp[i].br_startblock == HOLESTARTBLOCK || + mapp[i].br_startblock == DELAYSTARTBLOCK) { + return 0; + } + if (off != mapp[i].br_startoff) { + return 0; + } + off += mapp[i].br_blockcount; + } + return off == bno + count; +} + +/* + * Make a dabuf. + * Used for get_buf, read_buf, read_bufr, and reada_buf. 
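+ * The 'caller' argument selects the behaviour: 0 gets a buffer,
+ * 1 reads it and verifies the magic number, 3 only issues readahead.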
+ */
+STATIC int
+xfs_da_do_buf(
+ xfs_trans_t *trans,
+ xfs_inode_t *dp,
+ xfs_dablk_t bno,
+ xfs_daddr_t *mappedbnop,
+ xfs_dabuf_t **bpp,
+ int whichfork,
+ int caller,
+ inst_t *ra)
+{
+ xfs_buf_t *bp = 0;
+ xfs_buf_t **bplist;
+ int error=0;
+ int i;
+ xfs_bmbt_irec_t map;
+ xfs_bmbt_irec_t *mapp;
+ xfs_daddr_t mappedbno;
+ xfs_mount_t *mp;
+ int nbplist=0;
+ int nfsb;
+ int nmap;
+ xfs_dabuf_t *rbp;
+
+ mp = dp->i_mount;
+ if (whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
+ nfsb = mp->m_dirblkfsbs;
+ else
+ nfsb = 1;
+ mappedbno = *mappedbnop;
+ /*
+ * Caller doesn't have a mapping. -2 means don't complain
+ * if we land in a hole.
+ */
+ if (mappedbno == -1 || mappedbno == -2) {
+ /*
+ * Optimize the one-block case.
+ */
+ if (nfsb == 1) {
+ xfs_fsblock_t fsb;
+
+ if ((error =
+ xfs_bmapi_single(trans, dp, whichfork, &fsb,
+ (xfs_fileoff_t)bno))) {
+ return error;
+ }
+ mapp = &map;
+ if (fsb == NULLFSBLOCK) {
+ nmap = 0;
+ } else {
+ map.br_startblock = fsb;
+ map.br_startoff = (xfs_fileoff_t)bno;
+ map.br_blockcount = 1;
+ nmap = 1;
+ }
+ } else {
+ mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
+ nmap = nfsb;
+ if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
+ nfsb,
+ XFS_BMAPI_METADATA |
+ XFS_BMAPI_AFLAG(whichfork),
+ NULL, 0, mapp, &nmap, NULL)))
+ goto exit0;
+ }
+ } else {
+ map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
+ map.br_startoff = (xfs_fileoff_t)bno;
+ map.br_blockcount = nfsb;
+ mapp = &map;
+ nmap = 1;
+ }
+ if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
+ error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
+ if (unlikely(error == EFSCORRUPTED)) {
+ if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
+ int i;
+ cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
+ (long long)bno);
+ cmn_err(CE_ALERT, "dir: inode %lld\n",
+ (long long)dp->i_ino);
+ for (i = 0; i < nmap; i++) {
+ cmn_err(CE_ALERT,
+ "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d\n",
+ i,
+ mapp[i].br_startoff,
+ mapp[i].br_startblock,
+ mapp[i].br_blockcount,
+ mapp[i].br_state);
+ }
+ }
+ XFS_ERROR_REPORT("xfs_da_do_buf(1)",
+ XFS_ERRLEVEL_LOW, mp);
+ }
+ goto exit0;
+ }
+ if (caller != 3 && nmap > 1) {
+ bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
+ nbplist = 0;
+ } else
+ bplist = NULL;
+ /*
+ * Turn the mapping(s) into buffer(s).
+ */
+ for (i = 0; i < nmap; i++) {
+ int nmapped;
+
+ mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
+ if (i == 0)
+ *mappedbnop = mappedbno;
+ nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
+ switch (caller) {
+ case 0:
+ bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
+ mappedbno, nmapped, 0);
+ error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO);
+ break;
+ case 1:
+#ifndef __KERNEL__
+ case 2:
+#endif
+ bp = NULL;
+ error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
+ mappedbno, nmapped, 0, &bp);
+ break;
+#ifdef __KERNEL__
+ case 3:
+ xfs_baread(mp->m_ddev_targp, mappedbno, nmapped);
+ error = 0;
+ bp = NULL;
+ break;
+#endif
+ }
+ if (error) {
+ if (bp)
+ xfs_trans_brelse(trans, bp);
+ goto exit1;
+ }
+ if (!bp)
+ continue;
+ if (caller == 1) {
+ if (whichfork == XFS_ATTR_FORK) {
+ XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
+ XFS_ATTR_BTREE_REF);
+ } else {
+ XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE,
+ XFS_DIR_BTREE_REF);
+ }
+ }
+ if (bplist) {
+ bplist[nbplist++] = bp;
+ }
+ }
+ /*
+ * Build a dabuf structure.
+ */ + if (bplist) { + rbp = xfs_da_buf_make(nbplist, bplist, ra); + } else if (bp) + rbp = xfs_da_buf_make(1, &bp, ra); + else + rbp = NULL; + /* + * For read_buf, check the magic number. + */ + if (caller == 1) { + xfs_dir2_data_t *data; + xfs_dir2_free_t *free; + xfs_da_blkinfo_t *info; + uint magic, magic1; + + info = rbp->data; + data = rbp->data; + free = rbp->data; + magic = INT_GET(info->magic, ARCH_CONVERT); + magic1 = INT_GET(data->hdr.magic, ARCH_CONVERT); + if (unlikely( + XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) && + (magic != XFS_DIR_LEAF_MAGIC) && + (magic != XFS_ATTR_LEAF_MAGIC) && + (magic != XFS_DIR2_LEAF1_MAGIC) && + (magic != XFS_DIR2_LEAFN_MAGIC) && + (magic1 != XFS_DIR2_BLOCK_MAGIC) && + (magic1 != XFS_DIR2_DATA_MAGIC) && + (INT_GET(free->hdr.magic, ARCH_CONVERT) != XFS_DIR2_FREE_MAGIC), + mp, XFS_ERRTAG_DA_READ_BUF, + XFS_RANDOM_DA_READ_BUF))) { + xfs_buftrace("DA READ ERROR", rbp->bps[0]); + XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)", + XFS_ERRLEVEL_LOW, mp, info); + error = XFS_ERROR(EFSCORRUPTED); + xfs_da_brelse(trans, rbp); + nbplist = 0; + goto exit1; + } + } + if (bplist) { + kmem_free(bplist, sizeof(*bplist) * nmap); + } + if (mapp != &map) { + kmem_free(mapp, sizeof(*mapp) * nfsb); + } + if (bpp) + *bpp = rbp; + return 0; +exit1: + if (bplist) { + for (i = 0; i < nbplist; i++) + xfs_trans_brelse(trans, bplist[i]); + kmem_free(bplist, sizeof(*bplist) * nmap); + } +exit0: + if (mapp != &map) + kmem_free(mapp, sizeof(*mapp) * nfsb); + if (bpp) + *bpp = NULL; + return error; +} + +/* + * Get a buffer for the dir/attr block. + */ +int +xfs_da_get_buf( + xfs_trans_t *trans, + xfs_inode_t *dp, + xfs_dablk_t bno, + xfs_daddr_t mappedbno, + xfs_dabuf_t **bpp, + int whichfork) +{ + return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0, + (inst_t *)__return_address); +} + +/* + * Get a buffer for the dir/attr block, fill in the contents. + */ +int +xfs_da_read_buf( + xfs_trans_t *trans, + xfs_inode_t *dp, + xfs_dablk_t bno, + xfs_daddr_t mappedbno, + xfs_dabuf_t **bpp, + int whichfork) +{ + return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1, + (inst_t *)__return_address); +} + +/* + * Readahead the dir/attr block. + */ +xfs_daddr_t +xfs_da_reada_buf( + xfs_trans_t *trans, + xfs_inode_t *dp, + xfs_dablk_t bno, + int whichfork) +{ + xfs_daddr_t rval; + + rval = -1; + if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3, + (inst_t *)__return_address)) + return -1; + else + return rval; +} + +/* + * Calculate the number of bits needed to hold i different values. + */ +uint +xfs_da_log2_roundup(uint i) +{ + uint rval; + + for (rval = 0; rval < NBBY * sizeof(i); rval++) { + if ((1 << rval) >= i) + break; + } + return(rval); +} + +kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */ +kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */ + +/* + * Allocate a dir-state structure. + * We don't put them on the stack since they're large. + */ +xfs_da_state_t * +xfs_da_state_alloc(void) +{ + return kmem_zone_zalloc(xfs_da_state_zone, KM_SLEEP); +} + +/* + * Kill the altpath contents of a da-state structure. + */ +void +xfs_da_state_kill_altpath(xfs_da_state_t *state) +{ + int i; + + for (i = 0; i < state->altpath.active; i++) { + if (state->altpath.blk[i].bp) { + if (state->altpath.blk[i].bp != state->path.blk[i].bp) + xfs_da_buf_done(state->altpath.blk[i].bp); + state->altpath.blk[i].bp = NULL; + } + } + state->altpath.active = 0; +} + +/* + * Free a da-state structure. 
+ */ +void +xfs_da_state_free(xfs_da_state_t *state) +{ + int i; + + xfs_da_state_kill_altpath(state); + for (i = 0; i < state->path.active; i++) { + if (state->path.blk[i].bp) + xfs_da_buf_done(state->path.blk[i].bp); + } + if (state->extravalid && state->extrablk.bp) + xfs_da_buf_done(state->extrablk.bp); +#ifdef DEBUG + memset((char *)state, 0, sizeof(*state)); +#endif /* DEBUG */ + kmem_zone_free(xfs_da_state_zone, state); +} + +#ifdef XFS_DABUF_DEBUG +xfs_dabuf_t *xfs_dabuf_global_list; +lock_t xfs_dabuf_global_lock; +#endif + +/* + * Create a dabuf. + */ +/* ARGSUSED */ +STATIC xfs_dabuf_t * +xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra) +{ + xfs_buf_t *bp; + xfs_dabuf_t *dabuf; + int i; + int off; + + if (nbuf == 1) + dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_SLEEP); + else + dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_SLEEP); + dabuf->dirty = 0; +#ifdef XFS_DABUF_DEBUG + dabuf->ra = ra; + dabuf->dev = XFS_BUF_TARGET_DEV(bps[0]); + dabuf->blkno = XFS_BUF_ADDR(bps[0]); +#endif + if (nbuf == 1) { + dabuf->nbuf = 1; + bp = bps[0]; + dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp)); + dabuf->data = XFS_BUF_PTR(bp); + dabuf->bps[0] = bp; + } else { + dabuf->nbuf = nbuf; + for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) { + dabuf->bps[i] = bp = bps[i]; + dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp)); + } + dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP); + for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) { + bp = bps[i]; + memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp), + XFS_BUF_COUNT(bp)); + } + } +#ifdef XFS_DABUF_DEBUG + { + SPLDECL(s); + xfs_dabuf_t *p; + + s = mutex_spinlock(&xfs_dabuf_global_lock); + for (p = xfs_dabuf_global_list; p; p = p->next) { + ASSERT(p->blkno != dabuf->blkno || + p->dev != dabuf->dev); + } + dabuf->prev = NULL; + if (xfs_dabuf_global_list) + xfs_dabuf_global_list->prev = dabuf; + dabuf->next = xfs_dabuf_global_list; + xfs_dabuf_global_list = dabuf; + mutex_spinunlock(&xfs_dabuf_global_lock, s); + } +#endif + return dabuf; +} + +/* + * Un-dirty a dabuf. + */ +STATIC void +xfs_da_buf_clean(xfs_dabuf_t *dabuf) +{ + xfs_buf_t *bp; + int i; + int off; + + if (dabuf->dirty) { + ASSERT(dabuf->nbuf > 1); + dabuf->dirty = 0; + for (i = off = 0; i < dabuf->nbuf; + i++, off += XFS_BUF_COUNT(bp)) { + bp = dabuf->bps[i]; + memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off, + XFS_BUF_COUNT(bp)); + } + } +} + +/* + * Release a dabuf. + */ +void +xfs_da_buf_done(xfs_dabuf_t *dabuf) +{ + ASSERT(dabuf); + ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); + if (dabuf->dirty) + xfs_da_buf_clean(dabuf); + if (dabuf->nbuf > 1) + kmem_free(dabuf->data, BBTOB(dabuf->bbcount)); +#ifdef XFS_DABUF_DEBUG + { + SPLDECL(s); + + s = mutex_spinlock(&xfs_dabuf_global_lock); + if (dabuf->prev) + dabuf->prev->next = dabuf->next; + else + xfs_dabuf_global_list = dabuf->next; + if (dabuf->next) + dabuf->next->prev = dabuf->prev; + mutex_spinunlock(&xfs_dabuf_global_lock, s); + } + memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf)); +#endif + if (dabuf->nbuf == 1) + kmem_zone_free(xfs_dabuf_zone, dabuf); + else + kmem_free(dabuf, XFS_DA_BUF_SIZE(dabuf->nbuf)); +} + +/* + * Log transaction from a dabuf. 
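+ * For a single-buffer dabuf the range is logged directly; for a
+ * multi-buffer dabuf the byte range [first, last] is clipped against
+ * each underlying buffer and logged piecewise.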
+ */ +void +xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last) +{ + xfs_buf_t *bp; + uint f; + int i; + uint l; + int off; + + ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); + if (dabuf->nbuf == 1) { + ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0])); + xfs_trans_log_buf(tp, dabuf->bps[0], first, last); + return; + } + dabuf->dirty = 1; + ASSERT(first <= last); + for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) { + bp = dabuf->bps[i]; + f = off; + l = f + XFS_BUF_COUNT(bp) - 1; + if (f < first) + f = first; + if (l > last) + l = last; + if (f <= l) + xfs_trans_log_buf(tp, bp, f - off, l - off); + /* + * B_DONE is set by xfs_trans_log buf. + * If we don't set it on a new buffer (get not read) + * then if we don't put anything in the buffer it won't + * be set, and at commit it it released into the cache, + * and then a read will fail. + */ + else if (!(XFS_BUF_ISDONE(bp))) + XFS_BUF_DONE(bp); + } + ASSERT(last < off); +} + +/* + * Release dabuf from a transaction. + * Have to free up the dabuf before the buffers are released, + * since the synchronization on the dabuf is really the lock on the buffer. + */ +void +xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf) +{ + xfs_buf_t *bp; + xfs_buf_t **bplist; + int i; + int nbuf; + + ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); + if ((nbuf = dabuf->nbuf) == 1) { + bplist = &bp; + bp = dabuf->bps[0]; + } else { + bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP); + memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist)); + } + xfs_da_buf_done(dabuf); + for (i = 0; i < nbuf; i++) + xfs_trans_brelse(tp, bplist[i]); + if (bplist != &bp) + kmem_free(bplist, nbuf * sizeof(*bplist)); +} + +/* + * Invalidate dabuf from a transaction. + */ +void +xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf) +{ + xfs_buf_t *bp; + xfs_buf_t **bplist; + int i; + int nbuf; + + ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); + if ((nbuf = dabuf->nbuf) == 1) { + bplist = &bp; + bp = dabuf->bps[0]; + } else { + bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP); + memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist)); + } + xfs_da_buf_done(dabuf); + for (i = 0; i < nbuf; i++) + xfs_trans_binval(tp, bplist[i]); + if (bplist != &bp) + kmem_free(bplist, nbuf * sizeof(*bplist)); +} + +/* + * Get the first daddr from a dabuf. + */ +xfs_daddr_t +xfs_da_blkno(xfs_dabuf_t *dabuf) +{ + ASSERT(dabuf->nbuf); + ASSERT(dabuf->data); + return XFS_BUF_ADDR(dabuf->bps[0]); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_da_btree.h linux.22-ac2/fs/xfs/xfs_da_btree.h --- linux.vanilla/fs/xfs/xfs_da_btree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_da_btree.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. 
Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DA_BTREE_H__ +#define __XFS_DA_BTREE_H__ + +struct xfs_buf; +struct xfs_bmap_free; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; +struct zone; + +/*======================================================================== + * Directory Structure when greater than XFS_LBSIZE(mp) bytes. + *========================================================================*/ + +/* + * This structure is common to both leaf nodes and non-leaf nodes in the Btree. + * + * Is is used to manage a doubly linked list of all blocks at the same + * level in the Btree, and to identify which type of block this is. + */ +#define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */ +#define XFS_DIR_LEAF_MAGIC 0xfeeb /* magic number: directory leaf blks */ +#define XFS_ATTR_LEAF_MAGIC 0xfbee /* magic number: attribute leaf blks */ +#define XFS_DIR2_LEAF1_MAGIC 0xd2f1 /* magic number: v2 dirlf single blks */ +#define XFS_DIR2_LEAFN_MAGIC 0xd2ff /* magic number: v2 dirlf multi blks */ + +#define XFS_DIRX_LEAF_MAGIC(mp) \ + (XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC) + +typedef struct xfs_da_blkinfo { + xfs_dablk_t forw; /* previous block in list */ + xfs_dablk_t back; /* following block in list */ + __uint16_t magic; /* validity check on block */ + __uint16_t pad; /* unused */ +} xfs_da_blkinfo_t; + +/* + * This is the structure of the root and intermediate nodes in the Btree. + * The leaf nodes are defined above. + * + * Entries are not packed. + * + * Since we have duplicate keys, use a binary search but always follow + * all match in the block, not just the first match found. + */ +#define XFS_DA_NODE_MAXDEPTH 5 /* max depth of Btree */ + +typedef struct xfs_da_intnode { + struct xfs_da_node_hdr { /* constant-structure header block */ + xfs_da_blkinfo_t info; /* block type, links, etc. */ + __uint16_t count; /* count of active entries */ + __uint16_t level; /* level above leaves (leaf == 0) */ + } hdr; + struct xfs_da_node_entry { + xfs_dahash_t hashval; /* hash value for this descendant */ + xfs_dablk_t before; /* Btree block before this key */ + } btree[1]; /* variable sized array of keys */ +} xfs_da_intnode_t; +typedef struct xfs_da_node_hdr xfs_da_node_hdr_t; +typedef struct xfs_da_node_entry xfs_da_node_entry_t; + +#define XFS_DA_MAXHASH ((xfs_dahash_t)-1) /* largest valid hash value */ + +/* + * Macros used by directory code to interface to the filesystem. 
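+ * XFS_LBSIZE is the filesystem block size in bytes and XFS_LBLOG its log2;
+ * both come straight from the superblock.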
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LBSIZE) +int xfs_lbsize(struct xfs_mount *mp); +#define XFS_LBSIZE(mp) xfs_lbsize(mp) +#else +#define XFS_LBSIZE(mp) ((mp)->m_sb.sb_blocksize) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LBLOG) +int xfs_lblog(struct xfs_mount *mp); +#define XFS_LBLOG(mp) xfs_lblog(mp) +#else +#define XFS_LBLOG(mp) ((mp)->m_sb.sb_blocklog) +#endif + +/* + * Macros used by directory code to interface to the kernel + */ + +/* + * Macros used to manipulate directory off_t's + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_MAKE_BNOENTRY) +__uint32_t xfs_da_make_bnoentry(struct xfs_mount *mp, xfs_dablk_t bno, + int entry); +#define XFS_DA_MAKE_BNOENTRY(mp,bno,entry) \ + xfs_da_make_bnoentry(mp,bno,entry) +#else +#define XFS_DA_MAKE_BNOENTRY(mp,bno,entry) \ + (((bno) << (mp)->m_dircook_elog) | (entry)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_MAKE_COOKIE) +xfs_off_t xfs_da_make_cookie(struct xfs_mount *mp, xfs_dablk_t bno, int entry, + xfs_dahash_t hash); +#define XFS_DA_MAKE_COOKIE(mp,bno,entry,hash) \ + xfs_da_make_cookie(mp,bno,entry,hash) +#else +#define XFS_DA_MAKE_COOKIE(mp,bno,entry,hash) \ + (((xfs_off_t)XFS_DA_MAKE_BNOENTRY(mp, bno, entry) << 32) | (hash)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_COOKIE_HASH) +xfs_dahash_t xfs_da_cookie_hash(struct xfs_mount *mp, xfs_off_t cookie); +#define XFS_DA_COOKIE_HASH(mp,cookie) xfs_da_cookie_hash(mp,cookie) +#else +#define XFS_DA_COOKIE_HASH(mp,cookie) ((xfs_dahash_t)(cookie)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_COOKIE_BNO) +xfs_dablk_t xfs_da_cookie_bno(struct xfs_mount *mp, xfs_off_t cookie); +#define XFS_DA_COOKIE_BNO(mp,cookie) xfs_da_cookie_bno(mp,cookie) +#else +#define XFS_DA_COOKIE_BNO(mp,cookie) \ + (((xfs_off_t)(cookie) >> 31) == -1LL ? \ + (xfs_dablk_t)0 : \ + (xfs_dablk_t)((xfs_off_t)(cookie) >> ((mp)->m_dircook_elog + 32))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DA_COOKIE_ENTRY) +int xfs_da_cookie_entry(struct xfs_mount *mp, xfs_off_t cookie); +#define XFS_DA_COOKIE_ENTRY(mp,cookie) xfs_da_cookie_entry(mp,cookie) +#else +#define XFS_DA_COOKIE_ENTRY(mp,cookie) \ + (((xfs_off_t)(cookie) >> 31) == -1LL ? \ + (xfs_dablk_t)0 : \ + (xfs_dablk_t)(((xfs_off_t)(cookie) >> 32) & \ + ((1 << (mp)->m_dircook_elog) - 1))) +#endif + + +/*======================================================================== + * Btree searching and modification structure definitions. + *========================================================================*/ + +/* + * Structure to ease passing around component names. 
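+ * Shared by the directory and the attribute code: name/namelen/hashval
+ * identify the entry being operated on, dp is the inode, and trans,
+ * firstblock, flist and total carry the transaction and block-allocation
+ * context for the bmap calls.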
+ */ +typedef struct xfs_da_args { + uchar_t *name; /* string (maybe not NULL terminated) */ + int namelen; /* length of string (maybe no NULL) */ + uchar_t *value; /* set of bytes (maybe contain NULLs) */ + int valuelen; /* length of value */ + int flags; /* argument flags (eg: ATTR_NOCREATE) */ + xfs_dahash_t hashval; /* hash value of name */ + xfs_ino_t inumber; /* input/output inode number */ + struct xfs_inode *dp; /* directory inode to manipulate */ + xfs_fsblock_t *firstblock; /* ptr to firstblock for bmap calls */ + struct xfs_bmap_free *flist; /* ptr to freelist for bmap_finish */ + struct xfs_trans *trans; /* current trans (changes over time) */ + xfs_extlen_t total; /* total blocks needed, for 1st bmap */ + int whichfork; /* data or attribute fork */ + xfs_dablk_t blkno; /* blkno of attr leaf of interest */ + int index; /* index of attr of interest in blk */ + xfs_dablk_t rmtblkno; /* remote attr value starting blkno */ + int rmtblkcnt; /* remote attr value block count */ + int rename; /* T/F: this is an atomic rename op */ + xfs_dablk_t blkno2; /* blkno of 2nd attr leaf of interest */ + int index2; /* index of 2nd attr in blk */ + xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */ + int rmtblkcnt2; /* remote attr value block count */ + int justcheck; /* check for ok with no space */ + int addname; /* T/F: this is an add operation */ + int oknoent; /* T/F: ok to return ENOENT, else die */ +} xfs_da_args_t; + +/* + * Structure to describe buffer(s) for a block. + * This is needed in the directory version 2 format case, when + * multiple non-contiguous fsblocks might be needed to cover one + * logical directory block. + * If the buffer count is 1 then the data pointer points to the + * same place as the b_addr field for the buffer, else to kmem_alloced memory. + */ +typedef struct xfs_dabuf { + int nbuf; /* number of buffer pointers present */ + short dirty; /* data needs to be copied back */ + short bbcount; /* how large is data in bbs */ + void *data; /* pointer for buffers' data */ +#ifdef XFS_DABUF_DEBUG + inst_t *ra; /* return address of caller to make */ + struct xfs_dabuf *next; /* next in global chain */ + struct xfs_dabuf *prev; /* previous in global chain */ + dev_t dev; /* device for buffer */ + xfs_daddr_t blkno; /* daddr first in bps[0] */ +#endif + struct xfs_buf *bps[1]; /* actually nbuf of these */ +} xfs_dabuf_t; +#define XFS_DA_BUF_SIZE(n) \ + (sizeof(xfs_dabuf_t) + sizeof(struct xfs_buf *) * ((n) - 1)) + +#ifdef XFS_DABUF_DEBUG +extern xfs_dabuf_t *xfs_dabuf_global_list; +#endif + +/* + * Storage for holding state during Btree searches and split/join ops. + * + * Only need space for 5 intermediate nodes. With a minimum of 62-way + * fanout to the Btree, we can support over 900 million directory blocks, + * which is slightly more than enough. 
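+ * (A 62-way fanout over five levels gives 62^5, about 916 million, blocks.)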
+ */ +typedef struct xfs_da_state_blk { + xfs_dabuf_t *bp; /* buffer containing block */ + xfs_dablk_t blkno; /* filesystem blkno of buffer */ + xfs_daddr_t disk_blkno; /* on-disk blkno (in BBs) of buffer */ + int index; /* relevant index into block */ + xfs_dahash_t hashval; /* last hash value in block */ + int magic; /* blk's magic number, ie: blk type */ +} xfs_da_state_blk_t; + +typedef struct xfs_da_state_path { + int active; /* number of active levels */ + xfs_da_state_blk_t blk[XFS_DA_NODE_MAXDEPTH]; +} xfs_da_state_path_t; + +typedef struct xfs_da_state { + xfs_da_args_t *args; /* filename arguments */ + struct xfs_mount *mp; /* filesystem mount point */ + unsigned int blocksize; /* logical block size */ + unsigned int node_ents; /* how many entries in danode */ + xfs_da_state_path_t path; /* search/split paths */ + xfs_da_state_path_t altpath; /* alternate path for join */ + unsigned int inleaf : 1; /* insert into 1->lf, 0->splf */ + unsigned int extravalid : 1; /* T/F: extrablk is in use */ + unsigned int extraafter : 1; /* T/F: extrablk is after new */ + xfs_da_state_blk_t extrablk; /* for double-splits on leafs */ + /* for dirv2 extrablk is data */ +} xfs_da_state_t; + +/* + * Utility macros to aid in logging changed structure fields. + */ +#define XFS_DA_LOGOFF(BASE, ADDR) ((char *)(ADDR) - (char *)(BASE)) +#define XFS_DA_LOGRANGE(BASE, ADDR, SIZE) \ + (uint)(XFS_DA_LOGOFF(BASE, ADDR)), \ + (uint)(XFS_DA_LOGOFF(BASE, ADDR)+(SIZE)-1) + + +#ifdef __KERNEL__ +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Routines used for growing the Btree. + */ +int xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, + xfs_dabuf_t **bpp, int whichfork); +int xfs_da_split(xfs_da_state_t *state); + +/* + * Routines used for shrinking the Btree. + */ +int xfs_da_join(xfs_da_state_t *state); +void xfs_da_fixhashpath(xfs_da_state_t *state, + xfs_da_state_path_t *path_to_to_fix); + +/* + * Routines used for finding things in the Btree. + */ +int xfs_da_node_lookup_int(xfs_da_state_t *state, int *result); +int xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, + int forward, int release, int *result); +/* + * Utility routines. + */ +int xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk); +int xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, + xfs_da_state_blk_t *new_blk); + +/* + * Utility routines. 
+ */ +int xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno); +int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp, + xfs_dablk_t bno, xfs_daddr_t mappedbno, + xfs_dabuf_t **bp, int whichfork); +int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp, + xfs_dablk_t bno, xfs_daddr_t mappedbno, + xfs_dabuf_t **bpp, int whichfork); +xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp, + xfs_dablk_t bno, int whichfork); +int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, + xfs_dabuf_t *dead_buf); + +uint xfs_da_hashname(uchar_t *name_string, int name_length); +uint xfs_da_log2_roundup(uint i); +xfs_da_state_t *xfs_da_state_alloc(void); +void xfs_da_state_free(xfs_da_state_t *state); +void xfs_da_state_kill_altpath(xfs_da_state_t *state); + +void xfs_da_buf_done(xfs_dabuf_t *dabuf); +void xfs_da_log_buf(struct xfs_trans *tp, xfs_dabuf_t *dabuf, uint first, + uint last); +void xfs_da_brelse(struct xfs_trans *tp, xfs_dabuf_t *dabuf); +void xfs_da_binval(struct xfs_trans *tp, xfs_dabuf_t *dabuf); +xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf); + +extern struct kmem_zone *xfs_da_state_zone; +#endif /* __KERNEL__ */ + +#endif /* __XFS_DA_BTREE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dfrag.c linux.22-ac2/fs/xfs/xfs_dfrag.c --- linux.vanilla/fs/xfs/xfs_dfrag.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dfrag.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,385 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_ag.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_ialloc.h" +#include "xfs_itable.h" +#include "xfs_dfrag.h" +#include "xfs_error.h" +#include "xfs_mac.h" +#include "xfs_rw.h" + +/* + * Syssgi interface for swapext + */ +int +xfs_swapext( + xfs_swapext_t *sxp) +{ + xfs_swapext_t sx; + xfs_inode_t *ip=NULL, *tip=NULL, *ips[2]; + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_bstat_t *sbp; + struct file *fp = NULL, *tfp = NULL; + vnode_t *vp, *tvp; + bhv_desc_t *bdp, *tbdp; + vn_bhv_head_t *bhp, *tbhp; + uint lock_flags=0; + int ilf_fields, tilf_fields; + int error = 0; + xfs_ifork_t tempif, *ifp, *tifp; + __uint64_t tmp; + int aforkblks = 0; + int taforkblks = 0; + int locked = 0; + + if (copy_from_user(&sx, sxp, sizeof(sx))) + return XFS_ERROR(EFAULT); + + /* Pull information for the target fd */ + if (((fp = fget((int)sx.sx_fdtarget)) == NULL) || + ((vp = LINVFS_GET_VP(fp->f_dentry->d_inode)) == NULL)) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + bhp = VN_BHV_HEAD(vp); + bdp = vn_bhv_lookup(bhp, &xfs_vnodeops); + if (bdp == NULL) { + error = XFS_ERROR(EBADF); + goto error0; + } else { + ip = XFS_BHVTOI(bdp); + } + + if (((tfp = fget((int)sx.sx_fdtmp)) == NULL) || + ((tvp = LINVFS_GET_VP(tfp->f_dentry->d_inode)) == NULL)) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + tbhp = VN_BHV_HEAD(tvp); + tbdp = vn_bhv_lookup(tbhp, &xfs_vnodeops); + if (tbdp == NULL) { + error = XFS_ERROR(EBADF); + goto error0; + } else { + tip = XFS_BHVTOI(tbdp); + } + + if (ip->i_ino == tip->i_ino) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + mp = ip->i_mount; + + sbp = &sx.sx_stat; + + if (XFS_FORCED_SHUTDOWN(mp)) { + error = XFS_ERROR(EIO); + goto error0; + } + + locked = 1; + + /* Lock in i_ino order */ + if (ip->i_ino < tip->i_ino) { + ips[0] = ip; + ips[1] = tip; + } else { + ips[0] = tip; + ips[1] = ip; + } + lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL; + xfs_lock_inodes(ips, 2, 0, lock_flags); + + /* Check permissions */ + if ((error = _MAC_XFS_IACCESS(ip, MACWRITE, NULL))) { + goto error0; + } + if ((error = _MAC_XFS_IACCESS(tip, MACWRITE, NULL))) { + goto error0; + } + if ((current->fsuid != ip->i_d.di_uid) && + (error = xfs_iaccess(ip, IWRITE, NULL)) && + !capable_cred(NULL, CAP_FOWNER)) { + goto error0; + } + if ((current->fsuid != tip->i_d.di_uid) && + (error = xfs_iaccess(tip, IWRITE, NULL)) && + !capable_cred(NULL, CAP_FOWNER)) { + goto error0; + } + + /* Verify both files are either real-time or non-realtime */ + if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != + (tip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + /* Should never get a local format */ + if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || + tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + 
error = XFS_ERROR(EINVAL); + goto error0; + } + + if (VN_CACHED(tvp) != 0) + xfs_inval_cached_pages(XFS_ITOV(tip), &(tip->i_iocore), + (loff_t)0, 0, 0); + + /* Verify O_DIRECT for ftmp */ + if (VN_CACHED(tvp) != 0) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + /* Verify all data are being swapped */ + if (sx.sx_offset != 0 || + sx.sx_length != ip->i_d.di_size || + sx.sx_length != tip->i_d.di_size) { + error = XFS_ERROR(EFAULT); + goto error0; + } + + /* + * If the target has extended attributes, the tmp file + * must also in order to ensure the correct data fork + * format. + */ + if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) { + error = XFS_ERROR(EINVAL); + goto error0; + } + + /* + * Compare the current change & modify times with that + * passed in. If they differ, we abort this swap. + * This is the mechanism used to ensure the calling + * process that the file was not changed out from + * under it. + */ + if ((sbp->bs_ctime.tv_sec != ip->i_d.di_ctime.t_sec) || + (sbp->bs_ctime.tv_nsec != ip->i_d.di_ctime.t_nsec) || + (sbp->bs_mtime.tv_sec != ip->i_d.di_mtime.t_sec) || + (sbp->bs_mtime.tv_nsec != ip->i_d.di_mtime.t_nsec)) { + error = XFS_ERROR(EBUSY); + goto error0; + } + + /* We need to fail if the file is memory mapped. Once we have tossed + * all existing pages, the page fault will have no option + * but to go to the filesystem for pages. By making the page fault call + * VOP_READ (or write in the case of autogrow) they block on the iolock + * until we have switched the extents. + */ + if (VN_MAPPED(vp)) { + error = XFS_ERROR(EBUSY); + goto error0; + } + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_iunlock(tip, XFS_ILOCK_EXCL); + + /* + * There is a race condition here since we gave up the + * ilock. However, the data fork will not change since + * we have the iolock (locked for truncation too) so we + * are safe. We don't really care if non-io related + * fields change. 
+ */ + + VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF); + + tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT); + if ((error = xfs_trans_reserve(tp, 0, + XFS_ICHANGE_LOG_RES(mp), 0, + 0, 0))) { + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + xfs_iunlock(tip, XFS_IOLOCK_EXCL); + xfs_trans_cancel(tp, 0); + return error; + } + xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); + + /* + * Count the number of extended attribute blocks + */ + if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) && + (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) { + error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks); + if (error) { + xfs_iunlock(ip, lock_flags); + xfs_iunlock(tip, lock_flags); + xfs_trans_cancel(tp, 0); + return error; + } + } + if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) && + (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) { + error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, + &taforkblks); + if (error) { + xfs_iunlock(ip, lock_flags); + xfs_iunlock(tip, lock_flags); + xfs_trans_cancel(tp, 0); + return error; + } + } + + /* + * Swap the data forks of the inodes + */ + ifp = &ip->i_df; + tifp = &tip->i_df; + tempif = *ifp; /* struct copy */ + *ifp = *tifp; /* struct copy */ + *tifp = tempif; /* struct copy */ + + /* + * Fix the on-disk inode values + */ + tmp = (__uint64_t)ip->i_d.di_nblocks; + ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks; + tip->i_d.di_nblocks = tmp + taforkblks - aforkblks; + + tmp = (__uint64_t) ip->i_d.di_nextents; + ip->i_d.di_nextents = tip->i_d.di_nextents; + tip->i_d.di_nextents = tmp; + + tmp = (__uint64_t) ip->i_d.di_format; + ip->i_d.di_format = tip->i_d.di_format; + tip->i_d.di_format = tmp; + + ilf_fields = XFS_ILOG_CORE; + + switch(ip->i_d.di_format) { + case XFS_DINODE_FMT_EXTENTS: + /* If the extents fit in the inode, fix the + * pointer. Otherwise it's already NULL or + * pointing to the extent. + */ + if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) { + ifp->if_u1.if_extents = + ifp->if_u2.if_inline_ext; + } + ilf_fields |= XFS_ILOG_DEXT; + break; + case XFS_DINODE_FMT_BTREE: + ilf_fields |= XFS_ILOG_DBROOT; + break; + } + + tilf_fields = XFS_ILOG_CORE; + + switch(tip->i_d.di_format) { + case XFS_DINODE_FMT_EXTENTS: + /* If the extents fit in the inode, fix the + * pointer. Otherwise it's already NULL or + * pointing to the extent. + */ + if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) { + tifp->if_u1.if_extents = + tifp->if_u2.if_inline_ext; + } + tilf_fields |= XFS_ILOG_DEXT; + break; + case XFS_DINODE_FMT_BTREE: + tilf_fields |= XFS_ILOG_DBROOT; + break; + } + + /* + * Increment vnode ref counts since xfs_trans_commit & + * xfs_trans_cancel will both unlock the inodes and + * decrement the associated ref counts. + */ + VN_HOLD(vp); + VN_HOLD(tvp); + + xfs_trans_ijoin(tp, ip, lock_flags); + xfs_trans_ijoin(tp, tip, lock_flags); + + xfs_trans_log_inode(tp, ip, ilf_fields); + xfs_trans_log_inode(tp, tip, tilf_fields); + + /* + * If this is a synchronous mount, make sure that the + * transaction goes to disk before returning to the user. 
+ */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT, NULL); + + fput(fp); + fput(tfp); + + return error; + + error0: + if (locked) { + xfs_iunlock(ip, lock_flags); + xfs_iunlock(tip, lock_flags); + } + + if (fp != NULL) fput(fp); + if (tfp != NULL) fput(tfp); + + return error; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dfrag.h linux.22-ac2/fs/xfs/xfs_dfrag.h --- linux.vanilla/fs/xfs/xfs_dfrag.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dfrag.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DFRAG_H__ +#define __XFS_DFRAG_H__ + +/* + * Structure passed to xfs_swapext + */ + +typedef struct xfs_swapext +{ + __int64_t sx_version; /* version */ + __int64_t sx_fdtarget; /* fd of target file */ + __int64_t sx_fdtmp; /* fd of tmp file */ + xfs_off_t sx_offset; /* offset into file */ + xfs_off_t sx_length; /* leng from offset */ + char sx_pad[16]; /* pad space, unused */ + xfs_bstat_t sx_stat; /* stat of target b4 copy */ +} xfs_swapext_t; + +/* + * Version flag + */ +#define XFS_SX_VERSION 0 + +#ifdef __KERNEL__ +/* + * Prototypes for visible xfs_dfrag.c routines. + */ + +/* + * Syscall interface for xfs_swapext + */ +int xfs_swapext(struct xfs_swapext *sx); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_DFRAG_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dinode.h linux.22-ac2/fs/xfs/xfs_dinode.h --- linux.vanilla/fs/xfs/xfs_dinode.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dinode.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DINODE_H__ +#define __XFS_DINODE_H__ + +struct xfs_buf; +struct xfs_mount; + +#define XFS_DINODE_VERSION_1 1 +#define XFS_DINODE_VERSION_2 2 +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DINODE_GOOD_VERSION) +int xfs_dinode_good_version(int v); +#define XFS_DINODE_GOOD_VERSION(v) xfs_dinode_good_version(v) +#else +#define XFS_DINODE_GOOD_VERSION(v) (((v) == XFS_DINODE_VERSION_1) || \ + ((v) == XFS_DINODE_VERSION_2)) +#endif +#define XFS_DINODE_MAGIC 0x494e /* 'IN' */ + +/* + * Disk inode structure. + * This is just the header; the inode is expanded to fill a variable size + * with the last field expanding. It is split into the core and "other" + * because we only need the core part in the in-core inode. + */ +typedef struct xfs_timestamp { + __int32_t t_sec; /* timestamp seconds */ + __int32_t t_nsec; /* timestamp nanoseconds */ +} xfs_timestamp_t; + +/* + * Note: Coordinate changes to this structure with the XFS_DI_* #defines + * below and the offsets table in xfs_ialloc_log_di(). + */ +typedef struct xfs_dinode_core +{ + __uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */ + __uint16_t di_mode; /* mode and type of file */ + __int8_t di_version; /* inode version */ + __int8_t di_format; /* format of di_c data */ + __uint16_t di_onlink; /* old number of links to file */ + __uint32_t di_uid; /* owner's user id */ + __uint32_t di_gid; /* owner's group id */ + __uint32_t di_nlink; /* number of links to file */ + __uint16_t di_projid; /* owner's project id */ + __uint8_t di_pad[10]; /* unused, zeroed space */ + xfs_timestamp_t di_atime; /* time last accessed */ + xfs_timestamp_t di_mtime; /* time last modified */ + xfs_timestamp_t di_ctime; /* time created/inode modified */ + xfs_fsize_t di_size; /* number of bytes in file */ + xfs_drfsbno_t di_nblocks; /* # of direct & btree blocks used */ + xfs_extlen_t di_extsize; /* basic/minimum extent size for file */ + xfs_extnum_t di_nextents; /* number of extents in data fork */ + xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/ + __uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */ + __int8_t di_aformat; /* format of attr fork's data */ + __uint32_t di_dmevmask; /* DMIG event mask */ + __uint16_t di_dmstate; /* DMIG state info */ + __uint16_t di_flags; /* random flags, XFS_DIFLAG_... */ + __uint32_t di_gen; /* generation number */ +} xfs_dinode_core_t; + +typedef struct xfs_dinode +{ + xfs_dinode_core_t di_core; + /* + * In adding anything between the core and the union, be + * sure to update the macros like XFS_LITINO below and + * XFS_BMAP_RBLOCK_DSIZE in xfs_bmap_btree.h. 
+ */ + xfs_agino_t di_next_unlinked;/* agi unlinked list ptr */ + union { + xfs_bmdr_block_t di_bmbt; /* btree root block */ + xfs_bmbt_rec_32_t di_bmx[1]; /* extent list */ + xfs_dir_shortform_t di_dirsf; /* shortform directory */ + xfs_dir2_sf_t di_dir2sf; /* shortform directory v2 */ + char di_c[1]; /* local contents */ + xfs_dev_t di_dev; /* device for IFCHR/IFBLK */ + uuid_t di_muuid; /* mount point value */ + char di_symlink[1]; /* local symbolic link */ + } di_u; + union { + xfs_bmdr_block_t di_abmbt; /* btree root block */ + xfs_bmbt_rec_32_t di_abmx[1]; /* extent list */ + xfs_attr_shortform_t di_attrsf; /* shortform attribute list */ + } di_a; +} xfs_dinode_t; + +/* + * The 32 bit link count in the inode theoretically maxes out at UINT_MAX. + * Since the pathconf interface is signed, we use 2^31 - 1 instead. + * The old inode format had a 16 bit link count, so its maximum is USHRT_MAX. + */ +#define XFS_MAXLINK ((1U << 31) - 1U) +#define XFS_MAXLINK_1 65535U + +/* + * Bit names for logging disk inodes only + */ +#define XFS_DI_MAGIC 0x0000001 +#define XFS_DI_MODE 0x0000002 +#define XFS_DI_VERSION 0x0000004 +#define XFS_DI_FORMAT 0x0000008 +#define XFS_DI_ONLINK 0x0000010 +#define XFS_DI_UID 0x0000020 +#define XFS_DI_GID 0x0000040 +#define XFS_DI_NLINK 0x0000080 +#define XFS_DI_PROJID 0x0000100 +#define XFS_DI_PAD 0x0000200 +#define XFS_DI_ATIME 0x0000400 +#define XFS_DI_MTIME 0x0000800 +#define XFS_DI_CTIME 0x0001000 +#define XFS_DI_SIZE 0x0002000 +#define XFS_DI_NBLOCKS 0x0004000 +#define XFS_DI_EXTSIZE 0x0008000 +#define XFS_DI_NEXTENTS 0x0010000 +#define XFS_DI_NAEXTENTS 0x0020000 +#define XFS_DI_FORKOFF 0x0040000 +#define XFS_DI_AFORMAT 0x0080000 +#define XFS_DI_DMEVMASK 0x0100000 +#define XFS_DI_DMSTATE 0x0200000 +#define XFS_DI_FLAGS 0x0400000 +#define XFS_DI_GEN 0x0800000 +#define XFS_DI_NEXT_UNLINKED 0x1000000 +#define XFS_DI_U 0x2000000 +#define XFS_DI_A 0x4000000 +#define XFS_DI_NUM_BITS 27 +#define XFS_DI_ALL_BITS ((1 << XFS_DI_NUM_BITS) - 1) +#define XFS_DI_CORE_BITS (XFS_DI_ALL_BITS & ~(XFS_DI_U|XFS_DI_A)) + +/* + * Values for di_format + */ +typedef enum xfs_dinode_fmt +{ + XFS_DINODE_FMT_DEV, /* CHR, BLK: di_dev */ + XFS_DINODE_FMT_LOCAL, /* DIR, REG: di_c */ + /* LNK: di_symlink */ + XFS_DINODE_FMT_EXTENTS, /* DIR, REG, LNK: di_bmx */ + XFS_DINODE_FMT_BTREE, /* DIR, REG, LNK: di_bmbt */ + XFS_DINODE_FMT_UUID /* MNT: di_uuid */ +} xfs_dinode_fmt_t; + +/* + * Inode minimum and maximum sizes. + */ +#define XFS_DINODE_MIN_LOG 8 +#define XFS_DINODE_MAX_LOG 11 +#define XFS_DINODE_MIN_SIZE (1 << XFS_DINODE_MIN_LOG) +#define XFS_DINODE_MAX_SIZE (1 << XFS_DINODE_MAX_LOG) + +/* + * Inode size for given fs. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LITINO) +int xfs_litino(struct xfs_mount *mp); +#define XFS_LITINO(mp) xfs_litino(mp) +#else +#define XFS_LITINO(mp) ((mp)->m_litino) +#endif +#define XFS_BROOT_SIZE_ADJ \ + (sizeof(xfs_bmbt_block_t) - sizeof(xfs_bmdr_block_t)) + +/* + * Fork identifiers. Here so utilities can use them without including + * xfs_inode.h. + */ +#define XFS_DATA_FORK 0 +#define XFS_ATTR_FORK 1 + +/* + * Inode data & attribute fork sizes, per inode. 
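+ * di_forkoff is stored in 8-byte units (hence the << 3 below).  When it is
+ * zero there is no attribute fork and the data fork gets the whole literal
+ * area, XFS_LITINO(mp) bytes; when it is non-zero the data fork gets the
+ * first di_forkoff << 3 bytes and the attribute fork the remainder, so a
+ * forkoff of 15, for example, starts the attribute fork 120 bytes in.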
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_Q) +int xfs_cfork_q_arch(xfs_dinode_core_t *dcp, xfs_arch_t arch); +int xfs_cfork_q(xfs_dinode_core_t *dcp); +#define XFS_CFORK_Q_ARCH(dcp,arch) xfs_cfork_q_arch(dcp,arch) +#define XFS_CFORK_Q(dcp) xfs_cfork_q(dcp) +#else +#define XFS_CFORK_Q_ARCH(dcp,arch) (!INT_ISZERO((dcp)->di_forkoff, arch)) +#define XFS_CFORK_Q(dcp) ((dcp)->di_forkoff != 0) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_BOFF) +int xfs_cfork_boff_arch(xfs_dinode_core_t *dcp, xfs_arch_t arch); +int xfs_cfork_boff(xfs_dinode_core_t *dcp); +#define XFS_CFORK_BOFF_ARCH(dcp,arch) xfs_cfork_boff_arch(dcp,arch) +#define XFS_CFORK_BOFF(dcp) xfs_cfork_boff(dcp) +#else +#define XFS_CFORK_BOFF_ARCH(dcp,arch) ((int)(INT_GET((dcp)->di_forkoff, arch) << 3)) +#define XFS_CFORK_BOFF(dcp) ((int)((dcp)->di_forkoff << 3)) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_DSIZE) +int xfs_cfork_dsize_arch(xfs_dinode_core_t *dcp, struct xfs_mount *mp, xfs_arch_t arch); +int xfs_cfork_dsize(xfs_dinode_core_t *dcp, struct xfs_mount *mp); +#define XFS_CFORK_DSIZE_ARCH(dcp,mp,arch) xfs_cfork_dsize_arch(dcp,mp,arch) +#define XFS_CFORK_DSIZE(dcp,mp) xfs_cfork_dsize(dcp,mp) +#else +#define XFS_CFORK_DSIZE_ARCH(dcp,mp,arch) \ + (XFS_CFORK_Q_ARCH(dcp, arch) ? XFS_CFORK_BOFF_ARCH(dcp, arch) : XFS_LITINO(mp)) +#define XFS_CFORK_DSIZE(dcp,mp) \ + (XFS_CFORK_Q(dcp) ? XFS_CFORK_BOFF(dcp) : XFS_LITINO(mp)) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_ASIZE) +int xfs_cfork_asize_arch(xfs_dinode_core_t *dcp, struct xfs_mount *mp, xfs_arch_t arch); +int xfs_cfork_asize(xfs_dinode_core_t *dcp, struct xfs_mount *mp); +#define XFS_CFORK_ASIZE_ARCH(dcp,mp,arch) xfs_cfork_asize_arch(dcp,mp,arch) +#define XFS_CFORK_ASIZE(dcp,mp) xfs_cfork_asize(dcp,mp) +#else +#define XFS_CFORK_ASIZE_ARCH(dcp,mp,arch) \ + (XFS_CFORK_Q_ARCH(dcp, arch) ? XFS_LITINO(mp) - XFS_CFORK_BOFF_ARCH(dcp, arch) : 0) +#define XFS_CFORK_ASIZE(dcp,mp) \ + (XFS_CFORK_Q(dcp) ? XFS_LITINO(mp) - XFS_CFORK_BOFF(dcp) : 0) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_SIZE) +int xfs_cfork_size_arch(xfs_dinode_core_t *dcp, struct xfs_mount *mp, int w, xfs_arch_t arch); +int xfs_cfork_size(xfs_dinode_core_t *dcp, struct xfs_mount *mp, int w); +#define XFS_CFORK_SIZE_ARCH(dcp,mp,w,arch) xfs_cfork_size_arch(dcp,mp,w,arch) +#define XFS_CFORK_SIZE(dcp,mp,w) xfs_cfork_size(dcp,mp,w) +#else +#define XFS_CFORK_SIZE_ARCH(dcp,mp,w,arch) \ + ((w) == XFS_DATA_FORK ? \ + XFS_CFORK_DSIZE_ARCH(dcp, mp, arch) : XFS_CFORK_ASIZE_ARCH(dcp, mp, arch)) +#define XFS_CFORK_SIZE(dcp,mp,w) \ + ((w) == XFS_DATA_FORK ? 
\ + XFS_CFORK_DSIZE(dcp, mp) : XFS_CFORK_ASIZE(dcp, mp)) + +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_DSIZE) +int xfs_dfork_dsize_arch(xfs_dinode_t *dip, struct xfs_mount *mp, xfs_arch_t arch); +int xfs_dfork_dsize(xfs_dinode_t *dip, struct xfs_mount *mp); +#define XFS_DFORK_DSIZE_ARCH(dip,mp,arch) xfs_dfork_dsize_arch(dip,mp,arch) +#define XFS_DFORK_DSIZE(dip,mp) xfs_dfork_dsize(dip,mp) +#else +#define XFS_DFORK_DSIZE_ARCH(dip,mp,arch) XFS_CFORK_DSIZE_ARCH(&(dip)->di_core, mp, arch) +#define XFS_DFORK_DSIZE(dip,mp) XFS_DFORK_DSIZE_ARCH(dip,mp,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_ASIZE) +int xfs_dfork_asize_arch(xfs_dinode_t *dip, struct xfs_mount *mp, xfs_arch_t arch); +int xfs_dfork_asize(xfs_dinode_t *dip, struct xfs_mount *mp); +#define XFS_DFORK_ASIZE_ARCH(dip,mp,arch) xfs_dfork_asize_arch(dip,mp,arch) +#define XFS_DFORK_ASIZE(dip,mp) xfs_dfork_asize(dip,mp) +#else +#define XFS_DFORK_ASIZE_ARCH(dip,mp,arch) XFS_CFORK_ASIZE_ARCH(&(dip)->di_core, mp, arch) +#define XFS_DFORK_ASIZE(dip,mp) XFS_DFORK_ASIZE_ARCH(dip,mp,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_SIZE) +int xfs_dfork_size_arch(xfs_dinode_t *dip, struct xfs_mount *mp, int w, xfs_arch_t arch); +int xfs_dfork_size(xfs_dinode_t *dip, struct xfs_mount *mp, int w); +#define XFS_DFORK_SIZE_ARCH(dip,mp,w,arch) xfs_dfork_size_arch(dip,mp,w,arch) +#define XFS_DFORK_SIZE(dip,mp,w) xfs_dfork_size(dip,mp,w) +#else +#define XFS_DFORK_SIZE_ARCH(dip,mp,w,arch) XFS_CFORK_SIZE_ARCH(&(dip)->di_core, mp, w, arch) +#define XFS_DFORK_SIZE(dip,mp,w) XFS_DFORK_SIZE_ARCH(dip,mp,w,ARCH_NOCONVERT) + +#endif + +/* + * Macros for accessing per-fork disk inode information. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_Q) +int xfs_dfork_q_arch(xfs_dinode_t *dip, xfs_arch_t arch); +int xfs_dfork_q(xfs_dinode_t *dip); +#define XFS_DFORK_Q_ARCH(dip,arch) xfs_dfork_q_arch(dip,arch) +#define XFS_DFORK_Q(dip) xfs_dfork_q(dip) +#else +#define XFS_DFORK_Q_ARCH(dip,arch) XFS_CFORK_Q_ARCH(&(dip)->di_core, arch) +#define XFS_DFORK_Q(dip) XFS_DFORK_Q_ARCH(dip,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_BOFF) +int xfs_dfork_boff_arch(xfs_dinode_t *dip, xfs_arch_t arch); +int xfs_dfork_boff(xfs_dinode_t *dip); +#define XFS_DFORK_BOFF_ARCH(dip,arch) xfs_dfork_boff_arch(dip,arch) +#define XFS_DFORK_BOFF(dip) xfs_dfork_boff(dip) +#else +#define XFS_DFORK_BOFF_ARCH(dip,arch) XFS_CFORK_BOFF_ARCH(&(dip)->di_core, arch) +#define XFS_DFORK_BOFF(dip) XFS_DFORK_BOFF_ARCH(dip,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_DPTR) +char *xfs_dfork_dptr_arch(xfs_dinode_t *dip, xfs_arch_t arch); +char *xfs_dfork_dptr(xfs_dinode_t *dip); +#define XFS_DFORK_DPTR_ARCH(dip,arch) xfs_dfork_dptr_arch(dip,arch) +#define XFS_DFORK_DPTR(dip) xfs_dfork_dptr(dip) +#else +#define XFS_DFORK_DPTR_ARCH(dip,arch) ((dip)->di_u.di_c) +#define XFS_DFORK_DPTR(dip) XFS_DFORK_DPTR_ARCH(dip,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_APTR) +char *xfs_dfork_aptr_arch(xfs_dinode_t *dip, xfs_arch_t arch); +char *xfs_dfork_aptr(xfs_dinode_t *dip); +#define XFS_DFORK_APTR_ARCH(dip,arch) xfs_dfork_aptr_arch(dip,arch) +#define XFS_DFORK_APTR(dip) xfs_dfork_aptr(dip) +#else +#define XFS_DFORK_APTR_ARCH(dip,arch) ((dip)->di_u.di_c + XFS_DFORK_BOFF_ARCH(dip, arch)) +#define XFS_DFORK_APTR(dip) XFS_DFORK_APTR_ARCH(dip,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || 
(XFS_WANT_SPACE && XFSSO_XFS_DFORK_PTR) +char *xfs_dfork_ptr_arch(xfs_dinode_t *dip, int w, xfs_arch_t arch); +char *xfs_dfork_ptr(xfs_dinode_t *dip, int w); +#define XFS_DFORK_PTR_ARCH(dip,w,arch) xfs_dfork_ptr_arch(dip,w,arch) +#define XFS_DFORK_PTR(dip,w) xfs_dfork_ptr(dip,w) +#else +#define XFS_DFORK_PTR_ARCH(dip,w,arch) \ + ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR_ARCH(dip, arch) : XFS_DFORK_APTR_ARCH(dip, arch)) +#define XFS_DFORK_PTR(dip,w) XFS_DFORK_PTR_ARCH(dip,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_FORMAT) +int xfs_cfork_format_arch(xfs_dinode_core_t *dcp, int w, xfs_arch_t arch); +int xfs_cfork_format(xfs_dinode_core_t *dcp, int w); +#define XFS_CFORK_FORMAT_ARCH(dcp,w,arch) xfs_cfork_format_arch(dcp,w,arch) +#define XFS_CFORK_FORMAT(dcp,w) xfs_cfork_format(dcp,w) +#else +#define XFS_CFORK_FORMAT_ARCH(dcp,w,arch) \ + ((w) == XFS_DATA_FORK ? INT_GET((dcp)->di_format, arch) : INT_GET((dcp)->di_aformat, arch)) +#define XFS_CFORK_FORMAT(dcp,w) XFS_CFORK_FORMAT_ARCH(dcp,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_FMT_SET) +void xfs_cfork_fmt_set_arch(xfs_dinode_core_t *dcp, int w, int n, xfs_arch_t arch); +void xfs_cfork_fmt_set(xfs_dinode_core_t *dcp, int w, int n); +#define XFS_CFORK_FMT_SET_ARCH(dcp,w,n,arch) xfs_cfork_fmt_set_arch(dcp,w,n,arch) +#define XFS_CFORK_FMT_SET(dcp,w,n) xfs_cfork_fmt_set(dcp,w,n) +#else +#define XFS_CFORK_FMT_SET_ARCH(dcp,w,n,arch) \ + ((w) == XFS_DATA_FORK ? \ + (INT_SET((dcp)->di_format, arch, (n))) : \ + (INT_SET((dcp)->di_aformat, arch, (n)))) +#define XFS_CFORK_FMT_SET(dcp,w,n) XFS_CFORK_FMT_SET_ARCH(dcp,w,n,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_NEXTENTS) +int xfs_cfork_nextents_arch(xfs_dinode_core_t *dcp, int w, xfs_arch_t arch); +int xfs_cfork_nextents(xfs_dinode_core_t *dcp, int w); +#define XFS_CFORK_NEXTENTS_ARCH(dcp,w,arch) xfs_cfork_nextents_arch(dcp,w,arch) +#define XFS_CFORK_NEXTENTS(dcp,w) xfs_cfork_nextents(dcp,w) +#else +#define XFS_CFORK_NEXTENTS_ARCH(dcp,w,arch) \ + ((w) == XFS_DATA_FORK ? INT_GET((dcp)->di_nextents, arch) : INT_GET((dcp)->di_anextents, arch)) +#define XFS_CFORK_NEXTENTS(dcp,w) XFS_CFORK_NEXTENTS_ARCH(dcp,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_CFORK_NEXT_SET) +void xfs_cfork_next_set_arch(xfs_dinode_core_t *dcp, int w, int n, xfs_arch_t arch); +void xfs_cfork_next_set(xfs_dinode_core_t *dcp, int w, int n); +#define XFS_CFORK_NEXT_SET_ARCH(dcp,w,n,arch) xfs_cfork_next_set_arch(dcp,w,n,arch) +#define XFS_CFORK_NEXT_SET(dcp,w,n) xfs_cfork_next_set(dcp,w,n) +#else +#define XFS_CFORK_NEXT_SET_ARCH(dcp,w,n,arch) \ + ((w) == XFS_DATA_FORK ? 
\ + (INT_SET((dcp)->di_nextents, arch, (n))) : \ + (INT_SET((dcp)->di_anextents, arch, (n)))) +#define XFS_CFORK_NEXT_SET(dcp,w,n) XFS_CFORK_NEXT_SET_ARCH(dcp,w,n,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_FORMAT) +int xfs_dfork_format_arch(xfs_dinode_t *dip, int w, xfs_arch_t arch); +int xfs_dfork_format(xfs_dinode_t *dip, int w); +#define XFS_DFORK_FORMAT_ARCH(dip,w,arch) xfs_dfork_format_arch(dip,w,arch) +#define XFS_DFORK_FORMAT(dip,w) xfs_dfork_format(dip,w) +#else +#define XFS_DFORK_FORMAT_ARCH(dip,w,arch) XFS_CFORK_FORMAT_ARCH(&(dip)->di_core, w, arch) +#define XFS_DFORK_FORMAT(dip,w) XFS_DFORK_FORMAT_ARCH(dip,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_FMT_SET) +void xfs_dfork_fmt_set_arch(xfs_dinode_t *dip, int w, int n, xfs_arch_t arch); +void xfs_dfork_fmt_set(xfs_dinode_t *dip, int w, int n); +#define XFS_DFORK_FMT_SET_ARCH(dip,w,n,arch) xfs_dfork_fmt_set_arch(dip,w,n,arch) +#define XFS_DFORK_FMT_SET(dip,w,n) xfs_dfork_fmt_set(dip,w,n) +#else +#define XFS_DFORK_FMT_SET_ARCH(dip,w,n,arch) XFS_CFORK_FMT_SET_ARCH(&(dip)->di_core, w, n, arch) +#define XFS_DFORK_FMT_SET(dip,w,n) XFS_DFORK_FMT_SET_ARCH(dip,w,n,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_NEXTENTS) +int xfs_dfork_nextents_arch(xfs_dinode_t *dip, int w, xfs_arch_t arch); +int xfs_dfork_nextents(xfs_dinode_t *dip, int w); +#define XFS_DFORK_NEXTENTS_ARCH(dip,w,arch) xfs_dfork_nextents_arch(dip,w,arch) +#define XFS_DFORK_NEXTENTS(dip,w) xfs_dfork_nextents(dip,w) +#else +#define XFS_DFORK_NEXTENTS_ARCH(dip,w,arch) XFS_CFORK_NEXTENTS_ARCH(&(dip)->di_core, w, arch) +#define XFS_DFORK_NEXTENTS(dip,w) XFS_DFORK_NEXTENTS_ARCH(dip,w,ARCH_NOCONVERT) + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DFORK_NEXT_SET) +void xfs_dfork_next_set_arch(xfs_dinode_t *dip, int w, int n, xfs_arch_t arch); +void xfs_dfork_next_set(xfs_dinode_t *dip, int w, int n); +#define XFS_DFORK_NEXT_SET_ARCH(dip,w,n,arch) xfs_dfork_next_set_arch(dip,w,n,arch) +#define XFS_DFORK_NEXT_SET(dip,w,n) xfs_dfork_next_set(dip,w,n) +#else +#define XFS_DFORK_NEXT_SET_ARCH(dip,w,n,arch) XFS_CFORK_NEXT_SET_ARCH(&(dip)->di_core, w, n, arch) +#define XFS_DFORK_NEXT_SET(dip,w,n) XFS_DFORK_NEXT_SET_ARCH(dip,w,n,ARCH_NOCONVERT) + +#endif + +/* + * File types (mode field) + */ +#define IFMT 0170000 /* type of file */ +#define IFIFO 0010000 /* named pipe (fifo) */ +#define IFCHR 0020000 /* character special */ +#define IFDIR 0040000 /* directory */ +#define IFBLK 0060000 /* block special */ +#define IFREG 0100000 /* regular */ +#define IFLNK 0120000 /* symbolic link */ +#define IFSOCK 0140000 /* socket */ +#define IFMNT 0160000 /* mount point */ + +/* + * File execution and access modes. + */ +#define ISUID 04000 /* set user id on execution */ +#define ISGID 02000 /* set group id on execution */ +#define ISVTX 01000 /* sticky directory */ +#define IREAD 0400 /* read, write, execute permissions */ +#define IWRITE 0200 +#define IEXEC 0100 + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_DINODE) +xfs_dinode_t *xfs_buf_to_dinode(struct xfs_buf *bp); +#define XFS_BUF_TO_DINODE(bp) xfs_buf_to_dinode(bp) +#else +#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)(XFS_BUF_PTR(bp))) +#endif + +/* + * Values for di_flags + * There should be a one-to-one correspondence between these flags and the + * XFS_XFLAG_s. 
+ */ +#define XFS_DIFLAG_REALTIME_BIT 0 /* file's blocks come from rt area */ +#define XFS_DIFLAG_PREALLOC_BIT 1 /* file space has been preallocated */ +#define XFS_DIFLAG_NEWRTBM_BIT 2 /* for rtbitmap inode, new format */ +#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) +#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) +#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) +#define XFS_DIFLAG_ALL \ + (XFS_DIFLAG_REALTIME|XFS_DIFLAG_PREALLOC|XFS_DIFLAG_NEWRTBM) + +#endif /* __XFS_DINODE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_block.c linux.22-ac2/fs/xfs/xfs_dir2_block.c --- linux.vanilla/fs/xfs/xfs_dir2_block.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_block.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1249 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_block.c + * XFS V2 directory implementation, single-block form. + * See xfs_dir2_block.h for the format. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_trace.h" +#include "xfs_error.h" + +/* + * Local function prototypes. + */ +static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, xfs_dabuf_t *bp, int first, + int last); +static void xfs_dir2_block_log_tail(xfs_trans_t *tp, xfs_dabuf_t *bp); +static int xfs_dir2_block_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **bpp, + int *entno); +static int xfs_dir2_block_sort(const void *a, const void *b); + +/* + * Add an entry to a block directory. 
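+ * The single data block is read and its bestfree table is used to find room
+ * for the new data entry and, when there are no stale leaf entries, for a
+ * new leaf entry as well; stale leaf entries are compacted when that is the
+ * only way to make room.  If the entry still does not fit, the block is
+ * converted with xfs_dir2_block_to_leaf and the add is redone by
+ * xfs_dir2_leaf_addname.  Otherwise the sorted leaf table is binary-searched
+ * for the insertion point, the free space is consumed, and the leaf range,
+ * block tail, data header and new entry are logged.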
+ */ +int /* error */ +xfs_dir2_block_addname( + xfs_da_args_t *args) /* directory op arguments */ +{ + xfs_dir2_data_free_t *bf; /* bestfree table in block */ + xfs_dir2_block_t *block; /* directory block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* buffer for block */ + xfs_dir2_block_tail_t *btp; /* block tail */ + int compact; /* need to compact leaf ents */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* directory inode */ + xfs_dir2_data_unused_t *dup; /* block unused entry */ + int error; /* error return value */ + xfs_dir2_data_unused_t *enddup=NULL; /* unused at end of data */ + xfs_dahash_t hash; /* hash value of found entry */ + int high; /* high index for binary srch */ + int highstale; /* high stale index */ + int lfloghigh=0; /* last final leaf to log */ + int lfloglow=0; /* first final leaf to log */ + int len; /* length of the new entry */ + int low; /* low index for binary srch */ + int lowstale; /* low stale index */ + int mid=0; /* midpoint for binary srch */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log header */ + int needscan; /* need to rescan freespace */ + xfs_dir2_data_off_t *tagp; /* pointer to tag value */ + xfs_trans_t *tp; /* transaction structure */ + + xfs_dir2_trace_args("block_addname", args); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + /* + * Read the (one and only) directory block into dabuf bp. + */ + if ((error = + xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) { + return error; + } + ASSERT(bp != NULL); + block = bp->data; + /* + * Check the magic number, corrupted if wrong. + */ + if (unlikely(INT_GET(block->hdr.magic, ARCH_CONVERT) + != XFS_DIR2_BLOCK_MAGIC)) { + XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", + XFS_ERRLEVEL_LOW, mp, block); + xfs_da_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); + } + len = XFS_DIR2_DATA_ENTSIZE(args->namelen); + /* + * Set up pointers to parts of the block. + */ + bf = block->hdr.bestfree; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * No stale entries? Need space for entry and new leaf. + */ + if (INT_ISZERO(btp->stale, ARCH_CONVERT)) { + /* + * Tag just before the first leaf entry. + */ + tagp = (xfs_dir2_data_off_t *)blp - 1; + /* + * Data object just before the first leaf entry. + */ + enddup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + /* + * If it's not free then can't do this add without cleaning up: + * the space before the first leaf entry needs to be free so it + * can be expanded to hold the pointer to the new entry. + */ + if (INT_GET(enddup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + dup = enddup = NULL; + /* + * Check out the biggest freespace and see if it's the same one. + */ + else { + dup = (xfs_dir2_data_unused_t *) + ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); + if (dup == enddup) { + /* + * It is the biggest freespace, is it too small + * to hold the new leaf too? + */ + if (INT_GET(dup->length, ARCH_CONVERT) < len + (uint)sizeof(*blp)) { + /* + * Yes, we use the second-largest + * entry instead if it works. + */ + if (INT_GET(bf[1].length, ARCH_CONVERT) >= len) + dup = (xfs_dir2_data_unused_t *) + ((char *)block + + INT_GET(bf[1].offset, ARCH_CONVERT)); + else + dup = NULL; + } + } else { + /* + * Not the same free entry, + * just check its length. 
+ */ + if (INT_GET(dup->length, ARCH_CONVERT) < len) { + dup = NULL; + } + } + } + compact = 0; + } + /* + * If there are stale entries we'll use one for the leaf. + * Is the biggest entry enough to avoid compaction? + */ + else if (INT_GET(bf[0].length, ARCH_CONVERT) >= len) { + dup = (xfs_dir2_data_unused_t *) + ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); + compact = 0; + } + /* + * Will need to compact to make this work. + */ + else { + /* + * Tag just before the first leaf entry. + */ + tagp = (xfs_dir2_data_off_t *)blp - 1; + /* + * Data object just before the first leaf entry. + */ + dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + /* + * If it's not free then the data will go where the + * leaf data starts now, if it works at all. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + if (INT_GET(dup->length, ARCH_CONVERT) + (INT_GET(btp->stale, ARCH_CONVERT) - 1) * + (uint)sizeof(*blp) < len) + dup = NULL; + } else if ((INT_GET(btp->stale, ARCH_CONVERT) - 1) * (uint)sizeof(*blp) < len) + dup = NULL; + else + dup = (xfs_dir2_data_unused_t *)blp; + compact = 1; + } + /* + * If this isn't a real add, we're done with the buffer. + */ + if (args->justcheck) + xfs_da_brelse(tp, bp); + /* + * If we don't have space for the new entry & leaf ... + */ + if (!dup) { + /* + * Not trying to actually do anything, or don't have + * a space reservation: return no-space. + */ + if (args->justcheck || args->total == 0) + return XFS_ERROR(ENOSPC); + /* + * Convert to the next larger format. + * Then add the new entry in that format. + */ + error = xfs_dir2_block_to_leaf(args, bp); + xfs_da_buf_done(bp); + if (error) + return error; + return xfs_dir2_leaf_addname(args); + } + /* + * Just checking, and it would work, so say so. + */ + if (args->justcheck) + return 0; + needlog = needscan = 0; + /* + * If need to compact the leaf entries, do it now. + * Leave the highest-numbered stale entry stale. + * XXX should be the one closest to mid but mid is not yet computed. + */ + if (compact) { + int fromidx; /* source leaf index */ + int toidx; /* target leaf index */ + + for (fromidx = toidx = INT_GET(btp->count, ARCH_CONVERT) - 1, + highstale = lfloghigh = -1; + fromidx >= 0; + fromidx--) { + if (INT_GET(blp[fromidx].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { + if (highstale == -1) + highstale = toidx; + else { + if (lfloghigh == -1) + lfloghigh = toidx; + continue; + } + } + if (fromidx < toidx) + blp[toidx] = blp[fromidx]; + toidx--; + } + lfloglow = toidx + 1 - (INT_GET(btp->stale, ARCH_CONVERT) - 1); + lfloghigh -= INT_GET(btp->stale, ARCH_CONVERT) - 1; + INT_MOD(btp->count, ARCH_CONVERT, -(INT_GET(btp->stale, ARCH_CONVERT) - 1)); + xfs_dir2_data_make_free(tp, bp, + (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), + (xfs_dir2_data_aoff_t)((INT_GET(btp->stale, ARCH_CONVERT) - 1) * sizeof(*blp)), + &needlog, &needscan); + blp += INT_GET(btp->stale, ARCH_CONVERT) - 1; + INT_SET(btp->stale, ARCH_CONVERT, 1); + /* + * If we now need to rebuild the bestfree map, do so. + * This needs to happen before the next call to use_free. + */ + if (needscan) { + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, + &needlog, NULL); + needscan = 0; + } + } + /* + * Set leaf logging boundaries to impossible state. + * For the no-stale case they're set explicitly. 
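+ * Starting with lfloglow == count and lfloghigh == -1 lets the MIN/MAX
+ * updates in the stale-entry reuse code below narrow them to the range of
+ * leaf entries actually touched.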
+ */ + else if (INT_GET(btp->stale, ARCH_CONVERT)) { + lfloglow = INT_GET(btp->count, ARCH_CONVERT); + lfloghigh = -1; + } + /* + * Find the slot that's first lower than our hash value, -1 if none. + */ + for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; low <= high; ) { + mid = (low + high) >> 1; + if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) + break; + if (hash < args->hashval) + low = mid + 1; + else + high = mid - 1; + } + while (mid >= 0 && INT_GET(blp[mid].hashval, ARCH_CONVERT) >= args->hashval) { + mid--; + } + /* + * No stale entries, will use enddup space to hold new leaf. + */ + if (INT_ISZERO(btp->stale, ARCH_CONVERT)) { + /* + * Mark the space needed for the new leaf entry, now in use. + */ + xfs_dir2_data_use_free(tp, bp, enddup, + (xfs_dir2_data_aoff_t) + ((char *)enddup - (char *)block + INT_GET(enddup->length, ARCH_CONVERT) - + sizeof(*blp)), + (xfs_dir2_data_aoff_t)sizeof(*blp), + &needlog, &needscan); + /* + * Update the tail (entry count). + */ + INT_MOD(btp->count, ARCH_CONVERT, +1); + /* + * If we now need to rebuild the bestfree map, do so. + * This needs to happen before the next call to use_free. + */ + if (needscan) { + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, + &needlog, NULL); + needscan = 0; + } + /* + * Adjust pointer to the first leaf entry, we're about to move + * the table up one to open up space for the new leaf entry. + * Then adjust our index to match. + */ + blp--; + mid++; + if (mid) + memmove(blp, &blp[1], mid * sizeof(*blp)); + lfloglow = 0; + lfloghigh = mid; + } + /* + * Use a stale leaf for our new entry. + */ + else { + for (lowstale = mid; + lowstale >= 0 && + INT_GET(blp[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; + lowstale--) + continue; + for (highstale = mid + 1; + highstale < INT_GET(btp->count, ARCH_CONVERT) && + INT_GET(blp[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && + (lowstale < 0 || mid - lowstale > highstale - mid); + highstale++) + continue; + /* + * Move entries toward the low-numbered stale entry. + */ + if (lowstale >= 0 && + (highstale == INT_GET(btp->count, ARCH_CONVERT) || + mid - lowstale <= highstale - mid)) { + if (mid - lowstale) + memmove(&blp[lowstale], &blp[lowstale + 1], + (mid - lowstale) * sizeof(*blp)); + lfloglow = MIN(lowstale, lfloglow); + lfloghigh = MAX(mid, lfloghigh); + } + /* + * Move entries toward the high-numbered stale entry. + */ + else { + ASSERT(highstale < INT_GET(btp->count, ARCH_CONVERT)); + mid++; + if (highstale - mid) + memmove(&blp[mid + 1], &blp[mid], + (highstale - mid) * sizeof(*blp)); + lfloglow = MIN(mid, lfloglow); + lfloghigh = MAX(highstale, lfloghigh); + } + INT_MOD(btp->stale, ARCH_CONVERT, -1); + } + /* + * Point to the new data entry. + */ + dep = (xfs_dir2_data_entry_t *)dup; + /* + * Fill in the leaf entry. + */ + INT_SET(blp[mid].hashval, ARCH_CONVERT, args->hashval); + INT_SET(blp[mid].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); + /* + * Mark space for the data entry used. + */ + xfs_dir2_data_use_free(tp, bp, dup, + (xfs_dir2_data_aoff_t)((char *)dup - (char *)block), + (xfs_dir2_data_aoff_t)len, &needlog, &needscan); + /* + * Create the new data entry. 
+ */ + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + dep->namelen = args->namelen; + memcpy(dep->name, args->name, args->namelen); + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + /* + * Clean up the bestfree array and log the header, tail, and entry. + */ + if (needscan) + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, + NULL); + if (needlog) + xfs_dir2_data_log_header(tp, bp); + xfs_dir2_block_log_tail(tp, bp); + xfs_dir2_data_log_entry(tp, bp, dep); + xfs_dir2_data_check(dp, bp); + xfs_da_buf_done(bp); + return 0; +} + +/* + * Readdir for block directories. + */ +int /* error */ +xfs_dir2_block_getdents( + xfs_trans_t *tp, /* transaction (NULL) */ + xfs_inode_t *dp, /* incore inode */ + uio_t *uio, /* caller's buffer control */ + int *eofp, /* eof reached? (out) */ + xfs_dirent_t *dbp, /* caller's buffer */ + xfs_dir2_put_t put) /* abi's formatting function */ +{ + xfs_dir2_block_t *block; /* directory block structure */ + xfs_dabuf_t *bp; /* buffer for block */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_dir2_data_unused_t *dup; /* block unused entry */ + char *endptr; /* end of the data entries */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_put_args_t p; /* arg package for put rtn */ + char *ptr; /* current data entry */ + int wantoff; /* starting block offset */ + + mp = dp->i_mount; + /* + * If the block number in the offset is out of range, we're done. + */ + if (XFS_DIR2_DATAPTR_TO_DB(mp, uio->uio_offset) > mp->m_dirdatablk) { + *eofp = 1; + return 0; + } + /* + * Can't read the block, give up, else get dabuf in bp. + */ + if ((error = + xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) { + return error; + } + ASSERT(bp != NULL); + /* + * Extract the byte offset we start at from the seek pointer. + * We'll skip entries before this. + */ + wantoff = XFS_DIR2_DATAPTR_TO_OFF(mp, uio->uio_offset); + block = bp->data; + xfs_dir2_data_check(dp, bp); + /* + * Set up values for the loop. + */ + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + ptr = (char *)block->u; + endptr = (char *)XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + p.dbp = dbp; + p.put = put; + p.uio = uio; + /* + * Loop over the data portion of the block. + * Each object is a real entry (dep) or an unused one (dup). + */ + while (ptr < endptr) { + dup = (xfs_dir2_data_unused_t *)ptr; + /* + * Unused, skip it. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ptr += INT_GET(dup->length, ARCH_CONVERT); + continue; + } + + dep = (xfs_dir2_data_entry_t *)ptr; + + /* + * Bump pointer for the next iteration. + */ + ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen); + /* + * The entry is before the desired starting point, skip it. + */ + if ((char *)dep - (char *)block < wantoff) + continue; + /* + * Set up argument structure for put routine. + */ + p.namelen = dep->namelen; + + p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + ptr - (char *)block); +#if XFS_BIG_FILESYSTEMS + p.ino = INT_GET(dep->inumber, ARCH_CONVERT) + mp->m_inoadd; +#else + p.ino = INT_GET(dep->inumber, ARCH_CONVERT); +#endif + p.name = (char *)dep->name; + + /* + * Put the entry in the caller's buffer. + */ + error = p.put(&p); + + /* + * If it didn't fit, set the final offset to here & return. 
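+		 * uio_offset is wound back to this entry's cookie, so the
+		 * next getdents call on the directory resumes at the entry
+		 * that did not fit.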
+ */ + if (!p.done) { + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + (char *)dep - (char *)block); + xfs_da_brelse(tp, bp); + return error; + } + } + + /* + * Reached the end of the block. + * Set the offset to a nonexistent block 1 and return. + */ + *eofp = 1; + + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0); + + xfs_da_brelse(tp, bp); + + return 0; +} + +/* + * Log leaf entries from the block. + */ +static void +xfs_dir2_block_log_leaf( + xfs_trans_t *tp, /* transaction structure */ + xfs_dabuf_t *bp, /* block buffer */ + int first, /* index of first logged leaf */ + int last) /* index of last logged leaf */ +{ + xfs_dir2_block_t *block; /* directory block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_mount_t *mp; /* filesystem mount point */ + + mp = tp->t_mountp; + block = bp->data; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)block), + (uint)((char *)&blp[last + 1] - (char *)block - 1)); +} + +/* + * Log the block tail. + */ +static void +xfs_dir2_block_log_tail( + xfs_trans_t *tp, /* transaction structure */ + xfs_dabuf_t *bp) /* block buffer */ +{ + xfs_dir2_block_t *block; /* directory block structure */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_mount_t *mp; /* filesystem mount point */ + + mp = tp->t_mountp; + block = bp->data; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)block), + (uint)((char *)(btp + 1) - (char *)block - 1)); +} + +/* + * Look up an entry in the block. This is the external routine, + * xfs_dir2_block_lookup_int does the real work. + */ +int /* error */ +xfs_dir2_block_lookup( + xfs_da_args_t *args) /* dir lookup arguments */ +{ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* incore inode */ + int ent; /* entry index */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + + xfs_dir2_trace_args("block_lookup", args); + /* + * Get the buffer, look up the entry. + * If not found (ENOENT) then return, have no buffer. + */ + if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) + return error; + dp = args->dp; + mp = dp->i_mount; + block = bp->data; + xfs_dir2_data_check(dp, bp); + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Get the offset from the leaf entry, to point to the data. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + /* + * Fill in inode number, release the block. + */ + args->inumber = INT_GET(dep->inumber, ARCH_CONVERT); + xfs_da_brelse(args->trans, bp); + return XFS_ERROR(EEXIST); +} + +/* + * Internal block lookup routine. 
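+ * Binary-searches the sorted leaf entries by hash value, backs up to
+ * the first entry with a matching hash, then walks forward comparing
+ * names until the entry is found or the matching hashes run out.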
+ */ +static int /* error */ +xfs_dir2_block_lookup_int( + xfs_da_args_t *args, /* dir lookup arguments */ + xfs_dabuf_t **bpp, /* returned block buffer */ + int *entno) /* returned entry number */ +{ + xfs_dir2_dataptr_t addr; /* data entry address */ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* incore inode */ + int error; /* error return value */ + xfs_dahash_t hash; /* found hash value */ + int high; /* binary search high index */ + int low; /* binary search low index */ + int mid; /* binary search current idx */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + /* + * Read the buffer, return error if we can't get it. + */ + if ((error = + xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &bp, XFS_DATA_FORK))) { + return error; + } + ASSERT(bp != NULL); + block = bp->data; + xfs_dir2_data_check(dp, bp); + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Loop doing a binary search for our hash value. + * Find our entry, ENOENT if it's not there. + */ + for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; ; ) { + ASSERT(low <= high); + mid = (low + high) >> 1; + if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) + break; + if (hash < args->hashval) + low = mid + 1; + else + high = mid - 1; + if (low > high) { + ASSERT(args->oknoent); + xfs_da_brelse(tp, bp); + return XFS_ERROR(ENOENT); + } + } + /* + * Back up to the first one with the right hash value. + */ + while (mid > 0 && INT_GET(blp[mid - 1].hashval, ARCH_CONVERT) == args->hashval) { + mid--; + } + /* + * Now loop forward through all the entries with the + * right hash value looking for our name. + */ + do { + if ((addr = INT_GET(blp[mid].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Get pointer to the entry from the leaf. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr)); + /* + * Compare, if it's right give back buffer & entry number. + */ + if (dep->namelen == args->namelen && + dep->name[0] == args->name[0] && + memcmp(dep->name, args->name, args->namelen) == 0) { + *bpp = bp; + *entno = mid; + return 0; + } + } while (++mid < INT_GET(btp->count, ARCH_CONVERT) && INT_GET(blp[mid].hashval, ARCH_CONVERT) == hash); + /* + * No match, release the buffer and return ENOENT. + */ + ASSERT(args->oknoent); + xfs_da_brelse(tp, bp); + return XFS_ERROR(ENOENT); +} + +/* + * Remove an entry from a block format directory. + * If that makes the block small enough to fit in shortform, transform it. 
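+ * The entry's data space is returned to the bestfree pool and its
+ * leaf entry is only marked stale; no compaction is done here.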
+ */ +int /* error */ +xfs_dir2_block_removename( + xfs_da_args_t *args) /* directory operation args */ +{ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf pointer */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* incore inode */ + int ent; /* block leaf entry index */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log block header */ + int needscan; /* need to fixup bestfree */ + xfs_dir2_sf_hdr_t sfh; /* shortform header */ + int size; /* shortform size */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("block_removename", args); + /* + * Look up the entry in the block. Gets the buffer and entry index. + * It will always be there, the vnodeops level does a lookup first. + */ + if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { + return error; + } + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + block = bp->data; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Point to the data entry using the leaf entry. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + /* + * Mark the data entry's space free. + */ + needlog = needscan = 0; + xfs_dir2_data_make_free(tp, bp, + (xfs_dir2_data_aoff_t)((char *)dep - (char *)block), + XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); + /* + * Fix up the block tail. + */ + INT_MOD(btp->stale, ARCH_CONVERT, +1); + xfs_dir2_block_log_tail(tp, bp); + /* + * Remove the leaf entry by marking it stale. + */ + INT_SET(blp[ent].address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + xfs_dir2_block_log_leaf(tp, bp, ent, ent); + /* + * Fix up bestfree, log the header if necessary. + */ + if (needscan) + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, + NULL); + if (needlog) + xfs_dir2_data_log_header(tp, bp); + xfs_dir2_data_check(dp, bp); + /* + * See if the size as a shortform is good enough. + */ + if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) > + XFS_IFORK_DSIZE(dp)) { + xfs_da_buf_done(bp); + return 0; + } + /* + * If it works, do the conversion. + */ + return xfs_dir2_block_to_sf(args, bp, size, &sfh); +} + +/* + * Replace an entry in a V2 block directory. + * Change the inode number to the new value. + */ +int /* error */ +xfs_dir2_block_replace( + xfs_da_args_t *args) /* directory operation args */ +{ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* block data entry */ + xfs_inode_t *dp; /* incore inode */ + int ent; /* leaf entry index */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + + xfs_dir2_trace_args("block_replace", args); + /* + * Lookup the entry in the directory. Get buffer and entry index. + * This will always succeed since the caller has already done a lookup. + */ + if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { + return error; + } + dp = args->dp; + mp = dp->i_mount; + block = bp->data; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Point to the data entry we need to change. 
+ */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) != args->inumber); + /* + * Change the inode number to the new value. + */ + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + xfs_dir2_data_log_entry(args->trans, bp, dep); + xfs_dir2_data_check(dp, bp); + xfs_da_buf_done(bp); + return 0; +} + +/* + * Qsort comparison routine for the block leaf entries. + */ +static int /* sort order */ +xfs_dir2_block_sort( + const void *a, /* first leaf entry */ + const void *b) /* second leaf entry */ +{ + const xfs_dir2_leaf_entry_t *la; /* first leaf entry */ + const xfs_dir2_leaf_entry_t *lb; /* second leaf entry */ + + la = a; + lb = b; + return INT_GET(la->hashval, ARCH_CONVERT) < INT_GET(lb->hashval, ARCH_CONVERT) ? -1 : + (INT_GET(la->hashval, ARCH_CONVERT) > INT_GET(lb->hashval, ARCH_CONVERT) ? 1 : 0); +} + +/* + * Convert a V2 leaf directory to a V2 block directory if possible. + */ +int /* error */ +xfs_dir2_leaf_to_block( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *lbp, /* leaf buffer */ + xfs_dabuf_t *dbp) /* data buffer */ +{ + xfs_dir2_data_off_t *bestsp; /* leaf bests table */ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* unused data entry */ + int error; /* error return value */ + int from; /* leaf from index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* file system mount point */ + int needlog; /* need to log data header */ + int needscan; /* need to scan for bestfree */ + xfs_dir2_sf_hdr_t sfh; /* shortform header */ + int size; /* bytes used */ + xfs_dir2_data_off_t *tagp; /* end of entry (tag) */ + int to; /* block/leaf to index */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_bb("leaf_to_block", args, lbp, dbp); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + leaf = lbp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + /* + * If there are data blocks other than the first one, take this + * opportunity to remove trailing empty data blocks that may have + * been left behind during no-space-reservation operations. + * These will show up in the leaf bests table. + */ + while (dp->i_d.di_size > mp->m_dirblksize) { + bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT); + if (INT_GET(bestsp[INT_GET(ltp->bestcount, ARCH_CONVERT) - 1], ARCH_CONVERT) == + mp->m_dirblksize - (uint)sizeof(block->hdr)) { + if ((error = + xfs_dir2_leaf_trim_data(args, lbp, + (xfs_dir2_db_t)(INT_GET(ltp->bestcount, ARCH_CONVERT) - 1)))) + goto out; + } else { + error = 0; + goto out; + } + } + /* + * Read the data block if we don't already have it, give up if it fails. + */ + if (dbp == NULL && + (error = xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &dbp, + XFS_DATA_FORK))) { + goto out; + } + block = dbp->data; + ASSERT(INT_GET(block->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + /* + * Size of the "leaf" area in the block. + */ + size = (uint)sizeof(block->tail) + + (uint)sizeof(*lep) * (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); + /* + * Look at the last data entry. 
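+	 * The tag word stored just before the end of the block gives the
+	 * offset of the last data object; that object must be free space
+	 * large enough to hold the new leaf entries plus the block tail.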
+ */ + tagp = (xfs_dir2_data_off_t *)((char *)block + mp->m_dirblksize) - 1; + dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + /* + * If it's not free or is too short we can't do it. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG || INT_GET(dup->length, ARCH_CONVERT) < size) { + error = 0; + goto out; + } + /* + * Start converting it to block form. + */ + INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); + needlog = 1; + needscan = 0; + /* + * Use up the space at the end of the block (blp/btp). + */ + xfs_dir2_data_use_free(tp, dbp, dup, mp->m_dirblksize - size, size, + &needlog, &needscan); + /* + * Initialize the block tail. + */ + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + INT_SET(btp->count, ARCH_CONVERT, INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); + INT_ZERO(btp->stale, ARCH_CONVERT); + xfs_dir2_block_log_tail(tp, dbp); + /* + * Initialize the block leaf area. We compact out stale entries. + */ + lep = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + lep[to++] = leaf->ents[from]; + } + ASSERT(to == INT_GET(btp->count, ARCH_CONVERT)); + xfs_dir2_block_log_leaf(tp, dbp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); + /* + * Scan the bestfree if we need it and log the data block header. + */ + if (needscan) + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, + NULL); + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + /* + * Pitch the old leaf block. + */ + error = xfs_da_shrink_inode(args, mp->m_dirleafblk, lbp); + lbp = NULL; + if (error) { + goto out; + } + /* + * Now see if the resulting block can be shrunken to shortform. + */ + if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) > + XFS_IFORK_DSIZE(dp)) { + error = 0; + goto out; + } + return xfs_dir2_block_to_sf(args, dbp, size, &sfh); +out: + if (lbp) + xfs_da_buf_done(lbp); + if (dbp) + xfs_da_buf_done(dbp); + return error; +} + +/* + * Convert the shortform directory to block form. + */ +int /* error */ +xfs_dir2_sf_to_block( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dir2_db_t blkno; /* dir-relative block # (0) */ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_block_tail_t *btp; /* block tail pointer */ + char *buf; /* sf buffer */ + int buf_len; + xfs_dir2_data_entry_t *dep; /* data entry pointer */ + xfs_inode_t *dp; /* incore directory inode */ + int dummy; /* trash */ + xfs_dir2_data_unused_t *dup; /* unused entry pointer */ + int endoffset; /* end of data objects */ + int error; /* error return value */ + int i; /* index */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log block header */ + int needscan; /* need to scan block freespc */ + int newoffset; /* offset from current entry */ + int offset; /* target block offset */ + xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_data_off_t *tagp; /* end of data entry */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("sf_to_block", args); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Bomb out if the shortform directory is way too short. 
+ */ + if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); + /* + * Copy the directory into the stack buffer. + * Then pitch the incore inode data so we can make extents. + */ + + buf_len = dp->i_df.if_bytes; + buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP); + + memcpy(buf, sfp, dp->i_df.if_bytes); + xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK); + dp->i_d.di_size = 0; + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + /* + * Reset pointer - old sfp is gone. + */ + sfp = (xfs_dir2_sf_t *)buf; + /* + * Add block 0 to the inode. + */ + error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno); + if (error) { + kmem_free(buf, buf_len); + return error; + } + /* + * Initialize the data block. + */ + error = xfs_dir2_data_init(args, blkno, &bp); + if (error) { + kmem_free(buf, buf_len); + return error; + } + block = bp->data; + INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); + /* + * Compute size of block "tail" area. + */ + i = (uint)sizeof(*btp) + + (INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); + /* + * The whole thing is initialized to free by the init routine. + * Say we're using the leaf and tail area. + */ + dup = (xfs_dir2_data_unused_t *)block->u; + needlog = needscan = 0; + xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog, + &needscan); + ASSERT(needscan == 0); + /* + * Fill in the tail. + */ + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + INT_SET(btp->count, ARCH_CONVERT, INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2); /* ., .. */ + INT_ZERO(btp->stale, ARCH_CONVERT); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + endoffset = (uint)((char *)blp - (char *)block); + /* + * Remove the freespace, we'll manage it. + */ + xfs_dir2_data_use_free(tp, bp, dup, + (xfs_dir2_data_aoff_t)((char *)dup - (char *)block), + INT_GET(dup->length, ARCH_CONVERT), &needlog, &needscan); + /* + * Create entry for . + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATA_DOT_OFFSET); + INT_SET(dep->inumber, ARCH_CONVERT, dp->i_ino); + dep->namelen = 1; + dep->name[0] = '.'; + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + xfs_dir2_data_log_entry(tp, bp, dep); + INT_SET(blp[0].hashval, ARCH_CONVERT, xfs_dir_hash_dot); + INT_SET(blp[0].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + /* + * Create entry for .. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET); + INT_SET(dep->inumber, ARCH_CONVERT, XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT)); + dep->namelen = 2; + dep->name[0] = dep->name[1] = '.'; + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + xfs_dir2_data_log_entry(tp, bp, dep); + INT_SET(blp[1].hashval, ARCH_CONVERT, xfs_dir_hash_dotdot); + INT_SET(blp[1].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + offset = XFS_DIR2_DATA_FIRST_OFFSET; + /* + * Loop over existing entries, stuff them in. 
+ */ + if ((i = 0) == INT_GET(sfp->hdr.count, ARCH_CONVERT)) + sfep = NULL; + else + sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + /* + * Need to preserve the existing offset values in the sf directory. + * Insert holes (unused entries) where necessary. + */ + while (offset < endoffset) { + /* + * sfep is null when we reach the end of the list. + */ + if (sfep == NULL) + newoffset = endoffset; + else + newoffset = XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT); + /* + * There should be a hole here, make one. + */ + if (offset < newoffset) { + dup = (xfs_dir2_data_unused_t *) + ((char *)block + offset); + INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(dup->length, ARCH_CONVERT, newoffset - offset); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t) + ((char *)dup - (char *)block)); + xfs_dir2_data_log_unused(tp, bp, dup); + (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block, + dup, &dummy); + offset += INT_GET(dup->length, ARCH_CONVERT); + continue; + } + /* + * Copy a real entry. + */ + dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset); + INT_SET(dep->inumber, ARCH_CONVERT, XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT)); + dep->namelen = sfep->namelen; + memcpy(dep->name, sfep->name, dep->namelen); + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + xfs_dir2_data_log_entry(tp, bp, dep); + INT_SET(blp[2 + i].hashval, ARCH_CONVERT, xfs_da_hashname((char *)sfep->name, sfep->namelen)); + INT_SET(blp[2 + i].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, + (char *)dep - (char *)block)); + offset = (int)((char *)(tagp + 1) - (char *)block); + if (++i == INT_GET(sfp->hdr.count, ARCH_CONVERT)) + sfep = NULL; + else + sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); + } + /* Done with the temporary buffer */ + kmem_free(buf, buf_len); + /* + * Sort the leaf entries by hash value. + */ + qsort(blp, INT_GET(btp->count, ARCH_CONVERT), sizeof(*blp), xfs_dir2_block_sort); + /* + * Log the leaf entry area and tail. + * Already logged the header in data_init, ignore needlog. + */ + ASSERT(needscan == 0); + xfs_dir2_block_log_leaf(tp, bp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); + xfs_dir2_block_log_tail(tp, bp); + xfs_dir2_data_check(dp, bp); + xfs_da_buf_done(bp); + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_block.h linux.22-ac2/fs/xfs/xfs_dir2_block.h --- linux.vanilla/fs/xfs/xfs_dir2_block.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_block.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_BLOCK_H__ +#define __XFS_DIR2_BLOCK_H__ + +/* + * xfs_dir2_block.h + * Directory version 2, single block format structures + */ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_dir2_data_hdr; +struct xfs_dir2_leaf_entry; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * The single block format is as follows: + * xfs_dir2_data_hdr_t structure + * xfs_dir2_data_entry_t and xfs_dir2_data_unused_t structures + * xfs_dir2_leaf_entry_t structures + * xfs_dir2_block_tail_t structure + */ + +#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */ + +typedef struct xfs_dir2_block_tail { + __uint32_t count; /* count of leaf entries */ + __uint32_t stale; /* count of stale lf entries */ +} xfs_dir2_block_tail_t; + +/* + * Generic single-block structure, for xfs_db. + */ +typedef struct xfs_dir2_block { + xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_BLOCK_MAGIC */ + xfs_dir2_data_union_t u[1]; + xfs_dir2_leaf_entry_t leaf[1]; + xfs_dir2_block_tail_t tail; +} xfs_dir2_block_t; + +/* + * Pointer to the leaf header embedded in a data block (1-block format) + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BLOCK_TAIL_P) +xfs_dir2_block_tail_t * +xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block); +#define XFS_DIR2_BLOCK_TAIL_P(mp,block) xfs_dir2_block_tail_p(mp,block) +#else +#define XFS_DIR2_BLOCK_TAIL_P(mp,block) \ + (((xfs_dir2_block_tail_t *)((char *)(block) + (mp)->m_dirblksize)) - 1) +#endif + +/* + * Pointer to the leaf entries embedded in a data block (1-block format) + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BLOCK_LEAF_P) +struct xfs_dir2_leaf_entry *xfs_dir2_block_leaf_p_arch( + xfs_dir2_block_tail_t *btp, xfs_arch_t arch); +#define XFS_DIR2_BLOCK_LEAF_P_ARCH(btp,arch) \ + xfs_dir2_block_leaf_p_arch(btp,arch) +#else +#define XFS_DIR2_BLOCK_LEAF_P_ARCH(btp,arch) \ + (((struct xfs_dir2_leaf_entry *)(btp)) - INT_GET((btp)->count, arch)) +#endif + +/* + * Function declarations. 
+ */ + +extern int + xfs_dir2_block_addname(struct xfs_da_args *args); + +extern int + xfs_dir2_block_getdents(struct xfs_trans *tp, struct xfs_inode *dp, + struct uio *uio, int *eofp, struct xfs_dirent *dbp, + xfs_dir2_put_t put); + +extern int + xfs_dir2_block_lookup(struct xfs_da_args *args); + +extern int + xfs_dir2_block_removename(struct xfs_da_args *args); + +extern int + xfs_dir2_block_replace(struct xfs_da_args *args); + +extern int + xfs_dir2_leaf_to_block(struct xfs_da_args *args, struct xfs_dabuf *lbp, + struct xfs_dabuf *dbp); + +extern int + xfs_dir2_sf_to_block(struct xfs_da_args *args); + +#endif /* __XFS_DIR2_BLOCK_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2.c linux.22-ac2/fs/xfs/xfs_dir2.c --- linux.vanilla/fs/xfs/xfs_dir2.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,860 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * XFS v2 directory implmentation. + * Top-level and utility routines. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_node.h" +#include "xfs_dir2_sf.h" +#include "xfs_dir2_trace.h" +#include "xfs_error.h" +#include "xfs_bit.h" + +/* + * Declarations for interface routines. 
+ */ +static void xfs_dir2_mount(xfs_mount_t *mp); +static int xfs_dir2_isempty(xfs_inode_t *dp); +static int xfs_dir2_init(xfs_trans_t *tp, xfs_inode_t *dp, + xfs_inode_t *pdp); +static int xfs_dir2_createname(xfs_trans_t *tp, xfs_inode_t *dp, + char *name, int namelen, xfs_ino_t inum, + xfs_fsblock_t *first, + xfs_bmap_free_t *flist, xfs_extlen_t total); +static int xfs_dir2_lookup(xfs_trans_t *tp, xfs_inode_t *dp, char *name, + int namelen, xfs_ino_t *inum); +static int xfs_dir2_removename(xfs_trans_t *tp, xfs_inode_t *dp, + char *name, int namelen, xfs_ino_t ino, + xfs_fsblock_t *first, + xfs_bmap_free_t *flist, xfs_extlen_t total); +static int xfs_dir2_getdents(xfs_trans_t *tp, xfs_inode_t *dp, uio_t *uio, + int *eofp); +static int xfs_dir2_replace(xfs_trans_t *tp, xfs_inode_t *dp, char *name, + int namelen, xfs_ino_t inum, + xfs_fsblock_t *first, xfs_bmap_free_t *flist, + xfs_extlen_t total); +static int xfs_dir2_canenter(xfs_trans_t *tp, xfs_inode_t *dp, char *name, + int namelen); +static int xfs_dir2_shortform_validate_ondisk(xfs_mount_t *mp, + xfs_dinode_t *dip); + +/* + * Utility routine declarations. + */ +static int xfs_dir2_put_dirent64_direct(xfs_dir2_put_args_t *pa); +static int xfs_dir2_put_dirent64_uio(xfs_dir2_put_args_t *pa); + +/* + * Directory operations vector. + */ +xfs_dirops_t xfsv2_dirops = { + .xd_mount = xfs_dir2_mount, + .xd_isempty = xfs_dir2_isempty, + .xd_init = xfs_dir2_init, + .xd_createname = xfs_dir2_createname, + .xd_lookup = xfs_dir2_lookup, + .xd_removename = xfs_dir2_removename, + .xd_getdents = xfs_dir2_getdents, + .xd_replace = xfs_dir2_replace, + .xd_canenter = xfs_dir2_canenter, + .xd_shortform_validate_ondisk = xfs_dir2_shortform_validate_ondisk, + .xd_shortform_to_single = xfs_dir2_sf_to_block, +}; + +/* + * Interface routines. + */ + +/* + * Initialize directory-related fields in the mount structure. + */ +static void +xfs_dir2_mount( + xfs_mount_t *mp) /* filesystem mount point */ +{ + mp->m_dirversion = 2; + ASSERT((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) <= + XFS_MAX_BLOCKSIZE); + mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog); + mp->m_dirblkfsbs = 1 << mp->m_sb.sb_dirblklog; + mp->m_dirdatablk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_DATA_FIRSTDB(mp)); + mp->m_dirleafblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_LEAF_FIRSTDB(mp)); + mp->m_dirfreeblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_FREE_FIRSTDB(mp)); + mp->m_attr_node_ents = + (mp->m_sb.sb_blocksize - (uint)sizeof(xfs_da_node_hdr_t)) / + (uint)sizeof(xfs_da_node_entry_t); + mp->m_dir_node_ents = + (mp->m_dirblksize - (uint)sizeof(xfs_da_node_hdr_t)) / + (uint)sizeof(xfs_da_node_entry_t); + mp->m_dir_magicpct = (mp->m_dirblksize * 37) / 100; +} + +/* + * Return 1 if directory contains only "." and "..". + */ +static int /* return code */ +xfs_dir2_isempty( + xfs_inode_t *dp) /* incore inode structure */ +{ + xfs_dir2_sf_t *sfp; /* shortform directory structure */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + /* + * Might happen during shutdown. + */ + if (dp->i_d.di_size == 0) { + return 1; + } + if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp)) + return 0; + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + return INT_ISZERO(sfp->hdr.count, ARCH_CONVERT); +} + +/* + * Initialize a directory with its "." and ".." entries. 
+ */ +static int /* error */ +xfs_dir2_init( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + xfs_inode_t *pdp) /* incore parent directory inode */ +{ + xfs_da_args_t args; /* operation arguments */ + int error; /* error return value */ + + memset((char *)&args, 0, sizeof(args)); + args.dp = dp; + args.trans = tp; + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + if ((error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino))) { + return error; + } + return xfs_dir2_sf_create(&args, pdp->i_ino); +} + +/* + Enter a name in a directory. + */ +static int /* error */ +xfs_dir2_createname( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + char *name, /* new entry name */ + int namelen, /* new entry name length */ + xfs_ino_t inum, /* new entry inode number */ + xfs_fsblock_t *first, /* bmap's firstblock */ + xfs_bmap_free_t *flist, /* bmap's freeblock list */ + xfs_extlen_t total) /* bmap's total block count */ +{ + xfs_da_args_t args; /* operation arguments */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) { + return rval; + } + XFS_STATS_INC(xfsstats.xs_dir_create); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = inum; + args.dp = dp; + args.firstblock = first; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = tp; + args.justcheck = 0; + args.addname = args.oknoent = 1; + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_addname(&args); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_block_addname(&args); + else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_leaf_addname(&args); + else + rval = xfs_dir2_node_addname(&args); + return rval; +} + +/* + * Lookup a name in a directory, give back the inode number. + */ +static int /* error */ +xfs_dir2_lookup( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + char *name, /* lookup name */ + int namelen, /* lookup name length */ + xfs_ino_t *inum) /* out: inode number */ +{ + xfs_da_args_t args; /* operation arguments */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + XFS_STATS_INC(xfsstats.xs_dir_lookup); + + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = 0; + args.dp = dp; + args.firstblock = NULL; + args.flist = NULL; + args.total = 0; + args.whichfork = XFS_DATA_FORK; + args.trans = tp; + args.justcheck = args.addname = 0; + args.oknoent = 1; + /* + * Decide on what work routines to call based on the inode size. 
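+	 * Local (inline) format uses the shortform routines; otherwise
+	 * xfs_dir2_isblock/xfs_dir2_isleaf check the last mapped offset
+	 * to pick the single-block, leaf or node form of the operation.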
+ */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_lookup(&args); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_block_lookup(&args); + else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_leaf_lookup(&args); + else + rval = xfs_dir2_node_lookup(&args); + if (rval == EEXIST) + rval = 0; + if (rval == 0) + *inum = args.inumber; + return rval; +} + +/* + * Remove an entry from a directory. + */ +static int /* error */ +xfs_dir2_removename( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + char *name, /* name of entry to remove */ + int namelen, /* name length of entry to remove */ + xfs_ino_t ino, /* inode number of entry to remove */ + xfs_fsblock_t *first, /* bmap's firstblock */ + xfs_bmap_free_t *flist, /* bmap's freeblock list */ + xfs_extlen_t total) /* bmap's total block count */ +{ + xfs_da_args_t args; /* operation arguments */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + XFS_STATS_INC(xfsstats.xs_dir_remove); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = ino; + args.dp = dp; + args.firstblock = first; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = tp; + args.justcheck = args.addname = args.oknoent = 0; + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_removename(&args); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_block_removename(&args); + else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_leaf_removename(&args); + else + rval = xfs_dir2_node_removename(&args); + return rval; +} + +/* + * Read a directory. + */ +static int /* error */ +xfs_dir2_getdents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + uio_t *uio, /* caller's buffer control */ + int *eofp) /* out: eof reached */ +{ + int alignment; /* alignment required for ABI */ + xfs_dirent_t *dbp; /* malloc'ed buffer */ + xfs_dir2_put_t put; /* entry formatting routine */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + XFS_STATS_INC(xfsstats.xs_dir_getdents); + /* + * If our caller has given us a single contiguous aligned memory buffer, + * just work directly within that buffer. If it's in user memory, + * lock it down first. + */ + alignment = sizeof(xfs_off_t) - 1; + if ((uio->uio_iovcnt == 1) && + (((__psint_t)uio->uio_iov[0].iov_base & alignment) == 0) && + ((uio->uio_iov[0].iov_len & alignment) == 0)) { + dbp = NULL; + put = xfs_dir2_put_dirent64_direct; + } else { + dbp = kmem_alloc(sizeof(*dbp) + MAXNAMELEN, KM_SLEEP); + put = xfs_dir2_put_dirent64_uio; + } + + *eofp = 0; + /* + * Decide on what work routines to call based on the inode size. 
+ */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_getdents(dp, uio, eofp, dbp, put); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + ; + } else if (v) + rval = xfs_dir2_block_getdents(tp, dp, uio, eofp, dbp, put); + else + rval = xfs_dir2_leaf_getdents(tp, dp, uio, eofp, dbp, put); + if (dbp != NULL) + kmem_free(dbp, sizeof(*dbp) + MAXNAMELEN); + return rval; +} + +/* + * Replace the inode number of a directory entry. + */ +static int /* error */ +xfs_dir2_replace( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + char *name, /* name of entry to replace */ + int namelen, /* name length of entry to replace */ + xfs_ino_t inum, /* new inode number */ + xfs_fsblock_t *first, /* bmap's firstblock */ + xfs_bmap_free_t *flist, /* bmap's freeblock list */ + xfs_extlen_t total) /* bmap's total block count */ +{ + xfs_da_args_t args; /* operation arguments */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) { + return rval; + } + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = inum; + args.dp = dp; + args.firstblock = first; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = tp; + args.justcheck = args.addname = args.oknoent = 0; + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_replace(&args); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_block_replace(&args); + else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_leaf_replace(&args); + else + rval = xfs_dir2_node_replace(&args); + return rval; +} + +/* + * See if this entry can be added to the directory without allocating space. + */ +static int /* error */ +xfs_dir2_canenter( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + char *name, /* name of entry to add */ + int namelen) /* name length of entry to add */ +{ + xfs_da_args_t args; /* operation arguments */ + int rval; /* return value */ + int v; /* type-checking value */ + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = 0; + args.dp = dp; + args.firstblock = NULL; + args.flist = NULL; + args.total = 0; + args.whichfork = XFS_DATA_FORK; + args.trans = tp; + args.justcheck = args.addname = args.oknoent = 1; + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) + rval = xfs_dir2_sf_addname(&args); + else if ((rval = xfs_dir2_isblock(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_block_addname(&args); + else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) { + return rval; + } else if (v) + rval = xfs_dir2_leaf_addname(&args); + else + rval = xfs_dir2_node_addname(&args); + return rval; +} + +/* + * Dummy routine for shortform inode validation. + * Can't really do this. 
+ */ +/* ARGSUSED */ +static int /* error */ +xfs_dir2_shortform_validate_ondisk( + xfs_mount_t *mp, /* filesystem mount point */ + xfs_dinode_t *dip) /* ondisk inode */ +{ + return 0; +} + +/* + * Utility routines. + */ + +/* + * Add a block to the directory. + * This routine is for data and free blocks, not leaf/node blocks + * which are handled by xfs_da_grow_inode. + */ +int /* error */ +xfs_dir2_grow_inode( + xfs_da_args_t *args, /* operation arguments */ + int space, /* v2 dir's space XFS_DIR2_xxx_SPACE */ + xfs_dir2_db_t *dbp) /* out: block number added */ +{ + xfs_fileoff_t bno; /* directory offset of new block */ + int count; /* count of filesystem blocks */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return value */ + int got; /* blocks actually mapped */ + int i; /* temp mapping index */ + xfs_bmbt_irec_t map; /* single structure for bmap */ + int mapi; /* mapping index */ + xfs_bmbt_irec_t *mapp; /* bmap mapping structure(s) */ + xfs_mount_t *mp; /* filesystem mount point */ + int nmap; /* number of bmap entries */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_s("grow_inode", args, space); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + /* + * Set lowest possible block in the space requested. + */ + bno = XFS_B_TO_FSBT(mp, space * XFS_DIR2_SPACE_SIZE); + count = mp->m_dirblkfsbs; + /* + * Find the first hole for our block. + */ + if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, XFS_DATA_FORK))) { + return error; + } + nmap = 1; + ASSERT(args->firstblock != NULL); + /* + * Try mapping the new block contiguously (one extent). + */ + if ((error = xfs_bmapi(tp, dp, bno, count, + XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG, + args->firstblock, args->total, &map, &nmap, + args->flist))) { + return error; + } + ASSERT(nmap <= 1); + /* + * Got it in 1. + */ + if (nmap == 1) { + mapp = ↦ + mapi = 1; + } + /* + * Didn't work and this is a multiple-fsb directory block. + * Try again with contiguous flag turned on. + */ + else if (nmap == 0 && count > 1) { + xfs_fileoff_t b; /* current file offset */ + + /* + * Space for maximum number of mappings. + */ + mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP); + /* + * Iterate until we get to the end of our block. + */ + for (b = bno, mapi = 0; b < bno + count; ) { + int c; /* current fsb count */ + + /* + * Can't map more than MAX_NMAP at once. + */ + nmap = MIN(XFS_BMAP_MAX_NMAP, count); + c = (int)(bno + count - b); + if ((error = xfs_bmapi(tp, dp, b, c, + XFS_BMAPI_WRITE|XFS_BMAPI_METADATA, + args->firstblock, args->total, + &mapp[mapi], &nmap, args->flist))) { + kmem_free(mapp, sizeof(*mapp) * count); + return error; + } + if (nmap < 1) + break; + /* + * Add this bunch into our table, go to the next offset. + */ + mapi += nmap; + b = mapp[mapi - 1].br_startoff + + mapp[mapi - 1].br_blockcount; + } + } + /* + * Didn't work. + */ + else { + mapi = 0; + mapp = NULL; + } + /* + * See how many fsb's we got. + */ + for (i = 0, got = 0; i < mapi; i++) + got += mapp[i].br_blockcount; + /* + * Didn't get enough fsb's, or the first/last block's are wrong. + */ + if (got != count || mapp[0].br_startoff != bno || + mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != + bno + count) { + if (mapp != &map) + kmem_free(mapp, sizeof(*mapp) * count); + return XFS_ERROR(ENOSPC); + } + /* + * Done with the temporary mapping table. 
+ */ + if (mapp != &map) + kmem_free(mapp, sizeof(*mapp) * count); + *dbp = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)bno); + /* + * Update file's size if this is the data space and it grew. + */ + if (space == XFS_DIR2_DATA_SPACE) { + xfs_fsize_t size; /* directory file (data) size */ + + size = XFS_FSB_TO_B(mp, bno + count); + if (size > dp->i_d.di_size) { + dp->i_d.di_size = size; + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + } + } + return 0; +} + +/* + * See if the directory is a single-block form directory. + */ +int /* error */ +xfs_dir2_isblock( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + int *vp) /* out: 1 is block, 0 is not block */ +{ + xfs_fileoff_t last; /* last file offset */ + xfs_mount_t *mp; /* filesystem mount point */ + int rval; /* return value */ + + mp = dp->i_mount; + if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK))) { + return rval; + } + rval = XFS_FSB_TO_B(mp, last) == mp->m_dirblksize; + ASSERT(rval == 0 || dp->i_d.di_size == mp->m_dirblksize); + *vp = rval; + return 0; +} + +/* + * See if the directory is a single-leaf form directory. + */ +int /* error */ +xfs_dir2_isleaf( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + int *vp) /* out: 1 is leaf, 0 is not leaf */ +{ + xfs_fileoff_t last; /* last file offset */ + xfs_mount_t *mp; /* filesystem mount point */ + int rval; /* return value */ + + mp = dp->i_mount; + if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK))) { + return rval; + } + *vp = last == mp->m_dirleafblk + (1 << mp->m_sb.sb_dirblklog); + return 0; +} + +/* + * Getdents put routine for 64-bit ABI, direct form. + */ +static int /* error */ +xfs_dir2_put_dirent64_direct( + xfs_dir2_put_args_t *pa) /* argument bundle */ +{ + xfs_dirent_t *idbp; /* dirent pointer */ + iovec_t *iovp; /* io vector */ + int namelen; /* entry name length */ + int reclen; /* entry total length */ + uio_t *uio; /* I/O control */ + + namelen = pa->namelen; + reclen = DIRENTSIZE(namelen); + uio = pa->uio; + /* + * Won't fit in the remaining space. + */ + if (reclen > uio->uio_resid) { + pa->done = 0; + return 0; + } + iovp = uio->uio_iov; + idbp = (xfs_dirent_t *)iovp->iov_base; + iovp->iov_base = (char *)idbp + reclen; + iovp->iov_len -= reclen; + uio->uio_resid -= reclen; + idbp->d_reclen = reclen; + idbp->d_ino = pa->ino; + idbp->d_off = pa->cook; + idbp->d_name[namelen] = '\0'; + pa->done = 1; + memcpy(idbp->d_name, pa->name, namelen); + return 0; +} + +/* + * Getdents put routine for 64-bit ABI, uio form. + */ +static int /* error */ +xfs_dir2_put_dirent64_uio( + xfs_dir2_put_args_t *pa) /* argument bundle */ +{ + xfs_dirent_t *idbp; /* dirent pointer */ + int namelen; /* entry name length */ + int reclen; /* entry total length */ + int rval; /* return value */ + uio_t *uio; /* I/O control */ + + namelen = pa->namelen; + reclen = DIRENTSIZE(namelen); + uio = pa->uio; + /* + * Won't fit in the remaining space. + */ + if (reclen > uio->uio_resid) { + pa->done = 0; + return 0; + } + idbp = pa->dbp; + idbp->d_reclen = reclen; + idbp->d_ino = pa->ino; + idbp->d_off = pa->cook; + idbp->d_name[namelen] = '\0'; + memcpy(idbp->d_name, pa->name, namelen); + rval = uiomove((caddr_t)idbp, reclen, UIO_READ, uio); + pa->done = (rval == 0); + return rval; +} + +/* + * Remove the given block from the directory. + * This routine is used for data and free blocks, leaf/node are done + * by xfs_da_shrink_inode. 
+ */ +int +xfs_dir2_shrink_inode( + xfs_da_args_t *args, /* operation arguments */ + xfs_dir2_db_t db, /* directory block number */ + xfs_dabuf_t *bp) /* block's buffer */ +{ + xfs_fileoff_t bno; /* directory file offset */ + xfs_dablk_t da; /* directory file offset */ + int done; /* bunmap is finished */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_db("shrink_inode", args, db, bp); + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + da = XFS_DIR2_DB_TO_DA(mp, db); + /* + * Unmap the fsblock(s). + */ + if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs, + XFS_BMAPI_METADATA, 0, args->firstblock, args->flist, + &done))) { + /* + * ENOSPC actually can happen if we're in a removename with + * no space reservation, and the resulting block removal + * would cause a bmap btree split or conversion from extents + * to btree. This can only happen for un-fragmented + * directory blocks, since you need to be punching out + * the middle of an extent. + * In this case we need to leave the block in the file, + * and not binval it. + * So the block has to be in a consistent empty state + * and appropriately logged. + * We don't free up the buffer, the caller can tell it + * hasn't happened since it got an error back. + */ + return error; + } + ASSERT(done); + /* + * Invalidate the buffer from the transaction. + */ + xfs_da_binval(tp, bp); + /* + * If it's not a data block, we're done. + */ + if (db >= XFS_DIR2_LEAF_FIRSTDB(mp)) + return 0; + /* + * If the block isn't the last one in the directory, we're done. + */ + if (dp->i_d.di_size > XFS_DIR2_DB_OFF_TO_BYTE(mp, db + 1, 0)) + return 0; + bno = da; + if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) { + /* + * This can't really happen unless there's kernel corruption. + */ + return error; + } + if (db == mp->m_dirdatablk) + ASSERT(bno == 0); + else + ASSERT(bno > 0); + /* + * Set the size to the new last block. + */ + dp->i_d.di_size = XFS_FSB_TO_B(mp, bno); + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_data.c linux.22-ac2/fs/xfs/xfs_dir2_data.c --- linux.vanilla/fs/xfs/xfs_dir2_data.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_data.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,855 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_data.c + * Core data block handling routines for XFS V2 directories. + * See xfs_dir2_data.h for data structures. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_error.h" + +#ifdef DEBUG +/* + * Check the consistency of the data block. + * The input can also be a block-format directory. + * Pop an assert if we find anything bad. + */ +void +xfs_dir2_data_check( + xfs_inode_t *dp, /* incore inode pointer */ + xfs_dabuf_t *bp) /* data block's buffer */ +{ + xfs_dir2_dataptr_t addr; /* addr for leaf lookup */ + xfs_dir2_data_free_t *bf; /* bestfree table */ + xfs_dir2_block_tail_t *btp=NULL; /* block tail */ + int count; /* count of entries found */ + xfs_dir2_data_t *d; /* data block pointer */ + xfs_dir2_data_entry_t *dep; /* data entry */ + xfs_dir2_data_free_t *dfp; /* bestfree entry */ + xfs_dir2_data_unused_t *dup; /* unused entry */ + char *endp; /* end of useful data */ + int freeseen; /* mask of bestfrees seen */ + xfs_dahash_t hash; /* hash of current name */ + int i; /* leaf index */ + int lastfree; /* last entry was unused */ + xfs_dir2_leaf_entry_t *lep=NULL; /* block leaf entries */ + xfs_mount_t *mp; /* filesystem mount point */ + char *p; /* current data position */ + int stale; /* count of stale leaves */ + + mp = dp->i_mount; + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + bf = d->hdr.bestfree; + p = (char *)d->u; + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); + lep = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + endp = (char *)lep; + } else + endp = (char *)d + mp->m_dirblksize; + count = lastfree = freeseen = 0; + /* + * Account for zero bestfree entries. + */ + if (INT_ISZERO(bf[0].length, ARCH_CONVERT)) { + ASSERT(INT_ISZERO(bf[0].offset, ARCH_CONVERT)); + freeseen |= 1 << 0; + } + if (INT_ISZERO(bf[1].length, ARCH_CONVERT)) { + ASSERT(INT_ISZERO(bf[1].offset, ARCH_CONVERT)); + freeseen |= 1 << 1; + } + if (INT_ISZERO(bf[2].length, ARCH_CONVERT)) { + ASSERT(INT_ISZERO(bf[2].offset, ARCH_CONVERT)); + freeseen |= 1 << 2; + } + ASSERT(INT_GET(bf[0].length, ARCH_CONVERT) >= INT_GET(bf[1].length, ARCH_CONVERT)); + ASSERT(INT_GET(bf[1].length, ARCH_CONVERT) >= INT_GET(bf[2].length, ARCH_CONVERT)); + /* + * Loop over the data/unused entries. + */ + while (p < endp) { + dup = (xfs_dir2_data_unused_t *)p; + /* + * If it's unused, look for the space in the bestfree table. 
+ * If we find it, account for that, else make sure it + * doesn't need to be there. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ASSERT(lastfree == 0); + ASSERT(INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT) == + (char *)dup - (char *)d); + dfp = xfs_dir2_data_freefind(d, dup); + if (dfp) { + i = (int)(dfp - bf); + ASSERT((freeseen & (1 << i)) == 0); + freeseen |= 1 << i; + } else + ASSERT(INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(bf[2].length, ARCH_CONVERT)); + p += INT_GET(dup->length, ARCH_CONVERT); + lastfree = 1; + continue; + } + /* + * It's a real entry. Validate the fields. + * If this is a block directory then make sure it's + * in the leaf section of the block. + * The linear search is crude but this is DEBUG code. + */ + dep = (xfs_dir2_data_entry_t *)p; + ASSERT(dep->namelen != 0); + ASSERT(xfs_dir_ino_validate(mp, INT_GET(dep->inumber, ARCH_CONVERT)) == 0); + ASSERT(INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT) == + (char *)dep - (char *)d); + count++; + lastfree = 0; + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + (xfs_dir2_data_aoff_t) + ((char *)dep - (char *)d)); + hash = xfs_da_hashname((char *)dep->name, dep->namelen); + for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { + if (INT_GET(lep[i].address, ARCH_CONVERT) == addr && + INT_GET(lep[i].hashval, ARCH_CONVERT) == hash) + break; + } + ASSERT(i < INT_GET(btp->count, ARCH_CONVERT)); + } + p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); + } + /* + * Need to have seen all the entries and all the bestfree slots. + */ + ASSERT(freeseen == 7); + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + for (i = stale = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { + if (INT_GET(lep[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + stale++; + if (i > 0) + ASSERT(INT_GET(lep[i].hashval, ARCH_CONVERT) >= INT_GET(lep[i - 1].hashval, ARCH_CONVERT)); + } + ASSERT(count == INT_GET(btp->count, ARCH_CONVERT) - INT_GET(btp->stale, ARCH_CONVERT)); + ASSERT(stale == INT_GET(btp->stale, ARCH_CONVERT)); + } +} +#endif + +/* + * Given a data block and an unused entry from that block, + * return the bestfree entry if any that corresponds to it. + */ +xfs_dir2_data_free_t * +xfs_dir2_data_freefind( + xfs_dir2_data_t *d, /* data block */ + xfs_dir2_data_unused_t *dup) /* data unused entry */ +{ + xfs_dir2_data_free_t *dfp; /* bestfree entry */ + xfs_dir2_data_aoff_t off; /* offset value needed */ +#if defined(DEBUG) && defined(__KERNEL__) + int matched; /* matched the value */ + int seenzero; /* saw a 0 bestfree entry */ +#endif + + off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)d); +#if defined(DEBUG) && defined(__KERNEL__) + /* + * Validate some consistency in the bestfree table. + * Check order, non-overlapping entries, and if we find the + * one we're looking for it has to be exact. 
+ */ + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0; + dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; + dfp++) { + if (INT_ISZERO(dfp->offset, ARCH_CONVERT)) { + ASSERT(INT_ISZERO(dfp->length, ARCH_CONVERT)); + seenzero = 1; + continue; + } + ASSERT(seenzero == 0); + if (INT_GET(dfp->offset, ARCH_CONVERT) == off) { + matched = 1; + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(dup->length, ARCH_CONVERT)); + } else if (off < INT_GET(dfp->offset, ARCH_CONVERT)) + ASSERT(off + INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(dfp->offset, ARCH_CONVERT)); + else + ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) + INT_GET(dfp->length, ARCH_CONVERT) <= off); + ASSERT(matched || INT_GET(dfp->length, ARCH_CONVERT) >= INT_GET(dup->length, ARCH_CONVERT)); + if (dfp > &d->hdr.bestfree[0]) + ASSERT(INT_GET(dfp[-1].length, ARCH_CONVERT) >= INT_GET(dfp[0].length, ARCH_CONVERT)); + } +#endif + /* + * If this is smaller than the smallest bestfree entry, + * it can't be there since they're sorted. + */ + if (INT_GET(dup->length, ARCH_CONVERT) < INT_GET(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length, ARCH_CONVERT)) + return NULL; + /* + * Look at the three bestfree entries for our guy. + */ + for (dfp = &d->hdr.bestfree[0]; + dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; + dfp++) { + if (INT_ISZERO(dfp->offset, ARCH_CONVERT)) + return NULL; + if (INT_GET(dfp->offset, ARCH_CONVERT) == off) + return dfp; + } + /* + * Didn't find it. This only happens if there are duplicate lengths. + */ + return NULL; +} + +/* + * Insert an unused-space entry into the bestfree table. + */ +xfs_dir2_data_free_t * /* entry inserted */ +xfs_dir2_data_freeinsert( + xfs_dir2_data_t *d, /* data block pointer */ + xfs_dir2_data_unused_t *dup, /* unused space */ + int *loghead) /* log the data header (out) */ +{ + xfs_dir2_data_free_t *dfp; /* bestfree table pointer */ + xfs_dir2_data_free_t new; /* new bestfree entry */ + +#ifdef __KERNEL__ + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); +#endif + dfp = d->hdr.bestfree; + INT_COPY(new.length, dup->length, ARCH_CONVERT); + INT_SET(new.offset, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dup - (char *)d)); + /* + * Insert at position 0, 1, or 2; or not at all. + */ + if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[0].length, ARCH_CONVERT)) { + dfp[2] = dfp[1]; + dfp[1] = dfp[0]; + dfp[0] = new; + *loghead = 1; + return &dfp[0]; + } + if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[1].length, ARCH_CONVERT)) { + dfp[2] = dfp[1]; + dfp[1] = new; + *loghead = 1; + return &dfp[1]; + } + if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[2].length, ARCH_CONVERT)) { + dfp[2] = new; + *loghead = 1; + return &dfp[2]; + } + return NULL; +} + +/* + * Remove a bestfree entry from the table. + */ +void +xfs_dir2_data_freeremove( + xfs_dir2_data_t *d, /* data block pointer */ + xfs_dir2_data_free_t *dfp, /* bestfree entry pointer */ + int *loghead) /* out: log data header */ +{ +#ifdef __KERNEL__ + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); +#endif + /* + * It's the first entry, slide the next 2 up. 
+ */ + if (dfp == &d->hdr.bestfree[0]) { + d->hdr.bestfree[0] = d->hdr.bestfree[1]; + d->hdr.bestfree[1] = d->hdr.bestfree[2]; + } + /* + * It's the second entry, slide the 3rd entry up. + */ + else if (dfp == &d->hdr.bestfree[1]) + d->hdr.bestfree[1] = d->hdr.bestfree[2]; + /* + * Must be the last entry. + */ + else + ASSERT(dfp == &d->hdr.bestfree[2]); + /* + * Clear the 3rd entry, must be zero now. + */ + INT_ZERO(d->hdr.bestfree[2].length, ARCH_CONVERT); + INT_ZERO(d->hdr.bestfree[2].offset, ARCH_CONVERT); + *loghead = 1; +} + +/* + * Given a data block, reconstruct its bestfree map. + */ +void +xfs_dir2_data_freescan( + xfs_mount_t *mp, /* filesystem mount point */ + xfs_dir2_data_t *d, /* data block pointer */ + int *loghead, /* out: log data header */ + char *aendp) /* in: caller's endp */ +{ + xfs_dir2_block_tail_t *btp; /* block tail */ + xfs_dir2_data_entry_t *dep; /* active data entry */ + xfs_dir2_data_unused_t *dup; /* unused data entry */ + char *endp; /* end of block's data */ + char *p; /* current entry pointer */ + +#ifdef __KERNEL__ + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); +#endif + /* + * Start by clearing the table. + */ + memset(d->hdr.bestfree, 0, sizeof(d->hdr.bestfree)); + *loghead = 1; + /* + * Set up pointers. + */ + p = (char *)d->u; + if (aendp) + endp = aendp; + else if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); + endp = (char *)XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + } else + endp = (char *)d + mp->m_dirblksize; + /* + * Loop over the block's entries. + */ + while (p < endp) { + dup = (xfs_dir2_data_unused_t *)p; + /* + * If it's a free entry, insert it. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ASSERT((char *)dup - (char *)d == + INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT)); + xfs_dir2_data_freeinsert(d, dup, loghead); + p += INT_GET(dup->length, ARCH_CONVERT); + } + /* + * For active entries, check their tags and skip them. + */ + else { + dep = (xfs_dir2_data_entry_t *)p; + ASSERT((char *)dep - (char *)d == + INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT)); + p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); + } + } +} + +/* + * Initialize a data block at the given block number in the directory. + * Give back the buffer for the created block. + */ +int /* error */ +xfs_dir2_data_init( + xfs_da_args_t *args, /* directory operation args */ + xfs_dir2_db_t blkno, /* logical dir block number */ + xfs_dabuf_t **bpp) /* output block buffer */ +{ + xfs_dabuf_t *bp; /* block buffer */ + xfs_dir2_data_t *d; /* pointer to block */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* unused entry pointer */ + int error; /* error return value */ + int i; /* bestfree index */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + int t; /* temp */ + + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + /* + * Get the buffer set up for the block. + */ + error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, blkno), -1, &bp, + XFS_DATA_FORK); + if (error) { + return error; + } + ASSERT(bp != NULL); + /* + * Initialize the header. 
+ */ + d = bp->data; + INT_SET(d->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); + INT_SET(d->hdr.bestfree[0].offset, ARCH_CONVERT, (xfs_dir2_data_off_t)sizeof(d->hdr)); + for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) { + INT_ZERO(d->hdr.bestfree[i].length, ARCH_CONVERT); + INT_ZERO(d->hdr.bestfree[i].offset, ARCH_CONVERT); + } + /* + * Set up an unused entry for the block's body. + */ + dup = &d->u[0].unused; + INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + + t=mp->m_dirblksize - (uint)sizeof(d->hdr); + INT_SET(d->hdr.bestfree[0].length, ARCH_CONVERT, t); + INT_SET(dup->length, ARCH_CONVERT, t); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)dup - (char *)d)); + /* + * Log it and return it. + */ + xfs_dir2_data_log_header(tp, bp); + xfs_dir2_data_log_unused(tp, bp, dup); + *bpp = bp; + return 0; +} + +/* + * Log an active data entry from the block. + */ +void +xfs_dir2_data_log_entry( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* block buffer */ + xfs_dir2_data_entry_t *dep) /* data entry pointer */ +{ + xfs_dir2_data_t *d; /* data block pointer */ + + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d), + (uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) - + (char *)d - 1)); +} + +/* + * Log a data block header. + */ +void +xfs_dir2_data_log_header( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp) /* block buffer */ +{ + xfs_dir2_data_t *d; /* data block pointer */ + + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d), + (uint)(sizeof(d->hdr) - 1)); +} + +/* + * Log a data unused entry. + */ +void +xfs_dir2_data_log_unused( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* block buffer */ + xfs_dir2_data_unused_t *dup) /* data unused pointer */ +{ + xfs_dir2_data_t *d; /* data block pointer */ + + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + /* + * Log the first part of the unused entry. + */ + xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)d), + (uint)((char *)&dup->length + sizeof(dup->length) - + 1 - (char *)d)); + /* + * Log the end (tag) of the unused entry. + */ + xfs_da_log_buf(tp, bp, + (uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT) - (char *)d), + (uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT) - (char *)d + + sizeof(xfs_dir2_data_off_t) - 1)); +} + +/* + * Make a byte range in the data block unused. + * Its current contents are unimportant. 
+ */ +void +xfs_dir2_data_make_free( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* block buffer */ + xfs_dir2_data_aoff_t offset, /* starting byte offset */ + xfs_dir2_data_aoff_t len, /* length in bytes */ + int *needlogp, /* out: log header */ + int *needscanp) /* out: regen bestfree */ +{ + xfs_dir2_data_t *d; /* data block pointer */ + xfs_dir2_data_free_t *dfp; /* bestfree pointer */ + char *endptr; /* end of data area */ + xfs_mount_t *mp; /* filesystem mount point */ + int needscan; /* need to regen bestfree */ + xfs_dir2_data_unused_t *newdup; /* new unused entry */ + xfs_dir2_data_unused_t *postdup; /* unused entry after us */ + xfs_dir2_data_unused_t *prevdup; /* unused entry before us */ + + mp = tp->t_mountp; + d = bp->data; + /* + * Figure out where the end of the data area is. + */ + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) + endptr = (char *)d + mp->m_dirblksize; + else { + xfs_dir2_block_tail_t *btp; /* block tail */ + + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); + endptr = (char *)XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + } + /* + * If this isn't the start of the block, then back up to + * the previous entry and see if it's free. + */ + if (offset > sizeof(d->hdr)) { + xfs_dir2_data_off_t *tagp; /* tag just before us */ + + tagp = (xfs_dir2_data_off_t *)((char *)d + offset) - 1; + prevdup = (xfs_dir2_data_unused_t *)((char *)d + INT_GET(*tagp, ARCH_CONVERT)); + if (INT_GET(prevdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + prevdup = NULL; + } else + prevdup = NULL; + /* + * If this isn't the end of the block, see if the entry after + * us is free. + */ + if ((char *)d + offset + len < endptr) { + postdup = + (xfs_dir2_data_unused_t *)((char *)d + offset + len); + if (INT_GET(postdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + postdup = NULL; + } else + postdup = NULL; + ASSERT(*needscanp == 0); + needscan = 0; + /* + * Previous and following entries are both free, + * merge everything into a single free entry. + */ + if (prevdup && postdup) { + xfs_dir2_data_free_t *dfp2; /* another bestfree pointer */ + + /* + * See if prevdup and/or postdup are in bestfree table. + */ + dfp = xfs_dir2_data_freefind(d, prevdup); + dfp2 = xfs_dir2_data_freefind(d, postdup); + /* + * We need a rescan unless there are exactly 2 free entries + * namely our two. Then we know what's happening, otherwise + * since the third bestfree is there, there might be more + * entries. + */ + needscan = !INT_ISZERO(d->hdr.bestfree[2].length, ARCH_CONVERT); + /* + * Fix up the new big freespace. + */ + INT_MOD(prevdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(prevdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, prevdup); + if (!needscan) { + /* + * Has to be the case that entries 0 and 1 are + * dfp and dfp2 (don't know which is which), and + * entry 2 is empty. + * Remove entry 1 first then entry 0. + */ + ASSERT(dfp && dfp2); + if (dfp == &d->hdr.bestfree[1]) { + dfp = &d->hdr.bestfree[0]; + ASSERT(dfp2 == dfp); + dfp2 = &d->hdr.bestfree[1]; + } + xfs_dir2_data_freeremove(d, dfp2, needlogp); + xfs_dir2_data_freeremove(d, dfp, needlogp); + /* + * Now insert the new entry. 
+ */ + dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp); + ASSERT(dfp == &d->hdr.bestfree[0]); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(prevdup->length, ARCH_CONVERT)); + ASSERT(INT_ISZERO(dfp[1].length, ARCH_CONVERT)); + ASSERT(INT_ISZERO(dfp[2].length, ARCH_CONVERT)); + } + } + /* + * The entry before us is free, merge with it. + */ + else if (prevdup) { + dfp = xfs_dir2_data_freefind(d, prevdup); + INT_MOD(prevdup->length, ARCH_CONVERT, len); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(prevdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, prevdup); + /* + * If the previous entry was in the table, the new entry + * is longer, so it will be in the table too. Remove + * the old one and add the new one. + */ + if (dfp) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + (void)xfs_dir2_data_freeinsert(d, prevdup, needlogp); + } + /* + * Otherwise we need a scan if the new entry is big enough. + */ + else + needscan = INT_GET(prevdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); + } + /* + * The following entry is free, merge with it. + */ + else if (postdup) { + dfp = xfs_dir2_data_freefind(d, postdup); + newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); + INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + /* + * If the following entry was in the table, the new entry + * is longer, so it will be in the table too. Remove + * the old one and add the new one. + */ + if (dfp) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); + } + /* + * Otherwise we need a scan if the new entry is big enough. + */ + else + needscan = INT_GET(newdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); + } + /* + * Neither neighbor is free. Make a new entry. + */ + else { + newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); + INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup->length, ARCH_CONVERT, len); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); + } + *needscanp = needscan; +} + +/* + * Take a byte range out of an existing unused space and make it un-free. 
+ */ +void +xfs_dir2_data_use_free( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* data block buffer */ + xfs_dir2_data_unused_t *dup, /* unused entry */ + xfs_dir2_data_aoff_t offset, /* starting offset to use */ + xfs_dir2_data_aoff_t len, /* length to use */ + int *needlogp, /* out: need to log header */ + int *needscanp) /* out: need regen bestfree */ +{ + xfs_dir2_data_t *d; /* data block */ + xfs_dir2_data_free_t *dfp; /* bestfree pointer */ + int matchback; /* matches end of freespace */ + int matchfront; /* matches start of freespace */ + int needscan; /* need to regen bestfree */ + xfs_dir2_data_unused_t *newdup; /* new unused entry */ + xfs_dir2_data_unused_t *newdup2; /* another new unused entry */ + int oldlen; /* old unused entry's length */ + + d = bp->data; + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG); + ASSERT(offset >= (char *)dup - (char *)d); + ASSERT(offset + len <= (char *)dup + INT_GET(dup->length, ARCH_CONVERT) - (char *)d); + ASSERT((char *)dup - (char *)d == INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup, ARCH_CONVERT), ARCH_CONVERT)); + /* + * Look up the entry in the bestfree table. + */ + dfp = xfs_dir2_data_freefind(d, dup); + oldlen = INT_GET(dup->length, ARCH_CONVERT); + ASSERT(dfp || oldlen <= INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT)); + /* + * Check for alignment with front and back of the entry. + */ + matchfront = (char *)dup - (char *)d == offset; + matchback = (char *)dup + oldlen - (char *)d == offset + len; + ASSERT(*needscanp == 0); + needscan = 0; + /* + * If we matched it exactly we just need to get rid of it from + * the bestfree table. + */ + if (matchfront && matchback) { + if (dfp) { + needscan = !INT_ISZERO(d->hdr.bestfree[2].offset, ARCH_CONVERT); + if (!needscan) + xfs_dir2_data_freeremove(d, dfp, needlogp); + } + } + /* + * We match the first part of the entry. + * Make a new entry with the remaining freespace. + */ + else if (matchfront) { + newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); + INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup->length, ARCH_CONVERT, oldlen - len); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + /* + * If it was in the table, remove it and add the new one. + */ + if (dfp) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); + ASSERT(dfp != NULL); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); + ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); + /* + * If we got inserted at the last slot, + * that means we don't know if there was a better + * choice for the last slot, or not. Rescan. + */ + needscan = dfp == &d->hdr.bestfree[2]; + } + } + /* + * We match the last part of the entry. + * Trim the allocated space off the tail of the entry. + */ + else if (matchback) { + newdup = dup; + INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) + (((char *)d + offset) - (char *)newdup)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + /* + * If it was in the table, remove it and add the new one. 
+ */ + if (dfp) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); + ASSERT(dfp != NULL); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); + ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); + /* + * If we got inserted at the last slot, + * that means we don't know if there was a better + * choice for the last slot, or not. Rescan. + */ + needscan = dfp == &d->hdr.bestfree[2]; + } + } + /* + * Poking out the middle of an entry. + * Make two new entries. + */ + else { + newdup = dup; + INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) + (((char *)d + offset) - (char *)newdup)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup); + newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len); + INT_SET(newdup2->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup2->length, ARCH_CONVERT, oldlen - len - INT_GET(newdup->length, ARCH_CONVERT)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(newdup2, ARCH_CONVERT), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup2 - (char *)d)); + xfs_dir2_data_log_unused(tp, bp, newdup2); + /* + * If the old entry was in the table, we need to scan + * if the 3rd entry was valid, since these entries + * are smaller than the old one. + * If we don't need to scan that means there were 1 or 2 + * entries in the table, and removing the old and adding + * the 2 new will work. + */ + if (dfp) { + needscan = !INT_ISZERO(d->hdr.bestfree[2].length, ARCH_CONVERT); + if (!needscan) { + xfs_dir2_data_freeremove(d, dfp, needlogp); + (void)xfs_dir2_data_freeinsert(d, newdup, + needlogp); + (void)xfs_dir2_data_freeinsert(d, newdup2, + needlogp); + } + } + } + *needscanp = needscan; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_data.h linux.22-ac2/fs/xfs/xfs_dir2_data.h --- linux.vanilla/fs/xfs/xfs_dir2_data.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_data.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_DATA_H__ +#define __XFS_DIR2_DATA_H__ + +/* + * Directory format 2, data block structures. + */ + +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_inode; +struct xfs_trans; + +/* + * Constants. + */ +#define XFS_DIR2_DATA_MAGIC 0x58443244 /* XD2D: for multiblock dirs */ +#define XFS_DIR2_DATA_ALIGN_LOG 3 /* i.e., 8 bytes */ +#define XFS_DIR2_DATA_ALIGN (1 << XFS_DIR2_DATA_ALIGN_LOG) +#define XFS_DIR2_DATA_FREE_TAG 0xffff +#define XFS_DIR2_DATA_FD_COUNT 3 + +/* + * Directory address space divided into sections, + * spaces separated by 32gb. + */ +#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG)) +#define XFS_DIR2_DATA_SPACE 0 +#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE) +#define XFS_DIR2_DATA_FIRSTDB(mp) \ + XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATA_OFFSET) + +/* + * Offsets of . and .. in data space (always block 0) + */ +#define XFS_DIR2_DATA_DOT_OFFSET \ + ((xfs_dir2_data_aoff_t)sizeof(xfs_dir2_data_hdr_t)) +#define XFS_DIR2_DATA_DOTDOT_OFFSET \ + (XFS_DIR2_DATA_DOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(1)) +#define XFS_DIR2_DATA_FIRST_OFFSET \ + (XFS_DIR2_DATA_DOTDOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(2)) + +/* + * Structures. + */ + +/* + * Describe a free area in the data block. + * The freespace will be formatted as a xfs_dir2_data_unused_t. + */ +typedef struct xfs_dir2_data_free { + xfs_dir2_data_off_t offset; /* start of freespace */ + xfs_dir2_data_off_t length; /* length of freespace */ +} xfs_dir2_data_free_t; + +/* + * Header for the data blocks. + * Always at the beginning of a directory-sized block. + * The code knows that XFS_DIR2_DATA_FD_COUNT is 3. + */ +typedef struct xfs_dir2_data_hdr { + __uint32_t magic; /* XFS_DIR2_DATA_MAGIC */ + /* or XFS_DIR2_BLOCK_MAGIC */ + xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT]; +} xfs_dir2_data_hdr_t; + +/* + * Active entry in a data block. Aligned to 8 bytes. + * Tag appears as the last 2 bytes. + */ +typedef struct xfs_dir2_data_entry { + xfs_ino_t inumber; /* inode number */ + __uint8_t namelen; /* name length */ + __uint8_t name[1]; /* name bytes, no null */ + /* variable offset */ + xfs_dir2_data_off_t tag; /* starting offset of us */ +} xfs_dir2_data_entry_t; + +/* + * Unused entry in a data block. Aligned to 8 bytes. + * Tag appears as the last 2 bytes. + */ +typedef struct xfs_dir2_data_unused { + __uint16_t freetag; /* XFS_DIR2_DATA_FREE_TAG */ + xfs_dir2_data_off_t length; /* total free length */ + /* variable offset */ + xfs_dir2_data_off_t tag; /* starting offset of us */ +} xfs_dir2_data_unused_t; + +typedef union { + xfs_dir2_data_entry_t entry; + xfs_dir2_data_unused_t unused; +} xfs_dir2_data_union_t; + +/* + * Generic data block structure, for xfs_db. + */ +typedef struct xfs_dir2_data { + xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_DATA_MAGIC */ + xfs_dir2_data_union_t u[1]; +} xfs_dir2_data_t; + +/* + * Macros. + */ + +/* + * Size of a data entry. 
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATA_ENTSIZE) +int xfs_dir2_data_entsize(int n); +#define XFS_DIR2_DATA_ENTSIZE(n) xfs_dir2_data_entsize(n) +#else +#define XFS_DIR2_DATA_ENTSIZE(n) \ + ((int)(roundup(offsetof(xfs_dir2_data_entry_t, name[0]) + (n) + \ + (uint)sizeof(xfs_dir2_data_off_t), XFS_DIR2_DATA_ALIGN))) +#endif + +/* + * Pointer to an entry's tag word. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATA_ENTRY_TAG_P) +xfs_dir2_data_off_t *xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep); +#define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep) +#else +#define XFS_DIR2_DATA_ENTRY_TAG_P(dep) \ + ((xfs_dir2_data_off_t *)\ + ((char *)(dep) + XFS_DIR2_DATA_ENTSIZE((dep)->namelen) - \ + (uint)sizeof(xfs_dir2_data_off_t))) +#endif + +/* + * Pointer to a freespace's tag word. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATA_UNUSED_TAG_P) +xfs_dir2_data_off_t *xfs_dir2_data_unused_tag_p_arch( + xfs_dir2_data_unused_t *dup, xfs_arch_t arch); +#define XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup,arch) \ + xfs_dir2_data_unused_tag_p_arch(dup,arch) +#else +#define XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(dup,arch) \ + ((xfs_dir2_data_off_t *)\ + ((char *)(dup) + INT_GET((dup)->length, arch) \ + - (uint)sizeof(xfs_dir2_data_off_t))) +#endif + +/* + * Function declarations. + */ + +#ifdef DEBUG +extern void + xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp); +#else +#define xfs_dir2_data_check(dp,bp) +#endif + +extern xfs_dir2_data_free_t * + xfs_dir2_data_freefind(xfs_dir2_data_t *d, + xfs_dir2_data_unused_t *dup); + +extern xfs_dir2_data_free_t * + xfs_dir2_data_freeinsert(xfs_dir2_data_t *d, + xfs_dir2_data_unused_t *dup, int *loghead); + +extern void + xfs_dir2_data_freeremove(xfs_dir2_data_t *d, + xfs_dir2_data_free_t *dfp, int *loghead); + +extern void + xfs_dir2_data_freescan(struct xfs_mount *mp, xfs_dir2_data_t *d, + int *loghead, char *aendp); + +extern int + xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno, + struct xfs_dabuf **bpp); + +extern void + xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp, + xfs_dir2_data_entry_t *dep); + +extern void + xfs_dir2_data_log_header(struct xfs_trans *tp, struct xfs_dabuf *bp); + +extern void + xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp, + xfs_dir2_data_unused_t *dup); + +extern void + xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp, + xfs_dir2_data_aoff_t offset, + xfs_dir2_data_aoff_t len, int *needlogp, + int *needscanp); + +extern void + xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp, + xfs_dir2_data_unused_t *dup, + xfs_dir2_data_aoff_t offset, + xfs_dir2_data_aoff_t len, int *needlogp, + int *needscanp); + +#endif /* __XFS_DIR2_DATA_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2.h linux.22-ac2/fs/xfs/xfs_dir2.h --- linux.vanilla/fs/xfs/xfs_dir2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_H__ +#define __XFS_DIR2_H__ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_dir2_put_args; +struct xfs_inode; +struct xfs_trans; + +/* + * Directory version 2. + * There are 4 possible formats: + * shortform + * single block - data with embedded leaf at the end + * multiple data blocks, single leaf+freeindex block + * data blocks, node&leaf blocks (btree), freeindex blocks + * + * The shortform format is in xfs_dir2_sf.h. + * The single block format is in xfs_dir2_block.h. + * The data block format is in xfs_dir2_data.h. + * The leaf and freeindex block formats are in xfs_dir2_leaf.h. + * Node blocks are the same as the other version, in xfs_da_btree.h. + */ + +/* + * Byte offset in data block and shortform entry. + */ +typedef __uint16_t xfs_dir2_data_off_t; +#define NULLDATAOFF 0xffffU +typedef uint xfs_dir2_data_aoff_t; /* argument form */ + +/* + * Directory block number (logical dirblk in file) + */ +typedef __uint32_t xfs_dir2_db_t; + +/* + * Byte offset in a directory. + */ +typedef xfs_off_t xfs_dir2_off_t; + +/* + * For getdents, argument struct for put routines. + */ +typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa); +typedef struct xfs_dir2_put_args { + xfs_off_t cook; /* cookie of (next) entry */ + xfs_intino_t ino; /* inode number */ + struct xfs_dirent *dbp; /* buffer pointer */ + char *name; /* directory entry name */ + int namelen; /* length of name */ + int done; /* output: set if value was stored */ + xfs_dir2_put_t put; /* put function ptr (i/o) */ + struct uio *uio; /* uio control structure */ +} xfs_dir2_put_args_t; + +#define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2) +extern xfs_dirops_t xfsv2_dirops; + +/* + * Other interfaces used by the rest of the dir v2 code. 
+ */ +extern int + xfs_dir2_grow_inode(struct xfs_da_args *args, int space, + xfs_dir2_db_t *dbp); + +extern int + xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); + +extern int + xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); + +extern int + xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, + struct xfs_dabuf *bp); + +#endif /* __XFS_DIR2_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_leaf.c linux.22-ac2/fs/xfs/xfs_dir2_leaf.c --- linux.vanilla/fs/xfs/xfs_dir2_leaf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_leaf.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1897 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_leaf.c + * XFS directory version 2 implementation - single leaf form + * see xfs_dir2_leaf.h for data structures. + * These directories have multiple XFS_DIR2_DATA blocks and one + * XFS_DIR2_LEAF1 block containing the hash table and freespace map. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_node.h" +#include "xfs_dir2_trace.h" +#include "xfs_error.h" +#include "xfs_bit.h" + +/* + * Local function declarations. + */ +#ifdef DEBUG +static void xfs_dir2_leaf_check(xfs_inode_t *dp, xfs_dabuf_t *bp); +#else +#define xfs_dir2_leaf_check(dp, bp) +#endif +static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **lbpp, + int *indexp, xfs_dabuf_t **dbpp); + +/* + * Convert a block form directory to a leaf form directory. 
+ */ +int /* error */ +xfs_dir2_block_to_leaf( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *dbp) /* input block's buffer */ +{ + xfs_dir2_data_off_t *bestsp; /* leaf's bestsp entries */ + xfs_dablk_t blkno; /* leaf block's bno */ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */ + xfs_dir2_block_tail_t *btp; /* block's tail */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + xfs_dabuf_t *lbp; /* leaf block's buffer */ + xfs_dir2_db_t ldb; /* leaf block's bno */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf's tail */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log block header */ + int needscan; /* need to rescan bestfree */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_b("block_to_leaf", args, dbp); + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + /* + * Add the leaf block to the inode. + * This interface will only put blocks in the leaf/node range. + * Since that's empty now, we'll get the root (block 0 in range). + */ + if ((error = xfs_da_grow_inode(args, &blkno))) { + return error; + } + ldb = XFS_DIR2_DA_TO_DB(mp, blkno); + ASSERT(ldb == XFS_DIR2_LEAF_FIRSTDB(mp)); + /* + * Initialize the leaf block, get a buffer for it. + */ + if ((error = xfs_dir2_leaf_init(args, ldb, &lbp, XFS_DIR2_LEAF1_MAGIC))) { + return error; + } + ASSERT(lbp != NULL); + leaf = lbp->data; + block = dbp->data; + xfs_dir2_data_check(dp, dbp); + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + /* + * Set the counts in the leaf header. + */ + INT_COPY(leaf->hdr.count, btp->count, ARCH_CONVERT); /* INT_: type change */ + INT_COPY(leaf->hdr.stale, btp->stale, ARCH_CONVERT); /* INT_: type change */ + /* + * Could compact these but I think we always do the conversion + * after squeezing out stale entries. + */ + memcpy(leaf->ents, blp, INT_GET(btp->count, ARCH_CONVERT) * sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, lbp, 0, INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1); + needscan = 0; + needlog = 1; + /* + * Make the space formerly occupied by the leaf entries and block + * tail be free. + */ + xfs_dir2_data_make_free(tp, dbp, + (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), + (xfs_dir2_data_aoff_t)((char *)block + mp->m_dirblksize - + (char *)blp), + &needlog, &needscan); + /* + * Fix up the block header, make it a data block. + */ + INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); + if (needscan) + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, + NULL); + /* + * Set up leaf tail and bests table. + */ + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + INT_SET(ltp->bestcount, ARCH_CONVERT, 1); + bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT); + INT_COPY(bestsp[0], block->hdr.bestfree[0].length, ARCH_CONVERT); + /* + * Log the data header and leaf bests table. + */ + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + xfs_dir2_leaf_check(dp, lbp); + xfs_dir2_data_check(dp, dbp); + xfs_dir2_leaf_log_bests(tp, lbp, 0, 0); + xfs_da_buf_done(lbp); + return 0; +} + +/* + * Add an entry to a leaf form directory. 
+ */ +int /* error */ +xfs_dir2_leaf_addname( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dir2_data_off_t *bestsp; /* freespace table in leaf */ + int compact; /* need to compact leaves */ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* data unused entry */ + int error; /* error return value */ + int grown; /* allocated new data block */ + int highstale; /* index of next stale leaf */ + int i; /* temporary, index */ + int index; /* leaf table position */ + xfs_dabuf_t *lbp; /* leaf's buffer */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int length; /* length of new entry */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry table pointer */ + int lfloglow; /* low leaf logging index */ + int lfloghigh; /* high leaf logging index */ + int lowstale; /* index of prev stale leaf */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail pointer */ + xfs_mount_t *mp; /* filesystem mount point */ + int needbytes; /* leaf block bytes needed */ + int needlog; /* need to log data header */ + int needscan; /* need to rescan data free */ + xfs_dir2_data_off_t *tagp; /* end of data entry */ + xfs_trans_t *tp; /* transaction pointer */ + xfs_dir2_db_t use_block; /* data block number */ + + xfs_dir2_trace_args("leaf_addname", args); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + /* + * Read the leaf block. + */ + error = xfs_da_read_buf(tp, dp, mp->m_dirleafblk, -1, &lbp, + XFS_DATA_FORK); + if (error) { + return error; + } + ASSERT(lbp != NULL); + /* + * Look up the entry by hash value and name. + * We know it's not there, our caller has already done a lookup. + * So the index is of the entry to insert in front of. + * But if there are dup hash values the index is of the first of those. + */ + index = xfs_dir2_leaf_search_hash(args, lbp); + leaf = lbp->data; + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT); + length = XFS_DIR2_DATA_ENTSIZE(args->namelen); + /* + * See if there are any entries with the same hash value + * and space in their block for the new entry. + * This is good because it puts multiple same-hash value entries + * in a data block, improving the lookup of those entries. + */ + for (use_block = -1, lep = &leaf->ents[index]; + index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + index++, lep++) { + if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + i = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + ASSERT(i < INT_GET(ltp->bestcount, ARCH_CONVERT)); + ASSERT(INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF); + if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { + use_block = i; + break; + } + } + /* + * Didn't find a block yet, linear search all the data blocks. + */ + if (use_block == -1) { + for (i = 0; i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++) { + /* + * Remember a block we see that's missing. + */ + if (INT_GET(bestsp[i], ARCH_CONVERT) == NULLDATAOFF && use_block == -1) + use_block = i; + else if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { + use_block = i; + break; + } + } + } + /* + * How many bytes do we need in the leaf block? + */ + needbytes = + (!INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT) ? 0 : (uint)sizeof(leaf->ents[0])) + + (use_block != -1 ? 
0 : (uint)sizeof(leaf->bests[0])); + /* + * Now kill use_block if it refers to a missing block, so we + * can use it as an indication of allocation needed. + */ + if (use_block != -1 && INT_GET(bestsp[use_block], ARCH_CONVERT) == NULLDATAOFF) + use_block = -1; + /* + * If we don't have enough free bytes but we can make enough + * by compacting out stale entries, we'll do that. + */ + if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < needbytes && + INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1) { + compact = 1; + } + /* + * Otherwise if we don't have enough free bytes we need to + * convert to node form. + */ + else if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < + needbytes) { + /* + * Just checking or no space reservation, give up. + */ + if (args->justcheck || args->total == 0) { + xfs_da_brelse(tp, lbp); + return XFS_ERROR(ENOSPC); + } + /* + * Convert to node form. + */ + error = xfs_dir2_leaf_to_node(args, lbp); + xfs_da_buf_done(lbp); + if (error) + return error; + /* + * Then add the new entry. + */ + return xfs_dir2_node_addname(args); + } + /* + * Otherwise it will fit without compaction. + */ + else + compact = 0; + /* + * If just checking, then it will fit unless we needed to allocate + * a new data block. + */ + if (args->justcheck) { + xfs_da_brelse(tp, lbp); + return use_block == -1 ? XFS_ERROR(ENOSPC) : 0; + } + /* + * If no allocations are allowed, return now before we've + * changed anything. + */ + if (args->total == 0 && use_block == -1) { + xfs_da_brelse(tp, lbp); + return XFS_ERROR(ENOSPC); + } + /* + * Need to compact the leaf entries, removing stale ones. + * Leave one stale entry behind - the one closest to our + * insertion index - and we'll shift that one to our insertion + * point later. + */ + if (compact) { + xfs_dir2_leaf_compact_x1(lbp, &index, &lowstale, &highstale, + &lfloglow, &lfloghigh); + } + /* + * There are stale entries, so we'll need log-low and log-high + * impossibly bad values later. + */ + else if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) { + lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); + lfloghigh = -1; + } + /* + * If there was no data block space found, we need to allocate + * a new one. + */ + if (use_block == -1) { + /* + * Add the new data block. + */ + if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, + &use_block))) { + xfs_da_brelse(tp, lbp); + return error; + } + /* + * Initialize the block. + */ + if ((error = xfs_dir2_data_init(args, use_block, &dbp))) { + xfs_da_brelse(tp, lbp); + return error; + } + /* + * If we're adding a new data block on the end we need to + * extend the bests table. Copy it up one entry. + */ + if (use_block >= INT_GET(ltp->bestcount, ARCH_CONVERT)) { + bestsp--; + memmove(&bestsp[0], &bestsp[1], + INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(bestsp[0])); + INT_MOD(ltp->bestcount, ARCH_CONVERT, +1); + xfs_dir2_leaf_log_tail(tp, lbp); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + } + /* + * If we're filling in a previously empty block just log it. + */ + else + xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); + data = dbp->data; + INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); + grown = 1; + } + /* + * Already had space in some data block. + * Just read that one in. 
+ */ + else { + if ((error = + xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, use_block), + -1, &dbp, XFS_DATA_FORK))) { + xfs_da_brelse(tp, lbp); + return error; + } + data = dbp->data; + grown = 0; + } + xfs_dir2_data_check(dp, dbp); + /* + * Point to the biggest freespace in our data block. + */ + dup = (xfs_dir2_data_unused_t *) + ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); + ASSERT(INT_GET(dup->length, ARCH_CONVERT) >= length); + needscan = needlog = 0; + /* + * Mark the initial part of our freespace in use for the new entry. + */ + xfs_dir2_data_use_free(tp, dbp, dup, + (xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length, + &needlog, &needscan); + /* + * Initialize our new entry (at last). + */ + dep = (xfs_dir2_data_entry_t *)dup; + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + dep->namelen = args->namelen; + memcpy(dep->name, args->name, dep->namelen); + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); + /* + * Need to scan fix up the bestfree table. + */ + if (needscan) + xfs_dir2_data_freescan(mp, data, &needlog, NULL); + /* + * Need to log the data block's header. + */ + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + xfs_dir2_data_log_entry(tp, dbp, dep); + /* + * If the bests table needs to be changed, do it. + * Log the change unless we've already done that. + */ + if (INT_GET(bestsp[use_block], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { + INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); + if (!grown) + xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); + } + /* + * Now we need to make room to insert the leaf entry. + * If there are no stale entries, we just insert a hole at index. + */ + if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) { + /* + * lep is still good as the index leaf entry. + */ + if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + memmove(lep + 1, lep, + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); + /* + * Record low and high logging indices for the leaf. + */ + lfloglow = index; + lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); + } + /* + * There are stale entries. + * We will use one of them for the new entry. + * It's probably not at the right location, so we'll have to + * shift some up or down first. + */ + else { + /* + * If we didn't compact before, we need to find the nearest + * stale entries before and after our insertion point. + */ + if (compact == 0) { + /* + * Find the first stale entry before the insertion + * point, if any. + */ + for (lowstale = index - 1; + lowstale >= 0 && + INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != + XFS_DIR2_NULL_DATAPTR; + lowstale--) + continue; + /* + * Find the next stale entry at or after the insertion + * point, if any. Stop if we go so far that the + * lowstale entry would be better. + */ + for (highstale = index; + highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && + INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != + XFS_DIR2_NULL_DATAPTR && + (lowstale < 0 || + index - lowstale - 1 >= highstale - index); + highstale++) + continue; + } + /* + * If the low one is better, use it. 
+ */ + if (lowstale >= 0 && + (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + index - lowstale - 1 < highstale - index)) { + ASSERT(index - lowstale - 1 >= 0); + ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == + XFS_DIR2_NULL_DATAPTR); + /* + * Copy entries up to cover the stale entry + * and make room for the new entry. + */ + if (index - lowstale - 1 > 0) + memmove(&leaf->ents[lowstale], + &leaf->ents[lowstale + 1], + (index - lowstale - 1) * sizeof(*lep)); + lep = &leaf->ents[index - 1]; + lfloglow = MIN(lowstale, lfloglow); + lfloghigh = MAX(index - 1, lfloghigh); + } + /* + * The high one is better, so use that one. + */ + else { + ASSERT(highstale - index >= 0); + ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == + XFS_DIR2_NULL_DATAPTR); + /* + * Copy entries down to copver the stale entry + * and make room for the new entry. + */ + if (highstale - index > 0) + memmove(&leaf->ents[index + 1], + &leaf->ents[index], + (highstale - index) * sizeof(*lep)); + lep = &leaf->ents[index]; + lfloglow = MIN(index, lfloglow); + lfloghigh = MAX(highstale, lfloghigh); + } + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); + } + /* + * Fill in the new leaf entry. + */ + INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, INT_GET(*tagp, ARCH_CONVERT))); + /* + * Log the leaf fields and give up the buffers. + */ + xfs_dir2_leaf_log_header(tp, lbp); + xfs_dir2_leaf_log_ents(tp, lbp, lfloglow, lfloghigh); + xfs_dir2_leaf_check(dp, lbp); + xfs_da_buf_done(lbp); + xfs_dir2_data_check(dp, dbp); + xfs_da_buf_done(dbp); + return 0; +} + +#ifdef DEBUG +/* + * Check the internal consistency of a leaf1 block. + * Pop an assert if something is wrong. + */ +void +xfs_dir2_leaf_check( + xfs_inode_t *dp, /* incore directory inode */ + xfs_dabuf_t *bp) /* leaf's buffer */ +{ + int i; /* leaf index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail pointer */ + xfs_mount_t *mp; /* filesystem mount point */ + int stale; /* count of stale leaves */ + + leaf = bp->data; + mp = dp->i_mount; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + /* + * This value is not restrictive enough. + * Should factor in the size of the bests table as well. + * We can deduce a value for that from di_size. + */ + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + /* + * Leaves and bests don't overlap. + */ + ASSERT((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] <= + (char *)XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT)); + /* + * Check hash value order, count stale entries. + */ + for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { + if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= + INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); + if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + stale++; + } + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); +} +#endif /* DEBUG */ + +/* + * Compact out any stale entries in the leaf. + * Log the header and changed leaf entries, if any. 
+ */ +void +xfs_dir2_leaf_compact( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *bp) /* leaf buffer */ +{ + int from; /* source leaf index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int loglow; /* first leaf entry to log */ + int to; /* target leaf index */ + + leaf = bp->data; + if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) { + return; + } + /* + * Compress out the stale entries in place. + */ + for (from = to = 0, loglow = -1; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Only actually copy the entries that are different. + */ + if (from > to) { + if (loglow == -1) + loglow = to; + leaf->ents[to] = leaf->ents[from]; + } + to++; + } + /* + * Update and log the header, log the leaf entries. + */ + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == from - to); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(INT_GET(leaf->hdr.stale, ARCH_CONVERT))); + INT_ZERO(leaf->hdr.stale, ARCH_CONVERT); + xfs_dir2_leaf_log_header(args->trans, bp); + if (loglow != -1) + xfs_dir2_leaf_log_ents(args->trans, bp, loglow, to - 1); +} + +/* + * Compact the leaf entries, removing stale ones. + * Leave one stale entry behind - the one closest to our + * insertion index - and the caller will shift that one to our insertion + * point later. + * Return new insertion index, where the remaining stale entry is, + * and leaf logging indices. + */ +void +xfs_dir2_leaf_compact_x1( + xfs_dabuf_t *bp, /* leaf buffer */ + int *indexp, /* insertion index */ + int *lowstalep, /* out: stale entry before us */ + int *highstalep, /* out: stale entry after us */ + int *lowlogp, /* out: low log index */ + int *highlogp) /* out: high log index */ +{ + int from; /* source copy index */ + int highstale; /* stale entry at/after index */ + int index; /* insertion index */ + int keepstale; /* source index of kept stale */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int lowstale; /* stale entry before index */ + int newindex=0; /* new insertion index */ + int to; /* destination copy index */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1); + index = *indexp; + /* + * Find the first stale entry before our index, if any. + */ + for (lowstale = index - 1; + lowstale >= 0 && + INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; + lowstale--) + continue; + /* + * Find the first stale entry at or after our index, if any. + * Stop if the answer would be worse than lowstale. + */ + for (highstale = index; + highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && + INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && + (lowstale < 0 || index - lowstale > highstale - index); + highstale++) + continue; + /* + * Pick the better of lowstale and highstale. + */ + if (lowstale >= 0 && + (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + index - lowstale <= highstale - index)) + keepstale = lowstale; + else + keepstale = highstale; + /* + * Copy the entries in place, removing all the stale entries + * except keepstale. + */ + for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + /* + * Notice the new value of index. + */ + if (index == from) + newindex = to; + if (from != keepstale && + INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { + if (from == to) + *lowlogp = to; + continue; + } + /* + * Record the new keepstale value for the insertion. 
+ */ + if (from == keepstale) + lowstale = highstale = to; + /* + * Copy only the entries that have moved. + */ + if (from > to) + leaf->ents[to] = leaf->ents[from]; + to++; + } + ASSERT(from > to); + /* + * If the insertion point was past the last entry, + * set the new insertion point accordingly. + */ + if (index == from) + newindex = to; + *indexp = newindex; + /* + * Adjust the leaf header values. + */ + INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(from - to)); + INT_SET(leaf->hdr.stale, ARCH_CONVERT, 1); + /* + * Remember the low/high stale value only in the "right" + * direction. + */ + if (lowstale >= newindex) + lowstale = -1; + else + highstale = INT_GET(leaf->hdr.count, ARCH_CONVERT); + *highlogp = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1; + *lowstalep = lowstale; + *highstalep = highstale; +} + +/* + * Getdents (readdir) for leaf and node directories. + * This reads the data blocks only, so is the same for both forms. + */ +int /* error */ +xfs_dir2_leaf_getdents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_inode_t *dp, /* incore directory inode */ + uio_t *uio, /* I/O control & vectors */ + int *eofp, /* out: reached end of dir */ + xfs_dirent_t *dbp, /* caller's buffer */ + xfs_dir2_put_t put) /* ABI formatting routine */ +{ + xfs_dabuf_t *bp; /* data block buffer */ + int byteoff; /* offset in current block */ + xfs_dir2_db_t curdb; /* db for current block */ + xfs_dir2_off_t curoff; /* current overall offset */ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_entry_t *dep; /* data entry */ + xfs_dir2_data_unused_t *dup; /* unused entry */ + int eof; /* reached end of directory */ + int error=0; /* error return value */ + int i; /* temporary loop index */ + int j; /* temporary loop index */ + int length; /* temporary length value */ + xfs_bmbt_irec_t *map; /* map vector for blocks */ + xfs_extlen_t map_blocks; /* number of fsbs in map */ + xfs_dablk_t map_off; /* last mapped file offset */ + int map_size; /* total entries in *map */ + int map_valid; /* valid entries in *map */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_off_t newoff; /* new curoff after new blk */ + int nmap; /* mappings to ask xfs_bmapi */ + xfs_dir2_put_args_t p; /* formatting arg bundle */ + char *ptr=NULL; /* pointer to current data */ + int ra_current; /* number of read-ahead blks */ + int ra_index; /* *map index for read-ahead */ + int ra_offset; /* map entry offset for ra */ + int ra_want; /* readahead count wanted */ + + /* + * If the offset is at or past the largest allowed value, + * give up right away, return eof. + */ + if (uio->uio_offset >= XFS_DIR2_MAX_DATAPTR) { + *eofp = 1; + return 0; + } + mp = dp->i_mount; + /* + * Setup formatting arguments. + */ + p.dbp = dbp; + p.put = put; + p.uio = uio; + /* + * Set up to bmap a number of blocks based on the caller's + * buffer size, the directory block size, and the filesystem + * block size. + */ + map_size = + howmany(uio->uio_resid + mp->m_dirblksize, + mp->m_sb.sb_blocksize); + map = kmem_alloc(map_size * sizeof(*map), KM_SLEEP); + map_valid = ra_index = ra_offset = ra_current = map_blocks = 0; + bp = NULL; + eof = 1; + /* + * Inside the loop we keep the main offset value as a byte offset + * in the directory file. + */ + curoff = XFS_DIR2_DATAPTR_TO_BYTE(mp, uio->uio_offset); + /* + * Force this conversion through db so we truncate the offset + * down to get the start of the data block. 
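+	 * (Editorial note: XFS_DIR2_BYTE_TO_DB rounds the byte offset down
+	 * to a directory-block boundary, and XFS_DIR2_DB_TO_DA then gives
+	 * the da block at which that directory block starts.)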
+ */ + map_off = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, curoff)); + /* + * Loop over directory entries until we reach the end offset. + * Get more blocks and readahead as necessary. + */ + while (curoff < XFS_DIR2_LEAF_OFFSET) { + /* + * If we have no buffer, or we're off the end of the + * current buffer, need to get another one. + */ + if (!bp || ptr >= (char *)bp->data + mp->m_dirblksize) { + /* + * If we have a buffer, we need to release it and + * take it out of the mapping. + */ + if (bp) { + xfs_da_brelse(tp, bp); + bp = NULL; + map_blocks -= mp->m_dirblkfsbs; + /* + * Loop to get rid of the extents for the + * directory block. + */ + for (i = mp->m_dirblkfsbs; i > 0; ) { + j = MIN((int)map->br_blockcount, i); + map->br_blockcount -= j; + map->br_startblock += j; + map->br_startoff += j; + /* + * If mapping is done, pitch it from + * the table. + */ + if (!map->br_blockcount && --map_valid) + memmove(&map[0], &map[1], + sizeof(map[0]) * + map_valid); + i -= j; + } + } + /* + * Recalculate the readahead blocks wanted. + */ + ra_want = howmany(uio->uio_resid + mp->m_dirblksize, + mp->m_sb.sb_blocksize) - 1; + /* + * If we don't have as many as we want, and we haven't + * run out of data blocks, get some more mappings. + */ + if (1 + ra_want > map_blocks && + map_off < + XFS_DIR2_BYTE_TO_DA(mp, XFS_DIR2_LEAF_OFFSET)) { + /* + * Get more bmaps, fill in after the ones + * we already have in the table. + */ + nmap = map_size - map_valid; + error = xfs_bmapi(tp, dp, + map_off, + XFS_DIR2_BYTE_TO_DA(mp, + XFS_DIR2_LEAF_OFFSET) - map_off, + XFS_BMAPI_METADATA, NULL, 0, + &map[map_valid], &nmap, NULL); + /* + * Don't know if we should ignore this or + * try to return an error. + * The trouble with returning errors + * is that readdir will just stop without + * actually passing the error through. + */ + if (error) + break; /* XXX */ + /* + * If we got all the mappings we asked for, + * set the final map offset based on the + * last bmap value received. + * Otherwise, we've reached the end. + */ + if (nmap == map_size - map_valid) + map_off = + map[map_valid + nmap - 1].br_startoff + + map[map_valid + nmap - 1].br_blockcount; + else + map_off = + XFS_DIR2_BYTE_TO_DA(mp, + XFS_DIR2_LEAF_OFFSET); + /* + * Look for holes in the mapping, and + * eliminate them. Count up the valid blocks. + */ + for (i = map_valid; i < map_valid + nmap; ) { + if (map[i].br_startblock == + HOLESTARTBLOCK) { + nmap--; + length = map_valid + nmap - i; + if (length) + memmove(&map[i], + &map[i + 1], + sizeof(map[i]) * + length); + } else { + map_blocks += + map[i].br_blockcount; + i++; + } + } + map_valid += nmap; + } + /* + * No valid mappings, so no more data blocks. + */ + if (!map_valid) { + curoff = XFS_DIR2_DA_TO_BYTE(mp, map_off); + break; + } + /* + * Read the directory block starting at the first + * mapping. + */ + curdb = XFS_DIR2_DA_TO_DB(mp, map->br_startoff); + error = xfs_da_read_buf(tp, dp, map->br_startoff, + map->br_blockcount >= mp->m_dirblkfsbs ? + XFS_FSB_TO_DADDR(mp, map->br_startblock) : + -1, + &bp, XFS_DATA_FORK); + /* + * Should just skip over the data block instead + * of giving up. + */ + if (error) + break; /* XXX */ + /* + * Adjust the current amount of read-ahead: we just + * read a block that was previously ra. + */ + if (ra_current) + ra_current -= mp->m_dirblkfsbs; + /* + * Do we need more readahead? 
+ */ + for (ra_index = ra_offset = i = 0; + ra_want > ra_current && i < map_blocks; + i += mp->m_dirblkfsbs) { + ASSERT(ra_index < map_valid); + /* + * Read-ahead a contiguous directory block. + */ + if (i > ra_current && + map[ra_index].br_blockcount >= + mp->m_dirblkfsbs) { + xfs_baread(mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, + map[ra_index].br_startblock + + ra_offset), + (int)BTOBB(mp->m_dirblksize)); + ra_current = i; + } + /* + * Read-ahead a non-contiguous directory block. + * This doesn't use our mapping, but this + * is a very rare case. + */ + else if (i > ra_current) { + (void)xfs_da_reada_buf(tp, dp, + map[ra_index].br_startoff + + ra_offset, XFS_DATA_FORK); + ra_current = i; + } + /* + * Advance offset through the mapping table. + */ + for (j = 0; j < mp->m_dirblkfsbs; j++) { + /* + * The rest of this extent but not + * more than a dir block. + */ + length = MIN(mp->m_dirblkfsbs, + (int)(map[ra_index].br_blockcount - + ra_offset)); + j += length; + ra_offset += length; + /* + * Advance to the next mapping if + * this one is used up. + */ + if (ra_offset == + map[ra_index].br_blockcount) { + ra_offset = 0; + ra_index++; + } + } + } + /* + * Having done a read, we need to set a new offset. + */ + newoff = XFS_DIR2_DB_OFF_TO_BYTE(mp, curdb, 0); + /* + * Start of the current block. + */ + if (curoff < newoff) + curoff = newoff; + /* + * Make sure we're in the right block. + */ + else if (curoff > newoff) + ASSERT(XFS_DIR2_BYTE_TO_DB(mp, curoff) == + curdb); + data = bp->data; + xfs_dir2_data_check(dp, bp); + /* + * Find our position in the block. + */ + ptr = (char *)&data->u; + byteoff = XFS_DIR2_BYTE_TO_OFF(mp, curoff); + /* + * Skip past the header. + */ + if (byteoff == 0) + curoff += (uint)sizeof(data->hdr); + /* + * Skip past entries until we reach our offset. + */ + else { + while ((char *)ptr - (char *)data < byteoff) { + dup = (xfs_dir2_data_unused_t *)ptr; + + if (INT_GET(dup->freetag, ARCH_CONVERT) + == XFS_DIR2_DATA_FREE_TAG) { + + length = INT_GET(dup->length, + ARCH_CONVERT); + ptr += length; + continue; + } + dep = (xfs_dir2_data_entry_t *)ptr; + length = + XFS_DIR2_DATA_ENTSIZE(dep->namelen); + ptr += length; + } + /* + * Now set our real offset. + */ + curoff = + XFS_DIR2_DB_OFF_TO_BYTE(mp, + XFS_DIR2_BYTE_TO_DB(mp, curoff), + (char *)ptr - (char *)data); + if (ptr >= (char *)data + mp->m_dirblksize) { + continue; + } + } + } + /* + * We have a pointer to an entry. + * Is it a live one? + */ + dup = (xfs_dir2_data_unused_t *)ptr; + /* + * No, it's unused, skip over it. + */ + if (INT_GET(dup->freetag, ARCH_CONVERT) + == XFS_DIR2_DATA_FREE_TAG) { + length = INT_GET(dup->length, ARCH_CONVERT); + ptr += length; + curoff += length; + continue; + } + + /* + * Copy the entry into the putargs, and try formatting it. + */ + dep = (xfs_dir2_data_entry_t *)ptr; + + p.namelen = dep->namelen; + + length = XFS_DIR2_DATA_ENTSIZE(p.namelen); + + p.cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); + +#if XFS_BIG_FILESYSTEMS + p.ino = INT_GET(dep->inumber, ARCH_CONVERT) + mp->m_inoadd; +#else + p.ino = INT_GET(dep->inumber, ARCH_CONVERT); +#endif + p.name = (char *)dep->name; + + error = p.put(&p); + + /* + * Won't fit. Return to caller. + */ + if (!p.done) { + eof = 0; + break; + } + /* + * Advance to next entry in the block. + */ + ptr += length; + curoff += length; + } + + /* + * All done. Set output offset value to current offset. 
+ */ + *eofp = eof; + if (curoff > XFS_DIR2_DATAPTR_TO_BYTE(mp, XFS_DIR2_MAX_DATAPTR)) + uio->uio_offset = XFS_DIR2_MAX_DATAPTR; + else + uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff); + kmem_free(map, map_size * sizeof(*map)); + if (bp) + xfs_da_brelse(tp, bp); + return error; +} + +/* + * Initialize a new leaf block, leaf1 or leafn magic accepted. + */ +int +xfs_dir2_leaf_init( + xfs_da_args_t *args, /* operation arguments */ + xfs_dir2_db_t bno, /* directory block number */ + xfs_dabuf_t **bpp, /* out: leaf buffer */ + int magic) /* magic number for block */ +{ + xfs_dabuf_t *bp; /* leaf buffer */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + ASSERT(dp != NULL); + tp = args->trans; + mp = dp->i_mount; + ASSERT(bno >= XFS_DIR2_LEAF_FIRSTDB(mp) && + bno < XFS_DIR2_FREE_FIRSTDB(mp)); + /* + * Get the buffer for the block. + */ + error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, bno), -1, &bp, + XFS_DATA_FORK); + if (error) { + return error; + } + ASSERT(bp != NULL); + leaf = bp->data; + /* + * Initialize the header. + */ + INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, magic); + INT_ZERO(leaf->hdr.info.forw, ARCH_CONVERT); + INT_ZERO(leaf->hdr.info.back, ARCH_CONVERT); + INT_ZERO(leaf->hdr.count, ARCH_CONVERT); + INT_ZERO(leaf->hdr.stale, ARCH_CONVERT); + xfs_dir2_leaf_log_header(tp, bp); + /* + * If it's a leaf-format directory initialize the tail. + * In this case our caller has the real bests table to copy into + * the block. + */ + if (magic == XFS_DIR2_LEAF1_MAGIC) { + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + INT_ZERO(ltp->bestcount, ARCH_CONVERT); + xfs_dir2_leaf_log_tail(tp, bp); + } + *bpp = bp; + return 0; +} + +/* + * Log the bests entries indicated from a leaf1 block. + */ +void +xfs_dir2_leaf_log_bests( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* leaf buffer */ + int first, /* first entry to log */ + int last) /* last entry to log */ +{ + xfs_dir2_data_off_t *firstb; /* pointer to first entry */ + xfs_dir2_data_off_t *lastb; /* pointer to last entry */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf); + firstb = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT) + first; + lastb = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT) + last; + xfs_da_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf), + (uint)((char *)lastb - (char *)leaf + sizeof(*lastb) - 1)); +} + +/* + * Log the leaf entries indicated from a leaf1 or leafn block. 
+ */ +void +xfs_dir2_leaf_log_ents( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* leaf buffer */ + int first, /* first entry to log */ + int last) /* last entry to log */ +{ + xfs_dir2_leaf_entry_t *firstlep; /* pointer to first entry */ + xfs_dir2_leaf_entry_t *lastlep; /* pointer to last entry */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || + INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + firstlep = &leaf->ents[first]; + lastlep = &leaf->ents[last]; + xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf), + (uint)((char *)lastlep - (char *)leaf + sizeof(*lastlep) - 1)); +} + +/* + * Log the header of the leaf1 or leafn block. + */ +void +xfs_dir2_leaf_log_header( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp) /* leaf buffer */ +{ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || + INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf), + (uint)(sizeof(leaf->hdr) - 1)); +} + +/* + * Log the tail of the leaf1 block. + */ +void +xfs_dir2_leaf_log_tail( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp) /* leaf buffer */ +{ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* filesystem mount point */ + + mp = tp->t_mountp; + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), + (uint)(mp->m_dirblksize - 1)); +} + +/* + * Look up the entry referred to by args in the leaf format directory. + * Most of the work is done by the xfs_dir2_leaf_lookup_int routine which + * is also used by the node-format code. + */ +int +xfs_dir2_leaf_lookup( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + int index; /* found entry index */ + xfs_dabuf_t *lbp; /* leaf buffer */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("leaf_lookup", args); + /* + * Look up name in the leaf block, returning both buffers and index. + */ + if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) { + return error; + } + tp = args->trans; + dp = args->dp; + xfs_dir2_leaf_check(dp, lbp); + leaf = lbp->data; + /* + * Get to the leaf entry and contained data entry address. + */ + lep = &leaf->ents[index]; + /* + * Point to the data entry. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)dbp->data + + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); + /* + * Return the found inode number. + */ + args->inumber = INT_GET(dep->inumber, ARCH_CONVERT); + xfs_da_brelse(tp, dbp); + xfs_da_brelse(tp, lbp); + return XFS_ERROR(EEXIST); +} + +/* + * Look up name/hash in the leaf block. + * Fill in indexp with the found index, and dbpp with the data buffer. + * If not found dbpp will be NULL, and ENOENT comes back. + * lbpp will always be filled in with the leaf buffer unless there's an error. 
+ */ +static int /* error */ +xfs_dir2_leaf_lookup_int( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t **lbpp, /* out: leaf buffer */ + int *indexp, /* out: index in leaf block */ + xfs_dabuf_t **dbpp) /* out: data buffer */ +{ + xfs_dir2_db_t curdb; /* current data block number */ + xfs_dabuf_t *dbp; /* data buffer */ + xfs_dir2_data_entry_t *dep; /* data entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + int index; /* index in leaf block */ + xfs_dabuf_t *lbp; /* leaf buffer */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_db_t newdb; /* new data block number */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + /* + * Read the leaf block into the buffer. + */ + if ((error = + xfs_da_read_buf(tp, dp, mp->m_dirleafblk, -1, &lbp, + XFS_DATA_FORK))) { + return error; + } + *lbpp = lbp; + leaf = lbp->data; + xfs_dir2_leaf_check(dp, lbp); + /* + * Look for the first leaf entry with our hash value. + */ + index = xfs_dir2_leaf_search_hash(args, lbp); + /* + * Loop over all the entries with the right hash value + * looking to match the name. + */ + for (lep = &leaf->ents[index], dbp = NULL, curdb = -1; + index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + lep++, index++) { + /* + * Skip over stale leaf entries. + */ + if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Get the new data block number. + */ + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + /* + * If it's not the same as the old data block number, + * need to pitch the old one and read the new one. + */ + if (newdb != curdb) { + if (dbp) + xfs_da_brelse(tp, dbp); + if ((error = + xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, newdb), -1, &dbp, + XFS_DATA_FORK))) { + xfs_da_brelse(tp, lbp); + return error; + } + xfs_dir2_data_check(dp, dbp); + curdb = newdb; + } + /* + * Point to the data entry. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)dbp->data + + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + /* + * If it matches then return it. + */ + if (dep->namelen == args->namelen && + dep->name[0] == args->name[0] && + memcmp(dep->name, args->name, args->namelen) == 0) { + *dbpp = dbp; + *indexp = index; + return 0; + } + } + /* + * No match found, return ENOENT. + */ + ASSERT(args->oknoent); + if (dbp) + xfs_da_brelse(tp, dbp); + xfs_da_brelse(tp, lbp); + return XFS_ERROR(ENOENT); +} + +/* + * Remove an entry from a leaf format directory. 
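+ * (Editorial outline, not part of the original comment: the data entry
+ * is returned to its data block's free space, the matching leaf entry is
+ * marked stale rather than removed, the bests table is updated if the
+ * block's largest free space changed, and finally we try to convert the
+ * directory back to block form.)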
+ */ +int /* error */ +xfs_dir2_leaf_removename( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dir2_data_off_t *bestsp; /* leaf block best freespace */ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_db_t db; /* data block number */ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data entry structure */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + xfs_dir2_db_t i; /* temporary data block # */ + int index; /* index into leaf entries */ + xfs_dabuf_t *lbp; /* leaf buffer */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log data header */ + int needscan; /* need to rescan data frees */ + xfs_dir2_data_off_t oldbest; /* old value of best free */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("leaf_removename", args); + /* + * Lookup the leaf entry, get the leaf and data blocks read in. + */ + if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) { + return error; + } + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + leaf = lbp->data; + data = dbp->data; + xfs_dir2_data_check(dp, dbp); + /* + * Point to the leaf entry, use that to point to the data entry. + */ + lep = &leaf->ents[index]; + db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + dep = (xfs_dir2_data_entry_t *) + ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + needscan = needlog = 0; + oldbest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT); + ASSERT(INT_GET(bestsp[db], ARCH_CONVERT) == oldbest); + /* + * Mark the former data entry unused. + */ + xfs_dir2_data_make_free(tp, dbp, + (xfs_dir2_data_aoff_t)((char *)dep - (char *)data), + XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); + /* + * We just mark the leaf entry stale by putting a null in it. + */ + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); + xfs_dir2_leaf_log_header(tp, lbp); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + xfs_dir2_leaf_log_ents(tp, lbp, index, index); + /* + * Scan the freespace in the data block again if necessary, + * log the data block header if necessary. + */ + if (needscan) + xfs_dir2_data_freescan(mp, data, &needlog, NULL); + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + /* + * If the longest freespace in the data block has changed, + * put the new value in the bests table and log that. + */ + if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) != oldbest) { + INT_COPY(bestsp[db], data->hdr.bestfree[0].length, ARCH_CONVERT); + xfs_dir2_leaf_log_bests(tp, lbp, db, db); + } + xfs_dir2_data_check(dp, dbp); + /* + * If the data block is now empty then get rid of the data block. + */ + if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == + mp->m_dirblksize - (uint)sizeof(data->hdr)) { + ASSERT(db != mp->m_dirdatablk); + if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { + /* + * Nope, can't get rid of it because it caused + * allocation of a bmap btree block to do so. + * Just go on, returning success, leaving the + * empty block in place. 
+ */ + if (error == ENOSPC && args->total == 0) { + xfs_da_buf_done(dbp); + error = 0; + } + xfs_dir2_leaf_check(dp, lbp); + xfs_da_buf_done(lbp); + return error; + } + dbp = NULL; + /* + * If this is the last data block then compact the + * bests table by getting rid of entries. + */ + if (db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1) { + /* + * Look for the last active entry (i). + */ + for (i = db - 1; i > 0; i--) { + if (INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF) + break; + } + /* + * Copy the table down so inactive entries at the + * end are removed. + */ + memmove(&bestsp[db - i], bestsp, + (INT_GET(ltp->bestcount, ARCH_CONVERT) - (db - i)) * sizeof(*bestsp)); + INT_MOD(ltp->bestcount, ARCH_CONVERT, -(db - i)); + xfs_dir2_leaf_log_tail(tp, lbp); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + } else + INT_SET(bestsp[db], ARCH_CONVERT, NULLDATAOFF); + } + /* + * If the data block was not the first one, drop it. + */ + else if (db != mp->m_dirdatablk && dbp != NULL) { + xfs_da_buf_done(dbp); + dbp = NULL; + } + xfs_dir2_leaf_check(dp, lbp); + /* + * See if we can convert to block form. + */ + return xfs_dir2_leaf_to_block(args, lbp, dbp); +} + +/* + * Replace the inode number in a leaf format directory entry. + */ +int /* error */ +xfs_dir2_leaf_replace( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + int index; /* index of leaf entry */ + xfs_dabuf_t *lbp; /* leaf buffer */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args("leaf_replace", args); + /* + * Look up the entry. + */ + if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) { + return error; + } + dp = args->dp; + leaf = lbp->data; + /* + * Point to the leaf entry, get data address from it. + */ + lep = &leaf->ents[index]; + /* + * Point to the data entry. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)dbp->data + + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); + ASSERT(args->inumber != INT_GET(dep->inumber, ARCH_CONVERT)); + /* + * Put the new inode number in, log it. + */ + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + tp = args->trans; + xfs_dir2_data_log_entry(tp, dbp, dep); + xfs_da_buf_done(dbp); + xfs_dir2_leaf_check(dp, lbp); + xfs_da_brelse(tp, lbp); + return 0; +} + +/* + * Return index in the leaf block (lbp) which is either the first + * one with this hash value, or if there are none, the insert point + * for that hash value. + */ +int /* index value */ +xfs_dir2_leaf_search_hash( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *lbp) /* leaf buffer */ +{ + xfs_dahash_t hash=0; /* hash from this entry */ + xfs_dahash_t hashwant; /* hash value looking for */ + int high; /* high leaf index */ + int low; /* low leaf index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + int mid=0; /* current leaf index */ + + leaf = lbp->data; +#ifndef __KERNEL__ + if (INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + return 0; +#endif + /* + * Note, the table cannot be empty, so we have to go through the loop. + * Binary search the leaf entries looking for our hash value. 
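+	 * (Editorial note: duplicate hash values are allowed, so on an
+	 * exact match the code below backs up to the first entry carrying
+	 * that hash; on a miss it returns the insertion point instead.)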
+ */ + for (lep = leaf->ents, low = 0, high = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1, + hashwant = args->hashval; + low <= high; ) { + mid = (low + high) >> 1; + if ((hash = INT_GET(lep[mid].hashval, ARCH_CONVERT)) == hashwant) + break; + if (hash < hashwant) + low = mid + 1; + else + high = mid - 1; + } + /* + * Found one, back up through all the equal hash values. + */ + if (hash == hashwant) { + while (mid > 0 && INT_GET(lep[mid - 1].hashval, ARCH_CONVERT) == hashwant) { + mid--; + } + } + /* + * Need to point to an entry higher than ours. + */ + else if (hash < hashwant) + mid++; + return mid; +} + +/* + * Trim off a trailing data block. We know it's empty since the leaf + * freespace table says so. + */ +int /* error */ +xfs_dir2_leaf_trim_data( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *lbp, /* leaf buffer */ + xfs_dir2_db_t db) /* data block number */ +{ + xfs_dir2_data_off_t *bestsp; /* leaf bests table */ +#ifdef DEBUG + xfs_dir2_data_t *data; /* data block structure */ +#endif + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return value */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + /* + * Read the offending data block. We need its buffer. + */ + if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, db), -1, &dbp, + XFS_DATA_FORK))) { + return error; + } +#ifdef DEBUG + data = dbp->data; + ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); +#endif + /* this seems to be an error + * data is only valid if DEBUG is defined? + * RMC 09/08/1999 + */ + + leaf = lbp->data; + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == + mp->m_dirblksize - (uint)sizeof(data->hdr)); + ASSERT(db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + /* + * Get rid of the data block. + */ + if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { + ASSERT(error != ENOSPC); + xfs_da_brelse(tp, dbp); + return error; + } + /* + * Eliminate the last bests entry from the table. + */ + bestsp = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT); + INT_MOD(ltp->bestcount, ARCH_CONVERT, -1); + memmove(&bestsp[1], &bestsp[0], INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(*bestsp)); + xfs_dir2_leaf_log_tail(tp, lbp); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + return 0; +} + +/* + * Convert node form directory to leaf form directory. + * The root of the node form dir needs to already be a LEAFN block. + * Just return if we can't do anything. + */ +int /* error */ +xfs_dir2_node_to_leaf( + xfs_da_state_t *state) /* directory operation state */ +{ + xfs_da_args_t *args; /* operation arguments */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + xfs_dabuf_t *fbp; /* buffer for freespace block */ + xfs_fileoff_t fo; /* freespace file offset */ + xfs_dir2_free_t *free; /* freespace structure */ + xfs_dabuf_t *lbp; /* buffer for leaf block */ + xfs_dir2_leaf_tail_t *ltp; /* tail of leaf structure */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_mount_t *mp; /* filesystem mount point */ + int rval; /* successful free trim? */ + xfs_trans_t *tp; /* transaction pointer */ + + /* + * There's more than a leaf level in the btree, so there must + * be multiple leafn blocks. Give up. 
+ */ + if (state->path.active > 1) + return 0; + args = state->args; + xfs_dir2_trace_args("node_to_leaf", args); + mp = state->mp; + dp = args->dp; + tp = args->trans; + /* + * Get the last offset in the file. + */ + if ((error = xfs_bmap_last_offset(tp, dp, &fo, XFS_DATA_FORK))) { + return error; + } + fo -= mp->m_dirblkfsbs; + /* + * If there are freespace blocks other than the first one, + * take this opportunity to remove trailing empty freespace blocks + * that may have been left behind during no-space-reservation + * operations. + */ + while (fo > mp->m_dirfreeblk) { + if ((error = xfs_dir2_node_trim_free(args, fo, &rval))) { + return error; + } + if (rval) + fo -= mp->m_dirblkfsbs; + else + return 0; + } + /* + * Now find the block just before the freespace block. + */ + if ((error = xfs_bmap_last_before(tp, dp, &fo, XFS_DATA_FORK))) { + return error; + } + /* + * If it's not the single leaf block, give up. + */ + if (XFS_FSB_TO_B(mp, fo) > XFS_DIR2_LEAF_OFFSET + mp->m_dirblksize) + return 0; + lbp = state->path.blk[0].bp; + leaf = lbp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + /* + * Read the freespace block. + */ + if ((error = xfs_da_read_buf(tp, dp, mp->m_dirfreeblk, -1, &fbp, + XFS_DATA_FORK))) { + return error; + } + free = fbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_ISZERO(free->hdr.firstdb, ARCH_CONVERT)); + /* + * Now see if the leafn and free data will fit in a leaf1. + * If not, release the buffer and give up. + */ + if ((uint)sizeof(leaf->hdr) + + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)) * (uint)sizeof(leaf->ents[0]) + + INT_GET(free->hdr.nvalid, ARCH_CONVERT) * (uint)sizeof(leaf->bests[0]) + + (uint)sizeof(leaf->tail) > + mp->m_dirblksize) { + xfs_da_brelse(tp, fbp); + return 0; + } + /* + * If the leaf has any stale entries in it, compress them out. + * The compact routine will log the header. + */ + if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) + xfs_dir2_leaf_compact(args, lbp); + else + xfs_dir2_leaf_log_header(tp, lbp); + INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAF1_MAGIC); + /* + * Set up the leaf tail from the freespace block. + */ + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + INT_COPY(ltp->bestcount, free->hdr.nvalid, ARCH_CONVERT); + /* + * Set up the leaf bests table. + */ + memcpy(XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT), free->bests, + INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(leaf->bests[0])); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + xfs_dir2_leaf_log_tail(tp, lbp); + xfs_dir2_leaf_check(dp, lbp); + /* + * Get rid of the freespace block. + */ + error = xfs_dir2_shrink_inode(args, XFS_DIR2_FREE_FIRSTDB(mp), fbp); + if (error) { + /* + * This can't fail here because it can only happen when + * punching out the middle of an extent, and this is an + * isolated block. + */ + ASSERT(error != ENOSPC); + return error; + } + fbp = NULL; + /* + * Now see if we can convert the single-leaf directory + * down to a block form directory. + * This routine always kills the dabuf for the leaf, so + * eliminate it from the path. 
+ */ + error = xfs_dir2_leaf_to_block(args, lbp, NULL); + state->path.blk[0].bp = NULL; + return error; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_leaf.h linux.22-ac2/fs/xfs/xfs_dir2_leaf.h --- linux.vanilla/fs/xfs/xfs_dir2_leaf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_leaf.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_LEAF_H__ +#define __XFS_DIR2_LEAF_H__ + +/* + * Directory version 2, leaf block structures. + */ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * Constants. + */ + +/* + * Offset of the leaf/node space. First block in this space + * is the btree root. + */ +#define XFS_DIR2_LEAF_SPACE 1 +#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE) +#define XFS_DIR2_LEAF_FIRSTDB(mp) \ + XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_LEAF_OFFSET) + +/* + * Types. + */ + +/* + * Offset in data space of a data entry. + */ +typedef __uint32_t xfs_dir2_dataptr_t; +#define XFS_DIR2_MAX_DATAPTR ((xfs_dir2_dataptr_t)0x7fffffff) +#define XFS_DIR2_NULL_DATAPTR ((xfs_dir2_dataptr_t)0) + +/* + * Structures. + */ + +/* + * Leaf block header. + */ +typedef struct xfs_dir2_leaf_hdr { + xfs_da_blkinfo_t info; /* header for da routines */ + __uint16_t count; /* count of entries */ + __uint16_t stale; /* count of stale entries */ +} xfs_dir2_leaf_hdr_t; + +/* + * Leaf block entry. + */ +typedef struct xfs_dir2_leaf_entry { + xfs_dahash_t hashval; /* hash value of name */ + xfs_dir2_dataptr_t address; /* address of data entry */ +} xfs_dir2_leaf_entry_t; + +/* + * Leaf block tail. + */ +typedef struct xfs_dir2_leaf_tail { + __uint32_t bestcount; +} xfs_dir2_leaf_tail_t; + +/* + * Leaf block. + * bests and tail are at the end of the block for single-leaf only + * (magic = XFS_DIR2_LEAF1_MAGIC not XFS_DIR2_LEAFN_MAGIC). + */ +typedef struct xfs_dir2_leaf { + xfs_dir2_leaf_hdr_t hdr; /* leaf header */ + xfs_dir2_leaf_entry_t ents[1]; /* entries */ + /* ... 
*/ + xfs_dir2_data_off_t bests[1]; /* best free counts */ + xfs_dir2_leaf_tail_t tail; /* leaf tail */ +} xfs_dir2_leaf_t; + +/* + * Macros. + * The DB blocks are logical directory block numbers, not filesystem blocks. + */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_MAX_LEAF_ENTS) +int +xfs_dir2_max_leaf_ents(struct xfs_mount *mp); +#define XFS_DIR2_MAX_LEAF_ENTS(mp) \ + xfs_dir2_max_leaf_ents(mp) +#else +#define XFS_DIR2_MAX_LEAF_ENTS(mp) \ + ((int)(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_leaf_hdr_t)) / \ + (uint)sizeof(xfs_dir2_leaf_entry_t))) +#endif + +/* + * Get address of the bestcount field in the single-leaf block. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_LEAF_TAIL_P) +xfs_dir2_leaf_tail_t * +xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp); +#define XFS_DIR2_LEAF_TAIL_P(mp,lp) \ + xfs_dir2_leaf_tail_p(mp, lp) +#else +#define XFS_DIR2_LEAF_TAIL_P(mp,lp) \ + ((xfs_dir2_leaf_tail_t *)\ + ((char *)(lp) + (mp)->m_dirblksize - \ + (uint)sizeof(xfs_dir2_leaf_tail_t))) +#endif + +/* + * Get address of the bests array in the single-leaf block. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_LEAF_BESTS_P) +xfs_dir2_data_off_t * +xfs_dir2_leaf_bests_p_arch(xfs_dir2_leaf_tail_t *ltp, xfs_arch_t arch); +#define XFS_DIR2_LEAF_BESTS_P_ARCH(ltp,arch) xfs_dir2_leaf_bests_p_arch(ltp,arch) +#else +#define XFS_DIR2_LEAF_BESTS_P_ARCH(ltp,arch) \ + ((xfs_dir2_data_off_t *)(ltp) - INT_GET((ltp)->bestcount, arch)) +#endif + +/* + * Convert dataptr to byte in file space + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATAPTR_TO_BYTE) +xfs_dir2_off_t +xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp); +#define XFS_DIR2_DATAPTR_TO_BYTE(mp,dp) xfs_dir2_dataptr_to_byte(mp, dp) +#else +#define XFS_DIR2_DATAPTR_TO_BYTE(mp,dp) \ + ((xfs_dir2_off_t)(dp) << XFS_DIR2_DATA_ALIGN_LOG) +#endif + +/* + * Convert byte in file space to dataptr. It had better be aligned. 
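+ * (Worked example, editorial, assuming the usual 8-byte data alignment,
+ * i.e. XFS_DIR2_DATA_ALIGN_LOG == 3: byte offset 0x1008 maps to dataptr
+ * 0x201, and XFS_DIR2_DATAPTR_TO_BYTE maps 0x201 back to 0x1008.)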
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_DATAPTR) +xfs_dir2_dataptr_t +xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by); +#define XFS_DIR2_BYTE_TO_DATAPTR(mp,by) xfs_dir2_byte_to_dataptr(mp,by) +#else +#define XFS_DIR2_BYTE_TO_DATAPTR(mp,by) \ + ((xfs_dir2_dataptr_t)((by) >> XFS_DIR2_DATA_ALIGN_LOG)) +#endif + +/* + * Convert dataptr to a block number + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATAPTR_TO_DB) +xfs_dir2_db_t +xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp); +#define XFS_DIR2_DATAPTR_TO_DB(mp,dp) xfs_dir2_dataptr_to_db(mp, dp) +#else +#define XFS_DIR2_DATAPTR_TO_DB(mp,dp) \ + XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp)) +#endif + +/* + * Convert dataptr to a byte offset in a block + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DATAPTR_TO_OFF) +xfs_dir2_data_aoff_t +xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp); +#define XFS_DIR2_DATAPTR_TO_OFF(mp,dp) xfs_dir2_dataptr_to_off(mp, dp) +#else +#define XFS_DIR2_DATAPTR_TO_OFF(mp,dp) \ + XFS_DIR2_BYTE_TO_OFF(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp)) +#endif + +/* + * Convert block and offset to byte in space + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_OFF_TO_BYTE) +xfs_dir2_off_t +xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db, + xfs_dir2_data_aoff_t o); +#define XFS_DIR2_DB_OFF_TO_BYTE(mp,db,o) \ + xfs_dir2_db_off_to_byte(mp, db, o) +#else +#define XFS_DIR2_DB_OFF_TO_BYTE(mp,db,o) \ + (((xfs_dir2_off_t)(db) << \ + ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)) + (o)) +#endif + +/* + * Convert byte in space to (DB) block + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_DB) +xfs_dir2_db_t xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by); +#define XFS_DIR2_BYTE_TO_DB(mp,by) xfs_dir2_byte_to_db(mp, by) +#else +#define XFS_DIR2_BYTE_TO_DB(mp,by) \ + ((xfs_dir2_db_t)((by) >> \ + ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog))) +#endif + +/* + * Convert byte in space to (DA) block + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_DA) +xfs_dablk_t xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by); +#define XFS_DIR2_BYTE_TO_DA(mp,by) xfs_dir2_byte_to_da(mp, by) +#else +#define XFS_DIR2_BYTE_TO_DA(mp,by) \ + XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, by)) +#endif + +/* + * Convert byte in space to offset in a block + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_BYTE_TO_OFF) +xfs_dir2_data_aoff_t +xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by); +#define XFS_DIR2_BYTE_TO_OFF(mp,by) xfs_dir2_byte_to_off(mp, by) +#else +#define XFS_DIR2_BYTE_TO_OFF(mp,by) \ + ((xfs_dir2_data_aoff_t)((by) & \ + ((1 << ((mp)->m_sb.sb_blocklog + \ + (mp)->m_sb.sb_dirblklog)) - 1))) +#endif + +/* + * Convert block and offset to dataptr + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_OFF_TO_DATAPTR) +xfs_dir2_dataptr_t +xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db, + xfs_dir2_data_aoff_t o); +#define XFS_DIR2_DB_OFF_TO_DATAPTR(mp,db,o) \ + xfs_dir2_db_off_to_dataptr(mp, db, o) +#else +#define XFS_DIR2_DB_OFF_TO_DATAPTR(mp,db,o) \ + XFS_DIR2_BYTE_TO_DATAPTR(mp, XFS_DIR2_DB_OFF_TO_BYTE(mp, db, o)) +#endif + +/* + * Convert block (DB) to block (dablk) + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_TO_DA) +xfs_dablk_t xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db); +#define XFS_DIR2_DB_TO_DA(mp,db) xfs_dir2_db_to_da(mp, db) +#else 
+#define XFS_DIR2_DB_TO_DA(mp,db) \ + ((xfs_dablk_t)((db) << (mp)->m_sb.sb_dirblklog)) +#endif + +/* + * Convert block (dablk) to block (DB) + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DA_TO_DB) +xfs_dir2_db_t xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da); +#define XFS_DIR2_DA_TO_DB(mp,da) xfs_dir2_da_to_db(mp, da) +#else +#define XFS_DIR2_DA_TO_DB(mp,da) \ + ((xfs_dir2_db_t)((da) >> (mp)->m_sb.sb_dirblklog)) +#endif + +/* + * Convert block (dablk) to byte offset in space + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DA_TO_BYTE) +xfs_dir2_off_t xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da); +#define XFS_DIR2_DA_TO_BYTE(mp,da) xfs_dir2_da_to_byte(mp, da) +#else +#define XFS_DIR2_DA_TO_BYTE(mp,da) \ + XFS_DIR2_DB_OFF_TO_BYTE(mp, XFS_DIR2_DA_TO_DB(mp, da), 0) +#endif + +/* + * Function declarations. + */ + +extern int + xfs_dir2_block_to_leaf(struct xfs_da_args *args, struct xfs_dabuf *dbp); + +extern int + xfs_dir2_leaf_addname(struct xfs_da_args *args); + +extern void + xfs_dir2_leaf_compact(struct xfs_da_args *args, struct xfs_dabuf *bp); + +extern void + xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp, + int *lowstalep, int *highstalep, int *lowlogp, + int *highlogp); + +extern int + xfs_dir2_leaf_getdents(struct xfs_trans *tp, struct xfs_inode *dp, + struct uio *uio, int *eofp, struct xfs_dirent *dbp, + xfs_dir2_put_t put); + +extern int + xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno, + struct xfs_dabuf **bpp, int magic); + +extern void + xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp, + int first, int last); + +extern void + xfs_dir2_leaf_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp, + int first, int last); + +extern void + xfs_dir2_leaf_log_header(struct xfs_trans *tp, struct xfs_dabuf *bp); + +extern void + xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_dabuf *bp); + +extern int + xfs_dir2_leaf_lookup(struct xfs_da_args *args); + +extern int + xfs_dir2_leaf_removename(struct xfs_da_args *args); + +extern int + xfs_dir2_leaf_replace(struct xfs_da_args *args); + +extern int + xfs_dir2_leaf_search_hash(struct xfs_da_args *args, + struct xfs_dabuf *lbp); +extern int + xfs_dir2_leaf_trim_data(struct xfs_da_args *args, struct xfs_dabuf *lbp, xfs_dir2_db_t db); + +extern int + xfs_dir2_node_to_leaf(struct xfs_da_state *state); + +#endif /* __XFS_DIR2_LEAF_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_node.c linux.22-ac2/fs/xfs/xfs_dir2_node.c --- linux.vanilla/fs/xfs/xfs_dir2_node.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_node.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,2023 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_node.c + * XFS directory implementation, version 2, node form files + * See data structures in xfs_dir2_node.h and xfs_da_btree.h. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_node.h" +#include "xfs_dir2_trace.h" +#include "xfs_error.h" + +/* + * Function declarations. + */ +static void xfs_dir2_free_log_header(xfs_trans_t *tp, xfs_dabuf_t *bp); +static int xfs_dir2_leafn_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index); +#ifdef DEBUG +static void xfs_dir2_leafn_check(xfs_inode_t *dp, xfs_dabuf_t *bp); +#else +#define xfs_dir2_leafn_check(dp, bp) +#endif +static void xfs_dir2_leafn_moveents(xfs_da_args_t *args, xfs_dabuf_t *bp_s, + int start_s, xfs_dabuf_t *bp_d, int start_d, + int count); +static void xfs_dir2_leafn_rebalance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2); +static int xfs_dir2_leafn_remove(xfs_da_args_t *args, xfs_dabuf_t *bp, + int index, xfs_da_state_blk_t *dblk, + int *rval); +static int xfs_dir2_node_addname_int(xfs_da_args_t *args, + xfs_da_state_blk_t *fblk); + +/* + * Log entries from a freespace block. + */ +void +xfs_dir2_free_log_bests( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp, /* freespace buffer */ + int first, /* first entry to log */ + int last) /* last entry to log */ +{ + xfs_dir2_free_t *free; /* freespace structure */ + + free = bp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + xfs_da_log_buf(tp, bp, + (uint)((char *)&free->bests[first] - (char *)free), + (uint)((char *)&free->bests[last] - (char *)free + + sizeof(free->bests[0]) - 1)); +} + +/* + * Log header from a freespace block. + */ +static void +xfs_dir2_free_log_header( + xfs_trans_t *tp, /* transaction pointer */ + xfs_dabuf_t *bp) /* freespace buffer */ +{ + xfs_dir2_free_t *free; /* freespace structure */ + + free = bp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free), + (uint)(sizeof(xfs_dir2_free_hdr_t) - 1)); +} + +/* + * Convert a leaf-format directory to a node-format directory. + * We need to change the magic number of the leaf block, and copy + * the freespace table out of the leaf block into its own block. 
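+ * (Editorial note: every bests entry is copied into the new free block,
+ * including NULLDATAOFF slots, but only the live entries are counted in
+ * the free block's nused field.)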
+ */ +int /* error */ +xfs_dir2_leaf_to_node( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *lbp) /* leaf buffer */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return value */ + xfs_dabuf_t *fbp; /* freespace buffer */ + xfs_dir2_db_t fdb; /* freespace block number */ + xfs_dir2_free_t *free; /* freespace structure */ + xfs_dir2_data_off_t *from; /* pointer to freespace entry */ + int i; /* leaf freespace index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ + xfs_mount_t *mp; /* filesystem mount point */ + int n; /* count of live freespc ents */ + xfs_dir2_data_off_t off; /* freespace entry value */ + xfs_dir2_data_off_t *to; /* pointer to freespace entry */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_b("leaf_to_node", args, lbp); + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + /* + * Add a freespace block to the directory. + */ + if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_FREE_SPACE, &fdb))) { + return error; + } + ASSERT(fdb == XFS_DIR2_FREE_FIRSTDB(mp)); + /* + * Get the buffer for the new freespace block. + */ + if ((error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb), -1, &fbp, + XFS_DATA_FORK))) { + return error; + } + ASSERT(fbp != NULL); + free = fbp->data; + leaf = lbp->data; + ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); + /* + * Initialize the freespace block header. + */ + INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); + INT_ZERO(free->hdr.firstdb, ARCH_CONVERT); + ASSERT(INT_GET(ltp->bestcount, ARCH_CONVERT) <= (uint)dp->i_d.di_size / mp->m_dirblksize); + INT_COPY(free->hdr.nvalid, ltp->bestcount, ARCH_CONVERT); + /* + * Copy freespace entries from the leaf block to the new block. + * Count active entries. + */ + for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P_ARCH(ltp, ARCH_CONVERT), to = free->bests; + i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++, from++, to++) { + if ((off = INT_GET(*from, ARCH_CONVERT)) != NULLDATAOFF) + n++; + INT_SET(*to, ARCH_CONVERT, off); + } + INT_SET(free->hdr.nused, ARCH_CONVERT, n); + INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAFN_MAGIC); + /* + * Log everything. + */ + xfs_dir2_leaf_log_header(tp, lbp); + xfs_dir2_free_log_header(tp, fbp); + xfs_dir2_free_log_bests(tp, fbp, 0, INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1); + xfs_da_buf_done(fbp); + xfs_dir2_leafn_check(dp, lbp); + return 0; +} + +/* + * Add a leaf entry to a leaf block in a node-form directory. + * The other work necessary is done from the caller. + */ +static int /* error */ +xfs_dir2_leafn_add( + xfs_dabuf_t *bp, /* leaf buffer */ + xfs_da_args_t *args, /* operation arguments */ + int index) /* insertion pt for new entry */ +{ + int compact; /* compacting stale leaves */ + xfs_inode_t *dp; /* incore directory inode */ + int highstale; /* next stale entry */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + int lfloghigh; /* high leaf entry logging */ + int lfloglow; /* low leaf entry logging */ + int lowstale; /* previous stale entry */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_sb("leafn_add", args, index, bp); + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + leaf = bp->data; + /* + * If there are already the maximum number of leaf entries in + * the block, if there are no stale entries it won't fit. + * Caller will do a split. If there are stale entries we'll do + * a compact. 
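+	 * (Editorial restatement: a full block with no stale entries cannot
+	 * take the new entry, so ENOSPC is returned and the caller splits;
+	 * a full block that still has stale entries reuses one of them,
+	 * compacting first when more than one is stale.)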
+ */ + if (INT_GET(leaf->hdr.count, ARCH_CONVERT) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { + if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) + return XFS_ERROR(ENOSPC); + compact = INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1; + } else + compact = 0; + ASSERT(index == 0 || INT_GET(leaf->ents[index - 1].hashval, ARCH_CONVERT) <= args->hashval); + ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + INT_GET(leaf->ents[index].hashval, ARCH_CONVERT) >= args->hashval); + + if (args->justcheck) + return 0; + + /* + * Compact out all but one stale leaf entry. Leaves behind + * the entry closest to index. + */ + if (compact) { + xfs_dir2_leaf_compact_x1(bp, &index, &lowstale, &highstale, + &lfloglow, &lfloghigh); + } + /* + * Set impossible logging indices for this case. + */ + else if (!INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) { + lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); + lfloghigh = -1; + } + /* + * No stale entries, just insert a space for the new entry. + */ + if (INT_ISZERO(leaf->hdr.stale, ARCH_CONVERT)) { + lep = &leaf->ents[index]; + if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + memmove(lep + 1, lep, + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); + lfloglow = index; + lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); + } + /* + * There are stale entries. We'll use one for the new entry. + */ + else { + /* + * If we didn't do a compact then we need to figure out + * which stale entry will be used. + */ + if (compact == 0) { + /* + * Find first stale entry before our insertion point. + */ + for (lowstale = index - 1; + lowstale >= 0 && + INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != + XFS_DIR2_NULL_DATAPTR; + lowstale--) + continue; + /* + * Find next stale entry after insertion point. + * Stop looking if the answer would be worse than + * lowstale already found. + */ + for (highstale = index; + highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && + INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != + XFS_DIR2_NULL_DATAPTR && + (lowstale < 0 || + index - lowstale - 1 >= highstale - index); + highstale++) + continue; + } + /* + * Using the low stale entry. + * Shift entries up toward the stale slot. + */ + if (lowstale >= 0 && + (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + index - lowstale - 1 < highstale - index)) { + ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == + XFS_DIR2_NULL_DATAPTR); + ASSERT(index - lowstale - 1 >= 0); + if (index - lowstale - 1 > 0) + memmove(&leaf->ents[lowstale], + &leaf->ents[lowstale + 1], + (index - lowstale - 1) * sizeof(*lep)); + lep = &leaf->ents[index - 1]; + lfloglow = MIN(lowstale, lfloglow); + lfloghigh = MAX(index - 1, lfloghigh); + } + /* + * Using the high stale entry. + * Shift entries down toward the stale slot. + */ + else { + ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == + XFS_DIR2_NULL_DATAPTR); + ASSERT(highstale - index >= 0); + if (highstale - index > 0) + memmove(&leaf->ents[index + 1], + &leaf->ents[index], + (highstale - index) * sizeof(*lep)); + lep = &leaf->ents[index]; + lfloglow = MIN(index, lfloglow); + lfloghigh = MAX(highstale, lfloghigh); + } + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); + } + /* + * Insert the new entry, log everything. 
+ */ + INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, args->blkno, args->index)); + xfs_dir2_leaf_log_header(tp, bp); + xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh); + xfs_dir2_leafn_check(dp, bp); + return 0; +} + +#ifdef DEBUG +/* + * Check internal consistency of a leafn block. + */ +void +xfs_dir2_leafn_check( + xfs_inode_t *dp, /* incore directory inode */ + xfs_dabuf_t *bp) /* leaf buffer */ +{ + int i; /* leaf index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_mount_t *mp; /* filesystem mount point */ + int stale; /* count of stale leaves */ + + leaf = bp->data; + mp = dp->i_mount; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); + for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { + if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) { + ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= + INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); + } + if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + stale++; + } + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); +} +#endif /* DEBUG */ + +/* + * Return the last hash value in the leaf. + * Stale entries are ok. + */ +xfs_dahash_t /* hash value */ +xfs_dir2_leafn_lasthash( + xfs_dabuf_t *bp, /* leaf buffer */ + int *count) /* count of entries in leaf */ +{ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + if (count) + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + if (INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + return 0; + return INT_GET(leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); +} + +/* + * Look up a leaf entry in a node-format leaf block. + * If this is an addname then the extrablk in state is a freespace block, + * otherwise it's a data block. + */ +int +xfs_dir2_leafn_lookup_int( + xfs_dabuf_t *bp, /* leaf buffer */ + xfs_da_args_t *args, /* operation arguments */ + int *indexp, /* out: leaf entry index */ + xfs_da_state_t *state) /* state to fill in */ +{ + xfs_dabuf_t *curbp; /* current data/free buffer */ + xfs_dir2_db_t curdb; /* current data block number */ + xfs_dir2_db_t curfdb; /* current free block number */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return value */ + int fi; /* free entry index */ + xfs_dir2_free_t *free=NULL; /* free block structure */ + int index; /* leaf entry index */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int length=0; /* length of new data entry */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_db_t newdb; /* new data block number */ + xfs_dir2_db_t newfdb; /* new free block number */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); +#ifdef __KERNEL__ + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) > 0); +#endif + xfs_dir2_leafn_check(dp, bp); + /* + * Look up the hash value in the leaf entries. + */ + index = xfs_dir2_leaf_search_hash(args, bp); + /* + * Do we have a buffer coming in? 
+ */ + if (state->extravalid) + curbp = state->extrablk.bp; + else + curbp = NULL; + /* + * For addname, it's a free block buffer, get the block number. + */ + if (args->addname) { + curfdb = curbp ? state->extrablk.blkno : -1; + curdb = -1; + length = XFS_DIR2_DATA_ENTSIZE(args->namelen); + if ((free = (curbp ? curbp->data : NULL))) + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + } + /* + * For others, it's a data block buffer, get the block number. + */ + else { + curfdb = -1; + curdb = curbp ? state->extrablk.blkno : -1; + } + /* + * Loop over leaf entries with the right hash value. + */ + for (lep = &leaf->ents[index]; + index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + lep++, index++) { + /* + * Skip stale leaf entries. + */ + if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Pull the data block number from the entry. + */ + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + /* + * For addname, we're looking for a place to put the new entry. + * We want to use a data block with an entry of equal + * hash value to ours if there is one with room. + */ + if (args->addname) { + /* + * If this block isn't the data block we already have + * in hand, take a look at it. + */ + if (newdb != curdb) { + curdb = newdb; + /* + * Convert the data block to the free block + * holding its freespace information. + */ + newfdb = XFS_DIR2_DB_TO_FDB(mp, newdb); + /* + * If it's not the one we have in hand, + * read it in. + */ + if (newfdb != curfdb) { + /* + * If we had one before, drop it. + */ + if (curbp) + xfs_da_brelse(tp, curbp); + /* + * Read the free block. + */ + if ((error = xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, + newfdb), + -1, &curbp, + XFS_DATA_FORK))) { + return error; + } + curfdb = newfdb; + free = curbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == + XFS_DIR2_FREE_MAGIC); + ASSERT((INT_GET(free->hdr.firstdb, ARCH_CONVERT) % + XFS_DIR2_MAX_FREE_BESTS(mp)) == + 0); + ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) <= curdb); + ASSERT(curdb < + INT_GET(free->hdr.firstdb, ARCH_CONVERT) + + INT_GET(free->hdr.nvalid, ARCH_CONVERT)); + } + /* + * Get the index for our entry. + */ + fi = XFS_DIR2_DB_TO_FDINDEX(mp, curdb); + /* + * If it has room, return it. + */ + if (unlikely(INT_GET(free->bests[fi], ARCH_CONVERT) == NULLDATAOFF)) { + XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + if (INT_GET(free->bests[fi], ARCH_CONVERT) >= length) { + *indexp = index; + state->extravalid = 1; + state->extrablk.bp = curbp; + state->extrablk.blkno = curfdb; + state->extrablk.index = fi; + state->extrablk.magic = + XFS_DIR2_FREE_MAGIC; + ASSERT(args->oknoent); + return XFS_ERROR(ENOENT); + } + } + } + /* + * Not adding a new entry, so we really want to find + * the name given to us. + */ + else { + /* + * If it's a different data block, go get it. + */ + if (newdb != curdb) { + /* + * If we had a block before, drop it. + */ + if (curbp) + xfs_da_brelse(tp, curbp); + /* + * Read the data block. + */ + if ((error = + xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, newdb), -1, + &curbp, XFS_DATA_FORK))) { + return error; + } + xfs_dir2_data_check(dp, curbp); + curdb = newdb; + } + /* + * Point to the data entry. 
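+			 * The leaf entry's address field encodes both the
+			 * data block number and the byte offset within that
+			 * block; XFS_DIR2_DATAPTR_TO_OFF extracts the offset.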
+ */ + dep = (xfs_dir2_data_entry_t *) + ((char *)curbp->data + + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + /* + * Compare the entry, return it if it matches. + */ + if (dep->namelen == args->namelen && + dep->name[0] == args->name[0] && + memcmp(dep->name, args->name, args->namelen) == 0) { + args->inumber = INT_GET(dep->inumber, ARCH_CONVERT); + *indexp = index; + state->extravalid = 1; + state->extrablk.bp = curbp; + state->extrablk.blkno = curdb; + state->extrablk.index = + (int)((char *)dep - + (char *)curbp->data); + state->extrablk.magic = XFS_DIR2_DATA_MAGIC; + return XFS_ERROR(EEXIST); + } + } + } + /* + * Didn't find a match. + * If we are holding a buffer, give it back in case our caller + * finds it useful. + */ + if ((state->extravalid = (curbp != NULL))) { + state->extrablk.bp = curbp; + state->extrablk.index = -1; + /* + * For addname, giving back a free block. + */ + if (args->addname) { + state->extrablk.blkno = curfdb; + state->extrablk.magic = XFS_DIR2_FREE_MAGIC; + } + /* + * For other callers, giving back a data block. + */ + else { + state->extrablk.blkno = curdb; + state->extrablk.magic = XFS_DIR2_DATA_MAGIC; + } + } + /* + * Return the final index, that will be the insertion point. + */ + *indexp = index; + ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); + return XFS_ERROR(ENOENT); +} + +/* + * Move count leaf entries from source to destination leaf. + * Log entries and headers. Stale entries are preserved. + */ +static void +xfs_dir2_leafn_moveents( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *bp_s, /* source leaf buffer */ + int start_s, /* source leaf index */ + xfs_dabuf_t *bp_d, /* destination leaf buffer */ + int start_d, /* destination leaf index */ + int count) /* count of leaves to copy */ +{ + xfs_dir2_leaf_t *leaf_d; /* destination leaf structure */ + xfs_dir2_leaf_t *leaf_s; /* source leaf structure */ + int stale; /* count stale leaves copied */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_bibii("leafn_moveents", args, bp_s, start_s, bp_d, + start_d, count); + /* + * Silently return if nothing to do. + */ + if (count == 0) { + return; + } + tp = args->trans; + leaf_s = bp_s->data; + leaf_d = bp_d->data; + /* + * If the destination index is not the end of the current + * destination leaf entries, open up a hole in the destination + * to hold the new entries. + */ + if (start_d < INT_GET(leaf_d->hdr.count, ARCH_CONVERT)) { + memmove(&leaf_d->ents[start_d + count], &leaf_d->ents[start_d], + (INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - start_d) * + sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, bp_d, start_d + count, + count + INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - 1); + } + /* + * If the source has stale leaves, count the ones in the copy range + * so we can update the header correctly. + */ + if (!INT_ISZERO(leaf_s->hdr.stale, ARCH_CONVERT)) { + int i; /* temp leaf index */ + + for (i = start_s, stale = 0; i < start_s + count; i++) { + if (INT_GET(leaf_s->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + stale++; + } + } else + stale = 0; + /* + * Copy the leaf entries from source to destination. + */ + memcpy(&leaf_d->ents[start_d], &leaf_s->ents[start_s], + count * sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, bp_d, start_d, start_d + count - 1); + /* + * If there are source entries after the ones we copied, + * delete the ones we copied by sliding the next ones down. 
+ */ + if (start_s + count < INT_GET(leaf_s->hdr.count, ARCH_CONVERT)) { + memmove(&leaf_s->ents[start_s], &leaf_s->ents[start_s + count], + count * sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1); + } + /* + * Update the headers and log them. + */ + INT_MOD(leaf_s->hdr.count, ARCH_CONVERT, -(count)); + INT_MOD(leaf_s->hdr.stale, ARCH_CONVERT, -(stale)); + INT_MOD(leaf_d->hdr.count, ARCH_CONVERT, count); + INT_MOD(leaf_d->hdr.stale, ARCH_CONVERT, stale); + xfs_dir2_leaf_log_header(tp, bp_s); + xfs_dir2_leaf_log_header(tp, bp_d); + xfs_dir2_leafn_check(args->dp, bp_s); + xfs_dir2_leafn_check(args->dp, bp_d); +} + +/* + * Determine the sort order of two leaf blocks. + * Returns 1 if both are valid and leaf2 should be before leaf1, else 0. + */ +int /* sort order */ +xfs_dir2_leafn_order( + xfs_dabuf_t *leaf1_bp, /* leaf1 buffer */ + xfs_dabuf_t *leaf2_bp) /* leaf2 buffer */ +{ + xfs_dir2_leaf_t *leaf1; /* leaf1 structure */ + xfs_dir2_leaf_t *leaf2; /* leaf2 structure */ + + leaf1 = leaf1_bp->data; + leaf2 = leaf2_bp->data; + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0 && + INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0 && + (INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT) < INT_GET(leaf1->ents[0].hashval, ARCH_CONVERT) || + INT_GET(leaf2->ents[INT_GET(leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT) < + INT_GET(leaf1->ents[INT_GET(leaf1->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT))) + return 1; + return 0; +} + +/* + * Rebalance leaf entries between two leaf blocks. + * This is actually only called when the second block is new, + * though the code deals with the general case. + * A new entry will be inserted in one of the blocks, and that + * entry is taken into account when balancing. + */ +static void +xfs_dir2_leafn_rebalance( + xfs_da_state_t *state, /* btree cursor */ + xfs_da_state_blk_t *blk1, /* first btree block */ + xfs_da_state_blk_t *blk2) /* second btree block */ +{ + xfs_da_args_t *args; /* operation arguments */ + int count; /* count (& direction) leaves */ + int isleft; /* new goes in left leaf */ + xfs_dir2_leaf_t *leaf1; /* first leaf structure */ + xfs_dir2_leaf_t *leaf2; /* second leaf structure */ + int mid; /* midpoint leaf index */ +#ifdef DEBUG + int oldstale; /* old count of stale leaves */ +#endif + int oldsum; /* old total leaf count */ + int swap; /* swapped leaf blocks */ + + args = state->args; + /* + * If the block order is wrong, swap the arguments. + */ + if ((swap = xfs_dir2_leafn_order(blk1->bp, blk2->bp))) { + xfs_da_state_blk_t *tmp; /* temp for block swap */ + + tmp = blk1; + blk1 = blk2; + blk2 = tmp; + } + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + oldsum = INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT); +#ifdef DEBUG + oldstale = INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT); +#endif + mid = oldsum >> 1; + /* + * If the old leaf count was odd then the new one will be even, + * so we need to divide the new count evenly. 
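+	 * The new entry counts toward whichever leaf its hash value lands
+	 * in, so compare args->hashval with the middle entry's hash.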
+ */ + if (oldsum & 1) { + xfs_dahash_t midhash; /* middle entry hash value */ + + if (mid >= INT_GET(leaf1->hdr.count, ARCH_CONVERT)) + midhash = INT_GET(leaf2->ents[mid - INT_GET(leaf1->hdr.count, ARCH_CONVERT)].hashval, ARCH_CONVERT); + else + midhash = INT_GET(leaf1->ents[mid].hashval, ARCH_CONVERT); + isleft = args->hashval <= midhash; + } + /* + * If the old count is even then the new count is odd, so there's + * no preferred side for the new entry. + * Pick the left one. + */ + else + isleft = 1; + /* + * Calculate moved entry count. Positive means left-to-right, + * negative means right-to-left. Then move the entries. + */ + count = INT_GET(leaf1->hdr.count, ARCH_CONVERT) - mid + (isleft == 0); + if (count > 0) + xfs_dir2_leafn_moveents(args, blk1->bp, + INT_GET(leaf1->hdr.count, ARCH_CONVERT) - count, blk2->bp, 0, count); + else if (count < 0) + xfs_dir2_leafn_moveents(args, blk2->bp, 0, blk1->bp, + INT_GET(leaf1->hdr.count, ARCH_CONVERT), count); + ASSERT(INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT) == oldsum); + ASSERT(INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT) == oldstale); + /* + * Mark whether we're inserting into the old or new leaf. + */ + if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) < INT_GET(leaf2->hdr.count, ARCH_CONVERT)) + state->inleaf = swap; + else if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > INT_GET(leaf2->hdr.count, ARCH_CONVERT)) + state->inleaf = !swap; + else + state->inleaf = + swap ^ (args->hashval < INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT)); + /* + * Adjust the expected index for insertion. + */ + if (!state->inleaf) + blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT); +} + +/* + * Remove an entry from a node directory. + * This removes the leaf entry and the data entry, + * and updates the free block if necessary. + */ +static int /* error */ +xfs_dir2_leafn_remove( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *bp, /* leaf buffer */ + int index, /* leaf entry index */ + xfs_da_state_blk_t *dblk, /* data block */ + int *rval) /* resulting block needs join */ +{ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_db_t db; /* data block number */ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data block entry */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry */ + int longest; /* longest data free entry */ + int off; /* data block entry offset */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log data header */ + int needscan; /* need to rescan data frees */ + xfs_trans_t *tp; /* transaction pointer */ + + xfs_dir2_trace_args_sb("leafn_remove", args, index, bp); + dp = args->dp; + tp = args->trans; + mp = dp->i_mount; + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + /* + * Point to the entry we're removing. + */ + lep = &leaf->ents[index]; + /* + * Extract the data block and offset from the entry. + */ + db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + ASSERT(dblk->blkno == db); + off = XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT)); + ASSERT(dblk->index == off); + /* + * Kill the leaf entry by marking it stale. + * Log the leaf block changes. 
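+	 * A stale entry keeps its hash value but gets a null address;
+	 * it is reclaimed later when the leaf is compacted.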
+ */ + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); + xfs_dir2_leaf_log_header(tp, bp); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + xfs_dir2_leaf_log_ents(tp, bp, index, index); + /* + * Make the data entry free. Keep track of the longest freespace + * in the data block in case it changes. + */ + dbp = dblk->bp; + data = dbp->data; + dep = (xfs_dir2_data_entry_t *)((char *)data + off); + longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + needlog = needscan = 0; + xfs_dir2_data_make_free(tp, dbp, off, + XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); + /* + * Rescan the data block freespaces for bestfree. + * Log the data block header if needed. + */ + if (needscan) + xfs_dir2_data_freescan(mp, data, &needlog, NULL); + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + xfs_dir2_data_check(dp, dbp); + /* + * If the longest data block freespace changes, need to update + * the corresponding freeblock entry. + */ + if (longest < INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { + int error; /* error return value */ + xfs_dabuf_t *fbp; /* freeblock buffer */ + xfs_dir2_db_t fdb; /* freeblock block number */ + int findex; /* index in freeblock entries */ + xfs_dir2_free_t *free; /* freeblock structure */ + int logfree; /* need to log free entry */ + + /* + * Convert the data block number to a free block, + * read in the free block. + */ + fdb = XFS_DIR2_DB_TO_FDB(mp, db); + if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb), + -1, &fbp, XFS_DATA_FORK))) { + return error; + } + free = fbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) == + XFS_DIR2_MAX_FREE_BESTS(mp) * + (fdb - XFS_DIR2_FREE_FIRSTDB(mp))); + /* + * Calculate which entry we need to fix. + */ + findex = XFS_DIR2_DB_TO_FDINDEX(mp, db); + longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + /* + * If the data block is now empty we can get rid of it + * (usually). + */ + if (longest == mp->m_dirblksize - (uint)sizeof(data->hdr)) { + /* + * Try to punch out the data block. + */ + error = xfs_dir2_shrink_inode(args, db, dbp); + if (error == 0) { + dblk->bp = NULL; + data = NULL; + } + /* + * We can get ENOSPC if there's no space reservation. + * In this case just drop the buffer and some one else + * will eventually get rid of the empty block. + */ + else if (error == ENOSPC && args->total == 0) + xfs_da_buf_done(dbp); + else + return error; + } + /* + * If we got rid of the data block, we can eliminate that entry + * in the free block. + */ + if (data == NULL) { + /* + * One less used entry in the free table. + */ + INT_MOD(free->hdr.nused, ARCH_CONVERT, -1); + xfs_dir2_free_log_header(tp, fbp); + /* + * If this was the last entry in the table, we can + * trim the table size back. There might be other + * entries at the end referring to non-existent + * data blocks, get those too. + */ + if (findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1) { + int i; /* free entry index */ + + for (i = findex - 1; + i >= 0 && INT_GET(free->bests[i], ARCH_CONVERT) == NULLDATAOFF; + i--) + continue; + INT_SET(free->hdr.nvalid, ARCH_CONVERT, i + 1); + logfree = 0; + } + /* + * Not the last entry, just punch it out. + */ + else { + INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); + logfree = 1; + } + /* + * If there are no useful entries left in the block, + * get rid of the block if we can. 
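+			 * A zero free->hdr.nused means no bests[] entries
+			 * are in use, so the whole freespace block can be
+			 * punched out as well.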
+ */ + if (INT_ISZERO(free->hdr.nused, ARCH_CONVERT)) { + error = xfs_dir2_shrink_inode(args, fdb, fbp); + if (error == 0) { + fbp = NULL; + logfree = 0; + } else if (error != ENOSPC || args->total != 0) + return error; + /* + * It's possible to get ENOSPC if there is no + * space reservation. In this case some one + * else will eventually get rid of this block. + */ + } + } + /* + * Data block is not empty, just set the free entry to + * the new value. + */ + else { + INT_SET(free->bests[findex], ARCH_CONVERT, longest); + logfree = 1; + } + /* + * Log the free entry that changed, unless we got rid of it. + */ + if (logfree) + xfs_dir2_free_log_bests(tp, fbp, findex, findex); + /* + * Drop the buffer if we still have it. + */ + if (fbp) + xfs_da_buf_done(fbp); + } + xfs_dir2_leafn_check(dp, bp); + /* + * Return indication of whether this leaf block is emtpy enough + * to justify trying to join it with a neighbor. + */ + *rval = + ((uint)sizeof(leaf->hdr) + + (uint)sizeof(leaf->ents[0]) * + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT))) < + mp->m_dir_magicpct; + return 0; +} + +/* + * Split the leaf entries in the old block into old and new blocks. + */ +int /* error */ +xfs_dir2_leafn_split( + xfs_da_state_t *state, /* btree cursor */ + xfs_da_state_blk_t *oldblk, /* original block */ + xfs_da_state_blk_t *newblk) /* newly created block */ +{ + xfs_da_args_t *args; /* operation arguments */ + xfs_dablk_t blkno; /* new leaf block number */ + int error; /* error return value */ + xfs_mount_t *mp; /* filesystem mount point */ + + /* + * Allocate space for a new leaf node. + */ + args = state->args; + mp = args->dp->i_mount; + ASSERT(args != NULL); + ASSERT(oldblk->magic == XFS_DIR2_LEAFN_MAGIC); + error = xfs_da_grow_inode(args, &blkno); + if (error) { + return error; + } + /* + * Initialize the new leaf block. + */ + error = xfs_dir2_leaf_init(args, XFS_DIR2_DA_TO_DB(mp, blkno), + &newblk->bp, XFS_DIR2_LEAFN_MAGIC); + if (error) { + return error; + } + newblk->blkno = blkno; + newblk->magic = XFS_DIR2_LEAFN_MAGIC; + /* + * Rebalance the entries across the two leaves, link the new + * block into the leaves. + */ + xfs_dir2_leafn_rebalance(state, oldblk, newblk); + error = xfs_da_blk_link(state, oldblk, newblk); + if (error) { + return error; + } + /* + * Insert the new entry in the correct block. + */ + if (state->inleaf) + error = xfs_dir2_leafn_add(oldblk->bp, args, oldblk->index); + else + error = xfs_dir2_leafn_add(newblk->bp, args, newblk->index); + /* + * Update last hashval in each block since we added the name. + */ + oldblk->hashval = xfs_dir2_leafn_lasthash(oldblk->bp, NULL); + newblk->hashval = xfs_dir2_leafn_lasthash(newblk->bp, NULL); + xfs_dir2_leafn_check(args->dp, oldblk->bp); + xfs_dir2_leafn_check(args->dp, newblk->bp); + return error; +} + +/* + * Check a leaf block and its neighbors to see if the block should be + * collapsed into one or the other neighbor. Always keep the block + * with the smaller block number. + * If the current block is over 50% full, don't try to join it, return 0. + * If the block is empty, fill in the state structure and return 2. + * If it can be collapsed, fill in the state structure and return 1. + * If nothing can be done, return 0. 
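+ * The 0/1/2 values above are passed back through *action; the function's
+ * own return value is an error code.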
+ */ +int /* error */ +xfs_dir2_leafn_toosmall( + xfs_da_state_t *state, /* btree cursor */ + int *action) /* resulting action to take */ +{ + xfs_da_state_blk_t *blk; /* leaf block */ + xfs_dablk_t blkno; /* leaf block number */ + xfs_dabuf_t *bp; /* leaf buffer */ + int bytes; /* bytes in use */ + int count; /* leaf live entry count */ + int error; /* error return value */ + int forward; /* sibling block direction */ + int i; /* sibling counter */ + xfs_da_blkinfo_t *info; /* leaf block header */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + int rval; /* result from path_shift */ + + /* + * Check for the degenerate case of the block being over 50% full. + * If so, it's not worth even looking to see if we might be able + * to coalesce with a sibling. + */ + blk = &state->path.blk[state->path.active - 1]; + info = blk->bp->data; + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + leaf = (xfs_dir2_leaf_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]); + if (bytes > (state->blocksize >> 1)) { + /* + * Blk over 50%, don't try to join. + */ + *action = 0; + return 0; + } + /* + * Check for the degenerate case of the block being empty. + * If the block is empty, we'll simply delete it, no need to + * coalesce it with a sibling block. We choose (arbitrarily) + * to merge with the forward block unless it is NULL. + */ + if (count == 0) { + /* + * Make altpath point to the block we want to keep and + * path point to the block we want to drop (this one). + */ + forward = !INT_ISZERO(info->forw, ARCH_CONVERT); + memcpy(&state->altpath, &state->path, sizeof(state->path)); + error = xfs_da_path_shift(state, &state->altpath, forward, 0, + &rval); + if (error) + return error; + *action = rval ? 2 : 0; + return 0; + } + /* + * Examine each sibling block to see if we can coalesce with + * at least 25% free space to spare. We need to figure out + * whether to merge with the forward or the backward block. + * We prefer coalescing with the lower numbered sibling so as + * to shrink a directory over time. + */ + forward = INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT); + for (i = 0, bp = NULL; i < 2; forward = !forward, i++) { + blkno = forward ?INT_GET( info->forw, ARCH_CONVERT) : INT_GET(info->back, ARCH_CONVERT); + if (blkno == 0) + continue; + /* + * Read the sibling leaf block. + */ + if ((error = + xfs_da_read_buf(state->args->trans, state->args->dp, blkno, + -1, &bp, XFS_DATA_FORK))) { + return error; + } + ASSERT(bp != NULL); + /* + * Count bytes in the two blocks combined. + */ + leaf = (xfs_dir2_leaf_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + bytes = state->blocksize - (state->blocksize >> 2); + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + count += INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + bytes -= count * (uint)sizeof(leaf->ents[0]); + /* + * Fits with at least 25% to spare. + */ + if (bytes >= 0) + break; + xfs_da_brelse(state->args->trans, bp); + } + /* + * Didn't like either block, give up. + */ + if (i >= 2) { + *action = 0; + return 0; + } + /* + * Done with the sibling leaf block here, drop the dabuf + * so path_shift can get it. 
+ */ + xfs_da_buf_done(bp); + /* + * Make altpath point to the block we want to keep (the lower + * numbered block) and path point to the block we want to drop. + */ + memcpy(&state->altpath, &state->path, sizeof(state->path)); + if (blkno < blk->blkno) + error = xfs_da_path_shift(state, &state->altpath, forward, 0, + &rval); + else + error = xfs_da_path_shift(state, &state->path, forward, 0, + &rval); + if (error) { + return error; + } + *action = rval ? 0 : 1; + return 0; +} + +/* + * Move all the leaf entries from drop_blk to save_blk. + * This is done as part of a join operation. + */ +void +xfs_dir2_leafn_unbalance( + xfs_da_state_t *state, /* cursor */ + xfs_da_state_blk_t *drop_blk, /* dead block */ + xfs_da_state_blk_t *save_blk) /* surviving block */ +{ + xfs_da_args_t *args; /* operation arguments */ + xfs_dir2_leaf_t *drop_leaf; /* dead leaf structure */ + xfs_dir2_leaf_t *save_leaf; /* surviving leaf structure */ + + args = state->args; + ASSERT(drop_blk->magic == XFS_DIR2_LEAFN_MAGIC); + ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC); + drop_leaf = drop_blk->bp->data; + save_leaf = save_blk->bp->data; + ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + /* + * If there are any stale leaf entries, take this opportunity + * to purge them. + */ + if (INT_GET(drop_leaf->hdr.stale, ARCH_CONVERT)) + xfs_dir2_leaf_compact(args, drop_blk->bp); + if (INT_GET(save_leaf->hdr.stale, ARCH_CONVERT)) + xfs_dir2_leaf_compact(args, save_blk->bp); + /* + * Move the entries from drop to the appropriate end of save. + */ + drop_blk->hashval = INT_GET(drop_leaf->ents[INT_GET(drop_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp)) + xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, 0, + INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); + else + xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, + INT_GET(save_leaf->hdr.count, ARCH_CONVERT), INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); + save_blk->hashval = INT_GET(save_leaf->ents[INT_GET(save_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + xfs_dir2_leafn_check(args->dp, save_blk->bp); +} + +/* + * Top-level node form directory addname routine. + */ +int /* error */ +xfs_dir2_node_addname( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_da_state_blk_t *blk; /* leaf block for insert */ + int error; /* error return value */ + int rval; /* sub-return value */ + xfs_da_state_t *state; /* btree cursor */ + + xfs_dir2_trace_args("node_addname", args); + /* + * Allocate and initialize the state (btree cursor). + */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_dirblksize; + state->node_ents = state->mp->m_dir_node_ents; + /* + * Look up the name. We're not supposed to find it, but + * this gives us the insertion point. + */ + error = xfs_da_node_lookup_int(state, &rval); + if (error) + rval = error; + if (rval != ENOENT) { + goto done; + } + /* + * Add the data entry to a data block. + * Extravalid is set to a freeblock found by lookup. + */ + rval = xfs_dir2_node_addname_int(args, + state->extravalid ? &state->extrablk : NULL); + if (rval) { + goto done; + } + blk = &state->path.blk[state->path.active - 1]; + ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); + /* + * Add the new leaf entry. 
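+	 * If the leaf block is full, leafn_add returns ENOSPC; with a
+	 * space reservation we then split the block below.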
+ */ + rval = xfs_dir2_leafn_add(blk->bp, args, blk->index); + if (rval == 0) { + /* + * It worked, fix the hash values up the btree. + */ + if (!args->justcheck) + xfs_da_fixhashpath(state, &state->path); + } else { + /* + * It didn't work, we need to split the leaf block. + */ + if (args->total == 0) { + ASSERT(rval == ENOSPC); + goto done; + } + /* + * Split the leaf block and insert the new entry. + */ + rval = xfs_da_split(state); + } +done: + xfs_da_state_free(state); + return rval; +} + +/* + * Add the data entry for a node-format directory name addition. + * The leaf entry is added in xfs_dir2_leafn_add. + * We may enter with a freespace block that the lookup found. + */ +static int /* error */ +xfs_dir2_node_addname_int( + xfs_da_args_t *args, /* operation arguments */ + xfs_da_state_blk_t *fblk) /* optional freespace block */ +{ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_db_t dbno; /* data block number */ + xfs_dabuf_t *dbp; /* data block buffer */ + xfs_dir2_data_entry_t *dep; /* data entry pointer */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* data unused entry pointer */ + int error; /* error return value */ + xfs_dir2_db_t fbno; /* freespace block number */ + xfs_dabuf_t *fbp; /* freespace buffer */ + int findex; /* freespace entry index */ + xfs_dir2_db_t foundbno=0; /* found freespace block no */ + int foundindex=0; /* found freespace entry idx */ + int foundhole; /* found hole in freespace */ + xfs_dir2_free_t *free=NULL; /* freespace block structure */ + xfs_dir2_db_t ifbno; /* initial freespace block no */ + xfs_dir2_db_t lastfbno=0; /* highest freespace block no */ + int length; /* length of the new entry */ + int logfree; /* need to log free entry */ + xfs_mount_t *mp; /* filesystem mount point */ + int needlog; /* need to log data header */ + int needscan; /* need to rescan data frees */ + int needfreesp; /* need to allocate freesp blk */ + xfs_dir2_data_off_t *tagp; /* data entry tag pointer */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + length = XFS_DIR2_DATA_ENTSIZE(args->namelen); + foundhole = 0; + /* + * If we came in with a freespace block that means that lookup + * found an entry with our hash value. This is the freespace + * block for that data entry. + */ + if (fblk) { + fbp = fblk->bp; + /* + * Remember initial freespace block number. + */ + ifbno = fblk->blkno; + free = fbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + findex = fblk->index; + /* + * This means the free entry showed that the data block had + * space for our entry, so we remembered it. + * Use that data block. + */ + if (findex >= 0) { + ASSERT(findex < INT_GET(free->hdr.nvalid, ARCH_CONVERT)); + ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF); + ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) >= length); + dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; + } + /* + * The data block looked at didn't have enough room. + * We'll start at the beginning of the freespace entries. + */ + else { + dbno = -1; + findex = 0; + } + } + /* + * Didn't come in with a freespace block, so don't have a data block. + */ + else { + ifbno = dbno = -1; + fbp = NULL; + findex = 0; + } + /* + * If we don't have a data block yet, we're going to scan the + * freespace blocks looking for one. Figure out what the + * highest freespace block number is. 
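+	 * xfs_bmap_last_offset gives the end of the directory's data fork,
+	 * which bounds the scan over the freespace blocks below.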
+ */ + if (dbno == -1) { + xfs_fileoff_t fo; /* freespace block number */ + + if ((error = xfs_bmap_last_offset(tp, dp, &fo, XFS_DATA_FORK))) + return error; + lastfbno = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo); + fbno = ifbno; + foundindex = -1; + } + /* + * While we haven't identified a data block, search the freeblock + * data for a good data block. If we find a null freeblock entry, + * indicating a hole in the data blocks, remember that. + */ + while (dbno == -1) { + /* + * If we don't have a freeblock in hand, get the next one. + */ + if (fbp == NULL) { + /* + * Happens the first time through unless lookup gave + * us a freespace block to start with. + */ + if (++fbno == 0) + fbno = XFS_DIR2_FREE_FIRSTDB(mp); + /* + * If it's ifbno we already looked at it. + */ + if (fbno == ifbno) + fbno++; + /* + * If it's off the end we're done. + */ + if (fbno >= lastfbno) + break; + /* + * Read the block. There can be holes in the + * freespace blocks, so this might not succeed. + * This should be really rare, so there's no reason + * to avoid it. + */ + if ((error = xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp, + XFS_DATA_FORK))) { + return error; + } + if (unlikely(fbp == NULL)) { + foundhole = 1; + continue; + } + free = fbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + findex = 0; + } + /* + * Look at the current free entry. Is it good enough? + */ + if (INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF && + INT_GET(free->bests[findex], ARCH_CONVERT) >= length) + dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; + else { + /* + * If we haven't found an empty entry yet, and this + * one is empty, remember this slot. + */ + if (foundindex == -1 && + INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF && !foundhole) { + foundindex = findex; + foundbno = fbno; + } + /* + * Are we done with the freeblock? + */ + if (++findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { + /* + * If there is space left in this freeblock, + * and we don't have an empty entry yet, + * remember this slot. + */ + if (foundindex == -1 && + findex < XFS_DIR2_MAX_FREE_BESTS(mp) && + !foundhole) { + foundindex = findex; + foundbno = fbno; + } + /* + * Drop the block. + */ + xfs_da_brelse(tp, fbp); + fbp = NULL; + if (fblk && fblk->bp) + fblk->bp = NULL; + } + } + } + /* + * If we don't have a data block, we need to allocate one and make + * the freespace entries refer to it. + */ + if (unlikely(dbno == -1)) { + /* + * Not allowed to allocate, return failure. + */ + if (args->justcheck || args->total == 0) { + /* + * Drop the freespace buffer unless it came from our + * caller. + */ + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + return XFS_ERROR(ENOSPC); + } + /* + * Allocate and initialize the new data block. + */ + if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, + &dbno)) || + (error = xfs_dir2_data_init(args, dbno, &dbp))) { + /* + * Drop the freespace buffer unless it came from our + * caller. + */ + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + return error; + } + /* + * If the freespace entry for this data block is not in the + * freespace block we have in hand, drop the one we have + * and get the right one. 
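+		 * XFS_DIR2_DB_TO_FDB maps a data block number to the
+		 * freespace block that holds its bests[] entry.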
+ */ + needfreesp = 0; + if (XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno || fbp == NULL) { + if (fbp) + xfs_da_brelse(tp, fbp); + if (fblk && fblk->bp) + fblk->bp = NULL; + fbno = XFS_DIR2_DB_TO_FDB(mp, dbno); + if ((error = xfs_da_read_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp, + XFS_DATA_FORK))) { + xfs_da_buf_done(dbp); + return error; + } + + /* + * If there wasn't a freespace block, the read will + * return a NULL fbp. Allocate one later. + */ + + if(unlikely( fbp == NULL )) { + needfreesp = 1; + } else { + free = fbp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + } + } + + /* + * If we don't have a data block, and there's no free slot in a + * freeblock, we need to add a new freeblock. + */ + if (unlikely(needfreesp || foundindex == -1)) { + /* + * Add the new freeblock. + */ + if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_FREE_SPACE, + &fbno))) { + return error; + } + + if (XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno) { + cmn_err(CE_ALERT, + "xfs_dir2_node_addname_int: needed block %lld, got %lld\n", + (long long)XFS_DIR2_DB_TO_FDB(mp, dbno), + (long long)fbno); + XFS_ERROR_REPORT("xfs_dir2_node_addname_int", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + /* + * Get a buffer for the new block. + */ + if ((error = xfs_da_get_buf(tp, dp, + XFS_DIR2_DB_TO_DA(mp, fbno), + -1, &fbp, XFS_DATA_FORK))) { + return error; + } + ASSERT(fbp != NULL); + + /* + * Initialize the new block to be empty, and remember + * its first slot as our empty slot. + */ + free = fbp->data; + INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); + INT_SET(free->hdr.firstdb, ARCH_CONVERT, + (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * + XFS_DIR2_MAX_FREE_BESTS(mp)); + INT_ZERO(free->hdr.nvalid, ARCH_CONVERT); + INT_ZERO(free->hdr.nused, ARCH_CONVERT); + foundindex = 0; + foundbno = fbno; + } + + /* + * Set the freespace block index from the data block number. + */ + findex = XFS_DIR2_DB_TO_FDINDEX(mp, dbno); + /* + * If it's after the end of the current entries in the + * freespace block, extend that table. + */ + if (findex >= INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { + ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp)); + INT_SET(free->hdr.nvalid, ARCH_CONVERT, findex + 1); + /* + * Tag new entry so nused will go up. + */ + INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); + } + /* + * If this entry was for an empty data block + * (this should always be true) then update the header. + */ + if (INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF) { + INT_MOD(free->hdr.nused, ARCH_CONVERT, +1); + xfs_dir2_free_log_header(tp, fbp); + } + /* + * Update the real value in the table. + * We haven't allocated the data entry yet so this will + * change again. + */ + data = dbp->data; + INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); + logfree = 1; + } + /* + * We had a data block so we don't have to make a new one. + */ + else { + /* + * If just checking, we succeeded. + */ + if (args->justcheck) { + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + return 0; + } + /* + * Read the data block in. + */ + if (unlikely( + error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, dbno), + -1, &dbp, XFS_DATA_FORK))) { + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + return error; + } + data = dbp->data; + logfree = 0; + } + ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) >= length); + /* + * Point to the existing unused space. 
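+	 * bestfree[0] describes the largest free region in the data block,
+	 * so the new entry is carved out of that region.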
+ */ + dup = (xfs_dir2_data_unused_t *) + ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); + needscan = needlog = 0; + /* + * Mark the first part of the unused space, inuse for us. + */ + xfs_dir2_data_use_free(tp, dbp, dup, + (xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length, + &needlog, &needscan); + /* + * Fill in the new entry and log it. + */ + dep = (xfs_dir2_data_entry_t *)dup; + INT_SET(dep->inumber, ARCH_CONVERT, args->inumber); + dep->namelen = args->namelen; + memcpy(dep->name, args->name, dep->namelen); + tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); + xfs_dir2_data_log_entry(tp, dbp, dep); + /* + * Rescan the block for bestfree if needed. + */ + if (needscan) + xfs_dir2_data_freescan(mp, data, &needlog, NULL); + /* + * Log the data block header if needed. + */ + if (needlog) + xfs_dir2_data_log_header(tp, dbp); + /* + * If the freespace entry is now wrong, update it. + */ + if (INT_GET(free->bests[findex], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { + INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); + logfree = 1; + } + /* + * Log the freespace entry if needed. + */ + if (logfree) + xfs_dir2_free_log_bests(tp, fbp, findex, findex); + /* + * If the caller didn't hand us the freespace block, drop it. + */ + if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) + xfs_da_buf_done(fbp); + /* + * Return the data block and offset in args, then drop the data block. + */ + args->blkno = (xfs_dablk_t)dbno; + args->index = INT_GET(*tagp, ARCH_CONVERT); + xfs_da_buf_done(dbp); + return 0; +} + +/* + * Lookup an entry in a node-format directory. + * All the real work happens in xfs_da_node_lookup_int. + * The only real output is the inode number of the entry. + */ +int /* error */ +xfs_dir2_node_lookup( + xfs_da_args_t *args) /* operation arguments */ +{ + int error; /* error return value */ + int i; /* btree level */ + int rval; /* operation return value */ + xfs_da_state_t *state; /* btree cursor */ + + xfs_dir2_trace_args("node_lookup", args); + /* + * Allocate and initialize the btree cursor. + */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_dirblksize; + state->node_ents = state->mp->m_dir_node_ents; + /* + * Fill in the path to the entry in the cursor. + */ + error = xfs_da_node_lookup_int(state, &rval); + if (error) + rval = error; + /* + * Release the btree blocks and leaf block. + */ + for (i = 0; i < state->path.active; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + /* + * Release the data block if we have it. + */ + if (state->extravalid && state->extrablk.bp) { + xfs_da_brelse(args->trans, state->extrablk.bp); + state->extrablk.bp = NULL; + } + xfs_da_state_free(state); + return rval; +} + +/* + * Remove an entry from a node-format directory. + */ +int /* error */ +xfs_dir2_node_removename( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_da_state_blk_t *blk; /* leaf block */ + int error; /* error return value */ + int rval; /* operation return value */ + xfs_da_state_t *state; /* btree cursor */ + + xfs_dir2_trace_args("node_removename", args); + /* + * Allocate and initialize the btree cursor. 
+ */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_dirblksize; + state->node_ents = state->mp->m_dir_node_ents; + /* + * Look up the entry we're deleting, set up the cursor. + */ + error = xfs_da_node_lookup_int(state, &rval); + if (error) { + rval = error; + } + /* + * Didn't find it, upper layer screwed up. + */ + if (rval != EEXIST) { + xfs_da_state_free(state); + return rval; + } + blk = &state->path.blk[state->path.active - 1]; + ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); + ASSERT(state->extravalid); + /* + * Remove the leaf and data entries. + * Extrablk refers to the data block. + */ + error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, + &state->extrablk, &rval); + if (error) { + return error; + } + /* + * Fix the hash values up the btree. + */ + xfs_da_fixhashpath(state, &state->path); + /* + * If we need to join leaf blocks, do it. + */ + if (rval && state->path.active > 1) + error = xfs_da_join(state); + /* + * If no errors so far, try conversion to leaf format. + */ + if (!error) + error = xfs_dir2_node_to_leaf(state); + xfs_da_state_free(state); + return error; +} + +/* + * Replace an entry's inode number in a node-format directory. + */ +int /* error */ +xfs_dir2_node_replace( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_da_state_blk_t *blk; /* leaf block */ + xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_entry_t *dep; /* data entry changed */ + int error; /* error return value */ + int i; /* btree level */ + xfs_ino_t inum; /* new inode number */ + xfs_dir2_leaf_t *leaf; /* leaf structure */ + xfs_dir2_leaf_entry_t *lep; /* leaf entry being changed */ + int rval; /* internal return value */ + xfs_da_state_t *state; /* btree cursor */ + + xfs_dir2_trace_args("node_replace", args); + /* + * Allocate and initialize the btree cursor. + */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_dirblksize; + state->node_ents = state->mp->m_dir_node_ents; + inum = args->inumber; + /* + * Lookup the entry to change in the btree. + */ + error = xfs_da_node_lookup_int(state, &rval); + if (error) { + rval = error; + } + /* + * It should be found, since the vnodeops layer has looked it up + * and locked it. But paranoia is good. + */ + if (rval == EEXIST) { + /* + * Find the leaf entry. + */ + blk = &state->path.blk[state->path.active - 1]; + ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); + leaf = blk->bp->data; + lep = &leaf->ents[blk->index]; + ASSERT(state->extravalid); + /* + * Point to the data entry. + */ + data = state->extrablk.bp->data; + ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + dep = (xfs_dir2_data_entry_t *) + ((char *)data + + XFS_DIR2_DATAPTR_TO_OFF(state->mp, INT_GET(lep->address, ARCH_CONVERT))); + ASSERT(inum != INT_GET(dep->inumber, ARCH_CONVERT)); + /* + * Fill in the new inode number and log the entry. + */ + INT_SET(dep->inumber, ARCH_CONVERT, inum); + xfs_dir2_data_log_entry(args->trans, state->extrablk.bp, dep); + rval = 0; + } + /* + * Didn't find it, and we're holding a data block. Drop it. + */ + else if (state->extravalid) { + xfs_da_brelse(args->trans, state->extrablk.bp); + state->extrablk.bp = NULL; + } + /* + * Release all the buffers in the cursor. 
+ */ + for (i = 0; i < state->path.active; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + xfs_da_state_free(state); + return rval; +} + +/* + * Trim off a trailing empty freespace block. + * Return (in rvalp) 1 if we did it, 0 if not. + */ +int /* error */ +xfs_dir2_node_trim_free( + xfs_da_args_t *args, /* operation arguments */ + xfs_fileoff_t fo, /* free block number */ + int *rvalp) /* out: did something */ +{ + xfs_dabuf_t *bp; /* freespace buffer */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return code */ + xfs_dir2_free_t *free; /* freespace structure */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_trans_t *tp; /* transaction pointer */ + + dp = args->dp; + mp = dp->i_mount; + tp = args->trans; + /* + * Read the freespace block. + */ + if (unlikely(error = xfs_da_read_buf(tp, dp, (xfs_dablk_t)fo, -2, &bp, + XFS_DATA_FORK))) { + return error; + } + + /* + * There can be holes in freespace. If fo is a hole, there's + * nothing to do. + */ + if (bp == NULL) { + return 0; + } + free = bp->data; + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + /* + * If there are used entries, there's nothing to do. + */ + if (INT_GET(free->hdr.nused, ARCH_CONVERT) > 0) { + xfs_da_brelse(tp, bp); + *rvalp = 0; + return 0; + } + /* + * Blow the block away. + */ + if ((error = + xfs_dir2_shrink_inode(args, XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo), + bp))) { + /* + * Can't fail with ENOSPC since that only happens with no + * space reservation, when breaking up an extent into two + * pieces. This is the last block of an extent. + */ + ASSERT(error != ENOSPC); + xfs_da_brelse(tp, bp); + return error; + } + /* + * Return that we succeeded. + */ + *rvalp = 1; + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_node.h linux.22-ac2/fs/xfs/xfs_dir2_node.h --- linux.vanilla/fs/xfs/xfs_dir2_node.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_node.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_NODE_H__ +#define __XFS_DIR2_NODE_H__ + +/* + * Directory version 2, btree node format structures + */ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_da_state; +struct xfs_da_state_blk; +struct xfs_inode; +struct xfs_trans; + +/* + * Constants. + */ + +/* + * Offset of the freespace index. + */ +#define XFS_DIR2_FREE_SPACE 2 +#define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE) +#define XFS_DIR2_FREE_FIRSTDB(mp) \ + XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_FREE_OFFSET) + +#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */ + +/* + * Structures. + */ +typedef struct xfs_dir2_free_hdr { + __uint32_t magic; /* XFS_DIR2_FREE_MAGIC */ + __int32_t firstdb; /* db of first entry */ + __int32_t nvalid; /* count of valid entries */ + __int32_t nused; /* count of used entries */ +} xfs_dir2_free_hdr_t; + +typedef struct xfs_dir2_free { + xfs_dir2_free_hdr_t hdr; /* block header */ + xfs_dir2_data_off_t bests[1]; /* best free counts */ + /* unused entries are -1 */ +} xfs_dir2_free_t; +#define XFS_DIR2_MAX_FREE_BESTS(mp) \ + (((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_free_hdr_t)) / \ + (uint)sizeof(xfs_dir2_data_off_t)) + +/* + * Macros. + */ + +/* + * Convert data space db to the corresponding free db. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_TO_FDB) +xfs_dir2_db_t +xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db); +#define XFS_DIR2_DB_TO_FDB(mp,db) xfs_dir2_db_to_fdb(mp, db) +#else +#define XFS_DIR2_DB_TO_FDB(mp,db) \ + (XFS_DIR2_FREE_FIRSTDB(mp) + (db) / XFS_DIR2_MAX_FREE_BESTS(mp)) +#endif + +/* + * Convert data space db to the corresponding index in a free db. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_DB_TO_FDINDEX) +int +xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db); +#define XFS_DIR2_DB_TO_FDINDEX(mp,db) xfs_dir2_db_to_fdindex(mp, db) +#else +#define XFS_DIR2_DB_TO_FDINDEX(mp,db) ((db) % XFS_DIR2_MAX_FREE_BESTS(mp)) +#endif + +/* + * Functions. 
+ */ + +extern void + xfs_dir2_free_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp, + int first, int last); + +extern int + xfs_dir2_leaf_to_node(struct xfs_da_args *args, struct xfs_dabuf *lbp); + +extern xfs_dahash_t + xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count); + +extern int + xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp, + struct xfs_da_args *args, int *indexp, + struct xfs_da_state *state); + +extern int + xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp, + struct xfs_dabuf *leaf2_bp); + +extern int + xfs_dir2_leafn_split(struct xfs_da_state *state, + struct xfs_da_state_blk *oldblk, + struct xfs_da_state_blk *newblk); + +extern int + xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action); + +extern void + xfs_dir2_leafn_unbalance(struct xfs_da_state *state, + struct xfs_da_state_blk *drop_blk, + struct xfs_da_state_blk *save_blk); + +extern int + xfs_dir2_node_addname(struct xfs_da_args *args); + +extern int + xfs_dir2_node_lookup(struct xfs_da_args *args); + +extern int + xfs_dir2_node_removename(struct xfs_da_args *args); + +extern int + xfs_dir2_node_replace(struct xfs_da_args *args); + +extern int + xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo, + int *rvalp); + +#endif /* __XFS_DIR2_NODE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_sf.c linux.22-ac2/fs/xfs/xfs_dir2_sf.c --- linux.vanilla/fs/xfs/xfs_dir2_sf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_sf.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1328 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_sf.c + * Shortform directory implementation for v2 directories. 
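+ * A shortform directory lives entirely in the inode's literal area:
+ * a small header followed by packed variable-length entries.  "." is
+ * implied, and ".." is kept only as the parent field in the header.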
+ */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_error.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_trace.h" + +/* + * Prototypes for internal functions. + */ +static void xfs_dir2_sf_addname_easy(xfs_da_args_t *args, + xfs_dir2_sf_entry_t *sfep, + xfs_dir2_data_aoff_t offset, + int new_isize); +static void xfs_dir2_sf_addname_hard(xfs_da_args_t *args, int objchange, + int new_isize); +static int xfs_dir2_sf_addname_pick(xfs_da_args_t *args, int objchange, + xfs_dir2_sf_entry_t **sfepp, + xfs_dir2_data_aoff_t *offsetp); +#ifdef DEBUG +static void xfs_dir2_sf_check(xfs_da_args_t *args); +#else +#define xfs_dir2_sf_check(args) +#endif /* DEBUG */ +#if XFS_BIG_FILESYSTEMS +static void xfs_dir2_sf_toino4(xfs_da_args_t *args); +static void xfs_dir2_sf_toino8(xfs_da_args_t *args); +#endif /* XFS_BIG_FILESYSTEMS */ + +/* + * Given a block directory (dp/block), calculate its size as a shortform (sf) + * directory and a header for the sf directory, if it will fit it the + * space currently present in the inode. If it won't fit, the output + * size is too big (but not accurate). + */ +int /* size for sf form */ +xfs_dir2_block_sfsize( + xfs_inode_t *dp, /* incore inode pointer */ + xfs_dir2_block_t *block, /* block directory data */ + xfs_dir2_sf_hdr_t *sfhp) /* output: header for sf form */ +{ + xfs_dir2_dataptr_t addr; /* data entry address */ + xfs_dir2_leaf_entry_t *blp; /* leaf area of the block */ + xfs_dir2_block_tail_t *btp; /* tail area of the block */ + int count; /* shortform entry count */ + xfs_dir2_data_entry_t *dep; /* data entry in the block */ + int i; /* block entry index */ + int i8count; /* count of big-inode entries */ + int isdot; /* entry is "." */ + int isdotdot; /* entry is ".." */ + xfs_mount_t *mp; /* mount structure pointer */ + int namelen; /* total name bytes */ + xfs_ino_t parent; /* parent inode number */ + int size=0; /* total computed size */ + + mp = dp->i_mount; + + count = i8count = namelen = 0; + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + blp = XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + + /* + * Iterate over the block's data entries by using the leaf pointers. + */ + for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { + if ((addr = INT_GET(blp[i].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) + continue; + /* + * Calculate the pointer to the entry at hand. + */ + dep = (xfs_dir2_data_entry_t *) + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr)); + /* + * Detect . and .., so we can special-case them. + * . is not included in sf directories. + * .. is included by just the parent inode number. + */ + isdot = dep->namelen == 1 && dep->name[0] == '.'; + isdotdot = + dep->namelen == 2 && + dep->name[0] == '.' 
&& dep->name[1] == '.'; +#if XFS_BIG_FILESYSTEMS + if (!isdot) + i8count += INT_GET(dep->inumber, ARCH_CONVERT) > XFS_DIR2_MAX_SHORT_INUM; +#endif + if (!isdot && !isdotdot) { + count++; + namelen += dep->namelen; + } else if (isdotdot) + parent = INT_GET(dep->inumber, ARCH_CONVERT); + /* + * Calculate the new size, see if we should give up yet. + */ + size = XFS_DIR2_SF_HDR_SIZE(i8count) + /* header */ + count + /* namelen */ + count * (uint)sizeof(xfs_dir2_sf_off_t) + /* offset */ + namelen + /* name */ + (i8count ? /* inumber */ + (uint)sizeof(xfs_dir2_ino8_t) * count : + (uint)sizeof(xfs_dir2_ino4_t) * count); + if (size > XFS_IFORK_DSIZE(dp)) + return size; /* size value is a failure */ + } + /* + * Create the output header, if it worked. + */ + sfhp->count = count; + sfhp->i8count = i8count; + XFS_DIR2_SF_PUT_INUMBER_ARCH((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent, ARCH_CONVERT); + return size; +} + +/* + * Convert a block format directory to shortform. + * Caller has already checked that it will fit, and built us a header. + */ +int /* error */ +xfs_dir2_block_to_sf( + xfs_da_args_t *args, /* operation arguments */ + xfs_dabuf_t *bp, /* block buffer */ + int size, /* shortform directory size */ + xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */ +{ + xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_block_tail_t *btp; /* block tail pointer */ + xfs_dir2_data_entry_t *dep; /* data entry pointer */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_data_unused_t *dup; /* unused data pointer */ + char *endptr; /* end of data entries */ + int error; /* error return value */ + int logflags; /* inode logging flags */ + xfs_mount_t *mp; /* filesystem mount point */ + char *ptr; /* current data pointer */ + xfs_dir2_sf_entry_t *sfep; /* shortform entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_ino_t temp; + + xfs_dir2_trace_args_sb("block_to_sf", args, size, bp); + dp = args->dp; + mp = dp->i_mount; + + /* + * Make a copy of the block data, so we can shrink the inode + * and add local data. + */ + block = kmem_alloc(mp->m_dirblksize, KM_SLEEP); + memcpy(block, bp->data, mp->m_dirblksize); + logflags = XFS_ILOG_CORE; + if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) { + ASSERT(error != ENOSPC); + goto out; + } + /* + * The buffer is now unconditionally gone, whether + * xfs_dir2_shrink_inode worked or not. + * + * Convert the inode to local format. + */ + dp->i_df.if_flags &= ~XFS_IFEXTENTS; + dp->i_df.if_flags |= XFS_IFINLINE; + dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; + ASSERT(dp->i_df.if_bytes == 0); + xfs_idata_realloc(dp, size, XFS_DATA_FORK); + logflags |= XFS_ILOG_DDATA; + /* + * Copy the header into the newly allocate local space. + */ + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + memcpy(sfp, sfhp, XFS_DIR2_SF_HDR_SIZE(sfhp->i8count)); + dp->i_d.di_size = size; + /* + * Set up to loop over the block's entries. + */ + btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); + ptr = (char *)block->u; + endptr = (char *)XFS_DIR2_BLOCK_LEAF_P_ARCH(btp, ARCH_CONVERT); + sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + /* + * Loop over the active and unused entries. + * Stop when we reach the leaf/tail portion of the block. + */ + while (ptr < endptr) { + /* + * If it's unused, just skip over it. + */ + dup = (xfs_dir2_data_unused_t *)ptr; + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ptr += INT_GET(dup->length, ARCH_CONVERT); + continue; + } + dep = (xfs_dir2_data_entry_t *)ptr; + /* + * Skip . 
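+		 * "." is implicit in the shortform format; just sanity
+		 * check that it points back at this inode.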
+ */ + if (dep->namelen == 1 && dep->name[0] == '.') + ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) == dp->i_ino); + /* + * Skip .., but make sure the inode number is right. + */ + else if (dep->namelen == 2 && + dep->name[0] == '.' && dep->name[1] == '.') + ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) == + XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT)); + /* + * Normal entry, copy it into shortform. + */ + else { + sfep->namelen = dep->namelen; + XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep, + (xfs_dir2_data_aoff_t) + ((char *)dep - (char *)block), ARCH_CONVERT); + memcpy(sfep->name, dep->name, dep->namelen); + temp=INT_GET(dep->inumber, ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &temp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); + } + ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen); + } + ASSERT((char *)sfep - (char *)sfp == size); + xfs_dir2_sf_check(args); +out: + xfs_trans_log_inode(args->trans, dp, logflags); + kmem_free(block, mp->m_dirblksize); + return error; +} + +/* + * Add a name to a shortform directory. + * There are two algorithms, "easy" and "hard" which we decide on + * before changing anything. + * Convert to block form if necessary, if the new entry won't fit. + */ +int /* error */ +xfs_dir2_sf_addname( + xfs_da_args_t *args) /* operation arguments */ +{ + int add_entsize; /* size of the new entry */ + xfs_inode_t *dp; /* incore directory inode */ + int error; /* error return value */ + int incr_isize; /* total change in size */ + int new_isize; /* di_size after adding name */ + int objchange; /* changing to 8-byte inodes */ + xfs_dir2_data_aoff_t offset; /* offset for new entry */ + int old_isize; /* di_size before adding name */ + int pick; /* which algorithm to use */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_entry_t *sfep; /* shortform entry */ + + xfs_dir2_trace_args("sf_addname", args); + ASSERT(xfs_dir2_sf_lookup(args) == ENOENT); + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Make sure the shortform value has some of its header. + */ + if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); + /* + * Compute entry (and change in) size. + */ + add_entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen); + incr_isize = add_entsize; +#if XFS_BIG_FILESYSTEMS + /* + * Do we have to change to 8 byte inodes? + */ + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) { + /* + * Yes, adjust the entry size and the total size. + */ + add_entsize += + (uint)sizeof(xfs_dir2_ino8_t) - + (uint)sizeof(xfs_dir2_ino4_t); + incr_isize += + (sfp->hdr.count + 2) * + ((uint)sizeof(xfs_dir2_ino8_t) - + (uint)sizeof(xfs_dir2_ino4_t)); + objchange = 1; + } else + objchange = 0; +#else + objchange = 0; +#endif + old_isize = (int)dp->i_d.di_size; + new_isize = old_isize + incr_isize; + /* + * Won't fit as shortform any more (due to size), + * or the pick routine says it won't (due to offset values). + */ + if (new_isize > XFS_IFORK_DSIZE(dp) || + (pick = + xfs_dir2_sf_addname_pick(args, objchange, &sfep, &offset)) == 0) { + /* + * Just checking or no space reservation, it doesn't fit. 
+ */ + if (args->justcheck || args->total == 0) + return XFS_ERROR(ENOSPC); + /* + * Convert to block form then add the name. + */ + error = xfs_dir2_sf_to_block(args); + if (error) + return error; + return xfs_dir2_block_addname(args); + } + /* + * Just checking, it fits. + */ + if (args->justcheck) + return 0; + /* + * Do it the easy way - just add it at the end. + */ + if (pick == 1) + xfs_dir2_sf_addname_easy(args, sfep, offset, new_isize); + /* + * Do it the hard way - look for a place to insert the new entry. + * Convert to 8 byte inode numbers first if necessary. + */ + else { + ASSERT(pick == 2); +#if XFS_BIG_FILESYSTEMS + if (objchange) + xfs_dir2_sf_toino8(args); +#endif + xfs_dir2_sf_addname_hard(args, objchange, new_isize); + } + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + return 0; +} + +/* + * Add the new entry the "easy" way. + * This is copying the old directory and adding the new entry at the end. + * Since it's sorted by "offset" we need room after the last offset + * that's already there, and then room to convert to a block directory. + * This is already checked by the pick routine. + */ +static void +xfs_dir2_sf_addname_easy( + xfs_da_args_t *args, /* operation arguments */ + xfs_dir2_sf_entry_t *sfep, /* pointer to new entry */ + xfs_dir2_data_aoff_t offset, /* offset to use for new ent */ + int new_isize) /* new directory size */ +{ + int byteoff; /* byte offset in sf dir */ + xfs_inode_t *dp; /* incore directory inode */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + dp = args->dp; + + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + byteoff = (int)((char *)sfep - (char *)sfp); + /* + * Grow the in-inode space. + */ + xfs_idata_realloc(dp, XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen), + XFS_DATA_FORK); + /* + * Need to set up again due to realloc of the inode data. + */ + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + byteoff); + /* + * Fill in the new entry. + */ + sfep->namelen = args->namelen; + XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep, offset, ARCH_CONVERT); + memcpy(sfep->name, args->name, sfep->namelen); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + /* + * Update the header and inode. + */ + sfp->hdr.count++; +#if XFS_BIG_FILESYSTEMS + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) + sfp->hdr.i8count++; +#endif + dp->i_d.di_size = new_isize; + xfs_dir2_sf_check(args); +} + +/* + * Add the new entry the "hard" way. + * The caller has already converted to 8 byte inode numbers if necessary, + * in which case we need to leave the i8count at 1. + * Find a hole that the new entry will fit into, and copy + * the first part of the entries, the new entry, and the last part of + * the entries. 
+ */ +/* ARGSUSED */ +static void +xfs_dir2_sf_addname_hard( + xfs_da_args_t *args, /* operation arguments */ + int objchange, /* changing inode number size */ + int new_isize) /* new directory size */ +{ + int add_datasize; /* data size need for new ent */ + char *buf; /* buffer for old */ + xfs_inode_t *dp; /* incore directory inode */ + int eof; /* reached end of old dir */ + int nbytes; /* temp for byte copies */ + xfs_dir2_data_aoff_t new_offset; /* next offset value */ + xfs_dir2_data_aoff_t offset; /* current offset value */ + int old_isize; /* previous di_size */ + xfs_dir2_sf_entry_t *oldsfep; /* entry in original dir */ + xfs_dir2_sf_t *oldsfp; /* original shortform dir */ + xfs_dir2_sf_entry_t *sfep; /* entry in new dir */ + xfs_dir2_sf_t *sfp; /* new shortform dir */ + + /* + * Copy the old directory to the stack buffer. + */ + dp = args->dp; + + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + old_isize = (int)dp->i_d.di_size; + buf = kmem_alloc(old_isize, KM_SLEEP); + oldsfp = (xfs_dir2_sf_t *)buf; + memcpy(oldsfp, sfp, old_isize); + /* + * Loop over the old directory finding the place we're going + * to insert the new entry. + * If it's going to end up at the end then oldsfep will point there. + */ + for (offset = XFS_DIR2_DATA_FIRST_OFFSET, + oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp), + add_datasize = XFS_DIR2_DATA_ENTSIZE(args->namelen), + eof = (char *)oldsfep == &buf[old_isize]; + !eof; + offset = new_offset + XFS_DIR2_DATA_ENTSIZE(oldsfep->namelen), + oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep), + eof = (char *)oldsfep == &buf[old_isize]) { + new_offset = XFS_DIR2_SF_GET_OFFSET_ARCH(oldsfep, ARCH_CONVERT); + if (offset + add_datasize <= new_offset) + break; + } + /* + * Get rid of the old directory, then allocate space for + * the new one. We do this so xfs_idata_realloc won't copy + * the data. + */ + xfs_idata_realloc(dp, -old_isize, XFS_DATA_FORK); + xfs_idata_realloc(dp, new_isize, XFS_DATA_FORK); + /* + * Reset the pointer since the buffer was reallocated. + */ + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + /* + * Copy the first part of the directory, including the header. + */ + nbytes = (int)((char *)oldsfep - (char *)oldsfp); + memcpy(sfp, oldsfp, nbytes); + sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + nbytes); + /* + * Fill in the new entry, and update the header counts. + */ + sfep->namelen = args->namelen; + XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep, offset, ARCH_CONVERT); + memcpy(sfep->name, args->name, sfep->namelen); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + sfp->hdr.count++; +#if XFS_BIG_FILESYSTEMS + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange) + sfp->hdr.i8count++; +#endif + /* + * If there's more left to copy, do that. + */ + if (!eof) { + sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); + memcpy(sfep, oldsfep, old_isize - nbytes); + } + kmem_free(buf, old_isize); + dp->i_d.di_size = new_isize; + xfs_dir2_sf_check(args); +} + +/* + * Decide if the new entry will fit at all. + * If it will fit, pick between adding the new entry to the end (easy) + * or somewhere else (hard). + * Return 0 (won't fit), 1 (easy), 2 (hard). 
+ */ +/*ARGSUSED*/ +static int /* pick result */ +xfs_dir2_sf_addname_pick( + xfs_da_args_t *args, /* operation arguments */ + int objchange, /* inode # size changes */ + xfs_dir2_sf_entry_t **sfepp, /* out(1): new entry ptr */ + xfs_dir2_data_aoff_t *offsetp) /* out(1): new offset */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int holefit; /* found hole it will fit in */ + int i; /* entry number */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_data_aoff_t offset; /* data block offset */ + xfs_dir2_sf_entry_t *sfep; /* shortform entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + int size; /* entry's data size */ + int used; /* data bytes used */ + + dp = args->dp; + mp = dp->i_mount; + + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + size = XFS_DIR2_DATA_ENTSIZE(args->namelen); + offset = XFS_DIR2_DATA_FIRST_OFFSET; + sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + holefit = 0; + /* + * Loop over sf entries. + * Keep track of data offset and whether we've seen a place + * to insert the new entry. + */ + for (i = 0; i < sfp->hdr.count; i++) { + if (!holefit) + holefit = offset + size <= XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT); + offset = XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT) + + XFS_DIR2_DATA_ENTSIZE(sfep->namelen); + sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); + } + /* + * Calculate data bytes used excluding the new entry, if this + * was a data block (block form directory). + */ + used = offset + + (sfp->hdr.count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) + + (uint)sizeof(xfs_dir2_block_tail_t); + /* + * If it won't fit in a block form then we can't insert it, + * we'll go back, convert to block, then try the insert and convert + * to leaf. + */ + if (used + (holefit ? 0 : size) > mp->m_dirblksize) + return 0; + /* + * If changing the inode number size, do it the hard way. + */ +#if XFS_BIG_FILESYSTEMS + if (objchange) { + return 2; + } +#else + ASSERT(objchange == 0); +#endif + /* + * If it won't fit at the end then do it the hard way (use the hole). + */ + if (used + size > mp->m_dirblksize) + return 2; + /* + * Do it the easy way. + */ + *sfepp = sfep; + *offsetp = offset; + return 1; +} + +#ifdef DEBUG +/* + * Check consistency of shortform directory, assert if bad. 
+ */ +static void +xfs_dir2_sf_check( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry number */ + int i8count; /* number of big inode#s */ + xfs_ino_t ino; /* entry inode number */ + int offset; /* data offset */ + xfs_dir2_sf_entry_t *sfep; /* shortform dir entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + dp = args->dp; + + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + offset = XFS_DIR2_DATA_FIRST_OFFSET; + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT); + i8count = ino > XFS_DIR2_MAX_SHORT_INUM; + + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + ASSERT(XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT) >= offset); + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + i8count += ino > XFS_DIR2_MAX_SHORT_INUM; + offset = + XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT) + + XFS_DIR2_DATA_ENTSIZE(sfep->namelen); + } + ASSERT(i8count == sfp->hdr.i8count); +#if !XFS_BIG_FILESYSTEMS + ASSERT(i8count == 0); +#endif + ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size); + ASSERT(offset + + (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + + (uint)sizeof(xfs_dir2_block_tail_t) <= + dp->i_mount->m_dirblksize); +} +#endif /* DEBUG */ + +/* + * Create a new (shortform) directory. + */ +int /* error, always 0 */ +xfs_dir2_sf_create( + xfs_da_args_t *args, /* operation arguments */ + xfs_ino_t pino) /* parent inode number */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int i8count; /* parent inode is an 8-byte number */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + int size; /* directory size */ + + xfs_dir2_trace_args_i("sf_create", args, pino); + dp = args->dp; + + ASSERT(dp != NULL); + ASSERT(dp->i_d.di_size == 0); + /* + * If it's currently a zero-length extent file, + * convert it to local format. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) { + dp->i_df.if_flags &= ~XFS_IFEXTENTS; /* just in case */ + dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE); + dp->i_df.if_flags |= XFS_IFINLINE; + } + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + ASSERT(dp->i_df.if_bytes == 0); + i8count = pino > XFS_DIR2_MAX_SHORT_INUM; + size = XFS_DIR2_SF_HDR_SIZE(i8count); + /* + * Make a buffer for the data. + */ + xfs_idata_realloc(dp, size, XFS_DATA_FORK); + /* + * Fill in the header, + */ + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp->hdr.i8count = i8count; + /* + * Now can put in the inode number, since i8count is set. + */ + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &pino, &sfp->hdr.parent, ARCH_CONVERT); + sfp->hdr.count = 0; + dp->i_d.di_size = size; + xfs_dir2_sf_check(args); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + return 0; +} + +int /* error */ +xfs_dir2_sf_getdents( + xfs_inode_t *dp, /* incore directory inode */ + uio_t *uio, /* caller's buffer control */ + int *eofp, /* eof reached? 
(out) */ + xfs_dirent_t *dbp, /* caller's buffer */ + xfs_dir2_put_t put) /* abi's formatting function */ +{ + int error; /* error return value */ + int i; /* shortform entry number */ + xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_dataptr_t off; /* current entry's offset */ + xfs_dir2_put_args_t p; /* arg package for put rtn */ + xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_off_t dir_offset; + + mp = dp->i_mount; + + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Give up if the directory is way too short. + */ + if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + return XFS_ERROR(EIO); + } + + dir_offset = uio->uio_offset; + + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + + ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); + + /* + * If the block number in the offset is out of range, we're done. + */ + if (XFS_DIR2_DATAPTR_TO_DB(mp, dir_offset) > mp->m_dirdatablk) { + *eofp = 1; + return 0; + } + + /* + * Set up putargs structure. + */ + p.dbp = dbp; + p.put = put; + p.uio = uio; + /* + * Put . entry unless we're starting past it. + */ + if (dir_offset <= + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_DOT_OFFSET)) { + p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, 0, + XFS_DIR2_DATA_DOTDOT_OFFSET); +#if XFS_BIG_FILESYSTEMS + p.ino = dp->i_ino + mp->m_inoadd; +#else + p.ino = dp->i_ino; +#endif + p.name = "."; + p.namelen = 1; + + error = p.put(&p); + + if (!p.done) { + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_DOT_OFFSET); + return error; + } + } + + /* + * Put .. entry unless we're starting past it. + */ + if (dir_offset <= + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_DOTDOT_OFFSET)) { + p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_FIRST_OFFSET); +#if XFS_BIG_FILESYSTEMS + p.ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT) + + mp->m_inoadd; +#else + p.ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT); +#endif + p.name = ".."; + p.namelen = 2; + + error = p.put(&p); + + if (!p.done) { + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_DATA_DOTDOT_OFFSET); + return error; + } + } + + /* + * Loop while there are more entries and put'ing works. + */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + + off = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT)); + + if (dir_offset > off) + continue; + + p.namelen = sfep->namelen; + + p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, + XFS_DIR2_SF_GET_OFFSET_ARCH(sfep, ARCH_CONVERT) + + XFS_DIR2_DATA_ENTSIZE(p.namelen)); + +#if XFS_BIG_FILESYSTEMS + p.ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT) + + mp->m_inoadd; +#else + p.ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); +#endif + p.name = (char *)sfep->name; + + error = p.put(&p); + + if (!p.done) { + uio->uio_offset = off; + return error; + } + } + + /* + * They all fit. + */ + *eofp = 1; + + uio->uio_offset = + XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0); + + return 0; +} + +/* + * Lookup an entry in a shortform directory. 
+ * Returns EEXIST if found, ENOENT if not found. + */ +int /* error */ +xfs_dir2_sf_lookup( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry index */ + xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + xfs_dir2_trace_args("sf_lookup", args); + xfs_dir2_sf_check(args); + dp = args->dp; + + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Bail out if the directory is way too short. + */ + if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); + /* + * Special case for . + */ + if (args->namelen == 1 && args->name[0] == '.') { + args->inumber = dp->i_ino; + return XFS_ERROR(EEXIST); + } + /* + * Special case for .. + */ + if (args->namelen == 2 && + args->name[0] == '.' && args->name[1] == '.') { + args->inumber = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT); + return XFS_ERROR(EEXIST); + } + /* + * Loop over all the entries trying to match ours. + */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + if (sfep->namelen == args->namelen && + sfep->name[0] == args->name[0] && + memcmp(args->name, sfep->name, args->namelen) == 0) { + args->inumber = + XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + return XFS_ERROR(EEXIST); + } + } + /* + * Didn't find it. + */ + ASSERT(args->oknoent); + return XFS_ERROR(ENOENT); +} + +/* + * Remove an entry from a shortform directory. + */ +int /* error */ +xfs_dir2_sf_removename( + xfs_da_args_t *args) +{ + int byteoff; /* offset of removed entry */ + xfs_inode_t *dp; /* incore directory inode */ + int entsize; /* this entry's size */ + int i; /* shortform entry index */ + int newsize; /* new inode size */ + int oldsize; /* old inode size */ + xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + xfs_dir2_trace_args("sf_removename", args); + dp = args->dp; + + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + oldsize = (int)dp->i_d.di_size; + /* + * Bail out if the directory is way too short. + */ + if (oldsize < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == oldsize); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(oldsize >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); + /* + * Loop over the old directory entries. + * Find the one we're deleting. + */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + if (sfep->namelen == args->namelen && + sfep->name[0] == args->name[0] && + memcmp(sfep->name, args->name, args->namelen) == 0) { + ASSERT(XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT) == + args->inumber); + break; + } + } + /* + * Didn't find it. + */ + if (i == sfp->hdr.count) { + return XFS_ERROR(ENOENT); + } + /* + * Calculate sizes. 
+ */ + byteoff = (int)((char *)sfep - (char *)sfp); + entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen); + newsize = oldsize - entsize; + /* + * Copy the part if any after the removed entry, sliding it down. + */ + if (byteoff + entsize < oldsize) + memmove((char *)sfp + byteoff, (char *)sfp + byteoff + entsize, + oldsize - (byteoff + entsize)); + /* + * Fix up the header and file size. + */ + sfp->hdr.count--; + dp->i_d.di_size = newsize; + /* + * Reallocate, making it smaller. + */ + xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; +#if XFS_BIG_FILESYSTEMS + /* + * Are we changing inode number size? + */ + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) { + if (sfp->hdr.i8count == 1) + xfs_dir2_sf_toino4(args); + else + sfp->hdr.i8count--; + } +#endif + xfs_dir2_sf_check(args); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + return 0; +} + +/* + * Replace the inode number of an entry in a shortform directory. + */ +int /* error */ +xfs_dir2_sf_replace( + xfs_da_args_t *args) /* operation arguments */ +{ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry index */ +#if XFS_BIG_FILESYSTEMS || defined(DEBUG) + xfs_ino_t ino=0; /* entry old inode number */ +#endif +#if XFS_BIG_FILESYSTEMS + int i8elevated; /* sf_toino8 set i8count=1 */ +#endif + xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ + xfs_dir2_sf_t *sfp; /* shortform structure */ + + xfs_dir2_trace_args("sf_replace", args); + dp = args->dp; + + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Bail out if the shortform directory is way too small. + */ + if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); +#if XFS_BIG_FILESYSTEMS + /* + * New inode number is large, and need to convert to 8-byte inodes. + */ + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) { + int error; /* error return value */ + int newsize; /* new inode size */ + + newsize = + dp->i_df.if_bytes + + (sfp->hdr.count + 1) * + ((uint)sizeof(xfs_dir2_ino8_t) - + (uint)sizeof(xfs_dir2_ino4_t)); + /* + * Won't fit as shortform, convert to block then do replace. + */ + if (newsize > XFS_IFORK_DSIZE(dp)) { + error = xfs_dir2_sf_to_block(args); + if (error) { + return error; + } + return xfs_dir2_block_replace(args); + } + /* + * Still fits, convert to 8-byte now. + */ + xfs_dir2_sf_toino8(args); + i8elevated = 1; + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + } else + i8elevated = 0; +#endif + ASSERT(args->namelen != 1 || args->name[0] != '.'); + /* + * Replace ..'s entry. + */ + if (args->namelen == 2 && + args->name[0] == '.' && args->name[1] == '.') { +#if XFS_BIG_FILESYSTEMS || defined(DEBUG) + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, &sfp->hdr.parent, ARCH_CONVERT); + ASSERT(args->inumber != ino); +#endif + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber, &sfp->hdr.parent, ARCH_CONVERT); + } + /* + * Normal entry, look for the name. 
+ */ + else { + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { + if (sfep->namelen == args->namelen && + sfep->name[0] == args->name[0] && + memcmp(args->name, sfep->name, args->namelen) == 0) { +#if XFS_BIG_FILESYSTEMS || defined(DEBUG) + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + ASSERT(args->inumber != ino); +#endif + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &args->inumber, + XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + break; + } + } + /* + * Didn't find it. + */ + if (i == sfp->hdr.count) { + ASSERT(args->oknoent); +#if XFS_BIG_FILESYSTEMS + if (i8elevated) + xfs_dir2_sf_toino4(args); +#endif + return XFS_ERROR(ENOENT); + } + } +#if XFS_BIG_FILESYSTEMS + /* + * See if the old number was large, the new number is small. + */ + if (ino > XFS_DIR2_MAX_SHORT_INUM && + args->inumber <= XFS_DIR2_MAX_SHORT_INUM) { + /* + * And the old count was one, so need to convert to small. + */ + if (sfp->hdr.i8count == 1) + xfs_dir2_sf_toino4(args); + else + sfp->hdr.i8count--; + } + /* + * See if the old number was small, the new number is large. + */ + if (ino <= XFS_DIR2_MAX_SHORT_INUM && + args->inumber > XFS_DIR2_MAX_SHORT_INUM) { + /* + * add to the i8count unless we just converted to 8-byte + * inodes (which does an implied i8count = 1) + */ + ASSERT(sfp->hdr.i8count != 0); + if (!i8elevated) + sfp->hdr.i8count++; + } +#endif + xfs_dir2_sf_check(args); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); + return 0; +} + +#if XFS_BIG_FILESYSTEMS +/* + * Convert from 8-byte inode numbers to 4-byte inode numbers. + * The last 8-byte inode number is gone, but the count is still 1. + */ +static void +xfs_dir2_sf_toino4( + xfs_da_args_t *args) /* operation arguments */ +{ + char *buf; /* old dir's buffer */ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry index */ + xfs_ino_t ino; /* entry inode number */ + int newsize; /* new inode size */ + xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */ + xfs_dir2_sf_t *oldsfp; /* old sf directory */ + int oldsize; /* old inode size */ + xfs_dir2_sf_entry_t *sfep; /* new sf entry */ + xfs_dir2_sf_t *sfp; /* new sf directory */ + + xfs_dir2_trace_args("sf_toino4", args); + dp = args->dp; + + /* + * Copy the old directory to the buffer. + * Then nuke it from the inode, and add the new buffer to the inode. + * Don't want xfs_idata_realloc copying the data here. + */ + oldsize = dp->i_df.if_bytes; + buf = kmem_alloc(oldsize, KM_SLEEP); + oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(oldsfp->hdr.i8count == 1); + memcpy(buf, oldsfp, oldsize); + /* + * Compute the new inode size. + */ + newsize = + oldsize - + (oldsfp->hdr.count + 1) * + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)); + xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK); + xfs_idata_realloc(dp, newsize, XFS_DATA_FORK); + /* + * Reset our pointers, the data has moved. + */ + oldsfp = (xfs_dir2_sf_t *)buf; + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + /* + * Fill in the new header. + */ + sfp->hdr.count = oldsfp->hdr.count; + sfp->hdr.i8count = 0; + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp, &oldsfp->hdr.parent, ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, &sfp->hdr.parent, ARCH_CONVERT); + /* + * Copy the entries field by field. 
+ */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp), + oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep), + oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) { + sfep->namelen = oldsfep->namelen; + sfep->offset = oldsfep->offset; + memcpy(sfep->name, oldsfep->name, sfep->namelen); + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp, + XFS_DIR2_SF_INUMBERP(oldsfep), ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + } + /* + * Clean up the inode. + */ + kmem_free(buf, oldsize); + dp->i_d.di_size = newsize; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); +} + +/* + * Convert from 4-byte inode numbers to 8-byte inode numbers. + * The new 8-byte inode number is not there yet, we leave with the + * count 1 but no corresponding entry. + */ +static void +xfs_dir2_sf_toino8( + xfs_da_args_t *args) /* operation arguments */ +{ + char *buf; /* old dir's buffer */ + xfs_inode_t *dp; /* incore directory inode */ + int i; /* entry index */ + xfs_ino_t ino; /* entry inode number */ + int newsize; /* new inode size */ + xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */ + xfs_dir2_sf_t *oldsfp; /* old sf directory */ + int oldsize; /* old inode size */ + xfs_dir2_sf_entry_t *sfep; /* new sf entry */ + xfs_dir2_sf_t *sfp; /* new sf directory */ + + xfs_dir2_trace_args("sf_toino8", args); + dp = args->dp; + + /* + * Copy the old directory to the buffer. + * Then nuke it from the inode, and add the new buffer to the inode. + * Don't want xfs_idata_realloc copying the data here. + */ + oldsize = dp->i_df.if_bytes; + buf = kmem_alloc(oldsize, KM_SLEEP); + oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + ASSERT(oldsfp->hdr.i8count == 0); + memcpy(buf, oldsfp, oldsize); + /* + * Compute the new inode size. + */ + newsize = + oldsize + + (oldsfp->hdr.count + 1) * + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)); + xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK); + xfs_idata_realloc(dp, newsize, XFS_DATA_FORK); + /* + * Reset our pointers, the data has moved. + */ + oldsfp = (xfs_dir2_sf_t *)buf; + sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + /* + * Fill in the new header. + */ + sfp->hdr.count = oldsfp->hdr.count; + sfp->hdr.i8count = 1; + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp, &oldsfp->hdr.parent, ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, &sfp->hdr.parent, ARCH_CONVERT); + /* + * Copy the entries field by field. + */ + for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp), + oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp); + i < sfp->hdr.count; + i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep), + oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) { + sfep->namelen = oldsfep->namelen; + sfep->offset = oldsfep->offset; + memcpy(sfep->name, oldsfep->name, sfep->namelen); + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(oldsfp, + XFS_DIR2_SF_INUMBERP(oldsfep), ARCH_CONVERT); + XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep), ARCH_CONVERT); + } + /* + * Clean up the inode. 
+ */ + kmem_free(buf, oldsize); + dp->i_d.di_size = newsize; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); +} +#endif /* XFS_BIG_FILESYSTEMS */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_sf.h linux.22-ac2/fs/xfs/xfs_dir2_sf.h --- linux.vanilla/fs/xfs/xfs_dir2_sf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_sf.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_SF_H__ +#define __XFS_DIR2_SF_H__ + +/* + * Directory layout when stored internal to an inode. + * + * Small directories are packed as tightly as possible so as to + * fit into the literal area of the inode. + */ + +struct uio; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_dir2_block; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * Maximum size of a shortform directory. + */ +#define XFS_DIR2_SF_MAX_SIZE \ + (XFS_DINODE_MAX_SIZE - (uint)sizeof(xfs_dinode_core_t) - \ + (uint)sizeof(xfs_agino_t)) + +/* + * Inode number stored as 8 8-bit values. + */ +typedef struct { __uint8_t i[8]; } xfs_dir2_ino8_t; + +#define XFS_DIR2_SF_GET_INO8_ARCH(di,arch) \ + (xfs_ino_t)(DIRINO_GET_ARCH(&di,arch)) +#define XFS_DIR2_SF_GET_INO8(di) \ + XFS_DIR2_SF_GET_INO8_ARCH(di,ARCH_NOCONVERT) + +/* + * Inode number stored as 4 8-bit values. + * Works a lot of the time, when all the inode numbers in a directory + * fit in 32 bits. + */ +typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t; +#define XFS_DIR2_SF_GET_INO4_ARCH(di,arch) \ + (xfs_ino_t)(DIRINO4_GET_ARCH(&di,arch)) +#define XFS_DIR2_SF_GET_INO4(di) \ + XFS_DIR2_SF_GET_INO4_ARCH(di,ARCH_NOCONVERT) + +typedef union { + xfs_dir2_ino8_t i8; + xfs_dir2_ino4_t i4; +} xfs_dir2_inou_t; +#define XFS_DIR2_MAX_SHORT_INUM ((xfs_ino_t)0xffffffffULL) + +/* + * Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t. + * Only need 16 bits, this is the byte offset into the single block form. + */ +typedef struct { __uint8_t i[2]; } xfs_dir2_sf_off_t; + +/* + * The parent directory has a dedicated field, and the self-pointer must + * be calculated on the fly. 
+ * + * Entries are packed toward the top as tightly as possible. The header + * and the elements must be memcpy'd out into a work area to get correct + * alignment for the inode number fields. + */ +typedef struct xfs_dir2_sf_hdr { + __uint8_t count; /* count of entries */ + __uint8_t i8count; /* count of 8-byte inode #s */ + xfs_dir2_inou_t parent; /* parent dir inode number */ +} xfs_dir2_sf_hdr_t; + +typedef struct xfs_dir2_sf_entry { + __uint8_t namelen; /* actual name length */ + xfs_dir2_sf_off_t offset; /* saved offset */ + __uint8_t name[1]; /* name, variable size */ + xfs_dir2_inou_t inumber; /* inode number, var. offset */ +} xfs_dir2_sf_entry_t; + +typedef struct xfs_dir2_sf { + xfs_dir2_sf_hdr_t hdr; /* shortform header */ + xfs_dir2_sf_entry_t list[1]; /* shortform entries */ +} xfs_dir2_sf_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_HDR_SIZE) +int xfs_dir2_sf_hdr_size(int i8count); +#define XFS_DIR2_SF_HDR_SIZE(i8count) xfs_dir2_sf_hdr_size(i8count) +#else +#define XFS_DIR2_SF_HDR_SIZE(i8count) \ + ((uint)sizeof(xfs_dir2_sf_hdr_t) - \ + ((i8count) == 0) * \ + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_INUMBERP) +xfs_dir2_inou_t *xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep); +#define XFS_DIR2_SF_INUMBERP(sfep) xfs_dir2_sf_inumberp(sfep) +#else +#define XFS_DIR2_SF_INUMBERP(sfep) \ + ((xfs_dir2_inou_t *)&(sfep)->name[(sfep)->namelen]) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_GET_INUMBER) +xfs_intino_t xfs_dir2_sf_get_inumber_arch(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from, + xfs_arch_t arch); +#define XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, from, arch) \ + xfs_dir2_sf_get_inumber_arch(sfp, from, arch) + +#else +#define XFS_DIR2_SF_GET_INUMBER_ARCH(sfp, from, arch) \ + ((sfp)->hdr.i8count == 0 ? 
\ + (xfs_intino_t)XFS_DIR2_SF_GET_INO4_ARCH(*(from), arch) : \ + (xfs_intino_t)XFS_DIR2_SF_GET_INO8_ARCH(*(from), arch)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_PUT_INUMBER) +void xfs_dir2_sf_put_inumber_arch(xfs_dir2_sf_t *sfp, xfs_ino_t *from, + xfs_dir2_inou_t *to, xfs_arch_t arch); +#define XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp,from,to,arch) \ + xfs_dir2_sf_put_inumber_arch(sfp,from,to,arch) +#else +#define XFS_DIR2_SF_PUT_INUMBER_ARCH(sfp,from,to,arch) \ + if ((sfp)->hdr.i8count == 0) { \ + DIRINO4_COPY_ARCH(from,to,arch); \ + } else { \ + DIRINO_COPY_ARCH(from,to,arch); \ + } +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_GET_OFFSET) +xfs_dir2_data_aoff_t xfs_dir2_sf_get_offset_arch(xfs_dir2_sf_entry_t *sfep, + xfs_arch_t arch); +xfs_dir2_data_aoff_t xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep); +#define XFS_DIR2_SF_GET_OFFSET_ARCH(sfep,arch) \ + xfs_dir2_sf_get_offset_arch(sfep,arch) +#else +#define XFS_DIR2_SF_GET_OFFSET_ARCH(sfep,arch) \ + INT_GET_UNALIGNED_16_ARCH(&(sfep)->offset.i,arch) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_PUT_OFFSET) +void xfs_dir2_sf_put_offset_arch(xfs_dir2_sf_entry_t *sfep, + xfs_dir2_data_aoff_t off, xfs_arch_t arch); +#define XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep,off,arch) \ + xfs_dir2_sf_put_offset_arch(sfep,off,arch) +#else +#define XFS_DIR2_SF_PUT_OFFSET_ARCH(sfep,off,arch) \ + INT_SET_UNALIGNED_16_ARCH(&(sfep)->offset.i,off,arch) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_ENTSIZE_BYNAME) +int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len); +#define XFS_DIR2_SF_ENTSIZE_BYNAME(sfp,len) \ + xfs_dir2_sf_entsize_byname(sfp,len) +#else +#define XFS_DIR2_SF_ENTSIZE_BYNAME(sfp,len) /* space a name uses */ \ + ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \ + ((sfp)->hdr.i8count == 0) * \ + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_ENTSIZE_BYENTRY) +int xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep); +#define XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep) \ + xfs_dir2_sf_entsize_byentry(sfp,sfep) +#else +#define XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep) /* space an entry uses */ \ + ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (sfep)->namelen - \ + ((sfp)->hdr.i8count == 0) * \ + ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_FIRSTENTRY) +xfs_dir2_sf_entry_t *xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp); +#define XFS_DIR2_SF_FIRSTENTRY(sfp) xfs_dir2_sf_firstentry(sfp) +#else +#define XFS_DIR2_SF_FIRSTENTRY(sfp) /* first entry in struct */ \ + ((xfs_dir2_sf_entry_t *) \ + ((char *)(sfp) + XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR2_SF_NEXTENTRY) +xfs_dir2_sf_entry_t *xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, + xfs_dir2_sf_entry_t *sfep); +#define XFS_DIR2_SF_NEXTENTRY(sfp,sfep) xfs_dir2_sf_nextentry(sfp,sfep) +#else +#define XFS_DIR2_SF_NEXTENTRY(sfp,sfep) /* next entry in struct */ \ + ((xfs_dir2_sf_entry_t *) \ + ((char *)(sfep) + XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep))) +#endif + +/* + * Functions. 
+ */ + +extern int + xfs_dir2_block_sfsize(struct xfs_inode *dp, + struct xfs_dir2_block *block, + xfs_dir2_sf_hdr_t *sfhp); + +extern int + xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp, + int size, xfs_dir2_sf_hdr_t *sfhp); + +extern int + xfs_dir2_sf_addname(struct xfs_da_args *args); + +extern int + xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); + +extern int + xfs_dir2_sf_getdents(struct xfs_inode *dp, struct uio *uio, int *eofp, + struct xfs_dirent *dbp, xfs_dir2_put_t put); + +extern int + xfs_dir2_sf_lookup(struct xfs_da_args *args); + +extern int + xfs_dir2_sf_removename(struct xfs_da_args *args); + +extern int + xfs_dir2_sf_replace(struct xfs_da_args *args); + +#endif /* __XFS_DIR2_SF_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_trace.c linux.22-ac2/fs/xfs/xfs_dir2_trace.c --- linux.vanilla/fs/xfs/xfs_dir2_trace.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_trace.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir2_trace.c + * Tracing for xfs v2 directories. + */ +#include "xfs.h" + +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_da_btree.h" +#include "xfs_dir2_trace.h" + +#ifdef DEBUG +ktrace_t *xfs_dir2_trace_buf; +#endif /* DEBUG */ + +#ifdef XFS_DIR2_TRACE +/* + * Enter something in the trace buffers. 
+ */ +static void +xfs_dir2_trace_enter( + xfs_inode_t *dp, + int type, + char *where, + char *name, + int namelen, + __psunsigned_t a0, + __psunsigned_t a1, + __psunsigned_t a2, + __psunsigned_t a3, + __psunsigned_t a4, + __psunsigned_t a5, + __psunsigned_t a6) +{ + __psunsigned_t n[6]; + + ASSERT(xfs_dir2_trace_buf); + ASSERT(dp->i_dir_trace); + if (name) + memcpy(n, name, min(sizeof(n), namelen)); + else + memset((char *)n, 0, sizeof(n)); + ktrace_enter(xfs_dir2_trace_buf, + (void *)(__psunsigned_t)type, (void *)where, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, + (void *)(__psunsigned_t)namelen, + (void *)n[0], (void *)n[1], (void *)n[2], + (void *)n[3], (void *)n[4], (void *)n[5]); + ktrace_enter(dp->i_dir_trace, + (void *)(__psunsigned_t)type, (void *)where, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, + (void *)(__psunsigned_t)namelen, + (void *)n[0], (void *)n[1], (void *)n[2], + (void *)n[3], (void *)n[4], (void *)n[5]); +} + +void +xfs_dir2_trace_args( + char *where, + xfs_da_args_t *args) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, 0, 0); +} + +void +xfs_dir2_trace_args_b( + char *where, + xfs_da_args_t *args, + xfs_dabuf_t *bp) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_B, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, + (__psunsigned_t)(bp ? bp->bps[0] : NULL), 0); +} + +void +xfs_dir2_trace_args_bb( + char *where, + xfs_da_args_t *args, + xfs_dabuf_t *lbp, + xfs_dabuf_t *dbp) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BB, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, + (__psunsigned_t)(lbp ? lbp->bps[0] : NULL), + (__psunsigned_t)(dbp ? dbp->bps[0] : NULL)); +} + +void +xfs_dir2_trace_args_bibii( + char *where, + xfs_da_args_t *args, + xfs_dabuf_t *bs, + int ss, + xfs_dabuf_t *bd, + int sd, + int c) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BIBII, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)(bs ? bs->bps[0] : NULL), (__psunsigned_t)ss, + (__psunsigned_t)(bd ? bd->bps[0] : NULL), (__psunsigned_t)sd, + (__psunsigned_t)c); +} + +void +xfs_dir2_trace_args_db( + char *where, + xfs_da_args_t *args, + xfs_dir2_db_t db, + xfs_dabuf_t *bp) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_DB, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, (__psunsigned_t)db, + (__psunsigned_t)(bp ? 
bp->bps[0] : NULL)); +} + +void +xfs_dir2_trace_args_i( + char *where, + xfs_da_args_t *args, + xfs_ino_t i) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_I, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, (__psunsigned_t)i, 0); +} + +void +xfs_dir2_trace_args_s( + char *where, + xfs_da_args_t *args, + int s) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_S, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, (__psunsigned_t)s, 0); +} + +void +xfs_dir2_trace_args_sb( + char *where, + xfs_da_args_t *args, + int s, + xfs_dabuf_t *bp) +{ + xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_SB, where, + (char *)args->name, (int)args->namelen, + (__psunsigned_t)args->hashval, (__psunsigned_t)args->inumber, + (__psunsigned_t)args->dp, (__psunsigned_t)args->trans, + (__psunsigned_t)args->justcheck, (__psunsigned_t)s, + (__psunsigned_t)(bp ? bp->bps[0] : NULL)); +} +#endif /* XFS_DIR2_TRACE */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir2_trace.h linux.22-ac2/fs/xfs/xfs_dir2_trace.h --- linux.vanilla/fs/xfs/xfs_dir2_trace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir2_trace.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR2_TRACE_H__ +#define __XFS_DIR2_TRACE_H__ + +/* + * Tracing for xfs v2 directories. 
+ */ + +struct ktrace; +struct xfs_dabuf; +struct xfs_da_args; + +#ifdef XFS_ALL_TRACE +#define XFS_DIR2_TRACE +#endif /* XFS_ALL_TRACE */ + +#if !defined(DEBUG) +#undef XFS_DIR2_TRACE +#endif /* !DEBUG */ + +#define XFS_DIR2_GTRACE_SIZE 4096 /* global buffer */ +#define XFS_DIR2_KTRACE_SIZE 32 /* per-inode buffer */ + +#define XFS_DIR2_KTRACE_ARGS 1 /* args only */ +#define XFS_DIR2_KTRACE_ARGS_B 2 /* args + buffer */ +#define XFS_DIR2_KTRACE_ARGS_BB 3 /* args + 2 buffers */ +#define XFS_DIR2_KTRACE_ARGS_DB 4 /* args, db, buffer */ +#define XFS_DIR2_KTRACE_ARGS_I 5 /* args, inum */ +#define XFS_DIR2_KTRACE_ARGS_S 6 /* args, int */ +#define XFS_DIR2_KTRACE_ARGS_SB 7 /* args, int, buffer */ +#define XFS_DIR2_KTRACE_ARGS_BIBII 8 /* args, buf/int/buf/int/int */ + +#ifdef XFS_DIR2_TRACE + +void xfs_dir2_trace_args(char *where, struct xfs_da_args *args); +void xfs_dir2_trace_args_b(char *where, struct xfs_da_args *args, + struct xfs_dabuf *bp); +void xfs_dir2_trace_args_bb(char *where, struct xfs_da_args *args, + struct xfs_dabuf *lbp, struct xfs_dabuf *dbp); +void xfs_dir2_trace_args_bibii(char *where, struct xfs_da_args *args, + struct xfs_dabuf *bs, int ss, + struct xfs_dabuf *bd, int sd, int c); +void xfs_dir2_trace_args_db(char *where, struct xfs_da_args *args, + xfs_dir2_db_t db, struct xfs_dabuf *bp); +void xfs_dir2_trace_args_i(char *where, struct xfs_da_args *args, xfs_ino_t i); +void xfs_dir2_trace_args_s(char *where, struct xfs_da_args *args, int s); +void xfs_dir2_trace_args_sb(char *where, struct xfs_da_args *args, int s, + struct xfs_dabuf *bp); + +#else /* XFS_DIR2_TRACE */ + +#define xfs_dir2_trace_args(where, args) +#define xfs_dir2_trace_args_b(where, args, bp) +#define xfs_dir2_trace_args_bb(where, args, lbp, dbp) +#define xfs_dir2_trace_args_bibii(where, args, bs, ss, bd, sd, c) +#define xfs_dir2_trace_args_db(where, args, db, bp) +#define xfs_dir2_trace_args_i(where, args, i) +#define xfs_dir2_trace_args_s(where, args, s) +#define xfs_dir2_trace_args_sb(where, args, s, bp) + +#endif /* XFS_DIR2_TRACE */ + +extern struct ktrace *xfs_dir2_trace_buf; + +#endif /* __XFS_DIR2_TRACE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir.c linux.22-ac2/fs/xfs/xfs_dir.c --- linux.vanilla/fs/xfs/xfs_dir.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1210 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_error.h" + +/* + * xfs_dir.c + * + * Provide the external interfaces to manage directories. + */ + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Functions for the dirops interfaces. + */ +static void xfs_dir_mount(struct xfs_mount *mp); + +static int xfs_dir_isempty(struct xfs_inode *dp); + +static int xfs_dir_init(struct xfs_trans *trans, + struct xfs_inode *dir, + struct xfs_inode *parent_dir); + +static int xfs_dir_createname(struct xfs_trans *trans, + struct xfs_inode *dp, + char *name_string, + int name_len, + xfs_ino_t inode_number, + xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, + xfs_extlen_t total); + +static int xfs_dir_lookup(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name_string, + int name_length, + xfs_ino_t *inode_number); + +static int xfs_dir_removename(struct xfs_trans *trans, + struct xfs_inode *dp, + char *name_string, + int name_length, + xfs_ino_t ino, + xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, + xfs_extlen_t total); + +static int xfs_dir_getdents(struct xfs_trans *tp, + struct xfs_inode *dp, + struct uio *uiop, + int *eofp); + +static int xfs_dir_replace(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name_string, + int name_length, + xfs_ino_t inode_number, + xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, + xfs_extlen_t total); + +static int xfs_dir_canenter(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name_string, + int name_length); + +static int xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, + xfs_dinode_t *dip); + +xfs_dirops_t xfsv1_dirops = { + .xd_mount = xfs_dir_mount, + .xd_isempty = xfs_dir_isempty, + .xd_init = xfs_dir_init, + .xd_createname = xfs_dir_createname, + .xd_lookup = xfs_dir_lookup, + .xd_removename = xfs_dir_removename, + .xd_getdents = xfs_dir_getdents, + .xd_replace = xfs_dir_replace, + .xd_canenter = xfs_dir_canenter, + .xd_shortform_validate_ondisk = xfs_dir_shortform_validate_ondisk, + .xd_shortform_to_single = xfs_dir_shortform_to_leaf, +}; + +/* + * Internal routines when dirsize == XFS_LBSIZE(mp). + */ +STATIC int xfs_dir_leaf_lookup(xfs_da_args_t *args); +STATIC int xfs_dir_leaf_removename(xfs_da_args_t *args, int *number_entries, + int *total_namebytes); +STATIC int xfs_dir_leaf_getdents(xfs_trans_t *trans, xfs_inode_t *dp, + uio_t *uio, int *eofp, + xfs_dirent_t *dbp, + xfs_dir_put_t put); +STATIC int xfs_dir_leaf_replace(xfs_da_args_t *args); + +/* + * Internal routines when dirsize > XFS_LBSIZE(mp). 
+ */ +STATIC int xfs_dir_node_addname(xfs_da_args_t *args); +STATIC int xfs_dir_node_lookup(xfs_da_args_t *args); +STATIC int xfs_dir_node_removename(xfs_da_args_t *args); +STATIC int xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, + uio_t *uio, int *eofp, + xfs_dirent_t *dbp, + xfs_dir_put_t put); +STATIC int xfs_dir_node_replace(xfs_da_args_t *args); + +#if defined(DEBUG) +ktrace_t *xfs_dir_trace_buf; +#endif + + +/*======================================================================== + * Overall external interface routines. + *========================================================================*/ + +xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot; + +/* + * One-time startup routine called from xfs_init(). + */ +void +xfs_dir_startup(void) +{ + xfs_dir_hash_dot = xfs_da_hashname(".", 1); + xfs_dir_hash_dotdot = xfs_da_hashname("..", 2); +} + +/* + * Initialize directory-related fields in the mount structure. + */ +static void +xfs_dir_mount(xfs_mount_t *mp) +{ + uint shortcount, leafcount, count; + + mp->m_dirversion = 1; + shortcount = (mp->m_attroffset - (uint)sizeof(xfs_dir_sf_hdr_t)) / + (uint)sizeof(xfs_dir_sf_entry_t); + leafcount = (XFS_LBSIZE(mp) - (uint)sizeof(xfs_dir_leaf_hdr_t)) / + ((uint)sizeof(xfs_dir_leaf_entry_t) + + (uint)sizeof(xfs_dir_leaf_name_t)); + count = shortcount > leafcount ? shortcount : leafcount; + mp->m_dircook_elog = xfs_da_log2_roundup(count + 1); + ASSERT(mp->m_dircook_elog <= mp->m_sb.sb_blocklog); + mp->m_dir_node_ents = mp->m_attr_node_ents = + (XFS_LBSIZE(mp) - (uint)sizeof(xfs_da_node_hdr_t)) / + (uint)sizeof(xfs_da_node_entry_t); + mp->m_dir_magicpct = (XFS_LBSIZE(mp) * 37) / 100; + mp->m_dirblksize = mp->m_sb.sb_blocksize; + mp->m_dirblkfsbs = 1; +} + +/* + * Return 1 if directory contains only "." and "..". + */ +static int +xfs_dir_isempty(xfs_inode_t *dp) +{ + xfs_dir_sf_hdr_t *hdr; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + if (dp->i_d.di_size == 0) + return(1); + if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp)) + return(0); + hdr = (xfs_dir_sf_hdr_t *)dp->i_df.if_u1.if_data; + return(hdr->count == 0); +} + +/* + * Initialize a directory with its "." and ".." entries. + */ +static int +xfs_dir_init(xfs_trans_t *trans, xfs_inode_t *dir, xfs_inode_t *parent_dir) +{ + xfs_da_args_t args; + int error; + + memset((char *)&args, 0, sizeof(args)); + args.dp = dir; + args.trans = trans; + + ASSERT((dir->i_d.di_mode & IFMT) == IFDIR); + if ((error = xfs_dir_ino_validate(trans->t_mountp, parent_dir->i_ino))) + return error; + + return(xfs_dir_shortform_create(&args, parent_dir->i_ino)); +} + +/* + * Generic handler routine to add a name to a directory. + * Transitions directory from shortform to Btree as necessary. + */ +static int /* error */ +xfs_dir_createname(xfs_trans_t *trans, xfs_inode_t *dp, char *name, + int namelen, xfs_ino_t inum, xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, xfs_extlen_t total) +{ + xfs_da_args_t args; + int retval, newsize, done; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + if ((retval = xfs_dir_ino_validate(trans->t_mountp, inum))) + return (retval); + + XFS_STATS_INC(xfsstats.xs_dir_create); + /* + * Fill in the arg structure for this request. 
+ */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = inum; + args.dp = dp; + args.firstblock = firstblock; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = 0; + args.addname = args.oknoent = 1; + + /* + * Decide on what work routines to call based on the inode size. + */ + done = 0; + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + newsize = XFS_DIR_SF_ENTSIZE_BYNAME(args.namelen); + if ((dp->i_d.di_size + newsize) <= XFS_IFORK_DSIZE(dp)) { + retval = xfs_dir_shortform_addname(&args); + done = 1; + } else { + if (total == 0) + return XFS_ERROR(ENOSPC); + retval = xfs_dir_shortform_to_leaf(&args); + done = retval != 0; + } + } + if (!done && xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_addname(&args); + done = retval != ENOSPC; + if (!done) { + if (total == 0) + return XFS_ERROR(ENOSPC); + retval = xfs_dir_leaf_to_node(&args); + done = retval != 0; + } + } + if (!done) { + retval = xfs_dir_node_addname(&args); + } + return(retval); +} + +/* + * Generic handler routine to check if a name can be added to a directory, + * without adding any blocks to the directory. + */ +static int /* error */ +xfs_dir_canenter(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen) +{ + xfs_da_args_t args; + int retval, newsize; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = 0; + args.dp = dp; + args.firstblock = NULL; + args.flist = NULL; + args.total = 0; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = args.addname = args.oknoent = 1; + + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + newsize = XFS_DIR_SF_ENTSIZE_BYNAME(args.namelen); + if ((dp->i_d.di_size + newsize) <= XFS_IFORK_DSIZE(dp)) + retval = 0; + else + retval = XFS_ERROR(ENOSPC); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_addname(&args); + } else { + retval = xfs_dir_node_addname(&args); + } + return(retval); +} + +/* + * Generic handler routine to remove a name from a directory. + * Transitions directory from Btree to shortform as necessary. + */ +static int /* error */ +xfs_dir_removename(xfs_trans_t *trans, xfs_inode_t *dp, char *name, + int namelen, xfs_ino_t ino, xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, xfs_extlen_t total) +{ + xfs_da_args_t args; + int count, totallen, newsize, retval; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + XFS_STATS_INC(xfsstats.xs_dir_remove); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = ino; + args.dp = dp; + args.firstblock = firstblock; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = args.addname = args.oknoent = 0; + + /* + * Decide on what work routines to call based on the inode size. 
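+	 * XFS_DINODE_FMT_LOCAL means the directory is still shortform in
+	 * the inode; one mapped block means leaf form; anything larger is
+	 * node (B-tree) form.  After a leaf removal, XFS_DIR_SF_ALLFIT
+	 * tells us whether the remaining entries would fit back in the
+	 * inode, in which case the leaf is converted back to shortform.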
+ */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + retval = xfs_dir_shortform_removename(&args); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_removename(&args, &count, &totallen); + if (retval == 0) { + newsize = XFS_DIR_SF_ALLFIT(count, totallen); + if (newsize <= XFS_IFORK_DSIZE(dp)) { + retval = xfs_dir_leaf_to_shortform(&args); + } + } + } else { + retval = xfs_dir_node_removename(&args); + } + return(retval); +} + +static int /* error */ +xfs_dir_lookup(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen, + xfs_ino_t *inum) +{ + xfs_da_args_t args; + int retval; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + XFS_STATS_INC(xfsstats.xs_dir_lookup); + /* + * Fill in the arg structure for this request. + */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = 0; + args.dp = dp; + args.firstblock = NULL; + args.flist = NULL; + args.total = 0; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = args.addname = 0; + args.oknoent = 1; + + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + retval = xfs_dir_shortform_lookup(&args); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_lookup(&args); + } else { + retval = xfs_dir_node_lookup(&args); + } + if (retval == EEXIST) + retval = 0; + *inum = args.inumber; + return(retval); +} + +/* + * Implement readdir. + */ +static int /* error */ +xfs_dir_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, int *eofp) +{ + xfs_dirent_t *dbp; + int alignment, retval; + xfs_dir_put_t put; + + XFS_STATS_INC(xfsstats.xs_dir_getdents); + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + /* + * If our caller has given us a single contiguous memory buffer, + * just work directly within that buffer. If it's in user memory, + * lock it down first. + */ + alignment = sizeof(xfs_off_t) - 1; + if ((uio->uio_iovcnt == 1) && + (((__psint_t)uio->uio_iov[0].iov_base & alignment) == 0) && + ((uio->uio_iov[0].iov_len & alignment) == 0)) { + dbp = NULL; + put = xfs_dir_put_dirent64_direct; + } else { + dbp = kmem_alloc(sizeof(*dbp) + MAXNAMELEN, KM_SLEEP); + put = xfs_dir_put_dirent64_uio; + } + + /* + * Decide on what work routines to call based on the inode size. + */ + *eofp = 0; + + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + retval = xfs_dir_shortform_getdents(dp, uio, eofp, dbp, put); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_getdents(trans, dp, uio, eofp, dbp, put); + } else { + retval = xfs_dir_node_getdents(trans, dp, uio, eofp, dbp, put); + } + if (dbp != NULL) + kmem_free(dbp, sizeof(*dbp) + MAXNAMELEN); + + return(retval); +} + +static int /* error */ +xfs_dir_replace(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen, + xfs_ino_t inum, xfs_fsblock_t *firstblock, + xfs_bmap_free_t *flist, xfs_extlen_t total) +{ + xfs_da_args_t args; + int retval; + + ASSERT((dp->i_d.di_mode & IFMT) == IFDIR); + + if ((retval = xfs_dir_ino_validate(trans->t_mountp, inum))) + return retval; + + /* + * Fill in the arg structure for this request. 
+ */ + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(name, namelen); + args.inumber = inum; + args.dp = dp; + args.firstblock = firstblock; + args.flist = flist; + args.total = total; + args.whichfork = XFS_DATA_FORK; + args.trans = trans; + args.justcheck = args.addname = args.oknoent = 0; + + /* + * Decide on what work routines to call based on the inode size. + */ + if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) { + retval = xfs_dir_shortform_replace(&args); + } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) { + retval = xfs_dir_leaf_replace(&args); + } else { + retval = xfs_dir_node_replace(&args); + } + + return(retval); +} + +static int +xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, xfs_dinode_t *dp) +{ + xfs_ino_t ino; + int namelen_sum; + int count; + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int i; + + + + if ((INT_GET(dp->di_core.di_mode, ARCH_CONVERT) & IFMT) != IFDIR) { + return 0; + } + if (INT_GET(dp->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_LOCAL) { + return 0; + } + if (INT_GET(dp->di_core.di_size, ARCH_CONVERT) < sizeof(sf->hdr)) { + xfs_fs_cmn_err(CE_WARN, mp, "Invalid shortform size: dp 0x%p", + dp); + return 1; + } + sf = (xfs_dir_shortform_t *)(&dp->di_u.di_dirsf); + ino = XFS_GET_DIR_INO_ARCH(mp, sf->hdr.parent, ARCH_CONVERT); + if (xfs_dir_ino_validate(mp, ino)) + return 1; + + count = sf->hdr.count; + if ((count < 0) || ((count * 10) > XFS_LITINO(mp))) { + xfs_fs_cmn_err(CE_WARN, mp, + "Invalid shortform count: dp 0x%p", dp); + return(1); + } + + if (count == 0) { + return 0; + } + + namelen_sum = 0; + sfe = &sf->list[0]; + for (i = sf->hdr.count - 1; i >= 0; i--) { + ino = XFS_GET_DIR_INO_ARCH(mp, sfe->inumber, ARCH_CONVERT); + xfs_dir_ino_validate(mp, ino); + if (sfe->namelen >= XFS_LITINO(mp)) { + xfs_fs_cmn_err(CE_WARN, mp, + "Invalid shortform namelen: dp 0x%p", dp); + return 1; + } + namelen_sum += sfe->namelen; + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + if (namelen_sum >= XFS_LITINO(mp)) { + xfs_fs_cmn_err(CE_WARN, mp, + "Invalid shortform namelen: dp 0x%p", dp); + return 1; + } + + return 0; +} + +/*======================================================================== + * External routines when dirsize == XFS_LBSIZE(dp->i_mount). + *========================================================================*/ + +/* + * Add a name to the leaf directory structure + * This is the external routine. + */ +int +xfs_dir_leaf_addname(xfs_da_args_t *args) +{ + int index, retval; + xfs_dabuf_t *bp; + + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + + retval = xfs_dir_leaf_lookup_int(bp, args, &index); + if (retval == ENOENT) + retval = xfs_dir_leaf_add(bp, args, index); + xfs_da_buf_done(bp); + return(retval); +} + +/* + * Remove a name from the leaf directory structure + * This is the external routine. 
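+ *
+ * Besides deleting the entry, this passes the remaining entry count and
+ * name-byte total back to the caller, which uses them to decide whether
+ * the leaf can now collapse back into shortform.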
+ */ +STATIC int +xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen) +{ + xfs_dir_leafblock_t *leaf; + int index, retval; + xfs_dabuf_t *bp; + + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + retval = xfs_dir_leaf_lookup_int(bp, args, &index); + if (retval == EEXIST) { + (void)xfs_dir_leaf_remove(args->trans, bp, index); + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + *totallen = INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); + retval = 0; + } + xfs_da_buf_done(bp); + return(retval); +} + +/* + * Look up a name in a leaf directory structure. + * This is the external routine. + */ +STATIC int +xfs_dir_leaf_lookup(xfs_da_args_t *args) +{ + int index, retval; + xfs_dabuf_t *bp; + + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + retval = xfs_dir_leaf_lookup_int(bp, args, &index); + xfs_da_brelse(args->trans, bp); + return(retval); +} + +/* + * Copy out directory entries for getdents(), for leaf directories. + */ +STATIC int +xfs_dir_leaf_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, + int *eofp, xfs_dirent_t *dbp, xfs_dir_put_t put) +{ + xfs_dabuf_t *bp; + int retval, eob; + + retval = xfs_da_read_buf(dp->i_transp, dp, 0, -1, &bp, XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + retval = xfs_dir_leaf_getdents_int(bp, dp, 0, uio, &eob, dbp, put, -1); + xfs_da_brelse(trans, bp); + *eofp = (eob == 0); + return(retval); +} + +/* + * Look up a name in a leaf directory structure, replace the inode number. + * This is the external routine. + */ +STATIC int +xfs_dir_leaf_replace(xfs_da_args_t *args) +{ + int index, retval; + xfs_dabuf_t *bp; + xfs_ino_t inum; + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + + inum = args->inumber; + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + retval = xfs_dir_leaf_lookup_int(bp, args, &index); + if (retval == EEXIST) { + leaf = bp->data; + entry = &leaf->entries[index]; + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + /* XXX - replace assert? */ + XFS_DIR_SF_PUT_DIRINO_ARCH(&inum, &namest->inumber, ARCH_CONVERT); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber))); + xfs_da_buf_done(bp); + retval = 0; + } else + xfs_da_brelse(args->trans, bp); + return(retval); +} + + +/*======================================================================== + * External routines when dirsize > XFS_LBSIZE(mp). + *========================================================================*/ + +/* + * Add a name to a Btree-format directory. + * + * This will involve walking down the Btree, and may involve splitting + * leaf nodes and even splitting intermediate nodes up to and including + * the root node (a special case of an intermediate node). + */ +STATIC int +xfs_dir_node_addname(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + int retval, error; + + /* + * Fill in bucket of arguments/results/context to carry around. 
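+	 * The xfs_da_state_t records the path of blocks walked from the
+	 * root down to the leaf, so a later split (or a join on the remove
+	 * side) can ripple hashval and block changes back up that path.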
+ */ + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_dir_node_ents; + + /* + * Search to see if name already exists, and get back a pointer + * to where it should go. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) + retval = error; + if (retval != ENOENT) + goto error; + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC); + retval = xfs_dir_leaf_add(blk->bp, args, blk->index); + if (retval == 0) { + /* + * Addition succeeded, update Btree hashvals. + */ + if (!args->justcheck) + xfs_da_fixhashpath(state, &state->path); + } else { + /* + * Addition failed, split as many Btree elements as required. + */ + if (args->total == 0) { + ASSERT(retval == ENOSPC); + goto error; + } + retval = xfs_da_split(state); + } +error: + xfs_da_state_free(state); + + return(retval); +} + +/* + * Remove a name from a B-tree directory. + * + * This will involve walking down the Btree, and may involve joining + * leaf nodes and even joining intermediate nodes up to and including + * the root node (a special case of an intermediate node). + */ +STATIC int +xfs_dir_node_removename(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + int retval, error; + + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_dir_node_ents; + + /* + * Search to see if name exists, and get back a pointer to it. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) + retval = error; + if (retval != EEXIST) { + xfs_da_state_free(state); + return(retval); + } + + /* + * Remove the name and update the hashvals in the tree. + */ + blk = &state->path.blk[ state->path.active-1 ]; + ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC); + retval = xfs_dir_leaf_remove(args->trans, blk->bp, blk->index); + xfs_da_fixhashpath(state, &state->path); + + /* + * Check to see if the tree needs to be collapsed. + */ + error = 0; + if (retval) { + error = xfs_da_join(state); + } + + xfs_da_state_free(state); + if (error) + return(error); + return(0); +} + +/* + * Look up a filename in a int directory. + * Use an internal routine to actually do all the work. + */ +STATIC int +xfs_dir_node_lookup(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + int retval, error, i; + + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_dir_node_ents; + + /* + * Search to see if name exists, + * and get back a pointer to it. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) { + retval = error; + } + + /* + * If not in a transaction, we have to release all the buffers. + */ + for (i = 0; i < state->path.active; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + + xfs_da_state_free(state); + return(retval); +} + +STATIC int +xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, + int *eofp, xfs_dirent_t *dbp, xfs_dir_put_t put) +{ + xfs_da_intnode_t *node; + xfs_da_node_entry_t *btree; + xfs_dir_leafblock_t *leaf = NULL; + xfs_dablk_t bno, nextbno; + xfs_dahash_t cookhash; + xfs_mount_t *mp; + int error, eob, i; + xfs_dabuf_t *bp; + xfs_daddr_t nextda; + + /* + * Pick up our context. 
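+	 * The uio offset is an encoded cookie, not a byte offset: it packs
+	 * a leaf block number, an entry/sequence number and a hashval.
+	 * The block number is only a cache of where the last call stopped;
+	 * the hashval is what we actually trust when re-finding our place.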
+ */ + mp = dp->i_mount; + bp = NULL; + bno = XFS_DA_COOKIE_BNO(mp, uio->uio_offset); + cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); + + xfs_dir_trace_g_du("node: start", dp, uio); + + /* + * Re-find our place, even if we're confused about what our place is. + * + * First we check the block number from the magic cookie, it is a + * cache of where we ended last time. If we find a leaf block, and + * the starting hashval in that block is less than our desired + * hashval, then we run with it. + */ + if (bno > 0) { + error = xfs_da_read_buf(trans, dp, bno, -2, &bp, XFS_DATA_FORK); + if ((error != 0) && (error != EFSCORRUPTED)) + return(error); + if (bp) + leaf = bp->data; + if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { + xfs_dir_trace_g_dub("node: block not a leaf", + dp, uio, bno); + xfs_da_brelse(trans, bp); + bp = NULL; + } + if (bp && INT_GET(leaf->entries[0].hashval, ARCH_CONVERT) > cookhash) { + xfs_dir_trace_g_dub("node: leaf hash too large", + dp, uio, bno); + xfs_da_brelse(trans, bp); + bp = NULL; + } + if (bp && + cookhash > INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT)) { + xfs_dir_trace_g_dub("node: leaf hash too small", + dp, uio, bno); + xfs_da_brelse(trans, bp); + bp = NULL; + } + } + + /* + * If we did not find a leaf block from the blockno in the cookie, + * or we there was no blockno in the cookie (eg: first time thru), + * the we start at the top of the Btree and re-find our hashval. + */ + if (bp == NULL) { + xfs_dir_trace_g_du("node: start at root" , dp, uio); + bno = 0; + for (;;) { + error = xfs_da_read_buf(trans, dp, bno, -1, &bp, + XFS_DATA_FORK); + if (error) + return(error); + if (bp == NULL) + return(XFS_ERROR(EFSCORRUPTED)); + node = bp->data; + if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC) + break; + btree = &node->btree[0]; + xfs_dir_trace_g_dun("node: node detail", dp, uio, node); + for (i = 0; i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) { + if (INT_GET(btree->hashval, ARCH_CONVERT) >= cookhash) { + bno = INT_GET(btree->before, ARCH_CONVERT); + break; + } + } + if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { + xfs_da_brelse(trans, bp); + xfs_dir_trace_g_du("node: hash beyond EOF", + dp, uio); + uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0, + XFS_DA_MAXHASH); + *eofp = 1; + return(0); + } + xfs_dir_trace_g_dub("node: going to block", + dp, uio, bno); + xfs_da_brelse(trans, bp); + } + } + ASSERT(cookhash != XFS_DA_MAXHASH); + + /* + * We've dropped down to the (first) leaf block that contains the + * hashval we are interested in. Continue rolling upward thru the + * leaf blocks until we fill up our buffer. 
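+	 * Leaf blocks are chained through hdr.info.forw in hash order, so
+	 * once positioned we never revisit the node levels; we also issue
+	 * readahead for the next leaf while copying out the current one.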
+ */ + for (;;) { + leaf = bp->data; + if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC)) { + xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf); + xfs_da_brelse(trans, bp); + XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)", + XFS_ERRLEVEL_LOW, mp, leaf); + return XFS_ERROR(EFSCORRUPTED); + } + xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf); + if ((nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT))) { + nextda = xfs_da_reada_buf(trans, dp, nextbno, + XFS_DATA_FORK); + } else + nextda = -1; + error = xfs_dir_leaf_getdents_int(bp, dp, bno, uio, &eob, dbp, + put, nextda); + xfs_da_brelse(trans, bp); + bno = nextbno; + if (eob) { + xfs_dir_trace_g_dub("node: E-O-B", dp, uio, bno); + *eofp = 0; + return(error); + } + if (bno == 0) + break; + error = xfs_da_read_buf(trans, dp, bno, nextda, &bp, + XFS_DATA_FORK); + if (error) + return(error); + if (unlikely(bp == NULL)) { + XFS_ERROR_REPORT("xfs_dir_node_getdents(2)", + XFS_ERRLEVEL_LOW, mp); + return(XFS_ERROR(EFSCORRUPTED)); + } + } + *eofp = 1; + xfs_dir_trace_g_du("node: E-O-F", dp, uio); + return(0); +} + +/* + * Look up a filename in an int directory, replace the inode number. + * Use an internal routine to actually do the lookup. + */ +STATIC int +xfs_dir_node_replace(xfs_da_args_t *args) +{ + xfs_da_state_t *state; + xfs_da_state_blk_t *blk; + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + xfs_ino_t inum; + int retval, error, i; + xfs_dabuf_t *bp; + + state = xfs_da_state_alloc(); + state->args = args; + state->mp = args->dp->i_mount; + state->blocksize = state->mp->m_sb.sb_blocksize; + state->node_ents = state->mp->m_dir_node_ents; + inum = args->inumber; + + /* + * Search to see if name exists, + * and get back a pointer to it. + */ + error = xfs_da_node_lookup_int(state, &retval); + if (error) { + retval = error; + } + + if (retval == EEXIST) { + blk = &state->path.blk[state->path.active - 1]; + ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC); + bp = blk->bp; + leaf = bp->data; + entry = &leaf->entries[blk->index]; + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + /* XXX - replace assert ? */ + XFS_DIR_SF_PUT_DIRINO_ARCH(&inum, &namest->inumber, ARCH_CONVERT); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber))); + xfs_da_buf_done(bp); + blk->bp = NULL; + retval = 0; + } else { + i = state->path.active - 1; + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + for (i = 0; i < state->path.active - 1; i++) { + xfs_da_brelse(args->trans, state->path.blk[i].bp); + state->path.blk[i].bp = NULL; + } + + xfs_da_state_free(state); + return(retval); +} + +#if defined(XFS_DIR_TRACE) +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_du(char *where, xfs_inode_t *dp, uio_t *uio) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DU, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + NULL, NULL, NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. 
+ */ +void +xfs_dir_trace_g_dub(char *where, xfs_inode_t *dp, uio_t *uio, xfs_dablk_t bno) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUB, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)bno, + NULL, NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio, + xfs_da_intnode_t *node) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)INT_GET(node->hdr.info.forw, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT), + NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio, + xfs_dir_leafblock_t *leaf) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUL, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)INT_GET(leaf->hdr.info.forw, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT), + NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_due(char *where, xfs_inode_t *dp, uio_t *uio, + xfs_dir_leaf_entry_t *entry) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUE, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)INT_GET(entry->hashval, ARCH_CONVERT), + NULL, NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for an inode and a uio. + */ +void +xfs_dir_trace_g_duc(char *where, xfs_inode_t *dp, uio_t *uio, xfs_off_t cookie) +{ + xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUC, where, + (__psunsigned_t)dp, (__psunsigned_t)dp->i_mount, + (__psunsigned_t)(uio->uio_offset >> 32), + (__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF), + (__psunsigned_t)uio->uio_resid, + (__psunsigned_t)(cookie >> 32), + (__psunsigned_t)(cookie & 0xFFFFFFFF), + NULL, NULL, NULL, NULL, NULL); +} + +/* + * Add a trace buffer entry for the arguments given to the routine, + * generic form. 
+ */ +void +xfs_dir_trace_enter(int type, char *where, + __psunsigned_t a0, __psunsigned_t a1, + __psunsigned_t a2, __psunsigned_t a3, + __psunsigned_t a4, __psunsigned_t a5, + __psunsigned_t a6, __psunsigned_t a7, + __psunsigned_t a8, __psunsigned_t a9, + __psunsigned_t a10, __psunsigned_t a11) +{ + ASSERT(xfs_dir_trace_buf); + ktrace_enter(xfs_dir_trace_buf, (void *)((__psunsigned_t)type), + (void *)where, + (void *)a0, (void *)a1, (void *)a2, + (void *)a3, (void *)a4, (void *)a5, + (void *)a6, (void *)a7, (void *)a8, + (void *)a9, (void *)a10, (void *)a11, + NULL, NULL); +} +#endif /* XFS_DIR_TRACE */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir.h linux.22-ac2/fs/xfs/xfs_dir.h --- linux.vanilla/fs/xfs/xfs_dir.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR_H__ +#define __XFS_DIR_H__ + +/* + * Large directories are structured around Btrees where all the data + * elements are in the leaf nodes. Filenames are hashed into an int, + * then that int is used as the index into the Btree. Since the hashval + * of a filename may not be unique, we may have duplicate keys. The + * internal links in the Btree are logical block offsets into the file. + * + * Small directories use a different format and are packed as tightly + * as possible so as to fit into the literal area of the inode. + */ + +#ifdef XFS_ALL_TRACE +#define XFS_DIR_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_DIR_TRACE +#endif + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +struct uio; +struct xfs_bmap_free; +struct xfs_da_args; +struct xfs_dinode; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/* + * Directory function types. + * Put in structures (xfs_dirops_t) for v1 and v2 directories. 
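+ *
+ * At mount time mp->m_dirops is pointed at xfsv1_dirops or at the
+ * version-2 table (set up with the dirv2 code) according to the
+ * on-disk directory version, so the rest of XFS calls through the
+ * XFS_DIR_* macros below without caring which format is in use.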
+ */ +typedef void (*xfs_dir_mount_t)(struct xfs_mount *mp); +typedef int (*xfs_dir_isempty_t)(struct xfs_inode *dp); +typedef int (*xfs_dir_init_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + struct xfs_inode *pdp); +typedef int (*xfs_dir_createname_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen, + xfs_ino_t inum, + xfs_fsblock_t *first, + struct xfs_bmap_free *flist, + xfs_extlen_t total); +typedef int (*xfs_dir_lookup_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen, + xfs_ino_t *inum); +typedef int (*xfs_dir_removename_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen, + xfs_ino_t ino, + xfs_fsblock_t *first, + struct xfs_bmap_free *flist, + xfs_extlen_t total); +typedef int (*xfs_dir_getdents_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + struct uio *uio, + int *eofp); +typedef int (*xfs_dir_replace_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen, + xfs_ino_t inum, + xfs_fsblock_t *first, + struct xfs_bmap_free *flist, + xfs_extlen_t total); +typedef int (*xfs_dir_canenter_t)(struct xfs_trans *tp, + struct xfs_inode *dp, + char *name, + int namelen); +typedef int (*xfs_dir_shortform_validate_ondisk_t)(struct xfs_mount *mp, + struct xfs_dinode *dip); +typedef int (*xfs_dir_shortform_to_single_t)(struct xfs_da_args *args); + +typedef struct xfs_dirops { + xfs_dir_mount_t xd_mount; + xfs_dir_isempty_t xd_isempty; + xfs_dir_init_t xd_init; + xfs_dir_createname_t xd_createname; + xfs_dir_lookup_t xd_lookup; + xfs_dir_removename_t xd_removename; + xfs_dir_getdents_t xd_getdents; + xfs_dir_replace_t xd_replace; + xfs_dir_canenter_t xd_canenter; + xfs_dir_shortform_validate_ondisk_t xd_shortform_validate_ondisk; + xfs_dir_shortform_to_single_t xd_shortform_to_single; +} xfs_dirops_t; + +/* + * Overall external interface routines. 
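+ *
+ * Callers are expected to go through the macros below rather than any
+ * version-specific routine directly; a lookup, for example, is roughly
+ *
+ *	error = XFS_DIR_LOOKUP(mp, tp, dp, name, namelen, &inum);
+ *
+ * which becomes an indirect call through mp->m_dirops.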
+ */ +void xfs_dir_startup(void); /* called exactly once */ + +#define XFS_DIR_MOUNT(mp) \ + ((mp)->m_dirops.xd_mount(mp)) +#define XFS_DIR_ISEMPTY(mp,dp) \ + ((mp)->m_dirops.xd_isempty(dp)) +#define XFS_DIR_INIT(mp,tp,dp,pdp) \ + ((mp)->m_dirops.xd_init(tp,dp,pdp)) +#define XFS_DIR_CREATENAME(mp,tp,dp,name,namelen,inum,first,flist,total) \ + ((mp)->m_dirops.xd_createname(tp,dp,name,namelen,inum,first,flist,\ + total)) +#define XFS_DIR_LOOKUP(mp,tp,dp,name,namelen,inum) \ + ((mp)->m_dirops.xd_lookup(tp,dp,name,namelen,inum)) +#define XFS_DIR_REMOVENAME(mp,tp,dp,name,namelen,ino,first,flist,total) \ + ((mp)->m_dirops.xd_removename(tp,dp,name,namelen,ino,first,flist,total)) +#define XFS_DIR_GETDENTS(mp,tp,dp,uio,eofp) \ + ((mp)->m_dirops.xd_getdents(tp,dp,uio,eofp)) +#define XFS_DIR_REPLACE(mp,tp,dp,name,namelen,inum,first,flist,total) \ + ((mp)->m_dirops.xd_replace(tp,dp,name,namelen,inum,first,flist,total)) +#define XFS_DIR_CANENTER(mp,tp,dp,name,namelen) \ + ((mp)->m_dirops.xd_canenter(tp,dp,name,namelen)) +#define XFS_DIR_SHORTFORM_VALIDATE_ONDISK(mp,dip) \ + ((mp)->m_dirops.xd_shortform_validate_ondisk(mp,dip)) +#define XFS_DIR_SHORTFORM_TO_SINGLE(mp,args) \ + ((mp)->m_dirops.xd_shortform_to_single(args)) + +#define XFS_DIR_IS_V1(mp) ((mp)->m_dirversion == 1) +extern xfs_dirops_t xfsv1_dirops; + +#endif /* __XFS_DIR_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir_leaf.c linux.22-ac2/fs/xfs/xfs_dir_leaf.c --- linux.vanilla/fs/xfs/xfs_dir_leaf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir_leaf.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,2262 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * xfs_dir_leaf.c + * + * GROT: figure out how to recover gracefully when bmap returns ENOSPC. 
+ */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_error.h" + +/* + * xfs_dir_leaf.c + * + * Routines to implement leaf blocks of directories as Btrees of hashed names. + */ + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Routines used for growing the Btree. + */ +STATIC void xfs_dir_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args, + int insertion_index, + int freemap_index); +STATIC int xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer, + int musthave, int justcheck); +STATIC void xfs_dir_leaf_rebalance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2); +STATIC int xfs_dir_leaf_figure_balance(xfs_da_state_t *state, + xfs_da_state_blk_t *leaf_blk_1, + xfs_da_state_blk_t *leaf_blk_2, + int *number_entries_in_blk1, + int *number_namebytes_in_blk1); + +/* + * Utility routines. + */ +STATIC void xfs_dir_leaf_moveents(xfs_dir_leafblock_t *src_leaf, + int src_start, + xfs_dir_leafblock_t *dst_leaf, + int dst_start, int move_count, + xfs_mount_t *mp); + + +/*======================================================================== + * External routines when dirsize < XFS_IFORK_DSIZE(dp). + *========================================================================*/ + + +/* + * Validate a given inode number. + */ +int +xfs_dir_ino_validate(xfs_mount_t *mp, xfs_ino_t ino) +{ + xfs_agblock_t agblkno; + xfs_agino_t agino; + xfs_agnumber_t agno; + int ino_ok; + int ioff; + + agno = XFS_INO_TO_AGNO(mp, ino); + agblkno = XFS_INO_TO_AGBNO(mp, ino); + ioff = XFS_INO_TO_OFFSET(mp, ino); + agino = XFS_OFFBNO_TO_AGINO(mp, agblkno, ioff); + ino_ok = + agno < mp->m_sb.sb_agcount && + agblkno < mp->m_sb.sb_agblocks && + agblkno != 0 && + ioff < (1 << mp->m_sb.sb_inopblog) && + XFS_AGINO_TO_INO(mp, agno, agino) == ino; + if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE, + XFS_RANDOM_DIR_INO_VALIDATE))) { + xfs_fs_cmn_err(CE_WARN, mp, "Invalid inode number 0x%Lx", + (unsigned long long) ino); + XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + return 0; +} + +/* + * Create the initial contents of a shortform directory. 
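+ *
+ * Shortform data lives entirely in the inode literal area: a small
+ * header holding the parent inode number and an entry count, followed
+ * by tightly packed {inode, namelen, name} entries.  "." and ".." are
+ * never stored as entries; "." is implicit and ".." is the header's
+ * parent field.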
+ */ +int +xfs_dir_shortform_create(xfs_da_args_t *args, xfs_ino_t parent) +{ + xfs_dir_sf_hdr_t *hdr; + xfs_inode_t *dp; + + dp = args->dp; + ASSERT(dp != NULL); + ASSERT(dp->i_d.di_size == 0); + if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) { + dp->i_df.if_flags &= ~XFS_IFEXTENTS; /* just in case */ + dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE); + dp->i_df.if_flags |= XFS_IFINLINE; + } + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + ASSERT(dp->i_df.if_bytes == 0); + xfs_idata_realloc(dp, sizeof(*hdr), XFS_DATA_FORK); + hdr = (xfs_dir_sf_hdr_t *)dp->i_df.if_u1.if_data; + XFS_DIR_SF_PUT_DIRINO_ARCH(&parent, &hdr->parent, ARCH_CONVERT); + + INT_ZERO(hdr->count, ARCH_CONVERT); + dp->i_d.di_size = sizeof(*hdr); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + return(0); +} + +/* + * Add a name to the shortform directory structure. + * Overflow from the inode has already been checked for. + */ +int +xfs_dir_shortform_addname(xfs_da_args_t *args) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int i, offset, size; + xfs_inode_t *dp; + + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. + */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + sfe = &sf->list[0]; + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + if (sfe->namelen == args->namelen && + args->name[0] == sfe->name[0] && + memcmp(args->name, sfe->name, args->namelen) == 0) + return(XFS_ERROR(EEXIST)); + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + + offset = (int)((char *)sfe - (char *)sf); + size = XFS_DIR_SF_ENTSIZE_BYNAME(args->namelen); + xfs_idata_realloc(dp, size, XFS_DATA_FORK); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + sfe = (xfs_dir_sf_entry_t *)((char *)sf + offset); + + XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &sfe->inumber, ARCH_CONVERT); + sfe->namelen = args->namelen; + memcpy(sfe->name, args->name, sfe->namelen); + INT_MOD(sf->hdr.count, ARCH_CONVERT, +1); + + dp->i_d.di_size += size; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + + return(0); +} + +/* + * Remove a name from the shortform directory structure. + */ +int +xfs_dir_shortform_removename(xfs_da_args_t *args) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int base, size = 0, i; + xfs_inode_t *dp; + + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. 
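+	 * In that case the fork is still marked local but is smaller than
+	 * the shortform header, which should only be possible after a
+	 * forced shutdown, hence the ASSERT below.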
+ */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + base = sizeof(xfs_dir_sf_hdr_t); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + sfe = &sf->list[0]; + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe); + if (sfe->namelen == args->namelen && + sfe->name[0] == args->name[0] && + memcmp(sfe->name, args->name, args->namelen) == 0) + break; + base += size; + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + if (i < 0) { + ASSERT(args->oknoent); + return(XFS_ERROR(ENOENT)); + } + + if ((base + size) != dp->i_d.di_size) { + memmove(&((char *)sf)[base], &((char *)sf)[base+size], + dp->i_d.di_size - (base+size)); + } + INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); + + xfs_idata_realloc(dp, -size, XFS_DATA_FORK); + dp->i_d.di_size -= size; + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); + + return(0); +} + +/* + * Look up a name in a shortform directory structure. + */ +int +xfs_dir_shortform_lookup(xfs_da_args_t *args) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int i; + xfs_inode_t *dp; + + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. + */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + if (args->namelen == 2 && + args->name[0] == '.' && args->name[1] == '.') { + XFS_DIR_SF_GET_DIRINO_ARCH(&sf->hdr.parent, &args->inumber, ARCH_CONVERT); + return(XFS_ERROR(EEXIST)); + } + if (args->namelen == 1 && args->name[0] == '.') { + args->inumber = dp->i_ino; + return(XFS_ERROR(EEXIST)); + } + sfe = &sf->list[0]; + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + if (sfe->namelen == args->namelen && + sfe->name[0] == args->name[0] && + memcmp(args->name, sfe->name, args->namelen) == 0) { + XFS_DIR_SF_GET_DIRINO_ARCH(&sfe->inumber, &args->inumber, ARCH_CONVERT); + return(XFS_ERROR(EEXIST)); + } + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + ASSERT(args->oknoent); + return(XFS_ERROR(ENOENT)); +} + +/* + * Convert from using the shortform to the leaf. + */ +int +xfs_dir_shortform_to_leaf(xfs_da_args_t *iargs) +{ + xfs_inode_t *dp; + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + xfs_da_args_t args; + xfs_ino_t inumber; + char *tmpbuffer; + int retval, i, size; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + + dp = iargs->dp; + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. 
+ */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + size = dp->i_df.if_bytes; + tmpbuffer = kmem_alloc(size, KM_SLEEP); + ASSERT(tmpbuffer != NULL); + + memcpy(tmpbuffer, dp->i_df.if_u1.if_data, size); + + sf = (xfs_dir_shortform_t *)tmpbuffer; + XFS_DIR_SF_GET_DIRINO_ARCH(&sf->hdr.parent, &inumber, ARCH_CONVERT); + + xfs_idata_realloc(dp, -size, XFS_DATA_FORK); + dp->i_d.di_size = 0; + xfs_trans_log_inode(iargs->trans, dp, XFS_ILOG_CORE); + retval = xfs_da_grow_inode(iargs, &blkno); + if (retval) + goto out; + + ASSERT(blkno == 0); + retval = xfs_dir_leaf_create(iargs, blkno, &bp); + if (retval) + goto out; + xfs_da_buf_done(bp); + + args.name = "."; + args.namelen = 1; + args.hashval = xfs_dir_hash_dot; + args.inumber = dp->i_ino; + args.dp = dp; + args.firstblock = iargs->firstblock; + args.flist = iargs->flist; + args.total = iargs->total; + args.whichfork = XFS_DATA_FORK; + args.trans = iargs->trans; + args.justcheck = 0; + args.addname = args.oknoent = 1; + retval = xfs_dir_leaf_addname(&args); + if (retval) + goto out; + + args.name = ".."; + args.namelen = 2; + args.hashval = xfs_dir_hash_dotdot; + args.inumber = inumber; + retval = xfs_dir_leaf_addname(&args); + if (retval) + goto out; + + sfe = &sf->list[0]; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + args.name = (char *)(sfe->name); + args.namelen = sfe->namelen; + args.hashval = xfs_da_hashname((char *)(sfe->name), + sfe->namelen); + XFS_DIR_SF_GET_DIRINO_ARCH(&sfe->inumber, &args.inumber, ARCH_CONVERT); + retval = xfs_dir_leaf_addname(&args); + if (retval) + goto out; + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + retval = 0; + +out: + kmem_free(tmpbuffer, size); + return(retval); +} + +STATIC int +xfs_dir_shortform_compare(const void *a, const void *b) +{ + xfs_dir_sf_sort_t *sa, *sb; + + sa = (xfs_dir_sf_sort_t *)a; + sb = (xfs_dir_sf_sort_t *)b; + if (sa->hash < sb->hash) + return -1; + else if (sa->hash > sb->hash) + return 1; + else + return sa->entno - sb->entno; +} + +/* + * Copy out directory entries for getdents(), for shortform directories. + */ +/*ARGSUSED*/ +int +xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp, + xfs_dirent_t *dbp, xfs_dir_put_t put) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + int retval, i, sbsize, nsbuf, lastresid=0, want_entno; + xfs_mount_t *mp; + xfs_dahash_t cookhash, hash; + xfs_dir_put_args_t p; + xfs_dir_sf_sort_t *sbuf, *sbp; + + mp = dp->i_mount; + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); + want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); + nsbuf = INT_GET(sf->hdr.count, ARCH_CONVERT) + 2; + sbsize = (nsbuf + 1) * sizeof(*sbuf); + sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); + + xfs_dir_trace_g_du("sf: start", dp, uio); + + /* + * Collect all the entries into the buffer. + * Entry 0 is . + */ + sbp->entno = 0; + sbp->seqno = 0; + sbp->hash = xfs_dir_hash_dot; + sbp->ino = dp->i_ino; + sbp->name = "."; + sbp->namelen = 1; + sbp++; + + /* + * Entry 1 is .. + */ + sbp->entno = 1; + sbp->seqno = 0; + sbp->hash = xfs_dir_hash_dotdot; + sbp->ino = XFS_GET_DIR_INO_ARCH(mp, sf->hdr.parent, ARCH_CONVERT); + sbp->name = ".."; + sbp->namelen = 2; + sbp++; + + /* + * Scan the directory data for the rest of the entries. 
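+	 * Each one (like the "." and ".." stuffed in above) is collected
+	 * into sbuf[] along with its name hash; the array is then sorted
+	 * by hash so the cookies we hand back are stable and follow the
+	 * same hash ordering a leaf-format directory would use.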
+ */ + for (i = 0, sfe = &sf->list[0]; + i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + + if (unlikely( + ((char *)sfe < (char *)sf) || + ((char *)sfe >= ((char *)sf + dp->i_df.if_bytes)) || + (sfe->namelen >= MAXNAMELEN))) { + xfs_dir_trace_g_du("sf: corrupted", dp, uio); + XFS_CORRUPTION_ERROR("xfs_dir_shortform_getdents", + XFS_ERRLEVEL_LOW, mp, sfe); + kmem_free(sbuf, sbsize); + return XFS_ERROR(EFSCORRUPTED); + } + + sbp->entno = i + 2; + sbp->seqno = 0; + sbp->hash = xfs_da_hashname((char *)sfe->name, sfe->namelen); + sbp->ino = XFS_GET_DIR_INO_ARCH(mp, sfe->inumber, ARCH_CONVERT); + sbp->name = (char *)sfe->name; + sbp->namelen = sfe->namelen; + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + sbp++; + } + + /* + * Sort the entries on hash then entno. + */ + qsort(sbuf, nsbuf, sizeof(*sbuf), xfs_dir_shortform_compare); + /* + * Stuff in last entry. + */ + sbp->entno = nsbuf; + sbp->hash = XFS_DA_MAXHASH; + sbp->seqno = 0; + /* + * Figure out the sequence numbers in case there's a hash duplicate. + */ + for (hash = sbuf->hash, sbp = sbuf + 1; + sbp < &sbuf[nsbuf + 1]; sbp++) { + if (sbp->hash == hash) + sbp->seqno = sbp[-1].seqno + 1; + else + hash = sbp->hash; + } + + /* + * Set up put routine. + */ + p.dbp = dbp; + p.put = put; + p.uio = uio; + + /* + * Find our place. + */ + for (sbp = sbuf; sbp < &sbuf[nsbuf + 1]; sbp++) { + if (sbp->hash > cookhash || + (sbp->hash == cookhash && sbp->seqno >= want_entno)) + break; + } + + /* + * Did we fail to find anything? We stop at the last entry, + * the one we put maxhash into. + */ + if (sbp == &sbuf[nsbuf]) { + kmem_free(sbuf, sbsize); + xfs_dir_trace_g_du("sf: hash beyond end", dp, uio); + uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH); + *eofp = 1; + return 0; + } + + /* + * Loop putting entries into the user buffer. + */ + while (sbp < &sbuf[nsbuf]) { + /* + * Save the first resid in a run of equal-hashval entries + * so that we can back them out if they don't all fit. + */ + if (sbp->seqno == 0 || sbp == sbuf) + lastresid = uio->uio_resid; + /* + * NOTE! Linux "filldir" semantics require that the + * offset "cookie" be for this entry, not the + * next; all the actual shuffling to make it + * "look right" to the user is done in filldir. + */ + XFS_PUT_COOKIE(p.cook, mp, 0, sbp->seqno, sbp->hash); + +#if XFS_BIG_FILESYSTEMS + p.ino = sbp->ino + mp->m_inoadd; +#else + p.ino = sbp->ino; +#endif + p.name = sbp->name; + p.namelen = sbp->namelen; + + retval = p.put(&p); + + if (!p.done) { + uio->uio_offset = + XFS_DA_MAKE_COOKIE(mp, 0, 0, sbp->hash); + kmem_free(sbuf, sbsize); + uio->uio_resid = lastresid; + xfs_dir_trace_g_du("sf: E-O-B", dp, uio); + return retval; + } + + sbp++; + } + + kmem_free(sbuf, sbsize); + + XFS_PUT_COOKIE(p.cook, mp, 0, 0, XFS_DA_MAXHASH); + + uio->uio_offset = p.cook.o; + + *eofp = 1; + + xfs_dir_trace_g_du("sf: E-O-F", dp, uio); + + return 0; +} + +/* + * Look up a name in a shortform directory structure, replace the inode number. + */ +int +xfs_dir_shortform_replace(xfs_da_args_t *args) +{ + xfs_dir_shortform_t *sf; + xfs_dir_sf_entry_t *sfe; + xfs_inode_t *dp; + int i; + + dp = args->dp; + ASSERT(dp->i_df.if_flags & XFS_IFINLINE); + /* + * Catch the case where the conversion from shortform to leaf + * failed part way through. 
+ */ + if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) { + ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); + return XFS_ERROR(EIO); + } + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); + ASSERT(dp->i_df.if_u1.if_data != NULL); + sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; + if (args->namelen == 2 && + args->name[0] == '.' && args->name[1] == '.') { + /* XXX - replace assert? */ + XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &sf->hdr.parent, ARCH_CONVERT); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); + return(0); + } + ASSERT(args->namelen != 1 || args->name[0] != '.'); + sfe = &sf->list[0]; + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + if (sfe->namelen == args->namelen && + sfe->name[0] == args->name[0] && + memcmp(args->name, sfe->name, args->namelen) == 0) { + ASSERT(memcmp((char *)&args->inumber, + (char *)&sfe->inumber, sizeof(xfs_ino_t))); + XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &sfe->inumber, ARCH_CONVERT); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); + return(0); + } + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } + ASSERT(args->oknoent); + return(XFS_ERROR(ENOENT)); +} + +/* + * Convert a leaf directory to shortform structure + */ +int +xfs_dir_leaf_to_shortform(xfs_da_args_t *iargs) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + xfs_da_args_t args; + xfs_inode_t *dp; + xfs_ino_t parent; + char *tmpbuffer; + int retval, i; + xfs_dabuf_t *bp; + + dp = iargs->dp; + tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP); + ASSERT(tmpbuffer != NULL); + + retval = xfs_da_read_buf(iargs->trans, iargs->dp, 0, -1, &bp, + XFS_DATA_FORK); + if (retval) + goto out; + ASSERT(bp != NULL); + memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); + leaf = (xfs_dir_leafblock_t *)tmpbuffer; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); + + /* + * Find and special case the parent inode number + */ + hdr = &leaf->hdr; + entry = &leaf->entries[0]; + for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; i >= 0; entry++, i--) { + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + if ((entry->namelen == 2) && + (namest->name[0] == '.') && + (namest->name[1] == '.')) { + XFS_DIR_SF_GET_DIRINO_ARCH(&namest->inumber, &parent, ARCH_CONVERT); + INT_ZERO(entry->nameidx, ARCH_CONVERT); + } else if ((entry->namelen == 1) && (namest->name[0] == '.')) { + INT_ZERO(entry->nameidx, ARCH_CONVERT); + } + } + retval = xfs_da_shrink_inode(iargs, 0, bp); + if (retval) + goto out; + retval = xfs_dir_shortform_create(iargs, parent); + if (retval) + goto out; + + /* + * Copy the rest of the filenames + */ + entry = &leaf->entries[0]; + args.dp = dp; + args.firstblock = iargs->firstblock; + args.flist = iargs->flist; + args.total = iargs->total; + args.whichfork = XFS_DATA_FORK; + args.trans = iargs->trans; + args.justcheck = 0; + args.addname = args.oknoent = 1; + for (i = 0; i < INT_GET(hdr->count, ARCH_CONVERT); entry++, i++) { + if (INT_ISZERO(entry->nameidx, ARCH_CONVERT)) + continue; + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + args.name = (char *)(namest->name); + args.namelen = entry->namelen; + args.hashval = INT_GET(entry->hashval, ARCH_CONVERT); + XFS_DIR_SF_GET_DIRINO_ARCH(&namest->inumber, &args.inumber, ARCH_CONVERT); + xfs_dir_shortform_addname(&args); + } + +out: + kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount)); + return(retval); +} + +/* + * Convert from 
using a single leaf to a root node and a leaf. + */ +int +xfs_dir_leaf_to_node(xfs_da_args_t *args) +{ + xfs_dir_leafblock_t *leaf; + xfs_da_intnode_t *node; + xfs_inode_t *dp; + xfs_dabuf_t *bp1, *bp2; + xfs_dablk_t blkno; + int retval; + + dp = args->dp; + retval = xfs_da_grow_inode(args, &blkno); + ASSERT(blkno == 1); + if (retval) + return(retval); + retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1, + XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp1 != NULL); + retval = xfs_da_get_buf(args->trans, args->dp, 1, -1, &bp2, + XFS_DATA_FORK); + if (retval) { + xfs_da_buf_done(bp1); + return(retval); + } + ASSERT(bp2 != NULL); + memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount)); + xfs_da_buf_done(bp1); + xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1); + + /* + * Set up the new root node. + */ + retval = xfs_da_node_create(args, 0, 1, &bp1, XFS_DATA_FORK); + if (retval) { + xfs_da_buf_done(bp2); + return(retval); + } + node = bp1->data; + leaf = bp2->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + INT_SET(node->btree[0].hashval, ARCH_CONVERT, INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); + xfs_da_buf_done(bp2); + INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); + INT_SET(node->hdr.count, ARCH_CONVERT, 1); + xfs_da_log_buf(args->trans, bp1, + XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0]))); + xfs_da_buf_done(bp1); + + return(retval); +} + + +/*======================================================================== + * Routines used for growing the Btree. + *========================================================================*/ + +/* + * Create the initial contents of a leaf directory + * or a leaf in a node directory. + */ +int +xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_inode_t *dp; + xfs_dabuf_t *bp; + int retval; + + dp = args->dp; + ASSERT(dp != NULL); + retval = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp, XFS_DATA_FORK); + if (retval) + return(retval); + ASSERT(bp != NULL); + leaf = bp->data; + memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); + hdr = &leaf->hdr; + INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_DIR_LEAF_MAGIC); + INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); + if (INT_ISZERO(hdr->firstused, ARCH_CONVERT)) + INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - 1); + INT_SET(hdr->freemap[0].base, ARCH_CONVERT, sizeof(xfs_dir_leaf_hdr_t)); + INT_SET(hdr->freemap[0].size, ARCH_CONVERT, INT_GET(hdr->firstused, ARCH_CONVERT) - INT_GET(hdr->freemap[0].base, ARCH_CONVERT)); + + xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); + + *bpp = bp; + return(0); +} + +/* + * Split the leaf node, rebalance, then add the new entry. + */ +int +xfs_dir_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, + xfs_da_state_blk_t *newblk) +{ + xfs_dablk_t blkno; + xfs_da_args_t *args; + int error; + + /* + * Allocate space for a new leaf node. + */ + args = state->args; + ASSERT(args != NULL); + ASSERT(oldblk->magic == XFS_DIR_LEAF_MAGIC); + error = xfs_da_grow_inode(args, &blkno); + if (error) + return(error); + error = xfs_dir_leaf_create(args, blkno, &newblk->bp); + if (error) + return(error); + newblk->blkno = blkno; + newblk->magic = XFS_DIR_LEAF_MAGIC; + + /* + * Rebalance the entries across the two leaves. 
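+	 * xfs_dir_leaf_rebalance moves entries between the two blocks until
+	 * their byte usage is roughly even and records in state->inleaf
+	 * which block should receive the new name; xfs_da_blk_link then
+	 * splices the new leaf into the forw/back sibling chain.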
+ */ + xfs_dir_leaf_rebalance(state, oldblk, newblk); + error = xfs_da_blk_link(state, oldblk, newblk); + if (error) + return(error); + + /* + * Insert the new entry in the correct block. + */ + if (state->inleaf) { + error = xfs_dir_leaf_add(oldblk->bp, args, oldblk->index); + } else { + error = xfs_dir_leaf_add(newblk->bp, args, newblk->index); + } + + /* + * Update last hashval in each block since we added the name. + */ + oldblk->hashval = xfs_dir_leaf_lasthash(oldblk->bp, NULL); + newblk->hashval = xfs_dir_leaf_lasthash(newblk->bp, NULL); + return(error); +} + +/* + * Add a name to the leaf directory structure. + * + * Must take into account fragmented leaves and leaves where spacemap has + * lost some freespace information (ie: holes). + */ +int +xfs_dir_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_dir_leaf_map_t *map; + int tablesize, entsize, sum, i, tmp, error; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT((index >= 0) && (index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); + hdr = &leaf->hdr; + entsize = XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen); + + /* + * Search through freemap for first-fit on new name length. + * (may need to figure in size of entry struct too) + */ + tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) * (uint)sizeof(xfs_dir_leaf_entry_t) + + (uint)sizeof(xfs_dir_leaf_hdr_t); + map = &hdr->freemap[XFS_DIR_LEAF_MAPSIZE-1]; + for (sum = 0, i = XFS_DIR_LEAF_MAPSIZE-1; i >= 0; map--, i--) { + if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) { + sum += INT_GET(map->size, ARCH_CONVERT); + continue; + } + if (INT_ISZERO(map->size, ARCH_CONVERT)) + continue; /* no space in this map */ + tmp = entsize; + if (INT_GET(map->base, ARCH_CONVERT) < INT_GET(hdr->firstused, ARCH_CONVERT)) + tmp += (uint)sizeof(xfs_dir_leaf_entry_t); + if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { + if (!args->justcheck) + xfs_dir_leaf_add_work(bp, args, index, i); + return(0); + } + sum += INT_GET(map->size, ARCH_CONVERT); + } + + /* + * If there are no holes in the address space of the block, + * and we don't have enough freespace, then compaction will do us + * no good and we should just give up. + */ + if (!hdr->holes && (sum < entsize)) + return(XFS_ERROR(ENOSPC)); + + /* + * Compact the entries to coalesce free space. + * Pass the justcheck flag so the checking pass can return + * an error, without changing anything, if it won't fit. + */ + error = xfs_dir_leaf_compact(args->trans, bp, + args->total == 0 ? + entsize + + (uint)sizeof(xfs_dir_leaf_entry_t) : 0, + args->justcheck); + if (error) + return(error); + /* + * After compaction, the block is guaranteed to have only one + * free region, in freemap[0]. If it is not big enough, give up. + */ + if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) < + (entsize + (uint)sizeof(xfs_dir_leaf_entry_t))) + return(XFS_ERROR(ENOSPC)); + + if (!args->justcheck) + xfs_dir_leaf_add_work(bp, args, index, 0); + return(0); +} + +/* + * Add a name to a leaf directory structure. 
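+ *
+ * Within a leaf block the entry table grows up from the header while
+ * the {inode, name} strings are packed down from the end of the block;
+ * hdr.firstused tracks the lowest name offset in use and freemap[]
+ * remembers up to XFS_DIR_LEAF_MAPSIZE free regions in between.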
+ */ +STATIC void +xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index, + int mapindex) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + xfs_dir_leaf_map_t *map; + /* REFERENCED */ + xfs_mount_t *mp; + int tmp, i; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + hdr = &leaf->hdr; + ASSERT((mapindex >= 0) && (mapindex < XFS_DIR_LEAF_MAPSIZE)); + ASSERT((index >= 0) && (index <= INT_GET(hdr->count, ARCH_CONVERT))); + + /* + * Force open some space in the entry array and fill it in. + */ + entry = &leaf->entries[index]; + if (index < INT_GET(hdr->count, ARCH_CONVERT)) { + tmp = INT_GET(hdr->count, ARCH_CONVERT) - index; + tmp *= (uint)sizeof(xfs_dir_leaf_entry_t); + memmove(entry + 1, entry, tmp); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry))); + } + INT_MOD(hdr->count, ARCH_CONVERT, +1); + + /* + * Allocate space for the new string (at the end of the run). + */ + map = &hdr->freemap[mapindex]; + mp = args->trans->t_mountp; + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) >= XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + INT_MOD(map->size, ARCH_CONVERT, -(XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen))); + INT_SET(entry->nameidx, ARCH_CONVERT, INT_GET(map->base, ARCH_CONVERT) + INT_GET(map->size, ARCH_CONVERT)); + INT_SET(entry->hashval, ARCH_CONVERT, args->hashval); + entry->namelen = args->namelen; + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); + + /* + * Copy the string and inode number into the new space. + */ + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + XFS_DIR_SF_PUT_DIRINO_ARCH(&args->inumber, &namest->inumber, ARCH_CONVERT); + memcpy(namest->name, args->name, args->namelen); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, namest, XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry))); + + /* + * Update the control info for this leaf node + */ + if (INT_GET(entry->nameidx, ARCH_CONVERT) < INT_GET(hdr->firstused, ARCH_CONVERT)) + INT_COPY(hdr->firstused, entry->nameidx, ARCH_CONVERT); + ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) >= ((INT_GET(hdr->count, ARCH_CONVERT)*sizeof(*entry))+sizeof(*hdr))); + tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1) * (uint)sizeof(xfs_dir_leaf_entry_t) + + (uint)sizeof(xfs_dir_leaf_hdr_t); + map = &hdr->freemap[0]; + for (i = 0; i < XFS_DIR_LEAF_MAPSIZE; map++, i++) { + if (INT_GET(map->base, ARCH_CONVERT) == tmp) { + INT_MOD(map->base, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_entry_t)); + INT_MOD(map->size, ARCH_CONVERT, -((uint)sizeof(xfs_dir_leaf_entry_t))); + } + } + INT_MOD(hdr->namebytes, ARCH_CONVERT, args->namelen); + xfs_da_log_buf(args->trans, bp, + XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); +} + +/* + * Garbage collect a leaf directory block by copying it to a new buffer. 
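xfs_dir_leaf_add_work, just above, does two things worth seeing in isolation: it memmove()s the tail of the sorted entry table up one slot to open a hole at the insertion index, and it carves the name's bytes off the end of the chosen free region so that names pack downward while entries pack upward. The sketch below models only that, with entsize collapsed to the bare name length and made-up field widths in a hypothetical 4 KB block; the real entry also carries the inode number and is logged to the transaction.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified stand-ins for the on-disk entry and free region. */
struct entry   { uint32_t hashval; uint16_t nameidx; uint8_t namelen; };
struct freemap { uint16_t base, size; };

/*
 * Open a hole at 'index' in the hash-sorted entry table, then allocate the
 * name from the top (end) of the chosen free region.
 */
static void insert_entry(struct entry *entries, int count, int index,
                         struct freemap *map, uint32_t hash, uint8_t namelen)
{
        if (index < count)
                memmove(&entries[index + 1], &entries[index],
                        (count - index) * sizeof(struct entry));

        map->size -= namelen;                   /* entsize simplified to namelen */
        entries[index].nameidx = map->base + map->size;
        entries[index].hashval = hash;
        entries[index].namelen = namelen;
}

int main(void)
{
        struct entry e[8] = { { 10, 4000, 3 }, { 30, 3990, 5 } };
        struct freemap m = { 64, 3926 };        /* free gap below the name area */

        insert_entry(e, 2, 1, &m, 20, 4);       /* hash 20 sorts between 10 and 30 */
        printf("new entry: hash=%u nameidx=%u\n",
               (unsigned)e[1].hashval, (unsigned)e[1].nameidx);
        printf("shifted entry: hash=%u\n", (unsigned)e[2].hashval);
        return 0;
}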
+ */ +STATIC int +xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp, int musthave, + int justcheck) +{ + xfs_dir_leafblock_t *leaf_s, *leaf_d; + xfs_dir_leaf_hdr_t *hdr_s, *hdr_d; + xfs_mount_t *mp; + char *tmpbuffer; + char *tmpbuffer2=NULL; + int rval; + int lbsize; + + mp = trans->t_mountp; + lbsize = XFS_LBSIZE(mp); + tmpbuffer = kmem_alloc(lbsize, KM_SLEEP); + ASSERT(tmpbuffer != NULL); + memcpy(tmpbuffer, bp->data, lbsize); + + /* + * Make a second copy in case xfs_dir_leaf_moveents() + * below destroys the original. + */ + if (musthave || justcheck) { + tmpbuffer2 = kmem_alloc(lbsize, KM_SLEEP); + memcpy(tmpbuffer2, bp->data, lbsize); + } + memset(bp->data, 0, lbsize); + + /* + * Copy basic information + */ + leaf_s = (xfs_dir_leafblock_t *)tmpbuffer; + leaf_d = bp->data; + hdr_s = &leaf_s->hdr; + hdr_d = &leaf_d->hdr; + hdr_d->info = hdr_s->info; /* struct copy */ + INT_SET(hdr_d->firstused, ARCH_CONVERT, lbsize); + if (INT_ISZERO(hdr_d->firstused, ARCH_CONVERT)) + INT_SET(hdr_d->firstused, ARCH_CONVERT, lbsize - 1); + INT_ZERO(hdr_d->namebytes, ARCH_CONVERT); + INT_ZERO(hdr_d->count, ARCH_CONVERT); + hdr_d->holes = 0; + INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, sizeof(xfs_dir_leaf_hdr_t)); + INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, INT_GET(hdr_d->firstused, ARCH_CONVERT) - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); + + /* + * Copy all entry's in the same (sorted) order, + * but allocate filenames packed and in sequence. + * This changes the source (leaf_s) as well. + */ + xfs_dir_leaf_moveents(leaf_s, 0, leaf_d, 0, (int)INT_GET(hdr_s->count, ARCH_CONVERT), mp); + + if (musthave && INT_GET(hdr_d->freemap[0].size, ARCH_CONVERT) < musthave) + rval = XFS_ERROR(ENOSPC); + else + rval = 0; + + if (justcheck || rval == ENOSPC) { + ASSERT(tmpbuffer2); + memcpy(bp->data, tmpbuffer2, lbsize); + } else { + xfs_da_log_buf(trans, bp, 0, lbsize - 1); + } + + kmem_free(tmpbuffer, lbsize); + if (musthave || justcheck) + kmem_free(tmpbuffer2, lbsize); + return(rval); +} + +/* + * Redistribute the directory entries between two leaf nodes, + * taking into account the size of the new entry. + * + * NOTE: if new block is empty, then it will get the upper half of old block. + */ +STATIC void +xfs_dir_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2) +{ + xfs_da_state_blk_t *tmp_blk; + xfs_dir_leafblock_t *leaf1, *leaf2; + xfs_dir_leaf_hdr_t *hdr1, *hdr2; + int count, totallen, max, space, swap; + + /* + * Set up environment. + */ + ASSERT(blk1->magic == XFS_DIR_LEAF_MAGIC); + ASSERT(blk2->magic == XFS_DIR_LEAF_MAGIC); + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + + /* + * Check ordering of blocks, reverse if it makes things simpler. + */ + swap = 0; + if (xfs_dir_leaf_order(blk1->bp, blk2->bp)) { + tmp_blk = blk1; + blk1 = blk2; + blk2 = tmp_blk; + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + swap = 1; + } + hdr1 = &leaf1->hdr; + hdr2 = &leaf2->hdr; + + /* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. Then get + * the direction to copy and the number of elements to move. 
+ */ + state->inleaf = xfs_dir_leaf_figure_balance(state, blk1, blk2, + &count, &totallen); + if (swap) + state->inleaf = !state->inleaf; + + /* + * Move any entries required from leaf to leaf: + */ + if (count < INT_GET(hdr1->count, ARCH_CONVERT)) { + /* + * Figure the total bytes to be added to the destination leaf. + */ + count = INT_GET(hdr1->count, ARCH_CONVERT) - count; /* number entries being moved */ + space = INT_GET(hdr1->namebytes, ARCH_CONVERT) - totallen; + space += count * ((uint)sizeof(xfs_dir_leaf_name_t)-1); + space += count * (uint)sizeof(xfs_dir_leaf_entry_t); + + /* + * leaf2 is the destination, compact it if it looks tight. + */ + max = INT_GET(hdr2->firstused, ARCH_CONVERT) - (uint)sizeof(xfs_dir_leaf_hdr_t); + max -= INT_GET(hdr2->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t); + if (space > max) { + xfs_dir_leaf_compact(state->args->trans, blk2->bp, + 0, 0); + } + + /* + * Move high entries from leaf1 to low end of leaf2. + */ + xfs_dir_leaf_moveents(leaf1, INT_GET(hdr1->count, ARCH_CONVERT) - count, + leaf2, 0, count, state->mp); + + xfs_da_log_buf(state->args->trans, blk1->bp, 0, + state->blocksize-1); + xfs_da_log_buf(state->args->trans, blk2->bp, 0, + state->blocksize-1); + + } else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) { + /* + * Figure the total bytes to be added to the destination leaf. + */ + count -= INT_GET(hdr1->count, ARCH_CONVERT); /* number entries being moved */ + space = totallen - INT_GET(hdr1->namebytes, ARCH_CONVERT); + space += count * ((uint)sizeof(xfs_dir_leaf_name_t)-1); + space += count * (uint)sizeof(xfs_dir_leaf_entry_t); + + /* + * leaf1 is the destination, compact it if it looks tight. + */ + max = INT_GET(hdr1->firstused, ARCH_CONVERT) - (uint)sizeof(xfs_dir_leaf_hdr_t); + max -= INT_GET(hdr1->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t); + if (space > max) { + xfs_dir_leaf_compact(state->args->trans, blk1->bp, + 0, 0); + } + + /* + * Move low entries from leaf2 to high end of leaf1. + */ + xfs_dir_leaf_moveents(leaf2, 0, leaf1, (int)INT_GET(hdr1->count, ARCH_CONVERT), + count, state->mp); + + xfs_da_log_buf(state->args->trans, blk1->bp, 0, + state->blocksize-1); + xfs_da_log_buf(state->args->trans, blk2->bp, 0, + state->blocksize-1); + } + + /* + * Copy out last hashval in each block for B-tree code. + */ + blk1->hashval = INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk2->hashval = INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + + /* + * Adjust the expected index for insertion. + * GROT: this doesn't work unless blk2 was originally empty. + */ + if (!state->inleaf) { + blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + } +} + +/* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. + * GROT: Is this really necessary? With other than a 512 byte blocksize, + * GROT: there will always be enough room in either block for a new entry. + * GROT: Do a double-split for this case? + */ +STATIC int +xfs_dir_leaf_figure_balance(xfs_da_state_t *state, + xfs_da_state_blk_t *blk1, + xfs_da_state_blk_t *blk2, + int *countarg, int *namebytesarg) +{ + xfs_dir_leafblock_t *leaf1, *leaf2; + xfs_dir_leaf_hdr_t *hdr1, *hdr2; + xfs_dir_leaf_entry_t *entry; + int count, max, totallen, half; + int lastdelta, foundit, tmp; + + /* + * Set up environment. 
+ */ + leaf1 = blk1->bp->data; + leaf2 = blk2->bp->data; + hdr1 = &leaf1->hdr; + hdr2 = &leaf2->hdr; + foundit = 0; + totallen = 0; + + /* + * Examine entries until we reduce the absolute difference in + * byte usage between the two blocks to a minimum. + */ + max = INT_GET(hdr1->count, ARCH_CONVERT) + INT_GET(hdr2->count, ARCH_CONVERT); + half = (max+1) * (uint)(sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1); + half += INT_GET(hdr1->namebytes, ARCH_CONVERT) + INT_GET(hdr2->namebytes, ARCH_CONVERT) + state->args->namelen; + half /= 2; + lastdelta = state->blocksize; + entry = &leaf1->entries[0]; + for (count = 0; count < max; entry++, count++) { + +#define XFS_DIR_ABS(A) (((A) < 0) ? -(A) : (A)) + /* + * The new entry is in the first block, account for it. + */ + if (count == blk1->index) { + tmp = totallen + (uint)sizeof(*entry) + + XFS_DIR_LEAF_ENTSIZE_BYNAME(state->args->namelen); + if (XFS_DIR_ABS(half - tmp) > lastdelta) + break; + lastdelta = XFS_DIR_ABS(half - tmp); + totallen = tmp; + foundit = 1; + } + + /* + * Wrap around into the second block if necessary. + */ + if (count == INT_GET(hdr1->count, ARCH_CONVERT)) { + leaf1 = leaf2; + entry = &leaf1->entries[0]; + } + + /* + * Figure out if next leaf entry would be too much. + */ + tmp = totallen + (uint)sizeof(*entry) + + XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry); + if (XFS_DIR_ABS(half - tmp) > lastdelta) + break; + lastdelta = XFS_DIR_ABS(half - tmp); + totallen = tmp; +#undef XFS_DIR_ABS + } + + /* + * Calculate the number of namebytes that will end up in lower block. + * If new entry not in lower block, fix up the count. + */ + totallen -= + count * (uint)(sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1); + if (foundit) { + totallen -= (sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1) + + state->args->namelen; + } + + *countarg = count; + *namebytesarg = totallen; + return(foundit); +} + +/*======================================================================== + * Routines used for shrinking the Btree. + *========================================================================*/ + +/* + * Check a leaf block and its neighbors to see if the block should be + * collapsed into one or the other neighbor. Always keep the block + * with the smaller block number. + * If the current block is over 50% full, don't try to join it, return 0. + * If the block is empty, fill in the state structure and return 2. + * If it can be collapsed, fill in the state structure and return 1. + * If nothing can be done, return 0. + */ +int +xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) +{ + xfs_dir_leafblock_t *leaf; + xfs_da_state_blk_t *blk; + xfs_da_blkinfo_t *info; + int count, bytes, forward, error, retval, i; + xfs_dablk_t blkno; + xfs_dabuf_t *bp; + + /* + * Check for the degenerate case of the block being over 50% full. + * If so, it's not worth even looking to see if we might be able + * to coalesce with a sibling. + */ + blk = &state->path.blk[ state->path.active-1 ]; + info = blk->bp->data; + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + leaf = (xfs_dir_leafblock_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes = (uint)sizeof(xfs_dir_leaf_hdr_t) + + count * (uint)sizeof(xfs_dir_leaf_entry_t) + + count * ((uint)sizeof(xfs_dir_leaf_name_t)-1) + + INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); + if (bytes > (state->blocksize >> 1)) { + *action = 0; /* blk over 50%, don't try to join */ + return(0); + } + + /* + * Check for the degenerate case of the block being empty. 
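The balance-point search in xfs_dir_leaf_figure_balance keeps extending the lower block's prefix of the combined, hash-sorted entry list for as long as doing so brings its byte total closer to half of the grand total, and stops at the first step that makes the difference grow. The following sketch reduces each entry to a single per-entry byte cost (entry slot plus name); the kernel additionally accounts for the entry being inserted and for wrapping from the first leaf into the second.

#include <stdio.h>
#include <stdlib.h>

/*
 * Greedy split of a sorted list of per-entry byte costs: grow the prefix
 * while |half - prefix| keeps shrinking, exactly the stopping rule used
 * by the kernel's loop.  Returns the number of entries for the lower
 * block and the bytes they occupy.
 */
static int figure_balance(const int *cost, int n, int *lower_bytes)
{
        int total = 0, half, running = 0, lastdelta, i;

        for (i = 0; i < n; i++)
                total += cost[i];
        half = total / 2;
        lastdelta = total;              /* anything beats this */

        for (i = 0; i < n; i++) {
                int tmp = running + cost[i];

                if (abs(half - tmp) > lastdelta)
                        break;          /* one more entry only makes it worse */
                lastdelta = abs(half - tmp);
                running = tmp;
        }
        *lower_bytes = running;
        return i;
}

int main(void)
{
        int cost[] = { 40, 16, 90, 25, 25, 60 };
        int bytes, count;

        count = figure_balance(cost, 6, &bytes);
        printf("lower block gets %d entries, %d bytes of %d\n",
               count, bytes, 40 + 16 + 90 + 25 + 25 + 60);
        return 0;
}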
+ * If the block is empty, we'll simply delete it, no need to + * coalesce it with a sibling block. We choose (aribtrarily) + * to merge with the forward block unless it is NULL. + */ + if (count == 0) { + /* + * Make altpath point to the block we want to keep and + * path point to the block we want to drop (this one). + */ + forward = !INT_ISZERO(info->forw, ARCH_CONVERT); + memcpy(&state->altpath, &state->path, sizeof(state->path)); + error = xfs_da_path_shift(state, &state->altpath, forward, + 0, &retval); + if (error) + return(error); + if (retval) { + *action = 0; + } else { + *action = 2; + } + return(0); + } + + /* + * Examine each sibling block to see if we can coalesce with + * at least 25% free space to spare. We need to figure out + * whether to merge with the forward or the backward block. + * We prefer coalescing with the lower numbered sibling so as + * to shrink a directory over time. + */ + forward = (INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT)); /* start with smaller blk num */ + for (i = 0; i < 2; forward = !forward, i++) { + if (forward) + blkno = INT_GET(info->forw, ARCH_CONVERT); + else + blkno = INT_GET(info->back, ARCH_CONVERT); + if (blkno == 0) + continue; + error = xfs_da_read_buf(state->args->trans, state->args->dp, + blkno, -1, &bp, + XFS_DATA_FORK); + if (error) + return(error); + ASSERT(bp != NULL); + + leaf = (xfs_dir_leafblock_t *)info; + count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes = state->blocksize - (state->blocksize>>2); + bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + count += INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); + bytes -= count * ((uint)sizeof(xfs_dir_leaf_name_t) - 1); + bytes -= count * (uint)sizeof(xfs_dir_leaf_entry_t); + bytes -= (uint)sizeof(xfs_dir_leaf_hdr_t); + if (bytes >= 0) + break; /* fits with at least 25% to spare */ + + xfs_da_brelse(state->args->trans, bp); + } + if (i >= 2) { + *action = 0; + return(0); + } + xfs_da_buf_done(bp); + + /* + * Make altpath point to the block we want to keep (the lower + * numbered block) and path point to the block we want to drop. + */ + memcpy(&state->altpath, &state->path, sizeof(state->path)); + if (blkno < blk->blkno) { + error = xfs_da_path_shift(state, &state->altpath, forward, + 0, &retval); + } else { + error = xfs_da_path_shift(state, &state->path, forward, + 0, &retval); + } + if (error) + return(error); + if (retval) { + *action = 0; + } else { + *action = 1; + } + return(0); +} + +/* + * Remove a name from the leaf directory structure. + * + * Return 1 if leaf is less than 37% full, 0 if >= 37% full. + * If two leaves are 37% full, when combined they will leave 25% free. 
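The join policy described above boils down to two thresholds: a block that is more than 50% full is never merged, and a merge is only attempted when the combined contents would still leave at least 25% of a block free (the related check in xfs_dir_leaf_remove reports a leaf as joinable once it drops below roughly 37% full, since two such leaves combine into one with 25% slack). A compact sketch of that decision, using plain byte counts in place of the header fields, follows.

#include <stdio.h>

/*
 * Join heuristic in the spirit of xfs_dir_leaf_toosmall: keep blocks that
 * are over half full, and only merge when the result keeps 25% free.
 */
static int should_join(int blocksize, int used, int sibling_used)
{
        if (used > blocksize / 2)
                return 0;               /* over 50% full: leave it alone */
        if (used + sibling_used <= blocksize - blocksize / 4)
                return 1;               /* fits with at least 25% to spare */
        return 0;
}

int main(void)
{
        printf("%d\n", should_join(4096, 1200, 1600));  /* 2800 <= 3072: join */
        printf("%d\n", should_join(4096, 1200, 2200));  /* 3400 >  3072: keep */
        return 0;
}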
+ */ +int +xfs_dir_leaf_remove(xfs_trans_t *trans, xfs_dabuf_t *bp, int index) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_hdr_t *hdr; + xfs_dir_leaf_map_t *map; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + int before, after, smallest, entsize; + int tablesize, tmp, i; + xfs_mount_t *mp; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + hdr = &leaf->hdr; + mp = trans->t_mountp; + ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); + ASSERT((index >= 0) && (index < INT_GET(hdr->count, ARCH_CONVERT))); + ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) >= ((INT_GET(hdr->count, ARCH_CONVERT)*sizeof(*entry))+sizeof(*hdr))); + entry = &leaf->entries[index]; + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp)); + + /* + * Scan through free region table: + * check for adjacency of free'd entry with an existing one, + * find smallest free region in case we need to replace it, + * adjust any map that borders the entry table, + */ + tablesize = INT_GET(hdr->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t) + + (uint)sizeof(xfs_dir_leaf_hdr_t); + map = &hdr->freemap[0]; + tmp = INT_GET(map->size, ARCH_CONVERT); + before = after = -1; + smallest = XFS_DIR_LEAF_MAPSIZE - 1; + entsize = XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry); + for (i = 0; i < XFS_DIR_LEAF_MAPSIZE; map++, i++) { + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + if (INT_GET(map->base, ARCH_CONVERT) == tablesize) { + INT_MOD(map->base, ARCH_CONVERT, -((uint)sizeof(xfs_dir_leaf_entry_t))); + INT_MOD(map->size, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_entry_t)); + } + + if ((INT_GET(map->base, ARCH_CONVERT) + INT_GET(map->size, ARCH_CONVERT)) == INT_GET(entry->nameidx, ARCH_CONVERT)) { + before = i; + } else if (INT_GET(map->base, ARCH_CONVERT) == (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) { + after = i; + } else if (INT_GET(map->size, ARCH_CONVERT) < tmp) { + tmp = INT_GET(map->size, ARCH_CONVERT); + smallest = i; + } + } + + /* + * Coalesce adjacent freemap regions, + * or replace the smallest region. + */ + if ((before >= 0) || (after >= 0)) { + if ((before >= 0) && (after >= 0)) { + map = &hdr->freemap[before]; + INT_MOD(map->size, ARCH_CONVERT, entsize); + INT_MOD(map->size, ARCH_CONVERT, INT_GET(hdr->freemap[after].size, ARCH_CONVERT)); + INT_ZERO(hdr->freemap[after].base, ARCH_CONVERT); + INT_ZERO(hdr->freemap[after].size, ARCH_CONVERT); + } else if (before >= 0) { + map = &hdr->freemap[before]; + INT_MOD(map->size, ARCH_CONVERT, entsize); + } else { + map = &hdr->freemap[after]; + INT_COPY(map->base, entry->nameidx, ARCH_CONVERT); + INT_MOD(map->size, ARCH_CONVERT, entsize); + } + } else { + /* + * Replace smallest region (if it is smaller than free'd entry) + */ + map = &hdr->freemap[smallest]; + if (INT_GET(map->size, ARCH_CONVERT) < entsize) { + INT_COPY(map->base, entry->nameidx, ARCH_CONVERT); + INT_SET(map->size, ARCH_CONVERT, entsize); + } + } + + /* + * Did we remove the first entry? + */ + if (INT_GET(entry->nameidx, ARCH_CONVERT) == INT_GET(hdr->firstused, ARCH_CONVERT)) + smallest = 1; + else + smallest = 0; + + /* + * Compress the remaining entries and zero out the removed stuff. 
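The freemap bookkeeping in xfs_dir_leaf_remove tries first to coalesce the freed name bytes with a free region that ends where the name begins or begins where it ends (bridging two regions if both exist), and only when the freed run is not adjacent to anything does it let it displace the smallest remembered region, so the map always keeps the larger runs. Here is a stand-alone sketch of that policy; the kernel's extra adjustment of regions that border the shrinking entry table is omitted, and the smallest-slot tracking is simplified.

#include <stdio.h>

#define MAPSIZE 3

struct map { unsigned base, size; };

/*
 * Return 'entsize' bytes at 'nameidx' to the freemap: coalesce with an
 * adjacent region when possible, otherwise remember the run only if it is
 * bigger than the smallest region currently tracked.
 */
static void free_name(struct map *fm, unsigned nameidx, unsigned entsize)
{
        int i, before = -1, after = -1, smallest = 0;

        for (i = 0; i < MAPSIZE; i++) {
                if (fm[i].base + fm[i].size == nameidx)
                        before = i;
                else if (fm[i].base == nameidx + entsize)
                        after = i;
                else if (fm[i].size < fm[smallest].size)
                        smallest = i;
        }

        if (before >= 0 && after >= 0) {                /* bridge two regions */
                fm[before].size += entsize + fm[after].size;
                fm[after].base = fm[after].size = 0;
        } else if (before >= 0) {
                fm[before].size += entsize;
        } else if (after >= 0) {
                fm[after].base = nameidx;
                fm[after].size += entsize;
        } else if (fm[smallest].size < entsize) {       /* keep the bigger run */
                fm[smallest].base = nameidx;
                fm[smallest].size = entsize;
        }
}

int main(void)
{
        struct map fm[MAPSIZE] = { { 64, 100 }, { 300, 40 }, { 0, 0 } };

        free_name(fm, 164, 20);         /* adjacent to the end of region 0 */
        printf("region 0: base=%u size=%u\n", fm[0].base, fm[0].size);
        return 0;
}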
+ */ + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + memset((char *)namest, 0, entsize); + xfs_da_log_buf(trans, bp, XFS_DA_LOGRANGE(leaf, namest, entsize)); + + INT_MOD(hdr->namebytes, ARCH_CONVERT, -(entry->namelen)); + tmp = (INT_GET(hdr->count, ARCH_CONVERT) - index) * (uint)sizeof(xfs_dir_leaf_entry_t); + memmove(entry, entry + 1, tmp); + INT_MOD(hdr->count, ARCH_CONVERT, -1); + xfs_da_log_buf(trans, bp, + XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry))); + entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)]; + memset((char *)entry, 0, sizeof(xfs_dir_leaf_entry_t)); + + /* + * If we removed the first entry, re-find the first used byte + * in the name area. Note that if the entry was the "firstused", + * then we don't have a "hole" in our block resulting from + * removing the name. + */ + if (smallest) { + tmp = XFS_LBSIZE(mp); + entry = &leaf->entries[0]; + for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; i >= 0; entry++, i--) { + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp)); + if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp) + tmp = INT_GET(entry->nameidx, ARCH_CONVERT); + } + INT_SET(hdr->firstused, ARCH_CONVERT, tmp); + if (INT_ISZERO(hdr->firstused, ARCH_CONVERT)) + INT_SET(hdr->firstused, ARCH_CONVERT, tmp - 1); + } else { + hdr->holes = 1; /* mark as needing compaction */ + } + + xfs_da_log_buf(trans, bp, XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); + + /* + * Check if leaf is less than 50% full, caller may want to + * "join" the leaf with a sibling if so. + */ + tmp = (uint)sizeof(xfs_dir_leaf_hdr_t); + tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t); + tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * ((uint)sizeof(xfs_dir_leaf_name_t) - 1); + tmp += INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); + if (tmp < mp->m_dir_magicpct) + return(1); /* leaf is < 37% full */ + return(0); +} + +/* + * Move all the directory entries from drop_leaf into save_leaf. + */ +void +xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, + xfs_da_state_blk_t *save_blk) +{ + xfs_dir_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf; + xfs_dir_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr; + xfs_mount_t *mp; + char *tmpbuffer; + + /* + * Set up environment. + */ + mp = state->mp; + ASSERT(drop_blk->magic == XFS_DIR_LEAF_MAGIC); + ASSERT(save_blk->magic == XFS_DIR_LEAF_MAGIC); + drop_leaf = drop_blk->bp->data; + save_leaf = save_blk->bp->data; + ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + drop_hdr = &drop_leaf->hdr; + save_hdr = &save_leaf->hdr; + + /* + * Save last hashval from dying block for later Btree fixup. + */ + drop_blk->hashval = INT_GET(drop_leaf->entries[ drop_leaf->hdr.count-1 ].hashval, ARCH_CONVERT); + + /* + * Check if we need a temp buffer, or can we do it in place. + * Note that we don't check "leaf" for holes because we will + * always be dropping it, toosmall() decided that for us already. + */ + if (save_hdr->holes == 0) { + /* + * dest leaf has no holes, so we add there. May need + * to make some room in the entry array. 
+ */ + if (xfs_dir_leaf_order(save_blk->bp, drop_blk->bp)) { + xfs_dir_leaf_moveents(drop_leaf, 0, save_leaf, 0, + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + } else { + xfs_dir_leaf_moveents(drop_leaf, 0, + save_leaf, INT_GET(save_hdr->count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + } + } else { + /* + * Destination has holes, so we make a temporary copy + * of the leaf and add them both to that. + */ + tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP); + ASSERT(tmpbuffer != NULL); + memset(tmpbuffer, 0, state->blocksize); + tmp_leaf = (xfs_dir_leafblock_t *)tmpbuffer; + tmp_hdr = &tmp_leaf->hdr; + tmp_hdr->info = save_hdr->info; /* struct copy */ + INT_ZERO(tmp_hdr->count, ARCH_CONVERT); + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize); + if (INT_ISZERO(tmp_hdr->firstused, ARCH_CONVERT)) + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize - 1); + INT_ZERO(tmp_hdr->namebytes, ARCH_CONVERT); + if (xfs_dir_leaf_order(save_blk->bp, drop_blk->bp)) { + xfs_dir_leaf_moveents(drop_leaf, 0, tmp_leaf, 0, + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + xfs_dir_leaf_moveents(save_leaf, 0, + tmp_leaf, INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(save_hdr->count, ARCH_CONVERT), mp); + } else { + xfs_dir_leaf_moveents(save_leaf, 0, tmp_leaf, 0, + (int)INT_GET(save_hdr->count, ARCH_CONVERT), mp); + xfs_dir_leaf_moveents(drop_leaf, 0, + tmp_leaf, INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + } + memcpy(save_leaf, tmp_leaf, state->blocksize); + kmem_free(tmpbuffer, state->blocksize); + } + + xfs_da_log_buf(state->args->trans, save_blk->bp, 0, + state->blocksize - 1); + + /* + * Copy out last hashval in each block for B-tree code. + */ + save_blk->hashval = INT_GET(save_leaf->entries[ INT_GET(save_leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); +} + +/*======================================================================== + * Routines used for finding things in the Btree. + *========================================================================*/ + +/* + * Look up a name in a leaf directory structure. + * This is the internal routine, it uses the caller's buffer. + * + * Note that duplicate keys are allowed, but only check within the + * current leaf node. The Btree code must check in adjacent leaf nodes. + * + * Return in *index the index into the entry[] array of either the found + * entry, or where the entry should have been (insert before that entry). + * + * Don't change the args->inumber unless we find the filename. + */ +int +xfs_dir_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args, int *index) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + int probe, span; + xfs_dahash_t hashval; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8)); + + /* + * Binary search. 
(note: small blocks will skip this loop) + */ + hashval = args->hashval; + probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2; + for (entry = &leaf->entries[probe]; span > 4; + entry = &leaf->entries[probe]) { + span /= 2; + if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval) + probe += span; + else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval) + probe -= span; + else + break; + } + ASSERT((probe >= 0) && \ + ((INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)))); + ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT) == hashval)); + + /* + * Since we may have duplicate hashval's, find the first matching + * hashval in the leaf. + */ + while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT) >= hashval)) { + entry--; + probe--; + } + while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) && (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) { + entry++; + probe++; + } + if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT)) || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) { + *index = probe; + ASSERT(args->oknoent); + return(XFS_ERROR(ENOENT)); + } + + /* + * Duplicate keys may be present, so search all of them for a match. + */ + while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) && (INT_GET(entry->hashval, ARCH_CONVERT) == hashval)) { + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT)); + if (entry->namelen == args->namelen && + namest->name[0] == args->name[0] && + memcmp(args->name, namest->name, args->namelen) == 0) { + XFS_DIR_SF_GET_DIRINO_ARCH(&namest->inumber, &args->inumber, ARCH_CONVERT); + *index = probe; + return(XFS_ERROR(EEXIST)); + } + entry++; + probe++; + } + *index = probe; + ASSERT(probe == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); + return(XFS_ERROR(ENOENT)); +} + +/*======================================================================== + * Utility routines. + *========================================================================*/ + +/* + * Move the indicated entries from one leaf to another. + * NOTE: this routine modifies both source and destination leaves. + */ +/* ARGSUSED */ +STATIC void +xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s, + xfs_dir_leafblock_t *leaf_d, int start_d, + int count, xfs_mount_t *mp) +{ + xfs_dir_leaf_hdr_t *hdr_s, *hdr_d; + xfs_dir_leaf_entry_t *entry_s, *entry_d; + int tmp, i; + + /* + * Check for nothing to do. + */ + if (count == 0) + return; + + /* + * Set up environment. + */ + ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + hdr_s = &leaf_s->hdr; + hdr_d = &leaf_d->hdr; + ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) && (INT_GET(hdr_s->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); + ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_s->count, ARCH_CONVERT)*sizeof(*entry_s))+sizeof(*hdr_s))); + ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)); + ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_d->count, ARCH_CONVERT)*sizeof(*entry_d))+sizeof(*hdr_d))); + + ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT)); + ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT)); + ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT)); + + /* + * Move the entries in the destination leaf up to make a hole? 
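The lookup above narrows the hash-sorted entry table with a span-halving binary search that stops once the span is down to four, backs up to the first entry carrying the target hash, and then compares names across the whole run of duplicates. A user-space rendering of that strategy, with strings in place of the on-disk name area, is shown below.

#include <stdio.h>
#include <string.h>

struct dent { unsigned hash; const char *name; };

/*
 * Binary-search to within a small span, rewind to the first entry with the
 * target hash, then scan the run of equal hashes comparing names.
 * Returns the index of the match, or -1 if the name is not present.
 */
static int leaf_lookup(const struct dent *e, int count,
                       unsigned hash, const char *name)
{
        int probe = count / 2, span = count / 2;

        while (span > 4) {
                span /= 2;
                if (e[probe].hash < hash)
                        probe += span;
                else if (e[probe].hash > hash)
                        probe -= span;
                else
                        break;
        }
        while (probe > 0 && e[probe].hash >= hash)
                probe--;
        while (probe < count && e[probe].hash < hash)
                probe++;
        for (; probe < count && e[probe].hash == hash; probe++)
                if (strcmp(e[probe].name, name) == 0)
                        return probe;
        return -1;
}

int main(void)
{
        struct dent e[] = {
                { 10, "a" }, { 20, "b" }, { 33, "c" }, { 33, "d" },
                { 33, "e" }, { 47, "f" }, { 58, "g" }, { 90, "h" },
        };

        printf("found at %d\n", leaf_lookup(e, 8, 33, "e"));
        printf("found at %d\n", leaf_lookup(e, 8, 33, "zz"));
        return 0;
}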
+ */ + if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) { + tmp = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d; + tmp *= (uint)sizeof(xfs_dir_leaf_entry_t); + entry_s = &leaf_d->entries[start_d]; + entry_d = &leaf_d->entries[start_d + count]; + memcpy(entry_d, entry_s, tmp); + } + + /* + * Copy all entry's in the same (sorted) order, + * but allocate filenames packed and in sequence. + */ + entry_s = &leaf_s->entries[start_s]; + entry_d = &leaf_d->entries[start_d]; + for (i = 0; i < count; entry_s++, entry_d++, i++) { + ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) >= INT_GET(hdr_s->firstused, ARCH_CONVERT)); + ASSERT(entry_s->namelen < MAXNAMELEN); + tmp = XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry_s); + INT_MOD(hdr_d->firstused, ARCH_CONVERT, -(tmp)); + entry_d->hashval = entry_s->hashval; /* INT_: direct copy */ + INT_COPY(entry_d->nameidx, hdr_d->firstused, ARCH_CONVERT); + entry_d->namelen = entry_s->namelen; + ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp)); + memcpy(XFS_DIR_LEAF_NAMESTRUCT(leaf_d, INT_GET(entry_d->nameidx, ARCH_CONVERT)), + XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)), tmp); + ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp)); + memset((char *)XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)), + 0, tmp); + INT_MOD(hdr_s->namebytes, ARCH_CONVERT, -(entry_d->namelen)); + INT_MOD(hdr_d->namebytes, ARCH_CONVERT, entry_d->namelen); + INT_MOD(hdr_s->count, ARCH_CONVERT, -1); + INT_MOD(hdr_d->count, ARCH_CONVERT, +1); + tmp = INT_GET(hdr_d->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t) + + (uint)sizeof(xfs_dir_leaf_hdr_t); + ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp); + + } + + /* + * Zero out the entries we just copied. + */ + if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) { + tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t); + entry_s = &leaf_s->entries[start_s]; + ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp)); + memset((char *)entry_s, 0, tmp); + } else { + /* + * Move the remaining entries down to fill the hole, + * then zero the entries at the top. + */ + tmp = INT_GET(hdr_s->count, ARCH_CONVERT) - count; + tmp *= (uint)sizeof(xfs_dir_leaf_entry_t); + entry_s = &leaf_s->entries[start_s + count]; + entry_d = &leaf_s->entries[start_s]; + memcpy(entry_d, entry_s, tmp); + + tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t); + entry_s = &leaf_s->entries[INT_GET(hdr_s->count, ARCH_CONVERT)]; + ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp)); + memset((char *)entry_s, 0, tmp); + } + + /* + * Fill in the freemap information + */ + INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_hdr_t)); + INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT, INT_GET(hdr_d->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t)); + INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, INT_GET(hdr_d->firstused, ARCH_CONVERT) - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); + INT_SET(hdr_d->freemap[1].base, ARCH_CONVERT, INT_ZERO(hdr_d->freemap[2].base, ARCH_CONVERT)); + INT_SET(hdr_d->freemap[1].size, ARCH_CONVERT, INT_ZERO(hdr_d->freemap[2].size, ARCH_CONVERT)); + hdr_s->holes = 1; /* leaf may not be compact */ +} + +/* + * Compare two leaf blocks "order". 
+ */ +int +xfs_dir_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) +{ + xfs_dir_leafblock_t *leaf1, *leaf2; + + leaf1 = leaf1_bp->data; + leaf2 = leaf2_bp->data; + ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) && + (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC)); + if ((INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) && + ((INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + return(1); + } + return(0); +} + +/* + * Pick up the last hashvalue from a leaf block. + */ +xfs_dahash_t +xfs_dir_leaf_lasthash(xfs_dabuf_t *bp, int *count) +{ + xfs_dir_leafblock_t *leaf; + + leaf = bp->data; + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + if (count) + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + if (INT_ISZERO(leaf->hdr.count, ARCH_CONVERT)) + return(0); + return(INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); +} + +/* + * Copy out directory entries for getdents(), for leaf directories. + */ +int +xfs_dir_leaf_getdents_int( + xfs_dabuf_t *bp, + xfs_inode_t *dp, + xfs_dablk_t bno, + uio_t *uio, + int *eobp, + xfs_dirent_t *dbp, + xfs_dir_put_t put, + xfs_daddr_t nextda) +{ + xfs_dir_leafblock_t *leaf; + xfs_dir_leaf_entry_t *entry; + xfs_dir_leaf_name_t *namest; + int entno, want_entno, i, nextentno; + xfs_mount_t *mp; + xfs_dahash_t cookhash; + xfs_dahash_t nexthash = 0; +#if (BITS_PER_LONG == 32) + xfs_dahash_t lasthash = XFS_DA_MAXHASH; +#endif + xfs_dir_put_args_t p; + + mp = dp->i_mount; + leaf = bp->data; + if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { + *eobp = 1; + return(XFS_ERROR(ENOENT)); /* XXX wrong code */ + } + + want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); + + cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); + + xfs_dir_trace_g_dul("leaf: start", dp, uio, leaf); + + /* + * Re-find our place. + */ + for (i = entno = 0, entry = &leaf->entries[0]; + i < INT_GET(leaf->hdr.count, ARCH_CONVERT); + entry++, i++) { + + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, + INT_GET(entry->nameidx, ARCH_CONVERT)); + + if (unlikely( + ((char *)namest < (char *)leaf) || + ((char *)namest >= (char *)leaf + XFS_LBSIZE(mp)) || + (entry->namelen >= MAXNAMELEN))) { + XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(1)", + XFS_ERRLEVEL_LOW, mp, leaf); + xfs_dir_trace_g_du("leaf: corrupted", dp, uio); + return XFS_ERROR(EFSCORRUPTED); + } + if (INT_GET(entry->hashval, ARCH_CONVERT) >= cookhash) { + if ( entno < want_entno + && INT_GET(entry->hashval, ARCH_CONVERT) + == cookhash) { + /* + * Trying to get to a particular offset in a + * run of equal-hashval entries. + */ + entno++; + } else if ( want_entno > 0 + && entno == want_entno + && INT_GET(entry->hashval, ARCH_CONVERT) + == cookhash) { + break; + } else { + entno = 0; + break; + } + } + } + + if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { + xfs_dir_trace_g_du("leaf: hash not found", dp, uio); + if (!INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) + uio->uio_offset = + XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH); + /* + * Don't set uio_offset if there's another block: + * the node code will be setting uio_offset anyway. 
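Throughout the getdents code above, the 64-bit directory offset acts as a cookie that packs a (block, entry, hash) triple so a later call can re-find its place inside a run of equal hash values. The sketch below illustrates the idea only; the 16/16/32-bit split is a hypothetical choice for demonstration, whereas the kernel builds the block+entry word with XFS_DA_MAKE_BNOENTRY and overlays it with the hash through the endian-aware xfs_dircook_t union declared in xfs_dir_leaf.h below.

#include <stdint.h>
#include <stdio.h>

/* Pack a (block, entry, hash) triple into one 64-bit readdir offset. */
static uint64_t make_cookie(uint32_t bno, uint32_t entry, uint32_t hash)
{
        return ((uint64_t)bno << 48) |
               ((uint64_t)(entry & 0xffff) << 32) |
               hash;
}

static void crack_cookie(uint64_t cook, uint32_t *bno, uint32_t *entry,
                         uint32_t *hash)
{
        *bno = (uint32_t)(cook >> 48);
        *entry = (uint32_t)(cook >> 32) & 0xffff;
        *hash = (uint32_t)cook;
}

int main(void)
{
        uint32_t bno, entry, hash;

        crack_cookie(make_cookie(3, 2, 0xdeadbeef), &bno, &entry, &hash);
        printf("bno=%u entry=%u hash=0x%x\n", bno, entry, hash);
        return 0;
}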
+ */ + *eobp = 0; + return(0); + } + xfs_dir_trace_g_due("leaf: hash found", dp, uio, entry); + + p.dbp = dbp; + p.put = put; + p.uio = uio; + + /* + * We're synchronized, start copying entries out to the user. + */ + for (; entno >= 0 && i < INT_GET(leaf->hdr.count, ARCH_CONVERT); + entry++, i++, (entno = nextentno)) { + int lastresid=0, retval; + xfs_dircook_t lastoffset; + xfs_dahash_t thishash; + + /* + * Check for a damaged directory leaf block and pick up + * the inode number from this entry. + */ + namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, + INT_GET(entry->nameidx, ARCH_CONVERT)); + + if (unlikely( + ((char *)namest < (char *)leaf) || + ((char *)namest >= (char *)leaf + XFS_LBSIZE(mp)) || + (entry->namelen >= MAXNAMELEN))) { + XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(2)", + XFS_ERRLEVEL_LOW, mp, leaf); + xfs_dir_trace_g_du("leaf: corrupted", dp, uio); + return XFS_ERROR(EFSCORRUPTED); + } + + thishash = INT_GET(entry->hashval, ARCH_CONVERT); + + /* + * NOTE! Linux "filldir" semantics require that the + * offset "cookie" be for this entry, not the + * next; all the actual shuffling to make it + * "look right" to the user is done in filldir. + */ + XFS_PUT_COOKIE(p.cook, mp, bno, entno, thishash); + + xfs_dir_trace_g_duc("leaf: middle cookie ", + dp, uio, p.cook.o); + + if (i < (INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1)) { + nexthash = INT_GET(entry[1].hashval, ARCH_CONVERT); + + if (nexthash == INT_GET(entry->hashval, ARCH_CONVERT)) + nextentno = entno + 1; + else + nextentno = 0; + + } else if (INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) { + xfs_dabuf_t *bp2; + xfs_dir_leafblock_t *leaf2; + + ASSERT(nextda != -1); + + retval = xfs_da_read_buf(dp->i_transp, dp, + INT_GET(leaf->hdr.info.forw, + ARCH_CONVERT), nextda, + &bp2, XFS_DATA_FORK); + if (retval) + return(retval); + + ASSERT(bp2 != NULL); + + leaf2 = bp2->data; + + if (unlikely( + (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + != XFS_DIR_LEAF_MAGIC) + || (INT_GET(leaf2->hdr.info.back, ARCH_CONVERT) + != bno))) { /* GROT */ + XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)", + XFS_ERRLEVEL_LOW, mp, + leaf2); + xfs_da_brelse(dp->i_transp, bp2); + + return(XFS_ERROR(EFSCORRUPTED)); + } + + nexthash = INT_GET(leaf2->entries[0].hashval, + ARCH_CONVERT); + nextentno = -1; + + xfs_da_brelse(dp->i_transp, bp2); + xfs_dir_trace_g_duc("leaf: next blk cookie", + dp, uio, p.cook.o); + } else { + nextentno = -1; + nexthash = XFS_DA_MAXHASH; + } + + /* + * Save off the cookie so we can fall back should the + * 'put' into the outgoing buffer fails. To handle a run + * of equal-hashvals, the off_t structure on 64bit + * builds has entno built into the cookie to ID the + * entry. On 32bit builds, we only have space for the + * hashval so we can't ID specific entries within a group + * of same hashval entries. For this, lastoffset is set + * to the first in the run of equal hashvals so we don't + * include any entries unless we can include all entries + * that share the same hashval. Hopefully the buffer + * provided is big enough to handle it (see pv763517). + */ +#if (BITS_PER_LONG == 32) + if (INT_GET(entry->hashval, ARCH_CONVERT) != lasthash) { + XFS_PUT_COOKIE(lastoffset, mp, bno, entno, thishash); + lastresid = uio->uio_resid; + lasthash = thishash; + } else { + xfs_dir_trace_g_duc("leaf: DUP COOKIES, skipped", + dp, uio, p.cook.o); + } +#else + XFS_PUT_COOKIE(lastoffset, mp, bno, entno, thishash); + lastresid = uio->uio_resid; +#endif /* BITS_PER_LONG == 32 */ + + /* + * Put the current entry into the outgoing buffer. 
If we fail + * then restore the UIO to the first entry in the current + * run of equal-hashval entries (probably one 1 entry long). + */ +#if XFS_BIG_FILESYSTEMS + p.ino = XFS_GET_DIR_INO_ARCH(mp, namest->inumber, ARCH_CONVERT) + mp->m_inoadd; +#else + p.ino = XFS_GET_DIR_INO_ARCH(mp, namest->inumber, ARCH_CONVERT); +#endif + p.name = (char *)namest->name; + p.namelen = entry->namelen; + + retval = p.put(&p); + + if (!p.done) { + uio->uio_offset = lastoffset.o; + uio->uio_resid = lastresid; + + *eobp = 1; + + xfs_dir_trace_g_du("leaf: E-O-B", dp, uio); + + return(retval); + } + } + + XFS_PUT_COOKIE(p.cook, mp, 0, 0, nexthash); + + uio->uio_offset = p.cook.o; + + *eobp = 0; + + xfs_dir_trace_g_du("leaf: E-O-F", dp, uio); + + return(0); +} + +/* + * Format a dirent64 structure and copy it out the the user's buffer. + */ +int +xfs_dir_put_dirent64_direct(xfs_dir_put_args_t *pa) +{ + iovec_t *iovp; + int reclen, namelen; + xfs_dirent_t *idbp; + uio_t *uio; + + namelen = pa->namelen; + reclen = DIRENTSIZE(namelen); + uio = pa->uio; + if (reclen > uio->uio_resid) { + pa->done = 0; + return 0; + } + iovp = uio->uio_iov; + idbp = (xfs_dirent_t *)iovp->iov_base; + iovp->iov_base = (char *)idbp + reclen; + iovp->iov_len -= reclen; + uio->uio_resid -= reclen; + idbp->d_reclen = reclen; + idbp->d_ino = pa->ino; + idbp->d_off = pa->cook.o; + idbp->d_name[namelen] = '\0'; + pa->done = 1; + memcpy(idbp->d_name, pa->name, namelen); + return 0; +} + +/* + * Format a dirent64 structure and copy it out the the user's buffer. + */ +int +xfs_dir_put_dirent64_uio(xfs_dir_put_args_t *pa) +{ + int retval, reclen, namelen; + xfs_dirent_t *idbp; + uio_t *uio; + + namelen = pa->namelen; + reclen = DIRENTSIZE(namelen); + uio = pa->uio; + if (reclen > uio->uio_resid) { + pa->done = 0; + return 0; + } + idbp = pa->dbp; + idbp->d_reclen = reclen; + idbp->d_ino = pa->ino; + idbp->d_off = pa->cook.o; + idbp->d_name[namelen] = '\0'; + memcpy(idbp->d_name, pa->name, namelen); + retval = uiomove((caddr_t)idbp, reclen, UIO_READ, uio); + pa->done = (retval == 0); + return retval; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir_leaf.h linux.22-ac2/fs/xfs/xfs_dir_leaf.h --- linux.vanilla/fs/xfs/xfs_dir_leaf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir_leaf.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR_LEAF_H__ +#define __XFS_DIR_LEAF_H__ + +/* + * Directory layout, internal structure, access macros, etc. + * + * Large directories are structured around Btrees where all the data + * elements are in the leaf nodes. Filenames are hashed into an int, + * then that int is used as the index into the Btree. Since the hashval + * of a filename may not be unique, we may have duplicate keys. The + * internal links in the Btree are logical block offsets into the file. + */ + +struct uio; +struct xfs_bmap_free; +struct xfs_dabuf; +struct xfs_da_args; +struct xfs_da_state; +struct xfs_da_state_blk; +struct xfs_dir_put_args; +struct xfs_inode; +struct xfs_mount; +struct xfs_trans; + +/*======================================================================== + * Directory Structure when equal to XFS_LBSIZE(mp) bytes. + *========================================================================*/ + +/* + * This is the structure of the leaf nodes in the Btree. + * + * Struct leaf_entry's are packed from the top. Names grow from the bottom + * but are not packed. The freemap contains run-length-encoded entries + * for the free bytes after the leaf_entry's, but only the N largest such, + * smaller runs are dropped. When the freemap doesn't show enough space + * for an allocation, we compact the namelist area and try again. If we + * still don't have enough space, then we have to split the block. + * + * Since we have duplicate hash keys, for each key that matches, compare + * the actual string. The root and intermediate node search always takes + * the first-in-the-block key match found, so we should only have to work + * "forw"ard. If none matches, continue with the "forw"ard leaf nodes + * until the hash key changes or the filename is found. + * + * The parent directory and the self-pointer are explicitly represented + * (ie: there are entries for "." and ".."). + * + * Note that the count being a __uint16_t limits us to something like a + * blocksize of 1.3MB in the face of worst case (short) filenames. + */ +#define XFS_DIR_LEAF_MAPSIZE 3 /* how many freespace slots */ + +typedef struct xfs_dir_leafblock { + struct xfs_dir_leaf_hdr { /* constant-structure header block */ + xfs_da_blkinfo_t info; /* block type, links, etc. 
*/ + __uint16_t count; /* count of active leaf_entry's */ + __uint16_t namebytes; /* num bytes of name strings stored */ + __uint16_t firstused; /* first used byte in name area */ + __uint8_t holes; /* != 0 if blk needs compaction */ + __uint8_t pad1; + struct xfs_dir_leaf_map {/* RLE map of free bytes */ + __uint16_t base; /* base of free region */ + __uint16_t size; /* run length of free region */ + } freemap[XFS_DIR_LEAF_MAPSIZE]; /* N largest free regions */ + } hdr; + struct xfs_dir_leaf_entry { /* sorted on key, not name */ + xfs_dahash_t hashval; /* hash value of name */ + __uint16_t nameidx; /* index into buffer of name */ + __uint8_t namelen; /* length of name string */ + __uint8_t pad2; + } entries[1]; /* var sized array */ + struct xfs_dir_leaf_name { + xfs_dir_ino_t inumber; /* inode number for this key */ + __uint8_t name[1]; /* name string itself */ + } namelist[1]; /* grows from bottom of buf */ +} xfs_dir_leafblock_t; +typedef struct xfs_dir_leaf_hdr xfs_dir_leaf_hdr_t; +typedef struct xfs_dir_leaf_map xfs_dir_leaf_map_t; +typedef struct xfs_dir_leaf_entry xfs_dir_leaf_entry_t; +typedef struct xfs_dir_leaf_name xfs_dir_leaf_name_t; + +/* + * Length of name for which a 512-byte block filesystem + * can get a double split. + */ +#define XFS_DIR_LEAF_CAN_DOUBLE_SPLIT_LEN \ + (512 - (uint)sizeof(xfs_dir_leaf_hdr_t) - \ + (uint)sizeof(xfs_dir_leaf_entry_t) * 2 - \ + (uint)sizeof(xfs_dir_leaf_name_t) * 2 - (MAXNAMELEN - 2) + 1 + 1) + +typedef int (*xfs_dir_put_t)(struct xfs_dir_put_args *pa); + +typedef union { + xfs_off_t o; /* offset (cookie) */ + /* + * Watch the order here (endian-ness dependent). + */ + struct { +#if __BYTE_ORDER == __LITTLE_ENDIAN + xfs_dahash_t h; /* hash value */ + __uint32_t be; /* block and entry */ +#else /* __BYTE_ORDER == __BIG_ENDIAN */ + __uint32_t be; /* block and entry */ + xfs_dahash_t h; /* hash value */ +#endif /* __BYTE_ORDER == __BIG_ENDIAN */ + } s; +} xfs_dircook_t; + +#define XFS_PUT_COOKIE(c,mp,bno,entry,hash) \ + ((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash)) + +#define XFS_GET_DIR_INO_ARCH(mp,di,arch) \ + DIRINO_GET_ARCH(&(di),arch) +#define XFS_GET_DIR_INO(mp,di) \ + XFS_GET_DIR_INO_ARCH(mp,di,ARCH_NOCONVERT) + +typedef struct xfs_dir_put_args +{ + xfs_dircook_t cook; /* cookie of (next) entry */ + xfs_intino_t ino; /* inode number */ + struct xfs_dirent *dbp; /* buffer pointer */ + char *name; /* directory entry name */ + int namelen; /* length of name */ + int done; /* output: set if value was stored */ + xfs_dir_put_t put; /* put function ptr (i/o) */ + struct uio *uio; /* uio control structure */ +} xfs_dir_put_args_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_LEAF_ENTSIZE_BYNAME) +int xfs_dir_leaf_entsize_byname(int len); +#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) xfs_dir_leaf_entsize_byname(len) +#else +#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) /* space a name will use */ \ + ((uint)sizeof(xfs_dir_leaf_name_t)-1 + len) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_LEAF_ENTSIZE_BYENTRY) +int xfs_dir_leaf_entsize_byentry(xfs_dir_leaf_entry_t *entry); +#define XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry) \ + xfs_dir_leaf_entsize_byentry(entry) +#else +#define XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry) /* space an entry will use */ \ + ((uint)sizeof(xfs_dir_leaf_name_t)-1 + (entry)->namelen) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_LEAF_NAMESTRUCT) +xfs_dir_leaf_name_t * +xfs_dir_leaf_namestruct(xfs_dir_leafblock_t *leafp, int offset); +#define 
XFS_DIR_LEAF_NAMESTRUCT(leafp,offset) \ + xfs_dir_leaf_namestruct(leafp,offset) +#else +#define XFS_DIR_LEAF_NAMESTRUCT(leafp,offset) /* point to name struct */ \ + ((xfs_dir_leaf_name_t *)&((char *)(leafp))[offset]) +#endif + +/*======================================================================== + * Function prototypes for the kernel. + *========================================================================*/ + +/* + * Internal routines when dirsize < XFS_LITINO(mp). + */ +int xfs_dir_shortform_create(struct xfs_da_args *args, xfs_ino_t parent); +int xfs_dir_shortform_addname(struct xfs_da_args *args); +int xfs_dir_shortform_lookup(struct xfs_da_args *args); +int xfs_dir_shortform_to_leaf(struct xfs_da_args *args); +int xfs_dir_shortform_removename(struct xfs_da_args *args); +int xfs_dir_shortform_getdents(struct xfs_inode *dp, struct uio *uio, int *eofp, + struct xfs_dirent *dbp, xfs_dir_put_t put); +int xfs_dir_shortform_replace(struct xfs_da_args *args); + +/* + * Internal routines when dirsize == XFS_LBSIZE(mp). + */ +int xfs_dir_leaf_to_node(struct xfs_da_args *args); +int xfs_dir_leaf_to_shortform(struct xfs_da_args *args); + +/* + * Routines used for growing the Btree. + */ +int xfs_dir_leaf_create(struct xfs_da_args *args, xfs_dablk_t which_block, + struct xfs_dabuf **bpp); +int xfs_dir_leaf_split(struct xfs_da_state *state, + struct xfs_da_state_blk *oldblk, + struct xfs_da_state_blk *newblk); +int xfs_dir_leaf_add(struct xfs_dabuf *leaf_buffer, + struct xfs_da_args *args, int insertion_index); +int xfs_dir_leaf_addname(struct xfs_da_args *args); +int xfs_dir_leaf_lookup_int(struct xfs_dabuf *leaf_buffer, + struct xfs_da_args *args, + int *index_found_at); +int xfs_dir_leaf_remove(struct xfs_trans *trans, + struct xfs_dabuf *leaf_buffer, + int index_to_remove); +int xfs_dir_leaf_getdents_int(struct xfs_dabuf *bp, struct xfs_inode *dp, + xfs_dablk_t bno, struct uio *uio, + int *eobp, struct xfs_dirent *dbp, + xfs_dir_put_t put, xfs_daddr_t nextda); + +/* + * Routines used for shrinking the Btree. + */ +int xfs_dir_leaf_toosmall(struct xfs_da_state *state, int *retval); +void xfs_dir_leaf_unbalance(struct xfs_da_state *state, + struct xfs_da_state_blk *drop_blk, + struct xfs_da_state_blk *save_blk); + +/* + * Utility routines. + */ +uint xfs_dir_leaf_lasthash(struct xfs_dabuf *bp, int *count); +int xfs_dir_leaf_order(struct xfs_dabuf *leaf1_bp, + struct xfs_dabuf *leaf2_bp); +int xfs_dir_put_dirent64_direct(xfs_dir_put_args_t *pa); +int xfs_dir_put_dirent64_uio(xfs_dir_put_args_t *pa); +int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino); + + +/* + * Global data. + */ +extern xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot; + +#endif /* __XFS_DIR_LEAF_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dir_sf.h linux.22-ac2/fs/xfs/xfs_dir_sf.h --- linux.vanilla/fs/xfs/xfs_dir_sf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dir_sf.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DIR_SF_H__ +#define __XFS_DIR_SF_H__ + +/* + * Directory layout when stored internal to an inode. + * + * Small directories are packed as tightly as possible so as to + * fit into the literal area of the inode. + */ + +typedef struct { __uint8_t i[sizeof(xfs_ino_t)]; } xfs_dir_ino_t; + +/* + * The parent directory has a dedicated field, and the self-pointer must + * be calculated on the fly. + * + * Entries are packed toward the top as tight as possible. The header + * and the elements much be memcpy'd out into a work area to get correct + * alignment for the inode number fields. + */ +typedef struct xfs_dir_shortform { + struct xfs_dir_sf_hdr { /* constant-structure header block */ + xfs_dir_ino_t parent; /* parent dir inode number */ + __uint8_t count; /* count of active entries */ + } hdr; + struct xfs_dir_sf_entry { + xfs_dir_ino_t inumber; /* referenced inode number */ + __uint8_t namelen; /* actual length of name (no NULL) */ + __uint8_t name[1]; /* name */ + } list[1]; /* variable sized array */ +} xfs_dir_shortform_t; +typedef struct xfs_dir_sf_hdr xfs_dir_sf_hdr_t; +typedef struct xfs_dir_sf_entry xfs_dir_sf_entry_t; + +/* + * We generate this then sort it, so that readdirs are returned in + * hash-order. Else seekdir won't work. 
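Since shortform entries are packed with no padding and the inode number is declared as a byte array, any 64-bit access has to go through memcpy into an aligned work area, which is what the comment above is warning about. The following stand-alone sketch builds two packed entries by hand and walks them back the way XFS_DIR_SF_NEXTENTRY-style pointer arithmetic would; the buffer contents, inode values and the 8+1 byte per-entry header are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Each packed entry is: 8 bytes of inode number (byte array, unaligned),
 * 1 byte of name length, then the name with no terminating NUL.
 */
int main(void)
{
        uint8_t buf[64], *p = buf;
        const char *names[] = { "foo", "quux" };
        int i, count = 2;

        /* Build two packed entries with fake inode numbers 100 and 101. */
        for (i = 0; i < count; i++) {
                uint64_t ino = 100 + i;

                memcpy(p, &ino, 8);
                p[8] = (uint8_t)strlen(names[i]);
                memcpy(p + 9, names[i], p[8]);
                p += 9 + p[8];
        }

        /* Walk them back, advancing by each entry's actual size. */
        for (p = buf, i = 0; i < count; i++) {
                uint64_t ino;
                uint8_t len = p[8];

                memcpy(&ino, p, 8);             /* unaligned-safe copy */
                printf("%.*s -> inode %llu\n", (int)len,
                       (const char *)(p + 9), (unsigned long long)ino);
                p += 9 + len;
        }
        return 0;
}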
+ */ +typedef struct xfs_dir_sf_sort { + __uint8_t entno; /* .=0, ..=1, else entry# + 2 */ + __uint8_t seqno; /* sequence # with same hash value */ + __uint8_t namelen; /* length of name value (no null) */ + xfs_dahash_t hash; /* this entry's hash value */ + xfs_intino_t ino; /* this entry's inode number */ + char *name; /* name value, pointer into buffer */ +} xfs_dir_sf_sort_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_GET_DIRINO) +void xfs_dir_sf_get_dirino_arch(xfs_dir_ino_t *from, xfs_ino_t *to, xfs_arch_t arch); +void xfs_dir_sf_get_dirino(xfs_dir_ino_t *from, xfs_ino_t *to); +#define XFS_DIR_SF_GET_DIRINO_ARCH(from,to,arch) xfs_dir_sf_get_dirino_arch(from, to, arch) +#define XFS_DIR_SF_GET_DIRINO(from,to) xfs_dir_sf_get_dirino(from, to) +#else +#define XFS_DIR_SF_GET_DIRINO_ARCH(from,to,arch) DIRINO_COPY_ARCH(from,to,arch) +#define XFS_DIR_SF_GET_DIRINO(from,to) DIRINO_COPY_ARCH(from,to,ARCH_NOCONVERT) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_PUT_DIRINO) +void xfs_dir_sf_put_dirino_arch(xfs_ino_t *from, xfs_dir_ino_t *to, xfs_arch_t arch); +void xfs_dir_sf_put_dirino(xfs_ino_t *from, xfs_dir_ino_t *to); +#define XFS_DIR_SF_PUT_DIRINO_ARCH(from,to,arch) xfs_dir_sf_put_dirino_arch(from, to, arch) +#define XFS_DIR_SF_PUT_DIRINO(from,to) xfs_dir_sf_put_dirino(from, to) +#else +#define XFS_DIR_SF_PUT_DIRINO_ARCH(from,to,arch) DIRINO_COPY_ARCH(from,to,arch) +#define XFS_DIR_SF_PUT_DIRINO(from,to) DIRINO_COPY_ARCH(from,to,ARCH_NOCONVERT) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_ENTSIZE_BYNAME) +int xfs_dir_sf_entsize_byname(int len); +#define XFS_DIR_SF_ENTSIZE_BYNAME(len) xfs_dir_sf_entsize_byname(len) +#else +#define XFS_DIR_SF_ENTSIZE_BYNAME(len) /* space a name uses */ \ + ((uint)sizeof(xfs_dir_sf_entry_t)-1 + (len)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_ENTSIZE_BYENTRY) +int xfs_dir_sf_entsize_byentry(xfs_dir_sf_entry_t *sfep); +#define XFS_DIR_SF_ENTSIZE_BYENTRY(sfep) xfs_dir_sf_entsize_byentry(sfep) +#else +#define XFS_DIR_SF_ENTSIZE_BYENTRY(sfep) /* space an entry uses */ \ + ((uint)sizeof(xfs_dir_sf_entry_t)-1 + (sfep)->namelen) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_NEXTENTRY) +xfs_dir_sf_entry_t *xfs_dir_sf_nextentry(xfs_dir_sf_entry_t *sfep); +#define XFS_DIR_SF_NEXTENTRY(sfep) xfs_dir_sf_nextentry(sfep) +#else +#define XFS_DIR_SF_NEXTENTRY(sfep) /* next entry in struct */ \ + ((xfs_dir_sf_entry_t *) \ + ((char *)(sfep) + XFS_DIR_SF_ENTSIZE_BYENTRY(sfep))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DIR_SF_ALLFIT) +int xfs_dir_sf_allfit(int count, int totallen); +#define XFS_DIR_SF_ALLFIT(count,totallen) \ + xfs_dir_sf_allfit(count,totallen) +#else +#define XFS_DIR_SF_ALLFIT(count,totallen) /* will all entries fit? */ \ + ((uint)sizeof(xfs_dir_sf_hdr_t) + \ + ((uint)sizeof(xfs_dir_sf_entry_t)-1)*(count) + (totallen)) +#endif + +#ifdef XFS_ALL_TRACE +#define XFS_DIR_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_DIR_TRACE +#endif + +/* + * Kernel tracing support for directories. + */ +struct uio; +struct xfs_inode; +struct xfs_da_intnode; +struct xfs_dinode; +struct xfs_dir_leafblock; +struct xfs_dir_leaf_entry; + +#define XFS_DIR_TRACE_SIZE 4096 /* size of global trace buffer */ + +/* + * Trace record types. 
+ */ +#define XFS_DIR_KTRACE_G_DU 1 /* dp, uio */ +#define XFS_DIR_KTRACE_G_DUB 2 /* dp, uio, bno */ +#define XFS_DIR_KTRACE_G_DUN 3 /* dp, uio, node */ +#define XFS_DIR_KTRACE_G_DUL 4 /* dp, uio, leaf */ +#define XFS_DIR_KTRACE_G_DUE 5 /* dp, uio, leaf entry */ +#define XFS_DIR_KTRACE_G_DUC 6 /* dp, uio, cookie */ + +#if defined(XFS_DIR_TRACE) + +void xfs_dir_trace_g_du(char *where, struct xfs_inode *dp, struct uio *uio); +void xfs_dir_trace_g_dub(char *where, struct xfs_inode *dp, struct uio *uio, + xfs_dablk_t bno); +void xfs_dir_trace_g_dun(char *where, struct xfs_inode *dp, struct uio *uio, + struct xfs_da_intnode *node); +void xfs_dir_trace_g_dul(char *where, struct xfs_inode *dp, struct uio *uio, + struct xfs_dir_leafblock *leaf); +void xfs_dir_trace_g_due(char *where, struct xfs_inode *dp, struct uio *uio, + struct xfs_dir_leaf_entry *entry); +void xfs_dir_trace_g_duc(char *where, struct xfs_inode *dp, struct uio *uio, + xfs_off_t cookie); +void xfs_dir_trace_enter(int type, char *where, + __psunsigned_t a0, __psunsigned_t a1, + __psunsigned_t a2, __psunsigned_t a3, + __psunsigned_t a4, __psunsigned_t a5, + __psunsigned_t a6, __psunsigned_t a7, + __psunsigned_t a8, __psunsigned_t a9, + __psunsigned_t a10, __psunsigned_t a11); +#else +#define xfs_dir_trace_g_du(w,d,u) +#define xfs_dir_trace_g_dub(w,d,u,b) +#define xfs_dir_trace_g_dun(w,d,u,n) +#define xfs_dir_trace_g_dul(w,d,u,l) +#define xfs_dir_trace_g_due(w,d,u,e) +#define xfs_dir_trace_g_duc(w,d,u,c) +#endif /* DEBUG */ + +#endif /* __XFS_DIR_SF_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dmapi.h linux.22-ac2/fs/xfs/xfs_dmapi.h --- linux.vanilla/fs/xfs/xfs_dmapi.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dmapi.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_DMAPI_H__ +#define __XFS_DMAPI_H__ + +/* Values used to define the on-disk version of dm_attrname_t. All + * on-disk attribute names start with the 8-byte string "SGI_DMI_". + * + * In the on-disk inode, DMAPI attribute names consist of the user-provided + * name with the DMATTR_PREFIXSTRING pre-pended. This string must NEVER be + * changed. 
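
Illustration only (no such helper exists in the patch): the naming rule described above, prepending the SGI_DMI_ prefix defined just below to the user-supplied attribute name, amounts to:

    #include <string.h>

    #define DMATTR_PREFIXLEN     8
    #define DMATTR_PREFIXSTRING  "SGI_DMI_"

    /* Build the on-disk attribute name; returns 0, or -1 if it will not fit. */
    static int dmattr_ondisk_name(char *buf, size_t buflen, const char *user_name)
    {
        if (DMATTR_PREFIXLEN + strlen(user_name) + 1 > buflen)
            return -1;
        strcpy(buf, DMATTR_PREFIXSTRING);
        strcat(buf, user_name);
        return 0;
    }
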
+ */ + +#define DMATTR_PREFIXLEN 8 +#define DMATTR_PREFIXSTRING "SGI_DMI_" + +typedef enum { + DM_EVENT_INVALID = -1, + DM_EVENT_CANCEL = 0, /* not supported */ + DM_EVENT_MOUNT = 1, + DM_EVENT_PREUNMOUNT = 2, + DM_EVENT_UNMOUNT = 3, + DM_EVENT_DEBUT = 4, /* not supported */ + DM_EVENT_CREATE = 5, + DM_EVENT_CLOSE = 6, /* not supported */ + DM_EVENT_POSTCREATE = 7, + DM_EVENT_REMOVE = 8, + DM_EVENT_POSTREMOVE = 9, + DM_EVENT_RENAME = 10, + DM_EVENT_POSTRENAME = 11, + DM_EVENT_LINK = 12, + DM_EVENT_POSTLINK = 13, + DM_EVENT_SYMLINK = 14, + DM_EVENT_POSTSYMLINK = 15, + DM_EVENT_READ = 16, + DM_EVENT_WRITE = 17, + DM_EVENT_TRUNCATE = 18, + DM_EVENT_ATTRIBUTE = 19, + DM_EVENT_DESTROY = 20, + DM_EVENT_NOSPACE = 21, + DM_EVENT_USER = 22, + DM_EVENT_MAX = 23 +} dm_eventtype_t; +#define HAVE_DM_EVENTTYPE_T + +typedef enum { + DM_RIGHT_NULL, + DM_RIGHT_SHARED, + DM_RIGHT_EXCL +} dm_right_t; +#define HAVE_DM_RIGHT_T + +/* Defines for determining if an event message should be sent. */ +#define DM_EVENT_ENABLED(vfsp, ip, event) ( \ + unlikely ((vfsp)->vfs_flag & VFS_DMI) && \ + ( ((ip)->i_d.di_dmevmask & (1 << event)) || \ + ((ip)->i_mount->m_dmevmask & (1 << event)) ) \ + ) + +#define DM_EVENT_ENABLED_IO(vfsp, io, event) ( \ + unlikely ((vfsp)->vfs_flag & VFS_DMI) && \ + ( ((io)->io_dmevmask & (1 << event)) || \ + ((io)->io_mount->m_dmevmask & (1 << event)) ) \ + ) + +#define DM_XFS_VALID_FS_EVENTS ( \ + (1 << DM_EVENT_PREUNMOUNT) | \ + (1 << DM_EVENT_UNMOUNT) | \ + (1 << DM_EVENT_NOSPACE) | \ + (1 << DM_EVENT_DEBUT) | \ + (1 << DM_EVENT_CREATE) | \ + (1 << DM_EVENT_POSTCREATE) | \ + (1 << DM_EVENT_REMOVE) | \ + (1 << DM_EVENT_POSTREMOVE) | \ + (1 << DM_EVENT_RENAME) | \ + (1 << DM_EVENT_POSTRENAME) | \ + (1 << DM_EVENT_LINK) | \ + (1 << DM_EVENT_POSTLINK) | \ + (1 << DM_EVENT_SYMLINK) | \ + (1 << DM_EVENT_POSTSYMLINK) | \ + (1 << DM_EVENT_ATTRIBUTE) | \ + (1 << DM_EVENT_DESTROY) ) + +/* Events valid in dm_set_eventlist() when called with a file handle for + a regular file or a symlink. These events are persistent. +*/ + +#define DM_XFS_VALID_FILE_EVENTS ( \ + (1 << DM_EVENT_ATTRIBUTE) | \ + (1 << DM_EVENT_DESTROY) ) + +/* Events valid in dm_set_eventlist() when called with a file handle for + a directory. These events are persistent. +*/ + +#define DM_XFS_VALID_DIRECTORY_EVENTS ( \ + (1 << DM_EVENT_CREATE) | \ + (1 << DM_EVENT_POSTCREATE) | \ + (1 << DM_EVENT_REMOVE) | \ + (1 << DM_EVENT_POSTREMOVE) | \ + (1 << DM_EVENT_RENAME) | \ + (1 << DM_EVENT_POSTRENAME) | \ + (1 << DM_EVENT_LINK) | \ + (1 << DM_EVENT_POSTLINK) | \ + (1 << DM_EVENT_SYMLINK) | \ + (1 << DM_EVENT_POSTSYMLINK) | \ + (1 << DM_EVENT_ATTRIBUTE) | \ + (1 << DM_EVENT_DESTROY) ) + +/* Events supported by the XFS filesystem. */ +#define DM_XFS_SUPPORTED_EVENTS ( \ + (1 << DM_EVENT_MOUNT) | \ + (1 << DM_EVENT_PREUNMOUNT) | \ + (1 << DM_EVENT_UNMOUNT) | \ + (1 << DM_EVENT_NOSPACE) | \ + (1 << DM_EVENT_CREATE) | \ + (1 << DM_EVENT_POSTCREATE) | \ + (1 << DM_EVENT_REMOVE) | \ + (1 << DM_EVENT_POSTREMOVE) | \ + (1 << DM_EVENT_RENAME) | \ + (1 << DM_EVENT_POSTRENAME) | \ + (1 << DM_EVENT_LINK) | \ + (1 << DM_EVENT_POSTLINK) | \ + (1 << DM_EVENT_SYMLINK) | \ + (1 << DM_EVENT_POSTSYMLINK) | \ + (1 << DM_EVENT_READ) | \ + (1 << DM_EVENT_WRITE) | \ + (1 << DM_EVENT_TRUNCATE) | \ + (1 << DM_EVENT_ATTRIBUTE) | \ + (1 << DM_EVENT_DESTROY) ) + + +/* + * Definitions used for the flags field on dm_send_*_event(). 
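
A stand-alone illustration, not patch code: every mask above carries one bit per dm_eventtype_t value, and DM_EVENT_ENABLED is a bit-test against such masks plus a per-mount flag check. The bit-test part, with a cut-down copy of the event numbering:

    #include <stdio.h>

    enum { EV_REMOVE = 8, EV_ATTRIBUTE = 19, EV_DESTROY = 20 };  /* subset of dm_eventtype_t */

    #define VALID_FILE_EVENTS  ((1u << EV_ATTRIBUTE) | (1u << EV_DESTROY))

    /* Same shape as the DM_EVENT_ENABLED test: one bit per event. */
    static int event_wanted(unsigned mask, int event)
    {
        return (mask & (1u << event)) != 0;
    }

    int main(void)
    {
        printf("ATTRIBUTE on a file: %d\n", event_wanted(VALID_FILE_EVENTS, EV_ATTRIBUTE)); /* 1 */
        printf("REMOVE on a file:    %d\n", event_wanted(VALID_FILE_EVENTS, EV_REMOVE));    /* 0 */
        return 0;
    }
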
+ */ + +#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */ +#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */ + +/* + * Macros to turn caller specified delay/block flags into + * dm_send_xxxx_event flag DM_FLAGS_NDELAY. + */ + +#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \ + DM_FLAGS_NDELAY : 0) +#define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0) + +/* + * Macros to turn caller specified delay/block flags into + * dm_send_xxxx_event flag DM_FLAGS_NDELAY. + */ + +#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \ + DM_FLAGS_NDELAY : 0) + + +extern struct bhv_vfsops xfs_dmops; + +extern int dmapi_init(void); +extern void dmapi_uninit(void); + +#endif /* __XFS_DMAPI_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_dmops.c linux.22-ac2/fs/xfs/xfs_dmops.c --- linux.vanilla/fs/xfs/xfs_dmops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_dmops.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" + + +#ifndef CONFIG_XFS_DMAPI +xfs_dmops_t xfs_dmcore_xfs = { + .xfs_send_data = (xfs_send_data_t)fs_nosys, + .xfs_send_mmap = (xfs_send_mmap_t)fs_noerr, + .xfs_send_destroy = (xfs_send_destroy_t)fs_nosys, + .xfs_send_namesp = (xfs_send_namesp_t)fs_nosys, + .xfs_send_unmount = (xfs_send_unmount_t)fs_noval, +}; +#endif /* CONFIG_XFS_DMAPI */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_error.c linux.22-ac2/fs/xfs/xfs_error.c --- linux.vanilla/fs/xfs/xfs_error.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_error.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,328 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. 
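
Aside, not from the patch: xfs_dmcore_xfs above is the usual way to compile a feature out while keeping call sites unchanged: the ops vector stays, and every entry points at a stub (fs_nosys, fs_noerr, fs_noval). A generic sketch of the same pattern with hypothetical names (dm_ops, stub_nosys, stub_noval stand in for the patch's types and helpers):

    #include <errno.h>
    #include <stdio.h>

    struct dm_ops {
        int  (*send_data)(void *obj, long off, long len);
        void (*send_unmount)(void *obj);
    };

    /* Stubs used when the feature is disabled: fail, or do nothing. */
    static int stub_nosys(void *obj, long off, long len)
    {
        (void)obj; (void)off; (void)len;
        return -ENOSYS;
    }
    static void stub_noval(void *obj) { (void)obj; }

    static const struct dm_ops dm_disabled_ops = {
        .send_data    = stub_nosys,
        .send_unmount = stub_noval,
    };

    int main(void)
    {
        /* Callers always go through the table; no #ifdefs at the call sites. */
        printf("%d\n", dm_disabled_ops.send_data(NULL, 0, 0));  /* prints -ENOSYS */
        return 0;
    }
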
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_sb.h" +#include "xfs_trans.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_utils.h" +#include "xfs_error.h" + +#ifdef DEBUG + +int xfs_etrap[XFS_ERROR_NTRAP] = { + 0, +}; + +int +xfs_error_trap(int e) +{ + int i; + + if (!e) + return 0; + for (i = 0; i < XFS_ERROR_NTRAP; i++) { + if (xfs_etrap[i] == 0) + break; + if (e != xfs_etrap[i]) + continue; + cmn_err(CE_NOTE, "xfs_error_trap: error %d", e); + debug_stop_all_cpus((void *)-1LL); + BUG(); + break; + } + return e; +} +#endif + +#if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) + +int xfs_etest[XFS_NUM_INJECT_ERROR]; +int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; +char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR]; + +void +xfs_error_test_init(void) +{ + memset(xfs_etest, 0, sizeof(xfs_etest)); + memset(xfs_etest_fsid, 0, sizeof(xfs_etest_fsid)); + memset(xfs_etest_fsname, 0, sizeof(xfs_etest_fsname)); +} + +int +xfs_error_test(int error_tag, int *fsidp, char *expression, + int line, char *file, unsigned long randfactor) +{ + int i; + int64_t fsid; + + if (random() % randfactor) + return 0; + + memcpy(&fsid, fsidp, sizeof(fsid_t)); + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) { + cmn_err(CE_WARN, + "Injecting error (%s) at file %s, line %d, on filesystem \"%s\"", + expression, file, line, xfs_etest_fsname[i]); + return 1; + } + } + + return 0; +} + +int +xfs_errortag_add(int error_tag, xfs_mount_t *mp) +{ + int i; + int len; + int64_t fsid; + + memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t)); + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) { + cmn_err(CE_WARN, "XFS error tag #%d on", error_tag); + return 0; + } + } + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if (xfs_etest[i] == 0) { + cmn_err(CE_WARN, "Turned on XFS error tag #%d", + error_tag); + xfs_etest[i] = error_tag; + xfs_etest_fsid[i] = fsid; + 
len = strlen(mp->m_fsname); + xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP); + strcpy(xfs_etest_fsname[i], mp->m_fsname); + return 0; + } + } + + cmn_err(CE_WARN, "error tag overflow, too many turned on"); + + return 1; +} + +int +xfs_errortag_clear(int error_tag, xfs_mount_t *mp) +{ + int i; + int64_t fsid; + + memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t)); + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) { + xfs_etest[i] = 0; + xfs_etest_fsid[i] = 0LL; + kmem_free(xfs_etest_fsname[i], + strlen(xfs_etest_fsname[i]) + 1); + xfs_etest_fsname[i] = NULL; + cmn_err(CE_WARN, "Cleared XFS error tag #%d", + error_tag); + return 0; + } + } + + cmn_err(CE_WARN, "XFS error tag %d not on", error_tag); + + return 1; +} + +int +xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud) +{ + int i; + int cleared = 0; + + for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { + if ((fsid == 0LL || xfs_etest_fsid[i] == fsid) && + xfs_etest[i] != 0) { + cleared = 1; + cmn_err(CE_WARN, "Clearing XFS error tag #%d", + xfs_etest[i]); + xfs_etest[i] = 0; + xfs_etest_fsid[i] = 0LL; + kmem_free(xfs_etest_fsname[i], + strlen(xfs_etest_fsname[i]) + 1); + xfs_etest_fsname[i] = NULL; + } + } + + if (loud || cleared) + cmn_err(CE_WARN, + "Cleared all XFS error tags for filesystem \"%s\"", + fsname); + + return 0; +} + +int +xfs_errortag_clearall(xfs_mount_t *mp) +{ + int64_t fsid; + + memcpy(&fsid, mp->m_fixedfsid, sizeof(fsid_t)); + + return xfs_errortag_clearall_umount(fsid, mp->m_fsname, 1); +} +#endif /* DEBUG || INDUCE_IO_ERROR */ + +static void +xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap) +{ + char *newfmt; + int len = 16 + mp->m_fsname_len + strlen(fmt); + + newfmt = kmem_alloc(len, KM_SLEEP); + sprintf(newfmt, "Filesystem \"%s\": %s", mp->m_fsname, fmt); + icmn_err(level, newfmt, ap); + kmem_free(newfmt, len); +} + +void +xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + xfs_fs_vcmn_err(level, mp, fmt, ap); + va_end(ap); +} + +void +xfs_cmn_err(uint64_t panic_tag, int level, xfs_mount_t *mp, char *fmt, ...) +{ + va_list ap; + +#ifdef DEBUG + xfs_panic_mask |= XFS_PTAG_SHUTDOWN_CORRUPT; +#endif + + if (xfs_panic_mask && (xfs_panic_mask & panic_tag) + && (level & CE_ALERT)) { + level &= ~CE_ALERT; + level |= CE_PANIC; + cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG."); + } + va_start(ap, fmt); + xfs_fs_vcmn_err(level, mp, fmt, ap); + va_end(ap); +} + +void +xfs_error_report( + char *tag, + int level, + xfs_mount_t *mp, + char *fname, + int linenum, + inst_t *ra) +{ + if (level <= xfs_error_level) { + if (mp != NULL) { + xfs_cmn_err(XFS_PTAG_ERROR_REPORT, + CE_ALERT, mp, + "XFS internal error %s at line %d of file %s. Caller 0x%p\n", + tag, linenum, fname, ra); + } else { + cmn_err(CE_ALERT, + "XFS internal error %s at line %d of file %s. 
Caller 0x%p\n", + tag, linenum, fname, ra); + } + + xfs_stack_trace(); + } +} + +void +xfs_hex_dump(void *p, int length) +{ + __uint8_t *uip = (__uint8_t*)p; + int i; + char sbuf[128], *s; + + s = sbuf; + *s = '\0'; + for (i=0; i= KERNEL_VERSION(2,4,20) + dump_stack(); +#else + #if defined(CONFIG_X86) || defined(CONFIG_X86_64) + show_stack(0); + #else + return; + #endif +#endif +} + +#define XFS_ERROR_REPORT(e, lvl, mp) \ + xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address) +#define XFS_CORRUPTION_ERROR(e, lvl, mp, mem) \ + xfs_corruption_error(e, lvl, mp, mem, \ + __FILE__, __LINE__, __return_address) + +#define XFS_ERRLEVEL_OFF 0 +#define XFS_ERRLEVEL_LOW 1 +#define XFS_ERRLEVEL_HIGH 5 + +/* + * error injection tags - the labels can be anything you want + * but each tag should have its own unique number + */ + +#define XFS_ERRTAG_NOERROR 0 +#define XFS_ERRTAG_IFLUSH_1 1 +#define XFS_ERRTAG_IFLUSH_2 2 +#define XFS_ERRTAG_IFLUSH_3 3 +#define XFS_ERRTAG_IFLUSH_4 4 +#define XFS_ERRTAG_IFLUSH_5 5 +#define XFS_ERRTAG_IFLUSH_6 6 +#define XFS_ERRTAG_DA_READ_BUF 7 +#define XFS_ERRTAG_BTREE_CHECK_LBLOCK 8 +#define XFS_ERRTAG_BTREE_CHECK_SBLOCK 9 +#define XFS_ERRTAG_ALLOC_READ_AGF 10 +#define XFS_ERRTAG_IALLOC_READ_AGI 11 +#define XFS_ERRTAG_ITOBP_INOTOBP 12 +#define XFS_ERRTAG_IUNLINK 13 +#define XFS_ERRTAG_IUNLINK_REMOVE 14 +#define XFS_ERRTAG_DIR_INO_VALIDATE 15 +#define XFS_ERRTAG_BULKSTAT_READ_CHUNK 16 +#define XFS_ERRTAG_IODONE_IOERR 17 +#define XFS_ERRTAG_STRATREAD_IOERR 18 +#define XFS_ERRTAG_STRATCMPL_IOERR 19 +#define XFS_ERRTAG_DIOWRITE_IOERR 20 +#define XFS_ERRTAG_BMAPIFORMAT 21 +#define XFS_ERRTAG_MAX 22 + +/* + * Random factors for above tags, 1 means always, 2 means 1/2 time, etc. + */ +#define XFS_RANDOM_DEFAULT 100 +#define XFS_RANDOM_IFLUSH_1 XFS_RANDOM_DEFAULT +#define XFS_RANDOM_IFLUSH_2 XFS_RANDOM_DEFAULT +#define XFS_RANDOM_IFLUSH_3 XFS_RANDOM_DEFAULT +#define XFS_RANDOM_IFLUSH_4 XFS_RANDOM_DEFAULT +#define XFS_RANDOM_IFLUSH_5 XFS_RANDOM_DEFAULT +#define XFS_RANDOM_IFLUSH_6 XFS_RANDOM_DEFAULT +#define XFS_RANDOM_DA_READ_BUF XFS_RANDOM_DEFAULT +#define XFS_RANDOM_BTREE_CHECK_LBLOCK (XFS_RANDOM_DEFAULT/4) +#define XFS_RANDOM_BTREE_CHECK_SBLOCK XFS_RANDOM_DEFAULT +#define XFS_RANDOM_ALLOC_READ_AGF XFS_RANDOM_DEFAULT +#define XFS_RANDOM_IALLOC_READ_AGI XFS_RANDOM_DEFAULT +#define XFS_RANDOM_ITOBP_INOTOBP XFS_RANDOM_DEFAULT +#define XFS_RANDOM_IUNLINK XFS_RANDOM_DEFAULT +#define XFS_RANDOM_IUNLINK_REMOVE XFS_RANDOM_DEFAULT +#define XFS_RANDOM_DIR_INO_VALIDATE XFS_RANDOM_DEFAULT +#define XFS_RANDOM_BULKSTAT_READ_CHUNK XFS_RANDOM_DEFAULT +#define XFS_RANDOM_IODONE_IOERR (XFS_RANDOM_DEFAULT/10) +#define XFS_RANDOM_STRATREAD_IOERR (XFS_RANDOM_DEFAULT/10) +#define XFS_RANDOM_STRATCMPL_IOERR (XFS_RANDOM_DEFAULT/10) +#define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10) +#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT + +#if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) +extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); +void xfs_error_test_init(void); + +#define XFS_NUM_INJECT_ERROR 10 + +#ifdef __ANSI_CPP__ +#define XFS_TEST_ERROR(expr, mp, tag, rf) \ + ((expr) || \ + xfs_error_test((tag), (mp)->m_fixedfsid, #expr, __LINE__, __FILE__, \ + (rf))) +#else +#define XFS_TEST_ERROR(expr, mp, tag, rf) \ + ((expr) || \ + xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \ + (rf))) +#endif /* __ANSI_CPP__ */ + +int xfs_errortag_add(int error_tag, xfs_mount_t *mp); +int xfs_errortag_clear(int error_tag, xfs_mount_t *mp); + +int 
xfs_errortag_clearall(xfs_mount_t *mp); +int xfs_errortag_clearall_umount(int64_t fsid, char *fsname, + int loud); +#else +#define XFS_TEST_ERROR(expr, mp, tag, rf) (expr) +#define xfs_errortag_add(tag, mp) (ENOSYS) +#define xfs_errortag_clearall(mp) (ENOSYS) +#endif /* (DEBUG || INDUCE_IO_ERROR) */ + +/* + * XFS panic tags -- allow a call to xfs_cmn_err() be turned into + * a panic by setting xfs_panic_mask in a + * sysctl. update xfs_max[XFS_PARAM] if + * more are added. + */ +#define XFS_NO_PTAG 0LL +#define XFS_PTAG_IFLUSH 0x0000000000000001LL +#define XFS_PTAG_LOGRES 0x0000000000000002LL +#define XFS_PTAG_AILDELETE 0x0000000000000004LL +#define XFS_PTAG_ERROR_REPORT 0x0000000000000008LL +#define XFS_PTAG_SHUTDOWN_CORRUPT 0x0000000000000010LL +#define XFS_PTAG_SHUTDOWN_IOERROR 0x0000000000000020LL +#define XFS_PTAG_SHUTDOWN_LOGERROR 0x0000000000000040LL + +struct xfs_mount; +/* PRINTFLIKE4 */ +void xfs_cmn_err(uint64_t panic_tag, int level, struct xfs_mount *mp, + char *fmt, ...); +/* PRINTFLIKE3 */ +void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...); + +#endif /* __XFS_ERROR_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_extfree_item.c linux.22-ac2/fs/xfs/xfs_extfree_item.c --- linux.vanilla/fs/xfs/xfs_extfree_item.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_extfree_item.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,668 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * This file contains the implementation of the xfs_efi_log_item + * and xfs_efd_log_item items. + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_extfree_item.h" + + +kmem_zone_t *xfs_efi_zone; +kmem_zone_t *xfs_efd_zone; + +STATIC void xfs_efi_item_unlock(xfs_efi_log_item_t *); +STATIC void xfs_efi_item_abort(xfs_efi_log_item_t *); +STATIC void xfs_efd_item_abort(xfs_efd_log_item_t *); + + + +/* + * This returns the number of iovecs needed to log the given efi item. 
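
Illustration, not patch code: XFS_TEST_ERROR above wraps an ordinary error check so that, when the tag is armed for that filesystem (via the XFS_IOC_ERROR_INJECTION ioctl declared later in this patch), the check can also fire artificially about once every rf evaluations. Stripped of the per-filesystem bookkeeping, the idea is:

    #include <stdlib.h>

    /*
     * 'real_error' is the normal check; 'tag_armed' says injection is enabled
     * for this tag; failures are injected roughly once per 'randfactor' calls.
     */
    static int test_error(int real_error, int tag_armed, unsigned randfactor)
    {
        if (real_error)
            return 1;
        if (tag_armed && (unsigned)rand() % randfactor == 0)
            return 1;     /* injected failure */
        return 0;
    }
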
+ * We only need 1 iovec for an efi item. It just logs the efi_log_format + * structure. + */ +/*ARGSUSED*/ +STATIC uint +xfs_efi_item_size(xfs_efi_log_item_t *efip) +{ + return 1; +} + +/* + * This is called to fill in the vector of log iovecs for the + * given efi log item. We use only 1 iovec, and we point that + * at the efi_log_format structure embedded in the efi item. + * It is at this point that we assert that all of the extent + * slots in the efi item have been filled. + */ +STATIC void +xfs_efi_item_format(xfs_efi_log_item_t *efip, + xfs_log_iovec_t *log_vector) +{ + uint size; + + ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents); + + efip->efi_format.efi_type = XFS_LI_EFI; + + size = sizeof(xfs_efi_log_format_t); + size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t); + efip->efi_format.efi_size = 1; + + log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format); + log_vector->i_len = size; + ASSERT(size >= sizeof(xfs_efi_log_format_t)); +} + + +/* + * Pinning has no meaning for an efi item, so just return. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_pin(xfs_efi_log_item_t *efip) +{ + return; +} + + +/* + * While EFIs cannot really be pinned, the unpin operation is the + * last place at which the EFI is manipulated during a transaction. + * Here we coordinate with xfs_efi_cancel() to determine who gets to + * free the EFI. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) +{ + int nexts; + int size; + xfs_mount_t *mp; + SPLDECL(s); + + mp = efip->efi_item.li_mountp; + AIL_LOCK(mp, s); + if (efip->efi_flags & XFS_EFI_CANCELED) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); + + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } else { + efip->efi_flags |= XFS_EFI_COMMITTED; + AIL_UNLOCK(mp, s); + } + + return; +} + +/* + * like unpin only we have to also clear the xaction descriptor + * pointing the log item if we free the item. This routine duplicates + * unpin because efi_flags is protected by the AIL lock. Freeing + * the descriptor and then calling unpin would force us to drop the AIL + * lock which would open up a race condition. + */ +STATIC void +xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) +{ + int nexts; + int size; + xfs_mount_t *mp; + xfs_log_item_desc_t *lidp; + SPLDECL(s); + + mp = efip->efi_item.li_mountp; + AIL_LOCK(mp, s); + if (efip->efi_flags & XFS_EFI_CANCELED) { + /* + * free the xaction descriptor pointing to this item + */ + lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) efip); + xfs_trans_free_item(tp, lidp); + /* + * pull the item off the AIL. + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); + /* + * now free the item itself + */ + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } else { + efip->efi_flags |= XFS_EFI_COMMITTED; + AIL_UNLOCK(mp, s); + } + + return; +} + +/* + * Efi items have no locking or pushing. 
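
As a stand-alone restatement (not from the patch): xfs_efi_item_format above builds one iovec whose length is the format structure plus (nextents - 1) further extents, the trailing [1]-sized array accounting for the first:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t start; uint32_t len; } extent_t;   /* cf. xfs_extent_t */
    typedef struct {
        uint16_t type;
        uint16_t size;
        uint32_t nextents;
        uint64_t id;
        extent_t extents[1];          /* really 'nextents' entries */
    } efi_log_format_t;

    /* One iovec covers the header and all extents; nextents must be > 0. */
    static size_t efi_format_size(uint32_t nextents)
    {
        return sizeof(efi_log_format_t) + (nextents - 1) * sizeof(extent_t);
    }

    int main(void)
    {
        printf("%zu bytes to log 4 extents\n", efi_format_size(4));
        return 0;
    }
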
However, since EFIs are + * pulled from the AIL when their corresponding EFDs are committed + * to disk, their situation is very similar to being pinned. Return + * XFS_ITEM_PINNED so that the caller will eventually flush the log. + * This should help in getting the EFI out of the AIL. + */ +/*ARGSUSED*/ +STATIC uint +xfs_efi_item_trylock(xfs_efi_log_item_t *efip) +{ + return XFS_ITEM_PINNED; +} + +/* + * Efi items have no locking, so just return. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_unlock(xfs_efi_log_item_t *efip) +{ + if (efip->efi_item.li_flags & XFS_LI_ABORTED) + xfs_efi_item_abort(efip); + return; +} + +/* + * The EFI is logged only once and cannot be moved in the log, so + * simply return the lsn at which it's been logged. The canceled + * flag is not paid any attention here. Checking for that is delayed + * until the EFI is unpinned. + */ +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn) +{ + return lsn; +} + +/* + * This is called when the transaction logging the EFI is aborted. + * Free up the EFI and return. No need to clean up the slot for + * the item in the transaction. That was done by the unpin code + * which is called prior to this routine in the abort/fs-shutdown path. + */ +STATIC void +xfs_efi_item_abort(xfs_efi_log_item_t *efip) +{ + int nexts; + int size; + + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + return; +} + +/* + * There isn't much you can do to push on an efi item. It is simply + * stuck waiting for all of its corresponding efd items to be + * committed to disk. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_push(xfs_efi_log_item_t *efip) +{ + return; +} + +/* + * The EFI dependency tracking op doesn't do squat. It can't because + * it doesn't know where the free extent is coming from. The dependency + * tracking has to be handled by the "enclosing" metadata object. For + * example, for inodes, the inode is locked throughout the extent freeing + * so the dependency should be recorded there. + */ +/*ARGSUSED*/ +STATIC void +xfs_efi_item_committing(xfs_efi_log_item_t *efip, xfs_lsn_t lsn) +{ + return; +} + +/* + * This is the ops vector shared by all efi log items. + */ +struct xfs_item_ops xfs_efi_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_efi_item_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_efi_item_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_efi_item_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_efi_item_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *)) + xfs_efi_item_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efi_item_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_efi_item_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_efi_item_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_efi_item_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_efi_item_committing +}; + + +/* + * Allocate and initialize an efi item with the given number of extents. 
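
Aside, not patch code: xfs_efi_init, which follows, sizes the item two ways: up to XFS_EFI_MAX_FAST_EXTENTS it takes a fixed-size object from a kmem zone, beyond that it allocates exactly what the extent count needs. The same policy in user-space form, with calloc standing in for both kmem_zone_zalloc and kmem_zalloc:

    #include <stdlib.h>

    #define MAX_FAST_EXTENTS 16

    typedef struct { unsigned long start; unsigned len; } extent_t;
    typedef struct {
        unsigned flags;
        unsigned next_extent;
        unsigned nextents;
        extent_t extents[1];          /* grows to 'nextents' entries */
    } efi_item_t;

    static efi_item_t *efi_alloc(unsigned nextents)
    {
        size_t size = sizeof(efi_item_t);

        if (nextents > MAX_FAST_EXTENTS)
            size += (nextents - 1) * sizeof(extent_t);         /* exact fit */
        else
            size += (MAX_FAST_EXTENTS - 1) * sizeof(extent_t); /* fixed "zone" size */
        return calloc(1, size);
    }
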
+ */ +xfs_efi_log_item_t * +xfs_efi_init(xfs_mount_t *mp, + uint nextents) + +{ + xfs_efi_log_item_t *efip; + uint size; + + ASSERT(nextents > 0); + if (nextents > XFS_EFI_MAX_FAST_EXTENTS) { + size = (uint)(sizeof(xfs_efi_log_item_t) + + ((nextents - 1) * sizeof(xfs_extent_t))); + efip = (xfs_efi_log_item_t*)kmem_zalloc(size, KM_SLEEP); + } else { + efip = (xfs_efi_log_item_t*)kmem_zone_zalloc(xfs_efi_zone, + KM_SLEEP); + } + + efip->efi_item.li_type = XFS_LI_EFI; + efip->efi_item.li_ops = &xfs_efi_item_ops; + efip->efi_item.li_mountp = mp; + efip->efi_format.efi_nextents = nextents; + efip->efi_format.efi_id = (__psint_t)(void*)efip; + + return (efip); +} + +/* + * This is called by the efd item code below to release references to + * the given efi item. Each efd calls this with the number of + * extents that it has logged, and when the sum of these reaches + * the total number of extents logged by this efi item we can free + * the efi item. + * + * Freeing the efi item requires that we remove it from the AIL. + * We'll use the AIL lock to protect our counters as well as + * the removal from the AIL. + */ +void +xfs_efi_release(xfs_efi_log_item_t *efip, + uint nextents) +{ + xfs_mount_t *mp; + int extents_left; + uint size; + int nexts; + SPLDECL(s); + + mp = efip->efi_item.li_mountp; + ASSERT(efip->efi_next_extent > 0); + ASSERT(efip->efi_flags & XFS_EFI_COMMITTED); + + AIL_LOCK(mp, s); + ASSERT(efip->efi_next_extent >= nextents); + efip->efi_next_extent -= nextents; + extents_left = efip->efi_next_extent; + if (extents_left == 0) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); + } else { + AIL_UNLOCK(mp, s); + } + + if (extents_left == 0) { + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } +} + +/* + * This is called when the transaction that should be committing the + * EFD corresponding to the given EFI is aborted. The committed and + * canceled flags are used to coordinate the freeing of the EFI and + * the references by the transaction that committed it. + */ +STATIC void +xfs_efi_cancel( + xfs_efi_log_item_t *efip) +{ + int nexts; + int size; + xfs_mount_t *mp; + SPLDECL(s); + + mp = efip->efi_item.li_mountp; + AIL_LOCK(mp, s); + if (efip->efi_flags & XFS_EFI_COMMITTED) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); + + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efi_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efip, size); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } else { + efip->efi_flags |= XFS_EFI_CANCELED; + AIL_UNLOCK(mp, s); + } + + return; +} + + + + + +/* + * This returns the number of iovecs needed to log the given efd item. + * We only need 1 iovec for an efd item. It just logs the efd_log_format + * structure. + */ +/*ARGSUSED*/ +STATIC uint +xfs_efd_item_size(xfs_efd_log_item_t *efdp) +{ + return 1; +} + +/* + * This is called to fill in the vector of log iovecs for the + * given efd log item. We use only 1 iovec, and we point that + * at the efd_log_format structure embedded in the efd item. + * It is at this point that we assert that all of the extent + * slots in the efd item have been filled. 
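
Illustration only: xfs_efi_release above is a countdown: each EFD accounts for some of the extents the EFI logged, and when the count reaches zero the item is pulled from the AIL and freed. Without the AIL locking, the bookkeeping reduces to:

    #include <stdlib.h>

    struct efi { unsigned extents_left; };

    /* Returns 1 if this call accounted for the last extent and freed the item. */
    static int efi_release(struct efi *efi, unsigned nextents)
    {
        if (nextents > efi->extents_left)   /* the kernel ASSERTs instead */
            nextents = efi->extents_left;
        efi->extents_left -= nextents;
        if (efi->extents_left == 0) {
            free(efi);
            return 1;
        }
        return 0;
    }
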
+ */ +STATIC void +xfs_efd_item_format(xfs_efd_log_item_t *efdp, + xfs_log_iovec_t *log_vector) +{ + uint size; + + ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents); + + efdp->efd_format.efd_type = XFS_LI_EFD; + + size = sizeof(xfs_efd_log_format_t); + size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t); + efdp->efd_format.efd_size = 1; + + log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format); + log_vector->i_len = size; + ASSERT(size >= sizeof(xfs_efd_log_format_t)); +} + + +/* + * Pinning has no meaning for an efd item, so just return. + */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_pin(xfs_efd_log_item_t *efdp) +{ + return; +} + + +/* + * Since pinning has no meaning for an efd item, unpinning does + * not either. + */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_unpin(xfs_efd_log_item_t *efdp, int stale) +{ + return; +} + +/*ARGSUSED*/ +STATIC void +xfs_efd_item_unpin_remove(xfs_efd_log_item_t *efdp, xfs_trans_t *tp) +{ + return; +} + +/* + * Efd items have no locking, so just return success. + */ +/*ARGSUSED*/ +STATIC uint +xfs_efd_item_trylock(xfs_efd_log_item_t *efdp) +{ + return XFS_ITEM_LOCKED; +} + +/* + * Efd items have no locking or pushing, so return failure + * so that the caller doesn't bother with us. + */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_unlock(xfs_efd_log_item_t *efdp) +{ + if (efdp->efd_item.li_flags & XFS_LI_ABORTED) + xfs_efd_item_abort(efdp); + return; +} + +/* + * When the efd item is committed to disk, all we need to do + * is delete our reference to our partner efi item and then + * free ourselves. Since we're freeing ourselves we must + * return -1 to keep the transaction code from further referencing + * this item. + */ +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn) +{ + uint size; + int nexts; + + /* + * If we got a log I/O error, it's always the case that the LR with the + * EFI got unpinned and freed before the EFD got aborted. + */ + if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0) + xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents); + + nexts = efdp->efd_format.efd_nextents; + if (nexts > XFS_EFD_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efd_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efdp, size); + } else { + kmem_zone_free(xfs_efd_zone, efdp); + } + + return (xfs_lsn_t)-1; +} + +/* + * The transaction of which this EFD is a part has been aborted. + * Inform its companion EFI of this fact and then clean up after + * ourselves. No need to clean up the slot for the item in the + * transaction. That was done by the unpin code which is called + * prior to this routine in the abort/fs-shutdown path. + */ +STATIC void +xfs_efd_item_abort(xfs_efd_log_item_t *efdp) +{ + int nexts; + int size; + + /* + * If we got a log I/O error, it's always the case that the LR with the + * EFI got unpinned and freed before the EFD got aborted. So don't + * reference the EFI at all in that case. + */ + if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0) + xfs_efi_cancel(efdp->efd_efip); + + nexts = efdp->efd_format.efd_nextents; + if (nexts > XFS_EFD_MAX_FAST_EXTENTS) { + size = sizeof(xfs_efd_log_item_t); + size += (nexts - 1) * sizeof(xfs_extent_t); + kmem_free(efdp, size); + } else { + kmem_zone_free(xfs_efd_zone, efdp); + } + return; +} + +/* + * There isn't much you can do to push on an efd item. It is simply + * stuck waiting for the log to be flushed to disk. 
+ */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_push(xfs_efd_log_item_t *efdp) +{ + return; +} + +/* + * The EFD dependency tracking op doesn't do squat. It can't because + * it doesn't know where the free extent is coming from. The dependency + * tracking has to be handled by the "enclosing" metadata object. For + * example, for inodes, the inode is locked throughout the extent freeing + * so the dependency should be recorded there. + */ +/*ARGSUSED*/ +STATIC void +xfs_efd_item_committing(xfs_efd_log_item_t *efip, xfs_lsn_t lsn) +{ + return; +} + +/* + * This is the ops vector shared by all efd log items. + */ +struct xfs_item_ops xfs_efd_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_efd_item_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_efd_item_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_efd_item_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_efd_item_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) + xfs_efd_item_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efd_item_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_efd_item_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_efd_item_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_efd_item_abort, + .iop_pushbuf = NULL, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_efd_item_committing +}; + + +/* + * Allocate and initialize an efd item with the given number of extents. + */ +xfs_efd_log_item_t * +xfs_efd_init(xfs_mount_t *mp, + xfs_efi_log_item_t *efip, + uint nextents) + +{ + xfs_efd_log_item_t *efdp; + uint size; + + ASSERT(nextents > 0); + if (nextents > XFS_EFD_MAX_FAST_EXTENTS) { + size = (uint)(sizeof(xfs_efd_log_item_t) + + ((nextents - 1) * sizeof(xfs_extent_t))); + efdp = (xfs_efd_log_item_t*)kmem_zalloc(size, KM_SLEEP); + } else { + efdp = (xfs_efd_log_item_t*)kmem_zone_zalloc(xfs_efd_zone, + KM_SLEEP); + } + + efdp->efd_item.li_type = XFS_LI_EFD; + efdp->efd_item.li_ops = &xfs_efd_item_ops; + efdp->efd_item.li_mountp = mp; + efdp->efd_efip = efip; + efdp->efd_format.efd_nextents = nextents; + efdp->efd_format.efd_efi_id = efip->efi_format.efi_id; + + return (efdp); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_extfree_item.h linux.22-ac2/fs/xfs/xfs_extfree_item.h --- linux.vanilla/fs/xfs/xfs_extfree_item.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_extfree_item.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_EXTFREE_ITEM_H__ +#define __XFS_EXTFREE_ITEM_H__ + +struct xfs_mount; +struct kmem_zone; + +typedef struct xfs_extent { + xfs_dfsbno_t ext_start; + xfs_extlen_t ext_len; +} xfs_extent_t; + +/* + * This is the structure used to lay out an efi log item in the + * log. The efi_extents field is a variable size array whose + * size is given by efi_nextents. + */ +typedef struct xfs_efi_log_format { + unsigned short efi_type; /* efi log item type */ + unsigned short efi_size; /* size of this item */ + uint efi_nextents; /* # extents to free */ + __uint64_t efi_id; /* efi identifier */ + xfs_extent_t efi_extents[1]; /* array of extents to free */ +} xfs_efi_log_format_t; + +/* + * This is the structure used to lay out an efd log item in the + * log. The efd_extents array is a variable size array whose + * size is given by efd_nextents; + */ +typedef struct xfs_efd_log_format { + unsigned short efd_type; /* efd log item type */ + unsigned short efd_size; /* size of this item */ + uint efd_nextents; /* # of extents freed */ + __uint64_t efd_efi_id; /* id of corresponding efi */ + xfs_extent_t efd_extents[1]; /* array of extents freed */ +} xfs_efd_log_format_t; + + +#ifdef __KERNEL__ + +/* + * Max number of extents in fast allocation path. + */ +#define XFS_EFI_MAX_FAST_EXTENTS 16 + +/* + * Define EFI flags. + */ +#define XFS_EFI_RECOVERED 0x1 +#define XFS_EFI_COMMITTED 0x2 +#define XFS_EFI_CANCELED 0x4 + +/* + * This is the "extent free intention" log item. It is used + * to log the fact that some extents need to be free. It is + * used in conjunction with the "extent free done" log item + * described below. + */ +typedef struct xfs_efi_log_item { + xfs_log_item_t efi_item; + uint efi_flags; /* misc flags */ + uint efi_next_extent; + xfs_efi_log_format_t efi_format; +} xfs_efi_log_item_t; + +/* + * This is the "extent free done" log item. It is used to log + * the fact that some extents earlier mentioned in an efi item + * have been freed. + */ +typedef struct xfs_efd_log_item { + xfs_log_item_t efd_item; + xfs_efi_log_item_t *efd_efip; + uint efd_next_extent; + xfs_efd_log_format_t efd_format; +} xfs_efd_log_item_t; + +/* + * Max number of extents in fast allocation path. + */ +#define XFS_EFD_MAX_FAST_EXTENTS 16 + +extern struct kmem_zone *xfs_efi_zone; +extern struct kmem_zone *xfs_efd_zone; + +xfs_efi_log_item_t *xfs_efi_init(struct xfs_mount *, uint); +xfs_efd_log_item_t *xfs_efd_init(struct xfs_mount *, xfs_efi_log_item_t *, + uint); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_EXTFREE_ITEM_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_fs.h linux.22-ac2/fs/xfs/xfs_fs.h --- linux.vanilla/fs/xfs/xfs_fs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_fs.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,513 @@ +/* + * Copyright (c) 1995-2003 Silicon Graphics, Inc. All Rights Reserved. 
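
Aside, not from the patch: the two log formats above form an intent/done pair: an EFI records that extents are about to be freed, the matching EFD (carrying the EFI's id) records that they were, and log recovery redoes any intent that has no matching done record. The skeleton of that sequence, with a hypothetical log_write() standing in for the real transaction code:

    #include <stdint.h>
    #include <stdio.h>

    struct efi_rec { uint64_t id; uint32_t nextents; };   /* intent */
    struct efd_rec { uint64_t efi_id; };                  /* done, points back */

    static void log_write(const char *what, uint64_t id)
    {
        printf("log: %s, id %llu\n", what, (unsigned long long)id);
    }

    static void free_extents_logged(uint64_t id, uint32_t nextents)
    {
        struct efi_rec efi = { id, nextents };
        struct efd_rec efd = { id };

        log_write("EFI (intent to free)", efi.id);
        /* ... actually free the extents between the two records ... */
        log_write("EFD (extents freed)", efd.efi_id);
    }

    int main(void)
    {
        free_extents_logged(7, 3);
        return 0;
    }
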
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, + * USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_FS_H__ +#define __XFS_FS_H__ + +/* + * SGI's XFS filesystem's major stuff (constants, structures) + */ + +#define XFS_NAME "xfs" + +/* + * Direct I/O attribute record used with XFS_IOC_DIOINFO + * d_miniosz is the min xfer size, xfer size multiple and file seek offset + * alignment. + */ +#ifndef HAVE_DIOATTR +struct dioattr { + __u32 d_mem; /* data buffer memory alignment */ + __u32 d_miniosz; /* min xfer size */ + __u32 d_maxiosz; /* max xfer size */ +}; +#endif + +/* + * Structure for XFS_IOC_FSGETXATTR[A] and XFS_IOC_FSSETXATTR. + */ +#ifndef HAVE_FSXATTR +struct fsxattr { + __u32 fsx_xflags; /* xflags field value (get/set) */ + __u32 fsx_extsize; /* extsize field value (get/set)*/ + __u32 fsx_nextents; /* nextents field value (get) */ + unsigned char fsx_pad[16]; +}; +#endif + +/* + * Flags for the bs_xflags/fsx_xflags field + * There should be a one-to-one correspondence between these flags and the + * XFS_DIFLAG_s. + */ +#define XFS_XFLAG_REALTIME 0x00000001 +#define XFS_XFLAG_PREALLOC 0x00000002 +#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ +#define XFS_XFLAG_ALL \ + ( XFS_XFLAG_REALTIME|XFS_XFLAG_PREALLOC|XFS_XFLAG_HASATTR ) + + +/* + * Structure for XFS_IOC_GETBMAP. + * On input, fill in bmv_offset and bmv_length of the first structure + * to indicate the area of interest in the file, and bmv_entry with the + * number of array elements given. The first structure is updated on + * return to give the offset and length for the next call. + */ +#ifndef HAVE_GETBMAP +struct getbmap { + __s64 bmv_offset; /* file offset of segment in blocks */ + __s64 bmv_block; /* starting block (64-bit daddr_t) */ + __s64 bmv_length; /* length of segment, blocks */ + __s32 bmv_count; /* # of entries in array incl. 1st */ + __s32 bmv_entries; /* # of entries filled in (output) */ +}; +#endif + +/* + * Structure for XFS_IOC_GETBMAPX. Fields bmv_offset through bmv_entries + * are used exactly as in the getbmap structure. The getbmapx structure + * has additional bmv_iflags and bmv_oflags fields. The bmv_iflags field + * is only used for the first structure. It contains input flags + * specifying XFS_IOC_GETBMAPX actions. 
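
User-space aside, not part of the patch: XFS_IOC_GETBMAP (declared further down in this header) is driven with an array of the getbmap structures above, element 0 being the request header and the results following it. A sketch of that calling convention, assuming long long and int line up with the kernel's __s64/__s32 on the build host:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    struct getbmap {                       /* local mirror of the struct above */
        long long bmv_offset, bmv_block, bmv_length;
        int bmv_count, bmv_entries;
    };
    #define XFS_IOC_GETBMAP _IOWR('X', 38, struct getbmap)

    int main(int argc, char **argv)
    {
        struct getbmap bmv[17];            /* bmv[0] is the header, 16 result slots */
        int fd, i;

        if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
            return 1;
        memset(bmv, 0, sizeof(bmv));
        bmv[0].bmv_length = -1;            /* whole file */
        bmv[0].bmv_count = 17;             /* array size, header included */
        if (ioctl(fd, XFS_IOC_GETBMAP, bmv) < 0) {
            perror("XFS_IOC_GETBMAP");
            return 1;
        }
        for (i = 1; i <= bmv[0].bmv_entries; i++)
            printf("offset %lld len %lld block %lld\n",
                   bmv[i].bmv_offset, bmv[i].bmv_length, bmv[i].bmv_block);
        close(fd);
        return 0;
    }
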
The bmv_oflags field is filled + * in by the XFS_IOC_GETBMAPX command for each returned structure after + * the first. + */ +#ifndef HAVE_GETBMAPX +struct getbmapx { + __s64 bmv_offset; /* file offset of segment in blocks */ + __s64 bmv_block; /* starting block (64-bit daddr_t) */ + __s64 bmv_length; /* length of segment, blocks */ + __s32 bmv_count; /* # of entries in array incl. 1st */ + __s32 bmv_entries; /* # of entries filled in (output). */ + __s32 bmv_iflags; /* input flags (1st structure) */ + __s32 bmv_oflags; /* output flags (after 1st structure)*/ + __s32 bmv_unused1; /* future use */ + __s32 bmv_unused2; /* future use */ +}; +#endif + +/* bmv_iflags values - set by XFS_IOC_GETBMAPX caller. */ +#define BMV_IF_ATTRFORK 0x1 /* return attr fork rather than data */ +#define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */ +#define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */ +#define BMV_IF_VALID (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC) +#ifdef __KERNEL__ +#define BMV_IF_EXTENDED 0x40000000 /* getpmapx if set */ +#endif + +/* bmv_oflags values - returned for for each non-header segment */ +#define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ + +/* Convert getbmap <-> getbmapx - move fields from p1 to p2. */ +#define GETBMAP_CONVERT(p1,p2) { \ + p2.bmv_offset = p1.bmv_offset; \ + p2.bmv_block = p1.bmv_block; \ + p2.bmv_length = p1.bmv_length; \ + p2.bmv_count = p1.bmv_count; \ + p2.bmv_entries = p1.bmv_entries; } + + +/* + * Structure for XFS_IOC_FSSETDM. + * For use by backup and restore programs to set the XFS on-disk inode + * fields di_dmevmask and di_dmstate. These must be set to exactly and + * only values previously obtained via xfs_bulkstat! (Specifically the + * xfs_bstat_t fields bs_dmevmask and bs_dmstate.) + */ +#ifndef HAVE_FSDMIDATA +struct fsdmidata { + __u32 fsd_dmevmask; /* corresponds to di_dmevmask */ + __u16 fsd_padding; + __u16 fsd_dmstate; /* corresponds to di_dmstate */ +}; +#endif + +/* + * File segment locking set data type for 64 bit access. + * Also used for all the RESV/FREE interfaces. 
+ */ +typedef struct xfs_flock64 { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; /* len == 0 means until end of file */ + __s32 l_sysid; + pid_t l_pid; + __s32 l_pad[4]; /* reserve area */ +} xfs_flock64_t; + +/* + * Output for XFS_IOC_FSGEOMETRY_V1 + */ +typedef struct xfs_fsop_geom_v1 { + __u32 blocksize; /* filesystem (data) block size */ + __u32 rtextsize; /* realtime extent size */ + __u32 agblocks; /* fsblocks in an AG */ + __u32 agcount; /* number of allocation groups */ + __u32 logblocks; /* fsblocks in the log */ + __u32 sectsize; /* (data) sector size, bytes */ + __u32 inodesize; /* inode size in bytes */ + __u32 imaxpct; /* max allowed inode space(%) */ + __u64 datablocks; /* fsblocks in data subvolume */ + __u64 rtblocks; /* fsblocks in realtime subvol */ + __u64 rtextents; /* rt extents in realtime subvol*/ + __u64 logstart; /* starting fsblock of the log */ + unsigned char uuid[16]; /* unique id of the filesystem */ + __u32 sunit; /* stripe unit, fsblocks */ + __u32 swidth; /* stripe width, fsblocks */ + __s32 version; /* structure version */ + __u32 flags; /* superblock version flags */ + __u32 logsectsize; /* log sector size, bytes */ + __u32 rtsectsize; /* realtime sector size, bytes */ + __u32 dirblocksize; /* directory block size, bytes */ +} xfs_fsop_geom_v1_t; + +/* + * Output for XFS_IOC_FSGEOMETRY + */ +typedef struct xfs_fsop_geom { + __u32 blocksize; /* filesystem (data) block size */ + __u32 rtextsize; /* realtime extent size */ + __u32 agblocks; /* fsblocks in an AG */ + __u32 agcount; /* number of allocation groups */ + __u32 logblocks; /* fsblocks in the log */ + __u32 sectsize; /* (data) sector size, bytes */ + __u32 inodesize; /* inode size in bytes */ + __u32 imaxpct; /* max allowed inode space(%) */ + __u64 datablocks; /* fsblocks in data subvolume */ + __u64 rtblocks; /* fsblocks in realtime subvol */ + __u64 rtextents; /* rt extents in realtime subvol*/ + __u64 logstart; /* starting fsblock of the log */ + unsigned char uuid[16]; /* unique id of the filesystem */ + __u32 sunit; /* stripe unit, fsblocks */ + __u32 swidth; /* stripe width, fsblocks */ + __s32 version; /* structure version */ + __u32 flags; /* superblock version flags */ + __u32 logsectsize; /* log sector size, bytes */ + __u32 rtsectsize; /* realtime sector size, bytes */ + __u32 dirblocksize; /* directory block size, bytes */ + __u32 logsunit; /* log stripe unit, bytes */ +} xfs_fsop_geom_t; + +/* Output for XFS_FS_COUNTS */ +typedef struct xfs_fsop_counts { + __u64 freedata; /* free data section blocks */ + __u64 freertx; /* free rt extents */ + __u64 freeino; /* free inodes */ + __u64 allocino; /* total allocated inodes */ +} xfs_fsop_counts_t; + +/* Input/Output for XFS_GET_RESBLKS and XFS_SET_RESBLKS */ +typedef struct xfs_fsop_resblks { + __u64 resblks; + __u64 resblks_avail; +} xfs_fsop_resblks_t; + +#define XFS_FSOP_GEOM_VERSION 0 + +#define XFS_FSOP_GEOM_FLAGS_ATTR 0x0001 /* attributes in use */ +#define XFS_FSOP_GEOM_FLAGS_NLINK 0x0002 /* 32-bit nlink values */ +#define XFS_FSOP_GEOM_FLAGS_QUOTA 0x0004 /* quotas enabled */ +#define XFS_FSOP_GEOM_FLAGS_IALIGN 0x0008 /* inode alignment */ +#define XFS_FSOP_GEOM_FLAGS_DALIGN 0x0010 /* large data alignment */ +#define XFS_FSOP_GEOM_FLAGS_SHARED 0x0020 /* read-only shared */ +#define XFS_FSOP_GEOM_FLAGS_EXTFLG 0x0040 /* special extent flag */ +#define XFS_FSOP_GEOM_FLAGS_DIRV2 0x0080 /* directory version 2 */ +#define XFS_FSOP_GEOM_FLAGS_LOGV2 0x0100 /* log format version 2 */ +#define XFS_FSOP_GEOM_FLAGS_SECTOR 
0x0200 /* sector sizes >1BB */ + + +/* + * Minimum and maximum sizes need for growth checks + */ +#define XFS_MIN_AG_BLOCKS 64 +#define XFS_MIN_LOG_BLOCKS 512 +#define XFS_MAX_LOG_BLOCKS (64 * 1024) +#define XFS_MIN_LOG_BYTES (256 * 1024) +#define XFS_MAX_LOG_BYTES (128 * 1024 * 1024) + +/* + * Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT + */ +typedef struct xfs_growfs_data { + __u64 newblocks; /* new data subvol size, fsblocks */ + __u32 imaxpct; /* new inode space percentage limit */ +} xfs_growfs_data_t; + +typedef struct xfs_growfs_log { + __u32 newblocks; /* new log size, fsblocks */ + __u32 isint; /* 1 if new log is internal */ +} xfs_growfs_log_t; + +typedef struct xfs_growfs_rt { + __u64 newblocks; /* new realtime size, fsblocks */ + __u32 extsize; /* new realtime extent size, fsblocks */ +} xfs_growfs_rt_t; + + +/* + * Structures returned from ioctl XFS_IOC_FSBULKSTAT & XFS_IOC_FSBULKSTAT_SINGLE + */ +typedef struct xfs_bstime { + time_t tv_sec; /* seconds */ + __s32 tv_nsec; /* and nanoseconds */ +} xfs_bstime_t; + +typedef struct xfs_bstat { + __u64 bs_ino; /* inode number */ + __u16 bs_mode; /* type and mode */ + __u16 bs_nlink; /* number of links */ + __u32 bs_uid; /* user id */ + __u32 bs_gid; /* group id */ + __u32 bs_rdev; /* device value */ + __s32 bs_blksize; /* block size */ + __s64 bs_size; /* file size */ + xfs_bstime_t bs_atime; /* access time */ + xfs_bstime_t bs_mtime; /* modify time */ + xfs_bstime_t bs_ctime; /* inode change time */ + int64_t bs_blocks; /* number of blocks */ + __u32 bs_xflags; /* extended flags */ + __s32 bs_extsize; /* extent size */ + __s32 bs_extents; /* number of extents */ + __u32 bs_gen; /* generation count */ + __u16 bs_projid; /* project id */ + unsigned char bs_pad[14]; /* pad space, unused */ + __u32 bs_dmevmask; /* DMIG event mask */ + __u16 bs_dmstate; /* DMIG state info */ + __u16 bs_aextents; /* attribute number of extents */ +} xfs_bstat_t; + +/* + * The user-level BulkStat Request interface structure. + */ +typedef struct xfs_fsop_bulkreq { + __u64 *lastip; /* last inode # pointer */ + __s32 icount; /* count of entries in buffer */ + void *ubuffer; /* user buffer for inode desc. */ + __s32 *ocount; /* output count pointer */ +} xfs_fsop_bulkreq_t; + + +/* + * Structures returned from xfs_inumbers routine (XFS_IOC_FSINUMBERS). + */ +typedef struct xfs_inogrp { + __u64 xi_startino; /* starting inode number */ + __s32 xi_alloccount; /* # bits set in allocmask */ + __u64 xi_allocmask; /* mask of allocated inodes */ +} xfs_inogrp_t; + + +/* + * Error injection. + */ +typedef struct xfs_error_injection { + __s32 fd; + __s32 errtag; +} xfs_error_injection_t; + + +/* + * The user-level Handle Request interface structure. 
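
User-space aside, not patch code: xfs_fsop_bulkreq above is the request side of both FSBULKSTAT and FSINUMBERS: a resumable cursor (lastip), a buffer, and an output count. A sketch against the smaller xfs_inogrp records, with local mirrors of the structures and the usual assumption that long long/int match the kernel's __u64/__s32:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    struct xfs_inogrp {                    /* local mirror of the struct above */
        unsigned long long xi_startino;
        int xi_alloccount;
        unsigned long long xi_allocmask;
    };
    struct xfs_fsop_bulkreq {
        unsigned long long *lastip;        /* in/out: resume cursor */
        int icount;                        /* slots in ubuffer */
        void *ubuffer;
        int *ocount;                       /* out: records returned */
    };
    #define XFS_IOC_FSINUMBERS _IOWR('X', 103, struct xfs_fsop_bulkreq)

    int main(int argc, char **argv)
    {
        struct xfs_inogrp groups[16];
        unsigned long long last = 0;
        int fd, got = 0, i;
        struct xfs_fsop_bulkreq req = { &last, 16, groups, &got };

        if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
            return 1;
        while (ioctl(fd, XFS_IOC_FSINUMBERS, &req) == 0 && got > 0)
            for (i = 0; i < got; i++)
                printf("inode chunk at %llu: %d allocated\n",
                       groups[i].xi_startino, groups[i].xi_alloccount);
        close(fd);
        return 0;
    }
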
+ */ +typedef struct xfs_fsop_handlereq { + __u32 fd; /* fd for FD_TO_HANDLE */ + void *path; /* user pathname */ + __u32 oflags; /* open flags */ + void *ihandle; /* user supplied handle */ + __u32 ihandlen; /* user supplied length */ + void *ohandle; /* user buffer for handle */ + __u32 *ohandlen; /* user buffer length */ +} xfs_fsop_handlereq_t; + +/* + * Compound structures for passing args through Handle Request interfaces + * xfs_fssetdm_by_handle, xfs_attrlist_by_handle, xfs_attrmulti_by_handle + * - ioctls: XFS_IOC_FSSETDM_BY_HANDLE, XFS_IOC_ATTRLIST_BY_HANDLE, and + * XFS_IOC_ATTRMULTI_BY_HANDLE + */ + +typedef struct xfs_fsop_setdm_handlereq { + struct xfs_fsop_handlereq hreq; /* handle interface structure */ + struct fsdmidata *data; /* DMAPI data to set */ +} xfs_fsop_setdm_handlereq_t; + +typedef struct xfs_attrlist_cursor { + __u32 opaque[4]; +} xfs_attrlist_cursor_t; + +typedef struct xfs_fsop_attrlist_handlereq { + struct xfs_fsop_handlereq hreq; /* handle interface structure */ + struct xfs_attrlist_cursor pos; /* opaque cookie, list offset */ + __u32 flags; /* flags, use ROOT/USER names */ + __u32 buflen; /* length of buffer supplied */ + void *buffer; /* attrlist data to return */ +} xfs_fsop_attrlist_handlereq_t; + +typedef struct xfs_attr_multiop { + __u32 am_opcode; + __s32 am_error; + void *am_attrname; + void *am_attrvalue; + __u32 am_length; + __u32 am_flags; +} xfs_attr_multiop_t; + +typedef struct xfs_fsop_attrmulti_handlereq { + struct xfs_fsop_handlereq hreq; /* handle interface structure */ + __u32 opcount; /* count of following multiop */ + struct xfs_attr_multiop *ops; /* attr_multi data to get/set */ +} xfs_fsop_attrmulti_handlereq_t; + +/* + * File system identifier. Should be unique (at least per machine). + */ +typedef struct { + __u32 val[2]; /* file system id type */ +} xfs_fsid_t; + +/* + * File identifier. Should be unique per filesystem on a single machine. + * This is typically called by a stateless file server in order to generate + * "file handles". + */ +#ifndef HAVE_FID +#define MAXFIDSZ 46 +typedef struct fid { + __u16 fid_len; /* length of data in bytes */ + unsigned char fid_data[MAXFIDSZ]; /* data (variable length) */ +} fid_t; +#endif + +typedef struct xfs_fid { + __u16 xfs_fid_len; /* length of remainder */ + __u16 xfs_fid_pad; + __u32 xfs_fid_gen; /* generation number */ + __u64 xfs_fid_ino; /* 64 bits inode number */ +} xfs_fid_t; + +typedef struct xfs_fid2 { + __u16 fid_len; /* length of remainder */ + __u16 fid_pad; /* padding, must be zero */ + __u32 fid_gen; /* generation number */ + __u64 fid_ino; /* inode number */ +} xfs_fid2_t; + +typedef struct xfs_handle { + union { + __s64 align; /* force alignment of ha_fid */ + xfs_fsid_t _ha_fsid; /* unique file system identifier */ + } ha_u; + xfs_fid_t ha_fid; /* file system specific file ID */ +} xfs_handle_t; +#define ha_fsid ha_u._ha_fsid + +#define XFS_HSIZE(handle) (((char *) &(handle).ha_fid.xfs_fid_pad \ + - (char *) &(handle)) \ + + (handle).ha_fid.xfs_fid_len) + +#define XFS_HANDLE_CMP(h1, h2) memcmp(h1, h2, sizeof(xfs_handle_t)) + +#define FSHSIZE sizeof(fsid_t) + + +/* + * ioctl commands that replace IRIX fcntl()'s + * For 'documentation' purposed more than anything else, + * the "cmd #" field reflects the IRIX fcntl number. 
+ */ +#define XFS_IOC_ALLOCSP _IOW ('X', 10, struct xfs_flock64) +#define XFS_IOC_FREESP _IOW ('X', 11, struct xfs_flock64) +#define XFS_IOC_DIOINFO _IOR ('X', 30, struct dioattr) +#define XFS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) +#define XFS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) +#define XFS_IOC_ALLOCSP64 _IOW ('X', 36, struct xfs_flock64) +#define XFS_IOC_FREESP64 _IOW ('X', 37, struct xfs_flock64) +#define XFS_IOC_GETBMAP _IOWR('X', 38, struct getbmap) +#define XFS_IOC_FSSETDM _IOW ('X', 39, struct fsdmidata) +#define XFS_IOC_RESVSP _IOW ('X', 40, struct xfs_flock64) +#define XFS_IOC_UNRESVSP _IOW ('X', 41, struct xfs_flock64) +#define XFS_IOC_RESVSP64 _IOW ('X', 42, struct xfs_flock64) +#define XFS_IOC_UNRESVSP64 _IOW ('X', 43, struct xfs_flock64) +#define XFS_IOC_GETBMAPA _IOWR('X', 44, struct getbmap) +#define XFS_IOC_FSGETXATTRA _IOR ('X', 45, struct fsxattr) +/* XFS_IOC_SETBIOSIZE ---- deprecated 46 */ +/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ +#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) + +/* + * ioctl commands that replace IRIX syssgi()'s + */ +#define XFS_IOC_FSGEOMETRY_V1 _IOR ('X', 100, struct xfs_fsop_geom_v1) +#define XFS_IOC_FSBULKSTAT _IOWR('X', 101, struct xfs_fsop_bulkreq) +#define XFS_IOC_FSBULKSTAT_SINGLE _IOWR('X', 102, struct xfs_fsop_bulkreq) +#define XFS_IOC_FSINUMBERS _IOWR('X', 103, struct xfs_fsop_bulkreq) +#define XFS_IOC_PATH_TO_FSHANDLE _IOWR('X', 104, struct xfs_fsop_handlereq) +#define XFS_IOC_PATH_TO_HANDLE _IOWR('X', 105, struct xfs_fsop_handlereq) +#define XFS_IOC_FD_TO_HANDLE _IOWR('X', 106, struct xfs_fsop_handlereq) +#define XFS_IOC_OPEN_BY_HANDLE _IOWR('X', 107, struct xfs_fsop_handlereq) +#define XFS_IOC_READLINK_BY_HANDLE _IOWR('X', 108, struct xfs_fsop_handlereq) +#define XFS_IOC_SWAPEXT _IOWR('X', 109, struct xfs_swapext) +#define XFS_IOC_FSGROWFSDATA _IOW ('X', 110, struct xfs_growfs_data) +#define XFS_IOC_FSGROWFSLOG _IOW ('X', 111, struct xfs_growfs_log) +#define XFS_IOC_FSGROWFSRT _IOW ('X', 112, struct xfs_growfs_rt) +#define XFS_IOC_FSCOUNTS _IOR ('X', 113, struct xfs_fsop_counts) +#define XFS_IOC_SET_RESBLKS _IOWR('X', 114, struct xfs_fsop_resblks) +#define XFS_IOC_GET_RESBLKS _IOR ('X', 115, struct xfs_fsop_resblks) +#define XFS_IOC_ERROR_INJECTION _IOW ('X', 116, struct xfs_error_injection) +#define XFS_IOC_ERROR_CLEARALL _IOW ('X', 117, struct xfs_error_injection) +/* XFS_IOC_ATTRCTL_BY_HANDLE -- deprecated 118 */ +#define XFS_IOC_FREEZE _IOWR('X', 119, int) +#define XFS_IOC_THAW _IOWR('X', 120, int) +#define XFS_IOC_FSSETDM_BY_HANDLE _IOW ('X', 121, struct xfs_fsop_setdm_handlereq) +#define XFS_IOC_ATTRLIST_BY_HANDLE _IOW ('X', 122, struct xfs_fsop_attrlist_handlereq) +#define XFS_IOC_ATTRMULTI_BY_HANDLE _IOW ('X', 123, struct xfs_fsop_attrmulti_handlereq) +#define XFS_IOC_FSGEOMETRY _IOR ('X', 124, struct xfs_fsop_geom) +/* XFS_IOC_GETFSUUID ---------- deprecated 140 */ + + +#ifndef HAVE_BBMACROS +/* + * Block I/O parameterization. A basic block (BB) is the lowest size of + * filesystem allocation, and must equal 512. Length units given to bio + * routines are in BB's. 
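Before the block I/O helper macros are defined, here is a minimal user-space sketch of how the grow ioctls in the table above might be invoked. It is a hypothetical illustration only: the /mnt/xfs path, the target of 26214400 blocks (100 GiB with 4 KiB blocks) and the <xfs/xfs_fs.h> include path are assumptions, and the real xfs_growfs utility computes newblocks from the device geometry rather than hard-coding it.

/*
 * Hypothetical sketch: grow the data subvolume of a mounted XFS
 * filesystem with XFS_IOC_FSGROWFSDATA, using the xfs_growfs_data
 * structure declared earlier in this header.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>

int main(void)
{
	struct xfs_growfs_data in;
	int fd = open("/mnt/xfs", O_RDONLY);	/* any fd on the filesystem */

	if (fd < 0)
		return 1;
	in.newblocks = 26214400ULL;	/* new total data subvol size, fsblocks */
	in.imaxpct = 25;		/* new inode space percentage limit */
	if (ioctl(fd, XFS_IOC_FSGROWFSDATA, &in) < 0)
		perror("XFS_IOC_FSGROWFSDATA");
	close(fd);
	return 0;
}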
+ */ +#define BBSHIFT 9 +#define BBSIZE (1<<BBSHIFT) +#define BBMASK (BBSIZE-1) +#define BTOBB(bytes) (((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT) +#define BTOBBT(bytes) ((__u64)(bytes) >> BBSHIFT) +#define BBTOB(bbs) ((bbs) << BBSHIFT) +#endif + +#endif /* __XFS_FS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_fsops.c linux.22-ac2/fs/xfs/xfs_fsops.c --- linux.vanilla/fs/xfs/xfs_fsops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_fsops.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,628 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_ag.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_error.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_fsops.h" +#include "xfs_itable.h" +#include "xfs_rw.h" +#include "xfs_trans_space.h" +#include "xfs_rtalloc.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_inode_item.h" + +/* + * File system operations + */ + +int +xfs_fs_geometry( + xfs_mount_t *mp, + xfs_fsop_geom_t *geo, + int new_version) +{ + geo->blocksize = mp->m_sb.sb_blocksize; + geo->rtextsize = mp->m_sb.sb_rextsize; + geo->agblocks = mp->m_sb.sb_agblocks; + geo->agcount = mp->m_sb.sb_agcount; + geo->logblocks = mp->m_sb.sb_logblocks; + geo->sectsize = mp->m_sb.sb_sectsize; + geo->inodesize = mp->m_sb.sb_inodesize; + geo->imaxpct = mp->m_sb.sb_imax_pct; + geo->datablocks = mp->m_sb.sb_dblocks; + geo->rtblocks = mp->m_sb.sb_rblocks; + geo->rtextents = mp->m_sb.sb_rextents; + geo->logstart = mp->m_sb.sb_logstart; + ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid)); + memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid)); + if (new_version >= 2) { + geo->sunit = mp->m_sb.sb_unit; + geo->swidth = mp->m_sb.sb_width; + } + if (new_version >= 3) { + geo->version = XFS_FSOP_GEOM_VERSION; + geo->flags = + (XFS_SB_VERSION_HASATTR(&mp->m_sb) ?
+ XFS_FSOP_GEOM_FLAGS_ATTR : 0) | + (XFS_SB_VERSION_HASNLINK(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_NLINK : 0) | + (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_QUOTA : 0) | + (XFS_SB_VERSION_HASALIGN(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_IALIGN : 0) | + (XFS_SB_VERSION_HASDALIGN(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_DALIGN : 0) | + (XFS_SB_VERSION_HASSHARED(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_SHARED : 0) | + (XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) | + (XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) | + (XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_SECTOR : 0); + geo->logsectsize = XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ? + mp->m_sb.sb_logsectsize : BBSIZE; + geo->rtsectsize = mp->m_sb.sb_blocksize; + geo->dirblocksize = mp->m_dirblksize; + } + if (new_version >= 4) { + geo->flags |= + (XFS_SB_VERSION_HASLOGV2(&mp->m_sb) ? + XFS_FSOP_GEOM_FLAGS_LOGV2 : 0); + geo->logsunit = mp->m_sb.sb_logsunit; + } + return 0; +} + +static int +xfs_growfs_data_private( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_growfs_data_t *in) /* growfs data input struct */ +{ + xfs_agf_t *agf; + xfs_agi_t *agi; + xfs_agnumber_t agno; + xfs_extlen_t agsize; + xfs_extlen_t tmpsize; + xfs_alloc_rec_t *arec; + xfs_btree_sblock_t *block; + xfs_buf_t *bp; + int bucket; + int dpct; + int error; + xfs_agnumber_t nagcount; + xfs_rfsblock_t nb, nb_mod; + xfs_rfsblock_t new; + xfs_rfsblock_t nfree; + xfs_agnumber_t oagcount; + int pct; + xfs_sb_t *sbp; + xfs_trans_t *tp; + + nb = in->newblocks; + pct = in->imaxpct; + if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100) + return XFS_ERROR(EINVAL); + dpct = pct - mp->m_sb.sb_imax_pct; + error = xfs_read_buf(mp, mp->m_ddev_targp, + XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (error) + return error; + ASSERT(bp); + xfs_buf_relse(bp); + + new = nb; /* use new as a temporary here */ + nb_mod = do_div(new, mp->m_sb.sb_agblocks); + nagcount = new + (nb_mod != 0); + if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) { + nagcount--; + nb = nagcount * mp->m_sb.sb_agblocks; + if (nb < mp->m_sb.sb_dblocks) + return XFS_ERROR(EINVAL); + } + new = in->newblocks - mp->m_sb.sb_dblocks; + oagcount = mp->m_sb.sb_agcount; + if (nagcount > oagcount) { + down_write(&mp->m_peraglock); + mp->m_perag = kmem_realloc(mp->m_perag, + sizeof(xfs_perag_t) * nagcount, + sizeof(xfs_perag_t) * oagcount, + KM_SLEEP); + memset(&mp->m_perag[oagcount], 0, + (nagcount - oagcount) * sizeof(xfs_perag_t)); + mp->m_flags |= XFS_MOUNT_32BITINODES; + xfs_initialize_perag(mp, nagcount); + up_write(&mp->m_peraglock); + } + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS); + if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp), + XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) { + xfs_trans_cancel(tp, 0); + return error; + } + + nfree = 0; + for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { + /* + * AG freelist header block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0); + agf = XFS_BUF_TO_AGF(bp); + memset(agf, 0, mp->m_sb.sb_sectsize); + INT_SET(agf->agf_magicnum, ARCH_CONVERT, XFS_AGF_MAGIC); + INT_SET(agf->agf_versionnum, ARCH_CONVERT, XFS_AGF_VERSION); + INT_SET(agf->agf_seqno, ARCH_CONVERT, agno); + if (agno == nagcount - 1) + agsize = + nb - + (agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks); + else + agsize = mp->m_sb.sb_agblocks; + INT_SET(agf->agf_length, ARCH_CONVERT, agsize); + INT_SET(agf->agf_roots[XFS_BTNUM_BNOi], 
ARCH_CONVERT, + XFS_BNO_BLOCK(mp)); + INT_SET(agf->agf_roots[XFS_BTNUM_CNTi], ARCH_CONVERT, + XFS_CNT_BLOCK(mp)); + INT_SET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT, 1); + INT_SET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT, 1); + INT_ZERO(agf->agf_flfirst, ARCH_CONVERT); + INT_SET(agf->agf_fllast, ARCH_CONVERT, XFS_AGFL_SIZE(mp) - 1); + INT_ZERO(agf->agf_flcount, ARCH_CONVERT); + tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp); + INT_SET(agf->agf_freeblks, ARCH_CONVERT, tmpsize); + INT_SET(agf->agf_longest, ARCH_CONVERT, tmpsize); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + /* + * AG inode header block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0); + agi = XFS_BUF_TO_AGI(bp); + memset(agi, 0, mp->m_sb.sb_sectsize); + INT_SET(agi->agi_magicnum, ARCH_CONVERT, XFS_AGI_MAGIC); + INT_SET(agi->agi_versionnum, ARCH_CONVERT, XFS_AGI_VERSION); + INT_SET(agi->agi_seqno, ARCH_CONVERT, agno); + INT_SET(agi->agi_length, ARCH_CONVERT, agsize); + INT_ZERO(agi->agi_count, ARCH_CONVERT); + INT_SET(agi->agi_root, ARCH_CONVERT, XFS_IBT_BLOCK(mp)); + INT_SET(agi->agi_level, ARCH_CONVERT, 1); + INT_ZERO(agi->agi_freecount, ARCH_CONVERT); + INT_SET(agi->agi_newino, ARCH_CONVERT, NULLAGINO); + INT_SET(agi->agi_dirino, ARCH_CONVERT, NULLAGINO); + for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) + INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, + NULLAGINO); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + /* + * BNO btree root block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)), + BTOBB(mp->m_sb.sb_blocksize), 0); + block = XFS_BUF_TO_SBLOCK(bp); + memset(block, 0, mp->m_sb.sb_blocksize); + INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTB_MAGIC); + INT_ZERO(block->bb_level, ARCH_CONVERT); + INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); + INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, + block, 1, mp->m_alloc_mxr[0]); + INT_SET(arec->ar_startblock, ARCH_CONVERT, + XFS_PREALLOC_BLOCKS(mp)); + INT_SET(arec->ar_blockcount, ARCH_CONVERT, + agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT)); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + /* + * CNT btree root block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)), + BTOBB(mp->m_sb.sb_blocksize), 0); + block = XFS_BUF_TO_SBLOCK(bp); + memset(block, 0, mp->m_sb.sb_blocksize); + INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTC_MAGIC); + INT_ZERO(block->bb_level, ARCH_CONVERT); + INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); + INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, + block, 1, mp->m_alloc_mxr[0]); + INT_SET(arec->ar_startblock, ARCH_CONVERT, + XFS_PREALLOC_BLOCKS(mp)); + INT_SET(arec->ar_blockcount, ARCH_CONVERT, + agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT)); + nfree += INT_GET(arec->ar_blockcount, ARCH_CONVERT); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + /* + * INO btree root block + */ + bp = xfs_buf_get(mp->m_ddev_targp, + XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)), + BTOBB(mp->m_sb.sb_blocksize), 0); + block = XFS_BUF_TO_SBLOCK(bp); + memset(block, 0, mp->m_sb.sb_blocksize); + INT_SET(block->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC); + 
INT_ZERO(block->bb_level, ARCH_CONVERT); + INT_ZERO(block->bb_numrecs, ARCH_CONVERT); + INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + error = xfs_bwrite(mp, bp); + if (error) { + goto error0; + } + } + xfs_trans_agblocks_delta(tp, nfree); + /* + * There are new blocks in the old last a.g. + */ + if (new) { + /* + * Change the agi length. + */ + error = xfs_ialloc_read_agi(mp, tp, agno, &bp); + if (error) { + goto error0; + } + ASSERT(bp); + agi = XFS_BUF_TO_AGI(bp); + INT_MOD(agi->agi_length, ARCH_CONVERT, new); + ASSERT(nagcount == oagcount || + INT_GET(agi->agi_length, ARCH_CONVERT) == + mp->m_sb.sb_agblocks); + xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); + /* + * Change agf length. + */ + error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp); + if (error) { + goto error0; + } + ASSERT(bp); + agf = XFS_BUF_TO_AGF(bp); + INT_MOD(agf->agf_length, ARCH_CONVERT, new); + ASSERT(INT_GET(agf->agf_length, ARCH_CONVERT) == + INT_GET(agi->agi_length, ARCH_CONVERT)); + /* + * Free the new space. + */ + error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno, + INT_GET(agf->agf_length, ARCH_CONVERT) - new), new); + if (error) { + goto error0; + } + } + if (nagcount > oagcount) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount); + if (nb > mp->m_sb.sb_dblocks) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, + nb - mp->m_sb.sb_dblocks); + if (nfree) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree); + if (dpct) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct); + error = xfs_trans_commit(tp, 0, NULL); + if (error) { + return error; + } + if (mp->m_sb.sb_imax_pct) { + __uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct; + do_div(icount, 100); + mp->m_maxicount = icount << mp->m_sb.sb_inopblog; + } else + mp->m_maxicount = 0; + for (agno = 1; agno < nagcount; agno++) { + error = xfs_read_buf(mp, mp->m_ddev_targp, + XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (error) { + xfs_fs_cmn_err(CE_WARN, mp, + "error %d reading secondary superblock for ag %d", + error, agno); + break; + } + sbp = XFS_BUF_TO_SBP(bp); + xfs_xlatesb(sbp, &mp->m_sb, -1, ARCH_CONVERT, XFS_SB_ALL_BITS); + /* + * If we get an error writing out the alternate superblocks, + * just issue a warning and continue. The real work is + * already done and committed. + */ + if (!(error = xfs_bwrite(mp, bp))) { + continue; + } else { + xfs_fs_cmn_err(CE_WARN, mp, + "write error %d updating secondary superblock for ag %d", + error, agno); + break; /* no point in continuing */ + } + } + return 0; + + error0: + xfs_trans_cancel(tp, XFS_TRANS_ABORT); + return error; +} + +static int +xfs_growfs_log_private( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_growfs_log_t *in) /* growfs log input struct */ +{ + xfs_extlen_t nb; + + nb = in->newblocks; + if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES)) + return XFS_ERROR(EINVAL); + if (nb == mp->m_sb.sb_logblocks && + in->isint == (mp->m_sb.sb_logstart != 0)) + return XFS_ERROR(EINVAL); + /* + * Moving the log is hard, need new interfaces to sync + * the log first, hold off all activity while moving it. + * Can have shorter or longer log in the same space, + * or transform internal to external log or vice versa. 
+ */ + return XFS_ERROR(ENOSYS); +} + +/* + * protected versions of growfs function acquire and release locks on the mount + * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG, + * XFS_IOC_FSGROWFSRT + */ + + +int +xfs_growfs_data( + xfs_mount_t *mp, + xfs_growfs_data_t *in) +{ + int error; + if (!cpsema(&mp->m_growlock)) + return XFS_ERROR(EWOULDBLOCK); + error = xfs_growfs_data_private(mp, in); + vsema(&mp->m_growlock); + return error; +} + +int +xfs_growfs_log( + xfs_mount_t *mp, + xfs_growfs_log_t *in) +{ + int error; + if (!cpsema(&mp->m_growlock)) + return XFS_ERROR(EWOULDBLOCK); + error = xfs_growfs_log_private(mp, in); + vsema(&mp->m_growlock); + return error; +} + +/* + * exported through ioctl XFS_IOC_FSCOUNTS + */ + +int +xfs_fs_counts( + xfs_mount_t *mp, + xfs_fsop_counts_t *cnt) +{ + unsigned long s; + + s = XFS_SB_LOCK(mp); + cnt->freedata = mp->m_sb.sb_fdblocks; + cnt->freertx = mp->m_sb.sb_frextents; + cnt->freeino = mp->m_sb.sb_ifree; + cnt->allocino = mp->m_sb.sb_icount; + XFS_SB_UNLOCK(mp, s); + return 0; +} + +/* + * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS + * + * xfs_reserve_blocks is called to set m_resblks + * in the in-core mount table. The number of unused reserved blocks + * is kept in m_resbls_avail. + * + * Reserve the requested number of blocks if available. Otherwise return + * as many as possible to satisfy the request. The actual number + * reserved are returned in outval + * + * A null inval pointer indicates that only the current reserved blocks + * available should be returned no settings are changed. + */ + +int +xfs_reserve_blocks( + xfs_mount_t *mp, + __uint64_t *inval, + xfs_fsop_resblks_t *outval) +{ + __uint64_t lcounter, delta; + __uint64_t request; + unsigned long s; + + /* If inval is null, report current values and return */ + + if (inval == (__uint64_t *)NULL) { + outval->resblks = mp->m_resblks; + outval->resblks_avail = mp->m_resblks_avail; + return(0); + } + + request = *inval; + s = XFS_SB_LOCK(mp); + + /* + * If our previous reservation was larger than the current value, + * then move any unused blocks back to the free pool. 
+ */ + + if (mp->m_resblks > request) { + lcounter = mp->m_resblks_avail - request; + if (lcounter > 0) { /* release unused blocks */ + mp->m_sb.sb_fdblocks += lcounter; + mp->m_resblks_avail -= lcounter; + } + mp->m_resblks = request; + } else { + delta = request - mp->m_resblks; + lcounter = mp->m_sb.sb_fdblocks; + lcounter -= delta; + if (lcounter < 0) { + /* We can't satisfy the request, just get what we can */ + mp->m_resblks += mp->m_sb.sb_fdblocks; + mp->m_resblks_avail += mp->m_sb.sb_fdblocks; + mp->m_sb.sb_fdblocks = 0; + } else { + mp->m_sb.sb_fdblocks = lcounter; + mp->m_resblks = request; + mp->m_resblks_avail += delta; + } + } + + outval->resblks = mp->m_resblks; + outval->resblks_avail = mp->m_resblks_avail; + XFS_SB_UNLOCK(mp, s); + return(0); +} + +void +xfs_fs_log_dummy(xfs_mount_t *mp) +{ + xfs_trans_t *tp; + xfs_inode_t *ip; + + + tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); + atomic_inc(&mp->m_active_trans); + if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) { + xfs_trans_cancel(tp, 0); + return; + } + + ip = mp->m_rootip; + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + xfs_trans_commit(tp, XFS_TRANS_SYNC, NULL); + + xfs_iunlock(ip, XFS_ILOCK_EXCL); +} + +int +xfs_fs_freeze( + xfs_mount_t *mp) +{ + vfs_t *vfsp; + /*REFERENCED*/ + int error; + + vfsp = XFS_MTOVFS(mp); + + /* Stop new writers */ + xfs_start_freeze(mp, XFS_FREEZE_WRITE); + + /* Flush the refcache */ + xfs_refcache_purge_mp(mp); + + /* Flush delalloc and delwri data */ + VFS_SYNC(vfsp, SYNC_DELWRI|SYNC_WAIT, NULL, error); + + /* Pause transaction subsystem */ + xfs_start_freeze(mp, XFS_FREEZE_TRANS); + + /* Flush any remaining inodes into buffers */ + VFS_SYNC(vfsp, SYNC_ATTR|SYNC_WAIT, NULL, error); + + /* Push all buffers out to disk */ + xfs_binval(mp->m_ddev_targp); + if (mp->m_rtdev_targp) { + xfs_binval(mp->m_rtdev_targp); + } + + /* Push the superblock and write an unmount record */ + xfs_log_unmount_write(mp); + xfs_unmountfs_writesb(mp); + + return 0; +} + +int +xfs_fs_thaw( + xfs_mount_t *mp) +{ + xfs_finish_freeze(mp); + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_fsops.h linux.22-ac2/fs/xfs/xfs_fsops.h --- linux.vanilla/fs/xfs/xfs_fsops.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_fsops.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_FSOPS_H__ +#define __XFS_FSOPS_H__ + +int +xfs_fs_geometry( + xfs_mount_t *mp, + xfs_fsop_geom_t *geo, + int new_version); + +int +xfs_growfs_data( + xfs_mount_t *mp, + xfs_growfs_data_t *in); + +int +xfs_growfs_log( + xfs_mount_t *mp, + xfs_growfs_log_t *in); + +int +xfs_fs_counts( + xfs_mount_t *mp, + xfs_fsop_counts_t *cnt); + +int +xfs_reserve_blocks( + xfs_mount_t *mp, + __uint64_t *inval, + xfs_fsop_resblks_t *outval); + +int +xfs_fs_freeze( + xfs_mount_t *mp); + +int +xfs_fs_thaw( + xfs_mount_t *mp); + +#endif /* __XFS_FSOPS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs.h linux.22-ac2/fs/xfs/xfs.h --- linux.vanilla/fs/xfs/xfs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_H__ +#define __XFS_H__ + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#endif /* __XFS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_ialloc_btree.c linux.22-ac2/fs/xfs/xfs_ialloc_btree.c --- linux.vanilla/fs/xfs/xfs_ialloc_btree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_ialloc_btree.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,2122 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" + +/* + * Inode allocation management for XFS. + */ + +/* + * Prototypes for internal functions. + */ + +STATIC void xfs_inobt_log_block(xfs_trans_t *, xfs_buf_t *, int); +STATIC void xfs_inobt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); +STATIC int xfs_inobt_lshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *); +STATIC int xfs_inobt_rshift(xfs_btree_cur_t *, int, int *); +STATIC int xfs_inobt_split(xfs_btree_cur_t *, int, xfs_agblock_t *, + xfs_inobt_key_t *, xfs_btree_cur_t **, int *); +STATIC int xfs_inobt_updkey(xfs_btree_cur_t *, xfs_inobt_key_t *, int); + +/* + * Internal functions. + */ + +#ifdef _NOTYET_ +/* + * Single level of the xfs_inobt_delete record deletion routine. + * Delete record pointed to by cur/level. + * Remove the record from its block then rebalance the tree. + * Return 0 for error, 1 for done, 2 to go on to the next level. + */ +STATIC int /* error */ +xfs_inobt_delrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level removing record from */ + int *stat) /* fail/done/go-on */ +{ + xfs_buf_t *agbp; /* buffer for a.g. 
inode header */ + xfs_agnumber_t agfbno; /* agf block of freed btree block */ + xfs_buf_t *agfbp; /* bp of agf block of freed block */ + xfs_agi_t *agi; /* allocation group inode header */ + xfs_inobt_block_t *block; /* btree block record/key lives in */ + xfs_agblock_t bno; /* btree block number */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ + int i; /* loop index */ + xfs_inobt_key_t key; /* kp points here if block is level 0 */ + xfs_inobt_key_t *kp; /* pointer to btree keys */ + xfs_agblock_t lbno; /* left block's block number */ + xfs_buf_t *lbp; /* left block's buffer pointer */ + xfs_inobt_block_t *left; /* left btree block */ + xfs_inobt_key_t *lkp; /* left block key pointer */ + xfs_inobt_ptr_t *lpp; /* left block address pointer */ + int lrecs; /* number of records in left block */ + xfs_inobt_rec_t *lrp; /* left block record pointer */ + xfs_inobt_ptr_t *pp; /* pointer to btree addresses */ + int ptr; /* index in btree block for this rec */ + xfs_agblock_t rbno; /* right block's block number */ + xfs_buf_t *rbp; /* right block's buffer pointer */ + xfs_inobt_block_t *right; /* right btree block */ + xfs_inobt_key_t *rkp; /* right block key pointer */ + xfs_inobt_rec_t *rp; /* pointer to btree records */ + xfs_inobt_ptr_t *rpp; /* right block address pointer */ + int rrecs; /* number of records in right block */ + xfs_inobt_rec_t *rrp; /* right block record pointer */ + xfs_btree_cur_t *tcur; /* temporary btree cursor */ + + + /* + * Get the index of the entry being deleted, check for nothing there. + */ + ptr = cur->bc_ptrs[level]; + if (ptr == 0) { + *stat = 0; + return 0; + } + /* + * Get the buffer & block containing the record or key/ptr. + */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if (error = xfs_btree_check_sblock(cur, block, level, bp)) + return error; +#endif + /* + * Fail if we're off the end of the block. + */ + if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + *stat = 0; + return 0; + } + /* + * It's a nonleaf. Excise the key and ptr being deleted, by + * sliding the entries past them down one. + * Log the changed areas of the block. + */ + if (level > 0) { + kp = XFS_INOBT_KEY_ADDR(block, 1, cur); + pp = XFS_INOBT_PTR_ADDR(block, 1, cur); +#ifdef DEBUG + for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) { + if (error = xfs_btree_check_sptr(cur, INT_GET(pp[i], ARCH_CONVERT), level)) + return error; + } +#endif + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + memmove(&kp[ptr - 1], &kp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*kp)); + memmove(&pp[ptr - 1], &pp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*pp)); + xfs_inobt_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + xfs_inobt_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + } + } + /* + * It's a leaf. Excise the record being deleted, by sliding the + * entries past it down one. Log the changed areas of the block. + */ + else { + rp = XFS_INOBT_REC_ADDR(block, 1, cur); + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + memmove(&rp[ptr - 1], &rp[ptr], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*rp)); + xfs_inobt_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); + } + /* + * If it's the first record in the block, we'll need a key + * structure to pass up to the next level (updkey). 
+ */ + if (ptr == 1) { + INT_COPY(key.ir_startino, rp->ir_startino, ARCH_CONVERT); + kp = &key; + } + } + /* + * Decrement and log the number of entries in the block. + */ + INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1); + xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); + /* + * Is this the root level? If so, we're almost done. + */ + if (level == cur->bc_nlevels - 1) { + /* + * If this is the root level, + * and there's only one entry left, + * and it's NOT the leaf level, + * then we can get rid of this level. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) { + agbp = cur->bc_private.i.agbp; + agi = XFS_BUF_TO_AGI(agbp); + /* + * pp is still set to the first pointer in the block. + * Make it the new root of the btree. + */ + bno = INT_GET(agi->agi_root, ARCH_CONVERT); + INT_COPY(agi->agi_root, *pp, ARCH_CONVERT); + INT_MOD(agi->agi_level, ARCH_CONVERT, -1); + /* + * Free the block. + */ + if (error = xfs_free_extent(cur->bc_tp, bno, 1)) + return error; + xfs_trans_binval(cur->bc_tp, bp); + xfs_ialloc_log_agi(cur->bc_tp, agbp, + XFS_AGI_ROOT | XFS_AGI_LEVEL); + /* + * Update the cursor so there's one fewer level. + */ + cur->bc_bufs[level] = NULL; + cur->bc_nlevels--; + /* + * To ensure that the freed block is not used for + * user data until this transaction is permanent, + * we lock the agf buffer for this ag until the + * transaction record makes it to the on-disk log. + */ + agfbno = XFS_AG_DADDR(cur->bc_mp, + cur->bc_private.i.agno, + XFS_AGF_DADDR(mp)); + if (error = xfs_trans_read_buf(cur->bc_mp, cur->bc_tp, + cur->bc_mp->m_ddev_targp, agfbno, + XFS_FSS_TO_BB(mp, 1), 0, &agfbp)) + return error; + ASSERT(!XFS_BUF_GETERROR(agfbp)); + xfs_trans_bhold_until_committed(cur->bc_tp, agfbp); + } else if (level > 0 && + (error = xfs_inobt_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * If we deleted the leftmost entry in the block, update the + * key values above us in the tree. + */ + if (ptr == 1 && (error = xfs_inobt_updkey(cur, kp, level + 1))) + return error; + /* + * If the number of records remaining in the block is at least + * the minimum, we're done. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_INOBT_BLOCK_MINRECS(level, cur)) { + if (level > 0 && + (error = xfs_inobt_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * Otherwise, we have to move some records around to keep the + * tree balanced. Look at the left and right sibling blocks to + * see if we can re-balance by moving only one record. + */ + rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); + lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); + bno = NULLAGBLOCK; + ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); + /* + * Duplicate the cursor so our btree manipulations here won't + * disrupt the next level up. + */ + if (error = xfs_btree_dup_cursor(cur, &tcur)) + return error; + /* + * If there's a right sibling, see if it's ok to shift an entry + * out of it. + */ + if (rbno != NULLAGBLOCK) { + /* + * Move the temp cursor to the last entry in the next block. + * Actually any entry but the first would suffice. + */ + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (error = xfs_inobt_increment(tcur, level, &i)) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Grab a pointer to the block. 
+ */ + rbp = tcur->bc_bufs[level]; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); +#ifdef DEBUG + if (error = xfs_btree_check_sblock(cur, right, level, rbp)) + goto error0; +#endif + /* + * Grab the current block number, for future use. + */ + bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + /* + * If right block is full enough so that removing one entry + * won't make it too empty, and left-shifting an entry out + * of right to us works, we're done. + */ + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_INOBT_BLOCK_MINRECS(level, cur)) { + if (error = xfs_inobt_lshift(tcur, level, &i)) + goto error0; + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_INOBT_BLOCK_MINRECS(level, cur)); + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + if (level > 0 && + (error = xfs_inobt_decrement(cur, level, + &i))) + return error; + *stat = 1; + return 0; + } + } + /* + * Otherwise, grab the number of records in right for + * future reference, and fix up the temp cursor to point + * to our block again (last record). + */ + rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); + if (lbno != NULLAGBLOCK) { + xfs_btree_firstrec(tcur, level); + if (error = xfs_inobt_decrement(tcur, level, &i)) + goto error0; + } + } + /* + * If there's a left sibling, see if it's ok to shift an entry + * out of it. + */ + if (lbno != NULLAGBLOCK) { + /* + * Move the temp cursor to the first entry in the + * previous block. + */ + xfs_btree_firstrec(tcur, level); + if (error = xfs_inobt_decrement(tcur, level, &i)) + goto error0; + xfs_btree_firstrec(tcur, level); + /* + * Grab a pointer to the block. + */ + lbp = tcur->bc_bufs[level]; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); +#ifdef DEBUG + if (error = xfs_btree_check_sblock(cur, left, level, lbp)) + goto error0; +#endif + /* + * Grab the current block number, for future use. + */ + bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + /* + * If left block is full enough so that removing one entry + * won't make it too empty, and right-shifting an entry out + * of left to us works, we're done. + */ + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= + XFS_INOBT_BLOCK_MINRECS(level, cur)) { + if (error = xfs_inobt_rshift(tcur, level, &i)) + goto error0; + if (i) { + ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= + XFS_INOBT_BLOCK_MINRECS(level, cur)); + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + if (level == 0) + cur->bc_ptrs[0]++; + *stat = 1; + return 0; + } + } + /* + * Otherwise, grab the number of records in right for + * future reference. + */ + lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * Delete the temp cursor, we're done with it. + */ + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + /* + * If here, we need to do a join to keep the tree balanced. + */ + ASSERT(bno != NULLAGBLOCK); + /* + * See if we can join with the left neighbor block. + */ + if (lbno != NULLAGBLOCK && + lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + /* + * Set "right" to be the starting block, + * "left" to be the left neighbor. + */ + rbno = bno; + right = block; + rbp = bp; + if (error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, lbno, 0, &lbp, + XFS_INO_BTREE_REF)) + return error; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); + if (error = xfs_btree_check_sblock(cur, left, level, lbp)) + return error; + } + /* + * If that won't work, see if we can join with the right neighbor block. 
+ */ + else if (rbno != NULLAGBLOCK && + rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= + XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + /* + * Set "left" to be the starting block, + * "right" to be the right neighbor. + */ + lbno = bno; + left = block; + lbp = bp; + if (error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, rbno, 0, &rbp, + XFS_INO_BTREE_REF)) + return error; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); + if (error = xfs_btree_check_sblock(cur, right, level, rbp)) + return error; + } + /* + * Otherwise, we can't fix the imbalance. + * Just return. This is probably a logic error, but it's not fatal. + */ + else { + if (level > 0 && (error = xfs_inobt_decrement(cur, level, &i))) + return error; + *stat = 1; + return 0; + } + /* + * We're now going to join "left" and "right" by moving all the stuff + * in "right" to "left" and deleting "right". + */ + if (level > 0) { + /* + * It's a non-leaf. Move keys and pointers. + */ + lkp = XFS_INOBT_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + lpp = XFS_INOBT_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); + rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if (error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)) + return error; + } +#endif + memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); + memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); + xfs_inobt_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_inobt_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } else { + /* + * It's a leaf. Move records. + */ + lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); + rrp = XFS_INOBT_REC_ADDR(right, 1, cur); + memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp)); + xfs_inobt_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, + INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } + /* + * Fix up the number of records in the surviving block. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + /* + * Fix up the right block pointer in the surviving block, and log it. + */ + INT_COPY(left->bb_rightsib, right->bb_rightsib, ARCH_CONVERT); + xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + /* + * If there is a right sibling now, make it point to the + * remaining block. + */ + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_inobt_block_t *rrblock; + xfs_buf_t *rrbp; + + if (error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, + &rrbp, XFS_INO_BTREE_REF)) + return error; + rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); + if (error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)) + return error; + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); + xfs_inobt_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); + } + /* + * Free the deleting block. 
+ */ + if (error = xfs_free_extent(cur->bc_tp, rbno, 1)) + return error; + xfs_trans_binval(cur->bc_tp, rbp); + /* + * To ensure that the freed block is not used for + * user data until this transaction is permanent, + * we lock the agf buffer for this ag until the + * transaction record makes it to the on-disk log. + */ + agfbno = XFS_AG_DADDR(cur->bc_mp, cur->bc_private.i.agno, + XFS_AGF_DADDR(mp)); + if (error = xfs_trans_read_buf(cur->bc_mp, cur->bc_tp, + cur->bc_mp->m_ddev_targp, agfbno, + XFS_FSS_TO_BB(mp, 1), 0, &agfbp)) + return error; + ASSERT(!XFS_BUF_GETERROR(agfbp)); + xfs_trans_bhold_until_committed(cur->bc_tp, agfbp); + /* + * If we joined with the left neighbor, set the buffer in the + * cursor to the left block, and fix up the index. + */ + if (bp != lbp) { + cur->bc_bufs[level] = lbp; + cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT); + cur->bc_ra[level] = 0; + } + /* + * If we joined with the right neighbor and there's a level above + * us, increment the cursor at that level. + */ + else if (level + 1 < cur->bc_nlevels && + (error = xfs_inobt_increment(cur, level + 1, &i))) { + return error; + } + /* + * Readjust the ptr at this level if it's not a leaf, since it's + * still pointing at the deletion point, which makes the cursor + * inconsistent. If this makes the ptr 0, the caller fixes it up. + * We can't use decrement because it would change the next level up. + */ + if (level > 0) + cur->bc_ptrs[level]--; + /* + * Return value means the next level up has something to do. + */ + *stat = 2; + return 0; + +error0: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} +#endif /* _NOTYET_ */ + +/* + * Insert one record/level. Return information to the caller + * allowing the next level up to proceed if necessary. + */ +STATIC int /* error */ +xfs_inobt_insrec( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to insert record at */ + xfs_agblock_t *bnop, /* i/o: block number inserted */ + xfs_inobt_rec_t *recp, /* i/o: record data inserted */ + xfs_btree_cur_t **curp, /* output: new cursor replacing cur */ + int *stat) /* success/failure */ +{ + xfs_inobt_block_t *block; /* btree block record/key lives in */ + xfs_buf_t *bp; /* buffer for block */ + int error; /* error return value */ + int i; /* loop index */ + xfs_inobt_key_t key; /* key value being inserted */ + xfs_inobt_key_t *kp=NULL; /* pointer to btree keys */ + xfs_agblock_t nbno; /* block number of allocated block */ + xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */ + xfs_inobt_key_t nkey; /* new key value, from split */ + xfs_inobt_rec_t nrec; /* new record value, for caller */ + int optr; /* old ptr value */ + xfs_inobt_ptr_t *pp; /* pointer to btree addresses */ + int ptr; /* index in btree block for this rec */ + xfs_inobt_rec_t *rp=NULL; /* pointer to btree records */ + + /* + * If we made it to the root level, allocate a new root block + * and we're done. + */ + if (level >= cur->bc_nlevels) { + error = xfs_inobt_newroot(cur, &i); + *bnop = NULLAGBLOCK; + *stat = i; + return error; + } + /* + * Make a key out of the record data to be inserted, and save it. + */ + key.ir_startino = recp->ir_startino; /* INT_: direct copy */ + optr = ptr = cur->bc_ptrs[level]; + /* + * If we're off the left edge, return failure. + */ + if (ptr == 0) { + *stat = 0; + return 0; + } + /* + * Get pointers to the btree buffer and block. 
+ */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; + /* + * Check that the new entry is being inserted in the right place. + */ + if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (level == 0) { + rp = XFS_INOBT_REC_ADDR(block, ptr, cur); + xfs_btree_check_rec(cur->bc_btnum, recp, rp); + } else { + kp = XFS_INOBT_KEY_ADDR(block, ptr, cur); + xfs_btree_check_key(cur->bc_btnum, &key, kp); + } + } +#endif + nbno = NULLAGBLOCK; + ncur = (xfs_btree_cur_t *)0; + /* + * If the block is full, we can't insert the new entry until we + * make the block un-full. + */ + if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + /* + * First, try shifting an entry to the right neighbor. + */ + if ((error = xfs_inobt_rshift(cur, level, &i))) + return error; + if (i) { + /* nothing */ + } + /* + * Next, try shifting an entry to the left neighbor. + */ + else { + if ((error = xfs_inobt_lshift(cur, level, &i))) + return error; + if (i) { + optr = ptr = cur->bc_ptrs[level]; + } else { + /* + * Next, try splitting the current block + * in half. If this works we have to + * re-set our variables because + * we could be in a different block now. + */ + if ((error = xfs_inobt_split(cur, level, &nbno, + &nkey, &ncur, &i))) + return error; + if (i) { + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, + block, level, bp))) + return error; +#endif + ptr = cur->bc_ptrs[level]; + nrec.ir_startino = nkey.ir_startino; /* INT_: direct copy */ + } else { + /* + * Otherwise the insert fails. + */ + *stat = 0; + return 0; + } + } + } + } + /* + * At this point we know there's room for our new entry in the block + * we're pointing at. + */ + if (level > 0) { + /* + * It's a non-leaf entry. Make a hole for the new data + * in the key and ptr regions of the block. + */ + kp = XFS_INOBT_KEY_ADDR(block, 1, cur); + pp = XFS_INOBT_PTR_ADDR(block, 1, cur); +#ifdef DEBUG + for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) + return error; + } +#endif + memmove(&kp[ptr], &kp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); + memmove(&pp[ptr], &pp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); + /* + * Now stuff the new data in, bump numrecs and log the new data. + */ +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, *bnop, level))) + return error; +#endif + kp[ptr - 1] = key; /* INT_: struct copy */ + INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); + INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); + xfs_inobt_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + xfs_inobt_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + } else { + /* + * It's a leaf entry. Make a hole for the new record. + */ + rp = XFS_INOBT_REC_ADDR(block, 1, cur); + memmove(&rp[ptr], &rp[ptr - 1], + (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp)); + /* + * Now stuff the new record in, bump numrecs + * and log the new data. + */ + rp[ptr - 1] = *recp; /* INT_: struct copy */ + INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); + xfs_inobt_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); + } + /* + * Log the new number of records in the btree header. 
+ */ + xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); +#ifdef DEBUG + /* + * Check that the key/record is in the right place, now. + */ + if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + if (level == 0) + xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, + rp + ptr); + else + xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, + kp + ptr); + } +#endif + /* + * If we inserted at the start of a block, update the parents' keys. + */ + if (optr == 1 && (error = xfs_inobt_updkey(cur, &key, level + 1))) + return error; + /* + * Return the new block number, if any. + * If there is one, give back a record value and a cursor too. + */ + *bnop = nbno; + if (nbno != NULLAGBLOCK) { + *recp = nrec; /* INT_: struct copy */ + *curp = ncur; + } + *stat = 1; + return 0; +} + +/* + * Log header fields from a btree block. + */ +STATIC void +xfs_inobt_log_block( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* buffer containing btree block */ + int fields) /* mask of fields: XFS_BB_... */ +{ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + static const short offsets[] = { /* table of offsets */ + offsetof(xfs_inobt_block_t, bb_magic), + offsetof(xfs_inobt_block_t, bb_level), + offsetof(xfs_inobt_block_t, bb_numrecs), + offsetof(xfs_inobt_block_t, bb_leftsib), + offsetof(xfs_inobt_block_t, bb_rightsib), + sizeof(xfs_inobt_block_t) + }; + + xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last); + xfs_trans_log_buf(tp, bp, first, last); +} + +/* + * Log keys from a btree block (nonleaf). + */ +STATIC void +xfs_inobt_log_keys( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int kfirst, /* index of first key to log */ + int klast) /* index of last key to log */ +{ + xfs_inobt_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + xfs_inobt_key_t *kp; /* key pointer in btree block */ + int last; /* last byte offset logged */ + + block = XFS_BUF_TO_INOBT_BLOCK(bp); + kp = XFS_INOBT_KEY_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Log block pointer fields from a btree block (nonleaf). + */ +STATIC void +xfs_inobt_log_ptrs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int pfirst, /* index of first pointer to log */ + int plast) /* index of last pointer to log */ +{ + xfs_inobt_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + xfs_inobt_ptr_t *pp; /* block-pointer pointer in btree blk */ + + block = XFS_BUF_TO_INOBT_BLOCK(bp); + pp = XFS_INOBT_PTR_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Log records from a btree block (leaf). 
+ */ +STATIC void +xfs_inobt_log_recs( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_buf_t *bp, /* buffer containing btree block */ + int rfirst, /* index of first record to log */ + int rlast) /* index of last record to log */ +{ + xfs_inobt_block_t *block; /* btree block to log from */ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + xfs_inobt_rec_t *rp; /* record pointer for btree block */ + + block = XFS_BUF_TO_INOBT_BLOCK(bp); + rp = XFS_INOBT_REC_ADDR(block, 1, cur); + first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); + last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); +} + +/* + * Lookup the record. The cursor is made to point to it, based on dir. + * Return 0 if can't find any such record, 1 for success. + */ +STATIC int /* error */ +xfs_inobt_lookup( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_lookup_t dir, /* <=, ==, or >= */ + int *stat) /* success/failure */ +{ + xfs_agblock_t agbno; /* a.g. relative btree block number */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_inobt_block_t *block=NULL; /* current btree block */ + int diff; /* difference for the current key */ + int error; /* error return value */ + int keyno=0; /* current key number */ + int level; /* level in the btree */ + xfs_mount_t *mp; /* file system mount point */ + + /* + * Get the allocation group header, and the root block number. + */ + mp = cur->bc_mp; + { + xfs_agi_t *agi; /* a.g. inode header */ + + agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); + agno = INT_GET(agi->agi_seqno, ARCH_CONVERT); + agbno = INT_GET(agi->agi_root, ARCH_CONVERT); + } + /* + * Iterate over each level in the btree, starting at the root. + * For each level above the leaves, find the key we need, based + * on the lookup record, then follow the corresponding block + * pointer down to the next level. + */ + for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { + xfs_buf_t *bp; /* buffer pointer for btree block */ + xfs_daddr_t d; /* disk address of btree block */ + + /* + * Get the disk address we're looking for. + */ + d = XFS_AGB_TO_DADDR(mp, agno, agbno); + /* + * If the old buffer at this level is for a different block, + * throw it away, otherwise just use it. + */ + bp = cur->bc_bufs[level]; + if (bp && XFS_BUF_ADDR(bp) != d) + bp = (xfs_buf_t *)0; + if (!bp) { + /* + * Need to get a new buffer. Read it, then + * set it in the cursor, releasing the old one. + */ + if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, + agno, agbno, 0, &bp, XFS_INO_BTREE_REF))) + return error; + xfs_btree_setbuf(cur, level, bp); + /* + * Point to the btree block, now that we have the buffer + */ + block = XFS_BUF_TO_INOBT_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, level, + bp))) + return error; + } else + block = XFS_BUF_TO_INOBT_BLOCK(bp); + /* + * If we already had a key match at a higher level, we know + * we need to use the first entry in this block. + */ + if (diff == 0) + keyno = 1; + /* + * Otherwise we need to search this block. Do a binary search. + */ + else { + int high; /* high entry number */ + xfs_inobt_key_t *kkbase=NULL;/* base of keys in block */ + xfs_inobt_rec_t *krbase=NULL;/* base of records in block */ + int low; /* low entry number */ + + /* + * Get a pointer to keys or records. + */ + if (level > 0) + kkbase = XFS_INOBT_KEY_ADDR(block, 1, cur); + else + krbase = XFS_INOBT_REC_ADDR(block, 1, cur); + /* + * Set low and high entry numbers, 1-based. 
+ */ + low = 1; + if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { + /* + * If the block is empty, the tree must + * be an empty leaf. + */ + ASSERT(level == 0 && cur->bc_nlevels == 1); + cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; + *stat = 0; + return 0; + } + /* + * Binary search the block. + */ + while (low <= high) { + xfs_agino_t startino; /* key value */ + + /* + * keyno is average of low and high. + */ + keyno = (low + high) >> 1; + /* + * Get startino. + */ + if (level > 0) { + xfs_inobt_key_t *kkp; + + kkp = kkbase + keyno - 1; + startino = INT_GET(kkp->ir_startino, ARCH_CONVERT); + } else { + xfs_inobt_rec_t *krp; + + krp = krbase + keyno - 1; + startino = INT_GET(krp->ir_startino, ARCH_CONVERT); + } + /* + * Compute difference to get next direction. + */ + diff = (int)startino - cur->bc_rec.i.ir_startino; + /* + * Less than, move right. + */ + if (diff < 0) + low = keyno + 1; + /* + * Greater than, move left. + */ + else if (diff > 0) + high = keyno - 1; + /* + * Equal, we're done. + */ + else + break; + } + } + /* + * If there are more levels, set up for the next level + * by getting the block number and filling in the cursor. + */ + if (level > 0) { + /* + * If we moved left, need the previous key number, + * unless there isn't one. + */ + if (diff > 0 && --keyno < 1) + keyno = 1; + agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, agbno, level))) + return error; +#endif + cur->bc_ptrs[level] = keyno; + } + } + /* + * Done with the search. + * See if we need to adjust the results. + */ + if (dir != XFS_LOOKUP_LE && diff < 0) { + keyno++; + /* + * If ge search and we went off the end of the block, but it's + * not the last block, we're in the wrong block. + */ + if (dir == XFS_LOOKUP_GE && + keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && + INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + int i; + + cur->bc_ptrs[0] = keyno; + if ((error = xfs_inobt_increment(cur, 0, &i))) + return error; + ASSERT(i == 1); + *stat = 1; + return 0; + } + } + else if (dir == XFS_LOOKUP_LE && diff > 0) + keyno--; + cur->bc_ptrs[0] = keyno; + /* + * Return if we succeeded or not. + */ + if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) + *stat = 0; + else + *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); + return 0; +} + +/* + * Move 1 record left from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_inobt_lshift( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to shift record on */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ +#ifdef DEBUG + int i; /* loop index */ +#endif + xfs_inobt_key_t key; /* key value for leaf level upward */ + xfs_buf_t *lbp; /* buffer for left neighbor block */ + xfs_inobt_block_t *left; /* left neighbor btree block */ + xfs_inobt_key_t *lkp=NULL; /* key pointer for left block */ + xfs_inobt_ptr_t *lpp; /* address pointer for left block */ + xfs_inobt_rec_t *lrp=NULL; /* record pointer for left block */ + int nrec; /* new number of left block entries */ + xfs_buf_t *rbp; /* buffer for right (current) block */ + xfs_inobt_block_t *right; /* right (current) btree block */ + xfs_inobt_key_t *rkp=NULL; /* key pointer for right block */ + xfs_inobt_ptr_t *rpp=NULL; /* address pointer for right block */ + xfs_inobt_rec_t *rrp=NULL; /* record pointer for right block */ + + /* + * Set up variables for this block as "right". 
+ */ + rbp = cur->bc_bufs[level]; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; +#endif + /* + * If we've got no left sibling then we can't shift an entry left. + */ + if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + if (cur->bc_ptrs[level] <= 1) { + *stat = 0; + return 0; + } + /* + * Set up the left neighbor as "left". + */ + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, + XFS_INO_BTREE_REF))) + return error; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; + /* + * If it's full, it can't take another entry. + */ + if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + *stat = 0; + return 0; + } + nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; + /* + * If non-leaf, copy a key and a ptr to the left block. + */ + if (level > 0) { + lkp = XFS_INOBT_KEY_ADDR(left, nrec, cur); + rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); + *lkp = *rkp; + xfs_inobt_log_keys(cur, lbp, nrec, nrec); + lpp = XFS_INOBT_PTR_ADDR(left, nrec, cur); + rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) + return error; +#endif + *lpp = *rpp; /* INT_: no-change copy */ + xfs_inobt_log_ptrs(cur, lbp, nrec, nrec); + } + /* + * If leaf, copy a record to the left block. + */ + else { + lrp = XFS_INOBT_REC_ADDR(left, nrec, cur); + rrp = XFS_INOBT_REC_ADDR(right, 1, cur); + *lrp = *rrp; + xfs_inobt_log_recs(cur, lbp, nrec, nrec); + } + /* + * Bump and log left's numrecs, decrement and log right's numrecs. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); + xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); +#ifdef DEBUG + if (level > 0) + xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp); + else + xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); +#endif + INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); + xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); + /* + * Slide the contents of right down one entry. + */ + if (level > 0) { +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), + level))) + return error; + } +#endif + memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + } else { + memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ + rkp = &key; + } + /* + * Update the parent key values of right. + */ + if ((error = xfs_inobt_updkey(cur, rkp, level + 1))) + return error; + /* + * Slide the cursor value left one. + */ + cur->bc_ptrs[level]--; + *stat = 1; + return 0; +} + +/* + * Allocate a new root block, fill it in. + */ +STATIC int /* error */ +xfs_inobt_newroot( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + xfs_agi_t *agi; /* a.g. 
inode header */ + xfs_alloc_arg_t args; /* allocation argument structure */ + xfs_inobt_block_t *block; /* one half of the old root block */ + xfs_buf_t *bp; /* buffer containing block */ + int error; /* error return value */ + xfs_inobt_key_t *kp; /* btree key pointer */ + xfs_agblock_t lbno; /* left block number */ + xfs_buf_t *lbp; /* left buffer pointer */ + xfs_inobt_block_t *left; /* left btree block */ + xfs_buf_t *nbp; /* new (root) buffer */ + xfs_inobt_block_t *new; /* new (root) btree block */ + int nptr; /* new value for key index, 1 or 2 */ + xfs_inobt_ptr_t *pp; /* btree address pointer */ + xfs_agblock_t rbno; /* right block number */ + xfs_buf_t *rbp; /* right buffer pointer */ + xfs_inobt_block_t *right; /* right btree block */ + xfs_inobt_rec_t *rp; /* btree record pointer */ + + ASSERT(cur->bc_nlevels < XFS_IN_MAXLEVELS(cur->bc_mp)); + + /* + * Get a block & a buffer. + */ + agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); + args.tp = cur->bc_tp; + args.mp = cur->bc_mp; + args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, + INT_GET(agi->agi_root, ARCH_CONVERT)); + args.mod = args.minleft = args.alignment = args.total = args.wasdel = + args.isfl = args.userdata = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + args.type = XFS_ALLOCTYPE_NEAR_BNO; + if ((error = xfs_alloc_vextent(&args))) + return error; + /* + * None available, we fail. + */ + if (args.fsbno == NULLFSBLOCK) { + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + nbp = xfs_btree_get_bufs(args.mp, args.tp, args.agno, args.agbno, 0); + new = XFS_BUF_TO_INOBT_BLOCK(nbp); + /* + * Set the root data in the a.g. inode structure. + */ + INT_SET(agi->agi_root, ARCH_CONVERT, args.agbno); + INT_MOD(agi->agi_level, ARCH_CONVERT, 1); + xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, + XFS_AGI_ROOT | XFS_AGI_LEVEL); + /* + * At the previous root level there are now two blocks: the old + * root, and the new block generated when it was split. + * We don't know which one the cursor is pointing at, so we + * set up variables "left" and "right" for each case. + */ + bp = cur->bc_bufs[cur->bc_nlevels - 1]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, cur->bc_nlevels - 1, bp))) + return error; +#endif + if (INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + /* + * Our block is left, pick up the right block. + */ + lbp = bp; + lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp)); + left = block; + rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, + rbno, 0, &rbp, XFS_INO_BTREE_REF))) + return error; + bp = rbp; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, + cur->bc_nlevels - 1, rbp))) + return error; + nptr = 1; + } else { + /* + * Our block is right, pick up the left block. + */ + rbp = bp; + rbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(rbp)); + right = block; + lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, + lbno, 0, &lbp, XFS_INO_BTREE_REF))) + return error; + bp = lbp; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); + if ((error = xfs_btree_check_sblock(cur, left, + cur->bc_nlevels - 1, lbp))) + return error; + nptr = 2; + } + /* + * Fill in the new block's btree header and log it. 
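+ * The new root always gets exactly two entries: entry 1 carries the
+ * first key of "left" and left's block number, entry 2 the first key of
+ * "right" and right's block number, and bb_level is set to the old
+ * bc_nlevels, so the tree grows by exactly one level (agi_root and
+ * agi_level are updated just below).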
+ */ + INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); + INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels); + INT_SET(new->bb_numrecs, ARCH_CONVERT, 2); + INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); + INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); + xfs_inobt_log_block(args.tp, nbp, XFS_BB_ALL_BITS); + ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); + /* + * Fill in the key data in the new root. + */ + kp = XFS_INOBT_KEY_ADDR(new, 1, cur); + if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) { + kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur); /* INT_: struct copy */ + kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur); /* INT_: struct copy */ + } else { + rp = XFS_INOBT_REC_ADDR(left, 1, cur); + INT_COPY(kp[0].ir_startino, rp->ir_startino, ARCH_CONVERT); + rp = XFS_INOBT_REC_ADDR(right, 1, cur); + INT_COPY(kp[1].ir_startino, rp->ir_startino, ARCH_CONVERT); + } + xfs_inobt_log_keys(cur, nbp, 1, 2); + /* + * Fill in the pointer data in the new root. + */ + pp = XFS_INOBT_PTR_ADDR(new, 1, cur); + INT_SET(pp[0], ARCH_CONVERT, lbno); + INT_SET(pp[1], ARCH_CONVERT, rbno); + xfs_inobt_log_ptrs(cur, nbp, 1, 2); + /* + * Fix up the cursor. + */ + xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); + cur->bc_ptrs[cur->bc_nlevels] = nptr; + cur->bc_nlevels++; + *stat = 1; + return 0; +} + +/* + * Move 1 record right from cur/level if possible. + * Update cur to reflect the new path. + */ +STATIC int /* error */ +xfs_inobt_rshift( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to shift record on */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* loop index */ + xfs_inobt_key_t key; /* key value for leaf level upward */ + xfs_buf_t *lbp; /* buffer for left (current) block */ + xfs_inobt_block_t *left; /* left (current) btree block */ + xfs_inobt_key_t *lkp; /* key pointer for left block */ + xfs_inobt_ptr_t *lpp; /* address pointer for left block */ + xfs_inobt_rec_t *lrp; /* record pointer for left block */ + xfs_buf_t *rbp; /* buffer for right neighbor block */ + xfs_inobt_block_t *right; /* right neighbor btree block */ + xfs_inobt_key_t *rkp; /* key pointer for right block */ + xfs_inobt_ptr_t *rpp; /* address pointer for right block */ + xfs_inobt_rec_t *rrp=NULL; /* record pointer for right block */ + xfs_btree_cur_t *tcur; /* temporary cursor */ + + /* + * Set up variables for this block as "left". + */ + lbp = cur->bc_bufs[level]; + left = XFS_BUF_TO_INOBT_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; +#endif + /* + * If we've got no right sibling then we can't shift an entry right. + */ + if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { + *stat = 0; + return 0; + } + /* + * Set up the right neighbor as "right". + */ + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp, + XFS_INO_BTREE_REF))) + return error; + right = XFS_BUF_TO_INOBT_BLOCK(rbp); + if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) + return error; + /* + * If it's full, it can't take another entry. 
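+ * (If it is not full, the effect is the mirror image of the left shift
+ * above: e.g. left = [A B C] with the cursor on B and right = [D E]
+ * become left = [A B] and right = [C D E]; the cursor index is unchanged
+ * since only entries to its right move, and the parent key for the right
+ * block is updated from D's startino to C's via a temporary cursor.)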
+ */ + if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { + *stat = 0; + return 0; + } + /* + * Make a hole at the start of the right neighbor block, then + * copy the last left block entry to the hole. + */ + if (level > 0) { + lkp = XFS_INOBT_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + lpp = XFS_INOBT_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); + rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); +#ifdef DEBUG + if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) + return error; +#endif + *rkp = *lkp; /* INT_: no change copy */ + *rpp = *lpp; /* INT_: no change copy */ + xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + } else { + lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); + rrp = XFS_INOBT_REC_ADDR(right, 1, cur); + memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + *rrp = *lrp; + xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); + key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ + rkp = &key; + } + /* + * Decrement and log left's numrecs, bump and log right's numrecs. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); + xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); +#ifdef DEBUG + if (level > 0) + xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); + else + xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); +#endif + xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); + /* + * Using a temporary cursor, update the parent key values of the + * block on the right. + */ + if ((error = xfs_btree_dup_cursor(cur, &tcur))) + return error; + xfs_btree_lastrec(tcur, level); + if ((error = xfs_inobt_increment(tcur, level, &i)) || + (error = xfs_inobt_updkey(tcur, rkp, level + 1))) { + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; + } + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + *stat = 1; + return 0; +} + +/* + * Split cur/level block in half. + * Return new block number and its first record (to be inserted into parent). 
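+ * The new block number and its first key come back through bnop/keyp so
+ * that xfs_inobt_insrec can insert them one level up; this is what keeps
+ * the do/while loop in xfs_inobt_insert below going until some level
+ * absorbs the new entry without splitting (with xfs_inobt_newroot growing
+ * the tree if a split reaches the root).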
+ */ +STATIC int /* error */ +xfs_inobt_split( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level to split */ + xfs_agblock_t *bnop, /* output: block number allocated */ + xfs_inobt_key_t *keyp, /* output: first key of new block */ + xfs_btree_cur_t **curp, /* output: new cursor */ + int *stat) /* success/failure */ +{ + xfs_alloc_arg_t args; /* allocation argument structure */ + int error; /* error return value */ + int i; /* loop index/record number */ + xfs_agblock_t lbno; /* left (current) block number */ + xfs_buf_t *lbp; /* buffer for left block */ + xfs_inobt_block_t *left; /* left (current) btree block */ + xfs_inobt_key_t *lkp; /* left btree key pointer */ + xfs_inobt_ptr_t *lpp; /* left btree address pointer */ + xfs_inobt_rec_t *lrp; /* left btree record pointer */ + xfs_buf_t *rbp; /* buffer for right block */ + xfs_inobt_block_t *right; /* right (new) btree block */ + xfs_inobt_key_t *rkp; /* right btree key pointer */ + xfs_inobt_ptr_t *rpp; /* right btree address pointer */ + xfs_inobt_rec_t *rrp; /* right btree record pointer */ + + /* + * Set up left block (current one). + */ + lbp = cur->bc_bufs[level]; + args.tp = cur->bc_tp; + args.mp = cur->bc_mp; + lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp)); + /* + * Allocate the new block. + * If we can't do it, we're toast. Give up. + */ + args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, lbno); + args.mod = args.minleft = args.alignment = args.total = args.wasdel = + args.isfl = args.userdata = args.minalignslop = 0; + args.minlen = args.maxlen = args.prod = 1; + args.type = XFS_ALLOCTYPE_NEAR_BNO; + if ((error = xfs_alloc_vextent(&args))) + return error; + if (args.fsbno == NULLFSBLOCK) { + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + rbp = xfs_btree_get_bufs(args.mp, args.tp, args.agno, args.agbno, 0); + /* + * Set up the new block as "right". + */ + right = XFS_BUF_TO_INOBT_BLOCK(rbp); + /* + * "Left" is the current (according to the cursor) block. + */ + left = XFS_BUF_TO_INOBT_BLOCK(lbp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) + return error; +#endif + /* + * Fill in the btree header for the new block. + */ + INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); + right->bb_level = left->bb_level; /* INT_: direct copy */ + INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); + /* + * Make sure that if there's an odd number of entries now, that + * each new block will have the same number of entries. + */ + if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && + cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) + INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); + i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; + /* + * For non-leaf blocks, copy keys and addresses over to the new block. 
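+ * For example, if "left" held 9 entries, right->bb_numrecs starts at 4
+ * and is bumped to 5 when the cursor sits in the lower half, giving a
+ * 4/5 or 5/4 split so that after the pending insert each half ends up
+ * with five entries; i = 9 - numrecs(right) + 1 is then the index of the
+ * first entry to copy into the new block.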
+ */ + if (level > 0) { + lkp = XFS_INOBT_KEY_ADDR(left, i, cur); + lpp = XFS_INOBT_PTR_ADDR(left, i, cur); + rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); + rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); +#ifdef DEBUG + for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { + if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) + return error; + } +#endif + memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); + memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); + xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + *keyp = *rkp; + } + /* + * For leaf blocks, copy records over to the new block. + */ + else { + lrp = XFS_INOBT_REC_ADDR(left, i, cur); + rrp = XFS_INOBT_REC_ADDR(right, 1, cur); + memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); + xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); + keyp->ir_startino = rrp->ir_startino; /* INT_: direct copy */ + } + /* + * Find the left block number by looking in the buffer. + * Adjust numrecs, sibling pointers. + */ + INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); + right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ + INT_SET(left->bb_rightsib, ARCH_CONVERT, args.agbno); + INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); + xfs_inobt_log_block(args.tp, rbp, XFS_BB_ALL_BITS); + xfs_inobt_log_block(args.tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + /* + * If there's a block to the new block's right, make that block + * point back to right instead of to left. + */ + if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { + xfs_inobt_block_t *rrblock; /* rr btree block */ + xfs_buf_t *rrbp; /* buffer for rrblock */ + + if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, + INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp, + XFS_INO_BTREE_REF))) + return error; + rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); + if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) + return error; + INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.agbno); + xfs_inobt_log_block(args.tp, rrbp, XFS_BB_LEFTSIB); + } + /* + * If the cursor is really in the right block, move it there. + * If it's just pointing past the last entry in left, then we'll + * insert there, so don't change anything in that case. + */ + if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { + xfs_btree_setbuf(cur, level, rbp); + cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); + } + /* + * If there are more levels, we'll need another cursor which refers + * the right block, no matter where this cursor was. + */ + if (level + 1 < cur->bc_nlevels) { + if ((error = xfs_btree_dup_cursor(cur, curp))) + return error; + (*curp)->bc_ptrs[level + 1]++; + } + *bnop = args.agbno; + *stat = 1; + return 0; +} + +/* + * Update keys at all levels from here to the root along the cursor's path. + */ +STATIC int /* error */ +xfs_inobt_updkey( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_inobt_key_t *keyp, /* new key value to update to */ + int level) /* starting level for update */ +{ + int ptr; /* index of key in block */ + + /* + * Go up the tree from this level toward the root. + * At each level, update the key value to the value input. + * Stop when we reach a level where the cursor isn't pointing + * at the first entry in the block. 
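+ * For example, when a shift or insert changes the first entry of a block,
+ * the key for that block in its parent is rewritten in any case, but a
+ * grandparent entry only needs rewriting if the parent entry was itself
+ * the first in its block (bc_ptrs[] == 1), and so on toward the root.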
+ */ + for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { + xfs_buf_t *bp; /* buffer for block */ + xfs_inobt_block_t *block; /* btree block */ +#ifdef DEBUG + int error; /* error return value */ +#endif + xfs_inobt_key_t *kp; /* ptr to btree block keys */ + + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; +#endif + ptr = cur->bc_ptrs[level]; + kp = XFS_INOBT_KEY_ADDR(block, ptr, cur); + *kp = *keyp; + xfs_inobt_log_keys(cur, bp, ptr, ptr); + } + return 0; +} + +/* + * Externally visible routines. + */ + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_inobt_decrement( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat) /* success/failure */ +{ + xfs_inobt_block_t *block; /* btree block */ + int error; + int lev; /* btree level */ + + ASSERT(level < cur->bc_nlevels); + /* + * Read-ahead to the left at this level. + */ + xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); + /* + * Decrement the ptr at this level. If we're still in the block + * then we're done. + */ + if (--cur->bc_ptrs[level] > 0) { + *stat = 1; + return 0; + } + /* + * Get a pointer to the btree block. + */ + block = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[level]); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, + cur->bc_bufs[level]))) + return error; +#endif + /* + * If we just went off the left edge of the tree, return failure. + */ + if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * March up the tree decrementing pointers. + * Stop when we don't go off the left edge of a block. + */ + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + if (--cur->bc_ptrs[lev] > 0) + break; + /* + * Read-ahead the left block, we're going to read it + * in the next loop. + */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); + } + /* + * If we went off the root then we are seriously confused. + */ + ASSERT(lev < cur->bc_nlevels); + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. + */ + for (block = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); lev > level; ) { + xfs_agblock_t agbno; /* block number of btree block */ + xfs_buf_t *bp; /* buffer containing btree block */ + + agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, agbno, 0, &bp, + XFS_INO_BTREE_REF))) + return error; + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_INOBT_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; + cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); + } + *stat = 1; + return 0; +} + +#ifdef _NOTYET_ +/* + * Delete the record pointed to by cur. + * The cursor refers to the place where the record was (could be inserted) + * when the operation returns. + */ +int /* error */ +xfs_inobt_delete( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; + int i; /* result code */ + int level; /* btree level */ + + /* + * Go up the tree, starting at leaf level. + * If 2 is returned then a join was done; go to the next level. + * Otherwise we are done. 
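+ * (By convention in these btree routines a result of 0 means nothing was
+ * deleted at that level and 1 means the record went away without a join,
+ * so no entry above it has to be removed.)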
+ */ + for (level = 0, i = 2; i == 2; level++) { + if (error = xfs_inobt_delrec(cur, level, &i)) + return error; + } + if (i == 0) { + for (level = 1; level < cur->bc_nlevels; level++) { + if (cur->bc_ptrs[level] == 0) { + if (error = xfs_inobt_decrement(cur, level, &i)) + return error; + break; + } + } + } + *stat = i; + return 0; +} +#endif /* _NOTYET_ */ + +/* + * Get the data from the pointed-to record. + */ +int /* error */ +xfs_inobt_get_rec( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agino_t *ino, /* output: starting inode of chunk */ + __int32_t *fcnt, /* output: number of free inodes */ + xfs_inofree_t *free, /* output: free inode mask */ + int *stat, /* output: success/failure */ + xfs_arch_t arch) /* input: architecture */ +{ + xfs_inobt_block_t *block; /* btree block */ + xfs_buf_t *bp; /* buffer containing btree block */ +#ifdef DEBUG + int error; /* error return value */ +#endif + int ptr; /* record number */ + xfs_inobt_rec_t *rec; /* record data */ + + bp = cur->bc_bufs[0]; + ptr = cur->bc_ptrs[0]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, 0, bp))) + return error; +#endif + /* + * Off the right end or left end, return failure. + */ + if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { + *stat = 0; + return 0; + } + /* + * Point to the record and extract its data. + */ + rec = XFS_INOBT_REC_ADDR(block, ptr, cur); + ASSERT(arch == ARCH_NOCONVERT || arch == ARCH_CONVERT); + if (arch == ARCH_NOCONVERT) { + *ino = INT_GET(rec->ir_startino, ARCH_CONVERT); + *fcnt = INT_GET(rec->ir_freecount, ARCH_CONVERT); + *free = INT_GET(rec->ir_free, ARCH_CONVERT); + } else { + INT_COPY(*ino, rec->ir_startino, ARCH_CONVERT); + INT_COPY(*fcnt, rec->ir_freecount, ARCH_CONVERT); + INT_COPY(*free, rec->ir_free, ARCH_CONVERT); + } + *stat = 1; + return 0; +} + +/* + * Increment cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_inobt_increment( + xfs_btree_cur_t *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat) /* success/failure */ +{ + xfs_inobt_block_t *block; /* btree block */ + xfs_buf_t *bp; /* buffer containing btree block */ + int error; /* error return value */ + int lev; /* btree level */ + + ASSERT(level < cur->bc_nlevels); + /* + * Read-ahead to the right at this level. + */ + xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); + /* + * Get a pointer to the btree block. + */ + bp = cur->bc_bufs[level]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, level, bp))) + return error; +#endif + /* + * Increment the ptr at this level. If we're still in the block + * then we're done. + */ + if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { + *stat = 1; + return 0; + } + /* + * If we just went off the right edge of the tree, return failure. + */ + if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { + *stat = 0; + return 0; + } + /* + * March up the tree incrementing pointers. + * Stop when we don't go off the right edge of a block. + */ + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + bp = cur->bc_bufs[lev]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; +#endif + if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) + break; + /* + * Read-ahead the right block, we're going to read it + * in the next loop. 
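+ * (Overall, stepping past the last record of a leaf walks up until some
+ * ancestor block still has an entry to its right, then walks back down
+ * through the leftmost child at each level, resetting bc_ptrs[] to 1.)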
+ */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); + } + /* + * If we went off the root then we are seriously confused. + */ + ASSERT(lev < cur->bc_nlevels); + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. + */ + for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_INOBT_BLOCK(bp); + lev > level; ) { + xfs_agblock_t agbno; /* block number of btree block */ + + agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); + if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, + cur->bc_private.i.agno, agbno, 0, &bp, + XFS_INO_BTREE_REF))) + return error; + lev--; + xfs_btree_setbuf(cur, lev, bp); + block = XFS_BUF_TO_INOBT_BLOCK(bp); + if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) + return error; + cur->bc_ptrs[lev] = 1; + } + *stat = 1; + return 0; +} + +/* + * Insert the current record at the point referenced by cur. + * The cursor may be inconsistent on return if splits have been done. + */ +int /* error */ +xfs_inobt_insert( + xfs_btree_cur_t *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int i; /* result value, 0 for failure */ + int level; /* current level number in btree */ + xfs_agblock_t nbno; /* new block number (split result) */ + xfs_btree_cur_t *ncur; /* new cursor (split result) */ + xfs_inobt_rec_t nrec; /* record being inserted this level */ + xfs_btree_cur_t *pcur; /* previous level's cursor */ + + level = 0; + nbno = NULLAGBLOCK; + INT_SET(nrec.ir_startino, ARCH_CONVERT, cur->bc_rec.i.ir_startino); + INT_SET(nrec.ir_freecount, ARCH_CONVERT, cur->bc_rec.i.ir_freecount); + INT_SET(nrec.ir_free, ARCH_CONVERT, cur->bc_rec.i.ir_free); + ncur = (xfs_btree_cur_t *)0; + pcur = cur; + /* + * Loop going up the tree, starting at the leaf level. + * Stop when we don't get a split block, that must mean that + * the insert is finished with this level. + */ + do { + /* + * Insert nrec/nbno into this level of the tree. + * Note if we fail, nbno will be null. + */ + if ((error = xfs_inobt_insrec(pcur, level++, &nbno, &nrec, &ncur, + &i))) { + if (pcur != cur) + xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); + return error; + } + /* + * See if the cursor we just used is trash. + * Can't trash the caller's cursor, but otherwise we should + * if ncur is a new cursor or we're about to be done. + */ + if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) { + cur->bc_nlevels = pcur->bc_nlevels; + xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); + } + /* + * If we got a new cursor, switch to it. + */ + if (ncur) { + pcur = ncur; + ncur = (xfs_btree_cur_t *)0; + } + } while (nbno != NULLAGBLOCK); + *stat = i; + return 0; +} + +/* + * Lookup the record equal to ino in the btree given by cur. + */ +int /* error */ +xfs_inobt_lookup_eq( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat) /* success/failure */ +{ + cur->bc_rec.i.ir_startino = ino; + cur->bc_rec.i.ir_freecount = fcnt; + cur->bc_rec.i.ir_free = free; + return xfs_inobt_lookup(cur, XFS_LOOKUP_EQ, stat); +} + +/* + * Lookup the first record greater than or equal to ino + * in the btree given by cur. 
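+ * A typical in-tree caller (cf. the DEBUG loop in xfs_dialloc) walks every
+ * record with it, roughly:
+ *
+ *	xfs_inobt_lookup_ge(cur, 0, 0, 0, &i);
+ *	while (i == 1) {
+ *		xfs_inobt_get_rec(cur, &ino, &fcnt, &free, &i, ARCH_NOCONVERT);
+ *		... examine the record ...
+ *		xfs_inobt_increment(cur, 0, &i);
+ *	}
+ *
+ * (error returns and the xfs_agino_t/__int32_t/xfs_inofree_t locals are
+ * omitted here for brevity).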
+ */ +int /* error */ +xfs_inobt_lookup_ge( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat) /* success/failure */ +{ + cur->bc_rec.i.ir_startino = ino; + cur->bc_rec.i.ir_freecount = fcnt; + cur->bc_rec.i.ir_free = free; + return xfs_inobt_lookup(cur, XFS_LOOKUP_GE, stat); +} + +/* + * Lookup the first record less than or equal to ino + * in the btree given by cur. + */ +int /* error */ +xfs_inobt_lookup_le( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat) /* success/failure */ +{ + cur->bc_rec.i.ir_startino = ino; + cur->bc_rec.i.ir_freecount = fcnt; + cur->bc_rec.i.ir_free = free; + return xfs_inobt_lookup(cur, XFS_LOOKUP_LE, stat); +} + +/* + * Update the record referred to by cur, to the value given + * by [ino, fcnt, free]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +int /* error */ +xfs_inobt_update( + xfs_btree_cur_t *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free) /* free inode mask */ +{ + xfs_inobt_block_t *block; /* btree block to update */ + xfs_buf_t *bp; /* buffer containing btree block */ + int error; /* error return value */ + int ptr; /* current record number (updating) */ + xfs_inobt_rec_t *rp; /* pointer to updated record */ + + /* + * Pick up the current block. + */ + bp = cur->bc_bufs[0]; + block = XFS_BUF_TO_INOBT_BLOCK(bp); +#ifdef DEBUG + if ((error = xfs_btree_check_sblock(cur, block, 0, bp))) + return error; +#endif + /* + * Get the address of the rec to be updated. + */ + ptr = cur->bc_ptrs[0]; + rp = XFS_INOBT_REC_ADDR(block, ptr, cur); + /* + * Fill in the new contents and log them. + */ + INT_SET(rp->ir_startino, ARCH_CONVERT, ino); + INT_SET(rp->ir_freecount, ARCH_CONVERT, fcnt); + INT_SET(rp->ir_free, ARCH_CONVERT, free); + xfs_inobt_log_recs(cur, bp, ptr, ptr); + /* + * Updating first record in leaf. Pass new key value up to our parent. + */ + if (ptr == 1) { + xfs_inobt_key_t key; /* key containing [ino] */ + + INT_SET(key.ir_startino, ARCH_CONVERT, ino); + if ((error = xfs_inobt_updkey(cur, &key, 1))) + return error; + } + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_ialloc_btree.h linux.22-ac2/fs/xfs/xfs_ialloc_btree.h --- linux.vanilla/fs/xfs/xfs_ialloc_btree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_ialloc_btree.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,318 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_IALLOC_BTREE_H__ +#define __XFS_IALLOC_BTREE_H__ + +/* + * Inode map on-disk structures + */ + +struct xfs_buf; +struct xfs_btree_cur; +struct xfs_btree_sblock; +struct xfs_mount; + +/* + * There is a btree for the inode map per allocation group. + */ +#define XFS_IBT_MAGIC 0x49414254 /* 'IABT' */ + +typedef __uint64_t xfs_inofree_t; +#define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t)) +#define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3) +#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1) + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_MASKN) +xfs_inofree_t xfs_inobt_maskn(int i, int n); +#define XFS_INOBT_MASKN(i,n) xfs_inobt_maskn(i,n) +#else +#define XFS_INOBT_MASKN(i,n) \ + ((((n) >= XFS_INODES_PER_CHUNK ? \ + (xfs_inofree_t)0 : ((xfs_inofree_t)1 << (n))) - 1) << (i)) +#endif + +/* + * Data record structure + */ +typedef struct xfs_inobt_rec +{ + xfs_agino_t ir_startino; /* starting inode number */ + __int32_t ir_freecount; /* count of free inodes (set bits) */ + xfs_inofree_t ir_free; /* free inode mask */ +} xfs_inobt_rec_t; + +/* + * Key structure + */ +typedef struct xfs_inobt_key +{ + xfs_agino_t ir_startino; /* starting inode number */ +} xfs_inobt_key_t; + +typedef xfs_agblock_t xfs_inobt_ptr_t; /* btree pointer type */ + /* btree block header type */ +typedef struct xfs_btree_sblock xfs_inobt_block_t; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_INOBT_BLOCK) +xfs_inobt_block_t *xfs_buf_to_inobt_block(struct xfs_buf *bp); +#define XFS_BUF_TO_INOBT_BLOCK(bp) xfs_buf_to_inobt_block(bp) +#else +#define XFS_BUF_TO_INOBT_BLOCK(bp) ((xfs_inobt_block_t *)(XFS_BUF_PTR(bp))) +#endif + +/* + * Bit manipulations for ir_free. 
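+ * For example, with XFS_INODES_PER_CHUNK == 64, XFS_INOBT_MASKN(4, 3) is
+ * ((1 << 3) - 1) << 4 == 0x70, i.e. it selects inodes 4, 5 and 6 of the
+ * chunk, and XFS_INOBT_ALL_FREE is the fully-set 64-bit mask meaning no
+ * inode in the chunk has been allocated yet.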
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_MASK) +xfs_inofree_t xfs_inobt_mask(int i); +#define XFS_INOBT_MASK(i) xfs_inobt_mask(i) +#else +#define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_IS_FREE) +int xfs_inobt_is_free(xfs_inobt_rec_t *rp, int i, xfs_arch_t arch); +#define XFS_INOBT_IS_FREE(rp,i,arch) xfs_inobt_is_free(rp,i,arch) +#else +#define XFS_INOBT_IS_FREE(rp,i,arch) ((INT_GET((rp)->ir_free, arch) \ + & XFS_INOBT_MASK(i)) != 0) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_SET_FREE) +void xfs_inobt_set_free(xfs_inobt_rec_t *rp, int i, xfs_arch_t arch); +#define XFS_INOBT_SET_FREE(rp,i,arch) xfs_inobt_set_free(rp,i,arch) +#else +#define XFS_INOBT_SET_FREE(rp,i,arch) (INT_MOD_EXPR((rp)->ir_free, arch, |= XFS_INOBT_MASK(i))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_CLR_FREE) +void xfs_inobt_clr_free(xfs_inobt_rec_t *rp, int i, xfs_arch_t arch); +#define XFS_INOBT_CLR_FREE(rp,i,arch) xfs_inobt_clr_free(rp,i,arch) +#else +#define XFS_INOBT_CLR_FREE(rp,i,arch) (INT_MOD_EXPR((rp)->ir_free, arch, &= ~XFS_INOBT_MASK(i))) +#endif + +/* + * Real block structures have a size equal to the disk block size. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_BLOCK_SIZE) +int xfs_inobt_block_size(int lev, struct xfs_btree_cur *cur); +#define XFS_INOBT_BLOCK_SIZE(lev,cur) xfs_inobt_block_size(lev,cur) +#else +#define XFS_INOBT_BLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_BLOCK_MAXRECS) +int xfs_inobt_block_maxrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_INOBT_BLOCK_MAXRECS(lev,cur) xfs_inobt_block_maxrecs(lev,cur) +#else +#define XFS_INOBT_BLOCK_MAXRECS(lev,cur) \ + ((cur)->bc_mp->m_inobt_mxr[lev != 0]) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_BLOCK_MINRECS) +int xfs_inobt_block_minrecs(int lev, struct xfs_btree_cur *cur); +#define XFS_INOBT_BLOCK_MINRECS(lev,cur) xfs_inobt_block_minrecs(lev,cur) +#else +#define XFS_INOBT_BLOCK_MINRECS(lev,cur) \ + ((cur)->bc_mp->m_inobt_mnr[lev != 0]) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_IS_LAST_REC) +int xfs_inobt_is_last_rec(struct xfs_btree_cur *cur); +#define XFS_INOBT_IS_LAST_REC(cur) xfs_inobt_is_last_rec(cur) +#else +#define XFS_INOBT_IS_LAST_REC(cur) \ + ((cur)->bc_ptrs[0] == \ + INT_GET(XFS_BUF_TO_INOBT_BLOCK((cur)->bc_bufs[0])->bb_numrecs, ARCH_CONVERT)) +#endif + +/* + * Maximum number of inode btree levels. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IN_MAXLEVELS) +int xfs_in_maxlevels(struct xfs_mount *mp); +#define XFS_IN_MAXLEVELS(mp) xfs_in_maxlevels(mp) +#else +#define XFS_IN_MAXLEVELS(mp) ((mp)->m_in_maxlevels) +#endif + +/* + * block numbers in the AG. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IBT_BLOCK) +xfs_agblock_t xfs_ibt_block(struct xfs_mount *mp); +#define XFS_IBT_BLOCK(mp) xfs_ibt_block(mp) +#else +#define XFS_IBT_BLOCK(mp) ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_PREALLOC_BLOCKS) +xfs_agblock_t xfs_prealloc_blocks(struct xfs_mount *mp); +#define XFS_PREALLOC_BLOCKS(mp) xfs_prealloc_blocks(mp) +#else +#define XFS_PREALLOC_BLOCKS(mp) ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1)) +#endif + +/* + * Record, key, and pointer address macros for btree blocks. 
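+ * Leaf blocks hold bb_numrecs xfs_inobt_rec_t entries straight after the
+ * xfs_btree_sblock header; node blocks hold up to m_inobt_mxr[1] keys
+ * followed by room for the same number of block pointers, which is why
+ * XFS_INOBT_KEY_ADDR and XFS_INOBT_PTR_ADDR below take the level-1
+ * maxrecs value while XFS_INOBT_REC_ADDR takes the level-0 one.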
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_REC_ADDR) +xfs_inobt_rec_t * +xfs_inobt_rec_addr(xfs_inobt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_INOBT_REC_ADDR(bb,i,cur) xfs_inobt_rec_addr(bb,i,cur) +#else +#define XFS_INOBT_REC_ADDR(bb,i,cur) \ + XFS_BTREE_REC_ADDR(XFS_INOBT_BLOCK_SIZE(0,cur), xfs_inobt, bb, i, \ + XFS_INOBT_BLOCK_MAXRECS(0, cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_KEY_ADDR) +xfs_inobt_key_t * +xfs_inobt_key_addr(xfs_inobt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_INOBT_KEY_ADDR(bb,i,cur) xfs_inobt_key_addr(bb,i,cur) +#else +#define XFS_INOBT_KEY_ADDR(bb,i,cur) \ + XFS_BTREE_KEY_ADDR(XFS_INOBT_BLOCK_SIZE(1,cur), xfs_inobt, bb, i, \ + XFS_INOBT_BLOCK_MAXRECS(1, cur)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_PTR_ADDR) +xfs_inobt_ptr_t * +xfs_inobt_ptr_addr(xfs_inobt_block_t *bb, int i, struct xfs_btree_cur *cur); +#define XFS_INOBT_PTR_ADDR(bb,i,cur) xfs_inobt_ptr_addr(bb,i,cur) +#else +#define XFS_INOBT_PTR_ADDR(bb,i,cur) \ + XFS_BTREE_PTR_ADDR(XFS_INOBT_BLOCK_SIZE(1,cur), xfs_inobt, bb, i, \ + XFS_INOBT_BLOCK_MAXRECS(1, cur)) +#endif + +/* + * Prototypes for externally visible routines. + */ + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_inobt_decrement( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat); /* success/failure */ + +#ifdef _NOTYET_ +/* + * Delete the record pointed to by cur. + * The cursor refers to the place where the record was (could be inserted) + * when the operation returns. + */ +int /* error */ +xfs_inobt_delete( + struct xfs_btree_cur *cur, /* btree cursor */ + int *stat); /* success/failure */ +#endif /* _NOTYET_ */ + +/* + * Get the data from the pointed-to record. + */ +int /* error */ +xfs_inobt_get_rec( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t *ino, /* output: starting inode of chunk */ + __int32_t *fcnt, /* output: number of free inodes */ + xfs_inofree_t *free, /* output: free inode mask */ + int *stat, /* output: success/failure */ + xfs_arch_t arch); /* output: architecture */ + +/* + * Increment cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_inobt_increment( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* level in btree, 0 is leaf */ + int *stat); /* success/failure */ + +/* + * Insert the current record at the point referenced by cur. + * The cursor may be inconsistent on return if splits have been done. + */ +int /* error */ +xfs_inobt_insert( + struct xfs_btree_cur *cur, /* btree cursor */ + int *stat); /* success/failure */ + +/* + * Lookup the record equal to ino in the btree given by cur. + */ +int /* error */ +xfs_inobt_lookup_eq( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat); /* success/failure */ + +/* + * Lookup the first record greater than or equal to ino + * in the btree given by cur. 
+ */ +int /* error */ +xfs_inobt_lookup_ge( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat); /* success/failure */ + +/* + * Lookup the first record less than or equal to ino + * in the btree given by cur. + */ +int /* error */ +xfs_inobt_lookup_le( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat); /* success/failure */ + +/* + * Update the record referred to by cur, to the value given + * by [ino, fcnt, free]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +int /* error */ +xfs_inobt_update( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free); /* free inode mask */ + +#endif /* __XFS_IALLOC_BTREE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_ialloc.c linux.22-ac2/fs/xfs/xfs_ialloc.c --- linux.vanilla/fs/xfs/xfs_ialloc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_ialloc.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1379 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" + +/* + * Log specified fields for the inode given by bp and off. 
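+ * The fields bitmask is turned into one contiguous byte range through the
+ * offsets[] table below (one entry per XFS_DI_* bit plus a terminating
+ * size entry), so logging, say, di_mode together with di_gid logs
+ * everything from offsetof(di_mode) up to the byte before di_nlink.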
+ */ +STATIC void +xfs_ialloc_log_di( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* inode buffer */ + int off, /* index of inode in buffer */ + int fields) /* bitmask of fields to log */ +{ + int first; /* first byte number */ + int ioffset; /* off in bytes */ + int last; /* last byte number */ + xfs_mount_t *mp; /* mount point structure */ + static const short offsets[] = { /* field offsets */ + /* keep in sync with bits */ + offsetof(xfs_dinode_core_t, di_magic), + offsetof(xfs_dinode_core_t, di_mode), + offsetof(xfs_dinode_core_t, di_version), + offsetof(xfs_dinode_core_t, di_format), + offsetof(xfs_dinode_core_t, di_onlink), + offsetof(xfs_dinode_core_t, di_uid), + offsetof(xfs_dinode_core_t, di_gid), + offsetof(xfs_dinode_core_t, di_nlink), + offsetof(xfs_dinode_core_t, di_projid), + offsetof(xfs_dinode_core_t, di_pad), + offsetof(xfs_dinode_core_t, di_atime), + offsetof(xfs_dinode_core_t, di_mtime), + offsetof(xfs_dinode_core_t, di_ctime), + offsetof(xfs_dinode_core_t, di_size), + offsetof(xfs_dinode_core_t, di_nblocks), + offsetof(xfs_dinode_core_t, di_extsize), + offsetof(xfs_dinode_core_t, di_nextents), + offsetof(xfs_dinode_core_t, di_anextents), + offsetof(xfs_dinode_core_t, di_forkoff), + offsetof(xfs_dinode_core_t, di_aformat), + offsetof(xfs_dinode_core_t, di_dmevmask), + offsetof(xfs_dinode_core_t, di_dmstate), + offsetof(xfs_dinode_core_t, di_flags), + offsetof(xfs_dinode_core_t, di_gen), + offsetof(xfs_dinode_t, di_next_unlinked), + offsetof(xfs_dinode_t, di_u), + offsetof(xfs_dinode_t, di_a), + sizeof(xfs_dinode_t) + }; + + + ASSERT(offsetof(xfs_dinode_t, di_core) == 0); + ASSERT((fields & (XFS_DI_U|XFS_DI_A)) == 0); + mp = tp->t_mountp; + /* + * Get the inode-relative first and last bytes for these fields + */ + xfs_btree_offsets(fields, offsets, XFS_DI_NUM_BITS, &first, &last); + /* + * Convert to buffer offsets and log it. + */ + ioffset = off << mp->m_sb.sb_inodelog; + first += ioffset; + last += ioffset; + xfs_trans_log_buf(tp, bp, first, last); +} + +/* + * Allocation group level functions. + */ + +/* + * Allocate new inodes in the allocation group specified by agbp. + * Return 0 for success, else error code. + */ +STATIC int /* error code or 0 */ +xfs_ialloc_ag_alloc( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *agbp, /* alloc group buffer */ + int *alloc) +{ + xfs_agi_t *agi; /* allocation group header */ + xfs_alloc_arg_t args; /* allocation argument structure */ + int blks_per_cluster; /* fs blocks per inode cluster */ + xfs_btree_cur_t *cur; /* inode btree cursor */ + xfs_daddr_t d; /* disk addr of buffer */ + int error; + xfs_buf_t *fbuf; /* new free inodes' buffer */ + xfs_dinode_t *free; /* new free inode structure */ + int i; /* inode counter */ + int j; /* block counter */ + int nbufs; /* num bufs of new inodes */ + xfs_agino_t newino; /* new first inode's number */ + xfs_agino_t newlen; /* new number of inodes */ + int ninodes; /* num inodes per buf */ + xfs_agino_t thisino; /* current inode number, for loop */ + int version; /* inode version number to use */ + static xfs_timestamp_t ztime; /* zero xfs timestamp */ + int isaligned; /* inode allocation at stripe unit */ + /* boundary */ + xfs_dinode_core_t dic; /* a dinode_core to copy to new */ + /* inodes */ + + args.tp = tp; + args.mp = tp->t_mountp; + + /* + * Locking will ensure that we don't have two callers in here + * at one time. 
+ */ + newlen = XFS_IALLOC_INODES(args.mp); + if (args.mp->m_maxicount && + args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) + return XFS_ERROR(ENOSPC); + args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp); + /* + * Set the alignment for the allocation. + * If stripe alignment is turned on then align at stripe unit + * boundary. + * If the cluster size is smaller than a filesystem block + * then we're doing I/O for inodes in filesystem block size pieces, + * so don't need alignment anyway. + */ + isaligned = 0; + if (args.mp->m_sinoalign) { + ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN)); + args.alignment = args.mp->m_dalign; + isaligned = 1; + } else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) && + args.mp->m_sb.sb_inoalignmt >= + XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp))) + args.alignment = args.mp->m_sb.sb_inoalignmt; + else + args.alignment = 1; + agi = XFS_BUF_TO_AGI(agbp); + /* + * Need to figure out where to allocate the inode blocks. + * Ideally they should be spaced out through the a.g. + * For now, just allocate blocks up front. + */ + args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT); + args.fsbno = XFS_AGB_TO_FSB(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT), + args.agbno); + /* + * Allocate a fixed-size extent of inodes. + */ + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.mod = args.total = args.wasdel = args.isfl = args.userdata = + args.minalignslop = 0; + args.prod = 1; + /* + * Allow space for the inode btree to split. + */ + args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1; + if ((error = xfs_alloc_vextent(&args))) + return error; + + /* + * If stripe alignment is turned on, then try again with cluster + * alignment. + */ + if (isaligned && args.fsbno == NULLFSBLOCK) { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT); + args.fsbno = XFS_AGB_TO_FSB(args.mp, + INT_GET(agi->agi_seqno, ARCH_CONVERT), args.agbno); + if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) && + args.mp->m_sb.sb_inoalignmt >= + XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp))) + args.alignment = args.mp->m_sb.sb_inoalignmt; + else + args.alignment = 1; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + + if (args.fsbno == NULLFSBLOCK) { + *alloc = 0; + return 0; + } + ASSERT(args.len == args.minlen); + /* + * Convert the results. + */ + newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0); + /* + * Loop over the new block(s), filling in the inodes. + * For small block sizes, manipulate the inodes in buffers + * which are multiples of the blocks size. + */ + if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) { + blks_per_cluster = 1; + nbufs = (int)args.len; + ninodes = args.mp->m_sb.sb_inopblock; + } else { + blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) / + args.mp->m_sb.sb_blocksize; + nbufs = (int)args.len / blks_per_cluster; + ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock; + } + /* + * Figure out what version number to use in the inodes we create. + * If the superblock version has caught up to the one that supports + * the new inode format, then use the new inode version. Otherwise + * use the old version so that old kernels will continue to be + * able to use the file system. + */ + if (XFS_SB_VERSION_HASNLINK(&args.mp->m_sb)) + version = XFS_DINODE_VERSION_2; + else + version = XFS_DINODE_VERSION_1; + for (j = 0; j < nbufs; j++) { + /* + * Get the block. 
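+ * (With a hypothetical 4k block size, 256-byte inodes and an 8k inode
+ * cluster, blks_per_cluster is 2, ninodes is 32 and an args.len of 8
+ * blocks is written out as nbufs = 4 cluster-sized buffers.)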
+ */ + d = XFS_AGB_TO_DADDR(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT), + args.agbno + (j * blks_per_cluster)); + fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d, + args.mp->m_bsize * blks_per_cluster, + XFS_BUF_LOCK); + ASSERT(fbuf); + ASSERT(!XFS_BUF_GETERROR(fbuf)); + /* + * Loop over the inodes in this buffer. + */ + INT_SET(dic.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC); + INT_ZERO(dic.di_mode, ARCH_CONVERT); + INT_SET(dic.di_version, ARCH_CONVERT, version); + INT_ZERO(dic.di_format, ARCH_CONVERT); + INT_ZERO(dic.di_onlink, ARCH_CONVERT); + INT_ZERO(dic.di_uid, ARCH_CONVERT); + INT_ZERO(dic.di_gid, ARCH_CONVERT); + INT_ZERO(dic.di_nlink, ARCH_CONVERT); + INT_ZERO(dic.di_projid, ARCH_CONVERT); + memset(&(dic.di_pad[0]), 0, sizeof(dic.di_pad)); + INT_SET(dic.di_atime.t_sec, ARCH_CONVERT, ztime.t_sec); + INT_SET(dic.di_atime.t_nsec, ARCH_CONVERT, ztime.t_nsec); + + INT_SET(dic.di_mtime.t_sec, ARCH_CONVERT, ztime.t_sec); + INT_SET(dic.di_mtime.t_nsec, ARCH_CONVERT, ztime.t_nsec); + + INT_SET(dic.di_ctime.t_sec, ARCH_CONVERT, ztime.t_sec); + INT_SET(dic.di_ctime.t_nsec, ARCH_CONVERT, ztime.t_nsec); + + INT_ZERO(dic.di_size, ARCH_CONVERT); + INT_ZERO(dic.di_nblocks, ARCH_CONVERT); + INT_ZERO(dic.di_extsize, ARCH_CONVERT); + INT_ZERO(dic.di_nextents, ARCH_CONVERT); + INT_ZERO(dic.di_anextents, ARCH_CONVERT); + INT_ZERO(dic.di_forkoff, ARCH_CONVERT); + INT_ZERO(dic.di_aformat, ARCH_CONVERT); + INT_ZERO(dic.di_dmevmask, ARCH_CONVERT); + INT_ZERO(dic.di_dmstate, ARCH_CONVERT); + INT_ZERO(dic.di_flags, ARCH_CONVERT); + INT_ZERO(dic.di_gen, ARCH_CONVERT); + + for (i = 0; i < ninodes; i++) { + free = XFS_MAKE_IPTR(args.mp, fbuf, i); + memcpy(&(free->di_core), &dic, sizeof(xfs_dinode_core_t)); + INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO); + xfs_ialloc_log_di(tp, fbuf, i, + XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED); + } + xfs_trans_inode_alloc_buf(tp, fbuf); + } + INT_MOD(agi->agi_count, ARCH_CONVERT, newlen); + INT_MOD(agi->agi_freecount, ARCH_CONVERT, newlen); + down_read(&args.mp->m_peraglock); + args.mp->m_perag[INT_GET(agi->agi_seqno, ARCH_CONVERT)].pagi_freecount += newlen; + up_read(&args.mp->m_peraglock); + INT_SET(agi->agi_newino, ARCH_CONVERT, newino); + /* + * Insert records describing the new inode chunk into the btree. + */ + cur = xfs_btree_init_cursor(args.mp, tp, agbp, + INT_GET(agi->agi_seqno, ARCH_CONVERT), + XFS_BTNUM_INO, (xfs_inode_t *)0, 0); + for (thisino = newino; + thisino < newino + newlen; + thisino += XFS_INODES_PER_CHUNK) { + if ((error = xfs_inobt_lookup_eq(cur, thisino, + XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) { + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; + } + ASSERT(i == 0); + if ((error = xfs_inobt_insert(cur, &i))) { + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; + } + ASSERT(i == 1); + } + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + /* + * Log allocation group header fields + */ + xfs_ialloc_log_agi(tp, agbp, + XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); + /* + * Modify/log superblock values for inode count and inode free count. 
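+ * (newlen inodes were added above, e.g. a single 64-inode chunk adds one
+ * btree record with ir_freecount == XFS_INODES_PER_CHUNK and a fully set
+ * ir_free mask, and both sb_icount and sb_ifree grow by newlen.)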
+ */ + xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); + xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); + *alloc = 1; + return 0; +} + +STATIC __inline xfs_agnumber_t +xfs_ialloc_next_ag( + xfs_mount_t *mp) +{ + xfs_agnumber_t agno; + + spin_lock(&mp->m_agirotor_lock); + agno = mp->m_agirotor; + if (++mp->m_agirotor == mp->m_maxagi) + mp->m_agirotor = 0; + spin_unlock(&mp->m_agirotor_lock); + + return agno; +} + +/* + * Select an allocation group to look for a free inode in, based on the parent + * inode and then mode. Return the allocation group buffer. + */ +STATIC xfs_buf_t * /* allocation group buffer */ +xfs_ialloc_ag_select( + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t parent, /* parent directory inode number */ + mode_t mode, /* bits set to indicate file type */ + int okalloc) /* ok to allocate more space */ +{ + xfs_buf_t *agbp; /* allocation group header buffer */ + xfs_agnumber_t agcount; /* number of ag's in the filesystem */ + xfs_agnumber_t agno; /* current ag number */ + int flags; /* alloc buffer locking flags */ + xfs_extlen_t ineed; /* blocks needed for inode allocation */ + xfs_extlen_t longest = 0; /* longest extent available */ + xfs_mount_t *mp; /* mount point structure */ + int needspace; /* file mode implies space allocated */ + xfs_perag_t *pag; /* per allocation group data */ + xfs_agnumber_t pagno; /* parent (starting) ag number */ + + /* + * Files of these types need at least one block if length > 0 + * (and they won't fit in the inode, but that's hard to figure out). + */ + needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); + mp = tp->t_mountp; + agcount = mp->m_maxagi; + if (S_ISDIR(mode)) + pagno = xfs_ialloc_next_ag(mp); + else { + pagno = XFS_INO_TO_AGNO(mp, parent); + if (pagno >= agcount) + pagno = 0; + } + ASSERT(pagno < agcount); + /* + * Loop through allocation groups, looking for one with a little + * free space in it. Note we don't look for free inodes, exactly. + * Instead, we include whether there is a need to allocate inodes + * to mean that blocks must be allocated for them, + * if none are currently free. + */ + agno = pagno; + flags = XFS_ALLOC_FLAG_TRYLOCK; + down_read(&mp->m_peraglock); + for (;;) { + pag = &mp->m_perag[agno]; + if (!pag->pagi_init) { + if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) { + agbp = NULL; + goto nextag; + } + } else + agbp = NULL; + + if (!pag->pagi_inodeok) { + xfs_ialloc_next_ag(mp); + goto unlock_nextag; + } + + /* + * Is there enough free space for the file plus a block + * of inodes (if we need to allocate some)? + */ + ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp); + if (ineed && !pag->pagf_init) { + if (agbp == NULL && + xfs_ialloc_read_agi(mp, tp, agno, &agbp)) { + agbp = NULL; + goto nextag; + } + (void)xfs_alloc_pagf_init(mp, tp, agno, flags); + } + if (!ineed || pag->pagf_init) { + if (ineed && !(longest = pag->pagf_longest)) + longest = pag->pagf_flcount > 0; + if (!ineed || + (pag->pagf_freeblks >= needspace + ineed && + longest >= ineed && + okalloc)) { + if (agbp == NULL && + xfs_ialloc_read_agi(mp, tp, agno, &agbp)) { + agbp = NULL; + goto nextag; + } + up_read(&mp->m_peraglock); + return agbp; + } + } +unlock_nextag: + if (agbp) + xfs_trans_brelse(tp, agbp); +nextag: + /* + * No point in iterating over the rest, if we're shutting + * down. 
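+ * (The search makes at most two full passes over the AGs: first with
+ * XFS_ALLOC_FLAG_TRYLOCK so a busy group is skipped rather than waited
+ * on, then, once it has wrapped back to the starting AG, again with
+ * flags cleared to 0 before finally giving up.)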
+ */ + if (XFS_FORCED_SHUTDOWN(mp)) { + up_read(&mp->m_peraglock); + return (xfs_buf_t *)0; + } + agno++; + if (agno >= agcount) + agno = 0; + if (agno == pagno) { + if (flags == 0) { + up_read(&mp->m_peraglock); + return (xfs_buf_t *)0; + } + flags = 0; + } + } +} + +/* + * Visible inode allocation functions. + */ + +/* + * Allocate an inode on disk. + * Mode is used to tell whether the new inode will need space, and whether + * it is a directory. + * + * The arguments IO_agbp and alloc_done are defined to work within + * the constraint of one allocation per transaction. + * xfs_dialloc() is designed to be called twice if it has to do an + * allocation to make more free inodes. On the first call, + * IO_agbp should be set to NULL. If an inode is available, + * i.e., xfs_dialloc() did not need to do an allocation, an inode + * number is returned. In this case, IO_agbp would be set to the + * current ag_buf and alloc_done set to false. + * If an allocation needed to be done, xfs_dialloc would return + * the current ag_buf in IO_agbp and set alloc_done to true. + * The caller should then commit the current transaction, allocate a new + * transaction, and call xfs_dialloc() again, passing in the previous + * value of IO_agbp. IO_agbp should be held across the transactions. + * Since the agbp is locked across the two calls, the second call is + * guaranteed to have a free inode available. + * + * Once we successfully pick an inode its number is returned and the + * on-disk data structures are updated. The inode itself is not read + * in, since doing so would break ordering constraints with xfs_reclaim. + */ +int +xfs_dialloc( + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t parent, /* parent inode (directory) */ + mode_t mode, /* mode bits for new inode */ + int okalloc, /* ok to allocate more space */ + xfs_buf_t **IO_agbp, /* in/out ag header's buffer */ + boolean_t *alloc_done, /* true if we needed to replenish + inode freelist */ + xfs_ino_t *inop) /* inode number allocated */ +{ + xfs_agnumber_t agcount; /* number of allocation groups */ + xfs_buf_t *agbp; /* allocation group header's buffer */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_agi_t *agi; /* allocation group header structure */ + xfs_btree_cur_t *cur; /* inode allocation btree cursor */ + int error; /* error return value */ + int i; /* result code */ + int ialloced; /* inode allocation status */ + int noroom = 0; /* no space for inode blk allocation */ + xfs_ino_t ino; /* fs-relative inode to be returned */ + /* REFERENCED */ + int j; /* result code */ + xfs_mount_t *mp; /* file system mount structure */ + int offset; /* index of inode in chunk */ + xfs_agino_t pagino; /* parent's a.g. relative inode # */ + xfs_agnumber_t pagno; /* parent's allocation group number */ + xfs_inobt_rec_t rec; /* inode allocation record */ + xfs_agnumber_t tagno; /* testing allocation group number */ + xfs_btree_cur_t *tcur; /* temp cursor */ + xfs_inobt_rec_t trec; /* temp inode allocation record */ + + + if (*IO_agbp == NULL) { + /* + * We do not have an agbp, so select an initial allocation + * group for inode allocation. + */ + agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc); + /* + * Couldn't find an allocation group satisfying the + * criteria, give up. + */ + if (!agbp) { + *inop = NULLFSINO; + return 0; + } + agi = XFS_BUF_TO_AGI(agbp); + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + } else { + /* + * Continue where we left off before. 
In this case, we + * know that the allocation group has free inodes. + */ + agbp = *IO_agbp; + agi = XFS_BUF_TO_AGI(agbp); + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0); + } + mp = tp->t_mountp; + agcount = mp->m_sb.sb_agcount; + agno = INT_GET(agi->agi_seqno, ARCH_CONVERT); + tagno = agno; + pagno = XFS_INO_TO_AGNO(mp, parent); + pagino = XFS_INO_TO_AGINO(mp, parent); + + /* + * If we have already hit the ceiling of inode blocks then clear + * okalloc so we scan all available agi structures for a free + * inode. + */ + + if (mp->m_maxicount && + mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) { + noroom = 1; + okalloc = 0; + } + + /* + * Loop until we find an allocation group that either has free inodes + * or in which we can allocate some inodes. Iterate through the + * allocation groups upward, wrapping at the end. + */ + *alloc_done = B_FALSE; + while (INT_ISZERO(agi->agi_freecount, ARCH_CONVERT)) { + /* + * Don't do anything if we're not supposed to allocate + * any blocks, just go on to the next ag. + */ + if (okalloc) { + /* + * Try to allocate some new inodes in the allocation + * group. + */ + if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) { + xfs_trans_brelse(tp, agbp); + if (error == ENOSPC) { + *inop = NULLFSINO; + return 0; + } else + return error; + } + if (ialloced) { + /* + * We successfully allocated some inodes, return + * the current context to the caller so that it + * can commit the current transaction and call + * us again where we left off. + */ + ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0); + *alloc_done = B_TRUE; + *IO_agbp = agbp; + *inop = NULLFSINO; + return 0; + } + } + /* + * If it failed, give up on this ag. + */ + xfs_trans_brelse(tp, agbp); + /* + * Go on to the next ag: get its ag header. + */ +nextag: + if (++tagno == agcount) + tagno = 0; + if (tagno == agno) { + *inop = NULLFSINO; + return noroom ? ENOSPC : 0; + } + down_read(&mp->m_peraglock); + if (mp->m_perag[tagno].pagi_inodeok == 0) { + up_read(&mp->m_peraglock); + goto nextag; + } + error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp); + up_read(&mp->m_peraglock); + if (error) + goto nextag; + agi = XFS_BUF_TO_AGI(agbp); + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + } + /* + * Here with an allocation group that has a free inode. + * Reset agno since we may have chosen a new ag in the + * loop above. + */ + agno = tagno; + *IO_agbp = NULL; + cur = xfs_btree_init_cursor(mp, tp, agbp, INT_GET(agi->agi_seqno, ARCH_CONVERT), + XFS_BTNUM_INO, (xfs_inode_t *)0, 0); + /* + * If pagino is 0 (this is the root inode allocation) use newino. + * This must work because we've just allocated some. + */ + if (!pagino) + pagino = INT_GET(agi->agi_newino, ARCH_CONVERT); +#ifdef DEBUG + if (cur->bc_nlevels == 1) { + int freecount = 0; + + if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + do { + if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + freecount += rec.ir_freecount; + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error0; + } while (i == 1); + + ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || + XFS_FORCED_SHUTDOWN(mp)); + } +#endif + /* + * If in the same a.g. as the parent, try to get near the parent. 
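An illustrative aside on the calling convention described above: when xfs_dialloc() has to allocate a new chunk it returns with alloc_done set and the AG buffer handed back through IO_agbp, and the caller is expected to commit the transaction, start a new one, and call again with that buffer. The sketch below mocks that two-call dance in userspace; the stub type and function are hypothetical stand-ins, not the kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    struct agbuf { int dummy; };            /* stands in for xfs_buf_t       */
    static struct agbuf fake_agbp;

    /* Stubbed allocator: the first call has to replenish the freelist, the
     * second call (with the AG buffer passed back in) hands out an inode.  */
    static int stub_dialloc(struct agbuf **io_agbp, bool *alloc_done,
                            unsigned long long *inop)
    {
            if (*io_agbp == NULL) {
                    *io_agbp = &fake_agbp;  /* keep this buffer for call two */
                    *alloc_done = true;
                    *inop = 0;
                    return 0;
            }
            *alloc_done = false;
            *inop = 42;
            return 0;
    }

    int main(void)
    {
            struct agbuf *agbp = NULL;      /* NULL on the first call        */
            bool alloc_done;
            unsigned long long ino;

            stub_dialloc(&agbp, &alloc_done, &ino);
            if (alloc_done) {
                    /* A real caller would commit the transaction and start
                     * a new one here, holding agbp across both of them.    */
                    stub_dialloc(&agbp, &alloc_done, &ino);
            }
            printf("allocated inode %llu\n", ino);
            return 0;
    }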
+ */ + if (pagno == agno) { + if ((error = xfs_inobt_lookup_le(cur, pagino, 0, 0, &i))) + goto error0; + if (i != 0 && + (error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &j, ARCH_NOCONVERT)) == 0 && + j == 1 && + rec.ir_freecount > 0) { + /* + * Found a free inode in the same chunk + * as parent, done. + */ + } + /* + * In the same a.g. as parent, but parent's chunk is full. + */ + else { + int doneleft; /* done, to the left */ + int doneright; /* done, to the right */ + + if (error) + goto error0; + ASSERT(i == 1); + ASSERT(j == 1); + /* + * Duplicate the cursor, search left & right + * simultaneously. + */ + if ((error = xfs_btree_dup_cursor(cur, &tcur))) + goto error0; + /* + * Search left with tcur, back up 1 record. + */ + if ((error = xfs_inobt_decrement(tcur, 0, &i))) + goto error1; + doneleft = !i; + if (!doneleft) { + if ((error = xfs_inobt_get_rec(tcur, + &trec.ir_startino, + &trec.ir_freecount, + &trec.ir_free, &i, ARCH_NOCONVERT))) + goto error1; + XFS_WANT_CORRUPTED_GOTO(i == 1, error1); + } + /* + * Search right with cur, go forward 1 record. + */ + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error1; + doneright = !i; + if (!doneright) { + if ((error = xfs_inobt_get_rec(cur, + &rec.ir_startino, + &rec.ir_freecount, + &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error1; + XFS_WANT_CORRUPTED_GOTO(i == 1, error1); + } + /* + * Loop until we find the closest inode chunk + * with a free one. + */ + while (!doneleft || !doneright) { + int useleft; /* using left inode + chunk this time */ + + /* + * Figure out which block is closer, + * if both are valid. + */ + if (!doneleft && !doneright) + useleft = + pagino - + (trec.ir_startino + + XFS_INODES_PER_CHUNK - 1) < + rec.ir_startino - pagino; + else + useleft = !doneleft; + /* + * If checking the left, does it have + * free inodes? + */ + if (useleft && trec.ir_freecount) { + /* + * Yes, set it up as the chunk to use. + */ + rec = trec; + xfs_btree_del_cursor(cur, + XFS_BTREE_NOERROR); + cur = tcur; + break; + } + /* + * If checking the right, does it have + * free inodes? + */ + if (!useleft && rec.ir_freecount) { + /* + * Yes, it's already set up. + */ + xfs_btree_del_cursor(tcur, + XFS_BTREE_NOERROR); + break; + } + /* + * If used the left, get another one + * further left. + */ + if (useleft) { + if ((error = xfs_inobt_decrement(tcur, 0, + &i))) + goto error1; + doneleft = !i; + if (!doneleft) { + if ((error = xfs_inobt_get_rec( + tcur, + &trec.ir_startino, + &trec.ir_freecount, + &trec.ir_free, &i, ARCH_NOCONVERT))) + goto error1; + XFS_WANT_CORRUPTED_GOTO(i == 1, + error1); + } + } + /* + * If used the right, get another one + * further right. + */ + else { + if ((error = xfs_inobt_increment(cur, 0, + &i))) + goto error1; + doneright = !i; + if (!doneright) { + if ((error = xfs_inobt_get_rec( + cur, + &rec.ir_startino, + &rec.ir_freecount, + &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error1; + XFS_WANT_CORRUPTED_GOTO(i == 1, + error1); + } + } + } + ASSERT(!doneleft || !doneright); + } + } + /* + * In a different a.g. from the parent. + * See if the most recently allocated block has any free. 
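An illustrative aside: the loop above walks the inode btree outwards from the parent, left and right at once, and keeps whichever chunk with a free inode lies closer to the parent. The distance test it relies on is sketched below in isolation; the types, constant and example values are illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define INODES_PER_CHUNK 64

    /* Pick the closer chunk.  The left chunk is known to start at or before
     * the parent, because the search began with a <= lookup on its number. */
    static bool use_left_chunk(uint32_t pagino,
                               uint32_t left_startino, uint32_t right_startino)
    {
            uint32_t dist_left  = pagino - (left_startino + INODES_PER_CHUNK - 1);
            uint32_t dist_right = right_startino - pagino;

            return dist_left < dist_right;
    }

    int main(void)
    {
            /* Parent inode 200, candidate chunks starting at 64 and 256. */
            printf("%s\n", use_left_chunk(200, 64, 256) ? "left" : "right");
            return 0;
    }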
+ */ + else if (INT_GET(agi->agi_newino, ARCH_CONVERT) != NULLAGINO) { + if ((error = xfs_inobt_lookup_eq(cur, + INT_GET(agi->agi_newino, ARCH_CONVERT), 0, 0, &i))) + goto error0; + if (i == 1 && + (error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &j, ARCH_NOCONVERT)) == 0 && + j == 1 && + rec.ir_freecount > 0) { + /* + * The last chunk allocated in the group still has + * a free inode. + */ + } + /* + * None left in the last group, search the whole a.g. + */ + else { + if (error) + goto error0; + if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) + goto error0; + ASSERT(i == 1); + for (;;) { + if ((error = xfs_inobt_get_rec(cur, + &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, + &i, ARCH_NOCONVERT))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (rec.ir_freecount > 0) + break; + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + } + } + offset = XFS_IALLOC_FIND_FREE(&rec.ir_free); + ASSERT(offset >= 0); + ASSERT(offset < XFS_INODES_PER_CHUNK); + ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % + XFS_INODES_PER_CHUNK) == 0); + ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset); + XFS_INOBT_CLR_FREE(&rec, offset, ARCH_NOCONVERT); + rec.ir_freecount--; + if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, + rec.ir_free))) + goto error0; + INT_MOD(agi->agi_freecount, ARCH_CONVERT, -1); + xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); + down_read(&mp->m_peraglock); + mp->m_perag[tagno].pagi_freecount--; + up_read(&mp->m_peraglock); +#ifdef DEBUG + if (cur->bc_nlevels == 1) { + int freecount = 0; + + if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) + goto error0; + do { + if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + freecount += rec.ir_freecount; + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error0; + } while (i == 1); + ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || + XFS_FORCED_SHUTDOWN(mp)); + } +#endif + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); + *inop = ino; + return 0; +error1: + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); +error0: + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Free disk inode. Carefully avoids touching the incore inode, all + * manipulations incore are the caller's responsibility. + * The on-disk inode is not changed by this operation, only the + * btree (free inode mask) is changed. + */ +int +xfs_difree( + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t inode) /* inode to be freed */ +{ + /* REFERENCED */ + xfs_agblock_t agbno; /* block number containing inode */ + xfs_buf_t *agbp; /* buffer containing allocation group header */ + xfs_agino_t agino; /* inode number relative to allocation group */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_agi_t *agi; /* allocation group header */ + xfs_btree_cur_t *cur; /* inode btree cursor */ + int error; /* error return value */ + int i; /* result code */ + xfs_mount_t *mp; /* mount structure for filesystem */ + int off; /* offset of inode in inode chunk */ + xfs_inobt_rec_t rec; /* btree record */ + + mp = tp->t_mountp; + + /* + * Break up inode number into its components. 
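An illustrative aside before the checks below: XFS_INO_TO_AGNO(), XFS_INO_TO_AGINO() and XFS_AGINO_TO_AGBNO() reduce to shift-and-mask arithmetic on the inode number. The sketch below uses made-up field widths purely to show the shape of that decomposition; the real widths come from the superblock, not from these constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field widths -- assumptions, not the on-disk format. */
    #define AGINO_BITS   32   /* bits of the inode number that are AG-relative */
    #define INOPB_LOG     6   /* log2(inodes per filesystem block)             */

    int main(void)
    {
            uint64_t ino    = 0x0000000500001234ULL;          /* example     */
            uint32_t agno   = ino >> AGINO_BITS;              /* alloc group */
            uint32_t agino  = ino & (((uint64_t)1 << AGINO_BITS) - 1);
            uint32_t agbno  = agino >> INOPB_LOG;             /* AG block    */
            uint32_t offset = agino & ((1 << INOPB_LOG) - 1); /* slot in blk */

            printf("agno %u agino %u agbno %u offset %u\n",
                   agno, agino, agbno, offset);
            return 0;
    }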
+ */ + agno = XFS_INO_TO_AGNO(mp, inode); + if (agno >= mp->m_sb.sb_agcount) { + cmn_err(CE_WARN, + "xfs_difree: agno >= mp->m_sb.sb_agcount (%d >= %d) on %s. Returning EINVAL.", + agno, mp->m_sb.sb_agcount, mp->m_fsname); + ASSERT(0); + return XFS_ERROR(EINVAL); + } + agino = XFS_INO_TO_AGINO(mp, inode); + if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) { + cmn_err(CE_WARN, + "xfs_difree: inode != XFS_AGINO_TO_INO() (%d != %d) on %s. Returning EINVAL.", + inode, XFS_AGINO_TO_INO(mp, agno, agino), mp->m_fsname); + ASSERT(0); + return XFS_ERROR(EINVAL); + } + agbno = XFS_AGINO_TO_AGBNO(mp, agino); + if (agbno >= mp->m_sb.sb_agblocks) { + cmn_err(CE_WARN, + "xfs_difree: agbno >= mp->m_sb.sb_agblocks (%d >= %d) on %s. Returning EINVAL.", + agbno, mp->m_sb.sb_agblocks, mp->m_fsname); + ASSERT(0); + return XFS_ERROR(EINVAL); + } + /* + * Get the allocation group header. + */ + down_read(&mp->m_peraglock); + error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); + up_read(&mp->m_peraglock); + if (error) { + cmn_err(CE_WARN, + "xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + return error; + } + agi = XFS_BUF_TO_AGI(agbp); + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + ASSERT(agbno < INT_GET(agi->agi_length, ARCH_CONVERT)); + /* + * Initialize the cursor. + */ + cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO, + (xfs_inode_t *)0, 0); +#ifdef DEBUG + if (cur->bc_nlevels == 1) { + int freecount = 0; + + if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) + goto error0; + do { + if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + freecount += rec.ir_freecount; + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error0; + } while (i == 1); + ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || + XFS_FORCED_SHUTDOWN(mp)); + } +#endif + /* + * Look for the entry describing this inode. + */ + if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) { + cmn_err(CE_WARN, + "xfs_difree: xfs_inobt_lookup_le returned() an error %d on %s. Returning error.", + error, mp->m_fsname); + goto error0; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, &rec.ir_freecount, + &rec.ir_free, &i, ARCH_NOCONVERT))) { + cmn_err(CE_WARN, + "xfs_difree: xfs_inobt_get_rec() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + goto error0; + } + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + /* + * Get the offset in the inode chunk. + */ + off = agino - rec.ir_startino; + ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK); + ASSERT(!XFS_INOBT_IS_FREE(&rec, off, ARCH_NOCONVERT)); + /* + * Mark the inode free & increment the count. + */ + XFS_INOBT_SET_FREE(&rec, off, ARCH_NOCONVERT); + rec.ir_freecount++; + if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, rec.ir_free))) { + cmn_err(CE_WARN, + "xfs_difree: xfs_inobt_update() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + goto error0; + } + /* + * Change the inode free counts and log the ag/sb changes. 
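An illustrative aside: freeing an inode is the mirror image of allocating one. xfs_dialloc() above picks the lowest set bit of the chunk's free mask and clears it; xfs_difree() sets the bit again and bumps the free count. A minimal sketch of that bit arithmetic (the 64-bit mask width is an assumption standing in for the chunk's free mask type):

    #include <stdint.h>
    #include <stdio.h>

    /* Index of the lowest set bit (first free inode), or -1 if none free. */
    static int lowest_free(uint64_t mask)
    {
            int i;

            for (i = 0; i < 64; i++)
                    if (mask & ((uint64_t)1 << i))
                            return i;
            return -1;
    }

    int main(void)
    {
            uint64_t free_mask = 0xf0;                /* inodes 4..7 free  */
            int off = lowest_free(free_mask);         /* allocate: slot 4  */

            free_mask &= ~((uint64_t)1 << off);       /* clear on alloc    */
            printf("allocated %d, mask %#llx\n", off,
                   (unsigned long long)free_mask);

            free_mask |= (uint64_t)1 << off;          /* set again on free */
            printf("freed %d, mask %#llx\n", off,
                   (unsigned long long)free_mask);
            return 0;
    }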
+ */ + INT_MOD(agi->agi_freecount, ARCH_CONVERT, 1); + xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); + down_read(&mp->m_peraglock); + mp->m_perag[agno].pagi_freecount++; + up_read(&mp->m_peraglock); +#ifdef DEBUG + if (cur->bc_nlevels == 1) { + int freecount = 0; + + if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) + goto error0; + do { + if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, + &rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT))) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + freecount += rec.ir_freecount; + if ((error = xfs_inobt_increment(cur, 0, &i))) + goto error0; + } while (i == 1); + ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || + XFS_FORCED_SHUTDOWN(mp)); + } +#endif + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); + return 0; + +error0: + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Return the location of the inode in bno/off, for mapping it into a buffer. + */ +/*ARGSUSED*/ +int +xfs_dilocate( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t ino, /* inode to locate */ + xfs_fsblock_t *bno, /* output: block containing inode */ + int *len, /* output: num blocks in inode cluster */ + int *off, /* output: index in block of inode */ + uint flags) /* flags concerning inode lookup */ +{ + xfs_agblock_t agbno; /* block number of inode in the alloc group */ + xfs_buf_t *agbp; /* agi buffer */ + xfs_agino_t agino; /* inode number within alloc group */ + xfs_agnumber_t agno; /* allocation group number */ + int blks_per_cluster; /* num blocks per inode cluster */ + xfs_agblock_t chunk_agbno; /* first block in inode chunk */ + xfs_agino_t chunk_agino; /* first agino in inode chunk */ + __int32_t chunk_cnt; /* count of free inodes in chunk */ + xfs_inofree_t chunk_free; /* mask of free inodes in chunk */ + xfs_agblock_t cluster_agbno; /* first block in inode cluster */ + xfs_btree_cur_t *cur; /* inode btree cursor */ + int error; /* error code */ + int i; /* temp state */ + int offset; /* index of inode in its buffer */ + int offset_agbno; /* blks from chunk start to inode */ + + ASSERT(ino != NULLFSINO); + /* + * Split up the inode number into its parts. 
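An illustrative aside, looking ahead to the body of xfs_dilocate() below: once the inode number is split apart, the chunk-relative block is rounded down to an inode-cluster boundary and the inode's index inside that cluster buffer is computed. A worked example of that arithmetic with made-up geometry (the real values come from the superblock):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed geometry: 4k blocks, 8k inode clusters, 16 inodes
             * per block.                                                  */
            int cluster_size = 8192, blocklog = 12, inopblock = 16;

            int blks_per_cluster = cluster_size >> blocklog;  /* = 2 blocks */
            int chunk_agbno  = 100;   /* first block of the inode chunk     */
            int agbno        = 103;   /* block holding the wanted inode     */
            int ino_in_block = 5;     /* the inode's slot within its block  */

            int offset_agbno  = agbno - chunk_agbno;
            int cluster_agbno = chunk_agbno +
                    (offset_agbno / blks_per_cluster) * blks_per_cluster;
            int offset = (agbno - cluster_agbno) * inopblock + ino_in_block;

            printf("cluster starts at agbno %d, inode is slot %d in it\n",
                   cluster_agbno, offset);
            return 0;
    }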
+ */ + agno = XFS_INO_TO_AGNO(mp, ino); + agino = XFS_INO_TO_AGINO(mp, ino); + agbno = XFS_AGINO_TO_AGBNO(mp, agino); + if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || + ino != XFS_AGINO_TO_INO(mp, agno, agino)) { +#ifdef DEBUG + if (agno >= mp->m_sb.sb_agcount) { + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_dilocate: agno (%d) >= " + "mp->m_sb.sb_agcount (%d)", + agno, mp->m_sb.sb_agcount); + } + if (agbno >= mp->m_sb.sb_agblocks) { + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_dilocate: agbno (0x%llx) >= " + "mp->m_sb.sb_agblocks (0x%lx)", + (unsigned long long) agbno, + (unsigned long) mp->m_sb.sb_agblocks); + } + if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) { + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_dilocate: ino (0x%llx) != " + "XFS_AGINO_TO_INO(mp, agno, agino) " + "(0x%llx)", + ino, XFS_AGINO_TO_INO(mp, agno, agino)); + } +#endif /* DEBUG */ + return XFS_ERROR(EINVAL); + } + if ((mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) || + !(flags & XFS_IMAP_LOOKUP)) { + offset = XFS_INO_TO_OFFSET(mp, ino); + ASSERT(offset < mp->m_sb.sb_inopblock); + *bno = XFS_AGB_TO_FSB(mp, agno, agbno); + *off = offset; + *len = 1; + return 0; + } + blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; + if (*bno != NULLFSBLOCK) { + offset = XFS_INO_TO_OFFSET(mp, ino); + ASSERT(offset < mp->m_sb.sb_inopblock); + cluster_agbno = XFS_FSB_TO_AGBNO(mp, *bno); + *off = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + + offset; + *len = blks_per_cluster; + return 0; + } + if (mp->m_inoalign_mask) { + offset_agbno = agbno & mp->m_inoalign_mask; + chunk_agbno = agbno - offset_agbno; + } else { + down_read(&mp->m_peraglock); + error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); + up_read(&mp->m_peraglock); + if (error) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + "xfs_ialloc_read_agi() returned " + "error %d, agno %d", + error, agno); +#endif /* DEBUG */ + return error; + } + cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO, + (xfs_inode_t *)0, 0); + if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + "xfs_inobt_lookup_le() failed"); +#endif /* DEBUG */ + goto error0; + } + if ((error = xfs_inobt_get_rec(cur, &chunk_agino, &chunk_cnt, + &chunk_free, &i, ARCH_NOCONVERT))) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + "xfs_inobt_get_rec() failed"); +#endif /* DEBUG */ + goto error0; + } + if (i == 0) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + "xfs_inobt_get_rec() failed"); +#endif /* DEBUG */ + error = XFS_ERROR(EINVAL); + } + xfs_trans_brelse(tp, agbp); + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + if (error) + return error; + chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_agino); + offset_agbno = agbno - chunk_agbno; + } + ASSERT(agbno >= chunk_agbno); + cluster_agbno = chunk_agbno + + ((offset_agbno / blks_per_cluster) * blks_per_cluster); + offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + + XFS_INO_TO_OFFSET(mp, ino); + *bno = XFS_AGB_TO_FSB(mp, agno, cluster_agbno); + *off = offset; + *len = blks_per_cluster; + return 0; +error0: + xfs_trans_brelse(tp, agbp); + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; +} + +/* + * Compute and fill in value of m_in_maxlevels. 
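An illustrative aside on the helper whose comment appears just above: the worst-case inode-btree height is found by repeatedly dividing the maximum number of leaf records by the minimum fan-out, rounding up each time, until a single block remains. A standalone sketch of that loop with example numbers (the record counts here are illustrative, not the kernel's):

    #include <stdio.h>

    /* Levels needed for a btree holding maxleafents records, given the
     * minimum records per leaf block and per interior node.             */
    static int compute_maxlevels(unsigned maxleafents,
                                 unsigned minleafrecs, unsigned minnoderecs)
    {
            unsigned maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
            int level;

            for (level = 1; maxblocks > 1; level++)
                    maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
            return level;
    }

    int main(void)
    {
            /* e.g. 2^32 inodes per AG at 64 inodes per chunk record */
            unsigned maxleafents = 1u << 26;

            printf("max levels: %d\n",
                   compute_maxlevels(maxleafents, 16, 32));
            return 0;
    }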
+ */ +void +xfs_ialloc_compute_maxlevels( + xfs_mount_t *mp) /* file system mount structure */ +{ + int level; + uint maxblocks; + uint maxleafents; + int minleafrecs; + int minnoderecs; + + maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >> + XFS_INODES_PER_CHUNK_LOG; + minleafrecs = mp->m_alloc_mnr[0]; + minnoderecs = mp->m_alloc_mnr[1]; + maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; + for (level = 1; maxblocks > 1; level++) + maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; + mp->m_in_maxlevels = level; +} + +/* + * Log specified fields for the ag hdr (inode section) + */ +void +xfs_ialloc_log_agi( + xfs_trans_t *tp, /* transaction pointer */ + xfs_buf_t *bp, /* allocation group header buffer */ + int fields) /* bitmask of fields to log */ +{ + int first; /* first byte number */ + int last; /* last byte number */ + static const short offsets[] = { /* field starting offsets */ + /* keep in sync with bit definitions */ + offsetof(xfs_agi_t, agi_magicnum), + offsetof(xfs_agi_t, agi_versionnum), + offsetof(xfs_agi_t, agi_seqno), + offsetof(xfs_agi_t, agi_length), + offsetof(xfs_agi_t, agi_count), + offsetof(xfs_agi_t, agi_root), + offsetof(xfs_agi_t, agi_level), + offsetof(xfs_agi_t, agi_freecount), + offsetof(xfs_agi_t, agi_newino), + offsetof(xfs_agi_t, agi_dirino), + offsetof(xfs_agi_t, agi_unlinked), + sizeof(xfs_agi_t) + }; +#ifdef DEBUG + xfs_agi_t *agi; /* allocation group header */ + + agi = XFS_BUF_TO_AGI(bp); + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); +#endif + /* + * Compute byte offsets for the first and last fields. + */ + xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS, &first, &last); + /* + * Log the allocation group inode header buffer. + */ + xfs_trans_log_buf(tp, bp, first, last); +} + +/* + * Read in the allocation group header (inode allocation section) + */ +int +xfs_ialloc_read_agi( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_buf_t **bpp) /* allocation group hdr buf */ +{ + xfs_agi_t *agi; /* allocation group header */ + int agi_ok; /* agi is consistent */ + xfs_buf_t *bp; /* allocation group hdr buf */ + xfs_perag_t *pag; /* per allocation group data */ + int error; + + ASSERT(agno != NULLAGNUMBER); + error = xfs_trans_read_buf( + mp, tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (error) + return error; + ASSERT(bp && !XFS_BUF_GETERROR(bp)); + + /* + * Validate the magic number of the agi block. + */ + agi = XFS_BUF_TO_AGI(bp); + agi_ok = + INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && + XFS_AGI_GOOD_VERSION( + INT_GET(agi->agi_versionnum, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI, + XFS_RANDOM_IALLOC_READ_AGI))) { + XFS_CORRUPTION_ERROR("xfs_ialloc_read_agi", XFS_ERRLEVEL_LOW, + mp, agi); + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); + } + pag = &mp->m_perag[agno]; + if (!pag->pagi_init) { + pag->pagi_freecount = INT_GET(agi->agi_freecount, ARCH_CONVERT); + pag->pagi_init = 1; + } else { + /* + * It's possible for these to be out of sync if + * we are in the middle of a forced shutdown. 
+ */ + ASSERT(pag->pagi_freecount == + INT_GET(agi->agi_freecount, ARCH_CONVERT) + || XFS_FORCED_SHUTDOWN(mp)); + } + +#ifdef DEBUG + { + int i; + + for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) + ASSERT(!INT_ISZERO(agi->agi_unlinked[i], ARCH_CONVERT)); + } +#endif + + XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGI, XFS_AGI_REF); + *bpp = bp; + return 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_ialloc.h linux.22-ac2/fs/xfs/xfs_ialloc.h --- linux.vanilla/fs/xfs/xfs_ialloc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_ialloc.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_IALLOC_H__ +#define __XFS_IALLOC_H__ + +struct xfs_buf; +struct xfs_dinode; +struct xfs_mount; +struct xfs_trans; + +/* + * Allocation parameters for inode allocation. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IALLOC_INODES) +int xfs_ialloc_inodes(struct xfs_mount *mp); +#define XFS_IALLOC_INODES(mp) xfs_ialloc_inodes(mp) +#else +#define XFS_IALLOC_INODES(mp) ((mp)->m_ialloc_inos) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IALLOC_BLOCKS) +xfs_extlen_t xfs_ialloc_blocks(struct xfs_mount *mp); +#define XFS_IALLOC_BLOCKS(mp) xfs_ialloc_blocks(mp) +#else +#define XFS_IALLOC_BLOCKS(mp) ((mp)->m_ialloc_blks) +#endif + +/* + * For small block file systems, move inodes in clusters of this size. + * When we don't have a lot of memory, however, we go a bit smaller + * to reduce the number of AGI and ialloc btree blocks we need to keep + * around for xfs_dilocate(). We choose which one to use in + * xfs_mount_int(). + */ +#define XFS_INODE_BIG_CLUSTER_SIZE 8192 +#define XFS_INODE_SMALL_CLUSTER_SIZE 4096 +#define XFS_INODE_CLUSTER_SIZE(mp) (mp)->m_inode_cluster_size + +/* + * Make an inode pointer out of the buffer/offset. 
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MAKE_IPTR) +struct xfs_dinode *xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o); +#define XFS_MAKE_IPTR(mp,b,o) xfs_make_iptr(mp,b,o) +#else +#define XFS_MAKE_IPTR(mp,b,o) \ + ((xfs_dinode_t *)(xfs_buf_offset(b, (o) << (mp)->m_sb.sb_inodelog))) +#endif + +/* + * Find a free (set) bit in the inode bitmask. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IALLOC_FIND_FREE) +int xfs_ialloc_find_free(xfs_inofree_t *fp); +#define XFS_IALLOC_FIND_FREE(fp) xfs_ialloc_find_free(fp) +#else +#define XFS_IALLOC_FIND_FREE(fp) xfs_lowbit64(*(fp)) +#endif + + +#ifdef __KERNEL__ + +/* + * Prototypes for visible xfs_ialloc.c routines. + */ + +/* + * Allocate an inode on disk. + * Mode is used to tell whether the new inode will need space, and whether + * it is a directory. + * + * To work within the constraint of one allocation per transaction, + * xfs_dialloc() is designed to be called twice if it has to do an + * allocation to make more free inodes. If an inode is + * available without an allocation, agbp would be set to the current + * agbp and alloc_done set to false. + * If an allocation needed to be done, agbp would be set to the + * inode header of the allocation group and alloc_done set to true. + * The caller should then commit the current transaction and allocate a new + * transaction. xfs_dialloc() should then be called again with + * the agbp value returned from the previous call. + * + * Once we successfully pick an inode its number is returned and the + * on-disk data structures are updated. The inode itself is not read + * in, since doing so would break ordering constraints with xfs_reclaim. + * + * *agbp should be set to NULL on the first call, *alloc_done set to FALSE. + */ +int /* error */ +xfs_dialloc( + struct xfs_trans *tp, /* transaction pointer */ + xfs_ino_t parent, /* parent inode (directory) */ + mode_t mode, /* mode bits for new inode */ + int okalloc, /* ok to allocate more space */ + struct xfs_buf **agbp, /* buf for a.g. inode header */ + boolean_t *alloc_done, /* an allocation was done to replenish + the free inodes */ + xfs_ino_t *inop); /* inode number allocated */ + +/* + * Free disk inode. Carefully avoids touching the incore inode, all + * manipulations incore are the caller's responsibility. + * The on-disk inode is not changed by this operation, only the + * btree (free inode mask) is changed. + */ +int /* error */ +xfs_difree( + struct xfs_trans *tp, /* transaction pointer */ + xfs_ino_t inode); /* inode to be freed */ + +/* + * Return the location of the inode in bno/len/off, + * for mapping it into a buffer. + */ +int +xfs_dilocate( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_ino_t ino, /* inode to locate */ + xfs_fsblock_t *bno, /* output: block containing inode */ + int *len, /* output: num blocks in cluster*/ + int *off, /* output: index in block of inode */ + uint flags); /* flags for inode btree lookup */ + +/* + * Compute and fill in value of m_in_maxlevels. 
+ */ +void +xfs_ialloc_compute_maxlevels( + struct xfs_mount *mp); /* file system mount structure */ + +/* + * Log specified fields for the ag hdr (inode section) + */ +void +xfs_ialloc_log_agi( + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_buf *bp, /* allocation group header buffer */ + int fields); /* bitmask of fields to log */ + +/* + * Read in the allocation group header (inode allocation section) + */ +int /* error */ +xfs_ialloc_read_agi( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + struct xfs_buf **bpp); /* allocation group hdr buf */ + +#endif /* __KERNEL__ */ + +#endif /* __XFS_IALLOC_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfsidbg.c linux.22-ac2/fs/xfs/xfsidbg.c --- linux.vanilla/fs/xfs/xfsidbg.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfsidbg.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,5468 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "pagebuf/page_buf_internal.h" + +#include +#include +#include +#include +#include + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc.h" +#include "xfs_ag.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_buf_item.h" +#include "xfs_extfree_item.h" +#include "xfs_inode_item.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "xfs_dir_leaf.h" +#include "xfs_dir2_data.h" +#include "xfs_dir2_leaf.h" +#include "xfs_dir2_block.h" +#include "xfs_dir2_node.h" +#include "xfs_dir2_trace.h" +#include "xfs_log_priv.h" +#include "xfs_log_recover.h" +#include "xfs_rw.h" +#include "xfs_bit.h" +#include "xfs_quota.h" +#include "xfs_log_recover.h" +#include "quota/xfs_qm.h" + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Additional kdb commands for debugging XFS"); +MODULE_LICENSE("GPL"); +EXPORT_NO_SYMBOLS; + +/* + * Command table functions. + */ +static void xfsidbg_xagf(xfs_agf_t *); +static void xfsidbg_xagi(xfs_agi_t *); +static void xfsidbg_xaildump(xfs_mount_t *); +static void xfsidbg_xalloc(xfs_alloc_arg_t *); +#ifdef DEBUG +static void xfsidbg_xalmtrace(xfs_mount_t *); +#endif +static void xfsidbg_xattrcontext(xfs_attr_list_context_t *); +static void xfsidbg_xattrleaf(xfs_attr_leafblock_t *); +static void xfsidbg_xattrsf(xfs_attr_shortform_t *); +static void xfsidbg_xbirec(xfs_bmbt_irec_t *r); +static void xfsidbg_xbmalla(xfs_bmalloca_t *); +static void xfsidbg_xbrec(xfs_bmbt_rec_64_t *); +static void xfsidbg_xbroot(xfs_inode_t *); +static void xfsidbg_xbroota(xfs_inode_t *); +static void xfsidbg_xbtcur(xfs_btree_cur_t *); +static void xfsidbg_xbuf(xfs_buf_t *); +static void xfsidbg_xbuf_real(xfs_buf_t *, int); +static void xfsidbg_xchash(xfs_mount_t *mp); +static void xfsidbg_xchashlist(xfs_chashlist_t *chl); +static void xfsidbg_xdaargs(xfs_da_args_t *); +static void xfsidbg_xdabuf(xfs_dabuf_t *); +static void xfsidbg_xdanode(xfs_da_intnode_t *); +static void xfsidbg_xdastate(xfs_da_state_t *); +static void xfsidbg_xdirleaf(xfs_dir_leafblock_t *); +static void xfsidbg_xdirsf(xfs_dir_shortform_t *); +static void xfsidbg_xdir2free(xfs_dir2_free_t *); +static void xfsidbg_xdir2sf(xfs_dir2_sf_t *); +static void xfsidbg_xexlist(xfs_inode_t *); +static void xfsidbg_xflist(xfs_bmap_free_t *); +static void xfsidbg_xhelp(void); +static void xfsidbg_xiclog(xlog_in_core_t *); +static void xfsidbg_xiclogall(xlog_in_core_t *); +static void xfsidbg_xiclogcb(xlog_in_core_t *); +static void xfsidbg_xihash(xfs_mount_t *mp); +static void xfsidbg_xinodes(xfs_mount_t *); +static void xfsidbg_delayed_blocks(xfs_mount_t *); +static void xfsidbg_xinodes_quiesce(xfs_mount_t *); +static void xfsidbg_xlog(xlog_t *); +static void xfsidbg_xlog_ritem(xlog_recover_item_t *); +static void xfsidbg_xlog_rtrans(xlog_recover_t *); +static void 
xfsidbg_xlog_rtrans_entire(xlog_recover_t *); +static void xfsidbg_xlog_tic(xlog_ticket_t *); +static void xfsidbg_xlogitem(xfs_log_item_t *); +static void xfsidbg_xmount(xfs_mount_t *); +static void xfsidbg_xnode(xfs_inode_t *ip); +static void xfsidbg_xcore(xfs_iocore_t *io); +static void xfsidbg_xperag(xfs_mount_t *); +static void xfsidbg_xqm_diskdq(xfs_disk_dquot_t *); +static void xfsidbg_xqm_dqattached_inos(xfs_mount_t *); +static void xfsidbg_xqm_dquot(xfs_dquot_t *); +static void xfsidbg_xqm_mplist(xfs_mount_t *); +static void xfsidbg_xqm_qinfo(xfs_mount_t *mp); +static void xfsidbg_xqm_tpdqinfo(xfs_trans_t *tp); +static void xfsidbg_xsb(xfs_sb_t *, int convert); +static void xfsidbg_xtp(xfs_trans_t *); +static void xfsidbg_xtrans_res(xfs_mount_t *); +#ifdef CONFIG_XFS_QUOTA +static void xfsidbg_xqm(void); +static void xfsidbg_xqm_htab(void); +static void xfsidbg_xqm_freelist_print(xfs_frlist_t *qlist, char *title); +static void xfsidbg_xqm_freelist(void); +#endif + +/* kdb wrappers */ + +static int kdbm_xfs_xagf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xagf((xfs_agf_t *)addr); + return 0; +} + +static int kdbm_xfs_xagi( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xagi((xfs_agi_t *)addr); + return 0; +} + +static int kdbm_xfs_xaildump( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xaildump((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xalloc( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xalloc((xfs_alloc_arg_t *) addr); + return 0; +} + +#ifdef DEBUG +static int kdbm_xfs_xalmtrace( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xalmtrace((xfs_mount_t *) addr); + return 0; +} +#endif /* DEBUG */ + +static int kdbm_xfs_xattrcontext( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xattrcontext((xfs_attr_list_context_t *) addr); + return 0; +} + +static int kdbm_xfs_xattrleaf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int 
diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xattrleaf((xfs_attr_leafblock_t *) addr); + return 0; +} + +static int kdbm_xfs_xattrsf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xattrsf((xfs_attr_shortform_t *) addr); + return 0; +} + +static int kdbm_xfs_xbirec( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbirec((xfs_bmbt_irec_t *) addr); + return 0; +} + +static int kdbm_xfs_xbmalla( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbmalla((xfs_bmalloca_t *)addr); + return 0; +} + +static int kdbm_xfs_xbrec( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbrec((xfs_bmbt_rec_64_t *) addr); + return 0; +} + +static int kdbm_xfs_xbroot( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbroot((xfs_inode_t *) addr); + return 0; +} + +static int kdbm_xfs_xbroota( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbroota((xfs_inode_t *) addr); + return 0; +} + +static int kdbm_xfs_xbtcur( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbtcur((xfs_btree_cur_t *) addr); + return 0; +} + +static int kdbm_xfs_xbuf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xbuf((xfs_buf_t *) addr); + return 0; +} + + +static int kdbm_xfs_xchash( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = 
kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xchash((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xchashlist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xchashlist((xfs_chashlist_t *) addr); + return 0; +} + + +static int kdbm_xfs_xdaargs( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdaargs((xfs_da_args_t *) addr); + return 0; +} + +static int kdbm_xfs_xdabuf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdabuf((xfs_dabuf_t *) addr); + return 0; +} + +static int kdbm_xfs_xdanode( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdanode((xfs_da_intnode_t *) addr); + return 0; +} + +static int kdbm_xfs_xdastate( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdastate((xfs_da_state_t *) addr); + return 0; +} + +static int kdbm_xfs_xdirleaf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdirleaf((xfs_dir_leafblock_t *) addr); + return 0; +} + +static int kdbm_xfs_xdirsf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdirsf((xfs_dir_shortform_t *) addr); + return 0; +} + +static int kdbm_xfs_xdir2free( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdir2free((xfs_dir2_free_t *) addr); + return 0; +} + +static int kdbm_xfs_xdir2sf( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, 
&addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xdir2sf((xfs_dir2_sf_t *) addr); + return 0; +} + +static int kdbm_xfs_xexlist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xexlist((xfs_inode_t *) addr); + return 0; +} + +static int kdbm_xfs_xflist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xflist((xfs_bmap_free_t *) addr); + return 0; +} + +static int kdbm_xfs_xhelp( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + if (argc != 0) + return KDB_ARGCOUNT; + + xfsidbg_xhelp(); + return 0; +} + +static int kdbm_xfs_xiclog( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xiclog((xlog_in_core_t *) addr); + return 0; +} + +static int kdbm_xfs_xiclogall( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xiclogall((xlog_in_core_t *) addr); + return 0; +} + +static int kdbm_xfs_xiclogcb( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xiclogcb((xlog_in_core_t *) addr); + return 0; +} + +static int kdbm_xfs_xihash( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xihash((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xinodes( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xinodes((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_delayed_blocks( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_delayed_blocks((xfs_mount_t *) addr); + return 0; +} + + +static int kdbm_xfs_xinodes_quiesce( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned 
long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xinodes_quiesce((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog((xlog_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog_ritem( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog_ritem((xlog_recover_item_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog_rtrans( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog_rtrans((xlog_recover_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog_rtrans_entire( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog_rtrans_entire((xlog_recover_t *) addr); + return 0; +} + +static int kdbm_xfs_xlog_tic( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlog_tic((xlog_ticket_t *) addr); + return 0; +} + +static int kdbm_xfs_xlogitem( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xlogitem((xfs_log_item_t *) addr); + return 0; +} + +static int kdbm_xfs_xmount( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xmount((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xnode( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xnode((xfs_inode_t *) addr); + return 0; +} + +static int kdbm_xfs_xcore( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + 
long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xcore((xfs_iocore_t *) addr); + return 0; +} + +static int kdbm_xfs_xperag( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xperag((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_diskdq( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_diskdq((xfs_disk_dquot_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_dqattached_inos( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_dqattached_inos((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_dquot( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_dquot((xfs_dquot_t *) addr); + return 0; +} + +#ifdef CONFIG_XFS_QUOTA +static int kdbm_xfs_xqm( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + if (argc != 0) + return KDB_ARGCOUNT; + + xfsidbg_xqm(); + return 0; +} + +static int kdbm_xfs_xqm_freelist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + if (argc != 0) + return KDB_ARGCOUNT; + + xfsidbg_xqm_freelist(); + return 0; +} + +static int kdbm_xfs_xqm_htab( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + if (argc != 0) + return KDB_ARGCOUNT; + + xfsidbg_xqm_htab(); + return 0; +} +#endif + +static int kdbm_xfs_xqm_mplist( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_mplist((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_qinfo( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xqm_qinfo((xfs_mount_t *) addr); + return 0; +} + +static int kdbm_xfs_xqm_tpdqinfo( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + 
xfsidbg_xqm_tpdqinfo((xfs_trans_t *) addr); + return 0; +} + +static int kdbm_xfs_xsb( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + unsigned long convert=0; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1 && argc!=2) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + if (argc==2) { + /* extra argument - conversion flag */ + diag = kdbgetaddrarg(argc, argv, &nextarg, &convert, &offset, NULL, regs); + if (diag) + return diag; + } + + xfsidbg_xsb((xfs_sb_t *) addr, (int)convert); + return 0; +} + +static int kdbm_xfs_xtp( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xtp((xfs_trans_t *) addr); + return 0; +} + +static int kdbm_xfs_xtrans_res( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) + return diag; + + xfsidbg_xtrans_res((xfs_mount_t *) addr); + return 0; +} + +/* + * Vnode descriptor dump. + * This table is a string version of all the flags defined in vnode.h. + */ +char *tab_vflags[] = { + /* local only flags */ + "VINACT", /* 0x01 */ + "VRECLM", /* 0x02 */ + "VWAIT", /* 0x04 */ + "VMODIFIED", /* 0x08 */ + "INVALID0x10", /* 0x10 */ + "INVALID0x20", /* 0x20 */ + "INVALID0x40", /* 0x40 */ + "INVALID0x80", /* 0x80 */ + "INVALID0x100", /* 0x100 */ + "INVALID0x200", /* 0x200 */ + "INVALID0x400", /* 0x400 */ + "INVALID0x800", /* 0x800 */ + "INVALID0x1000", /* 0x1000 */ + "INVALID0x2000", /* 0x2000 */ + "INVALID0x4000", /* 0x4000 */ + "INVALID0x8000", /* 0x8000 */ + "INVALID0x10000", /* 0x10000 */ + "INVALID0x20000", /* 0x20000 */ + "INVALID0x40000", /* 0x40000 */ + "INVALID0x80000", /* 0x80000 */ + "VROOT", /* 0x100000 */ + "INVALID0x200000", /* 0x200000 */ + "INVALID00x400000", /* 0x400000 */ + "INVALID0x800000", /* 0x800000 */ + "INVALID0x1000000", /* 0x1000000 */ + "INVALID0x2000000", /* 0x2000000 */ + "VSHARE", /* 0x4000000 */ + "INVALID0x8000000", /* 0x8000000 */ + "VENF_LOCKING", /* 0x10000000 */ + "VOPLOCK", /* 0x20000000 */ + "VPURGE", /* 0x40000000 */ + "INVALID0x80000000", /* 0x80000000 */ + 0 +}; + + +static char *vnode_type[] = { + "VNON", "VREG", "VDIR", "VBLK", "VLNK", "VFIFO", "VBAD", "VSOCK" +}; + +static void +printflags(register uint64_t flags, + register char **strings, + register char *name) +{ + register uint64_t mask = 1; + + if (name) + kdb_printf("%s 0x%llx <", name, (unsigned long long)flags); + + while (flags != 0 && *strings) { + if (mask & flags) { + kdb_printf("%s ", *strings); + flags &= ~mask; + } + mask <<= 1; + strings++; + } + + if (name) + kdb_printf("> "); + + return; +} + + +static void printvnode(vnode_t *vp, unsigned long addr) +{ + bhv_desc_t *bh; + kdb_symtab_t symtab; + + + kdb_printf("vnode: 0x%lx type ", addr); + if ((size_t)vp->v_type >= sizeof(vnode_type)/sizeof(vnode_type[0])) + kdb_printf("out of range 0x%x", vp->v_type); + else + kdb_printf("%s", vnode_type[vp->v_type]); + kdb_printf(" v_bh %p\n", &vp->v_bh); + + if ((bh = vp->v_bh.bh_first)) { + kdb_printf(" v_inode 0x%p v_bh->bh_first 0x%p pobj 0x%p\n", 
+ LINVFS_GET_IP((struct vnode *) addr), + bh, bh->bd_pdata); + + if (kdbnearsym((unsigned long)bh->bd_ops, &symtab)) + kdb_printf(" ops %s ", symtab.sym_name); + else + kdb_printf(" ops %s/0x%p ", + "???", (void *)bh->bd_ops); + } else { + kdb_printf(" v_inode 0x%p v_bh->bh_first = NULLBHV ", + LINVFS_GET_IP((struct vnode *) addr)); + } + + printflags((__psunsigned_t)vp->v_flag, tab_vflags, "flag ="); + kdb_printf("\n"); + +#ifdef CONFIG_XFS_VNODE_TRACING + kdb_printf(" v_trace 0x%p\n", vp->v_trace); +#endif /* CONFIG_XFS_VNODE_TRACING */ + + kdb_printf(" v_vfsp 0x%p v_number %Lx\n", + vp->v_vfsp, vp->v_number); +} + +static int kdbm_vnode( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + vnode_t vp; + + if (argc != 1) + return KDB_ARGCOUNT; + + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + + if (diag) + return diag; + + if ((diag = kdb_getarea(vp, addr))) + return diag; + + printvnode(&vp, addr); + + return 0; +} + +static void +print_vfs(vfs_t *vfs, unsigned long addr) +{ + kdb_printf("vfsp at 0x%lx", addr); + kdb_printf(" vfs_fbhv 0x%p sb 0x%p\n", vfs->vfs_fbhv, vfs->vfs_super); +} + +static int kdbm_vfs( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + unsigned long addr; + int nextarg = 1; + long offset = 0; + int diag; + vfs_t vfs; + + if (argc != 1) + return KDB_ARGCOUNT; + + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + + if (diag) + return diag; + + if ((diag = kdb_getarea(vfs, addr))) + return diag; + + print_vfs(&vfs, addr); + + return 0; +} + + +#ifdef CONFIG_XFS_VNODE_TRACING +/* + * Print a vnode trace entry. + */ +static int +vn_trace_pr_entry(ktrace_entry_t *ktep) +{ + char funcname[128]; + kdb_symtab_t symtab; + + + if ((__psint_t)ktep->val[0] == 0) + return 0; + + if (kdbnearsym((unsigned int)ktep->val[8], &symtab)) { + unsigned long offval; + + offval = (unsigned int)ktep->val[8] - symtab.sym_start; + + if (offval) + sprintf(funcname, "%s+0x%lx", symtab.sym_name, offval); + else + sprintf(funcname, "%s", symtab.sym_name); + } else + funcname[0] = '\0'; + + + switch ((__psint_t)ktep->val[0]) { + case VNODE_KTRACE_ENTRY: + kdb_printf("entry to %s i_count = %d", + (char *)ktep->val[1], + (__psint_t)ktep->val[3]); + break; + + case VNODE_KTRACE_EXIT: + kdb_printf("exit from %s i_count = %d", + (char *)ktep->val[1], + (__psint_t)ktep->val[3]); + break; + + case VNODE_KTRACE_HOLD: + if ((__psint_t)ktep->val[3] != 1) + kdb_printf("hold @%s:%d(%s) i_count %d => %d ", + (char *)ktep->val[1], + (__psint_t)ktep->val[2], + funcname, + (__psint_t)ktep->val[3] - 1, + (__psint_t)ktep->val[3]); + else + kdb_printf("get @%s:%d(%s) i_count = %d", + (char *)ktep->val[1], + (__psint_t)ktep->val[2], + funcname, + (__psint_t)ktep->val[3]); + break; + + case VNODE_KTRACE_REF: + kdb_printf("ref @%s:%d(%s) i_count = %d", + (char *)ktep->val[1], + (__psint_t)ktep->val[2], + funcname, + (__psint_t)ktep->val[3]); + break; + + case VNODE_KTRACE_RELE: + if ((__psint_t)ktep->val[3] != 1) + kdb_printf("rele @%s:%d(%s) i_count %d => %d ", + (char *)ktep->val[1], + (__psint_t)ktep->val[2], + funcname, + (__psint_t)ktep->val[3], + (__psint_t)ktep->val[3] - 1); + else + kdb_printf("free @%s:%d(%s) i_count = %d", + (char *)ktep->val[1], + (__psint_t)ktep->val[2], + funcname, + (__psint_t)ktep->val[3]); + break; + + default: + kdb_printf("unknown vntrace record\n"); + return 1; + } + + kdb_printf("\n"); + + 
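/*
 * Slot layout of a vnode ktrace entry, as implied by the decoding in
 * this function (descriptive only): val[0] tag (entry/exit/hold/ref/
 * rele), val[1] name or call site, val[2] source line, val[3] i_count,
 * val[4] return address, val[5] vnode flags, val[6] cpu, val[7] pid,
 * val[8] address of the traced function.
 */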
kdb_printf(" cpu = %d pid = %d ", + (__psint_t)ktep->val[6], (pid_t)ktep->val[7]); + + printflags((__psunsigned_t)ktep->val[5], tab_vflags, "flag ="); + + if (kdbnearsym((unsigned int)ktep->val[4], &symtab)) { + unsigned long offval; + + offval = (unsigned int)ktep->val[4] - symtab.sym_start; + + if (offval) + kdb_printf(" ra = %s+0x%lx", symtab.sym_name, offval); + else + kdb_printf(" ra = %s", symtab.sym_name); + } else + kdb_printf(" ra = ?? 0x%p", (void *)ktep->val[4]); + + return 1; +} + + +/* + * Print out the trace buffer attached to the given vnode. + */ +static int kdbm_vntrace( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + int diag; + int nextarg = 1; + long offset = 0; + unsigned long addr; + vnode_t *vp; + ktrace_entry_t *ktep; + ktrace_snap_t kts; + + + if (argc != 1) + return KDB_ARGCOUNT; + + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + + if (diag) + return diag; + + vp = (vnode_t *)addr; + + if (vp->v_trace == NULL) { + kdb_printf("The vnode trace buffer is not initialized\n"); + + return 0; + } + + kdb_printf("vntrace vp 0x%p\n", vp); + + ktep = ktrace_first(vp->v_trace, &kts); + + while (ktep != NULL) { + if (vn_trace_pr_entry(ktep)) + kdb_printf("\n"); + + ktep = ktrace_next(vp->v_trace, &kts); + } + + return 0; +} +/* + * Print out the trace buffer attached to the given vnode. + */ +static int kdbm_vntraceaddr( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + int diag; + int nextarg = 1; + long offset = 0; + unsigned long addr; + struct ktrace *kt; + ktrace_entry_t *ktep; + ktrace_snap_t kts; + + + if (argc != 1) + return KDB_ARGCOUNT; + + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + + if (diag) + return diag; + + kt = (struct ktrace *)addr; + + kdb_printf("vntraceaddr kt 0x%p\n", kt); + + ktep = ktrace_first(kt, &kts); + + while (ktep != NULL) { + if (vn_trace_pr_entry(ktep)) + kdb_printf("\n"); + + ktep = ktrace_next(kt, &kts); + } + + return 0; +} +#endif /* CONFIG_XFS_VNODE_TRACING */ + + +static void printinode(struct inode *ip) +{ + unsigned long addr; + + + if (ip == NULL) + return; + + kdb_printf(" i_ino = %lu i_count = %u i_dev = 0x%x i_size %Ld\n", + ip->i_ino, atomic_read(&ip->i_count), + kdev_t_to_nr(ip->i_dev), ip->i_size); + + kdb_printf( + " i_mode = 0x%x i_nlink = %d i_rdev = 0x%x i_state = 0x%lx\n", + ip->i_mode, ip->i_nlink, + kdev_t_to_nr(ip->i_rdev), ip->i_state); + + kdb_printf(" i_hash.nxt = 0x%p i_hash.prv = 0x%p\n", + ip->i_hash.next, ip->i_hash.prev); + kdb_printf(" i_list.nxt = 0x%p i_list.prv = 0x%p\n", + ip->i_list.next, ip->i_list.prev); + kdb_printf(" i_dentry.nxt = 0x%p i_dentry.prv = 0x%p\n", + ip->i_dentry.next, + ip->i_dentry.prev); + + addr = (unsigned long)ip; + + kdb_printf(" i_sb = 0x%p i_op = 0x%p i_data = 0x%lx nrpages = %lu\n", + ip->i_sb, ip->i_op, + addr + offsetof(struct inode, i_data), + ip->i_data.nrpages); + + kdb_printf(" vnode ptr 0x%p\n", LINVFS_GET_VP(ip)); +} + + +static int kdbm_vn( + int argc, + const char **argv, + const char **envp, + struct pt_regs *regs) +{ + int diag; + int nextarg = 1; +/* char *symname; */ + long offset = 0; + unsigned long addr; + struct inode *ip; +/* bhv_desc_t *bh; */ +#ifdef CONFIG_XFS_VNODE_TRACING + ktrace_entry_t *ktep; + ktrace_snap_t kts; +#endif + vnode_t vp; + + if (argc != 1) + return KDB_ARGCOUNT; + + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + + if (diag) + return diag; + + if ((diag = kdb_getarea(vp, addr))) + return 
diag; + + ip = LINVFS_GET_IP((vnode_t *)addr); + + kdb_printf("--> Inode @ 0x%p\n", ip); + printinode(ip); + + kdb_printf("--> Vnode @ 0x%lx\n", addr); + printvnode(&vp, addr); + +#ifdef CONFIG_XFS_VNODE_TRACING + + kdb_printf("--> Vntrace @ 0x%p/0x%p\n", vp, vp->v_trace); + + if (vp->v_trace == NULL) + return 0; + + ktep = ktrace_first(vp->v_trace, &kts); + + while (ktep != NULL) { + if (vn_trace_pr_entry(ktep)) + kdb_printf("\n"); + + ktep = ktrace_next(vp->v_trace, &kts); + } +#endif /* CONFIG_XFS_VNODE_TRACING */ + + return 0; +} + + +/* pagebuf stuff */ + +static char *pb_flag_vals[] = { +/* 0 */ "READ", "WRITE", "MAPPED", "PARTIAL", "ASYNC", +/* 5 */ "NONE", "DELWRI", "FREED", "SYNC", "MAPPABLE", +/* 10 */ "STALE", "FS_MANAGED", "FS_DATAIOD", "LOCK", "TRYLOCK", +/* 15 */ "DONT_BLOCK", "LOCKABLE", "PRIVATE_BH", "ALL_PAGES_MAPPED", + "ADDR_ALLOCATED", +/* 20 */ "MEM_ALLOCATED", "FORCEIO", "FLUSH", "READ_AHEAD", + NULL }; + +static char *pbm_flag_vals[] = { + "EOF", "HOLE", "DELAY", "INVALID0x08", + "INVALID0x10", "UNWRITTEN", "INVALID0x40", "INVALID0x80", + NULL }; + + +static char *map_flags(unsigned long flags, char *mapping[]) +{ + static char buffer[256]; + int index; + int offset = 12; + + buffer[0] = '\0'; + + for (index = 0; flags && mapping[index]; flags >>= 1, index++) { + if (flags & 1) { + if ((offset + strlen(mapping[index]) + 1) >= 80) { + strcat(buffer, "\n "); + offset = 12; + } else if (offset > 12) { + strcat(buffer, " "); + offset++; + } + strcat(buffer, mapping[index]); + offset += strlen(mapping[index]); + } + } + + return (buffer); +} + +static char *pb_flags(page_buf_flags_t pb_flag) +{ + return(map_flags((unsigned long) pb_flag, pb_flag_vals)); +} + +static int +kdbm_pb_flags(int argc, const char **argv, const char **envp, struct pt_regs *regs) +{ + unsigned long flags; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + + diag = kdbgetularg(argv[1], &flags); + if (diag) + return diag; + + kdb_printf("pb flags 0x%lx = %s\n", flags, pb_flags(flags)); + + return 0; +} + +static void +print_pagebuf( + page_buf_t *pb, + unsigned long addr) +{ + kdb_printf("page_buf_t at 0x%lx\n", addr); + kdb_printf(" pb_flags %s\n", pb_flags(pb->pb_flags)); + kdb_printf(" pb_target 0x%p pb_hold %d pb_next 0x%p pb_prev 0x%p\n", + pb->pb_target, pb->pb_hold.counter, + list_entry(pb->pb_list.next, page_buf_t, pb_list), + list_entry(pb->pb_list.prev, page_buf_t, pb_list)); + kdb_printf(" pb_hash_index %d pb_hash_next 0x%p pb_hash_prev 0x%p\n", + pb->pb_hash_index, + list_entry(pb->pb_hash_list.next, page_buf_t, pb_hash_list), + list_entry(pb->pb_hash_list.prev, page_buf_t, pb_hash_list)); + kdb_printf(" pb_file_offset 0x%llx pb_buffer_length 0x%llx pb_addr 0x%p\n", + (unsigned long long) pb->pb_file_offset, + (unsigned long long) pb->pb_buffer_length, + pb->pb_addr); + kdb_printf(" pb_bn 0x%Lx pb_count_desired 0x%lx\n", + pb->pb_bn, + (unsigned long) pb->pb_count_desired); + kdb_printf(" pb_flushtime %ld (%ld) pb_io_remaining %d pb_error %u\n", + pb->pb_flushtime, pb->pb_flushtime - jiffies, + pb->pb_io_remaining.counter, pb->pb_error); + kdb_printf(" pb_page_count %u pb_offset 0x%x pb_pages 0x%p\n", + pb->pb_page_count, pb->pb_offset, + pb->pb_pages); +#ifdef PAGEBUF_LOCK_TRACKING + kdb_printf(" pb_iodonesema (%d,%d) pb_sema (%d,%d) pincount (%d) last holder %d\n", + pb->pb_iodonesema.count.counter, + pb->pb_iodonesema.sleepers, + pb->pb_sema.count.counter, pb->pb_sema.sleepers, + pb->pb_pin_count.counter, pb->pb_last_holder); +#else + kdb_printf(" pb_iodonesema (%d,%d) pb_sema 
(%d,%d) pincount (%d)\n", + pb->pb_iodonesema.count.counter, + pb->pb_iodonesema.sleepers, + pb->pb_sema.count.counter, pb->pb_sema.sleepers, + pb->pb_pin_count.counter); +#endif + if (pb->pb_fspriv || pb->pb_fspriv2) { + kdb_printf( "pb_fspriv 0x%p pb_fspriv2 0x%p\n", + pb->pb_fspriv, pb->pb_fspriv2); + } +} + +static int +kdbm_pb(int argc, const char **argv, const char **envp, struct pt_regs *regs) +{ + page_buf_t bp; + unsigned long addr; + long offset=0; + int nextarg; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + + nextarg = 1; + if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs)) || + (diag = kdb_getarea(bp, addr))) + return diag; + + print_pagebuf(&bp, addr); + + return 0; +} + +static int +kdbm_pbdelay(int argc, const char **argv, const char **envp, + struct pt_regs *regs) +{ + unsigned long verbose = 0; + int count = 0; + struct list_head *curr, *next; + page_buf_t bp; + unsigned long addr; + int diag; + extern struct list_head pbd_delwrite_queue; + + if (argc > 1) + return KDB_ARGCOUNT; + + if (argc == 1) { + if ((diag = kdbgetularg(argv[1], &verbose))) { + return diag; + } + } + + if (!verbose) { + kdb_printf("index pb pin flushtime\n"); + } + + list_for_each_safe(curr, next, &pbd_delwrite_queue) { + addr = (unsigned long)list_entry(curr, page_buf_t, pb_list); + if ((diag = kdb_getarea(bp, addr))) + return diag; + + if (verbose) { + print_pagebuf(&bp, addr); + } else { + kdb_printf("%4d 0x%lx %d %ld\n", + count++, addr, + bp.pb_pin_count.counter, + bp.pb_flushtime - jiffies); + } + } + + return 0; +} + +static int +kdbm_pbmap(int argc, const char **argv, const char **envp, + struct pt_regs *regs) +{ + page_buf_bmap_t pbm; + unsigned long addr; + long offset=0; + int nextarg; + int diag; + + if (argc != 1) + return KDB_ARGCOUNT; + + nextarg = 1; + if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs)) || + (diag = kdb_getarea(pbm, addr))) + + kdb_printf("page_buf_bmap_t at 0x%lx\n", addr); + kdb_printf(" pbm_bn 0x%llx pbm_offset 0x%Lx pbm_delta 0x%lx pbm_bsize 0x%lx\n", + (long long) pbm.pbm_bn, pbm.pbm_offset, + (unsigned long) pbm.pbm_delta, (unsigned long) pbm.pbm_bsize); + + kdb_printf(" pbm_flags %s\n", map_flags(pbm.pbm_flags, pbm_flag_vals)); + + return 0; +} + +#ifdef PAGEBUF_TRACE +# ifdef __PAGEBUF_TRACE__ +# undef __PAGEBUF_TRACE__ +# undef PB_DEFINE_TRACES +# undef PB_TRACE_START +# undef PB_TRACE_REC +# undef PB_TRACE_END +# endif +#include "pagebuf/page_buf_trace.h" + +#define EV_SIZE (sizeof(event_names)/sizeof(char *)) + +void +pb_trace_core( + unsigned long match, + char *event_match, + unsigned long long offset, + long long mask) +{ + extern struct pagebuf_trace_buf pb_trace; + int i, total, end; + pagebuf_trace_t *trace; + char *event; + char value[10]; + + end = pb_trace.start - 1; + if (end < 0) + end = PB_TRACE_BUFSIZE - 1; + + if (match && (match < PB_TRACE_BUFSIZE)) { + for (i = pb_trace.start, total = 0; i != end; i = CIRC_INC(i)) { + trace = &pb_trace.buf[i]; + if (trace->pb == 0) + continue; + total++; + } + total = total - match; + for (i = pb_trace.start; i != end && total; i = CIRC_INC(i)) { + trace = &pb_trace.buf[i]; + if (trace->pb == 0) + continue; + total--; + } + match = 0; + } else + i = pb_trace.start; + for ( ; i != end; i = CIRC_INC(i)) { + trace = &pb_trace.buf[i]; + + if (offset) { + if ((trace->offset & ~mask) != offset) + continue; + } + + if (trace->pb == 0) + continue; + + if ((match != 0) && (trace->pb != match)) + continue; + + if ((trace->event < EV_SIZE-1) && 
event_names[trace->event]) { + event = event_names[trace->event]; + } else if (trace->event == EV_SIZE-1) { + event = (char *)trace->misc; + } else { + event = value; + sprintf(value, "%8d", trace->event); + } + + if (event_match && strcmp(event, event_match)) { + continue; + } + + + kdb_printf("pb 0x%lx [%s] (hold %u lock %d) misc 0x%p", + trace->pb, event, + trace->hold, trace->lock_value, + trace->misc); + kdb_symbol_print((unsigned int)trace->ra, NULL, + KDB_SP_SPACEB|KDB_SP_PAREN|KDB_SP_NEWLINE); + kdb_printf(" offset 0x%Lx size 0x%x task 0x%p\n", + trace->offset, trace->size, trace->task); + kdb_printf(" flags: %s\n", + pb_flags(trace->flags)); + } +} + + +static int +kdbm_pbtrace_offset(int argc, const char **argv, const char **envp, + struct pt_regs *regs) +{ + long mask = 0; + unsigned long offset = 0; + int diag; + + if (argc > 2) + return KDB_ARGCOUNT; + + if (argc > 0) { + diag = kdbgetularg(argv[1], &offset); + if (diag) + return diag; + } + + if (argc > 1) { + diag = kdbgetularg(argv[1], &mask); + if (diag) + return diag; + } + + pb_trace_core(0, NULL, (unsigned long long)offset, + (long long)mask); /* sign extent mask */ + return 0; +} + +static int +kdbm_pbtrace(int argc, const char **argv, const char **envp, + struct pt_regs *regs) +{ + unsigned long addr = 0; + int diag, nextarg; + long offset = 0; + char *event_match = NULL; + + if (argc > 1) + return KDB_ARGCOUNT; + + if (argc == 1) { + if (isupper(argv[1][0]) || islower(argv[1][0])) { + event_match = (char *)argv[1]; + printk("event match on \"%s\"\n", event_match); + argc = 0; + } else { + nextarg = 1; + diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL, regs); + if (diag) { + printk("failed to parse %s as a number\n", + argv[1]); + return diag; + } + } + } + + pb_trace_core(addr, event_match, 0LL, 0LL); + return 0; +} + +#else /* PAGEBUF_TRACE */ +static int +kdbm_pbtrace(int argc, const char **argv, const char **envp, + struct pt_regs *regs) +{ + kdb_printf("pagebuf tracing not compiled in\n"); + + return 0; +} +#endif /* PAGEBUF_TRACE */ + +static struct xif { + char *name; + int (*func)(int, const char **, const char **, struct pt_regs *); + char *args; + char *help; +} xfsidbg_funcs[] = { + { "vn", kdbm_vn, "", "Dump inode/vnode/trace"}, + { "vnode", kdbm_vnode, "", "Dump vnode"}, + { "vfs", kdbm_vfs, "", "Dump vfs"}, +#ifdef CONFIG_XFS_VNODE_TRACING + { "vntrace", kdbm_vntrace, "", "Dump vnode Trace"}, + { "vntraceaddr", kdbm_vntraceaddr, "", "Dump vnode Trace by Address"}, +#endif /* CONFIG_XFS_VNODE_TRACING */ + { "xagf", kdbm_xfs_xagf, "", + "Dump XFS allocation group freespace" }, + { "xagi", kdbm_xfs_xagi, "", + "Dump XFS allocation group inode" }, + { "xail", kdbm_xfs_xaildump, "", + "Dump XFS AIL for a mountpoint" }, + { "xalloc", kdbm_xfs_xalloc, "", + "Dump XFS allocation args structure" }, +#ifdef DEBUG + { "xalmtrc", kdbm_xfs_xalmtrace, "", + "Dump XFS alloc mount-point trace" }, +#endif + { "xattrcx", kdbm_xfs_xattrcontext, "", + "Dump XFS attr_list context struct"}, + { "xattrlf", kdbm_xfs_xattrleaf, "", + "Dump XFS attribute leaf block"}, + { "xattrsf", kdbm_xfs_xattrsf, "", + "Dump XFS attribute shortform"}, + { "xbirec", kdbm_xfs_xbirec, "", + "Dump XFS bmalloc args structure"}, + { "xbrec", kdbm_xfs_xbrec, "", + "Dump XFS bmap btree root (data)"}, + { "xbroota", kdbm_xfs_xbroota, "", + "Dump XFS bmap btree root (attr)"}, + { "xbtcur", kdbm_xfs_xbtcur, "", + "Dump XFS btree cursor"}, + { "xbuf", kdbm_xfs_xbuf, "", + "Dump XFS data from a buffer"}, + { "xchash", kdbm_xfs_xchash, "", 
+ "Dump XFS cluster hash"}, + { "xchlist", kdbm_xfs_xchashlist, "", + "Dump XFS cluster hash list"}, + { "xd2free", kdbm_xfs_xdir2free, "", + "Dump XFS directory v2 freemap"}, + { "xdaargs", kdbm_xfs_xdaargs, "", + "Dump XFS dir/attr args structure"}, + { "xdabuf", kdbm_xfs_xdabuf, "", + "Dump XFS dir/attr buf structure"}, + { "xdanode", kdbm_xfs_xdanode, "", + "Dump XFS dir/attr node block"}, + { "xdastat", kdbm_xfs_xdastate, "", + "Dump XFS dir/attr state_blk struct"}, + { "xdelay", kdbm_xfs_delayed_blocks, "", + "Dump delayed block totals"}, + { "xdirlf", kdbm_xfs_xdirleaf, "", + "Dump XFS directory leaf block"}, + { "xdirsf", kdbm_xfs_xdirsf, "", + "Dump XFS directory shortform"}, + { "xdir2sf", kdbm_xfs_xdir2sf, "", + "Dump XFS directory v2 shortform"}, + { "xdiskdq", kdbm_xfs_xqm_diskdq, "", + "Dump XFS ondisk dquot (quota) struct"}, + { "xdqatt", kdbm_xfs_xqm_dqattached_inos, "", + "All incore inodes with dquots"}, + { "xdqinfo", kdbm_xfs_xqm_tpdqinfo, "", + "Dump dqinfo structure of a trans"}, + { "xdquot", kdbm_xfs_xqm_dquot, "", + "Dump XFS dquot (quota) structure"}, + { "xexlist", kdbm_xfs_xexlist, "", + "Dump XFS bmap extents in inode"}, + { "xflist", kdbm_xfs_xflist, "", + "Dump XFS to-be-freed extent list"}, + { "xhelp", kdbm_xfs_xhelp, "", + "Print idbg-xfs help"}, + { "xicall", kdbm_xfs_xiclogall, "", + "Dump All XFS in-core logs"}, + { "xiclog", kdbm_xfs_xiclog, "", + "Dump XFS in-core log"}, + { "xihash", kdbm_xfs_xihash, "", + "Dump XFS inode hash statistics"}, + { "xinodes", kdbm_xfs_xinodes, "", + "Dump XFS inodes per mount"}, + { "xquiesce",kdbm_xfs_xinodes_quiesce, "", + "Dump non-quiesced XFS inodes per mount"}, + { "xl_rcit", kdbm_xfs_xlog_ritem, "", + "Dump XFS recovery item"}, + { "xl_rctr", kdbm_xfs_xlog_rtrans, "", + "Dump XFS recovery transaction"}, + { "xl_rctr2",kdbm_xfs_xlog_rtrans_entire, "", + "Dump entire recovery transaction"}, + { "xl_tic", kdbm_xfs_xlog_tic, "", + "Dump XFS log ticket"}, + { "xlog", kdbm_xfs_xlog, "", + "Dump XFS log"}, + { "xlogcb", kdbm_xfs_xiclogcb, "", + "Dump XFS in-core log callbacks"}, + { "xlogitm", kdbm_xfs_xlogitem, "", + "Dump XFS log item structure"}, + { "xmount", kdbm_xfs_xmount, "", + "Dump XFS mount structure"}, + { "xnode", kdbm_xfs_xnode, "", + "Dump XFS inode"}, + { "xiocore", kdbm_xfs_xcore, "", + "Dump XFS iocore"}, + { "xperag", kdbm_xfs_xperag, "", + "Dump XFS per-allocation group data"}, + { "xqinfo", kdbm_xfs_xqm_qinfo, "", + "Dump mount->m_quotainfo structure"}, +#ifdef CONFIG_XFS_QUOTA + { "xqm", kdbm_xfs_xqm, "", + "Dump XFS quota manager structure"}, + { "xqmfree", kdbm_xfs_xqm_freelist, "", + "Dump XFS global freelist of dquots"}, + { "xqmhtab", kdbm_xfs_xqm_htab, "", + "Dump XFS hashtable of dquots"}, +#endif /* CONFIG_XFS_QUOTA */ + { "xqmplist",kdbm_xfs_xqm_mplist, "", + "Dump XFS all dquots of a f/s"}, + { "xsb", kdbm_xfs_xsb, " ", + "Dump XFS superblock"}, + { "xtp", kdbm_xfs_xtp, "", + "Dump XFS transaction structure"}, + { "xtrres", kdbm_xfs_xtrans_res, "", + "Dump XFS reservation values"}, + { 0, 0, 0 } +}; + +static int +__init xfsidbg_init(void) +{ + struct xif *p; + + for (p = xfsidbg_funcs; p->name; p++) + kdb_register(p->name, p->func, p->args, p->help, 0); + + kdb_register("pb", kdbm_pb, "", "Display page_buf_t", 0); + kdb_register("pbflags", kdbm_pb_flags, "", + "Display page buf flags", 0); + kdb_register("pbmap", kdbm_pbmap, "", + "Display Bmap", 0); + kdb_register("pbdelay", kdbm_pbdelay, "0|1", + "Display delwri pagebufs", 0); + kdb_register("pbtrace", kdbm_pbtrace, "|", + "page_buf_t 
trace", 0); +#ifdef PAGEBUF_TRACE + kdb_register("pboffset", kdbm_pbtrace_offset, " []", + "page_buf_t trace", 0); +#endif + return 0; +} + +static void +__exit xfsidbg_exit(void) +{ + struct xif *p; + + for (p = xfsidbg_funcs; p->name; p++) + kdb_unregister(p->name); + + kdb_unregister("pb"); + kdb_unregister("pbflags"); + kdb_unregister("pbmap"); + kdb_unregister("pbdelay"); + kdb_unregister("pbtrace"); +#ifdef PAGEBUF_TRACE + kdb_unregister("pboffset"); +#endif + +} + +/* + * Argument to xfs_alloc routines, for allocation type. + */ +static char *xfs_alloctype[] = { + "any_ag", "first_ag", "start_ag", "this_ag", + "start_bno", "near_bno", "this_bno" +}; + + +/* + * Prototypes for static functions. + */ +#ifdef DEBUG +static int xfs_alloc_trace_entry(ktrace_entry_t *ktep); +#endif +static void xfs_broot(xfs_inode_t *ip, xfs_ifork_t *f); +static void xfs_btalloc(xfs_alloc_block_t *bt, int bsz); +static void xfs_btbmap(xfs_bmbt_block_t *bt, int bsz); +static void xfs_btino(xfs_inobt_block_t *bt, int bsz); +static void xfs_buf_item_print(xfs_buf_log_item_t *blip, int summary); +static void xfs_dastate_path(xfs_da_state_path_t *p); +static void xfs_dir2data(void *addr, int size); +static void xfs_dir2leaf(xfs_dir2_leaf_t *leaf, int size); +static void xfs_dquot_item_print(xfs_dq_logitem_t *lip, int summary); +static void xfs_efd_item_print(xfs_efd_log_item_t *efdp, int summary); +static void xfs_efi_item_print(xfs_efi_log_item_t *efip, int summary); +static char *xfs_fmtformat(xfs_dinode_fmt_t f); +static char *xfs_fmtfsblock(xfs_fsblock_t bno, xfs_mount_t *mp); +static char *xfs_fmtino(xfs_ino_t ino, xfs_mount_t *mp); +static char *xfs_fmtlsn(xfs_lsn_t *lsnp); +static char *xfs_fmtmode(int m); +static char *xfs_fmtsize(size_t i); +static char *xfs_fmtuuid(uuid_t *); +static void xfs_inode_item_print(xfs_inode_log_item_t *ilip, int summary); +static void xfs_inodebuf(xfs_buf_t *bp); +static void xfs_prdinode(xfs_dinode_t *di, int coreonly, int convert); +static void xfs_prdinode_core(xfs_dinode_core_t *dip, int convert); +static void xfs_qoff_item_print(xfs_qoff_logitem_t *lip, int summary); +static void xfs_xexlist_fork(xfs_inode_t *ip, int whichfork); +static void xfs_xnode_fork(char *name, xfs_ifork_t *f); + +/* + * Static functions. + */ + +#ifdef DEBUG +/* + * Print xfs alloc trace buffer entry. + */ +static int +xfs_alloc_trace_entry(ktrace_entry_t *ktep) +{ + static char *modagf_flags[] = { + "magicnum", + "versionnum", + "seqno", + "length", + "roots", + "levels", + "flfirst", + "fllast", + "flcount", + "freeblks", + "longest", + NULL + }; + + if (((__psint_t)ktep->val[0] & 0xffff) == 0) + return 0; + switch ((long)ktep->val[0] & 0xffffL) { + case XFS_ALLOC_KTRACE_ALLOC: + kdb_printf("alloc %s[%s %d] mp 0x%p\n", + (char *)ktep->val[1], + ktep->val[2] ? 
(char *)ktep->val[2] : "", + (__psint_t)ktep->val[0] >> 16, + (xfs_mount_t *)ktep->val[3]); + kdb_printf( + "agno %d agbno %d minlen %d maxlen %d mod %d prod %d minleft %d\n", + (__psunsigned_t)ktep->val[4], + (__psunsigned_t)ktep->val[5], + (__psunsigned_t)ktep->val[6], + (__psunsigned_t)ktep->val[7], + (__psunsigned_t)ktep->val[8], + (__psunsigned_t)ktep->val[9], + (__psunsigned_t)ktep->val[10]); + kdb_printf("total %d alignment %d len %d type %s otype %s\n", + (__psunsigned_t)ktep->val[11], + (__psunsigned_t)ktep->val[12], + (__psunsigned_t)ktep->val[13], + xfs_alloctype[((__psint_t)ktep->val[14]) >> 16], + xfs_alloctype[((__psint_t)ktep->val[14]) & 0xffff]); + kdb_printf("wasdel %d wasfromfl %d isfl %d userdata %d\n", + ((__psint_t)ktep->val[15] & (1 << 3)) != 0, + ((__psint_t)ktep->val[15] & (1 << 2)) != 0, + ((__psint_t)ktep->val[15] & (1 << 1)) != 0, + ((__psint_t)ktep->val[15] & (1 << 0)) != 0); + break; + case XFS_ALLOC_KTRACE_FREE: + kdb_printf("free %s[%s %d] mp 0x%p\n", + (char *)ktep->val[1], + ktep->val[2] ? (char *)ktep->val[2] : "", + (__psint_t)ktep->val[0] >> 16, + (xfs_mount_t *)ktep->val[3]); + kdb_printf("agno %d agbno %d len %d isfl %d\n", + (__psunsigned_t)ktep->val[4], + (__psunsigned_t)ktep->val[5], + (__psunsigned_t)ktep->val[6], + (__psint_t)ktep->val[7]); + break; + case XFS_ALLOC_KTRACE_MODAGF: + kdb_printf("modagf %s[%s %d] mp 0x%p\n", + (char *)ktep->val[1], + ktep->val[2] ? (char *)ktep->val[2] : "", + (__psint_t)ktep->val[0] >> 16, + (xfs_mount_t *)ktep->val[3]); + printflags((__psint_t)ktep->val[4], modagf_flags, "modified"); + kdb_printf("seqno %d length %d roots b %d c %d\n", + (__psunsigned_t)ktep->val[5], + (__psunsigned_t)ktep->val[6], + (__psunsigned_t)ktep->val[7], + (__psunsigned_t)ktep->val[8]); + kdb_printf("levels b %d c %d flfirst %d fllast %d flcount %d\n", + (__psunsigned_t)ktep->val[9], + (__psunsigned_t)ktep->val[10], + (__psunsigned_t)ktep->val[11], + (__psunsigned_t)ktep->val[12], + (__psunsigned_t)ktep->val[13]); + kdb_printf("freeblks %d longest %d\n", + (__psunsigned_t)ktep->val[14], + (__psunsigned_t)ktep->val[15]); + break; + + case XFS_ALLOC_KTRACE_UNBUSY: + kdb_printf("unbusy %s [%s %d] mp 0x%p\n", + (char *)ktep->val[1], + ktep->val[2] ? (char *)ktep->val[2] : "", + (__psint_t)ktep->val[0] >> 16, + (xfs_mount_t *)ktep->val[3]); + kdb_printf(" agno %d slot %d tp 0x%x\n", + (__psunsigned_t)ktep->val[4], + (__psunsigned_t)ktep->val[7], + (__psunsigned_t)ktep->val[8]); + break; + case XFS_ALLOC_KTRACE_BUSY: + kdb_printf("busy %s [%s %d] mp 0x%p\n", + (char *)ktep->val[1], + ktep->val[2] ? (char *)ktep->val[2] : "", + (__psint_t)ktep->val[0] >> 16, + (xfs_mount_t *)ktep->val[3]); + kdb_printf(" agno %d agbno %d len %d slot %d tp 0x%x\n", + (__psunsigned_t)ktep->val[4], + (__psunsigned_t)ktep->val[5], + (__psunsigned_t)ktep->val[6], + (__psunsigned_t)ktep->val[7], + (__psunsigned_t)ktep->val[8]); + break; + case XFS_ALLOC_KTRACE_BUSYSEARCH: + kdb_printf("busy-search %s [%s %d] mp 0x%p\n", + (char *)ktep->val[1], + ktep->val[2] ? (char *)ktep->val[2] : "", + (__psint_t)ktep->val[0] >> 16, + (xfs_mount_t *)ktep->val[3]); + kdb_printf(" agno %d agbno %d len %d slot %d tp 0x%x\n", + (__psunsigned_t)ktep->val[4], + (__psunsigned_t)ktep->val[5], + (__psunsigned_t)ktep->val[6], + (__psunsigned_t)ktep->val[7], + (__psunsigned_t)ktep->val[8]); + break; + default: + kdb_printf("unknown alloc trace record\n"); + break; + } + return 1; +} +#endif /* DEBUG */ + +/* + * Print an xfs in-inode bmap btree root. 
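 * Shared by the data-fork and attribute-fork commands further down:
 * xfsidbg_xbroot() passes &ip->i_df, xfsidbg_xbroota() passes
 * ip->i_afp.  Forks that are not XFS_DINODE_FMT_BTREE format with
 * XFS_IFBROOT set are rejected; otherwise every key/pointer pair of
 * the incore root block is printed via XFS_BMAP_BROOT_KEY_ADDR() and
 * XFS_BMAP_BROOT_PTR_ADDR().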
+ */ +static void +xfs_broot(xfs_inode_t *ip, xfs_ifork_t *f) +{ + xfs_bmbt_block_t *broot; + int format; + int i; + xfs_bmbt_key_t *kp; + xfs_bmbt_ptr_t *pp; + + format = f == &ip->i_df ? ip->i_d.di_format : ip->i_d.di_aformat; + if ((f->if_flags & XFS_IFBROOT) == 0 || + format != XFS_DINODE_FMT_BTREE) { + kdb_printf("inode 0x%p not btree format\n", ip); + return; + } + broot = f->if_broot; + kdb_printf("block @0x%p magic %x level %d numrecs %d\n", + broot, INT_GET(broot->bb_magic, ARCH_CONVERT), INT_GET(broot->bb_level, ARCH_CONVERT), INT_GET(broot->bb_numrecs, ARCH_CONVERT)); + kp = XFS_BMAP_BROOT_KEY_ADDR(broot, 1, f->if_broot_bytes); + pp = XFS_BMAP_BROOT_PTR_ADDR(broot, 1, f->if_broot_bytes); + for (i = 1; i <= INT_GET(broot->bb_numrecs, ARCH_CONVERT); i++) + kdb_printf("\t%d: startoff %Ld ptr %Lx %s\n", + i, INT_GET(kp[i - 1].br_startoff, ARCH_CONVERT), INT_GET(pp[i - 1], ARCH_CONVERT), + xfs_fmtfsblock(INT_GET(pp[i - 1], ARCH_CONVERT), ip->i_mount)); +} + +/* + * Print allocation btree block. + */ +static void +xfs_btalloc(xfs_alloc_block_t *bt, int bsz) +{ + int i; + + kdb_printf("magic 0x%x level %d numrecs %d leftsib 0x%x rightsib 0x%x\n", + INT_GET(bt->bb_magic, ARCH_CONVERT), INT_GET(bt->bb_level, ARCH_CONVERT), INT_GET(bt->bb_numrecs, ARCH_CONVERT), + INT_GET(bt->bb_leftsib, ARCH_CONVERT), INT_GET(bt->bb_rightsib, ARCH_CONVERT)); + if (INT_ISZERO(bt->bb_level, ARCH_CONVERT)) { + + for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) { + xfs_alloc_rec_t *r; + + r = XFS_BTREE_REC_ADDR(bsz, xfs_alloc, bt, i, 0); + kdb_printf("rec %d startblock 0x%x blockcount %d\n", + i, INT_GET(r->ar_startblock, ARCH_CONVERT), INT_GET(r->ar_blockcount, ARCH_CONVERT)); + } + } else { + int mxr; + + mxr = XFS_BTREE_BLOCK_MAXRECS(bsz, xfs_alloc, 0); + for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) { + xfs_alloc_key_t *k; + xfs_alloc_ptr_t *p; + + k = XFS_BTREE_KEY_ADDR(bsz, xfs_alloc, bt, i, mxr); + p = XFS_BTREE_PTR_ADDR(bsz, xfs_alloc, bt, i, mxr); + kdb_printf("key %d startblock 0x%x blockcount %d ptr 0x%x\n", + i, INT_GET(k->ar_startblock, ARCH_CONVERT), INT_GET(k->ar_blockcount, ARCH_CONVERT), *p); + } + } +} + +/* + * Print a bmap btree block. + */ +static void +xfs_btbmap(xfs_bmbt_block_t *bt, int bsz) +{ + int i; + + kdb_printf("magic 0x%x level %d numrecs %d leftsib %Lx ", + INT_GET(bt->bb_magic, ARCH_CONVERT), + INT_GET(bt->bb_level, ARCH_CONVERT), + INT_GET(bt->bb_numrecs, ARCH_CONVERT), + INT_GET(bt->bb_leftsib, ARCH_CONVERT)); + kdb_printf("rightsib %Lx\n", INT_GET(bt->bb_rightsib, ARCH_CONVERT)); + if (INT_ISZERO(bt->bb_level, ARCH_CONVERT)) { + for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) { + xfs_bmbt_rec_t *r; + xfs_bmbt_irec_t irec; + + r = (xfs_bmbt_rec_t *)XFS_BTREE_REC_ADDR(bsz, + xfs_bmbt, bt, i, 0); + + xfs_bmbt_disk_get_all((xfs_bmbt_rec_t *)r, &irec); + kdb_printf("rec %d startoff %Ld startblock %Lx blockcount %Ld flag %d\n", + i, irec.br_startoff, + (__uint64_t)irec.br_startblock, + irec.br_blockcount, irec.br_state); + } + } else { + int mxr; + + mxr = XFS_BTREE_BLOCK_MAXRECS(bsz, xfs_bmbt, 0); + for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) { + xfs_bmbt_key_t *k; + xfs_bmbt_ptr_t *p; + + k = XFS_BTREE_KEY_ADDR(bsz, xfs_bmbt, bt, i, mxr); + p = XFS_BTREE_PTR_ADDR(bsz, xfs_bmbt, bt, i, mxr); + kdb_printf("key %d startoff %Ld ", + i, INT_GET(k->br_startoff, ARCH_CONVERT)); + kdb_printf("ptr %Lx\n", INT_GET(*p, ARCH_CONVERT)); + } + } +} + +/* + * Print an inode btree block. 
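 * As with the allocation and bmap variants above, level 0 (leaf)
 * blocks get their records printed (ir_startino, ir_freecount,
 * ir_free), while interior blocks get key/pointer pairs located with
 * a max-records value from XFS_BTREE_BLOCK_MAXRECS(), mirroring the
 * node case below:
 *
 *	mxr = XFS_BTREE_BLOCK_MAXRECS(bsz, xfs_inobt, 0);
 *	k = XFS_BTREE_KEY_ADDR(bsz, xfs_inobt, bt, i, mxr);
 *	p = XFS_BTREE_PTR_ADDR(bsz, xfs_inobt, bt, i, mxr);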
+ */ +static void +xfs_btino(xfs_inobt_block_t *bt, int bsz) +{ + int i; + + kdb_printf("magic 0x%x level %d numrecs %d leftsib 0x%x rightsib 0x%x\n", + INT_GET(bt->bb_magic, ARCH_CONVERT), INT_GET(bt->bb_level, ARCH_CONVERT), INT_GET(bt->bb_numrecs, ARCH_CONVERT), + INT_GET(bt->bb_leftsib, ARCH_CONVERT), INT_GET(bt->bb_rightsib, ARCH_CONVERT)); + if (INT_ISZERO(bt->bb_level, ARCH_CONVERT)) { + + for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) { + xfs_inobt_rec_t *r; + + r = XFS_BTREE_REC_ADDR(bsz, xfs_inobt, bt, i, 0); + kdb_printf("rec %d startino 0x%x freecount %d, free %Lx\n", + i, INT_GET(r->ir_startino, ARCH_CONVERT), INT_GET(r->ir_freecount, ARCH_CONVERT), + INT_GET(r->ir_free, ARCH_CONVERT)); + } + } else { + int mxr; + + mxr = XFS_BTREE_BLOCK_MAXRECS(bsz, xfs_inobt, 0); + for (i = 1; i <= INT_GET(bt->bb_numrecs, ARCH_CONVERT); i++) { + xfs_inobt_key_t *k; + xfs_inobt_ptr_t *p; + + k = XFS_BTREE_KEY_ADDR(bsz, xfs_inobt, bt, i, mxr); + p = XFS_BTREE_PTR_ADDR(bsz, xfs_inobt, bt, i, mxr); + kdb_printf("key %d startino 0x%x ptr 0x%x\n", + i, INT_GET(k->ir_startino, ARCH_CONVERT), INT_GET(*p, ARCH_CONVERT)); + } + } +} + +/* + * Print a buf log item. + */ +static void +xfs_buf_item_print(xfs_buf_log_item_t *blip, int summary) +{ + static char *bli_flags[] = { + "hold", /* 0x1 */ + "dirty", /* 0x2 */ + "stale", /* 0x4 */ + "logged", /* 0x8 */ + "ialloc", /* 0x10 */ + 0 + }; + static char *blf_flags[] = { + "inode", /* 0x1 */ + "cancel", /* 0x2 */ + 0 + }; + + if (summary) { + kdb_printf("buf 0x%p blkno 0x%Lx ", blip->bli_buf, + blip->bli_format.blf_blkno); + printflags(blip->bli_flags, bli_flags, "flags:"); + kdb_printf("\n "); + xfsidbg_xbuf_real(blip->bli_buf, 1); + return; + } + kdb_printf("buf 0x%p recur %d refcount %d flags:", + blip->bli_buf, blip->bli_recur, + atomic_read(&blip->bli_refcount)); + printflags(blip->bli_flags, bli_flags, NULL); + kdb_printf("\n"); + kdb_printf("size %d blkno 0x%Lx len 0x%x map size %d map 0x%p\n", + blip->bli_format.blf_size, blip->bli_format.blf_blkno, + (uint) blip->bli_format.blf_len, blip->bli_format.blf_map_size, + &(blip->bli_format.blf_data_map[0])); + kdb_printf("blf flags: "); + printflags((uint)blip->bli_format.blf_flags, blf_flags, NULL); +#ifdef XFS_TRANS_DEBUG + kdb_printf("orig 0x%x logged 0x%x", + blip->bli_orig, blip->bli_logged); +#endif + kdb_printf("\n"); +} + +/* + * Print an xfs_da_state_path structure. + */ +static void +xfs_dastate_path(xfs_da_state_path_t *p) +{ + int i; + + kdb_printf("active %d\n", p->active); + for (i = 0; i < XFS_DA_NODE_MAXDEPTH; i++) { + kdb_printf(" blk %d bp 0x%p blkno 0x%x", + i, p->blk[i].bp, p->blk[i].blkno); + kdb_printf(" index %d hashval 0x%x ", + p->blk[i].index, (uint_t)p->blk[i].hashval); + switch(p->blk[i].magic) { + case XFS_DA_NODE_MAGIC: kdb_printf("NODE\n"); break; + case XFS_DIR_LEAF_MAGIC: kdb_printf("DIR\n"); break; + case XFS_ATTR_LEAF_MAGIC: kdb_printf("ATTR\n"); break; + case XFS_DIR2_LEAFN_MAGIC: kdb_printf("DIR2\n"); break; + default: kdb_printf("type ??\n"); break; + } + } +} + + +/* + * Print an efd log item. 
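 * In summary mode only the EFI id, extent count and item address are
 * printed; the full dump also walks efd_extents[] printing each
 * ext_start/ext_len pair.  The EFI printer below does the same for the
 * intention side and additionally decodes efi_flags
 * (recovered/committed/cancelled).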
+ */ +static void +xfs_efd_item_print(xfs_efd_log_item_t *efdp, int summary) +{ + int i; + xfs_extent_t *ep; + + if (summary) { + kdb_printf("Extent Free Done: ID 0x%Lx nextents %d (at 0x%p)\n", + efdp->efd_format.efd_efi_id, + efdp->efd_format.efd_nextents, efdp); + return; + } + kdb_printf("size %d nextents %d next extent %d efip 0x%p\n", + efdp->efd_format.efd_size, efdp->efd_format.efd_nextents, + efdp->efd_next_extent, efdp->efd_efip); + kdb_printf("efi_id 0x%Lx\n", efdp->efd_format.efd_efi_id); + kdb_printf("efd extents:\n"); + ep = &(efdp->efd_format.efd_extents[0]); + for (i = 0; i < efdp->efd_next_extent; i++, ep++) { + kdb_printf(" block %Lx len %d\n", + ep->ext_start, ep->ext_len); + } +} + +/* + * Print an efi log item. + */ +static void +xfs_efi_item_print(xfs_efi_log_item_t *efip, int summary) +{ + int i; + xfs_extent_t *ep; + static char *efi_flags[] = { + "recovered", /* 0x1 */ + "committed", /* 0x2 */ + "cancelled", /* 0x4 */ + 0, + }; + + if (summary) { + kdb_printf("Extent Free Intention: ID 0x%Lx nextents %d (at 0x%p)\n", + efip->efi_format.efi_id, + efip->efi_format.efi_nextents, efip); + return; + } + kdb_printf("size %d nextents %d next extent %d\n", + efip->efi_format.efi_size, efip->efi_format.efi_nextents, + efip->efi_next_extent); + kdb_printf("id %Lx", efip->efi_format.efi_id); + printflags(efip->efi_flags, efi_flags, "flags :"); + kdb_printf("\n"); + kdb_printf("efi extents:\n"); + ep = &(efip->efi_format.efi_extents[0]); + for (i = 0; i < efip->efi_next_extent; i++, ep++) { + kdb_printf(" block %Lx len %d\n", + ep->ext_start, ep->ext_len); + } +} + +/* + * Format inode "format" into a static buffer & return it. + */ +static char * +xfs_fmtformat(xfs_dinode_fmt_t f) +{ + static char *t[] = { + "dev", + "local", + "extents", + "btree", + "uuid" + }; + + return t[f]; +} + +/* + * Format fsblock number into a static buffer & return it. + */ +static char * +xfs_fmtfsblock(xfs_fsblock_t bno, xfs_mount_t *mp) +{ + static char rval[50]; + + if (bno == NULLFSBLOCK) + sprintf(rval, "NULLFSBLOCK"); + else if (ISNULLSTARTBLOCK(bno)) + sprintf(rval, "NULLSTARTBLOCK(%Ld)", STARTBLOCKVAL(bno)); + else if (mp) + sprintf(rval, "%Ld[%x:%x]", (xfs_dfsbno_t)bno, + XFS_FSB_TO_AGNO(mp, bno), XFS_FSB_TO_AGBNO(mp, bno)); + else + sprintf(rval, "%Ld", (xfs_dfsbno_t)bno); + return rval; +} + +/* + * Format inode number into a static buffer & return it. + */ +static char * +xfs_fmtino(xfs_ino_t ino, xfs_mount_t *mp) +{ + static char rval[50]; + + if (mp) + sprintf(rval, "%llu[%x:%x:%x]", + (unsigned long long) ino, + XFS_INO_TO_AGNO(mp, ino), + XFS_INO_TO_AGBNO(mp, ino), + XFS_INO_TO_OFFSET(mp, ino)); + else + sprintf(rval, "%llu", (unsigned long long) ino); + return rval; +} + +/* + * Format an lsn for printing into a static buffer & return it. + */ +static char * +xfs_fmtlsn(xfs_lsn_t *lsnp) +{ + uint *wordp; + uint *word2p; + static char buf[20]; + + wordp = (uint *)lsnp; + word2p = wordp++; + sprintf(buf, "[%u:%u]", *wordp, *word2p); + + return buf; +} + +/* + * Format file mode into a static buffer & return it. + */ +static char * +xfs_fmtmode(int m) +{ + static char rval[16]; + + sprintf(rval, "%c%c%c%c%c%c%c%c%c%c%c%c%c", + "?fc?dxb?r?l?S?m?"[(m & IFMT) >> 12], + m & ISUID ? 'u' : '-', + m & ISGID ? 'g' : '-', + m & ISVTX ? 'v' : '-', + m & IREAD ? 'r' : '-', + m & IWRITE ? 'w' : '-', + m & IEXEC ? 'x' : '-', + m & (IREAD >> 3) ? 'r' : '-', + m & (IWRITE >> 3) ? 'w' : '-', + m & (IEXEC >> 3) ? 'x' : '-', + m & (IREAD >> 6) ? 'r' : '-', + m & (IWRITE >> 6) ? 
'w' : '-', + m & (IEXEC >> 6) ? 'x' : '-'); + return rval; +} + +/* + * Format a size into a static buffer & return it. + */ +static char * +xfs_fmtsize(size_t i) +{ + static char rval[20]; + + /* size_t is 32 bits in 32-bit kernel, 64 bits in 64-bit kernel */ + sprintf(rval, "0x%lx", (unsigned long) i); + return rval; +} + +/* + * Format a uuid into a static buffer & return it. + */ +static char * +xfs_fmtuuid(uuid_t *uu) +{ + static char rval[40]; + char *o = rval; + char *i = (unsigned char*)uu; + int b; + + for (b=0;b<16;b++) { + o+=sprintf(o, "%02x", *i++); + if (b==3||b==5||b==7||b==9) *o++='-'; + } + *o='\0'; + + return rval; +} + +/* + * Print an inode log item. + */ +static void +xfs_inode_item_print(xfs_inode_log_item_t *ilip, int summary) +{ + static char *ili_flags[] = { + "hold", /* 0x1 */ + "iolock excl", /* 0x2 */ + "iolock shrd", /* 0x4 */ + 0 + }; + static char *ilf_fields[] = { + "core", /* 0x001 */ + "ddata", /* 0x002 */ + "dexts", /* 0x004 */ + "dbroot", /* 0x008 */ + "dev", /* 0x010 */ + "uuid", /* 0x020 */ + "adata", /* 0x040 */ + "aext", /* 0x080 */ + "abroot", /* 0x100 */ + 0 + }; + + if (summary) { + kdb_printf("inode 0x%p logged %d ", + ilip->ili_inode, ilip->ili_logged); + printflags(ilip->ili_flags, ili_flags, "flags:"); + printflags(ilip->ili_format.ilf_fields, ilf_fields, "format:"); + printflags(ilip->ili_last_fields, ilf_fields, "lastfield:"); + kdb_printf("\n"); + return; + } + kdb_printf("inode 0x%p ino 0x%llu pushbuf %d logged %d flags: ", + ilip->ili_inode, (unsigned long long) ilip->ili_format.ilf_ino, + ilip->ili_pushbuf_flag, ilip->ili_logged); + printflags(ilip->ili_flags, ili_flags, NULL); + kdb_printf("\n"); + kdb_printf("ilock recur %d iolock recur %d ext buf 0x%p\n", + ilip->ili_ilock_recur, ilip->ili_iolock_recur, + ilip->ili_extents_buf); +#ifdef XFS_TRANS_DEBUG + kdb_printf("root bytes %d root orig 0x%x\n", + ilip->ili_root_size, ilip->ili_orig_root); +#endif + kdb_printf("size %d ", ilip->ili_format.ilf_size); + printflags(ilip->ili_format.ilf_fields, ilf_fields, "fields:"); + printflags(ilip->ili_last_fields, ilf_fields, " last fields: "); + kdb_printf("\n"); + kdb_printf(" flush lsn %s last lsn %s\n", + xfs_fmtlsn(&(ilip->ili_flush_lsn)), + xfs_fmtlsn(&(ilip->ili_last_lsn))); + kdb_printf("dsize %d, asize %d, rdev 0x%x\n", + ilip->ili_format.ilf_dsize, + ilip->ili_format.ilf_asize, + ilip->ili_format.ilf_u.ilfu_rdev); + kdb_printf("blkno 0x%Lx len 0x%x boffset 0x%x\n", + ilip->ili_format.ilf_blkno, + ilip->ili_format.ilf_len, + ilip->ili_format.ilf_boffset); +} + +/* + * Print a dquot log item. + */ +/* ARGSUSED */ +static void +xfs_dquot_item_print(xfs_dq_logitem_t *lip, int summary) +{ + kdb_printf("dquot 0x%p\n", + lip->qli_dquot); + +} + +/* + * Print a quotaoff log item. + */ +/* ARGSUSED */ +static void +xfs_qoff_item_print(xfs_qoff_logitem_t *lip, int summary) +{ + kdb_printf("start qoff item 0x%p flags 0x%x\n", + lip->qql_start_lip, lip->qql_format.qf_flags); + +} + +/* + * Print buffer full of inodes. + */ +static void +xfs_inodebuf(xfs_buf_t *bp) +{ + xfs_dinode_t *di; + int n, i; + + n = XFS_BUF_COUNT(bp) >> 8; + for (i = 0; i < n; i++) { + di = (xfs_dinode_t *)xfs_buf_offset(bp, + i * 256); + xfs_prdinode(di, 0, ARCH_CONVERT); + } +} + + +/* + * Print disk inode. 
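 * The convert argument is passed straight through to INT_GET(), so a
 * caller reading the inode from an on-disk buffer passes ARCH_CONVERT
 * (as xfs_inodebuf() above does); with coreonly clear the
 * next_unlinked pointer and the address of the di_u union are printed
 * after the core fields.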
+ */ +static void +xfs_prdinode(xfs_dinode_t *di, int coreonly, int convert) +{ + xfs_prdinode_core(&di->di_core, convert); + if (!coreonly) + kdb_printf("next_unlinked 0x%x u@0x%p\n", + INT_GET(di->di_next_unlinked, convert), + &di->di_u); +} + +/* + * Print disk inode core. + */ +static void +xfs_prdinode_core(xfs_dinode_core_t *dip, int convert) +{ + static char *diflags[] = { + "realtime", /* XFS_DIFLAG_REALTIME */ + "prealloc", /* XFS_DIFLAG_PREALLOC */ + NULL + }; + + kdb_printf("magic 0x%x mode 0%o (%s) version 0x%x format 0x%x (%s)\n", + INT_GET(dip->di_magic, convert), + INT_GET(dip->di_mode, convert), + xfs_fmtmode(INT_GET(dip->di_mode, convert)), + INT_GET(dip->di_version, convert), + INT_GET(dip->di_format, convert), + xfs_fmtformat( + (xfs_dinode_fmt_t)INT_GET(dip->di_format, convert))); + kdb_printf("nlink %d uid %d gid %d projid %d\n", + INT_GET(dip->di_nlink, convert), + INT_GET(dip->di_uid, convert), + INT_GET(dip->di_gid, convert), + (uint)INT_GET(dip->di_projid, convert)); + kdb_printf("atime %u:%u mtime %ud:%u ctime %u:%u\n", + INT_GET(dip->di_atime.t_sec, convert), + INT_GET(dip->di_atime.t_nsec, convert), + INT_GET(dip->di_mtime.t_sec, convert), + INT_GET(dip->di_mtime.t_nsec, convert), + INT_GET(dip->di_ctime.t_sec, convert), + INT_GET(dip->di_ctime.t_nsec, convert)); + kdb_printf("size 0x%Ld ", INT_GET(dip->di_size, convert)); + kdb_printf("nblocks %Ld extsize 0x%x nextents 0x%x anextents 0x%x\n", + INT_GET(dip->di_nblocks, convert), + INT_GET(dip->di_extsize, convert), + INT_GET(dip->di_nextents, convert), + INT_GET(dip->di_anextents, convert)); + kdb_printf("forkoff %d aformat 0x%x (%s) dmevmask 0x%x dmstate 0x%x ", + INT_GET(dip->di_forkoff, convert), + INT_GET(dip->di_aformat, convert), + xfs_fmtformat( + (xfs_dinode_fmt_t)INT_GET(dip->di_aformat, convert)), + INT_GET(dip->di_dmevmask, convert), + INT_GET(dip->di_dmstate, convert)); + printflags(INT_GET(dip->di_flags, convert), diflags, "flags"); + kdb_printf("gen 0x%x\n", INT_GET(dip->di_gen, convert)); +} + +/* + * Print xfs extent list for a fork. + */ +static void +xfs_xexlist_fork(xfs_inode_t *ip, int whichfork) +{ + int nextents, i; + xfs_ifork_t *ifp; + xfs_bmbt_irec_t irec; + + ifp = XFS_IFORK_PTR(ip, whichfork); + if (ifp->if_flags & XFS_IFEXTENTS) { + nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_64_t); + kdb_printf("inode 0x%p %cf extents 0x%p nextents 0x%x\n", + ip, "da"[whichfork], ifp->if_u1.if_extents, nextents); + for (i = 0; i < nextents; i++) { + xfs_bmbt_get_all(&ifp->if_u1.if_extents[i], &irec); + kdb_printf( + "%d: startoff %Ld startblock %s blockcount %Ld flag %d\n", + i, irec.br_startoff, + xfs_fmtfsblock(irec.br_startblock, ip->i_mount), + irec.br_blockcount, irec.br_state); + } + } +} + +static void +xfs_xnode_fork(char *name, xfs_ifork_t *f) +{ + static char *tab_flags[] = { + "inline", /* XFS_IFINLINE */ + "extents", /* XFS_IFEXTENTS */ + "broot", /* XFS_IFBROOT */ + NULL + }; + int *p; + + kdb_printf("%s fork", name); + if (f == NULL) { + kdb_printf(" empty\n"); + return; + } else + kdb_printf("\n"); + kdb_printf(" bytes %s ", xfs_fmtsize(f->if_bytes)); + kdb_printf("real_bytes %s lastex 0x%x u1:%s 0x%p\n", + xfs_fmtsize(f->if_real_bytes), f->if_lastex, + f->if_flags & XFS_IFINLINE ? "data" : "extents", + f->if_flags & XFS_IFINLINE ? 
+ f->if_u1.if_data : + (char *)f->if_u1.if_extents); + kdb_printf(" broot 0x%p broot_bytes %s ext_max %d ", + f->if_broot, xfs_fmtsize(f->if_broot_bytes), f->if_ext_max); + printflags(f->if_flags, tab_flags, "flags"); + kdb_printf("\n"); + kdb_printf(" u2"); + for (p = (int *)&f->if_u2; + p < (int *)((char *)&f->if_u2 + XFS_INLINE_DATA); + p++) + kdb_printf(" 0x%x", *p); + kdb_printf("\n"); +} + +/* + * Command-level xfs-idbg functions. + */ + +/* + * Print xfs allocation group freespace header. + */ +static void +xfsidbg_xagf(xfs_agf_t *agf) +{ + kdb_printf("magicnum 0x%x versionnum 0x%x seqno 0x%x length 0x%x\n", + INT_GET(agf->agf_magicnum, ARCH_CONVERT), + INT_GET(agf->agf_versionnum, ARCH_CONVERT), + INT_GET(agf->agf_seqno, ARCH_CONVERT), + INT_GET(agf->agf_length, ARCH_CONVERT)); + kdb_printf("roots b 0x%x c 0x%x levels b %d c %d\n", + INT_GET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT), + INT_GET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT), + INT_GET(agf->agf_levels[XFS_BTNUM_BNO], ARCH_CONVERT), + INT_GET(agf->agf_levels[XFS_BTNUM_CNT], ARCH_CONVERT)); + kdb_printf("flfirst %d fllast %d flcount %d freeblks %d longest %d\n", + INT_GET(agf->agf_flfirst, ARCH_CONVERT), + INT_GET(agf->agf_fllast, ARCH_CONVERT), + INT_GET(agf->agf_flcount, ARCH_CONVERT), + INT_GET(agf->agf_freeblks, ARCH_CONVERT), + INT_GET(agf->agf_longest, ARCH_CONVERT)); +} + +/* + * Print xfs allocation group inode header. + */ +static void +xfsidbg_xagi(xfs_agi_t *agi) +{ + int i; + int j; + + kdb_printf("magicnum 0x%x versionnum 0x%x seqno 0x%x length 0x%x\n", + INT_GET(agi->agi_magicnum, ARCH_CONVERT), + INT_GET(agi->agi_versionnum, ARCH_CONVERT), + INT_GET(agi->agi_seqno, ARCH_CONVERT), + INT_GET(agi->agi_length, ARCH_CONVERT)); + kdb_printf("count 0x%x root 0x%x level 0x%x\n", + INT_GET(agi->agi_count, ARCH_CONVERT), + INT_GET(agi->agi_root, ARCH_CONVERT), + INT_GET(agi->agi_level, ARCH_CONVERT)); + kdb_printf("freecount 0x%x newino 0x%x dirino 0x%x\n", + INT_GET(agi->agi_freecount, ARCH_CONVERT), + INT_GET(agi->agi_newino, ARCH_CONVERT), + INT_GET(agi->agi_dirino, ARCH_CONVERT)); + + kdb_printf("unlinked buckets\n"); + for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { + for (j = 0; j < 4; j++, i++) { + kdb_printf("0x%08x ", + INT_GET(agi->agi_unlinked[i], ARCH_CONVERT)); + } + kdb_printf("\n"); + } +} + + +/* + * Print an allocation argument structure for XFS. + */ +static void +xfsidbg_xalloc(xfs_alloc_arg_t *args) +{ + kdb_printf("tp 0x%p mp 0x%p agbp 0x%p pag 0x%p fsbno %s\n", + args->tp, args->mp, args->agbp, args->pag, + xfs_fmtfsblock(args->fsbno, args->mp)); + kdb_printf("agno 0x%x agbno 0x%x minlen 0x%x maxlen 0x%x mod 0x%x\n", + args->agno, args->agbno, args->minlen, args->maxlen, args->mod); + kdb_printf("prod 0x%x minleft 0x%x total 0x%x alignment 0x%x\n", + args->prod, args->minleft, args->total, args->alignment); + kdb_printf("minalignslop 0x%x len 0x%x type %s otype %s wasdel %d\n", + args->minalignslop, args->len, xfs_alloctype[args->type], + xfs_alloctype[args->otype], args->wasdel); + kdb_printf("wasfromfl %d isfl %d userdata %d\n", + args->wasfromfl, args->isfl, args->userdata); +} + +#ifdef DEBUG +/* + * Print out all the entries in the alloc trace buf corresponding + * to the given mount point. 
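 * Entries are walked with ktrace_first()/ktrace_next() and filtered on
 * slot 3, which the alloc tracer uses for the xfs_mount_t pointer;
 * each hit is handed to xfs_alloc_trace_entry() above, roughly:
 *
 *	ktep = ktrace_first(xfs_alloc_trace_buf, &kts);
 *	while (ktep != NULL) {
 *		if (ktep->val[0] && (xfs_mount_t *)ktep->val[3] == mp)
 *			xfs_alloc_trace_entry(ktep);
 *		ktep = ktrace_next(xfs_alloc_trace_buf, &kts);
 *	}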
+ */ +static void +xfsidbg_xalmtrace(xfs_mount_t *mp) +{ + ktrace_entry_t *ktep; + ktrace_snap_t kts; + extern ktrace_t *xfs_alloc_trace_buf; + + if (xfs_alloc_trace_buf == NULL) { + kdb_printf("The xfs alloc trace buffer is not initialized\n"); + return; + } + + ktep = ktrace_first(xfs_alloc_trace_buf, &kts); + while (ktep != NULL) { + if ((__psint_t)ktep->val[0] && (xfs_mount_t *)ktep->val[3] == mp) { + (void)xfs_alloc_trace_entry(ktep); + kdb_printf("\n"); + } + ktep = ktrace_next(xfs_alloc_trace_buf, &kts); + } +} +#endif /* DEBUG */ + +/* + * Print an attr_list() context structure. + */ +static void +xfsidbg_xattrcontext(xfs_attr_list_context_t *context) +{ + static char *attr_arg_flags[] = { + "DONTFOLLOW", /* 0x0001 */ + "?", /* 0x0002 */ + "?", /* 0x0004 */ + "?", /* 0x0008 */ + "CREATE", /* 0x0010 */ + "?", /* 0x0020 */ + "?", /* 0x0040 */ + "?", /* 0x0080 */ + "?", /* 0x0100 */ + "?", /* 0x0200 */ + "?", /* 0x0400 */ + "?", /* 0x0800 */ + "KERNOTIME", /* 0x1000 */ + NULL + }; + + kdb_printf("dp 0x%p, dupcnt %d, resynch %d", + context->dp, context->dupcnt, context->resynch); + printflags((__psunsigned_t)context->flags, attr_arg_flags, ", flags"); + kdb_printf("\ncursor h/b/o 0x%x/0x%x/%d -- p/p/i 0x%x/0x%x/0x%x\n", + context->cursor->hashval, context->cursor->blkno, + context->cursor->offset, context->cursor->pad1, + context->cursor->pad2, context->cursor->initted); + kdb_printf("alist 0x%p, bufsize 0x%x, count %d, firstu 0x%x\n", + context->alist, context->bufsize, context->count, + context->firstu); +} + +/* + * Print attribute leaf block. + */ +static void +xfsidbg_xattrleaf(xfs_attr_leafblock_t *leaf) +{ + xfs_attr_leaf_hdr_t *h; + xfs_da_blkinfo_t *i; + xfs_attr_leaf_map_t *m; + xfs_attr_leaf_entry_t *e; + xfs_attr_leaf_name_local_t *l; + xfs_attr_leaf_name_remote_t *r; + int j, k; + + h = &leaf->hdr; + i = &h->info; + kdb_printf("hdr info forw 0x%x back 0x%x magic 0x%x\n", + i->forw, i->back, i->magic); + kdb_printf("hdr count %d usedbytes %d firstused %d holes %d\n", + INT_GET(h->count, ARCH_CONVERT), + INT_GET(h->usedbytes, ARCH_CONVERT), + INT_GET(h->firstused, ARCH_CONVERT), h->holes); + for (j = 0, m = h->freemap; j < XFS_ATTR_LEAF_MAPSIZE; j++, m++) { + kdb_printf("hdr freemap %d base %d size %d\n", + j, INT_GET(m->base, ARCH_CONVERT), + INT_GET(m->size, ARCH_CONVERT)); + } + for (j = 0, e = leaf->entries; j < INT_GET(h->count, ARCH_CONVERT); j++, e++) { + kdb_printf("[%2d] hash 0x%x nameidx %d flags 0x%x", + j, INT_GET(e->hashval, ARCH_CONVERT), + INT_GET(e->nameidx, ARCH_CONVERT), e->flags); + if (e->flags & XFS_ATTR_LOCAL) + kdb_printf("LOCAL "); + if (e->flags & XFS_ATTR_ROOT) + kdb_printf("ROOT "); + if (e->flags & XFS_ATTR_INCOMPLETE) + kdb_printf("INCOMPLETE "); + k = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_INCOMPLETE); + if ((e->flags & k) != 0) + kdb_printf("0x%x", e->flags & k); + kdb_printf(">\n name \""); + if (e->flags & XFS_ATTR_LOCAL) { + l = XFS_ATTR_LEAF_NAME_LOCAL(leaf, j); + for (k = 0; k < l->namelen; k++) + kdb_printf("%c", l->nameval[k]); + kdb_printf("\"(%d) value \"", l->namelen); + for (k = 0; (k < INT_GET(l->valuelen, ARCH_CONVERT)) && (k < 32); k++) + kdb_printf("%c", l->nameval[l->namelen + k]); + if (k == 32) + kdb_printf("..."); + kdb_printf("\"(%d)\n", + INT_GET(l->valuelen, ARCH_CONVERT)); + } else { + r = XFS_ATTR_LEAF_NAME_REMOTE(leaf, j); + for (k = 0; k < r->namelen; k++) + kdb_printf("%c", r->name[k]); + kdb_printf("\"(%d) value blk 0x%x len %d\n", + r->namelen, + INT_GET(r->valueblk, ARCH_CONVERT), + INT_GET(r->valuelen, 
ARCH_CONVERT)); + } + } +} + +/* + * Print a shortform attribute list. + */ +static void +xfsidbg_xattrsf(xfs_attr_shortform_t *s) +{ + xfs_attr_sf_hdr_t *sfh; + xfs_attr_sf_entry_t *sfe; + int i, j; + + sfh = &s->hdr; + kdb_printf("hdr count %d\n", INT_GET(sfh->count, ARCH_CONVERT)); + for (i = 0, sfe = s->list; i < INT_GET(sfh->count, ARCH_CONVERT); i++) { + kdb_printf("entry %d namelen %d name \"", i, sfe->namelen); + for (j = 0; j < sfe->namelen; j++) + kdb_printf("%c", sfe->nameval[j]); + kdb_printf("\" valuelen %d value \"", INT_GET(sfe->valuelen, ARCH_CONVERT)); + for (j = 0; (j < INT_GET(sfe->valuelen, ARCH_CONVERT)) && (j < 32); j++) + kdb_printf("%c", sfe->nameval[sfe->namelen + j]); + if (j == 32) + kdb_printf("..."); + kdb_printf("\"\n"); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + } +} + + +/* + * Print xfs bmap internal record + */ +static void +xfsidbg_xbirec(xfs_bmbt_irec_t *r) +{ + kdb_printf( + "startoff %Ld startblock %Lx blockcount %Ld state %Ld\n", + (__uint64_t)r->br_startoff, + (__uint64_t)r->br_startblock, + (__uint64_t)r->br_blockcount, + (__uint64_t)r->br_state); +} + + +/* + * Print a bmap alloc argument structure for XFS. + */ +static void +xfsidbg_xbmalla(xfs_bmalloca_t *a) +{ + kdb_printf("tp 0x%p ip 0x%p eof %d prevp 0x%p\n", + a->tp, a->ip, a->eof, a->prevp); + kdb_printf("gotp 0x%p firstblock %s alen %d total %d\n", + a->gotp, xfs_fmtfsblock(a->firstblock, a->ip->i_mount), + a->alen, a->total); + kdb_printf("off %s wasdel %d userdata %d minlen %d\n", + xfs_fmtfsblock(a->off, a->ip->i_mount), a->wasdel, + a->userdata, a->minlen); + kdb_printf("minleft %d low %d rval %s aeof %d\n", + a->minleft, a->low, xfs_fmtfsblock(a->rval, a->ip->i_mount), + a->aeof); +} + + +/* + * Print xfs bmap record + */ +static void +xfsidbg_xbrec(xfs_bmbt_rec_64_t *r) +{ + xfs_bmbt_irec_t irec; + + xfs_bmbt_get_all((xfs_bmbt_rec_t *)r, &irec); + kdb_printf("startoff %Ld startblock %Lx blockcount %Ld flag %d\n", + irec.br_startoff, (__uint64_t)irec.br_startblock, + irec.br_blockcount, irec.br_state); +} + +/* + * Print an xfs in-inode bmap btree root (data fork). + */ +static void +xfsidbg_xbroot(xfs_inode_t *ip) +{ + xfs_broot(ip, &ip->i_df); +} + +/* + * Print an xfs in-inode bmap btree root (attribute fork). + */ +static void +xfsidbg_xbroota(xfs_inode_t *ip) +{ + if (ip->i_afp) + xfs_broot(ip, ip->i_afp); +} + +/* + * Print xfs btree cursor. + */ +static void +xfsidbg_xbtcur(xfs_btree_cur_t *c) +{ + int l; + + kdb_printf("tp 0x%p mp 0x%p\n", + c->bc_tp, + c->bc_mp); + if (c->bc_btnum == XFS_BTNUM_BMAP) { + kdb_printf("rec.b "); + xfsidbg_xbirec(&c->bc_rec.b); + } else if (c->bc_btnum == XFS_BTNUM_INO) { + kdb_printf("rec.i startino 0x%x freecount 0x%x free %Lx\n", + c->bc_rec.i.ir_startino, c->bc_rec.i.ir_freecount, + c->bc_rec.i.ir_free); + } else { + kdb_printf("rec.a startblock 0x%x blockcount 0x%x\n", + c->bc_rec.a.ar_startblock, + c->bc_rec.a.ar_blockcount); + } + kdb_printf("bufs"); + for (l = 0; l < c->bc_nlevels; l++) + kdb_printf(" 0x%p", c->bc_bufs[l]); + kdb_printf("\n"); + kdb_printf("ptrs"); + for (l = 0; l < c->bc_nlevels; l++) + kdb_printf(" 0x%x", c->bc_ptrs[l]); + kdb_printf(" ra"); + for (l = 0; l < c->bc_nlevels; l++) + kdb_printf(" %d", c->bc_ra[l]); + kdb_printf("\n"); + kdb_printf("nlevels %d btnum %s blocklog %d\n", + c->bc_nlevels, + c->bc_btnum == XFS_BTNUM_BNO ? "bno" : + (c->bc_btnum == XFS_BTNUM_CNT ? "cnt" : + (c->bc_btnum == XFS_BTNUM_BMAP ? 
"bmap" : "ino")), + c->bc_blocklog); + if (c->bc_btnum == XFS_BTNUM_BMAP) { + kdb_printf("private forksize 0x%x whichfork %d ip 0x%p flags %d\n", + c->bc_private.b.forksize, + c->bc_private.b.whichfork, + c->bc_private.b.ip, + c->bc_private.b.flags); + kdb_printf("private firstblock %s flist 0x%p allocated 0x%x\n", + xfs_fmtfsblock(c->bc_private.b.firstblock, c->bc_mp), + c->bc_private.b.flist, + c->bc_private.b.allocated); + } else if (c->bc_btnum == XFS_BTNUM_INO) { + kdb_printf("private agbp 0x%p agno 0x%x\n", + c->bc_private.i.agbp, + c->bc_private.i.agno); + } else { + kdb_printf("private agbp 0x%p agno 0x%x\n", + c->bc_private.a.agbp, + c->bc_private.a.agno); + } +} + +/* + * Figure out what kind of xfs block the buffer contains, + * and invoke a print routine. + */ +static void +xfsidbg_xbuf(xfs_buf_t *bp) +{ + xfsidbg_xbuf_real(bp, 0); +} + +/* + * Figure out what kind of xfs block the buffer contains, + * and invoke a print routine (if asked to). + */ +static void +xfsidbg_xbuf_real(xfs_buf_t *bp, int summary) +{ + void *d; + xfs_agf_t *agf; + xfs_agi_t *agi; + xfs_sb_t *sb; + xfs_alloc_block_t *bta; + xfs_bmbt_block_t *btb; + xfs_inobt_block_t *bti; + xfs_attr_leafblock_t *aleaf; + xfs_dir_leafblock_t *dleaf; + xfs_da_intnode_t *node; + xfs_dinode_t *di; + xfs_disk_dquot_t *dqb; + xfs_dir2_block_t *d2block; + xfs_dir2_data_t *d2data; + xfs_dir2_leaf_t *d2leaf; + xfs_dir2_free_t *d2free; + + d = XFS_BUF_PTR(bp); + if (INT_GET((agf = d)->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC) { + if (summary) { + kdb_printf("freespace hdr for AG %d (at 0x%p)\n", + INT_GET(agf->agf_seqno, ARCH_CONVERT), agf); + } else { + kdb_printf("buf 0x%p agf 0x%p\n", bp, agf); + xfsidbg_xagf(agf); + } + } else if (INT_GET((agi = d)->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC) { + if (summary) { + kdb_printf("Inode hdr for AG %d (at 0x%p)\n", + INT_GET(agi->agi_seqno, ARCH_CONVERT), agi); + } else { + kdb_printf("buf 0x%p agi 0x%p\n", bp, agi); + xfsidbg_xagi(agi); + } + } else if (INT_GET((bta = d)->bb_magic, ARCH_CONVERT) == XFS_ABTB_MAGIC) { + if (summary) { + kdb_printf("Alloc BNO Btree blk, level %d (at 0x%p)\n", + INT_GET(bta->bb_level, ARCH_CONVERT), bta); + } else { + kdb_printf("buf 0x%p abtbno 0x%p\n", bp, bta); + xfs_btalloc(bta, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((bta = d)->bb_magic, ARCH_CONVERT) == XFS_ABTC_MAGIC) { + if (summary) { + kdb_printf("Alloc COUNT Btree blk, level %d (at 0x%p)\n", + INT_GET(bta->bb_level, ARCH_CONVERT), bta); + } else { + kdb_printf("buf 0x%p abtcnt 0x%p\n", bp, bta); + xfs_btalloc(bta, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((btb = d)->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC) { + if (summary) { + kdb_printf("Bmap Btree blk, level %d (at 0x%p)\n", + INT_GET(btb->bb_level, ARCH_CONVERT), btb); + } else { + kdb_printf("buf 0x%p bmapbt 0x%p\n", bp, btb); + xfs_btbmap(btb, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((bti = d)->bb_magic, ARCH_CONVERT) == XFS_IBT_MAGIC) { + if (summary) { + kdb_printf("Inode Btree blk, level %d (at 0x%p)\n", + INT_GET(bti->bb_level, ARCH_CONVERT), bti); + } else { + kdb_printf("buf 0x%p inobt 0x%p\n", bp, bti); + xfs_btino(bti, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((aleaf = d)->hdr.info.magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + if (summary) { + kdb_printf("Attr Leaf, 1st hash 0x%x (at 0x%p)\n", + INT_GET(aleaf->entries[0].hashval, ARCH_CONVERT), aleaf); + } else { + kdb_printf("buf 0x%p attr leaf 0x%p\n", bp, aleaf); + xfsidbg_xattrleaf(aleaf); + } + } else if (INT_GET((dleaf = d)->hdr.info.magic, 
ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + if (summary) { + kdb_printf("Dir Leaf, 1st hash 0x%x (at 0x%p)\n", + dleaf->entries[0].hashval, dleaf); + } else { + kdb_printf("buf 0x%p dir leaf 0x%p\n", bp, dleaf); + xfsidbg_xdirleaf(dleaf); + } + } else if (INT_GET((node = d)->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + if (summary) { + kdb_printf("Dir/Attr Node, level %d, 1st hash 0x%x (at 0x%p)\n", + node->hdr.level, node->btree[0].hashval, node); + } else { + kdb_printf("buf 0x%p dir/attr node 0x%p\n", bp, node); + xfsidbg_xdanode(node); + } + } else if (INT_GET((di = d)->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC) { + if (summary) { + kdb_printf("Disk Inode (at 0x%p)\n", di); + } else { + kdb_printf("buf 0x%p dinode 0x%p\n", bp, di); + xfs_inodebuf(bp); + } + } else if (INT_GET((sb = d)->sb_magicnum, ARCH_CONVERT) == XFS_SB_MAGIC) { + if (summary) { + kdb_printf("Superblock (at 0x%p)\n", sb); + } else { + kdb_printf("buf 0x%p sb 0x%p\n", bp, sb); + /* SB in a buffer - we need to convert */ + xfsidbg_xsb(sb, 1); + } + } else if ((dqb = d)->d_magic == XFS_DQUOT_MAGIC) { +#define XFSIDBG_DQTYPESTR(d) \ + ((INT_GET((d)->d_flags, ARCH_CONVERT) & XFS_DQ_USER) ? "USR" : \ + ((INT_GET((d)->d_flags, ARCH_CONVERT) & XFS_DQ_GROUP) ? "GRP" : "???")) + kdb_printf("Quota blk starting ID [%d], type %s at 0x%p\n", + INT_GET(dqb->d_id, ARCH_CONVERT), XFSIDBG_DQTYPESTR(dqb), dqb); + + } else if (INT_GET((d2block = d)->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + if (summary) { + kdb_printf("Dir2 block (at 0x%p)\n", d2block); + } else { + kdb_printf("buf 0x%p dir2 block 0x%p\n", bp, d2block); + xfs_dir2data((void *)d2block, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((d2data = d)->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) { + if (summary) { + kdb_printf("Dir2 data (at 0x%p)\n", d2data); + } else { + kdb_printf("buf 0x%p dir2 data 0x%p\n", bp, d2data); + xfs_dir2data((void *)d2data, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((d2leaf = d)->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC) { + if (summary) { + kdb_printf("Dir2 leaf(1) (at 0x%p)\n", d2leaf); + } else { + kdb_printf("buf 0x%p dir2 leaf 0x%p\n", bp, d2leaf); + xfs_dir2leaf(d2leaf, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET(d2leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + if (summary) { + kdb_printf("Dir2 leaf(n) (at 0x%p)\n", d2leaf); + } else { + kdb_printf("buf 0x%p dir2 leaf 0x%p\n", bp, d2leaf); + xfs_dir2leaf(d2leaf, XFS_BUF_COUNT(bp)); + } + } else if (INT_GET((d2free = d)->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC) { + if (summary) { + kdb_printf("Dir2 free (at 0x%p)\n", d2free); + } else { + kdb_printf("buf 0x%p dir2 free 0x%p\n", bp, d2free); + xfsidbg_xdir2free(d2free); + } + } else { + kdb_printf("buf 0x%p unknown 0x%p\n", bp, d); + } +} + + +/* + * Print an xfs_da_args structure. 
+ */ +static void +xfsidbg_xdaargs(xfs_da_args_t *n) +{ + char *ch; + int i; + + kdb_printf(" name \""); + for (i = 0; i < n->namelen; i++) { + kdb_printf("%c", n->name[i]); + } + kdb_printf("\"(%d) value ", n->namelen); + if (n->value) { + kdb_printf("\""); + ch = n->value; + for (i = 0; (i < n->valuelen) && (i < 32); ch++, i++) { + switch(*ch) { + case '\n': kdb_printf("\n"); break; + case '\b': kdb_printf("\b"); break; + case '\t': kdb_printf("\t"); break; + default: kdb_printf("%c", *ch); break; + } + } + if (i == 32) + kdb_printf("..."); + kdb_printf("\"(%d)\n", n->valuelen); + } else { + kdb_printf("(NULL)(%d)\n", n->valuelen); + } + kdb_printf(" hashval 0x%x whichfork %d flags <", + (uint_t)n->hashval, n->whichfork); + if (n->flags & ATTR_ROOT) + kdb_printf("ROOT "); + if (n->flags & ATTR_CREATE) + kdb_printf("CREATE "); + if (n->flags & ATTR_REPLACE) + kdb_printf("REPLACE "); + if (n->flags & XFS_ATTR_INCOMPLETE) + kdb_printf("INCOMPLETE "); + i = ~(ATTR_ROOT | ATTR_CREATE | ATTR_REPLACE | XFS_ATTR_INCOMPLETE); + if ((n->flags & i) != 0) + kdb_printf("0x%x", n->flags & i); + kdb_printf(">\n"); + kdb_printf(" rename %d justcheck %d addname %d oknoent %d\n", + n->rename, n->justcheck, n->addname, n->oknoent); + kdb_printf(" leaf: blkno %d index %d rmtblkno %d rmtblkcnt %d\n", + n->blkno, n->index, n->rmtblkno, n->rmtblkcnt); + kdb_printf(" leaf2: blkno %d index %d rmtblkno %d rmtblkcnt %d\n", + n->blkno2, n->index2, n->rmtblkno2, n->rmtblkcnt2); + kdb_printf(" inumber %llu dp 0x%p firstblock 0x%p flist 0x%p\n", + (unsigned long long) n->inumber, + n->dp, n->firstblock, n->flist); + kdb_printf(" trans 0x%p total %d\n", + n->trans, n->total); +} + +/* + * Print a da buffer structure. + */ +static void +xfsidbg_xdabuf(xfs_dabuf_t *dabuf) +{ + int i; + + kdb_printf("nbuf %d dirty %d bbcount %d data 0x%p bps", + dabuf->nbuf, dabuf->dirty, dabuf->bbcount, dabuf->data); + for (i = 0; i < dabuf->nbuf; i++) + kdb_printf(" %d:0x%p", i, dabuf->bps[i]); + kdb_printf("\n"); +#ifdef XFS_DABUF_DEBUG + kdb_printf(" ra 0x%x prev 0x%x next 0x%x dev 0x%x blkno 0x%x\n", + dabuf->ra, dabuf->prev, dabuf->next, dabuf->dev, dabuf->blkno); +#endif +} + +/* + * Print a directory/attribute internal node block. + */ +static void +xfsidbg_xdanode(xfs_da_intnode_t *node) +{ + xfs_da_node_hdr_t *h; + xfs_da_blkinfo_t *i; + xfs_da_node_entry_t *e; + int j; + + h = &node->hdr; + i = &h->info; + kdb_printf("hdr info forw 0x%x back 0x%x magic 0x%x\n", + INT_GET(i->forw, ARCH_CONVERT), INT_GET(i->back, ARCH_CONVERT), INT_GET(i->magic, ARCH_CONVERT)); + kdb_printf("hdr count %d level %d\n", + INT_GET(h->count, ARCH_CONVERT), INT_GET(h->level, ARCH_CONVERT)); + for (j = 0, e = node->btree; j < INT_GET(h->count, ARCH_CONVERT); j++, e++) { + kdb_printf("btree %d hashval 0x%x before 0x%x\n", + j, (uint_t)INT_GET(e->hashval, ARCH_CONVERT), INT_GET(e->before, ARCH_CONVERT)); + } +} + +/* + * Print an xfs_da_state_blk structure. 
+ */ +static void +xfsidbg_xdastate(xfs_da_state_t *s) +{ + xfs_da_state_blk_t *eblk; + + kdb_printf("args 0x%p mp 0x%p blocksize %u node_ents %u inleaf %u\n", + s->args, s->mp, s->blocksize, s->node_ents, s->inleaf); + if (s->args) + xfsidbg_xdaargs(s->args); + + kdb_printf("path: "); + xfs_dastate_path(&s->path); + + kdb_printf("altpath: "); + xfs_dastate_path(&s->altpath); + + eblk = &s->extrablk; + kdb_printf("extra: valid %d, after %d\n", s->extravalid, s->extraafter); + kdb_printf(" bp 0x%p blkno 0x%x ", eblk->bp, eblk->blkno); + kdb_printf("index %d hashval 0x%x\n", eblk->index, (uint_t)eblk->hashval); +} + +/* + * Print a directory leaf block. + */ +static void +xfsidbg_xdirleaf(xfs_dir_leafblock_t *leaf) +{ + xfs_dir_leaf_hdr_t *h; + xfs_da_blkinfo_t *i; + xfs_dir_leaf_map_t *m; + xfs_dir_leaf_entry_t *e; + xfs_dir_leaf_name_t *n; + int j, k; + xfs_ino_t ino; + + h = &leaf->hdr; + i = &h->info; + kdb_printf("hdr info forw 0x%x back 0x%x magic 0x%x\n", + INT_GET(i->forw, ARCH_CONVERT), INT_GET(i->back, ARCH_CONVERT), INT_GET(i->magic, ARCH_CONVERT)); + kdb_printf("hdr count %d namebytes %d firstused %d holes %d\n", + INT_GET(h->count, ARCH_CONVERT), INT_GET(h->namebytes, ARCH_CONVERT), INT_GET(h->firstused, ARCH_CONVERT), h->holes); + for (j = 0, m = h->freemap; j < XFS_DIR_LEAF_MAPSIZE; j++, m++) { + kdb_printf("hdr freemap %d base %d size %d\n", + j, INT_GET(m->base, ARCH_CONVERT), INT_GET(m->size, ARCH_CONVERT)); + } + for (j = 0, e = leaf->entries; j < INT_GET(h->count, ARCH_CONVERT); j++, e++) { + n = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(e->nameidx, ARCH_CONVERT)); + XFS_DIR_SF_GET_DIRINO_ARCH(&n->inumber, &ino, ARCH_CONVERT); + kdb_printf("leaf %d hashval 0x%x nameidx %d inumber %llu ", + j, (uint_t)INT_GET(e->hashval, ARCH_CONVERT), + INT_GET(e->nameidx, ARCH_CONVERT), + (unsigned long long)ino); + kdb_printf("namelen %d name \"", e->namelen); + for (k = 0; k < e->namelen; k++) + kdb_printf("%c", n->name[k]); + kdb_printf("\"\n"); + } +} + +/* + * Print a directory v2 data block, single or multiple. + */ +static void +xfs_dir2data(void *addr, int size) +{ + xfs_dir2_data_t *db; + xfs_dir2_block_t *bb; + xfs_dir2_data_hdr_t *h; + xfs_dir2_data_free_t *m; + xfs_dir2_data_entry_t *e; + xfs_dir2_data_unused_t *u; + xfs_dir2_leaf_entry_t *l=NULL; + int j, k; + char *p; + char *t; + xfs_dir2_block_tail_t *tail=NULL; + + db = (xfs_dir2_data_t *)addr; + bb = (xfs_dir2_block_t *)addr; + h = &db->hdr; + kdb_printf("hdr magic 0x%x (%s)\nhdr bestfree", INT_GET(h->magic, ARCH_CONVERT), + INT_GET(h->magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC ? "DATA" : + (INT_GET(h->magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC ? 
"BLOCK" : "")); + for (j = 0, m = h->bestfree; j < XFS_DIR2_DATA_FD_COUNT; j++, m++) { + kdb_printf(" %d: 0x%x@0x%x", j, INT_GET(m->length, ARCH_CONVERT), INT_GET(m->offset, ARCH_CONVERT)); + } + kdb_printf("\n"); + if (INT_GET(h->magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) + t = (char *)db + size; + else { + /* XFS_DIR2_BLOCK_TAIL_P */ + tail = (xfs_dir2_block_tail_t *) + ((char *)bb + size - sizeof(xfs_dir2_block_tail_t)); + l = XFS_DIR2_BLOCK_LEAF_P_ARCH(tail, ARCH_CONVERT); + t = (char *)l; + } + for (p = (char *)(h + 1); p < t; ) { + u = (xfs_dir2_data_unused_t *)p; + if (u->freetag == XFS_DIR2_DATA_FREE_TAG) { + kdb_printf("0x%lx unused freetag 0x%x length 0x%x tag 0x%x\n", + (unsigned long) (p - (char *)addr), + INT_GET(u->freetag, ARCH_CONVERT), + INT_GET(u->length, ARCH_CONVERT), + INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P_ARCH(u, ARCH_CONVERT), ARCH_CONVERT)); + p += INT_GET(u->length, ARCH_CONVERT); + continue; + } + e = (xfs_dir2_data_entry_t *)p; + kdb_printf("0x%lx entry inumber %llu namelen %d name \"", + (unsigned long) (p - (char *)addr), + (unsigned long long) INT_GET(e->inumber, ARCH_CONVERT), + e->namelen); + for (k = 0; k < e->namelen; k++) + kdb_printf("%c", e->name[k]); + kdb_printf("\" tag 0x%x\n", INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(e), ARCH_CONVERT)); + p += XFS_DIR2_DATA_ENTSIZE(e->namelen); + } + if (INT_GET(h->magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) + return; + for (j = 0; j < INT_GET(tail->count, ARCH_CONVERT); j++, l++) { + kdb_printf("0x%lx leaf %d hashval 0x%x address 0x%x (byte 0x%x)\n", + (unsigned long) ((char *)l - (char *)addr), j, + (uint_t)INT_GET(l->hashval, ARCH_CONVERT), + INT_GET(l->address, ARCH_CONVERT), + /* XFS_DIR2_DATAPTR_TO_BYTE */ + INT_GET(l->address, ARCH_CONVERT) << XFS_DIR2_DATA_ALIGN_LOG); + } + kdb_printf("0x%lx tail count %d\n", + (unsigned long) ((char *)tail - (char *)addr), + INT_GET(tail->count, ARCH_CONVERT)); +} + +static void +xfs_dir2leaf(xfs_dir2_leaf_t *leaf, int size) +{ + xfs_dir2_leaf_hdr_t *h; + xfs_da_blkinfo_t *i; + xfs_dir2_leaf_entry_t *e; + xfs_dir2_data_off_t *b; + xfs_dir2_leaf_tail_t *t; + int j; + + h = &leaf->hdr; + i = &h->info; + e = leaf->ents; + kdb_printf("hdr info forw 0x%x back 0x%x magic 0x%x\n", + INT_GET(i->forw, ARCH_CONVERT), INT_GET(i->back, ARCH_CONVERT), INT_GET(i->magic, ARCH_CONVERT)); + kdb_printf("hdr count %d stale %d\n", INT_GET(h->count, ARCH_CONVERT), INT_GET(h->stale, ARCH_CONVERT)); + for (j = 0; j < INT_GET(h->count, ARCH_CONVERT); j++, e++) { + kdb_printf("0x%lx ent %d hashval 0x%x address 0x%x (byte 0x%x)\n", + (unsigned long) ((char *)e - (char *)leaf), j, + (uint_t)INT_GET(e->hashval, ARCH_CONVERT), + INT_GET(e->address, ARCH_CONVERT), + /* XFS_DIR2_DATAPTR_TO_BYTE */ + INT_GET(e->address, ARCH_CONVERT) << XFS_DIR2_DATA_ALIGN_LOG); + } + if (INT_GET(i->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) + return; + /* XFS_DIR2_LEAF_TAIL_P */ + t = (xfs_dir2_leaf_tail_t *)((char *)leaf + size - sizeof(*t)); + b = XFS_DIR2_LEAF_BESTS_P_ARCH(t, ARCH_CONVERT); + for (j = 0; j < INT_GET(t->bestcount, ARCH_CONVERT); j++, b++) { + kdb_printf("0x%lx best %d 0x%x\n", + (unsigned long) ((char *)b - (char *)leaf), j, + INT_GET(*b, ARCH_CONVERT)); + } + kdb_printf("tail bestcount %d\n", INT_GET(t->bestcount, ARCH_CONVERT)); +} + +/* + * Print a shortform directory. 
+ */ +static void +xfsidbg_xdirsf(xfs_dir_shortform_t *s) +{ + xfs_dir_sf_hdr_t *sfh; + xfs_dir_sf_entry_t *sfe; + xfs_ino_t ino; + int i, j; + + sfh = &s->hdr; + XFS_DIR_SF_GET_DIRINO_ARCH(&sfh->parent, &ino, ARCH_CONVERT); + kdb_printf("hdr parent %llu", (unsigned long long)ino); + kdb_printf(" count %d\n", sfh->count); + for (i = 0, sfe = s->list; i < sfh->count; i++) { + XFS_DIR_SF_GET_DIRINO_ARCH(&sfe->inumber, &ino, ARCH_CONVERT); + kdb_printf("entry %d inumber %llu", i, (unsigned long long)ino); + kdb_printf(" namelen %d name \"", sfe->namelen); + for (j = 0; j < sfe->namelen; j++) + kdb_printf("%c", sfe->name[j]); + kdb_printf("\"\n"); + sfe = XFS_DIR_SF_NEXTENTRY(sfe); + } +} + +/* + * Print a shortform v2 directory. + */ +static void +xfsidbg_xdir2sf(xfs_dir2_sf_t *s) +{ + xfs_dir2_sf_hdr_t *sfh; + xfs_dir2_sf_entry_t *sfe; + xfs_ino_t ino; + int i, j; + + sfh = &s->hdr; + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(s, &sfh->parent, ARCH_CONVERT); + kdb_printf("hdr count %d i8count %d parent %llu\n", + sfh->count, sfh->i8count, (unsigned long long) ino); + for (i = 0, sfe = XFS_DIR2_SF_FIRSTENTRY(s); i < sfh->count; i++) { + ino = XFS_DIR2_SF_GET_INUMBER_ARCH(s, XFS_DIR2_SF_INUMBERP(sfe), ARCH_CONVERT); + kdb_printf("entry %d inumber %llu offset 0x%x namelen %d name \"", + i, (unsigned long long) ino, + XFS_DIR2_SF_GET_OFFSET_ARCH(sfe, ARCH_CONVERT), + sfe->namelen); + for (j = 0; j < sfe->namelen; j++) + kdb_printf("%c", sfe->name[j]); + kdb_printf("\"\n"); + sfe = XFS_DIR2_SF_NEXTENTRY(s, sfe); + } +} + +/* + * Print a node-form v2 directory freemap block. + */ +static void +xfsidbg_xdir2free(xfs_dir2_free_t *f) +{ + int i; + + kdb_printf("hdr magic 0x%x firstdb %d nvalid %d nused %d\n", + INT_GET(f->hdr.magic, ARCH_CONVERT), INT_GET(f->hdr.firstdb, ARCH_CONVERT), INT_GET(f->hdr.nvalid, ARCH_CONVERT), INT_GET(f->hdr.nused, ARCH_CONVERT)); + for (i = 0; i < INT_GET(f->hdr.nvalid, ARCH_CONVERT); i++) { + kdb_printf("entry %d db %d count %d\n", + i, i + INT_GET(f->hdr.firstdb, ARCH_CONVERT), INT_GET(f->bests[i], ARCH_CONVERT)); + } +} + + +/* + * Print xfs extent list. + */ +static void +xfsidbg_xexlist(xfs_inode_t *ip) +{ + xfs_xexlist_fork(ip, XFS_DATA_FORK); + if (XFS_IFORK_Q(ip)) + xfs_xexlist_fork(ip, XFS_ATTR_FORK); +} + +/* + * Print an xfs free-extent list. + */ +static void +xfsidbg_xflist(xfs_bmap_free_t *flist) +{ + xfs_bmap_free_item_t *item; + + kdb_printf("flist@0x%p: first 0x%p count %d low %d\n", flist, + flist->xbf_first, flist->xbf_count, flist->xbf_low); + for (item = flist->xbf_first; item; item = item->xbfi_next) { + kdb_printf("item@0x%p: startblock %Lx blockcount %d", item, + (xfs_dfsbno_t)item->xbfi_startblock, + item->xbfi_blockcount); + } +} + +/* + * Print out the help messages for these functions. + */ +static void +xfsidbg_xhelp(void) +{ + struct xif *p; + + for (p = xfsidbg_funcs; p->name; p++) + kdb_printf("%-16s %s %s\n", p->name, p->args, p->help); +} + +/* + * Print out an XFS in-core log structure. 
+ */ +static void +xfsidbg_xiclog(xlog_in_core_t *iclog) +{ + int i; + static char *ic_flags[] = { + "ACTIVE", /* 0x0001 */ + "WANT_SYNC", /* 0x0002 */ + "SYNCING", /* 0X0004 */ + "DONE_SYNC", /* 0X0008 */ + "DO_CALLBACK", /* 0X0010 */ + "CALLBACK", /* 0X0020 */ + "DIRTY", /* 0X0040 */ + "IOERROR", /* 0X0080 */ + "NOTUSED", /* 0X8000 */ + 0 + }; + + kdb_printf("xlog_in_core/header at 0x%p/0x%p\n", + iclog, iclog->hic_data); + kdb_printf("magicno: %x cycle: %d version: %d lsn: 0x%Lx\n", + INT_GET(iclog->ic_header.h_magicno, ARCH_CONVERT), INT_GET(iclog->ic_header.h_cycle, ARCH_CONVERT), + INT_GET(iclog->ic_header.h_version, ARCH_CONVERT), INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)); + kdb_printf("tail_lsn: 0x%Lx len: %d prev_block: %d num_ops: %d\n", + INT_GET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT), INT_GET(iclog->ic_header.h_len, ARCH_CONVERT), + INT_GET(iclog->ic_header.h_prev_block, ARCH_CONVERT), INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT)); + kdb_printf("cycle_data: "); + for (i=0; i<(iclog->ic_size>>BBSHIFT); i++) { + kdb_printf("%x ", INT_GET(iclog->ic_header.h_cycle_data[i], ARCH_CONVERT)); + } + kdb_printf("\n"); + kdb_printf("size: %d\n", INT_GET(iclog->ic_header.h_size, ARCH_CONVERT)); + kdb_printf("\n"); + kdb_printf("--------------------------------------------------\n"); + kdb_printf("data: 0x%p &forcesema: 0x%p next: 0x%p bp: 0x%p\n", + iclog->ic_datap, &iclog->ic_forcesema, iclog->ic_next, + iclog->ic_bp); + kdb_printf("log: 0x%p callb: 0x%p callb_tail: 0x%p roundoff: %d\n", + iclog->ic_log, iclog->ic_callback, iclog->ic_callback_tail, + iclog->ic_roundoff); + kdb_printf("size: %d (OFFSET: %d) refcnt: %d bwritecnt: %d", + iclog->ic_size, iclog->ic_offset, + iclog->ic_refcnt, iclog->ic_bwritecnt); + if (iclog->ic_state & XLOG_STATE_ALL) + printflags(iclog->ic_state, ic_flags, "state:"); + else + kdb_printf("state: ILLEGAL 0x%x", iclog->ic_state); + kdb_printf("\n"); +} /* xfsidbg_xiclog */ + + +/* + * Print all incore logs. + */ +static void +xfsidbg_xiclogall(xlog_in_core_t *iclog) +{ + xlog_in_core_t *first_iclog = iclog; + + do { + xfsidbg_xiclog(iclog); + kdb_printf("=================================================\n"); + iclog = iclog->ic_next; + } while (iclog != first_iclog); +} /* xfsidbg_xiclogall */ + +/* + * Print out the callback structures attached to an iclog. + */ +static void +xfsidbg_xiclogcb(xlog_in_core_t *iclog) +{ + xfs_log_callback_t *cb; + kdb_symtab_t symtab; + + for (cb = iclog->ic_callback; cb != NULL; cb = cb->cb_next) { + + if (kdbnearsym((unsigned long)cb->cb_func, &symtab)) { + unsigned long offval; + + offval = (unsigned long)cb->cb_func - symtab.sym_start; + + if (offval) + kdb_printf("func = %s+0x%lx", + symtab.sym_name, + offval); + else + kdb_printf("func = %s", symtab.sym_name); + } else + kdb_printf("func = ?? 0x%p", (void *)cb->cb_func); + + kdb_printf(" arg 0x%p next 0x%p\n", cb->cb_arg, cb->cb_next); + } +} + + +/* + * Print all of the inodes attached to the given mount structure. 
+ */ +static void +xfsidbg_xinodes(xfs_mount_t *mp) +{ + xfs_inode_t *ip; + + kdb_printf("xfs_mount at 0x%p\n", mp); + ip = mp->m_inodes; + if (ip != NULL) { + do { + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + kdb_printf("\n"); + xfsidbg_xnode(ip); + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + } + kdb_printf("\nEnd of Inodes\n"); +} + +static void +xfsidbg_delayed_blocks(xfs_mount_t *mp) +{ + xfs_inode_t *ip; + unsigned int total = 0; + unsigned int icount = 0; + + ip = mp->m_inodes; + if (ip != NULL) { + do { + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + if (ip->i_delayed_blks) { + total += ip->i_delayed_blks; + icount++; + } + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + } + kdb_printf("delayed blocks total: %d in %d inodes\n", total, icount); +} + +static void +xfsidbg_xinodes_quiesce(xfs_mount_t *mp) +{ + xfs_inode_t *ip; + + kdb_printf("xfs_mount at 0x%p\n", mp); + ip = mp->m_inodes; + if (ip != NULL) { + do { + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + if (!(ip->i_flags & XFS_IQUIESCE)) { + kdb_printf("ip 0x%p not quiesced\n", ip); + } + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + } + kdb_printf("\nEnd of Inodes\n"); +} + +static char * +xfsidbg_get_cstate(int state) +{ + switch(state) { + case XLOG_STATE_COVER_IDLE: + return("idle"); + case XLOG_STATE_COVER_NEED: + return("need"); + case XLOG_STATE_COVER_DONE: + return("done"); + case XLOG_STATE_COVER_NEED2: + return("need2"); + case XLOG_STATE_COVER_DONE2: + return("done2"); + default: + return("unknown"); + } +} + +/* + * Print out an XFS log structure. + */ +static void +xfsidbg_xlog(xlog_t *log) +{ + int rbytes; + int wbytes; + static char *t_flags[] = { + "CHKSUM_MISMATCH", /* 0x01 */ + "ACTIVE_RECOVERY", /* 0x02 */ + "RECOVERY_NEEDED", /* 0x04 */ + "IO_ERROR", /* 0x08 */ + 0 + }; + + kdb_printf("xlog at 0x%p\n", log); + kdb_printf("&flushsm: 0x%p flushcnt: %d tic_cnt: %d tic_tcnt: %d \n", + &log->l_flushsema, log->l_flushcnt, + log->l_ticket_cnt, log->l_ticket_tcnt); + kdb_printf("freelist: 0x%p tail: 0x%p ICLOG: 0x%p \n", + log->l_freelist, log->l_tail, log->l_iclog); + kdb_printf("&icloglock: 0x%p tail_lsn: %s last_sync_lsn: %s \n", + &log->l_icloglock, xfs_fmtlsn(&log->l_tail_lsn), + xfs_fmtlsn(&log->l_last_sync_lsn)); + kdb_printf("mp: 0x%p xbuf: 0x%p roundoff: %d l_covered_state: %s \n", + log->l_mp, log->l_xbuf, log->l_roundoff, + xfsidbg_get_cstate(log->l_covered_state)); + kdb_printf("flags: "); + printflags(log->l_flags, t_flags,"log"); + kdb_printf(" dev: 0x%x logBBstart: %lld logsize: %d logBBsize: %d\n", + log->l_dev, (long long) log->l_logBBstart, + log->l_logsize,log->l_logBBsize); + kdb_printf("curr_cycle: %d prev_cycle: %d curr_block: %d prev_block: %d\n", + log->l_curr_cycle, log->l_prev_cycle, log->l_curr_block, + log->l_prev_block); + kdb_printf("iclog_bak: 0x%p iclog_size: 0x%x (%d) num iclogs: %d\n", + log->l_iclog_bak, log->l_iclog_size, log->l_iclog_size, + log->l_iclog_bufs); + kdb_printf("l_iclog_hsize %d l_iclog_heads %d\n", + log->l_iclog_hsize, log->l_iclog_heads); + kdb_printf("&grant_lock: 0x%p resHeadQ: 0x%p wrHeadQ: 0x%p\n", + &log->l_grant_lock, log->l_reserve_headq, log->l_write_headq); + kdb_printf("GResCycle: %d GResBytes: %d GWrCycle: %d GWrBytes: %d\n", + log->l_grant_reserve_cycle, log->l_grant_reserve_bytes, + log->l_grant_write_cycle, log->l_grant_write_bytes); + rbytes = log->l_grant_reserve_bytes + log->l_roundoff; + wbytes = log->l_grant_write_bytes + log->l_roundoff; + kdb_printf("GResBlocks: %d 
GResRemain: %d GWrBlocks: %d GWrRemain: %d\n", + rbytes / BBSIZE, rbytes % BBSIZE, + wbytes / BBSIZE, wbytes % BBSIZE); +} /* xfsidbg_xlog */ + + +/* + * Print out an XFS recovery transaction + */ +static void +xfsidbg_xlog_ritem(xlog_recover_item_t *item) +{ + int i = XLOG_MAX_REGIONS_IN_ITEM; + + kdb_printf("(xlog_recover_item 0x%p) ", item); + kdb_printf("next: 0x%p prev: 0x%p type: %d cnt: %d ttl: %d\n", + item->ri_next, item->ri_prev, ITEM_TYPE(item), item->ri_cnt, + item->ri_total); + for ( ; i > 0; i--) { + if (!item->ri_buf[XLOG_MAX_REGIONS_IN_ITEM-i].i_addr) + break; + kdb_printf("a: 0x%p l: %d ", + item->ri_buf[XLOG_MAX_REGIONS_IN_ITEM-i].i_addr, + item->ri_buf[XLOG_MAX_REGIONS_IN_ITEM-i].i_len); + } + kdb_printf("\n"); +} /* xfsidbg_xlog_ritem */ + +/* + * Print out an XFS recovery transaction + */ +static void +xfsidbg_xlog_rtrans(xlog_recover_t *trans) +{ + xlog_recover_item_t *rip, *first_rip; + + kdb_printf("(xlog_recover 0x%p) ", trans); + kdb_printf("tid: %x type: %d items: %d ttid: 0x%x ", + trans->r_log_tid, trans->r_theader.th_type, + trans->r_theader.th_num_items, trans->r_theader.th_tid); + kdb_printf("itemq: 0x%p\n", trans->r_itemq); + if (trans->r_itemq) { + rip = first_rip = trans->r_itemq; + do { + kdb_printf("(recovery item: 0x%p) ", rip); + kdb_printf("type: %d cnt: %d total: %d\n", + ITEM_TYPE(rip), rip->ri_cnt, rip->ri_total); + rip = rip->ri_next; + } while (rip != first_rip); + } +} /* xfsidbg_xlog_rtrans */ + +static void +xfsidbg_xlog_buf_logitem(xlog_recover_item_t *item) +{ + xfs_buf_log_format_t *buf_f; + int i, j; + int bit; + int nbits; + unsigned int *data_map; + unsigned int map_size; + int size; + + buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr; + if (buf_f->blf_flags & XFS_BLI_INODE_BUF) { + kdb_printf("\tINODE BUF <blkno=0x%Lx, len=%d>\n", + buf_f->blf_blkno, buf_f->blf_len); + } else if (buf_f->blf_flags & (XFS_BLI_UDQUOT_BUF | XFS_BLI_GDQUOT_BUF)) { + kdb_printf("\tDQUOT BUF <blkno=0x%Lx, len=%d>\n", + buf_f->blf_blkno, buf_f->blf_len); + } else { + data_map = buf_f->blf_data_map; + map_size = buf_f->blf_map_size; + kdb_printf("\tREG BUF <blkno=0x%Lx, len=%d, map=0x%p, map_size=%d>\n", + buf_f->blf_blkno, buf_f->blf_len, data_map, map_size); + bit = 0; + i = 0; /* 0 is the buf format structure */ + while (1) { + bit = xfs_next_bit(data_map, map_size, bit); + if (bit == -1) + break; + nbits = xfs_contig_bits(data_map, map_size, bit); + size = ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT); + kdb_printf("\t\tlogged region: addr 0x%p size %d\n", + item->ri_buf[i].i_addr, size); + kdb_printf("\t\t\t\""); + for (j=0; j<8 && j<size; j++) { + kdb_printf("%c", ((char *)item->ri_buf[i].i_addr)[j]); + } + kdb_printf("...\"\n"); + i++; + bit += nbits; + } + + } +} + +/* + * Print out an ENTIRE XFS recovery transaction + */ +static void +xfsidbg_xlog_rtrans_entire(xlog_recover_t *trans) +{ + xlog_recover_item_t *item, *first_rip; + + kdb_printf("(Recovering Xact 0x%p) ", trans); + kdb_printf("tid: %x type: %d nitems: %d ttid: 0x%x ", + trans->r_log_tid, trans->r_theader.th_type, + trans->r_theader.th_num_items, trans->r_theader.th_tid); + kdb_printf("itemq: 0x%p\n", trans->r_itemq); + if (trans->r_itemq) { + item = first_rip = trans->r_itemq; + do { + /* + kdb_printf("(recovery item: 0x%x) ", item); + kdb_printf("type: %d cnt: %d total: %d\n", + item->ri_type, item->ri_cnt, item->ri_total); + */ + if ((ITEM_TYPE(item) == XFS_LI_BUF) || + (ITEM_TYPE(item) == XFS_LI_6_1_BUF) || + (ITEM_TYPE(item) == XFS_LI_5_3_BUF)) { + kdb_printf("BUF:"); + xfsidbg_xlog_buf_logitem(item); + } else if ((ITEM_TYPE(item) == XFS_LI_INODE) || + (ITEM_TYPE(item) == XFS_LI_6_1_INODE) || + (ITEM_TYPE(item) == XFS_LI_5_3_INODE)) { + kdb_printf("INODE:\n"); + } else if 
(ITEM_TYPE(item) == XFS_LI_EFI) { + kdb_printf("EFI:\n"); + } else if (ITEM_TYPE(item) == XFS_LI_EFD) { + kdb_printf("EFD:\n"); + } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) { + kdb_printf("DQUOT:\n"); + } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) { + kdb_printf("QUOTAOFF:\n"); + } else { + kdb_printf("UNKNOWN LOGITEM 0x%x\n", ITEM_TYPE(item)); + } + item = item->ri_next; + } while (item != first_rip); + } +} /* xfsidbg_xlog_rtrans */ + +/* + * Print out an XFS ticket structure. + */ +static void +xfsidbg_xlog_tic(xlog_ticket_t *tic) +{ + static char *t_flags[] = { + "INIT", /* 0x1 */ + "PERM_RES", /* 0x2 */ + "IN_Q", /* 0x4 */ + 0 + }; + + kdb_printf("xlog_ticket at 0x%p\n", tic); + kdb_printf("next: 0x%p prev: 0x%p tid: 0x%x \n", + tic->t_next, tic->t_prev, tic->t_tid); + kdb_printf("curr_res: %d unit_res: %d ocnt: %d cnt: %d\n", + tic->t_curr_res, tic->t_unit_res, (int)tic->t_ocnt, + (int)tic->t_cnt); + kdb_printf("clientid: %c \n", tic->t_clientid); + printflags(tic->t_flags, t_flags,"ticket"); + kdb_printf("\n"); +} /* xfsidbg_xlog_tic */ + +/* + * Print out a single log item. + */ +static void +xfsidbg_xlogitem(xfs_log_item_t *lip) +{ + xfs_log_item_t *bio_lip; + static char *lid_type[] = { + "???", /* 0 */ + "5-3-buf", /* 1 */ + "5-3-inode", /* 2 */ + "efi", /* 3 */ + "efd", /* 4 */ + "iunlink", /* 5 */ + "6-1-inode", /* 6 */ + "6-1-buf", /* 7 */ + "inode", /* 8 */ + "buf", /* 9 */ + "dquot", /* 10 */ + 0 + }; + static char *li_flags[] = { + "in ail", /* 0x1 */ + 0 + }; + + kdb_printf("type %s mountp 0x%p flags ", + lid_type[lip->li_type - XFS_LI_5_3_BUF + 1], + lip->li_mountp); + printflags((uint)(lip->li_flags), li_flags,"log"); + kdb_printf("\n"); + kdb_printf("ail forw 0x%p ail back 0x%p lsn %s\ndesc %p ops 0x%p", + lip->li_ail.ail_forw, lip->li_ail.ail_back, + xfs_fmtlsn(&(lip->li_lsn)), lip->li_desc, lip->li_ops); + kdb_printf(" iodonefunc &0x%p\n", lip->li_cb); + if (lip->li_type == XFS_LI_BUF) { + bio_lip = lip->li_bio_list; + if (bio_lip != NULL) { + kdb_printf("iodone list:\n"); + } + while (bio_lip != NULL) { + kdb_printf("item 0x%p func 0x%p\n", + bio_lip, bio_lip->li_cb); + bio_lip = bio_lip->li_bio_list; + } + } + switch (lip->li_type) { + case XFS_LI_BUF: + xfs_buf_item_print((xfs_buf_log_item_t *)lip, 0); + break; + case XFS_LI_INODE: + xfs_inode_item_print((xfs_inode_log_item_t *)lip, 0); + break; + case XFS_LI_EFI: + xfs_efi_item_print((xfs_efi_log_item_t *)lip, 0); + break; + case XFS_LI_EFD: + xfs_efd_item_print((xfs_efd_log_item_t *)lip, 0); + break; + case XFS_LI_DQUOT: + xfs_dquot_item_print((xfs_dq_logitem_t *)lip, 0); + break; + case XFS_LI_QUOTAOFF: + xfs_qoff_item_print((xfs_qoff_logitem_t *)lip, 0); + break; + + default: + kdb_printf("Unknown item type %d\n", lip->li_type); + break; + } +} + +/* + * Print out a summary of the AIL hanging off of a mount struct. 
+ */ +static void +xfsidbg_xaildump(xfs_mount_t *mp) +{ + xfs_log_item_t *lip; + static char *lid_type[] = { + "???", /* 0 */ + "5-3-buf", /* 1 */ + "5-3-inode", /* 2 */ + "efi", /* 3 */ + "efd", /* 4 */ + "iunlink", /* 5 */ + "6-1-inode", /* 6 */ + "6-1-buf", /* 7 */ + "inode", /* 8 */ + "buf", /* 9 */ + "dquot", /* 10 */ + 0 + }; + static char *li_flags[] = { + "in ail", /* 0x1 */ + 0 + }; + int count; + + if ((mp->m_ail.ail_forw == NULL) || + (mp->m_ail.ail_forw == (xfs_log_item_t *)&mp->m_ail)) { + kdb_printf("AIL is empty\n"); + return; + } + kdb_printf("AIL for mp 0x%p, oldest first\n", mp); + lip = (xfs_log_item_t*)mp->m_ail.ail_forw; + for (count = 0; lip; count++) { + kdb_printf("[%d] type %s ", count, + lid_type[lip->li_type - XFS_LI_5_3_BUF + 1]); + printflags((uint)(lip->li_flags), li_flags, "flags:"); + kdb_printf(" lsn %s\n ", xfs_fmtlsn(&(lip->li_lsn))); + switch (lip->li_type) { + case XFS_LI_BUF: + xfs_buf_item_print((xfs_buf_log_item_t *)lip, 1); + break; + case XFS_LI_INODE: + xfs_inode_item_print((xfs_inode_log_item_t *)lip, 1); + break; + case XFS_LI_EFI: + xfs_efi_item_print((xfs_efi_log_item_t *)lip, 1); + break; + case XFS_LI_EFD: + xfs_efd_item_print((xfs_efd_log_item_t *)lip, 1); + break; + case XFS_LI_DQUOT: + xfs_dquot_item_print((xfs_dq_logitem_t *)lip, 1); + break; + case XFS_LI_QUOTAOFF: + xfs_qoff_item_print((xfs_qoff_logitem_t *)lip, 1); + break; + default: + kdb_printf("Unknown item type %d\n", lip->li_type); + break; + } + + if (lip->li_ail.ail_forw == (xfs_log_item_t*)&mp->m_ail) { + lip = NULL; + } else { + lip = lip->li_ail.ail_forw; + } + } +} + +/* + * Print xfs mount structure. + */ +static void +xfsidbg_xmount(xfs_mount_t *mp) +{ + static char *xmount_flags[] = { + "WSYNC", /* 0x0001 */ + "INO64", /* 0x0002 */ + "RQCHK", /* 0x0004 */ + "FSCLEAN", /* 0x0008 */ + "FSSHUTDN", /* 0x0010 */ + "NOATIME", /* 0x0020 */ + "RETERR", /* 0x0040 */ + "NOALIGN", /* 0x0080 */ + "UNSHRD", /* 0x0100 */ + "RGSTRD", /* 0x0200 */ + "NORECVR", /* 0x0400 */ + "SHRD", /* 0x0800 */ + "IOSZ", /* 0x1000 */ + "OSYNC", /* 0x2000 */ + "NOUUID", /* 0x4000 */ + "32BIT", /* 0x8000 */ + "NOLOGFLUSH", /* 0x10000 */ + 0 + }; + + static char *quota_flags[] = { + "UQ", /* 0x0001 */ + "UQE", /* 0x0002 */ + "UQCHKD", /* 0x0004 */ + "PQ", /* 0x0008 (IRIX ondisk) */ + "GQE", /* 0x0010 */ + "GQCHKD", /* 0x0020 */ + "GQ", /* 0x0040 */ + "UQACTV", /* 0x0080 */ + "GQACTV", /* 0x0100 */ + "QMAYBE", /* 0x0200 */ + 0 + }; + + kdb_printf("xfs_mount at 0x%p\n", mp); + kdb_printf("vfsp 0x%p tid 0x%x ail_lock 0x%p &ail 0x%p\n", + XFS_MTOVFS(mp), mp->m_tid, &mp->m_ail_lock, &mp->m_ail); + kdb_printf("ail_gen 0x%x &sb 0x%p\n", + mp->m_ail_gen, &mp->m_sb); + kdb_printf("sb_lock 0x%p sb_bp 0x%p dev 0x%x logdev 0x%x rtdev 0x%x\n", + &mp->m_sb_lock, mp->m_sb_bp, + mp->m_ddev_targp ? mp->m_ddev_targp->pbr_dev : 0, + mp->m_logdev_targp ? mp->m_logdev_targp->pbr_dev : 0, + mp->m_rtdev_targp ? 
mp->m_rtdev_targp->pbr_dev : 0); + kdb_printf("bsize %d agfrotor %d agirotor %d ihash 0x%p ihsize %d\n", + mp->m_bsize, mp->m_agfrotor, mp->m_agirotor, + mp->m_ihash, mp->m_ihsize); + kdb_printf("inodes 0x%p ilock 0x%p ireclaims 0x%x\n", + mp->m_inodes, &mp->m_ilock, mp->m_ireclaims); + kdb_printf("readio_log 0x%x readio_blocks 0x%x ", + mp->m_readio_log, mp->m_readio_blocks); + kdb_printf("writeio_log 0x%x writeio_blocks 0x%x\n", + mp->m_writeio_log, mp->m_writeio_blocks); + kdb_printf("logbufs %d logbsize %d LOG 0x%p\n", mp->m_logbufs, + mp->m_logbsize, mp->m_log); + kdb_printf("rsumlevels 0x%x rsumsize 0x%x rbmip 0x%p rsumip 0x%p\n", + mp->m_rsumlevels, mp->m_rsumsize, mp->m_rbmip, mp->m_rsumip); + kdb_printf("rootip 0x%p\n", mp->m_rootip); + kdb_printf("dircook_elog %d blkbit_log %d blkbb_log %d agno_log %d\n", + mp->m_dircook_elog, mp->m_blkbit_log, mp->m_blkbb_log, + mp->m_agno_log); + kdb_printf("agino_log %d nreadaheads %d inode cluster size %d\n", + mp->m_agino_log, mp->m_nreadaheads, + mp->m_inode_cluster_size); + kdb_printf("blockmask 0x%x blockwsize 0x%x blockwmask 0x%x\n", + mp->m_blockmask, mp->m_blockwsize, mp->m_blockwmask); + kdb_printf("alloc_mxr[lf,nd] %d %d alloc_mnr[lf,nd] %d %d\n", + mp->m_alloc_mxr[0], mp->m_alloc_mxr[1], + mp->m_alloc_mnr[0], mp->m_alloc_mnr[1]); + kdb_printf("bmap_dmxr[lfnr,ndnr] %d %d bmap_dmnr[lfnr,ndnr] %d %d\n", + mp->m_bmap_dmxr[0], mp->m_bmap_dmxr[1], + mp->m_bmap_dmnr[0], mp->m_bmap_dmnr[1]); + kdb_printf("inobt_mxr[lf,nd] %d %d inobt_mnr[lf,nd] %d %d\n", + mp->m_inobt_mxr[0], mp->m_inobt_mxr[1], + mp->m_inobt_mnr[0], mp->m_inobt_mnr[1]); + kdb_printf("ag_maxlevels %d bm_maxlevels[d,a] %d %d in_maxlevels %d\n", + mp->m_ag_maxlevels, mp->m_bm_maxlevels[0], + mp->m_bm_maxlevels[1], mp->m_in_maxlevels); + kdb_printf("perag 0x%p &peraglock 0x%p &growlock 0x%p\n", + mp->m_perag, &mp->m_peraglock, &mp->m_growlock); + printflags(mp->m_flags, xmount_flags,"flags"); + kdb_printf("ialloc_inos %d ialloc_blks %d litino %d\n", + mp->m_ialloc_inos, mp->m_ialloc_blks, mp->m_litino); + kdb_printf("dir_node_ents %u attr_node_ents %u\n", + mp->m_dir_node_ents, mp->m_attr_node_ents); + kdb_printf("attroffset %d maxicount %Ld inoalign_mask %d\n", + mp->m_attroffset, mp->m_maxicount, mp->m_inoalign_mask); + kdb_printf("resblks %Ld resblks_avail %Ld\n", mp->m_resblks, + mp->m_resblks_avail); +#if XFS_BIG_FILESYSTEMS + kdb_printf(" inoadd %llx\n", (unsigned long long) mp->m_inoadd); +#else + kdb_printf("\n"); +#endif + if (mp->m_quotainfo) + kdb_printf("quotainfo 0x%p (uqip = 0x%p, gqip = 0x%p)\n", + mp->m_quotainfo, + mp->m_quotainfo->qi_uquotaip, + mp->m_quotainfo->qi_gquotaip); + else + kdb_printf("quotainfo NULL\n"); + printflags(mp->m_qflags, quota_flags,"quotaflags"); + kdb_printf("\n"); + kdb_printf("dalign %d swidth %d sinoalign %d attr_magicpct %d dir_magicpct %d\n", + mp->m_dalign, mp->m_swidth, mp->m_sinoalign, + mp->m_attr_magicpct, mp->m_dir_magicpct); + kdb_printf("mk_sharedro %d inode_quiesce %d sectbb_log %d\n", + mp->m_mk_sharedro, mp->m_inode_quiesce, mp->m_sectbb_log); + kdb_printf("dirversion %d dirblkfsbs %d &dirops 0x%p\n", + mp->m_dirversion, mp->m_dirblkfsbs, &mp->m_dirops); + kdb_printf("dirblksize %d dirdatablk 0x%Lx dirleafblk 0x%Lx dirfreeblk 0x%Lx\n", + mp->m_dirblksize, + (xfs_dfiloff_t)mp->m_dirdatablk, + (xfs_dfiloff_t)mp->m_dirleafblk, + (xfs_dfiloff_t)mp->m_dirfreeblk); + kdb_printf("chsize %d chash 0x%p\n", + mp->m_chsize, mp->m_chash); + kdb_printf("m_lstripemask %d\n", mp->m_lstripemask); + kdb_printf("m_frozen %d 
m_active_trans %d\n", + mp->m_frozen, mp->m_active_trans.counter); + if (mp->m_fsname != NULL) + kdb_printf("mountpoint \"%s\"\n", mp->m_fsname); + else + kdb_printf("No name!!!\n"); + +} + +static void +xfsidbg_xihash(xfs_mount_t *mp) +{ + xfs_ihash_t *ih; + int i; + int j; + int total; + int numzeros; + xfs_inode_t *ip; + int *hist; + int hist_bytes = mp->m_ihsize * sizeof(int); + int hist2[21]; + + hist = (int *) kmalloc(hist_bytes, GFP_KERNEL); + + if (hist == NULL) { + kdb_printf("xfsidbg_xihash: kmalloc(%d) failed!\n", + hist_bytes); + return; + } + + for (i = 0; i < mp->m_ihsize; i++) { + ih = mp->m_ihash + i; + j = 0; + for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) + j++; + hist[i] = j; + } + + numzeros = total = 0; + + for (i = 0; i < 21; i++) + hist2[i] = 0; + + for (i = 0; i < mp->m_ihsize; i++) { + kdb_printf("%d ", hist[i]); + total += hist[i]; + numzeros += hist[i] == 0 ? 1 : 0; + if (hist[i] > 20) + j = 20; + else + j = hist[i]; + + if (! (j <= 20)) { + kdb_printf("xfsidbg_xihash: (j > 20)/%d @ line # %d\n", + j, __LINE__); + return; + } + + hist2[j]++; + } + + kdb_printf("\n"); + + kdb_printf("total inodes = %d, average length = %d, adjusted average = %d \n", + total, total / mp->m_ihsize, + total / (mp->m_ihsize - numzeros)); + + for (i = 0; i < 21; i++) { + kdb_printf("%d - %d , ", i, hist2[i]); + } + kdb_printf("\n"); + kfree(hist); +} + +/* + * Command to print xfs inodes: kp xnode + */ +static void +xfsidbg_xnode(xfs_inode_t *ip) +{ + static char *tab_flags[] = { + "grio", /* XFS_IGRIO */ + "uiosize", /* XFS_IUIOSZ */ + "quiesce", /* XFS_IQUIESCE */ + "reclaim", /* XFS_IRECLAIM */ + NULL + }; + + kdb_printf("hash 0x%p next 0x%p prevp 0x%p mount 0x%p\n", + ip->i_hash, + ip->i_next, + ip->i_prevp, + ip->i_mount); + kdb_printf("mnext 0x%p mprev 0x%p vnode 0x%p \n", + ip->i_mnext, + ip->i_mprev, + XFS_ITOV_NULL(ip)); + kdb_printf("dev %x ino %s\n", + ip->i_mount->m_dev, + xfs_fmtino(ip->i_ino, ip->i_mount)); + kdb_printf("blkno 0x%llx len 0x%x boffset 0x%x\n", + (long long) ip->i_blkno, + ip->i_len, + ip->i_boffset); + kdb_printf("transp 0x%p &itemp 0x%p\n", + ip->i_transp, + ip->i_itemp); + kdb_printf("&lock 0x%p &iolock 0x%p ", + &ip->i_lock, + &ip->i_iolock); + kdb_printf("&flock 0x%p (%d) pincount 0x%x\n", + &ip->i_flock, valusema(&ip->i_flock), + xfs_ipincount(ip)); + kdb_printf("udquotp 0x%p gdquotp 0x%p\n", + ip->i_udquot, ip->i_gdquot); + kdb_printf("new_size %Ld\n", ip->i_iocore.io_new_size); + printflags((int)ip->i_flags, tab_flags, "flags"); + kdb_printf("\n"); + kdb_printf("update_core %d update size %d\n", + (int)(ip->i_update_core), (int) ip->i_update_size); + kdb_printf("gen 0x%x delayed blks %d", + ip->i_gen, + ip->i_delayed_blks); + kdb_printf("\n"); + kdb_printf("chash 0x%p cnext 0x%p cprev 0x%p\n", + ip->i_chash, + ip->i_cnext, + ip->i_cprev); + xfs_xnode_fork("data", &ip->i_df); + xfs_xnode_fork("attr", ip->i_afp); + kdb_printf("\n"); + xfs_prdinode_core(&ip->i_d, ARCH_NOCONVERT); +} + +static void +xfsidbg_xcore(xfs_iocore_t *io) +{ + kdb_printf("io_obj 0x%p io_flags 0x%x io_mount 0x%p\n", + io->io_obj, io->io_flags, io->io_mount); + kdb_printf("new_size %Lx\n", io->io_new_size); +} + +/* + * Command to print xfs inode cluster hash table: kp xchash + */ +static void +xfsidbg_xchash(xfs_mount_t *mp) +{ + int i; + xfs_chash_t *ch; + + kdb_printf("m_chash 0x%p size %d\n", + mp->m_chash, mp->m_chsize); + for (i = 0; i < mp->m_chsize; i++) { + ch = mp->m_chash + i; + kdb_printf("[%3d] ch 0x%p chashlist 0x%p\n", i, ch, ch->ch_list); + 
xfsidbg_xchashlist(ch->ch_list); + } +} + +/* + * Command to print xfs inode cluster hash list: kp xchashlist + */ +static void +xfsidbg_xchashlist(xfs_chashlist_t *chl) +{ + xfs_inode_t *ip; + + while (chl != NULL) { +#ifdef DEBUG + kdb_printf("hashlist inode 0x%p blkno %Ld buf 0x%p", + chl->chl_ip, chl->chl_blkno, chl->chl_buf); +#else + kdb_printf("hashlist inode 0x%p blkno %lld", + chl->chl_ip, (long long) chl->chl_blkno); +#endif + + kdb_printf("\n"); + + /* print inodes on chashlist */ + ip = chl->chl_ip; + do { + kdb_printf("0x%p ", ip); + ip = ip->i_cnext; + } while (ip != chl->chl_ip); + kdb_printf("\n"); + + chl=chl->chl_next; + } +} + +/* + * Print xfs per-ag data structures for filesystem. + */ +static void +xfsidbg_xperag(xfs_mount_t *mp) +{ + xfs_agnumber_t agno; + xfs_perag_t *pag; + int busy; + + pag = mp->m_perag; + for (agno = 0; agno < mp->m_sb.sb_agcount; agno++, pag++) { + kdb_printf("ag %d f_init %d i_init %d\n", + agno, pag->pagf_init, pag->pagi_init); + if (pag->pagf_init) + kdb_printf( + " f_levels[b,c] %d,%d f_flcount %d f_freeblks %d f_longest %d\n" + " f__metadata %d\n", + pag->pagf_levels[XFS_BTNUM_BNOi], + pag->pagf_levels[XFS_BTNUM_CNTi], + pag->pagf_flcount, pag->pagf_freeblks, + pag->pagf_longest, pag->pagf_metadata); + if (pag->pagi_init) + kdb_printf(" i_freecount %d i_inodeok %d\n", + pag->pagi_freecount, pag->pagi_inodeok); + if (pag->pagf_init) { + for (busy = 0; busy < XFS_PAGB_NUM_SLOTS; busy++) { + if (pag->pagb_list[busy].busy_length != 0) { + kdb_printf( + " %04d: start %d length %d tp 0x%p\n", + busy, + pag->pagb_list[busy].busy_start, + pag->pagb_list[busy].busy_length, + pag->pagb_list[busy].busy_tp); + } + } + } + } +} + +#ifdef CONFIG_XFS_QUOTA +static void +xfsidbg_xqm() +{ + if (xfs_Gqm == NULL) { + kdb_printf("NULL XQM!!\n"); + return; + } + + kdb_printf("usrhtab 0x%p\tgrphtab 0x%p\tndqfree 0x%x\thashmask 0x%x\n", + xfs_Gqm->qm_usr_dqhtable, + xfs_Gqm->qm_grp_dqhtable, + xfs_Gqm->qm_dqfreelist.qh_nelems, + xfs_Gqm->qm_dqhashmask); + kdb_printf("&freelist 0x%p, totaldquots 0x%x nrefs 0x%x\n", + &xfs_Gqm->qm_dqfreelist, + atomic_read(&xfs_Gqm->qm_totaldquots), + xfs_Gqm->qm_nrefs); +} +#endif + +static void +xfsidbg_xqm_diskdq(xfs_disk_dquot_t *d) +{ + kdb_printf("magic 0x%x\tversion 0x%x\tID 0x%x (%d)\t\n", + INT_GET(d->d_magic, ARCH_CONVERT), + INT_GET(d->d_version, ARCH_CONVERT), + INT_GET(d->d_id, ARCH_CONVERT), + INT_GET(d->d_id, ARCH_CONVERT)); + kdb_printf("bhard 0x%llx\tbsoft 0x%llx\tihard 0x%llx\tisoft 0x%llx\n", + (unsigned long long)INT_GET(d->d_blk_hardlimit, ARCH_CONVERT), + (unsigned long long)INT_GET(d->d_blk_softlimit, ARCH_CONVERT), + (unsigned long long)INT_GET(d->d_ino_hardlimit, ARCH_CONVERT), + (unsigned long long)INT_GET(d->d_ino_softlimit, ARCH_CONVERT)); + kdb_printf("bcount 0x%llx icount 0x%llx\n", + (unsigned long long)INT_GET(d->d_bcount, ARCH_CONVERT), + (unsigned long long)INT_GET(d->d_icount, ARCH_CONVERT)); + kdb_printf("btimer 0x%x itimer 0x%x \n", + (int)INT_GET(d->d_btimer, ARCH_CONVERT), + (int)INT_GET(d->d_itimer, ARCH_CONVERT)); +} + +static void +xfsidbg_xqm_dquot(xfs_dquot_t *dqp) +{ + static char *qflags[] = { + "USR", + "GRP", + "LCKD", + "FLKD", + "DIRTY", + "WANT", + "INACT", + "MARKER", + 0 + }; + kdb_printf("mount 0x%p hash 0x%p gdquotp 0x%p HL_next 0x%p HL_prevp 0x%p\n", + dqp->q_mount, + dqp->q_hash, + dqp->q_gdquot, + dqp->HL_NEXT, + dqp->HL_PREVP); + kdb_printf("MPL_next 0x%p MPL_prevp 0x%p FL_next 0x%p FL_prev 0x%p\n", + dqp->MPL_NEXT, + dqp->MPL_PREVP, + dqp->dq_flnext, + dqp->dq_flprev); + 
+ kdb_printf("nrefs 0x%x, res_bcount %d, ", + dqp->q_nrefs, (int) dqp->q_res_bcount); + printflags(dqp->dq_flags, qflags, "flags:"); + kdb_printf("\nblkno 0x%llx\tboffset 0x%x\n", + (unsigned long long) dqp->q_blkno, (int) dqp->q_bufoffset); + kdb_printf("qlock 0x%p flock 0x%p (%s) pincount 0x%x\n", + &dqp->q_qlock, + &dqp->q_flock, + (valusema(&dqp->q_flock) <= 0) ? "LCK" : "UNLKD", + dqp->q_pincount); + kdb_printf("disk-dquot 0x%p\n", &dqp->q_core); + xfsidbg_xqm_diskdq(&dqp->q_core); + +} + + +#define XQMIDBG_LIST_PRINT(l, NXT) \ +{ \ + xfs_dquot_t *dqp;\ + int i = 0; \ + kdb_printf("[#%d dquots]\n", (int) (l)->qh_nelems); \ + for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) {\ + kdb_printf( \ + "\t%d. [0x%p] \"%d (%s)\"\t blks = %d, inos = %d refs = %d\n", \ + ++i, dqp, (int) INT_GET(dqp->q_core.d_id, ARCH_CONVERT), \ + DQFLAGTO_TYPESTR(dqp), \ + (int) INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), \ + (int) INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), \ + (int) dqp->q_nrefs); }\ + kdb_printf("\n"); \ +} + +static void +xfsidbg_xqm_dqattached_inos(xfs_mount_t *mp) +{ + xfs_inode_t *ip; + int n = 0; + + ip = mp->m_inodes; + do { + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + if (ip->i_udquot || ip->i_gdquot) { + n++; + kdb_printf("inode = 0x%p, ino %d: udq 0x%p, gdq 0x%p\n", + ip, (int)ip->i_ino, ip->i_udquot, ip->i_gdquot); + } + ip = ip->i_mnext; + } while (ip != mp->m_inodes); + kdb_printf("\nNumber of inodes with dquots attached: %d\n", n); +} + +#ifdef CONFIG_XFS_QUOTA +static void +xfsidbg_xqm_freelist_print(xfs_frlist_t *qlist, char *title) +{ + xfs_dquot_t *dq; + int i = 0; + kdb_printf("%s (#%d)\n", title, (int) qlist->qh_nelems); + FOREACH_DQUOT_IN_FREELIST(dq, qlist) { + kdb_printf("\t%d.\t\"%d (%s:0x%p)\"\t bcnt = %d, icnt = %d " + "refs = %d\n", + ++i, (int) INT_GET(dq->q_core.d_id, ARCH_CONVERT), + DQFLAGTO_TYPESTR(dq), dq, + (int) INT_GET(dq->q_core.d_bcount, ARCH_CONVERT), + (int) INT_GET(dq->q_core.d_icount, ARCH_CONVERT), + (int) dq->q_nrefs); + } +} + +static void +xfsidbg_xqm_freelist(void) +{ + if (xfs_Gqm) { + xfsidbg_xqm_freelist_print(&(xfs_Gqm->qm_dqfreelist), "Freelist"); + } else + kdb_printf("NULL XQM!!\n"); +} + +static void +xfsidbg_xqm_htab(void) +{ + int i; + xfs_dqhash_t *h; + + if (xfs_Gqm == NULL) { + kdb_printf("NULL XQM!!\n"); + return; + } + for (i = 0; i <= xfs_Gqm->qm_dqhashmask; i++) { + h = &xfs_Gqm->qm_usr_dqhtable[i]; + if (h->qh_next) { + kdb_printf("USR %d: ", i); + XQMIDBG_LIST_PRINT(h, HL_NEXT); + } + } + for (i = 0; i <= xfs_Gqm->qm_dqhashmask; i++) { + h = &xfs_Gqm->qm_grp_dqhtable[i]; + if (h->qh_next) { + kdb_printf("GRP %d: ", i); + XQMIDBG_LIST_PRINT(h, HL_NEXT); + } + } +} +#endif + +static void +xfsidbg_xqm_mplist(xfs_mount_t *mp) +{ + if (mp->m_quotainfo == NULL) { + kdb_printf("NULL quotainfo\n"); + return; + } + + XQMIDBG_LIST_PRINT(&(mp->m_quotainfo->qi_dqlist), MPL_NEXT); + +} + + +static void +xfsidbg_xqm_qinfo(xfs_mount_t *mp) +{ + if (mp == NULL || mp->m_quotainfo == NULL) { + kdb_printf("NULL quotainfo\n"); + return; + } + + kdb_printf("uqip 0x%p, gqip 0x%p, &pinlock 0x%p &dqlist 0x%p\n", + mp->m_quotainfo->qi_uquotaip, + mp->m_quotainfo->qi_gquotaip, + &mp->m_quotainfo->qi_pinlock, + &mp->m_quotainfo->qi_dqlist); + + kdb_printf("nreclaims %d, btmlimit 0x%x, itmlimit 0x%x, RTbtmlim 0x%x\n", + (int)mp->m_quotainfo->qi_dqreclaims, + (int)mp->m_quotainfo->qi_btimelimit, + (int)mp->m_quotainfo->qi_itimelimit, + (int)mp->m_quotainfo->qi_rtbtimelimit); + + kdb_printf("bwarnlim 0x%x, iwarnlim 0x%x, 
&qofflock 0x%p, " + "chunklen 0x%x, dqperchunk 0x%x\n", + (int)mp->m_quotainfo->qi_bwarnlimit, + (int)mp->m_quotainfo->qi_iwarnlimit, + &mp->m_quotainfo->qi_quotaofflock, + (int)mp->m_quotainfo->qi_dqchunklen, + (int)mp->m_quotainfo->qi_dqperchunk); +} + +static void +xfsidbg_xqm_tpdqinfo(xfs_trans_t *tp) +{ + xfs_dqtrx_t *qa, *q; + int i,j; + + kdb_printf("dqinfo 0x%p\n", tp->t_dqinfo); + if (! tp->t_dqinfo) + return; + kdb_printf("USR: \n"); + qa = tp->t_dqinfo->dqa_usrdquots; + for (j = 0; j < 2; j++) { + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + if (qa[i].qt_dquot == NULL) + break; + q = &qa[i]; + kdb_printf( + "\"%d\"[0x%p]: bres %d, bres-used %d, bdelta %d, del-delta %d, icnt-delta %d\n", + (int) q->qt_dquot->q_core.d_id, + q->qt_dquot, + (int) q->qt_blk_res, + (int) q->qt_blk_res_used, + (int) q->qt_bcount_delta, + (int) q->qt_delbcnt_delta, + (int) q->qt_icount_delta); + } + if (j == 0) { + qa = tp->t_dqinfo->dqa_grpdquots; + kdb_printf("GRP: \n"); + } + } + +} + + + +/* + * Print xfs superblock. + */ +static void +xfsidbg_xsb(xfs_sb_t *sbp, int convert) +{ + xfs_arch_t arch=convert?ARCH_CONVERT:ARCH_NOCONVERT; + + kdb_printf(convert?"<converted>\n":"<unconverted>\n"); + + kdb_printf("magicnum 0x%x blocksize 0x%x dblocks %Ld rblocks %Ld\n", + INT_GET(sbp->sb_magicnum, arch), INT_GET(sbp->sb_blocksize, arch), + INT_GET(sbp->sb_dblocks, arch), INT_GET(sbp->sb_rblocks, arch)); + kdb_printf("rextents %Ld uuid %s logstart %s\n", + INT_GET(sbp->sb_rextents, arch), + xfs_fmtuuid(&sbp->sb_uuid), + xfs_fmtfsblock(INT_GET(sbp->sb_logstart, arch), NULL)); + kdb_printf("rootino %s ", + xfs_fmtino(INT_GET(sbp->sb_rootino, arch), NULL)); + kdb_printf("rbmino %s ", + xfs_fmtino(INT_GET(sbp->sb_rbmino, arch), NULL)); + kdb_printf("rsumino %s\n", + xfs_fmtino(INT_GET(sbp->sb_rsumino, arch), NULL)); + kdb_printf("rextsize 0x%x agblocks 0x%x agcount 0x%x rbmblocks 0x%x\n", + INT_GET(sbp->sb_rextsize, arch), + INT_GET(sbp->sb_agblocks, arch), + INT_GET(sbp->sb_agcount, arch), + INT_GET(sbp->sb_rbmblocks, arch)); + kdb_printf("logblocks 0x%x versionnum 0x%x sectsize 0x%x inodesize 0x%x\n", + INT_GET(sbp->sb_logblocks, arch), + INT_GET(sbp->sb_versionnum, arch), + INT_GET(sbp->sb_sectsize, arch), + INT_GET(sbp->sb_inodesize, arch)); + kdb_printf("inopblock 0x%x blocklog 0x%x sectlog 0x%x inodelog 0x%x\n", + INT_GET(sbp->sb_inopblock, arch), + INT_GET(sbp->sb_blocklog, arch), + INT_GET(sbp->sb_sectlog, arch), + INT_GET(sbp->sb_inodelog, arch)); + kdb_printf("inopblog %d agblklog %d rextslog %d inprogress %d imax_pct %d\n", + INT_GET(sbp->sb_inopblog, arch), + INT_GET(sbp->sb_agblklog, arch), + INT_GET(sbp->sb_rextslog, arch), + INT_GET(sbp->sb_inprogress, arch), + INT_GET(sbp->sb_imax_pct, arch)); + kdb_printf("icount %Lx ifree %Lx fdblocks %Lx frextents %Lx\n", + INT_GET(sbp->sb_icount, arch), + INT_GET(sbp->sb_ifree, arch), + INT_GET(sbp->sb_fdblocks, arch), + INT_GET(sbp->sb_frextents, arch)); + kdb_printf("uquotino %s ", xfs_fmtino(INT_GET(sbp->sb_uquotino, arch), NULL)); + kdb_printf("gquotino %s ", xfs_fmtino(INT_GET(sbp->sb_gquotino, arch), NULL)); + kdb_printf("qflags 0x%x flags 0x%x shared_vn %d inoaligmt %d\n", + INT_GET(sbp->sb_qflags, arch), INT_GET(sbp->sb_flags, arch), INT_GET(sbp->sb_shared_vn, arch), + INT_GET(sbp->sb_inoalignmt, arch)); + kdb_printf("unit %d width %d dirblklog %d\n", + INT_GET(sbp->sb_unit, arch), INT_GET(sbp->sb_width, arch), INT_GET(sbp->sb_dirblklog, arch)); + kdb_printf("log sunit %d\n", INT_GET(sbp->sb_logsunit, arch)); +} + + +/* + * Print out an XFS transaction structure. 
Print summaries for + * each of the items. + */ +static void +xfsidbg_xtp(xfs_trans_t *tp) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_desc_t *lidp; + xfs_log_busy_chunk_t *lbcp; + int i; + int chunk; + static char *xtp_flags[] = { + "dirty", /* 0x1 */ + "sb_dirty", /* 0x2 */ + "perm_log_res", /* 0x4 */ + "sync", /* 0x08 */ + "dq_dirty", /* 0x10 */ + 0 + }; + static char *lid_flags[] = { + "dirty", /* 0x1 */ + "pinned", /* 0x2 */ + "sync unlock", /* 0x4 */ + "buf stale", /* 0x8 */ + 0 + }; + + kdb_printf("tp 0x%p type ", tp); + switch (tp->t_type) { + case XFS_TRANS_SETATTR_NOT_SIZE: kdb_printf("SETATTR_NOT_SIZE"); break; + case XFS_TRANS_SETATTR_SIZE: kdb_printf("SETATTR_SIZE"); break; + case XFS_TRANS_INACTIVE: kdb_printf("INACTIVE"); break; + case XFS_TRANS_CREATE: kdb_printf("CREATE"); break; + case XFS_TRANS_CREATE_TRUNC: kdb_printf("CREATE_TRUNC"); break; + case XFS_TRANS_TRUNCATE_FILE: kdb_printf("TRUNCATE_FILE"); break; + case XFS_TRANS_REMOVE: kdb_printf("REMOVE"); break; + case XFS_TRANS_LINK: kdb_printf("LINK"); break; + case XFS_TRANS_RENAME: kdb_printf("RENAME"); break; + case XFS_TRANS_MKDIR: kdb_printf("MKDIR"); break; + case XFS_TRANS_RMDIR: kdb_printf("RMDIR"); break; + case XFS_TRANS_SYMLINK: kdb_printf("SYMLINK"); break; + case XFS_TRANS_SET_DMATTRS: kdb_printf("SET_DMATTRS"); break; + case XFS_TRANS_GROWFS: kdb_printf("GROWFS"); break; + case XFS_TRANS_STRAT_WRITE: kdb_printf("STRAT_WRITE"); break; + case XFS_TRANS_DIOSTRAT: kdb_printf("DIOSTRAT"); break; + case XFS_TRANS_WRITE_SYNC: kdb_printf("WRITE_SYNC"); break; + case XFS_TRANS_WRITEID: kdb_printf("WRITEID"); break; + case XFS_TRANS_ADDAFORK: kdb_printf("ADDAFORK"); break; + case XFS_TRANS_ATTRINVAL: kdb_printf("ATTRINVAL"); break; + case XFS_TRANS_ATRUNCATE: kdb_printf("ATRUNCATE"); break; + case XFS_TRANS_ATTR_SET: kdb_printf("ATTR_SET"); break; + case XFS_TRANS_ATTR_RM: kdb_printf("ATTR_RM"); break; + case XFS_TRANS_ATTR_FLAG: kdb_printf("ATTR_FLAG"); break; + case XFS_TRANS_CLEAR_AGI_BUCKET: kdb_printf("CLEAR_AGI_BUCKET"); break; + case XFS_TRANS_QM_SBCHANGE: kdb_printf("QM_SBCHANGE"); break; + case XFS_TRANS_QM_QUOTAOFF: kdb_printf("QM_QUOTAOFF"); break; + case XFS_TRANS_QM_DQALLOC: kdb_printf("QM_DQALLOC"); break; + case XFS_TRANS_QM_SETQLIM: kdb_printf("QM_SETQLIM"); break; + case XFS_TRANS_QM_DQCLUSTER: kdb_printf("QM_DQCLUSTER"); break; + case XFS_TRANS_QM_QINOCREATE: kdb_printf("QM_QINOCREATE"); break; + case XFS_TRANS_QM_QUOTAOFF_END: kdb_printf("QM_QOFF_END"); break; + case XFS_TRANS_SB_UNIT: kdb_printf("SB_UNIT"); break; + case XFS_TRANS_FSYNC_TS: kdb_printf("FSYNC_TS"); break; + case XFS_TRANS_GROWFSRT_ALLOC: kdb_printf("GROWFSRT_ALLOC"); break; + case XFS_TRANS_GROWFSRT_ZERO: kdb_printf("GROWFSRT_ZERO"); break; + case XFS_TRANS_GROWFSRT_FREE: kdb_printf("GROWFSRT_FREE"); break; + + default: kdb_printf("0x%x", tp->t_type); break; + } + kdb_printf(" mount 0x%p\n", tp->t_mountp); + kdb_printf("flags "); + printflags(tp->t_flags, xtp_flags,"xtp"); + kdb_printf("\n"); + kdb_printf("callback 0x%p forw 0x%p back 0x%p\n", + &tp->t_logcb, tp->t_forw, tp->t_back); + kdb_printf("log res %d block res %d block res used %d\n", + tp->t_log_res, tp->t_blk_res, tp->t_blk_res_used); + kdb_printf("rt res %d rt res used %d\n", tp->t_rtx_res, + tp->t_rtx_res_used); + kdb_printf("ticket 0x%lx lsn %s commit_lsn %s\n", + (unsigned long) tp->t_ticket, + xfs_fmtlsn(&tp->t_lsn), + xfs_fmtlsn(&tp->t_commit_lsn)); + kdb_printf("callback 0x%p callarg 0x%p\n", + tp->t_callback, tp->t_callarg); + kdb_printf("icount delta %ld 
ifree delta %ld\n", + tp->t_icount_delta, tp->t_ifree_delta); + kdb_printf("blocks delta %ld res blocks delta %ld\n", + tp->t_fdblocks_delta, tp->t_res_fdblocks_delta); + kdb_printf("rt delta %ld res rt delta %ld\n", + tp->t_frextents_delta, tp->t_res_frextents_delta); + kdb_printf("ag freeblks delta %ld ag flist delta %ld ag btree delta %ld\n", + tp->t_ag_freeblks_delta, tp->t_ag_flist_delta, + tp->t_ag_btree_delta); + kdb_printf("dblocks delta %ld agcount delta %ld imaxpct delta %ld\n", + tp->t_dblocks_delta, tp->t_agcount_delta, tp->t_imaxpct_delta); + kdb_printf("rextsize delta %ld rbmblocks delta %ld\n", + tp->t_rextsize_delta, tp->t_rbmblocks_delta); + kdb_printf("rblocks delta %ld rextents delta %ld rextslog delta %ld\n", + tp->t_rblocks_delta, tp->t_rextents_delta, + tp->t_rextslog_delta); + kdb_printf("dqinfo 0x%p\n", tp->t_dqinfo); + kdb_printf("log items:\n"); + licp = &tp->t_items; + chunk = 0; + while (licp != NULL) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { + licp = licp->lic_next; + chunk++; + continue; + } + for (i = 0; i < licp->lic_unused; i++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lidp = XFS_LIC_SLOT(licp, i); + kdb_printf("\n"); + kdb_printf("chunk %d index %d item 0x%p size %d\n", + chunk, i, lidp->lid_item, lidp->lid_size); + kdb_printf("flags "); + printflags(lidp->lid_flags, lid_flags,"lic"); + kdb_printf("\n"); + xfsidbg_xlogitem(lidp->lid_item); + } + chunk++; + licp = licp->lic_next; + } + + kdb_printf("log busy free %d, list:\n", tp->t_busy_free); + lbcp = &tp->t_busy; + chunk = 0; + while (lbcp != NULL) { + kdb_printf("Chunk %d at 0x%p next 0x%p free 0x%08x unused %d\n", + chunk, lbcp, lbcp->lbc_next, lbcp->lbc_free, + lbcp->lbc_unused); + for (i = 0; i < XFS_LBC_NUM_SLOTS; i++) { + kdb_printf(" %02d: ag %d idx %d\n", + i, + lbcp->lbc_busy[i].lbc_ag, + lbcp->lbc_busy[i].lbc_idx); + } + lbcp = lbcp->lbc_next; + } +} + +static void +xfsidbg_xtrans_res( + xfs_mount_t *mp) +{ + xfs_trans_reservations_t *xtrp; + + xtrp = &mp->m_reservations; + kdb_printf("write: %d\ttruncate: %d\trename: %d\n", + xtrp->tr_write, xtrp->tr_itruncate, xtrp->tr_rename); + kdb_printf("link: %d\tremove: %d\tsymlink: %d\n", + xtrp->tr_link, xtrp->tr_remove, xtrp->tr_symlink); + kdb_printf("create: %d\tmkdir: %d\tifree: %d\n", + xtrp->tr_create, xtrp->tr_mkdir, xtrp->tr_ifree); + kdb_printf("ichange: %d\tgrowdata: %d\tswrite: %d\n", + xtrp->tr_ichange, xtrp->tr_growdata, xtrp->tr_swrite); + kdb_printf("addafork: %d\twriteid: %d\tattrinval: %d\n", + xtrp->tr_addafork, xtrp->tr_writeid, xtrp->tr_attrinval); + kdb_printf("attrset: %d\tattrrm: %d\tclearagi: %d\n", + xtrp->tr_attrset, xtrp->tr_attrrm, xtrp->tr_clearagi); + kdb_printf("growrtalloc: %d\tgrowrtzero: %d\tgrowrtfree: %d\n", + xtrp->tr_growrtalloc, xtrp->tr_growrtzero, xtrp->tr_growrtfree); +} + +module_init(xfsidbg_init) +module_exit(xfsidbg_exit) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_iget.c linux.22-ac2/fs/xfs/xfs_iget.c --- linux.vanilla/fs/xfs/xfs_iget.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_iget.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1009 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_quota.h" +#include "xfs_utils.h" + +/* + * Initialize the inode hash table for the newly mounted file system. + * + * mp -- this is the mount point structure for the file system being + * initialized + */ +void +xfs_ihash_init(xfs_mount_t *mp) +{ + int i; + + mp->m_ihsize = XFS_BUCKETS(mp); + mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize + * sizeof(xfs_ihash_t), KM_SLEEP); + ASSERT(mp->m_ihash != NULL); + for (i = 0; i < mp->m_ihsize; i++) { + rwlock_init(&(mp->m_ihash[i].ih_lock)); + } +} + +/* + * Free up structures allocated by xfs_ihash_init, at unmount time. + */ +void +xfs_ihash_free(xfs_mount_t *mp) +{ + kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t)); + mp->m_ihash = NULL; +} + +/* + * Initialize the inode cluster hash table for the newly mounted file system. + * + * mp -- this is the mount point structure for the file system being + * initialized + */ +void +xfs_chash_init(xfs_mount_t *mp) +{ + int i; + + /* + * m_chash size is based on m_ihash + * with a minimum of 37 entries + */ + mp->m_chsize = (XFS_BUCKETS(mp)) / + (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); + if (mp->m_chsize < 37) { + mp->m_chsize = 37; + } + mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize + * sizeof(xfs_chash_t), + KM_SLEEP); + ASSERT(mp->m_chash != NULL); + + for (i = 0; i < mp->m_chsize; i++) { + spinlock_init(&mp->m_chash[i].ch_lock,"xfshash"); + } +} + +/* + * Free up structures allocated by xfs_chash_init, at unmount time. + */ +void +xfs_chash_free(xfs_mount_t *mp) +{ + int i; + + for (i = 0; i < mp->m_chsize; i++) { + spinlock_destroy(&mp->m_chash[i].ch_lock); + } + + kmem_free(mp->m_chash, mp->m_chsize*sizeof(xfs_chash_t)); + mp->m_chash = NULL; +} + +/* + * Look up an inode by number in the given file system. + * The inode is looked up in the hash table for the file system + * represented by the mount point parameter mp. 
Each bucket of + * the hash table is guarded by an individual semaphore. + * + * If the inode is found in the hash table, its corresponding vnode + * is obtained with a call to vn_get(). This call takes care of + * coordination with the reclamation of the inode and vnode. Note + * that the vmap structure is filled in while holding the hash lock. + * This gives us the state of the inode/vnode when we found it and + * is used for coordination in vn_get(). + * + * If it is not in core, read it in from the file system's device and + * add the inode into the hash table. + * + * The inode is locked according to the value of the lock_flags parameter. + * This flag parameter indicates how and if the inode's IO lock and inode lock + * should be taken. + * + * mp -- the mount point structure for the current file system. It points + * to the inode hash table. + * tp -- a pointer to the current transaction if there is one. This is + * simply passed through to the xfs_iread() call. + * ino -- the number of the inode desired. This is the unique identifier + * within the file system for the inode being requested. + * lock_flags -- flags indicating how to lock the inode. See the comment + * for xfs_ilock() for a list of valid values. + * bno -- the block number starting the buffer containing the inode, + * if known (as by bulkstat), else 0. + */ +STATIC int +xfs_iget_core( + vnode_t *vp, + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + uint lock_flags, + xfs_inode_t **ipp, + xfs_daddr_t bno) +{ + xfs_ihash_t *ih; + xfs_inode_t *ip; + xfs_inode_t *iq; + vnode_t *inode_vp; + ulong version; + int error; + /* REFERENCED */ + int newnode; + xfs_chash_t *ch; + xfs_chashlist_t *chl, *chlnew; + SPLDECL(s); + + + ih = XFS_IHASH(mp, ino); + +again: + read_lock(&ih->ih_lock); + + for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) { + if (ip->i_ino == ino) { + + inode_vp = XFS_ITOV_NULL(ip); + + if (inode_vp == NULL) { + /* If IRECLAIM is set this inode is + * on its way out of the system, + * we need to pause and try again. + */ + if (ip->i_flags & XFS_IRECLAIM) { + read_unlock(&ih->ih_lock); + delay(1); + XFS_STATS_INC(xfsstats.xs_ig_frecycle); + + goto again; + } + + vn_trace_exit(vp, "xfs_iget.alloc", + (inst_t *)__return_address); + + XFS_STATS_INC(xfsstats.xs_ig_found); + + read_unlock(&ih->ih_lock); + goto finish_inode; + + } else if (vp != inode_vp) { + struct inode *inode = LINVFS_GET_IP(inode_vp); + + /* The inode is being torn down, pause and + * try again. + */ + if (inode->i_state & (I_FREEING | I_CLEAR)) { + read_unlock(&ih->ih_lock); + delay(1); + XFS_STATS_INC(xfsstats.xs_ig_frecycle); + + goto again; + } +/* Chances are the other vnode (the one in the inode) is being torn + * down right now, and we landed on top of it. Question is, what do + * we do? Unhook the old inode and hook up the new one? + */ + cmn_err(CE_PANIC, + "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p", + inode_vp, vp); + } + + read_unlock(&ih->ih_lock); + + XFS_STATS_INC(xfsstats.xs_ig_found); + +finish_inode: + if (lock_flags != 0) { + xfs_ilock(ip, lock_flags); + } + + newnode = (ip->i_d.di_mode == 0); + if (newnode) { + xfs_iocore_inode_reinit(ip); + } + + XFS_MOUNT_ILOCK(mp); + list_del_init(&ip->i_reclaim); + XFS_MOUNT_IUNLOCK(mp); + + vn_trace_exit(vp, "xfs_iget.found", + (inst_t *)__return_address); + goto return_ip; + } + } + + /* + * Inode cache miss: save the hash chain version stamp and unlock + * the chain, so we don't deadlock in vn_alloc. 
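+ * The stamp is rechecked below, under the write lock, once xfs_iread()
+ * returns; if it changed, another thread may have hashed the same
+ * inode in the meantime, so the chain is scanned again before insert.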
+ */ + XFS_STATS_INC(xfsstats.xs_ig_missed); + + version = ih->ih_version; + + read_unlock(&ih->ih_lock); + + /* + * Read the disk inode attributes into a new inode structure and get + * a new vnode for it. This should also initialize i_ino and i_mount. + */ + error = xfs_iread(mp, tp, ino, &ip, bno); + if (error) { + return error; + } + + vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address); + + xfs_inode_lock_init(ip, vp); + xfs_iocore_inode_init(ip); + + if (lock_flags != 0) { + xfs_ilock(ip, lock_flags); + } + + /* + * Put ip on its hash chain, unless someone else hashed a duplicate + * after we released the hash lock. + */ + write_lock(&ih->ih_lock); + + if (ih->ih_version != version) { + for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) { + if (iq->i_ino == ino) { + write_unlock(&ih->ih_lock); + xfs_idestroy(ip); + + XFS_STATS_INC(xfsstats.xs_ig_dup); + goto again; + } + } + } + + /* + * These values _must_ be set before releasing ihlock! + */ + ip->i_hash = ih; + if ((iq = ih->ih_next)) { + iq->i_prevp = &ip->i_next; + } + ip->i_next = iq; + ip->i_prevp = &ih->ih_next; + ih->ih_next = ip; + ip->i_udquot = ip->i_gdquot = NULL; + ih->ih_version++; + + write_unlock(&ih->ih_lock); + + /* + * put ip on its cluster's hash chain + */ + ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL && + ip->i_cnext == NULL); + + chlnew = NULL; + ch = XFS_CHASH(mp, ip->i_blkno); + chlredo: + s = mutex_spinlock(&ch->ch_lock); + for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) { + if (chl->chl_blkno == ip->i_blkno) { + + /* insert this inode into the doubly-linked list + * where chl points */ + if ((iq = chl->chl_ip)) { + ip->i_cprev = iq->i_cprev; + iq->i_cprev->i_cnext = ip; + iq->i_cprev = ip; + ip->i_cnext = iq; + } else { + ip->i_cnext = ip; + ip->i_cprev = ip; + } + chl->chl_ip = ip; + ip->i_chash = chl; + break; + } + } + + /* no hash list found for this block; add a new hash list */ + if (chl == NULL) { + if (chlnew == NULL) { + mutex_spinunlock(&ch->ch_lock, s); + ASSERT(xfs_chashlist_zone != NULL); + chlnew = (xfs_chashlist_t *) + kmem_zone_alloc(xfs_chashlist_zone, + KM_SLEEP); + ASSERT(chlnew != NULL); + goto chlredo; + } else { + ip->i_cnext = ip; + ip->i_cprev = ip; + ip->i_chash = chlnew; + chlnew->chl_ip = ip; + chlnew->chl_blkno = ip->i_blkno; + chlnew->chl_next = ch->ch_list; + ch->ch_list = chlnew; + chlnew = NULL; + } + } else { + if (chlnew != NULL) { + kmem_zone_free(xfs_chashlist_zone, chlnew); + } + } + + mutex_spinunlock(&ch->ch_lock, s); + + + /* + * Link ip to its mount and thread it on the mount's inode list. + */ + XFS_MOUNT_ILOCK(mp); + if ((iq = mp->m_inodes)) { + ASSERT(iq->i_mprev->i_mnext == iq); + ip->i_mprev = iq->i_mprev; + iq->i_mprev->i_mnext = ip; + iq->i_mprev = ip; + ip->i_mnext = iq; + } else { + ip->i_mnext = ip; + ip->i_mprev = ip; + } + mp->m_inodes = ip; + + XFS_MOUNT_IUNLOCK(mp); + + newnode = 1; + + return_ip: + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); + + ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) == + ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0)); + + *ipp = ip; + + /* + * If we have a real type for an on-disk inode, we can set ops(&unlock) + * now. If it's a new inode being created, xfs_ialloc will handle it. + */ + VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1); + + return 0; +} + + +/* + * The 'normal' internal xfs_iget, if needed it will + * 'allocate', or 'get', the vnode. 
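+ *
+ * Illustrative sketch of the expected calling pattern (not a real
+ * caller; a transaction pointer and a known inode block number may
+ * be passed instead of NULL and 0):
+ *
+ *	xfs_inode_t	*ip;
+ *	int		error;
+ *
+ *	error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0);
+ *	if (!error) {
+ *		... use ip with the inode lock held shared ...
+ *		xfs_iput(ip, XFS_ILOCK_SHARED);
+ *	}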
+ */ +int +xfs_iget( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + uint lock_flags, + xfs_inode_t **ipp, + xfs_daddr_t bno) +{ + struct inode *inode; + vnode_t *vp = NULL; + int error; + +retry: + XFS_STATS_INC(xfsstats.xs_ig_attempts); + + if ((inode = VFS_GET_INODE(XFS_MTOVFS(mp), ino, 0))) { + bhv_desc_t *bdp; + xfs_inode_t *ip; + int newnode; + + vp = LINVFS_GET_VP(inode); + if (inode->i_state & I_NEW) { +inode_allocate: + vn_initialize(inode); + error = xfs_iget_core(vp, mp, tp, ino, + lock_flags, ipp, bno); + if (error) { + remove_inode_hash(inode); + make_bad_inode(inode); + if (inode->i_state & I_NEW) + unlock_new_inode(inode); + iput(inode); + } + } else { + /* These are true if the inode is in inactive or + * reclaim. The linux inode is about to go away, + * wait for that path to finish, and try again. + */ + if (vp->v_flag & (VINACT | VRECLM)) { + vn_wait(vp); + iput(inode); + goto retry; + } + + if (is_bad_inode(inode)) { + iput(inode); + return EIO; + } + + bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops); + if (bdp == NULL) { + XFS_STATS_INC(xfsstats.xs_ig_dup); + goto inode_allocate; + } + ip = XFS_BHVTOI(bdp); + if (lock_flags != 0) + xfs_ilock(ip, lock_flags); + newnode = (ip->i_d.di_mode == 0); + if (newnode) + xfs_iocore_inode_reinit(ip); + XFS_STATS_INC(xfsstats.xs_ig_found); + *ipp = ip; + error = 0; + } + } else + error = ENOMEM; /* If we got no inode we are out of memory */ + + return error; +} + +/* + * Do the setup for the various locks within the incore inode. + */ +void +xfs_inode_lock_init( + xfs_inode_t *ip, + vnode_t *vp) +{ + mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, + "xfsino", (long)vp->v_number); + mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number); +#ifdef NOTYET + mutex_init(&ip->i_range_lock.r_spinlock, MUTEX_SPIN, "xrange"); +#endif /* NOTYET */ + init_waitqueue_head(&ip->i_ipin_wait); + atomic_set(&ip->i_pincount, 0); + init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number); +} + +/* + * Look for the inode corresponding to the given ino in the hash table. + * If it is there and its i_transp pointer matches tp, return it. + * Otherwise, return NULL. + */ +xfs_inode_t * +xfs_inode_incore(xfs_mount_t *mp, + xfs_ino_t ino, + xfs_trans_t *tp) +{ + xfs_ihash_t *ih; + xfs_inode_t *ip; + + ih = XFS_IHASH(mp, ino); + read_lock(&ih->ih_lock); + for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) { + if (ip->i_ino == ino) { + /* + * If we find it and tp matches, return it. + * Otherwise break from the loop and return + * NULL. + */ + if (ip->i_transp == tp) { + read_unlock(&ih->ih_lock); + return (ip); + } + break; + } + } + read_unlock(&ih->ih_lock); + return (NULL); +} + +/* + * Decrement reference count of an inode structure and unlock it. + * + * ip -- the inode being released + * lock_flags -- this parameter indicates the inode's locks to be + * to be released. See the comment on xfs_iunlock() for a list + * of valid values. 
+ */ +void +xfs_iput(xfs_inode_t *ip, + uint lock_flags) +{ + vnode_t *vp = XFS_ITOV(ip); + + vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address); + + xfs_iunlock(ip, lock_flags); + + VN_RELE(vp); +} + +/* + * Special iput for brand-new inodes that are still locked + */ +void +xfs_iput_new(xfs_inode_t *ip, + uint lock_flags) +{ + vnode_t *vp = XFS_ITOV(ip); + struct inode *inode = LINVFS_GET_IP(vp); + + vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address); + + /* We shouldn't get here without this being true, but just in case */ + if (inode->i_state & I_NEW) { + remove_inode_hash(inode); + make_bad_inode(inode); + unlock_new_inode(inode); + } + if (lock_flags) + xfs_iunlock(ip, lock_flags); + VN_RELE(vp); +} + + +/* + * This routine embodies the part of the reclaim code that pulls + * the inode from the inode hash table and the mount structure's + * inode list. + * This should only be called from xfs_reclaim(). + */ +void +xfs_ireclaim(xfs_inode_t *ip) +{ + vnode_t *vp; + + /* + * Remove from old hash list and mount list. + */ + XFS_STATS_INC(xfsstats.xs_ig_reclaims); + + xfs_iextract(ip); + + /* + * Here we do a spurious inode lock in order to coordinate with + * xfs_sync(). This is because xfs_sync() references the inodes + * in the mount list without taking references on the corresponding + * vnodes. We make that OK here by ensuring that we wait until + * the inode is unlocked in xfs_sync() before we go ahead and + * free it. We get both the regular lock and the io lock because + * the xfs_sync() code may need to drop the regular one but will + * still hold the io lock. + */ + xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + + /* + * Release dquots (and their references) if any. An inode may escape + * xfs_inactive and get here via vn_alloc->vn_reclaim path. + */ + XFS_QM_DQDETACH(ip->i_mount, ip); + + /* + * Pull our behavior descriptor from the vnode chain. + */ + vp = XFS_ITOV_NULL(ip); + if (vp) { + vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip)); + } + + /* + * Free all memory associated with the inode. + */ + xfs_idestroy(ip); +} + +/* + * This routine removes an about-to-be-destroyed inode from + * all of the lists in which it is located with the exception + * of the behavior chain. + */ +void +xfs_iextract( + xfs_inode_t *ip) +{ + xfs_ihash_t *ih; + xfs_inode_t *iq; + xfs_mount_t *mp; + xfs_chash_t *ch; + xfs_chashlist_t *chl, *chm; + SPLDECL(s); + + ih = ip->i_hash; + write_lock(&ih->ih_lock); + if ((iq = ip->i_next)) { + iq->i_prevp = ip->i_prevp; + } + *ip->i_prevp = iq; + write_unlock(&ih->ih_lock); + + /* + * Remove from cluster hash list + * 1) delete the chashlist if this is the last inode on the chashlist + * 2) unchain from list of inodes + * 3) point chashlist->chl_ip to 'chl_next' if to this inode. 
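+ *    (step 3: in the code below chl_ip is advanced to ip->i_cnext,
+ *     the next inode in the cluster ring, when it points at this inode)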
+ */ + mp = ip->i_mount; + ch = XFS_CHASH(mp, ip->i_blkno); + s = mutex_spinlock(&ch->ch_lock); + + if (ip->i_cnext == ip) { + /* Last inode on chashlist */ + ASSERT(ip->i_cnext == ip && ip->i_cprev == ip); + ASSERT(ip->i_chash != NULL); + chm=NULL; + for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) { + if (chl->chl_blkno == ip->i_blkno) { + if (chm == NULL) { + /* first item on the list */ + ch->ch_list = chl->chl_next; + } else { + chm->chl_next = chl->chl_next; + } + kmem_zone_free(xfs_chashlist_zone, chl); + break; + } else { + ASSERT(chl->chl_ip != ip); + chm = chl; + } + } + ASSERT_ALWAYS(chl != NULL); + } else { + /* delete one inode from a non-empty list */ + iq = ip->i_cnext; + iq->i_cprev = ip->i_cprev; + ip->i_cprev->i_cnext = iq; + if (ip->i_chash->chl_ip == ip) { + ip->i_chash->chl_ip = iq; + } + ip->i_chash = __return_address; + ip->i_cprev = __return_address; + ip->i_cnext = __return_address; + } + mutex_spinunlock(&ch->ch_lock, s); + + /* + * Remove from mount's inode list. + */ + XFS_MOUNT_ILOCK(mp); + ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL)); + iq = ip->i_mnext; + iq->i_mprev = ip->i_mprev; + ip->i_mprev->i_mnext = iq; + + /* + * Fix up the head pointer if it points to the inode being deleted. + */ + if (mp->m_inodes == ip) { + if (ip == iq) { + mp->m_inodes = NULL; + } else { + mp->m_inodes = iq; + } + } + + /* Deal with the deleted inodes list */ + list_del_init(&ip->i_reclaim); + + mp->m_ireclaims++; + XFS_MOUNT_IUNLOCK(mp); +} + +/* + * This is a wrapper routine around the xfs_ilock() routine + * used to centralize some grungy code. It is used in places + * that wish to lock the inode solely for reading the extents. + * The reason these places can't just call xfs_ilock(SHARED) + * is that the inode lock also guards to bringing in of the + * extents from disk for a file in b-tree format. If the inode + * is in b-tree format, then we need to lock the inode exclusively + * until the extents are read in. Locking it exclusively all + * the time would limit our parallelism unnecessarily, though. + * What we do instead is check to see if the extents have been + * read in yet, and only lock the inode exclusively if they + * have not. + * + * The function returns a value which should be given to the + * corresponding xfs_iunlock_map_shared(). This value is + * the mode in which the lock was actually taken. + */ +uint +xfs_ilock_map_shared( + xfs_inode_t *ip) +{ + uint lock_mode; + + if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) && + ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) { + lock_mode = XFS_ILOCK_EXCL; + } else { + lock_mode = XFS_ILOCK_SHARED; + } + + xfs_ilock(ip, lock_mode); + + return lock_mode; +} + +/* + * This is simply the unlock routine to go with xfs_ilock_map_shared(). + * All it does is call xfs_iunlock() with the given lock_mode. + */ +void +xfs_iunlock_map_shared( + xfs_inode_t *ip, + unsigned int lock_mode) +{ + xfs_iunlock(ip, lock_mode); +} + +/* + * The xfs inode contains 2 locks: a multi-reader lock called the + * i_iolock and a multi-reader lock called the i_lock. This routine + * allows either or both of the locks to be obtained. + * + * The 2 locks should always be ordered so that the IO lock is + * obtained first in order to prevent deadlock. + * + * ip -- the inode being locked + * lock_flags -- this parameter indicates the inode's locks + * to be locked. 
It can be: + * XFS_IOLOCK_SHARED, + * XFS_IOLOCK_EXCL, + * XFS_ILOCK_SHARED, + * XFS_ILOCK_EXCL, + * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED, + * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL, + * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED, + * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL + */ +void +xfs_ilock(xfs_inode_t *ip, + uint lock_flags) +{ + /* + * You can't set both SHARED and EXCL for the same lock, + * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, + * and XFS_ILOCK_EXCL are valid values to set in lock_flags. + */ + ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != + (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); + ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != + (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); + ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0); + + if (lock_flags & XFS_IOLOCK_EXCL) { + mrupdate(&ip->i_iolock); + } else if (lock_flags & XFS_IOLOCK_SHARED) { + mraccess(&ip->i_iolock); + } + if (lock_flags & XFS_ILOCK_EXCL) { + mrupdate(&ip->i_lock); + } else if (lock_flags & XFS_ILOCK_SHARED) { + mraccess(&ip->i_lock); + } +#ifdef XFS_ILOCK_TRACE + xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)return_address); +#endif +} + +/* + * This is just like xfs_ilock(), except that the caller + * is guaranteed not to sleep. It returns 1 if it gets + * the requested locks and 0 otherwise. If the IO lock is + * obtained but the inode lock cannot be, then the IO lock + * is dropped before returning. + * + * ip -- the inode being locked + * lock_flags -- this parameter indicates the inode's locks to be + * to be locked. See the comment for xfs_ilock() for a list + * of valid values. + * + */ +int +xfs_ilock_nowait(xfs_inode_t *ip, + uint lock_flags) +{ + int iolocked; + int ilocked; + + /* + * You can't set both SHARED and EXCL for the same lock, + * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, + * and XFS_ILOCK_EXCL are valid values to set in lock_flags. + */ + ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != + (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); + ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != + (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); + ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0); + + iolocked = 0; + if (lock_flags & XFS_IOLOCK_EXCL) { + iolocked = mrtryupdate(&ip->i_iolock); + if (!iolocked) { + return 0; + } + } else if (lock_flags & XFS_IOLOCK_SHARED) { + iolocked = mrtryaccess(&ip->i_iolock); + if (!iolocked) { + return 0; + } + } + if (lock_flags & XFS_ILOCK_EXCL) { + ilocked = mrtryupdate(&ip->i_lock); + if (!ilocked) { + if (iolocked) { + mrunlock(&ip->i_iolock); + } + return 0; + } + } else if (lock_flags & XFS_ILOCK_SHARED) { + ilocked = mrtryaccess(&ip->i_lock); + if (!ilocked) { + if (iolocked) { + mrunlock(&ip->i_iolock); + } + return 0; + } + } +#ifdef XFS_ILOCK_TRACE + xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address); +#endif + return 1; +} + +/* + * xfs_iunlock() is used to drop the inode locks acquired with + * xfs_ilock() and xfs_ilock_nowait(). The caller must pass + * in the flags given to xfs_ilock() or xfs_ilock_nowait() so + * that we know which locks to drop. + * + * ip -- the inode being unlocked + * lock_flags -- this parameter indicates the inode's locks to be + * to be unlocked. See the comment for xfs_ilock() for a list + * of valid values for this parameter. 
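+ *
+ * As an illustrative sketch (not a real caller), the flags are simply
+ * mirrored between the lock and unlock calls:
+ *
+ *	xfs_ilock(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED);
+ *	... read-only access to the inode ...
+ *	xfs_iunlock(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED);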
+ * + */ +void +xfs_iunlock(xfs_inode_t *ip, + uint lock_flags) +{ + /* + * You can't set both SHARED and EXCL for the same lock, + * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, + * and XFS_ILOCK_EXCL are valid values to set in lock_flags. + */ + ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != + (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); + ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != + (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); + ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0); + ASSERT(lock_flags != 0); + + if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) { + ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) || + (ismrlocked(&ip->i_iolock, MR_ACCESS))); + ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) || + (ismrlocked(&ip->i_iolock, MR_UPDATE))); + mrunlock(&ip->i_iolock); + } + + if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) { + ASSERT(!(lock_flags & XFS_ILOCK_SHARED) || + (ismrlocked(&ip->i_lock, MR_ACCESS))); + ASSERT(!(lock_flags & XFS_ILOCK_EXCL) || + (ismrlocked(&ip->i_lock, MR_UPDATE))); + mrunlock(&ip->i_lock); + + /* + * Let the AIL know that this item has been unlocked in case + * it is in the AIL and anyone is waiting on it. Don't do + * this if the caller has asked us not to. + */ + if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) && + ip->i_itemp != NULL) { + xfs_trans_unlocked_item(ip->i_mount, + (xfs_log_item_t*)(ip->i_itemp)); + } + } +#ifdef XFS_ILOCK_TRACE + xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address); +#endif +} + +/* + * give up write locks. the i/o lock cannot be held nested + * if it is being demoted. + */ +void +xfs_ilock_demote(xfs_inode_t *ip, + uint lock_flags) +{ + ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)); + ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0); + + if (lock_flags & XFS_ILOCK_EXCL) { + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + mrdemote(&ip->i_lock); + } + if (lock_flags & XFS_IOLOCK_EXCL) { + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); + mrdemote(&ip->i_iolock); + } +} + +/* + * The following three routines simply manage the i_flock + * semaphore embedded in the inode. This semaphore synchronizes + * processes attempting to flush the in-core inode back to disk. + */ +void +xfs_iflock(xfs_inode_t *ip) +{ + psema(&(ip->i_flock), PINOD|PLTWAIT); +} + +int +xfs_iflock_nowait(xfs_inode_t *ip) +{ + return (cpsema(&(ip->i_flock))); +} + +void +xfs_ifunlock(xfs_inode_t *ip) +{ + ASSERT(valusema(&(ip->i_flock)) <= 0); + vsema(&(ip->i_flock)); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_imap.h linux.22-ac2/fs/xfs/xfs_imap.h --- linux.vanilla/fs/xfs/xfs_imap.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_imap.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_IMAP_H__ +#define __XFS_IMAP_H__ + +/* + * This is the structure passed to xfs_imap() to map + * an inode number to its on disk location. + */ +typedef struct xfs_imap { + xfs_daddr_t im_blkno; /* starting BB of inode chunk */ + uint im_len; /* length in BBs of inode chunk */ + xfs_agblock_t im_agblkno; /* logical block of inode chunk in ag */ + ushort im_ioffset; /* inode offset in block in "inodes" */ + ushort im_boffset; /* inode offset in block in bytes */ +} xfs_imap_t; + +#ifdef __KERNEL__ +struct xfs_mount; +struct xfs_trans; +int xfs_imap(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, + xfs_imap_t *, uint); +#endif + +#endif /* __XFS_IMAP_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_inode.c linux.22-ac2/fs/xfs/xfs_inode.c --- linux.vanilla/fs/xfs/xfs_inode.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_inode.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,3661 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_imap.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_buf_item.h" +#include "xfs_rw.h" +#include "xfs_error.h" +#include "xfs_bit.h" +#include "xfs_utils.h" +#include "xfs_dir2_trace.h" +#include "xfs_quota.h" +#include "xfs_mac.h" +#include "xfs_acl.h" + + +kmem_zone_t *xfs_ifork_zone; +kmem_zone_t *xfs_inode_zone; +kmem_zone_t *xfs_chashlist_zone; + +/* + * Used in xfs_itruncate(). This is the maximum number of extents + * freed from a file in a single transaction. + */ +#define XFS_ITRUNC_MAX_EXTENTS 2 + +STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *); +STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int); +STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int); +STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); + + +#ifdef DEBUG +/* + * Make sure that the extents in the given memory buffer + * are valid. + */ +STATIC void +xfs_validate_extents( + xfs_bmbt_rec_t *ep, + int nrecs, + int disk, + xfs_exntfmt_t fmt) +{ + xfs_bmbt_irec_t irec; + xfs_bmbt_rec_t rec; + int i; + + for (i = 0; i < nrecs; i++) { + rec.l0 = get_unaligned((__uint64_t*)&ep->l0); + rec.l1 = get_unaligned((__uint64_t*)&ep->l1); + if (disk) + xfs_bmbt_disk_get_all(&rec, &irec); + else + xfs_bmbt_get_all(&rec, &irec); + if (fmt == XFS_EXTFMT_NOSTATE) + ASSERT(irec.br_state == XFS_EXT_NORM); + ep++; + } +} +#else /* DEBUG */ +#define xfs_validate_extents(ep, nrecs, disk, fmt) +#endif /* DEBUG */ + +/* + * Check that none of the inode's in the buffer have a next + * unlinked field of 0. + */ +#if defined(DEBUG) +void +xfs_inobp_check( + xfs_mount_t *mp, + xfs_buf_t *bp) +{ + int i; + int j; + xfs_dinode_t *dip; + + j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog; + + for (i = 0; i < j; i++) { + dip = (xfs_dinode_t *)xfs_buf_offset(bp, + i * mp->m_sb.sb_inodesize); + if (INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT)) { + xfs_fs_cmn_err(CE_ALERT, mp, + "Detected a bogus zero next_unlinked field in incore inode buffer 0x%p. 
About to pop an ASSERT.", + bp); + ASSERT(!INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT)); + } + } +} +#endif + +/* + * called from bwrite on xfs inode buffers + */ +void +xfs_inobp_bwcheck(xfs_buf_t *bp) +{ + xfs_mount_t *mp; + int i; + int j; + xfs_dinode_t *dip; + + ASSERT(XFS_BUF_FSPRIVATE3(bp, void *) != NULL); + + mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *); + + + j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog; + + for (i = 0; i < j; i++) { + dip = (xfs_dinode_t *) xfs_buf_offset(bp, + i * mp->m_sb.sb_inodesize); + if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) { + cmn_err(CE_WARN, +"Bad magic # 0x%x in XFS inode buffer 0x%Lx, starting blockno %Ld, offset 0x%x", + INT_GET(dip->di_core.di_magic, ARCH_CONVERT), + (__uint64_t)(__psunsigned_t) bp, + (__int64_t) XFS_BUF_ADDR(bp), + xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); + xfs_fs_cmn_err(CE_WARN, mp, + "corrupt, unmount and run xfs_repair"); + } + if (INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT)) { + cmn_err(CE_WARN, +"Bad next_unlinked field (0) in XFS inode buffer 0x%p, starting blockno %Ld, offset 0x%x", + (__uint64_t)(__psunsigned_t) bp, + (__int64_t) XFS_BUF_ADDR(bp), + xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); + xfs_fs_cmn_err(CE_WARN, mp, + "corrupt, unmount and run xfs_repair"); + } + } + + return; +} + +/* + * This routine is called to map an inode number within a file + * system to the buffer containing the on-disk version of the + * inode. It returns a pointer to the buffer containing the + * on-disk inode in the bpp parameter, and in the dip parameter + * it returns a pointer to the on-disk inode within that buffer. + * + * If a non-zero error is returned, then the contents of bpp and + * dipp are undefined. + * + * Use xfs_imap() to determine the size and location of the + * buffer to read from disk. + */ +int +xfs_inotobp( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + xfs_dinode_t **dipp, + xfs_buf_t **bpp, + int *offset) +{ + int di_ok; + xfs_imap_t imap; + xfs_buf_t *bp; + int error; + xfs_dinode_t *dip; + + /* + * Call the space managment code to find the location of the + * inode on disk. + */ + imap.im_blkno = 0; + error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP); + if (error != 0) { + cmn_err(CE_WARN, + "xfs_inotobp: xfs_imap() returned an " + "error %d on %s. Returning error.", error, mp->m_fsname); + return error; + } + + /* + * If the inode number maps to a block outside the bounds of the + * file system then return NULL rather than calling read_buf + * and panicing when we get an error from the driver. + */ + if ((imap.im_blkno + imap.im_len) > + XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { + cmn_err(CE_WARN, + "xfs_inotobp: inode number (%d + %d) maps to a block outside the bounds " + "of the file system %s. Returning EINVAL.", + imap.im_blkno, imap.im_len,mp->m_fsname); + return XFS_ERROR(EINVAL); + } + + /* + * Read in the buffer. If tp is NULL, xfs_trans_read_buf() will + * default to just a read_buf() call. + */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno, + (int)imap.im_len, XFS_BUF_LOCK, &bp); + + if (error) { + cmn_err(CE_WARN, + "xfs_inotobp: xfs_trans_read_buf() returned an " + "error %d on %s. 
Returning error.", error, mp->m_fsname); + return error; + } + dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0); + di_ok = + INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && + XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, + XFS_RANDOM_ITOBP_INOTOBP))) { + XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip); + xfs_trans_brelse(tp, bp); + cmn_err(CE_WARN, + "xfs_inotobp: XFS_TEST_ERROR() returned an " + "error on %s. Returning EFSCORRUPTED.", mp->m_fsname); + return XFS_ERROR(EFSCORRUPTED); + } + + xfs_inobp_check(mp, bp); + + /* + * Set *dipp to point to the on-disk inode in the buffer. + */ + *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); + *bpp = bp; + *offset = imap.im_boffset; + return 0; +} + + +/* + * This routine is called to map an inode to the buffer containing + * the on-disk version of the inode. It returns a pointer to the + * buffer containing the on-disk inode in the bpp parameter, and in + * the dip parameter it returns a pointer to the on-disk inode within + * that buffer. + * + * If a non-zero error is returned, then the contents of bpp and + * dipp are undefined. + * + * If the inode is new and has not yet been initialized, use xfs_imap() + * to determine the size and location of the buffer to read from disk. + * If the inode has already been mapped to its buffer and read in once, + * then use the mapping information stored in the inode rather than + * calling xfs_imap(). This allows us to avoid the overhead of looking + * at the inode btree for small block file systems (see xfs_dilocate()). + * We can tell whether the inode has been mapped in before by comparing + * its disk block address to 0. Only uninitialized inodes will have + * 0 for the disk block address. + */ +int +xfs_itobp( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dinode_t **dipp, + xfs_buf_t **bpp, + xfs_daddr_t bno) +{ + xfs_buf_t *bp; + int error; + xfs_imap_t imap; +#ifdef __KERNEL__ + int i; + int ni; +#endif + + if (ip->i_blkno == (xfs_daddr_t)0) { + /* + * Call the space management code to find the location of the + * inode on disk. + */ + imap.im_blkno = bno; + error = xfs_imap(mp, tp, ip->i_ino, &imap, XFS_IMAP_LOOKUP); + if (error != 0) { + return error; + } + + /* + * If the inode number maps to a block outside the bounds + * of the file system then return NULL rather than calling + * read_buf and panicing when we get an error from the + * driver. + */ + if ((imap.im_blkno + imap.im_len) > + XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: " + "(imap.im_blkno (0x%llx) " + "+ imap.im_len (0x%llx)) > " + " XFS_FSB_TO_BB(mp, " + "mp->m_sb.sb_dblocks) (0x%llx)", + (unsigned long long) imap.im_blkno, + (unsigned long long) imap.im_len, + XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); +#endif /* DEBUG */ + return XFS_ERROR(EINVAL); + } + + /* + * Fill in the fields in the inode that will be used to + * map the inode to its buffer from now on. + */ + ip->i_blkno = imap.im_blkno; + ip->i_len = imap.im_len; + ip->i_boffset = imap.im_boffset; + } else { + /* + * We've already mapped the inode once, so just use the + * mapping that we saved the first time. + */ + imap.im_blkno = ip->i_blkno; + imap.im_len = ip->i_len; + imap.im_boffset = ip->i_boffset; + } + ASSERT(bno == 0 || bno == imap.im_blkno); + + /* + * Read in the buffer. If tp is NULL, xfs_trans_read_buf() will + * default to just a read_buf() call. 
+ */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno, + (int)imap.im_len, XFS_BUF_LOCK, &bp); + + if (error) { +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: " + "xfs_trans_read_buf() returned error %d, " + "imap.im_blkno 0x%llx, imap.im_len 0x%llx", + error, (unsigned long long) imap.im_blkno, + (unsigned long long) imap.im_len); +#endif /* DEBUG */ + return error; + } +#ifdef __KERNEL__ + /* + * Validate the magic number and version of every inode in the buffer + * (if DEBUG kernel) or the first inode in the buffer, otherwise. + */ +#ifdef DEBUG + ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; +#else + ni = 1; +#endif + for (i = 0; i < ni; i++) { + int di_ok; + xfs_dinode_t *dip; + + dip = (xfs_dinode_t *)xfs_buf_offset(bp, + (i << mp->m_sb.sb_inodelog)); + di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && + XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, + XFS_RANDOM_ITOBP_INOTOBP))) { +#ifdef DEBUG + prdev("bad inode magic/vsn daddr 0x%llx #%d (magic=%x)", + mp->m_dev, (unsigned long long)imap.im_blkno, i, + INT_GET(dip->di_core.di_magic, ARCH_CONVERT)); +#endif + XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_LOW, + mp, dip); + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EFSCORRUPTED); + } + } +#endif /* __KERNEL__ */ + + xfs_inobp_check(mp, bp); + + /* + * Mark the buffer as an inode buffer now that it looks good + */ + XFS_BUF_SET_VTYPE(bp, B_FS_INO); + + /* + * Set *dipp to point to the on-disk inode in the buffer. + */ + *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); + *bpp = bp; + return 0; +} + +/* + * Move inode type and inode format specific information from the + * on-disk inode to the in-core inode. For fifos, devs, and sockets + * this means set if_rdev to the proper value. For files, directories, + * and symlinks this means to bring in the in-line data or extent + * pointers. For a file in B-tree format, only the root is immediately + * brought in-core. The rest will be in-lined in if_extents when it + * is first referenced (see xfs_iread_extents()). + */ +STATIC int +xfs_iformat( + xfs_inode_t *ip, + xfs_dinode_t *dip) +{ + xfs_attr_shortform_t *atp; + int size; + int error; + xfs_fsize_t di_size; + ip->i_df.if_ext_max = + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + error = 0; + + if (unlikely( + INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) + + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) > + INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt dinode %Lu, extent total = %d, nblocks = %Lu." + " Unmount and run xfs_repair.", + (unsigned long long)ip->i_ino, + (int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) + + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)), + (unsigned long long) + INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT)); + XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt dinode %Lu, forkoff = 0x%x." 
+ " Unmount and run xfs_repair.", + (unsigned long long)ip->i_ino, + (int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT))); + XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + switch (ip->i_d.di_mode & IFMT) { + case IFIFO: + case IFCHR: + case IFBLK: + case IFSOCK: + if (unlikely(INT_GET(dip->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_DEV)) { + XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + ip->i_d.di_size = 0; + ip->i_df.if_u2.if_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT); + break; + + case IFREG: + case IFLNK: + case IFDIR: + switch (INT_GET(dip->di_core.di_format, ARCH_CONVERT)) { + case XFS_DINODE_FMT_LOCAL: + /* + * no local regular files yet + */ + if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & IFMT) == IFREG)) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode (local format for regular file) %Lu. Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino); + XFS_CORRUPTION_ERROR("xfs_iformat(4)", + XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT); + if (unlikely(di_size > + XFS_DFORK_DSIZE_ARCH(dip, ip->i_mount, ARCH_CONVERT))) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode %Lu (bad size %Ld for local inode). Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino, + (long long) di_size); + XFS_CORRUPTION_ERROR("xfs_iformat(5)", + XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + size = (int)di_size; + error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size); + break; + case XFS_DINODE_FMT_EXTENTS: + error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK); + break; + case XFS_DINODE_FMT_BTREE: + error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK); + break; + default: + XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + break; + + default: + XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + if (error) { + return error; + } + if (!XFS_DFORK_Q_ARCH(dip, ARCH_CONVERT)) + return 0; + ASSERT(ip->i_afp == NULL); + ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); + ip->i_afp->if_ext_max = + XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) { + case XFS_DINODE_FMT_LOCAL: + atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR_ARCH(dip, ARCH_CONVERT); + size = (int)INT_GET(atp->hdr.totsize, ARCH_CONVERT); + error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); + break; + case XFS_DINODE_FMT_EXTENTS: + error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK); + break; + case XFS_DINODE_FMT_BTREE: + error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); + break; + default: + error = XFS_ERROR(EFSCORRUPTED); + break; + } + if (error) { + kmem_zone_free(xfs_ifork_zone, ip->i_afp); + ip->i_afp = NULL; + xfs_idestroy_fork(ip, XFS_DATA_FORK); + } + return error; +} + +/* + * The file is in-lined in the on-disk inode. + * If it fits into if_inline_data, then copy + * it there, otherwise allocate a buffer for it + * and copy the data there. Either way, set + * if_data to point at the data. + * If we allocate a buffer for the data, make + * sure that its size is a multiple of 4 and + * record the real size in i_real_bytes. 
+ */ +STATIC int +xfs_iformat_local( + xfs_inode_t *ip, + xfs_dinode_t *dip, + int whichfork, + int size) +{ + xfs_ifork_t *ifp; + int real_size; + + /* + * If the size is unreasonable, then something + * is wrong and we just bail out rather than crash in + * kmem_alloc() or memcpy() below. + */ + if (unlikely(size > XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT))) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode %Lu (bad size %d for local fork, size = %d). Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino, size, + XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT)); + XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + ifp = XFS_IFORK_PTR(ip, whichfork); + real_size = 0; + if (size == 0) + ifp->if_u1.if_data = NULL; + else if (size <= sizeof(ifp->if_u2.if_inline_data)) + ifp->if_u1.if_data = ifp->if_u2.if_inline_data; + else { + real_size = roundup(size, 4); + ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); + } + ifp->if_bytes = size; + ifp->if_real_bytes = real_size; + if (size) + memcpy(ifp->if_u1.if_data, + XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT), size); + ifp->if_flags &= ~XFS_IFEXTENTS; + ifp->if_flags |= XFS_IFINLINE; + return 0; +} + +/* + * The file consists of a set of extents all + * of which fit into the on-disk inode. + * If there are few enough extents to fit into + * the if_inline_ext, then copy them there. + * Otherwise allocate a buffer for them and copy + * them into it. Either way, set if_extents + * to point at the extents. + */ +STATIC int +xfs_iformat_extents( + xfs_inode_t *ip, + xfs_dinode_t *dip, + int whichfork) +{ + xfs_bmbt_rec_t *ep, *dp; + xfs_ifork_t *ifp; + int nex; + int real_size; + int size; + int i; + + ifp = XFS_IFORK_PTR(ip, whichfork); + nex = XFS_DFORK_NEXTENTS_ARCH(dip, whichfork, ARCH_CONVERT); + size = nex * (uint)sizeof(xfs_bmbt_rec_t); + + /* + * If the number of extents is unreasonable, then something + * is wrong and we just bail out rather than crash in + * kmem_alloc() or memcpy() below. + */ + if (unlikely(size < 0 || size > XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT))) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode %Lu ((a)extents = %d). 
Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino, nex); + XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, + ip->i_mount, dip); + return XFS_ERROR(EFSCORRUPTED); + } + + real_size = 0; + if (nex == 0) + ifp->if_u1.if_extents = NULL; + else if (nex <= XFS_INLINE_EXTS) + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; + else { + ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); + ASSERT(ifp->if_u1.if_extents != NULL); + real_size = size; + } + ifp->if_bytes = size; + ifp->if_real_bytes = real_size; + if (size) { + dp = (xfs_bmbt_rec_t *) + XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT); + xfs_validate_extents(dp, nex, 1, XFS_EXTFMT_INODE(ip)); + ep = ifp->if_u1.if_extents; + for (i = 0; i < nex; i++, ep++, dp++) { + ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0), + ARCH_CONVERT); + ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1), + ARCH_CONVERT); + } + xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex, + whichfork); + if (whichfork != XFS_DATA_FORK || + XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) + if (unlikely(xfs_check_nostate_extents( + ifp->if_u1.if_extents, nex))) { + XFS_ERROR_REPORT("xfs_iformat_extents(2)", + XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + } + ifp->if_flags |= XFS_IFEXTENTS; + return 0; +} + +/* + * The file has too many extents to fit into + * the inode, so they are in B-tree format. + * Allocate a buffer for the root of the B-tree + * and copy the root into it. The i_extents + * field will remain NULL until all of the + * extents are read in (when they are needed). + */ +STATIC int +xfs_iformat_btree( + xfs_inode_t *ip, + xfs_dinode_t *dip, + int whichfork) +{ + xfs_bmdr_block_t *dfp; + xfs_ifork_t *ifp; + /* REFERENCED */ + int nrecs; + int size; + + ifp = XFS_IFORK_PTR(ip, whichfork); + dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT); + size = XFS_BMAP_BROOT_SPACE(dfp); + nrecs = XFS_BMAP_BROOT_NUMRECS(dfp); + + /* + * blow out if -- fork has less extents than can fit in + * fork (fork shouldn't be a btree format), root btree + * block has more records than can fit into the fork, + * or the number of extents is greater than the number of + * blocks. + */ + if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max + || XFS_BMDR_SPACE_CALC(nrecs) > + XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT) + || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "corrupt inode %Lu (btree). Unmount and run xfs_repair.", + (unsigned long long) ip->i_ino); + XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + + ifp->if_broot_bytes = size; + ifp->if_broot = kmem_alloc(size, KM_SLEEP); + ASSERT(ifp->if_broot != NULL); + /* + * Copy and convert from the on-disk structure + * to the in-memory structure. 
+ */ + xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE_ARCH(dip, ip->i_mount, whichfork, ARCH_CONVERT), + ifp->if_broot, size); + ifp->if_flags &= ~XFS_IFEXTENTS; + ifp->if_flags |= XFS_IFBROOT; + + return 0; +} + +/* + * xfs_xlate_dinode_core - translate an xfs_inode_core_t between ondisk + * and native format + * + * buf = on-disk representation + * dip = native representation + * dir = direction - +ve -> disk to native + * -ve -> native to disk + * arch = on-disk architecture + */ +void +xfs_xlate_dinode_core( + xfs_caddr_t buf, + xfs_dinode_core_t *dip, + int dir, + xfs_arch_t arch) +{ + xfs_dinode_core_t *buf_core = (xfs_dinode_core_t *)buf; + xfs_dinode_core_t *mem_core = (xfs_dinode_core_t *)dip; + + ASSERT(dir); + if (arch == ARCH_NOCONVERT) { + if (dir > 0) { + memcpy((xfs_caddr_t)mem_core, (xfs_caddr_t)buf_core, + sizeof(xfs_dinode_core_t)); + } else { + memcpy((xfs_caddr_t)buf_core, (xfs_caddr_t)mem_core, + sizeof(xfs_dinode_core_t)); + } + return; + } + + INT_XLATE(buf_core->di_magic, mem_core->di_magic, dir, arch); + INT_XLATE(buf_core->di_mode, mem_core->di_mode, dir, arch); + INT_XLATE(buf_core->di_version, mem_core->di_version, dir, arch); + INT_XLATE(buf_core->di_format, mem_core->di_format, dir, arch); + INT_XLATE(buf_core->di_onlink, mem_core->di_onlink, dir, arch); + INT_XLATE(buf_core->di_uid, mem_core->di_uid, dir, arch); + INT_XLATE(buf_core->di_gid, mem_core->di_gid, dir, arch); + INT_XLATE(buf_core->di_nlink, mem_core->di_nlink, dir, arch); + INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch); + + if (dir > 0) { + memcpy(mem_core->di_pad, buf_core->di_pad, + sizeof(buf_core->di_pad)); + } else { + memcpy(buf_core->di_pad, mem_core->di_pad, + sizeof(buf_core->di_pad)); + } + + INT_XLATE(buf_core->di_atime.t_sec, mem_core->di_atime.t_sec, + dir, arch); + INT_XLATE(buf_core->di_atime.t_nsec, mem_core->di_atime.t_nsec, + dir, arch); + INT_XLATE(buf_core->di_mtime.t_sec, mem_core->di_mtime.t_sec, + dir, arch); + INT_XLATE(buf_core->di_mtime.t_nsec, mem_core->di_mtime.t_nsec, + dir, arch); + INT_XLATE(buf_core->di_ctime.t_sec, mem_core->di_ctime.t_sec, + dir, arch); + INT_XLATE(buf_core->di_ctime.t_nsec, mem_core->di_ctime.t_nsec, + dir, arch); + INT_XLATE(buf_core->di_size, mem_core->di_size, dir, arch); + INT_XLATE(buf_core->di_nblocks, mem_core->di_nblocks, dir, arch); + INT_XLATE(buf_core->di_extsize, mem_core->di_extsize, dir, arch); + INT_XLATE(buf_core->di_nextents, mem_core->di_nextents, dir, arch); + INT_XLATE(buf_core->di_anextents, mem_core->di_anextents, dir, arch); + INT_XLATE(buf_core->di_forkoff, mem_core->di_forkoff, dir, arch); + INT_XLATE(buf_core->di_aformat, mem_core->di_aformat, dir, arch); + INT_XLATE(buf_core->di_dmevmask, mem_core->di_dmevmask, dir, arch); + INT_XLATE(buf_core->di_dmstate, mem_core->di_dmstate, dir, arch); + INT_XLATE(buf_core->di_flags, mem_core->di_flags, dir, arch); + INT_XLATE(buf_core->di_gen, mem_core->di_gen, dir, arch); +} + +/* + * Given a mount structure and an inode number, return a pointer + * to a newly allocated in-core inode coresponding to the given + * inode number. + * + * Initialize the inode's attributes and extent pointers if it + * already has them (it will not if the inode has no links). 
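+ * (A freed inode is recognized by an on-disk di_mode of zero; in that
+ * case only the magic, version and generation fields are copied in.)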
+ */ +int +xfs_iread( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + xfs_inode_t **ipp, + xfs_daddr_t bno) +{ + xfs_buf_t *bp; + xfs_dinode_t *dip; + xfs_inode_t *ip; + int error; + + ASSERT(xfs_inode_zone != NULL); + + ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP); + ip->i_ino = ino; + ip->i_mount = mp; + + /* + * Get pointer's to the on-disk inode and the buffer containing it. + * If the inode number refers to a block outside the file system + * then xfs_itobp() will return NULL. In this case we should + * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will + * know that this is a new incore inode. + */ + error = xfs_itobp(mp, tp, ip, &dip, &bp, bno); + + if (error != 0) { + kmem_zone_free(xfs_inode_zone, ip); + return error; + } + + /* + * Initialize inode's trace buffers. + * Do this before xfs_iformat in case it adds entries. + */ +#ifdef XFS_BMAP_TRACE + ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_BMBT_TRACE + ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_RW_TRACE + ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_STRAT_TRACE + ip->i_strat_trace = ktrace_alloc(XFS_STRAT_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_ILOCK_TRACE + ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_DIR2_TRACE + ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP); +#endif + + /* + * If we got something that isn't an inode it means someone + * (nfs or dmi) has a stale handle. + */ + if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) { + kmem_zone_free(xfs_inode_zone, ip); + xfs_trans_brelse(tp, bp); +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " + "dip->di_core.di_magic (0x%x) != " + "XFS_DINODE_MAGIC (0x%x)", + INT_GET(dip->di_core.di_magic, ARCH_CONVERT), + XFS_DINODE_MAGIC); +#endif /* DEBUG */ + return XFS_ERROR(EINVAL); + } + + /* + * If the on-disk inode is already linked to a directory + * entry, copy all of the inode into the in-core inode. + * xfs_iformat() handles copying in the inode format + * specific information. + * Otherwise, just get the truly permanent information. + */ + if (!INT_ISZERO(dip->di_core.di_mode, ARCH_CONVERT)) { + xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core, + &(ip->i_d), 1, ARCH_CONVERT); + error = xfs_iformat(ip, dip); + if (error) { + kmem_zone_free(xfs_inode_zone, ip); + xfs_trans_brelse(tp, bp); +#ifdef DEBUG + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " + "xfs_iformat() returned error %d", + error); +#endif /* DEBUG */ + return error; + } + } else { + ip->i_d.di_magic = INT_GET(dip->di_core.di_magic, ARCH_CONVERT); + ip->i_d.di_version = INT_GET(dip->di_core.di_version, ARCH_CONVERT); + ip->i_d.di_gen = INT_GET(dip->di_core.di_gen, ARCH_CONVERT); + /* + * Make sure to pull in the mode here as well in + * case the inode is released without being used. + * This ensures that xfs_inactive() will see that + * the inode is already free and not try to mess + * with the uninitialized part of it. + */ + ip->i_d.di_mode = 0; + /* + * Initialize the per-fork minima and maxima for a new + * inode here. xfs_iformat will do it for old inodes. + */ + ip->i_df.if_ext_max = + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + } + + INIT_LIST_HEAD(&ip->i_reclaim); + + /* + * The inode format changed when we moved the link count and + * made it 32 bits long. If this is an old format inode, + * convert it in memory to look like a new one. 
If it gets + * flushed to disk we will convert back before flushing or + * logging it. We zero out the new projid field and the old link + * count field. We'll handle clearing the pad field (the remains + * of the old uuid field) when we actually convert the inode to + * the new format. We don't change the version number so that we + * can distinguish this from a real new format inode. + */ + if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { + ip->i_d.di_nlink = ip->i_d.di_onlink; + ip->i_d.di_onlink = 0; + ip->i_d.di_projid = 0; + } + + ip->i_delayed_blks = 0; + + /* + * Mark the buffer containing the inode as something to keep + * around for a while. This helps to keep recently accessed + * meta-data in-core longer. + */ + XFS_BUF_SET_REF(bp, XFS_INO_REF); + + /* + * Use xfs_trans_brelse() to release the buffer containing the + * on-disk inode, because it was acquired with xfs_trans_read_buf() + * in xfs_itobp() above. If tp is NULL, this is just a normal + * brelse(). If we're within a transaction, then xfs_trans_brelse() + * will only release the buffer if it is not dirty within the + * transaction. It will be OK to release the buffer in this case, + * because inodes on disk are never destroyed and we will be + * locking the new in-core inode before putting it in the hash + * table where other processes can find it. Thus we don't have + * to worry about the inode being changed just because we released + * the buffer. + */ + xfs_trans_brelse(tp, bp); + *ipp = ip; + return 0; +} + +/* + * Read in extents from a btree-format inode. + * Allocate and fill in if_extents. Real work is done in xfs_bmap.c. + */ +int +xfs_iread_extents( + xfs_trans_t *tp, + xfs_inode_t *ip, + int whichfork) +{ + int error; + xfs_ifork_t *ifp; + size_t size; + + if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { + XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, + ip->i_mount); + return XFS_ERROR(EFSCORRUPTED); + } + size = XFS_IFORK_NEXTENTS(ip, whichfork) * (uint)sizeof(xfs_bmbt_rec_t); + ifp = XFS_IFORK_PTR(ip, whichfork); + /* + * We know that the size is legal (it's checked in iformat_btree) + */ + ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); + ASSERT(ifp->if_u1.if_extents != NULL); + ifp->if_lastex = NULLEXTNUM; + ifp->if_bytes = ifp->if_real_bytes = (int)size; + ifp->if_flags |= XFS_IFEXTENTS; + error = xfs_bmap_read_extents(tp, ip, whichfork); + if (error) { + kmem_free(ifp->if_u1.if_extents, size); + ifp->if_u1.if_extents = NULL; + ifp->if_bytes = ifp->if_real_bytes = 0; + ifp->if_flags &= ~XFS_IFEXTENTS; + return error; + } + xfs_validate_extents((xfs_bmbt_rec_t *)ifp->if_u1.if_extents, + XFS_IFORK_NEXTENTS(ip, whichfork), 0, XFS_EXTFMT_INODE(ip)); + return 0; +} + +/* + * Allocate an inode on disk and return a copy of its in-core version. + * The in-core inode is locked exclusively. Set mode, nlink, and rdev + * appropriately within the inode. The uid and gid for the inode are + * set according to the contents of the given cred structure. + * + * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc() + * has a free inode available, call xfs_iget() + * to obtain the in-core version of the allocated inode. Finally, + * fill in the inode and log its initial contents. In this case, + * ialloc_context would be set to NULL and call_again set to false. + * + * If xfs_dialloc() does not have an available inode, + * it will replenish its supply by doing an allocation. 
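xfs_iread_extents() above also shows the unwind discipline used throughout this file: allocate the extent buffer, flag the fork as populated, then undo both steps if the read fails so the fork is left exactly as it was found. A small stand-alone sketch of that pattern follows; the demo_* names are made up, and a caller-supplied fill callback stands in for xfs_bmap_read_extents().

#include <stdlib.h>

struct demo_fork {
	void	*extents;	/* NULL until extents are read in */
	size_t	bytes;
	int	flags;
#define DEMO_F_EXTENTS	0x1
};

/*
 * Lazily populate fork->extents. On any failure, free what was
 * allocated and clear the flag so callers see the original state.
 */
static int demo_read_extents(struct demo_fork *fork, size_t size,
			     int (*fill)(void *buf, size_t size, void *arg),
			     void *arg)
{
	int error;

	fork->extents = malloc(size);
	if (fork->extents == NULL)
		return -1;
	fork->bytes = size;
	fork->flags |= DEMO_F_EXTENTS;

	error = fill(fork->extents, size, arg);
	if (error) {
		free(fork->extents);
		fork->extents = NULL;
		fork->bytes = 0;
		fork->flags &= ~DEMO_F_EXTENTS;
		return error;
	}
	return 0;
}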
Since we can + * only do one allocation within a transaction without deadlocks, we + * must commit the current transaction before returning the inode itself. + * In this case, therefore, we will set call_again to true and return. + * The caller should then commit the current transaction, start a new + * transaction, and call xfs_ialloc() again to actually get the inode. + * + * To ensure that some other process does not grab the inode that + * was allocated during the first call to xfs_ialloc(), this routine + * also returns the [locked] bp pointing to the head of the freelist + * as ialloc_context. The caller should hold this buffer across + * the commit and pass it back into this routine on the second call. + */ +int +xfs_ialloc( + xfs_trans_t *tp, + xfs_inode_t *pip, + mode_t mode, + nlink_t nlink, + xfs_dev_t rdev, + cred_t *cr, + xfs_prid_t prid, + int okalloc, + xfs_buf_t **ialloc_context, + boolean_t *call_again, + xfs_inode_t **ipp) +{ + xfs_ino_t ino; + xfs_inode_t *ip; + vnode_t *vp; + uint flags; + int error; + + /* + * Call the space management code to pick + * the on-disk inode to be allocated. + */ + ASSERT(pip != NULL); + error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc, + ialloc_context, call_again, &ino); + if (error != 0) { + return error; + } + if (*call_again || ino == NULLFSINO) { + *ipp = NULL; + return 0; + } + ASSERT(*ialloc_context == NULL); + + /* + * Get the in-core inode with the lock held exclusively. + * This is because we're setting fields here we need + * to prevent others from looking at until we're done. + */ + error = xfs_trans_iget(tp->t_mountp, tp, ino, XFS_ILOCK_EXCL, &ip); + if (error != 0) { + return error; + } + ASSERT(ip != NULL); + + vp = XFS_ITOV(ip); + vp->v_type = IFTOVT(mode); + ip->i_d.di_mode = (__uint16_t)mode; + ip->i_d.di_onlink = 0; + ip->i_d.di_nlink = nlink; + ASSERT(ip->i_d.di_nlink == nlink); + ip->i_d.di_uid = current->fsuid; + ip->i_d.di_gid = current->fsgid; + ip->i_d.di_projid = prid; + memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); + + /* + * If the superblock version is up to where we support new format + * inodes and this is currently an old format inode, then change + * the inode version number now. This way we only do the conversion + * here rather than here and in the flush/logging code. + */ + if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) && + ip->i_d.di_version == XFS_DINODE_VERSION_1) { + ip->i_d.di_version = XFS_DINODE_VERSION_2; + /* + * We've already zeroed the old link count, the projid field, + * and the pad field. + */ + } + + /* + * Project ids won't be stored on disk if we are using a version 1 inode. + */ + if ( (prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1)) + xfs_bump_ino_vers2(tp, ip); + + if (XFS_INHERIT_GID(pip, vp->v_vfsp)) { + ip->i_d.di_gid = pip->i_d.di_gid; + if ((pip->i_d.di_mode & ISGID) && (mode & IFMT) == IFDIR) { + ip->i_d.di_mode |= ISGID; + } + } + + /* + * If the group ID of the new file does not match the effective group + * ID or one of the supplementary group IDs, the ISGID bit is cleared + * (and only if the irix_sgid_inherit compatibility variable is set). + */ + if ((irix_sgid_inherit) && + (ip->i_d.di_mode & ISGID) && + (!in_group_p((gid_t)ip->i_d.di_gid))) { + ip->i_d.di_mode &= ~ISGID; + } + + ip->i_d.di_size = 0; + ip->i_d.di_nextents = 0; + ASSERT(ip->i_d.di_nblocks == 0); + xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD); + /* + * di_gen will have been taken care of in xfs_iread. 
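The comment above describes a two-phase protocol: if xfs_dialloc() has to replenish the free-inode pool it cannot also allocate the inode in the same transaction, so call_again is set and the caller must commit, start a new transaction, and call back in. Below is a hedged sketch of what such a caller loop can look like; the demo_* types and functions are invented for illustration and simulate one replenish-then-retry round rather than using the real XFS interfaces.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins; the real code deals with xfs_trans_t and xfs_inode_t. */
struct demo_trans { int id; };
struct demo_inode { int ino; };

/*
 * Stand-in for xfs_ialloc(): the first call only replenishes the free
 * inode pool and sets *call_again; the second call produces an inode.
 */
static int demo_ialloc(struct demo_trans **tpp, int *call_again,
		       struct demo_inode **ipp)
{
	static int replenished;

	(void)tpp;	/* the real code charges the work to *tpp */
	if (!replenished) {
		replenished = 1;
		*call_again = 1;	/* caller must commit and retry */
		*ipp = NULL;
		return 0;
	}
	*call_again = 0;
	*ipp = malloc(sizeof(**ipp));
	if (*ipp == NULL)
		return -1;
	(*ipp)->ino = 128;
	return 0;
}

/* Stand-in for committing the old transaction and starting a new one. */
static int demo_commit_and_restart(struct demo_trans **tpp)
{
	(*tpp)->id++;
	return 0;
}

int main(void)
{
	struct demo_trans t = { .id = 1 };
	struct demo_trans *tp = &t;
	struct demo_inode *ip = NULL;
	int call_again;

	do {
		call_again = 0;
		if (demo_ialloc(&tp, &call_again, &ip))
			return 1;
		if (call_again && demo_commit_and_restart(&tp))
			return 1;
	} while (call_again);

	printf("allocated inode %d in transaction %d\n", ip->ino, t.id);
	free(ip);
	return 0;
}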
+ */ + ip->i_d.di_extsize = 0; + ip->i_d.di_dmevmask = 0; + ip->i_d.di_dmstate = 0; + ip->i_d.di_flags = 0; + flags = XFS_ILOG_CORE; + switch (mode & IFMT) { + case IFIFO: + case IFCHR: + case IFBLK: + case IFSOCK: + ip->i_d.di_format = XFS_DINODE_FMT_DEV; + ip->i_df.if_u2.if_rdev = rdev; + ip->i_df.if_flags = 0; + flags |= XFS_ILOG_DEV; + break; + case IFREG: + case IFDIR: + case IFLNK: + ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; + ip->i_df.if_flags = XFS_IFEXTENTS; + ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0; + ip->i_df.if_u1.if_extents = NULL; + break; + default: + ASSERT(0); + } + /* + * Attribute fork settings for new inode. + */ + ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; + ip->i_d.di_anextents = 0; + + /* + * Log the new values stuffed into the inode. + */ + xfs_trans_log_inode(tp, ip, flags); + + /* now that we have a v_type we can set Linux inode ops (& unlock) */ + VFS_INIT_VNODE(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1); + + *ipp = ip; + return 0; +} + +/* + * Check to make sure that there are no blocks allocated to the + * file beyond the size of the file. We don't check this for + * files with fixed size extents or real time extents, but we + * at least do it for regular files. + */ +#ifdef DEBUG +void +xfs_isize_check( + xfs_mount_t *mp, + xfs_inode_t *ip, + xfs_fsize_t isize) +{ + xfs_fileoff_t map_first; + int nimaps; + xfs_bmbt_irec_t imaps[2]; + + if ((ip->i_d.di_mode & IFMT) != IFREG) + return; + + if ( ip->i_d.di_flags & XFS_DIFLAG_REALTIME ) + return; + + nimaps = 2; + map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); + /* + * The filesystem could be shutting down, so bmapi may return + * an error. + */ + if (xfs_bmapi(NULL, ip, map_first, + (XFS_B_TO_FSB(mp, + (xfs_ufsize_t)XFS_MAX_FILE_OFFSET) - + map_first), + XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps, + NULL)) + return; + ASSERT(nimaps == 1); + ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK); +} +#endif /* DEBUG */ + +/* + * Calculate the last possible buffered byte in a file. This must + * include data that was buffered beyond the EOF by the write code. + * This also needs to deal with overflowing the xfs_fsize_t type + * which can happen for sizes near the limit. + * + * We also need to take into account any blocks beyond the EOF. It + * may be the case that they were buffered by a write which failed. + * In that case the pages will still be in memory, but the inode size + * will never have been updated. + */ +xfs_fsize_t +xfs_file_last_byte( + xfs_inode_t *ip) +{ + xfs_mount_t *mp; + xfs_fsize_t last_byte; + xfs_fileoff_t last_block; + xfs_fileoff_t size_last_block; + int error; + + ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS)); + + mp = ip->i_mount; + /* + * Only check for blocks beyond the EOF if the extents have + * been read in. This eliminates the need for the inode lock, + * and it also saves us from looking when it really isn't + * necessary. 
+ */ + if (ip->i_df.if_flags & XFS_IFEXTENTS) { + error = xfs_bmap_last_offset(NULL, ip, &last_block, + XFS_DATA_FORK); + if (error) { + last_block = 0; + } + } else { + last_block = 0; + } + size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_d.di_size); + last_block = XFS_FILEOFF_MAX(last_block, size_last_block); + + last_byte = XFS_FSB_TO_B(mp, last_block); + if (last_byte < 0) { + return XFS_MAX_FILE_OFFSET; + } + last_byte += (1 << mp->m_writeio_log); + if (last_byte < 0) { + return XFS_MAX_FILE_OFFSET; + } + return last_byte; +} + +#if defined(XFS_RW_TRACE) +STATIC void +xfs_itrunc_trace( + int tag, + xfs_inode_t *ip, + int flag, + xfs_fsize_t new_size, + xfs_off_t toss_start, + xfs_off_t toss_finish) +{ + if (ip->i_rwtrace == NULL) { + return; + } + + ktrace_enter(ip->i_rwtrace, + (void*)((long)tag), + (void*)ip, + (void*)((ip->i_d.di_size >> 32) & 0xffffffff), + (void*)(ip->i_d.di_size & 0xffffffff), + (void*)((long)flag), + (void*)((new_size >> 32) & 0xffffffff), + (void*)(new_size & 0xffffffff), + (void*)((toss_start >> 32) & 0xffffffff), + (void*)(toss_start & 0xffffffff), + (void*)((toss_finish >> 32) & 0xffffffff), + (void*)(toss_finish & 0xffffffff), + (void*)((long)private.p_cpuid), + (void*)0, + (void*)0, + (void*)0, + (void*)0); +} +#else +#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish) +#endif + +/* + * Start the truncation of the file to new_size. The new size + * must be smaller than the current size. This routine will + * clear the buffer and page caches of file data in the removed + * range, and xfs_itruncate_finish() will remove the underlying + * disk blocks. + * + * The inode must have its I/O lock locked EXCLUSIVELY, and it + * must NOT have the inode lock held at all. This is because we're + * calling into the buffer/page cache code and we can't hold the + * inode lock when we do so. + * + * The flags parameter can have either the value XFS_ITRUNC_DEFINITE + * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used + * in the case that the caller is locking things out of order and + * may not be able to call xfs_itruncate_finish() with the inode lock + * held without dropping the I/O lock. If the caller must drop the + * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start() + * must be called again with all the same restrictions as the initial + * call. + */ +void +xfs_itruncate_start( + xfs_inode_t *ip, + uint flags, + xfs_fsize_t new_size) +{ + xfs_fsize_t last_byte; + xfs_off_t toss_start; + xfs_mount_t *mp; + vnode_t *vp; + + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0); + ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size)); + ASSERT((flags == XFS_ITRUNC_DEFINITE) || + (flags == XFS_ITRUNC_MAYBE)); + + mp = ip->i_mount; + vp = XFS_ITOV(ip); + /* + * Call VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES() to get rid of pages and buffers + * overlapping the region being removed. We have to use + * the less efficient VOP_FLUSHINVAL_PAGES() in the case that the + * caller may not be able to finish the truncate without + * dropping the inode's I/O lock. Make sure + * to catch any pages brought in by buffers overlapping + * the EOF by searching out beyond the isize by our + * block size. We round new_size up to a block boundary + * so that we don't toss things on the same block as + * new_size but before it. + * + * Before calling VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES(), make sure to + * call remapf() over the same region if the file is mapped. 
+ * This frees up mapped file references to the pages in the + * given range and for the VOP_FLUSHINVAL_PAGES() case it ensures + * that we get the latest mapped changes flushed out. + */ + toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); + toss_start = XFS_FSB_TO_B(mp, toss_start); + if (toss_start < 0) { + /* + * The place to start tossing is beyond our maximum + * file size, so there is no way that the data extended + * out there. + */ + return; + } + last_byte = xfs_file_last_byte(ip); + xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start, + last_byte); + if (last_byte > toss_start) { + if (flags & XFS_ITRUNC_DEFINITE) { + VOP_TOSS_PAGES(vp, toss_start, -1, FI_REMAPF_LOCKED); + } else { + VOP_FLUSHINVAL_PAGES(vp, toss_start, -1, FI_REMAPF_LOCKED); + } + } + +#ifdef DEBUG + if (new_size == 0) { + ASSERT(VN_CACHED(vp) == 0); + } +#endif +} + +/* + * Shrink the file to the given new_size. The new + * size must be smaller than the current size. + * This will free up the underlying blocks + * in the removed range after a call to xfs_itruncate_start() + * or xfs_atruncate_start(). + * + * The transaction passed to this routine must have made + * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES. + * This routine may commit the given transaction and + * start new ones, so make sure everything involved in + * the transaction is tidy before calling here. + * Some transaction will be returned to the caller to be + * committed. The incoming transaction must already include + * the inode, and both inode locks must be held exclusively. + * The inode must also be "held" within the transaction. On + * return the inode will be "held" within the returned transaction. + * This routine does NOT require any disk space to be reserved + * for it within the transaction. + * + * The fork parameter must be either xfs_attr_fork or xfs_data_fork, + * and it indicates the fork which is to be truncated. For the + * attribute fork we only support truncation to size 0. + * + * We use the sync parameter to indicate whether or not the first + * transaction we perform might have to be synchronous. For the attr fork, + * it needs to be so if the unlink of the inode is not yet known to be + * permanent in the log. This keeps us from freeing and reusing the + * blocks of the attribute fork before the unlink of the inode becomes + * permanent. + * + * For the data fork, we normally have to run synchronously if we're + * being called out of the inactive path or we're being called + * out of the create path where we're truncating an existing file. + * Either way, the truncate needs to be sync so blocks don't reappear + * in the file with altered data in case of a crash. wsync filesystems + * can run the first case async because anything that shrinks the inode + * has to run sync so by the time we're called here from inactive, the + * inode size is permanently set to 0. + * + * Calls from the truncate path always need to be sync unless we're + * in a wsync filesystem and the file has already been unlinked. + * + * The caller is responsible for correctly setting the sync parameter. + * It gets too hard for us to guess here which path we're being called + * out of just based on inode state. 
+ */ +int +xfs_itruncate_finish( + xfs_trans_t **tp, + xfs_inode_t *ip, + xfs_fsize_t new_size, + int fork, + int sync) +{ + xfs_fsblock_t first_block; + xfs_fileoff_t first_unmap_block; + xfs_fileoff_t last_block; + xfs_filblks_t unmap_len=0; + xfs_mount_t *mp; + xfs_trans_t *ntp; + int done; + int committed; + xfs_bmap_free_t free_list; + int error; + + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); + ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size)); + ASSERT(*tp != NULL); + ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); + ASSERT(ip->i_transp == *tp); + ASSERT(ip->i_itemp != NULL); + ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD); + + + ntp = *tp; + mp = (ntp)->t_mountp; + ASSERT(! XFS_NOT_DQATTACHED(mp, ip)); + + /* + * We only support truncating the entire attribute fork. + */ + if (fork == XFS_ATTR_FORK) { + new_size = 0LL; + } + first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); + xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0); + /* + * The first thing we do is set the size to new_size permanently + * on disk. This way we don't have to worry about anyone ever + * being able to look at the data being freed even in the face + * of a crash. What we're getting around here is the case where + * we free a block, it is allocated to another file, it is written + * to, and then we crash. If the new data gets written to the + * file but the log buffers containing the free and reallocation + * don't, then we'd end up with garbage in the blocks being freed. + * As long as we make the new_size permanent before actually + * freeing any blocks it doesn't matter if they get writtten to. + * + * The callers must signal into us whether or not the size + * setting here must be synchronous. There are a few cases + * where it doesn't have to be synchronous. Those cases + * occur if the file is unlinked and we know the unlink is + * permanent or if the blocks being truncated are guaranteed + * to be beyond the inode eof (regardless of the link count) + * and the eof value is permanent. Both of these cases occur + * only on wsync-mounted filesystems. In those cases, we're + * guaranteed that no user will ever see the data in the blocks + * that are being truncated so the truncate can run async. + * In the free beyond eof case, the file may wind up with + * more blocks allocated to it than it needs if we crash + * and that won't get fixed until the next time the file + * is re-opened and closed but that's ok as that shouldn't + * be too many blocks. + * + * However, we can't just make all wsync xactions run async + * because there's one call out of the create path that needs + * to run sync where it's truncating an existing file to size + * 0 whose size is > 0. + * + * It's probably possible to come up with a test in this + * routine that would correctly distinguish all the above + * cases from the values of the function parameters and the + * inode state but for sanity's sake, I've decided to let the + * layers above just tell us. It's simpler to correctly figure + * out in the layer above exactly under what conditions we + * can run async and I think it's easier for others read and + * follow the logic in case something has to be changed. + * cscope is your friend -- rcc. + * + * The attribute fork is much simpler. + * + * For the attribute fork we allow the caller to tell us whether + * the unlink of the inode that led to this call is yet permanent + * in the on disk log. 
If it is not and we will be freeing extents + * in this inode then we make the first transaction synchronous + * to make sure that the unlink is permanent by the time we free + * the blocks. + */ + if (fork == XFS_DATA_FORK) { + if (ip->i_d.di_nextents > 0) { + ip->i_d.di_size = new_size; + xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); + } + } else if (sync) { + ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC)); + if (ip->i_d.di_anextents > 0) + xfs_trans_set_sync(ntp); + } + ASSERT(fork == XFS_DATA_FORK || + (fork == XFS_ATTR_FORK && + ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) || + (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC))))); + + /* + * Since it is possible for space to become allocated beyond + * the end of the file (in a crash where the space is allocated + * but the inode size is not yet updated), simply remove any + * blocks which show up between the new EOF and the maximum + * possible file size. If the first block to be removed is + * beyond the maximum file size (ie it is the same as last_block), + * then there is nothing to do. + */ + last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAX_FILE_OFFSET); + ASSERT(first_unmap_block <= last_block); + done = 0; + if (last_block == first_unmap_block) { + done = 1; + } else { + unmap_len = last_block - first_unmap_block + 1; + } + while (!done) { + /* + * Free up up to XFS_ITRUNC_MAX_EXTENTS. xfs_bunmapi() + * will tell us whether it freed the entire range or + * not. If this is a synchronous mount (wsync), + * then we can tell bunmapi to keep all the + * transactions asynchronous since the unlink + * transaction that made this inode inactive has + * already hit the disk. There's no danger of + * the freed blocks being reused, there being a + * crash, and the reused blocks suddenly reappearing + * in this file with garbage in them once recovery + * runs. + */ + XFS_BMAP_INIT(&free_list, &first_block); + error = xfs_bunmapi(ntp, ip, first_unmap_block, + unmap_len, + XFS_BMAPI_AFLAG(fork) | + (sync ? 0 : XFS_BMAPI_ASYNC), + XFS_ITRUNC_MAX_EXTENTS, + &first_block, &free_list, &done); + if (error) { + /* + * If the bunmapi call encounters an error, + * return to the caller where the transaction + * can be properly aborted. We just need to + * make sure we're not holding any resources + * that we were not when we came in. + */ + xfs_bmap_cancel(&free_list); + return error; + } + + /* + * Duplicate the transaction that has the permanent + * reservation and commit the old transaction. + */ + error = xfs_bmap_finish(tp, &free_list, first_block, + &committed); + ntp = *tp; + if (error) { + /* + * If the bmap finish call encounters an error, + * return to the caller where the transaction + * can be properly aborted. We just need to + * make sure we're not holding any resources + * that we were not when we came in. + * + * Aborting from this point might lose some + * blocks in the file system, but oh well. + */ + xfs_bmap_cancel(&free_list); + if (committed) { + /* + * If the passed in transaction committed + * in xfs_bmap_finish(), then we want to + * add the inode to this one before returning. + * This keeps things simple for the higher + * level code, because it always knows that + * the inode is locked and held in the + * transaction that returns to it whether + * errors occur or not. We don't mark the + * inode dirty so that this transaction can + * be easily aborted if possible. 
+ */ + xfs_trans_ijoin(ntp, ip, + XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(ntp, ip); + } + return error; + } + + if (committed) { + /* + * The first xact was committed, + * so add the inode to the new one. + * Mark it dirty so it will be logged + * and moved forward in the log as + * part of every commit. + */ + xfs_trans_ijoin(ntp, ip, + XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(ntp, ip); + xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); + } + ntp = xfs_trans_dup(ntp); + (void) xfs_trans_commit(*tp, 0, NULL); + *tp = ntp; + error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + /* + * Add the inode being truncated to the next chained + * transaction. + */ + xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(ntp, ip); + if (error) + return (error); + } + /* + * Only update the size in the case of the data fork, but + * always re-log the inode so that our permanent transaction + * can keep on rolling it forward in the log. + */ + if (fork == XFS_DATA_FORK) { + xfs_isize_check(mp, ip, new_size); + ip->i_d.di_size = new_size; + } + xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); + ASSERT((new_size != 0) || + (fork == XFS_ATTR_FORK) || + (ip->i_delayed_blks == 0)); + ASSERT((new_size != 0) || + (fork == XFS_ATTR_FORK) || + (ip->i_d.di_nextents == 0)); + xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); + return 0; +} + + +/* + * xfs_igrow_start + * + * Do the first part of growing a file: zero any data in the last + * block that is beyond the old EOF. We need to do this before + * the inode is joined to the transaction to modify the i_size. + * That way we can drop the inode lock and call into the buffer + * cache to get the buffer mapping the EOF. + */ +int +xfs_igrow_start( + xfs_inode_t *ip, + xfs_fsize_t new_size, + cred_t *credp) +{ + xfs_fsize_t isize; + int error; + + ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); + ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); + ASSERT(new_size > ip->i_d.di_size); + + error = 0; + isize = ip->i_d.di_size; + /* + * Zero any pages that may have been created by + * xfs_write_file() beyond the end of the file + * and any blocks between the old and new file sizes. + */ + error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, isize, + new_size); + return error; +} + +/* + * xfs_igrow_finish + * + * This routine is called to extend the size of a file. + * The inode must have both the iolock and the ilock locked + * for update and it must be a part of the current transaction. + * The xfs_igrow_start() function must have been called previously. + * If the change_flag is not zero, the inode change timestamp will + * be updated. + */ +void +xfs_igrow_finish( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_fsize_t new_size, + int change_flag) +{ + ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); + ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); + ASSERT(ip->i_transp == tp); + ASSERT(new_size > ip->i_d.di_size); + + /* + * Update the file size. Update the inode change timestamp + * if change_flag set. + */ + ip->i_d.di_size = new_size; + if (change_flag) + xfs_ichgtime(ip, XFS_ICHGTIME_CHG); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + +} + + +/* + * This is called when the inode's link count goes to 0. + * We place the on-disk inode on a list in the AGI. It + * will be pulled from this list when the inode is freed. 
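The loop above "rolls" a transaction that holds a permanent log reservation: duplicate it, commit the old one, reserve space in the duplicate, and re-join the inode so the caller always gets it back locked and attached. The toy, self-contained illustration below only mimics the shape of xfs_trans_dup()/xfs_trans_commit()/xfs_trans_reserve()/xfs_trans_ijoin(); the toy_* types are not the real API.

#include <stdio.h>

struct toy_trans { int seq; };
struct toy_inode { int joined_to; };

static void toy_join(struct toy_trans *tp, struct toy_inode *ip)
{
	ip->joined_to = tp->seq;	/* plays the role of ijoin + ihold */
}

/*
 * Roll to a fresh transaction: duplicate, commit the old one, reserve
 * in the new one, and re-join the inode so it stays attached throughout.
 */
static int toy_roll(struct toy_trans **tpp, struct toy_inode *ip)
{
	static struct toy_trans next;

	next.seq = (*tpp)->seq + 1;			/* "dup" */
	printf("commit transaction %d\n", (*tpp)->seq);	/* "commit" */
	*tpp = &next;
	/* a real reserve step would be error-checked here */
	toy_join(*tpp, ip);
	return 0;
}

int main(void)
{
	struct toy_trans first = { .seq = 1 };
	struct toy_trans *tp = &first;
	struct toy_inode inode = { .joined_to = 1 };
	int chunks_left = 3;	/* stands in for "extents still to unmap" */

	while (chunks_left-- > 0) {
		printf("unmap one chunk in transaction %d\n", tp->seq);
		if (toy_roll(&tp, &inode))
			return 1;
	}
	printf("inode is joined to transaction %d\n", inode.joined_to);
	return 0;
}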
+ */ +int +xfs_iunlink( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + xfs_mount_t *mp; + xfs_agi_t *agi; + xfs_dinode_t *dip; + xfs_buf_t *agibp; + xfs_buf_t *ibp; + xfs_agnumber_t agno; + xfs_daddr_t agdaddr; + xfs_agino_t agino; + short bucket_index; + int offset; + int error; + int agi_ok; + + ASSERT(ip->i_d.di_nlink == 0); + ASSERT(ip->i_d.di_mode != 0); + ASSERT(ip->i_transp == tp); + + mp = tp->t_mountp; + + agno = XFS_INO_TO_AGNO(mp, ip->i_ino); + agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); + + /* + * Get the agi buffer first. It ensures lock ordering + * on the list. + */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, + XFS_FSS_TO_BB(mp, 1), 0, &agibp); + if (error) { + return error; + } + /* + * Validate the magic number of the agi block. + */ + agi = XFS_BUF_TO_AGI(agibp); + agi_ok = + INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && + XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK, + XFS_RANDOM_IUNLINK))) { + XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi); + xfs_trans_brelse(tp, agibp); + return XFS_ERROR(EFSCORRUPTED); + } + /* + * Get the index into the agi hash table for the + * list this inode will go on. + */ + agino = XFS_INO_TO_AGINO(mp, ip->i_ino); + ASSERT(agino != 0); + bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; + ASSERT(!INT_ISZERO(agi->agi_unlinked[bucket_index], ARCH_CONVERT)); + ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != agino); + + if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO) { + /* + * There is already another inode in the bucket we need + * to add ourselves to. Add us at the front of the list. + * Here we put the head pointer into our next pointer, + * and then we fall through to point the head at us. + */ + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + if (error) { + return error; + } + ASSERT(INT_GET(dip->di_next_unlinked, ARCH_CONVERT) == NULLAGINO); + ASSERT(!INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT)); + /* both on-disk, don't endian flip twice */ + dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; + offset = ip->i_boffset + + offsetof(xfs_dinode_t, di_next_unlinked); + xfs_trans_inode_buf(tp, ibp); + xfs_trans_log_buf(tp, ibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + xfs_inobp_check(mp, ibp); + } + + /* + * Point the bucket head pointer at the inode being inserted. + */ + ASSERT(agino != 0); + INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, agino); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket_index); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + return 0; +} + +/* + * Pull the on-disk inode from the AGI unlinked list. + */ +STATIC int +xfs_iunlink_remove( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + xfs_ino_t next_ino; + xfs_mount_t *mp; + xfs_agi_t *agi; + xfs_dinode_t *dip; + xfs_buf_t *agibp; + xfs_buf_t *ibp; + xfs_agnumber_t agno; + xfs_daddr_t agdaddr; + xfs_agino_t agino; + xfs_agino_t next_agino; + xfs_buf_t *last_ibp; + xfs_dinode_t *last_dip; + short bucket_index; + int offset, last_offset; + int error; + int agi_ok; + + /* + * First pull the on-disk inode from the AGI unlinked list. + */ + mp = tp->t_mountp; + + agno = XFS_INO_TO_AGNO(mp, ip->i_ino); + agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); + + /* + * Get the agi buffer first. It ensures lock ordering + * on the list. 
+ */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, + XFS_FSS_TO_BB(mp, 1), 0, &agibp); + if (error) { + cmn_err(CE_WARN, + "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + return error; + } + /* + * Validate the magic number of the agi block. + */ + agi = XFS_BUF_TO_AGI(agibp); + agi_ok = + INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && + XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT)); + if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE, + XFS_RANDOM_IUNLINK_REMOVE))) { + XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW, + mp, agi); + xfs_trans_brelse(tp, agibp); + cmn_err(CE_WARN, + "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.", + mp->m_fsname); + return XFS_ERROR(EFSCORRUPTED); + } + /* + * Get the index into the agi hash table for the + * list this inode will go on. + */ + agino = XFS_INO_TO_AGINO(mp, ip->i_ino); + ASSERT(agino != 0); + bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; + ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO); + ASSERT(!INT_ISZERO(agi->agi_unlinked[bucket_index], ARCH_CONVERT)); + + if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) == agino) { + /* + * We're at the head of the list. Get the inode's + * on-disk buffer to see if there is anyone after us + * on the list. Only modify our next pointer if it + * is not already NULLAGINO. This saves us the overhead + * of dealing with the buffer when there is no need to + * change it. + */ + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + if (error) { + cmn_err(CE_WARN, + "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + return error; + } + next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT); + ASSERT(next_agino != 0); + if (next_agino != NULLAGINO) { + INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO); + offset = ip->i_boffset + + offsetof(xfs_dinode_t, di_next_unlinked); + xfs_trans_inode_buf(tp, ibp); + xfs_trans_log_buf(tp, ibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + xfs_inobp_check(mp, ibp); + } else { + xfs_trans_brelse(tp, ibp); + } + /* + * Point the bucket head pointer at the next inode. + */ + ASSERT(next_agino != 0); + ASSERT(next_agino != agino); + INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, next_agino); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket_index); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + } else { + /* + * We need to search the list for the inode being freed. + */ + next_agino = INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT); + last_ibp = NULL; + while (next_agino != agino) { + /* + * If the last inode wasn't the one pointing to + * us, then release its buffer since we're not + * going to do anything with it. + */ + if (last_ibp != NULL) { + xfs_trans_brelse(tp, last_ibp); + } + next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); + error = xfs_inotobp(mp, tp, next_ino, &last_dip, + &last_ibp, &last_offset); + if (error) { + cmn_err(CE_WARN, + "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + return error; + } + next_agino = INT_GET(last_dip->di_next_unlinked, ARCH_CONVERT); + ASSERT(next_agino != NULLAGINO); + ASSERT(next_agino != 0); + } + /* + * Now last_ibp points to the buffer previous to us on + * the unlinked list. Pull us from the list. 
+ */ + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + if (error) { + cmn_err(CE_WARN, + "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", + error, mp->m_fsname); + return error; + } + next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT); + ASSERT(next_agino != 0); + ASSERT(next_agino != agino); + if (next_agino != NULLAGINO) { + INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO); + offset = ip->i_boffset + + offsetof(xfs_dinode_t, di_next_unlinked); + xfs_trans_inode_buf(tp, ibp); + xfs_trans_log_buf(tp, ibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + xfs_inobp_check(mp, ibp); + } else { + xfs_trans_brelse(tp, ibp); + } + /* + * Point the previous inode on the list to the next inode. + */ + INT_SET(last_dip->di_next_unlinked, ARCH_CONVERT, next_agino); + ASSERT(next_agino != 0); + offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked); + xfs_trans_inode_buf(tp, last_ibp); + xfs_trans_log_buf(tp, last_ibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + xfs_inobp_check(mp, last_ibp); + } + return 0; +} + +/* + * This is called to return an inode to the inode free list. + * The inode should already be truncated to 0 length and have + * no pages associated with it. This routine also assumes that + * the inode is already a part of the transaction. + * + * The on-disk copy of the inode will have been added to the list + * of unlinked inodes in the AGI. We need to remove the inode from + * that list atomically with respect to freeing it here. + */ +int +xfs_ifree( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + int error; + + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(ip->i_transp == tp); + ASSERT(ip->i_d.di_nlink == 0); + ASSERT(ip->i_d.di_nextents == 0); + ASSERT(ip->i_d.di_anextents == 0); + ASSERT((ip->i_d.di_size == 0) || + ((ip->i_d.di_mode & IFMT) != IFREG)); + ASSERT(ip->i_d.di_nblocks == 0); + + /* + * Pull the on-disk inode from the AGI unlinked list. + */ + error = xfs_iunlink_remove(tp, ip); + if (error != 0) { + return error; + } + + error = xfs_difree(tp, ip->i_ino); + if (error != 0) { + return error; + } + ip->i_d.di_mode = 0; /* mark incore inode as free */ + ip->i_d.di_flags = 0; + ip->i_d.di_dmevmask = 0; + ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ + ip->i_df.if_ext_max = + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); + ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; + ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; + + /* + * Bump the generation count so no one will be confused + * by reincarnations of this inode. + */ + ip->i_d.di_gen++; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + return 0; +} + +/* + * Reallocate the space for if_broot based on the number of records + * being added or deleted as indicated in rec_diff. Move the records + * and pointers in if_broot to fit the new size. When shrinking this + * will eliminate holes between the records and pointers created by + * the caller. When growing this will create holes to be filled in + * by the caller. + * + * The caller must not request to add more records than would fit in + * the on-disk inode root. If the if_broot is currently NULL, then + * if we adding records one will be allocated. The caller must also + * not request that the number of records go below zero, although + * it can go to zero. + * + * ip -- the inode whose if_broot area is changing + * ext_diff -- the change in the number of records, positive or negative, + * requested for the if_broot array. 
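xfs_iunlink() and xfs_iunlink_remove() above maintain per-bucket singly linked lists whose heads live in the AGI and whose next pointers live in the on-disk inodes: insertion is always at the head, and removal either updates the head or walks the bucket to find the predecessor. A self-contained model of the same list mechanics, using plain arrays in place of the AGI buffer and di_next_unlinked fields (NBUCKETS and the inode numbers are arbitrary):

#include <stdio.h>

#define NBUCKETS	4
#define NIL		(-1)

/* heads[] plays the role of agi_unlinked[], next_ino[] of di_next_unlinked. */
static int heads[NBUCKETS] = { NIL, NIL, NIL, NIL };
static int next_ino[64];

static void unlink_insert(int ino)
{
	int b = ino % NBUCKETS;

	next_ino[ino] = heads[b];	/* point our next at the old head */
	heads[b] = ino;			/* then point the head at us */
}

static void unlink_remove(int ino)
{
	int b = ino % NBUCKETS;
	int prev;

	if (heads[b] == ino) {			/* we are the head */
		heads[b] = next_ino[ino];
		return;
	}
	for (prev = heads[b]; next_ino[prev] != ino; prev = next_ino[prev])
		;				/* walk to our predecessor */
	next_ino[prev] = next_ino[ino];		/* splice us out */
}

int main(void)
{
	unlink_insert(5);
	unlink_insert(9);	/* same bucket as 5, becomes the new head */
	unlink_remove(5);
	printf("bucket 1 head: %d, its next: %d\n",
	       heads[1], heads[1] == NIL ? NIL : next_ino[heads[1]]);
	return 0;
}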
+ */ +void +xfs_iroot_realloc( + xfs_inode_t *ip, + int rec_diff, + int whichfork) +{ + int cur_max; + xfs_ifork_t *ifp; + xfs_bmbt_block_t *new_broot; + int new_max; + size_t new_size; + char *np; + char *op; + + /* + * Handle the degenerate case quietly. + */ + if (rec_diff == 0) { + return; + } + + ifp = XFS_IFORK_PTR(ip, whichfork); + if (rec_diff > 0) { + /* + * If there wasn't any memory allocated before, just + * allocate it now and get out. + */ + if (ifp->if_broot_bytes == 0) { + new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); + ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size, + KM_SLEEP); + ifp->if_broot_bytes = (int)new_size; + return; + } + + /* + * If there is already an existing if_broot, then we need + * to realloc() it and shift the pointers to their new + * location. The records don't change location because + * they are kept butted up against the btree block header. + */ + cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); + new_max = cur_max + rec_diff; + new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); + ifp->if_broot = (xfs_bmbt_block_t *) + kmem_realloc(ifp->if_broot, + new_size, + (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ + KM_SLEEP); + op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, + ifp->if_broot_bytes); + np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, + (int)new_size); + ifp->if_broot_bytes = (int)new_size; + ASSERT(ifp->if_broot_bytes <= + XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); + memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); + return; + } + + /* + * rec_diff is less than 0. In this case, we are shrinking the + * if_broot buffer. It must already exist. If we go to zero + * records, just get rid of the root and clear the status bit. + */ + ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); + cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); + new_max = cur_max + rec_diff; + ASSERT(new_max >= 0); + if (new_max > 0) + new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); + else + new_size = 0; + if (new_size > 0) { + new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP); + /* + * First copy over the btree block header. + */ + memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t)); + } else { + new_broot = NULL; + ifp->if_flags &= ~XFS_IFBROOT; + } + + /* + * Only copy the records and pointers if there are any. + */ + if (new_max > 0) { + /* + * First copy the records. + */ + op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1, + ifp->if_broot_bytes); + np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1, + (int)new_size); + memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); + + /* + * Then copy the pointers. + */ + op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, + ifp->if_broot_bytes); + np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1, + (int)new_size); + memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); + } + kmem_free(ifp->if_broot, ifp->if_broot_bytes); + ifp->if_broot = new_broot; + ifp->if_broot_bytes = (int)new_size; + ASSERT(ifp->if_broot_bytes <= + XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); + return; +} + + +/* + * This is called when the amount of space needed for if_extents + * is increased or decreased. The change in size is indicated by + * the number of extents that need to be added or deleted in the + * ext_diff parameter. + * + * If the amount of space needed has decreased below the size of the + * inline buffer, then switch to using the inline buffer. 
Otherwise, + * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer + * to what is needed. + * + * ip -- the inode whose if_extents area is changing + * ext_diff -- the change in the number of extents, positive or negative, + * requested for the if_extents array. + */ +void +xfs_iext_realloc( + xfs_inode_t *ip, + int ext_diff, + int whichfork) +{ + int byte_diff; + xfs_ifork_t *ifp; + int new_size; + uint rnew_size; + + if (ext_diff == 0) { + return; + } + + ifp = XFS_IFORK_PTR(ip, whichfork); + byte_diff = ext_diff * (uint)sizeof(xfs_bmbt_rec_t); + new_size = (int)ifp->if_bytes + byte_diff; + ASSERT(new_size >= 0); + + if (new_size == 0) { + if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { + ASSERT(ifp->if_real_bytes != 0); + kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); + } + ifp->if_u1.if_extents = NULL; + rnew_size = 0; + } else if (new_size <= sizeof(ifp->if_u2.if_inline_ext)) { + /* + * If the valid extents can fit in if_inline_ext, + * copy them from the malloc'd vector and free it. + */ + if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { + /* + * For now, empty files are format EXTENTS, + * so the if_extents pointer is null. + */ + if (ifp->if_u1.if_extents) { + memcpy(ifp->if_u2.if_inline_ext, + ifp->if_u1.if_extents, new_size); + kmem_free(ifp->if_u1.if_extents, + ifp->if_real_bytes); + } + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; + } + rnew_size = 0; + } else { + rnew_size = new_size; + if ((rnew_size & (rnew_size - 1)) != 0) + rnew_size = xfs_iroundup(rnew_size); + /* + * Stuck with malloc/realloc. + */ + if (ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_alloc(rnew_size, KM_SLEEP); + memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, + sizeof(ifp->if_u2.if_inline_ext)); + } else if (rnew_size != ifp->if_real_bytes) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_realloc(ifp->if_u1.if_extents, + rnew_size, + ifp->if_real_bytes, + KM_NOFS); + } + } + ifp->if_real_bytes = rnew_size; + ifp->if_bytes = new_size; +} + + +/* + * This is called when the amount of space needed for if_data + * is increased or decreased. The change in size is indicated by + * the number of bytes that need to be added or deleted in the + * byte_diff parameter. + * + * If the amount of space needed has decreased below the size of the + * inline buffer, then switch to using the inline buffer. Otherwise, + * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer + * to what is needed. + * + * ip -- the inode whose if_data area is changing + * byte_diff -- the change in the number of bytes, positive or negative, + * requested for the if_data array. + */ +void +xfs_idata_realloc( + xfs_inode_t *ip, + int byte_diff, + int whichfork) +{ + xfs_ifork_t *ifp; + int new_size; + int real_size; + + if (byte_diff == 0) { + return; + } + + ifp = XFS_IFORK_PTR(ip, whichfork); + new_size = (int)ifp->if_bytes + byte_diff; + ASSERT(new_size >= 0); + + if (new_size == 0) { + if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { + kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); + } + ifp->if_u1.if_data = NULL; + real_size = 0; + } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) { + /* + * If the valid extents/data can fit in if_inline_ext/data, + * copy them from the malloc'd vector and free it. 
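xfs_iext_realloc() above switches between a small inline area and a heap buffer, rounding heap sizes up to a power of two (xfs_iroundup) so that growing one extent at a time does not force a realloc per extent. Below is a compact stand-alone sketch of that strategy; demo_fork, INLINE_BYTES and demo_resize() are invented names, and kmem_alloc/kmem_realloc are replaced by malloc/realloc.

#include <stdlib.h>
#include <string.h>

#define INLINE_BYTES	32

struct demo_fork {
	unsigned char	inline_buf[INLINE_BYTES];
	void		*data;		/* points at inline_buf or a heap block */
	size_t		bytes;		/* valid bytes */
	size_t		real_bytes;	/* allocated heap bytes, 0 if inline */
};

static void demo_init(struct demo_fork *f)
{
	f->data = f->inline_buf;
	f->bytes = 0;
	f->real_bytes = 0;
}

/* Round up to the next power of two, like xfs_iroundup(). */
static size_t roundup_pow2(size_t n)
{
	size_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

/* Grow or shrink the fork data, switching between inline and heap storage. */
static int demo_resize(struct demo_fork *f, size_t new_size)
{
	if (new_size <= INLINE_BYTES) {
		if (f->real_bytes) {		/* shrink back into the inline area */
			memcpy(f->inline_buf, f->data, new_size);
			free(f->data);
			f->real_bytes = 0;
		}
		f->data = f->inline_buf;
	} else {
		size_t want = roundup_pow2(new_size);

		if (f->real_bytes == 0) {	/* first spill out of the inline area */
			void *p = malloc(want);

			if (p == NULL)
				return -1;
			memcpy(p, f->inline_buf, f->bytes);
			f->data = p;
			f->real_bytes = want;
		} else if (want != f->real_bytes) {
			void *p = realloc(f->data, want);

			if (p == NULL)
				return -1;
			f->data = p;
			f->real_bytes = want;
		}
	}
	f->bytes = new_size;
	return 0;
}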
+ */ + if (ifp->if_u1.if_data == NULL) { + ifp->if_u1.if_data = ifp->if_u2.if_inline_data; + } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { + ASSERT(ifp->if_real_bytes != 0); + memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data, + new_size); + kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); + ifp->if_u1.if_data = ifp->if_u2.if_inline_data; + } + real_size = 0; + } else { + /* + * Stuck with malloc/realloc. + * For inline data, the underlying buffer must be + * a multiple of 4 bytes in size so that it can be + * logged and stay on word boundaries. We enforce + * that here. + */ + real_size = roundup(new_size, 4); + if (ifp->if_u1.if_data == NULL) { + ASSERT(ifp->if_real_bytes == 0); + ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); + } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { + /* + * Only do the realloc if the underlying size + * is really changing. + */ + if (ifp->if_real_bytes != real_size) { + ifp->if_u1.if_data = + kmem_realloc(ifp->if_u1.if_data, + real_size, + ifp->if_real_bytes, + KM_SLEEP); + } + } else { + ASSERT(ifp->if_real_bytes == 0); + ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); + memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, + ifp->if_bytes); + } + } + ifp->if_real_bytes = real_size; + ifp->if_bytes = new_size; + ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); +} + + + + +/* + * Map inode to disk block and offset. + * + * mp -- the mount point structure for the current file system + * tp -- the current transaction + * ino -- the inode number of the inode to be located + * imap -- this structure is filled in with the information necessary + * to retrieve the given inode from disk + * flags -- flags to pass to xfs_dilocate indicating whether or not + * lookups in the inode btree were OK or not + */ +int +xfs_imap( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + xfs_imap_t *imap, + uint flags) +{ + xfs_fsblock_t fsbno; + int len; + int off; + int error; + + fsbno = imap->im_blkno ? + XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK; + error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags); + if (error != 0) { + return error; + } + imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno); + imap->im_len = XFS_FSB_TO_BB(mp, len); + imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno); + imap->im_ioffset = (ushort)off; + imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog); + return 0; +} + +void +xfs_idestroy_fork( + xfs_inode_t *ip, + int whichfork) +{ + xfs_ifork_t *ifp; + + ifp = XFS_IFORK_PTR(ip, whichfork); + if (ifp->if_broot != NULL) { + kmem_free(ifp->if_broot, ifp->if_broot_bytes); + ifp->if_broot = NULL; + } + + /* + * If the format is local, then we can't have an extents + * array so just look for an inline data array. If we're + * not local then we may or may not have an extents list, + * so check and free it up if we do. 
+ */
+	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
+		    (ifp->if_u1.if_data != NULL)) {
+			ASSERT(ifp->if_real_bytes != 0);
+			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
+			ifp->if_u1.if_data = NULL;
+			ifp->if_real_bytes = 0;
+		}
+	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
+		   (ifp->if_u1.if_extents != NULL) &&
+		   (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)) {
+		ASSERT(ifp->if_real_bytes != 0);
+		kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
+		ifp->if_u1.if_extents = NULL;
+		ifp->if_real_bytes = 0;
+	}
+	ASSERT(ifp->if_u1.if_extents == NULL ||
+	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
+	ASSERT(ifp->if_real_bytes == 0);
+	if (whichfork == XFS_ATTR_FORK) {
+		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+		ip->i_afp = NULL;
+	}
+}
+
+/*
+ * This is called to free all the memory associated with an inode.
+ * It must free the inode itself and any buffers allocated for
+ * if_extents/if_data and if_broot. It must also free the lock
+ * associated with the inode.
+ */
+void
+xfs_idestroy(
+	xfs_inode_t	*ip)
+{
+
+	switch (ip->i_d.di_mode & IFMT) {
+	case IFREG:
+	case IFDIR:
+	case IFLNK:
+		xfs_idestroy_fork(ip, XFS_DATA_FORK);
+		break;
+	}
+	if (ip->i_afp)
+		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+#ifdef NOTYET
+	if (ip->i_range_lock.r_sleep != NULL) {
+		freesema(ip->i_range_lock.r_sleep);
+		kmem_free(ip->i_range_lock.r_sleep, sizeof(sema_t));
+	}
+#endif /* NOTYET */
+	mrfree(&ip->i_lock);
+	mrfree(&ip->i_iolock);
+#ifdef NOTYET
+	mutex_destroy(&ip->i_range_lock.r_spinlock);
+#endif /* NOTYET */
+	freesema(&ip->i_flock);
+#ifdef XFS_BMAP_TRACE
+	ktrace_free(ip->i_xtrace);
+#endif
+#ifdef XFS_BMBT_TRACE
+	ktrace_free(ip->i_btrace);
+#endif
+#ifdef XFS_RW_TRACE
+	ktrace_free(ip->i_rwtrace);
+#endif
+#ifdef XFS_STRAT_TRACE
+	ktrace_free(ip->i_strat_trace);
+#endif
+#ifdef XFS_ILOCK_TRACE
+	ktrace_free(ip->i_lock_trace);
+#endif
+#ifdef XFS_DIR2_TRACE
+	ktrace_free(ip->i_dir_trace);
+#endif
+	if (ip->i_itemp) {
+		/* XXXdpd should be able to assert this but shutdown
+		 * is leaving the AIL behind. */
+		ASSERT(((ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL) == 0) ||
+		       XFS_FORCED_SHUTDOWN(ip->i_mount));
+		xfs_inode_item_destroy(ip);
+	}
+	kmem_zone_free(xfs_inode_zone, ip);
+}
+
+
+/*
+ * Increment the pin count of the given inode.
+ * This value is protected by ipinlock spinlock in the mount structure.
+ */
+void
+xfs_ipin(
+	xfs_inode_t	*ip)
+{
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+
+	atomic_inc(&ip->i_pincount);
+}
+
+/*
+ * Decrement the pin count of the given inode, and wake up
+ * anyone in xfs_iunpin_wait() if the count goes to 0. The
+ * inode must have been previously pinned with a call to xfs_ipin().
+ */
+void
+xfs_iunpin(
+	xfs_inode_t	*ip)
+{
+	ASSERT(atomic_read(&ip->i_pincount) > 0);
+
+	if (atomic_dec_and_test(&ip->i_pincount)) {
+		vnode_t	*vp = XFS_ITOV_NULL(ip);
+
+		/* make sync come back and flush this inode */
+		if (vp) {
+			struct inode	*inode = LINVFS_GET_IP(vp);
+
+			mark_inode_dirty_sync(inode);
+		}
+
+		wake_up(&ip->i_ipin_wait);
+	}
+}
+
+/*
+ * This is called to wait for the given inode to be unpinned.
+ * It will sleep until this happens. The caller must have the
+ * inode locked in at least shared mode so that the buffer cannot
+ * be subsequently pinned once someone is waiting for it to be
+ * unpinned.
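xfs_ipin()/xfs_iunpin() above keep a simple atomic pin count and wake waiters on the transition to zero. A minimal user-space analogue using C11 atomics is sketched below; demo_inode and demo_pin/demo_unpin are invented names, and the printf stands in for the wake_up() on i_ipin_wait.

#include <stdatomic.h>
#include <stdio.h>

/* Toy pin count; the real code pairs this with the i_ipin_wait queue. */
struct demo_inode {
	atomic_int pincount;
};

static void demo_pin(struct demo_inode *ip)
{
	atomic_fetch_add(&ip->pincount, 1);
}

static void demo_unpin(struct demo_inode *ip)
{
	/* fetch_sub returns the old value, so 1 means we just hit zero,
	 * the same transition atomic_dec_and_test() detects. */
	if (atomic_fetch_sub(&ip->pincount, 1) == 1)
		printf("last unpin: wake up anyone waiting to flush this inode\n");
}

int main(void)
{
	struct demo_inode ino;

	atomic_init(&ino.pincount, 0);
	demo_pin(&ino);
	demo_pin(&ino);
	demo_unpin(&ino);	/* still pinned once */
	demo_unpin(&ino);	/* prints the wake-up message */
	return 0;
}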
+ */ +void +xfs_iunpin_wait( + xfs_inode_t *ip) +{ + xfs_inode_log_item_t *iip; + xfs_lsn_t lsn; + + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS)); + + if (atomic_read(&ip->i_pincount) == 0) { + return; + } + + iip = ip->i_itemp; + if (iip && iip->ili_last_lsn) { + lsn = iip->ili_last_lsn; + } else { + lsn = (xfs_lsn_t)0; + } + + /* + * Give the log a push so we don't wait here too long. + */ + xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE); + + wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0)); +} + + +/* + * xfs_iextents_copy() + * + * This is called to copy the REAL extents (as opposed to the delayed + * allocation extents) from the inode into the given buffer. It + * returns the number of bytes copied into the buffer. + * + * If there are no delayed allocation extents, then we can just + * memcpy() the extents into the buffer. Otherwise, we need to + * examine each extent in turn and skip those which are delayed. + */ +int +xfs_iextents_copy( + xfs_inode_t *ip, + xfs_bmbt_rec_t *buffer, + int whichfork) +{ + int copied; + xfs_bmbt_rec_t *dest_ep; + xfs_bmbt_rec_t *ep; +#ifdef XFS_BMAP_TRACE + static char fname[] = "xfs_iextents_copy"; +#endif + int i; + xfs_ifork_t *ifp; + int nrecs; + xfs_fsblock_t start_block; + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); + ASSERT(ifp->if_bytes > 0); + + nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork); + ASSERT(nrecs > 0); + + /* + * There are some delayed allocation extents in the + * inode, so copy the extents one at a time and skip + * the delayed ones. There must be at least one + * non-delayed extent. + */ + ep = ifp->if_u1.if_extents; + dest_ep = buffer; + copied = 0; + for (i = 0; i < nrecs; i++) { + start_block = xfs_bmbt_get_startblock(ep); + if (ISNULLSTARTBLOCK(start_block)) { + /* + * It's a delayed allocation extent, so skip it. + */ + ep++; + continue; + } + + /* Translate to on disk format */ + put_unaligned(INT_GET(ep->l0, ARCH_CONVERT), + (__uint64_t*)&dest_ep->l0); + put_unaligned(INT_GET(ep->l1, ARCH_CONVERT), + (__uint64_t*)&dest_ep->l1); + dest_ep++; + ep++; + copied++; + } + ASSERT(copied != 0); + xfs_validate_extents(buffer, copied, 1, XFS_EXTFMT_INODE(ip)); + + return (copied * (uint)sizeof(xfs_bmbt_rec_t)); +} + +/* + * Each of the following cases stores data into the same region + * of the on-disk inode, so only one of them can be valid at + * any given time. While it is possible to have conflicting formats + * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is + * in EXTENTS format, this can only happen when the fork has + * changed formats after being modified but before being flushed. + * In these cases, the format always takes precedence, because the + * format indicates the current state of the fork. + */ +/*ARGSUSED*/ +STATIC int +xfs_iflush_fork( + xfs_inode_t *ip, + xfs_dinode_t *dip, + xfs_inode_log_item_t *iip, + int whichfork, + xfs_buf_t *bp) +{ + char *cp; + xfs_ifork_t *ifp; + xfs_mount_t *mp; +#ifdef XFS_TRANS_DEBUG + int first; +#endif + static const short brootflag[2] = + { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; + static const short dataflag[2] = + { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; + static const short extflag[2] = + { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; + + if (iip == NULL) + return 0; + ifp = XFS_IFORK_PTR(ip, whichfork); + /* + * This can happen if we gave up in iformat in an error path, + * for the attribute fork. 
+ */ + if (ifp == NULL) { + ASSERT(whichfork == XFS_ATTR_FORK); + return 0; + } + cp = XFS_DFORK_PTR_ARCH(dip, whichfork, ARCH_CONVERT); + mp = ip->i_mount; + switch (XFS_IFORK_FORMAT(ip, whichfork)) { + case XFS_DINODE_FMT_LOCAL: + if ((iip->ili_format.ilf_fields & dataflag[whichfork]) && + (ifp->if_bytes > 0)) { + ASSERT(ifp->if_u1.if_data != NULL); + ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); + memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); + } + if (whichfork == XFS_DATA_FORK) { + if (unlikely(XFS_DIR_SHORTFORM_VALIDATE_ONDISK(mp, dip))) { + XFS_ERROR_REPORT("xfs_iflush_fork", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + } + break; + + case XFS_DINODE_FMT_EXTENTS: + ASSERT((ifp->if_flags & XFS_IFEXTENTS) || + !(iip->ili_format.ilf_fields & extflag[whichfork])); + ASSERT((ifp->if_u1.if_extents != NULL) || (ifp->if_bytes == 0)); + ASSERT((ifp->if_u1.if_extents == NULL) || (ifp->if_bytes > 0)); + if ((iip->ili_format.ilf_fields & extflag[whichfork]) && + (ifp->if_bytes > 0)) { + ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); + (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, + whichfork); + } + break; + + case XFS_DINODE_FMT_BTREE: + if ((iip->ili_format.ilf_fields & brootflag[whichfork]) && + (ifp->if_broot_bytes > 0)) { + ASSERT(ifp->if_broot != NULL); + ASSERT(ifp->if_broot_bytes <= + (XFS_IFORK_SIZE(ip, whichfork) + + XFS_BROOT_SIZE_ADJ)); + xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes, + (xfs_bmdr_block_t *)cp, + XFS_DFORK_SIZE_ARCH(dip, mp, whichfork, ARCH_CONVERT)); + } + break; + + case XFS_DINODE_FMT_DEV: + if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { + ASSERT(whichfork == XFS_DATA_FORK); + INT_SET(dip->di_u.di_dev, ARCH_CONVERT, ip->i_df.if_u2.if_rdev); + } + break; + + case XFS_DINODE_FMT_UUID: + if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { + ASSERT(whichfork == XFS_DATA_FORK); + memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid, + sizeof(uuid_t)); + } + break; + + default: + ASSERT(0); + break; + } + + return 0; +} + +/* + * xfs_iflush() will write a modified inode's changes out to the + * inode's on disk home. The caller must have the inode lock held + * in at least shared mode and the inode flush semaphore must be + * held as well. The inode lock will still be held upon return from + * the call and the caller is free to unlock it. + * The inode flush lock will be unlocked when the inode reaches the disk. + * The flags indicate how the inode's buffer should be written out. + */ +int +xfs_iflush( + xfs_inode_t *ip, + uint flags) +{ + xfs_inode_log_item_t *iip; + xfs_buf_t *bp; + xfs_dinode_t *dip; + xfs_mount_t *mp; + int error; + /* REFERENCED */ + xfs_chash_t *ch; + xfs_inode_t *iq; + int clcount; /* count of inodes clustered */ + int bufwasdelwri; + enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; + SPLDECL(s); + + XFS_STATS_INC(xfsstats.xs_iflush_count); + + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); + ASSERT(valusema(&ip->i_flock) <= 0); + ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || + ip->i_d.di_nextents > ip->i_df.if_ext_max); + + iip = ip->i_itemp; + mp = ip->i_mount; + + /* + * If the inode isn't dirty, then just release the inode + * flush lock and do nothing. + */ + if ((ip->i_update_core == 0) && + ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { + ASSERT((iip != NULL) ? + !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1); + xfs_ifunlock(ip); + return 0; + } + + /* + * We can't flush the inode until it is unpinned, so + * wait for it. 
We know noone new can pin it, because + * we are holding the inode lock shared and you need + * to hold it exclusively to pin the inode. + */ + xfs_iunpin_wait(ip); + + /* + * This may have been unpinned because the filesystem is shutting + * down forcibly. If that's the case we must not write this inode + * to disk, because the log record didn't make it to disk! + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + ip->i_update_core = 0; + if (iip) + iip->ili_format.ilf_fields = 0; + xfs_ifunlock(ip); + return XFS_ERROR(EIO); + } + + /* + * Get the buffer containing the on-disk inode. + */ + error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0); + if (error != 0) { + xfs_ifunlock(ip); + return error; + } + + /* + * Decide how buffer will be flushed out. This is done before + * the call to xfs_iflush_int because this field is zeroed by it. + */ + if (iip != NULL && iip->ili_format.ilf_fields != 0) { + /* + * Flush out the inode buffer according to the directions + * of the caller. In the cases where the caller has given + * us a choice choose the non-delwri case. This is because + * the inode is in the AIL and we need to get it out soon. + */ + switch (flags) { + case XFS_IFLUSH_SYNC: + case XFS_IFLUSH_DELWRI_ELSE_SYNC: + flags = 0; + break; + case XFS_IFLUSH_ASYNC: + case XFS_IFLUSH_DELWRI_ELSE_ASYNC: + flags = INT_ASYNC; + break; + case XFS_IFLUSH_DELWRI: + flags = INT_DELWRI; + break; + default: + ASSERT(0); + flags = 0; + break; + } + } else { + switch (flags) { + case XFS_IFLUSH_DELWRI_ELSE_SYNC: + case XFS_IFLUSH_DELWRI_ELSE_ASYNC: + case XFS_IFLUSH_DELWRI: + flags = INT_DELWRI; + break; + case XFS_IFLUSH_ASYNC: + flags = INT_ASYNC; + break; + case XFS_IFLUSH_SYNC: + flags = 0; + break; + default: + ASSERT(0); + flags = 0; + break; + } + } + + /* + * First flush out the inode that xfs_iflush was called with. + */ + error = xfs_iflush_int(ip, bp); + if (error) { + goto corrupt_out; + } + + /* + * inode clustering: + * see if other inodes can be gathered into this write + */ + +#ifdef DEBUG + ip->i_chash->chl_buf = bp; /* inode clustering debug */ +#endif + + ch = XFS_CHASH(mp, ip->i_blkno); + s = mutex_spinlock(&ch->ch_lock); + + clcount = 0; + for (iq = ip->i_cnext; iq != ip; iq = iq->i_cnext) { + /* + * Do an un-protected check to see if the inode is dirty and + * is a candidate for flushing. These checks will be repeated + * later after the appropriate locks are acquired. + */ + iip = iq->i_itemp; + if ((iq->i_update_core == 0) && + ((iip == NULL) || + !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) && + xfs_ipincount(iq) == 0) { + continue; + } + + /* + * Try to get locks. If any are unavailable, + * then this inode cannot be flushed and is skipped. + */ + + /* get inode locks (just i_lock) */ + if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) { + /* get inode flush lock */ + if (xfs_iflock_nowait(iq)) { + /* check if pinned */ + if (xfs_ipincount(iq) == 0) { + /* arriving here means that + * this inode can be flushed. 
+ * first re-check that it's + * dirty + */ + iip = iq->i_itemp; + if ((iq->i_update_core != 0)|| + ((iip != NULL) && + (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { + clcount++; + error = xfs_iflush_int(iq, bp); + if (error) { + xfs_iunlock(iq, + XFS_ILOCK_SHARED); + goto cluster_corrupt_out; + } + } else { + xfs_ifunlock(iq); + } + } else { + xfs_ifunlock(iq); + } + } + xfs_iunlock(iq, XFS_ILOCK_SHARED); + } + } + mutex_spinunlock(&ch->ch_lock, s); + + if (clcount) { + XFS_STATS_INC(xfsstats.xs_icluster_flushcnt); + XFS_STATS_ADD(xfsstats.xs_icluster_flushinode, clcount); + } + + /* + * If the buffer is pinned then push on the log so we won't + * get stuck waiting in the write for too long. + */ + if (XFS_BUF_ISPINNED(bp)){ + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + } + + if (flags & INT_DELWRI) { + xfs_bdwrite(mp, bp); + } else if (flags & INT_ASYNC) { + xfs_bawrite(mp, bp); + } else { + error = xfs_bwrite(mp, bp); + } + return error; + +corrupt_out: + xfs_buf_relse(bp); + xfs_force_shutdown(mp, XFS_CORRUPT_INCORE); + xfs_iflush_abort(ip); + /* + * Unlocks the flush lock + */ + return XFS_ERROR(EFSCORRUPTED); + +cluster_corrupt_out: + /* Corruption detected in the clustering loop. Invalidate the + * inode buffer and shut down the filesystem. + */ + mutex_spinunlock(&ch->ch_lock, s); + + /* + * Clean up the buffer. If it was B_DELWRI, just release it -- + * brelse can handle it with no problems. If not, shut down the + * filesystem before releasing the buffer. + */ + if ((bufwasdelwri= XFS_BUF_ISDELAYWRITE(bp))) { + xfs_buf_relse(bp); + } + + xfs_force_shutdown(mp, XFS_CORRUPT_INCORE); + + if(!bufwasdelwri) { + /* + * Just like incore_relse: if we have b_iodone functions, + * mark the buffer as an error and call them. Otherwise + * mark it as stale and brelse. + */ + if (XFS_BUF_IODONE_FUNC(bp)) { + XFS_BUF_CLR_BDSTRAT_FUNC(bp); + XFS_BUF_UNDONE(bp); + XFS_BUF_STALE(bp); + XFS_BUF_SHUT(bp); + XFS_BUF_ERROR(bp,EIO); + xfs_biodone(bp); + } else { + XFS_BUF_STALE(bp); + xfs_buf_relse(bp); + } + } + + xfs_iflush_abort(iq); + /* + * Unlocks the flush lock + */ + return XFS_ERROR(EFSCORRUPTED); +} + + +STATIC int +xfs_iflush_int( + xfs_inode_t *ip, + xfs_buf_t *bp) +{ + xfs_inode_log_item_t *iip; + xfs_dinode_t *dip; + xfs_mount_t *mp; +#ifdef XFS_TRANS_DEBUG + int first; +#endif + SPLDECL(s); + + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); + ASSERT(valusema(&ip->i_flock) <= 0); + ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || + ip->i_d.di_nextents > ip->i_df.if_ext_max); + + iip = ip->i_itemp; + mp = ip->i_mount; + + + /* + * If the inode isn't dirty, then just release the inode + * flush lock and do nothing. + */ + if ((ip->i_update_core == 0) && + ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { + xfs_ifunlock(ip); + return 0; + } + + /* set *dip = inode's place in the buffer */ + dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset); + + /* + * Clear i_update_core before copying out the data. + * This is for coordination with our timestamp updates + * that don't hold the inode lock. They will always + * update the timestamps BEFORE setting i_update_core, + * so if we clear i_update_core after they set it we + * are guaranteed to see their updates to the timestamps. + * I believe that this depends on strongly ordered memory + * semantics, but we have that. We use the SYNCHRONIZE + * macro to make sure that the compiler does not reorder + * the i_update_core access below the data copy below. 
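+ *
+ * For illustration only, a sketch of the two racing store orders:
+ * the unlocked timestamp updater (xfs_ichgtime(), later in this
+ * file) does
+ *
+ *     write the timestamps into ip->i_d;
+ *     SYNCHRONIZE();
+ *     ip->i_update_core = 1;
+ *
+ * while this flush path does
+ *
+ *     ip->i_update_core = 0;
+ *     SYNCHRONIZE();
+ *     copy ip->i_d into the on-disk buffer;
+ *
+ * so either the copy below picks up the new timestamps, or the
+ * updater sets i_update_core after our clear and the inode simply
+ * stays dirty for a later flush.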
+ */ + ip->i_update_core = 0; + SYNCHRONIZE(); + + if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC, + mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p", + ip->i_ino, (int) INT_GET(dip->di_core.di_magic, ARCH_CONVERT), dip); + goto corrupt_out; + } + if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, + mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x", + ip->i_ino, ip, ip->i_d.di_magic); + goto corrupt_out; + } + if ((ip->i_d.di_mode & IFMT) == IFREG) { + if (XFS_TEST_ERROR( + (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && + (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), + mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: Bad regular inode %Lu, ptr 0x%p", + ip->i_ino, ip); + goto corrupt_out; + } + } else if ((ip->i_d.di_mode & IFMT) == IFDIR) { + if (XFS_TEST_ERROR( + (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && + (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && + (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), + mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: Bad directory inode %Lu, ptr 0x%p", + ip->i_ino, ip); + goto corrupt_out; + } + } + if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > + ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, + XFS_RANDOM_IFLUSH_5)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p", + ip->i_ino, + ip->i_d.di_nextents + ip->i_d.di_anextents, + ip->i_d.di_nblocks, + ip); + goto corrupt_out; + } + if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, + mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { + xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, + "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p", + ip->i_ino, ip->i_d.di_forkoff, ip); + goto corrupt_out; + } + /* + * Copy the dirty parts of the inode into the on-disk + * inode. We always copy out the core of the inode, + * because if the inode is dirty at all the core must + * be. + */ + xfs_xlate_dinode_core((xfs_caddr_t)&(dip->di_core), &(ip->i_d), + -1, ARCH_CONVERT); + + /* + * If this is really an old format inode and the superblock version + * has not been updated to support only new format inodes, then + * convert back to the old inode format. If the superblock version + * has been updated, then make the conversion permanent. + */ + ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 || + XFS_SB_VERSION_HASNLINK(&mp->m_sb)); + if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { + if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { + /* + * Convert it back. + */ + ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); + INT_SET(dip->di_core.di_onlink, ARCH_CONVERT, ip->i_d.di_nlink); + } else { + /* + * The superblock version has already been bumped, + * so just make the conversion to the new inode + * format permanent. 
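+ *
+ * (Roughly: the link count already lives in di_nlink incore, so
+ * the upgrade below only has to bump di_version to 2, zero the
+ * now unused di_onlink and pad fields, and mirror those changes
+ * into the on-disk copy being assembled here.)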
+ */ + ip->i_d.di_version = XFS_DINODE_VERSION_2; + INT_SET(dip->di_core.di_version, ARCH_CONVERT, XFS_DINODE_VERSION_2); + ip->i_d.di_onlink = 0; + INT_ZERO(dip->di_core.di_onlink, ARCH_CONVERT); + memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); + memset(&(dip->di_core.di_pad[0]), 0, + sizeof(dip->di_core.di_pad)); + ASSERT(ip->i_d.di_projid == 0); + } + } + + if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) { + goto corrupt_out; + } + + if (XFS_IFORK_Q(ip)) { + /* + * The only error from xfs_iflush_fork is on the data fork. + */ + (void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); + } + xfs_inobp_check(mp, bp); + + /* + * We've recorded everything logged in the inode, so we'd + * like to clear the ilf_fields bits so we don't log and + * flush things unnecessarily. However, we can't stop + * logging all this information until the data we've copied + * into the disk buffer is written to disk. If we did we might + * overwrite the copy of the inode in the log with all the + * data after re-logging only part of it, and in the face of + * a crash we wouldn't have all the data we need to recover. + * + * What we do is move the bits to the ili_last_fields field. + * When logging the inode, these bits are moved back to the + * ilf_fields field. In the xfs_iflush_done() routine we + * clear ili_last_fields, since we know that the information + * those bits represent is permanently on disk. As long as + * the flush completes before the inode is logged again, then + * both ilf_fields and ili_last_fields will be cleared. + * + * We can play with the ilf_fields bits here, because the inode + * lock must be held exclusively in order to set bits there + * and the flush lock protects the ili_last_fields bits. + * Set ili_logged so the flush done + * routine can tell whether or not to look in the AIL. + * Also, store the current LSN of the inode so that we can tell + * whether the item has moved in the AIL from xfs_iflush_done(). + * In order to read the lsn we need the AIL lock, because + * it is a 64 bit value that cannot be read atomically. + */ + if (iip != NULL && iip->ili_format.ilf_fields != 0) { + iip->ili_last_fields = iip->ili_format.ilf_fields; + iip->ili_format.ilf_fields = 0; + iip->ili_logged = 1; + + ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ + AIL_LOCK(mp,s); + iip->ili_flush_lsn = iip->ili_item.li_lsn; + AIL_UNLOCK(mp, s); + + /* + * Attach the function xfs_iflush_done to the inode's + * buffer. This will remove the inode from the AIL + * and unlock the inode's flush lock when the inode is + * completely written to disk. + */ + xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) + xfs_iflush_done, (xfs_log_item_t *)iip); + + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); + } else { + /* + * We're flushing an inode which is not in the AIL and has + * not been logged but has i_update_core set. For this + * case we can use a B_DELWRI flush and immediately drop + * the inode flush lock because we can avoid the whole + * AIL state thing. It's OK to drop the flush lock now, + * because we've already locked the buffer and to do anything + * you really need both. + */ + if (iip != NULL) { + ASSERT(iip->ili_logged == 0); + ASSERT(iip->ili_last_fields == 0); + ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); + } + xfs_ifunlock(ip); + } + + return 0; + +corrupt_out: + return XFS_ERROR(EFSCORRUPTED); +} + +/* + * Flush all inactive inodes in mp. 
Return true if no user references + * were found, false otherwise. + */ +int +xfs_iflush_all( + xfs_mount_t *mp, + int flag) +{ + int busy; + int done; + int purged; + xfs_inode_t *ip; + vmap_t vmap; + vnode_t *vp; + + busy = done = 0; + while (!done) { + purged = 0; + XFS_MOUNT_ILOCK(mp); + ip = mp->m_inodes; + if (ip == NULL) { + break; + } + do { + /* Make sure we skip markers inserted by sync */ + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + + /* + * It's up to our caller to purge the root + * and quota vnodes later. + */ + vp = XFS_ITOV_NULL(ip); + + if (!vp) { + XFS_MOUNT_IUNLOCK(mp); + xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); + purged = 1; + break; + } + + if (vn_count(vp) != 0) { + if (vn_count(vp) == 1 && + (ip == mp->m_rootip || + (mp->m_quotainfo && + (ip->i_ino == mp->m_sb.sb_uquotino || + ip->i_ino == mp->m_sb.sb_gquotino)))) { + + ip = ip->i_mnext; + continue; + } + if (!(flag & XFS_FLUSH_ALL)) { + ASSERT(0); + busy = 1; + done = 1; + break; + } + /* + * Ignore busy inodes but continue flushing + * others. + */ + ip = ip->i_mnext; + continue; + } + /* + * Sample vp mapping while holding mp locked on MP + * systems, so we don't purge a reclaimed or + * nonexistent vnode. We break from the loop + * since we know that we modify + * it by pulling ourselves from it in xfs_reclaim() + * called via vn_purge() below. Set ip to the next + * entry in the list anyway so we'll know below + * whether we reached the end or not. + */ + VMAP(vp, vmap); + XFS_MOUNT_IUNLOCK(mp); + + vn_purge(vp, &vmap); + + purged = 1; + break; + } while (ip != mp->m_inodes); + /* + * We need to distinguish between when we exit the loop + * after a purge and when we simply hit the end of the + * list. We can't use the (ip == mp->m_inodes) test, + * because when we purge an inode at the start of the list + * the next inode on the list becomes mp->m_inodes. That + * would cause such a test to bail out early. The purged + * variable tells us how we got out of the loop. + */ + if (!purged) { + done = 1; + } + } + XFS_MOUNT_IUNLOCK(mp); + return !busy; +} + + +/* + * xfs_iaccess: check accessibility of inode for mode. + */ +int +xfs_iaccess( + xfs_inode_t *ip, + mode_t mode, + cred_t *cr) +{ + int error; + mode_t orgmode = mode; + struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + + /* + * Verify that the MAC policy allows the requested access. + */ + if ((error = _MAC_XFS_IACCESS(ip, mode, cr))) + return XFS_ERROR(error); + + if (mode & IWRITE) { + umode_t imode = inode->i_mode; + + if (IS_RDONLY(inode) && + (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode))) + return XFS_ERROR(EROFS); + } + + /* + * If there's an Access Control List it's used instead of + * the mode bits. + */ + if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1) + return error ? XFS_ERROR(error) : 0; + + if (current->fsuid != ip->i_d.di_uid) { + mode >>= 3; + if (!in_group_p((gid_t)ip->i_d.di_gid)) + mode >>= 3; + } + + /* + * If the DACs are ok we don't need any capability check. + */ + if ((ip->i_d.di_mode & mode) == mode) + return 0; + /* + * Read/write DACs are always overridable. + * Executable DACs are overridable if at least one exec bit is set. 
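+ *
+ * Worked example (illustrative): a 0644 file and a caller that is
+ * neither the owner nor in the owning group, asking for IWRITE.
+ * The two ">>= 3" shifts above move the request down to the
+ * "other" permission bits, where 0644 grants no write, so the DAC
+ * test above fails and we reach the capability checks below:
+ * CAP_DAC_OVERRIDE can still grant the write, while the
+ * CAP_DAC_READ_SEARCH case never applies to a plain write request.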
+ */ + if ((orgmode & (IREAD|IWRITE)) || (inode->i_mode & S_IXUGO)) + if (capable_cred(cr, CAP_DAC_OVERRIDE)) + return 0; + + if ((orgmode == IREAD) || + (((ip->i_d.di_mode & IFMT) == IFDIR) && + (!(orgmode & ~(IWRITE|IEXEC))))) { + if (capable_cred(cr, CAP_DAC_READ_SEARCH)) + return 0; +#ifdef NOISE + cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode); +#endif /* NOISE */ + return XFS_ERROR(EACCES); + } + return XFS_ERROR(EACCES); +} + +/* + * Return whether or not it is OK to swap to the given file in the + * given range. Return 0 for OK and otherwise return the error. + * + * It is only OK to swap to a file if it has no holes, and all + * extents have been initialized. + * + * We use the vnode behavior chain prevent and allow primitives + * to ensure that the vnode chain stays coherent while we do this. + * This allows us to walk the chain down to the bottom where XFS + * lives without worrying about it changing out from under us. + */ +int +xfs_swappable( + bhv_desc_t *bdp) +{ + xfs_inode_t *ip; + + ip = XFS_BHVTOI(bdp); + /* + * Verify that the file does not have any + * holes or unwritten exents. + */ + return xfs_bmap_check_swappable(ip); +} + +/* + * xfs_iroundup: round up argument to next power of two + */ +uint +xfs_iroundup( + uint v) +{ + int i; + uint m; + + if ((v & (v - 1)) == 0) + return v; + ASSERT((v & 0x80000000) == 0); + if ((v & (v + 1)) == 0) + return v + 1; + for (i = 0, m = 1; i < 31; i++, m <<= 1) { + if (v & m) + continue; + v |= m; + if ((v & (v + 1)) == 0) + return v + 1; + } + ASSERT(0); + return( 0 ); +} + +/* + * Change the requested timestamp in the given inode. + * We don't lock across timestamp updates, and we don't log them but + * we do record the fact that there is dirty information in core. + * + * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG + * with XFS_ICHGTIME_ACC to be sure that access time + * update will take. Calling first with XFS_ICHGTIME_ACC + * and then XFS_ICHGTIME_MOD may fail to modify the access + * timestamp if the filesystem is mounted noacctm. + */ +void +xfs_ichgtime(xfs_inode_t *ip, + int flags) +{ + timespec_t tv; + vnode_t *vp = XFS_ITOV(ip); + struct inode *inode = LINVFS_GET_IP(vp); + + /* + * We're not supposed to change timestamps in readonly-mounted + * filesystems. Throw it away if anyone asks us. + */ + if (vp->v_vfsp->vfs_flag & VFS_RDONLY) + return; + + /* + * Don't update access timestamps on reads if mounted "noatime" + * Throw it away if anyone asks us. + */ + if (ip->i_mount->m_flags & XFS_MOUNT_NOATIME && + ((flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) + == XFS_ICHGTIME_ACC)) + return; + + nanotime(&tv); + if (flags & XFS_ICHGTIME_MOD) { + inode->i_mtime = ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; + ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; + } + if (flags & XFS_ICHGTIME_ACC) { + inode->i_atime = ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec; + ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec; + } + if (flags & XFS_ICHGTIME_CHG) { + inode->i_ctime = ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec; + ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec; + } + + /* + * We update the i_update_core field _after_ changing + * the timestamps in order to coordinate properly with + * xfs_iflush() so that we don't lose timestamp updates. + * This keeps us from having to hold the inode lock + * while doing this. We use the SYNCHRONIZE macro to + * ensure that the compiler does not reorder the update + * of i_update_core above the timestamp updates above. 
+ */ + SYNCHRONIZE(); + ip->i_update_core = 1; + if (!(inode->i_state & I_LOCK)) + mark_inode_dirty(inode); +} + +#ifdef XFS_ILOCK_TRACE +void +xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) +{ + ktrace_enter(ip->i_lock_trace, + (void *)ip, + (void *)(__psint_t)lock, /* 1 = LOCK, 3=UNLOCK, etc */ + (void *)(__psint_t)lockflags, /* XFS_ILOCK_EXCL etc */ + (void *)ra, /* caller of ilock */ + (void *)(__psint_t)cpuid(), + (void *)(__psint_t)current_pid(), + 0,0,0,0,0,0,0,0,0,0); + +} +#endif /* ILOCK_TRACE */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_inode.h linux.22-ac2/fs/xfs/xfs_inode.h --- linux.vanilla/fs/xfs/xfs_inode.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_inode.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,556 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_INODE_H__ +#define __XFS_INODE_H__ + +/* + * File incore extent information, present for each of data & attr forks. + */ +#define XFS_INLINE_EXTS 2 +#define XFS_INLINE_DATA 32 +typedef struct xfs_ifork { + int if_bytes; /* bytes in if_u1 */ + int if_real_bytes; /* bytes allocated in if_u1 */ + xfs_bmbt_block_t *if_broot; /* file's incore btree root */ + short if_broot_bytes; /* bytes allocated for root */ + unsigned char if_flags; /* per-fork flags */ + unsigned char if_ext_max; /* max # of extent records */ + xfs_extnum_t if_lastex; /* last if_extents used */ + union { + xfs_bmbt_rec_t *if_extents; /* linear map file exts */ + char *if_data; /* inline file data */ + } if_u1; + union { + xfs_bmbt_rec_t if_inline_ext[XFS_INLINE_EXTS]; + /* very small file extents */ + char if_inline_data[XFS_INLINE_DATA]; + /* very small file data */ + xfs_dev_t if_rdev; /* dev number if special */ + uuid_t if_uuid; /* mount point value */ + } if_u2; +} xfs_ifork_t; + +/* + * Flags for xfs_ichgtime(). + */ +#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */ +#define XFS_ICHGTIME_ACC 0x2 /* data fork access timestamp */ +#define XFS_ICHGTIME_CHG 0x4 /* inode field change timestamp */ + +/* + * Per-fork incore inode flags. 
+ */ +#define XFS_IFINLINE 0x0001 /* Inline data is read in */ +#define XFS_IFEXTENTS 0x0002 /* All extent pointers are read in */ +#define XFS_IFBROOT 0x0004 /* i_broot points to the bmap b-tree root */ + +/* + * Flags for xfs_imap() and xfs_dilocate(). + */ +#define XFS_IMAP_LOOKUP 0x1 + +/* + * Maximum number of extent pointers in if_u1.if_extents. + */ +#define XFS_MAX_INCORE_EXTENTS 32768 + + +#ifdef __KERNEL__ +struct bhv_desc; +struct cred; +struct ktrace; +struct vnode; +struct xfs_buf; +struct xfs_bmap_free; +struct xfs_bmbt_irec; +struct xfs_bmbt_block; +struct xfs_inode; +struct xfs_inode_log_item; +struct xfs_mount; +struct xfs_trans; +struct xfs_dquot; + + +/* + * This structure is used to communicate which extents of a file + * were holes when a write started from xfs_write_file() to + * xfs_strat_read(). This is necessary so that we can know which + * blocks need to be zeroed when they are read in in xfs_strat_read() + * if they weren\'t allocated when the buffer given to xfs_strat_read() + * was mapped. + * + * We keep a list of these attached to the inode. The list is + * protected by the inode lock and the fact that the io lock is + * held exclusively by writers. + */ +typedef struct xfs_gap { + struct xfs_gap *xg_next; + xfs_fileoff_t xg_offset_fsb; + xfs_extlen_t xg_count_fsb; +} xfs_gap_t; + +typedef struct dm_attrs_s { + __uint32_t da_dmevmask; /* DMIG event mask */ + __uint16_t da_dmstate; /* DMIG state info */ + __uint16_t da_pad; /* DMIG extra padding */ +} dm_attrs_t; + +typedef struct xfs_iocore { + void *io_obj; /* pointer to container + * inode or dcxvn structure */ + struct xfs_mount *io_mount; /* fs mount struct ptr */ +#ifdef DEBUG + mrlock_t *io_lock; /* inode IO lock */ + mrlock_t *io_iolock; /* inode IO lock */ +#endif + + /* I/O state */ + xfs_fsize_t io_new_size; /* sz when write completes */ + + /* Miscellaneous state. */ + unsigned int io_flags; /* IO related flags */ + + /* DMAPI state */ + dm_attrs_t io_dmattrs; + +} xfs_iocore_t; + +#define io_dmevmask io_dmattrs.da_dmevmask +#define io_dmstate io_dmattrs.da_dmstate + +#define XFS_IO_INODE(io) ((xfs_inode_t *) ((io)->io_obj)) +#define XFS_IO_DCXVN(io) ((dcxvn_t *) ((io)->io_obj)) + +/* + * Flags in the flags field + */ + +#define XFS_IOCORE_RT 0x1 + +/* + * xfs_iocore prototypes + */ + +extern void xfs_iocore_inode_init(struct xfs_inode *); +extern void xfs_iocore_inode_reinit(struct xfs_inode *); + + +/* + * This is the type used in the xfs inode hash table. + * An array of these is allocated for each mounted + * file system to hash the inodes for that file system. + */ +typedef struct xfs_ihash { + struct xfs_inode *ih_next; + rwlock_t ih_lock; + uint ih_version; +} xfs_ihash_t; + +/* + * Inode hashing and hash bucket locking. + */ +#define XFS_BUCKETS(mp) (37*(mp)->m_sb.sb_agcount-1) +#define XFS_IHASH(mp,ino) ((mp)->m_ihash + (((uint)ino) % (mp)->m_ihsize)) + +/* + * This is the xfs inode cluster hash. This hash is used by xfs_iflush to + * find inodes that share a cluster and can be flushed to disk at the same + * time. + */ + +typedef struct xfs_chashlist { + struct xfs_chashlist *chl_next; + struct xfs_inode *chl_ip; + xfs_daddr_t chl_blkno; /* starting block number of + * the cluster */ +#ifdef DEBUG + struct xfs_buf *chl_buf; /* debug: the inode buffer */ +#endif +} xfs_chashlist_t; + +typedef struct xfs_chash { + xfs_chashlist_t *ch_list; + lock_t ch_lock; +} xfs_chash_t; + + +/* + * This is the xfs in-core inode structure. + * Most of the on-disk inode is embedded in the i_d field. 
+ * + * The extent pointers/inline file space, however, are managed + * separately. The memory for this information is pointed to by + * the if_u1 unions depending on the type of the data. + * This is used to linearize the array of extents for fast in-core + * access. This is used until the file's number of extents + * surpasses XFS_MAX_INCORE_EXTENTS, at which point all extent pointers + * are accessed through the buffer cache. + * + * Other state kept in the in-core inode is used for identification, + * locking, transactional updating, etc of the inode. + * + * Generally, we do not want to hold the i_rlock while holding the + * i_ilock. Hierarchy is i_iolock followed by i_rlock. + * + * xfs_iptr_t contains all the inode fields upto and including the + * i_mnext and i_mprev fields, it is used as a marker in the inode + * chain off the mount structure by xfs_sync calls. + */ + +typedef struct { + struct xfs_ihash *ip_hash; /* pointer to hash header */ + struct xfs_inode *ip_next; /* inode hash link forw */ + struct xfs_inode *ip_mnext; /* next inode in mount list */ + struct xfs_inode *ip_mprev; /* ptr to prev inode */ + struct xfs_inode **ip_prevp; /* ptr to prev i_next */ + struct xfs_mount *ip_mount; /* fs mount struct ptr */ +} xfs_iptr_t; + +typedef struct xfs_inode { + /* Inode linking and identification information. */ + struct xfs_ihash *i_hash; /* pointer to hash header */ + struct xfs_inode *i_next; /* inode hash link forw */ + struct xfs_inode *i_mnext; /* next inode in mount list */ + struct xfs_inode *i_mprev; /* ptr to prev inode */ + struct xfs_inode **i_prevp; /* ptr to prev i_next */ + struct xfs_mount *i_mount; /* fs mount struct ptr */ + struct list_head i_reclaim; /* reclaim list */ + struct bhv_desc i_bhv_desc; /* inode behavior descriptor*/ + struct xfs_dquot *i_udquot; /* user dquot */ + struct xfs_dquot *i_gdquot; /* group dquot */ + + /* Inode location stuff */ + xfs_ino_t i_ino; /* inode number (agno/agino)*/ + xfs_daddr_t i_blkno; /* blkno of inode buffer */ + ushort i_len; /* len of inode buffer */ + ushort i_boffset; /* off of inode in buffer */ + + /* Extent information. */ + xfs_ifork_t *i_afp; /* attribute fork pointer */ + xfs_ifork_t i_df; /* data fork */ + + /* Transaction and locking information. */ + struct xfs_trans *i_transp; /* ptr to owning transaction*/ + struct xfs_inode_log_item *i_itemp; /* logging information */ + mrlock_t i_lock; /* inode lock */ + mrlock_t i_iolock; /* inode IO lock */ + sema_t i_flock; /* inode flush lock */ + atomic_t i_pincount; /* inode pin count */ + wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ + struct xfs_inode **i_refcache; /* ptr to entry in ref cache */ + struct xfs_inode *i_release; /* inode to unref */ + + /* I/O state */ + xfs_iocore_t i_iocore; /* I/O core */ + + /* Miscellaneous state. */ + unsigned short i_flags; /* see defined flags below */ + unsigned char i_update_core; /* timestamps/size is dirty */ + unsigned char i_update_size; /* di_size field is dirty */ + unsigned int i_gen; /* generation count */ + unsigned int i_delayed_blks; /* count of delay alloc blks */ + + xfs_dinode_core_t i_d; /* most of ondisk inode */ + xfs_chashlist_t *i_chash; /* cluster hash list header */ + struct xfs_inode *i_cnext; /* cluster hash link forward */ + struct xfs_inode *i_cprev; /* cluster hash link backward */ + +#ifdef DEBUG + /* Trace buffers per inode. 
*/ + struct ktrace *i_xtrace; /* inode extent list trace */ + struct ktrace *i_btrace; /* inode bmap btree trace */ + struct ktrace *i_rwtrace; /* inode read/write trace */ + struct ktrace *i_strat_trace; /* inode strat_write trace */ + struct ktrace *i_lock_trace; /* inode lock/unlock trace */ + struct ktrace *i_dir_trace; /* inode directory trace */ +#endif /* DEBUG */ +} xfs_inode_t; + +#endif /* __KERNEL__ */ + + +/* + * Fork handling. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_PTR) +xfs_ifork_t *xfs_ifork_ptr(xfs_inode_t *ip, int w); +#define XFS_IFORK_PTR(ip,w) xfs_ifork_ptr(ip,w) +#else +#define XFS_IFORK_PTR(ip,w) ((w) == XFS_DATA_FORK ? &(ip)->i_df : (ip)->i_afp) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_Q) +int xfs_ifork_q(xfs_inode_t *ip); +#define XFS_IFORK_Q(ip) xfs_ifork_q(ip) +#else +#define XFS_IFORK_Q(ip) XFS_CFORK_Q(&(ip)->i_d) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_DSIZE) +int xfs_ifork_dsize(xfs_inode_t *ip); +#define XFS_IFORK_DSIZE(ip) xfs_ifork_dsize(ip) +#else +#define XFS_IFORK_DSIZE(ip) XFS_CFORK_DSIZE(&ip->i_d, ip->i_mount) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_ASIZE) +int xfs_ifork_asize(xfs_inode_t *ip); +#define XFS_IFORK_ASIZE(ip) xfs_ifork_asize(ip) +#else +#define XFS_IFORK_ASIZE(ip) XFS_CFORK_ASIZE(&ip->i_d, ip->i_mount) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_SIZE) +int xfs_ifork_size(xfs_inode_t *ip, int w); +#define XFS_IFORK_SIZE(ip,w) xfs_ifork_size(ip,w) +#else +#define XFS_IFORK_SIZE(ip,w) XFS_CFORK_SIZE(&ip->i_d, ip->i_mount, w) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_FORMAT) +int xfs_ifork_format(xfs_inode_t *ip, int w); +#define XFS_IFORK_FORMAT(ip,w) xfs_ifork_format(ip,w) +#else +#define XFS_IFORK_FORMAT(ip,w) XFS_CFORK_FORMAT(&ip->i_d, w) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_FMT_SET) +void xfs_ifork_fmt_set(xfs_inode_t *ip, int w, int n); +#define XFS_IFORK_FMT_SET(ip,w,n) xfs_ifork_fmt_set(ip,w,n) +#else +#define XFS_IFORK_FMT_SET(ip,w,n) XFS_CFORK_FMT_SET(&ip->i_d, w, n) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_NEXTENTS) +int xfs_ifork_nextents(xfs_inode_t *ip, int w); +#define XFS_IFORK_NEXTENTS(ip,w) xfs_ifork_nextents(ip,w) +#else +#define XFS_IFORK_NEXTENTS(ip,w) XFS_CFORK_NEXTENTS(&ip->i_d, w) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_IFORK_NEXT_SET) +void xfs_ifork_next_set(xfs_inode_t *ip, int w, int n); +#define XFS_IFORK_NEXT_SET(ip,w,n) xfs_ifork_next_set(ip,w,n) +#else +#define XFS_IFORK_NEXT_SET(ip,w,n) XFS_CFORK_NEXT_SET(&ip->i_d, w, n) +#endif + + +#ifdef __KERNEL__ + +/* + * In-core inode flags. + */ +#define XFS_IGRIO 0x0001 /* inode used for guaranteed rate i/o */ +#define XFS_IUIOSZ 0x0002 /* inode i/o sizes have been explicitly set */ +#define XFS_IQUIESCE 0x0004 /* we have started quiescing for this inode */ +#define XFS_IRECLAIM 0x0008 /* we have started reclaiming this inode */ + +/* + * Flags for inode locking. 
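+ *
+ * Typical usage (a sketch, not taken from any specific caller):
+ * the flags are passed to xfs_ilock()/xfs_iunlock() and may be
+ * combined, e.g.
+ *
+ *     xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+ *     ... modify the inode ...
+ *     xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);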
+ */ +#define XFS_IOLOCK_EXCL 0x001 +#define XFS_IOLOCK_SHARED 0x002 +#define XFS_ILOCK_EXCL 0x004 +#define XFS_ILOCK_SHARED 0x008 +#define XFS_IUNLOCK_NONOTIFY 0x010 +#define XFS_EXTENT_TOKEN_RD 0x040 +#define XFS_SIZE_TOKEN_RD 0x080 +#define XFS_EXTSIZE_RD (XFS_EXTENT_TOKEN_RD|XFS_SIZE_TOKEN_RD) +#define XFS_WILLLEND 0x100 /* Always acquire tokens for lending */ +#define XFS_EXTENT_TOKEN_WR (XFS_EXTENT_TOKEN_RD | XFS_WILLLEND) +#define XFS_SIZE_TOKEN_WR (XFS_SIZE_TOKEN_RD | XFS_WILLLEND) +#define XFS_EXTSIZE_WR (XFS_EXTSIZE_RD | XFS_WILLLEND) + + +#define XFS_LOCK_MASK \ + (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL | \ + XFS_ILOCK_SHARED | XFS_EXTENT_TOKEN_RD | XFS_SIZE_TOKEN_RD | \ + XFS_WILLLEND) + +/* + * Flags for xfs_iflush() + */ +#define XFS_IFLUSH_DELWRI_ELSE_SYNC 1 +#define XFS_IFLUSH_DELWRI_ELSE_ASYNC 2 +#define XFS_IFLUSH_SYNC 3 +#define XFS_IFLUSH_ASYNC 4 +#define XFS_IFLUSH_DELWRI 5 + +/* + * Flags for xfs_iflush_all. + */ +#define XFS_FLUSH_ALL 0x1 + +/* + * Flags for xfs_itruncate_start(). + */ +#define XFS_ITRUNC_DEFINITE 0x1 +#define XFS_ITRUNC_MAYBE 0x2 + +/* + * max file offset is 2^(31+PAGE_SHIFT) - 1 (due to linux page cache) + * + * NOTE: XFS itself can handle 2^63 - 1 (largest positive value of xfs_fsize_t) + * but this is the Linux limit. + */ +#define XFS_MAX_FILE_OFFSET MAX_LFS_FILESIZE + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ITOV) +struct vnode *xfs_itov(xfs_inode_t *ip); +#define XFS_ITOV(ip) xfs_itov(ip) +#else +#define XFS_ITOV(ip) BHV_TO_VNODE(XFS_ITOBHV(ip)) +#endif +#define XFS_ITOV_NULL(ip) BHV_TO_VNODE_NULL(XFS_ITOBHV(ip)) +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ITOBHV) +struct bhv_desc *xfs_itobhv(xfs_inode_t *ip); +#define XFS_ITOBHV(ip) xfs_itobhv(ip) +#else +#define XFS_ITOBHV(ip) ((struct bhv_desc *)(&((ip)->i_bhv_desc))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BHVTOI) +xfs_inode_t *xfs_bhvtoi(struct bhv_desc *bhvp); +#define XFS_BHVTOI(bhvp) xfs_bhvtoi(bhvp) +#else +#define XFS_BHVTOI(bhvp) \ + ((xfs_inode_t *)((char *)(bhvp) - \ + (char *)&(((xfs_inode_t *)0)->i_bhv_desc))) +#endif + +#define BHV_IS_XFS(bdp) (BHV_OPS(bdp) == &xfs_vnodeops) + +/* + * Pick the inode cluster hash bucket + * (m_chash is the same size as m_ihash) + */ +#define XFS_CHASH(mp,blk) ((mp)->m_chash + (((uint)blk) % (mp)->m_chsize)) + +/* + * For multiple groups support: if ISGID bit is set in the parent + * directory, group of new file is set to that of the parent, and + * new subdirectory gets ISGID bit from parent. + */ +#define XFS_INHERIT_GID(pip, vfsp) ((pip) != NULL && \ + (((vfsp)->vfs_flag & VFS_GRPID) || ((pip)->i_d.di_mode & ISGID))) + +/* + * xfs_iget.c prototypes. 
+ */ +void xfs_ihash_init(struct xfs_mount *); +void xfs_ihash_free(struct xfs_mount *); +void xfs_chash_init(struct xfs_mount *); +void xfs_chash_free(struct xfs_mount *); +xfs_inode_t *xfs_inode_incore(struct xfs_mount *, xfs_ino_t, + struct xfs_trans *); +void xfs_inode_lock_init(xfs_inode_t *, struct vnode *); +int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, + uint, xfs_inode_t **, xfs_daddr_t); +void xfs_iput(xfs_inode_t *, uint); +void xfs_iput_new(xfs_inode_t *, uint); +void xfs_ilock(xfs_inode_t *, uint); +int xfs_ilock_nowait(xfs_inode_t *, uint); +void xfs_iunlock(xfs_inode_t *, uint); +void xfs_ilock_demote(xfs_inode_t *, uint); +void xfs_iflock(xfs_inode_t *); +int xfs_iflock_nowait(xfs_inode_t *); +uint xfs_ilock_map_shared(xfs_inode_t *); +void xfs_iunlock_map_shared(xfs_inode_t *, uint); +void xfs_ifunlock(xfs_inode_t *); +void xfs_ireclaim(xfs_inode_t *); +int xfs_finish_reclaim(xfs_inode_t *, int, int); +int xfs_finish_reclaim_all(struct xfs_mount *, int); + +/* + * xfs_inode.c prototypes. + */ +int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, + xfs_dinode_t **, struct xfs_buf **, int *); +int xfs_itobp(struct xfs_mount *, struct xfs_trans *, + xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **, + xfs_daddr_t); +int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, + xfs_inode_t **, xfs_daddr_t); +int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int); +int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, nlink_t, + xfs_dev_t, struct cred *, xfs_prid_t, int, + struct xfs_buf **, boolean_t *, xfs_inode_t **); +void xfs_xlate_dinode_core(xfs_caddr_t, struct xfs_dinode_core *, int, + xfs_arch_t); +int xfs_ifree(struct xfs_trans *, xfs_inode_t *); +int xfs_atruncate_start(xfs_inode_t *); +void xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t); +int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, + xfs_fsize_t, int, int); +int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); +int xfs_igrow_start(xfs_inode_t *, xfs_fsize_t, struct cred *); +void xfs_igrow_finish(struct xfs_trans *, xfs_inode_t *, + xfs_fsize_t, int); + +void xfs_idestroy_fork(xfs_inode_t *, int); +void xfs_idestroy(xfs_inode_t *); +void xfs_idata_realloc(xfs_inode_t *, int, int); +void xfs_iextract(xfs_inode_t *); +void xfs_iext_realloc(xfs_inode_t *, int, int); +void xfs_iroot_realloc(xfs_inode_t *, int, int); +void xfs_ipin(xfs_inode_t *); +void xfs_iunpin(xfs_inode_t *); +int xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_t *, int); +int xfs_iflush(xfs_inode_t *, uint); +int xfs_iflush_all(struct xfs_mount *, int); +int xfs_iaccess(xfs_inode_t *, mode_t, cred_t *); +uint xfs_iroundup(uint); +void xfs_ichgtime(xfs_inode_t *, int); +xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); +void xfs_lock_inodes(xfs_inode_t **, int, int, uint); + +#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) + +#ifdef DEBUG +void xfs_isize_check(struct xfs_mount *, xfs_inode_t *, xfs_fsize_t); +#else /* DEBUG */ +#define xfs_isize_check(mp, ip, isize) +#endif /* DEBUG */ + +#if defined(DEBUG) +void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); +#else +#define xfs_inobp_check(mp, bp) +#endif /* DEBUG */ + +extern struct kmem_zone *xfs_chashlist_zone; +extern struct kmem_zone *xfs_ifork_zone; +extern struct kmem_zone *xfs_inode_zone; +extern struct kmem_zone *xfs_ili_zone; +extern struct vnodeops xfs_vnodeops; + +#ifdef XFS_ILOCK_TRACE +#define XFS_ILOCK_KTRACE_SIZE 32 +void xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, 
+ inst_t *ra); +#endif + +#endif /* __KERNEL__ */ + +#endif /* __XFS_INODE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_inode_item.c linux.22-ac2/fs/xfs/xfs_inode_item.c --- linux.vanilla/fs/xfs/xfs_inode_item.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_inode_item.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,1076 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * This file contains the implementation of the xfs_inode_log_item. + * It contains the item operations used to manipulate the inode log + * items as well as utility routines used by the inode specific + * transaction routines. + */ +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_ag.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_rw.h" + + +kmem_zone_t *xfs_ili_zone; /* inode log item zone */ + +/* + * This returns the number of iovecs needed to log the given inode item. + * + * We need one iovec for the inode log format structure, one for the + * inode core, and possibly one for the inode data/extents/b-tree root + * and one for the inode attribute data/extents/b-tree root. + */ +STATIC uint +xfs_inode_item_size( + xfs_inode_log_item_t *iip) +{ + uint nvecs; + xfs_inode_t *ip; + + ip = iip->ili_inode; + nvecs = 2; + + /* + * Only log the data/extents/b-tree root if there is something + * left to log. 
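+ *
+ * (Example: an inode with a dirty extents-format data fork and no
+ * attribute fork needs three iovecs in total, the log format
+ * structure, the inode core, and the extent list, so the switch
+ * below adds one to the initial count of two.)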
+ */ + iip->ili_format.ilf_fields |= XFS_ILOG_CORE; + + switch (ip->i_d.di_format) { + case XFS_DINODE_FMT_EXTENTS: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEV | XFS_ILOG_UUID); + if ((iip->ili_format.ilf_fields & XFS_ILOG_DEXT) && + (ip->i_d.di_nextents > 0) && + (ip->i_df.if_bytes > 0)) { + ASSERT(ip->i_df.if_u1.if_extents != NULL); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_DEXT; + } + break; + + case XFS_DINODE_FMT_BTREE: + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t)); + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | + XFS_ILOG_DEV | XFS_ILOG_UUID); + if ((iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) && + (ip->i_df.if_broot_bytes > 0)) { + ASSERT(ip->i_df.if_broot != NULL); + nvecs++; + } else { + ASSERT(!(iip->ili_format.ilf_fields & + XFS_ILOG_DBROOT)); +#ifdef XFS_TRANS_DEBUG + if (iip->ili_root_size > 0) { + ASSERT(iip->ili_root_size == + ip->i_df.if_broot_bytes); + ASSERT(memcmp(iip->ili_orig_root, + ip->i_df.if_broot, + iip->ili_root_size) == 0); + } else { + ASSERT(ip->i_df.if_broot_bytes == 0); + } +#endif + iip->ili_format.ilf_fields &= ~XFS_ILOG_DBROOT; + } + break; + + case XFS_DINODE_FMT_LOCAL: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | + XFS_ILOG_DEV | XFS_ILOG_UUID); + if ((iip->ili_format.ilf_fields & XFS_ILOG_DDATA) && + (ip->i_df.if_bytes > 0)) { + ASSERT(ip->i_df.if_u1.if_data != NULL); + ASSERT(ip->i_d.di_size > 0); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_DDATA; + } + break; + + case XFS_DINODE_FMT_DEV: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEXT | XFS_ILOG_UUID); + break; + + case XFS_DINODE_FMT_UUID: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEXT | XFS_ILOG_DEV); + break; + + default: + ASSERT(0); + break; + } + + /* + * If there are no attributes associated with this file, + * then there cannot be anything more to log. + * Clear all attribute-related log flags. + */ + if (!XFS_IFORK_Q(ip)) { + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT); + return nvecs; + } + + /* + * Log any necessary attribute data. + */ + switch (ip->i_d.di_aformat) { + case XFS_DINODE_FMT_EXTENTS: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT); + if ((iip->ili_format.ilf_fields & XFS_ILOG_AEXT) && + (ip->i_d.di_anextents > 0) && + (ip->i_afp->if_bytes > 0)) { + ASSERT(ip->i_afp->if_u1.if_extents != NULL); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_AEXT; + } + break; + + case XFS_DINODE_FMT_BTREE: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_ADATA | XFS_ILOG_AEXT); + if ((iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) && + (ip->i_afp->if_broot_bytes > 0)) { + ASSERT(ip->i_afp->if_broot != NULL); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_ABROOT; + } + break; + + case XFS_DINODE_FMT_LOCAL: + iip->ili_format.ilf_fields &= + ~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT); + if ((iip->ili_format.ilf_fields & XFS_ILOG_ADATA) && + (ip->i_afp->if_bytes > 0)) { + ASSERT(ip->i_afp->if_u1.if_data != NULL); + nvecs++; + } else { + iip->ili_format.ilf_fields &= ~XFS_ILOG_ADATA; + } + break; + + default: + ASSERT(0); + break; + } + + return nvecs; +} + +/* + * This is called to fill in the vector of log iovecs for the + * given inode log item. 
It fills the first item with an inode + * log format structure, the second with the on-disk inode structure, + * and a possible third and/or fourth with the inode data/extents/b-tree + * root and inode attributes data/extents/b-tree root. + */ +STATIC void +xfs_inode_item_format( + xfs_inode_log_item_t *iip, + xfs_log_iovec_t *log_vector) +{ + uint nvecs; + xfs_log_iovec_t *vecp; + xfs_inode_t *ip; + size_t data_bytes; + xfs_bmbt_rec_t *ext_buffer; + int nrecs; + xfs_mount_t *mp; + + ip = iip->ili_inode; + vecp = log_vector; + + vecp->i_addr = (xfs_caddr_t)&iip->ili_format; + vecp->i_len = sizeof(xfs_inode_log_format_t); + vecp++; + nvecs = 1; + + /* + * Clear i_update_core if the timestamps (or any other + * non-transactional modification) need flushing/logging + * and we're about to log them with the rest of the core. + * + * This is the same logic as xfs_iflush() but this code can't + * run at the same time as xfs_iflush because we're in commit + * processing here and so we have the inode lock held in + * exclusive mode. Although it doesn't really matter + * for the timestamps if both routines were to grab the + * timestamps or not. That would be ok. + * + * We clear i_update_core before copying out the data. + * This is for coordination with our timestamp updates + * that don't hold the inode lock. They will always + * update the timestamps BEFORE setting i_update_core, + * so if we clear i_update_core after they set it we + * are guaranteed to see their updates to the timestamps + * either here. Likewise, if they set it after we clear it + * here, we'll see it either on the next commit of this + * inode or the next time the inode gets flushed via + * xfs_iflush(). This depends on strongly ordered memory + * semantics, but we have that. We use the SYNCHRONIZE + * macro to make sure that the compiler does not reorder + * the i_update_core access below the data copy below. + */ + if (ip->i_update_core) { + ip->i_update_core = 0; + SYNCHRONIZE(); + } + + /* + * We don't have to worry about re-ordering here because + * the update_size field is protected by the inode lock + * and we have that held in exclusive mode. + */ + if (ip->i_update_size) + ip->i_update_size = 0; + + vecp->i_addr = (xfs_caddr_t)&ip->i_d; + vecp->i_len = sizeof(xfs_dinode_core_t); + vecp++; + nvecs++; + iip->ili_format.ilf_fields |= XFS_ILOG_CORE; + + /* + * If this is really an old format inode, then we need to + * log it as such. This means that we have to copy the link + * count from the new field to the old. We don't have to worry + * about the new fields, because nothing trusts them as long as + * the old inode version number is there. If the superblock already + * has a new version number, then we don't bother converting back. + */ + mp = ip->i_mount; + ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 || + XFS_SB_VERSION_HASNLINK(&mp->m_sb)); + if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { + if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { + /* + * Convert it back. + */ + ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); + ip->i_d.di_onlink = ip->i_d.di_nlink; + } else { + /* + * The superblock version has already been bumped, + * so just make the conversion to the new inode + * format permanent. 
+ */ + ip->i_d.di_version = XFS_DINODE_VERSION_2; + ip->i_d.di_onlink = 0; + memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); + } + } + + switch (ip->i_d.di_format) { + case XFS_DINODE_FMT_EXTENTS: + ASSERT(!(iip->ili_format.ilf_fields & + (XFS_ILOG_DDATA | XFS_ILOG_DBROOT | + XFS_ILOG_DEV | XFS_ILOG_UUID))); + if (iip->ili_format.ilf_fields & XFS_ILOG_DEXT) { + ASSERT(ip->i_df.if_bytes > 0); + ASSERT(ip->i_df.if_u1.if_extents != NULL); + ASSERT(ip->i_d.di_nextents > 0); + ASSERT(iip->ili_extents_buf == NULL); + nrecs = ip->i_df.if_bytes / + (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(nrecs > 0); +#if ARCH_CONVERT == ARCH_NOCONVERT + if (nrecs == ip->i_d.di_nextents) { + /* + * There are no delayed allocation + * extents, so just point to the + * real extents array. + */ + vecp->i_addr = + (char *)(ip->i_df.if_u1.if_extents); + vecp->i_len = ip->i_df.if_bytes; + } else +#endif + { + /* + * There are delayed allocation extents + * in the inode, or we need to convert + * the extents to on disk format. + * Use xfs_iextents_copy() + * to copy only the real extents into + * a separate buffer. We'll free the + * buffer in the unlock routine. + */ + ext_buffer = kmem_alloc(ip->i_df.if_bytes, + KM_SLEEP); + iip->ili_extents_buf = ext_buffer; + vecp->i_addr = (xfs_caddr_t)ext_buffer; + vecp->i_len = xfs_iextents_copy(ip, ext_buffer, + XFS_DATA_FORK); + } + ASSERT(vecp->i_len <= ip->i_df.if_bytes); + iip->ili_format.ilf_dsize = vecp->i_len; + vecp++; + nvecs++; + } + break; + + case XFS_DINODE_FMT_BTREE: + ASSERT(!(iip->ili_format.ilf_fields & + (XFS_ILOG_DDATA | XFS_ILOG_DEXT | + XFS_ILOG_DEV | XFS_ILOG_UUID))); + if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) { + ASSERT(ip->i_df.if_broot_bytes > 0); + ASSERT(ip->i_df.if_broot != NULL); + vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot; + vecp->i_len = ip->i_df.if_broot_bytes; + vecp++; + nvecs++; + iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes; + } + break; + + case XFS_DINODE_FMT_LOCAL: + ASSERT(!(iip->ili_format.ilf_fields & + (XFS_ILOG_DBROOT | XFS_ILOG_DEXT | + XFS_ILOG_DEV | XFS_ILOG_UUID))); + if (iip->ili_format.ilf_fields & XFS_ILOG_DDATA) { + ASSERT(ip->i_df.if_bytes > 0); + ASSERT(ip->i_df.if_u1.if_data != NULL); + ASSERT(ip->i_d.di_size > 0); + + vecp->i_addr = (xfs_caddr_t)ip->i_df.if_u1.if_data; + /* + * Round i_bytes up to a word boundary. + * The underlying memory is guaranteed to + * to be there by xfs_idata_realloc(). + */ + data_bytes = roundup(ip->i_df.if_bytes, 4); + ASSERT((ip->i_df.if_real_bytes == 0) || + (ip->i_df.if_real_bytes == data_bytes)); + vecp->i_len = (int)data_bytes; + vecp++; + nvecs++; + iip->ili_format.ilf_dsize = (unsigned)data_bytes; + } + break; + + case XFS_DINODE_FMT_DEV: + ASSERT(!(iip->ili_format.ilf_fields & + (XFS_ILOG_DBROOT | XFS_ILOG_DEXT | + XFS_ILOG_DDATA | XFS_ILOG_UUID))); + if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { + iip->ili_format.ilf_u.ilfu_rdev = + ip->i_df.if_u2.if_rdev; + } + break; + + case XFS_DINODE_FMT_UUID: + ASSERT(!(iip->ili_format.ilf_fields & + (XFS_ILOG_DBROOT | XFS_ILOG_DEXT | + XFS_ILOG_DDATA | XFS_ILOG_DEV))); + if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { + iip->ili_format.ilf_u.ilfu_uuid = + ip->i_df.if_u2.if_uuid; + } + break; + + default: + ASSERT(0); + break; + } + + /* + * If there are no attributes associated with the file, + * then we're done. + * Assert that no attribute-related log flags are set. 
+ */ + if (!XFS_IFORK_Q(ip)) { + ASSERT(nvecs == iip->ili_item.li_desc->lid_size); + iip->ili_format.ilf_size = nvecs; + ASSERT(!(iip->ili_format.ilf_fields & + (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT))); + return; + } + + switch (ip->i_d.di_aformat) { + case XFS_DINODE_FMT_EXTENTS: + ASSERT(!(iip->ili_format.ilf_fields & + (XFS_ILOG_ADATA | XFS_ILOG_ABROOT))); + if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) { + ASSERT(ip->i_afp->if_bytes > 0); + ASSERT(ip->i_afp->if_u1.if_extents != NULL); + ASSERT(ip->i_d.di_anextents > 0); +#ifdef DEBUG + nrecs = ip->i_afp->if_bytes / + (uint)sizeof(xfs_bmbt_rec_t); +#endif + ASSERT(nrecs > 0); + ASSERT(nrecs == ip->i_d.di_anextents); +#if ARCH_CONVERT == ARCH_NOCONVERT + /* + * There are not delayed allocation extents + * for attributes, so just point at the array. + */ + vecp->i_addr = (char *)(ip->i_afp->if_u1.if_extents); + vecp->i_len = ip->i_afp->if_bytes; +#else + ASSERT(iip->ili_aextents_buf == NULL); + /* + * Need to endian flip before logging + */ + ext_buffer = kmem_alloc(ip->i_afp->if_bytes, + KM_SLEEP); + iip->ili_aextents_buf = ext_buffer; + vecp->i_addr = (xfs_caddr_t)ext_buffer; + vecp->i_len = xfs_iextents_copy(ip, ext_buffer, + XFS_ATTR_FORK); +#endif + iip->ili_format.ilf_asize = vecp->i_len; + vecp++; + nvecs++; + } + break; + + case XFS_DINODE_FMT_BTREE: + ASSERT(!(iip->ili_format.ilf_fields & + (XFS_ILOG_ADATA | XFS_ILOG_AEXT))); + if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) { + ASSERT(ip->i_afp->if_broot_bytes > 0); + ASSERT(ip->i_afp->if_broot != NULL); + vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot; + vecp->i_len = ip->i_afp->if_broot_bytes; + vecp++; + nvecs++; + iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes; + } + break; + + case XFS_DINODE_FMT_LOCAL: + ASSERT(!(iip->ili_format.ilf_fields & + (XFS_ILOG_ABROOT | XFS_ILOG_AEXT))); + if (iip->ili_format.ilf_fields & XFS_ILOG_ADATA) { + ASSERT(ip->i_afp->if_bytes > 0); + ASSERT(ip->i_afp->if_u1.if_data != NULL); + + vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_u1.if_data; + /* + * Round i_bytes up to a word boundary. + * The underlying memory is guaranteed to + * to be there by xfs_idata_realloc(). + */ + data_bytes = roundup(ip->i_afp->if_bytes, 4); + ASSERT((ip->i_afp->if_real_bytes == 0) || + (ip->i_afp->if_real_bytes == data_bytes)); + vecp->i_len = (int)data_bytes; + vecp++; + nvecs++; + iip->ili_format.ilf_asize = (unsigned)data_bytes; + } + break; + + default: + ASSERT(0); + break; + } + + ASSERT(nvecs == iip->ili_item.li_desc->lid_size); + iip->ili_format.ilf_size = nvecs; +} + + +/* + * This is called to pin the inode associated with the inode log + * item in memory so it cannot be written out. Do this by calling + * xfs_ipin() to bump the pin count in the inode while holding the + * inode pin lock. + */ +STATIC void +xfs_inode_item_pin( + xfs_inode_log_item_t *iip) +{ + ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE)); + xfs_ipin(iip->ili_inode); +} + + +/* + * This is called to unpin the inode associated with the inode log + * item which was previously pinned with a call to xfs_inode_item_pin(). + * Just call xfs_iunpin() on the inode to do this. 
+ */ +/* ARGSUSED */ +STATIC void +xfs_inode_item_unpin( + xfs_inode_log_item_t *iip, + int stale) +{ + xfs_iunpin(iip->ili_inode); +} + +/* ARGSUSED */ +STATIC void +xfs_inode_item_unpin_remove( + xfs_inode_log_item_t *iip, + xfs_trans_t *tp) +{ + xfs_iunpin(iip->ili_inode); +} + +/* + * This is called to attempt to lock the inode associated with this + * inode log item, in preparation for the push routine which does the actual + * iflush. Don't sleep on the inode lock or the flush lock. + * + * If the flush lock is already held, indicating that the inode has + * been or is in the process of being flushed, then (ideally) we'd like to + * see if the inode's buffer is still incore, and if so give it a nudge. + * We delay doing so until the pushbuf routine, though, to avoid holding + * the AIL lock across a call to the blackhole which is the buffercache. + * Also we don't want to sleep in any device strategy routines, which can happen + * if we do the subsequent bawrite in here. + */ +STATIC uint +xfs_inode_item_trylock( + xfs_inode_log_item_t *iip) +{ + register xfs_inode_t *ip; + + ip = iip->ili_inode; + + if (xfs_ipincount(ip) > 0) { + return XFS_ITEM_PINNED; + } + + if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { + return XFS_ITEM_LOCKED; + } + + if (!xfs_iflock_nowait(ip)) { + /* + * If someone else isn't already trying to push the inode + * buffer, we get to do it. + */ + if (iip->ili_pushbuf_flag == 0) { + iip->ili_pushbuf_flag = 1; +#ifdef DEBUG + iip->ili_push_owner = get_thread_id(); +#endif + /* + * Inode is left locked in shared mode. + * Pushbuf routine gets to unlock it. + */ + return XFS_ITEM_PUSHBUF; + } else { + /* + * We hold the AIL_LOCK, so we must specify the + * NONOTIFY flag so that we won't double trip. + */ + xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY); + return XFS_ITEM_FLUSHING; + } + /* NOTREACHED */ + } +#ifdef DEBUG + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { + ASSERT(iip->ili_format.ilf_fields != 0); + ASSERT(iip->ili_logged == 0); + ASSERT(iip->ili_item.li_flags & XFS_LI_IN_AIL); + } +#endif + return XFS_ITEM_SUCCESS; +} + +/* + * Unlock the inode associated with the inode log item. + * Clear the fields of the inode and inode log item that + * are specific to the current transaction. If the + * hold flags is set, do not unlock the inode. + */ +STATIC void +xfs_inode_item_unlock( + xfs_inode_log_item_t *iip) +{ + uint hold; + uint iolocked; + uint lock_flags; + xfs_inode_t *ip; + + ASSERT(iip != NULL); + ASSERT(iip->ili_inode->i_itemp != NULL); + ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE)); + ASSERT((!(iip->ili_inode->i_itemp->ili_flags & + XFS_ILI_IOLOCKED_EXCL)) || + ismrlocked(&(iip->ili_inode->i_iolock), MR_UPDATE)); + ASSERT((!(iip->ili_inode->i_itemp->ili_flags & + XFS_ILI_IOLOCKED_SHARED)) || + ismrlocked(&(iip->ili_inode->i_iolock), MR_ACCESS)); + /* + * Clear the transaction pointer in the inode. + */ + ip = iip->ili_inode; + ip->i_transp = NULL; + + /* + * If the inode needed a separate buffer with which to log + * its extents, then free it now. 
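+ *
+ * (These are the buffers xfs_inode_item_format() above allocated
+ * when delayed allocation extents, or the need for an on-disk
+ * endian conversion, kept it from pointing the log vector straight
+ * at the incore extent list.)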
+ */ + if (iip->ili_extents_buf != NULL) { + ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS); + ASSERT(ip->i_d.di_nextents > 0); + ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT); + ASSERT(ip->i_df.if_bytes > 0); + kmem_free(iip->ili_extents_buf, ip->i_df.if_bytes); + iip->ili_extents_buf = NULL; + } + if (iip->ili_aextents_buf != NULL) { + ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS); + ASSERT(ip->i_d.di_anextents > 0); + ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT); + ASSERT(ip->i_afp->if_bytes > 0); + kmem_free(iip->ili_aextents_buf, ip->i_afp->if_bytes); + iip->ili_aextents_buf = NULL; + } + + /* + * Figure out if we should unlock the inode or not. + */ + hold = iip->ili_flags & XFS_ILI_HOLD; + + /* + * Before clearing out the flags, remember whether we + * are holding the inode's IO lock. + */ + iolocked = iip->ili_flags & XFS_ILI_IOLOCKED_ANY; + + /* + * Clear out the fields of the inode log item particular + * to the current transaction. + */ + iip->ili_ilock_recur = 0; + iip->ili_iolock_recur = 0; + iip->ili_flags = 0; + + /* + * Unlock the inode if XFS_ILI_HOLD was not set. + */ + if (!hold) { + lock_flags = XFS_ILOCK_EXCL; + if (iolocked & XFS_ILI_IOLOCKED_EXCL) { + lock_flags |= XFS_IOLOCK_EXCL; + } else if (iolocked & XFS_ILI_IOLOCKED_SHARED) { + lock_flags |= XFS_IOLOCK_SHARED; + } + xfs_iput(iip->ili_inode, lock_flags); + } +} + +/* + * This is called to find out where the oldest active copy of the + * inode log item in the on disk log resides now that the last log + * write of it completed at the given lsn. Since we always re-log + * all dirty data in an inode, the latest copy in the on disk log + * is the only one that matters. Therefore, simply return the + * given lsn. + */ +/*ARGSUSED*/ +STATIC xfs_lsn_t +xfs_inode_item_committed( + xfs_inode_log_item_t *iip, + xfs_lsn_t lsn) +{ + return (lsn); +} + +/* + * The transaction with the inode locked has aborted. The inode + * must not be dirty within the transaction (unless we're forcibly + * shutting down). We simply unlock just as if the transaction + * had been cancelled. + */ +STATIC void +xfs_inode_item_abort( + xfs_inode_log_item_t *iip) +{ + xfs_inode_item_unlock(iip); + return; +} + + +/* + * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK + * failed to get the inode flush lock but did get the inode locked SHARED. + * Here we're trying to see if the inode buffer is incore, and if so whether it's + * marked delayed write. If that's the case, we'll initiate a bawrite on that + * buffer to expedite the process. + * + * We aren't holding the AIL_LOCK (or the flush lock) when this gets called, + * so it is inherently race-y. + */ +STATIC void +xfs_inode_item_pushbuf( + xfs_inode_log_item_t *iip) +{ + xfs_inode_t *ip; + xfs_mount_t *mp; + xfs_buf_t *bp; + uint dopush; + + ip = iip->ili_inode; + + ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS)); + + /* + * The ili_pushbuf_flag keeps others from + * trying to duplicate our effort. + */ + ASSERT(iip->ili_pushbuf_flag != 0); + ASSERT(iip->ili_push_owner == get_thread_id()); + + /* + * If flushlock isn't locked anymore, chances are that the + * inode flush completed and the inode was taken off the AIL. + * So, just get out. 
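+ * (valusema() > 0 simply means the flush-lock semaphore is free,
+ * i.e. nobody holds the flush lock any longer.)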
+ */ + if ((valusema(&(ip->i_flock)) > 0) || + ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { + iip->ili_pushbuf_flag = 0; + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return; + } + + mp = ip->i_mount; + bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno, + iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK); + + if (bp != NULL) { + if (XFS_BUF_ISDELAYWRITE(bp)) { + /* + * We were racing with iflush because we don't hold + * the AIL_LOCK or the flush lock. However, at this point, + * we have the buffer, and we know that it's dirty. + * So, it's possible that iflush raced with us, and + * this item is already taken off the AIL. + * If not, we can flush it async. + */ + dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) && + (valusema(&(ip->i_flock)) <= 0)); + iip->ili_pushbuf_flag = 0; + xfs_iunlock(ip, XFS_ILOCK_SHARED); + xfs_buftrace("INODE ITEM PUSH", bp); + if (XFS_BUF_ISPINNED(bp)) { + xfs_log_force(mp, (xfs_lsn_t)0, + XFS_LOG_FORCE); + } + if (dopush) { + xfs_bawrite(mp, bp); + } else { + xfs_buf_relse(bp); + } + } else { + iip->ili_pushbuf_flag = 0; + xfs_iunlock(ip, XFS_ILOCK_SHARED); + xfs_buf_relse(bp); + } + return; + } + /* + * We have to be careful about resetting pushbuf flag too early (above). + * Even though in theory we can do it as soon as we have the buflock, + * we don't want others to be doing work needlessly. They'll come to + * this function thinking that pushing the buffer is their + * responsibility only to find that the buffer is still locked by + * another doing the same thing + */ + iip->ili_pushbuf_flag = 0; + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return; +} + + +/* + * This is called to asynchronously write the inode associated with this + * inode log item out to disk. The inode will already have been locked by + * a successful call to xfs_inode_item_trylock(). + */ +STATIC void +xfs_inode_item_push( + xfs_inode_log_item_t *iip) +{ + xfs_inode_t *ip; + + ip = iip->ili_inode; + + ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS)); + ASSERT(valusema(&(ip->i_flock)) <= 0); + /* + * Since we were able to lock the inode's flush lock and + * we found it on the AIL, the inode must be dirty. This + * is because the inode is removed from the AIL while still + * holding the flush lock in xfs_iflush_done(). Thus, if + * we found it in the AIL and were able to obtain the flush + * lock without sleeping, then there must not have been + * anyone in the process of flushing the inode. + */ + ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || + iip->ili_format.ilf_fields != 0); + + /* + * Write out the inode. The completion routine ('iflush_done') will + * pull it from the AIL, mark it clean, unlock the flush lock. + */ + (void) xfs_iflush(ip, XFS_IFLUSH_DELWRI); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + return; +} + +/* + * XXX rcc - this one really has to do something. Probably needs + * to stamp in a new field in the incore inode. + */ +/* ARGSUSED */ +STATIC void +xfs_inode_item_committing( + xfs_inode_log_item_t *iip, + xfs_lsn_t lsn) +{ + iip->ili_last_lsn = lsn; + return; +} + +/* + * This is the ops vector shared by all buf log items. 
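+ * (More precisely: the ops vector shared by all inode log items; the
+ * wording above looks like it was carried over from the buffer item code.)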
+ */ +struct xfs_item_ops xfs_inode_item_ops = { + .iop_size = (uint(*)(xfs_log_item_t*))xfs_inode_item_size, + .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) + xfs_inode_item_format, + .iop_pin = (void(*)(xfs_log_item_t*))xfs_inode_item_pin, + .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_inode_item_unpin, + .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) + xfs_inode_item_unpin_remove, + .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock, + .iop_unlock = (void(*)(xfs_log_item_t*))xfs_inode_item_unlock, + .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_inode_item_committed, + .iop_push = (void(*)(xfs_log_item_t*))xfs_inode_item_push, + .iop_abort = (void(*)(xfs_log_item_t*))xfs_inode_item_abort, + .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_inode_item_pushbuf, + .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) + xfs_inode_item_committing +}; + + +/* + * Initialize the inode log item for a newly allocated (in-core) inode. + */ +void +xfs_inode_item_init( + xfs_inode_t *ip, + xfs_mount_t *mp) +{ + xfs_inode_log_item_t *iip; + + ASSERT(ip->i_itemp == NULL); + iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP); + + iip->ili_item.li_type = XFS_LI_INODE; + iip->ili_item.li_ops = &xfs_inode_item_ops; + iip->ili_item.li_mountp = mp; + iip->ili_inode = ip; + + /* + We have zeroed memory. No need ... + iip->ili_extents_buf = NULL; + iip->ili_pushbuf_flag = 0; + */ + + iip->ili_format.ilf_type = XFS_LI_INODE; + iip->ili_format.ilf_ino = ip->i_ino; + iip->ili_format.ilf_blkno = ip->i_blkno; + iip->ili_format.ilf_len = ip->i_len; + iip->ili_format.ilf_boffset = ip->i_boffset; +} + +/* + * Free the inode log item and any memory hanging off of it. + */ +void +xfs_inode_item_destroy( + xfs_inode_t *ip) +{ +#ifdef XFS_TRANS_DEBUG + if (ip->i_itemp->ili_root_size != 0) { + kmem_free(ip->i_itemp->ili_orig_root, + ip->i_itemp->ili_root_size); + } +#endif + kmem_zone_free(xfs_ili_zone, ip->i_itemp); +} + + +/* + * This is the inode flushing I/O completion routine. It is called + * from interrupt level when the buffer containing the inode is + * flushed to disk. It is responsible for removing the inode item + * from the AIL if it has not been re-logged, and unlocking the inode's + * flush lock. + */ +/*ARGSUSED*/ +void +xfs_iflush_done( + xfs_buf_t *bp, + xfs_inode_log_item_t *iip) +{ + xfs_inode_t *ip; + SPLDECL(s); + + ip = iip->ili_inode; + + /* + * We only want to pull the item from the AIL if it is + * actually there and its location in the log has not + * changed since we started the flush. Thus, we only bother + * if the ili_logged flag is set and the inode's lsn has not + * changed. First we check the lsn outside + * the lock since it's cheaper, and then we recheck while + * holding the lock before removing the inode from the AIL. + */ + if (iip->ili_logged && + (iip->ili_item.li_lsn == iip->ili_flush_lsn)) { + AIL_LOCK(ip->i_mount, s); + if (iip->ili_item.li_lsn == iip->ili_flush_lsn) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(ip->i_mount, + (xfs_log_item_t*)iip, s); + } else { + AIL_UNLOCK(ip->i_mount, s); + } + } + + iip->ili_logged = 0; + + /* + * Clear the ili_last_fields bits now that we know that the + * data corresponding to them is safely on disk. + */ + iip->ili_last_fields = 0; + + /* + * Release the inode's flush lock since we're done with it. + */ + xfs_ifunlock(ip); + + return; +} + +/* + * This is the inode flushing abort routine. 
It is called + * from xfs_iflush when the filesystem is shutting down to clean + * up the inode state. + * It is responsible for removing the inode item + * from the AIL if it has not been re-logged, and unlocking the inode's + * flush lock. + */ +void +xfs_iflush_abort( + xfs_inode_t *ip) +{ + xfs_inode_log_item_t *iip; + xfs_mount_t *mp; + SPLDECL(s); + + iip = ip->i_itemp; + mp = ip->i_mount; + if (iip) { + if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { + AIL_LOCK(mp, s); + if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { + /* + * xfs_trans_delete_ail() drops the AIL lock. + */ + xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip, + s); + } else + AIL_UNLOCK(mp, s); + } + iip->ili_logged = 0; + /* + * Clear the ili_last_fields bits now that we know that the + * data corresponding to them is safely on disk. + */ + iip->ili_last_fields = 0; + /* + * Clear the inode logging fields so no more flushes are + * attempted. + */ + iip->ili_format.ilf_fields = 0; + } + /* + * Release the inode's flush lock since we're done with it. + */ + xfs_ifunlock(ip); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_inode_item.h linux.22-ac2/fs/xfs/xfs_inode_item.h --- linux.vanilla/fs/xfs/xfs_inode_item.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_inode_item.h 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_INODE_ITEM_H__ +#define __XFS_INODE_ITEM_H__ + +/* + * This is the structure used to lay out an inode log item in the + * log. The size of the inline data/extents/b-tree root to be logged + * (if any) is indicated in the ilf_dsize field. Changes to this structure + * must be added on to the end. + * + * Convention for naming inode log item versions : The current version + * is always named XFS_LI_INODE. When an inode log item gets superseded, + * add the latest version of IRIX that will generate logs with that item + * to the version name. + * + * -Version 1 of this structure (XFS_LI_5_3_INODE) included up to the first + * union (ilf_u) field. This was released with IRIX 5.3-XFS. + * -Version 2 of this structure (XFS_LI_6_1_INODE) is currently the entire + * structure. 
This was released with IRIX 6.0.1-XFS and IRIX 6.1. + * -Version 3 of this structure (XFS_LI_INODE) is the same as version 2 + * so a new structure definition wasn't necessary. However, we had + * to add a new type because the inode cluster size changed from 4K + * to 8K and the version number had to be rev'ved to keep older kernels + * from trying to recover logs with the 8K buffers in them. The logging + * code can handle recovery on different-sized clusters now so hopefully + * this'll be the last time we need to change the inode log item just + * for a change in the inode cluster size. This new version was + * released with IRIX 6.2. + */ +typedef struct xfs_inode_log_format { + unsigned short ilf_type; /* inode log item type */ + unsigned short ilf_size; /* size of this item */ + uint ilf_fields; /* flags for fields logged */ + ushort ilf_asize; /* size of attr d/ext/root */ + ushort ilf_dsize; /* size of data/ext/root */ + xfs_ino_t ilf_ino; /* inode number */ + union { + xfs_dev_t ilfu_rdev; /* rdev value for dev inode*/ + uuid_t ilfu_uuid; /* mount point value */ + } ilf_u; + __int64_t ilf_blkno; /* blkno of inode buffer */ + int ilf_len; /* len of inode buffer */ + int ilf_boffset; /* off of inode in buffer */ +} xfs_inode_log_format_t; + +/* Initial version shipped with IRIX 5.3-XFS */ +typedef struct xfs_inode_log_format_v1 { + unsigned short ilf_type; /* inode log item type */ + unsigned short ilf_size; /* size of this item */ + uint ilf_fields; /* flags for fields logged */ + uint ilf_dsize; /* size of data/ext/root */ + xfs_ino_t ilf_ino; /* inode number */ + union { + xfs_dev_t ilfu_rdev; /* rdev value for dev inode*/ + uuid_t ilfu_uuid; /* mount point value */ + } ilf_u; +} xfs_inode_log_format_t_v1; + +/* + * Flags for xfs_trans_log_inode flags field. 
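+ *
+ * A caller that has changed both the core inode and its inline data fork
+ * would typically log it as (illustrative editorial example):
+ *
+ *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE | XFS_ILOG_DDATA);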
+ */ +#define XFS_ILOG_CORE 0x001 /* log standard inode fields */ +#define XFS_ILOG_DDATA 0x002 /* log i_df.if_data */ +#define XFS_ILOG_DEXT 0x004 /* log i_df.if_extents */ +#define XFS_ILOG_DBROOT 0x008 /* log i_df.i_broot */ +#define XFS_ILOG_DEV 0x010 /* log the dev field */ +#define XFS_ILOG_UUID 0x020 /* log the uuid field */ +#define XFS_ILOG_ADATA 0x040 /* log i_af.if_data */ +#define XFS_ILOG_AEXT 0x080 /* log i_af.if_extents */ +#define XFS_ILOG_ABROOT 0x100 /* log i_af.i_broot */ + +#define XFS_ILOG_NONCORE (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \ + XFS_ILOG_DBROOT | XFS_ILOG_DEV | \ + XFS_ILOG_UUID | XFS_ILOG_ADATA | \ + XFS_ILOG_AEXT | XFS_ILOG_ABROOT) + +#define XFS_ILOG_DFORK (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \ + XFS_ILOG_DBROOT) + +#define XFS_ILOG_AFORK (XFS_ILOG_ADATA | XFS_ILOG_AEXT | \ + XFS_ILOG_ABROOT) + +#define XFS_ILOG_ALL (XFS_ILOG_CORE | XFS_ILOG_DDATA | \ + XFS_ILOG_DEXT | XFS_ILOG_DBROOT | \ + XFS_ILOG_DEV | XFS_ILOG_UUID | \ + XFS_ILOG_ADATA | XFS_ILOG_AEXT | \ + XFS_ILOG_ABROOT) + +#define XFS_ILI_HOLD 0x1 +#define XFS_ILI_IOLOCKED_EXCL 0x2 +#define XFS_ILI_IOLOCKED_SHARED 0x4 + +#define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED) + + +#ifdef __KERNEL__ + +struct xfs_buf; +struct xfs_bmbt_rec_64; +struct xfs_inode; +struct xfs_mount; + + +typedef struct xfs_inode_log_item { + xfs_log_item_t ili_item; /* common portion */ + struct xfs_inode *ili_inode; /* inode ptr */ + xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ + xfs_lsn_t ili_last_lsn; /* lsn at last transaction */ + unsigned short ili_ilock_recur; /* lock recursion count */ + unsigned short ili_iolock_recur; /* lock recursion count */ + unsigned short ili_flags; /* misc flags */ + unsigned short ili_logged; /* flushed logged data */ + unsigned int ili_last_fields; /* fields when flushed */ + struct xfs_bmbt_rec_64 *ili_extents_buf; /* array of logged + data exts */ + struct xfs_bmbt_rec_64 *ili_aextents_buf; /* array of logged + attr exts */ + unsigned int ili_pushbuf_flag; /* one bit used in push_ail */ + +#ifdef DEBUG + uint64_t ili_push_owner; /* one who sets pushbuf_flag + above gets to push the buf */ +#endif +#ifdef XFS_TRANS_DEBUG + int ili_root_size; + char *ili_orig_root; +#endif + xfs_inode_log_format_t ili_format; /* logged structure */ +} xfs_inode_log_item_t; + + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ILOG_FDATA) +int xfs_ilog_fdata(int w); +#define XFS_ILOG_FDATA(w) xfs_ilog_fdata(w) +#else +#define XFS_ILOG_FDATA(w) \ + ((w) == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA) +#endif + +#endif /* __KERNEL__ */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ILOG_FBROOT) +int xfs_ilog_fbroot(int w); +#define XFS_ILOG_FBROOT(w) xfs_ilog_fbroot(w) +#else +#define XFS_ILOG_FBROOT(w) \ + ((w) == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ILOG_FEXT) +int xfs_ilog_fext(int w); +#define XFS_ILOG_FEXT(w) xfs_ilog_fext(w) +#else +#define XFS_ILOG_FEXT(w) \ + ((w) == XFS_DATA_FORK ? 
XFS_ILOG_DEXT : XFS_ILOG_AEXT) +#endif + +#ifdef __KERNEL__ + +void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); +void xfs_inode_item_destroy(struct xfs_inode *); +void xfs_iflush_done(struct xfs_buf *, xfs_inode_log_item_t *); +void xfs_iflush_abort(struct xfs_inode *); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_INODE_ITEM_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_inum.h linux.22-ac2/fs/xfs/xfs_inum.h --- linux.vanilla/fs/xfs/xfs_inum.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_inum.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_INUM_H__ +#define __XFS_INUM_H__ + +/* + * Inode number format: + * low inopblog bits - offset in block + * next agblklog bits - block number in ag + * next agno_log bits - ag number + * high agno_log-agblklog-inopblog bits - 0 + */ + +typedef __uint32_t xfs_agino_t; /* within allocation grp inode number */ + +/* + * Useful inode bits for this kernel. + * Used in some places where having 64-bits in the 32-bit kernels + * costs too much. 
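+ *
+ * (Editorial worked example of the inode number layout described above,
+ * with hypothetical sb_inopblog = 4 and sb_agblklog = 16: inode number
+ * 0x2345678 decomposes into offset 0x8 within its block, block 0x4567
+ * within its AG, and AG number 0x23; the AG-relative inode number is
+ * the low 20 bits, 0x45678.)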
+ */ +#if XFS_BIG_FILESYSTEMS +typedef xfs_ino_t xfs_intino_t; +#else +typedef __uint32_t xfs_intino_t; +#endif + +#define NULLFSINO ((xfs_ino_t)-1) +#define NULLAGINO ((xfs_agino_t)-1) + +struct xfs_mount; + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_MASK) +__uint32_t xfs_ino_mask(int k); +#define XFS_INO_MASK(k) xfs_ino_mask(k) +#else +#define XFS_INO_MASK(k) ((__uint32_t)((1ULL << (k)) - 1)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_OFFSET_BITS) +int xfs_ino_offset_bits(struct xfs_mount *mp); +#define XFS_INO_OFFSET_BITS(mp) xfs_ino_offset_bits(mp) +#else +#define XFS_INO_OFFSET_BITS(mp) ((mp)->m_sb.sb_inopblog) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_AGBNO_BITS) +int xfs_ino_agbno_bits(struct xfs_mount *mp); +#define XFS_INO_AGBNO_BITS(mp) xfs_ino_agbno_bits(mp) +#else +#define XFS_INO_AGBNO_BITS(mp) ((mp)->m_sb.sb_agblklog) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_AGINO_BITS) +int xfs_ino_agino_bits(struct xfs_mount *mp); +#define XFS_INO_AGINO_BITS(mp) xfs_ino_agino_bits(mp) +#else +#define XFS_INO_AGINO_BITS(mp) ((mp)->m_agino_log) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_AGNO_BITS) +int xfs_ino_agno_bits(struct xfs_mount *mp); +#define XFS_INO_AGNO_BITS(mp) xfs_ino_agno_bits(mp) +#else +#define XFS_INO_AGNO_BITS(mp) ((mp)->m_agno_log) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_BITS) +int xfs_ino_bits(struct xfs_mount *mp); +#define XFS_INO_BITS(mp) xfs_ino_bits(mp) +#else +#define XFS_INO_BITS(mp) (XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_AGNO) +xfs_agnumber_t xfs_ino_to_agno(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_AGNO(mp,i) xfs_ino_to_agno(mp,i) +#else +#define XFS_INO_TO_AGNO(mp,i) \ + ((xfs_agnumber_t)((i) >> XFS_INO_AGINO_BITS(mp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_AGINO) +xfs_agino_t xfs_ino_to_agino(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_AGINO(mp,i) xfs_ino_to_agino(mp,i) +#else +#define XFS_INO_TO_AGINO(mp,i) \ + ((xfs_agino_t)(i) & XFS_INO_MASK(XFS_INO_AGINO_BITS(mp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_AGBNO) +xfs_agblock_t xfs_ino_to_agbno(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_AGBNO(mp,i) xfs_ino_to_agbno(mp,i) +#else +#define XFS_INO_TO_AGBNO(mp,i) \ + (((xfs_agblock_t)(i) >> XFS_INO_OFFSET_BITS(mp)) & \ + XFS_INO_MASK(XFS_INO_AGBNO_BITS(mp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_OFFSET) +int xfs_ino_to_offset(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_OFFSET(mp,i) xfs_ino_to_offset(mp,i) +#else +#define XFS_INO_TO_OFFSET(mp,i) \ + ((int)(i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INO_TO_FSB) +xfs_fsblock_t xfs_ino_to_fsb(struct xfs_mount *mp, xfs_ino_t i); +#define XFS_INO_TO_FSB(mp,i) xfs_ino_to_fsb(mp,i) +#else +#define XFS_INO_TO_FSB(mp,i) \ + XFS_AGB_TO_FSB(mp, XFS_INO_TO_AGNO(mp,i), XFS_INO_TO_AGBNO(mp,i)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGINO_TO_INO) +xfs_ino_t +xfs_agino_to_ino(struct xfs_mount *mp, xfs_agnumber_t a, xfs_agino_t i); +#define XFS_AGINO_TO_INO(mp,a,i) xfs_agino_to_ino(mp,a,i) +#else +#define XFS_AGINO_TO_INO(mp,a,i) \ + (((xfs_ino_t)(a) << XFS_INO_AGINO_BITS(mp)) | (i)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGINO_TO_AGBNO) +xfs_agblock_t xfs_agino_to_agbno(struct xfs_mount *mp, 
xfs_agino_t i); +#define XFS_AGINO_TO_AGBNO(mp,i) xfs_agino_to_agbno(mp,i) +#else +#define XFS_AGINO_TO_AGBNO(mp,i) ((i) >> XFS_INO_OFFSET_BITS(mp)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_AGINO_TO_OFFSET) +int xfs_agino_to_offset(struct xfs_mount *mp, xfs_agino_t i); +#define XFS_AGINO_TO_OFFSET(mp,i) xfs_agino_to_offset(mp,i) +#else +#define XFS_AGINO_TO_OFFSET(mp,i) \ + ((i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_OFFBNO_TO_AGINO) +xfs_agino_t xfs_offbno_to_agino(struct xfs_mount *mp, xfs_agblock_t b, int o); +#define XFS_OFFBNO_TO_AGINO(mp,b,o) xfs_offbno_to_agino(mp,b,o) +#else +#define XFS_OFFBNO_TO_AGINO(mp,b,o) \ + ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o))) +#endif + +#if XFS_BIG_FILESYSTEMS +#define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL)) +#define XFS_INO64_OFFSET ((xfs_ino_t)(1ULL << 32)) +#else +#define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 32) - 1ULL)) +#endif +#define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL)) + +#endif /* __XFS_INUM_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_iocore.c linux.22-ac2/fs/xfs/xfs_iocore.c --- linux.vanilla/fs/xfs/xfs_iocore.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_iocore.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_itable.h" +#include "xfs_btree.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_error.h" +#include "xfs_bit.h" +#include "xfs_rw.h" +#include "xfs_quota.h" +#include "xfs_trans_space.h" +#include "xfs_dmapi.h" + + +STATIC xfs_fsize_t +xfs_size_fn( + xfs_inode_t *ip) +{ + return (ip->i_d.di_size); +} + +STATIC int +xfs_ioinit( + struct vfs *vfsp, + struct xfs_mount_args *mntargs, + int flags) +{ + return xfs_mountfs(vfsp, XFS_VFSTOM(vfsp), + vfsp->vfs_super->s_bdev->bd_dev, flags); +} + +xfs_ioops_t xfs_iocore_xfs = { + .xfs_ioinit = (xfs_ioinit_t) xfs_ioinit, + .xfs_bmapi_func = (xfs_bmapi_t) xfs_bmapi, + .xfs_bmap_eof_func = (xfs_bmap_eof_t) xfs_bmap_eof, + .xfs_iomap_write_direct = + (xfs_iomap_write_direct_t) xfs_iomap_write_direct, + .xfs_iomap_write_delay = + (xfs_iomap_write_delay_t) xfs_iomap_write_delay, + .xfs_iomap_write_allocate = + (xfs_iomap_write_allocate_t) xfs_iomap_write_allocate, + .xfs_iomap_write_unwritten = + (xfs_iomap_write_unwritten_t) xfs_iomap_write_unwritten, + .xfs_ilock = (xfs_lock_t) xfs_ilock, + .xfs_lck_map_shared = (xfs_lck_map_shared_t) xfs_ilock_map_shared, + .xfs_ilock_demote = (xfs_lock_demote_t) xfs_ilock_demote, + .xfs_ilock_nowait = (xfs_lock_nowait_t) xfs_ilock_nowait, + .xfs_unlock = (xfs_unlk_t) xfs_iunlock, + .xfs_size_func = (xfs_size_t) xfs_size_fn, + .xfs_iodone = (xfs_iodone_t) fs_noerr, +}; + +void +xfs_iocore_inode_reinit( + xfs_inode_t *ip) +{ + xfs_iocore_t *io = &ip->i_iocore; + + io->io_flags = 0; + if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) + io->io_flags |= XFS_IOCORE_RT; + io->io_dmevmask = ip->i_d.di_dmevmask; + io->io_dmstate = ip->i_d.di_dmstate; +} + +void +xfs_iocore_inode_init( + xfs_inode_t *ip) +{ + xfs_iocore_t *io = &ip->i_iocore; + xfs_mount_t *mp = ip->i_mount; + + io->io_mount = mp; +#ifdef DEBUG + io->io_lock = &ip->i_lock; + io->io_iolock = &ip->i_iolock; +#endif + + io->io_obj = (void *)ip; + + xfs_iocore_inode_reinit(ip); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_itable.c linux.22-ac2/fs/xfs/xfs_itable.c --- linux.vanilla/fs/xfs/xfs_itable.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_itable.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,793 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_ag.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_ialloc.h" +#include "xfs_itable.h" +#include "xfs_error.h" + +/* + * Return stat information for one inode. + * Return 0 if ok, else errno. + */ +int /* error status */ +xfs_bulkstat_one( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t ino, /* inode number to get data for */ + void *buffer, /* buffer to place output in */ + xfs_daddr_t bno, /* starting bno of inode cluster */ + void *dibuff, /* on-disk inode buffer */ + int *stat) /* BULKSTAT_RV_... */ +{ + xfs_bstat_t *buf; /* return buffer */ + int error; /* error value */ + xfs_dinode_t *dip; /* dinode inode pointer */ + xfs_dinode_core_t *dic; /* dinode core info pointer */ + xfs_inode_t *ip = NULL; /* incore inode pointer */ + xfs_arch_t arch; /* these are set according to */ + __uint16_t di_flags; /* temp */ + + buf = (xfs_bstat_t *)buffer; + dip = (xfs_dinode_t *)dibuff; + + if (! buf || ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino || + (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) && + (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))) { + *stat = BULKSTAT_RV_NOTHING; + return XFS_ERROR(EINVAL); + } + + if (dip == NULL) { + /* We're not being passed a pointer to a dinode. This happens + * if BULKSTAT_FG_IGET is selected. Do the iget. + */ + error = xfs_iget(mp, tp, ino, XFS_ILOCK_SHARED, &ip, bno); + if (error) { + *stat = BULKSTAT_RV_NOTHING; + return error; + } + ASSERT(ip != NULL); + ASSERT(ip->i_blkno != (xfs_daddr_t)0); + if (ip->i_d.di_mode == 0) { + xfs_iput_new(ip, XFS_ILOCK_SHARED); + *stat = BULKSTAT_RV_NOTHING; + return XFS_ERROR(ENOENT); + } + dic = &ip->i_d; + arch = ARCH_NOCONVERT; /* in-core! */ + ASSERT(dic != NULL); + + /* xfs_iget returns the following without needing + * further change. + */ + buf->bs_nlink = dic->di_nlink; + buf->bs_projid = dic->di_projid; + + } else { + dic = &dip->di_core; + ASSERT(dic != NULL); + + /* buffer dinode_core is in on-disk arch */ + arch = ARCH_CONVERT; + + /* + * The inode format changed when we moved the link count and + * made it 32 bits long. 
If this is an old format inode, + * convert it in memory to look like a new one. If it gets + * flushed to disk we will convert back before flushing or + * logging it. We zero out the new projid field and the old link + * count field. We'll handle clearing the pad field (the remains + * of the old uuid field) when we actually convert the inode to + * the new format. We don't change the version number so that we + * can distinguish this from a real new format inode. + */ + if (INT_GET(dic->di_version, arch) == XFS_DINODE_VERSION_1) { + buf->bs_nlink = INT_GET(dic->di_onlink, arch); + buf->bs_projid = 0; + } + else { + buf->bs_nlink = INT_GET(dic->di_nlink, arch); + buf->bs_projid = INT_GET(dic->di_projid, arch); + } + + } + + buf->bs_ino = ino; + buf->bs_mode = INT_GET(dic->di_mode, arch); + buf->bs_uid = INT_GET(dic->di_uid, arch); + buf->bs_gid = INT_GET(dic->di_gid, arch); + buf->bs_size = INT_GET(dic->di_size, arch); + buf->bs_atime.tv_sec = INT_GET(dic->di_atime.t_sec, arch); + buf->bs_atime.tv_nsec = INT_GET(dic->di_atime.t_nsec, arch); + buf->bs_mtime.tv_sec = INT_GET(dic->di_mtime.t_sec, arch); + buf->bs_mtime.tv_nsec = INT_GET(dic->di_mtime.t_nsec, arch); + buf->bs_ctime.tv_sec = INT_GET(dic->di_ctime.t_sec, arch); + buf->bs_ctime.tv_nsec = INT_GET(dic->di_ctime.t_nsec, arch); + /* + * convert di_flags to bs_xflags. + */ + di_flags=INT_GET(dic->di_flags, arch); + + buf->bs_xflags = + ((di_flags & XFS_DIFLAG_REALTIME) ? + XFS_XFLAG_REALTIME : 0) | + ((di_flags & XFS_DIFLAG_PREALLOC) ? + XFS_XFLAG_PREALLOC : 0) | + (XFS_CFORK_Q_ARCH(dic, arch) ? + XFS_XFLAG_HASATTR : 0); + + buf->bs_extsize = INT_GET(dic->di_extsize, arch) << mp->m_sb.sb_blocklog; + buf->bs_extents = INT_GET(dic->di_nextents, arch); + buf->bs_gen = INT_GET(dic->di_gen, arch); + memset(buf->bs_pad, 0, sizeof(buf->bs_pad)); + buf->bs_dmevmask = INT_GET(dic->di_dmevmask, arch); + buf->bs_dmstate = INT_GET(dic->di_dmstate, arch); + buf->bs_aextents = INT_GET(dic->di_anextents, arch); + + switch (INT_GET(dic->di_format, arch)) { + case XFS_DINODE_FMT_DEV: + if ( ip ) { + buf->bs_rdev = ip->i_df.if_u2.if_rdev; + } else { + buf->bs_rdev = INT_GET(dip->di_u.di_dev, arch); + } + + buf->bs_blksize = BLKDEV_IOSIZE; + buf->bs_blocks = 0; + break; + case XFS_DINODE_FMT_LOCAL: + case XFS_DINODE_FMT_UUID: + buf->bs_rdev = 0; + buf->bs_blksize = mp->m_sb.sb_blocksize; + buf->bs_blocks = 0; + break; + case XFS_DINODE_FMT_EXTENTS: + case XFS_DINODE_FMT_BTREE: + buf->bs_rdev = 0; + buf->bs_blksize = mp->m_sb.sb_blocksize; + if ( ip ) { + buf->bs_blocks = INT_GET(dic->di_nblocks, arch) + ip->i_delayed_blks; + } else { + buf->bs_blocks = INT_GET(dic->di_nblocks, arch); + } + break; + } + + if (ip) { + xfs_iput(ip, XFS_ILOCK_SHARED); + } + + *stat = BULKSTAT_RV_DIDONE; + return 0; +} + +/* + * Return stat information in bulk (by-inode) for the filesystem. 
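+ *
+ * Illustrative calling pattern (editorial sketch; `nelems' and `ubuf'
+ * are placeholders, not names used elsewhere in this code):
+ *
+ *	xfs_ino_t last = 0;
+ *	int count, error = 0, done = 0;
+ *
+ *	while (!done && !error) {
+ *		count = nelems;
+ *		error = xfs_bulkstat(mp, NULL, &last, &count,
+ *				xfs_bulkstat_one, sizeof(xfs_bstat_t),
+ *				ubuf, BULKSTAT_FG_IGET, &done);
+ *		... consume `count' entries from ubuf ...
+ *	}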
+ */ +int /* error status */ +xfs_bulkstat( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t *lastinop, /* last inode returned */ + int *ubcountp, /* size of buffer/count returned */ + bulkstat_one_pf formatter, /* func that'd fill a single buf */ + size_t statstruct_size, /* sizeof struct filling */ + xfs_caddr_t ubuffer, /* buffer with inode stats */ + int flags, /* defined in xfs_itable.h */ + int *done) /* 1 if there're more stats to get */ +{ + xfs_agblock_t agbno=0;/* allocation group block number */ + xfs_buf_t *agbp; /* agi header buffer */ + xfs_agi_t *agi; /* agi header data */ + xfs_agino_t agino; /* inode # in allocation group */ + xfs_agnumber_t agno; /* allocation group number */ + xfs_daddr_t bno; /* inode cluster start daddr */ + int chunkidx; /* current index into inode chunk */ + int clustidx; /* current index into inode cluster */ + xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ + int end_of_ag; /* set if we've seen the ag end */ + int error; /* error code */ + int fmterror;/* bulkstat formatter result */ + __int32_t gcnt; /* current btree rec's count */ + xfs_inofree_t gfree; /* current btree rec's free mask */ + xfs_agino_t gino; /* current btree rec's start inode */ + int i; /* loop index */ + int icount; /* count of inodes good in irbuf */ + xfs_ino_t ino; /* inode number (filesystem) */ + xfs_inobt_rec_t *irbp; /* current irec buffer pointer */ + xfs_inobt_rec_t *irbuf; /* start of irec buffer */ + xfs_inobt_rec_t *irbufend; /* end of good irec buffer entries */ + xfs_ino_t lastino=0; /* last inode number returned */ + int nbcluster; /* # of blocks in a cluster */ + int nicluster; /* # of inodes in a cluster */ + int nimask; /* mask for inode clusters */ + int nirbuf; /* size of irbuf */ + int rval; /* return value error code */ + int tmp; /* result value from btree calls */ + int ubcount; /* size of user's buffer */ + int ubleft; /* spaces left in user's buffer */ + xfs_caddr_t ubufp; /* current pointer into user's buffer */ + xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */ + xfs_dinode_t *dip; /* ptr into bp for specific inode */ + xfs_inode_t *ip; /* ptr to in-core inode struct */ + + /* + * Get the last inode value, see if there's nothing to do. + */ + ino = (xfs_ino_t)*lastinop; + dip = NULL; + agno = XFS_INO_TO_AGNO(mp, ino); + agino = XFS_INO_TO_AGINO(mp, ino); + if (agno >= mp->m_sb.sb_agcount || + ino != XFS_AGINO_TO_INO(mp, agno, agino)) { + *done = 1; + *ubcountp = 0; + return 0; + } + ubcount = ubleft = *ubcountp; + *ubcountp = 0; + *done = 0; + fmterror = 0; + ubufp = ubuffer; + nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ? + mp->m_sb.sb_inopblock : + (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); + nimask = ~(nicluster - 1); + nbcluster = nicluster >> mp->m_sb.sb_inopblog; + /* + * Lock down the user's buffer. If a buffer was not sent, as in the case + * disk quota code calls here, we skip this. + */ +#if defined(HAVE_USERACC) + if (ubuffer && + (error = useracc(ubuffer, ubcount * statstruct_size, + (B_READ|B_PHYS), NULL))) { + return error; + } +#endif + /* + * Allocate a page-sized buffer for inode btree records. + * We could try allocating something smaller, but for normal + * calls we'll always (potentially) need the whole page. + */ + irbuf = kmem_alloc(NBPC, KM_SLEEP); + nirbuf = NBPC / sizeof(*irbuf); + /* + * Loop over the allocation groups, starting from the last + * inode returned; 0 means start of the allocation group. 
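+ * Each pass of the loop below works in two stages: first a batch of
+ * inode btree records for the current AG is gathered into irbuf, then
+ * that batch is walked and the allocated inodes are formatted into the
+ * caller's buffer.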
+ */ + rval = 0; + while (ubleft > 0 && agno < mp->m_sb.sb_agcount) { + bp = NULL; + down_read(&mp->m_peraglock); + error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); + up_read(&mp->m_peraglock); + if (error) { + /* + * Skip this allocation group and go to the next one. + */ + agno++; + agino = 0; + continue; + } + agi = XFS_BUF_TO_AGI(agbp); + /* + * Allocate and initialize a btree cursor for ialloc btree. + */ + cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO, + (xfs_inode_t *)0, 0); + irbp = irbuf; + irbufend = irbuf + nirbuf; + end_of_ag = 0; + /* + * If we're returning in the middle of an allocation group, + * we need to get the remainder of the chunk we're in. + */ + if (agino > 0) { + /* + * Lookup the inode chunk that this inode lives in. + */ + error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp); + if (!error && /* no I/O error */ + tmp && /* lookup succeeded */ + /* got the record, should always work */ + !(error = xfs_inobt_get_rec(cur, &gino, &gcnt, + &gfree, &i, ARCH_NOCONVERT)) && + i == 1 && + /* this is the right chunk */ + agino < gino + XFS_INODES_PER_CHUNK && + /* lastino was not last in chunk */ + (chunkidx = agino - gino + 1) < + XFS_INODES_PER_CHUNK && + /* there are some left allocated */ + XFS_INOBT_MASKN(chunkidx, + XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) { + /* + * Grab the chunk record. Mark all the + * uninteresting inodes (because they're + * before our start point) free. + */ + for (i = 0; i < chunkidx; i++) { + if (XFS_INOBT_MASK(i) & ~gfree) + gcnt++; + } + gfree |= XFS_INOBT_MASKN(0, chunkidx); + INT_SET(irbp->ir_startino, ARCH_CONVERT, gino); + INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt); + INT_SET(irbp->ir_free, ARCH_CONVERT, gfree); + irbp++; + agino = gino + XFS_INODES_PER_CHUNK; + icount = XFS_INODES_PER_CHUNK - gcnt; + } else { + /* + * If any of those tests failed, bump the + * inode number (just in case). + */ + agino++; + icount = 0; + } + /* + * In any case, increment to the next record. + */ + if (!error) + error = xfs_inobt_increment(cur, 0, &tmp); + } else { + /* + * Start of ag. Lookup the first inode chunk. + */ + error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp); + icount = 0; + } + /* + * Loop through inode btree records in this ag, + * until we run out of inodes or space in the buffer. + */ + while (irbp < irbufend && icount < ubcount) { + /* + * Loop as long as we're unable to read the + * inode btree. + */ + while (error) { + agino += XFS_INODES_PER_CHUNK; + if (XFS_AGINO_TO_AGBNO(mp, agino) >= + INT_GET(agi->agi_length, ARCH_CONVERT)) + break; + error = xfs_inobt_lookup_ge(cur, agino, 0, 0, + &tmp); + } + /* + * If ran off the end of the ag either with an error, + * or the normal way, set end and stop collecting. + */ + if (error || + (error = xfs_inobt_get_rec(cur, &gino, &gcnt, + &gfree, &i, ARCH_NOCONVERT)) || + i == 0) { + end_of_ag = 1; + break; + } + /* + * If this chunk has any allocated inodes, save it. + */ + if (gcnt < XFS_INODES_PER_CHUNK) { + INT_SET(irbp->ir_startino, ARCH_CONVERT, gino); + INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt); + INT_SET(irbp->ir_free, ARCH_CONVERT, gfree); + irbp++; + icount += XFS_INODES_PER_CHUNK - gcnt; + } + /* + * Set agino to after this chunk and bump the cursor. + */ + agino = gino + XFS_INODES_PER_CHUNK; + error = xfs_inobt_increment(cur, 0, &tmp); + } + /* + * Drop the btree buffers and the agi buffer. + * We can't hold any of the locks these represent + * when calling iget. 
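+ * (Editorial note: xfs_iget() can itself block reading and locking
+ * inode cluster buffers, so keeping the AGI or btree buffers locked
+ * across it would invite deadlocks.)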
+ */ + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + xfs_trans_brelse(tp, agbp); + /* + * Now format all the good inodes into the user's buffer. + */ + irbufend = irbp; + for (irbp = irbuf; irbp < irbufend && ubleft > 0; irbp++) { + /* + * Read-ahead the next chunk's worth of inodes. + */ + if (&irbp[1] < irbufend) { + /* + * Loop over all clusters in the next chunk. + * Do a readahead if there are any allocated + * inodes in that cluster. + */ + for (agbno = XFS_AGINO_TO_AGBNO(mp, + INT_GET(irbp[1].ir_startino, ARCH_CONVERT)), + chunkidx = 0; + chunkidx < XFS_INODES_PER_CHUNK; + chunkidx += nicluster, + agbno += nbcluster) { + if (XFS_INOBT_MASKN(chunkidx, + nicluster) & + ~(INT_GET(irbp[1].ir_free, ARCH_CONVERT))) + xfs_btree_reada_bufs(mp, agno, + agbno, nbcluster); + } + } + /* + * Now process this chunk of inodes. + */ + for (agino = INT_GET(irbp->ir_startino, ARCH_CONVERT), chunkidx = 0, clustidx = 0; + ubleft > 0 && + INT_GET(irbp->ir_freecount, ARCH_CONVERT) < XFS_INODES_PER_CHUNK; + chunkidx++, clustidx++, agino++) { + ASSERT(chunkidx < XFS_INODES_PER_CHUNK); + /* + * Recompute agbno if this is the + * first inode of the cluster. + * + * Careful with clustidx. There can be + * multple clusters per chunk, a single + * cluster per chunk or a cluster that has + * inodes represented from several different + * chunks (if blocksize is large). + * + * Because of this, the starting clustidx is + * initialized to zero in this loop but must + * later be reset after reading in the cluster + * buffer. + */ + if ((chunkidx & (nicluster - 1)) == 0) { + agbno = XFS_AGINO_TO_AGBNO(mp, + INT_GET(irbp->ir_startino, ARCH_CONVERT)) + + ((chunkidx & nimask) >> + mp->m_sb.sb_inopblog); + + if (flags & BULKSTAT_FG_QUICK) { + ino = XFS_AGINO_TO_INO(mp, agno, + agino); + bno = XFS_AGB_TO_DADDR(mp, agno, + agbno); + + /* + * Get the inode cluster buffer + */ + ASSERT(xfs_inode_zone != NULL); + ip = kmem_zone_zalloc(xfs_inode_zone, + KM_SLEEP); + ip->i_ino = ino; + ip->i_mount = mp; + if (bp) + xfs_trans_brelse(tp, bp); + error = xfs_itobp(mp, tp, ip, + &dip, &bp, bno); + if (!error) + clustidx = ip->i_boffset / mp->m_sb.sb_inodesize; + kmem_zone_free(xfs_inode_zone, ip); + if (XFS_TEST_ERROR(error != 0, + mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK, + XFS_RANDOM_BULKSTAT_READ_CHUNK)) { + bp = NULL; + break; + } + } + } + /* + * Skip if this inode is free. + */ + if (XFS_INOBT_MASK(chunkidx) & INT_GET(irbp->ir_free, ARCH_CONVERT)) + continue; + /* + * Count used inodes as free so we can tell + * when the chunk is used up. + */ + INT_MOD(irbp->ir_freecount, ARCH_CONVERT, +1); + ino = XFS_AGINO_TO_INO(mp, agno, agino); + bno = XFS_AGB_TO_DADDR(mp, agno, agbno); + if (flags & BULKSTAT_FG_QUICK) { + dip = (xfs_dinode_t *)xfs_buf_offset(bp, + (clustidx << mp->m_sb.sb_inodelog)); + + if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) + != XFS_DINODE_MAGIC + || !XFS_DINODE_GOOD_VERSION( + INT_GET(dip->di_core.di_version, ARCH_CONVERT))) + continue; + } + + /* + * Get the inode and fill in a single buffer. + * BULKSTAT_FG_QUICK uses dip to fill it in. + * BULKSTAT_FG_IGET uses igets. + * See: xfs_bulkstat_one & dm_bulkstat_one. + * This is also used to count inodes/blks, etc + * in xfs_qm_quotacheck. 
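+ * The formatter reports back through its *stat argument:
+ * BULKSTAT_RV_NOTHING (nothing emitted, skip this inode),
+ * BULKSTAT_RV_DIDONE (one entry filled in) or BULKSTAT_RV_GIVEUP
+ * (abandon the walk), which is what the checks below act on.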
+ */ + error = formatter(mp, tp, ino, ubufp, bno, dip, + &fmterror); + if (fmterror == BULKSTAT_RV_NOTHING) + continue; + if (fmterror == BULKSTAT_RV_GIVEUP) { + ubleft = 0; + ASSERT(error); + rval = error; + break; + } + if (ubufp) + ubufp += statstruct_size; + ubleft--; + lastino = ino; + } + } + + if (bp) + xfs_trans_brelse(tp, bp); + + /* + * Set up for the next loop iteration. + */ + if (ubleft > 0) { + if (end_of_ag) { + agno++; + agino = 0; + } else + agino = XFS_INO_TO_AGINO(mp, lastino); + } else + break; + } + /* + * Done, we're either out of filesystem or space to put the data. + */ + kmem_free(irbuf, NBPC); +#if defined(HAVE_USERACC) + if (ubuffer) + unuseracc(ubuffer, ubcount * statstruct_size, (B_READ|B_PHYS)); +#endif + *ubcountp = ubcount - ubleft; + if (agno >= mp->m_sb.sb_agcount) { + /* + * If we ran out of filesystem, mark lastino as off + * the end of the filesystem, so the next call + * will return immediately. + */ + *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0); + *done = 1; + } else + *lastinop = (xfs_ino_t)lastino; + + return rval; +} + +/* + * Return stat information in bulk (by-inode) for the filesystem. + * Special case for non-sequential one inode bulkstat. + */ +int /* error status */ +xfs_bulkstat_single( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_ino_t *lastinop, /* inode to return */ + xfs_caddr_t buffer, /* buffer with inode stats */ + int *done) /* 1 if there're more stats to get */ +{ + xfs_bstat_t bstat; /* one bulkstat result structure */ + int count; /* count value for bulkstat call */ + int error; /* return value */ + xfs_ino_t ino; /* filesystem inode number */ + int res; /* result from bs1 */ + + /* + * note that requesting valid inode numbers which are not allocated + * to inodes will most likely cause xfs_itobp to generate warning + * messages about bad magic numbers. This is ok. The fact that + * the inode isn't actually an inode is handled by the + * error check below. Done this way to make the usual case faster + * at the expense of the error case. + */ + + ino = (xfs_ino_t)*lastinop; + error = xfs_bulkstat_one(mp, NULL, ino, &bstat, 0, 0, &res); + if (error) { + /* + * Special case way failed, do it the "long" way + * to see if that works. + */ + (*lastinop)--; + count = 1; + if (xfs_bulkstat(mp, NULL, lastinop, &count, xfs_bulkstat_one, + sizeof(bstat), buffer, BULKSTAT_FG_IGET, done)) + return error; + if (count == 0 || (xfs_ino_t)*lastinop != ino) + return error == EFSCORRUPTED ? + XFS_ERROR(EINVAL) : error; + else + return 0; + } + *done = 0; + if (copy_to_user(buffer, &bstat, sizeof(bstat))) + return XFS_ERROR(EFAULT); + return 0; +} + +/* + * Return inode number table for the filesystem. 
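+ * Each entry returned is an xfs_inogrp_t describing one inode chunk:
+ * its starting inode number, the count of allocated inodes in it, and
+ * a bitmask of which slots are allocated.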
+ */ +int /* error status */ +xfs_inumbers( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t *lastino, /* last inode returned */ + int *count, /* size of buffer/count returned */ + xfs_caddr_t ubuffer) /* buffer with inode descriptions */ +{ + xfs_buf_t *agbp; + xfs_agino_t agino; + xfs_agnumber_t agno; + int bcount; + xfs_inogrp_t *buffer; + int bufidx; + xfs_btree_cur_t *cur; + int error; + __int32_t gcnt; + xfs_inofree_t gfree; + xfs_agino_t gino; + int i; + xfs_ino_t ino; + int left; + int tmp; + + ino = (xfs_ino_t)*lastino; + agno = XFS_INO_TO_AGNO(mp, ino); + agino = XFS_INO_TO_AGINO(mp, ino); + left = *count; + *count = 0; + bcount = MIN(left, (int)(NBPP / sizeof(*buffer))); + buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); + error = bufidx = 0; + cur = NULL; + agbp = NULL; + while (left > 0 && agno < mp->m_sb.sb_agcount) { + if (agbp == NULL) { + down_read(&mp->m_peraglock); + error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); + up_read(&mp->m_peraglock); + if (error) { + /* + * If we can't read the AGI of this ag, + * then just skip to the next one. + */ + ASSERT(cur == NULL); + agbp = NULL; + agno++; + agino = 0; + continue; + } + cur = xfs_btree_init_cursor(mp, tp, agbp, agno, + XFS_BTNUM_INO, (xfs_inode_t *)0, 0); + error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp); + if (error) { + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + cur = NULL; + xfs_trans_brelse(tp, agbp); + agbp = NULL; + /* + * Move up the the last inode in the current + * chunk. The lookup_ge will always get + * us the first inode in the next chunk. + */ + agino += XFS_INODES_PER_CHUNK - 1; + continue; + } + } + if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree, + &i, ARCH_NOCONVERT)) || + i == 0) { + xfs_trans_brelse(tp, agbp); + agbp = NULL; + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + cur = NULL; + agno++; + agino = 0; + continue; + } + agino = gino + XFS_INODES_PER_CHUNK - 1; + buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino); + buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt; + buffer[bufidx].xi_allocmask = ~gfree; + bufidx++; + left--; + if (bufidx == bcount) { + if (copy_to_user(ubuffer, buffer, + bufidx * sizeof(*buffer))) { + error = XFS_ERROR(EFAULT); + break; + } + ubuffer += bufidx * sizeof(*buffer); + *count += bufidx; + bufidx = 0; + } + if (left) { + error = xfs_inobt_increment(cur, 0, &tmp); + if (error) { + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + cur = NULL; + xfs_trans_brelse(tp, agbp); + agbp = NULL; + /* + * The agino value has already been bumped. + * Just try to skip up to it. + */ + agino += XFS_INODES_PER_CHUNK; + continue; + } + } + } + if (!error) { + if (bufidx) { + if (copy_to_user(ubuffer, buffer, + bufidx * sizeof(*buffer))) + error = XFS_ERROR(EFAULT); + else + *count += bufidx; + } + *lastino = XFS_AGINO_TO_INO(mp, agno, agino); + } + kmem_free(buffer, bcount * sizeof(*buffer)); + if (cur) + xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR : + XFS_BTREE_NOERROR)); + if (agbp) + xfs_trans_brelse(tp, agbp); + return error; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_itable.h linux.22-ac2/fs/xfs/xfs_itable.h --- linux.vanilla/fs/xfs/xfs_itable.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_itable.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_ITABLE_H__ +#define __XFS_ITABLE_H__ + +/* + * xfs_bulkstat() is used to fill in xfs_bstat structures as well as dm_stat + * structures (by the dmi library). This is a pointer to a formatter function + * that will iget the inode and fill in the appropriate structure. + * see xfs_bulkstat_one() and dm_bulkstat_one() in dmi_xfs.c + */ +typedef int (*bulkstat_one_pf)(struct xfs_mount *mp, + struct xfs_trans *tp, + xfs_ino_t ino, + void *buffer, + xfs_daddr_t bno, + void *dip, + int *stat); +/* + * Values for stat return value. + */ +#define BULKSTAT_RV_NOTHING 0 +#define BULKSTAT_RV_DIDONE 1 +#define BULKSTAT_RV_GIVEUP 2 + +/* + * Values for bulkstat flag argument. + */ +#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */ +#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */ +#define BULKSTAT_FG_VFSLOCKED 0x4 /* Already have vfs lock */ + +/* + * Return stat information in bulk (by-inode) for the filesystem. 
+ */ +int /* error status */ +xfs_bulkstat( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t *lastino, /* last inode returned */ + int *count, /* size of buffer/count returned */ + bulkstat_one_pf formatter, /* func that'd fill a single buf */ + size_t statstruct_size,/* sizeof struct that we're filling */ + xfs_caddr_t ubuffer, /* buffer with inode stats */ + int flags, /* flag to control access method */ + int *done); /* 1 if there're more stats to get */ + +int +xfs_bulkstat_single( + xfs_mount_t *mp, + xfs_ino_t *lastinop, + xfs_caddr_t buffer, + int *done); + +int +xfs_bulkstat_one( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + void *buffer, + xfs_daddr_t bno, + void *dibuff, + int *stat); + +int /* error status */ +xfs_inumbers( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_ino_t *last, /* last inode returned */ + int *count, /* size of buffer/count returned */ + xfs_caddr_t buffer);/* buffer with inode descriptions */ + +#endif /* __XFS_ITABLE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_log.c linux.22-ac2/fs/xfs/xfs_log.c --- linux.vanilla/fs/xfs/xfs_log.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_log.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,3628 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * High level interface routines for log manager + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_ag.h" +#include "xfs_sb.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_buf_item.h" +#include "xfs_alloc_btree.h" +#include "xfs_log_recover.h" +#include "xfs_bit.h" +#include "xfs_rw.h" +#include "xfs_trans_priv.h" + + +#define xlog_write_adv_cnt(ptr, len, off, bytes) \ + { (ptr) += (bytes); \ + (len) -= (bytes); \ + (off) += (bytes);} + +/* Local miscellaneous function prototypes */ +STATIC int xlog_bdstrat_cb(struct xfs_buf *); +STATIC int xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket, + xlog_in_core_t **, xfs_lsn_t *); +STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, + dev_t log_dev, + xfs_daddr_t blk_offset, + int num_bblks); +STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes); +STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); +STATIC void xlog_unalloc_log(xlog_t *log); +STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[], + int nentries, xfs_log_ticket_t tic, + xfs_lsn_t *start_lsn, + xlog_in_core_t **commit_iclog, + uint flags); + +/* local state machine functions */ +STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int); +STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog); +STATIC int xlog_state_get_iclog_space(xlog_t *log, + int len, + xlog_in_core_t **iclog, + xlog_ticket_t *ticket, + int *continued_write, + int *logoffsetp); +STATIC void xlog_state_put_ticket(xlog_t *log, + xlog_ticket_t *tic); +STATIC int xlog_state_release_iclog(xlog_t *log, + xlog_in_core_t *iclog); +STATIC void xlog_state_switch_iclogs(xlog_t *log, + xlog_in_core_t *iclog, + int eventual_size); +STATIC int xlog_state_sync(xlog_t *log, xfs_lsn_t lsn, uint flags); +STATIC int xlog_state_sync_all(xlog_t *log, uint flags); +STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); + +/* local functions to manipulate grant head */ +STATIC int xlog_grant_log_space(xlog_t *log, + xlog_ticket_t *xtic); +STATIC void xlog_grant_push_ail(xfs_mount_t *mp, + int need_bytes); +STATIC void xlog_regrant_reserve_log_space(xlog_t *log, + xlog_ticket_t *ticket); +STATIC int xlog_regrant_write_log_space(xlog_t *log, + xlog_ticket_t *ticket); +STATIC void xlog_ungrant_log_space(xlog_t *log, + xlog_ticket_t *ticket); + + +/* local ticket functions */ +STATIC void xlog_state_ticket_alloc(xlog_t *log); +STATIC xlog_ticket_t *xlog_ticket_get(xlog_t *log, + int unit_bytes, + int count, + char clientid, + uint flags); +STATIC void xlog_ticket_put(xlog_t *log, xlog_ticket_t *ticket); + +/* local debug functions */ +#if defined(DEBUG) && !defined(XLOG_NOLOG) +STATIC void xlog_verify_dest_ptr(xlog_t *log, __psint_t ptr); +#ifdef XFSDEBUG +STATIC void xlog_verify_disk_cycle_no(xlog_t *log, xlog_in_core_t *iclog); +#endif +STATIC void xlog_verify_grant_head(xlog_t *log, int equals); +STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, + int count, boolean_t syncing); +STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, + xfs_lsn_t tail_lsn); +#else 
+#define xlog_verify_dest_ptr(a,b) +#define xlog_verify_disk_cycle_no(a,b) +#define xlog_verify_grant_head(a,b) +#define xlog_verify_iclog(a,b,c,d) +#define xlog_verify_tail_lsn(a,b,c) +#endif + +int xlog_iclogs_empty(xlog_t *log); + +#ifdef DEBUG +int xlog_do_error = 0; +int xlog_req_num = 0; +int xlog_error_mod = 33; +#endif + +#define XLOG_FORCED_SHUTDOWN(log) (log->l_flags & XLOG_IO_ERROR) + +/* + * 0 => disable log manager + * 1 => enable log manager + * 2 => enable log manager and log debugging + */ +#if defined(XLOG_NOLOG) || defined(DEBUG) +int xlog_debug = 1; +dev_t xlog_devt = 0; +#endif + +#if defined(XFS_LOG_TRACE) +void +xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string) +{ + if (! log->l_grant_trace) + log->l_grant_trace = ktrace_alloc(1024, KM_SLEEP); + + ktrace_enter(log->l_grant_trace, + (void *)tic, + (void *)log->l_reserve_headq, + (void *)log->l_write_headq, + (void *)((unsigned long)log->l_grant_reserve_cycle), + (void *)((unsigned long)log->l_grant_reserve_bytes), + (void *)((unsigned long)log->l_grant_write_cycle), + (void *)((unsigned long)log->l_grant_write_bytes), + (void *)((unsigned long)log->l_curr_cycle), + (void *)((unsigned long)log->l_curr_block), + (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn, ARCH_NOCONVERT)), + (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn, ARCH_NOCONVERT)), + (void *)string, + (void *)((unsigned long)13), + (void *)((unsigned long)14), + (void *)((unsigned long)15), + (void *)((unsigned long)16)); +} + +void +xlog_trace_tic(xlog_t *log, xlog_ticket_t *tic) +{ + if (! log->l_trace) + log->l_trace = ktrace_alloc(256, KM_SLEEP); + + ktrace_enter(log->l_trace, + (void *)tic, + (void *)((unsigned long)tic->t_curr_res), + (void *)((unsigned long)tic->t_unit_res), + (void *)((unsigned long)tic->t_ocnt), + (void *)((unsigned long)tic->t_cnt), + (void *)((unsigned long)tic->t_flags), + (void *)((unsigned long)7), + (void *)((unsigned long)8), + (void *)((unsigned long)9), + (void *)((unsigned long)10), + (void *)((unsigned long)11), + (void *)((unsigned long)12), + (void *)((unsigned long)13), + (void *)((unsigned long)14), + (void *)((unsigned long)15), + (void *)((unsigned long)16)); +} + +void +xlog_trace_iclog(xlog_in_core_t *iclog, uint state) +{ + pid_t pid; + + pid = current_pid(); + + if (!iclog->ic_trace) + iclog->ic_trace = ktrace_alloc(256, KM_SLEEP); + ktrace_enter(iclog->ic_trace, + (void *)((unsigned long)state), + (void *)((unsigned long)pid), + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0, + (void *)0); +} + +#else +#define xlog_trace_loggrant(log,tic,string) +#define xlog_trace_iclog(iclog,state) +#endif /* XFS_LOG_TRACE */ + +/* + * NOTES: + * + * 1. currblock field gets updated at startup and after in-core logs + * marked as with WANT_SYNC. + */ + +/* + * This routine is called when a user of a log manager ticket is done with + * the reservation. If the ticket was ever used, then a commit record for + * the associated transaction is written out as a log operation header with + * no data. The flag XLOG_TIC_INITED is set when the first write occurs with + * a given ticket. If the ticket was one with a permanent reservation, then + * a few operations are done differently. Permanent reservation tickets by + * default don't release the reservation. They just commit the current + * transaction with the belief that the reservation is still needed. 
A flag + * must be passed in before permanent reservations are actually released. + * When these type of tickets are not released, they need to be set into + * the inited state again. By doing this, a start record will be written + * out when the next write occurs. + */ +xfs_lsn_t +xfs_log_done(xfs_mount_t *mp, + xfs_log_ticket_t xtic, + void **iclog, + uint flags) +{ + xlog_t *log = mp->m_log; + xlog_ticket_t *ticket = (xfs_log_ticket_t) xtic; + xfs_lsn_t lsn = 0; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + + if (XLOG_FORCED_SHUTDOWN(log) || + /* + * If nothing was ever written, don't write out commit record. + * If we get an error, just continue and give back the log ticket. + */ + (((ticket->t_flags & XLOG_TIC_INITED) == 0) && + (xlog_commit_record(mp, ticket, + (xlog_in_core_t **)iclog, &lsn)))) { + lsn = (xfs_lsn_t) -1; + if (ticket->t_flags & XLOG_TIC_PERM_RESERV) { + flags |= XFS_LOG_REL_PERM_RESERV; + } + } + + + if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 || + (flags & XFS_LOG_REL_PERM_RESERV)) { + /* + * Release ticket if not permanent reservation or a specifc + * request has been made to release a permanent reservation. + */ + xlog_ungrant_log_space(log, ticket); + xlog_state_put_ticket(log, ticket); + } else { + xlog_regrant_reserve_log_space(log, ticket); + } + + /* If this ticket was a permanent reservation and we aren't + * trying to release it, reset the inited flags; so next time + * we write, a start record will be written out. + */ + if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) && + (flags & XFS_LOG_REL_PERM_RESERV) == 0) + ticket->t_flags |= XLOG_TIC_INITED; + + return lsn; +} /* xfs_log_done */ + + +/* + * Force the in-core log to disk. If flags == XFS_LOG_SYNC, + * the force is done synchronously. + * + * Asynchronous forces are implemented by setting the WANT_SYNC + * bit in the appropriate in-core log and then returning. + * + * Synchronous forces are implemented with a semaphore. All callers + * to force a given lsn to disk will wait on a semaphore attached to the + * specific in-core log. When given in-core log finally completes its + * write to disk, that thread will wake up all threads waiting on the + * semaphore. + */ +int +xfs_log_force(xfs_mount_t *mp, + xfs_lsn_t lsn, + uint flags) +{ + int rval; + xlog_t *log = mp->m_log; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + + ASSERT(flags & XFS_LOG_FORCE); + + XFS_STATS_INC(xfsstats.xs_log_force); + + if ((log->l_flags & XLOG_IO_ERROR) == 0) { + if (lsn == 0) + rval = xlog_state_sync_all(log, flags); + else + rval = xlog_state_sync(log, lsn, flags); + } else { + rval = XFS_ERROR(EIO); + } + + return rval; + +} /* xfs_log_force */ + + +/* + * This function will take a log sequence number and check to see if that + * lsn has been flushed to disk. If it has, then the callback function is + * called with the callback argument. If the relevant in-core log has not + * been synced to disk, we add the callback to the callback list of the + * in-core log. + */ +int +xfs_log_notify(xfs_mount_t *mp, /* mount of partition */ + void *iclog_hndl, /* iclog to hang callback off */ + xfs_log_callback_t *cb) +{ + xlog_t *log = mp->m_log; + xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl; + int abortflg, spl; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! 
xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + cb->cb_next = 0; + spl = LOG_LOCK(log); + abortflg = (iclog->ic_state & XLOG_STATE_IOERROR); + if (!abortflg) { + ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) || + (iclog->ic_state == XLOG_STATE_WANT_SYNC)); + cb->cb_next = 0; + *(iclog->ic_callback_tail) = cb; + iclog->ic_callback_tail = &(cb->cb_next); + } + LOG_UNLOCK(log, spl); + if (abortflg) { + cb->cb_func(cb->cb_arg, abortflg); + } + return 0; +} /* xfs_log_notify */ + +int +xfs_log_release_iclog(xfs_mount_t *mp, + void *iclog_hndl) +{ + xlog_t *log = mp->m_log; + xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl; + + if (xlog_state_release_iclog(log, iclog)) { + xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); + return(EIO); + } + + return 0; +} + +/* + * Initialize log manager data. This routine is intended to be called when + * a system boots up. It is not a per filesystem initialization. + * + * As you can see, we currently do nothing. + */ +int +xfs_log_init(void) +{ + return( 0 ); +} + + +/* + * 1. Reserve an amount of on-disk log space and return a ticket corresponding + * to the reservation. + * 2. Potentially, push buffers at tail of log to disk. + * + * Each reservation is going to reserve extra space for a log record header. + * When writes happen to the on-disk log, we don't subtract the length of the + * log record header from any reservation. By wasting space in each + * reservation, we prevent over allocation problems. + */ +int +xfs_log_reserve(xfs_mount_t *mp, + int unit_bytes, + int cnt, + xfs_log_ticket_t *ticket, + __uint8_t client, + uint flags) +{ + xlog_t *log = mp->m_log; + xlog_ticket_t *internal_ticket; + int retval; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + retval = 0; + ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); + ASSERT((flags & XFS_LOG_NOSLEEP) == 0); + + if (XLOG_FORCED_SHUTDOWN(log)) + return XFS_ERROR(EIO); + + XFS_STATS_INC(xfsstats.xs_try_logspace); + + if (*ticket != NULL) { + ASSERT(flags & XFS_LOG_PERM_RESERV); + internal_ticket = (xlog_ticket_t *)*ticket; + xlog_grant_push_ail(mp, internal_ticket->t_unit_res); + retval = xlog_regrant_write_log_space(log, internal_ticket); + } else { + /* may sleep if need to allocate more tickets */ + internal_ticket = xlog_ticket_get(log, unit_bytes, cnt, + client, flags); + *ticket = internal_ticket; + xlog_grant_push_ail(mp, + (internal_ticket->t_unit_res * + internal_ticket->t_cnt)); + retval = xlog_grant_log_space(log, internal_ticket); + } + + return retval; +} /* xfs_log_reserve */ + + +/* + * Mount a log filesystem + * + * mp - ubiquitous xfs mount point structure + * log_dev - device number of on-disk log device + * blk_offset - Start block # where block size is 512 bytes (BBSIZE) + * num_bblocks - Number of BBSIZE blocks in on-disk log + * + * Return error or zero. + */ +int +xfs_log_mount(xfs_mount_t *mp, + dev_t log_dev, + xfs_daddr_t blk_offset, + int num_bblks) +{ + xlog_t *log; + + if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) + cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname); + else { + cmn_err(CE_NOTE, + "!Mounting filesystem \"%s\" in no-recovery mode. Filesystem will be inconsistent.", + mp->m_fsname); + ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY); + } + + mp->m_log = log = xlog_alloc_log(mp, log_dev, blk_offset, num_bblks); + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! 
xlog_debug) { + cmn_err(CE_NOTE, "log dev: 0x%x", log_dev); + return 0; + } +#endif + /* + * skip log recovery on a norecovery mount. pretend it all + * just worked. + */ + if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) { + int error; + vfs_t *vfsp = XFS_MTOVFS(mp); + int readonly = (vfsp->vfs_flag & VFS_RDONLY); + + if (readonly) + vfsp->vfs_flag &= ~VFS_RDONLY; + + error = xlog_recover(log, readonly); + + if (readonly) + vfsp->vfs_flag |= VFS_RDONLY; + if (error) { + cmn_err(CE_WARN, "XFS: log mount/recovery failed"); + xlog_unalloc_log(log); + return error; + } + } + + /* Normal transactions can now occur */ + log->l_flags &= ~XLOG_ACTIVE_RECOVERY; + + /* End mounting message in xfs_log_mount_finish */ + return 0; +} /* xfs_log_mount */ + +/* + * Finish the recovery of the file system. This is separate from + * the xfs_log_mount() call, because it depends on the code in + * xfs_mountfs() to read in the root and real-time bitmap inodes + * between calling xfs_log_mount() and here. + * + * mp - ubiquitous xfs mount point structure + */ +int +xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags) +{ + int error; + + if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) + error = xlog_recover_finish(mp->m_log, mfsi_flags); + else { + error = 0; + ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY); + } + + return error; +} + +/* + * Unmount processing for the log. + */ +int +xfs_log_unmount(xfs_mount_t *mp) +{ + int error; + + error = xfs_log_unmount_write(mp); + xfs_log_unmount_dealloc(mp); + return (error); +} + +/* + * Final log writes as part of unmount. + * + * Mark the filesystem clean as unmount happens. Note that during relocation + * this routine needs to be executed as part of source-bag while the + * deallocation must not be done until source-end. + */ + +/* + * Unmount record used to have a string "Unmount filesystem--" in the + * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE). + * We just write the magic number now since that particular field isn't + * currently architecture converted and "nUmount" is a bit foo. + * As far as I know, there weren't any dependencies on the old behaviour. + */ + +int +xfs_log_unmount_write(xfs_mount_t *mp) +{ + xlog_t *log = mp->m_log; + xlog_in_core_t *iclog; +#ifdef DEBUG + xlog_in_core_t *first_iclog; +#endif + xfs_log_iovec_t reg[1]; + xfs_log_ticket_t tic = 0; + xfs_lsn_t lsn; + int error; + SPLDECL(s); + + /* the data section must be 32 bit size aligned */ + struct { + __uint16_t magic; + __uint16_t pad1; + __uint32_t pad2; /* may as well make it 64 bits */ + } magic = { XLOG_UNMOUNT_TYPE, 0, 0 }; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (! xlog_debug && xlog_devt == log->l_dev) + return 0; +#endif + + /* + * Don't write out unmount record on read-only mounts. + * Or, if we are doing a forced umount (typically because of IO errors). + */ + if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY) + return 0; + + xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC); + +#ifdef DEBUG + first_iclog = iclog = log->l_iclog; + do { + if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { + ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE); + ASSERT(iclog->ic_offset == 0); + } + iclog = iclog->ic_next; + } while (iclog != first_iclog); +#endif + if (! 
(XLOG_FORCED_SHUTDOWN(log))) { + reg[0].i_addr = (void*)&magic; + reg[0].i_len = sizeof(magic); + + error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0); + if (!error) { + /* remove inited flag */ + ((xlog_ticket_t *)tic)->t_flags = 0; + error = xlog_write(mp, reg, 1, tic, &lsn, + NULL, XLOG_UNMOUNT_TRANS); + /* + * At this point, we're umounting anyway, + * so there's no point in transitioning log state + * to IOERROR. Just continue... + */ + } + + if (error) { + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_log_unmount: unmount record failed"); + } + + + s = LOG_LOCK(log); + iclog = log->l_iclog; + iclog->ic_refcnt++; + LOG_UNLOCK(log, s); + xlog_state_want_sync(log, iclog); + (void) xlog_state_release_iclog(log, iclog); + + s = LOG_LOCK(log); + if (!(iclog->ic_state == XLOG_STATE_ACTIVE || + iclog->ic_state == XLOG_STATE_DIRTY)) { + if (!XLOG_FORCED_SHUTDOWN(log)) { + sv_wait(&iclog->ic_forcesema, PMEM, + &log->l_icloglock, s); + } else { + LOG_UNLOCK(log, s); + } + } else { + LOG_UNLOCK(log, s); + } + if (tic) + xlog_state_put_ticket(log, tic); + } else { + /* + * We're already in forced_shutdown mode, couldn't + * even attempt to write out the unmount transaction. + * + * Go through the motions of sync'ing and releasing + * the iclog, even though no I/O will actually happen, + * we need to wait for other log I/O's that may already + * be in progress. Do this as a separate section of + * code so we'll know if we ever get stuck here that + * we're in this odd situation of trying to unmount + * a file system that went into forced_shutdown as + * the result of an unmount.. + */ + s = LOG_LOCK(log); + iclog = log->l_iclog; + iclog->ic_refcnt++; + LOG_UNLOCK(log, s); + + xlog_state_want_sync(log, iclog); + (void) xlog_state_release_iclog(log, iclog); + + s = LOG_LOCK(log); + + if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE + || iclog->ic_state == XLOG_STATE_DIRTY + || iclog->ic_state == XLOG_STATE_IOERROR) ) { + + sv_wait(&iclog->ic_forcesema, PMEM, + &log->l_icloglock, s); + } else { + LOG_UNLOCK(log, s); + } + } + + return 0; +} /* xfs_log_unmount_write */ + +/* + * Deallocate log structures for unmount/relocation. + */ +void +xfs_log_unmount_dealloc(xfs_mount_t *mp) +{ + xlog_unalloc_log(mp->m_log); +} + +/* + * Write region vectors to log. The write happens using the space reservation + * of the ticket (tic). It is not a requirement that all writes for a given + * transaction occur with one call to xfs_log_write(). + */ +int +xfs_log_write(xfs_mount_t * mp, + xfs_log_iovec_t reg[], + int nentries, + xfs_log_ticket_t tic, + xfs_lsn_t *start_lsn) +{ + int error; + xlog_t *log = mp->m_log; +#if defined(DEBUG) || defined(XLOG_NOLOG) + + if (! 
xlog_debug && xlog_devt == log->l_dev) { + *start_lsn = 0; + return 0; + } +#endif + if (XLOG_FORCED_SHUTDOWN(log)) + return XFS_ERROR(EIO); + + if ((error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0))) { + xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); + } + return (error); +} /* xfs_log_write */ + + +void +xfs_log_move_tail(xfs_mount_t *mp, + xfs_lsn_t tail_lsn) +{ + xlog_ticket_t *tic; + xlog_t *log = mp->m_log; + int need_bytes, free_bytes, cycle, bytes; + SPLDECL(s); + +#if defined(DEBUG) || defined(XLOG_NOLOG) + if (!xlog_debug && xlog_devt == log->l_dev) + return; +#endif + /* XXXsup tmp */ + if (XLOG_FORCED_SHUTDOWN(log)) + return; + ASSERT(!XFS_FORCED_SHUTDOWN(mp)); + + if (tail_lsn == 0) { + /* needed since sync_lsn is 64 bits */ + s = LOG_LOCK(log); + tail_lsn = log->l_last_sync_lsn; + LOG_UNLOCK(log, s); + } + + s = GRANT_LOCK(log); + + /* Also an illegal lsn. 1 implies that we aren't passing in a legal + * tail_lsn. + */ + if (tail_lsn != 1) + log->l_tail_lsn = tail_lsn; + + if ((tic = log->l_write_headq)) { +#ifdef DEBUG + if (log->l_flags & XLOG_ACTIVE_RECOVERY) + panic("Recovery problem"); +#endif + cycle = log->l_grant_write_cycle; + bytes = log->l_grant_write_bytes; + free_bytes = xlog_space_left(log, cycle, bytes); + do { + ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); + + if (free_bytes < tic->t_unit_res) + break; + free_bytes -= tic->t_unit_res; + sv_signal(&tic->t_sema); + tic = tic->t_next; + } while (tic != log->l_write_headq); + } + if ((tic = log->l_reserve_headq)) { +#ifdef DEBUG + if (log->l_flags & XLOG_ACTIVE_RECOVERY) + panic("Recovery problem"); +#endif + cycle = log->l_grant_reserve_cycle; + bytes = log->l_grant_reserve_bytes; + free_bytes = xlog_space_left(log, cycle, bytes); + do { + if (tic->t_flags & XLOG_TIC_PERM_RESERV) + need_bytes = tic->t_unit_res*tic->t_cnt; + else + need_bytes = tic->t_unit_res; + if (free_bytes < need_bytes) + break; + free_bytes -= need_bytes; + sv_signal(&tic->t_sema); + tic = tic->t_next; + } while (tic != log->l_reserve_headq); + } + GRANT_UNLOCK(log, s); +} /* xfs_log_move_tail */ + +/* + * Determine if we have a transaction that has gone to disk + * that needs to be covered. Log activity needs to be idle (no AIL and + * nothing in the iclogs). And, we need to be in the right state indicating + * something has gone out. + */ +int +xfs_log_need_covered(xfs_mount_t *mp) +{ + SPLDECL(s); + int needed = 0, gen; + xlog_t *log = mp->m_log; + + if (mp->m_frozen || XFS_FORCED_SHUTDOWN(mp)) + return 0; + + s = LOG_LOCK(log); + if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || + (log->l_covered_state == XLOG_STATE_COVER_NEED2)) + && !xfs_trans_first_ail(mp, &gen) + && xlog_iclogs_empty(log)) { + if (log->l_covered_state == XLOG_STATE_COVER_NEED) + log->l_covered_state = XLOG_STATE_COVER_DONE; + else { + ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2); + log->l_covered_state = XLOG_STATE_COVER_DONE2; + } + needed = 1; + } + LOG_UNLOCK(log, s); + return(needed); +} + +/****************************************************************************** + * + * local routines + * + ****************************************************************************** + */ + +/* xfs_trans_tail_ail returns 0 when there is nothing in the list. + * The log manager must keep track of the last LR which was committed + * to disk. The lsn of this LR will become the new tail_lsn whenever + * xfs_trans_tail_ail returns 0. 
If we don't do this, we run into + * the situation where stuff could be written into the log but nothing + * was ever in the AIL when asked. Eventually, we panic since the + * tail hits the head. + * + * We may be holding the log iclog lock upon entering this routine. + */ +xfs_lsn_t +xlog_assign_tail_lsn(xfs_mount_t *mp) +{ + xfs_lsn_t tail_lsn; + SPLDECL(s); + xlog_t *log = mp->m_log; + + tail_lsn = xfs_trans_tail_ail(mp); + s = GRANT_LOCK(log); + if (tail_lsn != 0) + log->l_tail_lsn = tail_lsn; + else + tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn; + GRANT_UNLOCK(log, s); + + return tail_lsn; +} /* xlog_assign_tail_lsn */ + + +/* + * Return the space in the log between the tail and the head. The head + * is passed in the cycle/bytes formal parms. In the special case where + * the reserve head has wrapped passed the tail, this calculation is no + * longer valid. In this case, just return 0 which means there is no space + * in the log. This works for all places where this function is called + * with the reserve head. Of course, if the write head were to ever + * wrap the tail, we should blow up. Rather than catch this case here, + * we depend on other ASSERTions in other parts of the code. XXXmiken + * + * This code also handles the case where the reservation head is behind + * the tail. The details of this case are described below, but the end + * result is that we return the size of the log as the amount of space left. + */ +int +xlog_space_left(xlog_t *log, int cycle, int bytes) +{ + int free_bytes; + int tail_bytes; + int tail_cycle; + + tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn, ARCH_NOCONVERT)); + tail_cycle = CYCLE_LSN(log->l_tail_lsn, ARCH_NOCONVERT); + if ((tail_cycle == cycle) && (bytes >= tail_bytes)) { + free_bytes = log->l_logsize - (bytes - tail_bytes); + } else if ((tail_cycle + 1) < cycle) { + return 0; + } else if (tail_cycle < cycle) { + ASSERT(tail_cycle == (cycle - 1)); + free_bytes = tail_bytes - bytes; + } else { + /* + * The reservation head is behind the tail. + * This can only happen when the AIL is empty so the tail + * is equal to the head and the l_roundoff value in the + * log structure is taking up the difference between the + * reservation head and the tail. The bytes accounted for + * by the l_roundoff field are temporarily 'lost' to the + * reservation mechanism, but they are cleaned up when the + * log buffers that created them are reused. These lost + * bytes are what allow the reservation head to fall behind + * the tail in the case that the log is 'empty'. + * In this case we just want to return the size of the + * log as the amount of space left. + */ +/* This assert does not take into account padding from striped log writes * + ASSERT((tail_cycle == (cycle + 1)) || + ((bytes + log->l_roundoff) >= tail_bytes)); +*/ + free_bytes = log->l_logsize; + } + return free_bytes; +} /* xlog_space_left */ + + +/* + * Log function which is called when an io completes. + * + * The log manager needs its own routine, in order to control what + * happens with the buffer after the write completes. + */ +void +xlog_iodone(xfs_buf_t *bp) +{ + xlog_in_core_t *iclog; + int aborted; + + iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); + ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long) 2); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); + aborted = 0; + + /* + * Race to shutdown the filesystem if we see an error. 
+ */ + if (XFS_BUF_GETERROR(bp)) { + /* Some versions of cpp barf on the recursive definition of + * ic_log -> hic_fields.ic_log and expand ic_log twice when + * it is passed through two macros. Workaround for broken cpp + */ + struct log *l; + xfs_ioerror_alert("xlog_iodone", + iclog->ic_log->l_mp, bp, XFS_BUF_ADDR(bp)); + XFS_BUF_STALE(bp); + l = iclog->ic_log; + xfs_force_shutdown(l->l_mp, XFS_LOG_IO_ERROR); + /* + * This flag will be propagated to the trans-committed + * callback routines to let them know that the log-commit + * didn't succeed. + */ + aborted = XFS_LI_ABORTED; + } else if (iclog->ic_state & XLOG_STATE_IOERROR) { + aborted = XFS_LI_ABORTED; + } + xlog_state_done_syncing(iclog, aborted); + if (!(XFS_BUF_ISASYNC(bp))) { + /* + * Corresponding psema() will be done in bwrite(). If we don't + * vsema() here, panic. + */ + XFS_BUF_V_IODONESEMA(bp); + } +} /* xlog_iodone */ + +/* + * The bdstrat callback function for log bufs. This gives us a central + * place to trap bufs in case we get hit by a log I/O error and need to + * shutdown. Actually, in practice, even when we didn't get a log error, + * we transition the iclogs to IOERROR state *after* flushing all existing + * iclogs to disk. This is because we don't want anymore new transactions to be + * started or completed afterwards. + */ +STATIC int +xlog_bdstrat_cb(struct xfs_buf *bp) +{ + xlog_in_core_t *iclog; + + iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); + + if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) { + /* note for irix bstrat will need struct bdevsw passed + * Fix the following macro if the code ever is merged + */ + XFS_bdstrat(bp); + return 0; + } + + xfs_buftrace("XLOG__BDSTRAT IOERROR", bp); + XFS_BUF_ERROR(bp, EIO); + XFS_BUF_STALE(bp); + xfs_biodone(bp); + return (XFS_ERROR(EIO)); + + +} + +/* + * Return size of each in-core log record buffer. + * + * Low memory machines only get 2 16KB buffers. We don't want to waste + * memory here. However, all other machines get at least 2 32KB buffers. + * The number is hard coded because we don't care about the minimum + * memory size, just 32MB systems. + * + * If the filesystem blocksize is too large, we may need to choose a + * larger size since the directory code currently logs entire blocks. + * XXXmiken XXXcurtis + */ + +STATIC void +xlog_get_iclog_buffer_size(xfs_mount_t *mp, + xlog_t *log) +{ + int size; + int xhdrs; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + /* + * When logbufs == 0, someone has disabled the log from the FSTAB + * file. This is not a documented feature. We need to set xlog_debug + * to zero (this deactivates the log) and set xlog_devt to the + * appropriate dev_t. Only one filesystem may be affected as such + * since this is just a performance hack to test what we might be able + * to get if the log were not present. + */ + if (mp->m_logbufs == 0) { + xlog_debug = 0; + xlog_devt = log->l_dev; + log->l_iclog_bufs = XLOG_NUM_ICLOGS; + } else +#endif + { + /* + * This is the normal path. If m_logbufs == -1, then the + * admin has chosen to use the system defaults for logbuffers. + */ + if (mp->m_logbufs == -1) + log->l_iclog_bufs = XLOG_NUM_ICLOGS; + else + log->l_iclog_bufs = mp->m_logbufs; + +#if defined(DEBUG) || defined(XLOG_NOLOG) + /* We are reactivating a filesystem after it was active */ + if (log->l_dev == xlog_devt) { + xlog_devt = 1; + xlog_debug = 1; + } +#endif + } + + /* + * Buffer size passed in from mount system call. 
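+	 *
+	 * (Annotation, not in the original comment: the loop below simply
+	 * takes log2 of the buffer size, so a 32768-byte logbsize yields
+	 * l_iclog_size_log == 15, and by the 32K-per-header rule used for
+	 * version-2 logs that size needs a single header, i.e. xhdrs == 1.)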
+ */ + if (mp->m_logbsize != -1) { + size = log->l_iclog_size = mp->m_logbsize; + log->l_iclog_size_log = 0; + while (size != 1) { + log->l_iclog_size_log++; + size >>= 1; + } + + if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) { + /* # headers = size / 32K + * one header holds cycles from 32K of data + */ + + xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE; + if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE) + xhdrs++; + log->l_iclog_hsize = xhdrs << BBSHIFT; + log->l_iclog_heads = xhdrs; + } else { + ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE); + log->l_iclog_hsize = BBSIZE; + log->l_iclog_heads = 1; + } + return; + } + + /* + * Special case machines that have less than 32MB of memory. + * All machines with more memory use 32KB buffers. + */ + if (xfs_physmem <= btoc(32*1024*1024)) { + /* Don't change; min configuration */ + log->l_iclog_size = XLOG_RECORD_BSIZE; /* 16k */ + log->l_iclog_size_log = XLOG_RECORD_BSHIFT; + } else { + log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; /* 32k */ + log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; + } + + /* the default log size is 16k or 32k which is one header sector */ + log->l_iclog_hsize = BBSIZE; + log->l_iclog_heads = 1; + + /* + * For 16KB, we use 3 32KB buffers. For 32KB block sizes, we use + * 4 32KB buffers. For 64KB block sizes, we use 8 32KB buffers. + */ + if (mp->m_sb.sb_blocksize >= 16*1024) { + log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; + log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; + if (mp->m_logbufs == -1) { + switch (mp->m_sb.sb_blocksize) { + case 16*1024: /* 16 KB */ + log->l_iclog_bufs = 3; + break; + case 32*1024: /* 32 KB */ + log->l_iclog_bufs = 4; + break; + case 64*1024: /* 64 KB */ + log->l_iclog_bufs = 8; + break; + default: + xlog_panic("XFS: Illegal blocksize"); + break; + } + } + } +} /* xlog_get_iclog_buffer_size */ + + +/* + * This routine initializes some of the log structure for a given mount point. + * Its primary purpose is to fill in enough, so recovery can occur. However, + * some other stuff may be filled in too. 
+ */ +STATIC xlog_t * +xlog_alloc_log(xfs_mount_t *mp, + dev_t log_dev, + xfs_daddr_t blk_offset, + int num_bblks) +{ + xlog_t *log; + xlog_rec_header_t *head; + xlog_in_core_t **iclogp; + xlog_in_core_t *iclog, *prev_iclog=NULL; + xfs_buf_t *bp; + int i; + int iclogsize; + + log = (void *)kmem_zalloc(sizeof(xlog_t), KM_SLEEP); + + log->l_mp = mp; + log->l_dev = log_dev; + log->l_logsize = BBTOB(num_bblks); + log->l_logBBstart = blk_offset; + log->l_logBBsize = num_bblks; + log->l_roundoff = 0; + log->l_covered_state = XLOG_STATE_COVER_IDLE; + log->l_flags |= XLOG_ACTIVE_RECOVERY; + + log->l_prev_block = -1; + ASSIGN_ANY_LSN(log->l_tail_lsn, 1, 0, ARCH_NOCONVERT); + /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ + log->l_last_sync_lsn = log->l_tail_lsn; + log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ + log->l_curr_block = 0; /* filled in by xlog_recover */ + log->l_grant_reserve_bytes = 0; + log->l_grant_reserve_cycle = 1; + log->l_grant_write_bytes = 0; + log->l_grant_write_cycle = 1; + log->l_quotaoffs_flag = 0; /* XFS_LI_QUOTAOFF logitems */ + + xlog_get_iclog_buffer_size(mp, log); + + bp = log->l_xbuf = XFS_getrbuf(0,mp); /* get my locked buffer */ /* mp needed for pagebuf/linux only */ + + XFS_BUF_SET_TARGET(bp, mp->m_logdev_targp); + XFS_BUF_SET_SIZE(bp, log->l_iclog_size); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); + XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); + ASSERT(XFS_BUF_ISBUSY(log->l_xbuf)); + ASSERT(XFS_BUF_VALUSEMA(log->l_xbuf) <= 0); + spinlock_init(&log->l_icloglock, "iclog"); + spinlock_init(&log->l_grant_lock, "grhead_iclog"); + initnsema(&log->l_flushsema, 0, "ic-flush"); + xlog_state_ticket_alloc(log); /* wait until after icloglock inited */ + + /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ + ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0); + + iclogp = &log->l_iclog; + /* + * The amount of memory to allocate for the iclog structure is + * rather funky due to the way the structure is defined. It is + * done this way so that we can use different sizes for machines + * with different amounts of memory. See the definition of + * xlog_in_core_t in xfs_log_priv.h for details. + */ + iclogsize = log->l_iclog_size; + ASSERT(log->l_iclog_size >= 4096); + for (i=0; i < log->l_iclog_bufs; i++) { + *iclogp = (xlog_in_core_t *) + kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP); + iclog = *iclogp; + iclog->hic_data = (xlog_in_core_2_t *) + kmem_alloc(iclogsize, KM_SLEEP); + + iclog->ic_prev = prev_iclog; + prev_iclog = iclog; + log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header); + + head = &iclog->ic_header; + memset(head, 0, sizeof(xlog_rec_header_t)); + INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); + INT_SET(head->h_version, ARCH_CONVERT, + XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 
2 : 1); + INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size); + /* new fields */ + INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT); + memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); + + bp = iclog->ic_bp = XFS_getrbuf(0,mp); /* my locked buffer */ /* mp need for pagebuf/linux only */ + XFS_BUF_SET_TARGET(bp, mp->m_logdev_targp); + XFS_BUF_SET_SIZE(bp, log->l_iclog_size); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); + XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); + + iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize; + iclog->ic_state = XLOG_STATE_ACTIVE; + iclog->ic_log = log; + iclog->ic_callback_tail = &(iclog->ic_callback); + iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize; + + ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); + ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0); + sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force"); + sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write"); + + iclogp = &iclog->ic_next; + } + *iclogp = log->l_iclog; /* complete ring */ + log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ + + return log; +} /* xlog_alloc_log */ + + +/* + * Write out the commit record of a transaction associated with the given + * ticket. Return the lsn of the commit record. + */ +STATIC int +xlog_commit_record(xfs_mount_t *mp, + xlog_ticket_t *ticket, + xlog_in_core_t **iclog, + xfs_lsn_t *commitlsnp) +{ + int error; + xfs_log_iovec_t reg[1]; + + reg[0].i_addr = 0; + reg[0].i_len = 0; + + ASSERT_ALWAYS(iclog); + if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp, + iclog, XLOG_COMMIT_TRANS))) { + xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); + } + return (error); +} /* xlog_commit_record */ + + +/* + * Push on the buffer cache code if we ever use more than 75% of the on-disk + * log space. This code pushes on the lsn which would supposedly free up + * the 25% which we want to leave free. We may need to adopt a policy which + * pushes on an lsn which is further along in the log once we reach the high + * water mark. In this manner, we would be creating a low water mark. + */ +void +xlog_grant_push_ail(xfs_mount_t *mp, + int need_bytes) +{ + xlog_t *log = mp->m_log; /* pointer to the log */ + xfs_lsn_t tail_lsn; /* lsn of the log tail */ + xfs_lsn_t threshold_lsn = 0; /* lsn we'd like to be at */ + int free_blocks; /* free blocks left to write to */ + int free_bytes; /* free bytes left to write to */ + int threshold_block; /* block in lsn we'd like to be at */ + int threshold_cycle; /* lsn cycle we'd like to be at */ + int free_threshold; + SPLDECL(s); + + ASSERT(BTOBB(need_bytes) < log->l_logBBsize); + + s = GRANT_LOCK(log); + free_bytes = xlog_space_left(log, + log->l_grant_reserve_cycle, + log->l_grant_reserve_bytes); + tail_lsn = log->l_tail_lsn; + free_blocks = BTOBBT(free_bytes); + + /* + * Set the threshold for the minimum number of free blocks in the + * log to the maximum of what the caller needs, one quarter of the + * log, and 256 blocks. 
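+	 *
+	 * (Worked example, not in the original comment: for a log of 20000
+	 * basic blocks and a small reservation, free_threshold becomes
+	 * max(BTOBB(need_bytes), 20000 >> 2, 256) = 5000 blocks; when fewer
+	 * than that many blocks are free, the AIL is pushed toward the lsn
+	 * that lies 5000 blocks past the current tail.)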
+ */ + free_threshold = BTOBB(need_bytes); + free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2)); + free_threshold = MAX(free_threshold, 256); + if (free_blocks < free_threshold) { + threshold_block = BLOCK_LSN(tail_lsn, ARCH_NOCONVERT) + free_threshold; + threshold_cycle = CYCLE_LSN(tail_lsn, ARCH_NOCONVERT); + if (threshold_block >= log->l_logBBsize) { + threshold_block -= log->l_logBBsize; + threshold_cycle += 1; + } + ASSIGN_ANY_LSN(threshold_lsn, threshold_cycle, + threshold_block, ARCH_NOCONVERT); + + /* Don't pass in an lsn greater than the lsn of the last + * log record known to be on disk. + */ + if (XFS_LSN_CMP_ARCH(threshold_lsn, log->l_last_sync_lsn, ARCH_NOCONVERT) > 0) + threshold_lsn = log->l_last_sync_lsn; + } + GRANT_UNLOCK(log, s); + + /* + * Get the transaction layer to kick the dirty buffers out to + * disk asynchronously. No point in trying to do this if + * the filesystem is shutting down. + */ + if (threshold_lsn && + !XLOG_FORCED_SHUTDOWN(log)) + xfs_trans_push_ail(mp, threshold_lsn); +} /* xlog_grant_push_ail */ + + +/* + * Flush out the in-core log (iclog) to the on-disk log in a synchronous or + * asynchronous fashion. Previously, we should have moved the current iclog + * ptr in the log to point to the next available iclog. This allows further + * write to continue while this code syncs out an iclog ready to go. + * Before an in-core log can be written out, the data section must be scanned + * to save away the 1st word of each BBSIZE block into the header. We replace + * it with the current cycle count. Each BBSIZE block is tagged with the + * cycle count because there in an implicit assumption that drives will + * guarantee that entire 512 byte blocks get written at once. In other words, + * we can't have part of a 512 byte block written and part not written. By + * tagging each block, we will know which blocks are valid when recovering + * after an unclean shutdown. + * + * This routine is single threaded on the iclog. No other thread can be in + * this routine with the same iclog. Changing contents of iclog can there- + * fore be done without grabbing the state machine lock. Updating the global + * log will require grabbing the lock though. + * + * The entire log manager uses a logical block numbering scheme. Only + * log_sync (and then only bwrite()) know about the fact that the log may + * not start with block zero on a given device. The log block start offset + * is added immediately before calling bwrite(). + */ + +int +xlog_sync(xlog_t *log, + xlog_in_core_t *iclog) +{ + xfs_caddr_t dptr; /* pointer to byte sized element */ + xfs_buf_t *bp; + int i, ops; + uint roundup; + uint count; /* byte count of bwrite */ + int split = 0; /* split write into two regions */ + int error; + + XFS_STATS_INC(xfsstats.xs_log_writes); + ASSERT(iclog->ic_refcnt == 0); + + /* Round out the log write size */ + if (iclog->ic_offset & BBMASK) { + /* count of 0 is already accounted for up in + * xlog_state_sync_all(). Once in this routine, + * operations on the iclog are single threaded. 
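+		 *
+		 * (Worked example, not in the original comment: an ic_offset
+		 * of 700 bytes gives count = 700 & BBMASK = 188, so
+		 * BBSIZE - count = 324 bytes of round-off are added and the
+		 * write then covers exactly two 512-byte basic blocks.)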
+ * + * Difference between rounded up size and size + */ + count = iclog->ic_offset & BBMASK; + iclog->ic_roundoff += BBSIZE - count; + } + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + unsigned sunit = BTOBB(log->l_mp->m_sb.sb_logsunit); + if (!sunit) + sunit = 1; + + count = BTOBB(log->l_iclog_hsize + iclog->ic_offset); + if (count & (sunit - 1)) { + roundup = sunit - (count & (sunit - 1)); + } else { + roundup = 0; + } + iclog->ic_offset += BBTOB(roundup); + } + + log->l_roundoff += iclog->ic_roundoff; + + xlog_pack_data(log, iclog); /* put cycle number in every block */ + + /* real byte length */ + INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset); + /* put ops count in correct order */ + ops = iclog->ic_header.h_num_logops; + INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); + + bp = iclog->ic_bp; + ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); + XFS_BUF_SET_ADDR(bp, BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT)); + + /* Count is already rounded up to a BBSIZE above */ + count = iclog->ic_offset + iclog->ic_roundoff; + ASSERT((count & BBMASK) == 0); + + /* Add for LR header */ + count += log->l_iclog_hsize; + XFS_STATS_ADD(xfsstats.xs_log_blocks, BTOBB(count)); + + /* Do we need to split this write into 2 parts? */ + if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) { + split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp))); + count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)); + iclog->ic_bwritecnt = 2; /* split into 2 writes */ + } else { + iclog->ic_bwritecnt = 1; + } + XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); + XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ + XFS_BUF_BUSY(bp); + XFS_BUF_ASYNC(bp); + /* + * Do a disk write cache flush for the log block. + * This is a bit of a sledgehammer, it would be better + * to use a tag barrier here that just prevents reordering. + * It may not be needed to flush the first split block in the log wrap + * case, but do it anyways to be safe -AK + */ + if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH)) + XFS_BUF_FLUSH(bp); + + ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); + ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); + + xlog_verify_iclog(log, iclog, count, B_TRUE); + + /* account for log which doesn't start at block #0 */ + XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); + /* + * Don't call xfs_bwrite here. We do log-syncs even when the filesystem + * is shutting down. + */ + XFS_BUF_WRITE(bp); + + if ((error = XFS_bwrite(bp))) { + xfs_ioerror_alert("xlog_sync", log->l_mp, bp, + XFS_BUF_ADDR(bp)); + return (error); + } + if (split) { + bp = iclog->ic_log->l_xbuf; + ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == + (unsigned long)1); + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); + XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ + XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ + (__psint_t)count), split); + XFS_BUF_SET_FSPRIVATE(bp, iclog); + XFS_BUF_BUSY(bp); + XFS_BUF_ASYNC(bp); + if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH)) + XFS_BUF_FLUSH(bp); + dptr = XFS_BUF_PTR(bp); + /* + * Bump the cycle numbers at the start of each block + * since this part of the buffer is at the start of + * a new cycle. Watch out for the header magic number + * case, though. 
+		 */
+		for (i=0; i<split; i += BBSIZE) {
+			INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
+			if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
+				INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
+			dptr += BBSIZE;
+		}
+
+		ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
+		ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
+
+		/* account for internal log which doesn't start at block #0 */
+		XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
+		XFS_BUF_WRITE(bp);
+		if ((error = XFS_bwrite(bp))) {
+			xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
+					  bp, XFS_BUF_ADDR(bp));
+			return (error);
+		}
+	}
+	return (0);
+}	/* xlog_sync */
+
+
+/*
+ * Unallocate a log structure
+ */
+void
+xlog_unalloc_log(xlog_t *log)
+{
+	xlog_in_core_t	*iclog, *next_iclog;
+	xlog_ticket_t	*tic, *next_tic;
+	int		i;
+
+
+	iclog = log->l_iclog;
+	for (i=0; i<log->l_iclog_bufs; i++) {
+		sv_destroy(&iclog->ic_forcesema);
+		sv_destroy(&iclog->ic_writesema);
+		XFS_freerbuf(iclog->ic_bp);
+#ifdef DEBUG
+		if (iclog->ic_trace != NULL) {
+			ktrace_free(iclog->ic_trace);
+		}
+#endif
+		next_iclog = iclog->ic_next;
+		kmem_free(iclog->hic_data, log->l_iclog_size);
+		kmem_free(iclog, sizeof(xlog_in_core_t));
+		iclog = next_iclog;
+	}
+	freesema(&log->l_flushsema);
+	spinlock_destroy(&log->l_icloglock);
+	spinlock_destroy(&log->l_grant_lock);
+
+	/* XXXsup take a look at this again. */
+	if ((log->l_ticket_cnt != log->l_ticket_tcnt) &&
+	    !XLOG_FORCED_SHUTDOWN(log)) {
+		xfs_fs_cmn_err(CE_WARN, log->l_mp,
+			"xlog_unalloc_log: (cnt: %d, total: %d)",
+			log->l_ticket_cnt, log->l_ticket_tcnt);
+		/* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */
+
+	} else {
+		tic = log->l_unmount_free;
+		while (tic) {
+			next_tic = tic->t_next;
+			kmem_free(tic, NBPP);
+			tic = next_tic;
+		}
+	}
+	XFS_freerbuf(log->l_xbuf);
+#ifdef DEBUG
+	if (log->l_trace != NULL) {
+		ktrace_free(log->l_trace);
+	}
+	if (log->l_grant_trace != NULL) {
+		ktrace_free(log->l_grant_trace);
+	}
+#endif
+	log->l_mp->m_log = NULL;
+	kmem_free(log, sizeof(xlog_t));
+}	/* xlog_unalloc_log */
+
+/*
+ * Update counters atomically now that memcpy is done.
+ */
+/* ARGSUSED */
+static inline void
+xlog_state_finish_copy(xlog_t		*log,
+		       xlog_in_core_t	*iclog,
+		       int		record_cnt,
+		       int		copy_bytes)
+{
+	SPLDECL(s);
+
+	s = LOG_LOCK(log);
+
+	iclog->ic_header.h_num_logops += record_cnt;
+	iclog->ic_offset += copy_bytes;
+
+	LOG_UNLOCK(log, s);
+}	/* xlog_state_finish_copy */
+
+
+
+
+/*
+ * Write some region out to in-core log
+ *
+ * This will be called when writing externally provided regions or when
+ * writing out a commit record for a given transaction.
+ *
+ * General algorithm:
+ *	1. Find total length of this write.  This may include adding to the
+ *		lengths passed in.
+ *	2. Check whether we violate the tickets reservation.
+ *	3. While writing to this iclog
+ *	    A. Reserve as much space in this iclog as can get
+ *	    B. If this is first write, save away start lsn
+ *	    C. While writing this region:
+ *		1. If first write of transaction, write start record
+ *		2. Write log operation header (header per region)
+ *		3. Find out if we can fit entire region into this iclog
+ *		4. Potentially, verify destination memcpy ptr
+ *		5. Memcpy (partial) region
+ *		6. If partial copy, release iclog; otherwise, continue
+ *			copying more regions into current iclog
+ *	4. Mark want sync bit (in simulation mode)
+ *	5. Release iclog for potential flush to on-disk log.
+ *
+ * ERRORS:
+ * 1. Panic if reservation is overrun.  This should never happen since
+ *	reservation amounts are generated internal to the filesystem.
+ * NOTES:
+ * 1. Tickets are single threaded data structures.
+ * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
+ *	syncing routine.
When a single log_write region needs to span + * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set + * on all log operation writes which don't contain the end of the + * region. The XLOG_END_TRANS bit is used for the in-core log + * operation which contains the end of the continued log_write region. + * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, + * we don't really know exactly how much space will be used. As a result, + * we don't update ic_offset until the end when we know exactly how many + * bytes have been written out. + */ +int +xlog_write(xfs_mount_t * mp, + xfs_log_iovec_t reg[], + int nentries, + xfs_log_ticket_t tic, + xfs_lsn_t *start_lsn, + xlog_in_core_t **commit_iclog, + uint flags) +{ + xlog_t *log = mp->m_log; + xlog_ticket_t *ticket = (xlog_ticket_t *)tic; + xlog_op_header_t *logop_head; /* ptr to log operation header */ + xlog_in_core_t *iclog; /* ptr to current in-core log */ + __psint_t ptr; /* copy address into data region */ + int len; /* # xlog_write() bytes 2 still copy */ + int index; /* region index currently copying */ + int log_offset; /* offset (from 0) into data region */ + int start_rec_copy; /* # bytes to copy for start record */ + int partial_copy; /* did we split a region? */ + int partial_copy_len;/* # bytes copied if split region */ + int need_copy; /* # bytes need to memcpy this region */ + int copy_len; /* # bytes actually memcpy'ing */ + int copy_off; /* # bytes from entry start */ + int contwr; /* continued write of in-core log? */ + int firstwr = 0; /* first write of transaction */ + int error; + int record_cnt = 0, data_cnt = 0; + + partial_copy_len = partial_copy = 0; + + /* Calculate potential maximum space. Each region gets its own + * xlog_op_header_t and may need to be double word aligned. + */ + len = 0; + if (ticket->t_flags & XLOG_TIC_INITED) /* acct for start rec of xact */ + len += sizeof(xlog_op_header_t); + + for (index = 0; index < nentries; index++) { + len += sizeof(xlog_op_header_t); /* each region gets >= 1 */ + len += reg[index].i_len; + } + contwr = *start_lsn = 0; + + if (ticket->t_curr_res < len) { +#ifdef DEBUG + xlog_panic( + "xfs_log_write: reservation ran out. Need to up reservation"); +#else + /* Customer configurable panic */ + xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp, + "xfs_log_write: reservation ran out. Need to up reservation"); + /* If we did not panic, shutdown the filesystem */ + xfs_force_shutdown(mp, XFS_CORRUPT_INCORE); +#endif + } else + ticket->t_curr_res -= len; + + for (index = 0; index < nentries; ) { + if ((error = xlog_state_get_iclog_space(log, len, &iclog, ticket, + &contwr, &log_offset))) + return (error); + + ASSERT(log_offset <= iclog->ic_size - 1); + ptr = (__psint_t) ((char *)iclog->ic_datap+log_offset); + + /* start_lsn is the first lsn written to. That's all we need. */ + if (! *start_lsn) + *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); + + /* This loop writes out as many regions as can fit in the amount + * of space which was allocated by xlog_state_get_iclog_space(). + */ + while (index < nentries) { + ASSERT(reg[index].i_len % sizeof(__int32_t) == 0); + ASSERT((__psint_t)ptr % sizeof(__int32_t) == 0); + start_rec_copy = 0; + + /* If first write for transaction, insert start record. + * We can't be trying to commit if we are inited. We can't + * have any "partial_copy" if we are inited. 
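+			 *
+			 * (Annotation, not in the original comment: the start
+			 * record built below is just a zero-length
+			 * xlog_op_header_t with oh_flags set to
+			 * XLOG_START_TRANS.)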
+ */ + if (ticket->t_flags & XLOG_TIC_INITED) { + logop_head = (xlog_op_header_t *)ptr; + INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid); + logop_head->oh_clientid = ticket->t_clientid; + INT_ZERO(logop_head->oh_len, ARCH_CONVERT); + logop_head->oh_flags = XLOG_START_TRANS; + INT_ZERO(logop_head->oh_res2, ARCH_CONVERT); + ticket->t_flags &= ~XLOG_TIC_INITED; /* clear bit */ + firstwr = 1; /* increment log ops below */ + record_cnt++; + + start_rec_copy = sizeof(xlog_op_header_t); + xlog_write_adv_cnt(ptr, len, log_offset, start_rec_copy); + } + + /* Copy log operation header directly into data section */ + logop_head = (xlog_op_header_t *)ptr; + INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid); + logop_head->oh_clientid = ticket->t_clientid; + INT_ZERO(logop_head->oh_res2, ARCH_CONVERT); + + /* header copied directly */ + xlog_write_adv_cnt(ptr, len, log_offset, sizeof(xlog_op_header_t)); + + /* are we copying a commit or unmount record? */ + logop_head->oh_flags = flags; + + /* + * We've seen logs corrupted with bad transaction client + * ids. This makes sure that XFS doesn't generate them on. + * Turn this into an EIO and shut down the filesystem. + */ + switch (logop_head->oh_clientid) { + case XFS_TRANSACTION: + case XFS_VOLUME: + case XFS_LOG: + break; + default: + xfs_fs_cmn_err(CE_WARN, mp, + "Bad XFS transaction clientid 0x%x in ticket 0x%p", + logop_head->oh_clientid, tic); + return XFS_ERROR(EIO); + } + + /* Partial write last time? => (partial_copy != 0) + * need_copy is the amount we'd like to copy if everything could + * fit in the current memcpy. + */ + need_copy = reg[index].i_len - partial_copy_len; + + copy_off = partial_copy_len; + if (need_copy <= iclog->ic_size - log_offset) { /*complete write */ + INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len = need_copy); + if (partial_copy) + logop_head->oh_flags|= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); + partial_copy_len = partial_copy = 0; + } else { /* partial write */ + copy_len = iclog->ic_size - log_offset; + INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len); + logop_head->oh_flags |= XLOG_CONTINUE_TRANS; + if (partial_copy) + logop_head->oh_flags |= XLOG_WAS_CONT_TRANS; + partial_copy_len += copy_len; + partial_copy++; + len += sizeof(xlog_op_header_t); /* from splitting of region */ + /* account for new log op header */ + ticket->t_curr_res -= sizeof(xlog_op_header_t); + } + xlog_verify_dest_ptr(log, ptr); + + /* copy region */ + ASSERT(copy_len >= 0); + memcpy((xfs_caddr_t)ptr, reg[index].i_addr + copy_off, copy_len); + xlog_write_adv_cnt(ptr, len, log_offset, copy_len); + + /* make copy_len total bytes copied, including headers */ + copy_len += start_rec_copy + sizeof(xlog_op_header_t); + record_cnt++; + data_cnt += contwr ? 
copy_len : 0; + firstwr = 0; + if (partial_copy) { /* copied partial region */ + /* already marked WANT_SYNC by xlog_state_get_iclog_space */ + xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); + record_cnt = data_cnt = 0; + if ((error = xlog_state_release_iclog(log, iclog))) + return (error); + break; /* don't increment index */ + } else { /* copied entire region */ + index++; + partial_copy_len = partial_copy = 0; + + if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { + xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); + record_cnt = data_cnt = 0; + xlog_state_want_sync(log, iclog); + if (commit_iclog) { + ASSERT(flags & XLOG_COMMIT_TRANS); + *commit_iclog = iclog; + } else if ((error = xlog_state_release_iclog(log, iclog))) + return (error); + if (index == nentries) + return 0; /* we are done */ + else + break; + } + } /* if (partial_copy) */ + } /* while (index < nentries) */ + } /* for (index = 0; index < nentries; ) */ + ASSERT(len == 0); + + xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); + if (commit_iclog) { + ASSERT(flags & XLOG_COMMIT_TRANS); + *commit_iclog = iclog; + return 0; + } + return (xlog_state_release_iclog(log, iclog)); +} /* xlog_write */ + + +/***************************************************************************** + * + * State Machine functions + * + ***************************************************************************** + */ + +/* Clean iclogs starting from the head. This ordering must be + * maintained, so an iclog doesn't become ACTIVE beyond one that + * is SYNCING. This is also required to maintain the notion that we use + * a counting semaphore to hold off would be writers to the log when every + * iclog is trying to sync to disk. + * + * State Change: DIRTY -> ACTIVE + */ +void +xlog_state_clean_log(xlog_t *log) +{ + xlog_in_core_t *iclog; + int changed = 0; + + iclog = log->l_iclog; + do { + if (iclog->ic_state == XLOG_STATE_DIRTY) { + iclog->ic_state = XLOG_STATE_ACTIVE; + iclog->ic_offset = 0; + iclog->ic_callback = 0; /* don't need to free */ + /* + * If the number of ops in this iclog indicate it just + * contains the dummy transaction, we can + * change state into IDLE (the second time around). + * Otherwise we should change the state into + * NEED a dummy. + * We don't need to cover the dummy. + */ + if (!changed && + (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) { + changed = 1; + } else { + /* + * We have two dirty iclogs so start over + * This could also be num of ops indicates + * this is not the dummy going out. + */ + changed = 2; + } + INT_ZERO(iclog->ic_header.h_num_logops, ARCH_CONVERT); + memset(iclog->ic_header.h_cycle_data, 0, + sizeof(iclog->ic_header.h_cycle_data)); + INT_ZERO(iclog->ic_header.h_lsn, ARCH_CONVERT); + } else if (iclog->ic_state == XLOG_STATE_ACTIVE) + /* do nothing */; + else + break; /* stop cleaning */ + iclog = iclog->ic_next; + } while (iclog != log->l_iclog); + + /* log is locked when we are called */ + /* + * Change state for the dummy log recording. + * We usually go to NEED. But we go to NEED2 if the changed indicates + * we are done writing the dummy record. + * If we are done with the second dummy recored (DONE2), then + * we go to IDLE. 
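+	 *
+	 * (Annotation, not in the original comment: the covering sequence is
+	 * COVER_NEED -> COVER_DONE -> COVER_NEED2 -> COVER_DONE2 -> COVER_IDLE,
+	 * with xfs_log_need_covered() doing the NEED -> DONE transitions, and
+	 * any iclog that was not the dummy record drops the state back to
+	 * COVER_NEED, as the switch below shows.)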
+ */ + if (changed) { + switch (log->l_covered_state) { + case XLOG_STATE_COVER_IDLE: + case XLOG_STATE_COVER_NEED: + case XLOG_STATE_COVER_NEED2: + log->l_covered_state = XLOG_STATE_COVER_NEED; + break; + + case XLOG_STATE_COVER_DONE: + if (changed == 1) + log->l_covered_state = XLOG_STATE_COVER_NEED2; + else + log->l_covered_state = XLOG_STATE_COVER_NEED; + break; + + case XLOG_STATE_COVER_DONE2: + if (changed == 1) + log->l_covered_state = XLOG_STATE_COVER_IDLE; + else + log->l_covered_state = XLOG_STATE_COVER_NEED; + break; + + default: + ASSERT(0); + } + } +} /* xlog_state_clean_log */ + +STATIC xfs_lsn_t +xlog_get_lowest_lsn( + xlog_t *log) +{ + xlog_in_core_t *lsn_log; + xfs_lsn_t lowest_lsn, lsn; + + lsn_log = log->l_iclog; + lowest_lsn = 0; + do { + if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { + lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT); + if ((lsn && !lowest_lsn) || + (XFS_LSN_CMP_ARCH(lsn, lowest_lsn, ARCH_NOCONVERT) < 0)) { + lowest_lsn = lsn; + } + } + lsn_log = lsn_log->ic_next; + } while (lsn_log != log->l_iclog); + return(lowest_lsn); +} + + +STATIC void +xlog_state_do_callback( + xlog_t *log, + int aborted, + xlog_in_core_t *ciclog) +{ + xlog_in_core_t *iclog; + xlog_in_core_t *first_iclog; /* used to know when we've + * processed all iclogs once */ + xfs_log_callback_t *cb, *cb_next; + int flushcnt = 0; + xfs_lsn_t lowest_lsn; + int ioerrors; /* counter: iclogs with errors */ + int loopdidcallbacks; /* flag: inner loop did callbacks*/ + int funcdidcallbacks; /* flag: function did callbacks */ + int repeats; /* for issuing console warnings if + * looping too many times */ + SPLDECL(s); + + s = LOG_LOCK(log); + first_iclog = iclog = log->l_iclog; + ioerrors = 0; + funcdidcallbacks = 0; + repeats = 0; + + do { + /* + * Scan all iclogs starting with the one pointed to by the + * log. Reset this starting point each time the log is + * unlocked (during callbacks). + * + * Keep looping through iclogs until one full pass is made + * without running any callbacks. + */ + first_iclog = log->l_iclog; + iclog = log->l_iclog; + loopdidcallbacks = 0; + repeats++; + + do { + + /* skip all iclogs in the ACTIVE & DIRTY states */ + if (iclog->ic_state & + (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) { + iclog = iclog->ic_next; + continue; + } + + /* + * Between marking a filesystem SHUTDOWN and stopping + * the log, we do flush all iclogs to disk (if there + * wasn't a log I/O error). So, we do want things to + * go smoothly in case of just a SHUTDOWN w/o a + * LOG_IO_ERROR. + */ + if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { + /* + * Can only perform callbacks in order. Since + * this iclog is not in the DONE_SYNC/ + * DO_CALLBACK state, we skip the rest and + * just try to clean up. If we set our iclog + * to DO_CALLBACK, we will not process it when + * we retry since a previous iclog is in the + * CALLBACK and the state cannot change since + * we are holding the LOG_LOCK. + */ + if (!(iclog->ic_state & + (XLOG_STATE_DONE_SYNC | + XLOG_STATE_DO_CALLBACK))) { + if (ciclog && (ciclog->ic_state == + XLOG_STATE_DONE_SYNC)) { + ciclog->ic_state = XLOG_STATE_DO_CALLBACK; + } + break; + } + /* + * We now have an iclog that is in either the + * DO_CALLBACK or DONE_SYNC states. The other + * states (WANT_SYNC, SYNCING, or CALLBACK were + * caught by the above if and are going to + * clean (i.e. we aren't doing their callbacks) + * see the above if. + */ + + /* + * We will do one more check here to see if we + * have chased our tail around. 
+ */ + + lowest_lsn = xlog_get_lowest_lsn(log); + if (lowest_lsn && ( + XFS_LSN_CMP_ARCH( + lowest_lsn, + INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT), + ARCH_NOCONVERT + )<0)) { + iclog = iclog->ic_next; + continue; /* Leave this iclog for + * another thread */ + } + + iclog->ic_state = XLOG_STATE_CALLBACK; + + LOG_UNLOCK(log, s); + + /* l_last_sync_lsn field protected by + * GRANT_LOCK. Don't worry about iclog's lsn. + * No one else can be here except us. + */ + s = GRANT_LOCK(log); + ASSERT(XFS_LSN_CMP_ARCH( + log->l_last_sync_lsn, + INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT), + ARCH_NOCONVERT + )<=0); + log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); + GRANT_UNLOCK(log, s); + + /* + * Keep processing entries in the callback list + * until we come around and it is empty. We + * need to atomically see that the list is + * empty and change the state to DIRTY so that + * we don't miss any more callbacks being added. + */ + s = LOG_LOCK(log); + } else { + ioerrors++; + } + cb = iclog->ic_callback; + + while (cb != 0) { + iclog->ic_callback_tail = &(iclog->ic_callback); + iclog->ic_callback = 0; + LOG_UNLOCK(log, s); + + /* perform callbacks in the order given */ + for (; cb != 0; cb = cb_next) { + cb_next = cb->cb_next; + cb->cb_func(cb->cb_arg, aborted); + } + s = LOG_LOCK(log); + cb = iclog->ic_callback; + } + + loopdidcallbacks++; + funcdidcallbacks++; + + ASSERT(iclog->ic_callback == 0); + if (!(iclog->ic_state & XLOG_STATE_IOERROR)) + iclog->ic_state = XLOG_STATE_DIRTY; + + /* + * Transition from DIRTY to ACTIVE if applicable. + * NOP if STATE_IOERROR. + */ + xlog_state_clean_log(log); + + /* wake up threads waiting in xfs_log_force() */ + sv_broadcast(&iclog->ic_forcesema); + + iclog = iclog->ic_next; + } while (first_iclog != iclog); + if (repeats && (repeats % 10) == 0) { + xfs_fs_cmn_err(CE_WARN, log->l_mp, + "xlog_state_do_callback: looping %d", repeats); + } + } while (!ioerrors && loopdidcallbacks); + + /* + * make one last gasp attempt to see if iclogs are being left in + * limbo.. + */ +#ifdef DEBUG + if (funcdidcallbacks) { + first_iclog = iclog = log->l_iclog; + do { + ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK); + /* + * Terminate the loop if iclogs are found in states + * which will cause other threads to clean up iclogs. + * + * SYNCING - i/o completion will go through logs + * DONE_SYNC - interrupt thread should be waiting for + * LOG_LOCK + * IOERROR - give up hope all ye who enter here + */ + if (iclog->ic_state == XLOG_STATE_SYNCING || + iclog->ic_state == XLOG_STATE_DONE_SYNC || + iclog->ic_state == XLOG_STATE_IOERROR ) + break; + iclog = iclog->ic_next; + } while (first_iclog != iclog); + } +#endif + + if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) { + flushcnt = log->l_flushcnt; + log->l_flushcnt = 0; + } + LOG_UNLOCK(log, s); + while (flushcnt--) + vsema(&log->l_flushsema); +} /* xlog_state_do_callback */ + + +/* + * Finish transitioning this iclog to the dirty state. + * + * Make sure that we completely execute this routine only when this is + * the last call to the iclog. There is a good chance that iclog flushes, + * when we reach the end of the physical log, get turned into 2 separate + * calls to bwrite. Hence, one iclog flush could generate two calls to this + * routine. By using the reference count bwritecnt, we guarantee that only + * the second completion goes through. + * + * Callbacks could take time, so they are done outside the scope of the + * global state machine log lock. 
Assume that the calls to cvsema won't + * take a long time. At least we know it won't sleep. + */ +void +xlog_state_done_syncing( + xlog_in_core_t *iclog, + int aborted) +{ + xlog_t *log = iclog->ic_log; + SPLDECL(s); + + s = LOG_LOCK(log); + + ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || + iclog->ic_state == XLOG_STATE_IOERROR); + ASSERT(iclog->ic_refcnt == 0); + ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2); + + + /* + * If we got an error, either on the first buffer, or in the case of + * split log writes, on the second, we mark ALL iclogs STATE_IOERROR, + * and none should ever be attempted to be written to disk + * again. + */ + if (iclog->ic_state != XLOG_STATE_IOERROR) { + if (--iclog->ic_bwritecnt == 1) { + LOG_UNLOCK(log, s); + return; + } + iclog->ic_state = XLOG_STATE_DONE_SYNC; + } + + /* + * Someone could be sleeping prior to writing out the next + * iclog buffer, we wake them all, one will get to do the + * I/O, the others get to wait for the result. + */ + sv_broadcast(&iclog->ic_writesema); + LOG_UNLOCK(log, s); + xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ +} /* xlog_state_done_syncing */ + + +/* + * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must + * sleep. The flush semaphore is set to the number of in-core buffers and + * decremented around disk syncing. Therefore, if all buffers are syncing, + * this semaphore will cause new writes to sleep until a sync completes. + * Otherwise, this code just does p() followed by v(). This approximates + * a sleep/wakeup except we can't race. + * + * The in-core logs are used in a circular fashion. They are not used + * out-of-order even when an iclog past the head is free. + * + * return: + * * log_offset where xlog_write() can start writing into the in-core + * log's data space. + * * in-core log pointer to which xlog_write() should write. + * * boolean indicating this is a continued write to an in-core log. + * If this is the last write, then the in-core log's offset field + * needs to be incremented, depending on the amount of data which + * is copied. + */ +int +xlog_state_get_iclog_space(xlog_t *log, + int len, + xlog_in_core_t **iclogp, + xlog_ticket_t *ticket, + int *continued_write, + int *logoffsetp) +{ + SPLDECL(s); + int log_offset; + xlog_rec_header_t *head; + xlog_in_core_t *iclog; + int error; + +restart: + s = LOG_LOCK(log); + if (XLOG_FORCED_SHUTDOWN(log)) { + LOG_UNLOCK(log, s); + return XFS_ERROR(EIO); + } + + iclog = log->l_iclog; + if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) { + log->l_flushcnt++; + LOG_UNLOCK(log, s); + xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH); + XFS_STATS_INC(xfsstats.xs_log_noiclogs); + /* Ensure that log writes happen */ + psema(&log->l_flushsema, PINOD); + goto restart; + } + ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); + head = &iclog->ic_header; + + iclog->ic_refcnt++; /* prevents sync */ + log_offset = iclog->ic_offset; + + /* On the 1st write to an iclog, figure out lsn. This works + * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are + * committing to. If the offset is set, that's how many blocks + * must be written. 
+ */ + if (log_offset == 0) { + ticket->t_curr_res -= log->l_iclog_hsize; + INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle); + ASSIGN_LSN(head->h_lsn, log, ARCH_CONVERT); + ASSERT(log->l_curr_block >= 0); + + /* round off error from last write with this iclog */ + ticket->t_curr_res -= iclog->ic_roundoff; + log->l_roundoff -= iclog->ic_roundoff; + iclog->ic_roundoff = 0; + } + + /* If there is enough room to write everything, then do it. Otherwise, + * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC + * bit is on, so this will get flushed out. Don't update ic_offset + * until you know exactly how many bytes get copied. Therefore, wait + * until later to update ic_offset. + * + * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's + * can fit into remaining data section. + */ + if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { + xlog_state_switch_iclogs(log, iclog, iclog->ic_size); + + /* If I'm the only one writing to this iclog, sync it to disk */ + if (iclog->ic_refcnt == 1) { + LOG_UNLOCK(log, s); + if ((error = xlog_state_release_iclog(log, iclog))) + return (error); + } else { + iclog->ic_refcnt--; + LOG_UNLOCK(log, s); + } + goto restart; + } + + /* Do we have enough room to write the full amount in the remainder + * of this iclog? Or must we continue a write on the next iclog and + * mark this iclog as completely taken? In the case where we switch + * iclogs (to mark it taken), this particular iclog will release/sync + * to disk in xlog_write(). + */ + if (len <= iclog->ic_size - iclog->ic_offset) { + *continued_write = 0; + iclog->ic_offset += len; + } else { + *continued_write = 1; + xlog_state_switch_iclogs(log, iclog, iclog->ic_size); + } + *iclogp = iclog; + + ASSERT(iclog->ic_offset <= iclog->ic_size); + LOG_UNLOCK(log, s); + + *logoffsetp = log_offset; + return 0; +} /* xlog_state_get_iclog_space */ + +/* + * Atomically get the log space required for a log ticket. + * + * Once a ticket gets put onto the reserveq, it will only return after + * the needed reservation is satisfied. + */ +STATIC int +xlog_grant_log_space(xlog_t *log, + xlog_ticket_t *tic) +{ + int free_bytes; + int need_bytes; + SPLDECL(s); +#ifdef DEBUG + xfs_lsn_t tail_lsn; +#endif + + +#ifdef DEBUG + if (log->l_flags & XLOG_ACTIVE_RECOVERY) + panic("grant Recovery problem"); +#endif + + /* Is there space or do we need to sleep? */ + s = GRANT_LOCK(log); + xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter"); + + /* something is already sleeping; insert new transaction at end */ + if (log->l_reserve_headq) { + XLOG_INS_TICKETQ(log->l_reserve_headq, tic); + xlog_trace_loggrant(log, tic, + "xlog_grant_log_space: sleep 1"); + /* + * Gotta check this before going to sleep, while we're + * holding the grant lock. + */ + if (XLOG_FORCED_SHUTDOWN(log)) + goto error_return; + + XFS_STATS_INC(xfsstats.xs_sleep_logspace); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); + /* + * If we got an error, and the filesystem is shutting down, + * we'll catch it down below. So just continue... 
+ */ + xlog_trace_loggrant(log, tic, + "xlog_grant_log_space: wake 1"); + s = GRANT_LOCK(log); + } + if (tic->t_flags & XFS_LOG_PERM_RESERV) + need_bytes = tic->t_unit_res*tic->t_ocnt; + else + need_bytes = tic->t_unit_res; + +redo: + if (XLOG_FORCED_SHUTDOWN(log)) + goto error_return; + + free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle, + log->l_grant_reserve_bytes); + if (free_bytes < need_bytes) { + if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) + XLOG_INS_TICKETQ(log->l_reserve_headq, tic); + xlog_trace_loggrant(log, tic, + "xlog_grant_log_space: sleep 2"); + XFS_STATS_INC(xfsstats.xs_sleep_logspace); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); + + if (XLOG_FORCED_SHUTDOWN(log)) { + s = GRANT_LOCK(log); + goto error_return; + } + + xlog_trace_loggrant(log, tic, + "xlog_grant_log_space: wake 2"); + xlog_grant_push_ail(log->l_mp, need_bytes); + s = GRANT_LOCK(log); + goto redo; + } else if (tic->t_flags & XLOG_TIC_IN_Q) + XLOG_DEL_TICKETQ(log->l_reserve_headq, tic); + + /* we've got enough space */ + XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); + XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r'); +#ifdef DEBUG + tail_lsn = log->l_tail_lsn; + /* + * Check to make sure the grant write head didn't just over lap the + * tail. If the cycles are the same, we can't be overlapping. + * Otherwise, make sure that the cycles differ by exactly one and + * check the byte count. + */ + if (CYCLE_LSN(tail_lsn, ARCH_NOCONVERT) != log->l_grant_write_cycle) { + ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn, ARCH_NOCONVERT)); + ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn, ARCH_NOCONVERT))); + } +#endif + xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit"); + xlog_verify_grant_head(log, 1); + GRANT_UNLOCK(log, s); + return 0; + + error_return: + if (tic->t_flags & XLOG_TIC_IN_Q) + XLOG_DEL_TICKETQ(log->l_reserve_headq, tic); + xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret"); + /* + * If we are failing, make sure the ticket doesn't have any + * current reservations. We don't want to add this back when + * the ticket/transaction gets cancelled. + */ + tic->t_curr_res = 0; + tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ + GRANT_UNLOCK(log, s); + return XFS_ERROR(EIO); +} /* xlog_grant_log_space */ + + +/* + * Replenish the byte reservation required by moving the grant write head. + * + * + */ +STATIC int +xlog_regrant_write_log_space(xlog_t *log, + xlog_ticket_t *tic) +{ + SPLDECL(s); + int free_bytes, need_bytes; + xlog_ticket_t *ntic; +#ifdef DEBUG + xfs_lsn_t tail_lsn; +#endif + + tic->t_curr_res = tic->t_unit_res; + + if (tic->t_cnt > 0) + return (0); + +#ifdef DEBUG + if (log->l_flags & XLOG_ACTIVE_RECOVERY) + panic("regrant Recovery problem"); +#endif + + s = GRANT_LOCK(log); + xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter"); + + if (XLOG_FORCED_SHUTDOWN(log)) + goto error_return; + + /* If there are other waiters on the queue then give them a + * chance at logspace before us. Wake up the first waiters, + * if we do not wake up all the waiters then go to sleep waiting + * for more free space, otherwise try to get some space for + * this transaction. 
+ */ + + if ((ntic = log->l_write_headq)) { + free_bytes = xlog_space_left(log, log->l_grant_write_cycle, + log->l_grant_write_bytes); + do { + ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV); + + if (free_bytes < ntic->t_unit_res) + break; + free_bytes -= ntic->t_unit_res; + sv_signal(&ntic->t_sema); + ntic = ntic->t_next; + } while (ntic != log->l_write_headq); + + if (ntic != log->l_write_headq) { + if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) + XLOG_INS_TICKETQ(log->l_write_headq, tic); + + xlog_trace_loggrant(log, tic, + "xlog_regrant_write_log_space: sleep 1"); + XFS_STATS_INC(xfsstats.xs_sleep_logspace); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, + &log->l_grant_lock, s); + + /* If we're shutting down, this tic is already + * off the queue */ + if (XLOG_FORCED_SHUTDOWN(log)) { + s = GRANT_LOCK(log); + goto error_return; + } + + xlog_trace_loggrant(log, tic, + "xlog_regrant_write_log_space: wake 1"); + xlog_grant_push_ail(log->l_mp, tic->t_unit_res); + s = GRANT_LOCK(log); + } + } + + need_bytes = tic->t_unit_res; + +redo: + if (XLOG_FORCED_SHUTDOWN(log)) + goto error_return; + + free_bytes = xlog_space_left(log, log->l_grant_write_cycle, + log->l_grant_write_bytes); + if (free_bytes < need_bytes) { + if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) + XLOG_INS_TICKETQ(log->l_write_headq, tic); + XFS_STATS_INC(xfsstats.xs_sleep_logspace); + sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); + + /* If we're shutting down, this tic is already off the queue */ + if (XLOG_FORCED_SHUTDOWN(log)) { + s = GRANT_LOCK(log); + goto error_return; + } + + xlog_trace_loggrant(log, tic, + "xlog_regrant_write_log_space: wake 2"); + xlog_grant_push_ail(log->l_mp, need_bytes); + s = GRANT_LOCK(log); + goto redo; + } else if (tic->t_flags & XLOG_TIC_IN_Q) + XLOG_DEL_TICKETQ(log->l_write_headq, tic); + + XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); /* we've got enough space */ +#ifdef DEBUG + tail_lsn = log->l_tail_lsn; + if (CYCLE_LSN(tail_lsn, ARCH_NOCONVERT) != log->l_grant_write_cycle) { + ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn, ARCH_NOCONVERT)); + ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn, ARCH_NOCONVERT))); + } +#endif + + xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); + xlog_verify_grant_head(log, 1); + GRANT_UNLOCK(log, s); + return (0); + + + error_return: + if (tic->t_flags & XLOG_TIC_IN_Q) + XLOG_DEL_TICKETQ(log->l_reserve_headq, tic); + xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret"); + /* + * If we are failing, make sure the ticket doesn't have any + * current reservations. We don't want to add this back when + * the ticket/transaction gets cancelled. + */ + tic->t_curr_res = 0; + tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ + GRANT_UNLOCK(log, s); + return XFS_ERROR(EIO); +} /* xlog_regrant_write_log_space */ + + +/* The first cnt-1 times through here we don't need to + * move the grant write head because the permanent + * reservation has reserved cnt times the unit amount. + * Release part of current permanent unit reservation and + * reset current reservation to be one units worth. Also + * move grant reservation head forward. 
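The grant heads moved below are (cycle, byte-offset) pairs rather than plain byte counters: the XLOG_GRANT_ADD_SPACE/XLOG_GRANT_SUB_SPACE macros later in this patch wrap the byte offset at the log size and bump or drop the cycle when the head laps the physical log. A minimal sketch of that arithmetic, with illustrative names of our own (grant_head, grant_add, grant_sub):

/* Illustrative (cycle, bytes) grant-head arithmetic; not the kernel macros. */
struct grant_head {
	int cycle;	/* how many times the head has lapped the log */
	int bytes;	/* byte offset within the current lap */
};

static void grant_add(struct grant_head *h, int bytes, int logsize)
{
	h->bytes += bytes;
	if (h->bytes > logsize) {	/* lapped the physical log */
		h->bytes -= logsize;
		h->cycle++;
	}
}

static void grant_sub(struct grant_head *h, int bytes, int logsize)
{
	h->bytes -= bytes;
	if (h->bytes < 0) {		/* backed up over a lap boundary */
		h->bytes += logsize;
		h->cycle--;
	}
}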
+ */ +STATIC void +xlog_regrant_reserve_log_space(xlog_t *log, + xlog_ticket_t *ticket) +{ + SPLDECL(s); + + xlog_trace_loggrant(log, ticket, + "xlog_regrant_reserve_log_space: enter"); + if (ticket->t_cnt > 0) + ticket->t_cnt--; + + s = GRANT_LOCK(log); + XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w'); + XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r'); + ticket->t_curr_res = ticket->t_unit_res; + xlog_trace_loggrant(log, ticket, + "xlog_regrant_reserve_log_space: sub current res"); + xlog_verify_grant_head(log, 1); + + /* just return if we still have some of the pre-reserved space */ + if (ticket->t_cnt > 0) { + GRANT_UNLOCK(log, s); + return; + } + + XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r'); + xlog_trace_loggrant(log, ticket, + "xlog_regrant_reserve_log_space: exit"); + xlog_verify_grant_head(log, 0); + GRANT_UNLOCK(log, s); + ticket->t_curr_res = ticket->t_unit_res; +} /* xlog_regrant_reserve_log_space */ + + +/* + * Give back the space left from a reservation. + * + * All the information we need to make a correct determination of space left + * is present. For non-permanent reservations, things are quite easy. The + * count should have been decremented to zero. We only need to deal with the + * space remaining in the current reservation part of the ticket. If the + * ticket contains a permanent reservation, there may be left over space which + * needs to be released. A count of N means that N-1 refills of the current + * reservation can be done before we need to ask for more space. The first + * one goes to fill up the first current reservation. Once we run out of + * space, the count will stay at zero and the only space remaining will be + * in the current reservation field. + */ +STATIC void +xlog_ungrant_log_space(xlog_t *log, + xlog_ticket_t *ticket) +{ + SPLDECL(s); + + if (ticket->t_cnt > 0) + ticket->t_cnt--; + + s = GRANT_LOCK(log); + xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); + + XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w'); + XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r'); + + xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current"); + + /* If this is a permanent reservation ticket, we may be able to free + * up more space based on the remaining count. + */ + if (ticket->t_cnt > 0) { + ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); + XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'w'); + XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'r'); + } + + xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit"); + xlog_verify_grant_head(log, 1); + GRANT_UNLOCK(log, s); + xfs_log_move_tail(log->l_mp, 1); +} /* xlog_ungrant_log_space */ + + +/* + * Atomically put back used ticket. + */ +void +xlog_state_put_ticket(xlog_t *log, + xlog_ticket_t *tic) +{ + unsigned long s; + + s = LOG_LOCK(log); + xlog_ticket_put(log, tic); + LOG_UNLOCK(log, s); +} /* xlog_state_put_ticket */ + +/* + * Flush iclog to disk if this is the last reference to the given iclog and + * the WANT_SYNC bit is set. + * + * When this function is entered, the iclog is not necessarily in the + * WANT_SYNC state. It may be sitting around waiting to get filled. + * + * + */ +int +xlog_state_release_iclog(xlog_t *log, + xlog_in_core_t *iclog) +{ + SPLDECL(s); + int sync = 0; /* do we sync? 
*/ + + xlog_assign_tail_lsn(log->l_mp); + + s = LOG_LOCK(log); + + if (iclog->ic_state & XLOG_STATE_IOERROR) { + LOG_UNLOCK(log, s); + return XFS_ERROR(EIO); + } + + ASSERT(iclog->ic_refcnt > 0); + ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || + iclog->ic_state == XLOG_STATE_WANT_SYNC); + + if (--iclog->ic_refcnt == 0 && + iclog->ic_state == XLOG_STATE_WANT_SYNC) { + sync++; + iclog->ic_state = XLOG_STATE_SYNCING; + INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn); + xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn); + /* cycle incremented when incrementing curr_block */ + } + + LOG_UNLOCK(log, s); + + /* + * We let the log lock go, so it's possible that we hit a log I/O + * error or someother SHUTDOWN condition that marks the iclog + * as XLOG_STATE_IOERROR before the bwrite. However, we know that + * this iclog has consistent data, so we ignore IOERROR + * flags after this point. + */ + if (sync) { + return xlog_sync(log, iclog); + } + return (0); + +} /* xlog_state_release_iclog */ + + +/* + * This routine will mark the current iclog in the ring as WANT_SYNC + * and move the current iclog pointer to the next iclog in the ring. + * When this routine is called from xlog_state_get_iclog_space(), the + * exact size of the iclog has not yet been determined. All we know is + * that every data block. We have run out of space in this log record. + */ +STATIC void +xlog_state_switch_iclogs(xlog_t *log, + xlog_in_core_t *iclog, + int eventual_size) +{ + uint roundup; + + ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); + if (!eventual_size) + eventual_size = iclog->ic_offset; + iclog->ic_state = XLOG_STATE_WANT_SYNC; + INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block); + log->l_prev_block = log->l_curr_block; + log->l_prev_cycle = log->l_curr_cycle; + + /* roll log?: ic_offset changed later */ + log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); + + /* Round up to next log-sunit */ + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + if (log->l_curr_block & (log->l_mp->m_lstripemask - 1)) { + roundup = log->l_mp->m_lstripemask - + (log->l_curr_block & + (log->l_mp->m_lstripemask - 1)); + } else { + roundup = 0; + } + log->l_curr_block += roundup; + } + + if (log->l_curr_block >= log->l_logBBsize) { + log->l_curr_cycle++; + if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) + log->l_curr_cycle++; + log->l_curr_block -= log->l_logBBsize; + ASSERT(log->l_curr_block >= 0); + } + ASSERT(iclog == log->l_iclog); + log->l_iclog = iclog->ic_next; +} /* xlog_state_switch_iclogs */ + + +/* + * Write out all data in the in-core log as of this exact moment in time. + * + * Data may be written to the in-core log during this call. However, + * we don't guarantee this data will be written out. A change from past + * implementation means this routine will *not* write out zero length LRs. + * + * Basically, we try and perform an intelligent scan of the in-core logs. + * If we determine there is no flushable data, we just return. There is no + * flushable data if: + * + * 1. the current iclog is active and has no data; the previous iclog + * is in the active or dirty state. + * 2. the current iclog is drity, and the previous iclog is in the + * active or dirty state. + * + * We may sleep (call psema) if: + * + * 1. the current iclog is not in the active nor dirty state. + * 2. the current iclog dirty, and the previous iclog is not in the + * active nor dirty state. + * 3. 
the current iclog is active, and there is another thread writing + * to this particular iclog. + * 4. a) the current iclog is active and has no other writers + * b) when we return from flushing out this iclog, it is still + * not in the active nor dirty state. + */ +STATIC int +xlog_state_sync_all(xlog_t *log, uint flags) +{ + xlog_in_core_t *iclog; + xfs_lsn_t lsn; + SPLDECL(s); + + s = LOG_LOCK(log); + + iclog = log->l_iclog; + if (iclog->ic_state & XLOG_STATE_IOERROR) { + LOG_UNLOCK(log, s); + return XFS_ERROR(EIO); + } + + /* If the head iclog is not active nor dirty, we just attach + * ourselves to the head and go to sleep. + */ + if (iclog->ic_state == XLOG_STATE_ACTIVE || + iclog->ic_state == XLOG_STATE_DIRTY) { + /* + * If the head is dirty or (active and empty), then + * we need to look at the previous iclog. If the previous + * iclog is active or dirty we are done. There is nothing + * to sync out. Otherwise, we attach ourselves to the + * previous iclog and go to sleep. + */ + if (iclog->ic_state == XLOG_STATE_DIRTY || + (iclog->ic_refcnt == 0 && iclog->ic_offset == 0)) { + iclog = iclog->ic_prev; + if (iclog->ic_state == XLOG_STATE_ACTIVE || + iclog->ic_state == XLOG_STATE_DIRTY) + goto no_sleep; + else + goto maybe_sleep; + } else { + if (iclog->ic_refcnt == 0) { + /* We are the only one with access to this + * iclog. Flush it out now. There should + * be a roundoff of zero to show that someone + * has already taken care of the roundoff from + * the previous sync. + */ + ASSERT(iclog->ic_roundoff == 0); + iclog->ic_refcnt++; + lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); + xlog_state_switch_iclogs(log, iclog, 0); + LOG_UNLOCK(log, s); + + if (xlog_state_release_iclog(log, iclog)) + return XFS_ERROR(EIO); + s = LOG_LOCK(log); + if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn && + iclog->ic_state != XLOG_STATE_DIRTY) + goto maybe_sleep; + else + goto no_sleep; + } else { + /* Someone else is writing to this iclog. + * Use its call to flush out the data. However, + * the other thread may not force out this LR, + * so we mark it WANT_SYNC. + */ + xlog_state_switch_iclogs(log, iclog, 0); + goto maybe_sleep; + } + } + } + + /* By the time we come around again, the iclog could've been filled + * which would give it another lsn. If we have a new lsn, just + * return because the relevant data has been flushed. + */ +maybe_sleep: + if (flags & XFS_LOG_SYNC) { + /* + * We must check if we're shutting down here, before + * we wait, while we're holding the LOG_LOCK. + * Then we check again after waking up, in case our + * sleep was disturbed by a bad news. + */ + if (iclog->ic_state & XLOG_STATE_IOERROR) { + LOG_UNLOCK(log, s); + return XFS_ERROR(EIO); + } + XFS_STATS_INC(xfsstats.xs_log_force_sleep); + sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s); + /* + * No need to grab the log lock here since we're + * only deciding whether or not to return EIO + * and the memory read should be atomic. + */ + if (iclog->ic_state & XLOG_STATE_IOERROR) + return XFS_ERROR(EIO); + + } else { + +no_sleep: + LOG_UNLOCK(log, s); + } + return 0; +} /* xlog_state_sync_all */ + + +/* + * Used by code which implements synchronous log forces. + * + * Find in-core log with lsn. + * If it is in the DIRTY state, just return. + * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC + * state and go to sleep or return. + * If it is in any other state, go to sleep or return. + * + * If filesystem activity goes to zero, the iclog will get flushed only by + * bdflush(). 
+ */ +int +xlog_state_sync(xlog_t *log, + xfs_lsn_t lsn, + uint flags) +{ + xlog_in_core_t *iclog; + int already_slept = 0; + SPLDECL(s); + + +try_again: + s = LOG_LOCK(log); + iclog = log->l_iclog; + + if (iclog->ic_state & XLOG_STATE_IOERROR) { + LOG_UNLOCK(log, s); + return XFS_ERROR(EIO); + } + + do { + if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) { + iclog = iclog->ic_next; + continue; + } + + if (iclog->ic_state == XLOG_STATE_DIRTY) { + LOG_UNLOCK(log, s); + return 0; + } + + if (iclog->ic_state == XLOG_STATE_ACTIVE) { + /* + * We sleep here if we haven't already slept (e.g. + * this is the first time we've looked at the correct + * iclog buf) and the buffer before us is going to + * be sync'ed. The reason for this is that if we + * are doing sync transactions here, by waiting for + * the previous I/O to complete, we can allow a few + * more transactions into this iclog before we close + * it down. + * + * Otherwise, we mark the buffer WANT_SYNC, and bump + * up the refcnt so we can release the log (which drops + * the ref count). The state switch keeps new transaction + * commits from using this buffer. When the current commits + * finish writing into the buffer, the refcount will drop to + * zero and the buffer will go out then. + */ + if (!already_slept && + (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC | + XLOG_STATE_SYNCING))) { + ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); + XFS_STATS_INC(xfsstats.xs_log_force_sleep); + sv_wait(&iclog->ic_prev->ic_writesema, PSWP, + &log->l_icloglock, s); + already_slept = 1; + goto try_again; + } else { + iclog->ic_refcnt++; + xlog_state_switch_iclogs(log, iclog, 0); + LOG_UNLOCK(log, s); + if (xlog_state_release_iclog(log, iclog)) + return XFS_ERROR(EIO); + s = LOG_LOCK(log); + } + } + + if ((flags & XFS_LOG_SYNC) && /* sleep */ + !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { + + /* + * Don't wait on the forcesema if we know that we've + * gotten a log write error. + */ + if (iclog->ic_state & XLOG_STATE_IOERROR) { + LOG_UNLOCK(log, s); + return XFS_ERROR(EIO); + } + XFS_STATS_INC(xfsstats.xs_log_force_sleep); + sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s); + /* + * No need to grab the log lock here since we're + * only deciding whether or not to return EIO + * and the memory read should be atomic. + */ + if (iclog->ic_state & XLOG_STATE_IOERROR) + return XFS_ERROR(EIO); + } else { /* just return */ + LOG_UNLOCK(log, s); + } + return 0; + + } while (iclog != log->l_iclog); + + LOG_UNLOCK(log, s); + return (0); +} /* xlog_state_sync */ + + +/* + * Called when we want to mark the current iclog as being ready to sync to + * disk. + */ +void +xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) +{ + SPLDECL(s); + + s = LOG_LOCK(log); + + if (iclog->ic_state == XLOG_STATE_ACTIVE) { + xlog_state_switch_iclogs(log, iclog, 0); + } else { + ASSERT(iclog->ic_state & + (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); + } + + LOG_UNLOCK(log, s); +} /* xlog_state_want_sync */ + + + +/***************************************************************************** + * + * TICKET functions + * + ***************************************************************************** + */ + +/* + * Algorithm doesn't take into account page size. ;-( + */ +STATIC void +xlog_state_ticket_alloc(xlog_t *log) +{ + xlog_ticket_t *t_list; + xlog_ticket_t *next; + xfs_caddr_t buf; + uint i = (NBPP / sizeof(xlog_ticket_t)) - 2; + SPLDECL(s); + + /* + * The kmem_zalloc may sleep, so we shouldn't be holding the + * global lock. 
XXXmiken: may want to use zone allocator. + */ + buf = (xfs_caddr_t) kmem_zalloc(NBPP, 0); + + s = LOG_LOCK(log); + + /* Attach 1st ticket to Q, so we can keep track of allocated memory */ + t_list = (xlog_ticket_t *)buf; + t_list->t_next = log->l_unmount_free; + log->l_unmount_free = t_list++; + log->l_ticket_cnt++; + log->l_ticket_tcnt++; + + /* Next ticket becomes first ticket attached to ticket free list */ + if (log->l_freelist != NULL) { + ASSERT(log->l_tail != NULL); + log->l_tail->t_next = t_list; + } else { + log->l_freelist = t_list; + } + log->l_ticket_cnt++; + log->l_ticket_tcnt++; + + /* Cycle through rest of alloc'ed memory, building up free Q */ + for ( ; i > 0; i--) { + next = t_list + 1; + t_list->t_next = next; + t_list = next; + log->l_ticket_cnt++; + log->l_ticket_tcnt++; + } + t_list->t_next = 0; + log->l_tail = t_list; + LOG_UNLOCK(log, s); +} /* xlog_state_ticket_alloc */ + + +/* + * Put ticket into free list + * + * Assumption: log lock is held around this call. + */ +STATIC void +xlog_ticket_put(xlog_t *log, + xlog_ticket_t *ticket) +{ + sv_destroy(&ticket->t_sema); + + /* + * Don't think caching will make that much difference. It's + * more important to make debug easier. + */ +#if 0 + /* real code will want to use LIFO for caching */ + ticket->t_next = log->l_freelist; + log->l_freelist = ticket; + /* no need to clear fields */ +#else + /* When we debug, it is easier if tickets are cycled */ + ticket->t_next = 0; + if (log->l_tail != 0) { + log->l_tail->t_next = ticket; + } else { + ASSERT(log->l_freelist == 0); + log->l_freelist = ticket; + } + log->l_tail = ticket; +#endif /* DEBUG */ + log->l_ticket_cnt++; +} /* xlog_ticket_put */ + + +/* + * Grab ticket off freelist or allocation some more + */ +xlog_ticket_t * +xlog_ticket_get(xlog_t *log, + int unit_bytes, + int cnt, + char client, + uint xflags) +{ + xlog_ticket_t *tic; + SPLDECL(s); + + alloc: + if (log->l_freelist == NULL) + xlog_state_ticket_alloc(log); /* potentially sleep */ + + s = LOG_LOCK(log); + if (log->l_freelist == NULL) { + LOG_UNLOCK(log, s); + goto alloc; + } + tic = log->l_freelist; + log->l_freelist = tic->t_next; + if (log->l_freelist == NULL) + log->l_tail = NULL; + log->l_ticket_cnt--; + LOG_UNLOCK(log, s); + + /* + * Permanent reservations have up to 'cnt'-1 active log operations + * in the log. A unit in this case is the amount of space for one + * of these log operations. Normal reservations have a cnt of 1 + * and their unit amount is the total amount of space required. + * The following line of code adds one log record header length + * for each part of an operation which may fall on a different + * log record. + * + * One more XLOG_HEADER_SIZE is added to account for possible + * round off errors when syncing a LR to disk. The bytes are + * subtracted if the thread using this ticket is the first writer + * to a new LR. + * + * We add an extra log header for the possibility that the commit + * record is the first data written to a new log record. In this + * case it is separate from the rest of the transaction data and + * will be charged for the log record header. 
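The head-room added just below can be read as "one iclog header per log record the payload might be split across, plus two spares" (one for sync round-off, one for a commit record landing at the start of a fresh log record). A worked sketch with illustrative sizes; the helper name and the 32 KB record size are assumptions for the example, the real values come from l_iclog_hsize and the log record size:

/* Worked example of the header padding computed below (illustrative only). */
static int ticket_res_with_headers(int unit_bytes, int hsize, int lr_size)
{
	/* one header per log record the payload may be split across ... */
	int headers = (unit_bytes + lr_size - 1) / lr_size;

	/* ... plus one for sync round-off and one for a commit record
	 * that begins a new log record.
	 */
	headers += 2;

	return unit_bytes + hsize * headers;
}

/* e.g. ticket_res_with_headers(60 * 1024, 512, 32 * 1024)
 *      == 61440 + 4 * 512 = 63488 bytes reserved.
 */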
+ */ + unit_bytes += log->l_iclog_hsize * (XLOG_BTOLRBB(unit_bytes) + 2); + + tic->t_unit_res = unit_bytes; + tic->t_curr_res = unit_bytes; + tic->t_cnt = cnt; + tic->t_ocnt = cnt; + tic->t_tid = (xlog_tid_t)((__psint_t)tic & 0xffffffff); + tic->t_clientid = client; + tic->t_flags = XLOG_TIC_INITED; + if (xflags & XFS_LOG_PERM_RESERV) + tic->t_flags |= XLOG_TIC_PERM_RESERV; + sv_init(&(tic->t_sema), SV_DEFAULT, "logtick"); + + return tic; +} /* xlog_ticket_get */ + + +/****************************************************************************** + * + * Log debug routines + * + ****************************************************************************** + */ +#if defined(DEBUG) && !defined(XLOG_NOLOG) +/* + * Make sure that the destination ptr is within the valid data region of + * one of the iclogs. This uses backup pointers stored in a different + * part of the log in case we trash the log structure. + */ +void +xlog_verify_dest_ptr(xlog_t *log, + __psint_t ptr) +{ + int i; + int good_ptr = 0; + + for (i=0; i < log->l_iclog_bufs; i++) { + if (ptr >= (__psint_t)log->l_iclog_bak[i] && + ptr <= (__psint_t)log->l_iclog_bak[i]+log->l_iclog_size) + good_ptr++; + } + if (! good_ptr) + xlog_panic("xlog_verify_dest_ptr: invalid ptr"); +} /* xlog_verify_dest_ptr */ + + +#ifdef XFSDEBUG +/* check split LR write */ +STATIC void +xlog_verify_disk_cycle_no(xlog_t *log, + xlog_in_core_t *iclog) +{ + xfs_buf_t *bp; + uint cycle_no; + xfs_daddr_t i; + + if (BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT) < 10) { + cycle_no = CYCLE_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT); + bp = xlog_get_bp(1, log->l_mp); + ASSERT(bp); + for (i = 0; i < BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT); i++) { + xlog_bread(log, i, 1, bp); + if (GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT) != cycle_no) + xlog_warn("XFS: xlog_verify_disk_cycle_no: bad cycle no"); + } + xlog_put_bp(bp); + } +} /* xlog_verify_disk_cycle_no */ +#endif + +STATIC void +xlog_verify_grant_head(xlog_t *log, int equals) +{ + if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) { + if (equals) + ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes); + else + ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes); + } else { + ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle); + ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes); + } +} /* xlog_verify_grant_head */ + +/* check if it will fit */ +STATIC void +xlog_verify_tail_lsn(xlog_t *log, + xlog_in_core_t *iclog, + xfs_lsn_t tail_lsn) +{ + int blocks; + + if (CYCLE_LSN(tail_lsn, ARCH_NOCONVERT) == log->l_prev_cycle) { + blocks = + log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn, ARCH_NOCONVERT)); + if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) + xlog_panic("xlog_verify_tail_lsn: ran out of log space"); + } else { + ASSERT(CYCLE_LSN(tail_lsn, ARCH_NOCONVERT)+1 == log->l_prev_cycle); + + if (BLOCK_LSN(tail_lsn, ARCH_NOCONVERT) == log->l_prev_block) + xlog_panic("xlog_verify_tail_lsn: tail wrapped"); + + blocks = BLOCK_LSN(tail_lsn, ARCH_NOCONVERT) - log->l_prev_block; + if (blocks < BTOBB(iclog->ic_offset) + 1) + xlog_panic("xlog_verify_tail_lsn: ran out of log space"); + } +} /* xlog_verify_tail_lsn */ + +/* + * Perform a number of checks on the iclog before writing to disk. + * + * 1. Make sure the iclogs are still circular + * 2. Make sure we have a good magic number + * 3. Make sure we don't have magic numbers in the data + * 4. Check fields of each log operation header for: + * A. Valid client identifier + * B. 
tid ptr value falls in valid ptr space (user space code) + * C. Length in log record header is correct according to the + * individual operation headers within record. + * 5. When a bwrite will occur within 5 blocks of the front of the physical + * log, check the preceding blocks of the physical log to make sure all + * the cycle numbers agree with the current cycle number. + */ +STATIC void +xlog_verify_iclog(xlog_t *log, + xlog_in_core_t *iclog, + int count, + boolean_t syncing) +{ + xlog_op_header_t *ophead; + xlog_in_core_t *icptr; + xfs_caddr_t ptr; + xfs_caddr_t base_ptr; + __psint_t field_offset; + __uint8_t clientid; + int len, i, j, k, op_len; + int idx; + SPLDECL(s); + + union ich { + xlog_rec_ext_header_t hic_xheader; + char hic_sector[XLOG_HEADER_SIZE]; + }*xhdr; + + /* check validity of iclog pointers */ + s = LOG_LOCK(log); + icptr = log->l_iclog; + for (i=0; i < log->l_iclog_bufs; i++) { + if (icptr == 0) + xlog_panic("xlog_verify_iclog: illegal ptr"); + icptr = icptr->ic_next; + } + if (icptr != log->l_iclog) + xlog_panic("xlog_verify_iclog: corrupt iclog ring"); + LOG_UNLOCK(log, s); + + /* check log magic numbers */ + ptr = (xfs_caddr_t) &(iclog->ic_header); + if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) + xlog_panic("xlog_verify_iclog: illegal magic num"); + + for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count; + ptr += BBSIZE) { + if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) + xlog_panic("xlog_verify_iclog: unexpected magic num"); + } + + /* check fields */ + len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT); + ptr = iclog->ic_datap; + base_ptr = ptr; + ophead = (xlog_op_header_t *)ptr; + xhdr = (union ich*)&iclog->ic_header; + for (i = 0; i < len; i++) { + ophead = (xlog_op_header_t *)ptr; + + /* clientid is only 1 byte */ + field_offset = (__psint_t) + ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr); + if (syncing == B_FALSE || (field_offset & 0x1ff)) { + clientid = ophead->oh_clientid; + } else { + idx = BTOBB((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap); + if (idx > (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { + j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + clientid = GET_CLIENT_ID(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT); + } else { + clientid = GET_CLIENT_ID(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT); + } + } + if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) + cmn_err(CE_WARN, "xlog_verify_iclog: illegal clientid %d op 0x%p offset 0x%x", clientid, ophead, field_offset); + + /* check length */ + field_offset = (__psint_t) + ((xfs_caddr_t)&(ophead->oh_len) - base_ptr); + if (syncing == B_FALSE || (field_offset & 0x1ff)) { + op_len = INT_GET(ophead->oh_len, ARCH_CONVERT); + } else { + idx = BTOBB((__psint_t)&ophead->oh_len - + (__psint_t)iclog->ic_datap); + if (idx > (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { + j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT); + } else { + op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT); + } + } + ptr += sizeof(xlog_op_header_t) + op_len; + } +} /* xlog_verify_iclog */ +#endif /* DEBUG && !XLOG_NOLOG */ + +/* + * Mark all iclogs IOERROR. LOG_LOCK is held by the caller. + */ +STATIC int +xlog_state_ioerror( + xlog_t *log) +{ + xlog_in_core_t *iclog, *ic; + + iclog = log->l_iclog; + if (! 
(iclog->ic_state & XLOG_STATE_IOERROR)) { + /* + * Mark all the incore logs IOERROR. + * From now on, no log flushes will result. + */ + ic = iclog; + do { + ic->ic_state = XLOG_STATE_IOERROR; + ic = ic->ic_next; + } while (ic != iclog); + return (0); + } + /* + * Return non-zero, if state transition has already happened. + */ + return (1); +} + +/* + * This is called from xfs_force_shutdown, when we're forcibly + * shutting down the filesystem, typically because of an IO error. + * Our main objectives here are to make sure that: + * a. the filesystem gets marked 'SHUTDOWN' for all interested + * parties to find out, 'atomically'. + * b. those who're sleeping on log reservations, pinned objects and + * other resources get woken up, and be told the bad news. + * c. nothing new gets queued up after (a) and (b) are done. + * d. if !logerror, flush the iclogs to disk, then seal them off + * for business. + */ +int +xfs_log_force_umount( + struct xfs_mount *mp, + int logerror) +{ + xlog_ticket_t *tic; + xlog_t *log; + int retval; + SPLDECL(s); + SPLDECL(s2); + + log = mp->m_log; + + /* + * If this happens during log recovery, don't worry about + * locking; the log isn't open for business yet. + */ + if (!log || + log->l_flags & XLOG_ACTIVE_RECOVERY) { + mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; + XFS_BUF_DONE(mp->m_sb_bp); + return (0); + } + + /* + * Somebody could've already done the hard work for us. + * No need to get locks for this. + */ + if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { + ASSERT(XLOG_FORCED_SHUTDOWN(log)); + return (1); + } + retval = 0; + /* + * We must hold both the GRANT lock and the LOG lock, + * before we mark the filesystem SHUTDOWN and wake + * everybody up to tell the bad news. + */ + s = GRANT_LOCK(log); + s2 = LOG_LOCK(log); + mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; + XFS_BUF_DONE(mp->m_sb_bp); + /* + * This flag is sort of redundant because of the mount flag, but + * it's good to maintain the separation between the log and the rest + * of XFS. + */ + log->l_flags |= XLOG_IO_ERROR; + + /* + * If we hit a log error, we want to mark all the iclogs IOERROR + * while we're still holding the loglock. + */ + if (logerror) + retval = xlog_state_ioerror(log); + LOG_UNLOCK(log, s2); + + /* + * We don't want anybody waiting for log reservations + * after this. That means we have to wake up everybody + * queued up on reserve_headq as well as write_headq. + * In addition, we make sure in xlog_{re}grant_log_space + * that we don't enqueue anything once the SHUTDOWN flag + * is set, and this action is protected by the GRANTLOCK. + */ + if ((tic = log->l_reserve_headq)) { + do { + sv_signal(&tic->t_sema); + tic = tic->t_next; + } while (tic != log->l_reserve_headq); + } + + if ((tic = log->l_write_headq)) { + do { + sv_signal(&tic->t_sema); + tic = tic->t_next; + } while (tic != log->l_write_headq); + } + GRANT_UNLOCK(log, s); + + if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { + ASSERT(!logerror); + /* + * Force the incore logs to disk before shutting the + * log down completely. + */ + xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC); + s2 = LOG_LOCK(log); + retval = xlog_state_ioerror(log); + LOG_UNLOCK(log, s2); + } + /* + * Wake up everybody waiting on xfs_log_force. + * Callback all log item committed functions as if the + * log writes were completed. 
+ */ + xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); + +#ifdef XFSERRORDEBUG + { + xlog_in_core_t *iclog; + + s = LOG_LOCK(log); + iclog = log->l_iclog; + do { + ASSERT(iclog->ic_callback == 0); + iclog = iclog->ic_next; + } while (iclog != log->l_iclog); + LOG_UNLOCK(log, s); + } +#endif + /* return non-zero if log IOERROR transition had already happened */ + return (retval); +} + +int +xlog_iclogs_empty(xlog_t *log) +{ + xlog_in_core_t *iclog; + + iclog = log->l_iclog; + do { + /* endianness does not matter here, zero is zero in + * any language. + */ + if (iclog->ic_header.h_num_logops) + return(0); + iclog = iclog->ic_next; + } while (iclog != log->l_iclog); + return(1); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_log.h linux.22-ac2/fs/xfs/xfs_log.h --- linux.vanilla/fs/xfs/xfs_log.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_log.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_LOG_H__ +#define __XFS_LOG_H__ + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define LSN_FIELD_CYCLE(arch) (((arch)==ARCH_NOCONVERT)?1:0) +#define LSN_FIELD_BLOCK(arch) (((arch)==ARCH_NOCONVERT)?0:1) +#else +#define LSN_FIELD_CYCLE(arch) (0) +#define LSN_FIELD_BLOCK(arch) (1) +#endif + +/* get lsn fields */ + +#define CYCLE_LSN(lsn,arch) (INT_GET(((uint *)&(lsn))[LSN_FIELD_CYCLE(arch)], arch)) +#define BLOCK_LSN(lsn,arch) (INT_GET(((uint *)&(lsn))[LSN_FIELD_BLOCK(arch)], arch)) +/* this is used in a spot where we might otherwise double-endian-flip */ +#define CYCLE_LSN_NOCONV(lsn,arch) (((uint *)&(lsn))[LSN_FIELD_CYCLE(arch)]) + +#ifdef __KERNEL__ +/* + * By comparing each compnent, we don't have to worry about extra + * endian issues in treating two 32 bit numbers as one 64 bit number + */ +static +#if defined(__GNUC__) && (__GNUC__ == 2) && (__GNUC_MINOR__ == 95) +__attribute__((unused)) /* gcc 2.95 miscompiles this when inlined */ +#else +__inline__ +#endif +xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2, xfs_arch_t arch) +{ + if (CYCLE_LSN(lsn1, arch) != CYCLE_LSN(lsn2, arch)) + return (CYCLE_LSN(lsn1, arch)> XLOG_RECORD_BSHIFT) +#endif + +#define XLOG_HEADER_SIZE 512 + +#define XLOG_TOTAL_REC_SHIFT(log) \ + BTOBB(XLOG_MAX_ICLOGS << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \ + XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT)) + +/* + * set lsns + */ + +#define ASSIGN_LSN_CYCLE(lsn,cycle,arch) \ + INT_SET(((uint *)&(lsn))[LSN_FIELD_CYCLE(arch)], arch, (cycle)); +#define ASSIGN_LSN_BLOCK(lsn,block,arch) \ + INT_SET(((uint *)&(lsn))[LSN_FIELD_BLOCK(arch)], arch, (block)); +#define ASSIGN_ANY_LSN(lsn,cycle,block,arch) \ + { \ + ASSIGN_LSN_CYCLE(lsn,cycle,arch); \ + ASSIGN_LSN_BLOCK(lsn,block,arch); \ + } +#define ASSIGN_LSN(lsn,log,arch) \ + ASSIGN_ANY_LSN(lsn,(log)->l_curr_cycle,(log)->l_curr_block,arch); + +#define XLOG_SET(f,b) (((f) & (b)) == (b)) + +#define GET_CYCLE(ptr, arch) \ + (INT_GET(*(uint *)(ptr), arch) == XLOG_HEADER_MAGIC_NUM ? \ + INT_GET(*((uint *)(ptr)+1), arch) : \ + INT_GET(*(uint *)(ptr), arch) \ + ) + +#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1) + + +#ifdef __KERNEL__ +/* + * get client id from packed copy. + * + * this hack is here because the xlog_pack code copies four bytes + * of xlog_op_header containing the fields oh_clientid, oh_flags + * and oh_res2 into the packed copy. + * + * later on this four byte chunk is treated as an int and the + * client id is pulled out. + * + * this has endian issues, of course. 
+ */ + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define GET_CLIENT_ID(i,arch) \ + ((i) & 0xff) +#else +#define GET_CLIENT_ID(i,arch) \ + ((i) >> 24) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XLOG_GRANT_SUB_SPACE) +void xlog_grant_sub_space(struct log *log, int bytes, int type); +#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \ + xlog_grant_sub_space(log,bytes,type) +#else +#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \ + { \ + if (type == 'w') { \ + (log)->l_grant_write_bytes -= (bytes); \ + if ((log)->l_grant_write_bytes < 0) { \ + (log)->l_grant_write_bytes += (log)->l_logsize; \ + (log)->l_grant_write_cycle--; \ + } \ + } else { \ + (log)->l_grant_reserve_bytes -= (bytes); \ + if ((log)->l_grant_reserve_bytes < 0) { \ + (log)->l_grant_reserve_bytes += (log)->l_logsize;\ + (log)->l_grant_reserve_cycle--; \ + } \ + } \ + } +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XLOG_GRANT_ADD_SPACE) +void xlog_grant_add_space(struct log *log, int bytes, int type); +#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \ + xlog_grant_add_space(log,bytes,type) +#else +#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \ + { \ + if (type == 'w') { \ + (log)->l_grant_write_bytes += (bytes); \ + if ((log)->l_grant_write_bytes > (log)->l_logsize) { \ + (log)->l_grant_write_bytes -= (log)->l_logsize; \ + (log)->l_grant_write_cycle++; \ + } \ + } else { \ + (log)->l_grant_reserve_bytes += (bytes); \ + if ((log)->l_grant_reserve_bytes > (log)->l_logsize) { \ + (log)->l_grant_reserve_bytes -= (log)->l_logsize;\ + (log)->l_grant_reserve_cycle++; \ + } \ + } \ + } +#endif +#define XLOG_INS_TICKETQ(q,tic) \ + { \ + if (q) { \ + (tic)->t_next = (q); \ + (tic)->t_prev = (q)->t_prev; \ + (q)->t_prev->t_next = (tic); \ + (q)->t_prev = (tic); \ + } else { \ + (tic)->t_prev = (tic)->t_next = (tic); \ + (q) = (tic); \ + } \ + (tic)->t_flags |= XLOG_TIC_IN_Q; \ + } +#define XLOG_DEL_TICKETQ(q,tic) \ + { \ + if ((tic) == (tic)->t_next) { \ + (q) = NULL; \ + } else { \ + (q) = (tic)->t_next; \ + (tic)->t_next->t_prev = (tic)->t_prev; \ + (tic)->t_prev->t_next = (tic)->t_next; \ + } \ + (tic)->t_next = (tic)->t_prev = NULL; \ + (tic)->t_flags &= ~XLOG_TIC_IN_Q; \ + } + + +#define GRANT_LOCK(log) mutex_spinlock(&(log)->l_grant_lock) +#define GRANT_UNLOCK(log, s) mutex_spinunlock(&(log)->l_grant_lock, s) +#define LOG_LOCK(log) mutex_spinlock(&(log)->l_icloglock) +#define LOG_UNLOCK(log, s) mutex_spinunlock(&(log)->l_icloglock, s) + +#define xlog_panic(s) {cmn_err(CE_PANIC, s); } +#define xlog_exit(s) {cmn_err(CE_PANIC, s); } +#define xlog_warn(s) {cmn_err(CE_WARN, s); } + +/* + * In core log state + */ +#define XLOG_STATE_ACTIVE 0x0001 /* Current IC log being written to */ +#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */ +#define XLOG_STATE_SYNCING 0x0004 /* This IC log is syncing */ +#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */ +#define XLOG_STATE_DO_CALLBACK \ + 0x0010 /* Process callback functions */ +#define XLOG_STATE_CALLBACK 0x0020 /* Callback functions now */ +#define XLOG_STATE_DIRTY 0x0040 /* Dirty IC log, not ready for ACTIVE status*/ +#define XLOG_STATE_IOERROR 0x0080 /* IO error happened in sync'ing log */ +#define XLOG_STATE_ALL 0x7FFF /* All possible valid flags */ +#define XLOG_STATE_NOTUSED 0x8000 /* This IC log not being used */ +#endif /* __KERNEL__ */ + +/* + * Flags to log operation header + * + * The first write of a new transaction will be preceded with a start + * record, XLOG_START_TRANS. 
Once a transaction is committed, a commit + * record is written, XLOG_COMMIT_TRANS. If a single region can not fit into + * the remainder of the current active in-core log, it is split up into + * multiple regions. Each partial region will be marked with a + * XLOG_CONTINUE_TRANS until the last one, which gets marked with XLOG_END_TRANS. + * + */ +#define XLOG_START_TRANS 0x01 /* Start a new transaction */ +#define XLOG_COMMIT_TRANS 0x02 /* Commit this transaction */ +#define XLOG_CONTINUE_TRANS 0x04 /* Cont this trans into new region */ +#define XLOG_WAS_CONT_TRANS 0x08 /* Cont this trans into new region */ +#define XLOG_END_TRANS 0x10 /* End a continued transaction */ +#define XLOG_UNMOUNT_TRANS 0x20 /* Unmount a filesystem transaction */ +#define XLOG_SKIP_TRANS (XLOG_COMMIT_TRANS | XLOG_CONTINUE_TRANS | \ + XLOG_WAS_CONT_TRANS | XLOG_END_TRANS | \ + XLOG_UNMOUNT_TRANS) + +#ifdef __KERNEL__ +/* + * Flags to log ticket + */ +#define XLOG_TIC_INITED 0x1 /* has been initialized */ +#define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */ +#define XLOG_TIC_IN_Q 0x4 +#endif /* __KERNEL__ */ + +#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */ + +/* + * Flags for log structure + */ +#define XLOG_CHKSUM_MISMATCH 0x1 /* used only during recovery */ +#define XLOG_ACTIVE_RECOVERY 0x2 /* in the middle of recovery */ +#define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */ +#define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being + shutdown */ +typedef __uint32_t xlog_tid_t; + + +#ifdef __KERNEL__ +/* + * Below are states for covering allocation transactions. + * By covering, we mean changing the h_tail_lsn in the last on-disk + * log write such that no allocation transactions will be re-done during + * recovery after a system crash. Recovery starts at the last on-disk + * log write. + * + * These states are used to insert dummy log entries to cover + * space allocation transactions which can undo non-transactional changes + * after a crash. Writes to a file with space + * already allocated do not result in any transactions. Allocations + * might include space beyond the EOF. So if we just push the EOF a + * little, the last transaction for the file could contain the wrong + * size. If there is no file system activity, after an allocation + * transaction, and the system crashes, the allocation transaction + * will get replayed and the file will be truncated. This could + * be hours/days/... after the allocation occurred. + * + * The fix for this is to do two dummy transactions when the + * system is idle. We need two dummy transaction because the h_tail_lsn + * in the log record header needs to point beyond the last possible + * non-dummy transaction. The first dummy changes the h_tail_lsn to + * the first transaction before the dummy. The second dummy causes + * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn. + * + * These dummy transactions get committed when everything + * is idle (after there has been some activity). + * + * There are 5 states used to control this. + * + * IDLE -- no logging has been done on the file system or + * we are done covering previous transactions. + * NEED -- logging has occurred and we need a dummy transaction + * when the log becomes idle. + * DONE -- we were in the NEED state and have committed a dummy + * transaction. + * NEED2 -- we detected that a dummy transaction has gone to the + * on disk log with no other transactions. + * DONE2 -- we committed a dummy transaction when in the NEED2 state. 
+ * + * There are two places where we switch states: + * + * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2. + * We commit the dummy transaction and switch to DONE or DONE2, + * respectively. In all other states, we don't do anything. + * + * 2.) When we finish writing the on-disk log (xlog_state_clean_log). + * + * No matter what state we are in, if this isn't the dummy + * transaction going out, the next state is NEED. + * So, if we aren't in the DONE or DONE2 states, the next state + * is NEED. We can't be finishing a write of the dummy record + * unless it was committed and the state switched to DONE or DONE2. + * + * If we are in the DONE state and this was a write of the + * dummy transaction, we move to NEED2. + * + * If we are in the DONE2 state and this was a write of the + * dummy transaction, we move to IDLE. + * + * + * Writing only one dummy transaction can get appended to + * one file space allocation. When this happens, the log recovery + * code replays the space allocation and a file could be truncated. + * This is why we have the NEED2 and DONE2 states before going idle. + */ + +#define XLOG_STATE_COVER_IDLE 0 +#define XLOG_STATE_COVER_NEED 1 +#define XLOG_STATE_COVER_DONE 2 +#define XLOG_STATE_COVER_NEED2 3 +#define XLOG_STATE_COVER_DONE2 4 + +#define XLOG_COVER_OPS 5 + +typedef struct xlog_ticket { + sv_t t_sema; /* sleep on this semaphore :20 */ + struct xlog_ticket *t_next; /* : 4 */ + struct xlog_ticket *t_prev; /* : 4 */ + xlog_tid_t t_tid; /* transaction identifier : 4 */ + int t_curr_res; /* current reservation in bytes : 4 */ + int t_unit_res; /* unit reservation in bytes : 4 */ + __uint8_t t_ocnt; /* original count : 1 */ + __uint8_t t_cnt; /* current count : 1 */ + __uint8_t t_clientid; /* who does this belong to; : 1 */ + __uint8_t t_flags; /* properties of reservation : 1 */ +} xlog_ticket_t; +#endif + + +typedef struct xlog_op_header { + xlog_tid_t oh_tid; /* transaction id of operation : 4 b */ + int oh_len; /* bytes in data region : 4 b */ + __uint8_t oh_clientid; /* who sent me this : 1 b */ + __uint8_t oh_flags; /* : 1 b */ + ushort oh_res2; /* 32 bit align : 2 b */ +} xlog_op_header_t; + + +/* valid values for h_fmt */ +#define XLOG_FMT_UNKNOWN 0 +#define XLOG_FMT_LINUX_LE 1 +#define XLOG_FMT_LINUX_BE 2 +#define XLOG_FMT_IRIX_BE 3 + +/* our fmt */ +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define XLOG_FMT XLOG_FMT_LINUX_LE +#else +#if __BYTE_ORDER == __BIG_ENDIAN +#define XLOG_FMT XLOG_FMT_LINUX_BE +#else +#error unknown byte order +#endif +#endif + +typedef struct xlog_rec_header { + uint h_magicno; /* log record (LR) identifier : 4 */ + uint h_cycle; /* write cycle of log : 4 */ + int h_version; /* LR version : 4 */ + int h_len; /* len in bytes; should be 64-bit aligned: 4 */ + xfs_lsn_t h_lsn; /* lsn of this LR : 8 */ + xfs_lsn_t h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */ + uint h_chksum; /* may not be used; non-zero if used : 4 */ + int h_prev_block; /* block number to previous LR : 4 */ + int h_num_logops; /* number of log operations in this LR : 4 */ + uint h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; + /* new fields */ + int h_fmt; /* format of log record : 4 */ + uuid_t h_fs_uuid; /* uuid of FS : 16 */ + int h_size; /* iclog size : 4 */ +} xlog_rec_header_t; + +typedef struct xlog_rec_ext_header { + uint xh_cycle; /* write cycle of log : 4 */ + uint xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */ +} xlog_rec_ext_header_t; +#ifdef __KERNEL__ +/* + * - A log record header is 512 bytes. 
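Referring back to the covering states defined just above: the transitions described in the comment amount to a small state machine driven from the idle-sync path and from log-write completion. The sketch below is only a model of that description, not the kernel's xfs_sync()/xlog_state_clean_log() code; the enum values and function names are illustrative.

#include <stdio.h>

enum cover_state { COVER_IDLE, COVER_NEED, COVER_DONE, COVER_NEED2, COVER_DONE2 };

/* Transition applied when an on-disk log write finishes
 * (the second switch point in the comment above). */
static enum cover_state log_write_done(enum cover_state s, int was_dummy)
{
        if (!was_dummy)
                return COVER_NEED;      /* real work went out */
        if (s == COVER_DONE)
                return COVER_NEED2;     /* first dummy made it to disk */
        if (s == COVER_DONE2)
                return COVER_IDLE;      /* second dummy made it to disk */
        return COVER_NEED;              /* anything else: start over */
}

/* Transition applied from the idle-sync path (the first switch point):
 * when the log is idle and we are in NEED or NEED2, a dummy
 * transaction is committed and the state advances. */
static enum cover_state idle_sync(enum cover_state s)
{
        if (s == COVER_NEED)
                return COVER_DONE;
        if (s == COVER_NEED2)
                return COVER_DONE2;
        return s;       /* all other states: do nothing */
}

int main(void)
{
        enum cover_state s = COVER_IDLE;

        s = log_write_done(s, 0);       /* real transaction -> NEED  */
        s = idle_sync(s);               /* commit 1st dummy -> DONE  */
        s = log_write_done(s, 1);       /* dummy on disk    -> NEED2 */
        s = idle_sync(s);               /* commit 2nd dummy -> DONE2 */
        s = log_write_done(s, 1);       /* dummy on disk    -> IDLE  */
        printf("final state %d (0 == IDLE)\n", s);
        return 0;
}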
There is plenty of room to grow the + * xlog_rec_header_t into the reserved space. + * - ic_data follows, so a write to disk can start at the beginning of + * the iclog. + * - ic_forcesema is used to implement synchronous forcing of the iclog to disk. + * - ic_next is the pointer to the next iclog in the ring. + * - ic_bp is a pointer to the buffer used to write this incore log to disk. + * - ic_log is a pointer back to the global log structure. + * - ic_callback is a linked list of callback function/argument pairs to be + * called after an iclog finishes writing. + * - ic_size is the full size of the header plus data. + * - ic_offset is the current number of bytes written to in this iclog. + * - ic_refcnt is bumped when someone is writing to the log. + * - ic_state is the state of the iclog. + */ +typedef struct xlog_iclog_fields { + sv_t ic_forcesema; + sv_t ic_writesema; + struct xlog_in_core *ic_next; + struct xlog_in_core *ic_prev; + struct xfs_buf *ic_bp; + struct log *ic_log; + xfs_log_callback_t *ic_callback; + xfs_log_callback_t **ic_callback_tail; +#ifdef DEBUG + struct ktrace *ic_trace; +#endif + int ic_size; + int ic_offset; + int ic_refcnt; + int ic_roundoff; + int ic_bwritecnt; + ushort_t ic_state; + char *ic_datap; /* pointer to iclog data */ +} xlog_iclog_fields_t; + +typedef struct xlog_in_core2 { + union { + xlog_rec_header_t hic_header; + xlog_rec_ext_header_t hic_xheader; + char hic_sector[XLOG_HEADER_SIZE]; + } ic_h; +} xlog_in_core_2_t; + +typedef struct xlog_in_core { + xlog_iclog_fields_t hic_fields; + xlog_in_core_2_t *hic_data; +} xlog_in_core_t; + +/* + * Defines to save our code from this glop. + */ +#define ic_forcesema hic_fields.ic_forcesema +#define ic_writesema hic_fields.ic_writesema +#define ic_next hic_fields.ic_next +#define ic_prev hic_fields.ic_prev +#define ic_bp hic_fields.ic_bp +#define ic_log hic_fields.ic_log +#define ic_callback hic_fields.ic_callback +#define ic_callback_tail hic_fields.ic_callback_tail +#define ic_trace hic_fields.ic_trace +#define ic_size hic_fields.ic_size +#define ic_offset hic_fields.ic_offset +#define ic_refcnt hic_fields.ic_refcnt +#define ic_roundoff hic_fields.ic_roundoff +#define ic_bwritecnt hic_fields.ic_bwritecnt +#define ic_state hic_fields.ic_state +#define ic_datap hic_fields.ic_datap +#define ic_header hic_data->ic_h.hic_header + +/* + * The reservation head lsn is not made up of a cycle number and block number. + * Instead, it uses a cycle number and byte number. Logs don't expect to + * overflow 31 bits worth of byte offset, so using a byte number will mean + * that round off problems won't occur when releasing partial reservations. 
+ */ +typedef struct log { + /* The following block of fields are changed while holding icloglock */ + sema_t l_flushsema; /* iclog flushing semaphore */ + int l_flushcnt; /* # of procs waiting on this sema */ + int l_ticket_cnt; /* free ticket count */ + int l_ticket_tcnt; /* total ticket count */ + int l_covered_state;/* state of "covering disk log entries" */ + xlog_ticket_t *l_freelist; /* free list of tickets */ + xlog_ticket_t *l_unmount_free;/* kmem_free these addresses */ + xlog_ticket_t *l_tail; /* free list of tickets */ + xlog_in_core_t *l_iclog; /* head log queue */ + lock_t l_icloglock; /* grab to change iclog state */ + xfs_lsn_t l_tail_lsn; /* lsn of 1st LR w/ unflush buffers */ + xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */ + struct xfs_mount *l_mp; /* mount point */ + struct xfs_buf *l_xbuf; /* extra buffer for log wrapping */ + dev_t l_dev; /* dev_t of log */ + xfs_daddr_t l_logBBstart; /* start block of log */ + int l_logsize; /* size of log in bytes */ + int l_logBBsize; /* size of log in 512 byte chunks */ + int l_roundoff; /* round off error of all iclogs */ + int l_curr_cycle; /* Cycle number of log writes */ + int l_prev_cycle; /* Cycle # b4 last block increment */ + int l_curr_block; /* current logical block of log */ + int l_prev_block; /* previous logical block of log */ + int l_iclog_size; /* size of log in bytes */ + int l_iclog_size_log;/* log power size of log */ + int l_iclog_bufs; /* number of iclog buffers */ + + /* The following field are used for debugging; need to hold icloglock */ + char *l_iclog_bak[XLOG_MAX_ICLOGS]; + + /* The following block of fields are changed while holding grant_lock */ + lock_t l_grant_lock; /* protects below fields */ + xlog_ticket_t *l_reserve_headq; /* */ + xlog_ticket_t *l_write_headq; /* */ + int l_grant_reserve_cycle; /* */ + int l_grant_reserve_bytes; /* */ + int l_grant_write_cycle; /* */ + int l_grant_write_bytes; /* */ + + /* The following fields don't need locking */ +#ifdef DEBUG + struct ktrace *l_trace; + struct ktrace *l_grant_trace; +#endif + uint l_flags; + uint l_quotaoffs_flag;/* XFS_DQ_*, if QUOTAOFFs found */ + struct xfs_buf_cancel **l_buf_cancel_table; + int l_iclog_hsize; /* size of iclog header */ + int l_iclog_heads; /* number of iclog header sectors */ +} xlog_t; + + +/* common routines */ +extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); +extern int xlog_find_head(xlog_t *log, xfs_daddr_t *head_blk); +extern int xlog_find_tail(xlog_t *log, + xfs_daddr_t *head_blk, + xfs_daddr_t *tail_blk, + int readonly); +extern int xlog_print_find_oldest(xlog_t *log, xfs_daddr_t *last_blk); +extern int xlog_recover(xlog_t *log, int readonly); +extern int xlog_recover_finish(xlog_t *log, int mfsi_flags); +extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog); +extern struct xfs_buf *xlog_get_bp(int,xfs_mount_t *); +extern void xlog_put_bp(struct xfs_buf *); +extern int xlog_bread(xlog_t *, xfs_daddr_t blkno, int bblks, struct xfs_buf *bp); +extern void xlog_recover_process_iunlinks(xlog_t *log); + +#define XLOG_TRACE_GRAB_FLUSH 1 +#define XLOG_TRACE_REL_FLUSH 2 +#define XLOG_TRACE_SLEEP_FLUSH 3 +#define XLOG_TRACE_WAKE_FLUSH 4 + +#endif /* __KERNEL__ */ + +#endif /* __XFS_LOG_PRIV_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_log_recover.c linux.22-ac2/fs/xfs/xfs_log_recover.c --- linux.vanilla/fs/xfs/xfs_log_recover.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_log_recover.c 2003-06-29 16:09:23.000000000 +0100 @@ 
-0,0 +1,3882 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_ag.h" +#include "xfs_sb.h" +#include "xfs_trans.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_error.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_imap.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_ialloc_btree.h" +#include "xfs_ialloc.h" +#include "xfs_error.h" +#include "xfs_log_priv.h" +#include "xfs_buf_item.h" +#include "xfs_alloc_btree.h" +#include "xfs_log_recover.h" +#include "xfs_extfree_item.h" +#include "xfs_trans_priv.h" +#include "xfs_bit.h" +#include "xfs_quota.h" +#include "xfs_rw.h" + +STATIC int xlog_find_zeroed(struct log *log, xfs_daddr_t *blk_no); + +STATIC int xlog_clear_stale_blocks(xlog_t *log, xfs_lsn_t tail_lsn); +STATIC void xlog_recover_insert_item_backq(xlog_recover_item_t **q, + xlog_recover_item_t *item); + +#if defined(DEBUG) +STATIC void xlog_recover_check_summary(xlog_t *log); +STATIC void xlog_recover_check_ail(xfs_mount_t *mp, xfs_log_item_t *lip, + int gen); +#else +#define xlog_recover_check_summary(log) +#define xlog_recover_check_ail(mp, lip, gen) +#endif /* DEBUG */ + + +xfs_buf_t * +xlog_get_bp(int num_bblks, xfs_mount_t *mp) +{ + xfs_buf_t *bp; + + ASSERT(num_bblks > 0); + + bp = XFS_ngetrbuf(BBTOB(num_bblks),mp); + return bp; +} /* xlog_get_bp */ + + +void +xlog_put_bp(xfs_buf_t *bp) +{ + XFS_nfreerbuf(bp); +} /* xlog_put_bp */ + + +/* + * nbblks should be uint, but oh well. Just want to catch that 32-bit length. 
+ */ +int +xlog_bread(xlog_t *log, + xfs_daddr_t blk_no, + int nbblks, + xfs_buf_t *bp) +{ + int error; + + ASSERT(log); + ASSERT(nbblks > 0); + ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp)); + ASSERT(bp); + + XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); + XFS_BUF_READ(bp); + XFS_BUF_BUSY(bp); + XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); + XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); + + xfsbdstrat(log->l_mp, bp); + if ((error = xfs_iowait(bp))) { + xfs_ioerror_alert("xlog_bread", log->l_mp, + bp, XFS_BUF_ADDR(bp)); + return (error); + } + return error; +} /* xlog_bread */ + + +/* + * Write out the buffer at the given block for the given number of blocks. + * The buffer is kept locked across the write and is returned locked. + * This can only be used for synchronous log writes. + */ +int +xlog_bwrite( + xlog_t *log, + int blk_no, + int nbblks, + xfs_buf_t *bp) +{ + int error; + + ASSERT(nbblks > 0); + ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp)); + + XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); + XFS_BUF_ZEROFLAGS(bp); + XFS_BUF_BUSY(bp); + XFS_BUF_HOLD(bp); + XFS_BUF_PSEMA(bp, PRIBIO); + XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); + XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); + + if ((error = xfs_bwrite(log->l_mp, bp))) + xfs_ioerror_alert("xlog_bwrite", log->l_mp, + bp, XFS_BUF_ADDR(bp)); + + return (error); +} /* xlog_bwrite */ + +#ifdef DEBUG +/* + * check log record header for recovery + */ +static void +xlog_header_check_dump(xfs_mount_t *mp, xlog_rec_header_t *head) +{ + int b; + + printk("%s: SB : uuid = ", __FUNCTION__); + for (b=0;b<16;b++) printk("%02x",((unsigned char *)&mp->m_sb.sb_uuid)[b]); + printk(", fmt = %d\n",XLOG_FMT); + printk(" log : uuid = "); + for (b=0;b<16;b++) printk("%02x",((unsigned char *)&head->h_fs_uuid)[b]); + printk(", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT)); +} +#endif + +/* + * check log record header for recovery + */ + +STATIC int +xlog_header_check_recover(xfs_mount_t *mp, xlog_rec_header_t *head) +{ + ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + + /* + * IRIX doesn't write the h_fmt field and leaves it zeroed + * (XLOG_FMT_UNKNOWN). This stops us from trying to recover + * a dirty log created in IRIX. + */ + + if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) { + xlog_warn("XFS: dirty log written in incompatible format - can't recover"); +#ifdef DEBUG + xlog_header_check_dump(mp, head); +#endif + XFS_ERROR_REPORT("xlog_header_check_recover(1)", + XFS_ERRLEVEL_HIGH, mp); + return XFS_ERROR(EFSCORRUPTED); + } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { + xlog_warn("XFS: dirty log entry has mismatched uuid - can't recover"); +#ifdef DEBUG + xlog_header_check_dump(mp, head); +#endif + XFS_ERROR_REPORT("xlog_header_check_recover(2)", + XFS_ERRLEVEL_HIGH, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + return 0; +} + +/* + * read the head block of the log and check the header + */ + +STATIC int +xlog_header_check_mount(xfs_mount_t *mp, xlog_rec_header_t *head) +{ + ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + + if (uuid_is_nil(&head->h_fs_uuid)) { + + /* + * IRIX doesn't write the h_fs_uuid or h_fmt fields. If + * h_fs_uuid is nil, we assume this log was last mounted + * by IRIX and continue. 
+ */ + + xlog_warn("XFS: nil uuid in log - IRIX style log"); + + } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { + xlog_warn("XFS: log has mismatched uuid - can't recover"); +#ifdef DEBUG + xlog_header_check_dump(mp, head); +#endif + XFS_ERROR_REPORT("xlog_header_check_mount", + XFS_ERRLEVEL_HIGH, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + return 0; +} + +STATIC void +xlog_recover_iodone( + struct xfs_buf *bp) +{ + xfs_mount_t *mp; + ASSERT(XFS_BUF_FSPRIVATE(bp, void *)); + + if (XFS_BUF_GETERROR(bp)) { + /* + * We're not going to bother about retrying + * this during recovery. One strike! + */ + mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *); + xfs_ioerror_alert("xlog_recover_iodone", + mp, bp, XFS_BUF_ADDR(bp)); + xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR); + } + XFS_BUF_SET_FSPRIVATE(bp, NULL); + XFS_BUF_CLR_IODONE_FUNC(bp); + xfs_biodone(bp); +} + +/* + * This routine finds (to an approximation) the first block in the physical + * log which contains the given cycle. It uses a binary search algorithm. + * Note that the algorithm can not be perfect because the disk will not + * necessarily be perfect. + */ +int +xlog_find_cycle_start(xlog_t *log, + xfs_buf_t *bp, + xfs_daddr_t first_blk, + xfs_daddr_t *last_blk, + uint cycle) +{ + xfs_daddr_t mid_blk; + uint mid_cycle; + int error; + + mid_blk = BLK_AVG(first_blk, *last_blk); + while (mid_blk != first_blk && mid_blk != *last_blk) { + if ((error = xlog_bread(log, mid_blk, 1, bp))) + return error; + mid_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT); + if (mid_cycle == cycle) { + *last_blk = mid_blk; + /* last_half_cycle == mid_cycle */ + } else { + first_blk = mid_blk; + /* first_half_cycle == mid_cycle */ + } + mid_blk = BLK_AVG(first_blk, *last_blk); + } + ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) || + (mid_blk == *last_blk && mid_blk-1 == first_blk)); + + return 0; +} /* xlog_find_cycle_start */ + + +/* + * Check that the range of blocks does not contain the cycle number + * given. The scan needs to occur from front to back and the ptr into the + * region must be updated since a later routine will need to perform another + * test. If the region is completely good, we end up returning the same + * last block number. + * + * Set blkno to -1 if we encounter no errors. This is an invalid block number + * since we don't ever expect logs to get this large. + */ + +STATIC int +xlog_find_verify_cycle( xlog_t *log, + xfs_daddr_t start_blk, + int nbblks, + uint stop_on_cycle_no, + xfs_daddr_t *new_blk) +{ + xfs_daddr_t i, j; + uint cycle; + xfs_buf_t *bp; + char *buf = NULL; + int error = 0; + xfs_daddr_t bufblks; + + bufblks = 1 << ffs(nbblks); + + while (!(bp = xlog_get_bp(bufblks, log->l_mp))) { + /* can't get enough memory to do everything in one big buffer */ + bufblks >>= 1; + if (!bufblks) + return ENOMEM; + } + + for (i = start_blk; i < start_blk + nbblks; i += bufblks) { + int bcount; + + bcount = min(bufblks, (start_blk + nbblks - i)); + + if ((error = xlog_bread(log, i, bcount, bp))) + goto out; + + buf = XFS_BUF_PTR(bp); + for (j = 0; j < bcount; j++) { + cycle = GET_CYCLE(buf, ARCH_CONVERT); + if (cycle == stop_on_cycle_no) { + *new_blk = i+j; + goto out; + } + + buf += BBSIZE; + } + } + + *new_blk = -1; + +out: + xlog_put_bp(bp); + + return error; +} /* xlog_find_verify_cycle */ + + +/* + * Potentially backup over partial log record write. + * + * In the typical case, last_blk is the number of the block directly after + * a good log record. 
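The loop in xlog_find_cycle_start() above is an ordinary binary search over the cycle numbers stamped in each log block. A userspace model of the same search over a plain array, assuming the precondition the caller establishes: the block at first has a different cycle and the block at *last has the target cycle.

#include <stdio.h>

/* Model of xlog_find_cycle_start(): cycles[] stands in for the cycle
 * number read from each log block.  The search narrows *last down to
 * the first block in (first, *last] whose cycle matches 'cycle'. */
static void find_cycle_start(const unsigned int *cycles,
                             long first, long *last, unsigned int cycle)
{
        long mid = (first + *last) / 2;

        while (mid != first && mid != *last) {
                if (cycles[mid] == cycle)
                        *last = mid;    /* match: keep searching downwards */
                else
                        first = mid;    /* no match yet: move up */
                mid = (first + *last) / 2;
        }
}

int main(void)
{
        /* cycle 8 blocks still in the middle; cycle 9 resumes at block 5 */
        unsigned int cycles[] = { 9, 9, 9, 8, 8, 9, 9, 9 };
        long last = 7;

        find_cycle_start(cycles, 3, &last, 9);
        printf("first block of cycle 9 in range: %ld\n", last);        /* 5 */
        return 0;
}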
Therefore, we subtract one to get the block number + * of the last block in the given buffer. extra_bblks contains the number + * of blocks we would have read on a previous read. This happens when the + * last log record is split over the end of the physical log. + * + * extra_bblks is the number of blocks potentially verified on a previous + * call to this routine. + */ + +STATIC int +xlog_find_verify_log_record(xlog_t *log, + xfs_daddr_t start_blk, + xfs_daddr_t *last_blk, + int extra_bblks) +{ + xfs_daddr_t i; + xfs_buf_t *bp; + char *buf = NULL; + xlog_rec_header_t *head = NULL; + int error = 0; + int smallmem = 0; + int num_blks = *last_blk - start_blk; + int xhdrs; + + ASSERT(start_blk != 0 || *last_blk != start_blk); + + if (!(bp = xlog_get_bp(num_blks, log->l_mp))) { + if (!(bp = xlog_get_bp(1, log->l_mp))) + return ENOMEM; + smallmem = 1; + buf = XFS_BUF_PTR(bp); + } else { + if ((error = xlog_bread(log, start_blk, num_blks, bp))) + goto out; + buf = XFS_BUF_PTR(bp) + ((num_blks - 1) << BBSHIFT); + } + + for (i = (*last_blk) - 1; i >= 0; i--) { + if (i < start_blk) { + /* legal log record not found */ + xlog_warn("XFS: Log inconsistent (didn't find previous header)"); + ASSERT(0); + error = XFS_ERROR(EIO); + goto out; + } + + if (smallmem && (error = xlog_bread(log, i, 1, bp))) + goto out; + head = (xlog_rec_header_t*)buf; + + if (INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) + break; + + if (!smallmem) + buf -= BBSIZE; + } + + /* + * We hit the beginning of the physical log & still no header. Return + * to caller. If caller can handle a return of -1, then this routine + * will be called again for the end of the physical log. + */ + if (i == -1) { + error = -1; + goto out; + } + + /* we have the final block of the good log (the first block + * of the log record _before_ the head. So we check the uuid. + */ + + if ((error = xlog_header_check_mount(log->l_mp, head))) + goto out; + + /* + * We may have found a log record header before we expected one. + * last_blk will be the 1st block # with a given cycle #. We may end + * up reading an entire log record. In this case, we don't want to + * reset last_blk. Only when last_blk points in the middle of a log + * record do we update last_blk. + */ + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + uint h_size = INT_GET(head->h_size, ARCH_CONVERT); + + xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE; + if (h_size % XLOG_HEADER_CYCLE_SIZE) + xhdrs++; + } else { + xhdrs = 1; + } + + if (*last_blk - i + extra_bblks + != BTOBB(INT_GET(head->h_len, ARCH_CONVERT))+xhdrs) + *last_blk = i; + +out: + xlog_put_bp(bp); + + return error; +} /* xlog_find_verify_log_record */ + +/* + * Head is defined to be the point of the log where the next log write + * write could go. This means that incomplete LR writes at the end are + * eliminated when calculating the head. We aren't guaranteed that previous + * LR have complete transactions. We only know that a cycle number of + * current cycle number -1 won't be present in the log if we start writing + * from our current block number. + * + * last_blk contains the block number of the first block with a given + * cycle number. + * + * Also called from xfs_log_print.c + * + * Return: zero if normal, non-zero if error. 
+ */ +int +xlog_find_head(xlog_t *log, + xfs_daddr_t *return_head_blk) +{ + xfs_buf_t *bp; + xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk; + int num_scan_bblks; + uint first_half_cycle, last_half_cycle; + uint stop_on_cycle; + int error, log_bbnum = log->l_logBBsize; + + /* Is the end of the log device zeroed? */ + if ((error = xlog_find_zeroed(log, &first_blk)) == -1) { + *return_head_blk = first_blk; + + /* is the whole lot zeroed? */ + if (!first_blk) { + /* Linux XFS shouldn't generate totally zeroed logs - + * mkfs etc write a dummy unmount record to a fresh + * log so we can store the uuid in there + */ + xlog_warn("XFS: totally zeroed log"); + } + + return 0; + } else if (error) { + xlog_warn("XFS: empty log check failed"); + return error; + } + + first_blk = 0; /* get cycle # of 1st block */ + bp = xlog_get_bp(1,log->l_mp); + if (!bp) + return ENOMEM; + if ((error = xlog_bread(log, 0, 1, bp))) + goto bp_err; + first_half_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT); + + last_blk = head_blk = log_bbnum-1; /* get cycle # of last block */ + if ((error = xlog_bread(log, last_blk, 1, bp))) + goto bp_err; + last_half_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT); + ASSERT(last_half_cycle != 0); + + /* + * If the 1st half cycle number is equal to the last half cycle number, + * then the entire log is stamped with the same cycle number. In this + * case, head_blk can't be set to zero (which makes sense). The below + * math doesn't work out properly with head_blk equal to zero. Instead, + * we set it to log_bbnum which is an illegal block number, but this + * value makes the math correct. If head_blk doesn't changed through + * all the tests below, *head_blk is set to zero at the very end rather + * than log_bbnum. In a sense, log_bbnum and zero are the same block + * in a circular file. + */ + if (first_half_cycle == last_half_cycle) { + /* + * In this case we believe that the entire log should have cycle + * number last_half_cycle. We need to scan backwards from the + * end verifying that there are no holes still containing + * last_half_cycle - 1. If we find such a hole, then the start + * of that hole will be the new head. The simple case looks like + * x | x ... | x - 1 | x + * Another case that fits this picture would be + * x | x + 1 | x ... | x + * In this case the head really is somwhere at the end of the + * log, as one of the latest writes at the beginning was incomplete. + * One more case is + * x | x + 1 | x ... | x - 1 | x + * This is really the combination of the above two cases, and the + * head has to end up at the start of the x-1 hole at the end of + * the log. + * + * In the 256k log case, we will read from the beginning to the + * end of the log and search for cycle numbers equal to x-1. We + * don't worry about the x+1 blocks that we encounter, because + * we know that they cannot be the head since the log started with + * x. + */ + head_blk = log_bbnum; + stop_on_cycle = last_half_cycle - 1; + } else { + /* + * In this case we want to find the first block with cycle number + * matching last_half_cycle. We expect the log to be some + * variation on + * x + 1 ... | x ... + * The first block with cycle number x (last_half_cycle) will be + * where the new head belongs. First we do a binary search for + * the first occurrence of last_half_cycle. The binary search + * may not be totally accurate, so then we scan back from there + * looking for occurrences of last_half_cycle before us. 
If + * that backwards scan wraps around the beginning of the log, + * then we look for occurrences of last_half_cycle - 1 at the + * end of the log. The cases we're looking for look like + * x + 1 ... | x | x + 1 | x ... + * ^ binary search stopped here + * or + * x + 1 ... | x ... | x - 1 | x + * <---------> less than scan distance + */ + stop_on_cycle = last_half_cycle; + if ((error = xlog_find_cycle_start(log, bp, first_blk, + &head_blk, last_half_cycle))) + goto bp_err; + } + + /* + * Now validate the answer. Scan back some number of maximum possible + * blocks and make sure each one has the expected cycle number. The + * maximum is determined by the total possible amount of buffering + * in the in-core log. The following number can be made tighter if + * we actually look at the block size of the filesystem. + */ + num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); + if (head_blk >= num_scan_bblks) { + /* + * We are guaranteed that the entire check can be performed + * in one buffer. + */ + start_blk = head_blk - num_scan_bblks; + if ((error = xlog_find_verify_cycle(log, start_blk, num_scan_bblks, + stop_on_cycle, &new_blk))) + goto bp_err; + if (new_blk != -1) + head_blk = new_blk; + } else { /* need to read 2 parts of log */ + /* + * We are going to scan backwards in the log in two parts. First + * we scan the physical end of the log. In this part of the log, + * we are looking for blocks with cycle number last_half_cycle - 1. + * If we find one, then we know that the log starts there, as we've + * found a hole that didn't get written in going around the end + * of the physical log. The simple case for this is + * x + 1 ... | x ... | x - 1 | x + * <---------> less than scan distance + * If all of the blocks at the end of the log have cycle number + * last_half_cycle, then we check the blocks at the start of the + * log looking for occurrences of last_half_cycle. If we find one, + * then our current estimate for the location of the first + * occurrence of last_half_cycle is wrong and we move back to the + * hole we've found. This case looks like + * x + 1 ... | x | x + 1 | x ... + * ^ binary search stopped here + * Another case we need to handle that only occurs in 256k logs is + * x + 1 ... | x ... | x+1 | x ... + * ^ binary search stops here + * In a 256k log, the scan at the end of the log will see the x+1 + * blocks. We need to skip past those since that is certainly not + * the head of the log. By searching for last_half_cycle-1 we + * accomplish that. + */ + start_blk = log_bbnum - num_scan_bblks + head_blk; + ASSERT(head_blk <= INT_MAX && (xfs_daddr_t) num_scan_bblks-head_blk >= 0); + if ((error = xlog_find_verify_cycle(log, start_blk, + num_scan_bblks-(int)head_blk, (stop_on_cycle - 1), + &new_blk))) + goto bp_err; + if (new_blk != -1) { + head_blk = new_blk; + goto bad_blk; + } + + /* + * Scan beginning of log now. The last part of the physical log + * is good. This scan needs to verify that it doesn't find the + * last_half_cycle. + */ + start_blk = 0; + ASSERT(head_blk <= INT_MAX); + if ((error = xlog_find_verify_cycle(log, start_blk, (int) head_blk, + stop_on_cycle, &new_blk))) + goto bp_err; + if (new_blk != -1) + head_blk = new_blk; + } + +bad_blk: + /* + * Now we need to make sure head_blk is not pointing to a block in + * the middle of a log record. 
+ */ + num_scan_bblks = BTOBB(XLOG_MAX_RECORD_BSIZE); + if (head_blk >= num_scan_bblks) { + start_blk = head_blk - num_scan_bblks; /* don't read head_blk */ + + /* start ptr at last block ptr before head_blk */ + if ((error = xlog_find_verify_log_record(log, + start_blk, + &head_blk, + 0)) == -1) { + error = XFS_ERROR(EIO); + goto bp_err; + } else if (error) + goto bp_err; + } else { + start_blk = 0; + ASSERT(head_blk <= INT_MAX); + if ((error = xlog_find_verify_log_record(log, + start_blk, + &head_blk, + 0)) == -1) { + /* We hit the beginning of the log during our search */ + start_blk = log_bbnum - num_scan_bblks + head_blk; + new_blk = log_bbnum; + ASSERT(start_blk <= INT_MAX && (xfs_daddr_t) log_bbnum-start_blk >= 0); + ASSERT(head_blk <= INT_MAX); + if ((error = xlog_find_verify_log_record(log, + start_blk, + &new_blk, + (int)head_blk)) == -1) { + error = XFS_ERROR(EIO); + goto bp_err; + } else if (error) + goto bp_err; + if (new_blk != log_bbnum) + head_blk = new_blk; + } else if (error) + goto bp_err; + } + + xlog_put_bp(bp); + if (head_blk == log_bbnum) + *return_head_blk = 0; + else + *return_head_blk = head_blk; + /* + * When returning here, we have a good block number. Bad block + * means that during a previous crash, we didn't have a clean break + * from cycle number N to cycle number N-1. In this case, we need + * to find the first block with cycle number N-1. + */ + return 0; + +bp_err: + xlog_put_bp(bp); + + if (error) + xlog_warn("XFS: failed to find log head"); + + return error; +} /* xlog_find_head */ + +/* + * Find the sync block number or the tail of the log. + * + * This will be the block number of the last record to have its + * associated buffers synced to disk. Every log record header has + * a sync lsn embedded in it. LSNs hold block numbers, so it is easy + * to get a sync block number. The only concern is to figure out which + * log record header to believe. + * + * The following algorithm uses the log record header with the largest + * lsn. The entire log record does not need to be valid. We only care + * that the header is valid. + * + * We could speed up search by using current head_blk buffer, but it is not + * available. + */ +int +xlog_find_tail(xlog_t *log, + xfs_daddr_t *head_blk, + xfs_daddr_t *tail_blk, + int readonly) +{ + xlog_rec_header_t *rhead; + xlog_op_header_t *op_head; + xfs_buf_t *bp; + int error, i, found; + xfs_daddr_t umount_data_blk; + xfs_daddr_t after_umount_blk; + xfs_lsn_t tail_lsn; + int hblks; + + found = 0; + + /* + * Find previous log record + */ + if ((error = xlog_find_head(log, head_blk))) + return error; + + bp = xlog_get_bp(1,log->l_mp); + if (!bp) + return ENOMEM; + if (*head_blk == 0) { /* special case */ + if ((error = xlog_bread(log, 0, 1, bp))) + goto bread_err; + if (GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT) == 0) { + *tail_blk = 0; + /* leave all other log inited values alone */ + goto exit; + } + } + + /* + * Search backwards looking for log record header block + */ + ASSERT(*head_blk < INT_MAX); + for (i = (int)(*head_blk) - 1; i >= 0; i--) { + if ((error = xlog_bread(log, i, 1, bp))) + goto bread_err; + if (XLOG_HEADER_MAGIC_NUM == + INT_GET(*(uint *)(XFS_BUF_PTR(bp)), ARCH_CONVERT)) { + found = 1; + break; + } + } + /* + * If we haven't found the log record header block, start looking + * again from the end of the physical log. XXXmiken: There should be + * a check here to make sure we didn't search more than N blocks in + * the previous code. 
+ */ + if (!found) { + for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) { + if ((error = xlog_bread(log, i, 1, bp))) + goto bread_err; + if (XLOG_HEADER_MAGIC_NUM == + INT_GET(*(uint*)(XFS_BUF_PTR(bp)), ARCH_CONVERT)) { + found = 2; + break; + } + } + } + if (!found) { + xlog_warn("XFS: xlog_find_tail: couldn't find sync record"); + ASSERT(0); + return XFS_ERROR(EIO); + } + + /* find blk_no of tail of log */ + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(bp); + *tail_blk = BLOCK_LSN(rhead->h_tail_lsn, ARCH_CONVERT); + + /* + * Reset log values according to the state of the log when we + * crashed. In the case where head_blk == 0, we bump curr_cycle + * one because the next write starts a new cycle rather than + * continuing the cycle of the last good log record. At this + * point we have guaranteed that all partial log records have been + * accounted for. Therefore, we know that the last good log record + * written was complete and ended exactly on the end boundary + * of the physical log. + */ + log->l_prev_block = i; + log->l_curr_block = (int)*head_blk; + log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT); + if (found == 2) + log->l_curr_cycle++; + log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT); + log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT); + log->l_grant_reserve_cycle = log->l_curr_cycle; + log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); + log->l_grant_write_cycle = log->l_curr_cycle; + log->l_grant_write_bytes = BBTOB(log->l_curr_block); + + /* + * Look for unmount record. If we find it, then we know there + * was a clean unmount. Since 'i' could be the last block in + * the physical log, we convert to a log block before comparing + * to the head_blk. + * + * Save the current tail lsn to use to pass to + * xlog_clear_stale_blocks() below. We won't want to clear the + * unmount record if there is one, so we pass the lsn of the + * unmount record rather than the block after it. + */ + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + int h_size = INT_GET(rhead->h_size, ARCH_CONVERT); + int h_version = INT_GET(rhead->h_version, ARCH_CONVERT); + + if ((h_version & XLOG_VERSION_2) && + (h_size > XLOG_HEADER_CYCLE_SIZE)) { + hblks = h_size / XLOG_HEADER_CYCLE_SIZE; + if (h_size % XLOG_HEADER_CYCLE_SIZE) + hblks++; + } else { + hblks = 1; + } + } else { + hblks = 1; + } + after_umount_blk = (i + hblks + (int) + BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize; + tail_lsn = log->l_tail_lsn; + if (*head_blk == after_umount_blk && + INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) { + umount_data_blk = (i + hblks) % log->l_logBBsize; + if ((error = xlog_bread(log, umount_data_blk, 1, bp))) { + goto bread_err; + } + op_head = (xlog_op_header_t *)XFS_BUF_PTR(bp); + if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) { + /* + * Set tail and last sync so that newly written + * log records will point recovery to after the + * current unmount record. + */ + ASSIGN_ANY_LSN(log->l_tail_lsn, log->l_curr_cycle, + after_umount_blk, ARCH_NOCONVERT); + ASSIGN_ANY_LSN(log->l_last_sync_lsn, log->l_curr_cycle, + after_umount_blk, ARCH_NOCONVERT); + *tail_blk = after_umount_blk; + } + } + +#ifdef __KERNEL__ + /* + * Make sure that there are no blocks in front of the head + * with the same cycle number as the head. This can happen + * because we allow multiple outstanding log writes concurrently, + * and the later writes might make it out before earlier ones. 
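xlog_find_tail() reads the tail from h_tail_lsn and later restamps l_tail_lsn and l_last_sync_lsn with ASSIGN_ANY_LSN(). A log sequence number packs a 32-bit cycle count in the high half and a 32-bit block number in the low half, which is why a plain 64-bit compare orders records in time. The sketch below illustrates that packing; make_lsn/cycle_of/block_of are stand-in names, and the on-disk byte-order handling (ARCH_CONVERT) is left out.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

typedef int64_t lsn_t;  /* stand-in for xfs_lsn_t */

/* Pack a (cycle, block) pair: cycle in the high 32 bits,
 * block number in the low 32 bits. */
static lsn_t make_lsn(uint32_t cycle, uint32_t block)
{
        return ((lsn_t)cycle << 32) | block;
}

static uint32_t cycle_of(lsn_t lsn) { return (uint32_t)(lsn >> 32); }
static uint32_t block_of(lsn_t lsn) { return (uint32_t)lsn; }

int main(void)
{
        lsn_t tail = make_lsn(7, 4096); /* cycle 7, block 4096 */

        assert(cycle_of(tail) == 7 && block_of(tail) == 4096);

        /* LSNs compare in (cycle, block) order, so an integer compare
         * is enough to find the most recent record header. */
        assert(make_lsn(8, 0) > tail);
        printf("tail: cycle %u block %u\n", cycle_of(tail), block_of(tail));
        return 0;
}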
+ * + * We use the lsn from before modifying it so that we'll never + * overwrite the unmount record after a clean unmount. + * + * Do this only if we are going to recover the filesystem + * + * NOTE: This used to say "if (!readonly)" + * However on Linux, we can & do recover a read-only filesystem. + * We only skip recovery if NORECOVERY is specified on mount, + * in which case we would not be here. + * + * But... if the -device- itself is readonly, just skip this. + * We can't recover this device anyway, so it won't matter. + */ + + if (!is_read_only(log->l_mp->m_logdev_targp->pbr_kdev)) { + error = xlog_clear_stale_blocks(log, tail_lsn); + } +#endif + +bread_err: +exit: + xlog_put_bp(bp); + + if (error) + xlog_warn("XFS: failed to locate log tail"); + + return error; +} /* xlog_find_tail */ + + +/* + * Is the log zeroed at all? + * + * The last binary search should be changed to perform an X block read + * once X becomes small enough. You can then search linearly through + * the X blocks. This will cut down on the number of reads we need to do. + * + * If the log is partially zeroed, this routine will pass back the blkno + * of the first block with cycle number 0. It won't have a complete LR + * preceding it. + * + * Return: + * 0 => the log is completely written to + * -1 => use *blk_no as the first block of the log + * >0 => error has occurred + */ +int +xlog_find_zeroed(struct log *log, + xfs_daddr_t *blk_no) +{ + xfs_buf_t *bp; + uint first_cycle, last_cycle; + xfs_daddr_t new_blk, last_blk, start_blk; + xfs_daddr_t num_scan_bblks; + int error, log_bbnum = log->l_logBBsize; + + /* check totally zeroed log */ + bp = xlog_get_bp(1,log->l_mp); + if (!bp) + return ENOMEM; + if ((error = xlog_bread(log, 0, 1, bp))) + goto bp_err; + first_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT); + if (first_cycle == 0) { /* completely zeroed log */ + *blk_no = 0; + xlog_put_bp(bp); + return -1; + } + + /* check partially zeroed log */ + if ((error = xlog_bread(log, log_bbnum-1, 1, bp))) + goto bp_err; + last_cycle = GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT); + if (last_cycle != 0) { /* log completely written to */ + xlog_put_bp(bp); + return 0; + } else if (first_cycle != 1) { + /* + * If the cycle of the last block is zero, the cycle of + * the first block must be 1. If it's not, maybe we're + * not looking at a log... Bail out. + */ + xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)"); + return XFS_ERROR(EINVAL); + } + + /* we have a partially zeroed log */ + last_blk = log_bbnum-1; + if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0))) + goto bp_err; + + /* + * Validate the answer. Because there is no way to guarantee that + * the entire log is made up of log records which are the same size, + * we scan over the defined maximum blocks. At this point, the maximum + * is not chosen to mean anything special. XXXmiken + */ + num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); + ASSERT(num_scan_bblks <= INT_MAX); + + if (last_blk < num_scan_bblks) + num_scan_bblks = last_blk; + start_blk = last_blk - num_scan_bblks; + + /* + * We search for any instances of cycle number 0 that occur before + * our current estimate of the head. What we're trying to detect is + * 1 ... | 0 | 1 | 0... + * ^ binary search ends here + */ + if ((error = xlog_find_verify_cycle(log, start_blk, + (int)num_scan_bblks, 0, &new_blk))) + goto bp_err; + if (new_blk != -1) + last_blk = new_blk; + + /* + * Potentially backup over partial log record write. 
We don't need + * to search the end of the log because we know it is zero. + */ + if ((error = xlog_find_verify_log_record(log, start_blk, + &last_blk, 0)) == -1) { + error = XFS_ERROR(EIO); + goto bp_err; + } else if (error) + goto bp_err; + + *blk_no = last_blk; +bp_err: + xlog_put_bp(bp); + if (error) + return error; + return -1; +} /* xlog_find_zeroed */ + +/* + * This is simply a subroutine used by xlog_clear_stale_blocks() below + * to initialize a buffer full of empty log record headers and write + * them into the log. + */ +STATIC int +xlog_write_log_records( + xlog_t *log, + int cycle, + int start_block, + int blocks, + int tail_cycle, + int tail_block) +{ + xlog_rec_header_t *recp; + int i, j; + int end_block = start_block + blocks; + int error = 0; + xfs_buf_t *bp; + char *buf; + int bufblks; + + bufblks = 1 << ffs(blocks); + while (!(bp = xlog_get_bp(bufblks, log->l_mp))) { + bufblks >>= 1; + if (!bufblks) + return ENOMEM; + } + + buf = XFS_BUF_PTR(bp); + recp = (xlog_rec_header_t*)buf; + + memset(buf, 0, BBSIZE); + INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); + INT_SET(recp->h_cycle, ARCH_CONVERT, cycle); + INT_SET(recp->h_version, ARCH_CONVERT, + XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); + ASSIGN_ANY_LSN(recp->h_tail_lsn, tail_cycle, tail_block, ARCH_CONVERT); + + for (i = start_block; i < end_block; i += bufblks) { + int bcount = min(bufblks, end_block - start_block); + /* with plenty of memory, we duplicate the block + * right through the buffer and modify each entry + */ + ASSIGN_ANY_LSN(recp->h_lsn, cycle, i, ARCH_CONVERT); + for (j = 1; j < bcount; j++) { + buf += BBSIZE; + recp = (xlog_rec_header_t*)buf; + memcpy(buf, XFS_BUF_PTR(bp), BBSIZE); + ASSIGN_ANY_LSN(recp->h_lsn, cycle, i+j, ARCH_CONVERT); + } + /* then write the whole lot out at once */ + error = xlog_bwrite(log, start_block, bcount, bp); + start_block += bcount; + buf = XFS_BUF_PTR(bp); + recp = (xlog_rec_header_t*)buf; + } + xlog_put_bp(bp); + + return error; +} + +/* + * This routine is called to blow away any incomplete log writes out + * in front of the log head. We do this so that we won't become confused + * if we come up, write only a little bit more, and then crash again. + * If we leave the partial log records out there, this situation could + * cause us to think those partial writes are valid blocks since they + * have the current cycle number. We get rid of them by overwriting them + * with empty log records with the old cycle number rather than the + * current one. + * + * The tail lsn is passed in rather than taken from + * the log so that we will not write over the unmount record after a + * clean unmount in a 512 block log. Doing so would leave the log without + * any valid log records in it until a new one was written. If we crashed + * during that time we would not be able to recover. + */ +STATIC int +xlog_clear_stale_blocks( + xlog_t *log, + xfs_lsn_t tail_lsn) +{ + int tail_cycle, head_cycle; + int tail_block, head_block; + int tail_distance, max_distance; + int distance; + int error; + + tail_cycle = CYCLE_LSN(tail_lsn, ARCH_NOCONVERT); + tail_block = BLOCK_LSN(tail_lsn, ARCH_NOCONVERT); + head_cycle = log->l_curr_cycle; + head_block = log->l_curr_block; + + /* + * Figure out the distance between the new head of the log + * and the tail. We want to write over any blocks beyond the + * head that we may have written just before the crash, but + * we don't want to overwrite the tail of the log. 
+ */ + if (head_cycle == tail_cycle) { + /* + * The tail is behind the head in the physical log, + * so the distance from the head to the tail is the + * distance from the head to the end of the log plus + * the distance from the beginning of the log to the + * tail. + */ + if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) { + XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)", + XFS_ERRLEVEL_LOW, log->l_mp); + return XFS_ERROR(EFSCORRUPTED); + } + tail_distance = tail_block + (log->l_logBBsize - head_block); + } else { + /* + * The head is behind the tail in the physical log, + * so the distance from the head to the tail is just + * the tail block minus the head block. + */ + if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){ + XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)", + XFS_ERRLEVEL_LOW, log->l_mp); + return XFS_ERROR(EFSCORRUPTED); + } + tail_distance = tail_block - head_block; + } + + /* + * If the head is right up against the tail, we can't clear + * anything. + */ + if (tail_distance <= 0) { + ASSERT(tail_distance == 0); + return 0; + } + + max_distance = XLOG_TOTAL_REC_SHIFT(log); + /* + * Take the smaller of the maximum amount of outstanding I/O + * we could have and the distance to the tail to clear out. + * We take the smaller so that we don't overwrite the tail and + * we don't waste all day writing from the head to the tail + * for no reason. + */ + max_distance = MIN(max_distance, tail_distance); + + if ((head_block + max_distance) <= log->l_logBBsize) { + /* + * We can stomp all the blocks we need to without + * wrapping around the end of the log. Just do it + * in a single write. Use the cycle number of the + * current cycle minus one so that the log will look like: + * n ... | n - 1 ... + */ + error = xlog_write_log_records(log, (head_cycle - 1), + head_block, max_distance, tail_cycle, + tail_block); + if (error) + return error; + } else { + /* + * We need to wrap around the end of the physical log in + * order to clear all the blocks. Do it in two separate + * I/Os. The first write should be from the head to the + * end of the physical log, and it should use the current + * cycle number minus one just like above. + */ + distance = log->l_logBBsize - head_block; + error = xlog_write_log_records(log, (head_cycle - 1), + head_block, distance, tail_cycle, + tail_block); + + if (error) + return error; + + /* + * Now write the blocks at the start of the physical log. + * This writes the remainder of the blocks we want to clear. + * It uses the current cycle number since we're now on the + * same cycle as the head so that we get: + * n ... n ... | n - 1 ... 
+ * ^^^^^ blocks we're writing + */ + distance = max_distance - (log->l_logBBsize - head_block); + error = xlog_write_log_records(log, head_cycle, 0, distance, + tail_cycle, tail_block); + if (error) + return error; + } + + return 0; +} + +/****************************************************************************** + * + * Log recover routines + * + ****************************************************************************** + */ + +STATIC xlog_recover_t * +xlog_recover_find_tid(xlog_recover_t *q, + xlog_tid_t tid) +{ + xlog_recover_t *p = q; + + while (p != NULL) { + if (p->r_log_tid == tid) + break; + p = p->r_next; + } + return p; +} /* xlog_recover_find_tid */ + + +STATIC void +xlog_recover_put_hashq(xlog_recover_t **q, + xlog_recover_t *trans) +{ + trans->r_next = *q; + *q = trans; +} /* xlog_recover_put_hashq */ + + +STATIC void +xlog_recover_add_item(xlog_recover_item_t **itemq) +{ + xlog_recover_item_t *item; + + item = kmem_zalloc(sizeof(xlog_recover_item_t), 0); + xlog_recover_insert_item_backq(itemq, item); +} /* xlog_recover_add_item */ + + +STATIC int +xlog_recover_add_to_cont_trans(xlog_recover_t *trans, + xfs_caddr_t dp, + int len) +{ + xlog_recover_item_t *item; + xfs_caddr_t ptr, old_ptr; + int old_len; + + item = trans->r_itemq; + if (item == 0) { + /* finish copying rest of trans header */ + xlog_recover_add_item(&trans->r_itemq); + ptr = (xfs_caddr_t)&trans->r_theader+sizeof(xfs_trans_header_t)-len; + memcpy(ptr, dp, len); /* d, s, l */ + return 0; + } + item = item->ri_prev; + + old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; + old_len = item->ri_buf[item->ri_cnt-1].i_len; + + ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0); + memcpy(&ptr[old_len], dp, len); /* d, s, l */ + item->ri_buf[item->ri_cnt-1].i_len += len; + item->ri_buf[item->ri_cnt-1].i_addr = ptr; + return 0; +} /* xlog_recover_add_to_cont_trans */ + + +/* The next region to add is the start of a new region. It could be + * a whole region or it could be the first part of a new region. Because + * of this, the assumption here is that the type and size fields of all + * format structures fit into the first 32 bits of the structure. + * + * This works because all regions must be 32 bit aligned. Therefore, we + * either have both fields or we have neither field. In the case we have + * neither field, the data part of the region is zero length. We only have + * a log_op_header and can throw away the header since a new one will appear + * later. If we have at least 4 bytes, then we can determine how many regions + * will appear in the current log item. 
+ */ +STATIC int +xlog_recover_add_to_trans(xlog_recover_t *trans, + xfs_caddr_t dp, + int len) +{ + xfs_inode_log_format_t *in_f; /* any will do */ + xlog_recover_item_t *item; + xfs_caddr_t ptr; + + if (!len) + return 0; + item = trans->r_itemq; + if (item == 0) { + ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC); + if (len == sizeof(xfs_trans_header_t)) + xlog_recover_add_item(&trans->r_itemq); + memcpy(&trans->r_theader, dp, len); /* d, s, l */ + return 0; + } + + ptr = kmem_alloc(len, 0); + memcpy(ptr, dp, len); + in_f = (xfs_inode_log_format_t *)ptr; + + if (item->ri_prev->ri_total != 0 && + item->ri_prev->ri_total == item->ri_prev->ri_cnt) { + xlog_recover_add_item(&trans->r_itemq); + } + item = trans->r_itemq; + item = item->ri_prev; + + if (item->ri_total == 0) { /* first region to be added */ + item->ri_total = in_f->ilf_size; + ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM); + item->ri_buf = kmem_zalloc((item->ri_total * + sizeof(xfs_log_iovec_t)), 0); + } + ASSERT(item->ri_total > item->ri_cnt); + /* Description region is ri_buf[0] */ + item->ri_buf[item->ri_cnt].i_addr = ptr; + item->ri_buf[item->ri_cnt].i_len = len; + item->ri_cnt++; + return 0; +} /* xlog_recover_add_to_trans */ + + +STATIC void +xlog_recover_new_tid(xlog_recover_t **q, + xlog_tid_t tid, + xfs_lsn_t lsn) +{ + xlog_recover_t *trans; + + trans = kmem_zalloc(sizeof(xlog_recover_t), 0); + trans->r_log_tid = tid; + trans->r_lsn = lsn; + xlog_recover_put_hashq(q, trans); +} /* xlog_recover_new_tid */ + + +STATIC int +xlog_recover_unlink_tid(xlog_recover_t **q, + xlog_recover_t *trans) +{ + xlog_recover_t *tp; + int found = 0; + + ASSERT(trans != 0); + if (trans == *q) { + *q = (*q)->r_next; + } else { + tp = *q; + while (tp != 0) { + if (tp->r_next == trans) { + found = 1; + break; + } + tp = tp->r_next; + } + if (!found) { + xlog_warn( + "XFS: xlog_recover_unlink_tid: trans not found"); + ASSERT(0); + return XFS_ERROR(EIO); + } + tp->r_next = tp->r_next->r_next; + } + return 0; +} /* xlog_recover_unlink_tid */ + +STATIC void +xlog_recover_insert_item_backq(xlog_recover_item_t **q, + xlog_recover_item_t *item) +{ + if (*q == 0) { + item->ri_prev = item->ri_next = item; + *q = item; + } else { + item->ri_next = *q; + item->ri_prev = (*q)->ri_prev; + (*q)->ri_prev = item; + item->ri_prev->ri_next = item; + } +} /* xlog_recover_insert_item_backq */ + +STATIC void +xlog_recover_insert_item_frontq(xlog_recover_item_t **q, + xlog_recover_item_t *item) +{ + xlog_recover_insert_item_backq(q, item); + *q = item; +} /* xlog_recover_insert_item_frontq */ + +STATIC int +xlog_recover_reorder_trans(xlog_t *log, + xlog_recover_t *trans) +{ + xlog_recover_item_t *first_item, *itemq, *itemq_next; + + first_item = itemq = trans->r_itemq; + trans->r_itemq = NULL; + do { + itemq_next = itemq->ri_next; + switch (ITEM_TYPE(itemq)) { + case XFS_LI_BUF: + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: { + xlog_recover_insert_item_frontq(&trans->r_itemq, itemq); + break; + } + case XFS_LI_INODE: + case XFS_LI_6_1_INODE: + case XFS_LI_5_3_INODE: + case XFS_LI_DQUOT: + case XFS_LI_QUOTAOFF: + case XFS_LI_EFD: + case XFS_LI_EFI: { + xlog_recover_insert_item_backq(&trans->r_itemq, itemq); + break; + } + default: { + xlog_warn( + "XFS: xlog_recover_reorder_trans: unrecognized type of log operation"); + ASSERT(0); + return XFS_ERROR(EIO); + } + } + itemq = itemq_next; + } while (first_item != itemq); + return 0; +} /* xlog_recover_reorder_trans */ + + +/* + * Build up the table of buf cancel records so that we don't replay + * cancelled data in 
the second pass. For buffer records that are + * not cancel records, there is nothing to do here so we just return. + * + * If we get a cancel record which is already in the table, this indicates + * that the buffer was cancelled multiple times. In order to ensure + * that during pass 2 we keep the record in the table until we reach its + * last occurrence in the log, we keep a reference count in the cancel + * record in the table to tell us how many times we expect to see this + * record during the second pass. + */ +STATIC void +xlog_recover_do_buffer_pass1(xlog_t *log, + xfs_buf_log_format_t *buf_f) +{ + xfs_buf_cancel_t *bcp; + xfs_buf_cancel_t *nextp; + xfs_buf_cancel_t *prevp; + xfs_buf_cancel_t **bucket; + xfs_buf_log_format_v1_t *obuf_f; + xfs_daddr_t blkno=0; + uint len=0; + ushort flags=0; + + switch (buf_f->blf_type) { + case XFS_LI_BUF: + blkno = buf_f->blf_blkno; + len = buf_f->blf_len; + flags = buf_f->blf_flags; + break; + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: + obuf_f = (xfs_buf_log_format_v1_t*)buf_f; + blkno = (xfs_daddr_t) obuf_f->blf_blkno; + len = obuf_f->blf_len; + flags = obuf_f->blf_flags; + break; + } + + /* + * If this isn't a cancel buffer item, then just return. + */ + if (!(flags & XFS_BLI_CANCEL)) { + return; + } + + /* + * Insert an xfs_buf_cancel record into the hash table of + * them. If there is already an identical record, bump + * its reference count. + */ + bucket = &log->l_buf_cancel_table[(__uint64_t)blkno % + XLOG_BC_TABLE_SIZE]; + /* + * If the hash bucket is empty then just insert a new record into + * the bucket. + */ + if (*bucket == NULL) { + bcp = (xfs_buf_cancel_t*)kmem_alloc(sizeof(xfs_buf_cancel_t), + KM_SLEEP); + bcp->bc_blkno = blkno; + bcp->bc_len = len; + bcp->bc_refcount = 1; + bcp->bc_next = NULL; + *bucket = bcp; + return; + } + + /* + * The hash bucket is not empty, so search for duplicates of our + * record. If we find one them just bump its refcount. If not + * then add us at the end of the list. + */ + prevp = NULL; + nextp = *bucket; + while (nextp != NULL) { + if (nextp->bc_blkno == blkno && nextp->bc_len == len) { + nextp->bc_refcount++; + return; + } + prevp = nextp; + nextp = nextp->bc_next; + } + ASSERT(prevp != NULL); + bcp = (xfs_buf_cancel_t*)kmem_alloc(sizeof(xfs_buf_cancel_t), + KM_SLEEP); + bcp->bc_blkno = blkno; + bcp->bc_len = len; + bcp->bc_refcount = 1; + bcp->bc_next = NULL; + prevp->bc_next = bcp; +} + +/* + * Check to see whether the buffer being recovered has a corresponding + * entry in the buffer cancel record table. If it does then return 1 + * so that it will be cancelled, otherwise return 0. If the buffer is + * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement + * the refcount on the entry in the table and remove it from the table + * if this is the last reference. + * + * We remove the cancel record from the table when we encounter its + * last occurrence in the log so that if the same buffer is re-used + * again after its last cancellation we actually replay the changes + * made at that point. 
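The cancel-table bookkeeping described above (pass 1 records cancelled buffers with a reference count, pass 2 consumes the references and drops the entry at its last use) can be modelled compactly in userspace. In the sketch below, buf_cancel, cancel_add and cancel_check are stand-ins for xfs_buf_cancel_t and the two pass routines, and the table size is arbitrary.

#include <stdio.h>
#include <stdlib.h>

#define BC_TABLE_SIZE 64

struct buf_cancel {
        long long blkno;
        int len;
        int refcount;
        struct buf_cancel *next;
};

static struct buf_cancel *table[BC_TABLE_SIZE];

/* Pass 1: remember a cancelled buffer, or bump the count of a duplicate. */
static void cancel_add(long long blkno, int len)
{
        struct buf_cancel **bucket = &table[(unsigned long long)blkno % BC_TABLE_SIZE];
        struct buf_cancel *bcp;

        for (bcp = *bucket; bcp != NULL; bcp = bcp->next) {
                if (bcp->blkno == blkno && bcp->len == len) {
                        bcp->refcount++;
                        return;
                }
        }
        bcp = calloc(1, sizeof(*bcp));
        bcp->blkno = blkno;
        bcp->len = len;
        bcp->refcount = 1;
        bcp->next = *bucket;
        *bucket = bcp;
}

/* Pass 2: is this buffer cancelled?  Drop the entry on its last use. */
static int cancel_check(long long blkno, int len, int is_cancel_item)
{
        struct buf_cancel **link = &table[(unsigned long long)blkno % BC_TABLE_SIZE];
        struct buf_cancel *bcp;

        for (; (bcp = *link) != NULL; link = &bcp->next) {
                if (bcp->blkno != blkno || bcp->len != len)
                        continue;
                if (is_cancel_item && --bcp->refcount == 0) {
                        *link = bcp->next;
                        free(bcp);
                }
                return 1;       /* skip replaying this buffer */
        }
        return 0;               /* not cancelled: replay it */
}

int main(void)
{
        cancel_add(1234, 8);                            /* pass 1 saw one cancel record */
        printf("%d\n", cancel_check(1234, 8, 0));       /* 1: ordinary buffer skipped */
        printf("%d\n", cancel_check(1234, 8, 1));       /* 1: cancel item, entry dropped */
        printf("%d\n", cancel_check(1234, 8, 0));       /* 0: later reuse is replayed */
        return 0;
}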
+ */ +STATIC int +xlog_recover_do_buffer_pass2(xlog_t *log, + xfs_buf_log_format_t *buf_f) +{ + xfs_buf_cancel_t *bcp; + xfs_buf_cancel_t *prevp; + xfs_buf_cancel_t **bucket; + xfs_buf_log_format_v1_t *obuf_f; + xfs_daddr_t blkno=0; + ushort flags=0; + uint len=0; + + + switch (buf_f->blf_type) { + case XFS_LI_BUF: + blkno = buf_f->blf_blkno; + flags = buf_f->blf_flags; + len = buf_f->blf_len; + break; + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: + obuf_f = (xfs_buf_log_format_v1_t*)buf_f; + blkno = (xfs_daddr_t) obuf_f->blf_blkno; + flags = obuf_f->blf_flags; + len = (xfs_daddr_t) obuf_f->blf_len; + break; + } + if (log->l_buf_cancel_table == NULL) { + /* + * There is nothing in the table built in pass one, + * so this buffer must not be cancelled. + */ + ASSERT(!(flags & XFS_BLI_CANCEL)); + return 0; + } + + bucket = &log->l_buf_cancel_table[(__uint64_t)blkno % + XLOG_BC_TABLE_SIZE]; + bcp = *bucket; + if (bcp == NULL) { + /* + * There is no corresponding entry in the table built + * in pass one, so this buffer has not been cancelled. + */ + ASSERT(!(flags & XFS_BLI_CANCEL)); + return 0; + } + + /* + * Search for an entry in the buffer cancel table that + * matches our buffer. + */ + prevp = NULL; + while (bcp != NULL) { + if (bcp->bc_blkno == blkno && bcp->bc_len == len) { + /* + * We've go a match, so return 1 so that the + * recovery of this buffer is cancelled. + * If this buffer is actually a buffer cancel + * log item, then decrement the refcount on the + * one in the table and remove it if this is the + * last reference. + */ + if (flags & XFS_BLI_CANCEL) { + bcp->bc_refcount--; + if (bcp->bc_refcount == 0) { + if (prevp == NULL) { + *bucket = bcp->bc_next; + } else { + prevp->bc_next = bcp->bc_next; + } + kmem_free(bcp, + sizeof(xfs_buf_cancel_t)); + } + } + return 1; + } + prevp = bcp; + bcp = bcp->bc_next; + } + /* + * We didn't find a corresponding entry in the table, so + * return 0 so that the buffer is NOT cancelled. + */ + ASSERT(!(flags & XFS_BLI_CANCEL)); + return 0; +} + + +/* + * Perform recovery for a buffer full of inodes. In these buffers, + * the only data which should be recovered is that which corresponds + * to the di_next_unlinked pointers in the on disk inode structures. + * The rest of the data for the inodes is always logged through the + * inodes themselves rather than the inode buffer and is recovered + * in xlog_recover_do_inode_trans(). + * + * The only time when buffers full of inodes are fully recovered is + * when the buffer is full of newly allocated inodes. In this case + * the buffer will not be marked as an inode buffer and so will be + * sent to xlog_recover_do_reg_buffer() below during recovery. 
+ */ +STATIC int +xlog_recover_do_inode_buffer(xfs_mount_t *mp, + xlog_recover_item_t *item, + xfs_buf_t *bp, + xfs_buf_log_format_t *buf_f) +{ + int i; + int item_index; + int bit; + int nbits; + int reg_buf_offset; + int reg_buf_bytes; + int next_unlinked_offset; + int inodes_per_buf; + xfs_agino_t *logged_nextp; + xfs_agino_t *buffer_nextp; + xfs_buf_log_format_v1_t *obuf_f; + unsigned int *data_map=NULL; + unsigned int map_size=0; + + switch (buf_f->blf_type) { + case XFS_LI_BUF: + data_map = buf_f->blf_data_map; + map_size = buf_f->blf_map_size; + break; + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: + obuf_f = (xfs_buf_log_format_v1_t*)buf_f; + data_map = obuf_f->blf_data_map; + map_size = obuf_f->blf_map_size; + break; + } + /* + * Set the variables corresponding to the current region to + * 0 so that we'll initialize them on the first pass through + * the loop. + */ + reg_buf_offset = 0; + reg_buf_bytes = 0; + bit = 0; + nbits = 0; + item_index = 0; + inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog; + for (i = 0; i < inodes_per_buf; i++) { + next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + + offsetof(xfs_dinode_t, di_next_unlinked); + + while (next_unlinked_offset >= + (reg_buf_offset + reg_buf_bytes)) { + /* + * The next di_next_unlinked field is beyond + * the current logged region. Find the next + * logged region that contains or is beyond + * the current di_next_unlinked field. + */ + bit += nbits; + bit = xfs_next_bit(data_map, map_size, bit); + + /* + * If there are no more logged regions in the + * buffer, then we're done. + */ + if (bit == -1) { + return 0; + } + + nbits = xfs_contig_bits(data_map, map_size, + bit); + reg_buf_offset = bit << XFS_BLI_SHIFT; + reg_buf_bytes = nbits << XFS_BLI_SHIFT; + item_index++; + } + + /* + * If the current logged region starts after the current + * di_next_unlinked field, then move on to the next + * di_next_unlinked field. + */ + if (next_unlinked_offset < reg_buf_offset) { + continue; + } + + ASSERT(item->ri_buf[item_index].i_addr != NULL); + ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0); + ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp)); + + /* + * The current logged region contains a copy of the + * current di_next_unlinked field. Extract its value + * and copy it to the buffer copy. + */ + logged_nextp = (xfs_agino_t *) + ((char *)(item->ri_buf[item_index].i_addr) + + (next_unlinked_offset - reg_buf_offset)); + if (unlikely(*logged_nextp == 0)) { + xfs_fs_cmn_err(CE_ALERT, mp, + "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field", + item, bp); + XFS_ERROR_REPORT("xlog_recover_do_inode_buf", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, + next_unlinked_offset); + INT_SET(*buffer_nextp, ARCH_CONVERT, *logged_nextp); + } + + return 0; +} /* xlog_recover_do_inode_buffer */ + +/* + * Perform a 'normal' buffer recovery. Each logged region of the + * buffer should be copied over the corresponding region in the + * given buffer. The bitmap in the buf log format structure indicates + * where to place the logged data. 
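The inode-buffer walk in xlog_recover_do_inode_buffer() above, like the regular-buffer copy that follows, is driven by the dirty-region bitmap in the buf log format item: each set bit stands for one XFS_BLI_CHUNK-sized chunk, and runs of set bits become (offset, length) regions via the shift by XFS_BLI_SHIFT. A standalone model of that scan, with illustrative helpers rather than the kernel's xfs_next_bit()/xfs_contig_bits() and 128-byte chunks assumed:

#include <stdio.h>

#define CHUNK_SHIFT 7   /* 128-byte chunks, playing the role of XFS_BLI_SHIFT */

/* Find the next set bit at or after 'start', or -1 if there is none. */
static int next_set_bit(const unsigned int *map, int nbits, int start)
{
        for (int i = start; i < nbits; i++)
                if (map[i / 32] & (1u << (i % 32)))
                        return i;
        return -1;
}

/* Count the run of consecutive set bits beginning at 'start'. */
static int contig_bits(const unsigned int *map, int nbits, int start)
{
        int n = 0;

        while (start + n < nbits &&
               (map[(start + n) / 32] & (1u << ((start + n) % 32))))
                n++;
        return n;
}

int main(void)
{
        unsigned int map[1] = { 0x39 };         /* bits 0, 3, 4, 5: two dirty regions */
        int bit = 0, nbits;

        while ((bit = next_set_bit(map, 32, bit)) != -1) {
                nbits = contig_bits(map, 32, bit);
                printf("logged region at byte %d, %d bytes long\n",
                       bit << CHUNK_SHIFT, nbits << CHUNK_SHIFT);
                bit += nbits;
        }
        return 0;
}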
+ */ +/*ARGSUSED*/ +STATIC void +xlog_recover_do_reg_buffer(xfs_mount_t *mp, + xlog_recover_item_t *item, + xfs_buf_t *bp, + xfs_buf_log_format_t *buf_f) +{ + int i; + int bit; + int nbits; + xfs_buf_log_format_v1_t *obuf_f; + unsigned int *data_map=NULL; + unsigned int map_size=0; + int error; + + switch (buf_f->blf_type) { + case XFS_LI_BUF: + data_map = buf_f->blf_data_map; + map_size = buf_f->blf_map_size; + break; + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: + obuf_f = (xfs_buf_log_format_v1_t*)buf_f; + data_map = obuf_f->blf_data_map; + map_size = obuf_f->blf_map_size; + break; + } + bit = 0; + i = 1; /* 0 is the buf format structure */ + while (1) { + bit = xfs_next_bit(data_map, map_size, bit); + if (bit == -1) + break; + nbits = xfs_contig_bits(data_map, map_size, bit); + ASSERT(item->ri_buf[i].i_addr != 0); + ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0); + ASSERT(XFS_BUF_COUNT(bp) >= + ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT)); + + /* + * Do a sanity check if this is a dquot buffer. Just checking + * the first dquot in the buffer should do. XXXThis is + * probably a good thing to do for other buf types also. + */ + error = 0; + if (buf_f->blf_flags & (XFS_BLI_UDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { + error = xfs_qm_dqcheck((xfs_disk_dquot_t *) + item->ri_buf[i].i_addr, + -1, 0, XFS_QMOPT_DOWARN, + "dquot_buf_recover"); + } + if (!error) + memcpy(xfs_buf_offset(bp, + (uint)bit << XFS_BLI_SHIFT), /* dest */ + item->ri_buf[i].i_addr, /* source */ + nbits<<XFS_BLI_SHIFT); /* length */ + i++; + bit += nbits; + } + + /* Shouldn't be any more regions */ + ASSERT(i == item->ri_total); +} /* xlog_recover_do_reg_buffer */ + +/* + * Do some primitive error checking on ondisk dquot data structures. + */ +int +xfs_qm_dqcheck( + xfs_disk_dquot_t *ddq, + xfs_dqid_t id, + uint type, /* used only when IO_dorepair is true */ + uint flags, + char *str) +{ + xfs_dqblk_t *d = (xfs_dqblk_t *)ddq; + int errs = 0; + + /* + * We can encounter an uninitialized dquot buffer for 2 reasons: + * 1. If we crash while deleting the quotainode(s), and those blks got + * used for user data. This is because we take the path of regular + * file deletion; however, the size field of quotainodes is never + * updated, so all the tricks that we play in itruncate_finish + * don't quite matter. + * + * 2. We don't play the quota buffers when there's a quotaoff logitem. + * But the allocation will be replayed so we'll end up with an + * uninitialized quota block. + * + * This is all fine; things are still consistent, and we haven't lost + * any quota information. Just don't complain about bad dquot blks. + */ + if (INT_GET(ddq->d_magic, ARCH_CONVERT) != XFS_DQUOT_MAGIC) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x", + str, id, + INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_MAGIC); + errs++; + } + if (INT_GET(ddq->d_version, ARCH_CONVERT) != XFS_DQUOT_VERSION) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x", + str, id, + INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_VERSION); + errs++; + } + + if (INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_USER && + INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_GROUP) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : XFS dquot ID 0x%x, unknown flags 0x%x", + str, id, INT_GET(ddq->d_flags, ARCH_CONVERT)); + errs++; + } + + if (id != -1 && id != INT_GET(ddq->d_id, ARCH_CONVERT)) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : ondisk-dquot 0x%x, ID mismatch: " + "0x%x expected, found id 0x%x", + str, ddq, id, INT_GET(ddq->d_id, ARCH_CONVERT)); + errs++; + } + + if (!
errs) { + if (INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT) && + INT_GET(ddq->d_bcount, ARCH_CONVERT) >= + INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT)) { + if (INT_ISZERO(ddq->d_btimer, ARCH_CONVERT) && + !INT_ISZERO(ddq->d_id, ARCH_CONVERT)) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : Dquot ID 0x%x (0x%x) " + "BLK TIMER NOT STARTED", + str, (int) + INT_GET(ddq->d_id, ARCH_CONVERT), ddq); + errs++; + } + } + if (INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT) && + INT_GET(ddq->d_icount, ARCH_CONVERT) >= + INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT)) { + if (INT_ISZERO(ddq->d_itimer, ARCH_CONVERT) && + !INT_ISZERO(ddq->d_id, ARCH_CONVERT)) { + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_ALERT, + "%s : Dquot ID 0x%x (0x%x) " + "INODE TIMER NOT STARTED", + str, (int) + INT_GET(ddq->d_id, ARCH_CONVERT), ddq); + errs++; + } + } + } + + if (!errs || !(flags & XFS_QMOPT_DQREPAIR)) + return errs; + + if (flags & XFS_QMOPT_DOWARN) + cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id); + + /* + * Typically, a repair is only requested by quotacheck. + */ + ASSERT(id != -1); + ASSERT(flags & XFS_QMOPT_DQREPAIR); + memset(d, 0, sizeof(xfs_dqblk_t)); + INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC); + INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION); + INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id); + INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type); + + return errs; +} + +/* + * Perform a dquot buffer recovery. + * Simple algorithm: if we have found a QUOTAOFF logitem of the same type + * (ie. USR or GRP), then just toss this buffer away; don't recover it. + * Else, treat it as a regular buffer and do recovery. + */ +STATIC void +xlog_recover_do_dquot_buffer( + xfs_mount_t *mp, + xlog_t *log, + xlog_recover_item_t *item, + xfs_buf_t *bp, + xfs_buf_log_format_t *buf_f) +{ + uint type; + + /* + * Filesystems are required to send in quota flags at mount time. + */ + if (mp->m_qflags == 0) { + return; + } + + type = 0; + if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF) + type |= XFS_DQ_USER; + if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF) + type |= XFS_DQ_GROUP; + /* + * This type of quotas was turned off, so ignore this buffer + */ + if (log->l_quotaoffs_flag & type) + return; + + xlog_recover_do_reg_buffer(mp, item, bp, buf_f); +} + +/* + * This routine replays a modification made to a buffer at runtime. + * There are actually two types of buffer, regular and inode, which + * are handled differently. Inode buffers are handled differently + * in that we only recover a specific set of data from them, namely + * the inode di_next_unlinked fields. This is because all other inode + * data is actually logged via inode records and any data we replay + * here which overlaps that may be stale. + * + * When meta-data buffers are freed at run time we log a buffer item + * with the XFS_BLI_CANCEL bit set to indicate that previous copies + * of the buffer in the log should not be replayed at recovery time. + * This is so that if the blocks covered by the buffer are reused for + * file data before we crash we don't end up replaying old, freed + * meta-data into a user's file. + * + * To handle the cancellation of buffer log items, we make two passes + * over the log during recovery. During the first we build a table of + * those buffers which have been cancelled, and during the second we + * only replay those buffers which do not have corresponding cancel + * records in the table. 
See xlog_recover_do_buffer_pass[1,2] above + * for more details on the implementation of the table of cancel records. + */ +STATIC int +xlog_recover_do_buffer_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_buf_log_format_t *buf_f; + xfs_buf_log_format_v1_t *obuf_f; + xfs_mount_t *mp; + xfs_buf_t *bp; + int error; + int cancel; + xfs_daddr_t blkno; + int len; + ushort flags; + + buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr; + + if (pass == XLOG_RECOVER_PASS1) { + /* + * In this pass we're only looking for buf items + * with the XFS_BLI_CANCEL bit set. + */ + xlog_recover_do_buffer_pass1(log, buf_f); + return 0; + } else { + /* + * In this pass we want to recover all the buffers + * which have not been cancelled and are not + * cancellation buffers themselves. The routine + * we call here will tell us whether or not to + * continue with the replay of this buffer. + */ + cancel = xlog_recover_do_buffer_pass2(log, buf_f); + if (cancel) { + return 0; + } + } + switch (buf_f->blf_type) { + case XFS_LI_BUF: + blkno = buf_f->blf_blkno; + len = buf_f->blf_len; + flags = buf_f->blf_flags; + break; + case XFS_LI_6_1_BUF: + case XFS_LI_5_3_BUF: + obuf_f = (xfs_buf_log_format_v1_t*)buf_f; + blkno = obuf_f->blf_blkno; + len = obuf_f->blf_len; + flags = obuf_f->blf_flags; + break; + default: + xfs_fs_cmn_err(CE_ALERT, log->l_mp, + "xfs_log_recover: unknown buffer type 0x%x, dev 0x%x", + buf_f->blf_type, log->l_dev); + XFS_ERROR_REPORT("xlog_recover_do_buffer_trans", + XFS_ERRLEVEL_LOW, log->l_mp); + return XFS_ERROR(EFSCORRUPTED); + } + + mp = log->l_mp; + if (flags & XFS_BLI_INODE_BUF) { + bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len, + XFS_BUF_LOCK); + } else { + bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0); + } + if (XFS_BUF_ISERROR(bp)) { + xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp, + bp, blkno); + error = XFS_BUF_GETERROR(bp); + xfs_buf_relse(bp); + return error; + } + + error = 0; + if (flags & XFS_BLI_INODE_BUF) { + error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); + } else if (flags & (XFS_BLI_UDQUOT_BUF | XFS_BLI_GDQUOT_BUF)) { + xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); + } else { + xlog_recover_do_reg_buffer(mp, item, bp, buf_f); + } + if (error) + return XFS_ERROR(error); + + /* + * Perform delayed write on the buffer. Asynchronous writes will be + * slower when taking into account all the buffers to be flushed. + * + * Also make sure that only inode buffers with good sizes stay in + * the buffer cache. The kernel moves inodes in buffers of 1 block + * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode + * buffers in the log can be a different size if the log was generated + * by an older kernel using unclustered inode buffers or a newer kernel + * running with a different inode cluster size. Regardless, if the + * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE) + * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep + * the buffer out of the buffer cache so that the buffer won't + * overlap with future reads of those inodes. 
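The write-back decision described in the comment above boils down to one comparison: a buffer that looks like inodes but is not MAX(blocksize, inode cluster size) bytes long is written out synchronously and marked stale so it never lingers in the cache. A toy decision function, with invented names and sizes, just to make the rule concrete:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/*
 * Decide how a recovered buffer gets written back: a buffer that looks like
 * inodes but is not the size this kernel would use for an inode cluster
 * must not stay in the buffer cache.
 */
static const char *writeback_policy(int looks_like_inodes,
                                    unsigned int buf_bytes,
                                    unsigned int blocksize,
                                    unsigned int cluster_bytes)
{
        if (looks_like_inodes && buf_bytes != MAX(blocksize, cluster_bytes))
                return "mark stale, write synchronously";   /* keep it out of the cache */
        return "delayed write";                              /* normal batched writeback */
}

int main(void)
{
        printf("%s\n", writeback_policy(1, 4096, 4096, 8192));  /* wrong size for us */
        printf("%s\n", writeback_policy(1, 8192, 4096, 8192));  /* expected size     */
        return 0;
}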
+ */ + if (XFS_DINODE_MAGIC == + INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) && + (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize, + (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { + XFS_BUF_STALE(bp); + error = xfs_bwrite(mp, bp); + } else { + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL || + XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp); + XFS_BUF_SET_FSPRIVATE(bp, mp); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + xfs_bdwrite(mp, bp); + } + + return (error); +} /* xlog_recover_do_buffer_trans */ + +STATIC int +xlog_recover_do_inode_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_inode_log_format_t *in_f; + xfs_mount_t *mp; + xfs_buf_t *bp; + xfs_imap_t imap; + xfs_dinode_t *dip; + xfs_ino_t ino; + int len; + xfs_caddr_t src; + xfs_caddr_t dest; + int error; + int attr_index; + uint fields; + xfs_dinode_core_t *dicp; + + if (pass == XLOG_RECOVER_PASS1) { + return 0; + } + + in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr; + ino = in_f->ilf_ino; + mp = log->l_mp; + if (ITEM_TYPE(item) == XFS_LI_INODE) { + imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno; + imap.im_len = in_f->ilf_len; + imap.im_boffset = in_f->ilf_boffset; + } else { + /* + * It's an old inode format record. We don't know where + * its cluster is located on disk, and we can't allow + * xfs_imap() to figure it out because the inode btrees + * are not ready to be used. Therefore do not pass the + * XFS_IMAP_LOOKUP flag to xfs_imap(). This will give + * us only the single block in which the inode lives + * rather than its cluster, so we must make sure to + * invalidate the buffer when we write it out below. + */ + imap.im_blkno = 0; + xfs_imap(log->l_mp, 0, ino, &imap, 0); + } + bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len, + XFS_BUF_LOCK); + if (XFS_BUF_ISERROR(bp)) { + xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, + bp, imap.im_blkno); + error = XFS_BUF_GETERROR(bp); + xfs_buf_relse(bp); + return error; + } + error = 0; + ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); + dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); + + /* + * Make sure the place we're flushing out to really looks + * like an inode! 
+ */ + if (unlikely(INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC)) { + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld", + dip, bp, ino); + XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + dicp = (xfs_dinode_core_t*)(item->ri_buf[1].i_addr); + if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld", + item, ino); + XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + if (unlikely((dicp->di_mode & IFMT) == IFREG)) { + if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && + (dicp->di_format != XFS_DINODE_FMT_BTREE)) { + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", + item, dip, bp, ino); + return XFS_ERROR(EFSCORRUPTED); + } + } else if (unlikely((dicp->di_mode & IFMT) == IFDIR)) { + if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && + (dicp->di_format != XFS_DINODE_FMT_BTREE) && + (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", + item, dip, bp, ino); + return XFS_ERROR(EFSCORRUPTED); + } + } + if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){ + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", + item, dip, bp, ino, + dicp->di_nextents + dicp->di_anextents, + dicp->di_nblocks); + return XFS_ERROR(EFSCORRUPTED); + } + if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x", + item, dip, bp, ino, dicp->di_forkoff); + return XFS_ERROR(EFSCORRUPTED); + } + if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) { + XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)", + XFS_ERRLEVEL_LOW, mp, dicp); + xfs_buf_relse(bp); + xfs_fs_cmn_err(CE_ALERT, mp, + "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p", + item->ri_buf[1].i_len, item); + return XFS_ERROR(EFSCORRUPTED); + } + + /* The core is in in-core format */ + xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core, + (xfs_dinode_core_t*)item->ri_buf[1].i_addr, + -1, ARCH_CONVERT); + /* the rest is in on-disk format */ + if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) { + memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t), + item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t), + item->ri_buf[1].i_len - sizeof(xfs_dinode_core_t)); + } + + fields = in_f->ilf_fields; + switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) { + case XFS_ILOG_DEV: + INT_SET(dip->di_u.di_dev, ARCH_CONVERT, in_f->ilf_u.ilfu_rdev); + + break; + case XFS_ILOG_UUID: + dip->di_u.di_muuid = 
in_f->ilf_u.ilfu_uuid; + break; + } + + if (in_f->ilf_size == 2) + goto write_inode_buffer; + len = item->ri_buf[2].i_len; + src = item->ri_buf[2].i_addr; + ASSERT(in_f->ilf_size <= 4); + ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); + ASSERT(!(fields & XFS_ILOG_DFORK) || + (len == in_f->ilf_dsize)); + + switch (fields & XFS_ILOG_DFORK) { + case XFS_ILOG_DDATA: + case XFS_ILOG_DEXT: + memcpy(&dip->di_u, src, len); + break; + + case XFS_ILOG_DBROOT: + xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len, + &(dip->di_u.di_bmbt), + XFS_DFORK_DSIZE(dip, mp)); + break; + + default: + /* + * There are no data fork flags set. + */ + ASSERT((fields & XFS_ILOG_DFORK) == 0); + break; + } + + /* + * If we logged any attribute data, recover it. There may or + * may not have been any other non-core data logged in this + * transaction. + */ + if (in_f->ilf_fields & XFS_ILOG_AFORK) { + if (in_f->ilf_fields & XFS_ILOG_DFORK) { + attr_index = 3; + } else { + attr_index = 2; + } + len = item->ri_buf[attr_index].i_len; + src = item->ri_buf[attr_index].i_addr; + ASSERT(len == in_f->ilf_asize); + + switch (in_f->ilf_fields & XFS_ILOG_AFORK) { + case XFS_ILOG_ADATA: + case XFS_ILOG_AEXT: + dest = XFS_DFORK_APTR(dip); + ASSERT(len <= XFS_DFORK_ASIZE(dip, mp)); + memcpy(dest, src, len); + break; + + case XFS_ILOG_ABROOT: + dest = XFS_DFORK_APTR(dip); + xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len, + (xfs_bmdr_block_t*)dest, + XFS_DFORK_ASIZE(dip, mp)); + break; + + default: + xlog_warn("XFS: xlog_recover_do_inode_trans: Illegal flag"); + ASSERT(0); + xfs_buf_relse(bp); + return XFS_ERROR(EIO); + } + } + + +write_inode_buffer: + if (ITEM_TYPE(item) == XFS_LI_INODE) { + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL || + XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp); + XFS_BUF_SET_FSPRIVATE(bp, mp); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + xfs_bdwrite(mp, bp); + } else { + XFS_BUF_STALE(bp); + error = xfs_bwrite(mp, bp); + } + + return (error); +} /* xlog_recover_do_inode_trans */ + + +/* + * Recover QUOTAOFF records. We simply make a note of it in the xlog_t + * structure, so that we know not to do any dquot item or dquot buffer recovery, + * of that type. + */ +STATIC int +xlog_recover_do_quotaoff_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_qoff_logformat_t *qoff_f; + + if (pass == XLOG_RECOVER_PASS2) { + return (0); + } + + qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr; + ASSERT(qoff_f); + + /* + * The logitem format's flag tells us if this was user quotaoff, + * group quotaoff or both. + */ + if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) + log->l_quotaoffs_flag |= XFS_DQ_USER; + if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) + log->l_quotaoffs_flag |= XFS_DQ_GROUP; + + return (0); +} + + +/* + * Recover a dquot record + */ +STATIC int +xlog_recover_do_dquot_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_mount_t *mp; + xfs_buf_t *bp; + struct xfs_disk_dquot *ddq, *recddq; + int error; + xfs_dq_logformat_t *dq_f; + uint type; + + if (pass == XLOG_RECOVER_PASS1) { + return 0; + } + mp = log->l_mp; + + /* + * Filesystems are required to send in quota flags at mount time. + */ + if (mp->m_qflags == 0) + return (0); + + recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr; + ASSERT(recddq); + /* + * This type of quotas was turned off, so ignore this record. 
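The quotaoff handling works as a simple bitmask filter: xlog_recover_do_quotaoff_trans() records, during the first pass, which quota types were being turned off, and the dquot buffer and dquot record replay in the second pass skips anything of a recorded type. A minimal model of that filter, where Q_USER/Q_GROUP are stand-ins for XFS_DQ_USER/XFS_DQ_GROUP:

#include <stdio.h>

#define Q_USER  0x1     /* stand-ins for XFS_DQ_USER / XFS_DQ_GROUP */
#define Q_GROUP 0x2

static unsigned int quotaoffs;  /* like log->l_quotaoffs_flag, filled in pass 1 */

/* A QUOTAOFF log item tells us which quota types were being turned off. */
static void note_quotaoff(int user, int group)
{
        if (user)
                quotaoffs |= Q_USER;
        if (group)
                quotaoffs |= Q_GROUP;
}

/* In pass 2, a dquot record or dquot buffer of a switched-off type is skipped. */
static int should_replay_dquot(unsigned int type)
{
        return (quotaoffs & type) == 0;
}

int main(void)
{
        note_quotaoff(1, 0);                                        /* user quota off */
        printf("user dquot:  %d\n", should_replay_dquot(Q_USER));   /* 0 -> skip      */
        printf("group dquot: %d\n", should_replay_dquot(Q_GROUP));  /* 1 -> replay    */
        return 0;
}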
+ */ + type = INT_GET(recddq->d_flags, ARCH_CONVERT) & + (XFS_DQ_USER | XFS_DQ_GROUP); + ASSERT(type); + if (log->l_quotaoffs_flag & type) + return (0); + + /* + * At this point we know that quota was _not_ turned off. + * Since the mount flags are not indicating to us otherwise, this + * must mean that quota is on, and the dquot needs to be replayed. + * Remember that we may not have fully recovered the superblock yet, + * so we can't do the usual trick of looking at the SB quota bits. + * + * The other possibility, of course, is that the quota subsystem was + * removed since the last mount - ENOSYS. + */ + dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr; + ASSERT(dq_f); + if ((error = xfs_qm_dqcheck(recddq, + dq_f->qlf_id, + 0, XFS_QMOPT_DOWARN, + "xlog_recover_do_dquot_trans (log copy)"))) { + return XFS_ERROR(EIO); + } + ASSERT(dq_f->qlf_len == 1); + + error = xfs_read_buf(mp, mp->m_ddev_targp, + dq_f->qlf_blkno, + XFS_FSB_TO_BB(mp, dq_f->qlf_len), + 0, &bp); + if (error) { + xfs_ioerror_alert("xlog_recover_do..(read#3)", mp, + bp, dq_f->qlf_blkno); + return error; + } + ASSERT(bp); + ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset); + + /* + * At least the magic num portion should be on disk because this + * was among a chunk of dquots created earlier, and we did some + * minimal initialization then. + */ + if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, + "xlog_recover_do_dquot_trans")) { + xfs_buf_relse(bp); + return XFS_ERROR(EIO); + } + + memcpy(ddq, recddq, item->ri_buf[1].i_len); + + ASSERT(dq_f->qlf_size == 2); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL || + XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp); + XFS_BUF_SET_FSPRIVATE(bp, mp); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + xfs_bdwrite(mp, bp); + + return (0); +} /* xlog_recover_do_dquot_trans */ + +/* + * This routine is called to create an in-core extent free intent + * item from the efi format structure which was logged on disk. + * It allocates an in-core efi, copies the extents from the format + * structure into it, and adds the efi to the AIL with the given + * LSN. + */ +STATIC void +xlog_recover_do_efi_trans(xlog_t *log, + xlog_recover_item_t *item, + xfs_lsn_t lsn, + int pass) +{ + xfs_mount_t *mp; + xfs_efi_log_item_t *efip; + xfs_efi_log_format_t *efi_formatp; + SPLDECL(s); + + if (pass == XLOG_RECOVER_PASS1) { + return; + } + + efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr; + ASSERT(item->ri_buf[0].i_len == + (sizeof(xfs_efi_log_format_t) + + ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t)))); + + mp = log->l_mp; + efip = xfs_efi_init(mp, efi_formatp->efi_nextents); + memcpy((char *)&(efip->efi_format), (char *)efi_formatp, + sizeof(xfs_efi_log_format_t) + + ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t))); + efip->efi_next_extent = efi_formatp->efi_nextents; + efip->efi_flags |= XFS_EFI_COMMITTED; + + AIL_LOCK(mp,s); + /* + * xfs_trans_update_ail() drops the AIL lock. + */ + xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s); +} /* xlog_recover_do_efi_trans */ + + +/* + * This routine is called when an efd format structure is found in + * a committed transaction in the log. It's purpose is to cancel + * the corresponding efi if it was still in the log. To do this + * it searches the AIL for the efi with an id equal to that in the + * efd format structure. If we find it, we remove the efi from the + * AIL and free it. 
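The EFI/EFD pairing the comment above describes is essentially intent/done matching by a 64-bit id: an EFI parks an intent on the AIL, a later EFD with the same id cancels it, and anything still unmatched at the end of recovery has its extents freed by xlog_recover_process_efis(). A toy, list-based model of the matching (not the kernel's AIL, and with no locking):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for an extent-free intent sitting on the AIL. */
struct intent {
        unsigned long long id;
        struct intent *next;
};

static struct intent *pending;

/* EFI seen in a committed transaction: park the intent. */
static void intent_add(unsigned long long id)
{
        struct intent *i = malloc(sizeof(*i));

        i->id = id;
        i->next = pending;
        pending = i;
}

/* EFD seen: the matching intent completed before the crash, so drop it. */
static void intent_done(unsigned long long id)
{
        struct intent **pp = &pending;

        while (*pp != NULL) {
                if ((*pp)->id == id) {
                        struct intent *dead = *pp;

                        *pp = dead->next;
                        free(dead);
                        return;
                }
                pp = &(*pp)->next;
        }
        /* Not found: the EFI was already overwritten in the log; nothing to do. */
}

int main(void)
{
        intent_add(42);
        intent_add(43);
        intent_done(42);
        printf("still pending: %llu\n", pending->id);   /* 43 must have its extents freed */
        return 0;
}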
+ */ +STATIC void +xlog_recover_do_efd_trans(xlog_t *log, + xlog_recover_item_t *item, + int pass) +{ + xfs_mount_t *mp; + xfs_efd_log_format_t *efd_formatp; + xfs_efi_log_item_t *efip=NULL; + xfs_log_item_t *lip; + int gen; + int nexts; + __uint64_t efi_id; + SPLDECL(s); + + if (pass == XLOG_RECOVER_PASS1) { + return; + } + + efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr; + ASSERT(item->ri_buf[0].i_len == + (sizeof(xfs_efd_log_format_t) + + ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_t)))); + efi_id = efd_formatp->efd_efi_id; + + /* + * Search for the efi with the id in the efd format structure + * in the AIL. + */ + mp = log->l_mp; + AIL_LOCK(mp,s); + lip = xfs_trans_first_ail(mp, &gen); + while (lip != NULL) { + if (lip->li_type == XFS_LI_EFI) { + efip = (xfs_efi_log_item_t *)lip; + if (efip->efi_format.efi_id == efi_id) { + /* + * xfs_trans_delete_ail() drops the + * AIL lock. + */ + xfs_trans_delete_ail(mp, lip, s); + break; + } + } + lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + } + if (lip == NULL) { + AIL_UNLOCK(mp, s); + } + + /* + * If we found it, then free it up. If it wasn't there, it + * must have been overwritten in the log. Oh well. + */ + if (lip != NULL) { + nexts = efip->efi_format.efi_nextents; + if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { + kmem_free(lip, sizeof(xfs_efi_log_item_t) + + ((nexts - 1) * sizeof(xfs_extent_t))); + } else { + kmem_zone_free(xfs_efi_zone, efip); + } + } +} /* xlog_recover_do_efd_trans */ + +/* + * Perform the transaction + * + * If the transaction modifies a buffer or inode, do it now. Otherwise, + * EFIs and EFDs get queued up by adding entries into the AIL for them. + */ +STATIC int +xlog_recover_do_trans(xlog_t *log, + xlog_recover_t *trans, + int pass) +{ + int error = 0; + xlog_recover_item_t *item, *first_item; + + if ((error = xlog_recover_reorder_trans(log, trans))) + return error; + first_item = item = trans->r_itemq; + do { + /* + * we don't need to worry about the block number being + * truncated in > 1 TB buffers because in user-land, + * we're now n32 or 64-bit so xfs_daddr_t is 64-bits so + * the blkno's will get through the user-mode buffer + * cache properly. The only bad case is o32 kernels + * where xfs_daddr_t is 32-bits but mount will warn us + * off a > 1 TB filesystem before we get here. + */ + if ((ITEM_TYPE(item) == XFS_LI_BUF) || + (ITEM_TYPE(item) == XFS_LI_6_1_BUF) || + (ITEM_TYPE(item) == XFS_LI_5_3_BUF)) { + if ((error = xlog_recover_do_buffer_trans(log, item, + pass))) + break; + } else if ((ITEM_TYPE(item) == XFS_LI_INODE) || + (ITEM_TYPE(item) == XFS_LI_6_1_INODE) || + (ITEM_TYPE(item) == XFS_LI_5_3_INODE)) { + if ((error = xlog_recover_do_inode_trans(log, item, + pass))) + break; + } else if (ITEM_TYPE(item) == XFS_LI_EFI) { + xlog_recover_do_efi_trans(log, item, trans->r_lsn, + pass); + } else if (ITEM_TYPE(item) == XFS_LI_EFD) { + xlog_recover_do_efd_trans(log, item, pass); + } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) { + if ((error = xlog_recover_do_dquot_trans(log, item, + pass))) + break; + } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) { + if ((error = xlog_recover_do_quotaoff_trans(log, item, + pass))) + break; + } else { + xlog_warn("XFS: xlog_recover_do_trans"); + ASSERT(0); + error = XFS_ERROR(EIO); + break; + } + item = item->ri_next; + } while (first_item != item); + + return error; +} /* xlog_recover_do_trans */ + + +/* + * Free up any resources allocated by the transaction + * + * Remember that EFIs, EFDs, and IUNLINKs are handled later. 
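xlog_recover_do_trans() above walks the transaction's item queue, which is a circular singly linked list: start at r_itemq and stop when the walk comes back to the first item. The same traversal pattern in isolation, with toy types and nothing XFS-specific:

#include <stdio.h>

/* Toy item on a circular, singly linked queue like trans->r_itemq above. */
struct item {
        int type;
        struct item *next;
};

/* Visit every item exactly once: stop when the walk returns to the first one. */
static void for_each_item(struct item *first, void (*fn)(struct item *))
{
        struct item *it = first;

        do {
                fn(it);
                it = it->next;
        } while (it != first);
}

static void show(struct item *it)
{
        printf("dispatch item of type %d\n", it->type);
}

int main(void)
{
        struct item a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };

        a.next = &b;
        b.next = &c;
        c.next = &a;            /* close the ring */
        for_each_item(&a, show);
        return 0;
}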
+ */ +STATIC void +xlog_recover_free_trans(xlog_recover_t *trans) +{ + xlog_recover_item_t *first_item, *item, *free_item; + int i; + + item = first_item = trans->r_itemq; + do { + free_item = item; + item = item->ri_next; + /* Free the regions in the item. */ + for (i = 0; i < free_item->ri_cnt; i++) { + kmem_free(free_item->ri_buf[i].i_addr, + free_item->ri_buf[i].i_len); + } + /* Free the item itself */ + kmem_free(free_item->ri_buf, + (free_item->ri_total * sizeof(xfs_log_iovec_t))); + kmem_free(free_item, sizeof(xlog_recover_item_t)); + } while (first_item != item); + /* Free the transaction recover structure */ + kmem_free(trans, sizeof(xlog_recover_t)); +} /* xlog_recover_free_trans */ + + +STATIC int +xlog_recover_commit_trans(xlog_t *log, + xlog_recover_t **q, + xlog_recover_t *trans, + int pass) +{ + int error; + + if ((error = xlog_recover_unlink_tid(q, trans))) + return error; + if ((error = xlog_recover_do_trans(log, trans, pass))) + return error; + xlog_recover_free_trans(trans); /* no error */ + return 0; +} /* xlog_recover_commit_trans */ + + +/*ARGSUSED*/ +STATIC int +xlog_recover_unmount_trans(xlog_recover_t *trans) +{ + /* Do nothing now */ + xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR"); + return( 0 ); +} /* xlog_recover_unmount_trans */ + + +/* + * There are two valid states of the r_state field. 0 indicates that the + * transaction structure is in a normal state. We have either seen the + * start of the transaction or the last operation we added was not a partial + * operation. If the last operation we added to the transaction was a + * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS. + * + * NOTE: skip LRs with 0 data length. + */ +STATIC int +xlog_recover_process_data(xlog_t *log, + xlog_recover_t *rhash[], + xlog_rec_header_t *rhead, + xfs_caddr_t dp, + int pass) +{ + xfs_caddr_t lp = dp+INT_GET(rhead->h_len, ARCH_CONVERT); + int num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT); + xlog_op_header_t *ohead; + xlog_recover_t *trans; + xlog_tid_t tid; + int error; + unsigned long hash; + uint flags; + + /* check the log format matches our own - else we can't recover */ + if (xlog_header_check_recover(log->l_mp, rhead)) + return (XFS_ERROR(EIO)); + + while ((dp < lp) && num_logops) { + ASSERT(dp + sizeof(xlog_op_header_t) <= lp); + ohead = (xlog_op_header_t *)dp; + dp += sizeof(xlog_op_header_t); + if (ohead->oh_clientid != XFS_TRANSACTION && + ohead->oh_clientid != XFS_LOG) { + xlog_warn("XFS: xlog_recover_process_data: bad clientid"); + ASSERT(0); + return (XFS_ERROR(EIO)); + } + tid = INT_GET(ohead->oh_tid, ARCH_CONVERT); + hash = XLOG_RHASH(tid); + trans = xlog_recover_find_tid(rhash[hash], tid); + if (trans == NULL) { /* not found; add new tid */ + if (ohead->oh_flags & XLOG_START_TRANS) + xlog_recover_new_tid(&rhash[hash], tid, INT_GET(rhead->h_lsn, ARCH_CONVERT)); + } else { + ASSERT(dp+INT_GET(ohead->oh_len, ARCH_CONVERT) <= lp); + flags = ohead->oh_flags & ~XLOG_END_TRANS; + if (flags & XLOG_WAS_CONT_TRANS) + flags &= ~XLOG_CONTINUE_TRANS; + switch (flags) { + case XLOG_COMMIT_TRANS: { + error = xlog_recover_commit_trans(log, &rhash[hash], + trans, pass); + break; + } + case XLOG_UNMOUNT_TRANS: { + error = xlog_recover_unmount_trans(trans); + break; + } + case XLOG_WAS_CONT_TRANS: { + error = xlog_recover_add_to_cont_trans(trans, dp, + INT_GET(ohead->oh_len, ARCH_CONVERT)); + break; + } + case XLOG_START_TRANS : { + xlog_warn("XFS: xlog_recover_process_data: bad transaction"); + ASSERT(0); + error = XFS_ERROR(EIO); + break; 
+ } + case 0: + case XLOG_CONTINUE_TRANS: { + error = xlog_recover_add_to_trans(trans, dp, + INT_GET(ohead->oh_len, ARCH_CONVERT)); + break; + } + default: { + xlog_warn("XFS: xlog_recover_process_data: bad flag"); + ASSERT(0); + error = XFS_ERROR(EIO); + break; + } + } /* switch */ + if (error) + return error; + } /* if */ + dp += INT_GET(ohead->oh_len, ARCH_CONVERT); + num_logops--; + } + return( 0 ); +} /* xlog_recover_process_data */ + + +/* + * Process an extent free intent item that was recovered from + * the log. We need to free the extents that it describes. + */ +STATIC void +xlog_recover_process_efi(xfs_mount_t *mp, + xfs_efi_log_item_t *efip) +{ + xfs_efd_log_item_t *efdp; + xfs_trans_t *tp; + int i; + xfs_extent_t *extp; + xfs_fsblock_t startblock_fsb; + + ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED)); + + /* + * First check the validity of the extents described by the + * EFI. If any are bad, then assume that all are bad and + * just toss the EFI. + */ + for (i = 0; i < efip->efi_format.efi_nextents; i++) { + extp = &(efip->efi_format.efi_extents[i]); + startblock_fsb = XFS_BB_TO_FSB(mp, + XFS_FSB_TO_DADDR(mp, extp->ext_start)); + if ((startblock_fsb == 0) || + (extp->ext_len == 0) || + (startblock_fsb >= mp->m_sb.sb_dblocks) || + (extp->ext_len >= mp->m_sb.sb_agblocks)) { + /* + * This will pull the EFI from the AIL and + * free the memory associated with it. + */ + xfs_efi_release(efip, efip->efi_format.efi_nextents); + return; + } + } + + tp = xfs_trans_alloc(mp, 0); + xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0); + efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); + + for (i = 0; i < efip->efi_format.efi_nextents; i++) { + extp = &(efip->efi_format.efi_extents[i]); + xfs_free_extent(tp, extp->ext_start, extp->ext_len); + xfs_trans_log_efd_extent(tp, efdp, extp->ext_start, + extp->ext_len); + } + + efip->efi_flags |= XFS_EFI_RECOVERED; + xfs_trans_commit(tp, 0, NULL); +} /* xlog_recover_process_efi */ + + +/* + * Verify that once we've encountered something other than an EFI + * in the AIL that there are no more EFIs in the AIL. + */ +#if defined(DEBUG) +STATIC void +xlog_recover_check_ail(xfs_mount_t *mp, + xfs_log_item_t *lip, + int gen) +{ + int orig_gen; + + orig_gen = gen; + do { + ASSERT(lip->li_type != XFS_LI_EFI); + lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + /* + * The check will be bogus if we restart from the + * beginning of the AIL, so ASSERT that we don't. + * We never should since we're holding the AIL lock + * the entire time. + */ + ASSERT(gen == orig_gen); + } while (lip != NULL); +} +#endif /* DEBUG */ + + +/* + * When this is called, all of the EFIs which did not have + * corresponding EFDs should be in the AIL. What we do now + * is free the extents associated with each one. + * + * Since we process the EFIs in normal transactions, they + * will be removed at some point after the commit. This prevents + * us from just walking down the list processing each one. + * We'll use a flag in the EFI to skip those that we've already + * processed and use the AIL iteration mechanism's generation + * count to try to speed this up at least a bit. + * + * When we start, we know that the EFIs are the only things in + * the AIL. As we process them, however, other items are added + * to the AIL. Since everything added to the AIL must come after + * everything already in the AIL, we stop processing as soon as + * we see something other than an EFI in the AIL. 
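The EFI processing strategy described above relies on AIL ordering: everything added during recovery lands after the pre-existing EFIs, so the walk can skip already-recovered intents and stop outright at the first non-EFI item. A compact model of that walk over an ordered array (the real code iterates the AIL under its lock and drops the lock around each xlog_recover_process_efi() call):

#include <stdio.h>

#define T_EFI   1       /* illustrative item types */
#define T_OTHER 2

struct ail_item {
        int type;
        int recovered;  /* plays the role of the XFS_EFI_RECOVERED flag */
};

/*
 * Walk an AIL-like list from its oldest entry: skip intents already handled,
 * "recover" the rest, and stop at the first non-EFI item, because everything
 * after that point was added during recovery itself.
 */
static void process_intents(struct ail_item *ail, int n)
{
        for (int i = 0; i < n; i++) {
                if (ail[i].type != T_EFI)
                        break;
                if (ail[i].recovered)
                        continue;
                ail[i].recovered = 1;
                printf("freeing extents for intent %d\n", i);
        }
}

int main(void)
{
        struct ail_item ail[] = {
                { T_EFI, 0 }, { T_EFI, 1 }, { T_EFI, 0 }, { T_OTHER, 0 },
        };

        process_intents(ail, 4);
        return 0;
}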
+ */ +STATIC void +xlog_recover_process_efis(xlog_t *log) +{ + xfs_log_item_t *lip; + xfs_efi_log_item_t *efip; + int gen; + xfs_mount_t *mp; + SPLDECL(s); + + mp = log->l_mp; + AIL_LOCK(mp,s); + + lip = xfs_trans_first_ail(mp, &gen); + while (lip != NULL) { + /* + * We're done when we see something other than an EFI. + */ + if (lip->li_type != XFS_LI_EFI) { + xlog_recover_check_ail(mp, lip, gen); + break; + } + + /* + * Skip EFIs that we've already processed. + */ + efip = (xfs_efi_log_item_t *)lip; + if (efip->efi_flags & XFS_EFI_RECOVERED) { + lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + continue; + } + + AIL_UNLOCK(mp, s); + xlog_recover_process_efi(mp, efip); + AIL_LOCK(mp,s); + lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + } + AIL_UNLOCK(mp, s); +} /* xlog_recover_process_efis */ + + +/* + * This routine performs a transaction to null out a bad inode pointer + * in an agi unlinked inode hash bucket. + */ +STATIC void +xlog_recover_clear_agi_bucket( + xfs_mount_t *mp, + xfs_agnumber_t agno, + int bucket) +{ + xfs_trans_t *tp; + xfs_agi_t *agi; + xfs_buf_t *agibp; + int offset; + int error; + + tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET); + xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0); + + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &agibp); + if (error) { + xfs_trans_cancel(tp, XFS_TRANS_ABORT); + return; + } + + agi = XFS_BUF_TO_AGI(agibp); + if (INT_GET(agi->agi_magicnum, ARCH_CONVERT) != XFS_AGI_MAGIC) { + xfs_trans_cancel(tp, XFS_TRANS_ABORT); + return; + } + ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); + + INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, NULLAGINO); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + + (void) xfs_trans_commit(tp, 0, NULL); +} /* xlog_recover_clear_agi_bucket */ + + +/* + * xlog_iunlink_recover + * + * This is called during recovery to process any inodes which + * we unlinked but not freed when the system crashed. These + * inodes will be on the lists in the AGI blocks. What we do + * here is scan all the AGIs and fully truncate and free any + * inodes found on the lists. Each inode is removed from the + * lists when it has been fully truncated and is freed. The + * freeing of the inode and its removal from the list must be + * atomic. + */ +void +xlog_recover_process_iunlinks(xlog_t *log) +{ + xfs_mount_t *mp; + xfs_agnumber_t agno; + xfs_agi_t *agi; + xfs_buf_t *agibp; + xfs_buf_t *ibp; + xfs_dinode_t *dip; + xfs_inode_t *ip; + xfs_agino_t agino; + xfs_ino_t ino; + int bucket; + int error; + uint mp_dmevmask; + + mp = log->l_mp; + + /* + * Prevent any DMAPI event from being sent while in this function. + */ + mp_dmevmask = mp->m_dmevmask; + mp->m_dmevmask = 0; + + for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { + /* + * Find the agi for this ag. 
+ */ + agibp = xfs_buf_read(mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0); + if (XFS_BUF_ISERROR(agibp)) { + xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)", + log->l_mp, agibp, + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp))); + } + agi = XFS_BUF_TO_AGI(agibp); + ASSERT(XFS_AGI_MAGIC == + INT_GET(agi->agi_magicnum, ARCH_CONVERT)); + + for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { + + agino = INT_GET(agi->agi_unlinked[bucket], ARCH_CONVERT); + while (agino != NULLAGINO) { + + /* + * Release the agi buffer so that it can + * be acquired in the normal course of the + * transaction to truncate and free the inode. + */ + xfs_buf_relse(agibp); + + ino = XFS_AGINO_TO_INO(mp, agno, agino); + error = xfs_iget(mp, NULL, ino, 0, &ip, 0); + ASSERT(error || (ip != NULL)); + + if (!error) { + /* + * Get the on disk inode to find the + * next inode in the bucket. + */ + error = xfs_itobp(mp, NULL, ip, &dip, + &ibp, 0); + ASSERT(error || (dip != NULL)); + } + + if (!error) { + ASSERT(ip->i_d.di_nlink == 0); + + /* setup for the next pass */ + agino = INT_GET(dip->di_next_unlinked, + ARCH_CONVERT); + xfs_buf_relse(ibp); + /* + * Prevent any DMAPI event from + * being sent when the + * reference on the inode is + * dropped. + */ + ip->i_d.di_dmevmask = 0; + + /* + * If this is a new inode, handle + * it specially. Otherwise, + * just drop our reference to the + * inode. If there are no + * other references, this will + * send the inode to + * xfs_inactive() which will + * truncate the file and free + * the inode. + */ + if (ip->i_d.di_mode == 0) + xfs_iput_new(ip, 0); + else + VN_RELE(XFS_ITOV(ip)); + } else { + /* + * We can't read in the inode + * this bucket points to, or + * this inode is messed up. Just + * ditch this bucket of inodes. We + * will lose some inodes and space, + * but at least we won't hang. Call + * xlog_recover_clear_agi_bucket() + * to perform a transaction to clear + * the inode pointer in the bucket. + */ + xlog_recover_clear_agi_bucket(mp, agno, + bucket); + + agino = NULLAGINO; + } + + /* + * Reacquire the agibuffer and continue around + * the loop. + */ + agibp = xfs_buf_read(mp->m_ddev_targp, + XFS_AG_DADDR(mp, agno, + XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0); + if (XFS_BUF_ISERROR(agibp)) { + xfs_ioerror_alert( + "xlog_recover_process_iunlinks(#2)", + log->l_mp, agibp, + XFS_AG_DADDR(mp, agno, + XFS_AGI_DADDR(mp))); + } + agi = XFS_BUF_TO_AGI(agibp); + ASSERT(XFS_AGI_MAGIC == INT_GET( + agi->agi_magicnum, ARCH_CONVERT)); + } + } + + /* + * Release the buffer for the current agi so we can + * go on to the next one. 
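Stripped of the buffer juggling and error handling, the unlinked-inode processing above is a walk of per-bucket singly linked lists whose links live in the on-disk inodes' di_next_unlinked fields, ending at the NULLAGINO sentinel. A toy version with in-memory arrays (the kernel instead re-reads the AGI around every step and frees each inode through the normal transaction paths, falling back to xlog_recover_clear_agi_bucket() on errors):

#include <stdio.h>

#define NULLAGINO 0xffffffffu   /* end-of-list sentinel, as in XFS */
#define NBUCKETS  4             /* the real AGI has 64 unlinked buckets */

/* Toy on-disk inode: only the unlinked-list link matters here. */
struct dinode {
        unsigned int next_unlinked;
};

/* Free every inode on every bucket's list, then leave the bucket empty. */
static void process_unlinked(unsigned int bucket_heads[NBUCKETS],
                             struct dinode *inodes)
{
        for (int b = 0; b < NBUCKETS; b++) {
                unsigned int agino = bucket_heads[b];

                while (agino != NULLAGINO) {
                        unsigned int next = inodes[agino].next_unlinked;

                        printf("bucket %d: truncate and free inode %u\n", b, agino);
                        agino = next;
                }
                bucket_heads[b] = NULLAGINO;
        }
}

int main(void)
{
        struct dinode inodes[8] = {
                [2] = { 5 }, [5] = { NULLAGINO }, [3] = { NULLAGINO },
        };
        unsigned int heads[NBUCKETS] = { 2, NULLAGINO, 3, NULLAGINO };

        process_unlinked(heads, inodes);
        return 0;
}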
+ */ + xfs_buf_relse(agibp); + } + + mp->m_dmevmask = mp_dmevmask; + +} /* xlog_recover_process_iunlinks */ + + +/* + * Stamp cycle number in every block + * + * This routine is also called in xfs_log.c + */ +/*ARGSUSED*/ +void +xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog) +{ + int i, j, k; + int size = iclog->ic_offset + iclog->ic_roundoff; + xfs_caddr_t dp; + union ich { + xlog_rec_ext_header_t hic_xheader; + char hic_sector[XLOG_HEADER_SIZE]; + } *xhdr; + uint cycle_lsn; + +#ifdef DEBUG + uint *up; + uint chksum = 0; + + up = (uint *)iclog->ic_datap; + /* divide length by 4 to get # words */ + for (i=0; i<size >> 2; i++) { + chksum ^= INT_GET(*up, ARCH_CONVERT); + up++; + } + INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum); +#endif /* DEBUG */ + + cycle_lsn = CYCLE_LSN_NOCONV(iclog->ic_header.h_lsn, ARCH_CONVERT); + + dp = iclog->ic_datap; + for (i = 0; i < BTOBB(size) && + i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { + iclog->ic_header.h_cycle_data[i] = *(uint *)dp; + *(uint *)dp = cycle_lsn; + dp += BBSIZE; + } + + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + xhdr = (union ich*)&iclog->ic_header; + for ( ; i < BTOBB(size); i++) { + j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp; + *(uint *)dp = cycle_lsn; + dp += BBSIZE; + } + + for (i = 1; i < log->l_iclog_heads; i++) { + xhdr[i].hic_xheader.xh_cycle = cycle_lsn; + } + } + +} /* xlog_pack_data */ + + +/*ARGSUSED*/ +STATIC void +xlog_unpack_data(xlog_rec_header_t *rhead, + xfs_caddr_t dp, + xlog_t *log) +{ + int i, j, k; + union ich { + xlog_rec_header_t hic_header; + xlog_rec_ext_header_t hic_xheader; + char hic_sector[XLOG_HEADER_SIZE]; + } *xhdr; + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + uint *up = (uint *)dp; + uint chksum = 0; +#endif + + for (i=0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) && + i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { + *(uint *)dp = *(uint *)&rhead->h_cycle_data[i]; + dp += BBSIZE; + } + + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + xhdr = (union ich*)rhead; + for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) { + j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); + *(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; + dp += BBSIZE; + } + } + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + /* divide length by 4 to get # words */ + for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) { + chksum ^= INT_GET(*up, ARCH_CONVERT); + up++; + } + if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) { + if (!INT_ISZERO(rhead->h_chksum, ARCH_CONVERT) || + ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) { + cmn_err(CE_DEBUG, + "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)", + INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum); + cmn_err(CE_DEBUG, +"XFS: Disregard message if filesystem was created with non-DEBUG kernel"); + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + cmn_err(CE_DEBUG, + "XFS: LogR this is a LogV2 filesystem"); + } + log->l_flags |= XLOG_CHKSUM_MISMATCH; + } + } +#endif /* DEBUG && XFS_LOUD_RECOVERY */ +} /* xlog_unpack_data */ + + +/* + * Read the log from tail to head and process the log records found. + * Handle the two cases where the tail and head are in the same cycle + * and where the active portion of the log wraps around the end of + * the physical log separately. The pass parameter is passed through + * to the routines called to process the data and is not looked at + * here. 
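The xlog_pack_data()/xlog_unpack_data() pair above implements the cycle-stamping trick that the rest of recovery depends on: before a record is written, the first word of every 512-byte block is saved into the record header and replaced with the cycle number, so head and tail detection can classify any sector by reading a single word; unpacking restores the saved words before replay. A cut-down model without the v2 extended headers or the debug checksum, with illustrative names and sizes:

#include <stdio.h>
#include <string.h>

#define BBSIZE  512     /* basic block size, as in the log code */
#define MAXBLKS 8

/* Toy record header: just the cycle number and the per-block saved words. */
struct rec_header {
        unsigned int cycle;
        unsigned int cycle_data[MAXBLKS];
};

/* Save the first word of each block, then stamp the cycle number over it. */
static void pack(struct rec_header *h, char *data, int nblocks)
{
        for (int i = 0; i < nblocks; i++) {
                memcpy(&h->cycle_data[i], data + i * BBSIZE, sizeof(unsigned int));
                memcpy(data + i * BBSIZE, &h->cycle, sizeof(unsigned int));
        }
}

/* Put the saved words back so the record body can be replayed. */
static void unpack(const struct rec_header *h, char *data, int nblocks)
{
        for (int i = 0; i < nblocks; i++)
                memcpy(data + i * BBSIZE, &h->cycle_data[i], sizeof(unsigned int));
}

int main(void)
{
        static char data[2 * BBSIZE] = "payload";
        struct rec_header h = { .cycle = 7 };

        pack(&h, data, 2);      /* every block now begins with the cycle number */
        unpack(&h, data, 2);    /* original contents restored for replay        */
        printf("%s\n", data);
        return 0;
}

The payoff is that a stale sector always carries an older cycle number in its first word, which is what lets the head-finding code binary-search the log without parsing record bodies.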
+ */ +STATIC int +xlog_do_recovery_pass(xlog_t *log, + xfs_daddr_t head_blk, + xfs_daddr_t tail_blk, + int pass) +{ + xlog_rec_header_t *rhead; + xfs_daddr_t blk_no; + xfs_caddr_t bufaddr; + xfs_buf_t *hbp, *dbp; + int error, h_size; + int bblks, split_bblks; + int hblks, split_hblks, wrapped_hblks; + xlog_recover_t *rhash[XLOG_RHASH_SIZE]; + + error = 0; + + + /* + * Read the header of the tail block and get the iclog buffer size from + * h_size. Use this to tell how many sectors make up the log header. + */ + if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { + /* + * When using variable length iclogs, read first sector of iclog + * header and extract the header size from it. Get a new hbp that + * is the correct size. + */ + hbp = xlog_get_bp(1, log->l_mp); + if (!hbp) + return ENOMEM; + if ((error = xlog_bread(log, tail_blk, 1, hbp))) + goto bread_err1; + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(hbp); + ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) == + XLOG_HEADER_MAGIC_NUM); + if ((INT_GET(rhead->h_version, ARCH_CONVERT) & (~XLOG_VERSION_OKBITS)) != 0) { + xlog_warn("XFS: xlog_do_recovery_pass: unrecognised log version number."); + error = XFS_ERROR(EIO); + goto bread_err1; + } + h_size = INT_GET(rhead->h_size, ARCH_CONVERT); + + if ((INT_GET(rhead->h_version, ARCH_CONVERT) & XLOG_VERSION_2) && + (h_size > XLOG_HEADER_CYCLE_SIZE)) { + hblks = h_size / XLOG_HEADER_CYCLE_SIZE; + if (h_size % XLOG_HEADER_CYCLE_SIZE) + hblks++; + xlog_put_bp(hbp); + hbp = xlog_get_bp(hblks, log->l_mp); + } else { + hblks=1; + } + } else { + hblks=1; + hbp = xlog_get_bp(1, log->l_mp); + h_size = XLOG_BIG_RECORD_BSIZE; + } + + if (!hbp) + return ENOMEM; + dbp = xlog_get_bp(BTOBB(h_size),log->l_mp); + if (!dbp) { + xlog_put_bp(hbp); + return ENOMEM; + } + + memset(rhash, 0, sizeof(rhash)); + if (tail_blk <= head_blk) { + for (blk_no = tail_blk; blk_no < head_blk; ) { + if ((error = xlog_bread(log, blk_no, hblks, hbp))) + goto bread_err2; + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(hbp); + ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + ASSERT(BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) <= INT_MAX)); + bblks = (int) BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); /* blocks in data section */ + + if (unlikely((INT_GET(rhead->h_magicno, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) || + (BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) > INT_MAX)) || + (bblks <= 0) || + (blk_no > log->l_logBBsize))) { + XFS_ERROR_REPORT("xlog_do_recovery_pass(1)", + XFS_ERRLEVEL_LOW, log->l_mp); + error = EFSCORRUPTED; + goto bread_err2; + } + + if ((INT_GET(rhead->h_version, ARCH_CONVERT) & (~XLOG_VERSION_OKBITS)) != 0) { + xlog_warn("XFS: xlog_do_recovery_pass: unrecognised log version number."); + error = XFS_ERROR(EIO); + goto bread_err2; + } + bblks = (int) BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); /* blocks in data section */ + if (bblks > 0) { + if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) + goto bread_err2; + xlog_unpack_data(rhead, XFS_BUF_PTR(dbp), log); + if ((error = xlog_recover_process_data(log, rhash, + rhead, XFS_BUF_PTR(dbp), + pass))) + goto bread_err2; + } + blk_no += (bblks+hblks); + } + } else { + /* + * Perform recovery around the end of the physical log. When the head + * is not on the same cycle number as the tail, we can't do a sequential + * recovery as above. 
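When the active log wraps past the physical end of the device, as the comment above says, a record's header or body can be split into two pieces: the part up to l_logBBsize and the remainder starting at block 0, which is why xlog_do_recovery_pass() does the split xlog_bread() calls and buffer-pointer fix-ups below. The arithmetic in isolation, with toy sizes and contiguous memory standing in for the log device:

#include <stdio.h>
#include <string.h>

#define LOG_BLOCKS 16   /* toy circular log size, in blocks */
#define BBSIZE     8    /* tiny blocks keep the demo readable */

static char log_image[LOG_BLOCKS * BBSIZE];

/* Read nblocks starting at blk, splitting the read at the physical end of the log. */
static void read_wrapped(int blk, int nblocks, char *dst)
{
        int before_end = LOG_BLOCKS - blk;

        if (nblocks <= before_end) {
                memcpy(dst, log_image + blk * BBSIZE, nblocks * BBSIZE);
                return;
        }
        /* Split read: the tail end of the device, then the rest from block 0. */
        memcpy(dst, log_image + blk * BBSIZE, before_end * BBSIZE);
        memcpy(dst + before_end * BBSIZE, log_image,
               (nblocks - before_end) * BBSIZE);
}

int main(void)
{
        char buf[4 * BBSIZE];

        for (int i = 0; i < LOG_BLOCKS; i++)
                log_image[i * BBSIZE] = 'A' + i;        /* tag each block */

        read_wrapped(14, 4, buf);                       /* blocks 14, 15, 0, 1 */
        printf("%c %c %c %c\n", buf[0], buf[BBSIZE], buf[2 * BBSIZE], buf[3 * BBSIZE]);
        return 0;
}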
+ */ + blk_no = tail_blk; + while (blk_no < log->l_logBBsize) { + /* + * Check for header wrapping around physical end-of-log + */ + wrapped_hblks = 0; + if (blk_no+hblks <= log->l_logBBsize) { + /* Read header in one read */ + if ((error = xlog_bread(log, blk_no, hblks, hbp))) + goto bread_err2; + } else { + /* This log record is split across physical end of log */ + split_hblks = 0; + if (blk_no != log->l_logBBsize) { + /* some data is before physical end of log */ + ASSERT(blk_no <= INT_MAX); + split_hblks = log->l_logBBsize - (int)blk_no; + ASSERT(split_hblks > 0); + if ((error = xlog_bread(log, blk_no, split_hblks, hbp))) + goto bread_err2; + } + bufaddr = XFS_BUF_PTR(hbp); + XFS_BUF_SET_PTR(hbp, bufaddr + BBTOB(split_hblks), + BBTOB(hblks - split_hblks)); + wrapped_hblks = hblks - split_hblks; + if ((error = xlog_bread(log, 0, wrapped_hblks, hbp))) + goto bread_err2; + XFS_BUF_SET_PTR(hbp, bufaddr, hblks); + } + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(hbp); + ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + ASSERT(BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) <= INT_MAX)); + bblks = (int) BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); + + /* LR body must have data or it wouldn't have been written */ + ASSERT(bblks > 0); + blk_no += hblks; /* successfully read header */ + + if (unlikely((INT_GET(rhead->h_magicno, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) || + (BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) > INT_MAX)) || + (bblks <= 0))) { + XFS_ERROR_REPORT("xlog_do_recovery_pass(2)", + XFS_ERRLEVEL_LOW, log->l_mp); + error = EFSCORRUPTED; + goto bread_err2; + } + + /* Read in data for log record */ + if (blk_no+bblks <= log->l_logBBsize) { + if ((error = xlog_bread(log, blk_no, bblks, dbp))) + goto bread_err2; + } else { + /* This log record is split across physical end of log */ + split_bblks = 0; + if (blk_no != log->l_logBBsize) { + + /* some data is before physical end of log */ + ASSERT(blk_no <= INT_MAX); + split_bblks = log->l_logBBsize - (int)blk_no; + ASSERT(split_bblks > 0); + if ((error = xlog_bread(log, blk_no, split_bblks, dbp))) + goto bread_err2; + } + bufaddr = XFS_BUF_PTR(dbp); + XFS_BUF_SET_PTR(dbp, bufaddr + BBTOB(split_bblks), + BBTOB(bblks - split_bblks)); + if ((error = xlog_bread(log, wrapped_hblks, + bblks - split_bblks, dbp))) + goto bread_err2; + XFS_BUF_SET_PTR(dbp, bufaddr, XLOG_BIG_RECORD_BSIZE); + } + xlog_unpack_data(rhead, XFS_BUF_PTR(dbp), log); + if ((error = xlog_recover_process_data(log, rhash, + rhead, XFS_BUF_PTR(dbp), + pass))) + goto bread_err2; + blk_no += bblks; + } + + ASSERT(blk_no >= log->l_logBBsize); + blk_no -= log->l_logBBsize; + + /* read first part of physical log */ + while (blk_no < head_blk) { + if ((error = xlog_bread(log, blk_no, hblks, hbp))) + goto bread_err2; + rhead = (xlog_rec_header_t *)XFS_BUF_PTR(hbp); + ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); + ASSERT(BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) <= INT_MAX)); + bblks = (int) BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); + ASSERT(bblks > 0); + if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) + goto bread_err2; + xlog_unpack_data(rhead, XFS_BUF_PTR(dbp), log); + if ((error = xlog_recover_process_data(log, rhash, + rhead, XFS_BUF_PTR(dbp), + pass))) + goto bread_err2; + blk_no += (bblks+hblks); + } + } + +bread_err2: + xlog_put_bp(dbp); +bread_err1: + xlog_put_bp(hbp); + + return error; +} + +/* + * Do the recovery of the log. We actually do this in two phases. 
+ * The two passes are necessary in order to implement the function + * of cancelling a record written into the log. The first pass + * determines those things which have been cancelled, and the + * second pass replays log items normally except for those which + * have been cancelled. The handling of the replay and cancellations + * takes place in the log item type specific routines. + * + * The table of items which have cancel records in the log is allocated + * and freed at this level, since only here do we know when all of + * the log recovery has been completed. + */ +STATIC int +xlog_do_log_recovery(xlog_t *log, + xfs_daddr_t head_blk, + xfs_daddr_t tail_blk) +{ + int error; +#ifdef DEBUG + int i; +#endif + + /* + * First do a pass to find all of the cancelled buf log items. + * Store them in the buf_cancel_table for use in the second pass. + */ + log->l_buf_cancel_table = + (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE * + sizeof(xfs_buf_cancel_t*), + KM_SLEEP); + error = xlog_do_recovery_pass(log, head_blk, tail_blk, + XLOG_RECOVER_PASS1); + if (error != 0) { + kmem_free(log->l_buf_cancel_table, + XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*)); + log->l_buf_cancel_table = NULL; + return error; + } + /* + * Then do a second pass to actually recover the items in the log. + * When it is complete free the table of buf cancel items. + */ + error = xlog_do_recovery_pass(log, head_blk, tail_blk, + XLOG_RECOVER_PASS2); +#ifdef DEBUG + for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) { + ASSERT(log->l_buf_cancel_table[i] == NULL); + } +#endif /* DEBUG */ + kmem_free(log->l_buf_cancel_table, + XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*)); + log->l_buf_cancel_table = NULL; + + return error; +} + +/* + * Do the actual recovery + */ +STATIC int +xlog_do_recover(xlog_t *log, + xfs_daddr_t head_blk, + xfs_daddr_t tail_blk) +{ + int error; + xfs_buf_t *bp; + xfs_sb_t *sbp; + + /* + * First replay the images in the log. + */ + error = xlog_do_log_recovery(log, head_blk, tail_blk); + if (error) { + return error; + } + + XFS_bflush(log->l_mp->m_ddev_targp); + + /* + * If IO errors happened during recovery, bail out. + */ + if (XFS_FORCED_SHUTDOWN(log->l_mp)) { + return (EIO); + } + + /* + * We now update the tail_lsn since much of the recovery has completed + * and there may be space available to use. If there were no extent + * or iunlinks, we can free up the entire log and set the tail_lsn to + * be the last_sync_lsn. This was set in xlog_find_tail to be the + * lsn of the last known good LR on disk. If there are extent frees + * or iunlinks they will have some entries in the AIL; so we look at + * the AIL to determine how to set the tail_lsn. + */ + xlog_assign_tail_lsn(log->l_mp); + + /* + * Now that we've finished replaying all buffer and inode + * updates, re-read in the superblock. 
+ */ + bp = xfs_getsb(log->l_mp, 0); + XFS_BUF_UNDONE(bp); + XFS_BUF_READ(bp); + xfsbdstrat(log->l_mp, bp); + if ((error = xfs_iowait(bp))) { + xfs_ioerror_alert("xlog_do_recover", + log->l_mp, bp, XFS_BUF_ADDR(bp)); + ASSERT(0); + xfs_buf_relse(bp); + return error; + } + + /* Convert superblock from on-disk format */ + sbp = &log->l_mp->m_sb; + xfs_xlatesb(XFS_BUF_TO_SBP(bp), sbp, 1, ARCH_CONVERT, XFS_SB_ALL_BITS); + ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC); + ASSERT(XFS_SB_GOOD_VERSION(sbp)); + xfs_buf_relse(bp); + + xlog_recover_check_summary(log); + + /* Normal transactions can now occur */ + log->l_flags &= ~XLOG_ACTIVE_RECOVERY; + return 0; +} /* xlog_do_recover */ + +/* + * Perform recovery and re-initialize some log variables in xlog_find_tail. + * + * Return error or zero. + */ +int +xlog_recover(xlog_t *log, int readonly) +{ + xfs_daddr_t head_blk, tail_blk; + int error; + + /* find the tail of the log */ + + if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly))) + return error; + + if (tail_blk != head_blk) { +#ifndef __KERNEL__ + extern xfs_daddr_t HEAD_BLK, TAIL_BLK; + head_blk = HEAD_BLK; + tail_blk = TAIL_BLK; +#endif + /* There used to be a comment here: + * + * disallow recovery on read-only mounts. note -- mount + * checks for ENOSPC and turns it into an intelligent + * error message. + * ...but this is no longer true. Now, unless you specify + * NORECOVERY (in which case this function would never be + * called), we just go ahead and recover. We do this all + * under the vfs layer, so we can get away with it unless + * the device itself is read-only, in which case we fail. + */ +#ifdef __KERNEL__ + if ((error = xfs_dev_is_read_only(log->l_mp, + "recovery required"))) { + return error; + } +#else + if (readonly) { + return ENOSPC; + } +#endif + +#ifdef __KERNEL__ +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + cmn_err(CE_NOTE, + "Starting XFS recovery on filesystem: %s (dev: %d/%d)", + log->l_mp->m_fsname, MAJOR(log->l_dev), + MINOR(log->l_dev)); +#else + cmn_err(CE_NOTE, + "!Starting XFS recovery on filesystem: %s (dev: %d/%d)", + log->l_mp->m_fsname, MAJOR(log->l_dev), + MINOR(log->l_dev)); +#endif +#endif + error = xlog_do_recover(log, head_blk, tail_blk); + log->l_flags |= XLOG_RECOVERY_NEEDED; + } + return error; +} /* xlog_recover */ + + +/* + * In the first part of recovery we replay inodes and buffers and build + * up the list of extent free items which need to be processed. Here + * we process the extent free items and clean up the on disk unlinked + * inode lists. This is separated from the first part of recovery so + * that the root and real-time bitmap inodes can be read in from disk in + * between the two stages. This is necessary so that we can free space + * in the real-time portion of the file system. + */ +int +xlog_recover_finish(xlog_t *log, int mfsi_flags) +{ + /* + * Now we're ready to do the transactions needed for the + * rest of recovery. Start with completing all the extent + * free intent records and then process the unlinked inode + * lists. At this point, we essentially run in normal mode + * except that we're still performing recovery actions + * rather than accepting new requests. + */ + if (log->l_flags & XLOG_RECOVERY_NEEDED) { + xlog_recover_process_efis(log); + /* + * Sync the log to get all the EFIs out of the AIL. + * This isn't absolutely necessary, but it helps in + * case the unlink transactions would have problems + * pushing the EFIs out of the way. 
+ */ + xfs_log_force(log->l_mp, (xfs_lsn_t)0, + (XFS_LOG_FORCE | XFS_LOG_SYNC)); + + if ( (mfsi_flags & XFS_MFSI_NOUNLINK) == 0 ) { + + xlog_recover_process_iunlinks(log); + } + + xlog_recover_check_summary(log); + +#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) + cmn_err(CE_NOTE, + "Ending XFS recovery on filesystem: %s (dev: %d/%d)", + log->l_mp->m_fsname, MAJOR(log->l_dev), + MINOR(log->l_dev)); +#else + cmn_err(CE_NOTE, + "!Ending XFS recovery on filesystem: %s (dev: %d/%d)", + log->l_mp->m_fsname, MAJOR(log->l_dev), + MINOR(log->l_dev)); +#endif + log->l_flags &= ~XLOG_RECOVERY_NEEDED; + } else { + cmn_err(CE_DEBUG, + "!Ending clean XFS mount for filesystem: %s", + log->l_mp->m_fsname); + } + return 0; +} /* xlog_recover_finish */ + + +#if defined(DEBUG) +/* + * Read all of the agf and agi counters and check that they + * are consistent with the superblock counters. + */ +void +xlog_recover_check_summary(xlog_t *log) +{ + xfs_mount_t *mp; + xfs_agf_t *agfp; + xfs_agi_t *agip; + xfs_buf_t *agfbp; + xfs_buf_t *agibp; + xfs_daddr_t agfdaddr; + xfs_daddr_t agidaddr; + xfs_buf_t *sbbp; +#ifdef XFS_LOUD_RECOVERY + xfs_sb_t *sbp; +#endif + xfs_agnumber_t agno; + __uint64_t freeblks; + __uint64_t itotal; + __uint64_t ifree; + + mp = log->l_mp; + + freeblks = 0LL; + itotal = 0LL; + ifree = 0LL; + for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { + agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)); + agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr, + XFS_FSS_TO_BB(mp, 1), 0); + if (XFS_BUF_ISERROR(agfbp)) { + xfs_ioerror_alert("xlog_recover_check_summary(agf)", + mp, agfbp, agfdaddr); + } + agfp = XFS_BUF_TO_AGF(agfbp); + ASSERT(XFS_AGF_MAGIC == + INT_GET(agfp->agf_magicnum, ARCH_CONVERT)); + ASSERT(XFS_AGF_GOOD_VERSION( + INT_GET(agfp->agf_versionnum, ARCH_CONVERT))); + ASSERT(INT_GET(agfp->agf_seqno, ARCH_CONVERT) == agno); + + freeblks += INT_GET(agfp->agf_freeblks, ARCH_CONVERT) + + INT_GET(agfp->agf_flcount, ARCH_CONVERT); + xfs_buf_relse(agfbp); + + agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); + agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr, + XFS_FSS_TO_BB(mp, 1), 0); + if (XFS_BUF_ISERROR(agibp)) { + xfs_ioerror_alert("xlog_recover_check_summary(agi)", + log->l_mp, agibp, agidaddr); + } + agip = XFS_BUF_TO_AGI(agibp); + ASSERT(XFS_AGI_MAGIC == + INT_GET(agip->agi_magicnum, ARCH_CONVERT)); + ASSERT(XFS_AGI_GOOD_VERSION( + INT_GET(agip->agi_versionnum, ARCH_CONVERT))); + ASSERT(INT_GET(agip->agi_seqno, ARCH_CONVERT) == agno); + + itotal += INT_GET(agip->agi_count, ARCH_CONVERT); + ifree += INT_GET(agip->agi_freecount, ARCH_CONVERT); + xfs_buf_relse(agibp); + } + + sbbp = xfs_getsb(mp, 0); +#ifdef XFS_LOUD_RECOVERY + sbp = XFS_BUF_TO_SBP(sbbp); + cmn_err(CE_NOTE, + "xlog_recover_check_summary: sb_icount %Lu itotal %Lu", + sbp->sb_icount, itotal); + cmn_err(CE_NOTE, + "xlog_recover_check_summary: sb_ifree %Lu itotal %Lu", + sbp->sb_ifree, ifree); + cmn_err(CE_NOTE, + "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu", + sbp->sb_fdblocks, freeblks); +#if 0 + /* + * This is turned off until I account for the allocation + * btree blocks which live in free space. 
+ */ + ASSERT(sbp->sb_icount == itotal); + ASSERT(sbp->sb_ifree == ifree); + ASSERT(sbp->sb_fdblocks == freeblks); +#endif +#endif + xfs_buf_relse(sbbp); +} +#endif /* DEBUG */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_log_recover.h linux.22-ac2/fs/xfs/xfs_log_recover.h --- linux.vanilla/fs/xfs/xfs_log_recover.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_log_recover.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_LOG_RECOVER_H__ +#define __XFS_LOG_RECOVER_H__ + +/* + * Macros, structures, prototypes for internal log manager use. + */ + +#define XLOG_RHASH_BITS 4 +#define XLOG_RHASH_SIZE 16 +#define XLOG_RHASH_SHIFT 2 +#define XLOG_RHASH(tid) \ + ((((__uint32_t)tid)>>XLOG_RHASH_SHIFT) & (XLOG_RHASH_SIZE-1)) + +#define XLOG_MAX_REGIONS_IN_ITEM (XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK / 2 + 1) + + +/* + * item headers are in ri_buf[0]. Additional buffers follow. + */ +typedef struct xlog_recover_item { + struct xlog_recover_item *ri_next; + struct xlog_recover_item *ri_prev; + int ri_type; + int ri_cnt; /* count of regions found */ + int ri_total; /* total regions */ + xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */ +} xlog_recover_item_t; + +struct xlog_tid; +typedef struct xlog_recover { + struct xlog_recover *r_next; + xlog_tid_t r_log_tid; /* log's transaction id */ + xfs_trans_header_t r_theader; /* trans header for partial */ + int r_state; /* not needed */ + xfs_lsn_t r_lsn; /* xact lsn */ + xlog_recover_item_t *r_itemq; /* q for items */ +} xlog_recover_t; + +#define ITEM_TYPE(i) (*(ushort *)(i)->ri_buf[0].i_addr) + +/* + * This is the number of entries in the l_buf_cancel_table used during + * recovery. 
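+ * xlog_do_log_recovery() zero-allocates an array of this many
+ * xfs_buf_cancel_t pointers before pass 1 and frees it after pass 2 (and,
+ * under DEBUG, after asserting that every bucket has been emptied).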
+ */ +#define XLOG_BC_TABLE_SIZE 64 + +#define XLOG_RECOVER_PASS1 1 +#define XLOG_RECOVER_PASS2 2 + +#endif /* __XFS_LOG_RECOVER_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_mac.c linux.22-ac2/fs/xfs/xfs_mac.c --- linux.vanilla/fs/xfs/xfs_mac.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_mac.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" + +static xfs_mac_label_t *mac_low_high_lp; +static xfs_mac_label_t *mac_high_low_lp; +static xfs_mac_label_t *mac_admin_high_lp; +static xfs_mac_label_t *mac_equal_equal_lp; + +/* + * Test for the existence of a MAC label as efficiently as possible. + */ +int +xfs_mac_vhaslabel( + vnode_t *vp) +{ + int error; + int len = sizeof(xfs_mac_label_t); + int flags = ATTR_KERNOVAL|ATTR_ROOT; + + VOP_ATTR_GET(vp, SGI_MAC_FILE, NULL, &len, flags, sys_cred, error); + return (error == 0); +} + +int +xfs_mac_iaccess(xfs_inode_t *ip, mode_t mode, struct cred *cr) +{ + xfs_mac_label_t mac; + xfs_mac_label_t *mp = mac_high_low_lp; + + if (cr == NULL || sys_cred == NULL ) { + return EACCES; + } + + if (xfs_attr_fetch(ip, SGI_MAC_FILE, (char *)&mac, sizeof(mac)) == 0) { + if ((mp = mac_add_label(&mac)) == NULL) { + return mac_access(mac_high_low_lp, cr, mode); + } + } + + return mac_access(mp, cr, mode); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_mac.h linux.22-ac2/fs/xfs/xfs_mac.h --- linux.vanilla/fs/xfs/xfs_mac.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_mac.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_MAC_H__ +#define __XFS_MAC_H__ + +/* + * Mandatory Access Control + * + * Layout of a composite MAC label: + * ml_list contains the list of categories (MSEN) followed by the list of + * divisions (MINT). This is actually a header for the data structure which + * will have an ml_list with more than one element. + * + * ------------------------------- + * | ml_msen_type | ml_mint_type | + * ------------------------------- + * | ml_level | ml_grade | + * ------------------------------- + * | ml_catcount | + * ------------------------------- + * | ml_divcount | + * ------------------------------- + * | category 1 | + * | . . . | + * | category N | (where N = ml_catcount) + * ------------------------------- + * | division 1 | + * | . . . | + * | division M | (where M = ml_divcount) + * ------------------------------- + */ +#define XFS_MAC_MAX_SETS 250 +typedef struct xfs_mac_label { + __uint8_t ml_msen_type; /* MSEN label type */ + __uint8_t ml_mint_type; /* MINT label type */ + __uint8_t ml_level; /* Hierarchical level */ + __uint8_t ml_grade; /* Hierarchical grade */ + __uint16_t ml_catcount; /* Category count */ + __uint16_t ml_divcount; /* Division count */ + /* Category set, then Division set */ + __uint16_t ml_list[XFS_MAC_MAX_SETS]; +} xfs_mac_label_t; + +/* MSEN label type names. Choose an upper case ASCII character. */ +#define XFS_MSEN_ADMIN_LABEL 'A' /* Admin: lowm_ail_lock, "xfs_ail"); + spinlock_init(&mp->m_sb_lock, "xfs_sb"); + mutex_init(&mp->m_ilock, MUTEX_DEFAULT, "xfs_ilock"); + initnsema(&mp->m_growlock, 1, "xfs_grow"); + /* + * Initialize the AIL. + */ + xfs_trans_ail_init(mp); + + /* Init freeze sync structures */ + spinlock_init(&mp->m_freeze_lock, "xfs_freeze"); + init_sv(&mp->m_wait_unfreeze, SV_DEFAULT, "xfs_freeze", 0); + atomic_set(&mp->m_active_trans, 0); + + return mp; +} + +/* + * Free up the resources associated with a mount structure. Assume that + * the structure was initially zeroed, so we can tell which fields got + * initialized. 
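+ * For example, m_ihash, m_chash and m_perag are only torn down below when
+ * they are non-NULL, and m_fsname is only freed if it was ever allocated.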
+ */ +void +xfs_mount_free( + xfs_mount_t *mp, + int remove_bhv) +{ + if (mp->m_ihash) + xfs_ihash_free(mp); + if (mp->m_chash) + xfs_chash_free(mp); + + if (mp->m_perag) { + int agno; + + for (agno = 0; agno < mp->m_maxagi; agno++) + if (mp->m_perag[agno].pagb_list) + kmem_free(mp->m_perag[agno].pagb_list, + sizeof(xfs_perag_busy_t) * + XFS_PAGB_NUM_SLOTS); + kmem_free(mp->m_perag, + sizeof(xfs_perag_t) * mp->m_sb.sb_agcount); + } + + AIL_LOCK_DESTROY(&mp->m_ail_lock); + spinlock_destroy(&mp->m_sb_lock); + mutex_destroy(&mp->m_ilock); + freesema(&mp->m_growlock); + if (mp->m_quotainfo) + XFS_QM_DONE(mp); + + if (mp->m_fsname != NULL) + kmem_free(mp->m_fsname, mp->m_fsname_len); + + if (remove_bhv) { + struct vfs *vfsp = XFS_MTOVFS(mp); + + bhv_remove_all_vfsops(vfsp, 0); + VFS_REMOVEBHV(vfsp, &mp->m_bhv); + } + + spinlock_destroy(&mp->m_freeze_lock); + sv_destroy(&mp->m_wait_unfreeze); + kmem_free(mp, sizeof(xfs_mount_t)); +} + + +/* + * Check the validity of the SB found. + */ +STATIC int +xfs_mount_validate_sb( + xfs_mount_t *mp, + xfs_sb_t *sbp) +{ + /* + * If the log device and data device have the + * same device number, the log is internal. + * Consequently, the sb_logstart should be non-zero. If + * we have a zero sb_logstart in this case, we may be trying to mount + * a volume filesystem in a non-volume manner. + */ + if (sbp->sb_magicnum != XFS_SB_MAGIC) { + cmn_err(CE_WARN, "XFS: bad magic number"); + return XFS_ERROR(EWRONGFS); + } + + if (!XFS_SB_GOOD_VERSION(sbp)) { + cmn_err(CE_WARN, "XFS: bad version"); + return XFS_ERROR(EWRONGFS); + } + + if (unlikely(sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) { + cmn_err(CE_WARN, "XFS: filesystem is marked as having an external log; specify logdev on the\nmount command line."); + XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(1)", + XFS_ERRLEVEL_HIGH, mp, sbp); + return XFS_ERROR(EFSCORRUPTED); + } + + if (unlikely(sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) { + cmn_err(CE_WARN, "XFS: filesystem is marked as having an internal log; don't specify logdev on\nthe mount command line."); + XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(2)", + XFS_ERRLEVEL_HIGH, mp, sbp); + return XFS_ERROR(EFSCORRUPTED); + } + + /* + * More sanity checking. These were stolen directly from + * xfs_repair. 
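+ * Any value outside the XFS_MIN_*/XFS_MAX_* limits checked below (sector
+ * size, block size, inode size, realtime extent size, imax_pct) fails the
+ * check with EFSCORRUPTED.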
+ */ + if (unlikely( + sbp->sb_agcount <= 0 || + sbp->sb_sectsize < XFS_MIN_SECTORSIZE || + sbp->sb_sectsize > XFS_MAX_SECTORSIZE || + sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG || + sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG || + sbp->sb_blocksize < XFS_MIN_BLOCKSIZE || + sbp->sb_blocksize > XFS_MAX_BLOCKSIZE || + sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || + sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || + sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || + (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || + (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || + sbp->sb_imax_pct > 100)) { + cmn_err(CE_WARN, "XFS: SB sanity check 1 failed"); + XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)", + XFS_ERRLEVEL_LOW, mp, sbp); + return XFS_ERROR(EFSCORRUPTED); + } + + /* + * Sanity check AG count, size fields against data size field + */ + if (unlikely( + sbp->sb_dblocks == 0 || + sbp->sb_dblocks > + (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks || + sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) * + sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) { + cmn_err(CE_WARN, "XFS: SB sanity check 2 failed"); + XFS_ERROR_REPORT("xfs_mount_validate_sb(4)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + +#if !XFS_BIG_FILESYSTEMS + if (sbp->sb_dblocks > INT_MAX || sbp->sb_rblocks > INT_MAX) { + cmn_err(CE_WARN, +"XFS: File systems greater than 1TB not supported on this system."); + return XFS_ERROR(E2BIG); + } +#endif + + if (unlikely(sbp->sb_inprogress)) { + cmn_err(CE_WARN, "XFS: file system busy"); + XFS_ERROR_REPORT("xfs_mount_validate_sb(5)", + XFS_ERRLEVEL_LOW, mp); + return XFS_ERROR(EFSCORRUPTED); + } + + /* + * Until this is fixed only page-sized or smaller data blocks work. + */ + if (sbp->sb_blocksize > PAGE_SIZE) { + cmn_err(CE_WARN, + "XFS: Attempted to mount file system with blocksize %d bytes", + sbp->sb_blocksize); + cmn_err(CE_WARN, + "XFS: Only page-sized (%d) or less blocksizes currently work.", + PAGE_SIZE); + return XFS_ERROR(ENOSYS); + } + + return 0; +} + +void +xfs_initialize_perag(xfs_mount_t *mp, int agcount) +{ + int index, max_metadata; + xfs_perag_t *pag; + xfs_agino_t agino; + xfs_ino_t ino; + xfs_sb_t *sbp = &mp->m_sb; + xfs_ino_t max_inum = XFS_MAXINUMBER_32; + + /* Check to see if the filesystem can overflow 32 bit inodes */ + agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0); + ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); + + /* Clear the mount flag if no inode can overflow 32 bits + * on this filesystem. + */ + if (ino <= max_inum) { + mp->m_flags &= ~XFS_MOUNT_32BITINODES; + } + + /* If we can overflow then setup the ag headers accordingly */ + if (mp->m_flags & XFS_MOUNT_32BITINODES) { + /* Calculate how much should be reserved for inodes to + * meet the max inode percentage. 
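+ * For example, with sb_dblocks = 1000000 and sb_imax_pct = 25 the initial
+ * icount below is 250000 blocks; sb_agblocks - 1 is then added and the total
+ * is divided by m_ialloc_blks.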
+ */ + if (mp->m_maxicount) { + __uint64_t icount; + + icount = sbp->sb_dblocks * sbp->sb_imax_pct; + do_div(icount, 100); + icount += sbp->sb_agblocks - 1; + do_div(icount, mp->m_ialloc_blks); + max_metadata = icount; + } else { + max_metadata = agcount; + } + for (index = 0; index < agcount; index++) { + ino = XFS_AGINO_TO_INO(mp, index, agino); + if (ino > max_inum) { + index++; + break; + } + + /* This ag is prefered for inodes */ + pag = &mp->m_perag[index]; + pag->pagi_inodeok = 1; + if (index < max_metadata) + pag->pagf_metadata = 1; + } + } else { + /* Setup default behavior for smaller filesystems */ + for (index = 0; index < agcount; index++) { + pag = &mp->m_perag[index]; + pag->pagi_inodeok = 1; + } + } + mp->m_maxagi = index; +} + +/* + * xfs_xlatesb + * + * data - on disk version of sb + * sb - a superblock + * dir - conversion direction: <0 - convert sb to buf + * >0 - convert buf to sb + * arch - architecture to read/write from/to buf + * fields - which fields to copy (bitmask) + */ +void +xfs_xlatesb( + void *data, + xfs_sb_t *sb, + int dir, + xfs_arch_t arch, + __int64_t fields) +{ + xfs_caddr_t buf_ptr; + xfs_caddr_t mem_ptr; + xfs_sb_field_t f; + int first; + int size; + + ASSERT(dir); + ASSERT(fields); + + if (!fields) + return; + + buf_ptr = (xfs_caddr_t)data; + mem_ptr = (xfs_caddr_t)sb; + + while (fields) { + f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); + first = xfs_sb_info[f].offset; + size = xfs_sb_info[f + 1].offset - first; + + ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1); + + if (arch == ARCH_NOCONVERT || + size == 1 || + xfs_sb_info[f].type == 1) { + if (dir > 0) { + memcpy(mem_ptr + first, buf_ptr + first, size); + } else { + memcpy(buf_ptr + first, mem_ptr + first, size); + } + } else { + switch (size) { + case 2: + INT_XLATE(*(__uint16_t*)(buf_ptr+first), + *(__uint16_t*)(mem_ptr+first), + dir, arch); + break; + case 4: + INT_XLATE(*(__uint32_t*)(buf_ptr+first), + *(__uint32_t*)(mem_ptr+first), + dir, arch); + break; + case 8: + INT_XLATE(*(__uint64_t*)(buf_ptr+first), + *(__uint64_t*)(mem_ptr+first), dir, arch); + break; + default: + ASSERT(0); + } + } + + fields &= ~(1LL << f); + } +} + +/* + * xfs_readsb + * + * Does the initial read of the superblock. + */ +int +xfs_readsb(xfs_mount_t *mp) +{ + unsigned int sector_size; + unsigned int extra_flags; + xfs_buf_t *bp; + xfs_sb_t *sbp; + int error; + + ASSERT(mp->m_sb_bp == NULL); + ASSERT(mp->m_ddev_targp != NULL); + + /* + * Allocate a (locked) buffer to hold the superblock. + * This will be kept around at all times to optimize + * access to the superblock. + */ + sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); + extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED; + + bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, + BTOBB(sector_size), extra_flags); + ASSERT(bp); + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + + /* + * Initialize the mount structure from the superblock. + * But first do some basic consistency checking. + */ + sbp = XFS_BUF_TO_SBP(bp); + xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, + ARCH_CONVERT, XFS_SB_ALL_BITS); + + error = xfs_mount_validate_sb(mp, &(mp->m_sb)); + if (error) { + cmn_err(CE_WARN, "XFS: SB validate failed"); + XFS_BUF_UNMANAGE(bp); + xfs_buf_relse(bp); + return error; + } + + /* + * We must be able to do sector-sized and sector-aligned IO. 
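+ * If the device sector size is larger than sb_sectsize the mount fails with
+ * ENOSYS below; if it is smaller, the superblock is simply re-read using the
+ * filesystem's own sector size.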
+ */ + if (sector_size > mp->m_sb.sb_sectsize) { + cmn_err(CE_WARN, + "XFS: device supports only %u byte sectors (not %u)", + sector_size, mp->m_sb.sb_sectsize); + XFS_BUF_UNMANAGE(bp); + xfs_buf_relse(bp); + return XFS_ERROR(ENOSYS); + } + + /* + * If device sector size is smaller than the superblock size, + * re-read the superblock so the buffer is correctly sized. + */ + if (sector_size < mp->m_sb.sb_sectsize) { + XFS_BUF_UNMANAGE(bp); + xfs_buf_relse(bp); + sector_size = mp->m_sb.sb_sectsize; + bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, + BTOBB(sector_size), extra_flags); + ASSERT(bp); + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + } + + mp->m_sb_bp = bp; + xfs_buf_relse(bp); + ASSERT(XFS_BUF_VALUSEMA(bp) > 0); + return 0; +} + + +/* + * xfs_mount_common + * + * Mount initialization code establishing various mount + * fields from the superblock associated with the given + * mount structure + */ +void +xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) +{ + int i; + + mp->m_agfrotor = mp->m_agirotor = 0; + spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock"); + mp->m_maxagi = mp->m_sb.sb_agcount; + mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; + mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; + mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT; + mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1; + mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog; + mp->m_litino = sbp->sb_inodesize - + ((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t)); + mp->m_blockmask = sbp->sb_blocksize - 1; + mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG; + mp->m_blockwmask = mp->m_blockwsize - 1; + INIT_LIST_HEAD(&mp->m_del_inodes); + + + if (XFS_SB_VERSION_HASLOGV2(sbp)) { + if (sbp->sb_logsunit <= 1) { + mp->m_lstripemask = 1; + } else { + mp->m_lstripemask = + 1 << xfs_highbit32(sbp->sb_logsunit >> BBSHIFT); + } + } + + /* + * Setup for attributes, in case they get created. + * This value is for inodes getting attributes for the first time, + * the per-inode value is for old attribute values. 
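+ * m_attroffset is chosen from sb_inodesize below: 256 byte inodes use
+ * XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(2), while 512, 1024 and 2048 byte
+ * inodes use XFS_BMDR_SPACE_CALC(12).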
+ */ + ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048); + switch (sbp->sb_inodesize) { + case 256: + mp->m_attroffset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(2); + break; + case 512: + case 1024: + case 2048: + mp->m_attroffset = XFS_BMDR_SPACE_CALC(12); + break; + default: + ASSERT(0); + } + ASSERT(mp->m_attroffset < XFS_LITINO(mp)); + + for (i = 0; i < 2; i++) { + mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize, + xfs_alloc, i == 0); + mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize, + xfs_alloc, i == 0); + } + for (i = 0; i < 2; i++) { + mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize, + xfs_bmbt, i == 0); + mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize, + xfs_bmbt, i == 0); + } + for (i = 0; i < 2; i++) { + mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize, + xfs_inobt, i == 0); + mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize, + xfs_inobt, i == 0); + } + + mp->m_bsize = XFS_FSB_TO_BB(mp, 1); + mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK, + sbp->sb_inopblock); + mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog; +} + +/* + * xfs_mountfs + * + * This function does the following on an initial mount of a file system: + * - reads the superblock from disk and init the mount struct + * - if we're a 32-bit kernel, do a size check on the superblock + * so we don't mount terabyte filesystems + * - init mount struct realtime fields + * - allocate inode hash table for fs + * - init directory manager + * - perform recovery and init the log manager + */ +int +xfs_mountfs( + vfs_t *vfsp, + xfs_mount_t *mp, + dev_t dev, + int mfsi_flags) +{ + xfs_buf_t *bp; + xfs_sb_t *sbp = &(mp->m_sb); + xfs_inode_t *rip; + vnode_t *rvp = 0; + int readio_log, writeio_log; + vmap_t vmap; + xfs_daddr_t d; + __uint64_t ret64; + __int64_t update_flags; + uint quotamount, quotaflags; + int agno, noio; + int uuid_mounted = 0; + int error = 0; + + noio = dev == 0 && mp->m_sb_bp != NULL; + if (mp->m_sb_bp == NULL) { + if ((error = xfs_readsb(mp))) { + return (error); + } + } + xfs_mount_common(mp, sbp); + + /* + * Check if sb_agblocks is aligned at stripe boundary + * If sb_agblocks is NOT aligned turn off m_dalign since + * allocator alignment is within an ag, therefore ag has + * to be aligned at stripe boundary. + */ + update_flags = 0LL; + if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) { + /* + * If stripe unit and stripe width are not multiples + * of the fs blocksize turn off alignment. + */ + if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || + (BBTOB(mp->m_swidth) & mp->m_blockmask)) { + if (mp->m_flags & XFS_MOUNT_RETERR) { + cmn_err(CE_WARN, + "XFS: alignment check 1 failed"); + error = XFS_ERROR(EINVAL); + goto error1; + } + } else { + /* + * Convert the stripe unit and width to FSBs. 
+ */ + mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); + if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { + if (mp->m_flags & XFS_MOUNT_RETERR) { + error = XFS_ERROR(EINVAL); + goto error1; + } + mp->m_dalign = 0; + mp->m_swidth = 0; + } else if (mp->m_dalign) { + mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); + } else { + if (mp->m_flags & XFS_MOUNT_RETERR) { + cmn_err(CE_WARN, + "XFS: alignment check 3 failed"); + error = XFS_ERROR(EINVAL); + goto error1; + } + mp->m_swidth = 0; + } + } + + /* + * Update superblock with new values + * and log changes + */ + if (XFS_SB_VERSION_HASDALIGN(sbp)) { + if (sbp->sb_unit != mp->m_dalign) { + sbp->sb_unit = mp->m_dalign; + update_flags |= XFS_SB_UNIT; + } + if (sbp->sb_width != mp->m_swidth) { + sbp->sb_width = mp->m_swidth; + update_flags |= XFS_SB_WIDTH; + } + } + } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && + XFS_SB_VERSION_HASDALIGN(&mp->m_sb)) { + mp->m_dalign = sbp->sb_unit; + mp->m_swidth = sbp->sb_width; + } + + xfs_alloc_compute_maxlevels(mp); + xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); + xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); + xfs_ialloc_compute_maxlevels(mp); + + if (sbp->sb_imax_pct) { + __uint64_t icount; + + /* Make sure the maximum inode count is a multiple of the + * units we allocate inodes in. + */ + + icount = sbp->sb_dblocks * sbp->sb_imax_pct; + do_div(icount, 100); + do_div(icount, mp->m_ialloc_blks); + mp->m_maxicount = (icount * mp->m_ialloc_blks) << + sbp->sb_inopblog; + } else + mp->m_maxicount = 0; + + /* + * XFS uses the uuid from the superblock as the unique + * identifier for fsid. We can not use the uuid from the volume + * since a single partition filesystem is identical to a single + * partition volume/filesystem. + */ + if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && + (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { + if (xfs_uuid_mount(mp)) { + error = XFS_ERROR(EINVAL); + goto error1; + } + uuid_mounted=1; + ret64 = uuid_hash64(&sbp->sb_uuid); + memcpy(&vfsp->vfs_fsid, &ret64, sizeof(ret64)); + } + + /* + * Set the default minimum read and write sizes unless + * already specified in a mount option. + * We use smaller I/O sizes when the file system + * is being used for NFS service (wsync mount option). + */ + if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { + if (mp->m_flags & XFS_MOUNT_WSYNC) { + readio_log = XFS_WSYNC_READIO_LOG; + writeio_log = XFS_WSYNC_WRITEIO_LOG; + } else { + readio_log = XFS_READIO_LOG_LARGE; + writeio_log = XFS_WRITEIO_LOG_LARGE; + } + } else { + readio_log = mp->m_readio_log; + writeio_log = mp->m_writeio_log; + } + + /* + * Set the number of readahead buffers to use based on + * physical memory size. + */ + if (xfs_physmem <= 4096) /* <= 16MB */ + mp->m_nreadaheads = XFS_RW_NREADAHEAD_16MB; + else if (xfs_physmem <= 8192) /* <= 32MB */ + mp->m_nreadaheads = XFS_RW_NREADAHEAD_32MB; + else + mp->m_nreadaheads = XFS_RW_NREADAHEAD_K32; + if (sbp->sb_blocklog > readio_log) { + mp->m_readio_log = sbp->sb_blocklog; + } else { + mp->m_readio_log = readio_log; + } + mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog); + if (sbp->sb_blocklog > writeio_log) { + mp->m_writeio_log = sbp->sb_blocklog; + } else { + mp->m_writeio_log = writeio_log; + } + mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog); + + /* + * Set the inode cluster size based on the physical memory + * size. This may still be overridden by the file system + * block size if it is larger than the chosen cluster size. 
+ */ + if (xfs_physmem <= btoc(32 * 1024 * 1024)) { /* <= 32 MB */ + mp->m_inode_cluster_size = XFS_INODE_SMALL_CLUSTER_SIZE; + } else { + mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; + } + /* + * Set whether we're using inode alignment. + */ + if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) && + mp->m_sb.sb_inoalignmt >= + XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) + mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1; + else + mp->m_inoalign_mask = 0; + /* + * If we are using stripe alignment, check whether + * the stripe unit is a multiple of the inode alignment + */ + if (mp->m_dalign && mp->m_inoalign_mask && + !(mp->m_dalign & mp->m_inoalign_mask)) + mp->m_sinoalign = mp->m_dalign; + else + mp->m_sinoalign = 0; + /* + * Check that the data (and log if separate) are an ok size. + */ + d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); + if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { + cmn_err(CE_WARN, "XFS: size check 1 failed"); + error = XFS_ERROR(E2BIG); + goto error1; + } + if (!noio) { + error = xfs_read_buf(mp, mp->m_ddev_targp, + d - XFS_FSS_TO_BB(mp, 1), + XFS_FSS_TO_BB(mp, 1), 0, &bp); + if (!error) { + xfs_buf_relse(bp); + } else { + cmn_err(CE_WARN, "XFS: size check 2 failed"); + if (error == ENOSPC) { + error = XFS_ERROR(E2BIG); + } + goto error1; + } + } + + if (!noio && ((mfsi_flags & XFS_MFSI_CLIENT) == 0) && + mp->m_logdev_targp != mp->m_ddev_targp) { + d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); + if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { + cmn_err(CE_WARN, "XFS: size check 3 failed"); + error = XFS_ERROR(E2BIG); + goto error1; + } + error = xfs_read_buf(mp, mp->m_logdev_targp, + d - XFS_FSB_TO_BB(mp, 1), + XFS_FSB_TO_BB(mp, 1), 0, &bp); + if (!error) { + xfs_buf_relse(bp); + } else { + cmn_err(CE_WARN, "XFS: size check 3 failed"); + if (error == ENOSPC) { + error = XFS_ERROR(E2BIG); + } + goto error1; + } + } + + /* + * Initialize realtime fields in the mount structure + */ + if ((error = xfs_rtmount_init(mp))) { + cmn_err(CE_WARN, "XFS: RT mount failed"); + goto error1; + } + + /* + * For client case we are done now + */ + if (mfsi_flags & XFS_MFSI_CLIENT) { + return(0); + } + + /* + * Copies the low order bits of the timestamp and the randomly + * set "sequence" number out of a UUID. + */ + uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid); + + /* + * The vfs structure needs to have a file system independent + * way of checking for the invariant file system ID. Since it + * can't look at mount structures it has a pointer to the data + * in the mount structure. + * + * File systems that don't support user level file handles (i.e. + * all of them except for XFS) will leave vfs_altfsid as NULL. + */ + vfsp->vfs_altfsid = (fsid_t *)mp->m_fixedfsid; + mp->m_dmevmask = 0; /* not persistent; set after each mount */ + + /* + * Select the right directory manager. + */ + mp->m_dirops = + XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ? + xfsv2_dirops : + xfsv1_dirops; + + /* + * Initialize directory manager's entries. + */ + XFS_DIR_MOUNT(mp); + + /* + * Initialize the attribute manager's entries. + */ + mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100; + + /* + * Initialize the precomputed transaction reservations values. + */ + xfs_trans_init(mp); + if (noio) { + ASSERT((mfsi_flags & XFS_MFSI_CLIENT) == 0); + return 0; + } + + /* + * Allocate and initialize the inode hash table for this + * file system. + */ + xfs_ihash_init(mp); + xfs_chash_init(mp); + + /* + * Allocate and initialize the per-ag data. 
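+ * One xfs_perag_t per allocation group is zero-allocated and guarded by
+ * m_peraglock; xfs_initialize_perag() then marks which AGs may hold inodes.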
+ */ + init_rwsem(&mp->m_peraglock); + mp->m_perag = + kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP); + + xfs_initialize_perag(mp, sbp->sb_agcount); + + /* + * log's mount-time initialization. Perform 1st part recovery if needed + */ + if (likely(sbp->sb_logblocks > 0)) { /* check for volume case */ + error = xfs_log_mount(mp, mp->m_logdev_targp->pbr_dev, + XFS_FSB_TO_DADDR(mp, sbp->sb_logstart), + XFS_FSB_TO_BB(mp, sbp->sb_logblocks)); + if (error) { + cmn_err(CE_WARN, "XFS: log mount failed"); + goto error2; + } + } else { /* No log has been defined */ + cmn_err(CE_WARN, "XFS: no log defined"); + XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp); + error = XFS_ERROR(EFSCORRUPTED); + goto error2; + } + + /* + * Get and sanity-check the root inode. + * Save the pointer to it in the mount structure. + */ + error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_ILOCK_EXCL, &rip, 0); + if (error) { + cmn_err(CE_WARN, "XFS: failed to read root inode"); + goto error3; + } + + ASSERT(rip != NULL); + rvp = XFS_ITOV(rip); + VMAP(rvp, vmap); + + if (unlikely((rip->i_d.di_mode & IFMT) != IFDIR)) { + cmn_err(CE_WARN, "XFS: corrupted root inode"); + prdev("Root inode %llu is not a directory", + mp->m_dev, (unsigned long long)rip->i_ino); + xfs_iunlock(rip, XFS_ILOCK_EXCL); + XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, + mp); + error = XFS_ERROR(EFSCORRUPTED); + goto error4; + } + mp->m_rootip = rip; /* save it */ + + xfs_iunlock(rip, XFS_ILOCK_EXCL); + + /* + * Initialize realtime inode pointers in the mount structure + */ + if ((error = xfs_rtmount_inodes(mp))) { + /* + * Free up the root inode. + */ + cmn_err(CE_WARN, "XFS: failed to read RT inodes"); + goto error4; + } + + /* + * If fs is not mounted readonly, then update the superblock + * unit and width changes. + */ + if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY)) + xfs_mount_log_sbunit(mp, update_flags); + + /* + * Initialise the XFS quota management subsystem for this mount + */ + if ((error = XFS_QM_INIT(mp, "amount, "aflags))) + goto error4; + + /* + * Finish recovering the file system. This part needed to be + * delayed until after the root and real-time bitmap inodes + * were consistently read in. + */ + error = xfs_log_mount_finish(mp, mfsi_flags); + if (error) { + cmn_err(CE_WARN, "XFS: log mount finish failed"); + goto error4; + } + + /* + * Complete the quota initialisation, post-log-replay component. + */ + if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags))) + goto error4; + + return 0; + + error4: + /* + * Free up the root inode. + */ + VN_RELE(rvp); + vn_purge(rvp, &vmap); + error3: + xfs_log_unmount_dealloc(mp); + error2: + xfs_ihash_free(mp); + xfs_chash_free(mp); + for (agno = 0; agno < sbp->sb_agcount; agno++) + if (mp->m_perag[agno].pagb_list) + kmem_free(mp->m_perag[agno].pagb_list, + sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS); + kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t)); + mp->m_perag = NULL; + /* FALLTHROUGH */ + error1: + if (uuid_mounted) + xfs_uuid_unmount(mp); + xfs_freesb(mp); + return error; +} + +/* + * xfs_unmountfs + * + * This flushes out the inodes,dquots and the superblock, unmounts the + * log and makes sure that incore structures are freed. 
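+ * Quotas are purged and the log is forced synchronously before the data and
+ * realtime buffer targets are invalidated; only then is the superblock
+ * written back and the log unmounted.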
+ */ +int +xfs_unmountfs(xfs_mount_t *mp, struct cred *cr) +{ + struct vfs *vfsp = XFS_MTOVFS(mp); +#if defined(DEBUG) || defined(INDUCE_IO_ERROR) + int64_t fsid; +#endif + + xfs_iflush_all(mp, XFS_FLUSH_ALL); + + XFS_QM_DQPURGEALL(mp, + XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING); + + /* + * Flush out the log synchronously so that we know for sure + * that nothing is pinned. This is important because bflush() + * will skip pinned buffers. + */ + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); + + xfs_binval(mp->m_ddev_targp); + if (mp->m_rtdev_targp) { + xfs_binval(mp->m_rtdev_targp); + } + + xfs_unmountfs_writesb(mp); + + xfs_log_unmount(mp); /* Done! No more fs ops. */ + + xfs_freesb(mp); + + /* + * All inodes from this mount point should be freed. + */ + ASSERT(mp->m_inodes == NULL); + + /* + * We may have bufs that are in the process of getting written still. + * We must wait for the I/O completion of those. The sync flag here + * does a two pass iteration thru the bufcache. + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + xfs_incore_relse(mp->m_ddev_targp, 0, 1); /* synchronous */ + } + + xfs_unmountfs_close(mp, cr); + if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) + xfs_uuid_unmount(mp); + +#if defined(DEBUG) || defined(INDUCE_IO_ERROR) + /* + * clear all error tags on this filesystem + */ + memcpy(&fsid, &vfsp->vfs_fsid, sizeof(int64_t)); + xfs_errortag_clearall_umount(fsid, mp->m_fsname, 0); +#endif + XFS_IODONE(vfsp); + xfs_mount_free(mp, 1); + return 0; +} + +void +xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr) +{ + int have_logdev = (mp->m_logdev_targp != mp->m_ddev_targp); + + if (mp->m_ddev_targp) { + xfs_free_buftarg(mp->m_ddev_targp); + mp->m_ddev_targp = NULL; + } + if (mp->m_rtdev_targp) { + xfs_blkdev_put(mp->m_rtdev_targp->pbr_bdev); + xfs_free_buftarg(mp->m_rtdev_targp); + mp->m_rtdev_targp = NULL; + } + if (mp->m_logdev_targp && have_logdev) { + xfs_blkdev_put(mp->m_logdev_targp->pbr_bdev); + xfs_free_buftarg(mp->m_logdev_targp); + mp->m_logdev_targp = NULL; + } +} + +int +xfs_unmountfs_writesb(xfs_mount_t *mp) +{ + xfs_buf_t *sbp; + xfs_sb_t *sb; + int error = 0; + + /* + * skip superblock write if fs is read-only, or + * if we are doing a forced umount. + */ + sbp = xfs_getsb(mp, 0); + if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY || + XFS_FORCED_SHUTDOWN(mp))) { + /* + * mark shared-readonly if desired + */ + sb = XFS_BUF_TO_SBP(sbp); + if (mp->m_mk_sharedro) { + if (!(sb->sb_flags & XFS_SBF_READONLY)) + sb->sb_flags |= XFS_SBF_READONLY; + if (!XFS_SB_VERSION_HASSHARED(sb)) + XFS_SB_VERSION_ADDSHARED(sb); + xfs_fs_cmn_err(CE_NOTE, mp, + "Unmounting, marking shared read-only"); + } + XFS_BUF_UNDONE(sbp); + XFS_BUF_UNREAD(sbp); + XFS_BUF_UNDELAYWRITE(sbp); + XFS_BUF_WRITE(sbp); + XFS_BUF_UNASYNC(sbp); + ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); + xfsbdstrat(mp, sbp); + /* Nevermind errors we might get here. */ + error = xfs_iowait(sbp); + if (error) + xfs_ioerror_alert("xfs_unmountfs_writesb", + mp, sbp, XFS_BUF_ADDR(sbp)); + if (error && mp->m_mk_sharedro) + xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly"); + } + xfs_buf_relse(sbp); + return (error); +} + +/* + * xfs_mod_sb() can be used to copy arbitrary changes to the + * in-core superblock into the superblock buffer to be logged. + * It does not provide the higher level of locking that is + * needed to protect the in-core superblock from concurrent + * access. 
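+ * Only the byte range covering the modified fields is logged: the first and
+ * last offsets are looked up in xfs_sb_info[] from the field bitmask and
+ * handed to xfs_trans_log_buf() below.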
+ */ +void +xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) +{ + xfs_buf_t *bp; + int first; + int last; + xfs_mount_t *mp; + xfs_sb_t *sbp; + xfs_sb_field_t f; + + ASSERT(fields); + if (!fields) + return; + mp = tp->t_mountp; + bp = xfs_trans_getsb(tp, mp, 0); + sbp = XFS_BUF_TO_SBP(bp); + first = sizeof(xfs_sb_t); + last = 0; + + /* translate/copy */ + + xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), -1, ARCH_CONVERT, fields); + + /* find modified range */ + + f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); + ASSERT((1LL << f) & XFS_SB_MOD_BITS); + first = xfs_sb_info[f].offset; + + f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields); + ASSERT((1LL << f) & XFS_SB_MOD_BITS); + last = xfs_sb_info[f + 1].offset - 1; + + xfs_trans_log_buf(tp, bp, first, last); +} + +/* + * xfs_mod_incore_sb_unlocked() is a utility routine common used to apply + * a delta to a specified field in the in-core superblock. Simply + * switch on the field indicated and apply the delta to that field. + * Fields are not allowed to dip below zero, so if the delta would + * do this do not apply it and return EINVAL. + * + * The SB_LOCK must be held when this routine is called. + */ +STATIC int +xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field, + int delta, int rsvd) +{ + int scounter; /* short counter for 32 bit fields */ + long long lcounter; /* long counter for 64 bit fields */ + long long res_used, rem; + + /* + * With the in-core superblock spin lock held, switch + * on the indicated field. Apply the delta to the + * proper field. If the fields value would dip below + * 0, then do not apply the delta and return EINVAL. + */ + switch (field) { + case XFS_SBS_ICOUNT: + lcounter = (long long)mp->m_sb.sb_icount; + lcounter += delta; + if (lcounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_icount = lcounter; + return (0); + case XFS_SBS_IFREE: + lcounter = (long long)mp->m_sb.sb_ifree; + lcounter += delta; + if (lcounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_ifree = lcounter; + return (0); + case XFS_SBS_FDBLOCKS: + + lcounter = (long long)mp->m_sb.sb_fdblocks; + res_used = (long long)(mp->m_resblks - mp->m_resblks_avail); + + if (delta > 0) { /* Putting blocks back */ + if (res_used > delta) { + mp->m_resblks_avail += delta; + } else { + rem = delta - res_used; + mp->m_resblks_avail = mp->m_resblks; + lcounter += rem; + } + } else { /* Taking blocks away */ + + lcounter += delta; + + /* + * If were out of blocks, use any available reserved blocks if + * were allowed to. 
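+ * (That is, when the rsvd flag is set the shortfall is taken from
+ * m_resblks_avail; otherwise ENOSPC is returned.)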
+ */ + + if (lcounter < 0) { + if (rsvd) { + lcounter = (long long)mp->m_resblks_avail + delta; + if (lcounter < 0) { + return (XFS_ERROR(ENOSPC)); + } + mp->m_resblks_avail = lcounter; + return (0); + } else { /* not reserved */ + return (XFS_ERROR(ENOSPC)); + } + } + } + + mp->m_sb.sb_fdblocks = lcounter; + return (0); + case XFS_SBS_FREXTENTS: + lcounter = (long long)mp->m_sb.sb_frextents; + lcounter += delta; + if (lcounter < 0) { + return (XFS_ERROR(ENOSPC)); + } + mp->m_sb.sb_frextents = lcounter; + return (0); + case XFS_SBS_DBLOCKS: + lcounter = (long long)mp->m_sb.sb_dblocks; + lcounter += delta; + if (lcounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_dblocks = lcounter; + return (0); + case XFS_SBS_AGCOUNT: + scounter = mp->m_sb.sb_agcount; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_agcount = scounter; + return (0); + case XFS_SBS_IMAX_PCT: + scounter = mp->m_sb.sb_imax_pct; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_imax_pct = scounter; + return (0); + case XFS_SBS_REXTSIZE: + scounter = mp->m_sb.sb_rextsize; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rextsize = scounter; + return (0); + case XFS_SBS_RBMBLOCKS: + scounter = mp->m_sb.sb_rbmblocks; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rbmblocks = scounter; + return (0); + case XFS_SBS_RBLOCKS: + lcounter = (long long)mp->m_sb.sb_rblocks; + lcounter += delta; + if (lcounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rblocks = lcounter; + return (0); + case XFS_SBS_REXTENTS: + lcounter = (long long)mp->m_sb.sb_rextents; + lcounter += delta; + if (lcounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rextents = lcounter; + return (0); + case XFS_SBS_REXTSLOG: + scounter = mp->m_sb.sb_rextslog; + scounter += delta; + if (scounter < 0) { + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } + mp->m_sb.sb_rextslog = scounter; + return (0); + default: + ASSERT(0); + return (XFS_ERROR(EINVAL)); + } +} + +/* + * xfs_mod_incore_sb() is used to change a field in the in-core + * superblock structure by the specified delta. This modification + * is protected by the SB_LOCK. Just use the xfs_mod_incore_sb_unlocked() + * routine to do the work. + */ +int +xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd) +{ + unsigned long s; + int status; + + s = XFS_SB_LOCK(mp); + status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); + XFS_SB_UNLOCK(mp, s); + return (status); +} + +/* + * xfs_mod_incore_sb_batch() is used to change more than one field + * in the in-core superblock structure at a time. This modification + * is protected by a lock internal to this module. The fields and + * changes to those fields are specified in the array of xfs_mod_sb + * structures passed in. + * + * Either all of the specified deltas will be applied or none of + * them will. If any modified field dips below 0, then all modifications + * will be backed out and EINVAL will be returned. + */ +int +xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) +{ + unsigned long s; + int status=0; + xfs_mod_sb_t *msbp; + + /* + * Loop through the array of mod structures and apply each + * individually. If any fail, then back out all those + * which have already been applied. 
Do all of this within + * the scope of the SB_LOCK so that all of the changes will + * be atomic. + */ + s = XFS_SB_LOCK(mp); + msbp = &msb[0]; + for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) { + /* + * Apply the delta at index n. If it fails, break + * from the loop so we'll fall into the undo loop + * below. + */ + status = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, + msbp->msb_delta, rsvd); + if (status != 0) { + break; + } + } + + /* + * If we didn't complete the loop above, then back out + * any changes made to the superblock. If you add code + * between the loop above and here, make sure that you + * preserve the value of status. Loop back until + * we step below the beginning of the array. Make sure + * we don't touch anything back there. + */ + if (status != 0) { + msbp--; + while (msbp >= msb) { + status = xfs_mod_incore_sb_unlocked(mp, + msbp->msb_field, -(msbp->msb_delta), rsvd); + ASSERT(status == 0); + msbp--; + } + } + XFS_SB_UNLOCK(mp, s); + return (status); +} + +/* + * xfs_getsb() is called to obtain the buffer for the superblock. + * The buffer is returned locked and read in from disk. + * The buffer should be released with a call to xfs_brelse(). + * + * If the flags parameter is BUF_TRYLOCK, then we'll only return + * the superblock buffer if it can be locked without sleeping. + * If it can't then we'll return NULL. + */ +xfs_buf_t * +xfs_getsb( + xfs_mount_t *mp, + int flags) +{ + xfs_buf_t *bp; + + ASSERT(mp->m_sb_bp != NULL); + bp = mp->m_sb_bp; + if (flags & XFS_BUF_TRYLOCK) { + if (!XFS_BUF_CPSEMA(bp)) { + return NULL; + } + } else { + XFS_BUF_PSEMA(bp, PRIBIO); + } + XFS_BUF_HOLD(bp); + ASSERT(XFS_BUF_ISDONE(bp)); + return (bp); +} + +/* + * Used to free the superblock along various error paths. + */ +void +xfs_freesb( + xfs_mount_t *mp) +{ + xfs_buf_t *bp; + + /* + * Use xfs_getsb() so that the buffer will be locked + * when we call xfs_buf_relse(). + */ + bp = xfs_getsb(mp, 0); + XFS_BUF_UNMANAGE(bp); + xfs_buf_relse(bp); + mp->m_sb_bp = NULL; +} + +/* + * See if the UUID is unique among mounted XFS filesystems. + * Mount fails if UUID is nil or a FS with the same UUID is already mounted. + */ +STATIC int +xfs_uuid_mount( + xfs_mount_t *mp) +{ + if (uuid_is_nil(&mp->m_sb.sb_uuid)) { + cmn_err(CE_WARN, + "XFS: Filesystem %s has nil UUID - can't mount", + mp->m_fsname); + return -1; + } + if (!uuid_table_insert(&mp->m_sb.sb_uuid)) { + cmn_err(CE_WARN, + "XFS: Filesystem %s has duplicate UUID - can't mount", + mp->m_fsname); + return -1; + } + return 0; +} + +/* + * Remove filesystem from the UUID table. + */ +STATIC void +xfs_uuid_unmount( + xfs_mount_t *mp) +{ + uuid_table_remove(&mp->m_sb.sb_uuid); +} + +/* + * Used to log changes to the superblock unit and width fields which could + * be altered by the mount options. Only the first superblock is updated. + */ +STATIC void +xfs_mount_log_sbunit( + xfs_mount_t *mp, + __int64_t fields) +{ + xfs_trans_t *tp; + + ASSERT(fields & (XFS_SB_UNIT|XFS_SB_WIDTH|XFS_SB_UUID)); + + tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); + if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, + XFS_DEFAULT_LOG_COUNT)) { + xfs_trans_cancel(tp, 0); + return; + } + xfs_mod_sb(tp, fields); + xfs_trans_commit(tp, 0, NULL); +} + +/* Functions to lock access out of the filesystem for forced + * shutdown or snapshot. 
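+ * xfs_start_freeze() records the freeze level and, at XFS_FREEZE_TRANS,
+ * waits for m_active_trans to drain; xfs_check_frozen() blocks callers on
+ * m_wait_unfreeze until xfs_finish_freeze() wakes them with sv_broadcast().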
+ */ + +void +xfs_start_freeze( + xfs_mount_t *mp, + int level) +{ + unsigned long s = mutex_spinlock(&mp->m_freeze_lock); + + mp->m_frozen = level; + mutex_spinunlock(&mp->m_freeze_lock, s); + + if (level == XFS_FREEZE_TRANS) { + while (atomic_read(&mp->m_active_trans) > 0) + delay(100); + } +} + +void +xfs_finish_freeze( + xfs_mount_t *mp) +{ + unsigned long s = mutex_spinlock(&mp->m_freeze_lock); + + if (mp->m_frozen) { + mp->m_frozen = 0; + sv_broadcast(&mp->m_wait_unfreeze); + } + + mutex_spinunlock(&mp->m_freeze_lock, s); +} + +void +xfs_check_frozen( + xfs_mount_t *mp, + bhv_desc_t *bdp, + int level) +{ + unsigned long s; + + if (mp->m_frozen) { + s = mutex_spinlock(&mp->m_freeze_lock); + + if (mp->m_frozen < level) { + mutex_spinunlock(&mp->m_freeze_lock, s); + } else { + sv_wait(&mp->m_wait_unfreeze, 0, &mp->m_freeze_lock, s); + } + } + + if (level == XFS_FREEZE_TRANS) + atomic_inc(&mp->m_active_trans); +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_mount.h linux.22-ac2/fs/xfs/xfs_mount.h --- linux.vanilla/fs/xfs/xfs_mount.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_mount.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,582 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_MOUNT_H__ +#define __XFS_MOUNT_H__ + + +typedef struct xfs_trans_reservations { + uint tr_write; /* extent alloc trans */ + uint tr_itruncate; /* truncate trans */ + uint tr_rename; /* rename trans */ + uint tr_link; /* link trans */ + uint tr_remove; /* unlink trans */ + uint tr_symlink; /* symlink trans */ + uint tr_create; /* create trans */ + uint tr_mkdir; /* mkdir trans */ + uint tr_ifree; /* inode free trans */ + uint tr_ichange; /* inode update trans */ + uint tr_growdata; /* fs data section grow trans */ + uint tr_swrite; /* sync write inode trans */ + uint tr_addafork; /* cvt inode to attributed trans */ + uint tr_writeid; /* write setuid/setgid file */ + uint tr_attrinval; /* attr fork buffer invalidation */ + uint tr_attrset; /* set/create an attribute */ + uint tr_attrrm; /* remove an attribute */ + uint tr_clearagi; /* clear bad agi unlinked ino bucket */ + uint tr_growrtalloc; /* grow realtime allocations */ + uint tr_growrtzero; /* grow realtime zeroing */ + uint tr_growrtfree; /* grow realtime freeing */ +} xfs_trans_reservations_t; + + +#ifndef __KERNEL__ +/* + * Moved here from xfs_ag.h to avoid reordering header files + */ +#define XFS_DADDR_TO_AGNO(mp,d) \ + ((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks)) +#define XFS_DADDR_TO_AGBNO(mp,d) \ + ((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks)) +#else +struct cred; +struct vfs; +struct vnode; +struct xfs_mount_args; +struct xfs_ihash; +struct xfs_chash; +struct xfs_inode; +struct xfs_perag; +struct xfs_iocore; +struct xfs_bmbt_irec; +struct xfs_bmap_free; + +#define SPLDECL(s) unsigned long s +#define AIL_LOCK_T lock_t +#define AIL_LOCKINIT(x,y) spinlock_init(x,y) +#define AIL_LOCK_DESTROY(x) spinlock_destroy(x) +#define AIL_LOCK(mp,s) s=mutex_spinlock(&(mp)->m_ail_lock) +#define AIL_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_ail_lock, s) + + +/* + * Prototypes and functions for the Data Migration subsystem. + */ + +typedef int (*xfs_send_data_t)(int, struct bhv_desc *, + xfs_off_t, size_t, int, vrwlock_t *); +typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint); +typedef int (*xfs_send_destroy_t)(struct bhv_desc *, dm_right_t); +typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct bhv_desc *, + dm_right_t, struct bhv_desc *, dm_right_t, + char *, char *, mode_t, int, int); +typedef void (*xfs_send_unmount_t)(struct vfs *, struct vnode *, + dm_right_t, mode_t, int, int); + +typedef struct xfs_dmops { + xfs_send_data_t xfs_send_data; + xfs_send_mmap_t xfs_send_mmap; + xfs_send_destroy_t xfs_send_destroy; + xfs_send_namesp_t xfs_send_namesp; + xfs_send_unmount_t xfs_send_unmount; +} xfs_dmops_t; + +#define XFS_SEND_DATA(mp, ev,bdp,off,len,fl,lock) \ + (*(mp)->m_dm_ops.xfs_send_data)(ev,bdp,off,len,fl,lock) +#define XFS_SEND_MMAP(mp, vma,fl) \ + (*(mp)->m_dm_ops.xfs_send_mmap)(vma,fl) +#define XFS_SEND_DESTROY(mp, bdp,right) \ + (*(mp)->m_dm_ops.xfs_send_destroy)(bdp,right) +#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \ + (*(mp)->m_dm_ops.xfs_send_namesp)(ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) +#define XFS_SEND_UNMOUNT(mp, vfsp,vp,right,mode,rval,fl) \ + (*(mp)->m_dm_ops.xfs_send_unmount)(vfsp,vp,right,mode,rval,fl) + + +/* + * Prototypes and functions for the Quota Management subsystem. 
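+ * Each operation is a function pointer in xfs_qmops_t; the XFS_QM_* macros
+ * below dispatch through the mount's m_qm_ops vector.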
+ */ + +struct xfs_dquot; +struct xfs_dqtrxops; +struct xfs_quotainfo; + +typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *); +typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint); +typedef int (*xfs_qmunmount_t)(struct xfs_mount *); +typedef void (*xfs_qmdone_t)(struct xfs_mount *); +typedef void (*xfs_dqrele_t)(struct xfs_dquot *); +typedef int (*xfs_dqattach_t)(struct xfs_inode *, uint); +typedef void (*xfs_dqdetach_t)(struct xfs_inode *); +typedef int (*xfs_dqpurgeall_t)(struct xfs_mount *, uint); +typedef int (*xfs_dqvopalloc_t)(struct xfs_mount *, + struct xfs_inode *, uid_t, gid_t, uint, + struct xfs_dquot **, struct xfs_dquot **); +typedef void (*xfs_dqvopcreate_t)(struct xfs_trans *, struct xfs_inode *, + struct xfs_dquot *, struct xfs_dquot *); +typedef int (*xfs_dqvoprename_t)(struct xfs_inode **); +typedef struct xfs_dquot * (*xfs_dqvopchown_t)( + struct xfs_trans *, struct xfs_inode *, + struct xfs_dquot **, struct xfs_dquot *); +typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *, + struct xfs_dquot *, struct xfs_dquot *, uint); + +typedef struct xfs_qmops { + xfs_qminit_t xfs_qminit; + xfs_qmdone_t xfs_qmdone; + xfs_qmmount_t xfs_qmmount; + xfs_qmunmount_t xfs_qmunmount; + xfs_dqrele_t xfs_dqrele; + xfs_dqattach_t xfs_dqattach; + xfs_dqdetach_t xfs_dqdetach; + xfs_dqpurgeall_t xfs_dqpurgeall; + xfs_dqvopalloc_t xfs_dqvopalloc; + xfs_dqvopcreate_t xfs_dqvopcreate; + xfs_dqvoprename_t xfs_dqvoprename; + xfs_dqvopchown_t xfs_dqvopchown; + xfs_dqvopchownresv_t xfs_dqvopchownresv; + struct xfs_dqtrxops *xfs_dqtrxops; +} xfs_qmops_t; + +#define XFS_QM_INIT(mp, mnt, fl) \ + (*(mp)->m_qm_ops.xfs_qminit)(mp, mnt, fl) +#define XFS_QM_MOUNT(mp, mnt, fl) \ + (*(mp)->m_qm_ops.xfs_qmmount)(mp, mnt, fl) +#define XFS_QM_UNMOUNT(mp) \ + (*(mp)->m_qm_ops.xfs_qmunmount)(mp) +#define XFS_QM_DONE(mp) \ + (*(mp)->m_qm_ops.xfs_qmdone)(mp) +#define XFS_QM_DQRELE(mp, dq) \ + (*(mp)->m_qm_ops.xfs_dqrele)(dq) +#define XFS_QM_DQATTACH(mp, ip, fl) \ + (*(mp)->m_qm_ops.xfs_dqattach)(ip, fl) +#define XFS_QM_DQDETACH(mp, ip) \ + (*(mp)->m_qm_ops.xfs_dqdetach)(ip) +#define XFS_QM_DQPURGEALL(mp, fl) \ + (*(mp)->m_qm_ops.xfs_dqpurgeall)(mp, fl) +#define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, fl, dq1, dq2) \ + (*(mp)->m_qm_ops.xfs_dqvopalloc)(mp, ip, uid, gid, fl, dq1, dq2) +#define XFS_QM_DQVOPCREATE(mp, tp, ip, dq1, dq2) \ + (*(mp)->m_qm_ops.xfs_dqvopcreate)(tp, ip, dq1, dq2) +#define XFS_QM_DQVOPRENAME(mp, ip) \ + (*(mp)->m_qm_ops.xfs_dqvoprename)(ip) +#define XFS_QM_DQVOPCHOWN(mp, tp, ip, dqp, dq) \ + (*(mp)->m_qm_ops.xfs_dqvopchown)(tp, ip, dqp, dq) +#define XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, dq1, dq2, fl) \ + (*(mp)->m_qm_ops.xfs_dqvopchownresv)(tp, ip, dq1, dq2, fl) + + +/* + * Prototypes and functions for I/O core modularization. 
+ */ + +typedef int (*xfs_ioinit_t)(struct vfs *, + struct xfs_mount_args *, int); +typedef int (*xfs_bmapi_t)(struct xfs_trans *, void *, + xfs_fileoff_t, xfs_filblks_t, int, + xfs_fsblock_t *, xfs_extlen_t, + struct xfs_bmbt_irec *, int *, + struct xfs_bmap_free *); +typedef int (*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *); +typedef int (*xfs_iomap_write_direct_t)( + void *, loff_t, size_t, int, + struct xfs_bmbt_irec *, int *, int); +typedef int (*xfs_iomap_write_delay_t)( + void *, loff_t, size_t, int, + struct xfs_bmbt_irec *, int *); +typedef int (*xfs_iomap_write_allocate_t)( + void *, struct xfs_bmbt_irec *, int *); +typedef int (*xfs_iomap_write_unwritten_t)( + void *, loff_t, size_t); +typedef uint (*xfs_lck_map_shared_t)(void *); +typedef void (*xfs_lock_t)(void *, uint); +typedef void (*xfs_lock_demote_t)(void *, uint); +typedef int (*xfs_lock_nowait_t)(void *, uint); +typedef void (*xfs_unlk_t)(void *, unsigned int); +typedef xfs_fsize_t (*xfs_size_t)(void *); +typedef xfs_fsize_t (*xfs_iodone_t)(struct vfs *); + +typedef struct xfs_ioops { + xfs_ioinit_t xfs_ioinit; + xfs_bmapi_t xfs_bmapi_func; + xfs_bmap_eof_t xfs_bmap_eof_func; + xfs_iomap_write_direct_t xfs_iomap_write_direct; + xfs_iomap_write_delay_t xfs_iomap_write_delay; + xfs_iomap_write_allocate_t xfs_iomap_write_allocate; + xfs_iomap_write_unwritten_t xfs_iomap_write_unwritten; + xfs_lock_t xfs_ilock; + xfs_lck_map_shared_t xfs_lck_map_shared; + xfs_lock_demote_t xfs_ilock_demote; + xfs_lock_nowait_t xfs_ilock_nowait; + xfs_unlk_t xfs_unlock; + xfs_size_t xfs_size_func; + xfs_iodone_t xfs_iodone; +} xfs_ioops_t; + +#define XFS_IOINIT(vfsp, args, flags) \ + (*(mp)->m_io_ops.xfs_ioinit)(vfsp, args, flags) +#define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist) \ + (*(mp)->m_io_ops.xfs_bmapi_func) \ + (trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist) +#define XFS_BMAP_EOF(mp, io, endoff, whichfork, eof) \ + (*(mp)->m_io_ops.xfs_bmap_eof_func) \ + ((io)->io_obj, endoff, whichfork, eof) +#define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\ + (*(mp)->m_io_ops.xfs_iomap_write_direct) \ + ((io)->io_obj, offset, count, flags, mval, nmap, found) +#define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \ + (*(mp)->m_io_ops.xfs_iomap_write_delay) \ + ((io)->io_obj, offset, count, flags, mval, nmap) +#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \ + (*(mp)->m_io_ops.xfs_iomap_write_allocate) \ + ((io)->io_obj, mval, nmap) +#define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \ + (*(mp)->m_io_ops.xfs_iomap_write_unwritten) \ + ((io)->io_obj, offset, count) +#define XFS_LCK_MAP_SHARED(mp, io) \ + (*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj) +#define XFS_ILOCK(mp, io, mode) \ + (*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode) +#define XFS_ILOCK_NOWAIT(mp, io, mode) \ + (*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode) +#define XFS_IUNLOCK(mp, io, mode) \ + (*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode) +#define XFS_ILOCK_DEMOTE(mp, io, mode) \ + (*(mp)->m_io_ops.xfs_ilock_demote)((io)->io_obj, mode) +#define XFS_SIZE(mp, io) \ + (*(mp)->m_io_ops.xfs_size_func)((io)->io_obj) +#define XFS_IODONE(vfsp) \ + (*(mp)->m_io_ops.xfs_iodone)(vfsp) + + +typedef struct xfs_mount { + bhv_desc_t m_bhv; /* vfs xfs behavior */ + xfs_tid_t m_tid; /* next unused tid for fs */ + AIL_LOCK_T m_ail_lock; /* fs AIL mutex */ + xfs_ail_entry_t m_ail; /* fs active log item list */ + uint m_ail_gen; /* fs AIL generation count */ + xfs_sb_t m_sb; /* copy of fs 
superblock */ + lock_t m_sb_lock; /* sb counter mutex */ + struct xfs_buf *m_sb_bp; /* buffer for superblock */ + char *m_fsname; /* filesystem name */ + int m_fsname_len; /* strlen of fs name */ + int m_bsize; /* fs logical block size */ + xfs_agnumber_t m_agfrotor; /* last ag where space found */ + xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ + lock_t m_agirotor_lock;/* .. and lock protecting it */ + xfs_agnumber_t m_maxagi; /* highest inode alloc group */ + int m_ihsize; /* size of next field */ + struct xfs_ihash *m_ihash; /* fs private inode hash table*/ + struct xfs_inode *m_inodes; /* active inode list */ + struct list_head m_del_inodes; /* inodes to reclaim */ + mutex_t m_ilock; /* inode list mutex */ + uint m_ireclaims; /* count of calls to reclaim*/ + uint m_readio_log; /* min read size log bytes */ + uint m_readio_blocks; /* min read size blocks */ + uint m_writeio_log; /* min write size log bytes */ + uint m_writeio_blocks; /* min write size blocks */ + void *m_log; /* log specific stuff */ + int m_logbufs; /* number of log buffers */ + int m_logbsize; /* size of each log buffer */ + uint m_rsumlevels; /* rt summary levels */ + uint m_rsumsize; /* size of rt summary, bytes */ + struct xfs_inode *m_rbmip; /* pointer to bitmap inode */ + struct xfs_inode *m_rsumip; /* pointer to summary inode */ + struct xfs_inode *m_rootip; /* pointer to root directory */ + struct xfs_quotainfo *m_quotainfo; /* disk quota information */ + xfs_buftarg_t *m_ddev_targp; /* saves taking the address */ + xfs_buftarg_t *m_logdev_targp;/* ptr to log device */ + xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */ +#define m_dev m_ddev_targp->pbr_dev + __uint8_t m_dircook_elog; /* log d-cookie entry bits */ + __uint8_t m_blkbit_log; /* blocklog + NBBY */ + __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ + __uint8_t m_agno_log; /* log #ag's */ + __uint8_t m_agino_log; /* #bits for agino in inum */ + __uint8_t m_nreadaheads; /* #readahead buffers */ + __uint16_t m_inode_cluster_size;/* min inode buf size */ + uint m_blockmask; /* sb_blocksize-1 */ + uint m_blockwsize; /* sb_blocksize in words */ + uint m_blockwmask; /* blockwsize-1 */ + uint m_alloc_mxr[2]; /* XFS_ALLOC_BLOCK_MAXRECS */ + uint m_alloc_mnr[2]; /* XFS_ALLOC_BLOCK_MINRECS */ + uint m_bmap_dmxr[2]; /* XFS_BMAP_BLOCK_DMAXRECS */ + uint m_bmap_dmnr[2]; /* XFS_BMAP_BLOCK_DMINRECS */ + uint m_inobt_mxr[2]; /* XFS_INOBT_BLOCK_MAXRECS */ + uint m_inobt_mnr[2]; /* XFS_INOBT_BLOCK_MINRECS */ + uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */ + uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */ + uint m_in_maxlevels; /* XFS_IN_MAXLEVELS */ + struct xfs_perag *m_perag; /* per-ag accounting info */ + struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */ + sema_t m_growlock; /* growfs mutex */ + int m_fixedfsid[2]; /* unchanged for life of FS */ + uint m_dmevmask; /* DMI events for this FS */ + uint m_flags; /* global mount flags */ + uint m_attroffset; /* inode attribute offset */ + uint m_dir_node_ents; /* #entries in a dir danode */ + uint m_attr_node_ents; /* #entries in attr danode */ + int m_ialloc_inos; /* inodes in inode allocation */ + int m_ialloc_blks; /* blocks in inode allocation */ + int m_litino; /* size of inode union area */ + int m_inoalign_mask;/* mask sb_inoalignmt if used */ + uint m_qflags; /* quota status flags */ + xfs_trans_reservations_t m_reservations;/* precomputed res values */ + __uint64_t m_maxicount; /* maximum inode count */ + __uint64_t m_resblks; /* total reserved blocks */ + __uint64_t 
m_resblks_avail;/* available reserved blocks */ +#if XFS_BIG_FILESYSTEMS + xfs_ino_t m_inoadd; /* add value for ino64_offset */ +#endif + int m_dalign; /* stripe unit */ + int m_swidth; /* stripe width */ + int m_lstripemask; /* log stripe mask */ + int m_sinoalign; /* stripe unit inode alignmnt */ + int m_attr_magicpct;/* 37% of the blocksize */ + int m_dir_magicpct; /* 37% of the dir blocksize */ + __uint8_t m_mk_sharedro; /* mark shared ro on unmount */ + __uint8_t m_inode_quiesce;/* call quiesce on new inodes. + field governed by m_ilock */ + __uint8_t m_sectbb_log; /* sectlog - BBSHIFT */ + __uint8_t m_dirversion; /* 1 or 2 */ + xfs_dirops_t m_dirops; /* table of dir funcs */ + int m_dirblksize; /* directory block sz--bytes */ + int m_dirblkfsbs; /* directory block sz--fsbs */ + xfs_dablk_t m_dirdatablk; /* blockno of dir data v2 */ + xfs_dablk_t m_dirleafblk; /* blockno of dir non-data v2 */ + xfs_dablk_t m_dirfreeblk; /* blockno of dirfreeindex v2 */ + int m_chsize; /* size of next field */ + struct xfs_chash *m_chash; /* fs private inode per-cluster + * hash table */ + struct xfs_dmops m_dm_ops; /* vector of DMI ops */ + struct xfs_qmops m_qm_ops; /* vector of XQM ops */ + struct xfs_ioops m_io_ops; /* vector of I/O ops */ + lock_t m_freeze_lock; /* Lock for m_frozen */ + uint m_frozen; /* FS frozen for shutdown or + * snapshot */ + sv_t m_wait_unfreeze;/* waiting to unfreeze */ + atomic_t m_active_trans; /* number trans frozen */ +} xfs_mount_t; + +/* + * Flags for m_flags. + */ +#define XFS_MOUNT_WSYNC 0x00000001 /* for nfs - all metadata ops + must be synchronous except + for space allocations */ +#if XFS_BIG_FILESYSTEMS +#define XFS_MOUNT_INO64 0x00000002 +#endif + /* 0x00000004 -- currently unused */ + /* 0x00000008 -- currently unused */ +#define XFS_MOUNT_FS_SHUTDOWN 0x00000010 /* atomic stop of all filesystem + operations, typically for + disk errors in metadata */ +#define XFS_MOUNT_NOATIME 0x00000020 /* don't modify inode access + times on reads */ +#define XFS_MOUNT_RETERR 0x00000040 /* return alignment errors to + user */ +#define XFS_MOUNT_NOALIGN 0x00000080 /* turn off stripe alignment + allocations */ + /* 0x00000100 -- currently unused */ + /* 0x00000200 -- currently unused */ +#define XFS_MOUNT_NORECOVERY 0x00000400 /* no recovery - dirty fs */ +#define XFS_MOUNT_SHARED 0x00000800 /* shared mount */ +#define XFS_MOUNT_DFLT_IOSIZE 0x00001000 /* set default i/o size */ +#define XFS_MOUNT_OSYNCISOSYNC 0x00002000 /* o_sync is REALLY o_sync */ + /* osyncisdsync is now default*/ +#define XFS_MOUNT_NOUUID 0x00004000 /* ignore uuid during mount */ +#define XFS_MOUNT_32BITINODES 0x00008000 /* do not create inodes above + * 32 bits in size */ +#define XFS_MOUNT_NOLOGFLUSH 0x00010000 + +#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN) + +/* + * Default minimum read and write sizes. + */ +#define XFS_READIO_LOG_LARGE 16 +#define XFS_WRITEIO_LOG_LARGE 16 +/* + * Default allocation size + */ +#define XFS_WRITE_IO_LOG 16 + +/* + * Max and min values for UIO and mount-option defined I/O sizes; + * min value can't be less than a page. Currently unused. + */ +#define XFS_MAX_IO_LOG 16 /* 64K */ +#define XFS_MIN_IO_LOG PAGE_SHIFT + +/* + * Synchronous read and write sizes. This should be + * better for NFSv2 wsync filesystems. + */ +#define XFS_WSYNC_READIO_LOG 15 /* 32K */ +#define XFS_WSYNC_WRITEIO_LOG 14 /* 16K */ + +#define xfs_force_shutdown(m,f) \ + VFS_FORCE_SHUTDOWN((XFS_MTOVFS(m)), f, __FILE__, __LINE__) + +/* + * Flags sent to xfs_force_shutdown. 
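The *_LOG constants above keep I/O sizes as base-2 exponents, and m_flags packs mount state into single bits. A small standalone sketch of how both are typically consumed; the names and the 4K block size are assumptions, only the exponent values come from the header comments above:

#include <stdio.h>

#define MOUNT_WSYNC	0x00000001	/* bit flags, as in XFS_MOUNT_* */
#define MOUNT_NOATIME	0x00000020

#define READIO_LOG_LARGE  16		/* 1 << 16 = 64K, per the header */
#define WSYNC_READIO_LOG  15		/* 1 << 15 = 32K */

int main(void)
{
	unsigned int flags = MOUNT_WSYNC | MOUNT_NOATIME;
	int blocklog = 12;		/* assume 4K filesystem blocks */

	/* log2 form -> bytes and filesystem blocks */
	int readio_log = (flags & MOUNT_WSYNC) ? WSYNC_READIO_LOG
					       : READIO_LOG_LARGE;
	unsigned int readio_bytes  = 1u << readio_log;
	unsigned int readio_blocks = 1u << (readio_log - blocklog);

	printf("read size: %u bytes = %u blocks, noatime=%d\n",
	       readio_bytes, readio_blocks,
	       (flags & MOUNT_NOATIME) != 0);
	return 0;
}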
+ */ +#define XFS_METADATA_IO_ERROR 0x1 +#define XFS_LOG_IO_ERROR 0x2 +#define XFS_FORCE_UMOUNT 0x4 +#define XFS_CORRUPT_INCORE 0x8 /* Corrupt in-memory data structures */ +#define XFS_SHUTDOWN_REMOTE_REQ 0x10 /* Shutdown came from remote cell */ + +/* + * xflags for xfs_syncsub + */ +#define XFS_XSYNC_RELOC 0x01 + +/* + * Flags for xfs_mountfs + */ +#define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */ +#define XFS_MFSI_CLIENT 0x02 /* Is a client -- skip lots of stuff */ +#define XFS_MFSI_NOUNLINK 0x08 /* Skip unlinked inode processing in */ + /* log recovery */ + +/* + * Macros for getting from mount to vfs and back. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_MTOVFS) +struct vfs *xfs_mtovfs(xfs_mount_t *mp); +#define XFS_MTOVFS(mp) xfs_mtovfs(mp) +#else +#define XFS_MTOVFS(mp) (bhvtovfs(&(mp)->m_bhv)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BHVTOM) +xfs_mount_t *xfs_bhvtom(bhv_desc_t *bdp); +#define XFS_BHVTOM(bdp) xfs_bhvtom(bdp) +#else +#define XFS_BHVTOM(bdp) ((xfs_mount_t *)BHV_PDATA(bdp)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_VFSTOM) +xfs_mount_t *xfs_vfstom(vfs_t *vfs); +#define XFS_VFSTOM(vfs) xfs_vfstom(vfs) +#else +#define XFS_VFSTOM(vfs) \ + (XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfs), &xfs_vfsops))) +#endif + + +/* + * Moved here from xfs_ag.h to avoid reordering header files + */ + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DADDR_TO_AGNO) +xfs_agnumber_t xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d); +#define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d) +#else + +static inline xfs_agnumber_t XFS_DADDR_TO_AGNO(xfs_mount_t *mp, xfs_daddr_t d) +{ + d = XFS_BB_TO_FSBT(mp, d); + do_div(d, mp->m_sb.sb_agblocks); + return (xfs_agnumber_t) d; +} + +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DADDR_TO_AGBNO) +xfs_agblock_t xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d); +#define XFS_DADDR_TO_AGBNO(mp,d) xfs_daddr_to_agbno(mp,d) +#else + +static inline xfs_agblock_t XFS_DADDR_TO_AGBNO(xfs_mount_t *mp, xfs_daddr_t d) +{ + d = XFS_BB_TO_FSBT(mp, d); + return (xfs_agblock_t) do_div(d, mp->m_sb.sb_agblocks); +} + +#endif + +/* + * This structure is for use by the xfs_mod_incore_sb_batch() routine. 
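XFS_DADDR_TO_AGNO() and XFS_DADDR_TO_AGBNO() above rely on the kernel's do_div(), which divides a 64-bit value in place and returns the remainder: the quotient becomes the allocation group number, the remainder the block offset within that group. A userspace sketch of the same arithmetic; div64_in_place and the 2048-block AG size are invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's do_div(): divide the 64-bit
 * value in place and hand back the remainder. */
static uint32_t div64_in_place(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);
	*n /= base;
	return rem;
}

int main(void)
{
	uint32_t agblocks = 2048;	/* assumed fs blocks per AG */
	uint64_t fsbno = 5000;		/* already converted from daddr */

	uint64_t q = fsbno;
	uint32_t agbno = div64_in_place(&q, agblocks);

	/* quotient -> AG number, remainder -> block within that AG */
	printf("fsb %llu -> agno %llu, agbno %u\n",
	       (unsigned long long)fsbno, (unsigned long long)q, agbno);
	return 0;
}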
+ */ +typedef struct xfs_mod_sb { + xfs_sb_field_t msb_field; /* Field to modify, see below */ + int msb_delta; /* Change to make to specified field */ +} xfs_mod_sb_t; + +#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock), PINOD) +#define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock)) +#define XFS_SB_LOCK(mp) mutex_spinlock(&(mp)->m_sb_lock) +#define XFS_SB_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_sb_lock,(s)) + +extern xfs_mount_t *xfs_mount_init(void); +extern void xfs_mod_sb(xfs_trans_t *, __int64_t); +extern void xfs_mount_free(xfs_mount_t *mp, int remove_bhv); +extern int xfs_mountfs(struct vfs *, xfs_mount_t *mp, dev_t, int); + +extern int xfs_unmountfs(xfs_mount_t *, struct cred *); +extern void xfs_unmountfs_close(xfs_mount_t *, struct cred *); +extern int xfs_unmountfs_writesb(xfs_mount_t *); +extern int xfs_unmount_flush(xfs_mount_t *, int); +extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int, int); +extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, + uint, int); +extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); +extern int xfs_readsb(xfs_mount_t *mp); +extern void xfs_freesb(xfs_mount_t *); +extern void xfs_do_force_shutdown(bhv_desc_t *, int, char *, int); +extern int xfs_syncsub(xfs_mount_t *, int, int, int *); +extern void xfs_initialize_perag(xfs_mount_t *, int); +extern void xfs_xlatesb(void *, struct xfs_sb *, int, xfs_arch_t, + __int64_t); + +/* + * Flags for freeze operations. + */ +#define XFS_FREEZE_WRITE 1 +#define XFS_FREEZE_TRANS 2 + +extern void xfs_start_freeze(xfs_mount_t *, int); +extern void xfs_finish_freeze(xfs_mount_t *); +extern void xfs_check_frozen(xfs_mount_t *, bhv_desc_t *, int); + +extern struct vfsops xfs_vfsops; +extern struct vnodeops xfs_vnodeops; + +extern struct xfs_dmops xfs_dmcore_xfs; +extern struct xfs_qmops xfs_qmcore_xfs; +extern struct xfs_ioops xfs_iocore_xfs; + +extern int xfs_init(void); +extern void xfs_cleanup(void); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_MOUNT_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_qmops.c linux.22-ac2/fs/xfs/xfs_qmops.c --- linux.vanilla/fs/xfs/xfs_qmops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_qmops.c 2003-06-29 16:09:21.000000000 +0100 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#include "xfs.h" + +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" + + +#ifndef CONFIG_XFS_QUOTA +STATIC struct xfs_dquot * +xfs_dqvopchown_default( + struct xfs_trans *tp, + struct xfs_inode *ip, + struct xfs_dquot **dqp, + struct xfs_dquot *dq) +{ + return NULL; +} + +xfs_qmops_t xfs_qmcore_xfs = { + .xfs_qminit = (xfs_qminit_t) fs_noerr, + .xfs_qmdone = (xfs_qmdone_t) fs_noerr, + .xfs_qmmount = (xfs_qmmount_t) fs_noerr, + .xfs_qmunmount = (xfs_qmunmount_t) fs_noerr, + .xfs_dqrele = (xfs_dqrele_t) fs_noerr, + .xfs_dqattach = (xfs_dqattach_t) fs_noerr, + .xfs_dqdetach = (xfs_dqdetach_t) fs_noerr, + .xfs_dqpurgeall = (xfs_dqpurgeall_t) fs_noerr, + .xfs_dqvopalloc = (xfs_dqvopalloc_t) fs_noerr, + .xfs_dqvopcreate = (xfs_dqvopcreate_t) fs_noerr, + .xfs_dqvoprename = (xfs_dqvoprename_t) fs_noerr, + .xfs_dqvopchown = xfs_dqvopchown_default, + .xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr, +}; +#endif /* CONFIG_XFS_QUOTA */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_quota.h linux.22-ac2/fs/xfs/xfs_quota.h --- linux.vanilla/fs/xfs/xfs_quota.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_quota.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_QUOTA_H__ +#define __XFS_QUOTA_H__ + +/* + * The ondisk form of a dquot structure. + */ +#define XFS_DQUOT_MAGIC 0x4451 /* 'DQ' */ +#define XFS_DQUOT_VERSION (u_int8_t)0x01 /* latest version number */ + +/* + * uid_t and gid_t are hard-coded to 32 bits in the inode. + * Hence, an 'id' in a dquot is 32 bits.. + */ +typedef __int32_t xfs_dqid_t; + +/* + * Eventhough users may not have quota limits occupying all 64-bits, + * they may need 64-bit accounting. 
Hence, 64-bit quota-counters, + * and quota-limits. This is a waste in the common case, but hey ... + */ +typedef __uint64_t xfs_qcnt_t; +typedef __uint16_t xfs_qwarncnt_t; + +/* + * This is the main portion of the on-disk representation of quota + * information for a user. This is the q_core of the xfs_dquot_t that + * is kept in kernel memory. We pad this with some more expansion room + * to construct the on disk structure. + */ +typedef struct xfs_disk_dquot { +/*16*/ u_int16_t d_magic; /* dquot magic = XFS_DQUOT_MAGIC */ +/*8 */ u_int8_t d_version; /* dquot version */ +/*8 */ u_int8_t d_flags; /* XFS_DQ_USER/PROJ/GROUP */ +/*32*/ xfs_dqid_t d_id; /* user,project,group id */ +/*64*/ xfs_qcnt_t d_blk_hardlimit;/* absolute limit on disk blks */ +/*64*/ xfs_qcnt_t d_blk_softlimit;/* preferred limit on disk blks */ +/*64*/ xfs_qcnt_t d_ino_hardlimit;/* maximum # allocated inodes */ +/*64*/ xfs_qcnt_t d_ino_softlimit;/* preferred inode limit */ +/*64*/ xfs_qcnt_t d_bcount; /* disk blocks owned by the user */ +/*64*/ xfs_qcnt_t d_icount; /* inodes owned by the user */ +/*32*/ __int32_t d_itimer; /* zero if within inode limits if not, + this is when we refuse service */ +/*32*/ __int32_t d_btimer; /* similar to above; for disk blocks */ +/*16*/ xfs_qwarncnt_t d_iwarns; /* warnings issued wrt num inodes */ +/*16*/ xfs_qwarncnt_t d_bwarns; /* warnings issued wrt disk blocks */ +/*32*/ __int32_t d_pad0; /* 64 bit align */ +/*64*/ xfs_qcnt_t d_rtb_hardlimit;/* absolute limit on realtime blks */ +/*64*/ xfs_qcnt_t d_rtb_softlimit;/* preferred limit on RT disk blks */ +/*64*/ xfs_qcnt_t d_rtbcount; /* realtime blocks owned */ +/*32*/ __int32_t d_rtbtimer; /* similar to above; for RT disk blocks */ +/*16*/ xfs_qwarncnt_t d_rtbwarns; /* warnings issued wrt RT disk blocks */ +/*16*/ __uint16_t d_pad; +} xfs_disk_dquot_t; + +/* + * This is what goes on disk. This is separated from the xfs_disk_dquot because + * carrying the unnecessary padding would be a waste of memory. + */ +typedef struct xfs_dqblk { + xfs_disk_dquot_t dd_diskdq; /* portion that lives incore as well */ + char dd_fill[32]; /* filling for posterity */ +} xfs_dqblk_t; + +/* + * flags for q_flags field in the dquot. + */ +#define XFS_DQ_USER 0x0001 /* a user quota */ +/* #define XFS_DQ_PROJ 0x0002 -- project quota (IRIX) */ +#define XFS_DQ_GROUP 0x0004 /* a group quota */ +#define XFS_DQ_FLOCKED 0x0008 /* flush lock taken */ +#define XFS_DQ_DIRTY 0x0010 /* dquot is dirty */ +#define XFS_DQ_WANT 0x0020 /* for lookup/reclaim race */ +#define XFS_DQ_INACTIVE 0x0040 /* dq off mplist & hashlist */ +#define XFS_DQ_MARKER 0x0080 /* sentinel */ + +/* + * In the worst case, when both user and group quotas are on, + * we can have a max of three dquots changing in a single transaction. + */ +#define XFS_DQUOT_LOGRES(mp) (sizeof(xfs_disk_dquot_t) * 3) + + +/* + * These are the structures used to lay out dquots and quotaoff + * records on the log. Quite similar to those of inodes. + */ + +/* + * log format struct for dquots. + * The first two fields must be the type and size fitting into + * 32 bits : log_recovery code assumes that. + */ +typedef struct xfs_dq_logformat { + __uint16_t qlf_type; /* dquot log item type */ + __uint16_t qlf_size; /* size of this item */ + xfs_dqid_t qlf_id; /* usr/grp id number : 32 bits */ + __int64_t qlf_blkno; /* blkno of dquot buffer */ + __int32_t qlf_len; /* len of dquot buffer */ + __uint32_t qlf_boffset; /* off of dquot in buffer */ +} xfs_dq_logformat_t; + +/* + * log format struct for QUOTAOFF records. 
+ * The first two fields must be the type and size fitting into + * 32 bits : log_recovery code assumes that. + * We write two LI_QUOTAOFF logitems per quotaoff, the last one keeps a pointer + * to the first and ensures that the first logitem is taken out of the AIL + * only when the last one is securely committed. + */ +typedef struct xfs_qoff_logformat { + unsigned short qf_type; /* quotaoff log item type */ + unsigned short qf_size; /* size of this item */ + unsigned int qf_flags; /* USR and/or GRP */ + char qf_pad[12]; /* padding for future */ +} xfs_qoff_logformat_t; + + +/* + * Disk quotas status in m_qflags, and also sb_qflags. 16 bits. + */ +#define XFS_UQUOTA_ACCT 0x0001 /* user quota accounting ON */ +#define XFS_UQUOTA_ENFD 0x0002 /* user quota limits enforced */ +#define XFS_UQUOTA_CHKD 0x0004 /* quotacheck run on usr quotas */ +#define XFS_PQUOTA_ACCT 0x0008 /* (IRIX) project quota accounting ON */ +#define XFS_GQUOTA_ENFD 0x0010 /* group quota limits enforced */ +#define XFS_GQUOTA_CHKD 0x0020 /* quotacheck run on grp quotas */ +#define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */ + +/* + * Incore only flags for quotaoff - these bits get cleared when quota(s) + * are in the process of getting turned off. These flags are in m_qflags but + * never in sb_qflags. + */ +#define XFS_UQUOTA_ACTIVE 0x0080 /* uquotas are being turned off */ +#define XFS_GQUOTA_ACTIVE 0x0100 /* gquotas are being turned off */ + +/* + * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees + * quota will be not be switched off as long as that inode lock is held. + */ +#define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \ + XFS_GQUOTA_ACTIVE)) +#define XFS_IS_UQUOTA_ON(mp) ((mp)->m_qflags & XFS_UQUOTA_ACTIVE) +#define XFS_IS_GQUOTA_ON(mp) ((mp)->m_qflags & XFS_GQUOTA_ACTIVE) + +/* + * Flags to tell various functions what to do. Not all of these are meaningful + * to a single function. None of these XFS_QMOPT_* flags are meant to have + * persistent values (ie. their values can and will change between versions) + */ +#define XFS_QMOPT_DQLOCK 0x0000001 /* dqlock */ +#define XFS_QMOPT_DQALLOC 0x0000002 /* alloc dquot ondisk if needed */ +#define XFS_QMOPT_UQUOTA 0x0000004 /* user dquot requested */ +#define XFS_QMOPT_GQUOTA 0x0000008 /* group dquot requested */ +#define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */ +#define XFS_QMOPT_DQSUSER 0x0000020 /* don't cache super users dquot */ +#define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */ +#define XFS_QMOPT_QUOTAOFF 0x0000080 /* quotas are being turned off */ +#define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */ +#define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */ +#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if necessary */ +#define XFS_QMOPT_ILOCKED 0x0000800 /* inode is already locked (excl) */ +#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot, if damaged. */ + +/* + * flags to xfs_trans_mod_dquot to indicate which field needs to be + * modified. + */ +#define XFS_QMOPT_RES_REGBLKS 0x0010000 +#define XFS_QMOPT_RES_RTBLKS 0x0020000 +#define XFS_QMOPT_BCOUNT 0x0040000 +#define XFS_QMOPT_ICOUNT 0x0080000 +#define XFS_QMOPT_RTBCOUNT 0x0100000 +#define XFS_QMOPT_DELBCOUNT 0x0200000 +#define XFS_QMOPT_DELRTBCOUNT 0x0400000 +#define XFS_QMOPT_RES_INOS 0x0800000 + +/* + * flags for dqflush and dqflush_all. 
+ */ +#define XFS_QMOPT_SYNC 0x1000000 +#define XFS_QMOPT_ASYNC 0x2000000 +#define XFS_QMOPT_DELWRI 0x4000000 + +/* + * flags for dqalloc. + */ +#define XFS_QMOPT_INHERIT 0x8000000 + +/* + * flags to xfs_trans_mod_dquot. + */ +#define XFS_TRANS_DQ_RES_BLKS XFS_QMOPT_RES_REGBLKS +#define XFS_TRANS_DQ_RES_RTBLKS XFS_QMOPT_RES_RTBLKS +#define XFS_TRANS_DQ_RES_INOS XFS_QMOPT_RES_INOS +#define XFS_TRANS_DQ_BCOUNT XFS_QMOPT_BCOUNT +#define XFS_TRANS_DQ_DELBCOUNT XFS_QMOPT_DELBCOUNT +#define XFS_TRANS_DQ_ICOUNT XFS_QMOPT_ICOUNT +#define XFS_TRANS_DQ_RTBCOUNT XFS_QMOPT_RTBCOUNT +#define XFS_TRANS_DQ_DELRTBCOUNT XFS_QMOPT_DELRTBCOUNT + + +#define XFS_QMOPT_QUOTALL (XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA) +#define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS) + +#ifdef __KERNEL__ +/* + * This check is done typically without holding the inode lock; + * that may seem racey, but it is harmless in the context that it is used. + * The inode cannot go inactive as long a reference is kept, and + * therefore if dquot(s) were attached, they'll stay consistent. + * If, for example, the ownership of the inode changes while + * we didn't have the inode locked, the appropriate dquot(s) will be + * attached atomically. + */ +#define XFS_NOT_DQATTACHED(mp, ip) ((XFS_IS_UQUOTA_ON(mp) &&\ + (ip)->i_udquot == NULL) || \ + (XFS_IS_GQUOTA_ON(mp) && \ + (ip)->i_gdquot == NULL)) + +#define XFS_QM_NEED_QUOTACHECK(mp) ((XFS_IS_UQUOTA_ON(mp) && \ + (mp->m_sb.sb_qflags & \ + XFS_UQUOTA_CHKD) == 0) || \ + (XFS_IS_GQUOTA_ON(mp) && \ + (mp->m_sb.sb_qflags & \ + XFS_GQUOTA_CHKD) == 0)) + +#define XFS_MOUNT_QUOTA_ALL (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ + XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\ + XFS_GQUOTA_ENFD|XFS_GQUOTA_CHKD) +#define XFS_MOUNT_QUOTA_MASK (XFS_MOUNT_QUOTA_ALL | XFS_UQUOTA_ACTIVE | \ + XFS_GQUOTA_ACTIVE) + + +/* + * The structure kept inside the xfs_trans_t keep track of dquot changes + * within a transaction and apply them later. + */ +typedef struct xfs_dqtrx { + struct xfs_dquot *qt_dquot; /* the dquot this refers to */ + ulong qt_blk_res; /* blks reserved on a dquot */ + ulong qt_blk_res_used; /* blks used from the reservation */ + ulong qt_ino_res; /* inode reserved on a dquot */ + ulong qt_ino_res_used; /* inodes used from the reservation */ + long qt_bcount_delta; /* dquot blk count changes */ + long qt_delbcnt_delta; /* delayed dquot blk count changes */ + long qt_icount_delta; /* dquot inode count changes */ + ulong qt_rtblk_res; /* # blks reserved on a dquot */ + ulong qt_rtblk_res_used;/* # blks used from reservation */ + long qt_rtbcount_delta;/* dquot realtime blk changes */ + long qt_delrtb_delta; /* delayed RT blk count changes */ +} xfs_dqtrx_t; + +/* + * Dquot transaction functions, used if quota is enabled. 
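The xfs_dqtrx structure above batches per-dquot deltas inside a transaction so the dquot itself is updated once, later. A toy sketch of that accumulate-then-apply idea, with simplified fields only and none of the real locking or reservation logic:

#include <stdio.h>

struct dquot {
	long d_bcount;			/* blocks charged to this dquot */
	long d_icount;			/* inodes charged to this dquot */
};

struct dqtrx {
	struct dquot *qt_dquot;		/* dquot this entry refers to */
	long qt_bcount_delta;		/* accumulated block delta */
	long qt_icount_delta;		/* accumulated inode delta */
};

/* During the transaction: only the deltas are modified. */
static void mod_dquot(struct dqtrx *qtrx, long blks, long inos)
{
	qtrx->qt_bcount_delta += blks;
	qtrx->qt_icount_delta += inos;
}

/* At commit: fold the deltas into the dquot in one step. */
static void apply_deltas(struct dqtrx *qtrx)
{
	qtrx->qt_dquot->d_bcount += qtrx->qt_bcount_delta;
	qtrx->qt_dquot->d_icount += qtrx->qt_icount_delta;
	qtrx->qt_bcount_delta = qtrx->qt_icount_delta = 0;
}

int main(void)
{
	struct dquot dq = { .d_bcount = 100, .d_icount = 3 };
	struct dqtrx qtrx = { .qt_dquot = &dq };

	mod_dquot(&qtrx, +8, +1);	/* allocate 8 blocks, 1 inode */
	mod_dquot(&qtrx, -2, 0);	/* free 2 blocks in the same trans */
	apply_deltas(&qtrx);

	printf("bcount=%ld icount=%ld\n", dq.d_bcount, dq.d_icount);
	return 0;
}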
+ */ +typedef void (*qo_dup_dqinfo_t)(struct xfs_trans *, struct xfs_trans *); +typedef void (*qo_mod_dquot_byino_t)(struct xfs_trans *, + struct xfs_inode *, uint, long); +typedef void (*qo_free_dqinfo_t)(struct xfs_trans *); +typedef void (*qo_apply_dquot_deltas_t)(struct xfs_trans *); +typedef void (*qo_unreserve_and_mod_dquots_t)(struct xfs_trans *); +typedef int (*qo_reserve_quota_nblks_t)( + struct xfs_trans *, struct xfs_mount *, + struct xfs_inode *, long, long, uint); +typedef int (*qo_reserve_quota_bydquots_t)( + struct xfs_trans *, struct xfs_mount *, + struct xfs_dquot *, struct xfs_dquot *, + long, long, uint); +typedef struct xfs_dqtrxops { + qo_dup_dqinfo_t qo_dup_dqinfo; + qo_free_dqinfo_t qo_free_dqinfo; + qo_mod_dquot_byino_t qo_mod_dquot_byino; + qo_apply_dquot_deltas_t qo_apply_dquot_deltas; + qo_reserve_quota_nblks_t qo_reserve_quota_nblks; + qo_reserve_quota_bydquots_t qo_reserve_quota_bydquots; + qo_unreserve_and_mod_dquots_t qo_unreserve_and_mod_dquots; +} xfs_dqtrxops_t; + +#define XFS_DQTRXOP(mp, tp, op, args...) \ + ((mp)->m_qm_ops.xfs_dqtrxops ? \ + ((mp)->m_qm_ops.xfs_dqtrxops->op)(tp, ## args) : 0) + +#define XFS_TRANS_DUP_DQINFO(mp, otp, ntp) \ + XFS_DQTRXOP(mp, otp, qo_dup_dqinfo, ntp) +#define XFS_TRANS_FREE_DQINFO(mp, tp) \ + XFS_DQTRXOP(mp, tp, qo_free_dqinfo) +#define XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, field, delta) \ + XFS_DQTRXOP(mp, tp, qo_mod_dquot_byino, ip, field, delta) +#define XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp) \ + XFS_DQTRXOP(mp, tp, qo_apply_dquot_deltas) +#define XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, fl) \ + XFS_DQTRXOP(mp, tp, qo_reserve_quota_nblks, mp, ip, nblks, ninos, fl) +#define XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, fl) \ + XFS_DQTRXOP(mp, tp, qo_reserve_quota_bydquots, mp, ud, gd, nb, ni, fl) +#define XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp) \ + XFS_DQTRXOP(mp, tp, qo_unreserve_and_mod_dquots) + +#define XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, nblks) \ + XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \ + XFS_QMOPT_RES_REGBLKS) +#define XFS_TRANS_RESERVE_BLKQUOTA_FORCE(mp, tp, ip, nblks) \ + XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \ + XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES) +#define XFS_TRANS_UNRESERVE_BLKQUOTA(mp, tp, ip, nblks) \ + XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, -(nblks), 0, \ + XFS_QMOPT_RES_REGBLKS) +#define XFS_TRANS_RESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \ + XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, \ + f | XFS_QMOPT_RES_REGBLKS) +#define XFS_TRANS_UNRESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \ + XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, -(nb), -(ni), \ + f | XFS_QMOPT_RES_REGBLKS) + +extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *); + +extern struct bhv_vfsops xfs_qmops; + +extern void xfs_qm_init(void); +extern void xfs_qm_exit(void); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_QUOTA_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_rename.c linux.22-ac2/fs/xfs/xfs_rename.c --- linux.vanilla/fs/xfs/xfs_rename.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_rename.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,675 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
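XFS_DQTRXOP() above calls through m_qm_ops.xfs_dqtrxops only when that pointer is non-NULL, so a build without quota support pays a single test and gets 0 back. A compact sketch of the same guard, with invented names:

#include <stdio.h>

struct dqtrxops {
	int (*reserve)(long nblks);
};

struct qmops {
	struct dqtrxops *dqtrxops;	/* NULL when quota support is absent */
};

/* Call through the optional ops table, or report success (0) if unset. */
#define DQTRXOP(ops, op, ...) \
	((ops)->dqtrxops ? (ops)->dqtrxops->op(__VA_ARGS__) : 0)

static int reserve_blocks(long nblks)
{
	return nblks > 1000 ? -1 : 0;	/* pretend quota enforcement */
}

int main(void)
{
	struct dqtrxops real = { .reserve = reserve_blocks };
	struct qmops with_quota = { .dqtrxops = &real };
	struct qmops no_quota = { .dqtrxops = NULL };

	/* quota path rejects the oversized request, NULL path succeeds */
	printf("%d %d\n", DQTRXOP(&with_quota, reserve, 5000L),
			  DQTRXOP(&no_quota, reserve, 5000L));
	return 0;
}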
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_error.h" +#include "xfs_quota.h" +#include "xfs_rw.h" +#include "xfs_utils.h" +#include "xfs_trans_space.h" +#include "xfs_da_btree.h" +#include "xfs_dir_leaf.h" +#include "xfs_dmapi.h" + + +/* + * Given an array of up to 4 inode pointers, unlock the pointed to inodes. + * If there are fewer than 4 entries in the array, the empty entries will + * be at the end and will have NULL pointers in them. + */ +STATIC void +xfs_rename_unlock4( + xfs_inode_t **i_tab, + uint lock_mode) +{ + int i; + + xfs_iunlock(i_tab[0], lock_mode); + for (i = 1; i < 4; i++) { + if (i_tab[i] == NULL) { + break; + } + /* + * Watch out for duplicate entries in the table. + */ + if (i_tab[i] != i_tab[i-1]) { + xfs_iunlock(i_tab[i], lock_mode); + } + } +} + +#ifdef DEBUG +int xfs_rename_skip, xfs_rename_nskip; +#endif + +/* + * The following routine will acquire the locks required for a rename + * operation. The code understands the semantics of renames and will + * validate that name1 exists under dp1 & that name2 may or may not + * exist under dp2. + * + * We are renaming dp1/name1 to dp2/name2. + * + * Return ENOENT if dp1 does not exist, other lookup errors, or 0 for success. + */ +STATIC int +xfs_lock_for_rename( + xfs_inode_t *dp1, /* old (source) directory inode */ + xfs_inode_t *dp2, /* new (target) directory inode */ + vname_t *vname1,/* old entry name */ + vname_t *vname2,/* new entry name */ + xfs_inode_t **ipp1, /* inode of old entry */ + xfs_inode_t **ipp2, /* inode of new entry, if it + already exists, NULL otherwise. */ + xfs_inode_t **i_tab,/* array of inode returned, sorted */ + int *num_inodes) /* number of inodes in array */ +{ + xfs_inode_t *ip1, *ip2, *temp; + xfs_ino_t inum1, inum2; + int error; + int i, j; + uint lock_mode; + int diff_dirs = (dp1 != dp2); + + ip2 = NULL; + + /* + * First, find out the current inums of the entries so that we + * can determine the initial locking order. 
We'll have to + * sanity check stuff after all the locks have been acquired + * to see if we still have the right inodes, directories, etc. + */ + lock_mode = xfs_ilock_map_shared(dp1); + error = xfs_get_dir_entry(vname1, &ip1); + if (error) { + xfs_iunlock_map_shared(dp1, lock_mode); + return error; + } + + inum1 = ip1->i_ino; + + ASSERT(ip1); + ITRACE(ip1); + + /* + * Unlock dp1 and lock dp2 if they are different. + */ + + if (diff_dirs) { + xfs_iunlock_map_shared(dp1, lock_mode); + lock_mode = xfs_ilock_map_shared(dp2); + } + + error = xfs_dir_lookup_int(XFS_ITOBHV(dp2), lock_mode, + vname2, &inum2, &ip2); + if (error == ENOENT) { /* target does not need to exist. */ + inum2 = 0; + } else if (error) { + /* + * If dp2 and dp1 are the same, the next line unlocks dp1. + * Got it? + */ + xfs_iunlock_map_shared(dp2, lock_mode); + IRELE (ip1); + return error; + } else { + ITRACE(ip2); + } + + /* + * i_tab contains a list of pointers to inodes. We initialize + * the table here & we'll sort it. We will then use it to + * order the acquisition of the inode locks. + * + * Note that the table may contain duplicates. e.g., dp1 == dp2. + */ + i_tab[0] = dp1; + i_tab[1] = dp2; + i_tab[2] = ip1; + if (inum2 == 0) { + *num_inodes = 3; + i_tab[3] = NULL; + } else { + *num_inodes = 4; + i_tab[3] = ip2; + } + + /* + * Sort the elements via bubble sort. (Remember, there are at + * most 4 elements to sort, so this is adequate.) + */ + for (i=0; i < *num_inodes; i++) { + for (j=1; j < *num_inodes; j++) { + if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { + temp = i_tab[j]; + i_tab[j] = i_tab[j-1]; + i_tab[j-1] = temp; + } + } + } + + /* + * We have dp2 locked. If it isn't first, unlock it. + * If it is first, tell xfs_lock_inodes so it can skip it + * when locking. if dp1 == dp2, xfs_lock_inodes will skip both + * since they are equal. xfs_lock_inodes needs all these inodes + * so that it can unlock and retry if there might be a dead-lock + * potential with the log. + */ + + if (i_tab[0] == dp2 && lock_mode == XFS_ILOCK_SHARED) { +#ifdef DEBUG + xfs_rename_skip++; +#endif + xfs_lock_inodes(i_tab, *num_inodes, 1, XFS_ILOCK_SHARED); + } else { +#ifdef DEBUG + xfs_rename_nskip++; +#endif + xfs_iunlock_map_shared(dp2, lock_mode); + xfs_lock_inodes(i_tab, *num_inodes, 0, XFS_ILOCK_SHARED); + } + + /* + * Set the return value. Null out any unused entries in i_tab. + */ + *ipp1 = *ipp2 = NULL; + for (i=0; i < *num_inodes; i++) { + if (i_tab[i]->i_ino == inum1) { + *ipp1 = i_tab[i]; + } + if (i_tab[i]->i_ino == inum2) { + *ipp2 = i_tab[i]; + } + } + for (;i < 4; i++) { + i_tab[i] = NULL; + } + return 0; +} + + +int rename_which_error_return = 0; + +/* + * xfs_rename + */ +int +xfs_rename( + bhv_desc_t *src_dir_bdp, + vname_t *src_vname, + vnode_t *target_dir_vp, + vname_t *target_vname, + cred_t *credp) +{ + xfs_trans_t *tp; + xfs_inode_t *src_dp, *target_dp, *src_ip, *target_ip; + xfs_mount_t *mp; + int new_parent; /* moving to a new dir */ + int src_is_directory; /* src_name is a directory */ + int error; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + int cancel_flags; + int committed; + xfs_inode_t *inodes[4]; + int target_ip_dropped = 0; /* dropped target_ip link? 
*/ + vnode_t *src_dir_vp; + bhv_desc_t *target_dir_bdp; + int spaceres; + int target_link_zero = 0; + int num_inodes; + char *src_name = VNAME(src_vname); + char *target_name = VNAME(target_vname); + int src_namelen = VNAMELEN(src_vname); + int target_namelen = VNAMELEN(target_vname); + + src_dir_vp = BHV_TO_VNODE(src_dir_bdp); + vn_trace_entry(src_dir_vp, "xfs_rename", (inst_t *)__return_address); + vn_trace_entry(target_dir_vp, "xfs_rename", (inst_t *)__return_address); + + /* + * Find the XFS behavior descriptor for the target directory + * vnode since it was not handed to us. + */ + target_dir_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(target_dir_vp), + &xfs_vnodeops); + if (target_dir_bdp == NULL) { + return XFS_ERROR(EXDEV); + } + + src_dp = XFS_BHVTOI(src_dir_bdp); + target_dp = XFS_BHVTOI(target_dir_bdp); + mp = src_dp->i_mount; + + if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) || + DM_EVENT_ENABLED(target_dir_vp->v_vfsp, + target_dp, DM_EVENT_RENAME)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME, + src_dir_bdp, DM_RIGHT_NULL, + target_dir_bdp, DM_RIGHT_NULL, + src_name, target_name, + 0, 0, 0); + if (error) { + return error; + } + } + /* Return through std_return after this point. */ + + /* + * Lock all the participating inodes. Depending upon whether + * the target_name exists in the target directory, and + * whether the target directory is the same as the source + * directory, we can lock from 2 to 4 inodes. + * xfs_lock_for_rename() will return ENOENT if src_name + * does not exist in the source directory. + */ + tp = NULL; + error = xfs_lock_for_rename(src_dp, target_dp, src_vname, + target_vname, &src_ip, &target_ip, inodes, + &num_inodes); + + if (error) { + rename_which_error_return = __LINE__; + /* + * We have nothing locked, no inode references, and + * no transaction, so just get out. + */ + goto std_return; + } + + ASSERT(src_ip != NULL); + + if ((src_ip->i_d.di_mode & IFMT) == IFDIR) { + /* + * Check for link count overflow on target_dp + */ + if (target_ip == NULL && (src_dp != target_dp) && + target_dp->i_d.di_nlink >= XFS_MAXLINK) { + rename_which_error_return = __LINE__; + error = XFS_ERROR(EMLINK); + xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); + goto rele_return; + } + } + + new_parent = (src_dp != target_dp); + src_is_directory = ((src_ip->i_d.di_mode & IFMT) == IFDIR); + + /* + * Drop the locks on our inodes so that we can do the ancestor + * check if necessary and start the transaction. + */ + xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); + + XFS_BMAP_INIT(&free_list, &first_block); + tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen); + error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT); + if (error == ENOSPC) { + spaceres = 0; + error = xfs_trans_reserve(tp, 0, XFS_RENAME_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT); + } + if (error) { + rename_which_error_return = __LINE__; + xfs_trans_cancel(tp, 0); + goto rele_return; + } + + /* + * Attach the dquots to the inodes + */ + if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) { + xfs_trans_cancel(tp, cancel_flags); + rename_which_error_return = __LINE__; + goto rele_return; + } + + /* + * Reacquire the inode locks we dropped above. + */ + xfs_lock_inodes(inodes, num_inodes, 0, XFS_ILOCK_EXCL); + + /* + * Join all the inodes to the transaction. 
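xfs_lock_for_rename() above establishes a single lock order by sorting up to four inode pointers by inode number (duplicates allowed) before taking the locks, which is what keeps two concurrent renames from deadlocking against each other. A standalone sketch of that ordering step; struct inode here is a stub, not the kernel's:

#include <stdio.h>

struct inode { unsigned long i_ino; };

/* Sort up to four inode pointers by inode number (simple bubble sort,
 * as in xfs_lock_for_rename) so every caller locks in the same order. */
static void sort_inodes(struct inode **tab, int n)
{
	int i, j;

	for (i = 0; i < n; i++)
		for (j = 1; j < n; j++)
			if (tab[j]->i_ino < tab[j - 1]->i_ino) {
				struct inode *tmp = tab[j];
				tab[j] = tab[j - 1];
				tab[j - 1] = tmp;
			}
}

int main(void)
{
	struct inode a = { 42 }, b = { 7 }, c = { 99 };
	/* duplicates are allowed, e.g. when source dir == target dir */
	struct inode *tab[4] = { &a, &b, &c, &b };
	int i;

	sort_inodes(tab, 4);
	for (i = 0; i < 4; i++)
		printf("%lu%s", tab[i]->i_ino, i == 3 ? "\n" : " ");
	/* a locker now takes the locks in this order, skipping any
	 * entry whose inode equals the previous one */
	return 0;
}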
From this point on, + * we can rely on either trans_commit or trans_cancel to unlock + * them. Note that we need to add a vnode reference to the + * directories since trans_commit & trans_cancel will decrement + * them when they unlock the inodes. Also, we need to be careful + * not to add an inode to the transaction more than once. + */ + VN_HOLD(src_dir_vp); + xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); + if (new_parent) { + VN_HOLD(target_dir_vp); + xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); + } + if ((src_ip != src_dp) && (src_ip != target_dp)) { + xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); + } + if ((target_ip != NULL) && + (target_ip != src_ip) && + (target_ip != src_dp) && + (target_ip != target_dp)) { + xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); + } + + /* + * Set up the target. + */ + if (target_ip == NULL) { + /* + * If there's no space reservation, check the entry will + * fit before actually inserting it. + */ + if (spaceres == 0 && + (error = XFS_DIR_CANENTER(mp, tp, target_dp, target_name, + target_namelen))) { + rename_which_error_return = __LINE__; + goto error_return; + } + /* + * If target does not exist and the rename crosses + * directories, adjust the target directory link count + * to account for the ".." reference from the new entry. + */ + error = XFS_DIR_CREATENAME(mp, tp, target_dp, target_name, + target_namelen, src_ip->i_ino, + &first_block, &free_list, spaceres); + if (error == ENOSPC) { + rename_which_error_return = __LINE__; + goto error_return; + } + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + if (new_parent && src_is_directory) { + error = xfs_bumplink(tp, target_dp); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + } + } else { /* target_ip != NULL */ + + /* + * If target exists and it's a directory, check that both + * target and source are directories and that target can be + * destroyed, or that neither is a directory. + */ + if ((target_ip->i_d.di_mode & IFMT) == IFDIR) { + /* + * Make sure target dir is empty. + */ + if (!(XFS_DIR_ISEMPTY(target_ip->i_mount, target_ip)) || + (target_ip->i_d.di_nlink > 2)) { + error = XFS_ERROR(EEXIST); + rename_which_error_return = __LINE__; + goto error_return; + } + } + + /* + * Link the source inode under the target name. + * If the source inode is a directory and we are moving + * it across directories, its ".." entry will be + * inconsistent until we replace that down below. + * + * In case there is already an entry with the same + * name at the destination directory, remove it first. + */ + error = XFS_DIR_REPLACE(mp, tp, target_dp, target_name, + target_namelen, src_ip->i_ino, &first_block, + &free_list, spaceres); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + /* + * Decrement the link count on the target since the target + * dir no longer points to it. + */ + error = xfs_droplink(tp, target_ip); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return;; + } + target_ip_dropped = 1; + + if (src_is_directory) { + /* + * Drop the link from the old "." entry. + */ + error = xfs_droplink(tp, target_ip); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + } + + /* Do this test while we still hold the locks */ + target_link_zero = (target_ip)->i_d.di_nlink==0; + + } /* target_ip != NULL */ + + /* + * Remove the source. 
+ */ + if (new_parent && src_is_directory) { + + /* + * Rewrite the ".." entry to point to the new + * directory. + */ + error = XFS_DIR_REPLACE(mp, tp, src_ip, "..", 2, + target_dp->i_ino, &first_block, + &free_list, spaceres); + ASSERT(error != EEXIST); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + xfs_ichgtime(src_ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + } else { + /* + * We always want to hit the ctime on the source inode. + * We do it in the if clause above for the 'new_parent && + * src_is_directory' case, and here we get all the other + * cases. This isn't strictly required by the standards + * since the source inode isn't really being changed, + * but old unix file systems did it and some incremental + * backup programs won't work without it. + */ + xfs_ichgtime(src_ip, XFS_ICHGTIME_CHG); + } + + /* + * Adjust the link count on src_dp. This is necessary when + * renaming a directory, either within one parent when + * the target existed, or across two parent directories. + */ + if (src_is_directory && (new_parent || target_ip != NULL)) { + + /* + * Decrement link count on src_directory since the + * entry that's moved no longer points to it. + */ + error = xfs_droplink(tp, src_dp); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + } + + error = XFS_DIR_REMOVENAME(mp, tp, src_dp, src_name, src_namelen, + src_ip->i_ino, &first_block, &free_list, spaceres); + if (error) { + rename_which_error_return = __LINE__; + goto abort_return; + } + xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + /* + * Update the generation counts on all the directory inodes + * that we're modifying. + */ + src_dp->i_gen++; + xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); + + if (new_parent) { + target_dp->i_gen++; + xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); + } + + /* + * If there was a target inode, take an extra reference on + * it here so that it doesn't go to xfs_inactive() from + * within the commit. + */ + if (target_ip != NULL) { + IHOLD(target_ip); + } + + /* + * If this is a synchronous mount, make sure that the + * rename transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + /* + * Take refs. for vop_link_removed calls below. No need to worry + * about directory refs. because the caller holds them. + * + * Do holds before the xfs_bmap_finish since it might rele them down + * to zero. + */ + + if (target_ip_dropped) + IHOLD(target_ip); + IHOLD(src_ip); + + error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); + if (error) { + xfs_bmap_cancel(&free_list); + xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT)); + if (target_ip != NULL) { + IRELE(target_ip); + } + if (target_ip_dropped) { + IRELE(target_ip); + } + IRELE(src_ip); + goto std_return; + } + + /* + * trans_commit will unlock src_ip, target_ip & decrement + * the vnode references. + */ + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (target_ip != NULL) { + xfs_refcache_purge_ip(target_ip); + IRELE(target_ip); + } + /* + * Let interposed file systems know about removed links. 
+ */ + if (target_ip_dropped) { + VOP_LINK_REMOVED(XFS_ITOV(target_ip), target_dir_vp, + target_link_zero); + IRELE(target_ip); + } + + FSC_NOTIFY_NAME_CHANGED(XFS_ITOV(src_ip)); + + IRELE(src_ip); + + /* Fall through to std_return with error = 0 or errno from + * xfs_trans_commit */ +std_return: + if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_POSTRENAME) || + DM_EVENT_ENABLED(target_dir_vp->v_vfsp, + target_dp, DM_EVENT_POSTRENAME)) { + (void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME, + src_dir_bdp, DM_RIGHT_NULL, + target_dir_bdp, DM_RIGHT_NULL, + src_name, target_name, + 0, error, 0); + } + return error; + + abort_return: + cancel_flags |= XFS_TRANS_ABORT; + /* FALLTHROUGH */ + error_return: + xfs_bmap_cancel(&free_list); + xfs_trans_cancel(tp, cancel_flags); + goto std_return; + + rele_return: + IRELE(src_ip); + if (target_ip != NULL) { + IRELE(target_ip); + } + goto std_return; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_rtalloc.c linux.22-ac2/fs/xfs/xfs_rtalloc.c --- linux.vanilla/fs/xfs/xfs_rtalloc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_rtalloc.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,2471 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +/* + * Free realtime space allocation for XFS. + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_bmap.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_fsops.h" +#include "xfs_error.h" +#include "xfs_rw.h" +#include "xfs_inode_item.h" +#include "xfs_trans_space.h" + + +/* + * Prototypes for internal functions. 
+ */ + + +STATIC int xfs_rtallocate_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_extlen_t, xfs_buf_t **, xfs_fsblock_t *); +STATIC int xfs_rtany_summary(xfs_mount_t *, xfs_trans_t *, int, int, + xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, int *); +STATIC int xfs_rtcheck_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_extlen_t, int, xfs_rtblock_t *, int *); +STATIC int xfs_rtfind_back(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_rtblock_t, xfs_rtblock_t *); +STATIC int xfs_rtfind_forw(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_rtblock_t, xfs_rtblock_t *); +STATIC int xfs_rtget_summary( xfs_mount_t *, xfs_trans_t *, int, + xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, xfs_suminfo_t *); +STATIC int xfs_rtmodify_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t, + xfs_extlen_t, int); +STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int, + xfs_rtblock_t, int, xfs_buf_t **, xfs_fsblock_t *); + +/* + * Internal functions. + */ + +/* + * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set. + */ +STATIC int +xfs_lowbit32( + __uint32_t v) +{ + return ffs(v)-1; +} + +/* + * Allocate space to the bitmap or summary file, and zero it, for growfs. + */ +STATIC int /* error */ +xfs_growfs_rt_alloc( + xfs_mount_t *mp, /* file system mount point */ + xfs_extlen_t oblocks, /* old count of blocks */ + xfs_extlen_t nblocks, /* new count of blocks */ + xfs_ino_t ino) /* inode number (bitmap/summary) */ +{ + xfs_fileoff_t bno; /* block number in file */ + xfs_buf_t *bp; /* temporary buffer for zeroing */ + int cancelflags; /* flags for xfs_trans_cancel */ + int committed; /* transaction committed flag */ + xfs_daddr_t d; /* disk block address */ + int error; /* error return value */ + xfs_fsblock_t firstblock; /* first block allocated in xaction */ + xfs_bmap_free_t flist; /* list of freed blocks */ + xfs_fsblock_t fsbno; /* filesystem block for bno */ + xfs_inode_t *ip; /* pointer to incore inode */ + xfs_bmbt_irec_t map; /* block map output */ + int nmap; /* number of block maps */ + int resblks; /* space reservation */ + xfs_trans_t *tp; /* transaction pointer */ + + /* + * Allocate space to the file, as necessary. + */ + while (oblocks < nblocks) { + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC); + resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks); + cancelflags = 0; + /* + * Reserve space & log for one extent added to the file. + */ + if ((error = xfs_trans_reserve(tp, resblks, + XFS_GROWRTALLOC_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_DEFAULT_PERM_LOG_COUNT))) + goto error_exit; + cancelflags = XFS_TRANS_RELEASE_LOG_RES; + /* + * Lock the inode. + */ + if ((error = xfs_trans_iget(mp, tp, ino, XFS_ILOCK_EXCL, &ip))) + goto error_exit; + XFS_BMAP_INIT(&flist, &firstblock); + /* + * Allocate blocks to the bitmap file. + */ + nmap = 1; + cancelflags |= XFS_TRANS_ABORT; + error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks, + XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock, + resblks, &map, &nmap, &flist); + if (!error && nmap < 1) + error = XFS_ERROR(ENOSPC); + if (error) + goto error_exit; + /* + * Free any blocks freed up in the transaction, then commit. + */ + error = xfs_bmap_finish(&tp, &flist, firstblock, &committed); + if (error) + goto error_exit; + xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + /* + * Now we need to clear the allocated blocks. + * Do this one block per transaction, to keep it simple. 
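xfs_lowbit32() above is just ffs() shifted to a 0-based index, with -1 for a zero argument. A tiny standalone check of that behaviour:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Same idea as xfs_lowbit32(): index of the lowest set bit,
 * or -1 when no bit is set (ffs() returns 0 for 0). */
static int lowbit32(unsigned int v)
{
	return ffs((int)v) - 1;
}

int main(void)
{
	printf("%d %d %d\n", lowbit32(0), lowbit32(8), lowbit32(0xF0));
	/* prints: -1 3 4 */
	return 0;
}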
+ */ + cancelflags = 0; + for (bno = map.br_startoff, fsbno = map.br_startblock; + bno < map.br_startoff + map.br_blockcount; + bno++, fsbno++) { + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO); + /* + * Reserve log for one block zeroing. + */ + if ((error = xfs_trans_reserve(tp, 0, + XFS_GROWRTZERO_LOG_RES(mp), 0, 0, 0))) + goto error_exit; + /* + * Lock the bitmap inode. + */ + if ((error = xfs_trans_iget(mp, tp, ino, XFS_ILOCK_EXCL, + &ip))) + goto error_exit; + /* + * Get a buffer for the block. + */ + d = XFS_FSB_TO_DADDR(mp, fsbno); + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, + mp->m_bsize, 0); + if (bp == NULL) { + error = XFS_ERROR(EIO); + goto error_exit; + } + memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize); + xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); + /* + * Commit the transaction. + */ + xfs_trans_commit(tp, 0, NULL); + } + /* + * Go on to the next extent, if any. + */ + oblocks = map.br_startoff + map.br_blockcount; + } + return 0; +error_exit: + xfs_trans_cancel(tp, cancelflags); + return error; +} + +/* + * Attempt to allocate an extent minlen<=len<=maxlen starting from + * bitmap block bbno. If we don't get maxlen then use prod to trim + * the length, if given. Returns error; returns starting block in *rtblock. + * The lengths are all in rtextents. + */ +STATIC int /* error */ +xfs_rtallocate_extent_block( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bbno, /* bitmap block number */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_rtblock_t *nextp, /* out: next block to try */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock) /* out: start block allocated */ +{ + xfs_rtblock_t besti; /* best rtblock found so far */ + xfs_rtblock_t bestlen; /* best length found so far */ + xfs_rtblock_t end; /* last rtblock in chunk */ + int error; /* error value */ + xfs_rtblock_t i; /* current rtblock trying */ + xfs_rtblock_t next; /* next rtblock to try */ + int stat; /* status from internal calls */ + + /* + * Loop over all the extents starting in this bitmap block, + * looking for one that's long enough. + */ + for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0, + end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1; + i <= end; + i++) { + /* + * See if there's a free extent of maxlen starting at i. + * If it's not so then next will contain the first non-free. + */ + error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat); + if (error) { + return error; + } + if (stat) { + /* + * i for maxlen is all free, allocate and return that. + */ + error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp, + rsb); + if (error) { + return error; + } + *len = maxlen; + *rtblock = i; + return 0; + } + /* + * In the case where we have a variable-sized allocation + * request, figure out how big this free piece is, + * and if it's big enough for the minimum, and the best + * so far, remember it. + */ + if (minlen < maxlen) { + xfs_rtblock_t thislen; /* this extent size */ + + thislen = next - i; + if (thislen >= minlen && thislen > bestlen) { + besti = i; + bestlen = thislen; + } + } + /* + * If not done yet, find the start of the next free space. 
+ */ + if (next < end) { + error = xfs_rtfind_forw(mp, tp, next, end, &i); + if (error) { + return error; + } + } else + break; + } + /* + * Searched the whole thing & didn't find a maxlen free extent. + */ + if (minlen < maxlen && besti != -1) { + xfs_extlen_t p; /* amount to trim length by */ + + /* + * If size should be a multiple of prod, make that so. + */ + if (prod > 1 && (p = do_mod(bestlen, prod))) + bestlen -= p; + /* + * Allocate besti for bestlen & return that. + */ + error = xfs_rtallocate_range(mp, tp, besti, bestlen, rbpp, rsb); + if (error) { + return error; + } + *len = bestlen; + *rtblock = besti; + return 0; + } + /* + * Allocation failed. Set *nextp to the next block to try. + */ + *nextp = next; + *rtblock = NULLRTBLOCK; + return 0; +} + +/* + * Allocate an extent of length minlen<=len<=maxlen, starting at block + * bno. If we don't get maxlen then use prod to trim the length, if given. + * Returns error; returns starting block in *rtblock. + * The lengths are all in rtextents. + */ +STATIC int /* error */ +xfs_rtallocate_extent_exact( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to allocate */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock) /* out: start block allocated */ +{ + int error; /* error value */ + xfs_extlen_t i; /* extent length trimmed due to prod */ + int isfree; /* extent is free */ + xfs_rtblock_t next; /* next block to try (dummy) */ + + ASSERT(minlen % prod == 0 && maxlen % prod == 0); + /* + * Check if the range in question (for maxlen) is free. + */ + error = xfs_rtcheck_range(mp, tp, bno, maxlen, 1, &next, &isfree); + if (error) { + return error; + } + if (isfree) { + /* + * If it is, allocate it and return success. + */ + error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb); + if (error) { + return error; + } + *len = maxlen; + *rtblock = bno; + return 0; + } + /* + * If not, allocate what there is, if it's at least minlen. + */ + maxlen = next - bno; + if (maxlen < minlen) { + /* + * Failed, return failure status. + */ + *rtblock = NULLRTBLOCK; + return 0; + } + /* + * Trim off tail of extent, if prod is specified. + */ + if (prod > 1 && (i = maxlen % prod)) { + maxlen -= i; + if (maxlen < minlen) { + /* + * Now we can't do it, return failure status. + */ + *rtblock = NULLRTBLOCK; + return 0; + } + } + /* + * Allocate what we can and return it. + */ + error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb); + if (error) { + return error; + } + *len = maxlen; + *rtblock = bno; + return 0; +} + +/* + * Allocate an extent of length minlen<=len<=maxlen, starting as near + * to bno as possible. If we don't get maxlen then use prod to trim + * the length, if given. The lengths are all in rtextents. 
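+ *
+ * The search works outward from the bitmap block containing bno: after
+ * an exact attempt at bno fails, bitmap blocks are tried roughly in the
+ * order bbno, bbno+1, bbno-1, bbno+2, bbno-2, ..., each one screened
+ * against the summary file first, until an allocation succeeds or both
+ * ends of the bitmap have been exhausted.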
+ */ +STATIC int /* error */ +xfs_rtallocate_extent_near( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to allocate */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock) /* out: start block allocated */ +{ + int any; /* any useful extents from summary */ + xfs_rtblock_t bbno; /* bitmap block number */ + int error; /* error value */ + int i; /* bitmap block offset (loop control) */ + int j; /* secondary loop control */ + int log2len; /* log2 of minlen */ + xfs_rtblock_t n; /* next block to try */ + xfs_rtblock_t r; /* result block */ + + ASSERT(minlen % prod == 0 && maxlen % prod == 0); + /* + * If the block number given is off the end, silently set it to + * the last block. + */ + if (bno >= mp->m_sb.sb_rextents) + bno = mp->m_sb.sb_rextents - 1; + /* + * Try the exact allocation first. + */ + error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen, len, + rbpp, rsb, prod, &r); + if (error) { + return error; + } + /* + * If the exact allocation worked, return that. + */ + if (r != NULLRTBLOCK) { + *rtblock = r; + return 0; + } + bbno = XFS_BITTOBLOCK(mp, bno); + i = 0; + log2len = xfs_highbit32(minlen); + /* + * Loop over all bitmap blocks (bbno + i is current block). + */ + for (;;) { + /* + * Get summary information of extents of all useful levels + * starting in this bitmap block. + */ + error = xfs_rtany_summary(mp, tp, log2len, mp->m_rsumlevels - 1, + bbno + i, rbpp, rsb, &any); + if (error) { + return error; + } + /* + * If there are any useful extents starting here, try + * allocating one. + */ + if (any) { + /* + * On the positive side of the starting location. + */ + if (i >= 0) { + /* + * Try to allocate an extent starting in + * this block. + */ + error = xfs_rtallocate_extent_block(mp, tp, + bbno + i, minlen, maxlen, len, &n, rbpp, + rsb, prod, &r); + if (error) { + return error; + } + /* + * If it worked, return it. + */ + if (r != NULLRTBLOCK) { + *rtblock = r; + return 0; + } + } + /* + * On the negative side of the starting location. + */ + else { /* i < 0 */ + /* + * Loop backwards through the bitmap blocks from + * the starting point-1 up to where we are now. + * There should be an extent which ends in this + * bitmap block and is long enough. + */ + for (j = -1; j > i; j--) { + /* + * Grab the summary information for + * this bitmap block. + */ + error = xfs_rtany_summary(mp, tp, + log2len, mp->m_rsumlevels - 1, + bbno + j, rbpp, rsb, &any); + if (error) { + return error; + } + /* + * If there's no extent given in the + * summary that means the extent we + * found must carry over from an + * earlier block. If there is an + * extent given, we've already tried + * that allocation, don't do it again. + */ + if (any) + continue; + error = xfs_rtallocate_extent_block(mp, + tp, bbno + j, minlen, maxlen, + len, &n, rbpp, rsb, prod, &r); + if (error) { + return error; + } + /* + * If it works, return the extent. + */ + if (r != NULLRTBLOCK) { + *rtblock = r; + return 0; + } + } + /* + * There weren't intervening bitmap blocks + * with a long enough extent, or the + * allocation didn't work for some reason + * (i.e. it's a little * too short). 
+ * Try to allocate from the summary block + * that we found. + */ + error = xfs_rtallocate_extent_block(mp, tp, + bbno + i, minlen, maxlen, len, &n, rbpp, + rsb, prod, &r); + if (error) { + return error; + } + /* + * If it works, return the extent. + */ + if (r != NULLRTBLOCK) { + *rtblock = r; + return 0; + } + } + } + /* + * Loop control. If we were on the positive side, and there's + * still more blocks on the negative side, go there. + */ + if (i > 0 && (int)bbno - i >= 0) + i = -i; + /* + * If positive, and no more negative, but there are more + * positive, go there. + */ + else if (i > 0 && (int)bbno + i < mp->m_sb.sb_rbmblocks - 1) + i++; + /* + * If negative or 0 (just started), and there are positive + * blocks to go, go there. The 0 case moves to block 1. + */ + else if (i <= 0 && (int)bbno - i < mp->m_sb.sb_rbmblocks - 1) + i = 1 - i; + /* + * If negative or 0 and there are more negative blocks, + * go there. + */ + else if (i <= 0 && (int)bbno + i > 0) + i--; + /* + * Must be done. Return failure. + */ + else + break; + } + *rtblock = NULLRTBLOCK; + return 0; +} + +/* + * Allocate an extent of length minlen<=len<=maxlen, with no position + * specified. If we don't get maxlen then use prod to trim + * the length, if given. The lengths are all in rtextents. + */ +STATIC int /* error */ +xfs_rtallocate_extent_size( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock) /* out: start block allocated */ +{ + int error; /* error value */ + int i; /* bitmap block number */ + int l; /* level number (loop control) */ + xfs_rtblock_t n; /* next block to be tried */ + xfs_rtblock_t r; /* result block number */ + xfs_suminfo_t sum; /* summary information for extents */ + + ASSERT(minlen % prod == 0 && maxlen % prod == 0); + /* + * Loop over all the levels starting with maxlen. + * At each level, look at all the bitmap blocks, to see if there + * are extents starting there that are long enough (>= maxlen). + * Note, only on the initial level can the allocation fail if + * the summary says there's an extent. + */ + for (l = xfs_highbit32(maxlen); l < mp->m_rsumlevels; l++) { + /* + * Loop over all the bitmap blocks. + */ + for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) { + /* + * Get the summary for this level/block. + */ + error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, + &sum); + if (error) { + return error; + } + /* + * Nothing there, on to the next block. + */ + if (!sum) + continue; + /* + * Try allocating the extent. + */ + error = xfs_rtallocate_extent_block(mp, tp, i, maxlen, + maxlen, len, &n, rbpp, rsb, prod, &r); + if (error) { + return error; + } + /* + * If it worked, return that. + */ + if (r != NULLRTBLOCK) { + *rtblock = r; + return 0; + } + /* + * If the "next block to try" returned from the + * allocator is beyond the next bitmap block, + * skip to that bitmap block. + */ + if (XFS_BITTOBLOCK(mp, n) > i + 1) + i = XFS_BITTOBLOCK(mp, n) - 1; + } + } + /* + * Didn't find any maxlen blocks. Try smaller ones, unless + * we're asking for a fixed size extent. + */ + if (minlen > --maxlen) { + *rtblock = NULLRTBLOCK; + return 0; + } + /* + * Loop over sizes, from maxlen down to minlen. 
+ * This time, when we do the allocations, allow smaller ones + * to succeed. + */ + for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) { + /* + * Loop over all the bitmap blocks, try an allocation + * starting in that block. + */ + for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) { + /* + * Get the summary information for this level/block. + */ + error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, + &sum); + if (error) { + return error; + } + /* + * If nothing there, go on to next. + */ + if (!sum) + continue; + /* + * Try the allocation. Make sure the specified + * minlen/maxlen are in the possible range for + * this summary level. + */ + error = xfs_rtallocate_extent_block(mp, tp, i, + XFS_RTMAX(minlen, 1 << l), + XFS_RTMIN(maxlen, (1 << (l + 1)) - 1), + len, &n, rbpp, rsb, prod, &r); + if (error) { + return error; + } + /* + * If it worked, return that extent. + */ + if (r != NULLRTBLOCK) { + *rtblock = r; + return 0; + } + /* + * If the "next block to try" returned from the + * allocator is beyond the next bitmap block, + * skip to that bitmap block. + */ + if (XFS_BITTOBLOCK(mp, n) > i + 1) + i = XFS_BITTOBLOCK(mp, n) - 1; + } + } + /* + * Got nothing, return failure. + */ + *rtblock = NULLRTBLOCK; + return 0; +} + +/* + * Mark an extent specified by start and len allocated. + * Updates all the summary information as well as the bitmap. + */ +STATIC int /* error */ +xfs_rtallocate_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* start block to allocate */ + xfs_extlen_t len, /* length to allocate */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb) /* in/out: summary block number */ +{ + xfs_rtblock_t end; /* end of the allocated extent */ + int error; /* error value */ + xfs_rtblock_t postblock; /* first block allocated > end */ + xfs_rtblock_t preblock; /* first block allocated < start */ + + end = start + len - 1; + /* + * Assume we're allocating out of the middle of a free extent. + * We need to find the beginning and end of the extent so we can + * properly update the summary. + */ + error = xfs_rtfind_back(mp, tp, start, 0, &preblock); + if (error) { + return error; + } + /* + * Find the next allocated block (end of free extent). + */ + error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1, + &postblock); + if (error) { + return error; + } + /* + * Decrement the summary information corresponding to the entire + * (old) free extent. + */ + error = xfs_rtmodify_summary(mp, tp, + XFS_RTBLOCKLOG(postblock + 1 - preblock), + XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb); + if (error) { + return error; + } + /* + * If there are blocks not being allocated at the front of the + * old extent, add summary data for them to be free. + */ + if (preblock < start) { + error = xfs_rtmodify_summary(mp, tp, + XFS_RTBLOCKLOG(start - preblock), + XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb); + if (error) { + return error; + } + } + /* + * If there are blocks not being allocated at the end of the + * old extent, add summary data for them to be free. + */ + if (postblock > end) { + error = xfs_rtmodify_summary(mp, tp, + XFS_RTBLOCKLOG(postblock - end), + XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb); + if (error) { + return error; + } + } + /* + * Modify the bitmap to mark this extent allocated. 
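+	 * Worked example (illustrative numbers): if the enclosing free
+	 * extent is [preblock=64, postblock=95] (32 blocks, summary
+	 * level 5) and we allocate [start=70, end=79], the level 5
+	 * entry for the old extent was decremented above, a level 2
+	 * entry was added for the 6 leading free blocks 64..69 and a
+	 * level 4 entry for the 16 trailing free blocks 80..95; all
+	 * that remains is to clear the bits for 70..79 here.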
+ */ + error = xfs_rtmodify_range(mp, tp, start, len, 0); + return error; +} + +/* + * Return whether there are any free extents in the size range given + * by low and high, for the bitmap block bbno. + */ +STATIC int /* error */ +xfs_rtany_summary( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + int low, /* low log2 extent size */ + int high, /* high log2 extent size */ + xfs_rtblock_t bbno, /* bitmap block number */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + int *stat) /* out: any good extents here? */ +{ + int error; /* error value */ + int log; /* loop counter, log2 of ext. size */ + xfs_suminfo_t sum; /* summary data */ + + /* + * Loop over logs of extent sizes. Order is irrelevant. + */ + for (log = low; log <= high; log++) { + /* + * Get one summary datum. + */ + error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum); + if (error) { + return error; + } + /* + * If there are any, return success. + */ + if (sum) { + *stat = 1; + return 0; + } + } + /* + * Found nothing, return failure. + */ + *stat = 0; + return 0; +} + +/* + * Get a buffer for the bitmap or summary file block specified. + * The buffer is returned read and locked. + */ +STATIC int /* error */ +xfs_rtbuf_get( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t block, /* block number in bitmap or summary */ + int issum, /* is summary not bitmap */ + xfs_buf_t **bpp) /* output: buffer for the block */ +{ + xfs_buf_t *bp; /* block buffer, result */ + xfs_daddr_t d; /* disk addr of block */ + int error; /* error value */ + xfs_fsblock_t fsb; /* fs block number for block */ + xfs_inode_t *ip; /* bitmap or summary inode */ + + ip = issum ? mp->m_rsumip : mp->m_rbmip; + /* + * Map from the file offset (block) and inode number to the + * file system block. + */ + error = xfs_bmapi_single(tp, ip, XFS_DATA_FORK, &fsb, block); + if (error) { + return error; + } + ASSERT(fsb != NULLFSBLOCK); + /* + * Convert to disk address for buffer cache. + */ + d = XFS_FSB_TO_DADDR(mp, fsb); + /* + * Read the buffer. + */ + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, + mp->m_bsize, 0, &bp); + if (error) { + return error; + } + ASSERT(bp && !XFS_BUF_GETERROR(bp)); + *bpp = bp; + return 0; +} + +#ifdef DEBUG +/* + * Check that the given extent (block range) is allocated already. + */ +STATIC int /* error */ +xfs_rtcheck_alloc_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* out: 1 for allocated, 0 for not */ +{ + xfs_rtblock_t new; /* dummy for xfs_rtcheck_range */ + + return xfs_rtcheck_range(mp, tp, bno, len, 0, &new, stat); +} +#endif + +#ifdef DEBUG +/* + * Check whether the given block in the bitmap has the given value. 
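+ *
+ * Note on the sign trick used here and elsewhere in this file: with val
+ * restricted to 0 or 1, -val is either all-zero or all-one bits, so
+ * (wval ^ -val) is zero in every bit position that already matches the
+ * wanted value; masking with (1 << bit) isolates the one bit of
+ * interest.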
+ */ +STATIC int /* 1 for matches, 0 for not */ +xfs_rtcheck_bit( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* bit (block) to check */ + int val) /* 1 for free, 0 for allocated */ +{ + int bit; /* bit number in the word */ + xfs_rtblock_t block; /* bitmap block number */ + xfs_buf_t *bp; /* buf for the block */ + xfs_rtword_t *bufp; /* pointer into the buffer */ + /* REFERENCED */ + int error; /* error value */ + xfs_rtword_t wdiff; /* difference between bit & expected */ + int word; /* word number in the buffer */ + xfs_rtword_t wval; /* word value from buffer */ + + block = XFS_BITTOBLOCK(mp, start); + error = xfs_rtbuf_get(mp, tp, block, 0, &bp); + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = XFS_BITTOWORD(mp, start); + bit = (int)(start & (XFS_NBWORD - 1)); + wval = bufp[word]; + xfs_trans_brelse(tp, bp); + wdiff = (wval ^ -val) & ((xfs_rtword_t)1 << bit); + return !wdiff; +} +#endif /* DEBUG */ + +#if 0 +/* + * Check that the given extent (block range) is free already. + */ +STATIC int /* error */ +xfs_rtcheck_free_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* out: 1 for free, 0 for not */ +{ + xfs_rtblock_t new; /* dummy for xfs_rtcheck_range */ + + return xfs_rtcheck_range(mp, tp, bno, len, 1, &new, stat); +} +#endif + +/* + * Check that the given range is either all allocated (val = 0) or + * all free (val = 1). + */ +STATIC int /* error */ +xfs_rtcheck_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block number of extent */ + xfs_extlen_t len, /* length of extent */ + int val, /* 1 for free, 0 for allocated */ + xfs_rtblock_t *new, /* out: first block not matching */ + int *stat) /* out: 1 for matches, 0 for not */ +{ + xfs_rtword_t *b; /* current word in buffer */ + int bit; /* bit number in the word */ + xfs_rtblock_t block; /* bitmap block number */ + xfs_buf_t *bp; /* buf for the block */ + xfs_rtword_t *bufp; /* starting word in buffer */ + int error; /* error value */ + xfs_rtblock_t i; /* current bit number rel. to start */ + xfs_rtblock_t lastbit; /* last useful bit in word */ + xfs_rtword_t mask; /* mask of relevant bits for value */ + xfs_rtword_t wdiff; /* difference from wanted value */ + int word; /* word number in the buffer */ + + /* + * Compute starting bitmap block number + */ + block = XFS_BITTOBLOCK(mp, start); + /* + * Read the bitmap block. + */ + error = xfs_rtbuf_get(mp, tp, block, 0, &bp); + if (error) { + return error; + } + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + /* + * Compute the starting word's address, and starting bit. + */ + word = XFS_BITTOWORD(mp, start); + b = &bufp[word]; + bit = (int)(start & (XFS_NBWORD - 1)); + /* + * 0 (allocated) => all zero's; 1 (free) => all one's. + */ + val = -val; + /* + * If not starting on a word boundary, deal with the first + * (partial) word. + */ + if (bit) { + /* + * Compute first bit not examined. + */ + lastbit = XFS_RTMIN(bit + len, XFS_NBWORD); + /* + * Mask of relevant bits. + */ + mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit; + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = (*b ^ val) & mask)) { + /* + * Different, compute first wrong bit and return. 
+ */ + xfs_trans_brelse(tp, bp); + i = XFS_RTLOBIT(wdiff) - bit; + *new = start + i; + *stat = 0; + return 0; + } + i = lastbit - bit; + /* + * Go on to next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * If done with this block, get the next one. + */ + xfs_trans_brelse(tp, bp); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the next word in the buffer. + */ + b++; + } + } else { + /* + * Starting on a word boundary, no partial word. + */ + i = 0; + } + /* + * Loop over whole words in buffers. When we use up one buffer + * we move on to the next one. + */ + while (len - i >= XFS_NBWORD) { + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = *b ^ val)) { + /* + * Different, compute first wrong bit and return. + */ + xfs_trans_brelse(tp, bp); + i += XFS_RTLOBIT(wdiff); + *new = start + i; + *stat = 0; + return 0; + } + i += XFS_NBWORD; + /* + * Go on to next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * If done with this block, get the next one. + */ + xfs_trans_brelse(tp, bp); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the next word in the buffer. + */ + b++; + } + } + /* + * If not ending on a word boundary, deal with the last + * (partial) word. + */ + if ((lastbit = len - i)) { + /* + * Mask of relevant bits. + */ + mask = ((xfs_rtword_t)1 << lastbit) - 1; + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = (*b ^ val) & mask)) { + /* + * Different, compute first wrong bit and return. + */ + xfs_trans_brelse(tp, bp); + i += XFS_RTLOBIT(wdiff); + *new = start + i; + *stat = 0; + return 0; + } else + i = len; + } + /* + * Successful, return. + */ + xfs_trans_brelse(tp, bp); + *new = start + i; + *stat = 1; + return 0; +} + +/* + * Copy and transform the summary file, given the old and new + * parameters in the mount structures. + */ +STATIC int /* error */ +xfs_rtcopy_summary( + xfs_mount_t *omp, /* old file system mount point */ + xfs_mount_t *nmp, /* new file system mount point */ + xfs_trans_t *tp) /* transaction pointer */ +{ + xfs_rtblock_t bbno; /* bitmap block number */ + xfs_buf_t *bp; /* summary buffer */ + int error; /* error return value */ + int log; /* summary level number (log length) */ + xfs_suminfo_t sum; /* summary data */ + xfs_fsblock_t sumbno; /* summary block number */ + + bp = NULL; + for (log = omp->m_rsumlevels - 1; log >= 0; log--) { + for (bbno = omp->m_sb.sb_rbmblocks - 1; + (xfs_srtblock_t)bbno >= 0; + bbno--) { + error = xfs_rtget_summary(omp, tp, log, bbno, &bp, + &sumbno, &sum); + if (error) + return error; + if (sum == 0) + continue; + error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum, + &bp, &sumbno); + if (error) + return error; + error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum, + &bp, &sumbno); + if (error) + return error; + ASSERT(sum > 0); + } + } + return 0; +} + +/* + * Searching backward from start to limit, find the first block whose + * allocated/free state is different from start's. 
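+ *
+ * In other words, *rtblock is set to the lowest-numbered block that is
+ * still in the same allocated/free run as start; if everything down to
+ * limit matches, that is limit itself.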
+ */ +STATIC int /* error */ +xfs_rtfind_back( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block to look at */ + xfs_rtblock_t limit, /* last block to look at */ + xfs_rtblock_t *rtblock) /* out: start block found */ +{ + xfs_rtword_t *b; /* current word in buffer */ + int bit; /* bit number in the word */ + xfs_rtblock_t block; /* bitmap block number */ + xfs_buf_t *bp; /* buf for the block */ + xfs_rtword_t *bufp; /* starting word in buffer */ + int error; /* error value */ + xfs_rtblock_t firstbit; /* first useful bit in the word */ + xfs_rtblock_t i; /* current bit number rel. to start */ + xfs_rtblock_t len; /* length of inspected area */ + xfs_rtword_t mask; /* mask of relevant bits for value */ + xfs_rtword_t want; /* mask for "good" values */ + xfs_rtword_t wdiff; /* difference from wanted value */ + int word; /* word number in the buffer */ + + /* + * Compute and read in starting bitmap block for starting block. + */ + block = XFS_BITTOBLOCK(mp, start); + error = xfs_rtbuf_get(mp, tp, block, 0, &bp); + if (error) { + return error; + } + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + /* + * Get the first word's index & point to it. + */ + word = XFS_BITTOWORD(mp, start); + b = &bufp[word]; + bit = (int)(start & (XFS_NBWORD - 1)); + len = start - limit + 1; + /* + * Compute match value, based on the bit at start: if 1 (free) + * then all-ones, else all-zeroes. + */ + want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0; + /* + * If the starting position is not word-aligned, deal with the + * partial word. + */ + if (bit < XFS_NBWORD - 1) { + /* + * Calculate first (leftmost) bit number to look at, + * and mask for all the relevant bits in this word. + */ + firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0); + mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) << + firstbit; + /* + * Calculate the difference between the value there + * and what we're looking for. + */ + if ((wdiff = (*b ^ want) & mask)) { + /* + * Different. Mark where we are and return. + */ + xfs_trans_brelse(tp, bp); + i = bit - XFS_RTHIBIT(wdiff); + *rtblock = start - i + 1; + return 0; + } + i = bit - firstbit + 1; + /* + * Go on to previous block if that's where the previous word is + * and we need the previous word. + */ + if (--word == -1 && i < len) { + /* + * If done with this block, get the previous one. + */ + xfs_trans_brelse(tp, bp); + error = xfs_rtbuf_get(mp, tp, --block, 0, &bp); + if (error) { + return error; + } + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = XFS_BLOCKWMASK(mp); + b = &bufp[word]; + } else { + /* + * Go on to the previous word in the buffer. + */ + b--; + } + } else { + /* + * Starting on a word boundary, no partial word. + */ + i = 0; + } + /* + * Loop over whole words in buffers. When we use up one buffer + * we move on to the previous one. + */ + while (len - i >= XFS_NBWORD) { + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = *b ^ want)) { + /* + * Different, mark where we are and return. + */ + xfs_trans_brelse(tp, bp); + i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff); + *rtblock = start - i + 1; + return 0; + } + i += XFS_NBWORD; + /* + * Go on to previous block if that's where the previous word is + * and we need the previous word. + */ + if (--word == -1 && i < len) { + /* + * If done with this block, get the previous one. 
+ */ + xfs_trans_brelse(tp, bp); + error = xfs_rtbuf_get(mp, tp, --block, 0, &bp); + if (error) { + return error; + } + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = XFS_BLOCKWMASK(mp); + b = &bufp[word]; + } else { + /* + * Go on to the previous word in the buffer. + */ + b--; + } + } + /* + * If not ending on a word boundary, deal with the last + * (partial) word. + */ + if (len - i) { + /* + * Calculate first (leftmost) bit number to look at, + * and mask for all the relevant bits in this word. + */ + firstbit = XFS_NBWORD - (len - i); + mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit; + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = (*b ^ want) & mask)) { + /* + * Different, mark where we are and return. + */ + xfs_trans_brelse(tp, bp); + i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff); + *rtblock = start - i + 1; + return 0; + } else + i = len; + } + /* + * No match, return that we scanned the whole area. + */ + xfs_trans_brelse(tp, bp); + *rtblock = start - i + 1; + return 0; +} + +/* + * Searching forward from start to limit, find the first block whose + * allocated/free state is different from start's. + */ +STATIC int /* error */ +xfs_rtfind_forw( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block to look at */ + xfs_rtblock_t limit, /* last block to look at */ + xfs_rtblock_t *rtblock) /* out: start block found */ +{ + xfs_rtword_t *b; /* current word in buffer */ + int bit; /* bit number in the word */ + xfs_rtblock_t block; /* bitmap block number */ + xfs_buf_t *bp; /* buf for the block */ + xfs_rtword_t *bufp; /* starting word in buffer */ + int error; /* error value */ + xfs_rtblock_t i; /* current bit number rel. to start */ + xfs_rtblock_t lastbit; /* last useful bit in the word */ + xfs_rtblock_t len; /* length of inspected area */ + xfs_rtword_t mask; /* mask of relevant bits for value */ + xfs_rtword_t want; /* mask for "good" values */ + xfs_rtword_t wdiff; /* difference from wanted value */ + int word; /* word number in the buffer */ + + /* + * Compute and read in starting bitmap block for starting block. + */ + block = XFS_BITTOBLOCK(mp, start); + error = xfs_rtbuf_get(mp, tp, block, 0, &bp); + if (error) { + return error; + } + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + /* + * Get the first word's index & point to it. + */ + word = XFS_BITTOWORD(mp, start); + b = &bufp[word]; + bit = (int)(start & (XFS_NBWORD - 1)); + len = limit - start + 1; + /* + * Compute match value, based on the bit at start: if 1 (free) + * then all-ones, else all-zeroes. + */ + want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0; + /* + * If the starting position is not word-aligned, deal with the + * partial word. + */ + if (bit) { + /* + * Calculate last (rightmost) bit number to look at, + * and mask for all the relevant bits in this word. + */ + lastbit = XFS_RTMIN(bit + len, XFS_NBWORD); + mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit; + /* + * Calculate the difference between the value there + * and what we're looking for. + */ + if ((wdiff = (*b ^ want) & mask)) { + /* + * Different. Mark where we are and return. + */ + xfs_trans_brelse(tp, bp); + i = XFS_RTLOBIT(wdiff) - bit; + *rtblock = start + i - 1; + return 0; + } + i = lastbit - bit; + /* + * Go on to next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * If done with this block, get the previous one. 
+ */ + xfs_trans_brelse(tp, bp); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the previous word in the buffer. + */ + b++; + } + } else { + /* + * Starting on a word boundary, no partial word. + */ + i = 0; + } + /* + * Loop over whole words in buffers. When we use up one buffer + * we move on to the next one. + */ + while (len - i >= XFS_NBWORD) { + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = *b ^ want)) { + /* + * Different, mark where we are and return. + */ + xfs_trans_brelse(tp, bp); + i += XFS_RTLOBIT(wdiff); + *rtblock = start + i - 1; + return 0; + } + i += XFS_NBWORD; + /* + * Go on to next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * If done with this block, get the next one. + */ + xfs_trans_brelse(tp, bp); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the next word in the buffer. + */ + b++; + } + } + /* + * If not ending on a word boundary, deal with the last + * (partial) word. + */ + if ((lastbit = len - i)) { + /* + * Calculate mask for all the relevant bits in this word. + */ + mask = ((xfs_rtword_t)1 << lastbit) - 1; + /* + * Compute difference between actual and desired value. + */ + if ((wdiff = (*b ^ want) & mask)) { + /* + * Different, mark where we are and return. + */ + xfs_trans_brelse(tp, bp); + i += XFS_RTLOBIT(wdiff); + *rtblock = start + i - 1; + return 0; + } else + i = len; + } + /* + * No match, return that we scanned the whole area. + */ + xfs_trans_brelse(tp, bp); + *rtblock = start + i - 1; + return 0; +} + +/* + * Mark an extent specified by start and len freed. + * Updates all the summary information as well as the bitmap. + */ +STATIC int /* error */ +xfs_rtfree_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block to free */ + xfs_extlen_t len, /* length to free */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb) /* in/out: summary block number */ +{ + xfs_rtblock_t end; /* end of the freed extent */ + int error; /* error value */ + xfs_rtblock_t postblock; /* first block freed > end */ + xfs_rtblock_t preblock; /* first block freed < start */ + + end = start + len - 1; + /* + * Modify the bitmap to mark this extent freed. + */ + error = xfs_rtmodify_range(mp, tp, start, len, 1); + if (error) { + return error; + } + /* + * Assume we're freeing out of the middle of an allocated extent. + * We need to find the beginning and end of the extent so we can + * properly update the summary. + */ + error = xfs_rtfind_back(mp, tp, start, 0, &preblock); + if (error) { + return error; + } + /* + * Find the next allocated block (end of allocated extent). + */ + error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1, + &postblock); + /* + * If there are blocks not being freed at the front of the + * old extent, add summary data for them to be allocated. + */ + if (preblock < start) { + error = xfs_rtmodify_summary(mp, tp, + XFS_RTBLOCKLOG(start - preblock), + XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb); + if (error) { + return error; + } + } + /* + * If there are blocks not being freed at the end of the + * old extent, add summary data for them to be allocated. 
+ */ + if (postblock > end) { + error = xfs_rtmodify_summary(mp, tp, + XFS_RTBLOCKLOG(postblock - end), + XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb); + if (error) { + return error; + } + } + /* + * Increment the summary information corresponding to the entire + * (new) free extent. + */ + error = xfs_rtmodify_summary(mp, tp, + XFS_RTBLOCKLOG(postblock + 1 - preblock), + XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb); + return error; +} + +/* + * Read and return the summary information for a given extent size, + * bitmap block combination. + * Keeps track of a current summary block, so we don't keep reading + * it from the buffer cache. + */ +STATIC int /* error */ +xfs_rtget_summary( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + int log, /* log2 of extent size */ + xfs_rtblock_t bbno, /* bitmap block number */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb, /* in/out: summary block number */ + xfs_suminfo_t *sum) /* out: summary info for this block */ +{ + xfs_buf_t *bp; /* buffer for summary block */ + int error; /* error value */ + xfs_fsblock_t sb; /* summary fsblock */ + int so; /* index into the summary file */ + xfs_suminfo_t *sp; /* pointer to returned data */ + + /* + * Compute entry number in the summary file. + */ + so = XFS_SUMOFFS(mp, log, bbno); + /* + * Compute the block number in the summary file. + */ + sb = XFS_SUMOFFSTOBLOCK(mp, so); + /* + * If we have an old buffer, and the block number matches, use that. + */ + if (rbpp && *rbpp && *rsb == sb) + bp = *rbpp; + /* + * Otherwise we have to get the buffer. + */ + else { + /* + * If there was an old one, get rid of it first. + */ + if (rbpp && *rbpp) + xfs_trans_brelse(tp, *rbpp); + error = xfs_rtbuf_get(mp, tp, sb, 1, &bp); + if (error) { + return error; + } + /* + * Remember this buffer and block for the next call. + */ + if (rbpp) { + *rbpp = bp; + *rsb = sb; + } + } + /* + * Point to the summary information & copy it out. + */ + sp = XFS_SUMPTR(mp, bp, so); + *sum = *sp; + /* + * Drop the buffer if we're not asked to remember it. + */ + if (!rbpp) + xfs_trans_brelse(tp, bp); + return 0; +} + +/* + * Set the given range of bitmap bits to the given value. + * Do whatever I/O and logging is required. + */ +STATIC int /* error */ +xfs_rtmodify_range( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block to modify */ + xfs_extlen_t len, /* length of extent to modify */ + int val) /* 1 for free, 0 for allocated */ +{ + xfs_rtword_t *b; /* current word in buffer */ + int bit; /* bit number in the word */ + xfs_rtblock_t block; /* bitmap block number */ + xfs_buf_t *bp; /* buf for the block */ + xfs_rtword_t *bufp; /* starting word in buffer */ + int error; /* error value */ + xfs_rtword_t *first; /* first used word in the buffer */ + int i; /* current bit number rel. to start */ + int lastbit; /* last useful bit in word */ + xfs_rtword_t mask; /* mask o frelevant bits for value */ + int word; /* word number in the buffer */ + + /* + * Compute starting bitmap block number. + */ + block = XFS_BITTOBLOCK(mp, start); + /* + * Read the bitmap block, and point to its data. + */ + error = xfs_rtbuf_get(mp, tp, block, 0, &bp); + if (error) { + return error; + } + bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + /* + * Compute the starting word's address, and starting bit. 
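+	 * For instance (illustrative values): changing len = 10 bits
+	 * starting at bit 5 of a word gives lastbit = 15 and
+	 * mask = (((xfs_rtword_t)1 << 10) - 1) << 5, i.e. bits 5..14 of
+	 * the first word; longer ranges are then handled a whole word
+	 * at a time, with a final partial-word mask if needed.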
+ */ + word = XFS_BITTOWORD(mp, start); + first = b = &bufp[word]; + bit = (int)(start & (XFS_NBWORD - 1)); + /* + * 0 (allocated) => all zeroes; 1 (free) => all ones. + */ + val = -val; + /* + * If not starting on a word boundary, deal with the first + * (partial) word. + */ + if (bit) { + /* + * Compute first bit not changed and mask of relevant bits. + */ + lastbit = XFS_RTMIN(bit + len, XFS_NBWORD); + mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit; + /* + * Set/clear the active bits. + */ + if (val) + *b |= mask; + else + *b &= ~mask; + i = lastbit - bit; + /* + * Go on to the next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * Log the changed part of this block. + * Get the next one. + */ + xfs_trans_log_buf(tp, bp, + (uint)((char *)first - (char *)bufp), + (uint)((char *)b - (char *)bufp)); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the next word in the buffer + */ + b++; + } + } else { + /* + * Starting on a word boundary, no partial word. + */ + i = 0; + } + /* + * Loop over whole words in buffers. When we use up one buffer + * we move on to the next one. + */ + while (len - i >= XFS_NBWORD) { + /* + * Set the word value correctly. + */ + *b = val; + i += XFS_NBWORD; + /* + * Go on to the next block if that's where the next word is + * and we need the next word. + */ + if (++word == XFS_BLOCKWSIZE(mp) && i < len) { + /* + * Log the changed part of this block. + * Get the next one. + */ + xfs_trans_log_buf(tp, bp, + (uint)((char *)first - (char *)bufp), + (uint)((char *)b - (char *)bufp)); + error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); + if (error) { + return error; + } + first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + word = 0; + } else { + /* + * Go on to the next word in the buffer + */ + b++; + } + } + /* + * If not ending on a word boundary, deal with the last + * (partial) word. + */ + if ((lastbit = len - i)) { + /* + * Compute a mask of relevant bits. + */ + bit = 0; + mask = ((xfs_rtword_t)1 << lastbit) - 1; + /* + * Set/clear the active bits. + */ + if (val) + *b |= mask; + else + *b &= ~mask; + b++; + } + /* + * Log any remaining changed bytes. + */ + if (b > first) + xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp), + (uint)((char *)b - (char *)bufp - 1)); + return 0; +} + +/* + * Read and modify the summary information for a given extent size, + * bitmap block combination. + * Keeps track of a current summary block, so we don't keep reading + * it from the buffer cache. + */ +STATIC int /* error */ +xfs_rtmodify_summary( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + int log, /* log2 of extent size */ + xfs_rtblock_t bbno, /* bitmap block number */ + int delta, /* change to make to summary info */ + xfs_buf_t **rbpp, /* in/out: summary block buffer */ + xfs_fsblock_t *rsb) /* in/out: summary block number */ +{ + xfs_buf_t *bp; /* buffer for the summary block */ + int error; /* error value */ + xfs_fsblock_t sb; /* summary fsblock */ + int so; /* index into the summary file */ + xfs_suminfo_t *sp; /* pointer to returned data */ + + /* + * Compute entry number in the summary file. + */ + so = XFS_SUMOFFS(mp, log, bbno); + /* + * Compute the block number in the summary file. 
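+	 * The summary file is a flat array of xfs_suminfo_t entries
+	 * indexed by (log * sb_rbmblocks + bbno); level log counts the
+	 * free extents of length 2^log .. 2^(log+1)-1 that start in
+	 * bitmap block bbno.  For example (figures assumed): with
+	 * 4096-byte blocks, 4-byte entries and sb_rbmblocks = 1000, the
+	 * entry for log = 2, bbno = 100 has index 2100 and byte offset
+	 * 8400, i.e. it sits in summary block 2 at offset 208.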
+ */ + sb = XFS_SUMOFFSTOBLOCK(mp, so); + /* + * If we have an old buffer, and the block number matches, use that. + */ + if (rbpp && *rbpp && *rsb == sb) + bp = *rbpp; + /* + * Otherwise we have to get the buffer. + */ + else { + /* + * If there was an old one, get rid of it first. + */ + if (rbpp && *rbpp) + xfs_trans_brelse(tp, *rbpp); + error = xfs_rtbuf_get(mp, tp, sb, 1, &bp); + if (error) { + return error; + } + /* + * Remember this buffer and block for the next call. + */ + if (rbpp) { + *rbpp = bp; + *rsb = sb; + } + } + /* + * Point to the summary information, modify and log it. + */ + sp = XFS_SUMPTR(mp, bp, so); + *sp += delta; + xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)XFS_BUF_PTR(bp)), + (uint)((char *)sp - (char *)XFS_BUF_PTR(bp) + sizeof(*sp) - 1)); + return 0; +} + +/* + * Visible (exported) functions. + */ + +/* + * Grow the realtime area of the filesystem. + */ +int +xfs_growfs_rt( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_growfs_rt_t *in) /* growfs rt input struct */ +{ + xfs_rtblock_t bmbno; /* bitmap block number */ + xfs_buf_t *bp; /* temporary buffer */ + int cancelflags; /* flags for xfs_trans_cancel */ + int error; /* error return value */ + xfs_inode_t *ip; /* bitmap inode, used as lock */ + xfs_mount_t *nmp; /* new (fake) mount structure */ + xfs_drfsbno_t nrblocks; /* new number of realtime blocks */ + xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */ + xfs_drtbno_t nrextents; /* new number of realtime extents */ + uint8_t nrextslog; /* new log2 of sb_rextents */ + xfs_extlen_t nrsumblocks; /* new number of summary blocks */ + uint nrsumlevels; /* new rt summary levels */ + uint nrsumsize; /* new size of rt summary, bytes */ + xfs_sb_t *nsbp; /* new superblock */ + xfs_extlen_t rbmblocks; /* current number of rt bitmap blocks */ + xfs_extlen_t rsumblocks; /* current number of rt summary blks */ + xfs_sb_t *sbp; /* old superblock */ + xfs_fsblock_t sumbno; /* summary block number */ + xfs_trans_t *tp; /* transaction pointer */ + + sbp = &mp->m_sb; + /* + * Initial error checking. + */ + if (mp->m_rtdev_targp || mp->m_rbmip == NULL || + (nrblocks = in->newblocks) <= sbp->sb_rblocks || + (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) + return XFS_ERROR(EINVAL); + /* + * Read in the last block of the device, make sure it exists. + */ + error = xfs_read_buf(mp, mp->m_rtdev_targp, + XFS_FSB_TO_BB(mp, in->newblocks - 1), + XFS_FSB_TO_BB(mp, 1), 0, &bp); + if (error) + return error; + ASSERT(bp); + xfs_buf_relse(bp); + /* + * Calculate new parameters. These are the final values to be reached. + */ + nrextents = do_div(nrblocks, in->extsize); + nrbmblocks = roundup_64(nrextents, NBBY * sbp->sb_blocksize); + nrextslog = xfs_highbit32(nrextents); + nrsumlevels = nrextslog + 1; + nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks; + nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize); + nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks); + /* + * New summary size can't be more than half the size of + * the log. This prevents us from getting a log overflow, + * since we'll log basically the whole summary file at once. + */ + if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1)) + return XFS_ERROR(EINVAL); + /* + * Get the old block counts for bitmap and summary inodes. + * These can't change since other growfs callers are locked out. + */ + rbmblocks = XFS_B_TO_FSB(mp, mp->m_rbmip->i_d.di_size); + rsumblocks = XFS_B_TO_FSB(mp, mp->m_rsumip->i_d.di_size); + /* + * Allocate space to the bitmap and summary files, as necessary. 
+ */ + if ((error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks, + mp->m_sb.sb_rbmino))) + return error; + if ((error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, + mp->m_sb.sb_rsumino))) + return error; + nmp = NULL; + /* + * Loop over the bitmap blocks. + * We will do everything one bitmap block at a time. + * Skip the current block if it is exactly full. + * This also deals with the case where there were no rtextents before. + */ + for (bmbno = sbp->sb_rbmblocks - + ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0); + bmbno < nrbmblocks; + bmbno++) { + /* + * Allocate a new (fake) mount/sb. + */ + nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP); + *nmp = *mp; + nsbp = &nmp->m_sb; + /* + * Calculate new sb and mount fields for this round. + */ + nsbp->sb_rextsize = in->extsize; + nsbp->sb_rbmblocks = bmbno + 1; + nsbp->sb_rblocks = + XFS_RTMIN(nrblocks, + nsbp->sb_rbmblocks * NBBY * + nsbp->sb_blocksize * nsbp->sb_rextsize); + nsbp->sb_rextents = do_div(nsbp->sb_rblocks, nsbp->sb_rextsize); + nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); + nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; + nrsumsize = + (uint)sizeof(xfs_suminfo_t) * nrsumlevels * + nsbp->sb_rbmblocks; + nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize); + nmp->m_rsumsize = nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks); + /* + * Start a transaction, get the log reservation. + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_FREE); + cancelflags = 0; + if ((error = xfs_trans_reserve(tp, 0, + XFS_GROWRTFREE_LOG_RES(nmp), 0, 0, 0))) + goto error_exit; + /* + * Lock out other callers by grabbing the bitmap inode lock. + */ + if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, + XFS_ILOCK_EXCL, &ip))) + goto error_exit; + ASSERT(ip == mp->m_rbmip); + /* + * Update the bitmap inode's size. + */ + mp->m_rbmip->i_d.di_size = + nsbp->sb_rbmblocks * nsbp->sb_blocksize; + xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE); + cancelflags |= XFS_TRANS_ABORT; + /* + * Get the summary inode into the transaction. + */ + if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, + XFS_ILOCK_EXCL, &ip))) + goto error_exit; + ASSERT(ip == mp->m_rsumip); + /* + * Update the summary inode's size. + */ + mp->m_rsumip->i_d.di_size = nmp->m_rsumsize; + xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE); + /* + * Copy summary data from old to new sizes. + * Do this when the real size (not block-aligned) changes. + */ + if (sbp->sb_rbmblocks != nsbp->sb_rbmblocks || + mp->m_rsumlevels != nmp->m_rsumlevels) { + error = xfs_rtcopy_summary(mp, nmp, tp); + if (error) + goto error_exit; + } + /* + * Update superblock fields. + */ + if (nsbp->sb_rextsize != sbp->sb_rextsize) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSIZE, + nsbp->sb_rextsize - sbp->sb_rextsize); + if (nsbp->sb_rbmblocks != sbp->sb_rbmblocks) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBMBLOCKS, + nsbp->sb_rbmblocks - sbp->sb_rbmblocks); + if (nsbp->sb_rblocks != sbp->sb_rblocks) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBLOCKS, + nsbp->sb_rblocks - sbp->sb_rblocks); + if (nsbp->sb_rextents != sbp->sb_rextents) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTENTS, + nsbp->sb_rextents - sbp->sb_rextents); + if (nsbp->sb_rextslog != sbp->sb_rextslog) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSLOG, + nsbp->sb_rextslog - sbp->sb_rextslog); + /* + * Free new extent. + */ + bp = NULL; + error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents, + nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno); + if (error) + goto error_exit; + /* + * Mark more blocks free in the superblock. 
+ */ + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, + nsbp->sb_rextents - sbp->sb_rextents); + /* + * Free the fake mp structure. + */ + kmem_free(nmp, sizeof(*nmp)); + nmp = NULL; + /* + * Update mp values into the real mp structure. + */ + mp->m_rsumlevels = nrsumlevels; + mp->m_rsumsize = nrsumsize; + /* + * Commit the transaction. + */ + xfs_trans_commit(tp, 0, NULL); + } + return 0; + + /* + * Error paths come here. + */ +error_exit: + if (nmp) + kmem_free(nmp, sizeof(*nmp)); + xfs_trans_cancel(tp, cancelflags); + return error; +} + +/* + * Allocate an extent in the realtime subvolume, with the usual allocation + * parameters. The length units are all in realtime extents, as is the + * result block number. + */ +int /* error */ +xfs_rtallocate_extent( + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to allocate */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_alloctype_t type, /* allocation type XFS_ALLOCTYPE... */ + int wasdel, /* was a delayed allocation extent */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock) /* out: start block allocated */ +{ + int error; /* error value */ + xfs_inode_t *ip; /* inode for bitmap file */ + xfs_mount_t *mp; /* file system mount structure */ + xfs_rtblock_t r; /* result allocated block */ + xfs_fsblock_t sb; /* summary file block number */ + xfs_buf_t *sumbp; /* summary file block buffer */ + + ASSERT(minlen > 0 && minlen <= maxlen); + mp = tp->t_mountp; + /* + * If prod is set then figure out what to do to minlen and maxlen. + */ + if (prod > 1) { + xfs_extlen_t i; + + if ((i = maxlen % prod)) + maxlen -= i; + if ((i = minlen % prod)) + minlen += prod - i; + if (maxlen < minlen) { + *rtblock = NULLRTBLOCK; + return 0; + } + } + /* + * Lock out other callers by grabbing the bitmap inode lock. + */ + error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, XFS_ILOCK_EXCL, &ip); + if (error) { + return error; + } + sumbp = NULL; + /* + * Allocate by size, or near another block, or exactly at some block. + */ + switch (type) { + case XFS_ALLOCTYPE_ANY_AG: + error = xfs_rtallocate_extent_size(mp, tp, minlen, maxlen, len, + &sumbp, &sb, prod, &r); + break; + case XFS_ALLOCTYPE_NEAR_BNO: + error = xfs_rtallocate_extent_near(mp, tp, bno, minlen, maxlen, + len, &sumbp, &sb, prod, &r); + break; + case XFS_ALLOCTYPE_THIS_BNO: + error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen, + len, &sumbp, &sb, prod, &r); + break; + default: + ASSERT(0); + } + if (error) { + return error; + } + /* + * If it worked, update the superblock. + */ + if (r != NULLRTBLOCK) { + long slen = (long)*len; + + ASSERT(*len >= minlen && *len <= maxlen); + if (wasdel) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FREXTENTS, -slen); + else + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, -slen); + } + *rtblock = r; + return 0; +} + +/* + * Free an extent in the realtime subvolume. Length is expressed in + * realtime extents, as is the block number. 
+ */ +int /* error */ +xfs_rtfree_extent( + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to free */ + xfs_extlen_t len) /* length of extent freed */ +{ + int error; /* error value */ + xfs_inode_t *ip; /* bitmap file inode */ + xfs_mount_t *mp; /* file system mount structure */ + xfs_fsblock_t sb; /* summary file block number */ + xfs_buf_t *sumbp; /* summary file block buffer */ + + mp = tp->t_mountp; + /* + * Synchronize by locking the bitmap inode. + */ + error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, XFS_ILOCK_EXCL, &ip); + if (error) { + return error; + } +#if defined(__KERNEL__) && defined(DEBUG) + /* + * Check to see that this whole range is currently allocated. + */ + { + int stat; /* result from checking range */ + + error = xfs_rtcheck_alloc_range(mp, tp, bno, len, &stat); + if (error) { + return error; + } + ASSERT(stat); + } +#endif + sumbp = NULL; + /* + * Free the range of realtime blocks. + */ + error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb); + if (error) { + return error; + } + /* + * Mark more blocks free in the superblock. + */ + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len); + /* + * If we've now freed all the blocks, reset the file sequence + * number to 0. + */ + if (tp->t_frextents_delta + mp->m_sb.sb_frextents == + mp->m_sb.sb_rextents) { + if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) + ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM; + *(__uint64_t *)&ip->i_d.di_atime = 0; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + } + return 0; +} + +/* + * Initialize realtime fields in the mount structure. + */ +int /* error */ +xfs_rtmount_init( + xfs_mount_t *mp) /* file system mount structure */ +{ + xfs_buf_t *bp; /* buffer for last block of subvolume */ + xfs_daddr_t d; /* address of last block of subvolume */ + int error; /* error return value */ + xfs_sb_t *sbp; /* filesystem superblock copy in mount */ + + sbp = &mp->m_sb; + if (sbp->sb_rblocks == 0) + return 0; + if (mp->m_rtdev_targp == NULL) { + cmn_err(CE_WARN, + "XFS: This filesystem has a realtime volume, use rtdev=device option"); + return XFS_ERROR(ENODEV); + } + mp->m_rsumlevels = sbp->sb_rextslog + 1; + mp->m_rsumsize = + (uint)sizeof(xfs_suminfo_t) * mp->m_rsumlevels * + sbp->sb_rbmblocks; + mp->m_rsumsize = roundup(mp->m_rsumsize, sbp->sb_blocksize); + mp->m_rbmip = mp->m_rsumip = NULL; + /* + * Check that the realtime section is an ok size. + */ + d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks); + if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) { + cmn_err(CE_WARN, "XFS: realtime mount -- %llu != %llu", + (unsigned long long) XFS_BB_TO_FSB(mp, d), + (unsigned long long) mp->m_sb.sb_rblocks); + return XFS_ERROR(E2BIG); + } + error = xfs_read_buf(mp, mp->m_rtdev_targp, + d - XFS_FSB_TO_BB(mp, 1), + XFS_FSB_TO_BB(mp, 1), 0, &bp); + if (error) { + cmn_err(CE_WARN, + "XFS: realtime mount -- xfs_read_buf failed, returned %d", error); + if (error == ENOSPC) + return XFS_ERROR(E2BIG); + return error; + } + xfs_buf_relse(bp); + return 0; +} + +/* + * Get the bitmap and summary inodes into the mount structure + * at mount time. 
+ */ +int /* error */ +xfs_rtmount_inodes( + xfs_mount_t *mp) /* file system mount structure */ +{ + int error; /* error return value */ + xfs_sb_t *sbp; + + sbp = &mp->m_sb; + if (sbp->sb_rbmino == NULLFSINO) + return 0; + error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, &mp->m_rbmip, 0); + if (error) + return error; + ASSERT(mp->m_rbmip != NULL); + ASSERT(sbp->sb_rsumino != NULLFSINO); + error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, &mp->m_rsumip, 0); + if (error) { + vnode_t *rbmvp; /* vnode for bitmap file */ + vmap_t vmap; /* vmap to delete vnode */ + + rbmvp = XFS_ITOV(mp->m_rbmip); + VMAP(rbmvp, vmap); + VN_RELE(rbmvp); + vn_purge(rbmvp, &vmap); + return error; + } + ASSERT(mp->m_rsumip != NULL); + return 0; +} + +/* + * Pick an extent for allocation at the start of a new realtime file. + * Use the sequence number stored in the atime field of the bitmap inode. + * Translate this to a fraction of the rtextents, and return the product + * of rtextents and the fraction. + * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ... + */ +int /* error */ +xfs_rtpick_extent( + xfs_mount_t *mp, /* file system mount point */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_extlen_t len, /* allocation length (rtextents) */ + xfs_rtblock_t *pick) /* result rt extent */ +{ + xfs_rtblock_t b; /* result block */ + int error; /* error return value */ + xfs_inode_t *ip; /* bitmap incore inode */ + int log2; /* log of sequence number */ + __uint64_t resid; /* residual after log removed */ + __uint64_t seq; /* sequence number of file creation */ + __uint64_t *seqp; /* pointer to seqno in inode */ + + error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, XFS_ILOCK_EXCL, &ip); + if (error) + return error; + ASSERT(ip == mp->m_rbmip); + seqp = (__uint64_t *)&ip->i_d.di_atime; + if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) { + ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM; + *seqp = 0; + } + seq = *seqp; + if ((log2 = xfs_highbit64(seq)) == -1) + b = 0; + else { + resid = seq - (1ULL << log2); + b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >> + (log2 + 1); + if (b >= mp->m_sb.sb_rextents) + b = do_mod(b, mp->m_sb.sb_rextents); + if (b + len > mp->m_sb.sb_rextents) + b = mp->m_sb.sb_rextents - len; + } + *seqp = seq + 1; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + *pick = b; + return 0; +} + +#ifdef DEBUG +/* + * Debug code: print out the value of a range in the bitmap. + */ +void +xfs_rtprint_range( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block to print */ + xfs_extlen_t len) /* length to print */ +{ + xfs_extlen_t i; /* block number in the extent */ + + printk("%Ld: ", (long long)start); + for (i = 0; i < len; i++) + printk("%d", xfs_rtcheck_bit(mp, tp, start + i, 1)); + printk("\n"); +} + +/* + * Debug code: print the summary file. 
+ */ +void +xfs_rtprint_summary( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp) /* transaction pointer */ +{ + xfs_suminfo_t c; /* summary data */ + xfs_rtblock_t i; /* bitmap block number */ + int l; /* summary information level */ + int p; /* flag for printed anything */ + xfs_fsblock_t sb; /* summary block number */ + xfs_buf_t *sumbp; /* summary block buffer */ + + sumbp = NULL; + for (l = 0; l < mp->m_rsumlevels; l++) { + for (p = 0, i = 0; i < mp->m_sb.sb_rbmblocks; i++) { + (void)xfs_rtget_summary(mp, tp, l, i, &sumbp, &sb, &c); + if (c) { + if (!p) { + printk("%Ld-%Ld:", 1LL << l, + XFS_RTMIN((1LL << l) + + ((1LL << l) - 1LL), + mp->m_sb.sb_rextents)); + p = 1; + } + printk(" %Ld:%d", (long long)i, c); + } + } + if (p) + printk("\n"); + } + if (sumbp) + xfs_trans_brelse(tp, sumbp); +} +#endif /* DEBUG */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_rtalloc.h linux.22-ac2/fs/xfs/xfs_rtalloc.h --- linux.vanilla/fs/xfs/xfs_rtalloc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_rtalloc.h 2003-09-01 13:54:21.000000000 +0100 @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_RTALLOC_H__ +#define __XFS_RTALLOC_H__ + +struct xfs_mount; +struct xfs_trans; + +#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) + +/* Min and max rt extent sizes, specified in bytes */ +#define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */ +#define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64KB */ +#define XFS_MIN_RTEXTSIZE (4 * 1024) /* 4KB */ + +/* + * Constants for bit manipulations. + */ +#define XFS_NBBYLOG 3 /* log2(NBBY) */ +#define XFS_WORDLOG 2 /* log2(sizeof(xfs_rtword_t)) */ +#define XFS_NBWORDLOG (XFS_NBBYLOG + XFS_WORDLOG) +#define XFS_NBWORD (1 << XFS_NBWORDLOG) +#define XFS_WORDMASK ((1 << XFS_WORDLOG) - 1) + +#define XFS_BLOCKSIZE(mp) ((mp)->m_sb.sb_blocksize) +#define XFS_BLOCKMASK(mp) ((mp)->m_blockmask) +#define XFS_BLOCKWSIZE(mp) ((mp)->m_blockwsize) +#define XFS_BLOCKWMASK(mp) ((mp)->m_blockwmask) + +/* + * Summary and bit manipulation macros. 
+ */ +#define XFS_SUMOFFS(mp,ls,bb) ((int)((ls) * (mp)->m_sb.sb_rbmblocks + (bb))) +#define XFS_SUMOFFSTOBLOCK(mp,s) \ + (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog) +#define XFS_SUMPTR(mp,bp,so) \ + ((xfs_suminfo_t *)((char *)XFS_BUF_PTR(bp) + \ + (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp)))) + +#define XFS_BITTOBLOCK(mp,bi) ((bi) >> (mp)->m_blkbit_log) +#define XFS_BLOCKTOBIT(mp,bb) ((bb) << (mp)->m_blkbit_log) +#define XFS_BITTOWORD(mp,bi) \ + ((int)(((bi) >> XFS_NBWORDLOG) & XFS_BLOCKWMASK(mp))) + +#define XFS_RTMIN(a,b) ((a) < (b) ? (a) : (b)) +#define XFS_RTMAX(a,b) ((a) > (b) ? (a) : (b)) + +#define XFS_RTLOBIT(w) xfs_lowbit32(w) +#define XFS_RTHIBIT(w) xfs_highbit32(w) + +#if XFS_BIG_FILESYSTEMS +#define XFS_RTBLOCKLOG(b) xfs_highbit64(b) +#else +#define XFS_RTBLOCKLOG(b) xfs_highbit32(b) +#endif + + +#ifdef __KERNEL__ + +#ifdef CONFIG_XFS_RT +/* + * Function prototypes for exported functions. + */ + +/* + * Allocate an extent in the realtime subvolume, with the usual allocation + * parameters. The length units are all in realtime extents, as is the + * result block number. + */ +int /* error */ +xfs_rtallocate_extent( + struct xfs_trans *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to allocate */ + xfs_extlen_t minlen, /* minimum length to allocate */ + xfs_extlen_t maxlen, /* maximum length to allocate */ + xfs_extlen_t *len, /* out: actual length allocated */ + xfs_alloctype_t type, /* allocation type XFS_ALLOCTYPE... */ + int wasdel, /* was a delayed allocation extent */ + xfs_extlen_t prod, /* extent product factor */ + xfs_rtblock_t *rtblock); /* out: start block allocated */ + +/* + * Free an extent in the realtime subvolume. Length is expressed in + * realtime extents, as is the block number. + */ +int /* error */ +xfs_rtfree_extent( + struct xfs_trans *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to free */ + xfs_extlen_t len); /* length of extent freed */ + +/* + * Initialize realtime fields in the mount structure. + */ +int /* error */ +xfs_rtmount_init( + struct xfs_mount *mp); /* file system mount structure */ + +/* + * Get the bitmap and summary inodes into the mount structure + * at mount time. + */ +int /* error */ +xfs_rtmount_inodes( + struct xfs_mount *mp); /* file system mount structure */ + +/* + * Pick an extent for allocation at the start of a new realtime file. + * Use the sequence number stored in the atime field of the bitmap inode. + * Translate this to a fraction of the rtextents, and return the product + * of rtextents and the fraction. + * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ... + */ +int /* error */ +xfs_rtpick_extent( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_extlen_t len, /* allocation length (rtextents) */ + xfs_rtblock_t *pick); /* result rt extent */ + +/* + * Debug code: print out the value of a range in the bitmap. + */ +void +xfs_rtprint_range( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_rtblock_t start, /* starting block to print */ + xfs_extlen_t len); /* length to print */ + +/* + * Debug code: print the summary file. + */ +void +xfs_rtprint_summary( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp); /* transaction pointer */ + +/* + * Grow the realtime area of the filesystem. 
+ */ +int +xfs_growfs_rt( + struct xfs_mount *mp, /* file system mount structure */ + xfs_growfs_rt_t *in); /* user supplied growfs struct */ + +#else +# define xfs_rtallocate_extent(t,b,min,max,l,a,f,p,rb) (ENOSYS) +# define xfs_rtfree_extent(t,b,l) (ENOSYS) +# define xfs_rtpick_extent(m,t,l,rb) (ENOSYS) +# define xfs_growfs_rt(mp,in) (ENOSYS) +# define xfs_rtmount_init(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) +# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) +#endif /* CONFIG_XFS_RT */ + +#endif /* __KERNEL__ */ + +#endif /* __XFS_RTALLOC_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_rw.c linux.22-ac2/fs/xfs/xfs_rw.c --- linux.vanilla/fs/xfs/xfs_rw.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_rw.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,761 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_itable.h" +#include "xfs_btree.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_attr.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_acl.h" +#include "xfs_mac.h" +#include "xfs_attr.h" +#include "xfs_error.h" +#include "xfs_buf_item.h" +#include "xfs_rw.h" + +/* + * This is a subroutine for xfs_write() and other writers (xfs_ioctl) + * which clears the setuid and setgid bits when a file is written. 
+ */ +int +xfs_write_clear_setuid( + xfs_inode_t *ip) +{ + xfs_mount_t *mp; + xfs_trans_t *tp; + int error; + + mp = ip->i_mount; + tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID); + if ((error = xfs_trans_reserve(tp, 0, + XFS_WRITEID_LOG_RES(mp), + 0, 0, 0))) { + xfs_trans_cancel(tp, 0); + return error; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + ip->i_d.di_mode &= ~ISUID; + + /* + * Note that we don't have to worry about mandatory + * file locking being disabled here because we only + * clear the ISGID bit if the Group execute bit is + * on, but if it was on then mandatory locking wouldn't + * have been enabled. + */ + if (ip->i_d.di_mode & (IEXEC >> 3)) { + ip->i_d.di_mode &= ~ISGID; + } + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return 0; +} + +/* + * Force a shutdown of the filesystem instantly while keeping + * the filesystem consistent. We don't do an unmount here; just shutdown + * the shop, make sure that absolutely nothing persistent happens to + * this filesystem after this point. + */ + +void +xfs_do_force_shutdown( + bhv_desc_t *bdp, + int flags, + char *fname, + int lnnum) +{ + int logerror; + xfs_mount_t *mp; + + mp = XFS_BHVTOM(bdp); + logerror = flags & XFS_LOG_IO_ERROR; + + if (!(flags & XFS_FORCE_UMOUNT)) { + cmn_err(CE_NOTE, + "xfs_force_shutdown(%s,0x%x) called from line %d of file %s. Return address = 0x%p", + mp->m_fsname,flags,lnnum,fname,__return_address); + } + /* + * No need to duplicate efforts. + */ + if (XFS_FORCED_SHUTDOWN(mp) && !logerror) + return; + + /* + * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't + * queue up anybody new on the log reservations, and wakes up + * everybody who's sleeping on log reservations and tells + * them the bad news. + */ + if (xfs_log_force_umount(mp, logerror)) + return; + + if (flags & XFS_CORRUPT_INCORE) { + xfs_cmn_err(XFS_PTAG_SHUTDOWN_CORRUPT, CE_ALERT, mp, + "Corruption of in-memory data detected. Shutting down filesystem: %s", + mp->m_fsname); + } else if (!(flags & XFS_FORCE_UMOUNT)) { + if (logerror) { + xfs_cmn_err(XFS_PTAG_SHUTDOWN_LOGERROR, CE_ALERT, mp, + "Log I/O Error Detected. Shutting down filesystem: %s", + mp->m_fsname); + } else if (!(flags & XFS_SHUTDOWN_REMOTE_REQ)) { + xfs_cmn_err(XFS_PTAG_SHUTDOWN_IOERROR, CE_ALERT, mp, + "I/O Error Detected. Shutting down filesystem: %s", + mp->m_fsname); + } + } + if (!(flags & XFS_FORCE_UMOUNT)) { + cmn_err(CE_ALERT, + "Please umount the filesystem, and rectify the problem(s)"); + } +} + + +/* + * Called when we want to stop a buffer from getting written or read. + * We attach the EIO error, muck with its flags, and call biodone + * so that the proper iodone callbacks get called. + */ +int +xfs_bioerror( + xfs_buf_t *bp) +{ + +#ifdef XFSERRORDEBUG + ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone); +#endif + + /* + * No need to wait until the buffer is unpinned. + * We aren't flushing it. + */ + xfs_buftrace("XFS IOERROR", bp); + XFS_BUF_ERROR(bp, EIO); + /* + * We're calling biodone, so delete B_DONE flag. Either way + * we have to call the iodone callback, and calling biodone + * probably is the best way since it takes care of + * GRIO as well. 
+ */ + XFS_BUF_UNREAD(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_UNDONE(bp); + XFS_BUF_STALE(bp); + + XFS_BUF_CLR_BDSTRAT_FUNC(bp); + xfs_biodone(bp); + + return (EIO); +} + +/* + * Same as xfs_bioerror, except that we are releasing the buffer + * here ourselves, and avoiding the biodone call. + * This is meant for userdata errors; metadata bufs come with + * iodone functions attached, so that we can track down errors. + */ +int +xfs_bioerror_relse( + xfs_buf_t *bp) +{ + int64_t fl; + + ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks); + ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone); + + xfs_buftrace("XFS IOERRELSE", bp); + fl = XFS_BUF_BFLAGS(bp); + /* + * No need to wait until the buffer is unpinned. + * We aren't flushing it. + * + * chunkhold expects B_DONE to be set, whether + * we actually finish the I/O or not. We don't want to + * change that interface. + */ + XFS_BUF_UNREAD(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_DONE(bp); + XFS_BUF_STALE(bp); + XFS_BUF_CLR_IODONE_FUNC(bp); + XFS_BUF_CLR_BDSTRAT_FUNC(bp); + if (!(fl & XFS_B_ASYNC)) { + /* + * Mark b_error and B_ERROR _both_. + * Lot's of chunkcache code assumes that. + * There's no reason to mark error for + * ASYNC buffers. + */ + XFS_BUF_ERROR(bp, EIO); + XFS_BUF_V_IODONESEMA(bp); + } else { + xfs_buf_relse(bp); + } + return (EIO); +} +/* + * Prints out an ALERT message about I/O error. + */ +void +xfs_ioerror_alert( + char *func, + struct xfs_mount *mp, + xfs_buf_t *bp, + xfs_daddr_t blkno) +{ + cmn_err(CE_ALERT, + "I/O error in filesystem (\"%s\") meta-data dev 0x%x block 0x%llx" + " (\"%s\") error %d buf count %u", + (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname, + XFS_BUF_TARGET_DEV(bp), + (__uint64_t)blkno, + func, + XFS_BUF_GETERROR(bp), + XFS_BUF_COUNT(bp)); +} + +/* + * This isn't an absolute requirement, but it is + * just a good idea to call xfs_read_buf instead of + * directly doing a read_buf call. For one, we shouldn't + * be doing this disk read if we are in SHUTDOWN state anyway, + * so this stops that from happening. Secondly, this does all + * the error checking stuff and the brelse if appropriate for + * the caller, so the code can be a little leaner. + */ + +int +xfs_read_buf( + struct xfs_mount *mp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len, + uint flags, + xfs_buf_t **bpp) +{ + xfs_buf_t *bp; + int error; + + if (flags) + bp = xfs_buf_read_flags(target, blkno, len, flags); + else + bp = xfs_buf_read(target, blkno, len, flags); + if (!bp) + return XFS_ERROR(EIO); + error = XFS_BUF_GETERROR(bp); + if (bp && !error && !XFS_FORCED_SHUTDOWN(mp)) { + *bpp = bp; + } else { + *bpp = NULL; + if (error) { + xfs_ioerror_alert("xfs_read_buf", mp, bp, XFS_BUF_ADDR(bp)); + } else { + error = XFS_ERROR(EIO); + } + if (bp) { + XFS_BUF_UNDONE(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_STALE(bp); + /* + * brelse clears B_ERROR and b_error + */ + xfs_buf_relse(bp); + } + } + return (error); +} + +/* + * Wrapper around bwrite() so that we can trap + * write errors, and act accordingly. + */ +int +xfs_bwrite( + struct xfs_mount *mp, + struct xfs_buf *bp) +{ + int error; + + /* + * XXXsup how does this work for quotas. + */ + XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb); + XFS_BUF_SET_FSPRIVATE3(bp, mp); + XFS_BUF_WRITE(bp); + + if ((error = XFS_bwrite(bp))) { + ASSERT(mp); + /* + * Cannot put a buftrace here since if the buffer is not + * B_HOLD then we will brelse() the buffer before returning + * from bwrite and we could be tracing a buffer that has + * been reused. 
+ */ + xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR); + } + return (error); +} + +/* + * xfs_inval_cached_pages() + * This routine is responsible for keeping direct I/O and buffered I/O + * somewhat coherent. From here we make sure that we're at least + * temporarily holding the inode I/O lock exclusively and then call + * the page cache to flush and invalidate any cached pages. If there + * are no cached pages this routine will be very quick. + */ +void +xfs_inval_cached_pages( + vnode_t *vp, + xfs_iocore_t *io, + xfs_off_t offset, + int write, + int relock) +{ + xfs_mount_t *mp; + + if (!VN_CACHED(vp)) { + return; + } + + mp = io->io_mount; + + /* + * We need to get the I/O lock exclusively in order + * to safely invalidate pages and mappings. + */ + if (relock) { + XFS_IUNLOCK(mp, io, XFS_IOLOCK_SHARED); + XFS_ILOCK(mp, io, XFS_IOLOCK_EXCL); + } + + /* Writing beyond EOF creates a hole that must be zeroed */ + if (write && (offset > XFS_SIZE(mp, io))) { + xfs_fsize_t isize; + + XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + isize = XFS_SIZE(mp, io); + if (offset > isize) { + xfs_zero_eof(vp, io, offset, isize, offset); + } + XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + } + + VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1, FI_REMAPF_LOCKED); + if (relock) { + XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL); + } +} + + + +spinlock_t xfs_refcache_lock = SPIN_LOCK_UNLOCKED; +xfs_inode_t **xfs_refcache; +int xfs_refcache_size; +int xfs_refcache_index; +int xfs_refcache_busy; +int xfs_refcache_count; + +/* + * Insert the given inode into the reference cache. + */ +void +xfs_refcache_insert( + xfs_inode_t *ip) +{ + vnode_t *vp; + xfs_inode_t *release_ip; + xfs_inode_t **refcache; + + ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE)); + + /* + * If an unmount is busy blowing entries out of the cache, + * then don't bother. + */ + if (xfs_refcache_busy) { + return; + } + + /* + * If we tuned the refcache down to zero, don't do anything. + */ + if (!xfs_refcache_size) { + return; + } + + /* + * The inode is already in the refcache, so don't bother + * with it. + */ + if (ip->i_refcache != NULL) { + return; + } + + vp = XFS_ITOV(ip); + /* ASSERT(vp->v_count > 0); */ + VN_HOLD(vp); + + /* + * We allocate the reference cache on use so that we don't + * waste the memory on systems not being used as NFS servers. + */ + if (xfs_refcache == NULL) { + refcache = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX * + sizeof(xfs_inode_t *), + KM_SLEEP); + } else { + refcache = NULL; + } + + spin_lock(&xfs_refcache_lock); + + /* + * If we allocated memory for the refcache above and it still + * needs it, then use the memory we allocated. Otherwise we'll + * free the memory below. + */ + if (refcache != NULL) { + if (xfs_refcache == NULL) { + xfs_refcache = refcache; + refcache = NULL; + } + } + + /* + * If an unmount is busy clearing out the cache, don't add new + * entries to it. + */ + if (xfs_refcache_busy) { + spin_unlock(&xfs_refcache_lock); + VN_RELE(vp); + /* + * If we allocated memory for the refcache above but someone + * else beat us to using it, then free the memory now. 
+ */ + if (refcache != NULL) { + kmem_free(refcache, + XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *)); + } + return; + } + release_ip = xfs_refcache[xfs_refcache_index]; + if (release_ip != NULL) { + release_ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + } + xfs_refcache[xfs_refcache_index] = ip; + ASSERT(ip->i_refcache == NULL); + ip->i_refcache = &(xfs_refcache[xfs_refcache_index]); + xfs_refcache_count++; + ASSERT(xfs_refcache_count <= xfs_refcache_size); + xfs_refcache_index++; + if (xfs_refcache_index == xfs_refcache_size) { + xfs_refcache_index = 0; + } + spin_unlock(&xfs_refcache_lock); + + /* + * Save the pointer to the inode to be released so that we can + * VN_RELE it once we've dropped our inode locks in xfs_rwunlock(). + * The pointer may be NULL, but that's OK. + */ + ip->i_release = release_ip; + + /* + * If we allocated memory for the refcache above but someone + * else beat us to using it, then free the memory now. + */ + if (refcache != NULL) { + kmem_free(refcache, + XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *)); + } + return; +} + + +/* + * If the given inode is in the reference cache, purge its entry and + * release the reference on the vnode. + */ +void +xfs_refcache_purge_ip( + xfs_inode_t *ip) +{ + vnode_t *vp; + int error; + + /* + * If we're not pointing to our entry in the cache, then + * we must not be in the cache. + */ + if (ip->i_refcache == NULL) { + return; + } + + spin_lock(&xfs_refcache_lock); + if (ip->i_refcache == NULL) { + spin_unlock(&xfs_refcache_lock); + return; + } + + /* + * Clear both our pointer to the cache entry and its pointer + * back to us. + */ + ASSERT(*(ip->i_refcache) == ip); + *(ip->i_refcache) = NULL; + ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + spin_unlock(&xfs_refcache_lock); + + vp = XFS_ITOV(ip); + /* ASSERT(vp->v_count > 1); */ + VOP_RELEASE(vp, error); + VN_RELE(vp); + + return; +} + + +/* + * This is called from the XFS unmount code to purge all entries for the + * given mount from the cache. It uses the refcache busy counter to + * make sure that new entries are not added to the cache as we purge them. + */ +void +xfs_refcache_purge_mp( + xfs_mount_t *mp) +{ + vnode_t *vp; + int error, i; + xfs_inode_t *ip; + + if (xfs_refcache == NULL) { + return; + } + + spin_lock(&xfs_refcache_lock); + /* + * Bumping the busy counter keeps new entries from being added + * to the cache. We use a counter since multiple unmounts could + * be in here simultaneously. + */ + xfs_refcache_busy++; + + for (i = 0; i < xfs_refcache_size; i++) { + ip = xfs_refcache[i]; + if ((ip != NULL) && (ip->i_mount == mp)) { + xfs_refcache[i] = NULL; + ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + spin_unlock(&xfs_refcache_lock); + vp = XFS_ITOV(ip); + VOP_RELEASE(vp, error); + VN_RELE(vp); + spin_lock(&xfs_refcache_lock); + } + } + + xfs_refcache_busy--; + ASSERT(xfs_refcache_busy >= 0); + spin_unlock(&xfs_refcache_lock); +} + + +/* + * This is called from the XFS sync code to ensure that the refcache + * is emptied out over time. We purge a small number of entries with + * each call. 
+ */ +void +xfs_refcache_purge_some(xfs_mount_t *mp) +{ + int error, i; + xfs_inode_t *ip; + int iplist_index; + xfs_inode_t **iplist; + int purge_count; + + if ((xfs_refcache == NULL) || (xfs_refcache_count == 0)) { + return; + } + + iplist_index = 0; + purge_count = xfs_params.refcache_purge; + iplist = (xfs_inode_t **)kmem_zalloc(purge_count * + sizeof(xfs_inode_t *), KM_SLEEP); + + spin_lock(&xfs_refcache_lock); + + /* + * Store any inodes we find in the next several entries + * into the iplist array to be released after dropping + * the spinlock. We always start looking from the currently + * oldest place in the cache. We move the refcache index + * forward as we go so that we are sure to eventually clear + * out the entire cache when the system goes idle. + */ + for (i = 0; i < purge_count; i++) { + ip = xfs_refcache[xfs_refcache_index]; + if (ip != NULL) { + xfs_refcache[xfs_refcache_index] = NULL; + ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + iplist[iplist_index] = ip; + iplist_index++; + } + xfs_refcache_index++; + if (xfs_refcache_index == xfs_refcache_size) { + xfs_refcache_index = 0; + } + } + + spin_unlock(&xfs_refcache_lock); + + /* + * Now drop the inodes we collected. + */ + for (i = 0; i < iplist_index; i++) { + VOP_RELEASE(XFS_ITOV(iplist[i]), error); + VN_RELE(XFS_ITOV(iplist[i])); + } + + kmem_free(iplist, purge_count * + sizeof(xfs_inode_t *)); +} + +/* + * This is called when the refcache is dynamically resized + * via a sysctl. + * + * If the new size is smaller than the old size, purge all + * entries in slots greater than the new size, and move + * the index if necessary. + * + * If the refcache hasn't even been allocated yet, or the + * new size is larger than the old size, just set the value + * of xfs_refcache_size. + */ + +void +xfs_refcache_resize(int xfs_refcache_new_size) +{ + int i; + xfs_inode_t *ip; + int iplist_index = 0; + xfs_inode_t **iplist; + int error; + + /* + * If the new size is smaller than the current size, + * purge entries to create smaller cache, and + * reposition index if necessary. + * Don't bother if no refcache yet. + */ + if (xfs_refcache && (xfs_refcache_new_size < xfs_refcache_size)) { + + iplist = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX * + sizeof(xfs_inode_t *), KM_SLEEP); + + spin_lock(&xfs_refcache_lock); + + for (i = xfs_refcache_new_size; i < xfs_refcache_size; i++) { + ip = xfs_refcache[i]; + if (ip != NULL) { + xfs_refcache[i] = NULL; + ip->i_refcache = NULL; + xfs_refcache_count--; + ASSERT(xfs_refcache_count >= 0); + iplist[iplist_index] = ip; + iplist_index++; + } + } + + xfs_refcache_size = xfs_refcache_new_size; + + /* + * Move index to beginning of cache if it's now past the end + */ + if (xfs_refcache_index >= xfs_refcache_new_size) + xfs_refcache_index = 0; + + spin_unlock(&xfs_refcache_lock); + + /* + * Now drop the inodes we collected. 
+ */ + for (i = 0; i < iplist_index; i++) { + VOP_RELEASE(XFS_ITOV(iplist[i]), error); + VN_RELE(XFS_ITOV(iplist[i])); + } + + kmem_free(iplist, XFS_REFCACHE_SIZE_MAX * + sizeof(xfs_inode_t *)); + } else { + spin_lock(&xfs_refcache_lock); + xfs_refcache_size = xfs_refcache_new_size; + spin_unlock(&xfs_refcache_lock); + } +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_rw.h linux.22-ac2/fs/xfs/xfs_rw.h --- linux.vanilla/fs/xfs/xfs_rw.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_rw.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_RW_H__ +#define __XFS_RW_H__ + +struct bhv_desc; +struct bmapval; +struct xfs_buf; +struct cred; +struct uio; +struct vnode; +struct xfs_inode; +struct xfs_iocore; +struct xfs_mount; +struct xfs_trans; + +/* + * Maximum count of bmaps used by read and write paths. + */ +#define XFS_MAX_RW_NBMAPS 4 + +/* + * Counts of readahead buffers to use based on physical memory size. + * None of these should be more than XFS_MAX_RW_NBMAPS. + */ +#define XFS_RW_NREADAHEAD_16MB 2 +#define XFS_RW_NREADAHEAD_32MB 3 +#define XFS_RW_NREADAHEAD_K32 4 +#define XFS_RW_NREADAHEAD_K64 4 + +/* + * Maximum size of a buffer that we\'ll map. Making this + * too big will degrade performance due to the number of + * pages which need to be gathered. Making it too small + * will prevent us from doing large I/O\'s to hardware that + * needs it. + * + * This is currently set to 512 KB. + */ +#define XFS_MAX_BMAP_LEN_BB 1024 +#define XFS_MAX_BMAP_LEN_BYTES 524288 + +/* + * Maximum size (in inodes) for the nfs refcache + */ +#define XFS_REFCACHE_SIZE_MAX 512 + + +/* + * Convert the given file system block to a disk block. + * We have to treat it differently based on whether the + * file is a real time file or not, because the bmap code + * does. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_DB) +xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb); +#define XFS_FSB_TO_DB(ip,fsb) xfs_fsb_to_db(ip,fsb) +#else +#define XFS_FSB_TO_DB(ip,fsb) \ + (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 
\ + (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \ + XFS_FSB_TO_DADDR((ip)->i_mount, (fsb))) +#endif + +#define XFS_FSB_TO_DB_IO(io,fsb) \ + (((io)->io_flags & XFS_IOCORE_RT) ? \ + XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \ + XFS_FSB_TO_DADDR((io)->io_mount, (fsb))) + +/* + * Defines for the trace mechanisms in xfs_rw.c. + */ +#define XFS_RW_KTRACE_SIZE 64 +#define XFS_STRAT_KTRACE_SIZE 64 +#define XFS_STRAT_GTRACE_SIZE 512 + +#define XFS_READ_ENTER 1 +#define XFS_WRITE_ENTER 2 +#define XFS_IOMAP_READ_ENTER 3 +#define XFS_IOMAP_WRITE_ENTER 4 +#define XFS_IOMAP_READ_MAP 5 +#define XFS_IOMAP_WRITE_MAP 6 +#define XFS_IOMAP_WRITE_NOSPACE 7 +#define XFS_ITRUNC_START 8 +#define XFS_ITRUNC_FINISH1 9 +#define XFS_ITRUNC_FINISH2 10 +#define XFS_CTRUNC1 11 +#define XFS_CTRUNC2 12 +#define XFS_CTRUNC3 13 +#define XFS_CTRUNC4 14 +#define XFS_CTRUNC5 15 +#define XFS_CTRUNC6 16 +#define XFS_BUNMAPI 17 +#define XFS_INVAL_CACHED 18 +#define XFS_DIORD_ENTER 19 +#define XFS_DIOWR_ENTER 20 + +#if defined(XFS_ALL_TRACE) +#define XFS_RW_TRACE +#define XFS_STRAT_TRACE +#endif + +#if !defined(DEBUG) +#undef XFS_RW_TRACE +#undef XFS_STRAT_TRACE +#endif + +/* + * Prototypes for functions in xfs_rw.c. + */ + +int +xfs_write_clear_setuid( + struct xfs_inode *ip); + +int +xfs_bwrite( + struct xfs_mount *mp, + struct xfs_buf *bp); + +void +xfs_inval_cached_pages( + struct vnode *vp, + struct xfs_iocore *io, + xfs_off_t offset, + int write, + int relock); + +void +xfs_refcache_insert( + struct xfs_inode *ip); + +void +xfs_refcache_purge_ip( + struct xfs_inode *ip); + +void +xfs_refcache_purge_mp( + struct xfs_mount *mp); + +void +xfs_refcache_purge_some( + struct xfs_mount *mp); + +void +xfs_refcache_resize( + int xfs_refcache_new_size); + +int +xfs_bioerror( + struct xfs_buf *b); + +int +xfs_bioerror_relse( + struct xfs_buf *b); + +int +xfs_read_buf( + struct xfs_mount *mp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len, + uint flags, + struct xfs_buf **bpp); + +void +xfs_ioerror_alert( + char *func, + struct xfs_mount *mp, + xfs_buf_t *bp, + xfs_daddr_t blkno); + + +/* + * Prototypes for functions in xfs_vnodeops.c. + */ + +int +xfs_rwlock( + bhv_desc_t *bdp, + vrwlock_t write_lock); + +void +xfs_rwunlock( + bhv_desc_t *bdp, + vrwlock_t write_lock); + +int +xfs_change_file_space( + bhv_desc_t *bdp, + int cmd, + xfs_flock64_t *bf, + xfs_off_t offset, + cred_t *credp, + int flags); + +int +xfs_set_dmattrs( + bhv_desc_t *bdp, + u_int evmask, + u_int16_t state, + cred_t *credp); + +#endif /* __XFS_RW_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_sb.h linux.22-ac2/fs/xfs/xfs_sb.h --- linux.vanilla/fs/xfs/xfs_sb.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_sb.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,529 @@ +/* + * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. 
Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_SB_H__ +#define __XFS_SB_H__ + +/* + * Super block + * Fits into a sector-sized buffer at address 0 of each allocation group. + * Only the first of these is ever updated except during growfs. + */ + +struct xfs_buf; +struct xfs_mount; + +#define XFS_SB_MAGIC 0x58465342 /* 'XFSB' */ +#define XFS_SB_VERSION_1 1 /* 5.3, 6.0.1, 6.1 */ +#define XFS_SB_VERSION_2 2 /* 6.2 - attributes */ +#define XFS_SB_VERSION_3 3 /* 6.2 - new inode version */ +#define XFS_SB_VERSION_4 4 /* 6.2+ - bitmask version */ +#define XFS_SB_VERSION_NUMBITS 0x000f +#define XFS_SB_VERSION_ALLFBITS 0xfff0 +#define XFS_SB_VERSION_SASHFBITS 0xf000 +#define XFS_SB_VERSION_REALFBITS 0x0ff0 +#define XFS_SB_VERSION_ATTRBIT 0x0010 +#define XFS_SB_VERSION_NLINKBIT 0x0020 +#define XFS_SB_VERSION_QUOTABIT 0x0040 +#define XFS_SB_VERSION_ALIGNBIT 0x0080 +#define XFS_SB_VERSION_DALIGNBIT 0x0100 +#define XFS_SB_VERSION_SHAREDBIT 0x0200 +#define XFS_SB_VERSION_LOGV2BIT 0x0400 +#define XFS_SB_VERSION_SECTORBIT 0x0800 +#define XFS_SB_VERSION_EXTFLGBIT 0x1000 +#define XFS_SB_VERSION_DIRV2BIT 0x2000 +#define XFS_SB_VERSION_OKSASHFBITS \ + (XFS_SB_VERSION_EXTFLGBIT | \ + XFS_SB_VERSION_DIRV2BIT) +#define XFS_SB_VERSION_OKREALFBITS \ + (XFS_SB_VERSION_ATTRBIT | \ + XFS_SB_VERSION_NLINKBIT | \ + XFS_SB_VERSION_QUOTABIT | \ + XFS_SB_VERSION_ALIGNBIT | \ + XFS_SB_VERSION_DALIGNBIT | \ + XFS_SB_VERSION_SHAREDBIT | \ + XFS_SB_VERSION_LOGV2BIT | \ + XFS_SB_VERSION_SECTORBIT) +#define XFS_SB_VERSION_OKSASHBITS \ + (XFS_SB_VERSION_NUMBITS | \ + XFS_SB_VERSION_REALFBITS | \ + XFS_SB_VERSION_OKSASHFBITS) +#define XFS_SB_VERSION_OKREALBITS \ + (XFS_SB_VERSION_NUMBITS | \ + XFS_SB_VERSION_OKREALFBITS | \ + XFS_SB_VERSION_OKSASHFBITS) +#define XFS_SB_VERSION_MKFS(ia,dia,extflag,dirv2,na,sflag) \ + (((ia) || (dia) || (extflag) || (dirv2) || (na)) ? \ + (XFS_SB_VERSION_4 | \ + ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \ + ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \ + ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \ + ((dirv2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \ + ((na) ? XFS_SB_VERSION_LOGV2BIT : 0) | \ + ((sflag) ? 
XFS_SB_VERSION_SECTORBIT : 0)) : \ + XFS_SB_VERSION_1) + +typedef struct xfs_sb +{ + __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */ + __uint32_t sb_blocksize; /* logical block size, bytes */ + xfs_drfsbno_t sb_dblocks; /* number of data blocks */ + xfs_drfsbno_t sb_rblocks; /* number of realtime blocks */ + xfs_drtbno_t sb_rextents; /* number of realtime extents */ + uuid_t sb_uuid; /* file system unique id */ + xfs_dfsbno_t sb_logstart; /* starting block of log if internal */ + xfs_ino_t sb_rootino; /* root inode number */ + xfs_ino_t sb_rbmino; /* bitmap inode for realtime extents */ + xfs_ino_t sb_rsumino; /* summary inode for rt bitmap */ + xfs_agblock_t sb_rextsize; /* realtime extent size, blocks */ + xfs_agblock_t sb_agblocks; /* size of an allocation group */ + xfs_agnumber_t sb_agcount; /* number of allocation groups */ + xfs_extlen_t sb_rbmblocks; /* number of rt bitmap blocks */ + xfs_extlen_t sb_logblocks; /* number of log blocks */ + __uint16_t sb_versionnum; /* header version == XFS_SB_VERSION */ + __uint16_t sb_sectsize; /* volume sector size, bytes */ + __uint16_t sb_inodesize; /* inode size, bytes */ + __uint16_t sb_inopblock; /* inodes per block */ + char sb_fname[12]; /* file system name */ + __uint8_t sb_blocklog; /* log2 of sb_blocksize */ + __uint8_t sb_sectlog; /* log2 of sb_sectsize */ + __uint8_t sb_inodelog; /* log2 of sb_inodesize */ + __uint8_t sb_inopblog; /* log2 of sb_inopblock */ + __uint8_t sb_agblklog; /* log2 of sb_agblocks (rounded up) */ + __uint8_t sb_rextslog; /* log2 of sb_rextents */ + __uint8_t sb_inprogress; /* mkfs is in progress, don't mount */ + __uint8_t sb_imax_pct; /* max % of fs for inode space */ + /* statistics */ + /* + * These fields must remain contiguous. If you really + * want to change their layout, make sure you fix the + * code in xfs_trans_apply_sb_deltas(). + */ + __uint64_t sb_icount; /* allocated inodes */ + __uint64_t sb_ifree; /* free inodes */ + __uint64_t sb_fdblocks; /* free data blocks */ + __uint64_t sb_frextents; /* free realtime extents */ + /* + * End contiguous fields. + */ + xfs_ino_t sb_uquotino; /* user quota inode */ + xfs_ino_t sb_gquotino; /* group quota inode */ + __uint16_t sb_qflags; /* quota flags */ + __uint8_t sb_flags; /* misc. flags */ + __uint8_t sb_shared_vn; /* shared version number */ + xfs_extlen_t sb_inoalignmt; /* inode chunk alignment, fsblocks */ + __uint32_t sb_unit; /* stripe or raid unit */ + __uint32_t sb_width; /* stripe or raid width */ + __uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */ + __uint8_t sb_logsectlog; /* log2 of the log sector size */ + __uint16_t sb_logsectsize; /* sector size for the log, bytes */ + __uint32_t sb_logsunit; /* stripe unit size for the log */ +} xfs_sb_t; + +/* + * Sequence number values for the fields. 
+ */ +typedef enum { + XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS, + XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO, + XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS, + XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS, + XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE, + XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG, + XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG, + XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT, + XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO, + XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN, + XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG, + XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT, + XFS_SBS_FIELDCOUNT +} xfs_sb_field_t; + +/* + * Mask values, defined based on the xfs_sb_field_t values. + * Only define the ones we're using. + */ +#define XFS_SB_MVAL(x) (1LL << XFS_SBS_ ## x) +#define XFS_SB_UUID XFS_SB_MVAL(UUID) +#define XFS_SB_FNAME XFS_SB_MVAL(FNAME) +#define XFS_SB_ROOTINO XFS_SB_MVAL(ROOTINO) +#define XFS_SB_RBMINO XFS_SB_MVAL(RBMINO) +#define XFS_SB_RSUMINO XFS_SB_MVAL(RSUMINO) +#define XFS_SB_VERSIONNUM XFS_SB_MVAL(VERSIONNUM) +#define XFS_SB_UQUOTINO XFS_SB_MVAL(UQUOTINO) +#define XFS_SB_GQUOTINO XFS_SB_MVAL(GQUOTINO) +#define XFS_SB_QFLAGS XFS_SB_MVAL(QFLAGS) +#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN) +#define XFS_SB_UNIT XFS_SB_MVAL(UNIT) +#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH) +#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT) +#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1) +#define XFS_SB_MOD_BITS \ + (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \ + XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \ + XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH) + +/* + * Misc. Flags - warning - these will be cleared by xfs_repair unless + * a feature bit is set when the flag is used. + */ +#define XFS_SBF_NOFLAGS 0x00 /* no flags set */ +#define XFS_SBF_READONLY 0x01 /* only read-only mounts allowed */ + +/* + * define max. shared version we can interoperate with + */ +#define XFS_SB_MAX_SHARED_VN 0 + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_NUM) +int xfs_sb_version_num(xfs_sb_t *sbp); +#define XFS_SB_VERSION_NUM(sbp) xfs_sb_version_num(sbp) +#else +#define XFS_SB_VERSION_NUM(sbp) ((sbp)->sb_versionnum & XFS_SB_VERSION_NUMBITS) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_GOOD_VERSION) +int xfs_sb_good_version(xfs_sb_t *sbp); +#define XFS_SB_GOOD_VERSION(sbp) xfs_sb_good_version(sbp) +#else +#define XFS_SB_GOOD_VERSION_INT(sbp) \ + ((((sbp)->sb_versionnum >= XFS_SB_VERSION_1) && \ + ((sbp)->sb_versionnum <= XFS_SB_VERSION_3)) || \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + !((sbp)->sb_versionnum & ~XFS_SB_VERSION_OKREALBITS) +#ifdef __KERNEL__ +#define XFS_SB_GOOD_VERSION(sbp) \ + (XFS_SB_GOOD_VERSION_INT(sbp) && \ + (sbp)->sb_shared_vn <= XFS_SB_MAX_SHARED_VN) )) +#else +/* + * extra 2 paren's here (( to unconfuse paren-matching editors + * like vi because XFS_SB_GOOD_VERSION_INT is a partial expression + * and the two XFS_SB_GOOD_VERSION's each 2 more close paren's to + * complete the expression. 
+ */ +#define XFS_SB_GOOD_VERSION(sbp) \ + (XFS_SB_GOOD_VERSION_INT(sbp) && \ + (!((sbp)->sb_versionnum & XFS_SB_VERSION_SHAREDBIT) || \ + (sbp)->sb_shared_vn <= XFS_SB_MAX_SHARED_VN)) )) +#endif /* __KERNEL__ */ +#endif + +#define XFS_SB_GOOD_SASH_VERSION(sbp) \ + ((((sbp)->sb_versionnum >= XFS_SB_VERSION_1) && \ + ((sbp)->sb_versionnum <= XFS_SB_VERSION_3)) || \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + !((sbp)->sb_versionnum & ~XFS_SB_VERSION_OKSASHBITS))) + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_TONEW) +unsigned xfs_sb_version_tonew(unsigned v); +#define XFS_SB_VERSION_TONEW(v) xfs_sb_version_tonew(v) +#else +#define XFS_SB_VERSION_TONEW(v) \ + ((((v) == XFS_SB_VERSION_1) ? \ + 0 : \ + (((v) == XFS_SB_VERSION_2) ? \ + XFS_SB_VERSION_ATTRBIT : \ + (XFS_SB_VERSION_ATTRBIT | XFS_SB_VERSION_NLINKBIT))) | \ + XFS_SB_VERSION_4) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_TOOLD) +unsigned xfs_sb_version_toold(unsigned v); +#define XFS_SB_VERSION_TOOLD(v) xfs_sb_version_toold(v) +#else +#define XFS_SB_VERSION_TOOLD(v) \ + (((v) & (XFS_SB_VERSION_QUOTABIT | XFS_SB_VERSION_ALIGNBIT)) ? \ + 0 : \ + (((v) & XFS_SB_VERSION_NLINKBIT) ? \ + XFS_SB_VERSION_3 : \ + (((v) & XFS_SB_VERSION_ATTRBIT) ? \ + XFS_SB_VERSION_2 : \ + XFS_SB_VERSION_1))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASATTR) +int xfs_sb_version_hasattr(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASATTR(sbp) xfs_sb_version_hasattr(sbp) +#else +#define XFS_SB_VERSION_HASATTR(sbp) \ + (((sbp)->sb_versionnum == XFS_SB_VERSION_2) || \ + ((sbp)->sb_versionnum == XFS_SB_VERSION_3) || \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_ATTRBIT))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDATTR) +void xfs_sb_version_addattr(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDATTR(sbp) xfs_sb_version_addattr(sbp) +#else +#define XFS_SB_VERSION_ADDATTR(sbp) \ + ((sbp)->sb_versionnum = \ + (((sbp)->sb_versionnum == XFS_SB_VERSION_1) ? \ + XFS_SB_VERSION_2 : \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) ? \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_ATTRBIT) : \ + (XFS_SB_VERSION_4 | XFS_SB_VERSION_ATTRBIT)))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASNLINK) +int xfs_sb_version_hasnlink(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASNLINK(sbp) xfs_sb_version_hasnlink(sbp) +#else +#define XFS_SB_VERSION_HASNLINK(sbp) \ + (((sbp)->sb_versionnum == XFS_SB_VERSION_3) || \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_NLINKBIT))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDNLINK) +void xfs_sb_version_addnlink(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDNLINK(sbp) xfs_sb_version_addnlink(sbp) +#else +#define XFS_SB_VERSION_ADDNLINK(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum <= XFS_SB_VERSION_2 ? 
\ + XFS_SB_VERSION_3 : \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_NLINKBIT))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASQUOTA) +int xfs_sb_version_hasquota(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASQUOTA(sbp) xfs_sb_version_hasquota(sbp) +#else +#define XFS_SB_VERSION_HASQUOTA(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_QUOTABIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDQUOTA) +void xfs_sb_version_addquota(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDQUOTA(sbp) xfs_sb_version_addquota(sbp) +#else +#define XFS_SB_VERSION_ADDQUOTA(sbp) \ + ((sbp)->sb_versionnum = \ + (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 ? \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_QUOTABIT) : \ + (XFS_SB_VERSION_TONEW((sbp)->sb_versionnum) | \ + XFS_SB_VERSION_QUOTABIT))) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASALIGN) +int xfs_sb_version_hasalign(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASALIGN(sbp) xfs_sb_version_hasalign(sbp) +#else +#define XFS_SB_VERSION_HASALIGN(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_ALIGNBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_SUBALIGN) +void xfs_sb_version_subalign(xfs_sb_t *sbp); +#define XFS_SB_VERSION_SUBALIGN(sbp) xfs_sb_version_subalign(sbp) +#else +#define XFS_SB_VERSION_SUBALIGN(sbp) \ + ((sbp)->sb_versionnum = \ + XFS_SB_VERSION_TOOLD((sbp)->sb_versionnum & ~XFS_SB_VERSION_ALIGNBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASDALIGN) +int xfs_sb_version_hasdalign(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASDALIGN(sbp) xfs_sb_version_hasdalign(sbp) +#else +#define XFS_SB_VERSION_HASDALIGN(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_DALIGNBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDDALIGN) +int xfs_sb_version_adddalign(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDDALIGN(sbp) xfs_sb_version_adddalign(sbp) +#else +#define XFS_SB_VERSION_ADDDALIGN(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_DALIGNBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASSHARED) +int xfs_sb_version_hasshared(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASSHARED(sbp) xfs_sb_version_hasshared(sbp) +#else +#define XFS_SB_VERSION_HASSHARED(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_SHAREDBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDSHARED) +int xfs_sb_version_addshared(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDSHARED(sbp) xfs_sb_version_addshared(sbp) +#else +#define XFS_SB_VERSION_ADDSHARED(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_SHAREDBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_SUBSHARED) +int xfs_sb_version_subshared(xfs_sb_t *sbp); +#define XFS_SB_VERSION_SUBSHARED(sbp) xfs_sb_version_subshared(sbp) +#else +#define XFS_SB_VERSION_SUBSHARED(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum & ~XFS_SB_VERSION_SHAREDBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASDIRV2) +int xfs_sb_version_hasdirv2(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASDIRV2(sbp) xfs_sb_version_hasdirv2(sbp) +#else +#define XFS_SB_VERSION_HASDIRV2(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == 
XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_DIRV2BIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASLOGV2) +int xfs_sb_version_haslogv2(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASLOGV2(sbp) xfs_sb_version_haslogv2(sbp) +#else +#define XFS_SB_VERSION_HASLOGV2(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_LOGV2BIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASEXTFLGBIT) +int xfs_sb_version_hasextflgbit(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASEXTFLGBIT(sbp) xfs_sb_version_hasextflgbit(sbp) +#else +#define XFS_SB_VERSION_HASEXTFLGBIT(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_ADDEXTFLGBIT) +int xfs_sb_version_addextflgbit(xfs_sb_t *sbp); +#define XFS_SB_VERSION_ADDEXTFLGBIT(sbp) xfs_sb_version_addextflgbit(sbp) +#else +#define XFS_SB_VERSION_ADDEXTFLGBIT(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum | XFS_SB_VERSION_EXTFLGBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_SUBEXTFLGBIT) +int xfs_sb_version_subextflgbit(xfs_sb_t *sbp); +#define XFS_SB_VERSION_SUBEXTFLGBIT(sbp) xfs_sb_version_subextflgbit(sbp) +#else +#define XFS_SB_VERSION_SUBEXTFLGBIT(sbp) \ + ((sbp)->sb_versionnum = \ + ((sbp)->sb_versionnum & ~XFS_SB_VERSION_EXTFLGBIT)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_VERSION_HASSECTOR) +int xfs_sb_version_hassector(xfs_sb_t *sbp); +#define XFS_SB_VERSION_HASSECTOR(sbp) xfs_sb_version_hassector(sbp) +#else +#define XFS_SB_VERSION_HASSECTOR(sbp) \ + ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + ((sbp)->sb_versionnum & XFS_SB_VERSION_SECTORBIT)) +#endif + +/* + * end of superblock version macros + */ + +#define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_BLOCK) +xfs_agblock_t xfs_sb_block(struct xfs_mount *mp); +#define XFS_SB_BLOCK(mp) xfs_sb_block(mp) +#else +#define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_HDR_BLOCK) +xfs_agblock_t xfs_hdr_block(struct xfs_mount *mp, xfs_daddr_t d); +#define XFS_HDR_BLOCK(mp,d) xfs_hdr_block(mp,d) +#else +#define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)(XFS_BB_TO_FSBT(mp,d))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_DADDR_TO_FSB) +xfs_fsblock_t xfs_daddr_to_fsb(struct xfs_mount *mp, xfs_daddr_t d); +#define XFS_DADDR_TO_FSB(mp,d) xfs_daddr_to_fsb(mp,d) +#else +#define XFS_DADDR_TO_FSB(mp,d) \ + XFS_AGB_TO_FSB(mp, XFS_DADDR_TO_AGNO(mp,d), XFS_DADDR_TO_AGBNO(mp,d)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_FSB_TO_DADDR) +xfs_daddr_t xfs_fsb_to_daddr(struct xfs_mount *mp, xfs_fsblock_t fsbno); +#define XFS_FSB_TO_DADDR(mp,fsbno) xfs_fsb_to_daddr(mp,fsbno) +#else +#define XFS_FSB_TO_DADDR(mp,fsbno) \ + XFS_AGB_TO_DADDR(mp, XFS_FSB_TO_AGNO(mp,fsbno), \ + XFS_FSB_TO_AGBNO(mp,fsbno)) +#endif + +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBP) +xfs_sb_t *xfs_buf_to_sbp(struct xfs_buf *bp); +#define XFS_BUF_TO_SBP(bp) xfs_buf_to_sbp(bp) +#else +#define XFS_BUF_TO_SBP(bp) ((xfs_sb_t *)XFS_BUF_PTR(bp)) +#endif + +/* + * File system sector to basic block conversions. 
+ */ +#define XFS_FSS_TO_BB(mp,sec) ((sec) << (mp)->m_sectbb_log) +#define XFS_BB_TO_FSS(mp,bb) \ + (((bb) + (XFS_FSS_TO_BB(mp,1) - 1)) >> (mp)->m_sectbb_log) +#define XFS_BB_TO_FSST(mp,bb) ((bb) >> (mp)->m_sectbb_log) + +/* + * File system sector to byte conversions. + */ +#define XFS_FSS_TO_B(mp,sectno) ((xfs_fsize_t)(sectno) << (mp)->m_sb.sb_sectlog) +#define XFS_B_TO_FSST(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_sectlog) + +/* + * File system block to basic block conversions. + */ +#define XFS_FSB_TO_BB(mp,fsbno) ((fsbno) << (mp)->m_blkbb_log) +#define XFS_BB_TO_FSB(mp,bb) \ + (((bb) + (XFS_FSB_TO_BB(mp,1) - 1)) >> (mp)->m_blkbb_log) +#define XFS_BB_TO_FSBT(mp,bb) ((bb) >> (mp)->m_blkbb_log) +#define XFS_BB_FSB_OFFSET(mp,bb) ((bb) & ((mp)->m_bsize - 1)) + +/* + * File system block to byte conversions. + */ +#define XFS_FSB_TO_B(mp,fsbno) ((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog) +#define XFS_B_TO_FSB(mp,b) \ + ((((__uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog) +#define XFS_B_TO_FSBT(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog) +#define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask) + +#endif /* __XFS_SB_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_trans_ail.c linux.22-ac2/fs/xfs/xfs_trans_ail.c --- linux.vanilla/fs/xfs/xfs_trans_ail.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_trans_ail.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,597 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_log.h" +#include "xfs_trans_priv.h" +#include "xfs_error.h" + +STATIC void xfs_ail_insert(xfs_ail_entry_t *, xfs_log_item_t *); +STATIC xfs_log_item_t * xfs_ail_delete(xfs_ail_entry_t *, xfs_log_item_t *); +STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_entry_t *); +STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *); + +#ifdef XFSDEBUG +STATIC void xfs_ail_check(xfs_ail_entry_t *); +#else +#define xfs_ail_check(a) +#endif /* XFSDEBUG */ + + +/* + * This is called by the log manager code to determine the LSN + * of the tail of the log. This is exactly the LSN of the first + * item in the AIL. If the AIL is empty, then this function + * returns 0. + * + * We need the AIL lock in order to get a coherent read of the + * lsn of the last item in the AIL. + */ +xfs_lsn_t +xfs_trans_tail_ail( + xfs_mount_t *mp) +{ + xfs_lsn_t lsn; + xfs_log_item_t *lip; + SPLDECL(s); + + AIL_LOCK(mp,s); + lip = xfs_ail_min(&(mp->m_ail)); + if (lip == NULL) { + lsn = (xfs_lsn_t)0; + } else { + lsn = lip->li_lsn; + } + AIL_UNLOCK(mp, s); + + return lsn; +} + +/* + * xfs_trans_push_ail + * + * This routine is called to move the tail of the AIL + * forward. It does this by trying to flush items in the AIL + * whose lsns are below the given threshold_lsn. + * + * The routine returns the lsn of the tail of the log. + */ +xfs_lsn_t +xfs_trans_push_ail( + xfs_mount_t *mp, + xfs_lsn_t threshold_lsn) +{ + xfs_lsn_t lsn; + xfs_log_item_t *lip; + int gen; + int restarts; + int lock_result; + int flush_log; + SPLDECL(s); + +#define XFS_TRANS_PUSH_AIL_RESTARTS 10 + + AIL_LOCK(mp,s); + lip = xfs_trans_first_ail(mp, &gen); + if (lip == NULL || XFS_FORCED_SHUTDOWN(mp)) { + /* + * Just return if the AIL is empty. + */ + AIL_UNLOCK(mp, s); + return (xfs_lsn_t)0; + } + + XFS_STATS_INC(xfsstats.xs_push_ail); + + /* + * While the item we are looking at is below the given threshold + * try to flush it out. Make sure to limit the number of times + * we allow xfs_trans_next_ail() to restart scanning from the + * beginning of the list. We'd like not to stop until we've at least + * tried to push on everything in the AIL with an LSN less than + * the given threshold. However, we may give up before that if + * we realize that we've been holding the AIL_LOCK for 'too long', + * blocking interrupts. Currently, too long is < 500us roughly. + */ + flush_log = 0; + restarts = 0; + while (((restarts < XFS_TRANS_PUSH_AIL_RESTARTS) && + (XFS_LSN_CMP(lip->li_lsn, threshold_lsn) < 0))) { + /* + * If we can lock the item without sleeping, unlock + * the AIL lock and flush the item. Then re-grab the + * AIL lock so we can look for the next item on the + * AIL. Since we unlock the AIL while we flush the + * item, the next routine may start over again at the + * the beginning of the list if anything has changed. + * That is what the generation count is for. + * + * If we can't lock the item, either its holder will flush + * it or it is already being flushed or it is being relogged. 
+ * In any of these case it is being taken care of and we + * can just skip to the next item in the list. + */ + lock_result = IOP_TRYLOCK(lip); + switch (lock_result) { + case XFS_ITEM_SUCCESS: + AIL_UNLOCK(mp, s); + XFS_STATS_INC(xfsstats.xs_push_ail_success); + IOP_PUSH(lip); + AIL_LOCK(mp,s); + break; + + case XFS_ITEM_PUSHBUF: + AIL_UNLOCK(mp, s); + XFS_STATS_INC(xfsstats.xs_push_ail_pushbuf); +#ifdef XFSRACEDEBUG + delay_for_intr(); + delay(300); +#endif + ASSERT(lip->li_ops->iop_pushbuf); + ASSERT(lip); + IOP_PUSHBUF(lip); + AIL_LOCK(mp,s); + break; + + case XFS_ITEM_PINNED: + XFS_STATS_INC(xfsstats.xs_push_ail_pinned); + flush_log = 1; + break; + + case XFS_ITEM_LOCKED: + XFS_STATS_INC(xfsstats.xs_push_ail_locked); + break; + + case XFS_ITEM_FLUSHING: + XFS_STATS_INC(xfsstats.xs_push_ail_flushing); + break; + + default: + ASSERT(0); + break; + } + + lip = xfs_trans_next_ail(mp, lip, &gen, &restarts); + if (lip == NULL) { + break; + } + if (XFS_FORCED_SHUTDOWN(mp)) { + /* + * Just return if we shut down during the last try. + */ + AIL_UNLOCK(mp, s); + return (xfs_lsn_t)0; + } + + } + + if (flush_log) { + /* + * If something we need to push out was pinned, then + * push out the log so it will become unpinned and + * move forward in the AIL. + */ + AIL_UNLOCK(mp, s); + XFS_STATS_INC(xfsstats.xs_push_ail_flush); + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + AIL_LOCK(mp, s); + } + + lip = xfs_ail_min(&(mp->m_ail)); + if (lip == NULL) { + lsn = (xfs_lsn_t)0; + } else { + lsn = lip->li_lsn; + } + + AIL_UNLOCK(mp, s); + return lsn; +} /* xfs_trans_push_ail */ + + +/* + * This is to be called when an item is unlocked that may have + * been in the AIL. It will wake up the first member of the AIL + * wait list if this item's unlocking might allow it to progress. + * If the item is in the AIL, then we need to get the AIL lock + * while doing our checking so we don't race with someone going + * to sleep waiting for this event in xfs_trans_push_ail(). + */ +void +xfs_trans_unlocked_item( + xfs_mount_t *mp, + xfs_log_item_t *lip) +{ + xfs_log_item_t *min_lip; + + /* + * If we're forcibly shutting down, we may have + * unlocked log items arbitrarily. The last thing + * we want to do is to move the tail of the log + * over some potentially valid data. + */ + if (!(lip->li_flags & XFS_LI_IN_AIL) || + XFS_FORCED_SHUTDOWN(mp)) { + return; + } + + /* + * This is the one case where we can call into xfs_ail_min() + * without holding the AIL lock because we only care about the + * case where we are at the tail of the AIL. If the object isn't + * at the tail, it doesn't matter what result we get back. This + * is slightly racy because since we were just unlocked, we could + * go to sleep between the call to xfs_ail_min and the call to + * xfs_log_move_tail, have someone else lock us, commit to us disk, + * move us out of the tail of the AIL, and then we wake up. However, + * the call to xfs_log_move_tail() doesn't do anything if there's + * not enough free space to wake people up so we're safe calling it. + */ + min_lip = xfs_ail_min(&mp->m_ail); + + if (min_lip == lip) + xfs_log_move_tail(mp, 1); +} /* xfs_trans_unlocked_item */ + + +/* + * Update the position of the item in the AIL with the new + * lsn. If it is not yet in the AIL, add it. Otherwise, move + * it to its new position by removing it and re-adding it. + * + * Wakeup anyone with an lsn less than the item's lsn. If the item + * we move in the AIL is the minimum one, update the tail lsn in the + * log manager. 
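
As a rough model of the push loop above, the sketch below walks a list of items in ascending LSN order, "flushes" everything below a threshold, and records whether a pinned item was seen so the caller knows a log force is needed. The item states, structure, and helper names are invented; only the control flow loosely follows xfs_trans_push_ail().

#include <stdio.h>

enum state { ITEM_FLUSHABLE, ITEM_PINNED, ITEM_LOCKED };

struct item {
        long       lsn;
        enum state state;
};

/* Items are assumed sorted by ascending lsn, as the AIL is. */
static int push_below(struct item *items, int n, long threshold)
{
        int flush_log = 0;

        for (int i = 0; i < n && items[i].lsn < threshold; i++) {
                switch (items[i].state) {
                case ITEM_FLUSHABLE:    /* trylock succeeded: flush it out */
                        printf("flush item lsn=%ld\n", items[i].lsn);
                        break;
                case ITEM_PINNED:       /* still pinned: a log force is needed */
                        flush_log = 1;
                        break;
                case ITEM_LOCKED:       /* the holder will write it; skip */
                        break;
                }
        }
        return flush_log;
}

int main(void)
{
        struct item ail[] = {
                { 10, ITEM_FLUSHABLE }, { 12, ITEM_PINNED }, { 30, ITEM_LOCKED },
        };

        if (push_below(ail, 3, 20))
                printf("force the log so pinned items can move forward\n");
        return 0;
}
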
+ * + * Increment the AIL's generation count to indicate that the tree + * has changed. + * + * This function must be called with the AIL lock held. The lock + * is dropped before returning, so the caller must pass in the + * cookie returned by AIL_LOCK. + */ +void +xfs_trans_update_ail( + xfs_mount_t *mp, + xfs_log_item_t *lip, + xfs_lsn_t lsn, + unsigned long s) +{ + xfs_ail_entry_t *ailp; + xfs_log_item_t *dlip=NULL; + xfs_log_item_t *mlip; /* ptr to minimum lip */ + + ailp = &(mp->m_ail); + mlip = xfs_ail_min(ailp); + + if (lip->li_flags & XFS_LI_IN_AIL) { + dlip = xfs_ail_delete(ailp, lip); + ASSERT(dlip == lip); + } else { + lip->li_flags |= XFS_LI_IN_AIL; + } + + lip->li_lsn = lsn; + + xfs_ail_insert(ailp, lip); + mp->m_ail_gen++; + + if (mlip == dlip) { + mlip = xfs_ail_min(&(mp->m_ail)); + AIL_UNLOCK(mp, s); + xfs_log_move_tail(mp, mlip->li_lsn); + } else { + AIL_UNLOCK(mp, s); + } + + +} /* xfs_trans_update_ail */ + +/* + * Delete the given item from the AIL. It must already be in + * the AIL. + * + * Wakeup anyone with an lsn less than item's lsn. If the item + * we delete in the AIL is the minimum one, update the tail lsn in the + * log manager. + * + * Clear the IN_AIL flag from the item, reset its lsn to 0, and + * bump the AIL's generation count to indicate that the tree + * has changed. + * + * This function must be called with the AIL lock held. The lock + * is dropped before returning, so the caller must pass in the + * cookie returned by AIL_LOCK. + */ +void +xfs_trans_delete_ail( + xfs_mount_t *mp, + xfs_log_item_t *lip, + unsigned long s) +{ + xfs_ail_entry_t *ailp; + xfs_log_item_t *dlip; + xfs_log_item_t *mlip; + + if (lip->li_flags & XFS_LI_IN_AIL) { + ailp = &(mp->m_ail); + mlip = xfs_ail_min(ailp); + dlip = xfs_ail_delete(ailp, lip); + ASSERT(dlip == lip); + + + lip->li_flags &= ~XFS_LI_IN_AIL; + lip->li_lsn = 0; + mp->m_ail_gen++; + + if (mlip == dlip) { + mlip = xfs_ail_min(&(mp->m_ail)); + AIL_UNLOCK(mp, s); + xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0)); + } else { + AIL_UNLOCK(mp, s); + } + } + else { + /* + * If the file system is not being shutdown, we are in + * serious trouble if we get to this stage. + */ + if (XFS_FORCED_SHUTDOWN(mp)) + AIL_UNLOCK(mp, s); + else { + xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, + "xfs_trans_delete_ail: attempting to delete a log item that is not in the AIL"); + xfs_force_shutdown(mp, XFS_CORRUPT_INCORE); + AIL_UNLOCK(mp, s); + } + } +} + + + +/* + * Return the item in the AIL with the smallest lsn. + * Return the current tree generation number for use + * in calls to xfs_trans_next_ail(). + */ +xfs_log_item_t * +xfs_trans_first_ail( + xfs_mount_t *mp, + int *gen) +{ + xfs_log_item_t *lip; + + lip = xfs_ail_min(&(mp->m_ail)); + *gen = (int)mp->m_ail_gen; + + return (lip); +} + +/* + * If the generation count of the tree has not changed since the + * caller last took something from the AIL, then return the elmt + * in the tree which follows the one given. If the count has changed, + * then return the minimum elmt of the AIL and bump the restarts counter + * if one is given. 
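
xfs_trans_update_ail() and xfs_trans_delete_ail() are entered with the AIL lock held and return with it dropped. The small pthread sketch below illustrates only that calling convention; the real code passes the interrupt-state cookie returned by AIL_LOCK() rather than a mutex pointer, and every name here is invented for the example.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;
static long ail_min_lsn = 100;

/* Caller holds ail_lock; we update state and drop the lock before returning. */
static void update_ail(long new_lsn)
{
        if (new_lsn < ail_min_lsn)
                ail_min_lsn = new_lsn;
        pthread_mutex_unlock(&ail_lock);        /* released here, not by the caller */
}

int main(void)
{
        pthread_mutex_lock(&ail_lock);          /* like AIL_LOCK(mp, s) */
        update_ail(42);                         /* like xfs_trans_update_ail(..., s) */
        printf("min lsn is now %ld\n", ail_min_lsn);
        return 0;
}
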
+ */ +xfs_log_item_t * +xfs_trans_next_ail( + xfs_mount_t *mp, + xfs_log_item_t *lip, + int *gen, + int *restarts) +{ + xfs_log_item_t *nlip; + + ASSERT(mp && lip && gen); + if (mp->m_ail_gen == *gen) { + nlip = xfs_ail_next(&(mp->m_ail), lip); + } else { + nlip = xfs_ail_min(&(mp->m_ail)); + *gen = (int)mp->m_ail_gen; + if (restarts != NULL) { + XFS_STATS_INC(xfsstats.xs_push_ail_restarts); + (*restarts)++; + } + } + + return (nlip); +} + + +/* + * The active item list (AIL) is a doubly linked list of log + * items sorted by ascending lsn. The base of the list is + * a forw/back pointer pair embedded in the xfs mount structure. + * The base is initialized with both pointers pointing to the + * base. This case always needs to be distinguished, because + * the base has no lsn to look at. We almost always insert + * at the end of the list, so on inserts we search from the + * end of the list to find where the new item belongs. + */ + +/* + * Initialize the doubly linked list to point only to itself. + */ +void +xfs_trans_ail_init( + xfs_mount_t *mp) +{ + mp->m_ail.ail_forw = (xfs_log_item_t*)&(mp->m_ail); + mp->m_ail.ail_back = (xfs_log_item_t*)&(mp->m_ail); +} + +/* + * Insert the given log item into the AIL. + * We almost always insert at the end of the list, so on inserts + * we search from the end of the list to find where the + * new item belongs. + */ +STATIC void +xfs_ail_insert( + xfs_ail_entry_t *base, + xfs_log_item_t *lip) +/* ARGSUSED */ +{ + xfs_log_item_t *next_lip; + + /* + * If the list is empty, just insert the item. + */ + if (base->ail_back == (xfs_log_item_t*)base) { + base->ail_forw = lip; + base->ail_back = lip; + lip->li_ail.ail_forw = (xfs_log_item_t*)base; + lip->li_ail.ail_back = (xfs_log_item_t*)base; + return; + } + + next_lip = base->ail_back; + while ((next_lip != (xfs_log_item_t*)base) && + (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) > 0)) { + next_lip = next_lip->li_ail.ail_back; + } + ASSERT((next_lip == (xfs_log_item_t*)base) || + (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)); + lip->li_ail.ail_forw = next_lip->li_ail.ail_forw; + lip->li_ail.ail_back = next_lip; + next_lip->li_ail.ail_forw = lip; + lip->li_ail.ail_forw->li_ail.ail_back = lip; + + xfs_ail_check(base); + return; +} + +/* + * Delete the given item from the AIL. Return a pointer to the item. + */ +/*ARGSUSED*/ +STATIC xfs_log_item_t * +xfs_ail_delete( + xfs_ail_entry_t *base, + xfs_log_item_t *lip) +/* ARGSUSED */ +{ + lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back; + lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw; + lip->li_ail.ail_forw = NULL; + lip->li_ail.ail_back = NULL; + + xfs_ail_check(base); + return lip; +} + +/* + * Return a pointer to the first item in the AIL. + * If the AIL is empty, then return NULL. + */ +STATIC xfs_log_item_t * +xfs_ail_min( + xfs_ail_entry_t *base) +/* ARGSUSED */ +{ + register xfs_log_item_t *forw = base->ail_forw; + if (forw == (xfs_log_item_t*)base) { + return NULL; + } + return forw; +} + +/* + * Return a pointer to the item which follows + * the given item in the AIL. If the given item + * is the last item in the list, then return NULL. + */ +STATIC xfs_log_item_t * +xfs_ail_next( + xfs_ail_entry_t *base, + xfs_log_item_t *lip) +/* ARGSUSED */ +{ + if (lip->li_ail.ail_forw == (xfs_log_item_t*)base) { + return NULL; + } + return lip->li_ail.ail_forw; + +} + +#ifdef XFSDEBUG +/* + * Check that the list is sorted as it should be. 
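
The list discipline described above (a self-pointing sentinel, ascending LSN order, insertion by searching backwards from the tail) can be exercised outside the kernel. The sketch below is a simplified stand-in for xfs_ail_insert(); the node type and field names are invented.

#include <stdio.h>

struct node {
        long        lsn;
        struct node *forw;
        struct node *back;
};

static void list_init(struct node *base)
{
        base->forw = base;
        base->back = base;
}

/* Mirror of xfs_ail_insert(): search from the tail for the insertion point. */
static void list_insert(struct node *base, struct node *lip)
{
        struct node *next = base->back;

        while (next != base && next->lsn > lip->lsn)
                next = next->back;

        lip->forw = next->forw;
        lip->back = next;
        next->forw = lip;
        lip->forw->back = lip;
}

int main(void)
{
        struct node base, a = { .lsn = 30 }, b = { .lsn = 10 }, c = { .lsn = 20 };

        list_init(&base);
        list_insert(&base, &a);
        list_insert(&base, &b);
        list_insert(&base, &c);

        /* Walk forward from the sentinel: prints 10 20 30. */
        for (struct node *p = base.forw; p != &base; p = p->forw)
                printf("%ld ", p->lsn);
        printf("\n");
        return 0;
}

Because the sentinel guarantees every node has valid neighbours, deletion (as in xfs_ail_delete()) is a plain unlink with no special cases for the head or tail.
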
+ */ +STATIC void +xfs_ail_check( + xfs_ail_entry_t *base) +{ + xfs_log_item_t *lip; + xfs_log_item_t *prev_lip; + + lip = base->ail_forw; + if (lip == (xfs_log_item_t*)base) { + /* + * Make sure the pointers are correct when the list + * is empty. + */ + ASSERT(base->ail_back == (xfs_log_item_t*)base); + return; + } + + /* + * Walk the list checking forward and backward pointers, + * lsn ordering, and that every entry has the XFS_LI_IN_AIL + * flag set. + */ + prev_lip = (xfs_log_item_t*)base; + while (lip != (xfs_log_item_t*)base) { + if (prev_lip != (xfs_log_item_t*)base) { + ASSERT(prev_lip->li_ail.ail_forw == lip); + ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); + } + ASSERT(lip->li_ail.ail_back == prev_lip); + ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); + prev_lip = lip; + lip = lip->li_ail.ail_forw; + } + ASSERT(lip == (xfs_log_item_t*)base); + ASSERT(base->ail_back == prev_lip); +} +#endif /* XFSDEBUG */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_trans_buf.c linux.22-ac2/fs/xfs/xfs_trans_buf.c --- linux.vanilla/fs/xfs/xfs_trans_buf.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_trans_buf.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,1100 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_error.h" +#include "xfs_rw.h" + + +STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *, + xfs_daddr_t, int); +STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *, + xfs_daddr_t, int); + + +/* + * Get and lock the buffer for the caller if it is not already + * locked within the given transaction. If it is already locked + * within the transaction, just increment its lock recursion count + * and return a pointer to it. 
+ * + * Use the fast path function xfs_trans_buf_item_match() or the buffer + * cache routine incore_match() to find the buffer + * if it is already owned by this transaction. + * + * If we don't already own the buffer, use get_buf() to get it. + * If it doesn't yet have an associated xfs_buf_log_item structure, + * then allocate one and add the item to this transaction. + * + * If the transaction pointer is NULL, make this just a normal + * get_buf() call. + */ +xfs_buf_t * +xfs_trans_get_buf(xfs_trans_t *tp, + xfs_buftarg_t *target_dev, + xfs_daddr_t blkno, + int len, + uint flags) +{ + xfs_buf_t *bp; + xfs_buf_log_item_t *bip; + + if (flags == 0) + flags = XFS_BUF_LOCK | XFS_BUF_MAPPED; + + /* + * Default to a normal get_buf() call if the tp is NULL. + */ + if (tp == NULL) { + bp = xfs_buf_get_flags(target_dev, blkno, len, + flags | BUF_BUSY); + return(bp); + } + + /* + * If we find the buffer in the cache with this transaction + * pointer in its b_fsprivate2 field, then we know we already + * have it locked. In this case we just increment the lock + * recursion count and return the buffer to the caller. + */ + if (tp->t_items.lic_next == NULL) { + bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len); + } else { + bp = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len); + } + if (bp != NULL) { + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) { + xfs_buftrace("TRANS GET RECUR SHUT", bp); + XFS_BUF_SUPER_STALE(bp); + } + /* + * If the buffer is stale then it was binval'ed + * since last read. This doesn't matter since the + * caller isn't allowed to use the data anyway. + */ + else if (XFS_BUF_ISSTALE(bp)) { + xfs_buftrace("TRANS GET RECUR STALE", bp); + ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); + } + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + bip->bli_recur++; + xfs_buftrace("TRANS GET RECUR", bp); + xfs_buf_item_trace("GET RECUR", bip); + return (bp); + } + + /* + * We always specify the BUF_BUSY flag within a transaction so + * that get_buf does not try to push out a delayed write buffer + * which might cause another transaction to take place (if the + * buffer was delayed alloc). Such recursive transactions can + * easily deadlock with our current transaction as well as cause + * us to run out of stack space. + */ + bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY); + if (bp == NULL) { + return NULL; + } + + ASSERT(!XFS_BUF_GETERROR(bp)); + + /* + * The xfs_buf_log_item pointer is stored in b_fsprivate. If + * it doesn't have one yet, then allocate one and initialize it. + * The checks to see if one is there are in xfs_buf_item_init(). + */ + xfs_buf_item_init(bp, tp->t_mountp); + + /* + * Set the recursion count for the buffer within this transaction + * to 0. + */ + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + bip->bli_recur = 0; + + /* + * Take a reference for this transaction on the buf item. + */ + atomic_inc(&bip->bli_refcount); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip); + + /* + * Initialize b_fsprivate2 so we can find it with incore_match() + * above. 
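
The recursion counting described above means that repeated xfs_trans_get_buf() calls on the same buffer within one transaction do not take the buffer lock a second time. Below is a minimal user-space model of that bookkeeping with invented types; only the counting scheme mirrors bli_recur.

#include <assert.h>
#include <stdio.h>

struct fake_buf {
        int locked;     /* buffer lock, 0 or 1 */
        int recur;      /* extra "gets" within the same transaction */
};

static void trans_get_buf(struct fake_buf *bp)
{
        if (bp->locked)
                bp->recur++;    /* already ours: recursion, no second lock */
        else
                bp->locked = 1;
}

static void trans_brelse(struct fake_buf *bp)
{
        if (bp->recur > 0)
                bp->recur--;    /* undo one recursive get */
        else
                bp->locked = 0; /* last reference: really unlock */
}

int main(void)
{
        struct fake_buf b = { 0, 0 };

        trans_get_buf(&b);      /* locks the buffer */
        trans_get_buf(&b);      /* same transaction: recursion count becomes 1 */
        trans_brelse(&b);       /* drops the recursion only */
        assert(b.locked == 1);
        trans_brelse(&b);       /* now actually unlocked */
        assert(b.locked == 0);
        printf("recursion model ok\n");
        return 0;
}
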
+ */ + XFS_BUF_SET_FSPRIVATE2(bp, tp); + + xfs_buftrace("TRANS GET", bp); + xfs_buf_item_trace("GET", bip); + return (bp); +} + +/* + * Get and lock the superblock buffer of this file system for the + * given transaction. + * + * We don't need to use incore_match() here, because the superblock + * buffer is a private buffer which we keep a pointer to in the + * mount structure. + */ +xfs_buf_t * +xfs_trans_getsb(xfs_trans_t *tp, + struct xfs_mount *mp, + int flags) +{ + xfs_buf_t *bp; + xfs_buf_log_item_t *bip; + + /* + * Default to just trying to lock the superblock buffer + * if tp is NULL. + */ + if (tp == NULL) { + return (xfs_getsb(mp, flags)); + } + + /* + * If the superblock buffer already has this transaction + * pointer in its b_fsprivate2 field, then we know we already + * have it locked. In this case we just increment the lock + * recursion count and return the buffer to the caller. + */ + bp = mp->m_sb_bp; + if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) { + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + ASSERT(bip != NULL); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + bip->bli_recur++; + xfs_buf_item_trace("GETSB RECUR", bip); + return (bp); + } + + bp = xfs_getsb(mp, flags); + if (bp == NULL) { + return NULL; + } + + /* + * The xfs_buf_log_item pointer is stored in b_fsprivate. If + * it doesn't have one yet, then allocate one and initialize it. + * The checks to see if one is there are in xfs_buf_item_init(). + */ + xfs_buf_item_init(bp, mp); + + /* + * Set the recursion count for the buffer within this transaction + * to 0. + */ + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + bip->bli_recur = 0; + + /* + * Take a reference for this transaction on the buf item. + */ + atomic_inc(&bip->bli_refcount); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip); + + /* + * Initialize b_fsprivate2 so we can find it with incore_match() + * above. + */ + XFS_BUF_SET_FSPRIVATE2(bp, tp); + + xfs_buf_item_trace("GETSB", bip); + return (bp); +} + +#ifdef DEBUG +dev_t xfs_error_dev = 0; +int xfs_do_error; +int xfs_req_num; +int xfs_error_mod = 33; +#endif + +/* + * Get and lock the buffer for the caller if it is not already + * locked within the given transaction. If it has not yet been + * read in, read it from disk. If it is already locked + * within the transaction and already read in, just increment its + * lock recursion count and return a pointer to it. + * + * Use the fast path function xfs_trans_buf_item_match() or the buffer + * cache routine incore_match() to find the buffer + * if it is already owned by this transaction. + * + * If we don't already own the buffer, use read_buf() to get it. + * If it doesn't yet have an associated xfs_buf_log_item structure, + * then allocate one and add the item to this transaction. + * + * If the transaction pointer is NULL, make this just a normal + * read_buf() call. + */ +int +xfs_trans_read_buf( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len, + uint flags, + xfs_buf_t **bpp) +{ + xfs_buf_t *bp; + xfs_buf_log_item_t *bip; + int error; + + if (flags == 0) + flags = XFS_BUF_LOCK | XFS_BUF_MAPPED; + + /* + * Default to a normal get_buf() call if the tp is NULL. 
+ */ + if (tp == NULL) { + bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY); + if (!bp) + return XFS_ERROR(ENOMEM); + + if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) { + xfs_ioerror_alert("xfs_trans_read_buf", mp, + bp, blkno); + error = XFS_BUF_GETERROR(bp); + xfs_buf_relse(bp); + return error; + } +#ifdef DEBUG + if (xfs_do_error && (bp != NULL)) { + if (xfs_error_dev == target->pbr_dev) { + if (((xfs_req_num++) % xfs_error_mod) == 0) { + xfs_buf_relse(bp); + printk("Returning error!\n"); + return XFS_ERROR(EIO); + } + } + } +#endif + if (XFS_FORCED_SHUTDOWN(mp)) + goto shutdown_abort; + *bpp = bp; + return 0; + } + + /* + * If we find the buffer in the cache with this transaction + * pointer in its b_fsprivate2 field, then we know we already + * have it locked. If it is already read in we just increment + * the lock recursion count and return the buffer to the caller. + * If the buffer is not yet read in, then we read it in, increment + * the lock recursion count, and return it to the caller. + */ + if (tp->t_items.lic_next == NULL) { + bp = xfs_trans_buf_item_match(tp, target, blkno, len); + } else { + bp = xfs_trans_buf_item_match_all(tp, target, blkno, len); + } + if (bp != NULL) { + ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT((XFS_BUF_ISERROR(bp)) == 0); + if (!(XFS_BUF_ISDONE(bp))) { + xfs_buftrace("READ_BUF_INCORE !DONE", bp); + ASSERT(!XFS_BUF_ISASYNC(bp)); + XFS_BUF_READ(bp); + xfsbdstrat(tp->t_mountp, bp); + xfs_iowait(bp); + if (XFS_BUF_GETERROR(bp) != 0) { + xfs_ioerror_alert("xfs_trans_read_buf", mp, + bp, blkno); + error = XFS_BUF_GETERROR(bp); + xfs_buf_relse(bp); + /* + * We can gracefully recover from most + * read errors. Ones we can't are those + * that happen after the transaction's + * already dirty. + */ + if (tp->t_flags & XFS_TRANS_DIRTY) + xfs_force_shutdown(tp->t_mountp, + XFS_METADATA_IO_ERROR); + return error; + } + } + /* + * We never locked this buf ourselves, so we shouldn't + * brelse it either. Just get out. + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp); + *bpp = NULL; + return XFS_ERROR(EIO); + } + + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + bip->bli_recur++; + + ASSERT(atomic_read(&bip->bli_refcount) > 0); + xfs_buf_item_trace("READ RECUR", bip); + *bpp = bp; + return 0; + } + + /* + * We always specify the BUF_BUSY flag within a transaction so + * that get_buf does not try to push out a delayed write buffer + * which might cause another transaction to take place (if the + * buffer was delayed alloc). Such recursive transactions can + * easily deadlock with our current transaction as well as cause + * us to run out of stack space. 
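
The DEBUG-only globals used here (xfs_do_error, xfs_error_dev, xfs_req_num, xfs_error_mod) inject a failure on every Nth read that matches the chosen device, which exercises the error paths. The stand-alone model below shows that modulo-based fault injection; the device match is reduced to a single integer compare purely for illustration.

#include <stdio.h>

static int do_error  = 1;       /* master switch */
static int error_dev = 7;       /* only inject on this "device" */
static int error_mod = 3;       /* fail every 3rd matching request */
static int req_num;

static int maybe_fail(int dev)
{
        if (do_error && dev == error_dev && (req_num++ % error_mod) == 0)
                return -1;      /* pretend the read failed */
        return 0;
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                printf("request %d on dev 7 -> %s\n", i,
                       maybe_fail(7) ? "injected error" : "ok");
        return 0;
}
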
+ */ + bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY); + if (bp == NULL) { + *bpp = NULL; + return 0; + } + if (XFS_BUF_GETERROR(bp) != 0) { + XFS_BUF_SUPER_STALE(bp); + xfs_buftrace("READ ERROR", bp); + error = XFS_BUF_GETERROR(bp); + + xfs_ioerror_alert("xfs_trans_read_buf", mp, + bp, blkno); + if (tp->t_flags & XFS_TRANS_DIRTY) + xfs_force_shutdown(tp->t_mountp, XFS_METADATA_IO_ERROR); + xfs_buf_relse(bp); + return error; + } +#ifdef DEBUG + if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) { + if (xfs_error_dev == target->pbr_dev) { + if (((xfs_req_num++) % xfs_error_mod) == 0) { + xfs_force_shutdown(tp->t_mountp, + XFS_METADATA_IO_ERROR); + xfs_buf_relse(bp); + printk("Returning error in trans!\n"); + return XFS_ERROR(EIO); + } + } + } +#endif + if (XFS_FORCED_SHUTDOWN(mp)) + goto shutdown_abort; + + /* + * The xfs_buf_log_item pointer is stored in b_fsprivate. If + * it doesn't have one yet, then allocate one and initialize it. + * The checks to see if one is there are in xfs_buf_item_init(). + */ + xfs_buf_item_init(bp, tp->t_mountp); + + /* + * Set the recursion count for the buffer within this transaction + * to 0. + */ + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + bip->bli_recur = 0; + + /* + * Take a reference for this transaction on the buf item. + */ + atomic_inc(&bip->bli_refcount); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip); + + /* + * Initialize b_fsprivate2 so we can find it with incore_match() + * above. + */ + XFS_BUF_SET_FSPRIVATE2(bp, tp); + + xfs_buftrace("TRANS READ", bp); + xfs_buf_item_trace("READ", bip); + *bpp = bp; + return 0; + +shutdown_abort: + /* + * the theory here is that buffer is good but we're + * bailing out because the filesystem is being forcibly + * shut down. So we should leave the b_flags alone since + * the buffer's not staled and just get out. + */ +#if defined(DEBUG) + if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp)) + cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp); +#endif + ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) != + (XFS_B_STALE|XFS_B_DELWRI)); + + xfs_buftrace("READ_BUF XFSSHUTDN", bp); + xfs_buf_relse(bp); + *bpp = NULL; + return XFS_ERROR(EIO); +} + + +/* + * Release the buffer bp which was previously acquired with one of the + * xfs_trans_... buffer allocation routines if the buffer has not + * been modified within this transaction. If the buffer is modified + * within this transaction, do decrement the recursion count but do + * not release the buffer even if the count goes to 0. If the buffer is not + * modified within the transaction, decrement the recursion count and + * release the buffer if the recursion count goes to 0. + * + * If the buffer is to be released and it was not modified before + * this transaction began, then free the buf_log_item associated with it. + * + * If the transaction pointer is NULL, make this just a normal + * brelse() call. + */ +void +xfs_trans_brelse(xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + xfs_log_item_t *lip; + xfs_log_item_desc_t *lidp; + + /* + * Default to a normal brelse() call if the tp is NULL. + */ + if (tp == NULL) { + ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL); + /* + * If there's a buf log item attached to the buffer, + * then let the AIL know that the buffer is being + * unlocked. 
+ */ + if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { + lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + if (lip->li_type == XFS_LI_BUF) { + bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); + xfs_trans_unlocked_item( + bip->bli_item.li_mountp, + lip); + } + } + xfs_buf_relse(bp); + return; + } + + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip->bli_item.li_type == XFS_LI_BUF); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + + /* + * Find the item descriptor pointing to this buffer's + * log item. It must be there. + */ + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); + ASSERT(lidp != NULL); + + /* + * If the release is just for a recursive lock, + * then decrement the count and return. + */ + if (bip->bli_recur > 0) { + bip->bli_recur--; + xfs_buf_item_trace("RELSE RECUR", bip); + return; + } + + /* + * If the buffer is dirty within this transaction, we can't + * release it until we commit. + */ + if (lidp->lid_flags & XFS_LID_DIRTY) { + xfs_buf_item_trace("RELSE DIRTY", bip); + return; + } + + /* + * If the buffer has been invalidated, then we can't release + * it until the transaction commits to disk unless it is re-dirtied + * as part of this transaction. This prevents us from pulling + * the item from the AIL before we should. + */ + if (bip->bli_flags & XFS_BLI_STALE) { + xfs_buf_item_trace("RELSE STALE", bip); + return; + } + + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + xfs_buf_item_trace("RELSE", bip); + + /* + * Free up the log item descriptor tracking the released item. + */ + xfs_trans_free_item(tp, lidp); + + /* + * Clear the hold flag in the buf log item if it is set. + * We wouldn't want the next user of the buffer to + * get confused. + */ + if (bip->bli_flags & XFS_BLI_HOLD) { + bip->bli_flags &= ~XFS_BLI_HOLD; + } + + /* + * Drop our reference to the buf log item. + */ + atomic_dec(&bip->bli_refcount); + + /* + * If the buf item is not tracking data in the log, then + * we must free it before releasing the buffer back to the + * free pool. Before releasing the buffer to the free pool, + * clear the transaction pointer in b_fsprivate2 to dissolve + * its relation to this transaction. + */ + if (!xfs_buf_item_dirty(bip)) { +/*** + ASSERT(bp->b_pincount == 0); +***/ + ASSERT(atomic_read(&bip->bli_refcount) == 0); + ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL)); + ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF)); + xfs_buf_item_relse(bp); + bip = NULL; + } + XFS_BUF_SET_FSPRIVATE2(bp, NULL); + + /* + * If we've still got a buf log item on the buffer, then + * tell the AIL that the buffer is being unlocked. + */ + if (bip != NULL) { + xfs_trans_unlocked_item(bip->bli_item.li_mountp, + (xfs_log_item_t*)bip); + } + + xfs_buf_relse(bp); + return; +} + +/* + * Add the locked buffer to the transaction. + * The buffer must be locked, and it cannot be associated with any + * transaction. + * + * If the buffer does not yet have a buf log item associated with it, + * then allocate one for it. Then add the buf item to the transaction. + */ +void +xfs_trans_bjoin(xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL); + + /* + * The xfs_buf_log_item pointer is stored in b_fsprivate. If + * it doesn't have one yet, then allocate one and initialize it. + * The checks to see if one is there are in xfs_buf_item_init(). 
+ */ + xfs_buf_item_init(bp, tp->t_mountp); + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); + + /* + * Take a reference for this transaction on the buf item. + */ + atomic_inc(&bip->bli_refcount); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip); + + /* + * Initialize b_fsprivate2 so we can find it with incore_match() + * in xfs_trans_get_buf() and friends above. + */ + XFS_BUF_SET_FSPRIVATE2(bp, tp); + + xfs_buf_item_trace("BJOIN", bip); +} + +/* + * Mark the buffer as not needing to be unlocked when the buf item's + * IOP_UNLOCK() routine is called. The buffer must already be locked + * and associated with the given transaction. + */ +/* ARGSUSED */ +void +xfs_trans_bhold(xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + bip->bli_flags |= XFS_BLI_HOLD; + xfs_buf_item_trace("BHOLD", bip); +} + +/* + * This function is used to indicate that the buffer should not be + * unlocked until the transaction is committed to disk. Since we + * are going to keep the lock held, make the transaction synchronous + * so that the lock is not held too long. + * + * It uses the log item descriptor flag XFS_LID_SYNC_UNLOCK to + * delay the buf items's unlock call until the transaction is + * committed to disk or aborted. + */ +void +xfs_trans_bhold_until_committed(xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_log_item_desc_t *lidp; + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); + ASSERT(lidp != NULL); + + lidp->lid_flags |= XFS_LID_SYNC_UNLOCK; + xfs_buf_item_trace("BHOLD UNTIL COMMIT", bip); + + xfs_trans_set_sync(tp); +} + +/* + * This is called to mark bytes first through last inclusive of the given + * buffer as needing to be logged when the transaction is committed. + * The buffer must already be associated with the given transaction. + * + * First and last are numbers relative to the beginning of this buffer, + * so the first byte in the buffer is numbered 0 regardless of the + * value of b_blkno. + */ +void +xfs_trans_log_buf(xfs_trans_t *tp, + xfs_buf_t *bp, + uint first, + uint last) +{ + xfs_buf_log_item_t *bip; + xfs_log_item_desc_t *lidp; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); + ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) || + (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks)); + + /* + * Mark the buffer as needing to be written out eventually, + * and set its iodone function to remove the buffer's buf log + * item from the AIL and free it when the buffer is flushed + * to disk. 
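
xfs_trans_bhold() marks a buffer so it stays locked when the transaction lets go of its other buffers, which is how a caller keeps a buffer across consecutive transactions. The sketch below models only that flag behaviour; the structures, the commit routine, and the one-shot clearing of the hold are simplifications of this model, not a description of the real unlock path.

#include <stdio.h>

#define MAX_BUFS 4

struct mbuf { int locked; int hold; };

struct mtrans {
        struct mbuf *bufs[MAX_BUFS];
        int          nbufs;
};

static void trans_join(struct mtrans *tp, struct mbuf *bp)
{
        bp->locked = 1;
        tp->bufs[tp->nbufs++] = bp;
}

static void trans_commit(struct mtrans *tp)
{
        for (int i = 0; i < tp->nbufs; i++) {
                if (tp->bufs[i]->hold)
                        tp->bufs[i]->hold = 0;  /* hold consumed by this commit */
                else
                        tp->bufs[i]->locked = 0;
        }
        tp->nbufs = 0;
}

int main(void)
{
        struct mtrans tp = { .nbufs = 0 };
        struct mbuf a = { 0, 0 }, b = { 0, 0 };

        trans_join(&tp, &a);
        trans_join(&tp, &b);
        b.hold = 1;             /* like xfs_trans_bhold(tp, bp) */
        trans_commit(&tp);

        printf("a locked=%d, b locked=%d\n", a.locked, b.locked);
        return 0;
}
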
See xfs_buf_attach_iodone() for more details + * on li_cb and xfs_buf_iodone_callbacks(). + * If we end up aborting this transaction, we trap this buffer + * inside the b_bdstrat callback so that this won't get written to + * disk. + */ + XFS_BUF_DELAYWRITE(bp); + XFS_BUF_DONE(bp); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); + bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone; + + /* + * If we invalidated the buffer within this transaction, then + * cancel the invalidation now that we're dirtying the buffer + * again. There are no races with the code in xfs_buf_item_unpin(), + * because we have a reference to the buffer this entire time. + */ + if (bip->bli_flags & XFS_BLI_STALE) { + xfs_buf_item_trace("BLOG UNSTALE", bip); + bip->bli_flags &= ~XFS_BLI_STALE; + /* note this will have to change for page_buf interface... unstale isn't really an option RMC */ + ASSERT(XFS_BUF_ISSTALE(bp)); + XFS_BUF_UNSTALE(bp); + bip->bli_format.blf_flags &= ~XFS_BLI_CANCEL; + } + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; + lidp->lid_flags &= ~XFS_LID_BUF_STALE; + bip->bli_flags |= XFS_BLI_LOGGED; + xfs_buf_item_log(bip, first, last); + xfs_buf_item_trace("BLOG", bip); +} + + +/* + * This called to invalidate a buffer that is being used within + * a transaction. Typically this is because the blocks in the + * buffer are being freed, so we need to prevent it from being + * written out when we're done. Allowing it to be written again + * might overwrite data in the free blocks if they are reallocated + * to a file. + * + * We prevent the buffer from being written out by clearing the + * B_DELWRI flag. We can't always + * get rid of the buf log item at this point, though, because + * the buffer may still be pinned by another transaction. If that + * is the case, then we'll wait until the buffer is committed to + * disk for the last time (we can tell by the ref count) and + * free it in xfs_buf_item_unpin(). Until it is cleaned up we + * will keep the buffer locked so that the buffer and buf log item + * are not reused. + */ +void +xfs_trans_binval( + xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_log_item_desc_t *lidp; + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); + ASSERT(lidp != NULL); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + + if (bip->bli_flags & XFS_BLI_STALE) { + /* + * If the buffer is already invalidated, then + * just return. + */ + ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); + ASSERT(XFS_BUF_ISSTALE(bp)); + ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY))); + ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_INODE_BUF)); + ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); + ASSERT(lidp->lid_flags & XFS_LID_DIRTY); + ASSERT(tp->t_flags & XFS_TRANS_DIRTY); + xfs_buftrace("XFS_BINVAL RECUR", bp); + xfs_buf_item_trace("BINVAL RECUR", bip); + return; + } + + /* + * Clear the dirty bit in the buffer and set the STALE flag + * in the buf log item. The STALE flag will be used in + * xfs_buf_item_unpin() to determine if it should clean up + * when the last reference to the buf item is given up. 
+ * We set the XFS_BLI_CANCEL flag in the buf log format structure + * and log the buf item. This will be used at recovery time + * to determine that copies of the buffer in the log before + * this should not be replayed. + * We mark the item descriptor and the transaction dirty so + * that we'll hold the buffer until after the commit. + * + * Since we're invalidating the buffer, we also clear the state + * about which parts of the buffer have been logged. We also + * clear the flag indicating that this is an inode buffer since + * the data in the buffer will no longer be valid. + * + * We set the stale bit in the buffer as well since we're getting + * rid of it. + */ + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_STALE(bp); + bip->bli_flags |= XFS_BLI_STALE; + bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY); + bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF; + bip->bli_format.blf_flags |= XFS_BLI_CANCEL; + memset((char *)(bip->bli_format.blf_data_map), 0, + (bip->bli_format.blf_map_size * sizeof(uint))); + lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE; + tp->t_flags |= XFS_TRANS_DIRTY; + xfs_buftrace("XFS_BINVAL", bp); + xfs_buf_item_trace("BINVAL", bip); +} + +/* + * This call is used to indicate that the buffer contains on-disk + * inodes which must be handled specially during recovery. They + * require special handling because only the di_next_unlinked from + * the inodes in the buffer should be recovered. The rest of the + * data in the buffer is logged via the inodes themselves. + * + * All we do is set the XFS_BLI_INODE_BUF flag in the buffer's log + * format structure so that we'll know what to do at recovery time. + */ +/* ARGSUSED */ +void +xfs_trans_inode_buf( + xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + + bip->bli_format.blf_flags |= XFS_BLI_INODE_BUF; +} + + +/* + * Mark the buffer as being one which contains newly allocated + * inodes. We need to make sure that even if this buffer is + * relogged as an 'inode buf' we still recover all of the inode + * images in the face of a crash. This works in coordination with + * xfs_buf_item_committed() to ensure that the buffer remains in the + * AIL at its original location even after it has been relogged. + */ +/* ARGSUSED */ +void +xfs_trans_inode_alloc_buf( + xfs_trans_t *tp, + xfs_buf_t *bp) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF)); + + bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; +} + + +/* + * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of + * dquots. However, unlike in inode buffer recovery, dquot buffers get + * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag). + * The only thing that makes dquot buffers different from regular + * buffers is that we must not replay dquot bufs when recovering + * if a _corresponding_ quotaoff has happened. We also have to distinguish + * between usr dquot bufs and grp dquot bufs, because usr and grp quotas + * can be turned off independently. 
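
The interplay described above between invalidating a buffer and dirtying it again can be reduced to two flags: xfs_trans_binval() cancels any pending delayed write and marks the contents stale, and a later xfs_trans_log_buf() in the same transaction undoes the stale marking. The flag names and helpers below are local to this sketch.

#include <stdio.h>

#define F_DELWRI        0x1     /* needs to be written back */
#define F_STALE         0x2     /* contents must not reach disk */

static unsigned int binval(unsigned int flags)
{
        flags &= ~F_DELWRI;     /* never write an invalidated buffer */
        return flags | F_STALE;
}

static unsigned int log_buf(unsigned int flags)
{
        flags &= ~F_STALE;      /* re-dirtying cancels the invalidation */
        return flags | F_DELWRI;
}

int main(void)
{
        unsigned int flags = F_DELWRI;

        flags = binval(flags);
        printf("after binval:  delwri=%d stale=%d\n",
               !!(flags & F_DELWRI), !!(flags & F_STALE));

        flags = log_buf(flags);
        printf("after re-log:  delwri=%d stale=%d\n",
               !!(flags & F_DELWRI), !!(flags & F_STALE));
        return 0;
}
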
+ */ +/* ARGSUSED */ +void +xfs_trans_dquot_buf( + xfs_trans_t *tp, + xfs_buf_t *bp, + uint type) +{ + xfs_buf_log_item_t *bip; + + ASSERT(XFS_BUF_ISBUSY(bp)); + ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(type == XFS_BLI_UDQUOT_BUF || + type == XFS_BLI_GDQUOT_BUF); + + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(atomic_read(&bip->bli_refcount) > 0); + + bip->bli_format.blf_flags |= type; +} + +/* + * Check to see if a buffer matching the given parameters is already + * a part of the given transaction. Only check the first, embedded + * chunk, since we don't want to spend all day scanning large transactions. + */ +STATIC xfs_buf_t * +xfs_trans_buf_item_match( + xfs_trans_t *tp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_desc_t *lidp; + xfs_buf_log_item_t *blip; + xfs_buf_t *bp; + int i; + + bp = NULL; + len = BBTOB(len); + licp = &tp->t_items; + if (!XFS_LIC_ARE_ALL_FREE(licp)) { + for (i = 0; i < licp->lic_unused; i++) { + /* + * Skip unoccupied slots. + */ + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lidp = XFS_LIC_SLOT(licp, i); + blip = (xfs_buf_log_item_t *)lidp->lid_item; + if (blip->bli_item.li_type != XFS_LI_BUF) { + continue; + } + + bp = blip->bli_buf; + if ((XFS_BUF_TARGET(bp) == target) && + (XFS_BUF_ADDR(bp) == blkno) && + (XFS_BUF_COUNT(bp) == len)) { + /* + * We found it. Break out and + * return the pointer to the buffer. + */ + break; + } else { + bp = NULL; + } + } + } + return bp; +} + +/* + * Check to see if a buffer matching the given parameters is already + * a part of the given transaction. Check all the chunks, we + * want to be thorough. + */ +STATIC xfs_buf_t * +xfs_trans_buf_item_match_all( + xfs_trans_t *tp, + xfs_buftarg_t *target, + xfs_daddr_t blkno, + int len) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_desc_t *lidp; + xfs_buf_log_item_t *blip; + xfs_buf_t *bp; + int i; + + bp = NULL; + len = BBTOB(len); + for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) { + if (XFS_LIC_ARE_ALL_FREE(licp)) { + ASSERT(licp == &tp->t_items); + ASSERT(licp->lic_next == NULL); + return NULL; + } + for (i = 0; i < licp->lic_unused; i++) { + /* + * Skip unoccupied slots. + */ + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lidp = XFS_LIC_SLOT(licp, i); + blip = (xfs_buf_log_item_t *)lidp->lid_item; + if (blip->bli_item.li_type != XFS_LI_BUF) { + continue; + } + + bp = blip->bli_buf; + if ((XFS_BUF_TARGET(bp) == target) && + (XFS_BUF_ADDR(bp) == blkno) && + (XFS_BUF_COUNT(bp) == len)) { + /* + * We found it. Break out and + * return the pointer to the buffer. + */ + return bp; + } + } + } + return NULL; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_trans.c linux.22-ac2/fs/xfs/xfs_trans.c --- linux.vanilla/fs/xfs/xfs_trans.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_trans.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,1307 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_error.h" +#include "xfs_trans_priv.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_quota.h" +#include "xfs_trans_space.h" + + +STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *); +STATIC uint xfs_trans_count_vecs(xfs_trans_t *); +STATIC void xfs_trans_fill_vecs(xfs_trans_t *, xfs_log_iovec_t *); +STATIC void xfs_trans_uncommit(xfs_trans_t *, uint); +STATIC void xfs_trans_committed(xfs_trans_t *, int); +STATIC void xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int); +STATIC void xfs_trans_free(xfs_trans_t *); + +kmem_zone_t *xfs_trans_zone; + + +/* + * Initialize the precomputed transaction reservation values + * in the mount structure. 
+ */ +void +xfs_trans_init( + xfs_mount_t *mp) +{ + xfs_trans_reservations_t *resp; + + resp = &(mp->m_reservations); + resp->tr_write = + (uint)(XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_itruncate = + (uint)(XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_rename = + (uint)(XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_link = (uint)XFS_CALC_LINK_LOG_RES(mp); + resp->tr_remove = + (uint)(XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_symlink = + (uint)(XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_create = + (uint)(XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_mkdir = + (uint)(XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_ifree = + (uint)(XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_ichange = + (uint)(XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_growdata = (uint)XFS_CALC_GROWDATA_LOG_RES(mp); + resp->tr_swrite = (uint)XFS_CALC_SWRITE_LOG_RES(mp); + resp->tr_writeid = (uint)XFS_CALC_WRITEID_LOG_RES(mp); + resp->tr_addafork = + (uint)(XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_attrinval = (uint)XFS_CALC_ATTRINVAL_LOG_RES(mp); + resp->tr_attrset = + (uint)(XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_attrrm = + (uint)(XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_clearagi = (uint)XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp); + resp->tr_growrtalloc = (uint)XFS_CALC_GROWRTALLOC_LOG_RES(mp); + resp->tr_growrtzero = (uint)XFS_CALC_GROWRTZERO_LOG_RES(mp); + resp->tr_growrtfree = (uint)XFS_CALC_GROWRTFREE_LOG_RES(mp); +} + +/* + * This routine is called to allocate a transaction structure. + * The type parameter indicates the type of the transaction. These + * are enumerated in xfs_trans.h. + * + * Dynamically allocate the transaction structure from the transaction + * zone, initialize it, and return it to the caller. + */ +xfs_trans_t * +xfs_trans_alloc( + xfs_mount_t *mp, + uint type) +{ + xfs_check_frozen(mp, NULL, XFS_FREEZE_TRANS); + return (_xfs_trans_alloc(mp, type)); + +} + +xfs_trans_t * +_xfs_trans_alloc( + xfs_mount_t *mp, + uint type) +{ + xfs_trans_t *tp; + ASSERT(xfs_trans_zone != NULL); + tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP); + tp->t_dqinfo = NULL; + + /* + * Initialize the transaction structure. + */ + tp->t_magic = XFS_TRANS_MAGIC; + tp->t_type = type; + tp->t_mountp = mp; + tp->t_items_free = XFS_LIC_NUM_SLOTS; + tp->t_busy_free = XFS_LBC_NUM_SLOTS; + XFS_LIC_INIT(&(tp->t_items)); + XFS_LBC_INIT(&(tp->t_busy)); + + return (tp); +} + +/* + * This is called to create a new transaction which will share the + * permanent log reservation of the given transaction. The remaining + * unused block and rt extent reservations are also inherited. This + * implies that the original transaction is no longer allowed to allocate + * blocks. Locks and log items, however, are no inherited. They must + * be added to the new transaction explicitly. + */ +xfs_trans_t * +xfs_trans_dup( + xfs_trans_t *tp) +{ + xfs_trans_t *ntp; + + ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP); + + /* + * Initialize the new transaction structure. 
+ */ + ntp->t_magic = XFS_TRANS_MAGIC; + ntp->t_type = tp->t_type; + ntp->t_mountp = tp->t_mountp; + ntp->t_items_free = XFS_LIC_NUM_SLOTS; + ntp->t_busy_free = XFS_LBC_NUM_SLOTS; + XFS_LIC_INIT(&(ntp->t_items)); + XFS_LBC_INIT(&(ntp->t_busy)); + + ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); + +#if defined(XLOG_NOLOG) || defined(DEBUG) + ASSERT(!xlog_debug || tp->t_ticket != NULL); +#else + ASSERT(tp->t_ticket != NULL); +#endif + ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE); + ntp->t_ticket = tp->t_ticket; + ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used; + tp->t_blk_res = tp->t_blk_res_used; + ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used; + tp->t_rtx_res = tp->t_rtx_res_used; + + XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp); + + atomic_inc(&tp->t_mountp->m_active_trans); + return ntp; +} + +/* + * This is called to reserve free disk blocks and log space for the + * given transaction. This must be done before allocating any resources + * within the transaction. + * + * This will return ENOSPC if there are not enough blocks available. + * It will sleep waiting for available log space. + * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which + * is used by long running transactions. If any one of the reservations + * fails then they will all be backed out. + * + * This does not do quota reservations. That typically is done by the + * caller afterwards. + */ +int +xfs_trans_reserve( + xfs_trans_t *tp, + uint blocks, + uint logspace, + uint rtextents, + uint flags, + uint logcount) +{ + int log_flags; + int error; + int rsvd; + + error = 0; + rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; + + /* Mark this thread as being in a transaction */ + current->flags |= PF_FSTRANS; + + /* + * Attempt to reserve the needed disk blocks by decrementing + * the number needed from the number available. This will + * fail if the count would go below zero. + */ + if (blocks > 0) { + error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS, + -blocks, rsvd); + if (error != 0) { + current->flags &= ~PF_FSTRANS; + return (XFS_ERROR(ENOSPC)); + } + tp->t_blk_res += blocks; + } + + /* + * Reserve the log space needed for this transaction. + */ + if (logspace > 0) { + ASSERT((tp->t_log_res == 0) || (tp->t_log_res == logspace)); + ASSERT((tp->t_log_count == 0) || + (tp->t_log_count == logcount)); + if (flags & XFS_TRANS_PERM_LOG_RES) { + log_flags = XFS_LOG_PERM_RESERV; + tp->t_flags |= XFS_TRANS_PERM_LOG_RES; + } else { + ASSERT(tp->t_ticket == NULL); + ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES)); + log_flags = 0; + } + + error = xfs_log_reserve(tp->t_mountp, logspace, logcount, + &tp->t_ticket, + XFS_TRANSACTION, log_flags); + if (error) { + goto undo_blocks; + } + tp->t_log_res = logspace; + tp->t_log_count = logcount; + } + + /* + * Attempt to reserve the needed realtime extents by decrementing + * the number needed from the number available. This will + * fail if the count would go below zero. + */ + if (rtextents > 0) { + error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS, + -rtextents, rsvd); + if (error) { + error = XFS_ERROR(ENOSPC); + goto undo_log; + } + tp->t_rtx_res += rtextents; + } + + return 0; + + /* + * Error cases jump to one of these labels to undo any + * reservations which have already been performed. 
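
A worked example of the reservation hand-off in xfs_trans_dup() above: the new transaction inherits whatever block reservation the parent has not yet used, and the parent is cut back to exactly what it already consumed, so it can allocate nothing further. The structure and field names below are shortened stand-ins.

#include <assert.h>
#include <stdio.h>

struct rsv { long res; long used; };

static struct rsv dup(struct rsv *parent)
{
        struct rsv child = { parent->res - parent->used, 0 };

        parent->res = parent->used;     /* parent may not allocate any more */
        return child;
}

int main(void)
{
        struct rsv parent = { .res = 100, .used = 40 };
        struct rsv child  = dup(&parent);

        assert(child.res == 60 && parent.res == 40);
        printf("parent keeps %ld, child inherits %ld\n", parent.res, child.res);
        return 0;
}

The same split is applied to the realtime extent reservation (t_rtx_res / t_rtx_res_used) in the code above.
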
+ */ +undo_log: + if (logspace > 0) { + if (flags & XFS_TRANS_PERM_LOG_RES) { + log_flags = XFS_LOG_REL_PERM_RESERV; + } else { + log_flags = 0; + } + xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags); + tp->t_ticket = NULL; + tp->t_log_res = 0; + tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES; + } + +undo_blocks: + if (blocks > 0) { + (void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS, + blocks, rsvd); + tp->t_blk_res = 0; + } + + current->flags &= ~PF_FSTRANS; + + return (error); +} + + +/* + * This is called to set the a callback to be called when the given + * transaction is committed to disk. The transaction pointer and the + * argument pointer will be passed to the callback routine. + * + * Only one callback can be associated with any single transaction. + */ +void +xfs_trans_callback( + xfs_trans_t *tp, + xfs_trans_callback_t callback, + void *arg) +{ + ASSERT(tp->t_callback == NULL); + tp->t_callback = callback; + tp->t_callarg = arg; +} + + +/* + * Record the indicated change to the given field for application + * to the file system's superblock when the transaction commits. + * For now, just store the change in the transaction structure. + * + * Mark the transaction structure to indicate that the superblock + * needs to be updated before committing. + */ +void +xfs_trans_mod_sb( + xfs_trans_t *tp, + uint field, + long delta) +{ + + switch (field) { + case XFS_TRANS_SB_ICOUNT: + ASSERT(delta > 0); + tp->t_icount_delta += delta; + break; + case XFS_TRANS_SB_IFREE: + tp->t_ifree_delta += delta; + break; + case XFS_TRANS_SB_FDBLOCKS: + /* + * Track the number of blocks allocated in the + * transaction. Make sure it does not exceed the + * number reserved. + */ + if (delta < 0) { + tp->t_blk_res_used += (uint)-delta; + ASSERT(tp->t_blk_res_used <= tp->t_blk_res); + } + tp->t_fdblocks_delta += delta; + break; + case XFS_TRANS_SB_RES_FDBLOCKS: + /* + * The allocation has already been applied to the + * in-core superblock's counter. This should only + * be applied to the on-disk superblock. + */ + ASSERT(delta < 0); + tp->t_res_fdblocks_delta += delta; + break; + case XFS_TRANS_SB_FREXTENTS: + /* + * Track the number of blocks allocated in the + * transaction. Make sure it does not exceed the + * number reserved. + */ + if (delta < 0) { + tp->t_rtx_res_used += (uint)-delta; + ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res); + } + tp->t_frextents_delta += delta; + break; + case XFS_TRANS_SB_RES_FREXTENTS: + /* + * The allocation has already been applied to the + * in-core superblocks's counter. This should only + * be applied to the on-disk superblock. 
+ */ + ASSERT(delta < 0); + tp->t_res_frextents_delta += delta; + break; + case XFS_TRANS_SB_DBLOCKS: + ASSERT(delta > 0); + tp->t_dblocks_delta += delta; + break; + case XFS_TRANS_SB_AGCOUNT: + ASSERT(delta > 0); + tp->t_agcount_delta += delta; + break; + case XFS_TRANS_SB_IMAXPCT: + tp->t_imaxpct_delta += delta; + break; + case XFS_TRANS_SB_REXTSIZE: + tp->t_rextsize_delta += delta; + break; + case XFS_TRANS_SB_RBMBLOCKS: + tp->t_rbmblocks_delta += delta; + break; + case XFS_TRANS_SB_RBLOCKS: + tp->t_rblocks_delta += delta; + break; + case XFS_TRANS_SB_REXTENTS: + tp->t_rextents_delta += delta; + break; + case XFS_TRANS_SB_REXTSLOG: + tp->t_rextslog_delta += delta; + break; + default: + ASSERT(0); + return; + } + + tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY); +} + +/* + * xfs_trans_apply_sb_deltas() is called from the commit code + * to bring the superblock buffer into the current transaction + * and modify it as requested by earlier calls to xfs_trans_mod_sb(). + * + * For now we just look at each field allowed to change and change + * it if necessary. + */ +STATIC void +xfs_trans_apply_sb_deltas( + xfs_trans_t *tp) +{ + xfs_sb_t *sbp; + xfs_buf_t *bp; + int whole = 0; + + bp = xfs_trans_getsb(tp, tp->t_mountp, 0); + sbp = XFS_BUF_TO_SBP(bp); + + /* + * Check that superblock mods match the mods made to AGF counters. + */ + ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) == + (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta + + tp->t_ag_btree_delta)); + + if (tp->t_icount_delta != 0) { + INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta); + } + if (tp->t_ifree_delta != 0) { + INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta); + } + + if (tp->t_fdblocks_delta != 0) { + INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta); + } + if (tp->t_res_fdblocks_delta != 0) { + INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta); + } + + if (tp->t_frextents_delta != 0) { + INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_frextents_delta); + } + if (tp->t_dblocks_delta != 0) { + INT_MOD(sbp->sb_dblocks, ARCH_CONVERT, tp->t_dblocks_delta); + whole = 1; + } + if (tp->t_agcount_delta != 0) { + INT_MOD(sbp->sb_agcount, ARCH_CONVERT, tp->t_agcount_delta); + whole = 1; + } + if (tp->t_imaxpct_delta != 0) { + INT_MOD(sbp->sb_imax_pct, ARCH_CONVERT, tp->t_imaxpct_delta); + whole = 1; + } + if (tp->t_rextsize_delta != 0) { + INT_MOD(sbp->sb_rextsize, ARCH_CONVERT, tp->t_rextsize_delta); + whole = 1; + } + if (tp->t_rbmblocks_delta != 0) { + INT_MOD(sbp->sb_rbmblocks, ARCH_CONVERT, tp->t_rbmblocks_delta); + whole = 1; + } + if (tp->t_rblocks_delta != 0) { + INT_MOD(sbp->sb_rblocks, ARCH_CONVERT, tp->t_rblocks_delta); + whole = 1; + } + if (tp->t_rextents_delta != 0) { + INT_MOD(sbp->sb_rextents, ARCH_CONVERT, tp->t_rextents_delta); + whole = 1; + } + if (tp->t_rextslog_delta != 0) { + INT_MOD(sbp->sb_rextslog, ARCH_CONVERT, tp->t_rextslog_delta); + whole = 1; + } + + if (whole) + /* + * Log the whole thing, the fields are discontiguous. + */ + xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1); + else + /* + * Since all the modifiable fields are contiguous, we + * can get away with this. + */ + xfs_trans_log_buf(tp, bp, offsetof(xfs_sb_t, sb_icount), + offsetof(xfs_sb_t, sb_frextents) + + sizeof(sbp->sb_frextents) - 1); + + XFS_MTOVFS(tp->t_mountp)->vfs_super->s_dirt = 1; +} + +/* + * xfs_trans_unreserve_and_mod_sb() is called to release unused + * reservations and apply superblock counter changes to the in-core + * superblock. 
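
xfs_trans_mod_sb() above accumulates per-field deltas in the transaction and, for the free-block counters, checks that the transaction never allocates more than it reserved. The sketch models only that FDBLOCKS-style accounting, with invented type names.

#include <assert.h>
#include <stdio.h>

struct tx {
        long blk_res;           /* blocks reserved up front */
        long blk_res_used;      /* blocks consumed so far */
        long fdblocks_delta;    /* accumulated change to the free-block count */
};

static void mod_sb_fdblocks(struct tx *tp, long delta)
{
        if (delta < 0) {
                tp->blk_res_used += -delta;
                assert(tp->blk_res_used <= tp->blk_res);  /* never exceed the reservation */
        }
        tp->fdblocks_delta += delta;
}

int main(void)
{
        struct tx tp = { .blk_res = 10 };

        mod_sb_fdblocks(&tp, -3);       /* allocate 3 of the 10 reserved blocks */
        mod_sb_fdblocks(&tp, -2);       /* and 2 more */
        printf("used %ld of %ld, delta %ld\n",
               tp.blk_res_used, tp.blk_res, tp.fdblocks_delta);
        return 0;
}
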
+ * + * This is done efficiently with a single call to xfs_mod_incore_sb_batch(). + */ +void +xfs_trans_unreserve_and_mod_sb( + xfs_trans_t *tp) +{ + xfs_mod_sb_t msb[14]; /* If you add cases, add entries */ + xfs_mod_sb_t *msbp; + /* REFERENCED */ + int error; + int rsvd; + + msbp = msb; + rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; + + /* + * Release any reserved blocks. Any that were allocated + * will be taken back again by fdblocks_delta below. + */ + if (tp->t_blk_res > 0) { + msbp->msb_field = XFS_SBS_FDBLOCKS; + msbp->msb_delta = tp->t_blk_res; + msbp++; + } + + /* + * Release any reserved real time extents . Any that were + * allocated will be taken back again by frextents_delta below. + */ + if (tp->t_rtx_res > 0) { + msbp->msb_field = XFS_SBS_FREXTENTS; + msbp->msb_delta = tp->t_rtx_res; + msbp++; + } + + /* + * Apply any superblock modifications to the in-core version. + * The t_res_fdblocks_delta and t_res_frextents_delta fields are + * explicity NOT applied to the in-core superblock. + * The idea is that that has already been done. + */ + if (tp->t_flags & XFS_TRANS_SB_DIRTY) { + if (tp->t_icount_delta != 0) { + msbp->msb_field = XFS_SBS_ICOUNT; + msbp->msb_delta = (int)tp->t_icount_delta; + msbp++; + } + if (tp->t_ifree_delta != 0) { + msbp->msb_field = XFS_SBS_IFREE; + msbp->msb_delta = (int)tp->t_ifree_delta; + msbp++; + } + if (tp->t_fdblocks_delta != 0) { + msbp->msb_field = XFS_SBS_FDBLOCKS; + msbp->msb_delta = (int)tp->t_fdblocks_delta; + msbp++; + } + if (tp->t_frextents_delta != 0) { + msbp->msb_field = XFS_SBS_FREXTENTS; + msbp->msb_delta = (int)tp->t_frextents_delta; + msbp++; + } + if (tp->t_dblocks_delta != 0) { + msbp->msb_field = XFS_SBS_DBLOCKS; + msbp->msb_delta = (int)tp->t_dblocks_delta; + msbp++; + } + if (tp->t_agcount_delta != 0) { + msbp->msb_field = XFS_SBS_AGCOUNT; + msbp->msb_delta = (int)tp->t_agcount_delta; + msbp++; + } + if (tp->t_imaxpct_delta != 0) { + msbp->msb_field = XFS_SBS_IMAX_PCT; + msbp->msb_delta = (int)tp->t_imaxpct_delta; + msbp++; + } + if (tp->t_rextsize_delta != 0) { + msbp->msb_field = XFS_SBS_REXTSIZE; + msbp->msb_delta = (int)tp->t_rextsize_delta; + msbp++; + } + if (tp->t_rbmblocks_delta != 0) { + msbp->msb_field = XFS_SBS_RBMBLOCKS; + msbp->msb_delta = (int)tp->t_rbmblocks_delta; + msbp++; + } + if (tp->t_rblocks_delta != 0) { + msbp->msb_field = XFS_SBS_RBLOCKS; + msbp->msb_delta = (int)tp->t_rblocks_delta; + msbp++; + } + if (tp->t_rextents_delta != 0) { + msbp->msb_field = XFS_SBS_REXTENTS; + msbp->msb_delta = (int)tp->t_rextents_delta; + msbp++; + } + if (tp->t_rextslog_delta != 0) { + msbp->msb_field = XFS_SBS_REXTSLOG; + msbp->msb_delta = (int)tp->t_rextslog_delta; + msbp++; + } + } + + /* + * If we need to change anything, do it. + */ + if (msbp > msb) { + error = xfs_mod_incore_sb_batch(tp->t_mountp, msb, + (uint)(msbp - msb), rsvd); + ASSERT(error == 0); + } +} + + +/* + * xfs_trans_commit + * + * Commit the given transaction to the log a/synchronously. + * + * XFS disk error handling mechanism is not based on a typical + * transaction abort mechanism. Logically after the filesystem + * gets marked 'SHUTDOWN', we can't let any new transactions + * be durable - ie. committed to disk - because some metadata might + * be inconsistent. In such cases, this returns an error, and the + * caller may assume that all locked objects joined to the transaction + * have already been unlocked as if the commit had succeeded. + * It's illegal to reference the transaction structure after this call. 
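+ *
+ * A minimal calling sketch (heavily simplified; real callers also
+ * join, modify and log specific items between reserve and commit):
+ *
+ *	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
+ *	error = xfs_trans_reserve(tp, ...);
+ *	if (error) {
+ *		xfs_trans_cancel(tp, 0);
+ *		return error;
+ *	}
+ *	... dirty some items ...
+ *	error = xfs_trans_commit(tp, 0, NULL);
+ *
+ * Passing NULL as the last argument simply discards the commit LSN.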
+ */ + /*ARGSUSED*/ +int +xfs_trans_commit( + xfs_trans_t *tp, + uint flags, + xfs_lsn_t *commit_lsn_p) +{ + xfs_log_iovec_t *log_vector; + int nvec; + xfs_mount_t *mp; + xfs_lsn_t commit_lsn; + /* REFERENCED */ + int error; + int log_flags; + int sync; +#define XFS_TRANS_LOGVEC_COUNT 16 + xfs_log_iovec_t log_vector_fast[XFS_TRANS_LOGVEC_COUNT]; +#if defined(XLOG_NOLOG) || defined(DEBUG) + static xfs_lsn_t trans_lsn = 1; +#endif + void *commit_iclog; + int shutdown; + + commit_lsn = -1; + + /* + * Determine whether this commit is releasing a permanent + * log reservation or not. + */ + if (flags & XFS_TRANS_RELEASE_LOG_RES) { + ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); + log_flags = XFS_LOG_REL_PERM_RESERV; + } else { + log_flags = 0; + } + mp = tp->t_mountp; + + /* + * If there is nothing to be logged by the transaction, + * then unlock all of the items associated with the + * transaction and free the transaction structure. + * Also make sure to return any reserved blocks to + * the free pool. + */ +shut_us_down: + shutdown = XFS_FORCED_SHUTDOWN(mp) ? EIO : 0; + if (!(tp->t_flags & XFS_TRANS_DIRTY) || shutdown) { + xfs_trans_unreserve_and_mod_sb(tp); + /* + * It is indeed possible for the transaction to be + * not dirty but the dqinfo portion to be. All that + * means is that we have some (non-persistent) quota + * reservations that need to be unreserved. + */ + XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp); + if (tp->t_ticket) { + commit_lsn = xfs_log_done(mp, tp->t_ticket, + NULL, log_flags); + if (commit_lsn == -1 && !shutdown) + shutdown = XFS_ERROR(EIO); + } + xfs_trans_free_items(tp, shutdown? XFS_TRANS_ABORT : 0); + xfs_trans_free_busy(tp); + xfs_trans_free(tp); + XFS_STATS_INC(xfsstats.xs_trans_empty); + if (commit_lsn_p) + *commit_lsn_p = commit_lsn; + current->flags &= ~PF_FSTRANS; + return (shutdown); + } +#if defined(XLOG_NOLOG) || defined(DEBUG) + ASSERT(!xlog_debug || tp->t_ticket != NULL); +#else + ASSERT(tp->t_ticket != NULL); +#endif + + /* + * If we need to update the superblock, then do it now. + */ + if (tp->t_flags & XFS_TRANS_SB_DIRTY) { + xfs_trans_apply_sb_deltas(tp); + } + XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp); + + /* + * Ask each log item how many log_vector entries it will + * need so we can figure out how many to allocate. + * Try to avoid the kmem_alloc() call in the common case + * by using a vector from the stack when it fits. + */ + nvec = xfs_trans_count_vecs(tp); + + if (nvec == 0) { + xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); + goto shut_us_down; + } + + + if (nvec <= XFS_TRANS_LOGVEC_COUNT) { + log_vector = log_vector_fast; + } else { + log_vector = (xfs_log_iovec_t *)kmem_alloc(nvec * + sizeof(xfs_log_iovec_t), + KM_SLEEP); + } + + /* + * Fill in the log_vector and pin the logged items, and + * then write the transaction to the log. + */ + xfs_trans_fill_vecs(tp, log_vector); + + /* + * Ignore errors here. xfs_log_done would do the right thing. + * We need to put the ticket, etc. away. + */ + error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, + &(tp->t_lsn)); + +#if defined(XLOG_NOLOG) || defined(DEBUG) + if (xlog_debug) { + commit_lsn = xfs_log_done(mp, tp->t_ticket, + &commit_iclog, log_flags); + } else { + commit_lsn = 0; + tp->t_lsn = trans_lsn++; + } +#else + /* + * This is the regular case. At this point (after the call finishes), + * the transaction is committed incore and could go out to disk at + * any time. However, all the items associated with the transaction + * are still locked and pinned in memory. 
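+ *
+ * xfs_log_done() returns the commit record's LSN and, through
+ * commit_iclog, the in-core log buffer that holds it; a return
+ * of -1 indicates a log write failure and is caught by the
+ * error path below.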
+ */ + commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags); +#endif + + tp->t_commit_lsn = commit_lsn; + if (nvec > XFS_TRANS_LOGVEC_COUNT) { + kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t)); + } + + if (commit_lsn_p) + *commit_lsn_p = commit_lsn; + + /* + * If we got a log write error. Unpin the logitems that we + * had pinned, clean up, free trans structure, and return error. + */ + if (error || commit_lsn == -1) { + xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT); + current->flags &= ~PF_FSTRANS; + return XFS_ERROR(EIO); + } + + /* + * Once the transaction has committed, unused + * reservations need to be released and changes to + * the superblock need to be reflected in the in-core + * version. Do that now. + */ + xfs_trans_unreserve_and_mod_sb(tp); + + sync = tp->t_flags & XFS_TRANS_SYNC; + + /* + * Tell the LM to call the transaction completion routine + * when the log write with LSN commit_lsn completes (e.g. + * when the transaction commit really hits the on-disk log). + * After this call we cannot reference tp, because the call + * can happen at any time and the call will free the transaction + * structure pointed to by tp. The only case where we call + * the completion routine (xfs_trans_committed) directly is + * if the log is turned off on a debug kernel or we're + * running in simulation mode (the log is explicitly turned + * off). + */ +#if defined(XLOG_NOLOG) || defined(DEBUG) + if (xlog_debug) { + tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed; + tp->t_logcb.cb_arg = tp; + error = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb)); + } else { + xfs_trans_committed(tp, 0); + } +#else + tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed; + tp->t_logcb.cb_arg = tp; + + /* We need to pass the iclog buffer which was used for the + * transaction commit record into this function, and attach + * the callback to it. The callback must be attached before + * the items are unlocked to avoid racing with other threads + * waiting for an item to unlock. + */ + error = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb)); +#endif + + /* + * Once all the items of the transaction have been copied + * to the in core log and the callback is attached, the + * items can be unlocked. + * + * This will free descriptors pointing to items which were + * not logged since there is nothing more to do with them. + * For items which were logged, we will keep pointers to them + * so they can be unpinned after the transaction commits to disk. + * This will also stamp each modified meta-data item with + * the commit lsn of this transaction for dependency tracking + * purposes. + */ + xfs_trans_unlock_items(tp, commit_lsn); + + /* + * Now that the xfs_trans_committed callback has been attached, + * and the items are released we can finally allow the iclog to + * go to disk. + */ + error = xfs_log_release_iclog(mp, commit_iclog); + + /* + * If the transaction needs to be synchronous, then force the + * log out now and wait for it. + */ + if (sync) { + if (!error) + error = xfs_log_force(mp, commit_lsn, + XFS_LOG_FORCE | XFS_LOG_SYNC); + XFS_STATS_INC(xfsstats.xs_trans_sync); + } else { + XFS_STATS_INC(xfsstats.xs_trans_async); + } + + /* mark this thread as no longer being in a transaction */ + current->flags &= ~PF_FSTRANS; + + return (error); +} + + +/* + * Total up the number of log iovecs needed to commit this + * transaction. The transaction itself needs one for the + * transaction header. Ask each dirty item in turn how many + * it needs to get the total. 
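+ *
+ * For example (item sizes here are only illustrative), a transaction
+ * with one dirty inode item reporting 2 vectors and one dirty buffer
+ * item reporting 2 more totals 1 + 2 + 2 = 5, well inside the
+ * 16-entry on-stack log_vector_fast[] used by xfs_trans_commit().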
+ */ +STATIC uint +xfs_trans_count_vecs( + xfs_trans_t *tp) +{ + int nvecs; + xfs_log_item_desc_t *lidp; + + nvecs = 1; + lidp = xfs_trans_first_item(tp); + ASSERT(lidp != NULL); + + /* In the non-debug case we need to start bailing out if we + * didn't find a log_item here, return zero and let trans_commit + * deal with it. + */ + if (lidp == NULL) + return 0; + + while (lidp != NULL) { + /* + * Skip items which aren't dirty in this transaction. + */ + if (!(lidp->lid_flags & XFS_LID_DIRTY)) { + lidp = xfs_trans_next_item(tp, lidp); + continue; + } + lidp->lid_size = IOP_SIZE(lidp->lid_item); + nvecs += lidp->lid_size; + lidp = xfs_trans_next_item(tp, lidp); + } + + return nvecs; +} + +/* + * Called from the trans_commit code when we notice that + * the filesystem is in the middle of a forced shutdown. + */ +STATIC void +xfs_trans_uncommit( + xfs_trans_t *tp, + uint flags) +{ + xfs_log_item_desc_t *lidp; + + for (lidp = xfs_trans_first_item(tp); + lidp != NULL; + lidp = xfs_trans_next_item(tp, lidp)) { + /* + * Unpin all but those that aren't dirty. + */ + if (lidp->lid_flags & XFS_LID_DIRTY) + IOP_UNPIN_REMOVE(lidp->lid_item, tp); + } + + xfs_trans_unreserve_and_mod_sb(tp); + XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp); + + xfs_trans_free_items(tp, flags); + xfs_trans_free_busy(tp); + xfs_trans_free(tp); +} + +/* + * Fill in the vector with pointers to data to be logged + * by this transaction. The transaction header takes + * the first vector, and then each dirty item takes the + * number of vectors it indicated it needed in xfs_trans_count_vecs(). + * + * As each item fills in the entries it needs, also pin the item + * so that it cannot be flushed out until the log write completes. + */ +STATIC void +xfs_trans_fill_vecs( + xfs_trans_t *tp, + xfs_log_iovec_t *log_vector) +{ + xfs_log_item_desc_t *lidp; + xfs_log_iovec_t *vecp; + uint nitems; + + /* + * Skip over the entry for the transaction header, we'll + * fill that in at the end. + */ + vecp = log_vector + 1; /* pointer arithmetic */ + + nitems = 0; + lidp = xfs_trans_first_item(tp); + ASSERT(lidp != NULL); + while (lidp != NULL) { + /* + * Skip items which aren't dirty in this transaction. + */ + if (!(lidp->lid_flags & XFS_LID_DIRTY)) { + lidp = xfs_trans_next_item(tp, lidp); + continue; + } + /* + * The item may be marked dirty but not log anything. + * This can be used to get called when a transaction + * is committed. + */ + if (lidp->lid_size) { + nitems++; + } + IOP_FORMAT(lidp->lid_item, vecp); + vecp += lidp->lid_size; /* pointer arithmetic */ + IOP_PIN(lidp->lid_item); + lidp = xfs_trans_next_item(tp, lidp); + } + + /* + * Now that we've counted the number of items in this + * transaction, fill in the transaction header. + */ + tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC; + tp->t_header.th_type = tp->t_type; + tp->t_header.th_num_items = nitems; + log_vector->i_addr = (xfs_caddr_t)&tp->t_header; + log_vector->i_len = sizeof(xfs_trans_header_t); +} + + +/* + * Unlock all of the transaction's items and free the transaction. + * The transaction must not have modified any of its items, because + * there is no way to restore them to their previous state. + * + * If the transaction has made a log reservation, make sure to release + * it as well. 
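+ *
+ * A typical error-path sketch (the flags value depends on the caller):
+ *
+ *	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+ *
+ * Passing XFS_TRANS_ABORT is only meaningful for a dirty transaction;
+ * the code below strips the flag again when nothing was dirtied, and
+ * a dirty cancel forces the filesystem into shutdown.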
+ */ +void +xfs_trans_cancel( + xfs_trans_t *tp, + int flags) +{ + int log_flags; +#ifdef DEBUG + xfs_log_item_chunk_t *licp; + xfs_log_item_desc_t *lidp; + xfs_log_item_t *lip; + int i; +#endif + + /* + * See if the caller is being too lazy to figure out if + * the transaction really needs an abort. + */ + if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY)) + flags &= ~XFS_TRANS_ABORT; + /* + * See if the caller is relying on us to shut down the + * filesystem. This happens in paths where we detect + * corruption and decide to give up. + */ + if ((tp->t_flags & XFS_TRANS_DIRTY) && + !XFS_FORCED_SHUTDOWN(tp->t_mountp)) + xfs_force_shutdown(tp->t_mountp, XFS_CORRUPT_INCORE); +#ifdef DEBUG + if (!(flags & XFS_TRANS_ABORT)) { + licp = &(tp->t_items); + while (licp != NULL) { + lidp = licp->lic_descs; + for (i = 0; i < licp->lic_unused; i++, lidp++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lip = lidp->lid_item; + if (!XFS_FORCED_SHUTDOWN(tp->t_mountp)) + ASSERT(!(lip->li_type == XFS_LI_EFD)); + } + licp = licp->lic_next; + } + } +#endif + xfs_trans_unreserve_and_mod_sb(tp); + XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp); + + if (tp->t_ticket) { + if (flags & XFS_TRANS_RELEASE_LOG_RES) { + ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); + log_flags = XFS_LOG_REL_PERM_RESERV; + } else { + log_flags = 0; + } + xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags); + } + xfs_trans_free_items(tp, flags); + xfs_trans_free_busy(tp); + xfs_trans_free(tp); + + /* mark this thread as no longer being in a transaction */ + current->flags &= ~PF_FSTRANS; +} + + +/* + * Free the transaction structure. If there is more clean up + * to do when the structure is freed, add it here. + */ +STATIC void +xfs_trans_free( + xfs_trans_t *tp) +{ + atomic_dec(&tp->t_mountp->m_active_trans); + XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp); + kmem_zone_free(xfs_trans_zone, tp); +} + + +/* + * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item(). + * + * This is typically called by the LM when a transaction has been fully + * committed to disk. It needs to unpin the items which have + * been logged by the transaction and update their positions + * in the AIL if necessary. + * This also gets called when the transactions didn't get written out + * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then. + * + * Call xfs_trans_chunk_committed() to process the items in + * each chunk. + */ +STATIC void +xfs_trans_committed( + xfs_trans_t *tp, + int abortflag) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_chunk_t *next_licp; + xfs_log_busy_chunk_t *lbcp; + xfs_log_busy_slot_t *lbsp; + int i; + + /* + * Call the transaction's completion callback if there + * is one. + */ + if (tp->t_callback != NULL) { + tp->t_callback(tp, tp->t_callarg); + } + + /* + * Special case the chunk embedded in the transaction. + */ + licp = &(tp->t_items); + if (!(XFS_LIC_ARE_ALL_FREE(licp))) { + xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); + } + + /* + * Process the items in each chunk in turn. 
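+ * Chunks beyond the one embedded in the transaction were allocated
+ * separately, so each is handed back with kmem_free() once its
+ * descriptors have been processed.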
+ */ + licp = licp->lic_next; + while (licp != NULL) { + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); + xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); + next_licp = licp->lic_next; + kmem_free(licp, sizeof(xfs_log_item_chunk_t)); + licp = next_licp; + } + + /* + * Clear all the per-AG busy list items listed in this transaction + */ + lbcp = &tp->t_busy; + while (lbcp != NULL) { + for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused; i++, lbsp++) { + if (!XFS_LBC_ISFREE(lbcp, i)) { + xfs_alloc_clear_busy(tp, lbsp->lbc_ag, + lbsp->lbc_idx); + } + } + lbcp = lbcp->lbc_next; + } + xfs_trans_free_busy(tp); + + /* + * That's it for the transaction structure. Free it. + */ + xfs_trans_free(tp); +} + +/* + * This is called to perform the commit processing for each + * item described by the given chunk. + * + * The commit processing consists of unlocking items which were + * held locked with the SYNC_UNLOCK attribute, calling the committed + * routine of each logged item, updating the item's position in the AIL + * if necessary, and unpinning each item. If the committed routine + * returns -1, then do nothing further with the item because it + * may have been freed. + * + * Since items are unlocked when they are copied to the incore + * log, it is possible for two transactions to be completing + * and manipulating the same item simultaneously. The AIL lock + * will protect the lsn field of each item. The value of this + * field can never go backwards. + * + * We unpin the items after repositioning them in the AIL, because + * otherwise they could be immediately flushed and we'd have to race + * with the flusher trying to pull the item from the AIL as we add it. + */ +STATIC void +xfs_trans_chunk_committed( + xfs_log_item_chunk_t *licp, + xfs_lsn_t lsn, + int aborted) +{ + xfs_log_item_desc_t *lidp; + xfs_log_item_t *lip; + xfs_lsn_t item_lsn; + struct xfs_mount *mp; + int i; + SPLDECL(s); + + lidp = licp->lic_descs; + for (i = 0; i < licp->lic_unused; i++, lidp++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + lip = lidp->lid_item; + if (aborted) + lip->li_flags |= XFS_LI_ABORTED; + + if (lidp->lid_flags & XFS_LID_SYNC_UNLOCK) { + IOP_UNLOCK(lip); + } + + /* + * Send in the ABORTED flag to the COMMITTED routine + * so that it knows whether the transaction was aborted + * or not. + */ + item_lsn = IOP_COMMITTED(lip, lsn); + + /* + * If the committed routine returns -1, make + * no more references to the item. + */ + if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) { + continue; + } + + /* + * If the returned lsn is greater than what it + * contained before, update the location of the + * item in the AIL. If it is not, then do nothing. + * Items can never move backwards in the AIL. + * + * While the new lsn should usually be greater, it + * is possible that a later transaction completing + * simultaneously with an earlier one using the + * same item could complete first with a higher lsn. + * This would cause the earlier transaction to fail + * the test below. + */ + mp = lip->li_mountp; + AIL_LOCK(mp,s); + if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) { + /* + * This will set the item's lsn to item_lsn + * and update the position of the item in + * the AIL. + * + * xfs_trans_update_ail() drops the AIL lock. + */ + xfs_trans_update_ail(mp, lip, item_lsn, s); + } else { + AIL_UNLOCK(mp, s); + } + + /* + * Now that we've repositioned the item in the AIL, + * unpin it so it can be flushed. 
Pass information + * about buffer stale state down from the log item + * flags, if anyone else stales the buffer we do not + * want to pay any attention to it. + */ + IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE); + } +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_trans_extfree.c linux.22-ac2/fs/xfs/xfs_trans_extfree.c --- linux.vanilla/fs/xfs/xfs_trans_extfree.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_trans_extfree.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_extfree_item.h" + +/* + * This routine is called to allocate an "extent free intention" + * log item that will hold nextents worth of extents. The + * caller must use all nextents extents, because we are not + * flexible about this at all. + */ +xfs_efi_log_item_t * +xfs_trans_get_efi(xfs_trans_t *tp, + uint nextents) +{ + xfs_efi_log_item_t *efip; + + ASSERT(tp != NULL); + ASSERT(nextents > 0); + + efip = xfs_efi_init(tp->t_mountp, nextents); + ASSERT(efip != NULL); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efip); + + return (efip); +} + +/* + * This routine is called to indicate that the described + * extent is to be logged as needing to be freed. It should + * be called once for each extent to be freed. 
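+ *
+ * A short usage sketch (block numbers and lengths are placeholders):
+ *
+ *	efip = xfs_trans_get_efi(tp, 2);
+ *	xfs_trans_log_efi_extent(tp, efip, bno1, len1);
+ *	xfs_trans_log_efi_extent(tp, efip, bno2, len2);
+ *
+ * i.e. exactly nextents calls, since the caller must use every
+ * slot it asked for.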
+ */ +void +xfs_trans_log_efi_extent(xfs_trans_t *tp, + xfs_efi_log_item_t *efip, + xfs_fsblock_t start_block, + xfs_extlen_t ext_len) +{ + xfs_log_item_desc_t *lidp; + uint next_extent; + xfs_extent_t *extp; + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efip); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; + + next_extent = efip->efi_next_extent; + ASSERT(next_extent < efip->efi_format.efi_nextents); + extp = &(efip->efi_format.efi_extents[next_extent]); + extp->ext_start = start_block; + extp->ext_len = ext_len; + efip->efi_next_extent++; +} + + +/* + * This routine is called to allocate an "extent free done" + * log item that will hold nextents worth of extents. The + * caller must use all nextents extents, because we are not + * flexible about this at all. + */ +xfs_efd_log_item_t * +xfs_trans_get_efd(xfs_trans_t *tp, + xfs_efi_log_item_t *efip, + uint nextents) +{ + xfs_efd_log_item_t *efdp; + + ASSERT(tp != NULL); + ASSERT(nextents > 0); + + efdp = xfs_efd_init(tp->t_mountp, efip, nextents); + ASSERT(efdp != NULL); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efdp); + + return (efdp); +} + +/* + * This routine is called to indicate that the described + * extent is to be logged as having been freed. It should + * be called once for each extent freed. + */ +void +xfs_trans_log_efd_extent(xfs_trans_t *tp, + xfs_efd_log_item_t *efdp, + xfs_fsblock_t start_block, + xfs_extlen_t ext_len) +{ + xfs_log_item_desc_t *lidp; + uint next_extent; + xfs_extent_t *extp; + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efdp); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; + + next_extent = efdp->efd_next_extent; + ASSERT(next_extent < efdp->efd_format.efd_nextents); + extp = &(efdp->efd_format.efd_extents[next_extent]); + extp->ext_start = start_block; + extp->ext_len = ext_len; + efdp->efd_next_extent++; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_trans.h linux.22-ac2/fs/xfs/xfs_trans.h --- linux.vanilla/fs/xfs/xfs_trans.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_trans.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,1029 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_TRANS_H__ +#define __XFS_TRANS_H__ + +/* + * This is the structure written in the log at the head of + * every transaction. It identifies the type and id of the + * transaction, and contains the number of items logged by + * the transaction so we know how many to expect during recovery. + * + * Do not change the below structure without redoing the code in + * xlog_recover_add_to_trans() and xlog_recover_add_to_cont_trans(). + */ +typedef struct xfs_trans_header { + uint th_magic; /* magic number */ + uint th_type; /* transaction type */ + __int32_t th_tid; /* transaction id (unused) */ + uint th_num_items; /* num items logged by trans */ +} xfs_trans_header_t; + +#define XFS_TRANS_HEADER_MAGIC 0x5452414e /* TRAN */ + +/* + * Log item types. + */ +#define XFS_LI_5_3_BUF 0x1234 /* v1 bufs, 1-block inode buffers */ +#define XFS_LI_5_3_INODE 0x1235 /* 1-block inode buffers */ +#define XFS_LI_EFI 0x1236 +#define XFS_LI_EFD 0x1237 +#define XFS_LI_IUNLINK 0x1238 +#define XFS_LI_6_1_INODE 0x1239 /* 4K non-aligned inode bufs */ +#define XFS_LI_6_1_BUF 0x123a /* v1, 4K inode buffers */ +#define XFS_LI_INODE 0x123b /* aligned ino chunks, var-size ibufs */ +#define XFS_LI_BUF 0x123c /* v2 bufs, variable sized inode bufs */ +#define XFS_LI_DQUOT 0x123d +#define XFS_LI_QUOTAOFF 0x123e + +/* + * Transaction types. Used to distinguish types of buffers. + */ +#define XFS_TRANS_SETATTR_NOT_SIZE 1 +#define XFS_TRANS_SETATTR_SIZE 2 +#define XFS_TRANS_INACTIVE 3 +#define XFS_TRANS_CREATE 4 +#define XFS_TRANS_CREATE_TRUNC 5 +#define XFS_TRANS_TRUNCATE_FILE 6 +#define XFS_TRANS_REMOVE 7 +#define XFS_TRANS_LINK 8 +#define XFS_TRANS_RENAME 9 +#define XFS_TRANS_MKDIR 10 +#define XFS_TRANS_RMDIR 11 +#define XFS_TRANS_SYMLINK 12 +#define XFS_TRANS_SET_DMATTRS 13 +#define XFS_TRANS_GROWFS 14 +#define XFS_TRANS_STRAT_WRITE 15 +#define XFS_TRANS_DIOSTRAT 16 +#define XFS_TRANS_WRITE_SYNC 17 +#define XFS_TRANS_WRITEID 18 +#define XFS_TRANS_ADDAFORK 19 +#define XFS_TRANS_ATTRINVAL 20 +#define XFS_TRANS_ATRUNCATE 21 +#define XFS_TRANS_ATTR_SET 22 +#define XFS_TRANS_ATTR_RM 23 +#define XFS_TRANS_ATTR_FLAG 24 +#define XFS_TRANS_CLEAR_AGI_BUCKET 25 +#define XFS_TRANS_QM_SBCHANGE 26 +/* + * Dummy entries since we use the transaction type to index into the + * trans_type[] in xlog_recover_print_trans_head() + */ +#define XFS_TRANS_DUMMY1 27 +#define XFS_TRANS_DUMMY2 28 +#define XFS_TRANS_QM_QUOTAOFF 29 +#define XFS_TRANS_QM_DQALLOC 30 +#define XFS_TRANS_QM_SETQLIM 31 +#define XFS_TRANS_QM_DQCLUSTER 32 +#define XFS_TRANS_QM_QINOCREATE 33 +#define XFS_TRANS_QM_QUOTAOFF_END 34 +#define XFS_TRANS_SB_UNIT 35 +#define XFS_TRANS_FSYNC_TS 36 +#define XFS_TRANS_GROWFSRT_ALLOC 37 +#define XFS_TRANS_GROWFSRT_ZERO 38 +#define XFS_TRANS_GROWFSRT_FREE 39 +#define XFS_TRANS_SWAPEXT 40 +/* new transaction types need to be reflected in xfs_logprint(8) */ + + +#ifdef __KERNEL__ +struct xfs_buf; +struct xfs_buftarg; +struct xfs_efd_log_item; +struct xfs_efi_log_item; +struct xfs_inode; +struct xfs_item_ops; +struct xfs_log_iovec; +struct xfs_log_item; +struct xfs_log_item_desc; +struct xfs_mount; +struct xfs_trans; +struct xfs_dquot_acct; + +typedef struct xfs_ail_entry { + struct xfs_log_item *ail_forw; /* AIL forw pointer */ + struct xfs_log_item *ail_back; /* AIL back 
pointer */ +} xfs_ail_entry_t; + +/* + * This structure is passed as a parameter to xfs_trans_push_ail() + * and is used to track the what LSN the waiting processes are + * waiting to become unused. + */ +typedef struct xfs_ail_ticket { + xfs_lsn_t at_lsn; /* lsn waitin for */ + struct xfs_ail_ticket *at_forw; /* wait list ptr */ + struct xfs_ail_ticket *at_back; /* wait list ptr */ + sv_t at_sema; /* wait sema */ +} xfs_ail_ticket_t; + + +typedef struct xfs_log_item { + xfs_ail_entry_t li_ail; /* AIL pointers */ + xfs_lsn_t li_lsn; /* last on-disk lsn */ + struct xfs_log_item_desc *li_desc; /* ptr to current desc*/ + struct xfs_mount *li_mountp; /* ptr to fs mount */ + uint li_type; /* item type */ + uint li_flags; /* misc flags */ + struct xfs_log_item *li_bio_list; /* buffer item list */ + void (*li_cb)(struct xfs_buf *, + struct xfs_log_item *); + /* buffer item iodone */ + /* callback func */ + struct xfs_item_ops *li_ops; /* function list */ +} xfs_log_item_t; + +#define XFS_LI_IN_AIL 0x1 +#define XFS_LI_ABORTED 0x2 + +typedef struct xfs_item_ops { + uint (*iop_size)(xfs_log_item_t *); + void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); + void (*iop_pin)(xfs_log_item_t *); + void (*iop_unpin)(xfs_log_item_t *, int); + void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *); + uint (*iop_trylock)(xfs_log_item_t *); + void (*iop_unlock)(xfs_log_item_t *); + xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); + void (*iop_push)(xfs_log_item_t *); + void (*iop_abort)(xfs_log_item_t *); + void (*iop_pushbuf)(xfs_log_item_t *); + void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); +} xfs_item_ops_t; + +#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip) +#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp) +#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip) +#define IOP_UNPIN(ip, flags) (*(ip)->li_ops->iop_unpin)(ip, flags) +#define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp) +#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip) +#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip) +#define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn) +#define IOP_PUSH(ip) (*(ip)->li_ops->iop_push)(ip) +#define IOP_ABORT(ip) (*(ip)->li_ops->iop_abort)(ip) +#define IOP_PUSHBUF(ip) (*(ip)->li_ops->iop_pushbuf)(ip) +#define IOP_COMMITTING(ip, lsn) (*(ip)->li_ops->iop_committing)(ip, lsn) + +/* + * Return values for the IOP_TRYLOCK() routines. + */ +#define XFS_ITEM_SUCCESS 0 +#define XFS_ITEM_PINNED 1 +#define XFS_ITEM_LOCKED 2 +#define XFS_ITEM_FLUSHING 3 +#define XFS_ITEM_PUSHBUF 4 + +#endif /* __KERNEL__ */ + +/* + * This structure is used to track log items associated with + * a transaction. It points to the log item and keeps some + * flags to track the state of the log item. It also tracks + * the amount of space needed to log the item it describes + * once we get to commit processing (see xfs_trans_commit()). + */ +typedef struct xfs_log_item_desc { + xfs_log_item_t *lid_item; + ushort lid_size; + unsigned char lid_flags; + unsigned char lid_index; +} xfs_log_item_desc_t; + +#define XFS_LID_DIRTY 0x1 +#define XFS_LID_PINNED 0x2 +#define XFS_LID_SYNC_UNLOCK 0x4 +#define XFS_LID_BUF_STALE 0x8 + +/* + * This structure is used to maintain a chunk list of log_item_desc + * structures. The free field is a bitmask indicating which descriptors + * in this chunk's array are free. The unused field is the first value + * not used since this chunk was allocated. 
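+ *
+ * With XFS_LIC_NUM_SLOTS == 15 the free mask is 0x7fff.  A chunk
+ * whose slots 0 and 3 are claimed has lic_free == 0x7ff6, so
+ * XFS_LIC_ISFREE(cp, 3) evaluates to 0 while XFS_LIC_ISFREE(cp, 1)
+ * is non-zero, and XFS_LIC_RELSE(cp, 3) sets bit 3 to return the slot.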
+ */ +#define XFS_LIC_NUM_SLOTS 15 +typedef struct xfs_log_item_chunk { + struct xfs_log_item_chunk *lic_next; + ushort lic_free; + ushort lic_unused; + xfs_log_item_desc_t lic_descs[XFS_LIC_NUM_SLOTS]; +} xfs_log_item_chunk_t; + +#define XFS_LIC_MAX_SLOT (XFS_LIC_NUM_SLOTS - 1) +#define XFS_LIC_FREEMASK ((1 << XFS_LIC_NUM_SLOTS) - 1) + + +/* + * Initialize the given chunk. Set the chunk's free descriptor mask + * to indicate that all descriptors are free. The caller gets to set + * lic_unused to the right value (0 matches all free). The + * lic_descs.lid_index values are set up as each desc is allocated. + */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_INIT) +void xfs_lic_init(xfs_log_item_chunk_t *cp); +#define XFS_LIC_INIT(cp) xfs_lic_init(cp) +#else +#define XFS_LIC_INIT(cp) ((cp)->lic_free = XFS_LIC_FREEMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_INIT_SLOT) +void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_INIT_SLOT(cp,slot) xfs_lic_init_slot(cp, slot) +#else +#define XFS_LIC_INIT_SLOT(cp,slot) \ + ((cp)->lic_descs[slot].lid_index = (unsigned char)(slot)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_VACANCY) +int xfs_lic_vacancy(xfs_log_item_chunk_t *cp); +#define XFS_LIC_VACANCY(cp) xfs_lic_vacancy(cp) +#else +#define XFS_LIC_VACANCY(cp) (((cp)->lic_free) & XFS_LIC_FREEMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_ALL_FREE) +void xfs_lic_all_free(xfs_log_item_chunk_t *cp); +#define XFS_LIC_ALL_FREE(cp) xfs_lic_all_free(cp) +#else +#define XFS_LIC_ALL_FREE(cp) ((cp)->lic_free = XFS_LIC_FREEMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_ARE_ALL_FREE) +int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp); +#define XFS_LIC_ARE_ALL_FREE(cp) xfs_lic_are_all_free(cp) +#else +#define XFS_LIC_ARE_ALL_FREE(cp) (((cp)->lic_free & XFS_LIC_FREEMASK) ==\ + XFS_LIC_FREEMASK) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_ISFREE) +int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_ISFREE(cp,slot) xfs_lic_isfree(cp,slot) +#else +#define XFS_LIC_ISFREE(cp,slot) ((cp)->lic_free & (1 << (slot))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_CLAIM) +void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_CLAIM(cp,slot) xfs_lic_claim(cp,slot) +#else +#define XFS_LIC_CLAIM(cp,slot) ((cp)->lic_free &= ~(1 << (slot))) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_RELSE) +void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_RELSE(cp,slot) xfs_lic_relse(cp,slot) +#else +#define XFS_LIC_RELSE(cp,slot) ((cp)->lic_free |= 1 << (slot)) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_SLOT) +xfs_log_item_desc_t *xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot); +#define XFS_LIC_SLOT(cp,slot) xfs_lic_slot(cp,slot) +#else +#define XFS_LIC_SLOT(cp,slot) (&((cp)->lic_descs[slot])) +#endif +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_DESC_TO_SLOT) +int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp); +#define XFS_LIC_DESC_TO_SLOT(dp) xfs_lic_desc_to_slot(dp) +#else +#define XFS_LIC_DESC_TO_SLOT(dp) ((uint)((dp)->lid_index)) +#endif +/* + * Calculate the address of a chunk given a descriptor pointer: + * dp - dp->lid_index give the address of the start of the lic_descs array. + * From this we subtract the offset of the lic_descs field in a chunk. + * All of this yields the address of the chunk, which is + * cast to a chunk pointer. 
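+ *
+ * In offsetof() terms the macro below is simply
+ *
+ *	(xfs_log_item_chunk_t *)((char *)(dp - (dp)->lid_index) -
+ *		offsetof(xfs_log_item_chunk_t, lic_descs))
+ *
+ * spelled out with the traditional null-pointer cast.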
+ */ +#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_LIC_DESC_TO_CHUNK) +xfs_log_item_chunk_t *xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp); +#define XFS_LIC_DESC_TO_CHUNK(dp) xfs_lic_desc_to_chunk(dp) +#else +#define XFS_LIC_DESC_TO_CHUNK(dp) ((xfs_log_item_chunk_t*) \ + (((xfs_caddr_t)((dp) - (dp)->lid_index)) -\ + (xfs_caddr_t)(((xfs_log_item_chunk_t*) \ + 0)->lic_descs))) +#endif + +#ifdef __KERNEL__ +/* + * This structure is used to maintain a list of block ranges that have been + * freed in the transaction. The ranges are listed in the perag[] busy list + * between when they're freed and the transaction is committed to disk. + */ + +typedef struct xfs_log_busy_slot { + xfs_agnumber_t lbc_ag; + ushort lbc_idx; /* index in perag.busy[] */ +} xfs_log_busy_slot_t; + +#define XFS_LBC_NUM_SLOTS 31 +typedef struct xfs_log_busy_chunk { + struct xfs_log_busy_chunk *lbc_next; + uint lbc_free; /* bitmask of free slots */ + ushort lbc_unused; /* first unused */ + xfs_log_busy_slot_t lbc_busy[XFS_LBC_NUM_SLOTS]; +} xfs_log_busy_chunk_t; + +#define XFS_LBC_MAX_SLOT (XFS_LBC_NUM_SLOTS - 1) +#define XFS_LBC_FREEMASK ((1U << XFS_LBC_NUM_SLOTS) - 1) + +#define XFS_LBC_INIT(cp) ((cp)->lbc_free = XFS_LBC_FREEMASK) +#define XFS_LBC_CLAIM(cp, slot) ((cp)->lbc_free &= ~(1 << (slot))) +#define XFS_LBC_SLOT(cp, slot) (&((cp)->lbc_busy[(slot)])) +#define XFS_LBC_VACANCY(cp) (((cp)->lbc_free) & XFS_LBC_FREEMASK) +#define XFS_LBC_ISFREE(cp, slot) ((cp)->lbc_free & (1 << (slot))) + +/* + * This is the type of function which can be given to xfs_trans_callback() + * to be called upon the transaction's commit to disk. + */ +typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *); + +/* + * This is the structure maintained for every active transaction. + */ +typedef struct xfs_trans { + unsigned int t_magic; /* magic number */ + xfs_log_callback_t t_logcb; /* log callback struct */ + struct xfs_trans *t_forw; /* async list pointers */ + struct xfs_trans *t_back; /* async list pointers */ + unsigned int t_type; /* transaction type */ + unsigned int t_log_res; /* amt of log space resvd */ + unsigned int t_log_count; /* count for perm log res */ + unsigned int t_blk_res; /* # of blocks resvd */ + unsigned int t_blk_res_used; /* # of resvd blocks used */ + unsigned int t_rtx_res; /* # of rt extents resvd */ + unsigned int t_rtx_res_used; /* # of resvd rt extents used */ + xfs_log_ticket_t t_ticket; /* log mgr ticket */ + sema_t t_sema; /* sema for commit completion */ + xfs_lsn_t t_lsn; /* log seq num of start of + * transaction. */ + xfs_lsn_t t_commit_lsn; /* log seq num of end of + * transaction. 
*/ + struct xfs_mount *t_mountp; /* ptr to fs mount struct */ + struct xfs_dquot_acct *t_dqinfo; /* accting info for dquots */ + xfs_trans_callback_t t_callback; /* transaction callback */ + void *t_callarg; /* callback arg */ + unsigned int t_flags; /* misc flags */ + long t_icount_delta; /* superblock icount change */ + long t_ifree_delta; /* superblock ifree change */ + long t_fdblocks_delta; /* superblock fdblocks chg */ + long t_res_fdblocks_delta; /* on-disk only chg */ + long t_frextents_delta;/* superblock freextents chg*/ + long t_res_frextents_delta; /* on-disk only chg */ + long t_ag_freeblks_delta; /* debugging counter */ + long t_ag_flist_delta; /* debugging counter */ + long t_ag_btree_delta; /* debugging counter */ + long t_dblocks_delta;/* superblock dblocks change */ + long t_agcount_delta;/* superblock agcount change */ + long t_imaxpct_delta;/* superblock imaxpct change */ + long t_rextsize_delta;/* superblock rextsize chg */ + long t_rbmblocks_delta;/* superblock rbmblocks chg */ + long t_rblocks_delta;/* superblock rblocks change */ + long t_rextents_delta;/* superblocks rextents chg */ + long t_rextslog_delta;/* superblocks rextslog chg */ + unsigned int t_items_free; /* log item descs free */ + xfs_log_item_chunk_t t_items; /* first log item desc chunk */ + xfs_trans_header_t t_header; /* header for in-log trans */ + unsigned int t_busy_free; /* busy descs free */ + xfs_log_busy_chunk_t t_busy; /* busy/async free blocks */ +} xfs_trans_t; + +#endif /* __KERNEL__ */ + + +#define XFS_TRANS_MAGIC 0x5452414E /* 'TRAN' */ +/* + * Values for t_flags. + */ +#define XFS_TRANS_DIRTY 0x01 /* something needs to be logged */ +#define XFS_TRANS_SB_DIRTY 0x02 /* superblock is modified */ +#define XFS_TRANS_PERM_LOG_RES 0x04 /* xact took a permanent log res */ +#define XFS_TRANS_SYNC 0x08 /* make commit synchronous */ +#define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */ +#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */ + +/* + * Values for call flags parameter. + */ +#define XFS_TRANS_NOSLEEP 0x1 +#define XFS_TRANS_WAIT 0x2 +#define XFS_TRANS_RELEASE_LOG_RES 0x4 +#define XFS_TRANS_ABORT 0x8 + +/* + * Field values for xfs_trans_mod_sb. + */ +#define XFS_TRANS_SB_ICOUNT 0x00000001 +#define XFS_TRANS_SB_IFREE 0x00000002 +#define XFS_TRANS_SB_FDBLOCKS 0x00000004 +#define XFS_TRANS_SB_RES_FDBLOCKS 0x00000008 +#define XFS_TRANS_SB_FREXTENTS 0x00000010 +#define XFS_TRANS_SB_RES_FREXTENTS 0x00000020 +#define XFS_TRANS_SB_DBLOCKS 0x00000040 +#define XFS_TRANS_SB_AGCOUNT 0x00000080 +#define XFS_TRANS_SB_IMAXPCT 0x00000100 +#define XFS_TRANS_SB_REXTSIZE 0x00000200 +#define XFS_TRANS_SB_RBMBLOCKS 0x00000400 +#define XFS_TRANS_SB_RBLOCKS 0x00000800 +#define XFS_TRANS_SB_REXTENTS 0x00001000 +#define XFS_TRANS_SB_REXTSLOG 0x00002000 + + +/* + * Various log reservation values. + * These are based on the size of the file system block + * because that is what most transactions manipulate. + * Each adds in an additional 128 bytes per item logged to + * try to account for the overhead of the transaction mechanism. + * + * Note: + * Most of the reservations underestimate the number of allocation + * groups into which they could free extents in the xfs_bmap_finish() + * call. This is because the number in the worst case is quite high + * and quite unusual. In order to fix this we need to change + * xfs_bmap_finish() to free extents in only a single AG at a time. 
+ * This will require changes to the EFI code as well, however, so that + * the EFI for the extents not freed is logged again in each transaction. + * See bug 261917. + */ + +/* + * Per-extent log reservation for the allocation btree changes + * involved in freeing or allocating an extent. + * 2 trees * (2 blocks/level * max depth - 1) * block size + */ +#define XFS_ALLOCFREE_LOG_RES(mp,nx) \ + ((nx) * (2 * XFS_FSB_TO_B((mp), 2 * XFS_AG_MAXLEVELS(mp) - 1))) +#define XFS_ALLOCFREE_LOG_COUNT(mp,nx) \ + ((nx) * (2 * (2 * XFS_AG_MAXLEVELS(mp) - 1))) + +/* + * Per-directory log reservation for any directory change. + * dir blocks: (1 btree block per level + data block + free block) * dblock size + * bmap btree: (levels + 2) * max depth * block size + * v2 directory blocks can be fragmented below the dirblksize down to the fsb + * size, so account for that in the DAENTER macros. + */ +#define XFS_DIROP_LOG_RES(mp) \ + (XFS_FSB_TO_B(mp, XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK)) + \ + (XFS_FSB_TO_B(mp, XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1))) +#define XFS_DIROP_LOG_COUNT(mp) \ + (XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \ + XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1) + +/* + * In a write transaction we can allocate a maximum of 2 + * extents. This gives: + * the inode getting the new extents: inode size + * the inode\'s bmap btree: max depth * block size + * the agfs of the ags from which the extents are allocated: 2 * sector + * the superblock free block counter: sector size + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size + * And the bmap_finish transaction can free bmap blocks in a join: + * the agfs of the ags containing the blocks: 2 * sector size + * the agfls of the ags containing the blocks: 2 * sector size + * the super block free block counter: sector size + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_WRITE_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \ + (2 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 2) + \ + (128 * (4 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))),\ + ((2 * (mp)->m_sb.sb_sectsize) + \ + (2 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 2) + \ + (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))))) + +#define XFS_WRITE_LOG_RES(mp) ((mp)->m_reservations.tr_write) + +/* + * In truncating a file we free up to two extents at once. 
We can modify: + * the inode being truncated: inode size + * the inode\'s bmap btree: (max depth + 1) * block size + * And the bmap_finish transaction can free the blocks and bmap blocks: + * the agf for each of the ags: 4 * sector size + * the agfl for each of the ags: 4 * sector size + * the super block to reflect the freed blocks: sector size + * worst case split in allocation btrees per extent assuming 4 extents: + * 4 exts * 2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_ITRUNCATE_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1) + \ + (128 * (2 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \ + ((4 * (mp)->m_sb.sb_sectsize) + \ + (4 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 4) + \ + (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4)))))) + +#define XFS_ITRUNCATE_LOG_RES(mp) ((mp)->m_reservations.tr_itruncate) + +/* + * In renaming a files we can modify: + * the four inodes involved: 4 * inode size + * the two directory btrees: 2 * (max depth + v2) * dir block size + * the two directory bmap btrees: 2 * max depth * block size + * And the bmap_finish transaction can free dir and bmap blocks (two sets + * of bmap blocks) giving: + * the agf for the ags in which the blocks live: 3 * sector size + * the agfl for the ags in which the blocks live: 3 * sector size + * the superblock for the free block count: sector size + * the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_RENAME_LOG_RES(mp) \ + (MAX( \ + ((4 * (mp)->m_sb.sb_inodesize) + \ + (2 * XFS_DIROP_LOG_RES(mp)) + \ + (128 * (4 + 2 * XFS_DIROP_LOG_COUNT(mp)))), \ + ((3 * (mp)->m_sb.sb_sectsize) + \ + (3 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 3) + \ + (128 * (7 + XFS_ALLOCFREE_LOG_COUNT(mp, 3)))))) + +#define XFS_RENAME_LOG_RES(mp) ((mp)->m_reservations.tr_rename) + +/* + * For creating a link to an inode: + * the parent directory inode: inode size + * the linked inode: inode size + * the directory btree could split: (max depth + v2) * dir block size + * the directory bmap btree could join or split: (max depth + v2) * blocksize + * And the bmap_finish transaction can free some bmap blocks giving: + * the agf for the ag in which the blocks live: sector size + * the agfl for the ag in which the blocks live: sector size + * the superblock for the free block count: sector size + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_LINK_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_inodesize + \ + XFS_DIROP_LOG_RES(mp) + \ + (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \ + ((mp)->m_sb.sb_sectsize + \ + (mp)->m_sb.sb_sectsize + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) + +#define XFS_LINK_LOG_RES(mp) ((mp)->m_reservations.tr_link) + +/* + * For removing a directory entry we can modify: + * the parent directory inode: inode size + * the removed inode: inode size + * the directory btree could join: (max depth + v2) * dir block size + * the directory bmap btree could join or split: (max depth + v2) * blocksize + * And the bmap_finish transaction can free the dir and bmap blocks giving: + * the agf for the ag in which the blocks live: 2 * sector size + * the agfl for the ag in which the blocks live: 2 * sector size + * the superblock for the free block count: sector size + * the allocation btrees: 2 exts * 
2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_REMOVE_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_inodesize + \ + XFS_DIROP_LOG_RES(mp) + \ + (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \ + ((2 * (mp)->m_sb.sb_sectsize) + \ + (2 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 2) + \ + (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))))) + +#define XFS_REMOVE_LOG_RES(mp) ((mp)->m_reservations.tr_remove) + +/* + * For symlink we can modify: + * the parent directory inode: inode size + * the new inode: inode size + * the inode btree entry: 1 block + * the directory btree: (max depth + v2) * dir block size + * the directory inode\'s bmap btree: (max depth + v2) * block size + * the blocks for the symlink: 1 KB + * Or in the first xact we allocate some inodes giving: + * the agi and agf of the ag getting the new inodes: 2 * sectorsize + * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize + * the inode btree: max depth * blocksize + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_SYMLINK_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_inodesize + \ + XFS_FSB_TO_B(mp, 1) + \ + XFS_DIROP_LOG_RES(mp) + \ + 1024 + \ + (128 * (4 + XFS_DIROP_LOG_COUNT(mp)))), \ + (2 * (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \ + XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \ + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) + +#define XFS_SYMLINK_LOG_RES(mp) ((mp)->m_reservations.tr_symlink) + +/* + * For create we can modify: + * the parent directory inode: inode size + * the new inode: inode size + * the inode btree entry: block size + * the superblock for the nlink flag: sector size + * the directory btree: (max depth + v2) * dir block size + * the directory inode\'s bmap btree: (max depth + v2) * block size + * Or in the first xact we allocate some inodes giving: + * the agi and agf of the ag getting the new inodes: 2 * sectorsize + * the superblock for the nlink flag: sector size + * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize + * the inode btree: max depth * blocksize + * the allocation btrees: 2 trees * (max depth - 1) * block size + */ +#define XFS_CALC_CREATE_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B(mp, 1) + \ + XFS_DIROP_LOG_RES(mp) + \ + (128 * (3 + XFS_DIROP_LOG_COUNT(mp)))), \ + (3 * (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \ + XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \ + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) + +#define XFS_CREATE_LOG_RES(mp) ((mp)->m_reservations.tr_create) + +/* + * Making a new directory is the same as creating a new file. 
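+ *
+ * The mkdir reservation below therefore just reuses the create
+ * calculation.  Like the other XFS_CALC_*_LOG_RES() forms it is
+ * normally evaluated once and cached in the mount's m_reservations
+ * table, which is what the XFS_*_LOG_RES() wrappers read back at
+ * reserve time.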
+ */ +#define XFS_CALC_MKDIR_LOG_RES(mp) XFS_CALC_CREATE_LOG_RES(mp) + +#define XFS_MKDIR_LOG_RES(mp) ((mp)->m_reservations.tr_mkdir) + +/* + * In freeing an inode we can modify: + * the inode being freed: inode size + * the super block free inode counter: sector size + * the agi hash list and counters: sector size + * the inode btree entry: block size + * the on disk inode before ours in the agi hash list: inode cluster size + */ +#define XFS_CALC_IFREE_LOG_RES(mp) \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_sectsize + \ + (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B((mp), 1) + \ + MAX((__uint16_t)XFS_FSB_TO_B((mp), 1), XFS_INODE_CLUSTER_SIZE(mp)) + \ + (128 * 5)) + +#define XFS_IFREE_LOG_RES(mp) ((mp)->m_reservations.tr_ifree) + +/* + * When only changing the inode we log the inode and possibly the superblock + * We also add a bit of slop for the transaction stuff. + */ +#define XFS_CALC_ICHANGE_LOG_RES(mp) ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_sectsize + 512) + +#define XFS_ICHANGE_LOG_RES(mp) ((mp)->m_reservations.tr_ichange) + +/* + * Growing the data section of the filesystem. + * superblock + * agi and agf + * allocation btrees + */ +#define XFS_CALC_GROWDATA_LOG_RES(mp) \ + ((mp)->m_sb.sb_sectsize * 3 + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) + +#define XFS_GROWDATA_LOG_RES(mp) ((mp)->m_reservations.tr_growdata) + +/* + * Growing the rt section of the filesystem. + * In the first set of transactions (ALLOC) we allocate space to the + * bitmap or summary files. + * superblock: sector size + * agf of the ag from which the extent is allocated: sector size + * bmap btree for bitmap/summary inode: max depth * blocksize + * bitmap/summary inode: inode size + * allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize + */ +#define XFS_CALC_GROWRTALLOC_LOG_RES(mp) \ + (2 * (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \ + (mp)->m_sb.sb_inodesize + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * \ + (3 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + \ + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) + +#define XFS_GROWRTALLOC_LOG_RES(mp) ((mp)->m_reservations.tr_growrtalloc) + +/* + * Growing the rt section of the filesystem. + * In the second set of transactions (ZERO) we zero the new metadata blocks. + * one bitmap/summary block: blocksize + */ +#define XFS_CALC_GROWRTZERO_LOG_RES(mp) \ + ((mp)->m_sb.sb_blocksize + 128) + +#define XFS_GROWRTZERO_LOG_RES(mp) ((mp)->m_reservations.tr_growrtzero) + +/* + * Growing the rt section of the filesystem. + * In the third set of transactions (FREE) we update metadata without + * allocating any new blocks. + * superblock: sector size + * bitmap inode: inode size + * summary inode: inode size + * one bitmap block: blocksize + * summary blocks: new summary size + */ +#define XFS_CALC_GROWRTFREE_LOG_RES(mp) \ + ((mp)->m_sb.sb_sectsize + \ + 2 * (mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_blocksize + \ + (mp)->m_rsumsize + \ + (128 * 5)) + +#define XFS_GROWRTFREE_LOG_RES(mp) ((mp)->m_reservations.tr_growrtfree) + +/* + * Logging the inode modification timestamp on a synchronous write. 
+ * inode + */ +#define XFS_CALC_SWRITE_LOG_RES(mp) \ + ((mp)->m_sb.sb_inodesize + 128) + +#define XFS_SWRITE_LOG_RES(mp) ((mp)->m_reservations.tr_swrite) + +/* + * Logging the inode timestamps on an fsync -- same as SWRITE + * as long as SWRITE logs the entire inode core + */ +#define XFS_FSYNC_TS_LOG_RES(mp) ((mp)->m_reservations.tr_swrite) + +/* + * Logging the inode mode bits when writing a setuid/setgid file + * inode + */ +#define XFS_CALC_WRITEID_LOG_RES(mp) \ + ((mp)->m_sb.sb_inodesize + 128) + +#define XFS_WRITEID_LOG_RES(mp) ((mp)->m_reservations.tr_swrite) + +/* + * Converting the inode from non-attributed to attributed. + * the inode being converted: inode size + * agf block and superblock (for block allocation) + * the new block (directory sized) + * bmap blocks for the new directory block + * allocation btrees + */ +#define XFS_CALC_ADDAFORK_LOG_RES(mp) \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_sectsize * 2 + \ + (mp)->m_dirblksize + \ + (XFS_DIR_IS_V1(mp) ? 0 : \ + XFS_FSB_TO_B(mp, (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1))) + \ + XFS_ALLOCFREE_LOG_RES(mp, 1) + \ + (128 * (4 + \ + (XFS_DIR_IS_V1(mp) ? 0 : \ + XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) + \ + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) + +#define XFS_ADDAFORK_LOG_RES(mp) ((mp)->m_reservations.tr_addafork) + +/* + * Removing the attribute fork of a file + * the inode being truncated: inode size + * the inode\'s bmap btree: max depth * block size + * And the bmap_finish transaction can free the blocks and bmap blocks: + * the agf for each of the ags: 4 * sector size + * the agfl for each of the ags: 4 * sector size + * the super block to reflect the freed blocks: sector size + * worst case split in allocation btrees per extent assuming 4 extents: + * 4 exts * 2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_ATTRINVAL_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \ + (128 * (1 + XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))), \ + ((4 * (mp)->m_sb.sb_sectsize) + \ + (4 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 4) + \ + (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4)))))) + +#define XFS_ATTRINVAL_LOG_RES(mp) ((mp)->m_reservations.tr_attrinval) + +/* + * Setting an attribute. + * the inode getting the attribute + * the superblock for allocations + * the agfs extents are allocated from + * the attribute btree * max depth + * the inode allocation btree + * Since attribute transaction space is dependent on the size of the attribute, + * the calculation is done partially at mount time and partially at runtime. + */ +#define XFS_CALC_ATTRSET_LOG_RES(mp) \ + ((mp)->m_sb.sb_inodesize + \ + (mp)->m_sb.sb_sectsize + \ + XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \ + (128 * (2 + XFS_DA_NODE_MAXDEPTH))) + +#define XFS_ATTRSET_LOG_RES(mp, ext) \ + ((mp)->m_reservations.tr_attrset + \ + (ext * (mp)->m_sb.sb_sectsize) + \ + (ext * XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))) + \ + (128 * (ext + (ext * XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))))) + +/* + * Removing an attribute. 
+/* + * Removing an attribute. + * the inode: inode size + * the attribute btree could join: max depth * block size + * the inode bmap btree could join or split: max depth * block size + * And the bmap_finish transaction can free the attr blocks freed giving: + * the agf for the ag in which the blocks live: 2 * sector size + * the agfl for the ag in which the blocks live: 2 * sector size + * the superblock for the free block count: sector size + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size + */ +#define XFS_CALC_ATTRRM_LOG_RES(mp) \ + (MAX( \ + ((mp)->m_sb.sb_inodesize + \ + XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \ + XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \ + (128 * (1 + XFS_DA_NODE_MAXDEPTH + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \ + ((2 * (mp)->m_sb.sb_sectsize) + \ + (2 * (mp)->m_sb.sb_sectsize) + \ + (mp)->m_sb.sb_sectsize + \ + XFS_ALLOCFREE_LOG_RES(mp, 2) + \ + (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))))) + +#define XFS_ATTRRM_LOG_RES(mp) ((mp)->m_reservations.tr_attrrm) + +/* + * Clearing a bad agino number in an agi hash bucket. + */ +#define XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp) \ + ((mp)->m_sb.sb_sectsize + 128) + +#define XFS_CLEAR_AGI_BUCKET_LOG_RES(mp) ((mp)->m_reservations.tr_clearagi) + + +/* + * Various log count values. + */ +#define XFS_DEFAULT_LOG_COUNT 1 +#define XFS_DEFAULT_PERM_LOG_COUNT 2 +#define XFS_ITRUNCATE_LOG_COUNT 2 +#define XFS_CREATE_LOG_COUNT 2 +#define XFS_MKDIR_LOG_COUNT 3 +#define XFS_SYMLINK_LOG_COUNT 3 +#define XFS_REMOVE_LOG_COUNT 2 +#define XFS_LINK_LOG_COUNT 2 +#define XFS_RENAME_LOG_COUNT 2 +#define XFS_WRITE_LOG_COUNT 2 +#define XFS_ADDAFORK_LOG_COUNT 2 +#define XFS_ATTRINVAL_LOG_COUNT 1 +#define XFS_ATTRSET_LOG_COUNT 3 +#define XFS_ATTRRM_LOG_COUNT 3 + +/* + * Here we centralize the specification of XFS meta-data buffer + * reference count values. This determines how hard the buffer + * cache tries to hold onto the buffer. + */ +#define XFS_AGF_REF 4 +#define XFS_AGI_REF 4 +#define XFS_AGFL_REF 3 +#define XFS_INO_BTREE_REF 3 +#define XFS_ALLOC_BTREE_REF 2 +#define XFS_BMAP_BTREE_REF 2 +#define XFS_DIR_BTREE_REF 2 +#define XFS_ATTR_BTREE_REF 1 +#define XFS_INO_REF 1 +#define XFS_DQUOT_REF 1 + +#ifdef __KERNEL__ +/* + * XFS transaction mechanism exported interfaces that are + * actually macros. + */ +#define xfs_trans_get_log_res(tp) ((tp)->t_log_res) +#define xfs_trans_get_log_count(tp) ((tp)->t_log_count) +#define xfs_trans_get_block_res(tp) ((tp)->t_blk_res) +#define xfs_trans_set_sync(tp) ((tp)->t_flags |= XFS_TRANS_SYNC) + +#ifdef DEBUG +#define xfs_trans_agblocks_delta(tp, d) ((tp)->t_ag_freeblks_delta += (long)d) +#define xfs_trans_agflist_delta(tp, d) ((tp)->t_ag_flist_delta += (long)d) +#define xfs_trans_agbtree_delta(tp, d) ((tp)->t_ag_btree_delta += (long)d) +#else +#define xfs_trans_agblocks_delta(tp, d) +#define xfs_trans_agflist_delta(tp, d) +#define xfs_trans_agbtree_delta(tp, d) +#endif + +/* + * XFS transaction mechanism exported interfaces.
+ */ +void xfs_trans_init(struct xfs_mount *); +xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint); +xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint); +xfs_trans_t *xfs_trans_dup(xfs_trans_t *); +int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint, + uint, uint); +void xfs_trans_callback(xfs_trans_t *, + void (*)(xfs_trans_t *, void *), void *); +void xfs_trans_mod_sb(xfs_trans_t *, uint, long); +struct xfs_buf *xfs_trans_get_buf(xfs_trans_t *, struct xfs_buftarg *, xfs_daddr_t, + int, uint); +int xfs_trans_read_buf(struct xfs_mount *, xfs_trans_t *, + struct xfs_buftarg *, xfs_daddr_t, int, uint, + struct xfs_buf **); +struct xfs_buf *xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int); + +void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_bhold_until_committed(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); +void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); +void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); +int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *, + xfs_ino_t , uint, struct xfs_inode **); +void xfs_trans_iput(xfs_trans_t *, struct xfs_inode *, uint); +void xfs_trans_ijoin(xfs_trans_t *, struct xfs_inode *, uint); +void xfs_trans_ihold(xfs_trans_t *, struct xfs_inode *); +void xfs_trans_ihold_release(xfs_trans_t *, struct xfs_inode *); +void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint); +void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint); +struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint); +void xfs_efi_release(struct xfs_efi_log_item *, uint); +void xfs_trans_log_efi_extent(xfs_trans_t *, + struct xfs_efi_log_item *, + xfs_fsblock_t, + xfs_extlen_t); +struct xfs_efd_log_item *xfs_trans_get_efd(xfs_trans_t *, + struct xfs_efi_log_item *, + uint); +void xfs_trans_log_efd_extent(xfs_trans_t *, + struct xfs_efd_log_item *, + xfs_fsblock_t, + xfs_extlen_t); +int xfs_trans_commit(xfs_trans_t *, uint flags, xfs_lsn_t *); +void xfs_trans_cancel(xfs_trans_t *, int); +void xfs_trans_ail_init(struct xfs_mount *); +xfs_lsn_t xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); +xfs_lsn_t xfs_trans_tail_ail(struct xfs_mount *); +void xfs_trans_unlocked_item(struct xfs_mount *, + xfs_log_item_t *); +xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, + xfs_agnumber_t ag, + xfs_extlen_t idx); + +#endif /* __KERNEL__ */ + +#endif /* __XFS_TRANS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_trans_inode.c linux.22-ac2/fs/xfs/xfs_trans_inode.c --- linux.vanilla/fs/xfs/xfs_trans_inode.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_trans_inode.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_trans_priv.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" + +#ifdef XFS_TRANS_DEBUG +STATIC void +xfs_trans_inode_broot_debug( + xfs_inode_t *ip); +#else +#define xfs_trans_inode_broot_debug(ip) +#endif + + +/* + * Get and lock the inode for the caller if it is not already + * locked within the given transaction. If it is already locked + * within the transaction, just increment its lock recursion count + * and return a pointer to it. + * + * For an inode to be locked in a transaction, the inode lock, as + * opposed to the io lock, must be taken exclusively. This ensures + * that the inode can be involved in only 1 transaction at a time. + * Lock recursion is handled on the io lock, but only for lock modes + * of equal or lesser strength. That is, you can recur on the io lock + * held EXCL with a SHARED request but not vice versa. Also, if + * the inode is already a part of the transaction then you cannot + * go from not holding the io lock to having it EXCL or SHARED. + * + * Use the inode cache routine xfs_inode_incore() to find the inode + * if it is already owned by this transaction. + * + * If we don't already own the inode, use xfs_iget() to get it. + * Since the inode log item structure is embedded in the incore + * inode structure and is initialized when the inode is brought + * into memory, there is nothing to do with it here. + * + * If the given transaction pointer is NULL, just call xfs_iget(). + * This simplifies code which must handle both cases. + */ +int +xfs_trans_iget( + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + uint lock_flags, + xfs_inode_t **ipp) +{ + int error; + xfs_inode_t *ip; + xfs_inode_log_item_t *iip; + + /* + * If the transaction pointer is NULL, just call the normal + * xfs_iget(). + */ + if (tp == NULL) { + return (xfs_iget(mp, NULL, ino, lock_flags, ipp, 0)); + } + + /* + * If we find the inode in core with this transaction + * pointer in its i_transp field, then we know we already + * have it locked. In this case we just increment the lock + * recursion count and return the inode to the caller. 
+ * Assert that the inode is already locked in the mode requested + * by the caller. We cannot do lock promotions yet, so + * die if someone gets this wrong. + */ + if ((ip = xfs_inode_incore(tp->t_mountp, ino, tp)) != NULL) { + /* + * Make sure that the inode lock is held EXCL and + * that the io lock is never upgraded when the inode + * is already a part of the transaction. + */ + ASSERT(ip->i_itemp != NULL); + ASSERT(lock_flags & XFS_ILOCK_EXCL); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) || + ismrlocked(&ip->i_iolock, MR_UPDATE)); + ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) || + (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL)); + ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) || + ismrlocked(&ip->i_iolock, (MR_UPDATE | MR_ACCESS))); + ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) || + (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY)); + + if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) { + ip->i_itemp->ili_iolock_recur++; + } + if (lock_flags & XFS_ILOCK_EXCL) { + ip->i_itemp->ili_ilock_recur++; + } + *ipp = ip; + return 0; + } + + ASSERT(lock_flags & XFS_ILOCK_EXCL); + error = xfs_iget(tp->t_mountp, tp, ino, lock_flags, &ip, 0); + if (error) { + return error; + } + ASSERT(ip != NULL); + + /* + * Get a log_item_desc to point at the new item. + */ + if (ip->i_itemp == NULL) + xfs_inode_item_init(ip, mp); + iip = ip->i_itemp; + (void) xfs_trans_add_item(tp, (xfs_log_item_t *)(iip)); + + xfs_trans_inode_broot_debug(ip); + + /* + * If the IO lock has been acquired, mark that in + * the inode log item so we'll know to unlock it + * when the transaction commits. + */ + ASSERT(iip->ili_flags == 0); + if (lock_flags & XFS_IOLOCK_EXCL) { + iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL; + } else if (lock_flags & XFS_IOLOCK_SHARED) { + iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED; + } + + /* + * Initialize i_transp so we can find it with xfs_inode_incore() + * above. + */ + ip->i_transp = tp; + + *ipp = ip; + return 0; +} + + +/* + * Release the inode ip which was previously acquired with xfs_trans_iget() + * or added with xfs_trans_ijoin(). This will decrement the lock + * recursion count of the inode item. If the count goes to less than 0, + * the inode will be unlocked and disassociated from the transaction. + * + * If the inode has been modified within the transaction, it will not be + * unlocked until the transaction commits. + */ +void +xfs_trans_iput( + xfs_trans_t *tp, + xfs_inode_t *ip, + uint lock_flags) +{ + xfs_inode_log_item_t *iip; + xfs_log_item_desc_t *lidp; + + /* + * If the transaction pointer is NULL, just call xfs_iput(). + */ + if (tp == NULL) { + xfs_iput(ip, lock_flags); + } + + ASSERT(ip->i_transp == tp); + iip = ip->i_itemp; + ASSERT(iip != NULL); + + /* + * Find the item descriptor pointing to this inode's + * log item. It must be there. + */ + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)iip); + ASSERT(lidp != NULL); + ASSERT(lidp->lid_item == (xfs_log_item_t*)iip); + + /* + * Be consistent about the bookkeeping for the inode's + * io lock, but it doesn't mean much really. 
+ */ + ASSERT((iip->ili_flags & XFS_ILI_IOLOCKED_ANY) != XFS_ILI_IOLOCKED_ANY); + if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) { + ASSERT(iip->ili_flags & XFS_ILI_IOLOCKED_ANY); + ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) || + (iip->ili_flags & XFS_ILI_IOLOCKED_EXCL)); + ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) || + (iip->ili_flags & + (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED))); + if (iip->ili_iolock_recur > 0) { + iip->ili_iolock_recur--; + } + } + + /* + * If the release is just for a recursive lock on the inode lock, + * then decrement the count and return. We can assert that + * the caller is dropping an EXCL lock on the inode, because + * inode must be locked EXCL within transactions. + */ + ASSERT(lock_flags & XFS_ILOCK_EXCL); + if (iip->ili_ilock_recur > 0) { + iip->ili_ilock_recur--; + return; + } + ASSERT(iip->ili_iolock_recur == 0); + + /* + * If the inode was dirtied within this transaction, it cannot + * be released until the transaction commits. + */ + if (lidp->lid_flags & XFS_LID_DIRTY) { + return; + } + + xfs_trans_free_item(tp, lidp); + + /* + * Clear the hold and iolocked flags in the inode log item. + * We wouldn't want the next user of the inode to + * get confused. Assert that if the iolocked flag is set + * in the item then we are unlocking it in the call to xfs_iput() + * below. + */ + ASSERT((!(iip->ili_flags & XFS_ILI_IOLOCKED_ANY)) || + (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED))); + if (iip->ili_flags & (XFS_ILI_HOLD | XFS_ILI_IOLOCKED_ANY)) { + iip->ili_flags &= ~(XFS_ILI_HOLD | XFS_ILI_IOLOCKED_ANY); + } + + /* + * Unlike xfs_brelse() the inode log item cannot be + * freed, because it is embedded within the inode. + * All we have to do is release the inode. + */ + xfs_iput(ip, lock_flags); + return; +} + + +/* + * Add the locked inode to the transaction. + * The inode must be locked, and it cannot be associated with any + * transaction. The caller must specify the locks already held + * on the inode. + */ +void +xfs_trans_ijoin( + xfs_trans_t *tp, + xfs_inode_t *ip, + uint lock_flags) +{ + xfs_inode_log_item_t *iip; + + ASSERT(ip->i_transp == NULL); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(lock_flags & XFS_ILOCK_EXCL); + if (ip->i_itemp == NULL) + xfs_inode_item_init(ip, ip->i_mount); + iip = ip->i_itemp; + ASSERT(iip->ili_flags == 0); + ASSERT(iip->ili_ilock_recur == 0); + ASSERT(iip->ili_iolock_recur == 0); + + /* + * Get a log_item_desc to point at the new item. + */ + (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(iip)); + + xfs_trans_inode_broot_debug(ip); + + /* + * If the IO lock is already held, mark that in the inode log item. + */ + if (lock_flags & XFS_IOLOCK_EXCL) { + iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL; + } else if (lock_flags & XFS_IOLOCK_SHARED) { + iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED; + } + + /* + * Initialize i_transp so we can find it with xfs_inode_incore() + * in xfs_trans_iget() above. + */ + ip->i_transp = tp; +} + + + +/* + * Mark the inode as not needing to be unlocked when the inode item's + * IOP_UNLOCK() routine is called. The inode must already be locked + * and associated with the given transaction. + */ +/*ARGSUSED*/ +void +xfs_trans_ihold( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + ASSERT(ip->i_transp == tp); + ASSERT(ip->i_itemp != NULL); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + + ip->i_itemp->ili_flags |= XFS_ILI_HOLD; +} + +/* + * Cancel the previous inode hold request made on this inode + * for this transaction. 
+ */ +/*ARGSUSED*/ +void +xfs_trans_ihold_release( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + ASSERT(ip->i_transp == tp); + ASSERT(ip->i_itemp != NULL); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD); + + ip->i_itemp->ili_flags &= ~XFS_ILI_HOLD; +} + + +/* + * This is called to mark the fields indicated in fieldmask as needing + * to be logged when the transaction is committed. The inode must + * already be associated with the given transaction. + * + * The values for fieldmask are defined in xfs_inode_item.h. We always + * log all of the core inode if any of it has changed, and we always log + * all of the inline data/extents/b-tree root if any of them has changed. + */ +void +xfs_trans_log_inode( + xfs_trans_t *tp, + xfs_inode_t *ip, + uint flags) +{ + xfs_log_item_desc_t *lidp; + + ASSERT(ip->i_transp == tp); + ASSERT(ip->i_itemp != NULL); + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + + lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp)); + ASSERT(lidp != NULL); + + tp->t_flags |= XFS_TRANS_DIRTY; + lidp->lid_flags |= XFS_LID_DIRTY; + + /* + * Always OR in the bits from the ili_last_fields field. + * This is to coordinate with the xfs_iflush() and xfs_iflush_done() + * routines in the eventual clearing of the ilf_fields bits. + * See the big comment in xfs_iflush() for an explanation of + * this coordination mechanism. + */ + flags |= ip->i_itemp->ili_last_fields; + ip->i_itemp->ili_format.ilf_fields |= flags; +} + +#ifdef XFS_TRANS_DEBUG +/* + * Keep track of the state of the inode btree root to make sure we + * log it properly. + */ +STATIC void +xfs_trans_inode_broot_debug( + xfs_inode_t *ip) +{ + xfs_inode_log_item_t *iip; + + ASSERT(ip->i_itemp != NULL); + iip = ip->i_itemp; + if (iip->ili_root_size != 0) { + ASSERT(iip->ili_orig_root != NULL); + kmem_free(iip->ili_orig_root, iip->ili_root_size); + iip->ili_root_size = 0; + iip->ili_orig_root = NULL; + } + if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) { + ASSERT((ip->i_df.if_broot != NULL) && + (ip->i_df.if_broot_bytes > 0)); + iip->ili_root_size = ip->i_df.if_broot_bytes; + iip->ili_orig_root = + (char*)kmem_alloc(iip->ili_root_size, KM_SLEEP); + memcpy(iip->ili_orig_root, (char*)(ip->i_df.if_broot), + iip->ili_root_size); + } +} +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_trans_item.c linux.22-ac2/fs/xfs/xfs_trans_item.c --- linux.vanilla/fs/xfs/xfs_trans_item.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_trans_item.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,561 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever.
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" + +STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *, + int, int, xfs_lsn_t); + +/* + * This is called to add the given log item to the transaction's + * list of log items. It must find a free log item descriptor + * or allocate a new one and add the item to that descriptor. + * The function returns a pointer to item descriptor used to point + * to the new item. The log item will now point to its new descriptor + * with its li_desc field. + */ +xfs_log_item_desc_t * +xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) +{ + xfs_log_item_desc_t *lidp; + xfs_log_item_chunk_t *licp; + int i=0; + + /* + * If there are no free descriptors, allocate a new chunk + * of them and put it at the front of the chunk list. + */ + if (tp->t_items_free == 0) { + licp = (xfs_log_item_chunk_t*) + kmem_alloc(sizeof(xfs_log_item_chunk_t), KM_SLEEP); + ASSERT(licp != NULL); + /* + * Initialize the chunk, and then + * claim the first slot in the newly allocated chunk. + */ + XFS_LIC_INIT(licp); + XFS_LIC_CLAIM(licp, 0); + licp->lic_unused = 1; + XFS_LIC_INIT_SLOT(licp, 0); + lidp = XFS_LIC_SLOT(licp, 0); + + /* + * Link in the new chunk and update the free count. + */ + licp->lic_next = tp->t_items.lic_next; + tp->t_items.lic_next = licp; + tp->t_items_free = XFS_LIC_NUM_SLOTS - 1; + + /* + * Initialize the descriptor and the generic portion + * of the log item. + * + * Point the new slot at this item and return it. + * Also point the log item at its currently active + * descriptor and set the item's mount pointer. + */ + lidp->lid_item = lip; + lidp->lid_flags = 0; + lidp->lid_size = 0; + lip->li_desc = lidp; + lip->li_mountp = tp->t_mountp; + return (lidp); + } + + /* + * Find the free descriptor. It is somewhere in the chunklist + * of descriptors. + */ + licp = &tp->t_items; + while (licp != NULL) { + if (XFS_LIC_VACANCY(licp)) { + if (licp->lic_unused <= XFS_LIC_MAX_SLOT) { + i = licp->lic_unused; + ASSERT(XFS_LIC_ISFREE(licp, i)); + break; + } + for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) { + if (XFS_LIC_ISFREE(licp, i)) + break; + } + ASSERT(i <= XFS_LIC_MAX_SLOT); + break; + } + licp = licp->lic_next; + } + ASSERT(licp != NULL); + /* + * If we find a free descriptor, claim it, + * initialize it, and return it. + */ + XFS_LIC_CLAIM(licp, i); + if (licp->lic_unused <= i) { + licp->lic_unused = i + 1; + XFS_LIC_INIT_SLOT(licp, i); + } + lidp = XFS_LIC_SLOT(licp, i); + tp->t_items_free--; + lidp->lid_item = lip; + lidp->lid_flags = 0; + lidp->lid_size = 0; + lip->li_desc = lidp; + lip->li_mountp = tp->t_mountp; + return (lidp); +} + +/* + * Free the given descriptor. + * + * This requires setting the bit in the chunk's free mask corresponding + * to the given slot. 
+ */ +void +xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) +{ + uint slot; + xfs_log_item_chunk_t *licp; + xfs_log_item_chunk_t **licpp; + + slot = XFS_LIC_DESC_TO_SLOT(lidp); + licp = XFS_LIC_DESC_TO_CHUNK(lidp); + XFS_LIC_RELSE(licp, slot); + lidp->lid_item->li_desc = NULL; + tp->t_items_free++; + + /* + * If there are no more used items in the chunk and this is not + * the chunk embedded in the transaction structure, then free + * the chunk. First pull it from the chunk list and then + * free it back to the heap. We didn't bother with a doubly + * linked list here because the lists should be very short + * and this is not a performance path. It's better to save + * the memory of the extra pointer. + * + * Also decrement the transaction structure's count of free items + * by the number in a chunk since we are freeing an empty chunk. + */ + if (XFS_LIC_ARE_ALL_FREE(licp) && (licp != &(tp->t_items))) { + licpp = &(tp->t_items.lic_next); + while (*licpp != licp) { + ASSERT(*licpp != NULL); + licpp = &((*licpp)->lic_next); + } + *licpp = licp->lic_next; + kmem_free(licp, sizeof(xfs_log_item_chunk_t)); + tp->t_items_free -= XFS_LIC_NUM_SLOTS; + } +} + +/* + * This is called to find the descriptor corresponding to the given + * log item. It returns a pointer to the descriptor. + * The log item MUST have a corresponding descriptor in the given + * transaction. This routine does not return NULL, it panics. + * + * The descriptor pointer is kept in the log item's li_desc field. + * Just return it. + */ +/*ARGSUSED*/ +xfs_log_item_desc_t * +xfs_trans_find_item(xfs_trans_t *tp, xfs_log_item_t *lip) +{ + ASSERT(lip->li_desc != NULL); + + return (lip->li_desc); +} + + +/* + * Return a pointer to the first descriptor in the chunk list. + * This does not return NULL if there are none, it panics. + * + * The first descriptor must be in either the first or second chunk. + * This is because the only chunk allowed to be empty is the first. + * All others are freed when they become empty. + * + * At some point this and xfs_trans_next_item() should be optimized + * to quickly look at the mask to determine if there is anything to + * look at. + */ +xfs_log_item_desc_t * +xfs_trans_first_item(xfs_trans_t *tp) +{ + xfs_log_item_chunk_t *licp; + int i; + + licp = &tp->t_items; + /* + * If it's not in the first chunk, skip to the second. + */ + if (XFS_LIC_ARE_ALL_FREE(licp)) { + licp = licp->lic_next; + } + + /* + * Return the first non-free descriptor in the chunk. + */ + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); + for (i = 0; i < licp->lic_unused; i++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + return (XFS_LIC_SLOT(licp, i)); + } + cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item"); + return(NULL); +} + + +/* + * Given a descriptor, return the next descriptor in the chunk list. + * This returns NULL if there are no more used descriptors in the list. + * + * We do this by first locating the chunk in which the descriptor resides, + * and then scanning forward in the chunk and the list for the next + * used descriptor. + */ +/*ARGSUSED*/ +xfs_log_item_desc_t * +xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) +{ + xfs_log_item_chunk_t *licp; + int i; + + licp = XFS_LIC_DESC_TO_CHUNK(lidp); + + /* + * First search the rest of the chunk. The for loop keeps us + * from referencing things beyond the end of the chunk. 
+ */ + for (i = (int)XFS_LIC_DESC_TO_SLOT(lidp) + 1; i < licp->lic_unused; i++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + return (XFS_LIC_SLOT(licp, i)); + } + + /* + * Now search the next chunk. It must be there, because the + * next chunk would have been freed if it were empty. + * If there is no next chunk, return NULL. + */ + if (licp->lic_next == NULL) { + return (NULL); + } + + licp = licp->lic_next; + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); + for (i = 0; i < licp->lic_unused; i++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + + return (XFS_LIC_SLOT(licp, i)); + } + ASSERT(0); + /* NOTREACHED */ + return 0; /* keep gcc quiet */ +} + +/* + * This is called to unlock all of the items of a transaction and to free + * all the descriptors of that transaction. + * + * It walks the list of descriptors and unlocks each item. It frees + * each chunk except that embedded in the transaction as it goes along. + */ +void +xfs_trans_free_items( + xfs_trans_t *tp, + int flags) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_chunk_t *next_licp; + int abort; + + abort = flags & XFS_TRANS_ABORT; + licp = &tp->t_items; + /* + * Special case the embedded chunk so we don't free it below. + */ + if (!XFS_LIC_ARE_ALL_FREE(licp)) { + (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); + XFS_LIC_ALL_FREE(licp); + licp->lic_unused = 0; + } + licp = licp->lic_next; + + /* + * Unlock each item in each chunk and free the chunks. + */ + while (licp != NULL) { + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); + (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); + next_licp = licp->lic_next; + kmem_free(licp, sizeof(xfs_log_item_chunk_t)); + licp = next_licp; + } + + /* + * Reset the transaction structure's free item count. + */ + tp->t_items_free = XFS_LIC_NUM_SLOTS; + tp->t_items.lic_next = NULL; +} + + + +/* + * This is called to unlock the items associated with a transaction. + * Items which were not logged should be freed. + * Those which were logged must still be tracked so they can be unpinned + * when the transaction commits. + */ +void +xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn) +{ + xfs_log_item_chunk_t *licp; + xfs_log_item_chunk_t *next_licp; + xfs_log_item_chunk_t **licpp; + int freed; + + freed = 0; + licp = &tp->t_items; + + /* + * Special case the embedded chunk so we don't free. + */ + if (!XFS_LIC_ARE_ALL_FREE(licp)) { + freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); + } + licpp = &(tp->t_items.lic_next); + licp = licp->lic_next; + + /* + * Unlock each item in each chunk, free non-dirty descriptors, + * and free empty chunks. + */ + while (licp != NULL) { + ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); + freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); + next_licp = licp->lic_next; + if (XFS_LIC_ARE_ALL_FREE(licp)) { + *licpp = next_licp; + kmem_free(licp, sizeof(xfs_log_item_chunk_t)); + freed -= XFS_LIC_NUM_SLOTS; + } else { + licpp = &(licp->lic_next); + } + ASSERT(*licpp == next_licp); + licp = next_licp; + } + + /* + * Fix the free descriptor count in the transaction. + */ + tp->t_items_free += freed; +} + +/* + * Unlock each item pointed to by a descriptor in the given chunk. + * Stamp the commit lsn into each item if necessary. + * Free descriptors pointing to items which are not dirty if freeing_chunk + * is zero. If freeing_chunk is non-zero, then we need to unlock all + * items in the chunk including those with XFS_LID_SYNC_UNLOCK set. + * Return the number of descriptors freed.
+ */ +STATIC int +xfs_trans_unlock_chunk( + xfs_log_item_chunk_t *licp, + int freeing_chunk, + int abort, + xfs_lsn_t commit_lsn) +{ + xfs_log_item_desc_t *lidp; + xfs_log_item_t *lip; + int i; + int freed; + + freed = 0; + lidp = licp->lic_descs; + for (i = 0; i < licp->lic_unused; i++, lidp++) { + if (XFS_LIC_ISFREE(licp, i)) { + continue; + } + lip = lidp->lid_item; + lip->li_desc = NULL; + + if (commit_lsn != NULLCOMMITLSN) + IOP_COMMITTING(lip, commit_lsn); + + /* XXXsup */ + if (abort) + lip->li_flags |= XFS_LI_ABORTED; + + /* if (abort) { + IOP_ABORT(lip); + } else */ + if (!(lidp->lid_flags & XFS_LID_SYNC_UNLOCK) || + freeing_chunk || abort) { + IOP_UNLOCK(lip); + } + + /* + * Free the descriptor if the item is not dirty + * within this transaction and the caller is not + * going to just free the entire thing regardless. + */ + if (!(freeing_chunk) && + (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) { + XFS_LIC_RELSE(licp, i); + freed++; + } + } + + return (freed); +} + + +/* + * This is called to add the given busy item to the transaction's + * list of busy items. It must find a free busy item descriptor + * or allocate a new one and add the item to that descriptor. + * The function returns a pointer to busy descriptor used to point + * to the new busy entry. The log busy entry will now point to its new + * descriptor with its ???? field. + */ +xfs_log_busy_slot_t * +xfs_trans_add_busy(xfs_trans_t *tp, xfs_agnumber_t ag, xfs_extlen_t idx) +{ + xfs_log_busy_chunk_t *lbcp; + xfs_log_busy_slot_t *lbsp; + int i=0; + + /* + * If there are no free descriptors, allocate a new chunk + * of them and put it at the front of the chunk list. + */ + if (tp->t_busy_free == 0) { + lbcp = (xfs_log_busy_chunk_t*) + kmem_alloc(sizeof(xfs_log_busy_chunk_t), KM_SLEEP); + ASSERT(lbcp != NULL); + /* + * Initialize the chunk, and then + * claim the first slot in the newly allocated chunk. + */ + XFS_LBC_INIT(lbcp); + XFS_LBC_CLAIM(lbcp, 0); + lbcp->lbc_unused = 1; + lbsp = XFS_LBC_SLOT(lbcp, 0); + + /* + * Link in the new chunk and update the free count. + */ + lbcp->lbc_next = tp->t_busy.lbc_next; + tp->t_busy.lbc_next = lbcp; + tp->t_busy_free = XFS_LIC_NUM_SLOTS - 1; + + /* + * Initialize the descriptor and the generic portion + * of the log item. + * + * Point the new slot at this item and return it. + * Also point the log item at its currently active + * descriptor and set the item's mount pointer. + */ + lbsp->lbc_ag = ag; + lbsp->lbc_idx = idx; + return (lbsp); + } + + /* + * Find the free descriptor. It is somewhere in the chunklist + * of descriptors. + */ + lbcp = &tp->t_busy; + while (lbcp != NULL) { + if (XFS_LBC_VACANCY(lbcp)) { + if (lbcp->lbc_unused <= XFS_LBC_MAX_SLOT) { + i = lbcp->lbc_unused; + break; + } else { + /* out-of-order vacancy */ + printk("OOO vacancy lbcp 0x%p\n", lbcp); + ASSERT(0); + } + } + lbcp = lbcp->lbc_next; + } + ASSERT(lbcp != NULL); + /* + * If we find a free descriptor, claim it, + * initialize it, and return it. 
+ */ + XFS_LBC_CLAIM(lbcp, i); + if (lbcp->lbc_unused <= i) { + lbcp->lbc_unused = i + 1; + } + lbsp = XFS_LBC_SLOT(lbcp, i); + tp->t_busy_free--; + lbsp->lbc_ag = ag; + lbsp->lbc_idx = idx; + return (lbsp); +} + + +/* + * xfs_trans_free_busy + * Free all of the busy lists from a transaction + */ +void +xfs_trans_free_busy(xfs_trans_t *tp) +{ + xfs_log_busy_chunk_t *lbcp; + xfs_log_busy_chunk_t *lbcq; + + lbcp = tp->t_busy.lbc_next; + while (lbcp != NULL) { + lbcq = lbcp->lbc_next; + kmem_free(lbcp, sizeof(xfs_log_busy_chunk_t)); + lbcp = lbcq; + } + + XFS_LBC_INIT(&tp->t_busy); + tp->t_busy.lbc_unused = 0; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_trans_priv.h linux.22-ac2/fs/xfs/xfs_trans_priv.h --- linux.vanilla/fs/xfs/xfs_trans_priv.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_trans_priv.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_TRANS_PRIV_H__ +#define __XFS_TRANS_PRIV_H__ + +struct xfs_log_item; +struct xfs_log_item_desc; +struct xfs_mount; +struct xfs_trans; + +/* + * From xfs_trans_item.c + */ +struct xfs_log_item_desc *xfs_trans_add_item(struct xfs_trans *, + struct xfs_log_item *); +void xfs_trans_free_item(struct xfs_trans *, + struct xfs_log_item_desc *); +struct xfs_log_item_desc *xfs_trans_find_item(struct xfs_trans *, + struct xfs_log_item *); +struct xfs_log_item_desc *xfs_trans_first_item(struct xfs_trans *); +struct xfs_log_item_desc *xfs_trans_next_item(struct xfs_trans *, + struct xfs_log_item_desc *); +void xfs_trans_free_items(struct xfs_trans *, int); +void xfs_trans_unlock_items(struct xfs_trans *, + xfs_lsn_t); +void xfs_trans_free_busy(xfs_trans_t *tp); +xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, + xfs_agnumber_t ag, + xfs_extlen_t idx); + +/* + * From xfs_trans_ail.c + */ +void xfs_trans_update_ail(struct xfs_mount *, + struct xfs_log_item *, xfs_lsn_t, + unsigned long); +void xfs_trans_delete_ail(struct xfs_mount *, + struct xfs_log_item *, unsigned long); +struct xfs_log_item *xfs_trans_first_ail(struct xfs_mount *, int *); +struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *, + struct xfs_log_item *, int *, int *); + + +#endif /* __XFS_TRANS_PRIV_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_trans_space.h linux.22-ac2/fs/xfs/xfs_trans_space.h --- linux.vanilla/fs/xfs/xfs_trans_space.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_trans_space.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_TRANS_SPACE_H__ +#define __XFS_TRANS_SPACE_H__ + +/* + * Components of space reservations. 
+ */ +#define XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) \ + (((mp)->m_alloc_mxr[0]) - ((mp)->m_alloc_mnr[0])) +#define XFS_EXTENTADD_SPACE_RES(mp,w) (XFS_BM_MAXLEVELS(mp,w) - 1) +#define XFS_NEXTENTADD_SPACE_RES(mp,b,w)\ + (((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \ + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \ + XFS_EXTENTADD_SPACE_RES(mp,w)) +#define XFS_DAENTER_1B(mp,w) ((w) == XFS_DATA_FORK ? (mp)->m_dirblkfsbs : 1) +#define XFS_DAENTER_DBS(mp,w) \ + (XFS_DA_NODE_MAXDEPTH + \ + ((XFS_DIR_IS_V2(mp) && (w) == XFS_DATA_FORK) ? 2 : 0)) +#define XFS_DAENTER_BLOCKS(mp,w) \ + (XFS_DAENTER_1B(mp,w) * XFS_DAENTER_DBS(mp,w)) +#define XFS_DAENTER_BMAP1B(mp,w) \ + XFS_NEXTENTADD_SPACE_RES(mp, XFS_DAENTER_1B(mp, w), w) +#define XFS_DAENTER_BMAPS(mp,w) \ + (XFS_DAENTER_DBS(mp,w) * XFS_DAENTER_BMAP1B(mp,w)) +#define XFS_DAENTER_SPACE_RES(mp,w) \ + (XFS_DAENTER_BLOCKS(mp,w) + XFS_DAENTER_BMAPS(mp,w)) +#define XFS_DAREMOVE_SPACE_RES(mp,w) XFS_DAENTER_BMAPS(mp,w) +#define XFS_DIRENTER_MAX_SPLIT(mp,nl) \ + (((mp)->m_sb.sb_blocksize == 512 && \ + XFS_DIR_IS_V1(mp) && \ + (nl) >= XFS_DIR_LEAF_CAN_DOUBLE_SPLIT_LEN) ? 2 : 1) +#define XFS_DIRENTER_SPACE_RES(mp,nl) \ + (XFS_DAENTER_SPACE_RES(mp, XFS_DATA_FORK) * \ + XFS_DIRENTER_MAX_SPLIT(mp,nl)) +#define XFS_DIRREMOVE_SPACE_RES(mp) \ + XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) +#define XFS_IALLOC_SPACE_RES(mp) \ + (XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp)-1) + +/* + * Space reservation values for various transactions. + */ +#define XFS_ADDAFORK_SPACE_RES(mp) \ + ((mp)->m_dirblkfsbs + \ + (XFS_DIR_IS_V1(mp) ? 0 : XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK))) +#define XFS_ATTRRM_SPACE_RES(mp) \ + XFS_DAREMOVE_SPACE_RES(mp, XFS_ATTR_FORK) +/* This macro is not used - see inline code in xfs_attr_set */ +#define XFS_ATTRSET_SPACE_RES(mp, v) \ + (XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK) + XFS_B_TO_FSB(mp, v)) +#define XFS_CREATE_SPACE_RES(mp,nl) \ + (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl)) +#define XFS_DIOSTRAT_SPACE_RES(mp, v) \ + (XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK) + (v)) +#define XFS_GROWFS_SPACE_RES(mp) \ + (2 * XFS_AG_MAXLEVELS(mp)) +#define XFS_GROWFSRT_SPACE_RES(mp,b) \ + ((b) + XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK)) +#define XFS_LINK_SPACE_RES(mp,nl) \ + XFS_DIRENTER_SPACE_RES(mp,nl) +#define XFS_MKDIR_SPACE_RES(mp,nl) \ + (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl)) +#define XFS_QM_DQALLOC_SPACE_RES(mp) \ + (XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK) + \ + XFS_DQUOT_CLUSTER_SIZE_FSB) +#define XFS_QM_QINOCREATE_SPACE_RES(mp) \ + XFS_IALLOC_SPACE_RES(mp) +#define XFS_REMOVE_SPACE_RES(mp) \ + XFS_DIRREMOVE_SPACE_RES(mp) +#define XFS_RENAME_SPACE_RES(mp,nl) \ + (XFS_DIRREMOVE_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl)) +#define XFS_SYMLINK_SPACE_RES(mp,nl,b) \ + (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl) + (b)) + +#endif /* __XFS_TRANS_SPACE_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_types.h linux.22-ac2/fs/xfs/xfs_types.h --- linux.vanilla/fs/xfs/xfs_types.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_types.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_TYPES_H__ +#define __XFS_TYPES_H__ + +#ifdef __KERNEL__ + +/* + * POSIX Extensions + */ +typedef unsigned char uchar_t; +typedef unsigned short ushort_t; +typedef unsigned int uint_t; +typedef unsigned long ulong_t; + +/* + * Additional type declarations for XFS + */ +typedef signed char __int8_t; +typedef unsigned char __uint8_t; +typedef signed short int __int16_t; +typedef unsigned short int __uint16_t; +typedef signed int __int32_t; +typedef unsigned int __uint32_t; +typedef signed long long int __int64_t; +typedef unsigned long long int __uint64_t; + +typedef enum { B_FALSE,B_TRUE } boolean_t; +typedef __int64_t prid_t; /* project ID */ +typedef __uint32_t inst_t; /* an instruction */ + +typedef __u64 xfs_off_t; +typedef __u64 xfs_ino_t; /* type */ +typedef __s64 xfs_daddr_t; /* type */ +typedef char * xfs_caddr_t; /* type */ +typedef __u32 xfs_dev_t; + +/* __psint_t is the same size as a pointer */ +#if (BITS_PER_LONG == 32) +typedef __int32_t __psint_t; +typedef __uint32_t __psunsigned_t; +#elif (BITS_PER_LONG == 64) +typedef __int64_t __psint_t; +typedef __uint64_t __psunsigned_t; +#else +#error BITS_PER_LONG must be 32 or 64 +#endif + +#endif /* __KERNEL__ */ + +/* + * Some types are conditional based on the selected configuration. + * Set XFS_BIG_FILESYSTEMS=1 or 0 depending on the desired configuration. + * XFS_BIG_FILESYSTEMS needs daddr_t to be 64 bits + * + * On linux right now we are limited to 2^32 512 byte blocks in a + * filesystem, Once this limit is changed, setting this to 1 + * will allow XFS to go larger. With BIG_FILESYSTEMS set to 0 + * a 4K block filesystem could still theoretically be 16Gbytes + * long, so on an ia32 box the 32 bit page index will then be + * the limiting factor. + */ + +#if defined(CONFIG_LBD) || (defined(HAVE_SECTOR_T) && (BITS_PER_LONG == 64)) +# ifndef XFS_BIG_FILESYSTEMS +# define XFS_BIG_FILESYSTEMS 1 +# endif +#else +# ifndef XFS_BIG_FILESYSTEMS +# define XFS_BIG_FILESYSTEMS 0 +# endif +#endif + +typedef __uint32_t xfs_agblock_t; /* blockno in alloc. 
group */ +typedef __uint32_t xfs_extlen_t; /* extent length in blocks */ +typedef __uint32_t xfs_agnumber_t; /* allocation group number */ +typedef __int32_t xfs_extnum_t; /* # of extents in a file */ +typedef __int16_t xfs_aextnum_t; /* # extents in an attribute fork */ +typedef __int64_t xfs_fsize_t; /* bytes in a file */ +typedef __uint64_t xfs_ufsize_t; /* unsigned bytes in a file */ + +typedef __int32_t xfs_suminfo_t; /* type of bitmap summary info */ +typedef __int32_t xfs_rtword_t; /* word type for bitmap manipulations */ + +typedef __int64_t xfs_lsn_t; /* log sequence number */ +typedef __int32_t xfs_tid_t; /* transaction identifier */ + +typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */ +typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */ + +typedef __uint16_t xfs_prid_t; /* prid_t truncated to 16bits in XFS */ + +/* + * These types are 64 bits on disk but are either 32 or 64 bits in memory. + * Disk based types: + */ +typedef __uint64_t xfs_dfsbno_t; /* blockno in filesystem (agno|agbno) */ +typedef __uint64_t xfs_drfsbno_t; /* blockno in filesystem (raw) */ +typedef __uint64_t xfs_drtbno_t; /* extent (block) in realtime area */ +typedef __uint64_t xfs_dfiloff_t; /* block number in a file */ +typedef __uint64_t xfs_dfilblks_t; /* number of blocks in a file */ + +/* + * Memory based types are conditional. + */ +#if XFS_BIG_FILESYSTEMS +typedef __uint64_t xfs_fsblock_t; /* blockno in filesystem (agno|agbno) */ +typedef __uint64_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ +typedef __uint64_t xfs_rtblock_t; /* extent (block) in realtime area */ +typedef __int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ +#else +typedef __uint32_t xfs_fsblock_t; /* blockno in filesystem (agno|agbno) */ +typedef __uint32_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ +typedef __uint32_t xfs_rtblock_t; /* extent (block) in realtime area */ +typedef __int32_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ +#endif +typedef __uint64_t xfs_fileoff_t; /* block number in a file */ +typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */ +typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */ + +typedef __uint8_t xfs_arch_t; /* architecture of an xfs fs */ + +/* + * Null values for the types. + */ +#define NULLDFSBNO ((xfs_dfsbno_t)-1) +#define NULLDRFSBNO ((xfs_drfsbno_t)-1) +#define NULLDRTBNO ((xfs_drtbno_t)-1) +#define NULLDFILOFF ((xfs_dfiloff_t)-1) + +#define NULLFSBLOCK ((xfs_fsblock_t)-1) +#define NULLRFSBLOCK ((xfs_rfsblock_t)-1) +#define NULLRTBLOCK ((xfs_rtblock_t)-1) +#define NULLFILEOFF ((xfs_fileoff_t)-1) + +#define NULLAGBLOCK ((xfs_agblock_t)-1) +#define NULLAGNUMBER ((xfs_agnumber_t)-1) +#define NULLEXTNUM ((xfs_extnum_t)-1) + +#define NULLCOMMITLSN ((xfs_lsn_t)-1) + +/* + * Max values for extlen, extnum, aextnum. + */ +#define MAXEXTLEN ((xfs_extlen_t)0x001fffff) /* 21 bits */ +#define MAXEXTNUM ((xfs_extnum_t)0x7fffffff) /* signed int */ +#define MAXAEXTNUM ((xfs_aextnum_t)0x7fff) /* signed short */ + +/* + * MAXNAMELEN is the length (including the terminating null) of + * the longest permissible file (component) name. 
+ */ +#define MAXNAMELEN 256 + +typedef struct xfs_dirent { /* data from readdir() */ + xfs_ino_t d_ino; /* inode number of entry */ + xfs_off_t d_off; /* offset of disk directory entry */ + unsigned short d_reclen; /* length of this record */ + char d_name[1]; /* name of file */ +} xfs_dirent_t; + +#define DIRENTBASESIZE (((xfs_dirent_t *)0)->d_name - (char *)0) +#define DIRENTSIZE(namelen) \ + ((DIRENTBASESIZE + (namelen) + \ + sizeof(xfs_off_t)) & ~(sizeof(xfs_off_t) - 1)) + +typedef enum { + XFS_LOOKUP_EQi, XFS_LOOKUP_LEi, XFS_LOOKUP_GEi +} xfs_lookup_t; + +typedef enum { + XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_BMAPi, XFS_BTNUM_INOi, + XFS_BTNUM_MAX +} xfs_btnum_t; + + +/* + * Juggle IRIX device numbers - still used in ondisk structures + */ +#define XFS_DEV_BITSMAJOR 14 +#define XFS_DEV_BITSMINOR 18 +#define XFS_DEV_MAXMAJ 0x1ff +#define XFS_DEV_MAXMIN 0x3ffff +#define XFS_DEV_MAJOR(dev) ((int)(((unsigned)(dev)>>XFS_DEV_BITSMINOR) \ + & XFS_DEV_MAXMAJ)) +#define XFS_DEV_MINOR(dev) ((int)((dev)&XFS_DEV_MAXMIN)) +#define XFS_MKDEV(major,minor) ((xfs_dev_t)(((major)<<XFS_DEV_BITSMINOR) \ + | (minor&XFS_DEV_MAXMIN))) + +#endif /* __XFS_TYPES_H__ */ + error = XFS_DIR_LOOKUP(dp->i_mount, NULL, dp, + VNAME(dentry), VNAMELEN(dentry), inum); + if (!error) { + /* + * Unlock the directory. We do this because we can't + * hold the directory lock while doing the vn_get() + * in xfs_iget(). Doing so could cause us to hold + * a lock while waiting for the inode to finish + * being inactive while it's waiting for a log + * reservation in the inactive routine. + */ + xfs_iunlock(dp, lock_mode); + error = xfs_iget(dp->i_mount, NULL, *inum, 0, ipp, 0); + xfs_ilock(dp, lock_mode); + + if (error) { + *ipp = NULL; + } else if ((*ipp)->i_d.di_mode == 0) { + /* + * The inode has been freed. Something is + * wrong so just get out of here. + */ + xfs_iunlock(dp, lock_mode); + xfs_iput_new(*ipp, 0); + *ipp = NULL; + xfs_ilock(dp, lock_mode); + error = XFS_ERROR(ENOENT); + } + } + return error; +} + +/* + * Allocates a new inode from disk and returns a pointer to the + * incore copy. This routine will internally commit the current + * transaction and allocate a new one if the Space Manager needed + * to do an allocation to replenish the inode free-list. + * + * This routine is designed to be called from xfs_create and + * xfs_create_dir. + * + */ +int +xfs_dir_ialloc( + xfs_trans_t **tpp, /* input: current transaction; + output: may be a new transaction. */ + xfs_inode_t *dp, /* directory within which to allocate + the inode. */ + mode_t mode, + nlink_t nlink, + xfs_dev_t rdev, + cred_t *credp, + prid_t prid, /* project id */ + int okalloc, /* ok to allocate new space */ + xfs_inode_t **ipp, /* pointer to inode; it will be + locked. */ + int *committed) + +{ + xfs_trans_t *tp; + xfs_trans_t *ntp; + xfs_inode_t *ip; + xfs_buf_t *ialloc_context = NULL; + boolean_t call_again = B_FALSE; + int code; + uint log_res; + uint log_count; + void *dqinfo; + uint tflags; + + tp = *tpp; + ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); + + /* + * xfs_ialloc will return a pointer to an incore inode if + * the Space Manager has an available inode on the free + * list. Otherwise, it will do an allocation and replenish + * the freelist. Since we can only do one allocation per + * transaction without deadlocks, we will need to commit the + * current transaction and start a new one. We will then + * need to call xfs_ialloc again to get the inode. + * + * If xfs_ialloc did an allocation to replenish the freelist, + * it returns the bp containing the head of the freelist as + * ialloc_context.
We will hold a lock on it across the + * transaction commit so that no other process can steal + * the inode(s) that we've just allocated. + */ + code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, okalloc, + &ialloc_context, &call_again, &ip); + + /* + * Return an error if we were unable to allocate a new inode. + * This should only happen if we run out of space on disk or + * encounter a disk error. + */ + if (code) { + *ipp = NULL; + return code; + } + if (!call_again && (ip == NULL)) { + *ipp = NULL; + return XFS_ERROR(ENOSPC); + } + + /* + * If call_again is set, then we were unable to get an + * inode in one operation. We need to commit the current + * transaction and call xfs_ialloc() again. It is guaranteed + * to succeed the second time. + */ + if (call_again) { + + /* + * Normally, xfs_trans_commit releases all the locks. + * We call bhold to hang on to the ialloc_context across + * the commit. Holding this buffer prevents any other + * processes from doing any allocations in this + * allocation group. + */ + xfs_trans_bhold(tp, ialloc_context); + /* + * Save the log reservation so we can use + * them in the next transaction. + */ + log_res = xfs_trans_get_log_res(tp); + log_count = xfs_trans_get_log_count(tp); + + /* + * We want the quota changes to be associated with the next + * transaction, NOT this one. So, detach the dqinfo from this + * and attach it to the next transaction. + */ + dqinfo = NULL; + tflags = 0; + if (tp->t_dqinfo) { + dqinfo = (void *)tp->t_dqinfo; + tp->t_dqinfo = NULL; + tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY; + tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY); + } + + ntp = xfs_trans_dup(tp); + code = xfs_trans_commit(tp, 0, NULL); + tp = ntp; + if (committed != NULL) { + *committed = 1; + } + /* + * If we get an error during the commit processing, + * release the buffer that is still held and return + * to the caller. + */ + if (code) { + xfs_buf_relse(ialloc_context); + if (dqinfo) { + tp->t_dqinfo = dqinfo; + XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp); + } + *tpp = ntp; + *ipp = NULL; + return code; + } + code = xfs_trans_reserve(tp, 0, log_res, 0, + XFS_TRANS_PERM_LOG_RES, log_count); + /* + * Re-attach the quota info that we detached from prev trx. + */ + if (dqinfo) { + tp->t_dqinfo = dqinfo; + tp->t_flags |= tflags; + } + + if (code) { + xfs_buf_relse(ialloc_context); + *tpp = ntp; + *ipp = NULL; + return code; + } + xfs_trans_bjoin(tp, ialloc_context); + + /* + * Call ialloc again. Since we've locked out all + * other allocations in this allocation group, + * this call should always succeed. + */ + code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, + okalloc, &ialloc_context, &call_again, &ip); + + /* + * If we get an error at this point, return to the caller + * so that the current transaction can be aborted. + */ + if (code) { + *tpp = tp; + *ipp = NULL; + return code; + } + ASSERT ((!call_again) && (ip != NULL)); + + } else { + if (committed != NULL) { + *committed = 0; + } + } + + *ipp = ip; + *tpp = tp; + + return 0; +} + +/* + * Decrement the link count on an inode & log the change. + * If this causes the link count to go to zero, initiate the + * logging activity required to truncate a file. + */ +int /* error */ +xfs_droplink( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + int error; + + xfs_ichgtime(ip, XFS_ICHGTIME_CHG); + + ASSERT (ip->i_d.di_nlink > 0); + ip->i_d.di_nlink--; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + error = 0; + if (ip->i_d.di_nlink == 0) { + /* + * We're dropping the last link to this file. 
+ * Move the on-disk inode to the AGI unlinked list. + * From xfs_inactive() we will pull the inode from + * the list and free it. + */ + error = xfs_iunlink(tp, ip); + } + return error; +} + +/* + * This gets called when the inode's version needs to be changed from 1 to 2. + * Currently this happens when the nlink field overflows the old 16-bit value + * or when chproj is called to change the project for the first time. + * As a side effect the superblock version will also get rev'd + * to contain the NLINK bit. + */ +void +xfs_bump_ino_vers2( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + xfs_mount_t *mp; + unsigned long s; + + ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); + ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1); + + ip->i_d.di_version = XFS_DINODE_VERSION_2; + ip->i_d.di_onlink = 0; + memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); + mp = tp->t_mountp; + if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { + s = XFS_SB_LOCK(mp); + if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { + XFS_SB_VERSION_ADDNLINK(&mp->m_sb); + XFS_SB_UNLOCK(mp, s); + xfs_mod_sb(tp, XFS_SB_VERSIONNUM); + } else { + XFS_SB_UNLOCK(mp, s); + } + } + /* Caller must log the inode */ +} + +/* + * Increment the link count on an inode & log the change. + */ +int +xfs_bumplink( + xfs_trans_t *tp, + xfs_inode_t *ip) +{ + if (ip->i_d.di_nlink >= XFS_MAXLINK) + return XFS_ERROR(EMLINK); + xfs_ichgtime(ip, XFS_ICHGTIME_CHG); + + ASSERT(ip->i_d.di_nlink > 0); + ip->i_d.di_nlink++; + if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && + (ip->i_d.di_nlink > XFS_MAXLINK_1)) { + /* + * The inode has increased its number of links beyond + * what can fit in an old format inode. It now needs + * to be converted to a version 2 inode with a 32 bit + * link count. If this is the first inode in the file + * system to do this, then we need to bump the superblock + * version number as well. + */ + xfs_bump_ino_vers2(tp, ip); + } + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + return 0; +} + +/* + * Try to truncate the given file to 0 length. Currently called + * only out of xfs_remove when it has to truncate a file to free + * up space for the remove to proceed. + */ +int +xfs_truncate_file( + xfs_mount_t *mp, + xfs_inode_t *ip) +{ + xfs_trans_t *tp; + int error; + +#ifdef QUOTADEBUG + /* + * This is called to truncate the quotainodes too. + */ + if (XFS_IS_UQUOTA_ON(mp)) { + if (ip->i_ino != mp->m_sb.sb_uquotino) + ASSERT(ip->i_udquot); + } + if (XFS_IS_GQUOTA_ON(mp)) { + if (ip->i_ino != mp->m_sb.sb_gquotino) + ASSERT(ip->i_gdquot); + } +#endif + /* + * Make the call to xfs_itruncate_start before starting the + * transaction, because we cannot make the call while we're + * in a transaction. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, (xfs_fsize_t)0); + + tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); + if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; + } + + /* + * Follow the normal truncate locking protocol. Since we + * hold the inode in the transaction, we know that it's number + * of references will stay constant. + */ + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(tp, ip); + /* + * Signal a sync xaction. The only case where that isn't + * the case is if we're truncating an already unlinked file + * on a wsync fs. 
In that case, we know the blocks can't + * reappear in the file because the links to file are + * permanently toast. Currently, we're always going to + * want a sync transaction because this code is being + * called from places where nlink is guaranteed to be 1 + * but I'm leaving the tests in to protect against future + * changes -- rcc. + */ + error = xfs_itruncate_finish(&tp, ip, (xfs_fsize_t)0, + XFS_DATA_FORK, + ((ip->i_d.di_nlink != 0 || + !(mp->m_flags & XFS_MOUNT_WSYNC)) + ? 1 : 0)); + if (error) { + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT); + } else { + xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, + NULL); + } + xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + + return error; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_utils.h linux.22-ac2/fs/xfs/xfs_utils.h --- linux.vanilla/fs/xfs/xfs_utils.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_utils.h 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef __XFS_UTILS_H__ +#define __XFS_UTILS_H__ + +#define IRELE(ip) VN_RELE(XFS_ITOV(ip)) +#define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) +#define ITRACE(ip) vn_trace_ref(XFS_ITOV(ip), __FILE__, __LINE__, \ + (inst_t *)__return_address) + +extern int xfs_rename (bhv_desc_t *, vname_t *, vnode_t *, vname_t *, cred_t *); +extern int xfs_get_dir_entry (vname_t *, xfs_inode_t **); +extern int xfs_dir_lookup_int (bhv_desc_t *, uint, vname_t *, xfs_ino_t *, + xfs_inode_t **); +extern int xfs_truncate_file (xfs_mount_t *, xfs_inode_t *); +extern int xfs_dir_ialloc (xfs_trans_t **, xfs_inode_t *, mode_t, nlink_t, + xfs_dev_t, cred_t *, prid_t, int, + xfs_inode_t **, int *); +extern int xfs_droplink (xfs_trans_t *, xfs_inode_t *); +extern int xfs_bumplink (xfs_trans_t *, xfs_inode_t *); +extern void xfs_bump_ino_vers2 (xfs_trans_t *, xfs_inode_t *); + +#endif /* __XFS_UTILS_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_vfsops.c linux.22-ac2/fs/xfs/xfs_vfsops.c --- linux.vanilla/fs/xfs/xfs_vfsops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_vfsops.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,1857 @@ +/* + * XFS filesystem operations. + * + * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_btree.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_ag.h" +#include "xfs_error.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_rw.h" +#include "xfs_buf_item.h" +#include "xfs_extfree_item.h" +#include "xfs_quota.h" +#include "xfs_dmapi.h" +#include "xfs_dir2_trace.h" +#include "xfs_acl.h" +#include "xfs_attr.h" +#include "xfs_clnt.h" +#include "xfs_log_priv.h" + +STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); + +int +xfs_init(void) +{ + extern kmem_zone_t *xfs_da_state_zone; + extern kmem_zone_t *xfs_bmap_free_item_zone; + extern kmem_zone_t *xfs_btree_cur_zone; + extern kmem_zone_t *xfs_inode_zone; + extern kmem_zone_t *xfs_chashlist_zone; + extern kmem_zone_t *xfs_trans_zone; + extern kmem_zone_t *xfs_buf_item_zone; + extern kmem_zone_t *xfs_efd_zone; + extern kmem_zone_t *xfs_efi_zone; + extern kmem_zone_t *xfs_dabuf_zone; +#ifdef DEBUG_NOT + extern ktrace_t *xfs_alloc_trace_buf; + extern ktrace_t *xfs_bmap_trace_buf; + extern ktrace_t *xfs_bmbt_trace_buf; + extern ktrace_t *xfs_dir_trace_buf; + extern ktrace_t *xfs_attr_trace_buf; + extern ktrace_t *xfs_dir2_trace_buf; +#endif /* DEBUG */ +#ifdef XFS_DABUF_DEBUG + extern lock_t xfs_dabuf_global_lock; +#endif + extern int xfs_refcache_size; + +#ifdef XFS_DABUF_DEBUG + spinlock_init(&xfs_dabuf_global_lock, "xfsda"); +#endif + + /* + * Initialize all of the zone allocators we use. + */ + xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t), + "xfs_bmap_free_item"); + xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), + "xfs_btree_cur"); + xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode"); + xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); + xfs_da_state_zone = + kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state"); + xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); + + /* + * The size of the zone allocated buf log item is the maximum + * size possible under XFS. This wastes a little bit of memory, + * but it is much faster. 
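/*
 * A small standalone sketch of the worst-case sizing used for the
 * xfs_buf_item zone above: the trailing dirty bitmap is sized for the
 * largest buffer the filesystem can ever log, so every log item fits in one
 * fixed-size object.  The constant values are assumptions for the example,
 * not quoted from the XFS headers.
 */
#include <stdio.h>

#define MAX_BLOCKSIZE   65536u  /* assumed largest buffer, in bytes */
#define BLI_CHUNK       128u    /* assumed bytes covered per bitmap bit */
#define NBWORD          32u     /* assumed bits per bitmap word */

int main(void)
{
        unsigned chunks = MAX_BLOCKSIZE / BLI_CHUNK;     /* bits in the dirty map */
        unsigned words  = chunks / NBWORD;               /* whole bitmap words */
        unsigned extra  = words * (unsigned)sizeof(int); /* bytes appended per item */

        /* 65536/128 = 512 bits -> 512/32 = 16 words -> 64 extra bytes */
        printf("%u chunks, %u words, %u extra bytes per buf log item\n",
               chunks, words, extra);
        return 0;
}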
+ */ + xfs_buf_item_zone = + kmem_zone_init((sizeof(xfs_buf_log_item_t) + + (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) / + NBWORD) * sizeof(int))), + "xfs_buf_item"); + xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + + ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), + "xfs_efd_item"); + xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) + + ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), + "xfs_efi_item"); + xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); + xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili"); + xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t), + "xfs_chashlist"); + _ACL_ZONE_INIT(xfs_acl_zone, "xfs_acl"); + +#ifdef CONFIG_XFS_VNODE_TRACING + ktrace_init(VNODE_TRACE_SIZE); +#else +#ifdef DEBUG + ktrace_init(64); +#endif +#endif + + /* + * Allocate global trace buffers. + */ +#ifdef XFS_ALLOC_TRACE + xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_BMAP_TRACE + xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_BMBT_TRACE + xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_DIR_TRACE + xfs_dir_trace_buf = ktrace_alloc(XFS_DIR_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_ATTR_TRACE + xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_SLEEP); +#endif +#ifdef XFS_DIR2_TRACE + xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_SLEEP); +#endif + + xfs_dir_startup(); + +#if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) + xfs_error_test_init(); +#endif /* DEBUG || INDUCE_IO_ERROR */ + + xfs_init_procfs(); + xfs_sysctl_register(); + + xfs_refcache_size = xfs_params.refcache_size; + + /* + * The inode hash table is created on a per mounted + * file system bases. + */ + + return 0; +} + +void +xfs_cleanup(void) +{ + extern kmem_zone_t *xfs_bmap_free_item_zone; + extern kmem_zone_t *xfs_btree_cur_zone; + extern kmem_zone_t *xfs_inode_zone; + extern kmem_zone_t *xfs_trans_zone; + extern kmem_zone_t *xfs_da_state_zone; + extern kmem_zone_t *xfs_dabuf_zone; + extern kmem_zone_t *xfs_efd_zone; + extern kmem_zone_t *xfs_efi_zone; + extern kmem_zone_t *xfs_buf_item_zone; + extern kmem_zone_t *xfs_chashlist_zone; + extern xfs_inode_t **xfs_refcache; + + xfs_cleanup_procfs(); + xfs_sysctl_unregister(); + if (xfs_refcache) { + kmem_free(xfs_refcache, + XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *)); + } + + kmem_cache_destroy(xfs_bmap_free_item_zone); + kmem_cache_destroy(xfs_btree_cur_zone); + kmem_cache_destroy(xfs_inode_zone); + kmem_cache_destroy(xfs_trans_zone); + kmem_cache_destroy(xfs_da_state_zone); + kmem_cache_destroy(xfs_dabuf_zone); + kmem_cache_destroy(xfs_buf_item_zone); + kmem_cache_destroy(xfs_efd_zone); + kmem_cache_destroy(xfs_efi_zone); + kmem_cache_destroy(xfs_ifork_zone); + kmem_cache_destroy(xfs_ili_zone); + kmem_cache_destroy(xfs_chashlist_zone); + _ACL_ZONE_DESTROY(xfs_acl_zone); +#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING)) + ktrace_uninit(); +#endif +} + +/* + * xfs_start_flags + * + * This function fills in xfs_mount_t fields based on mount args. + * Note: the superblock has _not_ yet been read in. + */ +STATIC int +xfs_start_flags( + struct xfs_mount_args *ap, + struct xfs_mount *mp, + int ronly) +{ + /* Values are in BBs */ + if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { + /* + * At this point the superblock has not been read + * in, therefore we do not know the block size. + * Before, the mount call ends we will convert + * these to FSBs. 
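/*
 * A standalone sketch of the unit conversion promised in the comment above:
 * stripe geometry arrives from mount in 512-byte basic blocks (BBs) and is
 * later re-expressed in filesystem blocks (FSBs) once the superblock's
 * block size is known.  The helper and the sample numbers are illustrative
 * only, not the XFS conversion macros.
 */
#include <stdio.h>

#define BBSIZE 512u                     /* bytes per basic block */

static unsigned bb_to_fsb(unsigned bb, unsigned fs_blocksize)
{
        return (bb * BBSIZE) / fs_blocksize;
}

int main(void)
{
        unsigned sunit_bb  = 128;       /* 128 BBs = 64 KiB stripe unit */
        unsigned blocksize = 4096;      /* a typical sb_blocksize */

        /* 128 * 512 / 4096 = 16 filesystem blocks */
        printf("sunit = %u FSBs\n", bb_to_fsb(sunit_bb, blocksize));
        return 0;
}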
+ */ + mp->m_dalign = ap->sunit; + mp->m_swidth = ap->swidth; + } + + if (ap->logbufs != 0 && ap->logbufs != -1 && + (ap->logbufs < XLOG_NUM_ICLOGS || + ap->logbufs > XLOG_MAX_ICLOGS)) { + cmn_err(CE_WARN, + "XFS: invalid logbufs value: %d [not %d-%d]", + ap->logbufs, XLOG_NUM_ICLOGS, XLOG_MAX_ICLOGS); + return XFS_ERROR(EINVAL); + } + mp->m_logbufs = ap->logbufs; + if (ap->logbufsize != -1 && + ap->logbufsize != 16 * 1024 && + ap->logbufsize != 32 * 1024 && + ap->logbufsize != 64 * 1024 && + ap->logbufsize != 128 * 1024 && + ap->logbufsize != 256 * 1024) { + cmn_err(CE_WARN, + "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", + ap->logbufsize); + return XFS_ERROR(EINVAL); + } + mp->m_logbsize = ap->logbufsize; + mp->m_fsname_len = strlen(ap->fsname) + 1; + mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP); + strcpy(mp->m_fsname, ap->fsname); + + /* + * Pull in the 'wsync' and 'ino64' mount options before we do the real + * work of mounting and recovery. The arg pointer will + * be NULL when we are being called from the root mount code. + */ + if (ap->flags & XFSMNT_WSYNC) + mp->m_flags |= XFS_MOUNT_WSYNC; +#if XFS_BIG_FILESYSTEMS + if (ap->flags & XFSMNT_INO64) { + mp->m_flags |= XFS_MOUNT_INO64; + mp->m_inoadd = XFS_INO64_OFFSET; + } +#endif + if (ap->flags & XFSMNT_NOATIME) + mp->m_flags |= XFS_MOUNT_NOATIME; + + if (ap->flags & XFSMNT_RETERR) + mp->m_flags |= XFS_MOUNT_RETERR; + + if (ap->flags & XFSMNT_NOALIGN) + mp->m_flags |= XFS_MOUNT_NOALIGN; + + if (ap->flags & XFSMNT_OSYNCISOSYNC) + mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC; + + if (ap->flags & XFSMNT_32BITINODES) + mp->m_flags |= XFS_MOUNT_32BITINODES; + + if (ap->flags & XFSMNT_IOSIZE) { + if (ap->iosizelog > XFS_MAX_IO_LOG || + ap->iosizelog < XFS_MIN_IO_LOG) { + cmn_err(CE_WARN, + "XFS: invalid log iosize: %d [not %d-%d]", + ap->iosizelog, XFS_MIN_IO_LOG, + XFS_MAX_IO_LOG); + return XFS_ERROR(EINVAL); + } + + mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; + mp->m_readio_log = mp->m_writeio_log = ap->iosizelog; + } + + /* + * no recovery flag requires a read-only mount + */ + if (ap->flags & XFSMNT_NORECOVERY) { + if (!ronly) { + cmn_err(CE_WARN, + "XFS: tried to mount a FS read-write without recovery!"); + return XFS_ERROR(EINVAL); + } + mp->m_flags |= XFS_MOUNT_NORECOVERY; + } + + if (ap->flags & XFSMNT_NOUUID) + mp->m_flags |= XFS_MOUNT_NOUUID; + if (ap->flags & XFSMNT_NOLOGFLUSH) + mp->m_flags |= XFS_MOUNT_NOLOGFLUSH; + + return 0; +} + +/* + * This function fills in xfs_mount_t fields based on mount args. + * Note: the superblock _has_ now been read in. 
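/*
 * A standalone restatement of the logbufsize rule enforced above: either -1
 * ("choose a default later") or one of a few fixed power-of-two sizes.  This
 * helper only mirrors the check for illustration; it is not the XFS code.
 */
#include <stdio.h>
#include <stdbool.h>

static bool logbufsize_valid(int size)
{
        if (size == -1)                 /* default requested */
                return true;
        switch (size) {
        case 16 * 1024:
        case 32 * 1024:
        case 64 * 1024:
        case 128 * 1024:
        case 256 * 1024:
                return true;
        default:
                return false;
        }
}

int main(void)
{
        printf("32768 -> %d, 40960 -> %d\n",
               logbufsize_valid(32768), logbufsize_valid(40960));
        return 0;
}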
+ */ +STATIC int +xfs_finish_flags( + struct xfs_mount_args *ap, + struct xfs_mount *mp, + int ronly) +{ + /* Fail a mount where the logbuf is smaller then the log stripe */ + if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) { + if (((ap->logbufsize == -1) && + (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) || + (ap->logbufsize < mp->m_sb.sb_logsunit)) { + cmn_err(CE_WARN, + "XFS: logbuf size must be greater than or equal to log stripe size"); + return XFS_ERROR(EINVAL); + } + } else { + /* Fail a mount if the logbuf is larger than 32K */ + if (ap->logbufsize > XLOG_BIG_RECORD_BSIZE) { + cmn_err(CE_WARN, + "XFS: logbuf size for version 1 logs must be 16K or 32K"); + return XFS_ERROR(EINVAL); + } + } + + /* + * prohibit r/w mounts of read-only filesystems + */ + if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { + cmn_err(CE_WARN, + "XFS: cannot mount a read-only filesystem as read-write"); + return XFS_ERROR(EROFS); + } + + /* + * disallow mount attempts with (IRIX) project quota enabled + */ + if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) && + (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT)) { + cmn_err(CE_WARN, + "XFS: cannot mount a filesystem with IRIX project quota enabled"); + return XFS_ERROR(ENOSYS); + } + + /* + * check for shared mount. + */ + if (ap->flags & XFSMNT_SHARED) { + if (!XFS_SB_VERSION_HASSHARED(&mp->m_sb)) + return XFS_ERROR(EINVAL); + + /* + * For IRIX 6.5, shared mounts must have the shared + * version bit set, have the persistent readonly + * field set, must be version 0 and can only be mounted + * read-only. + */ + if (!ronly || !(mp->m_sb.sb_flags & XFS_SBF_READONLY) || + (mp->m_sb.sb_shared_vn != 0)) + return XFS_ERROR(EINVAL); + + mp->m_flags |= XFS_MOUNT_SHARED; + + /* + * Shared XFS V0 can't deal with DMI. Return EINVAL. + */ + if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI)) + return XFS_ERROR(EINVAL); + } + + return 0; +} + +/* + * xfs_mount + * + * The file system configurations are: + * (1) device (partition) with data and internal log + * (2) logical volume with data and log subvolumes. + * (3) logical volume with data, log, and realtime subvolumes. + * + * We only have to handle opening the log and realtime volumes here if + * they are present. The data subvolume has already been opened by + * get_sb_bdev() and is stored in vfsp->vfs_super->s_bdev. + */ +STATIC int +xfs_mount( + struct bhv_desc *bhvp, + struct xfs_mount_args *args, + cred_t *credp) +{ + struct vfs *vfsp = bhvtovfs(bhvp); + struct bhv_desc *p; + struct xfs_mount *mp = XFS_BHVTOM(bhvp); + struct block_device *ddev, *logdev, *rtdev; + int ronly = (vfsp->vfs_flag & VFS_RDONLY); + int flags = 0, error; + + ddev = vfsp->vfs_super->s_bdev; + logdev = rtdev = NULL; + + /* + * Open real time and log devices - order is important. + */ + if (args->logname[0]) { + error = xfs_blkdev_get(mp, args->logname, &logdev); + if (error) + return error; + } + if (args->rtname[0]) { + error = xfs_blkdev_get(mp, args->rtname, &rtdev); + if (error) { + xfs_blkdev_put(logdev); + return error; + } + + if (rtdev == ddev || rtdev == logdev) { + cmn_err(CE_WARN, + "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev."); + xfs_blkdev_put(logdev); + xfs_blkdev_put(rtdev); + return EINVAL; + } + } + + /* + * Setup xfs_mount function vectors from available behaviors + */ + p = vfs_bhv_lookup(vfsp, VFS_POSITION_DM); + mp->m_dm_ops = p ? *(xfs_dmops_t *) vfs_bhv_custom(p) : xfs_dmcore_xfs; + p = vfs_bhv_lookup(vfsp, VFS_POSITION_QM); + mp->m_qm_ops = p ? 
*(xfs_qmops_t *) vfs_bhv_custom(p) : xfs_qmcore_xfs; + p = vfs_bhv_lookup(vfsp, VFS_POSITION_IO); + mp->m_io_ops = p ? *(xfs_ioops_t *) vfs_bhv_custom(p) : xfs_iocore_xfs; + + /* + * Setup xfs_mount buffer target pointers + */ + mp->m_ddev_targp = xfs_alloc_buftarg(ddev); + if (rtdev) + mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev); + mp->m_logdev_targp = (logdev && logdev != ddev) ? + xfs_alloc_buftarg(logdev) : mp->m_ddev_targp; + + /* + * Setup flags based on mount(2) options and then the superblock + */ + error = xfs_start_flags(args, mp, ronly); + if (error) + goto error; + error = xfs_readsb(mp); + if (error) + goto error; + error = xfs_finish_flags(args, mp, ronly); + if (error) { + xfs_freesb(mp); + goto error; + } + + /* + * Setup xfs_mount buffer target pointers based on superblock + */ + xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize, + mp->m_sb.sb_sectsize); + if (logdev && logdev != ddev) { + unsigned int log_sector_size = BBSIZE; + + if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb)) + log_sector_size = mp->m_sb.sb_logsectsize; + xfs_setsize_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize, + log_sector_size); + } + if (rtdev) + xfs_setsize_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize, + mp->m_sb.sb_sectsize); + + if (!(error = XFS_IOINIT(vfsp, args, flags))) + return 0; + + error: + xfs_binval(mp->m_ddev_targp); + if (logdev != NULL && logdev != ddev) { + xfs_binval(mp->m_logdev_targp); + } + if (rtdev != NULL) { + xfs_binval(mp->m_rtdev_targp); + } + xfs_unmountfs_close(mp, NULL); + return error; +} + +STATIC int +xfs_unmount( + bhv_desc_t *bdp, + int flags, + cred_t *credp) +{ + struct vfs *vfsp = bhvtovfs(bdp); + xfs_mount_t *mp = XFS_BHVTOM(bdp); + xfs_inode_t *rip; + vnode_t *rvp; + int unmount_event_wanted = 0; + int unmount_event_flags = 0; + int xfs_unmountfs_needed = 0; + int error; + + rip = mp->m_rootip; + rvp = XFS_ITOV(rip); + + if (vfsp->vfs_flag & VFS_DMI) { + bhv_desc_t *rbdp; + + rbdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(rvp), &xfs_vnodeops); + error = XFS_SEND_NAMESP(mp, DM_EVENT_PREUNMOUNT, + rbdp, DM_RIGHT_NULL, rbdp, DM_RIGHT_NULL, + NULL, NULL, 0, 0, + (mp->m_dmevmask & (1<m_dmevmask & (1<m_ddev_targp); + error = xfs_unmount_flush(mp, 0); + if (error) + goto out; + + ASSERT(vn_count(rvp) == 1); + + /* + * Drop the reference count + */ + VN_RELE(rvp); + + /* + * If we're forcing a shutdown, typically because of a media error, + * we want to make sure we invalidate dirty pages that belong to + * referenced vnodes as well. + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + error = xfs_sync(&mp->m_bhv, + (SYNC_WAIT | SYNC_CLOSE), credp); + ASSERT(error != EFSCORRUPTED); + } + xfs_unmountfs_needed = 1; + +out: + /* Send DMAPI event, if required. + * Then do xfs_unmountfs() if needed. + * Then return error (or zero). + */ + if (unmount_event_wanted) { + /* Note: mp structure must still exist for + * XFS_SEND_UNMOUNT() call. + */ + XFS_SEND_UNMOUNT(mp, vfsp, error == 0 ? rvp : NULL, + DM_RIGHT_NULL, 0, error, unmount_event_flags); + } + if (xfs_unmountfs_needed) { + /* + * Call common unmount function to flush to disk + * and free the super block buffer & mount structures. 
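/*
 * A standalone sketch of the error-handling shape used by xfs_unmount()
 * above: note which follow-up steps are owed (deliver the unmount event,
 * run the real teardown) and perform them at one exit point so the event
 * always carries the final error.  The helpers are invented stand-ins, not
 * XFS or DMAPI functions.
 */
#include <stdio.h>
#include <stdbool.h>

static int  flush_everything(void)        { return 0; }
static void send_unmount_event(int err)   { printf("unmount event, err=%d\n", err); }
static void real_unmount(void)            { printf("freeing mount structures\n"); }

static int do_unmount(bool event_wanted)
{
        bool teardown_needed = false;
        int error;

        error = flush_everything();
        if (error)
                goto out;
        teardown_needed = true;          /* flush worked; safe to tear down */
out:
        if (event_wanted)
                send_unmount_event(error);   /* structures must still exist here */
        if (teardown_needed)
                real_unmount();
        return error;
}

int main(void)
{
        return do_unmount(true);
}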
+ */ + xfs_unmountfs(mp, credp); + } + + return XFS_ERROR(error); +} + +STATIC int +xfs_mntupdate( + bhv_desc_t *bdp, + int *flags, + struct xfs_mount_args *args) +{ + struct vfs *vfsp = bhvtovfs(bdp); + xfs_mount_t *mp = XFS_BHVTOM(bdp); + int pincount, error; + + if (args->flags & XFSMNT_NOATIME) + mp->m_flags |= XFS_MOUNT_NOATIME; + else + mp->m_flags &= ~XFS_MOUNT_NOATIME; + + if (!(vfsp->vfs_flag & VFS_RDONLY)) { + VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error); + } + + if (*flags & MS_RDONLY) { + xfs_refcache_purge_mp(mp); + pagebuf_delwri_flush(mp->m_ddev_targp, 0, NULL); + xfs_finish_reclaim_all(mp, 0); + + do { + VFS_SYNC(vfsp, SYNC_ATTR|SYNC_WAIT, NULL, error); + pagebuf_delwri_flush(mp->m_ddev_targp, PBDF_WAIT, + &pincount); + } while (pincount); + + /* Ok now write out an unmount record */ + xfs_log_unmount_write(mp); + xfs_unmountfs_writesb(mp); + vfsp->vfs_flag |= VFS_RDONLY; + } else { + vfsp->vfs_flag &= ~VFS_RDONLY; + } + + return 0; +} + +/* + * xfs_unmount_flush implements a set of flush operation on special + * inodes, which are needed as a separate set of operations so that + * they can be called as part of relocation process. + */ +int +xfs_unmount_flush( + xfs_mount_t *mp, /* Mount structure we are getting + rid of. */ + int relocation) /* Called from vfs relocation. */ +{ + xfs_inode_t *rip = mp->m_rootip; + xfs_inode_t *rbmip; + xfs_inode_t *rsumip = NULL; + vnode_t *rvp = XFS_ITOV(rip); + int error; + + xfs_ilock(rip, XFS_ILOCK_EXCL); + xfs_iflock(rip); + + /* + * Flush out the real time inodes. + */ + if ((rbmip = mp->m_rbmip) != NULL) { + xfs_ilock(rbmip, XFS_ILOCK_EXCL); + xfs_iflock(rbmip); + error = xfs_iflush(rbmip, XFS_IFLUSH_SYNC); + xfs_iunlock(rbmip, XFS_ILOCK_EXCL); + + if (error == EFSCORRUPTED) + goto fscorrupt_out; + + ASSERT(vn_count(XFS_ITOV(rbmip)) == 1); + + rsumip = mp->m_rsumip; + xfs_ilock(rsumip, XFS_ILOCK_EXCL); + xfs_iflock(rsumip); + error = xfs_iflush(rsumip, XFS_IFLUSH_SYNC); + xfs_iunlock(rsumip, XFS_ILOCK_EXCL); + + if (error == EFSCORRUPTED) + goto fscorrupt_out; + + ASSERT(vn_count(XFS_ITOV(rsumip)) == 1); + } + + /* + * Synchronously flush root inode to disk + */ + error = xfs_iflush(rip, XFS_IFLUSH_SYNC); + if (error == EFSCORRUPTED) + goto fscorrupt_out2; + + if (vn_count(rvp) != 1 && !relocation) { + xfs_iunlock(rip, XFS_ILOCK_EXCL); + return XFS_ERROR(EBUSY); + } + + /* + * Release dquot that rootinode, rbmino and rsumino might be holding, + * flush and purge the quota inodes. + */ + error = XFS_QM_UNMOUNT(mp); + if (error == EFSCORRUPTED) + goto fscorrupt_out2; + + if (rbmip) { + VN_RELE(XFS_ITOV(rbmip)); + VN_RELE(XFS_ITOV(rsumip)); + } + + xfs_iunlock(rip, XFS_ILOCK_EXCL); + return 0; + +fscorrupt_out: + xfs_ifunlock(rip); + +fscorrupt_out2: + xfs_iunlock(rip, XFS_ILOCK_EXCL); + + return XFS_ERROR(EFSCORRUPTED); +} + +/* + * xfs_root extracts the root vnode from a vfs. + * + * vfsp -- the vfs struct for the desired file system + * vpp -- address of the caller's vnode pointer which should be + * set to the desired fs root vnode + */ +STATIC int +xfs_root( + bhv_desc_t *bdp, + vnode_t **vpp) +{ + vnode_t *vp; + + vp = XFS_ITOV((XFS_BHVTOM(bdp))->m_rootip); + VN_HOLD(vp); + *vpp = vp; + return 0; +} + +/* + * xfs_statvfs + * + * Fill in the statvfs structure for the given file system. We use + * the superblock lock in the mount structure to ensure a consistent + * snapshot of the counters returned. 
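/*
 * A standalone sketch of the quiesce loop used when remounting read-only
 * above: keep syncing and flushing until the delayed-write flush reports
 * nothing left pinned, and only then write the unmount record.  The helpers
 * below fake the flushing; they are not XFS functions.
 */
#include <stdio.h>

static int pending = 3;                         /* pretend dirty, pinned buffers */

static void sync_attrs(void)   { /* would flush inode attributes */ }
static int  flush_delwri(void) { return pending ? pending-- : 0; }  /* returns pin count */

int main(void)
{
        int pincount;

        do {
                sync_attrs();
                pincount = flush_delwri();      /* anything still pinned by the log? */
                printf("pincount=%d\n", pincount);
        } while (pincount);

        printf("quiesced: write unmount record, flip to read-only\n");
        return 0;
}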
+ */ +STATIC int +xfs_statvfs( + bhv_desc_t *bdp, + struct statfs *statp, + vnode_t *vp) +{ + __uint64_t fakeinos; + xfs_extlen_t lsize; + xfs_mount_t *mp; + xfs_sb_t *sbp; + unsigned long s; + + mp = XFS_BHVTOM(bdp); + sbp = &(mp->m_sb); + + statp->f_type = XFS_SB_MAGIC; + + s = XFS_SB_LOCK(mp); + statp->f_bsize = sbp->sb_blocksize; + lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; + statp->f_blocks = sbp->sb_dblocks - lsize; + statp->f_bfree = statp->f_bavail = sbp->sb_fdblocks; + fakeinos = statp->f_bfree << sbp->sb_inopblog; +#if XFS_BIG_FILESYSTEMS + fakeinos += mp->m_inoadd; +#endif + statp->f_files = + MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); + if (mp->m_maxicount) +#if XFS_BIG_FILESYSTEMS + if (!mp->m_inoadd) +#endif + statp->f_files = + MIN(statp->f_files, (long)mp->m_maxicount); + statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); + XFS_SB_UNLOCK(mp, s); + + statp->f_fsid.val[0] = mp->m_dev; + statp->f_fsid.val[1] = 0; + statp->f_namelen = MAXNAMELEN - 1; + + return 0; +} + + +/* + * xfs_sync flushes any pending I/O to file system vfsp. + * + * This routine is called by vfs_sync() to make sure that things make it + * out to disk eventually, on sync() system calls to flush out everything, + * and when the file system is unmounted. For the vfs_sync() case, all + * we really need to do is sync out the log to make all of our meta-data + * updates permanent (except for timestamps). For calls from pflushd(), + * dirty pages are kept moving by calling pdflush() on the inodes + * containing them. We also flush the inodes that we can lock without + * sleeping and the superblock if we can lock it without sleeping from + * vfs_sync() so that items at the tail of the log are always moving out. + * + * Flags: + * SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want + * to sleep if we can help it. All we really need + * to do is ensure that the log is synced at least + * periodically. We also push the inodes and + * superblock if we can lock them without sleeping + * and they are not pinned. + * SYNC_ATTR - We need to flush the inodes. If SYNC_BDFLUSH is not + * set, then we really want to lock each inode and flush + * it. + * SYNC_WAIT - All the flushes that take place in this call should + * be synchronous. + * SYNC_DELWRI - This tells us to push dirty pages associated with + * inodes. SYNC_WAIT and SYNC_BDFLUSH are used to + * determine if they should be flushed sync, async, or + * delwri. + * SYNC_CLOSE - This flag is passed when the system is being + * unmounted. We should sync and invalidate everthing. + * SYNC_FSDATA - This indicates that the caller would like to make + * sure the superblock is safe on disk. We can ensure + * this by simply makeing sure the log gets flushed + * if SYNC_BDFLUSH is set, and by actually writing it + * out otherwise. + * + */ +/*ARGSUSED*/ +STATIC int +xfs_sync( + bhv_desc_t *bdp, + int flags, + cred_t *credp) +{ + xfs_mount_t *mp; + + mp = XFS_BHVTOM(bdp); + return (xfs_syncsub(mp, flags, 0, NULL)); +} + +/* + * xfs sync routine for internal use + * + * This routine supports all of the flags defined for the generic VFS_SYNC + * interface as explained above under xfs_sync. In the interests of not + * changing interfaces within the 6.5 family, additional internallly- + * required functions are specified within a separate xflags parameter, + * only available by calling this routine. 
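/*
 * A worked, standalone version of the inode-count estimate in xfs_statvfs()
 * above: free data blocks are treated as room for more inodes
 * (bfree << sb_inopblog), added to the inodes already allocated, capped, and
 * the used count (icount - ifree) is subtracted to get f_ffree.  The sample
 * numbers are made up for the example.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        uint64_t icount    = 1000;      /* inodes allocated on disk */
        uint64_t ifree     = 200;       /* of those, currently unused */
        uint64_t bfree     = 5000;      /* free filesystem blocks */
        unsigned inopblog  = 4;         /* log2(inodes per block): 16 per block */
        uint64_t maxinodes = 1u << 20;  /* stand-in for the upper cap */

        uint64_t fakeinos = bfree << inopblog;          /* 5000 * 16 = 80000 */
        uint64_t f_files  = icount + fakeinos;          /* 81000 */
        if (f_files > maxinodes)
                f_files = maxinodes;
        uint64_t f_ffree  = f_files - (icount - ifree); /* 81000 - 800 = 80200 */

        printf("f_files=%" PRIu64 " f_ffree=%" PRIu64 "\n", f_files, f_ffree);
        return 0;
}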
+ * + */ +STATIC int +xfs_sync_inodes( + xfs_mount_t *mp, + int flags, + int xflags, + int *bypassed) +{ + xfs_inode_t *ip = NULL; + xfs_inode_t *ip_next; + xfs_buf_t *bp; + vnode_t *vp = NULL; + vmap_t vmap; + int error; + int last_error; + uint64_t fflag; + uint lock_flags; + uint base_lock_flags; + boolean_t mount_locked; + boolean_t vnode_refed; + int preempt; + xfs_dinode_t *dip; + xfs_iptr_t *ipointer; +#ifdef DEBUG + boolean_t ipointer_in = B_FALSE; + +#define IPOINTER_SET ipointer_in = B_TRUE +#define IPOINTER_CLR ipointer_in = B_FALSE +#else +#define IPOINTER_SET +#define IPOINTER_CLR +#endif + + +/* Insert a marker record into the inode list after inode ip. The list + * must be locked when this is called. After the call the list will no + * longer be locked. + */ +#define IPOINTER_INSERT(ip, mp) { \ + ASSERT(ipointer_in == B_FALSE); \ + ipointer->ip_mnext = ip->i_mnext; \ + ipointer->ip_mprev = ip; \ + ip->i_mnext = (xfs_inode_t *)ipointer; \ + ipointer->ip_mnext->i_mprev = (xfs_inode_t *)ipointer; \ + preempt = 0; \ + XFS_MOUNT_IUNLOCK(mp); \ + mount_locked = B_FALSE; \ + IPOINTER_SET; \ + } + +/* Remove the marker from the inode list. If the marker was the only item + * in the list then there are no remaining inodes and we should zero out + * the whole list. If we are the current head of the list then move the head + * past us. + */ +#define IPOINTER_REMOVE(ip, mp) { \ + ASSERT(ipointer_in == B_TRUE); \ + if (ipointer->ip_mnext != (xfs_inode_t *)ipointer) { \ + ip = ipointer->ip_mnext; \ + ip->i_mprev = ipointer->ip_mprev; \ + ipointer->ip_mprev->i_mnext = ip; \ + if (mp->m_inodes == (xfs_inode_t *)ipointer) { \ + mp->m_inodes = ip; \ + } \ + } else { \ + ASSERT(mp->m_inodes == (xfs_inode_t *)ipointer); \ + mp->m_inodes = NULL; \ + ip = NULL; \ + } \ + IPOINTER_CLR; \ + } + +#define XFS_PREEMPT_MASK 0x7f + + if (bypassed) + *bypassed = 0; + if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY) + return 0; + error = 0; + last_error = 0; + preempt = 0; + + /* Allocate a reference marker */ + ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP); + + fflag = XFS_B_ASYNC; /* default is don't wait */ + if (flags & SYNC_BDFLUSH) + fflag = XFS_B_DELWRI; + if (flags & SYNC_WAIT) + fflag = 0; /* synchronous overrides all */ + + base_lock_flags = XFS_ILOCK_SHARED; + if (flags & (SYNC_DELWRI | SYNC_CLOSE)) { + /* + * We need the I/O lock if we're going to call any of + * the flush/inval routines. + */ + base_lock_flags |= XFS_IOLOCK_SHARED; + } + + XFS_MOUNT_ILOCK(mp); + + ip = mp->m_inodes; + + mount_locked = B_TRUE; + vnode_refed = B_FALSE; + + IPOINTER_CLR; + + do { + ASSERT(ipointer_in == B_FALSE); + ASSERT(vnode_refed == B_FALSE); + + lock_flags = base_lock_flags; + + /* + * There were no inodes in the list, just break out + * of the loop. + */ + if (ip == NULL) { + break; + } + + /* + * We found another sync thread marker - skip it + */ + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + + vp = XFS_ITOV_NULL(ip); + + /* + * If the vnode is gone then this is being torn down, + * call reclaim if it is flushed, else let regular flush + * code deal with it later in the loop. 
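/*
 * A minimal, standalone model of the IPOINTER_INSERT/IPOINTER_REMOVE idea
 * above: park a marker node in a doubly linked list before dropping the
 * list lock, then resume iteration at whatever follows the marker once the
 * lock is retaken.  The node type and helpers are invented for illustration.
 */
#include <stdio.h>

struct node {
        struct node *next, *prev;
        int value;                      /* 0 is reserved for markers here */
};

static void marker_insert(struct node *after, struct node *marker)
{
        marker->next = after->next;
        marker->prev = after;
        after->next->prev = marker;
        after->next = marker;
        /* ...the list lock could be dropped here; 'after' may even go away... */
}

static struct node *marker_remove(struct node *marker)
{
        struct node *resume = marker->next;     /* where iteration continues */

        marker->prev->next = marker->next;
        marker->next->prev = marker->prev;
        return resume;
}

int main(void)
{
        struct node a = { .value = 1 }, b = { .value = 2 }, m = { .value = 0 };
        struct node *resume;

        /* two-element circular list: a <-> b */
        a.next = &b; a.prev = &b;
        b.next = &a; b.prev = &a;

        marker_insert(&a, &m);                  /* a -> m -> b */
        resume = marker_remove(&m);
        printf("resume at node %d\n", resume->value);   /* prints 2 */
        return 0;
}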
+ */ + + if (vp == NULL) { + /* Skip ones already in reclaim */ + if (ip->i_flags & XFS_IRECLAIM) { + ip = ip->i_mnext; + continue; + } + if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { + ip = ip->i_mnext; + } else if ((xfs_ipincount(ip) == 0) && + xfs_iflock_nowait(ip)) { + IPOINTER_INSERT(ip, mp); + + xfs_finish_reclaim(ip, 1, + XFS_IFLUSH_DELWRI_ELSE_ASYNC); + + XFS_MOUNT_ILOCK(mp); + mount_locked = B_TRUE; + IPOINTER_REMOVE(ip, mp); + } else { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + ip = ip->i_mnext; + } + continue; + } + + if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) { + XFS_MOUNT_IUNLOCK(mp); + kmem_free(ipointer, sizeof(xfs_iptr_t)); + return 0; + } + + /* + * If this is just vfs_sync() or pflushd() calling + * then we can skip inodes for which it looks like + * there is nothing to do. Since we don't have the + * inode locked this is racey, but these are periodic + * calls so it doesn't matter. For the others we want + * to know for sure, so we at least try to lock them. + */ + if (flags & SYNC_BDFLUSH) { + if (((ip->i_itemp == NULL) || + !(ip->i_itemp->ili_format.ilf_fields & + XFS_ILOG_ALL)) && + (ip->i_update_core == 0)) { + ip = ip->i_mnext; + continue; + } + } + + /* + * Try to lock without sleeping. We're out of order with + * the inode list lock here, so if we fail we need to drop + * the mount lock and try again. If we're called from + * bdflush() here, then don't bother. + * + * The inode lock here actually coordinates with the + * almost spurious inode lock in xfs_ireclaim() to prevent + * the vnode we handle here without a reference from + * being freed while we reference it. If we lock the inode + * while it's on the mount list here, then the spurious inode + * lock in xfs_ireclaim() after the inode is pulled from + * the mount list will sleep until we release it here. + * This keeps the vnode from being freed while we reference + * it. It is also cheaper and simpler than actually doing + * a vn_get() for every inode we touch here. + */ + if (xfs_ilock_nowait(ip, lock_flags) == 0) { + + if ((flags & SYNC_BDFLUSH) || (vp == NULL)) { + ip = ip->i_mnext; + continue; + } + + /* + * We need to unlock the inode list lock in order + * to lock the inode. Insert a marker record into + * the inode list to remember our position, dropping + * the lock is now done inside the IPOINTER_INSERT + * macro. + * + * We also use the inode list lock to protect us + * in taking a snapshot of the vnode version number + * for use in calling vn_get(). + */ + VMAP(vp, vmap); + IPOINTER_INSERT(ip, mp); + + vp = vn_get(vp, &vmap); + if (vp == NULL) { + /* + * The vnode was reclaimed once we let go + * of the inode list lock. Skip to the + * next list entry. Remove the marker. + */ + + XFS_MOUNT_ILOCK(mp); + + mount_locked = B_TRUE; + vnode_refed = B_FALSE; + + IPOINTER_REMOVE(ip, mp); + + continue; + } + + xfs_ilock(ip, lock_flags); + + ASSERT(vp == XFS_ITOV(ip)); + ASSERT(ip->i_mount == mp); + + vnode_refed = B_TRUE; + } + + /* From here on in the loop we may have a marker record + * in the inode list. + */ + + if ((flags & SYNC_CLOSE) && (vp != NULL)) { + /* + * This is the shutdown case. We just need to + * flush and invalidate all the pages associated + * with the inode. Drop the inode lock since + * we can't hold it across calls to the buffer + * cache. + * + * We don't set the VREMAPPING bit in the vnode + * here, because we don't hold the vnode lock + * exclusively. It doesn't really matter, though, + * because we only come here when we're shutting + * down anyway. 
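/*
 * A standalone sketch of the snapshot-and-revalidate step above (the
 * VMAP()/vn_get() pair): record identifying state while the list lock is
 * still held, drop the lock, and re-take the object only if that snapshot
 * still matches -- otherwise it was reclaimed in the meantime.  A generation
 * counter stands in for the vmap; all names are invented.
 */
#include <stdio.h>
#include <stdbool.h>

struct object {
        unsigned gen;           /* bumped whenever the object is reclaimed/reused */
        bool     alive;
};

struct snapshot { unsigned gen; };

static struct snapshot take_snapshot(const struct object *o)
{
        struct snapshot s = { .gen = o->gen };  /* caller still holds the list lock */
        return s;
}

/* Called after the list lock has been dropped. */
static struct object *reacquire(struct object *o, struct snapshot s)
{
        if (!o->alive || o->gen != s.gen)
                return NULL;    /* reclaimed while unlocked: skip this entry */
        return o;               /* unchanged: safe to keep using */
}

int main(void)
{
        struct object o = { .gen = 7, .alive = true };
        struct snapshot s = take_snapshot(&o);

        o.gen++;                /* simulate reclaim + reuse while unlocked */
        printf("reacquired: %s\n", reacquire(&o, s) ? "yes" : "no");
        return 0;
}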
+ */ + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + if (XFS_FORCED_SHUTDOWN(mp)) { + VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF); + } else { + VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_REMAPF); + } + + xfs_ilock(ip, XFS_ILOCK_SHARED); + + } else if ((flags & SYNC_DELWRI) && (vp != NULL)) { + if (VN_DIRTY(vp)) { + /* We need to have dropped the lock here, + * so insert a marker if we have not already + * done so. + */ + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + + /* + * Drop the inode lock since we can't hold it + * across calls to the buffer cache. + */ + xfs_iunlock(ip, XFS_ILOCK_SHARED); + VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, + fflag, FI_NONE, error); + xfs_ilock(ip, XFS_ILOCK_SHARED); + } + + } + + if (flags & SYNC_BDFLUSH) { + if ((flags & SYNC_ATTR) && + ((ip->i_update_core) || + ((ip->i_itemp != NULL) && + (ip->i_itemp->ili_format.ilf_fields != 0)))) { + + /* Insert marker and drop lock if not already + * done. + */ + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + + /* + * We don't want the periodic flushing of the + * inodes by vfs_sync() to interfere with + * I/O to the file, especially read I/O + * where it is only the access time stamp + * that is being flushed out. To prevent + * long periods where we have both inode + * locks held shared here while reading the + * inode's buffer in from disk, we drop the + * inode lock while reading in the inode + * buffer. We have to release the buffer + * and reacquire the inode lock so that they + * are acquired in the proper order (inode + * locks first). The buffer will go at the + * end of the lru chain, though, so we can + * expect it to still be there when we go + * for it again in xfs_iflush(). + */ + if ((xfs_ipincount(ip) == 0) && + xfs_iflock_nowait(ip)) { + + xfs_ifunlock(ip); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + error = xfs_itobp(mp, NULL, ip, + &dip, &bp, 0); + if (!error) { + xfs_buf_relse(bp); + } else { + /* Bailing out, remove the + * marker and free it. + */ + XFS_MOUNT_ILOCK(mp); + + IPOINTER_REMOVE(ip, mp); + + XFS_MOUNT_IUNLOCK(mp); + + ASSERT(!(lock_flags & + XFS_IOLOCK_SHARED)); + + kmem_free(ipointer, + sizeof(xfs_iptr_t)); + return (0); + } + + /* + * Since we dropped the inode lock, + * the inode may have been reclaimed. + * Therefore, we reacquire the mount + * lock and check to see if we were the + * inode reclaimed. If this happened + * then the ipointer marker will no + * longer point back at us. In this + * case, move ip along to the inode + * after the marker, remove the marker + * and continue. + */ + XFS_MOUNT_ILOCK(mp); + mount_locked = B_TRUE; + + if (ip != ipointer->ip_mprev) { + IPOINTER_REMOVE(ip, mp); + + ASSERT(!vnode_refed); + ASSERT(!(lock_flags & + XFS_IOLOCK_SHARED)); + continue; + } + + ASSERT(ip->i_mount == mp); + + if (xfs_ilock_nowait(ip, + XFS_ILOCK_SHARED) == 0) { + ASSERT(ip->i_mount == mp); + /* + * We failed to reacquire + * the inode lock without + * sleeping, so just skip + * the inode for now. We + * clear the ILOCK bit from + * the lock_flags so that we + * won't try to drop a lock + * we don't hold below. + */ + lock_flags &= ~XFS_ILOCK_SHARED; + IPOINTER_REMOVE(ip_next, mp); + } else if ((xfs_ipincount(ip) == 0) && + xfs_iflock_nowait(ip)) { + ASSERT(ip->i_mount == mp); + /* + * Since this is vfs_sync() + * calling we only flush the + * inode out if we can lock + * it without sleeping and + * it is not pinned. Drop + * the mount lock here so + * that we don't hold it for + * too long. We already have + * a marker in the list here. 
+ */ + XFS_MOUNT_IUNLOCK(mp); + mount_locked = B_FALSE; + error = xfs_iflush(ip, + XFS_IFLUSH_DELWRI); + } else { + ASSERT(ip->i_mount == mp); + IPOINTER_REMOVE(ip_next, mp); + } + } + + } + + } else { + if ((flags & SYNC_ATTR) && + ((ip->i_update_core) || + ((ip->i_itemp != NULL) && + (ip->i_itemp->ili_format.ilf_fields != 0)))) { + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + + if (flags & SYNC_WAIT) { + xfs_iflock(ip); + error = xfs_iflush(ip, + XFS_IFLUSH_SYNC); + } else { + /* + * If we can't acquire the flush + * lock, then the inode is already + * being flushed so don't bother + * waiting. If we can lock it then + * do a delwri flush so we can + * combine multiple inode flushes + * in each disk write. + */ + if (xfs_iflock_nowait(ip)) { + error = xfs_iflush(ip, + XFS_IFLUSH_DELWRI); + } + else if (bypassed) + (*bypassed)++; + } + } + } + + if (lock_flags != 0) { + xfs_iunlock(ip, lock_flags); + } + + if (vnode_refed) { + /* + * If we had to take a reference on the vnode + * above, then wait until after we've unlocked + * the inode to release the reference. This is + * because we can be already holding the inode + * lock when VN_RELE() calls xfs_inactive(). + * + * Make sure to drop the mount lock before calling + * VN_RELE() so that we don't trip over ourselves if + * we have to go for the mount lock again in the + * inactive code. + */ + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + + VN_RELE(vp); + + vnode_refed = B_FALSE; + } + + if (error) { + last_error = error; + } + + /* + * bail out if the filesystem is corrupted. + */ + if (error == EFSCORRUPTED) { + if (!mount_locked) { + XFS_MOUNT_ILOCK(mp); + IPOINTER_REMOVE(ip, mp); + } + XFS_MOUNT_IUNLOCK(mp); + ASSERT(ipointer_in == B_FALSE); + kmem_free(ipointer, sizeof(xfs_iptr_t)); + return XFS_ERROR(error); + } + + /* Let other threads have a chance at the mount lock + * if we have looped many times without dropping the + * lock. + */ + if ((++preempt & XFS_PREEMPT_MASK) == 0) { + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + } + + if (mount_locked == B_FALSE) { + XFS_MOUNT_ILOCK(mp); + mount_locked = B_TRUE; + IPOINTER_REMOVE(ip, mp); + continue; + } + + ASSERT(ipointer_in == B_FALSE); + ip = ip->i_mnext; + + } while (ip != mp->m_inodes); + + XFS_MOUNT_IUNLOCK(mp); + + ASSERT(ipointer_in == B_FALSE); + + kmem_free(ipointer, sizeof(xfs_iptr_t)); + return XFS_ERROR(last_error); +} + +/* + * xfs sync routine for internal use + * + * This routine supports all of the flags defined for the generic VFS_SYNC + * interface as explained above under xfs_sync. In the interests of not + * changing interfaces within the 6.5 family, additional internallly- + * required functions are specified within a separate xflags parameter, + * only available by calling this routine. + * + */ +int +xfs_syncsub( + xfs_mount_t *mp, + int flags, + int xflags, + int *bypassed) +{ + int error = 0; + int last_error = 0; + uint log_flags = XFS_LOG_FORCE; + xfs_buf_t *bp; + xfs_buf_log_item_t *bip; + + /* + * Sync out the log. This ensures that the log is periodically + * flushed even if there is not enough activity to fill it up. + */ + if (flags & SYNC_WAIT) + log_flags |= XFS_LOG_SYNC; + + xfs_log_force(mp, (xfs_lsn_t)0, log_flags); + + if (flags & (SYNC_ATTR|SYNC_DELWRI)) { + if (flags & SYNC_BDFLUSH) + xfs_finish_reclaim_all(mp, 1); + else + error = xfs_sync_inodes(mp, flags, xflags, bypassed); + } + + /* + * Flushing out dirty data above probably generated more + * log activity, so if this isn't vfs_sync() then flush + * the log again. 
+ */ + if (flags & SYNC_DELWRI) { + xfs_log_force(mp, (xfs_lsn_t)0, log_flags); + } + + if (flags & SYNC_FSDATA) { + /* + * If this is vfs_sync() then only sync the superblock + * if we can lock it without sleeping and it is not pinned. + */ + if (flags & SYNC_BDFLUSH) { + bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); + if (bp != NULL) { + bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); + if ((bip != NULL) && + xfs_buf_item_dirty(bip)) { + if (!(XFS_BUF_ISPINNED(bp))) { + XFS_BUF_ASYNC(bp); + error = xfs_bwrite(mp, bp); + } else { + xfs_buf_relse(bp); + } + } else { + xfs_buf_relse(bp); + } + } + } else { + bp = xfs_getsb(mp, 0); + /* + * If the buffer is pinned then push on the log so + * we won't get stuck waiting in the write for + * someone, maybe ourselves, to flush the log. + * Even though we just pushed the log above, we + * did not have the superblock buffer locked at + * that point so it can become pinned in between + * there and here. + */ + if (XFS_BUF_ISPINNED(bp)) + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + if (!(flags & SYNC_WAIT)) + XFS_BUF_BFLAGS(bp) |= XFS_B_ASYNC; + error = xfs_bwrite(mp, bp); + } + if (error) { + last_error = error; + } + } + + /* + * If this is the periodic sync, then kick some entries out of + * the reference cache. This ensures that idle entries are + * eventually kicked out of the cache. + */ + if (flags & SYNC_REFCACHE) { + xfs_refcache_purge_some(mp); + } + + /* + * Now check to see if the log needs a "dummy" transaction. + */ + + if (xfs_log_need_covered(mp)) { + xfs_trans_t *tp; + xfs_inode_t *ip; + + /* + * Put a dummy transaction in the log to tell + * recovery that all others are OK. + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); + if ((error = xfs_trans_reserve(tp, 0, + XFS_ICHANGE_LOG_RES(mp), + 0, 0, 0))) { + xfs_trans_cancel(tp, 0); + return error; + } + + ip = mp->m_rootip; + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + error = xfs_trans_commit(tp, 0, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + } + + /* + * When shutting down, we need to insure that the AIL is pushed + * to disk or the filesystem can appear corrupt from the PROM. + */ + if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) { + XFS_bflush(mp->m_ddev_targp); + if (mp->m_rtdev_targp) { + XFS_bflush(mp->m_rtdev_targp); + } + } + + return XFS_ERROR(last_error); +} + +/* + * xfs_vget - called by DMAPI to get vnode from file handle + */ +STATIC int +xfs_vget( + bhv_desc_t *bdp, + vnode_t **vpp, + fid_t *fidp) +{ + xfs_fid_t *xfid; + xfs_inode_t *ip; + int error; + xfs_ino_t ino; + unsigned int igen; + xfs_mount_t *mp; + + xfid = (struct xfs_fid *)fidp; + if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) { + ino = xfid->xfs_fid_ino; + igen = xfid->xfs_fid_gen; + } else { + /* + * Invalid. Since handles can be created in user space + * and passed in via gethandle(), this is not cause for + * a panic. 
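/*
 * A standalone sketch of the handle validation done by xfs_vget() above:
 * nothing in a user-supplied handle is trusted -- the length is checked,
 * then the inode's existence, then the generation number, so stale or
 * malformed handles fail cleanly instead of resolving to the wrong file.
 * The structures and the lookup table below are simplified stand-ins.
 */
#include <stdio.h>
#include <errno.h>

struct fid   { unsigned short len; unsigned long ino; unsigned gen; };
struct inode { unsigned long ino; unsigned gen; unsigned mode; };

static struct inode table[] = { { 128, 3, 0100644 } };  /* pretend inode table */

static struct inode *ilookup(unsigned long ino)
{
        unsigned i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].ino == ino)
                        return &table[i];
        return NULL;
}

static int handle_to_inode(const struct fid *f, struct inode **out)
{
        struct inode *ip;

        *out = NULL;
        if (f->len != sizeof(*f) - sizeof(f->len))
                return EINVAL;          /* malformed handle from user space */

        ip = ilookup(f->ino);
        if (ip == NULL || ip->mode == 0)
                return ENOENT;          /* no such (or freed) inode */
        if (f->gen && ip->gen != f->gen)
                return ENOENT;          /* inode number was reused: stale handle */

        *out = ip;
        return 0;
}

int main(void)
{
        struct fid good  = { sizeof(struct fid) - sizeof(unsigned short), 128, 3 };
        struct fid stale = good;
        struct inode *ip;

        stale.gen = 2;
        printf("good:  %d\n", handle_to_inode(&good, &ip));     /* 0 */
        printf("stale: %d\n", handle_to_inode(&stale, &ip));    /* ENOENT */
        return 0;
}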
+ */ + return XFS_ERROR(EINVAL); + } + mp = XFS_BHVTOM(bdp); + error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0); + if (error) { + *vpp = NULL; + return error; + } + if (ip == NULL) { + *vpp = NULL; + return XFS_ERROR(EIO); + } + + if (ip->i_d.di_mode == 0 || (igen && (ip->i_d.di_gen != igen))) { + xfs_iput_new(ip, XFS_ILOCK_SHARED); + *vpp = NULL; + return XFS_ERROR(ENOENT); + } + + *vpp = XFS_ITOV(ip); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; +} + + +#define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ +#define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ +#define MNTOPT_LOGDEV "logdev" /* log device */ +#define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ +#define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ +#define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ +#define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */ +#define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ +#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ +#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ +#define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ +#define MNTOPT_MTPT "mtpt" /* filesystem mount point */ +#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ +#define MNTOPT_NOLOGFLUSH "nologflush" /* don't hard flush on log writes */ +#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ + + +int +xfs_parseargs( + struct bhv_desc *bhv, + char *options, + struct xfs_mount_args *args, + int update) +{ + struct vfs *vfsp = bhvtovfs(bhv); + char *this_char, *value, *eov; + int dsunit, dswidth, vol_dsunit, vol_dswidth; + int iosize; + + if (!options) + return 0; + + iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0; + + while ((this_char = strsep(&options, ",")) != NULL) { + if (!*this_char) + continue; + if ((value = strchr(this_char, '=')) != NULL) + *value++ = 0; + + if (!strcmp(this_char, MNTOPT_LOGBUFS)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_LOGBUFS); + return -EINVAL; + } + args->logbufs = simple_strtoul(value, &eov, 10); + } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { + int last, in_kilobytes = 0; + + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_LOGBSIZE); + return -EINVAL; + } + last = strlen(value) - 1; + if (value[last] == 'K' || value[last] == 'k') { + in_kilobytes = 1; + value[last] = '\0'; + } + args->logbufsize = simple_strtoul(value, &eov, 10); + if (in_kilobytes) + args->logbufsize <<= 10; + } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_LOGDEV); + return -EINVAL; + } + strncpy(args->logname, value, MAXNAMELEN); + } else if (!strcmp(this_char, MNTOPT_MTPT)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_MTPT); + return -EINVAL; + } + strncpy(args->mtpt, value, MAXNAMELEN); + } else if (!strcmp(this_char, MNTOPT_RTDEV)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_RTDEV); + return -EINVAL; + } + strncpy(args->rtname, value, MAXNAMELEN); + } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_BIOSIZE); + return -EINVAL; + } + iosize = simple_strtoul(value, &eov, 10); + args->flags |= XFSMNT_IOSIZE; + args->iosizelog = (uint8_t) iosize; + } else if (!strcmp(this_char, MNTOPT_WSYNC)) { + args->flags |= 
XFSMNT_WSYNC; + } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { + args->flags |= XFSMNT_OSYNCISOSYNC; + } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { + args->flags |= XFSMNT_NORECOVERY; + } else if (!strcmp(this_char, MNTOPT_INO64)) { + args->flags |= XFSMNT_INO64; +#ifndef XFS_BIG_FILESYSTEMS + printk("XFS: %s option not allowed on this system\n", + MNTOPT_INO64); + return -EINVAL; +#endif + } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { + args->flags |= XFSMNT_NOALIGN; + } else if (!strcmp(this_char, MNTOPT_SUNIT)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_SUNIT); + return -EINVAL; + } + dsunit = simple_strtoul(value, &eov, 10); + } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { + if (!value || !*value) { + printk("XFS: %s option requires an argument\n", + MNTOPT_SWIDTH); + return -EINVAL; + } + dswidth = simple_strtoul(value, &eov, 10); + } else if (!strcmp(this_char, MNTOPT_NOUUID)) { + args->flags |= XFSMNT_NOUUID; + } else if (!strcmp(this_char, MNTOPT_NOLOGFLUSH)) { + args->flags |= XFSMNT_NOLOGFLUSH; + } else if (!strcmp(this_char, "osyncisdsync")) { + /* no-op, this is now the default */ +printk("XFS: osyncisdsync is now the default, option is deprecated.\n"); + } else if (!strcmp(this_char, "irixsgid")) { +printk("XFS: irixsgid is now a sysctl(2) variable, option is deprecated.\n"); + } else { + printk("XFS: unknown mount option [%s].\n", this_char); + return -EINVAL; + } + } + + if (args->flags & XFSMNT_NORECOVERY) { + if ((vfsp->vfs_flag & VFS_RDONLY) == 0) { + printk("XFS: no-recovery mounts must be read-only.\n"); + return -EINVAL; + } + } + + if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) { + printk( + "XFS: sunit and swidth options incompatible with the noalign option\n"); + return -EINVAL; + } + + if ((dsunit && !dswidth) || (!dsunit && dswidth)) { + printk("XFS: sunit and swidth must be specified together\n"); + return -EINVAL; + } + + if (dsunit && (dswidth % dsunit != 0)) { + printk( + "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)\n", + dswidth, dsunit); + return -EINVAL; + } + + if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { + if (dsunit) { + args->sunit = dsunit; + args->flags |= XFSMNT_RETERR; + } else { + args->sunit = vol_dsunit; + } + dswidth ? 
(args->swidth = dswidth) : + (args->swidth = vol_dswidth); + } else { + args->sunit = args->swidth = 0; + } + + return 0; +} + +int +xfs_showargs( + struct bhv_desc *bhv, + struct seq_file *m) +{ + static struct proc_xfs_info { + int flag; + char *str; + } xfs_info[] = { + /* the few simple ones we can get from the mount struct */ + { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, + { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, + { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, + { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, + { 0, NULL } + }; + struct proc_xfs_info *xfs_infop; + struct xfs_mount *mp = XFS_BHVTOM(bhv); + + for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) { + if (mp->m_flags & xfs_infop->flag) + seq_puts(m, xfs_infop->str); + } + + if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) + seq_printf(m, "," MNTOPT_BIOSIZE "=%d", mp->m_writeio_log); + + if (mp->m_logbufs > 0) + seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); + + if (mp->m_logbsize > 0) + seq_printf(m, "," MNTOPT_LOGBSIZE "=%d", mp->m_logbsize); + + if (mp->m_ddev_targp->pbr_dev != mp->m_logdev_targp->pbr_dev) + seq_printf(m, "," MNTOPT_LOGDEV "=%s", + bdevname(mp->m_logdev_targp->pbr_dev)); + + if (mp->m_rtdev_targp && + mp->m_ddev_targp->pbr_dev != mp->m_rtdev_targp->pbr_dev) + seq_printf(m, "," MNTOPT_RTDEV "=%s", + bdevname(mp->m_rtdev_targp->pbr_dev)); + + if (mp->m_dalign > 0) + seq_printf(m, "," MNTOPT_SUNIT "=%d", + (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); + + if (mp->m_swidth > 0) + seq_printf(m, "," MNTOPT_SWIDTH "=%d", + (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); + + return 0; +} + + +vfsops_t xfs_vfsops = { + BHV_IDENTITY_INIT(VFS_BHV_XFS,VFS_POSITION_XFS), + .vfs_parseargs = xfs_parseargs, + .vfs_showargs = xfs_showargs, + .vfs_mount = xfs_mount, + .vfs_unmount = xfs_unmount, + .vfs_mntupdate = xfs_mntupdate, + .vfs_root = xfs_root, + .vfs_statvfs = xfs_statvfs, + .vfs_sync = xfs_sync, + .vfs_vget = xfs_vget, + .vfs_dmapiops = (vfs_dmapiops_t)fs_nosys, + .vfs_quotactl = (vfs_quotactl_t)fs_nosys, + .vfs_get_inode = xfs_get_inode, + .vfs_init_vnode = xfs_initialize_vnode, + .vfs_force_shutdown = xfs_do_force_shutdown, +}; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/fs/xfs/xfs_vnodeops.c linux.22-ac2/fs/xfs/xfs_vnodeops.c --- linux.vanilla/fs/xfs/xfs_vnodeops.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/fs/xfs/xfs_vnodeops.c 2003-06-29 16:09:23.000000000 +0100 @@ -0,0 +1,4789 @@ +/* + * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ + +#include "xfs.h" +#include "xfs_macros.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_itable.h" +#include "xfs_btree.h" +#include "xfs_ialloc.h" +#include "xfs_alloc.h" +#include "xfs_attr_sf.h" +#include "xfs_dir_sf.h" +#include "xfs_dir2_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode_item.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_rw.h" +#include "xfs_error.h" +#include "xfs_bit.h" +#include "xfs_rtalloc.h" +#include "xfs_quota.h" +#include "xfs_utils.h" +#include "xfs_trans_space.h" +#include "xfs_dir_leaf.h" +#include "xfs_dmapi.h" +#include "xfs_mac.h" +#include "xfs_log_priv.h" + + +/* + * The maximum pathlen is 1024 bytes. Since the minimum file system + * blocksize is 512 bytes, we can get a max of 2 extents back from + * bmapi. + */ +#define SYMLINK_MAPS 2 + +extern int xfs_ioctl(bhv_desc_t *, struct inode *, struct file *, + unsigned int, unsigned long); + + +/* + * For xfs, we check that the file isn't too big to be opened by this kernel. + * No other open action is required for regular files. Devices are handled + * through the specfs file system, pipes through fifofs. Device and + * fifo vnodes are "wrapped" by specfs and fifofs vnodes, respectively, + * when a new vnode is first looked up or created. + */ +STATIC int +xfs_open( + bhv_desc_t *bdp, + cred_t *credp) +{ + int mode; + vnode_t *vp; + xfs_inode_t *ip; + + vp = BHV_TO_VNODE(bdp); + ip = XFS_BHVTOI(bdp); + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return XFS_ERROR(EIO); + + /* + * If it's a directory with any blocks, read-ahead block 0 + * as we're almost certain to have the next operation be a read there. 
+ */ + if (vp->v_type == VDIR && ip->i_d.di_nextents > 0) { + mode = xfs_ilock_map_shared(ip); + if (ip->i_d.di_nextents > 0) + (void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); + xfs_iunlock(ip, mode); + } + return 0; +} + + +/* + * xfs_getattr + */ +STATIC int +xfs_getattr( + bhv_desc_t *bdp, + vattr_t *vap, + int flags, + cred_t *credp) +{ + xfs_inode_t *ip; + xfs_mount_t *mp; + vnode_t *vp; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + if (!(flags & ATTR_LAZY)) + xfs_ilock(ip, XFS_ILOCK_SHARED); + + vap->va_size = ip->i_d.di_size; + if (vap->va_mask == XFS_AT_SIZE) { + if (!(flags & ATTR_LAZY)) + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; + } + vap->va_nblocks = + XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); + vap->va_fsid = mp->m_dev; +#if XFS_BIG_FILESYSTEMS + vap->va_nodeid = ip->i_ino + mp->m_inoadd; +#else + vap->va_nodeid = ip->i_ino; +#endif + vap->va_nlink = ip->i_d.di_nlink; + + /* + * Quick exit for non-stat callers + */ + if ((vap->va_mask & + ~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID| + XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0) { + if (!(flags & ATTR_LAZY)) + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; + } + + /* + * Copy from in-core inode. + */ + vap->va_type = vp->v_type; + vap->va_mode = ip->i_d.di_mode & MODEMASK; + vap->va_uid = ip->i_d.di_uid; + vap->va_gid = ip->i_d.di_gid; + vap->va_projid = ip->i_d.di_projid; + + /* + * Check vnode type block/char vs. everything else. + * Do it with bitmask because that's faster than looking + * for multiple values individually. + */ + if (((1 << vp->v_type) & ((1<va_rdev = 0; + + if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { + +#if 0 + /* Large block sizes confuse various + * user space programs, so letting the + * stripe size through is not a good + * idea for now. + */ + vap->va_blksize = mp->m_swidth ? + /* + * If the underlying volume is a stripe, then + * return the stripe width in bytes as the + * recommended I/O size. + */ + (mp->m_swidth << mp->m_sb.sb_blocklog) : + /* + * Return the largest of the preferred buffer + * sizes since doing small I/Os into larger + * buffers causes buffers to be decommissioned. + * The value returned is in bytes. + */ + (1 << (int)MAX(mp->m_readio_log, + mp->m_writeio_log)); + +#else + vap->va_blksize = + /* + * Return the largest of the preferred buffer + * sizes since doing small I/Os into larger + * buffers causes buffers to be decommissioned. + * The value returned is in bytes. + */ + 1 << (int)MAX(mp->m_readio_log, + mp->m_writeio_log); +#endif + } else { + + /* + * If the file blocks are being allocated from a + * realtime partition, then return the inode's + * realtime extent size or the realtime volume's + * extent size. + */ + vap->va_blksize = ip->i_d.di_extsize ? + (ip->i_d.di_extsize << mp->m_sb.sb_blocklog) : + (mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog); + } + } else { + vap->va_rdev = ip->i_df.if_u2.if_rdev; + vap->va_blksize = BLKDEV_IOSIZE; + } + + vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec; + vap->va_atime.tv_nsec = ip->i_d.di_atime.t_nsec; + vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec; + vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; + vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec; + vap->va_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; + + /* + * Exit for stat callers. See if any of the rest of the fields + * to be filled in are needed. 
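/*
 * A standalone restatement of the va_blksize choice above for non-realtime
 * files: report the larger of the two preferred buffered-I/O sizes, as a
 * power of two in bytes, so callers are steered toward full-sized I/O.
 * The log values in main() are examples, not real superblock fields.
 */
#include <stdio.h>

static unsigned preferred_iosize(unsigned readio_log, unsigned writeio_log)
{
        unsigned log = readio_log > writeio_log ? readio_log : writeio_log;

        return 1u << log;               /* bytes */
}

int main(void)
{
        /* 2^16 = 64 KiB reads, 2^15 = 32 KiB writes -> report 64 KiB */
        printf("%u bytes\n", preferred_iosize(16, 15));
        return 0;
}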
+ */ + if ((vap->va_mask & + (XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS| + XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0) { + if (!(flags & ATTR_LAZY)) + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; + } + /* + * convert di_flags to xflags + */ + vap->va_xflags = + ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? + XFS_XFLAG_REALTIME : 0) | + ((ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) ? + XFS_XFLAG_PREALLOC : 0) | + (XFS_IFORK_Q(ip) ? + XFS_XFLAG_HASATTR : 0); + vap->va_extsize = ip->i_d.di_extsize << mp->m_sb.sb_blocklog; + vap->va_nextents = + (ip->i_df.if_flags & XFS_IFEXTENTS) ? + ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) : + ip->i_d.di_nextents; + if (ip->i_afp != NULL) + vap->va_anextents = + (ip->i_afp->if_flags & XFS_IFEXTENTS) ? + ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) : + ip->i_d.di_anextents; + else + vap->va_anextents = 0; + vap->va_gencount = ip->i_d.di_gen; + vap->va_vcode = 0L; + + if (!(flags & ATTR_LAZY)) + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return 0; +} + + +/* + * xfs_setattr + */ +STATIC int +xfs_setattr( + bhv_desc_t *bdp, + vattr_t *vap, + int flags, + cred_t *credp) +{ + xfs_inode_t *ip; + xfs_trans_t *tp; + xfs_mount_t *mp; + int mask; + int code; + uint lock_flags; + uint commit_flags=0; + uid_t uid=0, iuid=0; + gid_t gid=0, igid=0; + int timeflags = 0; + vnode_t *vp; + xfs_prid_t projid=0, iprojid=0; + int mandlock_before, mandlock_after; + struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; + int file_owner; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + /* + * Cannot set certain attributes. + */ + mask = vap->va_mask; + if (mask & XFS_AT_NOSET) { + return XFS_ERROR(EINVAL); + } + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + /* + * Timestamps do not need to be logged and hence do not + * need to be done within a transaction. + */ + if (mask & XFS_AT_UPDTIMES) { + ASSERT((mask & ~XFS_AT_UPDTIMES) == 0); + timeflags = ((mask & XFS_AT_UPDATIME) ? XFS_ICHGTIME_ACC : 0) | + ((mask & XFS_AT_UPDCTIME) ? XFS_ICHGTIME_CHG : 0) | + ((mask & XFS_AT_UPDMTIME) ? XFS_ICHGTIME_MOD : 0); + xfs_ichgtime(ip, timeflags); + return 0; + } + + olddquot1 = olddquot2 = NULL; + udqp = gdqp = NULL; + + /* + * If disk quotas is on, we make sure that the dquots do exist on disk, + * before we start any other transactions. Trying to do this later + * is messy. We don't care to take a readlock to look at the ids + * in inode here, because we can't hold it across the trans_reserve. + * If the IDs do change before we take the ilock, we're covered + * because the i_*dquot fields will get updated anyway. + */ + if (XFS_IS_QUOTA_ON(mp) && (mask & (XFS_AT_UID|XFS_AT_GID))) { + uint qflags = 0; + + if (mask & XFS_AT_UID) { + uid = vap->va_uid; + qflags |= XFS_QMOPT_UQUOTA; + } else { + uid = ip->i_d.di_uid; + } + if (mask & XFS_AT_GID) { + gid = vap->va_gid; + qflags |= XFS_QMOPT_GQUOTA; + } else { + gid = ip->i_d.di_gid; + } + /* + * We take a reference when we initialize udqp and gdqp, + * so it is important that we never blindly double trip on + * the same variable. See xfs_create() for an example. + */ + ASSERT(udqp == NULL); + ASSERT(gdqp == NULL); + code = XFS_QM_DQVOPALLOC(mp, ip, uid,gid, qflags, &udqp, &gdqp); + if (code) + return (code); + } + + /* + * For the other attributes, we acquire the inode lock and + * first do an error checking pass. 
+ */ + tp = NULL; + lock_flags = XFS_ILOCK_EXCL; + if (!(mask & XFS_AT_SIZE)) { + if ((mask != (XFS_AT_CTIME|XFS_AT_ATIME|XFS_AT_MTIME)) || + (mp->m_flags & XFS_MOUNT_WSYNC)) { + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); + commit_flags = 0; + if ((code = xfs_trans_reserve(tp, 0, + XFS_ICHANGE_LOG_RES(mp), 0, + 0, 0))) { + lock_flags = 0; + goto error_return; + } + } + } else { + if (DM_EVENT_ENABLED (vp->v_vfsp, ip, DM_EVENT_TRUNCATE) && + !(flags & ATTR_DMI)) { + code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, bdp, + vap->va_size, 0, AT_DELAY_FLAG(flags), NULL); + if (code) { + lock_flags = 0; + goto error_return; + } + } + lock_flags |= XFS_IOLOCK_EXCL; + } + + xfs_ilock(ip, lock_flags); + + if (_MAC_XFS_IACCESS(ip, MACWRITE, credp)) { + code = XFS_ERROR(EACCES); + goto error_return; + } + + /* boolean: are we the file owner? */ + file_owner = (current->fsuid == ip->i_d.di_uid); + + /* + * Change various properties of a file. + * Only the owner or users with CAP_FOWNER + * capability may do these things. + */ + if (mask & + (XFS_AT_MODE|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_UID| + XFS_AT_GID|XFS_AT_PROJID)) { + /* + * CAP_FOWNER overrides the following restrictions: + * + * The user ID of the calling process must be equal + * to the file owner ID, except in cases where the + * CAP_FSETID capability is applicable. + */ + if (!file_owner && !capable(CAP_FOWNER)) { + code = XFS_ERROR(EPERM); + goto error_return; + } + + /* + * CAP_FSETID overrides the following restrictions: + * + * The effective user ID of the calling process shall match + * the file owner when setting the set-user-ID and + * set-group-ID bits on that file. + * + * The effective group ID or one of the supplementary group + * IDs of the calling process shall match the group owner of + * the file when setting the set-group-ID bit on that file + */ + if (mask & XFS_AT_MODE) { + mode_t m = 0; + + if ((vap->va_mode & ISUID) && !file_owner) + m |= ISUID; + if ((vap->va_mode & ISGID) && + !in_group_p((gid_t)ip->i_d.di_gid)) + m |= ISGID; +#if 0 + /* Linux allows this, Irix doesn't. */ + if ((vap->va_mode & ISVTX) && vp->v_type != VDIR) + m |= ISVTX; +#endif + if (m && !capable(CAP_FSETID)) + vap->va_mode &= ~m; + } + } + + /* + * Change file ownership. Must be the owner or privileged. + * If the system was configured with the "restricted_chown" + * option, the owner is not permitted to give away the file, + * and can change the group id only to a group of which he + * or she is a member. + */ + if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) { + /* + * These IDs could have changed since we last looked at them. + * But, we're assured that if the ownership did change + * while we didn't have the inode locked, inode's dquot(s) + * would have changed also. + */ + iuid = ip->i_d.di_uid; + iprojid = ip->i_d.di_projid; + igid = ip->i_d.di_gid; + gid = (mask & XFS_AT_GID) ? vap->va_gid : igid; + uid = (mask & XFS_AT_UID) ? vap->va_uid : iuid; + projid = (mask & XFS_AT_PROJID) ? (xfs_prid_t)vap->va_projid : + iprojid; + + /* + * CAP_CHOWN overrides the following restrictions: + * + * If _POSIX_CHOWN_RESTRICTED is defined, this capability + * shall override the restriction that a process cannot + * change the user ID of a file it owns and the restriction + * that the group ID supplied to the chown() function + * shall be equal to either the group ID or one of the + * supplementary group IDs of the calling process. + * + * XXX: How does restricted_chown affect projid? 
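+		 * (Concretely, for uid/gid: with restricted_chown set, a
+		 * chown(path, -1, gid) by the owner is allowed only when gid
+		 * is the caller's own group or one of its supplementary
+		 * groups, and changing the uid at all requires CAP_CHOWN.)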
+ */ + if (restricted_chown && + (iuid != uid || (igid != gid && + !in_group_p((gid_t)gid))) && + !capable(CAP_CHOWN)) { + code = XFS_ERROR(EPERM); + goto error_return; + } + /* + * Do a quota reservation only if uid or gid is actually + * going to change. + */ + if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || + (XFS_IS_GQUOTA_ON(mp) && igid != gid)) { + ASSERT(tp); + code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp, + capable(CAP_FOWNER) ? + XFS_QMOPT_FORCE_RES : 0); + if (code) /* out of quota */ + goto error_return; + } + } + + /* + * Truncate file. Must have write permission and not be a directory. + */ + if (mask & XFS_AT_SIZE) { + /* Short circuit the truncate case for zero length files */ + if ((vap->va_size == 0) && + (ip->i_d.di_size == 0) && (ip->i_d.di_nextents == 0)) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + lock_flags &= ~XFS_ILOCK_EXCL; + if (mask & XFS_AT_CTIME) + xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + code = 0; + goto error_return; + } + + if (vp->v_type == VDIR) { + code = XFS_ERROR(EISDIR); + goto error_return; + } else if (vp->v_type != VREG) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + /* + * Make sure that the dquots are attached to the inode. + */ + if ((code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED))) + goto error_return; + } + + /* + * Change file access or modified times. + */ + if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) { + if (!file_owner) { + if ((flags & ATTR_UTIME) && + !capable(CAP_FOWNER)) { + code = XFS_ERROR(EPERM); + goto error_return; + } + } + } + + /* + * Change extent size or realtime flag. + */ + if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) { + /* + * Can't change extent size if any extents are allocated. + */ + if (ip->i_d.di_nextents && (mask & XFS_AT_EXTSIZE) && + ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != + vap->va_extsize) ) { + code = XFS_ERROR(EINVAL); /* EFBIG? */ + goto error_return; + } + + /* + * Can't set extent size unless the file is marked, or + * about to be marked as a realtime file. + * + * This check will be removed when fixed size extents + * with buffered data writes is implemented. + * + */ + if ((mask & XFS_AT_EXTSIZE) && + ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != + vap->va_extsize) && + (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) || + ((mask & XFS_AT_XFLAGS) && + (vap->va_xflags & XFS_XFLAG_REALTIME))))) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + + /* + * Can't change realtime flag if any extents are allocated. + */ + if (ip->i_d.di_nextents && (mask & XFS_AT_XFLAGS) && + (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != + (vap->va_xflags & XFS_XFLAG_REALTIME)) { + code = XFS_ERROR(EINVAL); /* EFBIG? */ + goto error_return; + } + /* + * Extent size must be a multiple of the appropriate block + * size, if set at all. + */ + if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) { + xfs_extlen_t size; + + if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) || + ((mask & XFS_AT_XFLAGS) && + (vap->va_xflags & XFS_XFLAG_REALTIME))) { + size = mp->m_sb.sb_rextsize << + mp->m_sb.sb_blocklog; + } else { + size = mp->m_sb.sb_blocksize; + } + if (vap->va_extsize % size) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + } + /* + * If realtime flag is set then must have realtime data. + */ + if ((mask & XFS_AT_XFLAGS) && + (vap->va_xflags & XFS_XFLAG_REALTIME)) { + if ((mp->m_sb.sb_rblocks == 0) || + (mp->m_sb.sb_rextsize == 0) || + (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + } + } + + /* + * Now we can make the changes. 
Before we join the inode + * to the transaction, if XFS_AT_SIZE is set then take care of + * the part of the truncation that must be done without the + * inode lock. This needs to be done before joining the inode + * to the transaction, because the inode cannot be unlocked + * once it is a part of the transaction. + */ + if (mask & XFS_AT_SIZE) { + if (vap->va_size > ip->i_d.di_size) { + code = xfs_igrow_start(ip, vap->va_size, credp); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + } else if (vap->va_size < ip->i_d.di_size) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, + (xfs_fsize_t)vap->va_size); + code = 0; + } else { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + code = 0; + } + if (code) { + ASSERT(tp == NULL); + lock_flags &= ~XFS_ILOCK_EXCL; + ASSERT(lock_flags == XFS_IOLOCK_EXCL); + goto error_return; + } + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); + if ((code = xfs_trans_reserve(tp, 0, + XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return code; + } + commit_flags = XFS_TRANS_RELEASE_LOG_RES; + xfs_ilock(ip, XFS_ILOCK_EXCL); + } + + if (tp) { + xfs_trans_ijoin(tp, ip, lock_flags); + xfs_trans_ihold(tp, ip); + } + + /* determine whether mandatory locking mode changes */ + mandlock_before = MANDLOCK(vp, ip->i_d.di_mode); + + /* + * Truncate file. Must have write permission and not be a directory. + */ + if (mask & XFS_AT_SIZE) { + if (vap->va_size > ip->i_d.di_size) { + xfs_igrow_finish(tp, ip, vap->va_size, + !(flags & ATTR_DMI)); + } else if ((vap->va_size < ip->i_d.di_size) || + ((vap->va_size == 0) && ip->i_d.di_nextents)) { + /* + * signal a sync transaction unless + * we're truncating an already unlinked + * file on a wsync filesystem + */ + code = xfs_itruncate_finish(&tp, ip, + (xfs_fsize_t)vap->va_size, + XFS_DATA_FORK, + ((ip->i_d.di_nlink != 0 || + !(mp->m_flags & XFS_MOUNT_WSYNC)) + ? 1 : 0)); + if (code) { + goto abort_return; + } + } + /* + * Have to do this even if the file's size doesn't change. + */ + timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; + } + + /* + * Change file access modes. + */ + if (mask & XFS_AT_MODE) { + ip->i_d.di_mode &= IFMT; + ip->i_d.di_mode |= vap->va_mode & ~IFMT; + + xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); + timeflags |= XFS_ICHGTIME_CHG; + } + + /* + * Change file ownership. Must be the owner or privileged. + * If the system was configured with the "restricted_chown" + * option, the owner is not permitted to give away the file, + * and can change the group id only to a group of which he + * or she is a member. + */ + if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) { + /* + * CAP_FSETID overrides the following restrictions: + * + * The set-user-ID and set-group-ID bits of a file will be + * cleared upon successful return from chown() + */ + if ((ip->i_d.di_mode & (ISUID|ISGID)) && + !capable(CAP_FSETID)) { + ip->i_d.di_mode &= ~(ISUID|ISGID); + } + + /* + * Change the ownerships and register quota modifications + * in the transaction. 
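+		 * (XFS_QM_DQVOPCHOWN below swaps in the new dquot reference
+		 * and hands back the old one; that old reference is what
+		 * olddquot1/olddquot2 hold until after the commit, when they
+		 * are finally released.)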
+ */ + if (iuid != uid) { + if (XFS_IS_UQUOTA_ON(mp)) { + ASSERT(mask & XFS_AT_UID); + ASSERT(udqp); + olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip, + &ip->i_udquot, udqp); + } + ip->i_d.di_uid = uid; + } + if (igid != gid) { + if (XFS_IS_GQUOTA_ON(mp)) { + ASSERT(mask & XFS_AT_GID); + ASSERT(gdqp); + olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip, + &ip->i_gdquot, gdqp); + } + ip->i_d.di_gid = gid; + } + if (iprojid != projid) { + ip->i_d.di_projid = projid; + /* + * We may have to rev the inode as well as + * the superblock version number since projids didn't + * exist before DINODE_VERSION_2 and SB_VERSION_NLINK. + */ + if (ip->i_d.di_version == XFS_DINODE_VERSION_1) + xfs_bump_ino_vers2(tp, ip); + } + + xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); + timeflags |= XFS_ICHGTIME_CHG; + } + + + /* + * Change file access or modified times. + */ + if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) { + if (mask & XFS_AT_ATIME) { + ip->i_d.di_atime.t_sec = vap->va_atime.tv_sec; + ip->i_d.di_atime.t_nsec = vap->va_atime.tv_nsec; + ip->i_update_core = 1; + timeflags &= ~XFS_ICHGTIME_ACC; + } + if (mask & XFS_AT_MTIME) { + ip->i_d.di_mtime.t_sec = vap->va_mtime.tv_sec; + ip->i_d.di_mtime.t_nsec = vap->va_mtime.tv_nsec; + timeflags &= ~XFS_ICHGTIME_MOD; + timeflags |= XFS_ICHGTIME_CHG; + } + if (tp && (flags & ATTR_UTIME)) + xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); + } + + /* + * Change XFS-added attributes. + */ + if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) { + if (mask & XFS_AT_EXTSIZE) { + /* + * Converting bytes to fs blocks. + */ + ip->i_d.di_extsize = vap->va_extsize >> + mp->m_sb.sb_blocklog; + } + if (mask & XFS_AT_XFLAGS) { + ip->i_d.di_flags = 0; + if (vap->va_xflags & XFS_XFLAG_REALTIME) { + ip->i_d.di_flags |= XFS_DIFLAG_REALTIME; + ip->i_iocore.io_flags |= XFS_IOCORE_RT; + } + /* can't set PREALLOC this way, just ignore it */ + } + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + timeflags |= XFS_ICHGTIME_CHG; + } + + /* + * Change file inode change time only if XFS_AT_CTIME set + * AND we have been called by a DMI function. + */ + + if ( (flags & ATTR_DMI) && (mask & XFS_AT_CTIME) ) { + ip->i_d.di_ctime.t_sec = vap->va_ctime.tv_sec; + ip->i_d.di_ctime.t_nsec = vap->va_ctime.tv_nsec; + ip->i_update_core = 1; + timeflags &= ~XFS_ICHGTIME_CHG; + } + + /* + * Send out timestamp changes that need to be set to the + * current time. Not done when called by a DMI function. + */ + if (timeflags && !(flags & ATTR_DMI)) + xfs_ichgtime(ip, timeflags); + + XFS_STATS_INC(xfsstats.xs_ig_attrchg); + + /* + * If this is a synchronous mount, make sure that the + * transaction goes to disk before returning to the user. + * This is slightly sub-optimal in that truncates require + * two sync transactions instead of one for wsync filesytems. + * One for the truncate and one for the timestamps since we + * don't want to change the timestamps unless we're sure the + * truncate worked. Truncates are less than 1% of the laddis + * mix so this probably isn't worth the trouble to optimize. + */ + code = 0; + if (tp) { + if (mp->m_flags & XFS_MOUNT_WSYNC) + xfs_trans_set_sync(tp); + + code = xfs_trans_commit(tp, commit_flags, NULL); + } + + /* + * If the (regular) file's mandatory locking mode changed, then + * notify the vnode. We do this under the inode lock to prevent + * racing calls to vop_vnode_change. 
+ */ + mandlock_after = MANDLOCK(vp, ip->i_d.di_mode); + if (mandlock_before != mandlock_after) { + VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_ENF_LOCKING, + mandlock_after); + } + + xfs_iunlock(ip, lock_flags); + + /* + * Release any dquot(s) the inode had kept before chown. + */ + XFS_QM_DQRELE(mp, olddquot1); + XFS_QM_DQRELE(mp, olddquot2); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + if (code) { + return code; + } + + if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_ATTRIBUTE) && + !(flags & ATTR_DMI)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, bdp, DM_RIGHT_NULL, + NULL, DM_RIGHT_NULL, NULL, NULL, + 0, 0, AT_DELAY_FLAG(flags)); + } + return 0; + + abort_return: + commit_flags |= XFS_TRANS_ABORT; + /* FALLTHROUGH */ + error_return: + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + if (tp) { + xfs_trans_cancel(tp, commit_flags); + } + if (lock_flags != 0) { + xfs_iunlock(ip, lock_flags); + } + return code; +} + + +/* + * xfs_access + * Null conversion from vnode mode bits to inode mode bits, as in efs. + */ +STATIC int +xfs_access( + bhv_desc_t *bdp, + int mode, + cred_t *credp) +{ + xfs_inode_t *ip; + int error; + + vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__, + (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + xfs_ilock(ip, XFS_ILOCK_SHARED); + error = xfs_iaccess(ip, mode, credp); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return error; +} + + +/* + * xfs_readlink + * + */ +STATIC int +xfs_readlink( + bhv_desc_t *bdp, + uio_t *uiop, + cred_t *credp) +{ + xfs_inode_t *ip; + int count; + xfs_off_t offset; + int pathlen; + vnode_t *vp; + int error = 0; + xfs_mount_t *mp; + int nmaps; + xfs_bmbt_irec_t mval[SYMLINK_MAPS]; + xfs_daddr_t d; + int byte_cnt; + int n; + xfs_buf_t *bp; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + xfs_ilock(ip, XFS_ILOCK_SHARED); + + ASSERT((ip->i_d.di_mode & IFMT) == IFLNK); + + offset = uiop->uio_offset; + count = uiop->uio_resid; + + if (offset < 0) { + error = XFS_ERROR(EINVAL); + goto error_return; + } + if (count <= 0) { + error = 0; + goto error_return; + } + + if (!(uiop->uio_fmode & FINVIS)) { + xfs_ichgtime(ip, XFS_ICHGTIME_ACC); + } + + /* + * See if the symlink is stored inline. + */ + pathlen = (int)ip->i_d.di_size; + + if (ip->i_df.if_flags & XFS_IFINLINE) { + error = uiomove(ip->i_df.if_u1.if_data, pathlen, UIO_READ, uiop); + } + else { + /* + * Symlink not inline. Call bmap to get it in. + */ + nmaps = SYMLINK_MAPS; + + error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), + 0, NULL, 0, mval, &nmaps, NULL); + + if (error) { + goto error_return; + } + + for (n = 0; n < nmaps; n++) { + d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); + byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); + bp = xfs_buf_read(mp->m_ddev_targp, d, + BTOBB(byte_cnt), 0); + error = XFS_BUF_GETERROR(bp); + if (error) { + xfs_ioerror_alert("xfs_readlink", + ip->i_mount, bp, XFS_BUF_ADDR(bp)); + xfs_buf_relse(bp); + goto error_return; + } + if (pathlen < byte_cnt) + byte_cnt = pathlen; + pathlen -= byte_cnt; + + error = uiomove(XFS_BUF_PTR(bp), byte_cnt, + UIO_READ, uiop); + xfs_buf_relse (bp); + } + + } + + +error_return: + + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + return error; +} + + +/* + * xfs_fsync + * + * This is called to sync the inode and its data out to disk. + * We need to hold the I/O lock while flushing the data, and + * the inode lock while flushing the inode. 
The inode lock CANNOT + * be held while flushing the data, so acquire after we're done + * with that. + */ +STATIC int +xfs_fsync( + bhv_desc_t *bdp, + int flag, + cred_t *credp, + xfs_off_t start, + xfs_off_t stop) +{ + xfs_inode_t *ip; + int error; + int error2; + int syncall; + vnode_t *vp; + xfs_trans_t *tp; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + + ASSERT(start >= 0 && stop >= -1); + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return XFS_ERROR(EIO); + + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + syncall = error = error2 = 0; + + if (stop == -1) { + ASSERT(start >= 0); + if (start == 0) + syncall = 1; + stop = xfs_file_last_byte(ip); + } + + /* + * If we're invalidating, always flush since we want to + * tear things down. Otherwise, don't flush anything if + * we're not dirty. + */ + if (flag & FSYNC_INVAL) { + if (ip->i_df.if_flags & XFS_IFEXTENTS && + ip->i_df.if_bytes > 0) { + VOP_FLUSHINVAL_PAGES(vp, start, -1, FI_REMAPF_LOCKED); + } + ASSERT(syncall == 0 || (VN_CACHED(vp) == 0)); + } else { + /* + * In the non-invalidating case, calls to fsync() do not + * flush all the dirty mmap'd pages. That requires a + * call to msync(). + */ + VOP_FLUSH_PAGES(vp, start, -1, + (flag & FSYNC_WAIT) ? 0 : XFS_B_ASYNC, + FI_NONE, error2); + } + + if (error2) { + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return XFS_ERROR(error2); + } + + /* + * We always need to make sure that the required inode state + * is safe on disk. The vnode might be clean but because + * of committed transactions that haven't hit the disk yet. + * Likewise, there could be unflushed non-transactional + * changes to the inode core that have to go to disk. + * + * The following code depends on one assumption: that + * any transaction that changes an inode logs the core + * because it has to change some field in the inode core + * (typically nextents or nblocks). That assumption + * implies that any transactions against an inode will + * catch any non-transactional updates. If inode-altering + * transactions exist that violate this assumption, the + * code breaks. Right now, it figures that if the involved + * update_* field is clear and the inode is unpinned, the + * inode is clean. Either it's been flushed or it's been + * committed and the commit has hit the disk unpinning the inode. + * (Note that xfs_inode_item_format() called at commit clears + * the update_* fields.) + */ + xfs_ilock(ip, XFS_ILOCK_SHARED); + + /* If we are flushing data then we care about update_size + * being set, otherwise we care about update_core + */ + if ((flag & FSYNC_DATA) ? + (ip->i_update_size == 0) : + (ip->i_update_core == 0)) { + /* + * Timestamps/size haven't changed since last inode + * flush or inode transaction commit. That means + * either nothing got written or a transaction + * committed which caught the updates. If the + * latter happened and the transaction hasn't + * hit the disk yet, the inode will be still + * be pinned. If it is, force the log. + */ + + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED); + + if (xfs_ipincount(ip)) { + xfs_log_force(ip->i_mount, (xfs_lsn_t)0, + XFS_LOG_FORCE | + ((flag & FSYNC_WAIT) + ? XFS_LOG_SYNC : 0)); + } + error = 0; + } else { + /* + * Kick off a transaction to log the inode + * core to get the updates. Make it + * sync if FSYNC_WAIT is passed in (which + * is done by everybody but specfs). The + * sync transaction will also force the log. 
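+		 * (To summarize the two paths: a clean but still-pinned inode
+		 * only needs the log force above, while dirty timestamps or a
+		 * dirty size need this small transaction so the inode core
+		 * actually gets logged; FSYNC_WAIT makes either path
+		 * synchronous.)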
+ */ + xfs_iunlock(ip, XFS_ILOCK_SHARED); + tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS); + if ((error = xfs_trans_reserve(tp, 0, + XFS_FSYNC_TS_LOG_RES(ip->i_mount), + 0, 0, 0))) { + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + + /* + * Note - it's possible that we might have pushed + * ourselves out of the way during trans_reserve + * which would flush the inode. But there's no + * guarantee that the inode buffer has actually + * gone out yet (it's delwri). Plus the buffer + * could be pinned anyway if it's part of an + * inode in another recent transaction. So we + * play it safe and fire off the transaction anyway. + */ + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + if (flag & FSYNC_WAIT) + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0, NULL); + + xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL); + } + return error; +} + + +#if 0 +/* + * This is a utility routine for xfs_inactive. It is called when a + * transaction attempting to free up the disk space for a file encounters + * an error. It cancels the old transaction and starts up a new one + * to be used to free up the inode. It also sets the inode size and extent + * counts to 0 and frees up any memory being used to store inline data, + * extents, or btree roots. + */ +STATIC void +xfs_itruncate_cleanup( + xfs_trans_t **tpp, + xfs_inode_t *ip, + int commit_flags, + int fork) +{ + xfs_mount_t *mp; + /* REFERENCED */ + int error; + + mp = ip->i_mount; + if (*tpp) { + xfs_trans_cancel(*tpp, commit_flags | XFS_TRANS_ABORT); + } + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + *tpp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); + error = xfs_trans_reserve(*tpp, 0, XFS_IFREE_LOG_RES(mp), 0, 0, + XFS_DEFAULT_LOG_COUNT); + if (error) { + return; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ijoin(*tpp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(*tpp, ip); + + xfs_idestroy_fork(ip, fork); + + if (fork == XFS_DATA_FORK) { + ip->i_d.di_nblocks = 0; + ip->i_d.di_nextents = 0; + ip->i_d.di_size = 0; + } else { + ip->i_d.di_anextents = 0; + } + xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE); +} +#endif + +/* + * This is called by xfs_inactive to free any blocks beyond eof, + * when the link count isn't zero. + */ +STATIC int +xfs_inactive_free_eofblocks( + xfs_mount_t *mp, + xfs_inode_t *ip) +{ + xfs_trans_t *tp; + int error; + xfs_fileoff_t end_fsb; + xfs_fileoff_t last_fsb; + xfs_filblks_t map_len; + int nimaps; + xfs_bmbt_irec_t imap; + + /* + * Figure out if there are any blocks beyond the end + * of the file. If not, then there is nothing to do. + */ + end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_d.di_size)); + last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAX_FILE_OFFSET); + map_len = last_fsb - end_fsb; + if (map_len <= 0) + return (0); + + nimaps = 1; + xfs_ilock(ip, XFS_ILOCK_SHARED); + error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0, + NULL, 0, &imap, &nimaps, NULL); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + if (!error && (nimaps != 0) && + (imap.br_startblock != HOLESTARTBLOCK)) { + /* + * Attach the dquots to the inode up front. + */ + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return (error); + + /* + * There are blocks after the end of file. + * Free them up now by truncating the file to + * its current size. 
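+		 * (These blocks are typically left over from speculative
+		 * allocation beyond EOF done for sequential writers; the
+		 * "truncate" to di_size below releases that space without
+		 * changing the file size itself.)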
+ */ + tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); + + /* + * Do the xfs_itruncate_start() call before + * reserving any log space because + * itruncate_start will call into the buffer + * cache and we can't + * do that within a transaction. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, + ip->i_d.di_size); + + error = xfs_trans_reserve(tp, 0, + XFS_ITRUNCATE_LOG_RES(mp), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + if (error) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return (error); + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, + XFS_IOLOCK_EXCL | + XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + error = xfs_itruncate_finish(&tp, ip, + ip->i_d.di_size, + XFS_DATA_FORK, + 0); + /* + * If we get an error at this point we + * simply don't bother truncating the file. + */ + if (error) { + xfs_trans_cancel(tp, + (XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT)); + } else { + error = xfs_trans_commit(tp, + XFS_TRANS_RELEASE_LOG_RES, + NULL); + } + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + } + return (error); +} + +/* + * Free a symlink that has blocks associated with it. + */ +STATIC int +xfs_inactive_symlink_rmt( + xfs_inode_t *ip, + xfs_trans_t **tpp) +{ + xfs_buf_t *bp; + int committed; + int done; + int error; + xfs_fsblock_t first_block; + xfs_bmap_free_t free_list; + int i; + xfs_mount_t *mp; + xfs_bmbt_irec_t mval[SYMLINK_MAPS]; + int nmaps; + xfs_trans_t *ntp; + int size; + xfs_trans_t *tp; + + tp = *tpp; + mp = ip->i_mount; + ASSERT(ip->i_d.di_size > XFS_IFORK_DSIZE(ip)); + /* + * We're freeing a symlink that has some + * blocks allocated to it. Free the + * blocks here. We know that we've got + * either 1 or 2 extents and that we can + * free them all in one bunmapi call. + */ + ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2); + if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + *tpp = NULL; + return error; + } + /* + * Lock the inode, fix the size, and join it to the transaction. + * Hold it so in the normal path, we still have it locked for + * the second transaction. In the error paths we need it + * held so the cancel won't rele it, see below. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + size = (int)ip->i_d.di_size; + ip->i_d.di_size = 0; + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + /* + * Find the block(s) so we can inval and unmap them. + */ + done = 0; + XFS_BMAP_INIT(&free_list, &first_block); + nmaps = sizeof(mval) / sizeof(mval[0]); + if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size), + XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps, + &free_list))) + goto error0; + /* + * Invalidate the block(s). + */ + for (i = 0; i < nmaps; i++) { + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, mval[i].br_startblock), + XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0); + xfs_trans_binval(tp, bp); + } + /* + * Unmap the dead block(s) to the free_list. + */ + if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps, + &first_block, &free_list, &done))) + goto error1; + ASSERT(done); + /* + * Commit the first transaction. This logs the EFI and the inode. 
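+	 * (EFI is the extent-free-intent log item; the matching EFD "done"
+	 * items end up in the follow-on transaction that xfs_bmap_finish
+	 * hands back, as described below.)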
+ */ + if ((error = xfs_bmap_finish(&tp, &free_list, first_block, &committed))) + goto error1; + /* + * The transaction must have been committed, since there were + * actually extents freed by xfs_bunmapi. See xfs_bmap_finish. + * The new tp has the extent freeing and EFDs. + */ + ASSERT(committed); + /* + * The first xact was committed, so add the inode to the new one. + * Mark it dirty so it will be logged and moved forward in the log as + * part of every commit. + */ + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + /* + * Get a new, empty transaction to return to our caller. + */ + ntp = xfs_trans_dup(tp); + /* + * Commit the transaction containing extent freeing and EFD's. + * If we get an error on the commit here or on the reserve below, + * we need to unlock the inode since the new transaction doesn't + * have the inode attached. + */ + error = xfs_trans_commit(tp, 0, NULL); + tp = ntp; + if (error) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + goto error0; + } + /* + * Remove the memory for extent descriptions (just bookkeeping). + */ + if (ip->i_df.if_bytes) + xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK); + ASSERT(ip->i_df.if_bytes == 0); + /* + * Put an itruncate log reservation in the new transaction + * for our caller. + */ + if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + goto error0; + } + /* + * Return with the inode locked but not joined to the transaction. + */ + *tpp = tp; + return 0; + + error1: + xfs_bmap_cancel(&free_list); + error0: + /* + * Have to come here with the inode locked and either + * (held and in the transaction) or (not in the transaction). + * If the inode isn't held then cancel would iput it, but + * that's wrong since this is inactive and the vnode ref + * count is 0 already. + * Cancel won't do anything to the inode if held, but it still + * needs to be locked until the cancel is done, if it was + * joined to the transaction. + */ + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + *tpp = NULL; + return error; + +} + +STATIC int +xfs_inactive_symlink_local( + xfs_inode_t *ip, + xfs_trans_t **tpp) +{ + int error; + + ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip)); + /* + * We're freeing a symlink which fit into + * the inode. Just free the memory used + * to hold the old symlink. + */ + error = xfs_trans_reserve(*tpp, 0, + XFS_ITRUNCATE_LOG_RES(ip->i_mount), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + + if (error) { + xfs_trans_cancel(*tpp, 0); + *tpp = NULL; + return (error); + } + xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + + /* + * Zero length symlinks _can_ exist. 
+ */ + if (ip->i_df.if_bytes > 0) { + xfs_idata_realloc(ip, + -(ip->i_df.if_bytes), + XFS_DATA_FORK); + ASSERT(ip->i_df.if_bytes == 0); + } + return (0); +} + +/* + * + */ +STATIC int +xfs_inactive_attrs( + xfs_inode_t *ip, + xfs_trans_t **tpp, + int *commitflags) +{ + xfs_trans_t *tp; + int error; + xfs_mount_t *mp; + + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); + tp = *tpp; + mp = ip->i_mount; + ASSERT(ip->i_d.di_forkoff != 0); + xfs_trans_commit(tp, *commitflags, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + *commitflags = 0; + + error = xfs_attr_inactive(ip); + if (error) { + *tpp = NULL; + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return (error); /* goto out*/ + } + + tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); + error = xfs_trans_reserve(tp, 0, + XFS_IFREE_LOG_RES(mp), + 0, 0, + XFS_DEFAULT_LOG_COUNT); + if (error) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + *tpp = NULL; + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return (error); + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_idestroy_fork(ip, XFS_ATTR_FORK); + + ASSERT(ip->i_d.di_anextents == 0); + + *tpp = tp; + return (0); +} + +STATIC int +xfs_release( + bhv_desc_t *bdp) +{ + xfs_inode_t *ip; + vnode_t *vp; + xfs_mount_t *mp; + int error; + + vp = BHV_TO_VNODE(bdp); + ip = XFS_BHVTOI(bdp); + + if ((vp->v_type != VREG) || (ip->i_d.di_mode == 0)) { + return 0; + } + + /* If this is a read-only mount, don't do this (would generate I/O) */ + if (vp->v_vfsp->vfs_flag & VFS_RDONLY) + return 0; + + /* If we are in the NFS reference cache then don't do this now */ + if (ip->i_refcache) + return 0; + + mp = ip->i_mount; + + if (ip->i_d.di_nlink != 0) { + if ((((ip->i_d.di_mode & IFMT) == IFREG) && + ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && + (ip->i_df.if_flags & XFS_IFEXTENTS)) && + (!(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC))) { + if ((error = xfs_inactive_free_eofblocks(mp, ip))) + return (error); + /* Update linux inode block count after free above */ + LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, + ip->i_d.di_nblocks + ip->i_delayed_blks); + } + } + + return 0; +} + +/* + * xfs_inactive + * + * This is called when the vnode reference count for the vnode + * goes to zero. If the file has been unlinked, then it must + * now be truncated. Also, we clear all of the read-ahead state + * kept for the inode here since the file is now closed. + */ +STATIC int +xfs_inactive( + bhv_desc_t *bdp, + cred_t *credp) +{ + xfs_inode_t *ip; + vnode_t *vp; + xfs_trans_t *tp; + xfs_mount_t *mp; + int error; + int commit_flags; + int truncate; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + + /* + * If the inode is already free, then there can be nothing + * to clean up here. + */ + if (ip->i_d.di_mode == 0) { + ASSERT(ip->i_df.if_real_bytes == 0); + ASSERT(ip->i_df.if_broot_bytes == 0); + return VN_INACTIVE_CACHE; + } + + /* + * Only do a truncate if it's a regular file with + * some actual space in it. It's OK to look at the + * inode's fields without the lock because we're the + * only one with a reference to the inode. 
+ */ + truncate = ((ip->i_d.di_nlink == 0) && + ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0)) && + ((ip->i_d.di_mode & IFMT) == IFREG)); + + mp = ip->i_mount; + + if (ip->i_d.di_nlink == 0 && + DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_DESTROY)) { + (void) XFS_SEND_DESTROY(mp, bdp, DM_RIGHT_NULL); + } + + error = 0; + + /* If this is a read-only mount, don't do this (would generate I/O) */ + if (vp->v_vfsp->vfs_flag & VFS_RDONLY) + goto out; + + if (ip->i_d.di_nlink != 0) { + if ((((ip->i_d.di_mode & IFMT) == IFREG) && + ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && + (ip->i_df.if_flags & XFS_IFEXTENTS)) && + (!(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) || + (ip->i_delayed_blks != 0))) { + if ((error = xfs_inactive_free_eofblocks(mp, ip))) + return (VN_INACTIVE_CACHE); + /* Update linux inode block count after free above */ + LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, + ip->i_d.di_nblocks + ip->i_delayed_blks); + } + goto out; + } + + ASSERT(ip->i_d.di_nlink == 0); + + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return (VN_INACTIVE_CACHE); + + tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); + if (truncate) { + /* + * Do the xfs_itruncate_start() call before + * reserving any log space because itruncate_start + * will call into the buffer cache and we can't + * do that within a transaction. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0); + + error = xfs_trans_reserve(tp, 0, + XFS_ITRUNCATE_LOG_RES(mp), + 0, XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + if (error) { + /* Don't call itruncate_cleanup */ + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return (VN_INACTIVE_CACHE); + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + /* + * normally, we have to run xfs_itruncate_finish sync. + * But if filesystem is wsync and we're in the inactive + * path, then we know that nlink == 0, and that the + * xaction that made nlink == 0 is permanently committed + * since xfs_remove runs as a synchronous transaction. + */ + error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, + (!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0)); + commit_flags = XFS_TRANS_RELEASE_LOG_RES; + + if (error) { + xfs_trans_cancel(tp, commit_flags | XFS_TRANS_ABORT); + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + return (VN_INACTIVE_CACHE); + } + } else if ((ip->i_d.di_mode & IFMT) == IFLNK) { + + /* + * If we get an error while cleaning up a + * symlink we bail out. + */ + error = (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) ? + xfs_inactive_symlink_rmt(ip, &tp) : + xfs_inactive_symlink_local(ip, &tp); + + if (error) { + ASSERT(tp == NULL); + return (VN_INACTIVE_CACHE); + } + + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + commit_flags = XFS_TRANS_RELEASE_LOG_RES; + + } else { + error = xfs_trans_reserve(tp, 0, + XFS_IFREE_LOG_RES(mp), + 0, 0, + XFS_DEFAULT_LOG_COUNT); + if (error) { + ASSERT(XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + return (VN_INACTIVE_CACHE); + } + + xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + commit_flags = 0; + } + + /* + * If there are attributes associated with the file + * then blow them away now. The code calls a routine + * that recursively deconstructs the attribute fork. 
+ * We need to just commit the current transaction + * because we can't use it for xfs_attr_inactive(). + */ + if (ip->i_d.di_anextents > 0) { + error = xfs_inactive_attrs(ip, &tp, &commit_flags); + /* + * If we got an error, the transaction is already + * cancelled, and the inode is unlocked. Just get out. + */ + if (error) + return (VN_INACTIVE_CACHE); + } else if (ip->i_afp) { + xfs_idestroy_fork(ip, XFS_ATTR_FORK); + } + + /* + * Free the inode. + */ + error = xfs_ifree(tp, ip); + if (error) { + /* + * If we fail to free the inode, shut down. The cancel + * might do that, we need to make sure. Otherwise the + * inode might be lost for a long time or forever. + */ + if (!XFS_FORCED_SHUTDOWN(mp)) { + cmn_err(CE_NOTE, + "xfs_inactive: xfs_ifree() returned an error = %d on %s", + error, mp->m_fsname); + xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR); + } + xfs_trans_cancel(tp, commit_flags | XFS_TRANS_ABORT); + } else { + /* + * Credit the quota account(s). The inode is gone. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1); + + /* + * Just ignore errors at this point. There is + * nothing we can do except to try to keep going. + */ + (void) xfs_trans_commit(tp, commit_flags, NULL); + } + /* + * Release the dquots held by inode, if any. + */ + XFS_QM_DQDETACH(mp, ip); + + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); + + out: + return VN_INACTIVE_CACHE; +} + + +/* + * xfs_lookup + */ +STATIC int +xfs_lookup( + bhv_desc_t *dir_bdp, + vname_t *dentry, + vnode_t **vpp, + int flags, + vnode_t *rdir, + cred_t *credp) +{ + xfs_inode_t *dp, *ip; + xfs_ino_t e_inum; + int error; + uint lock_mode; + vnode_t *dir_vp; + + dir_vp = BHV_TO_VNODE(dir_bdp); + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + dp = XFS_BHVTOI(dir_bdp); + + if (XFS_FORCED_SHUTDOWN(dp->i_mount)) + return XFS_ERROR(EIO); + + lock_mode = xfs_ilock_map_shared(dp); + error = xfs_dir_lookup_int(dir_bdp, lock_mode, dentry, &e_inum, &ip); + if (!error) { + *vpp = XFS_ITOV(ip); + ITRACE(ip); + } + xfs_iunlock_map_shared(dp, lock_mode); + return error; +} + + +#define XFS_CREATE_NEW_MAXTRIES 10000 + +/* + * xfs_create (create a new file). + */ +STATIC int +xfs_create( + bhv_desc_t *dir_bdp, + vname_t *dentry, + vattr_t *vap, + vnode_t **vpp, + cred_t *credp) +{ + char *name = VNAME(dentry); + vnode_t *dir_vp; + xfs_inode_t *dp, *ip; + vnode_t *vp=NULL; + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_dev_t rdev; + int error; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + boolean_t dp_joined_to_trans; + int dm_event_sent = 0; + uint cancel_flags; + int committed; + xfs_prid_t prid; + struct xfs_dquot *udqp, *gdqp; + uint resblks; + int dm_di_mode; + int namelen; + + ASSERT(!*vpp); + dir_vp = BHV_TO_VNODE(dir_bdp); + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + dp = XFS_BHVTOI(dir_bdp); + mp = dp->i_mount; + + dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); + namelen = VNAMELEN(dentry); + + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, + dir_bdp, DM_RIGHT_NULL, NULL, + DM_RIGHT_NULL, name, NULL, + dm_di_mode, 0, 0); + + if (error) + return error; + dm_event_sent = 1; + } + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + /* Return through std_return after this point. */ + + udqp = gdqp = NULL; + if (vap->va_mask & XFS_AT_PROJID) + prid = (xfs_prid_t)vap->va_projid; + else + prid = (xfs_prid_t)dfltprid; + + /* + * Make sure that we have allocated dquot(s) on disk. 
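+	 * (As in xfs_setattr, this is done before the transaction is
+	 * allocated and before any inode locks are taken, since allocating
+	 * dquots later, with locks or log space already held, gets messy.)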
+ */ + error = XFS_QM_DQVOPALLOC(mp, dp, current->fsuid, current->fsgid, + XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp); + if (error) + goto std_return; + + ip = NULL; + dp_joined_to_trans = B_FALSE; + + tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + resblks = XFS_CREATE_SPACE_RES(mp, namelen); + /* + * Initially assume that the file does not exist and + * reserve the resources for that case. If that is not + * the case we'll drop the one we have and get a more + * appropriate transaction later. + */ + error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT); + if (error == ENOSPC) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_CREATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT); + } + if (error) { + cancel_flags = 0; + dp = NULL; + goto error_return; + } + + xfs_ilock(dp, XFS_ILOCK_EXCL); + + XFS_BMAP_INIT(&free_list, &first_block); + + ASSERT(ip == NULL); + + /* + * Reserve disk quota and the inode. + */ + error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); + if (error) + goto error_return; + + if (resblks == 0 && + (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen))) + goto error_return; + rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0; + error = xfs_dir_ialloc(&tp, dp, + MAKEIMODE(vap->va_type,vap->va_mode), 1, + rdev, credp, prid, resblks > 0, + &ip, &committed); + if (error) { + if (error == ENOSPC) + goto error_return; + goto abort_return; + } + ITRACE(ip); + + /* + * At this point, we've gotten a newly allocated inode. + * It is locked (and joined to the transaction). + */ + + ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); + + /* + * Now we join the directory inode to the transaction. + * We do not do it earlier because xfs_dir_ialloc + * might commit the previous transaction (and release + * all the locks). + */ + + VN_HOLD(dir_vp); + xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); + dp_joined_to_trans = B_TRUE; + + error = XFS_DIR_CREATENAME(mp, tp, dp, name, namelen, ip->i_ino, + &first_block, &free_list, + resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0); + if (error) { + ASSERT(error != ENOSPC); + goto abort_return; + } + xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + + /* + * If this is a synchronous mount, make sure that the + * create transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + dp->i_gen++; + + /* + * Attach the dquot(s) to the inodes and modify them incore. + * These ids of the inode couldn't have changed since the new + * inode has been locked ever since it was created. + */ + XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp); + + /* + * xfs_trans_commit normally decrements the vnode ref count + * when it unlocks the inode. Since we want to return the + * vnode to the caller, we bump the vnode ref count now. + */ + IHOLD(ip); + vp = XFS_ITOV(ip); + + error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); + if (error) { + xfs_bmap_cancel(&free_list); + goto abort_rele; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (error) { + IRELE(ip); + tp = NULL; + goto error_return; + } + + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + /* + * Propogate the fact that the vnode changed after the + * xfs_inode locks have been released. 
+ */ + VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3); + + *vpp = vp; + + /* Fallthrough to std_return with error = 0 */ + +std_return: + if ( (*vpp || (error != 0 && dm_event_sent != 0)) && + DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp), + DM_EVENT_POSTCREATE)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, + dir_bdp, DM_RIGHT_NULL, + *vpp ? vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops):NULL, + DM_RIGHT_NULL, name, NULL, + dm_di_mode, error, 0); + } + return error; + + abort_return: + cancel_flags |= XFS_TRANS_ABORT; + /* FALLTHROUGH */ + error_return: + + if (tp != NULL) + xfs_trans_cancel(tp, cancel_flags); + + if (!dp_joined_to_trans && (dp != NULL)) + xfs_iunlock(dp, XFS_ILOCK_EXCL); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + goto std_return; + + abort_rele: + /* + * Wait until after the current transaction is aborted to + * release the inode. This prevents recursive transactions + * and deadlocks from xfs_inactive. + */ + cancel_flags |= XFS_TRANS_ABORT; + xfs_trans_cancel(tp, cancel_flags); + IRELE(ip); + + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + goto std_return; +} + +#ifdef DEBUG +/* + * Some counters to see if (and how often) we are hitting some deadlock + * prevention code paths. + */ + +int xfs_rm_locks; +int xfs_rm_lock_delays; +int xfs_rm_attempts; +#endif + +/* + * The following routine will lock the inodes associated with the + * directory and the named entry in the directory. The locks are + * acquired in increasing inode number. + * + * If the entry is "..", then only the directory is locked. The + * vnode ref count will still include that from the .. entry in + * this case. + * + * There is a deadlock we need to worry about. If the locked directory is + * in the AIL, it might be blocking up the log. The next inode we lock + * could be already locked by another thread waiting for log space (e.g + * a permanent log reservation with a long running transaction (see + * xfs_itruncate_finish)). To solve this, we must check if the directory + * is in the ail and use lock_nowait. If we can't lock, we need to + * drop the inode lock on the directory and try again. xfs_iunlock will + * potentially push the tail if we were holding up the log. + */ +STATIC int +xfs_lock_dir_and_entry( + xfs_inode_t *dp, + vname_t *dentry, + xfs_inode_t *ip) /* inode of entry 'name' */ +{ + int attempts; + xfs_ino_t e_inum; + xfs_inode_t *ips[2]; + xfs_log_item_t *lp; + +#ifdef DEBUG + xfs_rm_locks++; +#endif + attempts = 0; + +again: + xfs_ilock(dp, XFS_ILOCK_EXCL); + + e_inum = ip->i_ino; + + ITRACE(ip); + + /* + * We want to lock in increasing inum. Since we've already + * acquired the lock on the directory, we may need to release + * if if the inum of the entry turns out to be less. + */ + if (e_inum > dp->i_ino) { + /* + * We are already in the right order, so just + * lock on the inode of the entry. + * We need to use nowait if dp is in the AIL. + */ + + lp = (xfs_log_item_t *)dp->i_itemp; + if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { + if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { + attempts++; +#ifdef DEBUG + xfs_rm_attempts++; +#endif + + /* + * Unlock dp and try again. + * xfs_iunlock will try to push the tail + * if the inode is in the AIL. 
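+				 * (The cycle being avoided: we hold dp, dp
+				 * sits in the AIL holding up the log tail,
+				 * and whoever holds ip may itself be asleep
+				 * in a log reservation waiting for that tail
+				 * to move.  Blocking on ip here could then
+				 * wait forever, so we back off instead.)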
+ */ + + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + if ((attempts % 5) == 0) { + delay(1); /* Don't just spin the CPU */ +#ifdef DEBUG + xfs_rm_lock_delays++; +#endif + } + goto again; + } + } else { + xfs_ilock(ip, XFS_ILOCK_EXCL); + } + } else if (e_inum < dp->i_ino) { + xfs_iunlock(dp, XFS_ILOCK_EXCL); + + ips[0] = ip; + ips[1] = dp; + xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); + } + /* else e_inum == dp->i_ino */ + /* This can happen if we're asked to lock /x/.. + * the entry is "..", which is also the parent directory. + */ + + return 0; +} + +#ifdef DEBUG +int xfs_locked_n; +int xfs_small_retries; +int xfs_middle_retries; +int xfs_lots_retries; +int xfs_lock_delays; +#endif + +/* + * The following routine will lock n inodes in exclusive mode. + * We assume the caller calls us with the inodes in i_ino order. + * + * We need to detect deadlock where an inode that we lock + * is in the AIL and we start waiting for another inode that is locked + * by a thread in a long running transaction (such as truncate). This can + * result in deadlock since the long running trans might need to wait + * for the inode we just locked in order to push the tail and free space + * in the log. + */ +void +xfs_lock_inodes( + xfs_inode_t **ips, + int inodes, + int first_locked, + uint lock_mode) +{ + int attempts = 0, i, j, try_lock; + xfs_log_item_t *lp; + + ASSERT(ips && (inodes >= 2)); /* we need at least two */ + + if (first_locked) { + try_lock = 1; + i = 1; + } else { + try_lock = 0; + i = 0; + } + +again: + for (; i < inodes; i++) { + ASSERT(ips[i]); + + if (i && (ips[i] == ips[i-1])) /* Already locked */ + continue; + + /* + * If try_lock is not set yet, make sure all locked inodes + * are not in the AIL. + * If any are, set try_lock to be used later. + */ + + if (!try_lock) { + for (j = (i - 1); j >= 0 && !try_lock; j--) { + lp = (xfs_log_item_t *)ips[j]->i_itemp; + if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { + try_lock++; + } + } + } + + /* + * If any of the previous locks we have locked is in the AIL, + * we must TRY to get the second and subsequent locks. If + * we can't get any, we must release all we have + * and try again. + */ + + if (try_lock) { + /* try_lock must be 0 if i is 0. */ + /* + * try_lock means we have an inode locked + * that is in the AIL. + */ + ASSERT(i != 0); + if (!xfs_ilock_nowait(ips[i], lock_mode)) { + attempts++; + + /* + * Unlock all previous guys and try again. + * xfs_iunlock will try to push the tail + * if the inode is in the AIL. + */ + + for(j = i - 1; j >= 0; j--) { + + /* + * Check to see if we've already + * unlocked this one. + * Not the first one going back, + * and the inode ptr is the same. + */ + if ((j != (i - 1)) && ips[j] == + ips[j+1]) + continue; + + xfs_iunlock(ips[j], lock_mode); + } + + if ((attempts % 5) == 0) { + delay(1); /* Don't just spin the CPU */ +#ifdef DEBUG + xfs_lock_delays++; +#endif + } + i = 0; + try_lock = 0; + goto again; + } + } else { + xfs_ilock(ips[i], lock_mode); + } + } + +#ifdef DEBUG + if (attempts) { + if (attempts < 5) xfs_small_retries++; + else if (attempts < 100) xfs_middle_retries++; + else xfs_lots_retries++; + } else { + xfs_locked_n++; + } +#endif +} + +#ifdef DEBUG +#define REMOVE_DEBUG_TRACE(x) {remove_which_error_return = (x);} +int remove_which_error_return = 0; +#else /* ! DEBUG */ +#define REMOVE_DEBUG_TRACE(x) +#endif /* ! 
DEBUG */ + + +/* + * xfs_remove + * + */ +STATIC int +xfs_remove( + bhv_desc_t *dir_bdp, + vname_t *dentry, + cred_t *credp) +{ + vnode_t *dir_vp; + char *name = VNAME(dentry); + xfs_inode_t *dp, *ip; + xfs_trans_t *tp = NULL; + xfs_mount_t *mp; + int error = 0; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + int cancel_flags; + int committed; + int dm_di_mode = 0; + int link_zero; + uint resblks; + int namelen; + + dir_vp = BHV_TO_VNODE(dir_bdp); + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + dp = XFS_BHVTOI(dir_bdp); + mp = dp->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + namelen = VNAMELEN(dentry); + + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_bdp, + DM_RIGHT_NULL, NULL, DM_RIGHT_NULL, + name, NULL, 0, 0, 0); + if (error) + return error; + } + + /* From this point on, return through std_return */ + ip = NULL; + + /* + * We need to get a reference to ip before we get our log + * reservation. The reason for this is that we cannot call + * xfs_iget for an inode for which we do not have a reference + * once we've acquired a log reservation. This is because the + * inode we are trying to get might be in xfs_inactive going + * for a log reservation. Since we'll have to wait for the + * inactive code to complete before returning from xfs_iget, + * we need to make sure that we don't have log space reserved + * when we call xfs_iget. Instead we get an unlocked referece + * to the inode before getting our log reservation. + */ + error = xfs_get_dir_entry(dentry, &ip); + if (error) { + REMOVE_DEBUG_TRACE(__LINE__); + goto std_return; + } + + dm_di_mode = ip->i_d.di_mode; + + vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); + + ITRACE(ip); + + error = XFS_QM_DQATTACH(mp, dp, 0); + if (!error && dp != ip) + error = XFS_QM_DQATTACH(mp, ip, 0); + if (error) { + REMOVE_DEBUG_TRACE(__LINE__); + IRELE(ip); + goto std_return; + } + + tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + /* + * We try to get the real space reservation first, + * allowing for directory btree deletion(s) implying + * possible bmap insert(s). If we can't get the space + * reservation then we use 0 instead, and avoid the bmap + * btree insert(s) in the directory code by, if the bmap + * insert tries to happen, instead trimming the LAST + * block from the directory. + */ + resblks = XFS_REMOVE_SPACE_RES(mp); + error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT); + if (error == ENOSPC) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT); + } + if (error) { + ASSERT(error != ENOSPC); + REMOVE_DEBUG_TRACE(__LINE__); + xfs_trans_cancel(tp, 0); + IRELE(ip); + return error; + } + + error = xfs_lock_dir_and_entry(dp, dentry, ip); + if (error) { + REMOVE_DEBUG_TRACE(__LINE__); + xfs_trans_cancel(tp, cancel_flags); + IRELE(ip); + goto std_return; + } + + /* + * At this point, we've gotten both the directory and the entry + * inodes locked. + */ + xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); + if (dp != ip) { + /* + * Increment vnode ref count only in this case since + * there's an extra vnode reference in the case where + * dp == ip. 
+ */ + IHOLD(dp); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + } + + if ((error = _MAC_XFS_IACCESS(ip, MACWRITE, credp))) { + REMOVE_DEBUG_TRACE(__LINE__); + goto error_return; + } + + /* + * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. + */ + XFS_BMAP_INIT(&free_list, &first_block); + error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, ip->i_ino, + &first_block, &free_list, 0); + if (error) { + ASSERT(error != ENOENT); + REMOVE_DEBUG_TRACE(__LINE__); + goto error1; + } + xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + dp->i_gen++; + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + + error = xfs_droplink(tp, ip); + if (error) { + REMOVE_DEBUG_TRACE(__LINE__); + goto error1; + } + + /* Determine if this is the last link while + * we are in the transaction. + */ + link_zero = (ip)->i_d.di_nlink==0; + + /* + * Take an extra ref on the inode so that it doesn't + * go to xfs_inactive() from within the commit. + */ + IHOLD(ip); + + /* + * If this is a synchronous mount, make sure that the + * remove transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); + if (error) { + REMOVE_DEBUG_TRACE(__LINE__); + goto error_rele; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (error) { + IRELE(ip); + goto std_return; + } + + /* + * Before we drop our extra reference to the inode, purge it + * from the refcache if it is there. By waiting until afterwards + * to do the IRELE, we ensure that we won't go inactive in the + * xfs_refcache_purge_ip routine (although that would be OK). + */ + xfs_refcache_purge_ip(ip); + + vn_trace_exit(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); + + /* + * Let interposed file systems know about removed links. + */ + VOP_LINK_REMOVED(XFS_ITOV(ip), dir_vp, link_zero); + + IRELE(ip); + +/* Fall through to std_return with error = 0 */ + std_return: + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, + DM_EVENT_POSTREMOVE)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, + dir_bdp, DM_RIGHT_NULL, + NULL, DM_RIGHT_NULL, + name, NULL, dm_di_mode, error, 0); + } + return error; + + error1: + xfs_bmap_cancel(&free_list); + cancel_flags |= XFS_TRANS_ABORT; + + error_return: + xfs_trans_cancel(tp, cancel_flags); + goto std_return; + + error_rele: + /* + * In this case make sure to not release the inode until after + * the current transaction is aborted. Releasing it beforehand + * can cause us to go to xfs_inactive and start a recursive + * transaction which can easily deadlock with the current one. + */ + xfs_bmap_cancel(&free_list); + cancel_flags |= XFS_TRANS_ABORT; + xfs_trans_cancel(tp, cancel_flags); + + /* + * Before we drop our extra reference to the inode, purge it + * from the refcache if it is there. By waiting until afterwards + * to do the IRELE, we ensure that we won't go inactive in the + * xfs_refcache_purge_ip routine (although that would be OK). 
+ */ + xfs_refcache_purge_ip(ip); + + IRELE(ip); + + goto std_return; +} + + +/* + * xfs_link + * + */ +STATIC int +xfs_link( + bhv_desc_t *target_dir_bdp, + vnode_t *src_vp, + vname_t *dentry, + cred_t *credp) +{ + xfs_inode_t *tdp, *sip; + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_inode_t *ips[2]; + int error; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + int cancel_flags; + int committed; + vnode_t *target_dir_vp; + bhv_desc_t *src_bdp; + int resblks; + char *target_name = VNAME(dentry); + int target_namelen; + + target_dir_vp = BHV_TO_VNODE(target_dir_bdp); + vn_trace_entry(target_dir_vp, __FUNCTION__, (inst_t *)__return_address); + vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address); + + target_namelen = VNAMELEN(dentry); + if (src_vp->v_type == VDIR) + return XFS_ERROR(EPERM); + + /* + * For now, manually find the XFS behavior descriptor for + * the source vnode. If it doesn't exist then something + * is wrong and we should just return an error. + * Eventually we need to figure out how link is going to + * work in the face of stacked vnodes. + */ + src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops); + if (src_bdp == NULL) { + return XFS_ERROR(EXDEV); + } + sip = XFS_BHVTOI(src_bdp); + tdp = XFS_BHVTOI(target_dir_bdp); + mp = tdp->i_mount; + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + if (DM_EVENT_ENABLED(src_vp->v_vfsp, tdp, DM_EVENT_LINK)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK, + target_dir_bdp, DM_RIGHT_NULL, + src_bdp, DM_RIGHT_NULL, + target_name, NULL, 0, 0, 0); + if (error) + return error; + } + + /* Return through std_return after this point. */ + + error = XFS_QM_DQATTACH(mp, sip, 0); + if (!error && sip != tdp) + error = XFS_QM_DQATTACH(mp, tdp, 0); + if (error) + goto std_return; + + tp = xfs_trans_alloc(mp, XFS_TRANS_LINK); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + resblks = XFS_LINK_SPACE_RES(mp, target_namelen); + error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT); + if (error == ENOSPC) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_LINK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT); + } + if (error) { + cancel_flags = 0; + goto error_return; + } + + if (sip->i_ino < tdp->i_ino) { + ips[0] = sip; + ips[1] = tdp; + } else { + ips[0] = tdp; + ips[1] = sip; + } + + xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); + + /* + * Increment vnode ref counts since xfs_trans_commit & + * xfs_trans_cancel will both unlock the inodes and + * decrement the associated ref counts. + */ + VN_HOLD(src_vp); + VN_HOLD(target_dir_vp); + xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL); + + /* + * If the source has too many links, we can't make any more to it. + */ + if (sip->i_d.di_nlink >= XFS_MAXLINK) { + error = XFS_ERROR(EMLINK); + goto error_return; + } + + if (resblks == 0 && + (error = XFS_DIR_CANENTER(mp, tp, tdp, target_name, + target_namelen))) + goto error_return; + + XFS_BMAP_INIT(&free_list, &first_block); + + error = XFS_DIR_CREATENAME(mp, tp, tdp, target_name, target_namelen, + sip->i_ino, &first_block, &free_list, + resblks); + if (error) + goto abort_return; + xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + tdp->i_gen++; + xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); + + error = xfs_bumplink(tp, sip); + if (error) { + goto abort_return; + } + + /* + * If this is a synchronous mount, make sure that the + * link transaction goes to disk before returning to + * the user. 
+ */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_bmap_finish (&tp, &free_list, first_block, &committed); + if (error) { + xfs_bmap_cancel(&free_list); + goto abort_return; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (error) { + goto std_return; + } + + /* Fall through to std_return with error = 0. */ +std_return: + if (DM_EVENT_ENABLED(src_vp->v_vfsp, sip, + DM_EVENT_POSTLINK)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK, + target_dir_bdp, DM_RIGHT_NULL, + src_bdp, DM_RIGHT_NULL, + target_name, NULL, 0, error, 0); + } + return error; + + abort_return: + cancel_flags |= XFS_TRANS_ABORT; + /* FALLTHROUGH */ + error_return: + xfs_trans_cancel(tp, cancel_flags); + + goto std_return; +} +/* + * xfs_mkdir + * + */ +STATIC int +xfs_mkdir( + bhv_desc_t *dir_bdp, + vname_t *dentry, + vattr_t *vap, + vnode_t **vpp, + cred_t *credp) +{ + char *dir_name = VNAME(dentry); + xfs_inode_t *dp; + xfs_inode_t *cdp; /* inode of created dir */ + vnode_t *cvp; /* vnode of created dir */ + xfs_trans_t *tp; + xfs_dev_t rdev; + xfs_mount_t *mp; + int cancel_flags; + int error; + int committed; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + vnode_t *dir_vp; + boolean_t dp_joined_to_trans; + boolean_t created = B_FALSE; + int dm_event_sent = 0; + xfs_prid_t prid; + struct xfs_dquot *udqp, *gdqp; + uint resblks; + int dm_di_mode; + int dir_namelen; + + dir_vp = BHV_TO_VNODE(dir_bdp); + dp = XFS_BHVTOI(dir_bdp); + mp = dp->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + dir_namelen = VNAMELEN(dentry); + + tp = NULL; + dp_joined_to_trans = B_FALSE; + dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); + + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, + dir_bdp, DM_RIGHT_NULL, NULL, + DM_RIGHT_NULL, dir_name, NULL, + dm_di_mode, 0, 0); + if (error) + return error; + dm_event_sent = 1; + } + + /* Return through std_return after this point. */ + + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + mp = dp->i_mount; + udqp = gdqp = NULL; + if (vap->va_mask & XFS_AT_PROJID) + prid = (xfs_prid_t)vap->va_projid; + else + prid = (xfs_prid_t)dfltprid; + + /* + * Make sure that we have allocated dquot(s) on disk. + */ + error = XFS_QM_DQVOPALLOC(mp, dp, current->fsuid, current->fsgid, + XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); + if (error) + goto std_return; + + tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + resblks = XFS_MKDIR_SPACE_RES(mp, dir_namelen); + error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT); + if (error == ENOSPC) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_MKDIR_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_MKDIR_LOG_COUNT); + } + if (error) { + cancel_flags = 0; + dp = NULL; + goto error_return; + } + + xfs_ilock(dp, XFS_ILOCK_EXCL); + + /* + * Check for directory link count overflow. + */ + if (dp->i_d.di_nlink >= XFS_MAXLINK) { + error = XFS_ERROR(EMLINK); + goto error_return; + } + + /* + * Reserve disk quota and the inode. + */ + error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); + if (error) + goto error_return; + + if (resblks == 0 && + (error = XFS_DIR_CANENTER(mp, tp, dp, dir_name, dir_namelen))) + goto error_return; + /* + * create the directory inode. + */ + rdev = (vap->va_mask & XFS_AT_RDEV) ? 
vap->va_rdev : 0; + error = xfs_dir_ialloc(&tp, dp, + MAKEIMODE(vap->va_type,vap->va_mode), 2, + rdev, credp, prid, resblks > 0, + &cdp, NULL); + if (error) { + if (error == ENOSPC) + goto error_return; + goto abort_return; + } + ITRACE(cdp); + + /* + * Now we add the directory inode to the transaction. + * We waited until now since xfs_dir_ialloc might start + * a new transaction. Had we joined the transaction + * earlier, the locks might have gotten released. + */ + VN_HOLD(dir_vp); + xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); + dp_joined_to_trans = B_TRUE; + + XFS_BMAP_INIT(&free_list, &first_block); + + error = XFS_DIR_CREATENAME(mp, tp, dp, dir_name, dir_namelen, + cdp->i_ino, &first_block, &free_list, + resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0); + if (error) { + ASSERT(error != ENOSPC); + goto error1; + } + xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + /* + * Bump the in memory version number of the parent directory + * so that other processes accessing it will recognize that + * the directory has changed. + */ + dp->i_gen++; + + error = XFS_DIR_INIT(mp, tp, cdp, dp); + if (error) { + goto error2; + } + + cdp->i_gen = 1; + error = xfs_bumplink(tp, dp); + if (error) { + goto error2; + } + + cvp = XFS_ITOV(cdp); + + created = B_TRUE; + + *vpp = cvp; + IHOLD(cdp); + + /* + * Attach the dquots to the new inode and modify the icount incore. + */ + XFS_QM_DQVOPCREATE(mp, tp, cdp, udqp, gdqp); + + /* + * If this is a synchronous mount, make sure that the + * mkdir transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); + if (error) { + IRELE(cdp); + goto error2; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + if (error) { + IRELE(cdp); + } + + /* Fall through to std_return with error = 0 or errno from + * xfs_trans_commit. */ + +std_return: + if ( (created || (error != 0 && dm_event_sent != 0)) && + DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp), + DM_EVENT_POSTCREATE)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, + dir_bdp, DM_RIGHT_NULL, + created ? 
XFS_ITOBHV(cdp):NULL, + DM_RIGHT_NULL, + dir_name, NULL, + dm_di_mode, error, 0); + } + return error; + + error2: + error1: + xfs_bmap_cancel(&free_list); + abort_return: + cancel_flags |= XFS_TRANS_ABORT; + error_return: + xfs_trans_cancel(tp, cancel_flags); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + if (!dp_joined_to_trans && (dp != NULL)) { + xfs_iunlock(dp, XFS_ILOCK_EXCL); + } + + goto std_return; +} + + +/* + * xfs_rmdir + * + */ +STATIC int +xfs_rmdir( + bhv_desc_t *dir_bdp, + vname_t *dentry, + cred_t *credp) +{ + char *name = VNAME(dentry); + xfs_inode_t *dp; + xfs_inode_t *cdp; /* child directory */ + xfs_trans_t *tp; + xfs_mount_t *mp; + int error; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + int cancel_flags; + int committed; + vnode_t *dir_vp; + int dm_di_mode = 0; + int last_cdp_link; + int namelen; + uint resblks; + + dir_vp = BHV_TO_VNODE(dir_bdp); + dp = XFS_BHVTOI(dir_bdp); + mp = dp->i_mount; + + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + if (XFS_FORCED_SHUTDOWN(XFS_BHVTOI(dir_bdp)->i_mount)) + return XFS_ERROR(EIO); + namelen = VNAMELEN(dentry); + + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, + dir_bdp, DM_RIGHT_NULL, + NULL, DM_RIGHT_NULL, + name, NULL, 0, 0, 0); + if (error) + return XFS_ERROR(error); + } + + /* Return through std_return after this point. */ + + cdp = NULL; + + /* + * We need to get a reference to cdp before we get our log + * reservation. The reason for this is that we cannot call + * xfs_iget for an inode for which we do not have a reference + * once we've acquired a log reservation. This is because the + * inode we are trying to get might be in xfs_inactive going + * for a log reservation. Since we'll have to wait for the + * inactive code to complete before returning from xfs_iget, + * we need to make sure that we don't have log space reserved + * when we call xfs_iget. Instead we get an unlocked reference + * to the inode before getting our log reservation. + */ + error = xfs_get_dir_entry(dentry, &cdp); + if (error) { + REMOVE_DEBUG_TRACE(__LINE__); + goto std_return; + } + mp = dp->i_mount; + dm_di_mode = cdp->i_d.di_mode; + + /* + * Get the dquots for the inodes. + */ + error = XFS_QM_DQATTACH(mp, dp, 0); + if (!error && dp != cdp) + error = XFS_QM_DQATTACH(mp, cdp, 0); + if (error) { + IRELE(cdp); + REMOVE_DEBUG_TRACE(__LINE__); + goto std_return; + } + + tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + /* + * We try to get the real space reservation first, + * allowing for directory btree deletion(s) implying + * possible bmap insert(s). If we can't get the space + * reservation then we use 0 instead, and avoid the bmap + * btree insert(s) in the directory code by, if the bmap + * insert tries to happen, instead trimming the LAST + * block from the directory. + */ + resblks = XFS_REMOVE_SPACE_RES(mp); + error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT); + if (error == ENOSPC) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT); + } + if (error) { + ASSERT(error != ENOSPC); + cancel_flags = 0; + IRELE(cdp); + goto error_return; + } + XFS_BMAP_INIT(&free_list, &first_block); + + /* + * Now lock the child directory inode and the parent directory + * inode in the proper order. 
This will take care of validating + * that the directory entry for the child directory inode has + * not changed while we were obtaining a log reservation. + */ + error = xfs_lock_dir_and_entry(dp, dentry, cdp); + if (error) { + xfs_trans_cancel(tp, cancel_flags); + IRELE(cdp); + goto std_return; + } + + xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); + if (dp != cdp) { + /* + * Only increment the parent directory vnode count if + * we didn't bump it in looking up cdp. The only time + * we don't bump it is when we're looking up ".". + */ + VN_HOLD(dir_vp); + } + + ITRACE(cdp); + xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL); + + if ((error = _MAC_XFS_IACCESS(cdp, MACWRITE, credp))) { + goto error_return; + } + + ASSERT(cdp->i_d.di_nlink >= 2); + if (cdp->i_d.di_nlink != 2) { + error = XFS_ERROR(ENOTEMPTY); + goto error_return; + } + if (!XFS_DIR_ISEMPTY(mp, cdp)) { + error = XFS_ERROR(ENOTEMPTY); + goto error_return; + } + + error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, cdp->i_ino, + &first_block, &free_list, resblks); + if (error) { + goto error1; + } + + xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + /* + * Bump the in memory generation count on the parent + * directory so that others can know that it has changed. + */ + dp->i_gen++; + + /* + * Drop the link from cdp's "..". + */ + error = xfs_droplink(tp, dp); + if (error) { + goto error1; + } + + /* + * Drop the link from dp to cdp. + */ + error = xfs_droplink(tp, cdp); + if (error) { + goto error1; + } + + /* + * Drop the "." link from cdp to self. + */ + error = xfs_droplink(tp, cdp); + if (error) { + goto error1; + } + + /* Determine these before committing transaction */ + last_cdp_link = (cdp)->i_d.di_nlink==0; + + /* + * Take an extra ref on the child vnode so that it + * does not go to xfs_inactive() from within the commit. + */ + IHOLD(cdp); + + /* + * If this is a synchronous mount, make sure that the + * rmdir transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + error = xfs_bmap_finish (&tp, &free_list, first_block, &committed); + if (error) { + xfs_bmap_cancel(&free_list); + xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT)); + IRELE(cdp); + goto std_return; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + if (error) { + IRELE(cdp); + goto std_return; + } + + + /* + * Let interposed file systems know about removed links. + */ + VOP_LINK_REMOVED(XFS_ITOV(cdp), dir_vp, last_cdp_link); + + IRELE(cdp); + + /* Fall through to std_return with error = 0 or the errno + * from xfs_trans_commit. */ +std_return: + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, + dir_bdp, DM_RIGHT_NULL, + NULL, DM_RIGHT_NULL, + name, NULL, dm_di_mode, + error, 0); + } + return error; + + error1: + xfs_bmap_cancel(&free_list); + cancel_flags |= XFS_TRANS_ABORT; + error_return: + xfs_trans_cancel(tp, cancel_flags); + goto std_return; +} + + +/* + * xfs_readdir + * + * Read dp's entries starting at uiop->uio_offset and translate them into + * bufsize bytes worth of struct dirents starting at bufbase. 
+ */ +STATIC int +xfs_readdir( + bhv_desc_t *dir_bdp, + uio_t *uiop, + cred_t *credp, + int *eofp) +{ + xfs_inode_t *dp; + xfs_trans_t *tp = NULL; + int error = 0; + uint lock_mode; + xfs_off_t start_offset; + + vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__, + (inst_t *)__return_address); + dp = XFS_BHVTOI(dir_bdp); + + if (XFS_FORCED_SHUTDOWN(dp->i_mount)) { + return XFS_ERROR(EIO); + } + + lock_mode = xfs_ilock_map_shared(dp); + start_offset = uiop->uio_offset; + error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp); + if (start_offset != uiop->uio_offset) { + xfs_ichgtime(dp, XFS_ICHGTIME_ACC); + } + xfs_iunlock_map_shared(dp, lock_mode); + return error; +} + + +/* + * xfs_symlink + * + */ +STATIC int +xfs_symlink( + bhv_desc_t *dir_bdp, + vname_t *dentry, + vattr_t *vap, + char *target_path, + vnode_t **vpp, + cred_t *credp) +{ + xfs_trans_t *tp; + xfs_mount_t *mp; + xfs_inode_t *dp; + xfs_inode_t *ip; + int error; + int pathlen; + xfs_dev_t rdev; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + boolean_t dp_joined_to_trans; + vnode_t *dir_vp; + uint cancel_flags; + int committed; + xfs_fileoff_t first_fsb; + xfs_filblks_t fs_blocks; + int nmaps; + xfs_bmbt_irec_t mval[SYMLINK_MAPS]; + xfs_daddr_t d; + char *cur_chunk; + int byte_cnt; + int n; + xfs_buf_t *bp; + xfs_prid_t prid; + struct xfs_dquot *udqp, *gdqp; + uint resblks; + char *link_name = VNAME(dentry); + int link_namelen; + + *vpp = NULL; + dir_vp = BHV_TO_VNODE(dir_bdp); + dp = XFS_BHVTOI(dir_bdp); + dp_joined_to_trans = B_FALSE; + error = 0; + ip = NULL; + tp = NULL; + + vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); + + mp = dp->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + link_namelen = VNAMELEN(dentry); + + /* + * Check component lengths of the target path name. + */ + pathlen = strlen(target_path); + if (pathlen >= MAXPATHLEN) /* total string too long */ + return XFS_ERROR(ENAMETOOLONG); + if (pathlen >= MAXNAMELEN) { /* is any component too long? */ + int len, total; + char *path; + + for(total = 0, path = target_path; total < pathlen;) { + /* + * Skip any slashes. + */ + while(*path == '/') { + total++; + path++; + } + + /* + * Count up to the next slash or end of path. + * Error out if the component is bigger than MAXNAMELEN. + */ + for(len = 0; *path != '/' && total < pathlen;total++, path++) { + if (++len >= MAXNAMELEN) { + error = ENAMETOOLONG; + return error; + } + } + } + } + + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_SYMLINK)) { + error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dir_bdp, + DM_RIGHT_NULL, NULL, DM_RIGHT_NULL, + link_name, target_path, 0, 0, 0); + if (error) + return error; + } + + /* Return through std_return after this point. */ + + udqp = gdqp = NULL; + if (vap->va_mask & XFS_AT_PROJID) + prid = (xfs_prid_t)vap->va_projid; + else + prid = (xfs_prid_t)dfltprid; + + /* + * Make sure that we have allocated dquot(s) on disk. + */ + error = XFS_QM_DQVOPALLOC(mp, dp, current->fsuid, current->fsgid, + XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); + if (error) + goto std_return; + + tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK); + cancel_flags = XFS_TRANS_RELEASE_LOG_RES; + /* + * The symlink will fit into the inode data fork? + * There can't be any attributes so we get the whole variable part. 
+ */ + if (pathlen <= XFS_LITINO(mp)) + fs_blocks = 0; + else + fs_blocks = XFS_B_TO_FSB(mp, pathlen); + resblks = XFS_SYMLINK_SPACE_RES(mp, link_namelen, fs_blocks); + error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); + if (error == ENOSPC && fs_blocks == 0) { + resblks = 0; + error = xfs_trans_reserve(tp, 0, XFS_SYMLINK_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); + } + if (error) { + cancel_flags = 0; + dp = NULL; + goto error_return; + } + + xfs_ilock(dp, XFS_ILOCK_EXCL); + + /* + * Reserve disk quota : blocks and inode. + */ + error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); + if (error) + goto error_return; + + /* + * Check for ability to enter directory entry, if no space reserved. + */ + if (resblks == 0 && + (error = XFS_DIR_CANENTER(mp, tp, dp, link_name, link_namelen))) + goto error_return; + /* + * Initialize the bmap freelist prior to calling either + * bmapi or the directory create code. + */ + XFS_BMAP_INIT(&free_list, &first_block); + + /* + * Allocate an inode for the symlink. + */ + rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0; + + error = xfs_dir_ialloc(&tp, dp, IFLNK | (vap->va_mode&~IFMT), + 1, rdev, credp, prid, resblks > 0, &ip, NULL); + if (error) { + if (error == ENOSPC) + goto error_return; + goto error1; + } + ITRACE(ip); + + VN_HOLD(dir_vp); + xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); + dp_joined_to_trans = B_TRUE; + + /* + * Also attach the dquot(s) to it, if applicable. + */ + XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp); + + if (resblks) + resblks -= XFS_IALLOC_SPACE_RES(mp); + /* + * If the symlink will fit into the inode, write it inline. + */ + if (pathlen <= XFS_IFORK_DSIZE(ip)) { + xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK); + memcpy(ip->i_df.if_u1.if_data, target_path, pathlen); + ip->i_d.di_size = pathlen; + + /* + * The inode was initially created in extent format. + */ + ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT); + ip->i_df.if_flags |= XFS_IFINLINE; + + ip->i_d.di_format = XFS_DINODE_FMT_LOCAL; + xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE); + + } else { + first_fsb = 0; + nmaps = SYMLINK_MAPS; + + error = xfs_bmapi(tp, ip, first_fsb, fs_blocks, + XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, + &first_block, resblks, mval, &nmaps, + &free_list); + if (error) { + goto error1; + } + + if (resblks) + resblks -= fs_blocks; + ip->i_d.di_size = pathlen; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + cur_chunk = target_path; + for (n = 0; n < nmaps; n++) { + d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); + byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, + BTOBB(byte_cnt), 0); + ASSERT(bp && !XFS_BUF_GETERROR(bp)); + if (pathlen < byte_cnt) { + byte_cnt = pathlen; + } + pathlen -= byte_cnt; + + memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt); + cur_chunk += byte_cnt; + + xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1); + } + } + + /* + * Create the directory entry for the symlink. + */ + error = XFS_DIR_CREATENAME(mp, tp, dp, link_name, link_namelen, + ip->i_ino, &first_block, &free_list, resblks); + if (error) { + goto error1; + } + xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + + /* + * Bump the in memory version number of the parent directory + * so that other processes accessing it will recognize that + * the directory has changed. 
+ */ + dp->i_gen++; + + /* + * If this is a synchronous mount, make sure that the + * symlink transaction goes to disk before returning to + * the user. + */ + if (mp->m_flags & XFS_MOUNT_WSYNC) { + xfs_trans_set_sync(tp); + } + + /* + * xfs_trans_commit normally decrements the vnode ref count + * when it unlocks the inode. Since we want to return the + * vnode to the caller, we bump the vnode ref count now. + */ + IHOLD(ip); + + error = xfs_bmap_finish(&tp, &free_list, first_block, &committed); + if (error) { + goto error2; + } + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + /* Fall through to std_return with error = 0 or errno from + * xfs_trans_commit */ +std_return: + if (DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp), + DM_EVENT_POSTSYMLINK)) { + (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK, + dir_bdp, DM_RIGHT_NULL, + error ? NULL : XFS_ITOBHV(ip), + DM_RIGHT_NULL, link_name, target_path, + 0, error, 0); + } + + if (!error) { + vnode_t *vp; + + ASSERT(ip); + vp = XFS_ITOV(ip); + *vpp = vp; + } + return error; + + error2: + IRELE(ip); + error1: + xfs_bmap_cancel(&free_list); + cancel_flags |= XFS_TRANS_ABORT; + error_return: + xfs_trans_cancel(tp, cancel_flags); + XFS_QM_DQRELE(mp, udqp); + XFS_QM_DQRELE(mp, gdqp); + + if (!dp_joined_to_trans && (dp != NULL)) { + xfs_iunlock(dp, XFS_ILOCK_EXCL); + } + + goto std_return; +} + + +/* + * xfs_fid2 + * + * A fid routine that takes a pointer to a previously allocated + * fid structure (like xfs_fast_fid) but uses a 64 bit inode number. + */ +STATIC int +xfs_fid2( + bhv_desc_t *bdp, + fid_t *fidp) +{ + xfs_inode_t *ip; + xfs_fid2_t *xfid; + + vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__, + (inst_t *)__return_address); + ASSERT(sizeof(fid_t) >= sizeof(xfs_fid2_t)); + + xfid = (xfs_fid2_t *)fidp; + ip = XFS_BHVTOI(bdp); + xfid->fid_len = sizeof(xfs_fid2_t) - sizeof(xfid->fid_len); + xfid->fid_pad = 0; + /* + * use memcpy because the inode is a long long and there's no + * assurance that xfid->fid_ino is properly aligned. + */ + memcpy(&xfid->fid_ino, &ip->i_ino, sizeof(xfid->fid_ino)); + xfid->fid_gen = ip->i_d.di_gen; + + return 0; +} + + +/* + * xfs_rwlock + */ +int +xfs_rwlock( + bhv_desc_t *bdp, + vrwlock_t locktype) +{ + xfs_inode_t *ip; + vnode_t *vp; + + vp = BHV_TO_VNODE(bdp); + if (vp->v_type == VDIR) + return 1; + ip = XFS_BHVTOI(bdp); + if (locktype == VRWLOCK_WRITE) { + xfs_ilock(ip, XFS_IOLOCK_EXCL); + } else if (locktype == VRWLOCK_TRY_READ) { + return (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)); + } else if (locktype == VRWLOCK_TRY_WRITE) { + return (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)); + } else { + ASSERT((locktype == VRWLOCK_READ) || + (locktype == VRWLOCK_WRITE_DIRECT)); + xfs_ilock(ip, XFS_IOLOCK_SHARED); + } + + return 1; +} + + +/* + * xfs_rwunlock + */ +void +xfs_rwunlock( + bhv_desc_t *bdp, + vrwlock_t locktype) +{ + xfs_inode_t *ip; + xfs_inode_t *release_ip; + vnode_t *vp; + int error; + + vp = BHV_TO_VNODE(bdp); + if (vp->v_type == VDIR) + return; + ip = XFS_BHVTOI(bdp); + if (locktype == VRWLOCK_WRITE) { + /* + * In the write case, we may have added a new entry to + * the reference cache. This might store a pointer to + * an inode to be released in this inode. If it is there, + * clear the pointer and release the inode after unlocking + * this one. 
+ */ + release_ip = ip->i_release; + ip->i_release = NULL; + xfs_iunlock (ip, XFS_IOLOCK_EXCL); + + if (release_ip != NULL) { + VOP_RELEASE(XFS_ITOV(release_ip), error); + VN_RELE(XFS_ITOV(release_ip)); + } + } else { + ASSERT((locktype == VRWLOCK_READ) || + (locktype == VRWLOCK_WRITE_DIRECT)); + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + } + return; +} + +STATIC int +xfs_inode_flush( + bhv_desc_t *bdp, + int flags) +{ + xfs_inode_t *ip; + xfs_dinode_t *dip; + xfs_mount_t *mp; + xfs_buf_t *bp; + int error = 0; + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + /* Bypass inodes which have already been cleaned by + * the inode flush clustering code inside xfs_iflush + */ + if ((ip->i_update_core == 0) && + ((ip->i_itemp == NULL) || + !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL))) + return 0; + + if (flags & FLUSH_LOG) { + xfs_inode_log_item_t *iip = ip->i_itemp; + + if (iip && iip->ili_last_lsn) { + xlog_t *log = mp->m_log; + xfs_lsn_t sync_lsn; + int s, log_flags = XFS_LOG_FORCE; + + s = GRANT_LOCK(log); + sync_lsn = log->l_last_sync_lsn; + GRANT_UNLOCK(log, s); + + if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0)) + return 0; + + if (flags & FLUSH_SYNC) + log_flags |= XFS_LOG_SYNC; + return xfs_log_force(mp, iip->ili_last_lsn, + log_flags); + } + } + + /* We make this non-blocking if the inode is contended, + * return EAGAIN to indicate to the caller that they + * did not succeed. This prevents the flush path from + * blocking on inodes inside another operation right + * now, they get caught later by xfs_sync. + */ + if (flags & FLUSH_INODE) { + if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { + if ((xfs_ipincount(ip) == 0) && xfs_iflock_nowait(ip)) { + int flush_flags; + +#if 0 + /* not turning this on until some + * performance analysis is done + */ + if (flags & FLUSH_SYNC) + flush_flags = XFS_IFLUSH_SYNC; + else +#endif + flush_flags = XFS_IFLUSH_DELWRI_ELSE_ASYNC; + + xfs_ifunlock(ip); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0); + if (error) + return error; + xfs_buf_relse(bp); + + if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED) == 0) + return EAGAIN; + + if (xfs_ipincount(ip) || + !xfs_iflock_nowait(ip)) { + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return EAGAIN; + } + + error = xfs_iflush(ip, flush_flags); + } else { + error = EAGAIN; + } + xfs_iunlock(ip, XFS_ILOCK_SHARED); + } else { + error = EAGAIN; + } + } + + return error; +} + + +int +xfs_set_dmattrs ( + bhv_desc_t *bdp, + u_int evmask, + u_int16_t state, + cred_t *credp) +{ + xfs_inode_t *ip; + xfs_trans_t *tp; + xfs_mount_t *mp; + int error; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS); + error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES (mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + + ip->i_iocore.io_dmevmask = ip->i_d.di_dmevmask = evmask; + ip->i_iocore.io_dmstate = ip->i_d.di_dmstate = state; + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + IHOLD(ip); + error = xfs_trans_commit(tp, 0, NULL); + + return error; +} + + +/* + * xfs_reclaim + */ +STATIC int +xfs_reclaim( + bhv_desc_t *bdp) +{ + xfs_inode_t *ip; + vnode_t *vp; + + vp = BHV_TO_VNODE(bdp); + + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ASSERT(!VN_MAPPED(vp)); + ip = 
XFS_BHVTOI(bdp); + + if ((ip->i_d.di_mode & IFMT) == IFREG) { + if (ip->i_d.di_size > 0) { + /* + * Flush and invalidate any data left around that is + * a part of this file. + * + * Get the inode's i/o lock so that buffers are pushed + * out while holding the proper lock. We can't hold + * the inode lock here since flushing out buffers may + * cause us to try to get the lock in xfs_strategy(). + * + * We don't have to call remapf() here, because there + * cannot be any mapped file references to this vnode + * since it is being reclaimed. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + /* + * If we hit an IO error, we need to make sure that the + * buffer and page caches of file data for + * the file are tossed away. We don't want to use + * VOP_FLUSHINVAL_PAGES here because we don't want dirty + * pages to stay attached to the vnode, but be + * marked P_BAD. pdflush/vnode_pagebad + * hates that. + */ + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { + VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_NONE); + } else { + VOP_TOSS_PAGES(vp, 0, -1, FI_NONE); + } + + ASSERT(VN_CACHED(vp) == 0); + ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || + ip->i_delayed_blks == 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + } else if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { + /* + * di_size field may not be quite accurate if we're + * shutting down. + */ + VOP_TOSS_PAGES(vp, 0, -1, FI_NONE); + ASSERT(VN_CACHED(vp) == 0); + } + } + + /* If we have nothing to flush with this inode then complete the + * teardown now, otherwise break the link between the xfs inode + * and the linux inode and clean up the xfs inode later. This + * avoids flushing the inode to disk during the delete operation + * itself. + */ + if (!ip->i_update_core && (ip->i_itemp == NULL)) { + xfs_ilock(ip, XFS_ILOCK_EXCL); + return xfs_finish_reclaim(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC); + } else { + xfs_mount_t *mp = ip->i_mount; + + /* Protect sync from us */ + XFS_MOUNT_ILOCK(mp); + vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip)); + list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); + + XFS_MOUNT_IUNLOCK(mp); + } + return 0; +} + +int +xfs_finish_reclaim( + xfs_inode_t *ip, + int locked, + int sync_mode) +{ + xfs_ihash_t *ih = ip->i_hash; + int error; + + if (!locked) + xfs_ilock(ip, XFS_ILOCK_EXCL); + + /* The hash lock here protects a thread in xfs_iget_core from + * racing with us on linking the inode back with a vnode. + * Once we have the XFS_IRECLAIM flag set it will not touch + * us. + */ + write_lock(&ih->ih_lock); + if (ip->i_flags & XFS_IRECLAIM || (!locked && XFS_ITOV_NULL(ip))) { + write_unlock(&ih->ih_lock); + if (!locked) + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return(1); + } + ip->i_flags |= XFS_IRECLAIM; + write_unlock(&ih->ih_lock); + + /* + * If the inode is still dirty, then flush it out. If the inode + * is not in the AIL, then it will be OK to flush it delwri as + * long as xfs_iflush() does not keep any references to the inode. + * We leave that decision up to xfs_iflush() since it has the + * knowledge of whether it's OK to simply do a delwri flush of + * the inode or whether we need to wait until the inode is + * pulled from the AIL. + * We get the flush lock regardless, though, just to make sure + * we don't free it while it is being flushed. 
+ */ + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { + if (!locked) { + xfs_iflock(ip); + } + + if (ip->i_update_core || + ((ip->i_itemp != NULL) && + (ip->i_itemp->ili_format.ilf_fields != 0))) { + error = xfs_iflush(ip, sync_mode); + /* + * If we hit an error, typically because of filesystem + * shutdown, we don't need to let vn_reclaim to know + * because we're gonna reclaim the inode anyway. + */ + if (error) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_ireclaim(ip); + return (0); + } + xfs_iflock(ip); /* synchronize with xfs_iflush_done */ + } + + ASSERT(ip->i_update_core == 0); + ASSERT(ip->i_itemp == NULL || + ip->i_itemp->ili_format.ilf_fields == 0); + } + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + xfs_ireclaim(ip); + return 0; +} + +int +xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock) +{ + int purged; + struct list_head *curr, *next; + xfs_inode_t *ip; + int done = 0; + + while (!done) { + purged = 0; + XFS_MOUNT_ILOCK(mp); + list_for_each_safe(curr, next, &mp->m_del_inodes) { + ip = list_entry(curr, xfs_inode_t, i_reclaim); + if (noblock) { + if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) + continue; + if (xfs_ipincount(ip) || + !xfs_iflock_nowait(ip)) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + continue; + } + } + XFS_MOUNT_IUNLOCK(mp); + xfs_finish_reclaim(ip, noblock, + XFS_IFLUSH_DELWRI_ELSE_ASYNC); + purged = 1; + break; + } + + done = !purged; + } + + XFS_MOUNT_IUNLOCK(mp); + return 0; +} + +/* + * xfs_alloc_file_space() + * This routine allocates disk space for the given file. + * + * If alloc_type == 0, this request is for an ALLOCSP type + * request which will change the file size. In this case, no + * DMAPI event will be generated by the call. A TRUNCATE event + * will be generated later by xfs_setattr. + * + * If alloc_type != 0, this request is for a RESVSP type + * request, and a DMAPI DM_EVENT_WRITE will be generated if the + * lower block boundary byte address is less than the file's + * length. + * + * RETURNS: + * 0 on success + * errno on error + * + */ +int +xfs_alloc_file_space( + xfs_inode_t *ip, + xfs_off_t offset, + xfs_off_t len, + int alloc_type, + int attr_flags) +{ + xfs_filblks_t allocated_fsb; + xfs_filblks_t allocatesize_fsb; + int committed; + xfs_off_t count; + xfs_filblks_t datablocks; + int error; + xfs_fsblock_t firstfsb; + xfs_bmap_free_t free_list; + xfs_bmbt_irec_t *imapp; + xfs_bmbt_irec_t imaps[1]; + xfs_mount_t *mp; + int numrtextents; + int reccount; + uint resblks; + int rt; + int rtextsize; + xfs_fileoff_t startoffset_fsb; + xfs_trans_t *tp; + int xfs_bmapi_flags; + + vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); + mp = ip->i_mount; + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + /* + * determine if this is a realtime file + */ + if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) { + if (ip->i_d.di_extsize) + rtextsize = ip->i_d.di_extsize; + else + rtextsize = mp->m_sb.sb_rextsize; + } else + rtextsize = 0; + + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return error; + + if (len <= 0) + return XFS_ERROR(EINVAL); + + count = len; + error = 0; + imapp = &imaps[0]; + reccount = 1; + xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0); + startoffset_fsb = XFS_B_TO_FSBT(mp, offset); + allocatesize_fsb = XFS_B_TO_FSB(mp, count); + + /* Generate a DMAPI event if needed. 
*/ + if (alloc_type != 0 && offset < ip->i_d.di_size && + (attr_flags&ATTR_DMI) == 0 && + DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) { + xfs_off_t end_dmi_offset; + + end_dmi_offset = offset+len; + if (end_dmi_offset > ip->i_d.di_size) + end_dmi_offset = ip->i_d.di_size; + error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip), + offset, end_dmi_offset - offset, + 0, NULL); + if (error) + return(error); + } + + /* + * allocate file space until done or until there is an error + */ +retry: + while (allocatesize_fsb && !error) { + /* + * determine if reserving space on + * the data or realtime partition. + */ + if (rt) { + xfs_fileoff_t s, e; + + s = startoffset_fsb; + do_div(s, rtextsize); + s *= rtextsize; + e = roundup_64(startoffset_fsb + allocatesize_fsb, + rtextsize); + numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize; + datablocks = 0; + } else { + datablocks = allocatesize_fsb; + numrtextents = 0; + } + + /* + * allocate and setup the transaction + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); + resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks); + error = xfs_trans_reserve(tp, + resblks, + XFS_WRITE_LOG_RES(mp), + numrtextents, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + + /* + * check for running out of space + */ + if (error) { + /* + * Free the transaction structure. + */ + ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + break; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + error = XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, + ip->i_udquot, ip->i_gdquot, resblks, 0, rt ? + XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); + if (error) + goto error1; + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + /* + * issue the bmapi() call to allocate the blocks + */ + XFS_BMAP_INIT(&free_list, &firstfsb); + error = xfs_bmapi(tp, ip, startoffset_fsb, + allocatesize_fsb, xfs_bmapi_flags, + &firstfsb, 0, imapp, &reccount, + &free_list); + if (error) { + goto error0; + } + + /* + * complete the transaction + */ + error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); + if (error) { + goto error0; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + if (error) { + break; + } + + allocated_fsb = imapp->br_blockcount; + + if (reccount == 0) { + error = XFS_ERROR(ENOSPC); + break; + } + + startoffset_fsb += allocated_fsb; + allocatesize_fsb -= allocated_fsb; + } +dmapi_enospc_check: + if (error == ENOSPC && (attr_flags&ATTR_DMI) == 0 && + DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_NOSPACE)) { + + error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE, + XFS_ITOBHV(ip), DM_RIGHT_NULL, + XFS_ITOBHV(ip), DM_RIGHT_NULL, + NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ + if (error == 0) + goto retry; /* Maybe DMAPI app. has made space */ + /* else fall through with error from XFS_SEND_DATA */ + } + + return error; + + error0: + xfs_bmap_cancel(&free_list); + error1: + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + goto dmapi_enospc_check; +} + +/* + * Zero file bytes between startoff and endoff inclusive. + * The iolock is held exclusive and no blocks are buffered. 
+ */ +STATIC int +xfs_zero_remaining_bytes( + xfs_inode_t *ip, + xfs_off_t startoff, + xfs_off_t endoff) +{ + xfs_buf_t *bp; + int error=0; + xfs_bmbt_irec_t imap; + xfs_off_t lastoffset; + xfs_mount_t *mp; + int nimap; + xfs_off_t offset; + xfs_fileoff_t offset_fsb; + + mp = ip->i_mount; + bp = XFS_ngetrbuf(mp->m_sb.sb_blocksize,mp); + ASSERT(!XFS_BUF_GETERROR(bp)); + + if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) { + XFS_BUF_SET_TARGET(bp, mp->m_rtdev_targp); + } else { + XFS_BUF_SET_TARGET(bp, mp->m_ddev_targp); + } + + for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { + offset_fsb = XFS_B_TO_FSBT(mp, offset); + nimap = 1; + error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, NULL, 0, &imap, + &nimap, NULL); + if (error || nimap < 1) + break; + ASSERT(imap.br_blockcount >= 1); + ASSERT(imap.br_startoff == offset_fsb); + lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1; + if (lastoffset > endoff) + lastoffset = endoff; + if (imap.br_startblock == HOLESTARTBLOCK) + continue; + ASSERT(imap.br_startblock != DELAYSTARTBLOCK); + if (imap.br_state == XFS_EXT_UNWRITTEN) + continue; + XFS_BUF_UNDONE(bp); + XFS_BUF_UNWRITE(bp); + XFS_BUF_READ(bp); + XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock)); + xfsbdstrat(mp, bp); + if ((error = xfs_iowait(bp))) { + xfs_ioerror_alert("xfs_zero_remaining_bytes(read)", + mp, bp, XFS_BUF_ADDR(bp)); + break; + } + memset(XFS_BUF_PTR(bp) + + (offset - XFS_FSB_TO_B(mp, imap.br_startoff)), + 0, lastoffset - offset + 1); + XFS_BUF_UNDONE(bp); + XFS_BUF_UNREAD(bp); + XFS_BUF_WRITE(bp); + xfsbdstrat(mp, bp); + if ((error = xfs_iowait(bp))) { + xfs_ioerror_alert("xfs_zero_remaining_bytes(write)", + mp, bp, XFS_BUF_ADDR(bp)); + break; + } + } + XFS_nfreerbuf(bp); + return error; +} + +/* + * xfs_free_file_space() + * This routine frees disk space for the given file. + * + * This routine is only called by xfs_change_file_space + * for an UNRESVSP type call. 
+ * + * RETURNS: + * 0 on success + * errno on error + * + */ +STATIC int +xfs_free_file_space( + xfs_inode_t *ip, + xfs_off_t offset, + xfs_off_t len, + int attr_flags) +{ + int committed; + int done; + xfs_off_t end_dmi_offset; + xfs_fileoff_t endoffset_fsb; + int error; + xfs_fsblock_t firstfsb; + xfs_bmap_free_t free_list; + xfs_off_t ilen; + xfs_bmbt_irec_t imap; + xfs_off_t ioffset; + xfs_extlen_t mod=0; + xfs_mount_t *mp; + int nimap; + uint resblks; + int rounding; + int rt; + xfs_fileoff_t startoffset_fsb; + xfs_trans_t *tp; + + vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); + mp = ip->i_mount; + + if ((error = XFS_QM_DQATTACH(mp, ip, 0))) + return error; + + error = 0; + if (len <= 0) /* if nothing being freed */ + return error; + rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME); + startoffset_fsb = XFS_B_TO_FSB(mp, offset); + end_dmi_offset = offset + len; + endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset); + + if (offset < ip->i_d.di_size && + (attr_flags & ATTR_DMI) == 0 && + DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) { + if (end_dmi_offset > ip->i_d.di_size) + end_dmi_offset = ip->i_d.di_size; + error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip), + offset, end_dmi_offset - offset, + AT_DELAY_FLAG(attr_flags), NULL); + if (error) + return(error); + } + + xfs_ilock(ip, XFS_IOLOCK_EXCL); + rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog), + (__uint8_t)NBPP); + ilen = len + (offset & (rounding - 1)); + ioffset = offset & ~(rounding - 1); + if (ilen & (rounding - 1)) + ilen = (ilen + rounding) & ~(rounding - 1); + xfs_inval_cached_pages(XFS_ITOV(ip), &(ip->i_iocore), ioffset, 0, 0); + /* + * Need to zero the stuff we're not freeing, on disk. + * If it's a realtime file & can't use unwritten extents then we + * actually need to zero the extent edges. Otherwise xfs_bunmapi + * will take care of it for us. 
+ */ + if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { + nimap = 1; + error = xfs_bmapi(NULL, ip, startoffset_fsb, 1, 0, NULL, 0, + &imap, &nimap, NULL); + if (error) + return error; + ASSERT(nimap == 0 || nimap == 1); + if (nimap && imap.br_startblock != HOLESTARTBLOCK) { + xfs_daddr_t block; + + ASSERT(imap.br_startblock != DELAYSTARTBLOCK); + block = imap.br_startblock; + mod = do_div(block, mp->m_sb.sb_rextsize); + if (mod) + startoffset_fsb += mp->m_sb.sb_rextsize - mod; + } + nimap = 1; + error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, 1, 0, NULL, 0, + &imap, &nimap, NULL); + if (error) + return error; + ASSERT(nimap == 0 || nimap == 1); + if (nimap && imap.br_startblock != HOLESTARTBLOCK) { + ASSERT(imap.br_startblock != DELAYSTARTBLOCK); + mod++; + if (mod && (mod != mp->m_sb.sb_rextsize)) + endoffset_fsb -= mod; + } + } + if ((done = (endoffset_fsb <= startoffset_fsb))) + /* + * One contiguous piece to clear + */ + error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1); + else { + /* + * Some full blocks, possibly two pieces to clear + */ + if (offset < XFS_FSB_TO_B(mp, startoffset_fsb)) + error = xfs_zero_remaining_bytes(ip, offset, + XFS_FSB_TO_B(mp, startoffset_fsb) - 1); + if (!error && + XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len) + error = xfs_zero_remaining_bytes(ip, + XFS_FSB_TO_B(mp, endoffset_fsb), + offset + len - 1); + } + + /* + * free file space until done or until there is an error + */ + resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); + while (!error && !done) { + + /* + * allocate and setup the transaction + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); + error = xfs_trans_reserve(tp, + resblks, + XFS_WRITE_LOG_RES(mp), + 0, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + + /* + * check for running out of space + */ + if (error) { + /* + * Free the transaction structure. + */ + ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); + xfs_trans_cancel(tp, 0); + break; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); + error = XFS_TRANS_RESERVE_QUOTA(mp, tp, + ip->i_udquot, ip->i_gdquot, resblks, 0, rt ? + XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); + if (error) + goto error1; + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + /* + * issue the bunmapi() call to free the blocks + */ + XFS_BMAP_INIT(&free_list, &firstfsb); + error = xfs_bunmapi(tp, ip, startoffset_fsb, + endoffset_fsb - startoffset_fsb, + 0, 2, &firstfsb, &free_list, &done); + if (error) { + goto error0; + } + + /* + * complete the transaction + */ + error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); + if (error) { + goto error0; + } + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + } + + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; + + error0: + xfs_bmap_cancel(&free_list); + error1: + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); + xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + return error; +} + +/* + * xfs_change_file_space() + * This routine allocates or frees disk space for the given file. + * The user specified parameters are checked for alignment and size + * limitations. 
+ * + * RETURNS: + * 0 on success + * errno on error + * + */ +int +xfs_change_file_space( + bhv_desc_t *bdp, + int cmd, + xfs_flock64_t *bf, + xfs_off_t offset, + cred_t *credp, + int attr_flags) +{ + int clrprealloc; + int error; + xfs_fsize_t fsize; + xfs_inode_t *ip; + xfs_mount_t *mp; + int setprealloc; + xfs_off_t startoffset; + xfs_off_t llen; + xfs_trans_t *tp; + vattr_t va; + vnode_t *vp; + + vp = BHV_TO_VNODE(bdp); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + + ip = XFS_BHVTOI(bdp); + mp = ip->i_mount; + + /* + * must be a regular file and have write permission + */ + if (vp->v_type != VREG) + return XFS_ERROR(EINVAL); + + xfs_ilock(ip, XFS_ILOCK_SHARED); + + if ((error = xfs_iaccess(ip, IWRITE, credp))) { + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return error; + } + + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + switch (bf->l_whence) { + case 0: /*SEEK_SET*/ + break; + case 1: /*SEEK_CUR*/ + bf->l_start += offset; + break; + case 2: /*SEEK_END*/ + bf->l_start += ip->i_d.di_size; + break; + default: + return XFS_ERROR(EINVAL); + } + + llen = bf->l_len > 0 ? bf->l_len - 1 : bf->l_len; + + if ( (bf->l_start < 0) + || (bf->l_start > XFS_MAX_FILE_OFFSET) + || (bf->l_start + llen < 0) + || (bf->l_start + llen > XFS_MAX_FILE_OFFSET)) + return XFS_ERROR(EINVAL); + + bf->l_whence = 0; + + startoffset = bf->l_start; + fsize = ip->i_d.di_size; + + /* + * XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve + * file space. + * These calls do NOT zero the data space allocated to the file, + * nor do they change the file size. + * + * XFS_IOC_ALLOCSP and XFS_IOC_FREESP will allocate and free file + * space. + * These calls cause the new file data to be zeroed and the file + * size to be changed. + */ + setprealloc = clrprealloc = 0; + + switch (cmd) { + case XFS_IOC_RESVSP: + case XFS_IOC_RESVSP64: + error = xfs_alloc_file_space(ip, startoffset, bf->l_len, + 1, attr_flags); + if (error) + return error; + setprealloc = 1; + break; + + case XFS_IOC_UNRESVSP: + case XFS_IOC_UNRESVSP64: + if ((error = xfs_free_file_space(ip, startoffset, bf->l_len, + attr_flags))) + return error; + break; + + case XFS_IOC_ALLOCSP: + case XFS_IOC_ALLOCSP64: + case XFS_IOC_FREESP: + case XFS_IOC_FREESP64: + if (startoffset > fsize) { + error = xfs_alloc_file_space(ip, fsize, + startoffset - fsize, 0, attr_flags); + if (error) + break; + } + + va.va_mask = XFS_AT_SIZE; + va.va_size = startoffset; + + error = xfs_setattr(bdp, &va, attr_flags, credp); + + if (error) + return error; + + clrprealloc = 1; + break; + + default: + ASSERT(0); + return XFS_ERROR(EINVAL); + } + + /* + * update the inode timestamp, mode, and prealloc flag bits + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID); + + if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp), + 0, 0, 0))) { + /* ASSERT(0); */ + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + + ip->i_d.di_mode &= ~ISUID; + + /* + * Note that we don't have to worry about mandatory + * file locking being disabled here because we only + * clear the ISGID bit if the Group execute bit is + * on, but if it was on then mandatory locking wouldn't + * have been enabled. 
+ */ + if (ip->i_d.di_mode & (IEXEC >> 3)) + ip->i_d.di_mode &= ~ISGID; + + xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + + if (setprealloc) + ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; + else if (clrprealloc) + ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC; + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + xfs_trans_set_sync(tp); + + error = xfs_trans_commit(tp, 0, NULL); + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + return error; +} + +vnodeops_t xfs_vnodeops = { + BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS), + .vop_open = xfs_open, + .vop_read = xfs_read, + .vop_write = xfs_write, + .vop_ioctl = xfs_ioctl, + .vop_getattr = xfs_getattr, + .vop_setattr = xfs_setattr, + .vop_access = xfs_access, + .vop_lookup = xfs_lookup, + .vop_create = xfs_create, + .vop_remove = xfs_remove, + .vop_link = xfs_link, + .vop_rename = xfs_rename, + .vop_mkdir = xfs_mkdir, + .vop_rmdir = xfs_rmdir, + .vop_readdir = xfs_readdir, + .vop_symlink = xfs_symlink, + .vop_readlink = xfs_readlink, + .vop_fsync = xfs_fsync, + .vop_inactive = xfs_inactive, + .vop_fid2 = xfs_fid2, + .vop_rwlock = xfs_rwlock, + .vop_rwunlock = xfs_rwunlock, + .vop_bmap = xfs_bmap, + .vop_reclaim = xfs_reclaim, + .vop_attr_get = xfs_attr_get, + .vop_attr_set = xfs_attr_set, + .vop_attr_remove = xfs_attr_remove, + .vop_attr_list = xfs_attr_list, + .vop_link_removed = (vop_link_removed_t)fs_noval, + .vop_vnode_change = (vop_vnode_change_t)fs_noval, + .vop_tosspages = fs_tosspages, + .vop_flushinval_pages = fs_flushinval_pages, + .vop_flush_pages = fs_flush_pages, + .vop_release = xfs_release, + .vop_iflush = xfs_inode_flush, +}; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-alpha/bitops.h linux.22-ac2/include/asm-alpha/bitops.h --- linux.vanilla/include/asm-alpha/bitops.h 2001-10-12 23:35:54.000000000 +0100 +++ linux.22-ac2/include/asm-alpha/bitops.h 2003-06-29 16:09:26.000000000 +0100 @@ -3,6 +3,7 @@ #include #include +#include /* * Copyright 1994, Linus Torvalds. @@ -74,6 +75,17 @@ * WARNING: non atomic version. */ static __inline__ void +__clear_bit(unsigned long nr, volatile void * addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m &= ~(1 << (nr & 31)); +} + +/* + * WARNING: non atomic version. + */ +static __inline__ void __change_bit(unsigned long nr, volatile void * addr) { int *m = ((int *) addr) + (nr >> 5); @@ -264,6 +276,28 @@ #endif } +/* + * __ffs = Find First set bit in word. Undefined if no set bit exists. + */ +static inline unsigned long __ffs(unsigned long word) +{ +#if defined(__alpha_cix__) && defined(__alpha_fix__) + /* Whee. EV67 can calculate it directly. */ + unsigned long result; + __asm__("cttz %1,%0" : "=r"(result) : "r"(word)); + return result; +#else + unsigned long bits, qofs, bofs; + + __asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word)); + qofs = ffz_b(bits); + bits = __kernel_extbl(word, qofs); + bofs = ffz_b(~bits); + + return qofs*8 + bofs; +#endif +} + #ifdef __KERNEL__ /* @@ -278,6 +312,20 @@ return word ? result+1 : 0; } +/* + * fls: find last bit set. + */ +#if defined(__alpha_cix__) && defined(__alpha_fix__) +static inline int fls(int word) +{ + long result; + __asm__("ctlz %1,%0" : "=r"(result) : "r"(word & 0xffffffff)); + return 64 - result; +} +#else +#define fls generic_fls +#endif + /* Compute powers of two for the given integer. */ static inline int floor_log2(unsigned long word) { @@ -365,13 +413,77 @@ } /* - * The optimizer actually does good code for this case.. + * Find next one bit in a bitmap reasonably efficiently. 
+ */ +static inline unsigned long +find_next_bit(void * addr, unsigned long size, unsigned long offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp &= ~0UL << offset; + if (size < 64) + goto found_first; + if (tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if ((tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; +found_first: + tmp &= ~0UL >> (64 - size); + if (!tmp) + return result + size; +found_middle: + return result + __ffs(tmp); +} + +/* + * The optimizer actually does good code for this case. */ #define find_first_zero_bit(addr, size) \ find_next_zero_bit((addr), (size), 0) +#define find_first_bit(addr, size) \ + find_next_bit((addr), (size), 0) #ifdef __KERNEL__ +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is set. + */ +static inline unsigned long +_sched_find_first_bit(unsigned long *b) +{ + unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; + unsigned long ofs; + + ofs = (b1 ? 64 : 128); + b1 = (b1 ? b1 : b2); + ofs = (b0 ? 0 : ofs); + b0 = (b0 ? b0 : b1); + + return __ffs(b0) + ofs; +} + + #define ext2_set_bit __test_and_set_bit #define ext2_clear_bit __test_and_clear_bit #define ext2_test_bit test_bit diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-alpha/smp.h linux.22-ac2/include/asm-alpha/smp.h --- linux.vanilla/include/asm-alpha/smp.h 2001-09-13 23:21:32.000000000 +0100 +++ linux.22-ac2/include/asm-alpha/smp.h 2003-06-29 16:09:26.000000000 +0100 @@ -55,7 +55,7 @@ #define cpu_logical_map(cpu) __cpu_logical_map[cpu] #define hard_smp_processor_id() __hard_smp_processor_id() -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) extern unsigned long cpu_present_mask; #define cpu_online_map cpu_present_mask diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-alpha/system.h linux.22-ac2/include/asm-alpha/system.h --- linux.vanilla/include/asm-alpha/system.h 2003-06-14 00:11:39.000000000 +0100 +++ linux.22-ac2/include/asm-alpha/system.h 2003-06-29 16:09:26.000000000 +0100 @@ -310,11 +310,11 @@ #define __sti() do { barrier(); setipl(IPL_MIN); } while(0) #define __save_flags(flags) ((flags) = rdps()) #define __save_and_cli(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) -#define __save_and_sti(flags) do { barrier(); (flags) = swpipl(IPL_MIN); } while(0) +#define __save_and_sti(flags) do { (flags) = swpipl(IPL_MIN); barrier(); } while(0) #define __restore_flags(flags) do { barrier(); setipl(flags); barrier(); } while(0) #define local_irq_save(flags) __save_and_cli(flags) -#define local_irq_set(flags) __save_and_sti(flags) +#define local_irq_set(flags) __save_and_sti(flags) #define local_irq_restore(flags) __restore_flags(flags) #define local_irq_disable() __cli() #define local_irq_enable() __sti() @@ -323,8 +323,6 @@ extern int global_irq_holder; -#define save_and_cli(flags) (save_flags(flags), cli()) - extern void __global_cli(void); extern void __global_sti(void); extern unsigned long __global_save_flags(void); @@ -334,6 +332,8 @@ #define sti() __global_sti() #define save_flags(flags) ((flags) = 
__global_save_flags()) #define restore_flags(flags) __global_restore_flags(flags) +#define save_and_cli(flags) (save_flags(flags), cli()) +#define save_and_sti(flags) (save_flags(flags), sti()) #else /* CONFIG_SMP */ @@ -341,6 +341,7 @@ #define sti() __sti() #define save_flags(flags) __save_flags(flags) #define save_and_cli(flags) __save_and_cli(flags) +#define save_and_sti(flags) __save_and_sti(flags) #define restore_flags(flags) __restore_flags(flags) #endif /* CONFIG_SMP */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-generic/bitops.h linux.22-ac2/include/asm-generic/bitops.h --- linux.vanilla/include/asm-generic/bitops.h 2000-11-28 01:47:38.000000000 +0000 +++ linux.22-ac2/include/asm-generic/bitops.h 2003-06-29 16:09:26.000000000 +0100 @@ -51,6 +51,12 @@ return ((mask & *addr) != 0); } +/* + * fls: find last bit set. + */ + +#define fls(x) generic_fls(x) + #ifdef __KERNEL__ /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/bitops.h linux.22-ac2/include/asm-i386/bitops.h --- linux.vanilla/include/asm-i386/bitops.h 2003-08-28 16:45:43.000000000 +0100 +++ linux.22-ac2/include/asm-i386/bitops.h 2003-09-01 13:54:21.000000000 +0100 @@ -6,6 +6,7 @@ */ #include +#include /* * These have to be done with inline assembly: that way the bit-setting @@ -75,6 +76,14 @@ :"=m" (ADDR) :"Ir" (nr)); } + +static __inline__ void __clear_bit(int nr, volatile void * addr) +{ + __asm__ __volatile__( + "btrl %1,%0" + :"=m" (ADDR) + :"Ir" (nr)); +} #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() @@ -284,6 +293,34 @@ } /** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first set bit, not the number of the byte + * containing a bit. + */ +static __inline__ int find_first_bit(void * addr, unsigned size) +{ + int d0, d1; + int res; + + /* This looks at memory. Mark it volatile to tell gcc not to move it around */ + __asm__ __volatile__( + "xorl %%eax,%%eax\n\t" + "repe; scasl\n\t" + "jz 1f\n\t" + "leal -4(%%edi),%%edi\n\t" + "bsfl (%%edi),%%eax\n" + "1:\tsubl %%ebx,%%edi\n\t" + "shll $3,%%edi\n\t" + "addl %%edi,%%eax" + :"=a" (res), "=&c" (d0), "=&D" (d1) + :"1" ((size + 31) >> 5), "2" (addr), "b" (addr)); + return res; +} + +/** * find_next_zero_bit - find the first zero bit in a memory region * @addr: The address to base the search on * @offset: The bitnumber to start searching at @@ -296,7 +333,7 @@ if (bit) { /* - * Look for zero in first byte + * Look for zero in the first 32 bits. 
*/ __asm__("bsfl %1,%0\n\t" "jne 1f\n\t" @@ -317,6 +354,39 @@ } /** + * find_next_bit - find the first set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +static __inline__ int find_next_bit (void * addr, int size, int offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 5); + int set = 0, bit = offset & 31, res; + + if (bit) { + /* + * Look for nonzero in the first 32 bits: + */ + __asm__("bsfl %1,%0\n\t" + "jne 1f\n\t" + "movl $32, %0\n" + "1:" + : "=r" (set) + : "r" (*p >> bit)); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr)); + return (offset + set + res); +} + +/** * ffz - find first zero in word. * @word: The word to search * @@ -330,8 +400,47 @@ return word; } +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static __inline__ unsigned long __ffs(unsigned long word) +{ + __asm__("bsfl %1,%0" + :"=r" (word) + :"rm" (word)); + return word; +} + +/* + * fls: find last bit set. + */ + +#define fls(x) generic_fls(x) + #ifdef __KERNEL__ +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is cleared. + */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 32; + if (unlikely(b[2])) + return __ffs(b[2]) + 64; + if (b[3]) + return __ffs(b[3]) + 96; + return __ffs(b[4]) + 128; +} + /** * ffs - find first bit set * @x: the word to search diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/cpufeature.h linux.22-ac2/include/asm-i386/cpufeature.h --- linux.vanilla/include/asm-i386/cpufeature.h 2003-08-28 16:45:43.000000000 +0100 +++ linux.22-ac2/include/asm-i386/cpufeature.h 2003-08-13 14:28:47.000000000 +0100 @@ -10,9 +10,9 @@ /* Sample usage: CPU_FEATURE_P(cpu.x86_capability, FPU) */ #define CPU_FEATURE_P(CAP, FEATURE) test_bit(CAP, X86_FEATURE_##FEATURE ##_BIT) -#define NCAPINTS 4 /* Currently we have 4 32-bit words worth of info */ +#define NCAPINTS 6 /* Currently we have 6 32-bit words worth of info */ -/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */ +/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ @@ -47,6 +47,7 @@ /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ /* Don't duplicate feature flags which are redundant with Intel! */ #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ +#define X86_FEATURE_MP (1*32+19) /* MP Capable. */ #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! 
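Each X86_FEATURE_* constant encodes "32 * capability word + bit within that word", so raising NCAPINTS from 4 to 6 only widens x86_capability[] to make room for the Linux-defined word 3, the Intel ecx word 4 and the new VIA/Centaur word 5. For example, X86_FEATURE_MP == 1*32 + 19 is bit 19 of x86_capability[1], and the cpu_has() test defined below reduces to the following (has_feature() is a hypothetical wrapper shown only to make the indexing explicit):

static inline int has_feature(struct cpuinfo_x86 *c, int feature)
{
	/* same as cpu_has(c, feature): word = feature / 32, bit = feature % 32 */
	return test_bit(feature, c->x86_capability);
}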
extensions */ @@ -63,10 +64,19 @@ #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ +/* cpu types for specific tunings: */ +#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ +#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ +#define X86_FEATURE_P3 (3*32+ 6) /* P3 */ +#define X86_FEATURE_P4 (3*32+ 7) /* P4 */ -/* Intel defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ +/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ +#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ + + #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) @@ -77,7 +87,9 @@ #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) #define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) +#define cpu_has_sse2 boot_cpu_has(X86_FEATURE_XMM2) #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) +#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) #define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) #define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) #define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) @@ -87,6 +99,7 @@ #define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) #define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) #define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) +#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) #endif /* __ASM_I386_CPUFEATURE_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/desc.h linux.22-ac2/include/asm-i386/desc.h --- linux.vanilla/include/asm-i386/desc.h 2001-07-26 21:40:32.000000000 +0100 +++ linux.22-ac2/include/asm-i386/desc.h 2003-06-29 16:09:26.000000000 +0100 @@ -18,23 +18,31 @@ * 9 - APM BIOS support * 10 - APM BIOS support * 11 - APM BIOS support + * 12 - PNPBIOS support + * 13 - PNPBIOS support + * 14 - PNPBIOS support + * 15 - PNPBIOS support + * 16 - PNPBIOS support + * 17 - not used + * 18 - not used + * 19 - not used * * The TSS+LDT descriptors are spread out a bit so that every CPU * has an exclusive cacheline for the per-CPU TSS and LDT: * - * 12 - CPU#0 TSS <-- new cacheline - * 13 - CPU#0 LDT - * 14 - not used - * 15 - not used - * 16 - CPU#1 TSS <-- new cacheline - * 17 - CPU#1 LDT - * 18 - not used - * 19 - not used + * 20 - CPU#0 TSS <-- new cacheline + * 21 - CPU#0 LDT + * 22 - not used + * 23 - not used + * 24 - CPU#1 TSS <-- new cacheline + * 25 - CPU#1 LDT + * 26 - not used + * 27 - not used * ... NR_CPUS per-CPU TSS+LDT's if on SMP * * Entry into gdt where to find first TSS. 
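Working the new numbers from the table above through the macros that follow: with __FIRST_TSS_ENTRY moved from 12 to 20 (and __FIRST_LDT_ENTRY = __FIRST_TSS_ENTRY + 1), __TSS(n) = (n << 2) + 20, so CPU#0 gets GDT entries 20/21 and CPU#1 gets 24/25, exactly the slots listed in the comment. The corresponding selectors are simply the index shifted left by 3 (standard x86, nothing the patch changes):

	CPU#0: TSS entry 20 -> selector 0xa0,  LDT entry 21 -> selector 0xa8
	CPU#1: TSS entry 24 -> selector 0xc0,  LDT entry 25 -> selector 0xc8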
*/ -#define __FIRST_TSS_ENTRY 12 +#define __FIRST_TSS_ENTRY 20 #define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY+1) #define __TSS(n) (((n)<<2) + __FIRST_TSS_ENTRY) @@ -79,13 +87,13 @@ /* * load one particular LDT into the current CPU */ -static inline void load_LDT (struct mm_struct *mm) +static inline void load_LDT (mm_context_t *pc) { int cpu = smp_processor_id(); - void *segments = mm->context.segments; - int count = LDT_ENTRIES; + void *segments = pc->ldt; + int count = pc->size; - if (!segments) { + if (!count) { segments = &default_ldt[0]; count = 5; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/edd.h linux.22-ac2/include/asm-i386/edd.h --- linux.vanilla/include/asm-i386/edd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/asm-i386/edd.h 2003-06-29 16:09:26.000000000 +0100 @@ -0,0 +1,172 @@ +/* + * linux/include/asm-i386/edd.h + * Copyright (C) 2002 Dell Computer Corporation + * by Matt Domsch + * + * structures and definitions for the int 13h, ax={41,48}h + * BIOS Enhanced Disk Drive Services + * This is based on the T13 group document D1572 Revision 0 (August 14 2002) + * available at http://www.t13.org/docs2002/d1572r0.pdf. It is + * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf + * + * In a nutshell, arch/i386/boot/setup.S populates a scratch table + * in the empty_zero_block that contains a list of BIOS-enumerated + * boot devices. + * In arch/i386/kernel/setup.c, this information is + * transferred into the edd structure, and in arch/i386/kernel/edd.c, that + * information is used to identify BIOS boot disk. The code in setup.S + * is very sensitive to the size of these structures. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _ASM_I386_EDD_H +#define _ASM_I386_EDD_H + +#define EDDNR 0x1e9 /* addr of number of edd_info structs at EDDBUF + in empty_zero_block - treat this as 1 byte */ +#define EDDBUF 0x600 /* addr of edd_info structs in empty_zero_block */ +#define EDDMAXNR 6 /* number of edd_info structs starting at EDDBUF */ +#define EDDEXTSIZE 4 /* change these if you muck with the structures */ +#define EDDPARMSIZE 74 +#define CHECKEXTENSIONSPRESENT 0x41 +#define GETDEVICEPARAMETERS 0x48 +#define EDDMAGIC1 0x55AA +#define EDDMAGIC2 0xAA55 + +#ifndef __ASSEMBLY__ + +#define EDD_EXT_FIXED_DISK_ACCESS (1 << 0) +#define EDD_EXT_DEVICE_LOCKING_AND_EJECTING (1 << 1) +#define EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT (1 << 2) +#define EDD_EXT_64BIT_EXTENSIONS (1 << 3) + +#define EDD_INFO_DMA_BOUNDRY_ERROR_TRANSPARENT (1 << 0) +#define EDD_INFO_GEOMETRY_VALID (1 << 1) +#define EDD_INFO_REMOVABLE (1 << 2) +#define EDD_INFO_WRITE_VERIFY (1 << 3) +#define EDD_INFO_MEDIA_CHANGE_NOTIFICATION (1 << 4) +#define EDD_INFO_LOCKABLE (1 << 5) +#define EDD_INFO_NO_MEDIA_PRESENT (1 << 6) +#define EDD_INFO_USE_INT13_FN50 (1 << 7) + +struct edd_device_params { + u16 length; + u16 info_flags; + u32 num_default_cylinders; + u32 num_default_heads; + u32 sectors_per_track; + u64 number_of_sectors; + u16 bytes_per_sector; + u32 dpte_ptr; /* 0xFFFFFFFF for our purposes */ + u16 key; /* = 0xBEDD */ + u8 device_path_info_length; /* = 44 */ + u8 reserved2; + u16 reserved3; + u8 host_bus_type[4]; + u8 interface_type[8]; + union { + struct { + u16 base_address; + u16 reserved1; + u32 reserved2; + } __attribute__ ((packed)) isa; + struct { + u8 bus; + u8 slot; + u8 function; + u8 channel; + u32 reserved; + } __attribute__ ((packed)) pci; + /* pcix is same as pci */ + struct { + u64 reserved; + } __attribute__ ((packed)) ibnd; + struct { + u64 reserved; + } __attribute__ ((packed)) xprs; + struct { + u64 reserved; + } __attribute__ ((packed)) htpt; + struct { + u64 reserved; + } __attribute__ ((packed)) unknown; + } interface_path; + union { + struct { + u8 device; + u8 reserved1; + u16 reserved2; + u32 reserved3; + u64 reserved4; + } __attribute__ ((packed)) ata; + struct { + u8 device; + u8 lun; + u8 reserved1; + u8 reserved2; + u32 reserved3; + u64 reserved4; + } __attribute__ ((packed)) atapi; + struct { + u16 id; + u64 lun; + u16 reserved1; + u32 reserved2; + } __attribute__ ((packed)) scsi; + struct { + u64 serial_number; + u64 reserved; + } __attribute__ ((packed)) usb; + struct { + u64 eui; + u64 reserved; + } __attribute__ ((packed)) i1394; + struct { + u64 wwid; + u64 lun; + } __attribute__ ((packed)) fibre; + struct { + u64 identity_tag; + u64 reserved; + } __attribute__ ((packed)) i2o; + struct { + u32 array_number; + u32 reserved1; + u64 reserved2; + } __attribute((packed)) raid; + struct { + u8 device; + u8 reserved1; + u16 reserved2; + u32 reserved3; + u64 reserved4; + } __attribute__ ((packed)) sata; + struct { + u64 reserved1; + u64 reserved2; + } __attribute__ ((packed)) unknown; + } device_path; + u8 reserved4; + u8 checksum; +} __attribute__ ((packed)); + +struct edd_info { + u8 device; + u8 version; + u16 interface_support; + struct edd_device_params params; +} __attribute__ ((packed)); + +extern struct edd_info edd[EDDMAXNR]; +extern unsigned char eddnr; +#endif /*!__ASSEMBLY__ */ + +#endif /* _ASM_I386_EDD_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/hardirq.h linux.22-ac2/include/asm-i386/hardirq.h --- linux.vanilla/include/asm-i386/hardirq.h 2001-11-22 
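As the header comment says, setup.S leaves up to EDDMAXNR edd_info records at EDDBUF in the empty_zero_block and the count at EDDNR; once setup.c has copied them into edd[]/eddnr, a consumer only needs to walk that array. A hypothetical consumer, sketched here just to show the data flow (the real user is arch/i386/kernel/edd.c, whose code differs):

static int __init edd_sketch_report(void)
{
	unsigned char i;

	for (i = 0; i < eddnr && i < EDDMAXNR; i++) {
		struct edd_info *info = &edd[i];

		printk(KERN_INFO "EDD: BIOS device 0x%02x, EDD version 0x%02x%s\n",
		       info->device, info->version,
		       (info->params.info_flags & EDD_INFO_REMOVABLE) ?
				" (removable)" : "");
	}
	return 0;
}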
19:46:19.000000000 +0000 +++ linux.22-ac2/include/asm-i386/hardirq.h 2003-09-01 13:54:21.000000000 +0100 @@ -12,7 +12,11 @@ unsigned int __local_bh_count; unsigned int __syscall_count; struct task_struct * __ksoftirqd_task; /* waitqueue is too large */ + unsigned long idle_timestamp; unsigned int __nmi_count; /* arch dependent */ +#if CONFIG_X86_LOCAL_APIC + unsigned int apic_timer_irqs; /* arch dependent */ +#endif } ____cacheline_aligned irq_cpustat_t; #include /* Standard mappings for irq_cpustat_t above */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/highmem.h linux.22-ac2/include/asm-i386/highmem.h --- linux.vanilla/include/asm-i386/highmem.h 2003-06-14 00:11:40.000000000 +0100 +++ linux.22-ac2/include/asm-i386/highmem.h 2003-09-01 13:54:21.000000000 +0100 @@ -46,7 +46,7 @@ * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. */ -#define PKMAP_BASE (0xfe000000UL) +#define PKMAP_BASE (0xff800000UL) #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/hw_irq.h linux.22-ac2/include/asm-i386/hw_irq.h --- linux.vanilla/include/asm-i386/hw_irq.h 2003-08-28 16:45:43.000000000 +0100 +++ linux.22-ac2/include/asm-i386/hw_irq.h 2003-09-01 13:54:21.000000000 +0100 @@ -13,8 +13,10 @@ */ #include +#include #include #include +#include /* * IDT vectors usable for external interrupt sources start diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/io_apic.h linux.22-ac2/include/asm-i386/io_apic.h --- linux.vanilla/include/asm-i386/io_apic.h 2003-08-28 16:45:43.000000000 +0100 +++ linux.22-ac2/include/asm-i386/io_apic.h 2003-09-01 13:54:21.000000000 +0100 @@ -26,8 +26,7 @@ LTS : 1, delivery_type : 1, __reserved_1 : 8, - ID : 4, - __reserved_0 : 4; + ID : 8; } __attribute__ ((packed)); struct IO_APIC_reg_01 { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/mmu_context.h linux.22-ac2/include/asm-i386/mmu_context.h --- linux.vanilla/include/asm-i386/mmu_context.h 2002-08-03 16:08:30.000000000 +0100 +++ linux.22-ac2/include/asm-i386/mmu_context.h 2003-09-01 13:54:31.000000000 +0100 @@ -7,10 +7,12 @@ #include /* - * possibly do the LDT unload here? + * hooks to add arch specific data into the mm struct. + * Note that destroy_context is called even if init_new_context + * fails. */ -#define destroy_context(mm) do { } while(0) -#define init_new_context(tsk,mm) 0 +int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +void destroy_context(struct mm_struct *mm); #ifdef CONFIG_SMP @@ -27,22 +29,21 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu) { - if (prev != next) { + if (likely(prev != next)) { /* stop flush ipis for the previous mm */ clear_bit(cpu, &prev->cpu_vm_mask); - /* - * Re-load LDT if necessary - */ - if (prev->context.segments != next->context.segments) - load_LDT(next); #ifdef CONFIG_SMP cpu_tlbstate[cpu].state = TLBSTATE_OK; cpu_tlbstate[cpu].active_mm = next; #endif set_bit(cpu, &next->cpu_vm_mask); - set_bit(cpu, &next->context.cpuvalid); /* Re-load page tables */ load_cr3(next->pgd); + /* load_LDT, if either the previous or next thread + * has a non-default LDT. + */ + if (unlikely(next->context.size+prev->context.size)) + load_LDT(&next->context); } #ifdef CONFIG_SMP else { @@ -54,9 +55,8 @@ * tlb flush IPI delivery. We must reload %cr3. 
*/ load_cr3(next->pgd); + load_LDT(&next->context); } - if (!test_and_set_bit(cpu, &next->context.cpuvalid)) - load_LDT(next); } #endif } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/mmu.h linux.22-ac2/include/asm-i386/mmu.h --- linux.vanilla/include/asm-i386/mmu.h 2001-07-26 02:03:04.000000000 +0100 +++ linux.22-ac2/include/asm-i386/mmu.h 2003-06-29 16:09:26.000000000 +0100 @@ -4,10 +4,13 @@ /* * The i386 doesn't have a mmu context, but * we put the segment information here. + * + * cpu_vm_mask is used to optimize ldt flushing. */ typedef struct { - void *segments; - unsigned long cpuvalid; + int size; + struct semaphore sem; + void * ldt; } mm_context_t; #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/msr.h linux.22-ac2/include/asm-i386/msr.h --- linux.vanilla/include/asm-i386/msr.h 2003-08-28 16:45:43.000000000 +0100 +++ linux.22-ac2/include/asm-i386/msr.h 2003-08-28 22:20:06.000000000 +0100 @@ -17,6 +17,21 @@ : /* no outputs */ \ : "c" (msr), "a" (val1), "d" (val2)) +#define rdmsrl(msr,val) do { \ + unsigned long l__,h__; \ + rdmsr (msr, l__, h__); \ + val = l__; \ + val |= ((u64)h__<<32); \ +} while(0) + +static inline void wrmsrl (unsigned long msr, unsigned long long val) +{ + unsigned long lo, hi; + lo = (unsigned long) val; + hi = val >> 32; + wrmsr (msr, lo, hi); +} + #define rdtsc(low,high) \ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) @@ -92,7 +107,7 @@ #define MSR_K7_HWCR 0xC0010015 #define MSR_K7_CLK_CTL 0xC001001b #define MSR_K7_FID_VID_CTL 0xC0010041 -#define MSR_K7_VID_STATUS 0xC0010042 +#define MSR_K7_FID_VID_STATUS 0xC0010042 /* Centaur-Hauls/IDT defined MSRs. */ #define MSR_IDT_FCR1 0x107 @@ -113,6 +128,7 @@ /* VIA Cyrix defined MSRs*/ #define MSR_VIA_FCR 0x1107 #define MSR_VIA_LONGHAUL 0x110a +#define MSR_VIA_RNG 0x110b #define MSR_VIA_BCR2 0x1147 /* Transmeta defined MSRs */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/processor.h linux.22-ac2/include/asm-i386/processor.h --- linux.vanilla/include/asm-i386/processor.h 2003-08-28 16:45:43.000000000 +0100 +++ linux.22-ac2/include/asm-i386/processor.h 2003-09-01 13:54:21.000000000 +0100 @@ -426,9 +426,12 @@ */ extern int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); -/* Copy and release all segment info associated with a VM */ -extern void copy_segments(struct task_struct *p, struct mm_struct * mm); -extern void release_segments(struct mm_struct * mm); +/* Copy and release all segment info associated with a VM + * Unusable due to lack of error handling, use {init_new,destroy}_context + * instead. + */ +static inline void copy_segments(struct task_struct *p, struct mm_struct * mm) { } +static inline void release_segments(struct mm_struct * mm) { } /* * Return saved PC of a blocked thread. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/smpboot.h linux.22-ac2/include/asm-i386/smpboot.h --- linux.vanilla/include/asm-i386/smpboot.h 2003-08-28 16:45:43.000000000 +0100 +++ linux.22-ac2/include/asm-i386/smpboot.h 2003-09-01 13:54:31.000000000 +0100 @@ -21,7 +21,19 @@ /* * Can't recognize Summit xAPICs at present, so use the OEM ID. 
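The rdmsrl()/wrmsrl() helpers added to msr.h above wrap the existing rdmsr/wrmsr pair so callers can work with a single 64-bit value instead of separate EDX:EAX halves. A usage sketch (MSR 0x10, the time stamp counter, is used only as a familiar 64-bit register; read_tsc_msr/write_msr64 are illustrative names):

static inline unsigned long long read_tsc_msr(void)
{
	unsigned long long val;

	rdmsrl(0x10, val);		/* val = ((u64)high << 32) | low */
	return val;
}

static inline void write_msr64(unsigned int msr, unsigned long long val)
{
	wrmsrl(msr, val);		/* split back into low/high for wrmsr */
}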
*/ - if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(prod, "VIGIL SMP", 9)){ + if (!strncmp(oem, "IBM ENSW", 8) && + (!strncmp(prod, "VIGIL SMP", 9) || + !strncmp(prod, "EXA", 3) || + !strncmp(prod, "RUTHLESS", 8))){ + clustered_apic_mode = CLUSTERED_APIC_XAPIC; + apic_broadcast_id = APIC_BROADCAST_ID_XAPIC; + int_dest_addr_mode = APIC_DEST_PHYSICAL; + int_delivery_mode = dest_Fixed; + esr_disable = 1; + /*Start cyclone clock*/ + cyclone_setup(0); + } + else if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(prod, "RUTHLESS SMP", 9)){ clustered_apic_mode = CLUSTERED_APIC_XAPIC; apic_broadcast_id = APIC_BROADCAST_ID_XAPIC; int_dest_addr_mode = APIC_DEST_PHYSICAL; @@ -122,11 +134,10 @@ /*round robin the interrupts*/ cpu = (cpu+1)%smp_num_cpus; return cpu_to_physical_apicid(cpu); - default: } return cpu_online_map; } #else -#define target_cpus() (cpu_online_map) +#define target_cpus() (0xFF) #endif #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/smp.h linux.22-ac2/include/asm-i386/smp.h --- linux.vanilla/include/asm-i386/smp.h 2002-11-29 21:27:23.000000000 +0000 +++ linux.22-ac2/include/asm-i386/smp.h 2003-09-01 13:54:21.000000000 +0100 @@ -40,6 +40,7 @@ extern void smp_flush_tlb(void); extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); extern void smp_send_reschedule(int cpu); +extern void smp_send_reschedule_all(void); extern void smp_invalidate_rcv(void); /* Process an NMI */ extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); @@ -58,6 +59,8 @@ return cpu; } +#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) + /* * Some lowlevel functions might want to know about * the real APIC ID <-> CPU # mapping. @@ -81,7 +84,7 @@ * so this is correct in the x86 case. */ -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) static __inline int hard_smp_processor_id(void) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-i386/system.h linux.22-ac2/include/asm-i386/system.h --- linux.vanilla/include/asm-i386/system.h 2003-08-28 16:45:43.000000000 +0100 +++ linux.22-ac2/include/asm-i386/system.h 2003-09-01 13:54:21.000000000 +0100 @@ -12,25 +12,22 @@ struct task_struct; /* one of the stranger aspects of C forward declarations.. 
*/ extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); -#define prepare_to_switch() do { } while(0) #define switch_to(prev,next,last) do { \ asm volatile("pushl %%esi\n\t" \ "pushl %%edi\n\t" \ "pushl %%ebp\n\t" \ "movl %%esp,%0\n\t" /* save ESP */ \ - "movl %3,%%esp\n\t" /* restore ESP */ \ + "movl %2,%%esp\n\t" /* restore ESP */ \ "movl $1f,%1\n\t" /* save EIP */ \ - "pushl %4\n\t" /* restore EIP */ \ + "pushl %3\n\t" /* restore EIP */ \ "jmp __switch_to\n" \ "1:\t" \ "popl %%ebp\n\t" \ "popl %%edi\n\t" \ "popl %%esi\n\t" \ - :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ - "=b" (last) \ + :"=m" (prev->thread.esp),"=m" (prev->thread.eip) \ :"m" (next->thread.esp),"m" (next->thread.eip), \ - "a" (prev), "d" (next), \ - "b" (prev)); \ + "a" (prev), "d" (next)); \ } while (0) #define _set_base(addr,base) do { unsigned long __pr; \ @@ -323,18 +320,19 @@ /* used in the idle loop; sti takes one instruction cycle to complete */ #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") -#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0); -#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0); +#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0); +#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0); /* For spinlocks etc */ #if 0 #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory") -#define local_irq_set(x) __asm__ __volatile__("pushfl ; popl %0 ; sti":"=g" (x): /* no input */ :"memory") -#else -#define local_irq_save(x) __save_and_cli(x) -#define local_irq_set(x) __save_and_sti(x) +#define local_irq_set(x) __asm__ __volatile__("pushfl ; popl %0 ; sti":"=g" (x): /* no input */ :"memory") +#else +#define local_irq_save(x) __save_and_cli(x) +#define local_irq_set(x) __save_and_sti(x) #endif + #define local_irq_restore(x) __restore_flags(x) #define local_irq_disable() __cli() #define local_irq_enable() __sti() @@ -349,8 +347,8 @@ #define sti() __global_sti() #define save_flags(x) ((x)=__global_save_flags()) #define restore_flags(x) __global_restore_flags(x) -#define save_and_cli(x) do { save_flags(x); cli(); } while(0); -#define save_and_sti(x) do { save_flags(x); sti(); } while(0); +#define save_and_cli(x) do { save_flags(x); cli(); } while(0) +#define save_and_sti(x) do { save_flags(x); sti(); } while(0) #else diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-ppc/bitops.h linux.22-ac2/include/asm-ppc/bitops.h --- linux.vanilla/include/asm-ppc/bitops.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/asm-ppc/bitops.h 2003-07-22 18:32:19.000000000 +0100 @@ -7,6 +7,7 @@ #define _PPC_BITOPS_H #include +#include #include #include @@ -241,6 +242,13 @@ return __ilog2(x & -x); } +#ifdef __KERNEL__ + +static inline int __ffs(unsigned long x) +{ + return __ilog2(x & -x); +} + /* * ffs: find first bit set. This is defined the same way as * the libc and compiler builtin ffs routines, therefore @@ -252,6 +260,18 @@ } /* + * fls: find last (most-significant) bit set. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static __inline__ int fls(unsigned int x) +{ + int lz; + + asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); + return 32 - lz; +} + +/* * hweightN: returns the hamming weight (i.e. 
the number * of bits set) of a N-bit word */ @@ -260,6 +280,25 @@ #define hweight16(x) generic_hweight16(x) #define hweight8(x) generic_hweight8(x) +#endif /* __KERNEL__ */ + +/* + * Find the first bit set in a 140-bit bitmap. + * The first 100 bits are unlikely to be set. + */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 32; + if (unlikely(b[2])) + return __ffs(b[2]) + 64; + if (b[3]) + return __ffs(b[3]) + 96; + return __ffs(b[4]) + 128; +} + /* * This implementation of find_{first,next}_zero_bit was stolen from * Linus' asm-alpha/bitops.h. @@ -306,6 +345,8 @@ } +#ifdef __KERNEL__ + #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, addr) #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, addr) @@ -369,5 +410,7 @@ #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) +#endif /* __KERNEL__ */ + #endif /* _PPC_BITOPS_H */ #endif /* __KERNEL__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-ppc/delay.h linux.22-ac2/include/asm-ppc/delay.h --- linux.vanilla/include/asm-ppc/delay.h 2003-06-14 00:11:40.000000000 +0100 +++ linux.22-ac2/include/asm-ppc/delay.h 2003-06-29 16:09:28.000000000 +0100 @@ -15,52 +15,44 @@ extern unsigned long loops_per_jiffy; -extern void __delay(unsigned int loops); +/* maximum permitted argument to udelay */ +#define __MAX_UDELAY 1000000 -/* - * Note that 19 * 226 == 4294 ==~ 2^32 / 10^6, so - * loops = (4294 * usecs * loops_per_jiffy * HZ) / 2^32. - * - * The mulhwu instruction gives us loops = (a * b) / 2^32. - * We choose a = usecs * 19 * HZ and b = loops_per_jiffy * 226 - * because this lets us support a wide range of HZ and - * loops_per_jiffy values without either a or b overflowing 2^32. - * Thus we need usecs * HZ <= (2^32 - 1) / 19 = 226050910 and - * loops_per_jiffy <= (2^32 - 1) / 226 = 19004280 - * (which corresponds to ~3800 bogomips at HZ = 100). - * -- paulus - */ -#define __MAX_UDELAY (226050910UL/HZ) /* maximum udelay argument */ -#define __MAX_NDELAY (4294967295UL/HZ) /* maximum ndelay argument */ +extern void __delay(unsigned int loops); -extern __inline__ void __udelay(unsigned int x) +/* N.B. the `secs' parameter here is a fixed-point number with + the binary point to the left of the most-significant bit. */ +extern __inline__ void __const_udelay(unsigned int secs) { unsigned int loops; __asm__("mulhwu %0,%1,%2" : "=r" (loops) : - "r" (x), "r" (loops_per_jiffy * 226)); - __delay(loops); + "r" (secs), "r" (loops_per_jiffy)); + __delay(loops * HZ); } -extern __inline__ void __ndelay(unsigned int x) +/* + * note that 4294 == 2^32 / 10^6, multiplying by 4294 converts from + * microseconds to a 32-bit fixed-point number of seconds. + */ +extern __inline__ void __udelay(unsigned int usecs) { - unsigned int loops; - - __asm__("mulhwu %0,%1,%2" : "=r" (loops) : - "r" (x), "r" (loops_per_jiffy * 5)); - __delay(loops); + __const_udelay(usecs * 4294); +} +extern __inline__ void __ndelay(unsigned int usecs) +{ + __const_udelay(usecs * 5); } extern void __bad_udelay(void); /* deliberately undefined */ -extern void __bad_ndelay(void); /* deliberately undefined */ +extern void __bad_ndelay(void); #define udelay(n) (__builtin_constant_p(n)? \ - ((n) > __MAX_UDELAY? __bad_udelay(): __udelay((n) * (19 * HZ))) : \ - __udelay((n) * (19 * HZ))) - -#define ndelay(n) (__builtin_constant_p(n)? 
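The constant 4294 in the rewritten ppc udelay() path is just 2^32 / 10^6 rounded down, so usecs * 4294 is the requested delay expressed as a 32-bit binary fraction of a second; one mulhwu against loops_per_jiffy plus a multiply by HZ then turns it into loop iterations. A worked example with assumed values loops_per_jiffy = 2,500,000 and HZ = 100:

	udelay(1000):
		secs  = 1000 * 4294 = 4,294,000           (~ 0.001 * 2^32)
		loops = (4,294,000 * 2,500,000) >> 32     = 2499
		__delay(2499 * HZ)                        = 249,900 iterations

	exact value: 1000 us * 2,500,000 * 100 / 10^6  = 250,000 iterations

The ~0.04% shortfall comes from 4294 being slightly below 2^32/10^6 and from the truncation in mulhwu.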
\ - ((n) > __MAX_NDELAY? __bad_ndelay(): __ndelay((n) * HZ)) : \ - __ndelay((n) * HZ)) + ((n) > __MAX_UDELAY? __bad_udelay(): __const_udelay((n) * 4294u)) : \ + __udelay(n)) +#define ndelay(n) (__builtin_constant_p(n) ? \ + ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ + __ndelay(n)) #endif /* defined(_PPC_DELAY_H) */ #endif /* __KERNEL__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-ppc/io.h linux.22-ac2/include/asm-ppc/io.h --- linux.vanilla/include/asm-ppc/io.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/asm-ppc/io.h 2003-06-29 16:29:39.000000000 +0100 @@ -75,6 +75,24 @@ #define insl(port, buf, nl) _insl_ns((u32 *)((port)+_IO_BASE), (buf), (nl)) #define outsl(port, buf, nl) _outsl_ns((u32 *)((port)+_IO_BASE), (buf), (nl)) +/* + * io_flush(unsigned int value) + * + * Similar to AIX function, it makes the CPU think the value + * has actually been _used_. Call it after an IO read to + * make sure the read was actually done (especially useful + * for reads used for flushing posted writes) + */ +static inline void __io_flush(unsigned int value) +{ + __asm__ __volatile__( + " twi 0,%0,0\n" + " isync\n" + " nop\n" /* Is that one necessary ? */ + : : "r" (value)); +} +#define io_flush(val) __io_flush((unsigned int)(val)) + #ifdef CONFIG_ALL_PPC /* * On powermacs, we will get a machine check exception if we diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390/bitops.h linux.22-ac2/include/asm-s390/bitops.h --- linux.vanilla/include/asm-s390/bitops.h 2002-08-03 16:08:32.000000000 +0100 +++ linux.22-ac2/include/asm-s390/bitops.h 2003-06-29 16:09:29.000000000 +0100 @@ -47,6 +47,7 @@ extern const char _oi_bitmap[]; extern const char _ni_bitmap[]; extern const char _zb_findmap[]; +extern const char _sb_findmap[]; #ifdef CONFIG_SMP /* @@ -642,6 +643,45 @@ return (res < size) ? res : size; } +static inline int +find_first_bit(unsigned long * addr, unsigned size) +{ + unsigned long cmp, count; + int res; + + if (!size) + return 0; + __asm__(" slr %1,%1\n" + " lr %2,%3\n" + " slr %0,%0\n" + " ahi %2,31\n" + " srl %2,5\n" + "0: c %1,0(%0,%4)\n" + " jne 1f\n" + " ahi %0,4\n" + " brct %2,0b\n" + " lr %0,%3\n" + " j 4f\n" + "1: l %2,0(%0,%4)\n" + " sll %0,3\n" + " lhi %1,0xff\n" + " tml %2,0xffff\n" + " jnz 2f\n" + " ahi %0,16\n" + " srl %2,16\n" + "2: tml %2,0x00ff\n" + " jnz 3f\n" + " ahi %0,8\n" + " srl %2,8\n" + "3: nr %2,%1\n" + " ic %2,0(%2,%5)\n" + " alr %0,%2\n" + "4:" + : "=&a" (res), "=&d" (cmp), "=&a" (count) + : "a" (size), "a" (addr), "a" (&_sb_findmap) : "cc" ); + return (res < size) ? 
res : size; +} + static __inline__ int find_next_zero_bit (void * addr, int size, int offset) { unsigned long * p = ((unsigned long *) addr) + (offset >> 5); @@ -680,6 +720,45 @@ return (offset + res); } +static inline int +find_next_bit (unsigned long * addr, int size, int offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 5); + unsigned long bitvec, reg; + int set, bit = offset & 31, res; + + if (bit) { + /* + * Look for set bit in first word + */ + bitvec = (*p) >> bit; + __asm__(" slr %0,%0\n" + " lhi %2,0xff\n" + " tml %1,0xffff\n" + " jnz 0f\n" + " ahi %0,16\n" + " srl %1,16\n" + "0: tml %1,0x00ff\n" + " jnz 1f\n" + " ahi %0,8\n" + " srl %1,8\n" + "1: nr %1,%2\n" + " ic %1,0(%1,%3)\n" + " alr %0,%1" + : "=&d" (set), "+a" (bitvec), "=&d" (reg) + : "a" (&_sb_findmap) : "cc" ); + if (set < (32 - bit)) + return set + offset; + offset += 32 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr)); + return (offset + res); +} + /* * ffz = Find First Zero in word. Undefined if no zero exists, * so code should check against ~0UL first.. @@ -708,6 +787,43 @@ } /* + * __ffs = find first bit in word. Undefined if no bit exists, + * so code should check against 0UL first.. + */ +static inline unsigned long __ffs (unsigned long word) +{ + unsigned long reg, result; + + __asm__(" slr %0,%0\n" + " lhi %2,0xff\n" + " tml %1,0xffff\n" + " jnz 0f\n" + " ahi %0,16\n" + " srl %1,16\n" + "0: tml %1,0x00ff\n" + " jnz 1f\n" + " ahi %0,8\n" + " srl %1,8\n" + "1: nr %1,%2\n" + " ic %1,0(%1,%3)\n" + " alr %0,%1" + : "=&d" (result), "+a" (word), "=&d" (reg) + : "a" (&_sb_findmap) : "cc" ); + return result; +} + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is cleared. + */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + return find_first_bit(b, 140); +} + +/* * ffs: find first bit set. This is defined the same way as * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). @@ -745,6 +861,39 @@ } /* + * fls: find last bit set. + */ +extern __inline__ int fls(int x) +{ + int r = 32; + + if (x == 0) + return 0; + __asm__(" tmh %1,0xffff\n" + " jz 0f\n" + " sll %1,16\n" + " ahi %0,-16\n" + "0: tmh %1,0xff00\n" + " jz 1f\n" + " sll %1,8\n" + " ahi %0,-8\n" + "1: tmh %1,0xf000\n" + " jz 2f\n" + " sll %1,4\n" + " ahi %0,-4\n" + "2: tmh %1,0xc000\n" + " jz 3f\n" + " sll %1,2\n" + " ahi %0,-2\n" + "3: tmh %1,0x8000\n" + " jz 4f\n" + " ahi %0,-1\n" + "4:" + : "+d" (r), "+d" (x) : : "cc" ); + return r; +} + +/* * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390/ctrlchar.h linux.22-ac2/include/asm-s390/ctrlchar.h --- linux.vanilla/include/asm-s390/ctrlchar.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/asm-s390/ctrlchar.h 2003-06-29 16:09:29.000000000 +0100 @@ -0,0 +1,20 @@ +/* + * Implemented in drivers/s390/char/ctrlchar.c + * Unified handling of special chars. 
+ * + * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Fritz Elfert + * + */ + +struct tty_struct; + +extern unsigned int ctrlchar_handle(const unsigned char *buf, int len, + struct tty_struct *tty, int is_console); +extern void ctrlchar_init(void); + +#define CTRLCHAR_CTRL (0 << 8) +#define CTRLCHAR_NONE (1 << 8) +#define CTRLCHAR_SYSRQ (2 << 8) + +#define CTRLCHAR_MASK (~0xffu) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390/setup.h linux.22-ac2/include/asm-s390/setup.h --- linux.vanilla/include/asm-s390/setup.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/asm-s390/setup.h 2003-07-06 14:11:47.000000000 +0100 @@ -25,15 +25,16 @@ */ extern unsigned long machine_flags; -#define MACHINE_IS_VM (machine_flags & 1) -#define MACHINE_HAS_IEEE (machine_flags & 2) -#define MACHINE_IS_P390 (machine_flags & 4) -#define MACHINE_HAS_CSP (machine_flags & 8) -#define MACHINE_HAS_MVPG (machine_flags & 16) +#define MACHINE_IS_VM (machine_flags & 1) +#define MACHINE_HAS_IEEE (machine_flags & 2) +#define MACHINE_IS_P390 (machine_flags & 4) +#define MACHINE_HAS_CSP (machine_flags & 8) +#define MACHINE_HAS_MVPG (machine_flags & 16) +/* 32 is MACHINE_HAS_DIAG44 on s390x */ #define MACHINE_NEW_STIDP (machine_flags & 64) -#define MACHINE_HAS_PFIX (machine_flags & 128) +#define MACHINE_HAS_PFIX (machine_flags & 128) -#define MACHINE_HAS_HWC (!MACHINE_IS_P390) +#define MACHINE_HAS_SCLP (!MACHINE_IS_P390) /* * Console mode. Override with conmode= @@ -42,10 +43,10 @@ extern unsigned int console_device; #define CONSOLE_IS_UNDEFINED (console_mode == 0) -#define CONSOLE_IS_HWC (console_mode == 1) +#define CONSOLE_IS_SCLP (console_mode == 1) #define CONSOLE_IS_3215 (console_mode == 2) #define CONSOLE_IS_3270 (console_mode == 3) -#define SET_CONSOLE_HWC do { console_mode = 1; } while (0) +#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0) #define SET_CONSOLE_3215 do { console_mode = 2; } while (0) #define SET_CONSOLE_3270 do { console_mode = 3; } while (0) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390/smp.h linux.22-ac2/include/asm-s390/smp.h --- linux.vanilla/include/asm-s390/smp.h 2002-11-29 21:27:24.000000000 +0000 +++ linux.22-ac2/include/asm-s390/smp.h 2003-06-29 16:09:29.000000000 +0100 @@ -42,7 +42,7 @@ #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) extern __inline__ int cpu_logical_map(int cpu) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390/tape390.h linux.22-ac2/include/asm-s390/tape390.h --- linux.vanilla/include/asm-s390/tape390.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/asm-s390/tape390.h 2003-06-29 16:09:29.000000000 +0100 @@ -0,0 +1,39 @@ +/************************************************************************* + * + * tape390.h + * enables user programs to display messages on the tape device + * + * S390 and zSeries version + * Copyright (C) 2001 IBM Corporation + * Author(s): Despina Papadopoulou + * + *************************************************************************/ + +#ifndef _TAPE390_H +#define _TAPE390_H + +#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct) + +/* + * The TAPE390_DISPLAY ioctl calls the Load Display command + * which transfers 17 bytes of data from the channel to the subsystem: + * - 1 format control byte, and + * - two 8-byte messages + * + * 
Format control byte: + * 0-2: New Message Overlay + * 3: Alternate Messages + * 4: Blink Message + * 5: Display Low/High Message + * 6: Reserved + * 7: Automatic Load Request + * + */ + +typedef struct display_struct { + char cntrl; + char message1[8]; + char message2[8]; +} display_struct; + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390x/bitops.h linux.22-ac2/include/asm-s390x/bitops.h --- linux.vanilla/include/asm-s390x/bitops.h 2002-08-03 16:08:32.000000000 +0100 +++ linux.22-ac2/include/asm-s390x/bitops.h 2003-06-29 16:09:29.000000000 +0100 @@ -51,6 +51,7 @@ extern const char _oi_bitmap[]; extern const char _ni_bitmap[]; extern const char _zb_findmap[]; +extern const char _sb_findmap[]; #ifdef CONFIG_SMP /* @@ -653,6 +654,48 @@ return (res < size) ? res : size; } +static inline unsigned long +find_first_bit(unsigned long * addr, unsigned long size) +{ + unsigned long res, cmp, count; + + if (!size) + return 0; + __asm__(" slgr %1,%1\n" + " lgr %2,%3\n" + " slgr %0,%0\n" + " aghi %2,63\n" + " srlg %2,%2,6\n" + "0: cg %1,0(%0,%4)\n" + " jne 1f\n" + " aghi %0,8\n" + " brct %2,0b\n" + " lgr %0,%3\n" + " j 5f\n" + "1: lg %2,0(%0,%4)\n" + " sllg %0,%0,3\n" + " clr %2,%1\n" + " jne 2f\n" + " aghi %0,32\n" + " srlg %2,%2,32\n" + "2: lghi %1,0xff\n" + " tmll %2,0xffff\n" + " jnz 3f\n" + " aghi %0,16\n" + " srl %2,16\n" + "3: tmll %2,0x00ff\n" + " jnz 4f\n" + " aghi %0,8\n" + " srl %2,8\n" + "4: ngr %2,%1\n" + " ic %2,0(%2,%5)\n" + " algr %0,%2\n" + "5:" + : "=&a" (res), "=&d" (cmp), "=&a" (count) + : "a" (size), "a" (addr), "a" (&_sb_findmap) : "cc" ); + return (res < size) ? res : size; +} + static __inline__ unsigned long find_next_zero_bit (void * addr, unsigned long size, unsigned long offset) { @@ -697,6 +740,49 @@ return (offset + res); } +static inline unsigned long +find_next_bit (unsigned long * addr, unsigned long size, unsigned long offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 6); + unsigned long bitvec, reg; + unsigned long set, bit = offset & 63, res; + + if (bit) { + /* + * Look for zero in first word + */ + bitvec = (*p) >> bit; + __asm__(" slgr %0,%0\n" + " ltr %1,%1\n" + " jnz 0f\n" + " aghi %0,32\n" + " srlg %1,%1,32\n" + "0: lghi %2,0xff\n" + " tmll %1,0xffff\n" + " jnz 1f\n" + " aghi %0,16\n" + " srlg %1,%1,16\n" + "1: tmll %1,0x00ff\n" + " jnz 2f\n" + " aghi %0,8\n" + " srlg %1,%1,8\n" + "2: ngr %1,%2\n" + " ic %1,0(%1,%3)\n" + " algr %0,%1" + : "=&d" (set), "+a" (bitvec), "=&d" (reg) + : "a" (&_sb_findmap) : "cc" ); + if (set < (64 - bit)) + return set + offset; + offset += 64 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 64 * (p - (unsigned long *) addr)); + return (offset + res); +} + /* * ffz = Find First Zero in word. Undefined if no zero exists, * so code should check against ~0UL first.. @@ -730,6 +816,47 @@ } /* + * __ffs = find first bit in word. Undefined if no bit exists, + * so code should check against 0UL first.. 
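To round out the TAPE390_DISPLAY description above, a hypothetical user-space caller might look like the sketch below; the device path and the zero format-control byte are assumptions for illustration, and the two message fields are fixed 8-byte, space-padded buffers rather than C strings:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/tape390.h>	/* assumed install path for this header */

int tape_show_message(const char *dev /* e.g. "/dev/ntibm0", example only */)
{
	display_struct ds;
	int fd = open(dev, O_RDONLY);

	if (fd < 0)
		return -1;
	memset(&ds, 0, sizeof(ds));		/* cntrl = 0: no overlay/blink options */
	memcpy(ds.message1, "BACKUP  ", 8);
	memcpy(ds.message2, "RUNNING ", 8);
	if (ioctl(fd, TAPE390_DISPLAY, &ds) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}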
+ */ +static inline unsigned long __ffs (unsigned long word) +{ + unsigned long reg, result; + + __asm__(" slgr %0,%0\n" + " ltr %1,%1\n" + " jnz 0f\n" + " aghi %0,32\n" + " srlg %1,%1,32\n" + "0: lghi %2,0xff\n" + " tmll %1,0xffff\n" + " jnz 1f\n" + " aghi %0,16\n" + " srlg %1,%1,16\n" + "1: tmll %1,0x00ff\n" + " jnz 2f\n" + " aghi %0,8\n" + " srlg %1,%1,8\n" + "2: ngr %1,%2\n" + " ic %1,0(%1,%3)\n" + " algr %0,%1" + : "=&d" (result), "+a" (word), "=&d" (reg) + : "a" (&_sb_findmap) : "cc" ); + return result; +} + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is cleared. + */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + return find_first_bit(b, 140); +} + +/* * ffs: find first bit set. This is defined the same way as * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). @@ -767,6 +894,39 @@ } /* + * fls: find last bit set. + */ +extern __inline__ int fls(int x) +{ + int r = 32; + + if (x == 0) + return 0; + __asm__(" tmh %1,0xffff\n" + " jz 0f\n" + " sll %1,16\n" + " ahi %0,-16\n" + "0: tmh %1,0xff00\n" + " jz 1f\n" + " sll %1,8\n" + " ahi %0,-8\n" + "1: tmh %1,0xf000\n" + " jz 2f\n" + " sll %1,4\n" + " ahi %0,-4\n" + "2: tmh %1,0xc000\n" + " jz 3f\n" + " sll %1,2\n" + " ahi %0,-2\n" + "3: tmh %1,0x8000\n" + " jz 4f\n" + " ahi %0,-1\n" + "4:" + : "+d" (r), "+d" (x) : : "cc" ); + return r; +} + +/* * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390x/ctrlchar.h linux.22-ac2/include/asm-s390x/ctrlchar.h --- linux.vanilla/include/asm-s390x/ctrlchar.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/asm-s390x/ctrlchar.h 2003-06-29 16:09:29.000000000 +0100 @@ -0,0 +1,20 @@ +/* + * Implemented in drivers/s390/char/ctrlchar.c + * Unified handling of special chars. + * + * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Fritz Elfert + * + */ + +struct tty_struct; + +extern unsigned int ctrlchar_handle(const unsigned char *buf, int len, + struct tty_struct *tty, int is_console); +extern void ctrlchar_init(void); + +#define CTRLCHAR_CTRL (0 << 8) +#define CTRLCHAR_NONE (1 << 8) +#define CTRLCHAR_SYSRQ (2 << 8) + +#define CTRLCHAR_MASK (~0xffu) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390x/setup.h linux.22-ac2/include/asm-s390x/setup.h --- linux.vanilla/include/asm-s390x/setup.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/asm-s390x/setup.h 2003-06-29 16:09:29.000000000 +0100 @@ -29,10 +29,10 @@ #define MACHINE_IS_P390 (machine_flags & 4) #define MACHINE_HAS_MVPG (machine_flags & 16) #define MACHINE_HAS_DIAG44 (machine_flags & 32) -#define MACHINE_NEW_STIDP (machine_flags & 64) +#define MACHINE_NEW_STIDP (machine_flags & 64) #define MACHINE_HAS_PFIX (0) -#define MACHINE_HAS_HWC (!MACHINE_IS_P390) +#define MACHINE_HAS_SCLP (!MACHINE_IS_P390) /* * Console mode. 
Override with conmode= @@ -41,10 +41,10 @@ extern unsigned int console_device; #define CONSOLE_IS_UNDEFINED (console_mode == 0) -#define CONSOLE_IS_HWC (console_mode == 1) +#define CONSOLE_IS_SCLP (console_mode == 1) #define CONSOLE_IS_3215 (console_mode == 2) #define CONSOLE_IS_3270 (console_mode == 3) -#define SET_CONSOLE_HWC do { console_mode = 1; } while (0) +#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0) #define SET_CONSOLE_3215 do { console_mode = 2; } while (0) #define SET_CONSOLE_3270 do { console_mode = 3; } while (0) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390x/smp.h linux.22-ac2/include/asm-s390x/smp.h --- linux.vanilla/include/asm-s390x/smp.h 2002-11-29 21:27:24.000000000 +0000 +++ linux.22-ac2/include/asm-s390x/smp.h 2003-06-29 16:09:29.000000000 +0100 @@ -42,7 +42,7 @@ #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) extern __inline__ int cpu_logical_map(int cpu) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-s390x/tape390.h linux.22-ac2/include/asm-s390x/tape390.h --- linux.vanilla/include/asm-s390x/tape390.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/asm-s390x/tape390.h 2003-06-29 16:09:29.000000000 +0100 @@ -0,0 +1,39 @@ +/************************************************************************* + * + * tape390.h + * enables user programs to display messages on the tape device + * + * S390 and zSeries version + * Copyright (C) 2001 IBM Corporation + * Author(s): Despina Papadopoulou + * + *************************************************************************/ + +#ifndef _TAPE390_H +#define _TAPE390_H + +#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct) + +/* + * The TAPE390_DISPLAY ioctl calls the Load Display command + * which transfers 17 bytes of data from the channel to the subsystem: + * - 1 format control byte, and + * - two 8-byte messages + * + * Format control byte: + * 0-2: New Message Overlay + * 3: Alternate Messages + * 4: Blink Message + * 5: Display Low/High Message + * 6: Reserved + * 7: Automatic Load Request + * + */ + +typedef struct display_struct { + char cntrl; + char message1[8]; + char message2[8]; +} display_struct; + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/asm-x86_64/system.h linux.22-ac2/include/asm-x86_64/system.h --- linux.vanilla/include/asm-x86_64/system.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/asm-x86_64/system.h 2003-06-29 16:09:29.000000000 +0100 @@ -284,8 +284,8 @@ #define sti() __global_sti() #define save_flags(x) ((x)=__global_save_flags()) #define restore_flags(x) __global_restore_flags(x) -#define save_and_cli(x) do { save_flags(x); cli(); } while(0); -#define save_and_sti(x) do { save_flags(x); sti(); } while(0); +#define save_and_cli(x) do { save_flags(x); cli(); } while(0) +#define save_and_sti(x) do { save_flags(x); sti(); } while(0) #else diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/acct.h linux.22-ac2/include/linux/acct.h --- linux.vanilla/include/linux/acct.h 2001-11-22 19:46:18.000000000 +0000 +++ linux.22-ac2/include/linux/acct.h 2003-09-01 13:54:31.000000000 +0100 @@ -56,7 +56,9 @@ comp_t ac_swaps; /* Accounting Number of Swaps */ __u32 ac_exitcode; /* Accounting Exitcode */ char ac_comm[ACCT_COMM + 1]; /* Accounting Command Name */ - char ac_pad[10]; /* Accounting Padding 
Bytes */ + char ac_pad[2]; /* Accounting Padding Bytes */ + __u32 ac_uid32; /* 32 bit UID */ + __u32 ac_gid32; /* 32 bit GID */ }; /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/agp_backend.h linux.22-ac2/include/linux/agp_backend.h --- linux.vanilla/include/linux/agp_backend.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/linux/agp_backend.h 2003-07-31 15:12:06.000000000 +0100 @@ -65,6 +65,7 @@ VIA_APOLLO_KT133, VIA_APOLLO_KM266, VIA_APOLLO_KT400, + VIA_CLE266, VIA_APOLLO_P4M266, VIA_APOLLO_P4X400, SIS_GENERIC, @@ -90,6 +91,13 @@ NVIDIA_NFORCE2, NVIDIA_GENERIC, HP_ZX1, + ATI_RS100, + ATI_RS200, + ATI_RS250, + ATI_RS300_100, + ATI_RS300_133, + ATI_RS300_166, + ATI_RS300_200 }; typedef struct _agp_version { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/ata.h linux.22-ac2/include/linux/ata.h --- linux.vanilla/include/linux/ata.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/ata.h 2003-09-01 13:54:31.000000000 +0100 @@ -0,0 +1,649 @@ +/* + Copyright 2003 Red Hat, Inc. All rights reserved. + Copyright 2003 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#ifndef __LINUX_ATA_H__ +#define __LINUX_ATA_H__ + +#include +#include + +/* + * compile-time options + */ +#undef ATA_FORCE_PIO /* do not configure or use DMA */ +#undef ATA_DEBUG /* debugging output */ +#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ +#undef ATA_IRQ_TRAP /* define to ack screaming irqs */ +#undef ATA_NDEBUG /* define to disable quick runtime checks */ + + +/* note: prints function name for you */ +#ifdef ATA_DEBUG +#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#ifdef ATA_VERBOSE_DEBUG +#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#else +#define VPRINTK(fmt, args...) +#endif /* ATA_VERBOSE_DEBUG */ +#else +#define DPRINTK(fmt, args...) +#define VPRINTK(fmt, args...) +#endif /* ATA_DEBUG */ + +#ifdef ATA_NDEBUG +#define assert(expr) +#else +#define assert(expr) \ + if(unlikely(!(expr))) { \ + printk(KERN_ERR "Assertion failed! 
%s,%s,%s,line=%d\n", \ + #expr,__FILE__,__FUNCTION__,__LINE__); \ + } +#endif + +/* defines only for the constants which don't work well as enums */ +#define ATA_TAG_POISON 0xfafbfcfdU +#define ATA_DMA_BOUNDARY 0xffffUL +#define ATA_DMA_MASK 0xffffffffULL + +enum { + /* various global constants */ + ATA_MAX_PORTS = 2, + ATA_MAX_DEVICES = 2, /* per bus/port */ + ATA_DEF_QUEUE = 1, + ATA_MAX_QUEUE = 1, + ATA_MAX_PRD = 256, /* we could make these 256/256 */ + ATA_MAX_SECTORS = 200, /* FIXME */ + ATA_MAX_BUS = 2, + ATA_SECT_SIZE = 512, + ATA_SECT_SIZE_MASK = (ATA_SECT_SIZE - 1), + ATA_SECT_DWORDS = ATA_SECT_SIZE / sizeof(u32), + ATA_ID_WORDS = 256, + ATA_ID_PROD_OFS = 27, + ATA_ID_SERNO_OFS = 10, + ATA_ID_MAJOR_VER = 80, + ATA_ID_PIO_MODES = 64, + ATA_ID_UDMA_MODES = 88, + ATA_ID_PIO4 = (1 << 1), + ATA_DEF_BUSY_WAIT = 10000, + ATA_PCI_CTL_OFS = 2, + ATA_SHORT_PAUSE = (HZ >> 6) + 1, + ATA_SERNO_LEN = 20, + ATA_UDMA_MASK_40C = 0x07, /* udma0-2 */ + + ATA_SHT_EMULATED = 1, + ATA_SHT_NEW_EH_CODE = 1, + ATA_SHT_CMD_PER_LUN = 1, + ATA_SHT_THIS_ID = -1, + ATA_SHT_USE_CLUSTERING = 0, /* FIXME: which is best, 0 or 1? */ + + /* DMA-related */ + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = (ATA_MAX_PRD * ATA_PRD_SZ), + ATA_PRD_EOT = (1 << 31), /* end-of-table flag */ + + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = (1 << 3), + ATA_DMA_START = (1 << 0), + ATA_DMA_INTR = (1 << 2), + ATA_DMA_ERR = (1 << 1), + ATA_DMA_ACTIVE = (1 << 0), + + /* bits in ATA command block registers */ + ATA_HOB = (1 << 7), /* LBA48 selector */ + ATA_NIEN = (1 << 1), /* disable-irq flag */ + ATA_LBA = (1 << 6), /* LBA28 selector */ + ATA_DEV1 = (1 << 4), /* Select Device 1 (slave) */ + ATA_BUSY = (1 << 7), /* BSY status bit */ + ATA_DEVICE_OBS = (1 << 7) | (1 << 5), /* obs bits in dev reg */ + ATA_DEVCTL_OBS = (1 << 3), /* obsolete bit in devctl reg */ + ATA_DRQ = (1 << 3), /* data request i/o */ + ATA_ERR = (1 << 0), /* have an error */ + ATA_SRST = (1 << 2), /* software reset */ + ATA_ABORTED = (1 << 2), /* command aborted */ + + /* ATA command block registers */ + ATA_REG_DATA = 0x00, + ATA_REG_ERR = 0x01, + ATA_REG_NSECT = 0x02, + ATA_REG_LBAL = 0x03, + ATA_REG_LBAM = 0x04, + ATA_REG_LBAH = 0x05, + ATA_REG_DEVICE = 0x06, + ATA_REG_STATUS = 0x07, + + ATA_REG_FEATURE = ATA_REG_ERR, /* and their aliases */ + ATA_REG_CMD = ATA_REG_STATUS, + ATA_REG_BYTEL = ATA_REG_LBAM, + ATA_REG_BYTEH = ATA_REG_LBAH, + ATA_REG_DEVSEL = ATA_REG_DEVICE, + ATA_REG_IRQ = ATA_REG_NSECT, + + /* struct ata_device stuff */ + ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ + ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ + ATA_DFLAG_MASTER = (1 << 2), /* is device 0? */ + ATA_DFLAG_WCACHE = (1 << 3), /* has write cache we can + * (hopefully) flush? */ + + ATA_DEV_UNKNOWN = 0, /* unknown device */ + ATA_DEV_ATA = 1, /* ATA device */ + ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ + ATA_DEV_ATAPI = 3, /* ATAPI device */ + ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */ + ATA_DEV_NONE = 5, /* no device */ + + /* struct ata_port flags */ + ATA_FLAG_SLAVE_POSS = (1 << 1), /* host supports slave dev */ + /* (doesn't imply presence) */ + ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ + ATA_FLAG_SATA = (1 << 3), + ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ + ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. 
*/ + ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ + + /* struct ata_taskfile flags */ + ATA_TFLAG_LBA48 = (1 << 0), + ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ + ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ + + ATA_QCFLAG_WRITE = (1 << 0), /* read==0, write==1 */ + ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */ + ATA_QCFLAG_DMA = (1 << 2), /* data delivered via DMA */ + ATA_QCFLAG_ATAPI = (1 << 3), /* is ATAPI packet command? */ + ATA_QCFLAG_SG = (1 << 4), /* have s/g table? */ + ATA_QCFLAG_POLL = (1 << 5), /* polling, no interrupts */ + + /* struct ata_engine atomic flags (use test_bit, etc.) */ + ATA_EFLG_ACTIVE = 0, /* engine is active */ + + /* ATA taskfile protocols */ + ATA_PROT_UNKNOWN = 0, + ATA_PROT_NODATA = 1, + ATA_PROT_PIO_READ = 2, + ATA_PROT_PIO_WRITE = 3, + ATA_PROT_DMA_READ = 4, + ATA_PROT_DMA_WRITE = 5, + ATA_PROT_ATAPI = 6, + ATA_PROT_ATAPI_DMA = 7, + + /* ATA device commands */ + ATA_CMD_EDD = 0x90, /* execute device diagnostic */ + ATA_CMD_ID_ATA = 0xEC, + ATA_CMD_ID_ATAPI = 0xA1, + ATA_CMD_READ = 0xC8, + ATA_CMD_READ_EXT = 0x25, + ATA_CMD_WRITE = 0xCA, + ATA_CMD_WRITE_EXT = 0x35, + ATA_CMD_PIO_READ = 0x20, + ATA_CMD_PIO_READ_EXT = 0x24, + ATA_CMD_PIO_WRITE = 0x30, + ATA_CMD_PIO_WRITE_EXT = 0x34, + ATA_CMD_SET_FEATURES = 0xEF, + ATA_CMD_PACKET = 0xA0, + + /* various lengths of time */ + ATA_TMOUT_EDD = 5 * HZ, /* hueristic */ + ATA_TMOUT_PIO = 30 * HZ, + ATA_TMOUT_BOOT = 30 * HZ, /* hueristic */ + ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* hueristic */ + ATA_TMOUT_CDB = 30 * HZ, + ATA_TMOUT_CDB_QUICK = 5 * HZ, + + /* ATA bus states */ + BUS_UNKNOWN = 0, + BUS_DMA = 1, + BUS_IDLE = 2, + BUS_NOINTR = 3, + BUS_NODATA = 4, + BUS_TIMER = 5, + BUS_PIO = 6, + BUS_EDD = 7, + BUS_IDENTIFY = 8, + BUS_PACKET = 9, + + /* thread states */ + THR_UNKNOWN = 0, + THR_CHECKPORT = 1, + THR_BUS_RESET = 2, + THR_AWAIT_DEATH = 3, + THR_IDENTIFY = 4, + THR_CONFIG_TIMINGS = 5, + THR_CONFIG_DMA = 6, + THR_PROBE_FAILED = 7, + THR_IDLE = 8, + THR_PROBE_SUCCESS = 9, + THR_PROBE_START = 10, + THR_CONFIG_FORCE_PIO = 11, + THR_PIO_POLL = 12, + THR_PIO_TMOUT = 13, + THR_PIO = 14, + THR_PIO_LAST = 15, + THR_PIO_LAST_POLL = 16, + THR_PIO_ERR = 17, + THR_PACKET = 18, + + /* SATA port states */ + PORT_UNKNOWN = 0, + PORT_ENABLED = 1, + PORT_DISABLED = 2, + + /* SETFEATURES stuff */ + SETFEATURES_XFER = 0x03, + XFER_UDMA_7 = 0x47, + XFER_UDMA_6 = 0x46, + XFER_UDMA_5 = 0x45, + XFER_UDMA_4 = 0x44, + XFER_UDMA_3 = 0x43, + XFER_UDMA_2 = 0x42, + XFER_UDMA_1 = 0x41, + XFER_UDMA_0 = 0x40, + XFER_PIO_4 = 0x0C, + XFER_PIO_3 = 0x0B, + + /* ATAPI stuff */ + ATAPI_PKT_DMA = (1 << 0), + + /* cable types */ + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA_UNK = 3, + ATA_CBL_SATA = 4, + + /* ata_qc_cb_t flags - note uses above ATA_QCFLAG_xxx namespace, + * but not numberspace + */ + ATA_QCFLAG_TIMEOUT = (1 << 0), +}; + +/* forward declarations */ +struct ata_host_info; +struct ata_port; +struct ata_queued_cmd; + +/* typedefs */ +typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc, unsigned int flags); + +/* core structures */ +struct ata_prd { + u32 addr; + u32 flags_len; +} __attribute__((packed)); + +struct ata_ioports { + unsigned long cmd_addr; + unsigned long ctl_addr; + unsigned long bmdma_addr; +}; + +struct ata_probe_ent { + struct list_head node; + struct pci_dev *pdev; + struct ata_host_info *host_info; + Scsi_Host_Template *sht; + struct ata_ioports port[ATA_MAX_PORTS]; + unsigned int n_ports; + unsigned int pio_mask; + 
unsigned int udma_mask; + unsigned int legacy_mode; + unsigned long irq; + unsigned int irq_flags; + unsigned long host_flags; +}; + +struct ata_host_set { + spinlock_t lock; + struct pci_dev *pdev; + unsigned long irq; + unsigned int n_hosts; + struct ata_port * hosts[0]; +}; + +struct ata_taskfile { + unsigned long flags; /* ATA_TFLAG_xxx */ + u8 protocol; /* ATA_PROT_xxx */ + + u8 ctl; /* control reg */ + + u8 hob_feature; /* additional data */ + u8 hob_nsect; /* to support LBA48 */ + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + + u8 feature; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + + u8 device; + + u8 command; /* IO operation */ +}; + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + + Scsi_Cmnd *scsicmd; + void (*scsidone)(Scsi_Cmnd *); + + struct list_head node; + unsigned long flags; /* ATA_QCFLAG_xxx */ + unsigned int tag; + unsigned int n_elem; + unsigned int nsect; + unsigned int cursect; + unsigned int cursg; + unsigned int cursg_ofs; + struct ata_taskfile tf; + struct scatterlist sgent; + ata_qc_cb_t callback; + + struct semaphore sem; +}; + +struct ata_host_stats { + unsigned long unhandled_irq; + unsigned long idle_irq; + unsigned long rw_reqbuf; +}; + +struct ata_device { + u64 n_sectors; /* size of device, if ATA */ + unsigned long flags; /* ATA_DFLAG_xxx */ + unsigned int class; /* ATA_DEV_xxx */ + unsigned int devno; /* 0 or 1 */ + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + unsigned int pio_mode; + unsigned int udma_mode; + + unsigned char vendor[8]; /* space-padded, not ASCIIZ */ + unsigned char product[16]; +}; + +struct ata_engine { + unsigned long flags; + struct list_head q; +}; + +struct ata_port { + struct Scsi_Host *host; /* our co-allocated scsi host */ + struct ata_host_info *ops; + unsigned long flags; /* ATA_FLAG_xxx */ + unsigned int id; /* unique id req'd by scsi midlyr */ + unsigned int port_no; /* unique port #; from zero */ + + struct ata_prd *prd; /* our SG list */ + dma_addr_t prd_dma; /* and its DMA mapping */ + + struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ + + u8 ctl; /* cache of ATA control register */ + u8 devsel; /* cache of Device Select reg */ + unsigned int bus_state; + unsigned int port_state; + unsigned int pio_mask; + unsigned int udma_mask; + unsigned int cbl; /* cable type; ATA_CBL_xxx */ + + struct ata_engine eng; + + struct ata_device device[ATA_MAX_DEVICES]; + + struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; + unsigned long qactive; + unsigned int active_tag; + + struct ata_host_stats stats; + struct ata_host_set *host_set; + + struct semaphore sem; + struct semaphore probe_sem; + + unsigned int thr_state; + int time_to_die; + pid_t thr_pid; + struct completion thr_exited; + struct semaphore thr_sem; + struct timer_list thr_timer; + unsigned long thr_timeout; +}; + +struct ata_host_info { + void (*port_probe) (struct ata_port *); + void (*port_disable) (struct ata_port *); + + void (*set_piomode) (struct ata_port *, struct ata_device *, + unsigned int); + void (*set_udmamode) (struct ata_port *, struct ata_device *, + unsigned int); + + void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf); + void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); + + void (*phy_probe) (struct ata_port *ap); + + void (*bmdma_start) (struct ata_queued_cmd *qc); +}; + +struct ata_board { + Scsi_Host_Template *sht; + unsigned long host_flags; + unsigned long pio_mask; + unsigned long udma_mask; + struct ata_host_info *host_info; +}; + +struct pci_bits { + unsigned long mask; + 
unsigned long val; + unsigned int reg; /* PCI config register to read */ + unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ +}; + +#define ata_id_is_ata(dev) (((dev)->id[0] & (1 << 15)) == 0) +#define ata_id_has_lba48(dev) ((dev)->id[83] & (1 << 10)) +#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 8)) +#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 9)) +#define ata_id_u32(dev,n) \ + (((u32) (dev)->id[(n) + 1] << 16) | ((u32) (dev)->id[(n)])) +#define ata_id_u64(dev,n) \ + ( ((u64) dev->id[(n) + 3] << 48) | \ + ((u64) dev->id[(n) + 2] << 32) | \ + ((u64) dev->id[(n) + 1] << 16) | \ + ((u64) dev->id[(n) + 0]) ) + +extern void ata_port_probe(struct ata_port *); +extern void ata_port_disable(struct ata_port *); +extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_board **boards, + unsigned int n_boards); +extern void ata_pci_remove_one (struct pci_dev *pdev); +extern int ata_scsi_detect(Scsi_Host_Template *sht); +extern int ata_scsi_release(struct Scsi_Host *host); +extern int ata_scsi_queuecmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)); +extern int ata_scsi_error(struct Scsi_Host *host); +extern void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_bmdma_start_mmio (struct ata_queued_cmd *qc); +extern void ata_bmdma_start_pio (struct ata_queued_cmd *qc); +extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits); + +static inline unsigned long msecs_to_jiffies(unsigned long msecs) +{ + return ((HZ * msecs + 999) / 1000); +} + +static inline unsigned int ata_tag_valid(unsigned int tag) +{ + return (tag < ATA_MAX_QUEUE) ? 
1 : 0; +} + +static inline unsigned int ata_dev_present(struct ata_device *dev) +{ + return ((dev->class == ATA_DEV_ATA) || + (dev->class == ATA_DEV_ATAPI)); +} + +static inline u8 ata_chk_err(struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.cmd_addr; + return readb(mmio + ATA_REG_ERR); + } + return inb(ap->ioaddr.cmd_addr + ATA_REG_ERR); +} + +static inline u8 ata_chk_status(struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.cmd_addr; + return readb(mmio + ATA_REG_STATUS); + } + return inb(ap->ioaddr.cmd_addr + ATA_REG_STATUS); +} + +static inline u8 ata_altstatus(struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_MMIO) + return readb(ap->ioaddr.ctl_addr); + return inb(ap->ioaddr.ctl_addr); +} + +static inline void ata_pause(struct ata_port *ap) +{ + ata_altstatus(ap); + ndelay(400); +} + +static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits, + unsigned int max) +{ + u8 status; + + do { + udelay(10); + status = ata_chk_status(ap); + max--; + } while ((status & bits) && (max > 0)); + + return status; +} + +static inline u8 ata_wait_idle(struct ata_port *ap) +{ + u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + if (status & (ATA_BUSY | ATA_DRQ)) { + unsigned long l = ap->ioaddr.cmd_addr + ATA_REG_STATUS; + printk(KERN_WARNING + "ATA: abnormal status 0x%X on port 0x%lX\n", + status, l); + } + + return status; +} + +static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap, + unsigned int tag) +{ + if (likely(ata_tag_valid(tag))) + return &ap->qcmd[tag]; + return NULL; +} + +static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf) +{ + memset(tf, 0, sizeof(*tf)); + + tf->ctl = ap->ctl; + tf->device = ap->devsel; +} + +static inline u8 ata_irq_on(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + ap->ctl &= ~ATA_NIEN; + + if (ap->flags & ATA_FLAG_MMIO) + writeb(ap->ctl, ioaddr->ctl_addr); + else + outb(ap->ctl, ioaddr->ctl_addr); + + return ata_wait_idle(ap); +} + +static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq) +{ + unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY; + u8 host_stat, post_stat, status; + + status = ata_busy_wait(ap, bits, 1000); + if (status & bits) + DPRINTK("abnormal status 0x%X\n", status); + + /* get controller status; clear intr, err bits */ + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + host_stat = readb(mmio + ATA_DMA_STATUS); + writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + mmio + ATA_DMA_STATUS); + + post_stat = readb(mmio + ATA_DMA_STATUS); + } else { + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + + post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + } + + VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", + host_stat, post_stat, status); + + return status; +} + +/* + * 2.5 compat. 
+ */ + +typedef void irqreturn_t; +#define IRQ_RETVAL(x) /* nothing */ + +#define REPORT_LUNS 0xa0 +#define READ_16 0x88 +#define WRITE_16 0x8a +#define SERVICE_ACTION_IN 0x9e +/* values for service action in */ +#define SAI_READ_CAPACITY_16 0x10 + +#define SAM_STAT_GOOD 0x00 +#define SAM_STAT_CHECK_CONDITION 0x02 + +#endif /* __LINUX_ATA_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/binfmts.h linux.22-ac2/include/linux/binfmts.h --- linux.vanilla/include/linux/binfmts.h 2001-11-22 19:46:19.000000000 +0000 +++ linux.22-ac2/include/linux/binfmts.h 2003-09-01 13:54:21.000000000 +0100 @@ -16,6 +16,8 @@ #ifdef __KERNEL__ +struct file; + /* * This structure is used to hold the arguments that are used when loading binaries. */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/bitops.h linux.22-ac2/include/linux/bitops.h --- linux.vanilla/include/linux/bitops.h 2001-11-22 19:46:18.000000000 +0000 +++ linux.22-ac2/include/linux/bitops.h 2003-09-01 13:54:21.000000000 +0100 @@ -1,6 +1,6 @@ #ifndef _LINUX_BITOPS_H #define _LINUX_BITOPS_H - +#include /* * ffs: find first bit set. This is defined the same way as @@ -38,6 +38,47 @@ } /* + * fls: find last bit set. + */ + +extern __inline__ int generic_fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000)) { + x <<= 1; + r -= 1; + } + return r; +} + +extern __inline__ int get_bitmask_order(unsigned int count) +{ + int order; + + order = fls(count); + return order; /* We could be slightly more clever with -1 here... */ +} + +/* * hweightN: returns the hamming weight (i.e. 
the number * of bits set) of a N-bit word */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/blkdev.h linux.22-ac2/include/linux/blkdev.h --- linux.vanilla/include/linux/blkdev.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/linux/blkdev.h 2003-09-01 13:54:21.000000000 +0100 @@ -156,6 +156,8 @@ * Tasks wait here for free read and write requests */ wait_queue_head_t wait_for_requests; + + struct request *last_request; }; #define blk_queue_plugged(q) (q)->plugged diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/blk.h linux.22-ac2/include/linux/blk.h --- linux.vanilla/include/linux/blk.h 2002-08-03 16:08:32.000000000 +0100 +++ linux.22-ac2/include/linux/blk.h 2003-09-01 13:54:21.000000000 +0100 @@ -86,7 +86,15 @@ static inline void blkdev_dequeue_request(struct request * req) { - list_del(&req->queue); + request_queue_t *q = req->q; + if (q) { + if (q->last_request == req || + (q->head_active && + q->last_request == blkdev_next_request(req))) + q->last_request = NULL; + } + + list_del(&req->queue); } int end_that_request_first(struct request *req, int uptodate, char *name); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/cpufreq.h linux.22-ac2/include/linux/cpufreq.h --- linux.vanilla/include/linux/cpufreq.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/cpufreq.h 2003-09-01 13:54:31.000000000 +0100 @@ -0,0 +1,298 @@ +/* + * linux/include/linux/cpufreq.h + * + * Copyright (C) 2001 Russell King + * (C) 2002 - 2003 Dominik Brodowski + * + * + * $Id: cpufreq.h,v 1.36 2003/01/20 17:31:48 db Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_CPUFREQ_H +#define _LINUX_CPUFREQ_H + +#include +#include +#include +#include + +#define CPUFREQ_NAME_LEN 16 + + +/********************************************************************* + * CPUFREQ NOTIFIER INTERFACE * + *********************************************************************/ + +int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); +int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); + +#define CPUFREQ_TRANSITION_NOTIFIER (0) +#define CPUFREQ_POLICY_NOTIFIER (1) + + +/********************** cpufreq policy notifiers *********************/ + +#define CPUFREQ_POLICY_POWERSAVE (1) +#define CPUFREQ_POLICY_PERFORMANCE (2) +#define CPUFREQ_POLICY_GOVERNOR (3) + +/* Frequency values here are CPU kHz so that hardware which doesn't run + * with some frequencies can complain without having to guess what per + * cent / per mille means. + * Maximum transition latency is in microseconds - if it's unknown, + * CPUFREQ_ETERNAL shall be used. 
+ */ + +struct cpufreq_governor; + +#define CPUFREQ_ETERNAL (-1) +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + unsigned int transition_latency; /* in 10^(-9) s */ +}; + +struct cpufreq_policy { + unsigned int cpu; /* cpu nr */ + struct cpufreq_cpuinfo cpuinfo;/* see above */ + + unsigned int min; /* in kHz */ + unsigned int max; /* in kHz */ + unsigned int cur; /* in kHz, only needed if cpufreq + * governors are used */ + unsigned int policy; /* see above */ + struct cpufreq_governor *governor; /* see below */ + + struct semaphore lock; /* CPU ->setpolicy or ->target may + only be called once a time */ + + /* see backport info in kernel/cpufreq.c */ + unsigned int use_count; + struct semaphore unload_sem; +}; + +#define CPUFREQ_ADJUST (0) +#define CPUFREQ_INCOMPATIBLE (1) +#define CPUFREQ_NOTIFY (2) + + +/******************** cpufreq transition notifiers *******************/ + +#define CPUFREQ_PRECHANGE (0) +#define CPUFREQ_POSTCHANGE (1) + +struct cpufreq_freqs { + unsigned int cpu; /* cpu nr */ + unsigned int old; + unsigned int new; +}; + + +/** + * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) + * @old: old value + * @div: divisor + * @mult: multiplier + * + * Needed for loops_per_jiffy and similar calculations. We do it + * this way to avoid math overflow on 32-bit machines. This will + * become architecture dependent once high-resolution-timer is + * merged (or any other thing that introduces sc_math.h). + * + * new = old * mult / div + */ +static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) +{ + unsigned long val, carry; + + mult /= 100; + div /= 100; + val = (old / div) * mult; + carry = old % div; + carry = carry * mult / div; + + return carry + val; +}; + +/********************************************************************* + * CPUFREQ GOVERNORS * + *********************************************************************/ + +#define CPUFREQ_GOV_START 1 +#define CPUFREQ_GOV_STOP 2 +#define CPUFREQ_GOV_LIMITS 3 + +struct cpufreq_governor { + char name[CPUFREQ_NAME_LEN]; + int (*governor) (struct cpufreq_policy *policy, + unsigned int event); + struct list_head governor_list; +}; + +/* pass a target to the cpufreq driver + */ +extern int cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); +extern int __cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); + + +/* pass an event to the cpufreq governor */ +int cpufreq_governor(unsigned int cpu, unsigned int event); + +int cpufreq_register_governor(struct cpufreq_governor *governor); +void cpufreq_unregister_governor(struct cpufreq_governor *governor); + +/********************************************************************* + * CPUFREQ DRIVER INTERFACE * + *********************************************************************/ + +#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ +#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ + +struct freq_attr; + +struct cpufreq_driver { + char name[CPUFREQ_NAME_LEN]; + + /* needed by all drivers */ + int (*init) (struct cpufreq_policy *policy); + int (*verify) (struct cpufreq_policy *policy); + + /* define one out of two */ + int (*setpolicy) (struct cpufreq_policy *policy); + int (*target) (struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); + + /* optional */ + int (*exit) (struct cpufreq_policy *policy); +}; + +int 
cpufreq_register_driver(struct cpufreq_driver *driver_data); +int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); + + +void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); + + +static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) +{ + if (policy->min < min) + policy->min = min; + if (policy->max < min) + policy->max = min; + if (policy->min > max) + policy->min = max; + if (policy->max > max) + policy->max = max; + if (policy->min > policy->max) + policy->min = policy->max; + return; +} + + +/********************************************************************* + * CPUFREQ 2.6. INTERFACE * + *********************************************************************/ +int cpufreq_set_policy(struct cpufreq_policy *policy); +int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); + +/* the proc_intf.c needs this */ +int cpufreq_parse_governor (char *str_governor, unsigned int *policy, struct cpufreq_governor **governor); + +#if defined(CONFIG_CPU_FREQ_GOV_USERSPACE) || defined(CONFIG_CPU_FREQ_GOV_USERSPACE_MODULE) +/********************************************************************* + * CPUFREQ USERSPACE GOVERNOR * + *********************************************************************/ +extern struct cpufreq_governor cpufreq_gov_userspace; +int cpufreq_gov_userspace_init(void); + +int cpufreq_setmax(unsigned int cpu); +int cpufreq_set(unsigned int kHz, unsigned int cpu); +unsigned int cpufreq_get(unsigned int cpu); + +#ifdef CONFIG_CPU_FREQ_24_API + +/* /proc/sys/cpu */ +enum { + CPU_NR = 1, /* compatibilty reasons */ + CPU_NR_0 = 1, + CPU_NR_1 = 2, + CPU_NR_2 = 3, + CPU_NR_3 = 4, + CPU_NR_4 = 5, + CPU_NR_5 = 6, + CPU_NR_6 = 7, + CPU_NR_7 = 8, + CPU_NR_8 = 9, + CPU_NR_9 = 10, + CPU_NR_10 = 11, + CPU_NR_11 = 12, + CPU_NR_12 = 13, + CPU_NR_13 = 14, + CPU_NR_14 = 15, + CPU_NR_15 = 16, + CPU_NR_16 = 17, + CPU_NR_17 = 18, + CPU_NR_18 = 19, + CPU_NR_19 = 20, + CPU_NR_20 = 21, + CPU_NR_21 = 22, + CPU_NR_22 = 23, + CPU_NR_23 = 24, + CPU_NR_24 = 25, + CPU_NR_25 = 26, + CPU_NR_26 = 27, + CPU_NR_27 = 28, + CPU_NR_28 = 29, + CPU_NR_29 = 30, + CPU_NR_30 = 31, + CPU_NR_31 = 32, +}; + +/* /proc/sys/cpu/{0,1,...,(NR_CPUS-1)} */ +enum { + CPU_NR_FREQ_MAX = 1, + CPU_NR_FREQ_MIN = 2, + CPU_NR_FREQ = 3, +}; + +#endif /* CONFIG_CPU_FREQ_24_API */ + +#endif /* CONFIG_CPU_FREQ_GOV_USERSPACE */ + + +/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ + +#define CPUFREQ_ENTRY_INVALID ~0 +#define CPUFREQ_TABLE_END ~1 + +struct cpufreq_frequency_table { + unsigned int index; /* any */ + unsigned int frequency; /* kHz - doesn't need to be in ascending + * order */ +}; + +#if defined(CONFIG_CPU_FREQ_TABLE) || defined(CONFIG_CPU_FREQ_TABLE_MODULE) +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); + +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); + +int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int target_freq, + unsigned int relation, + unsigned int *index); + +#endif /* CONFIG_CPU_FREQ_TABLE */ +#endif /* _LINUX_CPUFREQ_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/dcache.h linux.22-ac2/include/linux/dcache.h --- 
linux.vanilla/include/linux/dcache.h 2002-11-29 21:27:24.000000000 +0000 +++ linux.22-ac2/include/linux/dcache.h 2003-09-01 13:54:21.000000000 +0100 @@ -62,7 +62,8 @@ return end_name_hash(hash); } -#define DNAME_INLINE_LEN 16 +/* XXX: check good value for 64bit */ +#define DNAME_INLINE_LEN 32 struct dentry { atomic_t d_count; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/device-mapper.h linux.22-ac2/include/linux/device-mapper.h --- linux.vanilla/include/linux/device-mapper.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/device-mapper.h 2003-06-29 16:09:26.000000000 +0100 @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the LGPL. + */ + +#ifndef _LINUX_DEVICE_MAPPER_H +#define _LINUX_DEVICE_MAPPER_H + +#define DM_DIR "mapper" /* Slashes not supported */ +#define DM_MAX_TYPE_NAME 16 +#define DM_NAME_LEN 128 +#define DM_UUID_LEN 129 + +#ifdef __KERNEL__ + +struct dm_table; +struct dm_dev; +typedef unsigned long offset_t; + +typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; + +/* + * Prototypes for functions for a target + */ +typedef int (*dm_ctr_fn) (struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context); +typedef void (*dm_dtr_fn) (struct dm_table *t, void *c); +typedef int (*dm_map_fn) (struct buffer_head *bh, int rw, void *context); +typedef int (*dm_err_fn) (struct buffer_head *bh, int rw, void *context); +typedef int (*dm_status_fn) (status_type_t status_type, char *result, + int maxlen, void *context); + +void dm_error(const char *message); + +/* + * Constructors should call these functions to ensure destination devices + * are opened/closed correctly + */ +int dm_table_get_device(struct dm_table *t, const char *path, + offset_t start, offset_t len, + int mode, struct dm_dev **result); +void dm_table_put_device(struct dm_table *table, struct dm_dev *d); + +/* + * Information about a target type + */ +struct target_type { + const char *name; + struct module *module; + dm_ctr_fn ctr; + dm_dtr_fn dtr; + dm_map_fn map; + dm_err_fn err; + dm_status_fn status; +}; + +int dm_register_target(struct target_type *t); +int dm_unregister_target(struct target_type *t); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_DEVICE_MAPPER_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/dm-ioctl.h linux.22-ac2/include/linux/dm-ioctl.h --- linux.vanilla/include/linux/dm-ioctl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/dm-ioctl.h 2003-09-01 13:54:31.000000000 +0100 @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the LGPL. + */ + +#ifndef _LINUX_DM_IOCTL_H +#define _LINUX_DM_IOCTL_H + +#include +#include + +/* + * Implements a traditional ioctl interface to the device mapper. + */ + +/* + * All ioctl arguments consist of a single chunk of memory, with + * this structure at the start. If a uuid is specified any + * lookup (eg. for a DM_INFO) will be done on that, *not* the + * name. + */ +struct dm_ioctl { + /* + * The version number is made up of three parts: + * major - no backward or forward compatibility, + * minor - only backwards compatible, + * patch - both backwards and forwards compatible. + * + * All clients of the ioctl interface should fill in the + * version number of the interface that they were + * compiled with. + * + * All recognised ioctl commands (ie. 
those that don't + * return -ENOTTY) fill out this field, even if the + * command failed. + */ + uint32_t version[3]; /* in/out */ + uint32_t data_size; /* total size of data passed in + * including this struct */ + + uint32_t data_start; /* offset to start of data + * relative to start of this struct */ + + uint32_t target_count; /* in/out */ + uint32_t open_count; /* out */ + uint32_t flags; /* in/out */ + + __kernel_dev_t dev; /* in/out */ + + char name[DM_NAME_LEN]; /* device name */ + char uuid[DM_UUID_LEN]; /* unique identifier for + * the block device */ +}; + +/* + * Used to specify tables. These structures appear after the + * dm_ioctl. + */ +struct dm_target_spec { + int32_t status; /* used when reading from kernel only */ + uint64_t sector_start; + uint32_t length; + + /* + * Offset in bytes (from the start of this struct) to + * next target_spec. + */ + uint32_t next; + + char target_type[DM_MAX_TYPE_NAME]; + + /* + * Parameter string starts immediately after this object. + * Be careful to add padding after string to ensure correct + * alignment of subsequent dm_target_spec. + */ +}; + +/* + * Used to retrieve the target dependencies. + */ +struct dm_target_deps { + uint32_t count; + + __kernel_dev_t dev[0]; /* out */ +}; + +/* + * If you change this make sure you make the corresponding change + * to dm-ioctl.c:lookup_ioctl() + */ +enum { + /* Top level cmds */ + DM_VERSION_CMD = 0, + DM_REMOVE_ALL_CMD, + + /* device level cmds */ + DM_DEV_CREATE_CMD, + DM_DEV_REMOVE_CMD, + DM_DEV_RELOAD_CMD, + DM_DEV_RENAME_CMD, + DM_DEV_SUSPEND_CMD, + DM_DEV_DEPS_CMD, + DM_DEV_STATUS_CMD, + + /* target level cmds */ + DM_TARGET_STATUS_CMD, + DM_TARGET_WAIT_CMD +}; + +#define DM_IOCTL 0xfd + +#define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl) +#define DM_REMOVE_ALL _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl) + +#define DM_DEV_CREATE _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, struct dm_ioctl) +#define DM_DEV_REMOVE _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, struct dm_ioctl) +#define DM_DEV_RELOAD _IOWR(DM_IOCTL, DM_DEV_RELOAD_CMD, struct dm_ioctl) +#define DM_DEV_SUSPEND _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl) +#define DM_DEV_RENAME _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, struct dm_ioctl) +#define DM_DEV_DEPS _IOWR(DM_IOCTL, DM_DEV_DEPS_CMD, struct dm_ioctl) +#define DM_DEV_STATUS _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl) + +#define DM_TARGET_STATUS _IOWR(DM_IOCTL, DM_TARGET_STATUS_CMD, struct dm_ioctl) +#define DM_TARGET_WAIT _IOWR(DM_IOCTL, DM_TARGET_WAIT_CMD, struct dm_ioctl) + +#define DM_VERSION_MAJOR 1 +#define DM_VERSION_MINOR 0 +#define DM_VERSION_PATCHLEVEL 3 +#define DM_VERSION_EXTRA "-ioctl-bk (2002-08-5)" + +/* Status bits */ +#define DM_READONLY_FLAG 0x00000001 +#define DM_SUSPEND_FLAG 0x00000002 +#define DM_EXISTS_FLAG 0x00000004 +#define DM_PERSISTENT_DEV_FLAG 0x00000008 + +/* + * Flag passed into ioctl STATUS command to get table information + * rather than current status. 
+ */ +#define DM_STATUS_TABLE_FLAG 0x00000010 + +#endif /* _LINUX_DM_IOCTL_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/firmware.h linux.22-ac2/include/linux/firmware.h --- linux.vanilla/include/linux/firmware.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/firmware.h 2003-09-01 13:54:31.000000000 +0100 @@ -0,0 +1,20 @@ +#ifndef _LINUX_FIRMWARE_H +#define _LINUX_FIRMWARE_H +#include +#include +#define FIRMWARE_NAME_MAX 30 +struct firmware { + size_t size; + u8 *data; +}; +int request_firmware (const struct firmware **fw, const char *name, + const char *device); +int request_firmware_nowait ( + struct module *module, + const char *name, const char *device, void *context, + void (*cont)(const struct firmware *fw, void *context)); +/* On 2.5 'device' is 'struct device *' */ + +void release_firmware (const struct firmware *fw); +void register_firmware (const char *name, const u8 *data, size_t size); +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/fs.h linux.22-ac2/include/linux/fs.h --- linux.vanilla/include/linux/fs.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/linux/fs.h 2003-09-01 13:54:21.000000000 +0100 @@ -92,6 +92,8 @@ #define FS_SINGLE 8 /* Filesystem that can have only one superblock */ #define FS_NOMOUNT 16 /* Never mount from userland */ #define FS_LITTER 32 /* Keeps the tree in dcache */ +#define FS_ALWAYS_REVAL 16384 /* Always revalidate dentries returned by + link_path_walk */ #define FS_ODD_RENAME 32768 /* Temporary stuff; will go away as soon * as nfs_rename() will be cleaned up */ @@ -222,6 +224,7 @@ BH_Launder, /* 1 if we can throttle on this buffer */ BH_Attached, /* 1 if b_inode_buffers is linked into a list */ BH_JBD, /* 1 if it has an attached journal_head */ + BH_Delay, /* 1 if the buffer is delayed allocate */ BH_Sync, /* 1 if the buffer is a sync read */ BH_PrivateStart,/* not a state bit, but the first bit available @@ -265,7 +268,7 @@ struct page *b_page; /* the page this bh is mapped to */ void (*b_end_io)(struct buffer_head *bh, int uptodate); /* I/O completion */ void *b_private; /* reserved for b_end_io */ - + void *b_journal_head; /* ext3 journal_heads */ unsigned long b_rsector; /* Real buffer location on disk */ wait_queue_head_t b_wait; @@ -285,6 +288,7 @@ #define buffer_new(bh) __buffer_state(bh,New) #define buffer_async(bh) __buffer_state(bh,Async) #define buffer_launder(bh) __buffer_state(bh,Launder) +#define buffer_delay(bh) __buffer_state(bh,Delay) #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) @@ -967,6 +971,7 @@ #define I_LOCK 8 #define I_FREEING 16 #define I_CLEAR 32 +#define I_NEW 64 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) @@ -1145,9 +1150,8 @@ extern void refile_buffer(struct buffer_head * buf); extern void create_empty_buffers(struct page *, kdev_t, unsigned long); extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate); - -/* reiserfs_writepage needs this */ -extern void set_buffer_async_io(struct buffer_head *bh) ; +extern void end_buffer_io_async(struct buffer_head *bh, int uptodate); +extern void set_buffer_async_io(struct buffer_head *bh); #define BUF_CLEAN 0 #define BUF_LOCKED 1 /* Buffers scheduled for write */ @@ -1395,12 +1399,47 @@ extern void force_delete(struct inode *); extern struct inode * igrab(struct inode *); extern ino_t iunique(struct super_block *, ino_t); +extern void unlock_new_inode(struct inode *); typedef int 
(*find_inode_t)(struct inode *, unsigned long, void *); -extern struct inode * iget4(struct super_block *, unsigned long, find_inode_t, void *); + +extern struct inode * iget4_locked(struct super_block *, unsigned long, + find_inode_t, void *); + +static inline struct inode *iget4(struct super_block *sb, unsigned long ino, + find_inode_t find_actor, void *opaque) +{ + struct inode *inode = iget4_locked(sb, ino, find_actor, opaque); + + if (inode && (inode->i_state & I_NEW)) { + /* + * reiserfs-specific kludge that is expected to go away ASAP. + */ + if (sb->s_op->read_inode2) + sb->s_op->read_inode2(inode, opaque); + else + sb->s_op->read_inode(inode); + unlock_new_inode(inode); + } + + return inode; +} + static inline struct inode *iget(struct super_block *sb, unsigned long ino) { - return iget4(sb, ino, NULL, NULL); + struct inode *inode = iget4_locked(sb, ino, NULL, NULL); + + if (inode && (inode->i_state & I_NEW)) { + sb->s_op->read_inode(inode); + unlock_new_inode(inode); + } + + return inode; +} + +static inline struct inode *iget_locked(struct super_block *sb, unsigned long ino) +{ + return iget4_locked(sb, ino, NULL, NULL); } extern void clear_inode(struct inode *); @@ -1479,6 +1518,7 @@ extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *); extern int precheck_file_write(struct file *, struct inode *, size_t *, loff_t *); extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *); +extern ssize_t do_generic_file_write(struct file *, const char *, size_t, loff_t *); extern void do_generic_file_read(struct file *, loff_t *, read_descriptor_t *, read_actor_t); extern loff_t no_llseek(struct file *file, loff_t offset, int origin); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/fs_struct.h linux.22-ac2/include/linux/fs_struct.h --- linux.vanilla/include/linux/fs_struct.h 2001-07-13 23:10:44.000000000 +0100 +++ linux.22-ac2/include/linux/fs_struct.h 2003-09-01 13:54:21.000000000 +0100 @@ -2,6 +2,9 @@ #define _LINUX_FS_STRUCT_H #ifdef __KERNEL__ +#include /* for RW_LOCK_* */ +#include /* for atomic_t */ + struct fs_struct { atomic_t count; rwlock_t lock; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/hdlc/ioctl.h linux.22-ac2/include/linux/hdlc/ioctl.h --- linux.vanilla/include/linux/hdlc/ioctl.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/hdlc/ioctl.h 2003-06-29 16:09:26.000000000 +0100 @@ -34,22 +34,15 @@ } fr_proto_pvc; /* for creating/deleting FR PVCs */ typedef struct { + unsigned int dlci; + char master[IFNAMSIZ]; /* Name of master FRAD device */ +}fr_proto_pvc_info; /* for returning PVC information only */ + +typedef struct { unsigned int interval; unsigned int timeout; } cisco_proto; /* PPP doesn't need any info now - supply length = 0 to ioctl */ -union hdlc_settings { - raw_hdlc_proto raw_hdlc; - cisco_proto cisco; - fr_proto fr; - fr_proto_pvc fr_pvc; -}; - -union line_settings { - sync_serial_settings sync; - te1_settings te1; -}; - #endif /* __HDLC_IOCTL_H__ */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/hdlc.h linux.22-ac2/include/linux/hdlc.h --- linux.vanilla/include/linux/hdlc.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/hdlc.h 2003-09-01 13:54:21.000000000 +0100 @@ -1,12 +1,11 @@ /* * Generic HDLC support routines for Linux * - * Copyright (C) 1999-2002 Krzysztof Halasa 
+ * Copyright (C) 1999-2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. */ #ifndef __HDLC_H @@ -52,7 +51,7 @@ #include #define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */ -#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10) /* max 10 bytes for FR */ +#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */ #define MAXLEN_LMISTAT 20 /* max size of status enquiry frame */ @@ -145,17 +144,20 @@ typedef struct pvc_device_struct { - struct net_device netdev; /* PVC net device - must be first */ - struct net_device_stats stats; struct hdlc_device_struct *master; - struct pvc_device_struct *next; + struct net_device *main; + struct net_device *ether; /* bridged Ethernet interface */ + struct pvc_device_struct *next; /* Sorted in ascending DLCI order */ + int dlci; + int open_count; struct { - int active; - int new; - int deleted; - int fecn; - int becn; + unsigned int new: 1; + unsigned int active: 1; + unsigned int exist: 1; + unsigned int deleted: 1; + unsigned int fecn: 1; + unsigned int becn: 1; }state; }pvc_device; @@ -180,18 +182,20 @@ void (*stop)(struct hdlc_device_struct *hdlc); void (*proto_detach)(struct hdlc_device_struct *hdlc); void (*netif_rx)(struct sk_buff *skb); + unsigned short (*type_trans)(struct sk_buff *skb, + struct net_device *dev); int proto; /* IF_PROTO_HDLC/CISCO/FR/etc. */ union { struct { fr_proto settings; pvc_device *first_pvc; - int pvc_count; + int dce_pvc_count; struct timer_list timer; int last_poll; int reliable; - int changed; + int dce_changed; int request; int fullrep_sent; u32 last_errors; /* last errors bit list */ @@ -226,6 +230,7 @@ int hdlc_raw_ioctl(hdlc_device *hdlc, struct ifreq *ifr); +int hdlc_raw_eth_ioctl(hdlc_device *hdlc, struct ifreq *ifr); int hdlc_cisco_ioctl(hdlc_device *hdlc, struct ifreq *ifr); int hdlc_ppp_ioctl(hdlc_device *hdlc, struct ifreq *ifr); int hdlc_fr_ioctl(hdlc_device *hdlc, struct ifreq *ifr); @@ -254,15 +259,9 @@ } -static __inline__ struct net_device* pvc_to_dev(pvc_device *pvc) -{ - return &pvc->netdev; -} - - static __inline__ pvc_device* dev_to_pvc(struct net_device *dev) { - return (pvc_device*)dev; + return (pvc_device*)dev->priv; } @@ -272,19 +271,6 @@ } -static __inline__ const char *pvc_to_name(pvc_device *pvc) -{ - return pvc_to_dev(pvc)->name; -} - - -static __inline__ u16 netdev_dlci(struct net_device *dev) -{ - return ntohs(*(u16*)dev->dev_addr); -} - - - static __inline__ u16 q922_to_dlci(u8 *hdr) { return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4); @@ -345,5 +331,15 @@ } +static __inline__ unsigned short hdlc_type_trans(struct sk_buff *skb, + struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(skb->dev); + if (hdlc->type_trans) + return hdlc->type_trans(skb, dev); + else + return __constant_htons(ETH_P_HDLC); +} + #endif /* __KERNEL */ #endif /* __HDLC_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/ide.h linux.22-ac2/include/linux/ide.h --- linux.vanilla/include/linux/ide.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/linux/ide.h 2003-09-09 22:27:29.000000000 +0100 @@ -1003,6 +1003,7 @@ unsigned udma_four : 1; /* 1=ATA-66 capable, 0=default */ unsigned highmem : 1; /* can do full 
32-bit dma */ unsigned no_dsc : 1; /* 0 default, 1 dsc_overlap disabled */ + unsigned sata : 1; /* 0 PATA, 1 SATA */ void *hwif_data; /* extra hwif data */ } ide_hwif_t; @@ -1331,9 +1332,10 @@ extern int ide_xlate_1024(kdev_t, int, int, const char *); /* - * Convert kdev_t structure into ide_drive_t * one. + * Convert kdev_t structure into ide_drive_t * one. If force is set the + * non present drives can be opened. */ -extern ide_drive_t *get_info_ptr(kdev_t i_rdev); +extern ide_drive_t *ide_info_ptr(kdev_t i_rdev, int force); /* * Return the current idea about the total capacity of this drive. @@ -1349,12 +1351,6 @@ extern ide_startstop_t ide_do_reset(ide_drive_t *); /* - * Re-Start an operation for an IDE interface. - * The caller should return immediately after invoking this. - */ -extern int restart_request(ide_drive_t *, struct request *); - -/* * This function is intended to be used prior to invoking ide_do_drive_cmd(). */ extern void ide_init_drive_cmd(struct request *); @@ -1722,6 +1718,12 @@ extern void hwif_unregister(ide_hwif_t *); +extern void ide_probe_reset(ide_hwif_t *); +extern void ide_tune_drives(ide_hwif_t *); +extern int ide_wait_hwif_ready(ide_hwif_t *); +extern u8 ide_probe_for_drive(ide_drive_t *); + + extern void export_ide_init_queue(ide_drive_t *); extern u8 export_probe_for_drive(ide_drive_t *); extern int probe_hwif_init(ide_hwif_t *); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/if.h linux.22-ac2/include/linux/if.h --- linux.vanilla/include/linux/if.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/if.h 2003-09-01 13:54:21.000000000 +0100 @@ -21,6 +21,8 @@ #include /* for "__kernel_caddr_t" et al */ #include /* for "struct sockaddr" et al */ + +#define IFNAMSIZ 16 #include /* Standard interface flags (netdevice->flags). */ @@ -69,7 +71,11 @@ #define IF_PROTO_FR_ADD_PVC 0x2004 /* Create FR PVC */ #define IF_PROTO_FR_DEL_PVC 0x2005 /* Delete FR PVC */ #define IF_PROTO_X25 0x2006 /* X.25 */ - +#define IF_PROTO_HDLC_ETH 0x2007 /* raw HDLC, Ethernet emulation */ +#define IF_PROTO_FR_ADD_ETH_PVC 0x2008 /* Create FR Ethernet-bridged PVC */ +#define IF_PROTO_FR_DEL_ETH_PVC 0x2009 /* Delete FR Ethernet-bridged PVC */ +#define IF_PROTO_FR_PVC 0x200A /* for reading PVC status */ +#define IF_PROTO_FR_ETH_PVC 0x200B /* @@ -103,6 +109,7 @@ cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; /* interface settings */ sync_serial_settings *sync; @@ -120,7 +127,6 @@ struct ifreq { #define IFHWADDRLEN 6 -#define IFNAMSIZ 16 union { char ifrn_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/interrupt.h linux.22-ac2/include/linux/interrupt.h --- linux.vanilla/include/linux/interrupt.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/interrupt.h 2003-09-01 13:54:21.000000000 +0100 @@ -40,7 +40,8 @@ CM206_BH, JS_BH, MACSERIAL_BH, - ISICOM_BH + ISICOM_BH, + SIOLX_BH }; #include diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/iobuf.h linux.22-ac2/include/linux/iobuf.h --- linux.vanilla/include/linux/iobuf.h 2002-11-29 21:27:24.000000000 +0000 +++ linux.22-ac2/include/linux/iobuf.h 2003-09-01 13:54:30.000000000 +0100 @@ -37,7 +37,8 @@ int offset; /* Offset to start of valid data */ int length; /* Number of valid bytes of data */ - unsigned int locked : 1; /* If set, pages has been locked */ + unsigned int locked : 1, /* If set, pages has been locked */ + initialized:1; /* If set, done initialize */ struct page ** maplist; struct buffer_head ** bh; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/jbd.h linux.22-ac2/include/linux/jbd.h --- linux.vanilla/include/linux/jbd.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/jbd.h 2003-09-01 13:54:31.000000000 +0100 @@ -311,7 +311,7 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) { - return bh->b_private; + return bh->b_journal_head; } #define HAVE_JOURNAL_CALLBACK_STATUS diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/list.h linux.22-ac2/include/linux/list.h --- linux.vanilla/include/linux/list.h 2002-11-29 21:27:24.000000000 +0000 +++ linux.22-ac2/include/linux/list.h 2003-09-01 13:54:21.000000000 +0100 @@ -132,7 +132,7 @@ * list_empty - tests whether a list is empty * @head: the list to test. */ -static inline int list_empty(struct list_head *head) +static inline int list_empty(const struct list_head *head) { return head->next == head; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/lockd/lockd.h linux.22-ac2/include/linux/lockd/lockd.h --- linux.vanilla/include/linux/lockd/lockd.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/linux/lockd/lockd.h 2003-09-01 13:54:31.000000000 +0100 @@ -89,8 +89,11 @@ /* * This is a server block (i.e. a lock requested by some client which * couldn't be granted because of a conflicting lock). + * + * XXX: Beware of signedness errors. b_when is passed as a signed long + * into time_before_eq et al. --okir */ -#define NLM_NEVER (~(unsigned long) 0) +#define NLM_NEVER (0x7FFFFFF) struct nlm_block { struct nlm_block * b_next; /* linked list (all blocks) */ struct nlm_block * b_fnext; /* linked list (per file) */ @@ -161,6 +164,7 @@ u32 nlmsvc_testlock(struct nlm_file *, struct nlm_lock *, struct nlm_lock *); u32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *); +void nlmsvc_grant_reply(struct nlm_cookie *, u32); unsigned long nlmsvc_retry_blocked(void); int nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, int action); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/malloc.h linux.22-ac2/include/linux/malloc.h --- linux.vanilla/include/linux/malloc.h 2001-11-12 03:09:31.000000000 +0000 +++ linux.22-ac2/include/linux/malloc.h 2003-09-01 13:54:30.000000000 +0100 @@ -1,7 +1,7 @@ #ifndef _LINUX_MALLOC_H #define _LINUX_MALLOC_H -#warning linux/malloc.h is deprecated, use linux/slab.h instead. 
+#error linux/malloc.h is deprecated, use linux/slab.h instead. #include #endif /* _LINUX_MALLOC_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/mempool.h linux.22-ac2/include/linux/mempool.h --- linux.vanilla/include/linux/mempool.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/mempool.h 2003-09-01 13:54:31.000000000 +0100 @@ -0,0 +1,31 @@ +/* + * memory buffer pool support + */ +#ifndef _LINUX_MEMPOOL_H +#define _LINUX_MEMPOOL_H + +#include +#include + +struct mempool_s; +typedef struct mempool_s mempool_t; + +typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data); +typedef void (mempool_free_t)(void *element, void *pool_data); + +extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); +extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask); +extern void mempool_destroy(mempool_t *pool); +extern void * mempool_alloc(mempool_t *pool, int gfp_mask); +extern void mempool_free(void *element, mempool_t *pool); + +/* + * A mempool_alloc_t and mempool_free_t that get the memory from + * a slab that is passed in through pool_data. + */ +void *mempool_alloc_slab(int gfp_mask, void *pool_data); +void mempool_free_slab(void *element, void *pool_data); + + +#endif /* _LINUX_MEMPOOL_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/mman.h linux.22-ac2/include/linux/mman.h --- linux.vanilla/include/linux/mman.h 2000-03-15 02:21:56.000000000 +0000 +++ linux.22-ac2/include/linux/mman.h 2003-06-29 17:27:44.000000000 +0100 @@ -6,4 +6,8 @@ #define MREMAP_MAYMOVE 1 #define MREMAP_FIXED 2 +extern int vm_enough_memory(long pages); +extern void vm_unacct_memory(long pages); +extern void vm_validate_enough(char *x); + #endif /* _LINUX_MMAN_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/mm.h linux.22-ac2/include/linux/mm.h --- linux.vanilla/include/linux/mm.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/linux/mm.h 2003-09-01 13:54:21.000000000 +0100 @@ -104,8 +104,12 @@ #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ #define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */ -#ifndef VM_STACK_FLAGS -#define VM_STACK_FLAGS 0x00000177 +#define VM_ACCOUNT 0x00100000 /* Memory is a vm accounted object */ + +#ifdef ARCH_STACK_GROWSUP +#define VM_STACK_FLAGS (VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT) +#else +#define VM_STACK_FLAGS (VM_DATA_DEFAULT_FLAGS|VM_GROWSDOWN|VM_ACCOUNT) #endif #define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ) @@ -567,7 +571,7 @@ return ret; } -extern int do_munmap(struct mm_struct *, unsigned long, size_t); +extern int do_munmap(struct mm_struct *, unsigned long, size_t, int acct); extern unsigned long do_brk(unsigned long, unsigned long); @@ -634,34 +638,9 @@ return gfp_mask; } - -/* vma is the first one with address < vma->vm_end, - * and even address < vma->vm_start. Have to extend vma. */ -static inline int expand_stack(struct vm_area_struct * vma, unsigned long address) -{ - unsigned long grow; - /* - * vma->vm_start/vm_end cannot change under us because the caller is required - * to hold the mmap_sem in write mode. We need to get the spinlock only - * before relocating the vma range ourself. 
- */ - address &= PAGE_MASK; - spin_lock(&vma->vm_mm->page_table_lock); - grow = (vma->vm_start - address) >> PAGE_SHIFT; - if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur || - ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) { - spin_unlock(&vma->vm_mm->page_table_lock); - return -ENOMEM; - } - vma->vm_start = address; - vma->vm_pgoff -= grow; - vma->vm_mm->total_vm += grow; - if (vma->vm_flags & VM_LOCKED) - vma->vm_mm->locked_vm += grow; - spin_unlock(&vma->vm_mm->page_table_lock); - return 0; -} +/* Do stack extension */ +extern int expand_stack(struct vm_area_struct * vma, unsigned long address); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/netfilter_ipv4/ip_conntrack.h linux.22-ac2/include/linux/netfilter_ipv4/ip_conntrack.h --- linux.vanilla/include/linux/netfilter_ipv4/ip_conntrack.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/netfilter_ipv4/ip_conntrack.h 2003-09-01 13:54:31.000000000 +0100 @@ -156,7 +156,8 @@ union ip_conntrack_expect_help help; }; -#include +struct ip_conntrack_helper; + struct ip_conntrack { /* Usage count in here is 1 for hash table/destruct timer, 1 per skb, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/nfs_fs.h linux.22-ac2/include/linux/nfs_fs.h --- linux.vanilla/include/linux/nfs_fs.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/linux/nfs_fs.h 2003-09-01 13:54:31.000000000 +0100 @@ -99,11 +99,19 @@ #define NFS_FLAGS(inode) ((inode)->u.nfs_i.flags) #define NFS_REVALIDATING(inode) (NFS_FLAGS(inode) & NFS_INO_REVALIDATING) #define NFS_STALE(inode) (NFS_FLAGS(inode) & NFS_INO_STALE) +#define NFS_FLUSH(inode) (NFS_FLAGS(inode) & NFS_INO_FLUSH) #define NFS_FILEID(inode) ((inode)->u.nfs_i.fileid) -/* Inode Flags */ -#define NFS_USE_READDIRPLUS(inode) ((NFS_FLAGS(inode) & NFS_INO_ADVISE_RDPLUS) ? 
1 : 0) +static inline int nfs_server_capable(struct inode *inode, int cap) +{ + return NFS_SERVER(inode)->caps & cap; +} + +static inline int NFS_USE_READDIRPLUS(struct inode *inode) +{ + return NFS_FLAGS(inode) & NFS_INO_ADVISE_RDPLUS; +} /* * These are the default flags for swap requests diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/nfs_fs_i.h linux.22-ac2/include/linux/nfs_fs_i.h --- linux.vanilla/include/linux/nfs_fs_i.h 2002-08-03 16:08:32.000000000 +0100 +++ linux.22-ac2/include/linux/nfs_fs_i.h 2003-09-01 13:54:21.000000000 +0100 @@ -6,6 +6,16 @@ #include /* + * NFSv3 Access mode cache + */ +struct nfs_access_cache { + unsigned long jiffies; + struct rpc_cred * cred; + int mask; + int err; +}; + +/* * nfs fs inode data in memory */ struct nfs_inode_info { @@ -54,6 +64,8 @@ */ unsigned long cache_mtime_jiffies; + struct nfs_access_cache cache_access; + /* * This is the cookie verifier used for NFSv3 readdir * operations @@ -75,6 +87,9 @@ /* Credentials for shared mmap */ struct rpc_cred *mm_cred; + + /* The number of threads flushing out dirty mmaps pages */ + atomic_t flushers; }; /* diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/nfs_fs_sb.h linux.22-ac2/include/linux/nfs_fs_sb.h --- linux.vanilla/include/linux/nfs_fs_sb.h 2001-11-22 19:46:19.000000000 +0000 +++ linux.22-ac2/include/linux/nfs_fs_sb.h 2003-09-01 13:54:21.000000000 +0100 @@ -10,6 +10,7 @@ struct rpc_clnt * client; /* RPC client handle */ struct nfs_rpc_ops * rpc_ops; /* NFS protocol vector */ int flags; /* various flags */ + unsigned int caps; /* server capabilities */ unsigned int rsize; /* read size */ unsigned int rpages; /* read size (in pages) */ unsigned int wsize; /* write size */ @@ -36,4 +37,8 @@ struct nfs_server s_server; }; +/* Server capabilities */ +#define NFS_CAP_READDIRPLUS 1 + + #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/nfs_xdr.h linux.22-ac2/include/linux/nfs_xdr.h --- linux.vanilla/include/linux/nfs_xdr.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/linux/nfs_xdr.h 2003-06-29 16:09:25.000000000 +0100 @@ -27,6 +27,7 @@ __u64 atime; __u64 mtime; __u64 ctime; + unsigned long timestamp; }; #define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */ @@ -37,6 +38,7 @@ * Info on the file system */ struct nfs_fsinfo { + struct nfs_fattr *fattr; __u32 rtmax; /* max. read transfer size */ __u32 rtpref; /* pref. read transfer size */ __u32 rtmult; /* reads should be multiple of this */ @@ -45,15 +47,37 @@ __u32 wtmult; /* writes should be multiple of this */ __u32 dtpref; /* pref. readdir transfer size */ __u64 maxfilesize; - __u64 bsize; /* block size */ + __u64 time_delta; + __u32 properties; +}; + +struct nfs_fsstat { + struct nfs_fattr *fattr; __u64 tbytes; /* total size in bytes */ __u64 fbytes; /* # of free bytes */ __u64 abytes; /* # of bytes available to user */ __u64 tfiles; /* # of files */ __u64 ffiles; /* # of free files */ __u64 afiles; /* # of files available to user */ + __u32 invarsec; +}; + +struct nfs_pathconf { + struct nfs_fattr *fattr; /* Post-op attributes */ __u32 linkmax;/* max # of hard links */ - __u32 namelen;/* max name length */ + __u32 name_max;/* max name length */ + int no_trunc : 1, + chown_restricted : 1, + case_insensitive : 1, + case_preserving : 1; +}; + +struct nfs2_statfs { + __u32 tsize; /* Server transfer size */ + __u32 bsize; /* Filesystem block size */ + __u32 blocks; /* No. 
of "bsize" blocks on filesystem */ + __u32 bfree; /* No. of free "bsize" blocks */ + __u32 bavail; /* No. of available "bsize" blocks */ }; /* Arguments to the read call. @@ -109,8 +133,8 @@ const char * name; unsigned int len; int eof; - struct nfs_fh fh; - struct nfs_fattr fattr; + struct nfs_fh *fh; + struct nfs_fattr *fattr; }; /* @@ -300,7 +324,7 @@ struct iattr *); int (*lookup) (struct inode *, struct qstr *, struct nfs_fh *, struct nfs_fattr *); - int (*access) (struct inode *, int , int); + int (*access) (struct inode *, struct rpc_cred *, int); int (*readlink)(struct inode *, struct page *); int (*read) (struct inode *, struct rpc_cred *, struct nfs_fattr *, @@ -332,7 +356,11 @@ int (*mknod) (struct inode *, struct qstr *, struct iattr *, dev_t, struct nfs_fh *, struct nfs_fattr *); int (*statfs) (struct nfs_server *, struct nfs_fh *, + struct nfs_fsstat *); + int (*fsinfo) (struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); + int (*pathconf) (struct nfs_server *, struct nfs_fh *, + struct nfs_pathconf *); u32 * (*decode_dirent)(u32 *, struct nfs_entry *, int plus); }; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/pci.h linux.22-ac2/include/linux/pci.h --- linux.vanilla/include/linux/pci.h 2003-08-28 16:45:45.000000000 +0100 +++ linux.22-ac2/include/linux/pci.h 2003-09-09 22:27:29.000000000 +0100 @@ -613,6 +613,8 @@ int pci_set_power_state(struct pci_dev *dev, int state); int pci_enable_wake(struct pci_dev *dev, u32 state, int enable); +int pci_restart_device(struct pci_dev *dev, char *name); + /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ int pci_claim_resource(struct pci_dev *, int); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/pci_ids.h linux.22-ac2/include/linux/pci_ids.h --- linux.vanilla/include/linux/pci_ids.h 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/include/linux/pci_ids.h 2003-09-09 19:12:35.000000000 +0100 @@ -841,6 +841,7 @@ #define PCI_VENDOR_ID_SGI 0x10a9 #define PCI_DEVICE_ID_SGI_IOC3 0x0003 +#define PCI_DEVICE_ID_SGI_IOC4 0x100a #define PCI_VENDOR_ID_ACC 0x10aa #define PCI_DEVICE_ID_ACC_2056 0x0000 @@ -1082,6 +1083,7 @@ #define PCI_DEVICE_ID_VIA_8233C_0 0x3109 #define PCI_DEVICE_ID_VIA_8361 0x3112 #define PCI_DEVICE_ID_VIA_8375 0x3116 +#define PCI_DEVICE_ID_VIA_CLE266 0x3123 #define PCI_DEVICE_ID_VIA_8233A 0x3147 #define PCI_DEVICE_ID_VIA_P4M266 0x3148 #define PCI_DEVICE_ID_VIA_8237_SATA 0x3149 diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/pc_keyb.h linux.22-ac2/include/linux/pc_keyb.h --- linux.vanilla/include/linux/pc_keyb.h 1999-10-11 18:15:40.000000000 +0100 +++ linux.22-ac2/include/linux/pc_keyb.h 2003-06-29 16:09:25.000000000 +0100 @@ -128,3 +128,6 @@ struct fasync_struct *fasync; unsigned char buf[AUX_BUF_SIZE]; }; + +extern void pckbd_blink (char); + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/pnpbios.h linux.22-ac2/include/linux/pnpbios.h --- linux.vanilla/include/linux/pnpbios.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/pnpbios.h 2003-09-09 22:27:29.000000000 +0100 @@ -0,0 +1,212 @@ +/* + * Include file for the interface to a PnP BIOS + * + * Original BIOS code (C) 1998 Christian Schmidt (chr.schmidt@tu-bs.de) + * PnP handler parts (c) 1998 Tom Lees + * Minor reorganizations by David Hinds + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU 
General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_PNPBIOS_H +#define _LINUX_PNPBIOS_H + +#ifdef __KERNEL__ + +#include +#include + +/* + * Status codes (warnings and errors) + */ +#define PNP_SUCCESS 0x00 +#define PNP_NOT_SET_STATICALLY 0x7f +#define PNP_UNKNOWN_FUNCTION 0x81 +#define PNP_FUNCTION_NOT_SUPPORTED 0x82 +#define PNP_INVALID_HANDLE 0x83 +#define PNP_BAD_PARAMETER 0x84 +#define PNP_SET_FAILED 0x85 +#define PNP_EVENTS_NOT_PENDING 0x86 +#define PNP_SYSTEM_NOT_DOCKED 0x87 +#define PNP_NO_ISA_PNP_CARDS 0x88 +#define PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES 0x89 +#define PNP_CONFIG_CHANGE_FAILED_NO_BATTERY 0x8a +#define PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT 0x8b +#define PNP_BUFFER_TOO_SMALL 0x8c +#define PNP_USE_ESCD_SUPPORT 0x8d +#define PNP_MESSAGE_NOT_SUPPORTED 0x8e +#define PNP_HARDWARE_ERROR 0x8f + +#define ESCD_SUCCESS 0x00 +#define ESCD_IO_ERROR_READING 0x55 +#define ESCD_INVALID 0x56 +#define ESCD_BUFFER_TOO_SMALL 0x59 +#define ESCD_NVRAM_TOO_SMALL 0x5a +#define ESCD_FUNCTION_NOT_SUPPORTED 0x81 + +/* + * Events that can be received by "get event" + */ +#define PNPEV_ABOUT_TO_CHANGE_CONFIG 0x0001 +#define PNPEV_DOCK_CHANGED 0x0002 +#define PNPEV_SYSTEM_DEVICE_CHANGED 0x0003 +#define PNPEV_CONFIG_CHANGED_FAILED 0x0004 +#define PNPEV_UNKNOWN_SYSTEM_EVENT 0xffff +/* 0x8000 through 0xfffe are OEM defined */ + +/* + * Messages that should be sent through "send message" + */ +#define PNPMSG_OK 0x00 +#define PNPMSG_ABORT 0x01 +#define PNPMSG_UNDOCK_DEFAULT_ACTION 0x40 +#define PNPMSG_POWER_OFF 0x41 +#define PNPMSG_PNP_OS_ACTIVE 0x42 +#define PNPMSG_PNP_OS_INACTIVE 0x43 +/* 0x8000 through 0xffff are OEM defined */ + +#pragma pack(1) +struct pnp_dev_node_info { + __u16 no_nodes; + __u16 max_node_size; +}; +struct pnp_docking_station_info { + __u32 location_id; + __u32 serial; + __u16 capabilities; +}; +struct pnp_isa_config_struc { + __u8 revision; + __u8 no_csns; + __u16 isa_rd_data_port; + __u16 reserved; +}; +struct escd_info_struc { + __u16 min_escd_write_size; + __u16 escd_size; + __u32 nv_storage_base; +}; +struct pnp_bios_node { + __u16 size; + __u8 handle; + __u32 eisa_id; + __u8 type_code[3]; + __u16 flags; + __u8 data[0]; +}; +#pragma pack() + +struct pnpbios_device_id +{ + char id[8]; + unsigned long driver_data; +}; + +struct pnpbios_driver { + struct list_head node; + char *name; + const struct pnpbios_device_id *id_table; /* NULL if wants all devices */ + int (*probe) (struct pci_dev *dev, const struct pnpbios_device_id *id); /* New device inserted */ + void (*remove) (struct pci_dev *dev); /* Device removed, either due to hotplug remove or module remove */ +}; + +#ifdef CONFIG_PNPBIOS + +/* exported */ +extern int pnpbios_register_driver(struct pnpbios_driver *drv); +extern void pnpbios_unregister_driver(struct pnpbios_driver *drv); + +/* non-exported */ +#define pnpbios_for_each_dev(dev) \ + for(dev = pnpbios_dev_g(pnpbios_devices.next); dev != pnpbios_dev_g(&pnpbios_devices); dev = 
pnpbios_dev_g(dev->global_list.next)) + + +#define pnpbios_dev_g(n) list_entry(n, struct pci_dev, global_list) + +static __inline struct pnpbios_driver *pnpbios_dev_driver(const struct pci_dev *dev) +{ + return (struct pnpbios_driver *)dev->driver; +} + +extern int pnpbios_dont_use_current_config; +extern void *pnpbios_kmalloc(size_t size, int f); +extern int pnpbios_init (void); +extern int pnpbios_proc_init (void); +extern void pnpbios_proc_exit (void); + +extern int pnp_bios_dev_node_info (struct pnp_dev_node_info *data); +extern int pnp_bios_get_dev_node (u8 *nodenum, char config, struct pnp_bios_node *data); +extern int pnp_bios_set_dev_node (u8 nodenum, char config, struct pnp_bios_node *data); +extern int pnp_bios_get_stat_res (char *info); +extern int pnp_bios_isapnp_config (struct pnp_isa_config_struc *data); +extern int pnp_bios_escd_info (struct escd_info_struc *data); +extern int pnp_bios_read_escd (char *data, u32 nvram_base); +#if needed +extern int pnp_bios_get_event (u16 *message); +extern int pnp_bios_send_message (u16 message); +extern int pnp_bios_set_stat_res (char *info); +extern int pnp_bios_apm_id_table (char *table, u16 *size); +extern int pnp_bios_write_escd (char *data, u32 nvram_base); +#endif + +/* + * a helper function which helps ensure correct pnpbios_driver + * setup and cleanup for commonly-encountered hotplug/modular cases + * + * This MUST stay in a header, as it checks for -DMODULE + */ + +static inline int pnpbios_module_init(struct pnpbios_driver *drv) +{ + int rc = pnpbios_register_driver (drv); + + if (rc > 0) + return 0; + + /* iff CONFIG_HOTPLUG and built into kernel, we should + * leave the driver around for future hotplug events. + * For the module case, a hotplug daemon of some sort + * should load a module in response to an insert event. 
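As an illustration (not part of the patch): pnpbios_module_init() above follows the pci_module_init() convention, so a minimal client driver would look roughly like the sketch below. All example_* names are hypothetical, and the empty-string id-table terminator is an assumed convention.

#include <linux/module.h>
#include <linux/pnpbios.h>

static const struct pnpbios_device_id example_ids[] = {
	{ "PNP0501", 0 },		/* 16550A-compatible UART, as an example */
	{ "", 0 }			/* terminator (assumed convention) */
};

static int example_probe(struct pci_dev *dev, const struct pnpbios_device_id *id)
{
	/* claim the resources the BIOS assigned via dev->resource[] */
	return 0;
}

static void example_remove(struct pci_dev *dev)
{
	/* release whatever example_probe() claimed */
}

static struct pnpbios_driver example_driver = {
	name:		"example",
	id_table:	example_ids,
	probe:		example_probe,
	remove:		example_remove,
};

static int __init example_init(void)
{
	/* 0 when devices were found (or the driver is kept around for
	 * hotplug); otherwise the helper unregisters and reports an error */
	return pnpbios_module_init(&example_driver);
}

static void __exit example_exit(void)
{
	pnpbios_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);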
*/ +#if defined(CONFIG_HOTPLUG) && !defined(MODULE) + if (rc == 0) + return 0; +#else + if (rc == 0) + rc = -ENODEV; +#endif + + /* if we get here, we need to clean up pci driver instance + * and return some sort of error */ + pnpbios_unregister_driver (drv); + + return rc; +} + +#else /* CONFIG_PNPBIOS */ + +static __inline__ int pnpbios_register_driver(struct pnpbios_driver *drv) +{ + return 0; +} + +static __inline__ void pnpbios_unregister_driver(struct pnpbios_driver *drv) +{ + return; +} + +#endif /* CONFIG_PNPBIOS */ +#endif /* __KERNEL__ */ + +#endif /* _LINUX_PNPBIOS_H */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/quota.h linux.22-ac2/include/linux/quota.h --- linux.vanilla/include/linux/quota.h 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/include/linux/quota.h 2003-09-01 13:54:21.000000000 +0100 @@ -315,6 +315,16 @@ void unregister_quota_format(struct quota_format_type *fmt); void init_dquot_operations(struct dquot_operations *fsdqops); +struct quota_module_name { + int qm_fmt_id; + char *qm_mod_name; +}; + +#define INIT_QUOTA_MODULE_NAMES {\ + {QFMT_VFS_OLD, "quota_v1"},\ + {QFMT_VFS_V0, "quota_v2"},\ + {0, NULL}} + #else # /* nodep */ include diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/sched.h linux.22-ac2/include/linux/sched.h --- linux.vanilla/include/linux/sched.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/sched.h 2003-09-01 13:54:21.000000000 +0100 @@ -73,16 +73,16 @@ #define CT_TO_SECS(x) ((x) / HZ) #define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ) -extern int nr_running, nr_threads; +extern int nr_threads; extern int last_pid; +extern unsigned long nr_running(void); +extern unsigned long nr_uninterruptible(void); #include #include #include #include -#ifdef __KERNEL__ #include -#endif #include @@ -109,12 +109,6 @@ #define SCHED_FIFO 1 #define SCHED_RR 2 -/* - * This is an additional bit set when we want to - * yield the CPU for one re-schedule.. - */ -#define SCHED_YIELD 0x10 - struct sched_param { int sched_priority; }; @@ -135,14 +129,21 @@ extern spinlock_t runqueue_lock; extern spinlock_t mmlist_lock; +typedef struct task_struct task_t; + extern void sched_init(void); -extern void init_idle(void); +extern void init_idle(task_t *idle, int cpu); +extern int idle_cpu(int cpu); extern void show_state(void); extern void cpu_init (void); extern void trap_init(void); extern void update_process_times(int user); -extern void update_one_process(struct task_struct *p, unsigned long user, +extern void update_one_process(task_t *p, unsigned long user, unsigned long system, int cpu); +extern void scheduler_tick(int user_tick, int system); +extern void migration_init(void); +extern unsigned long cache_decay_ticks; +extern int set_user(uid_t new_ruid, int dumpclear); #define MAX_SCHEDULE_TIMEOUT LONG_MAX extern signed long FASTCALL(schedule_timeout(signed long timeout)); @@ -153,10 +154,34 @@ extern int start_context_thread(void); extern int current_is_keventd(void); -#if CONFIG_SMP -extern void set_cpus_allowed(struct task_struct *p, unsigned long new_mask); +/* + * Priority of a process goes from 0..MAX_PRIO-1, valid RT + * priority is 0..MAX_RT_PRIO-1, and SCHED_OTHER tasks are + * in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values + * are inverted: lower p->prio value means higher priority. + * + * The MAX_RT_USER_PRIO value allows the actual maximum + * RT priority to be separate from the value exported to + * user-space. 
This allows kernel threads to set their + * priority to a value higher than any user task. Note: + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. + */ + +#define MAX_USER_RT_PRIO 100 +#define MAX_RT_PRIO MAX_USER_RT_PRIO + +#define MAX_PRIO (MAX_RT_PRIO + 40) + +/* + * The maximum RT priority is configurable. If the resulting + * bitmap is 160-bits , we can use a hand-coded routine which + * is optimal. Otherwise, we fall back on a generic routine for + * finding the first set bit from an arbitrarily-sized bitmap. + */ +#if MAX_PRIO < 160 && MAX_PRIO > 127 +#define sched_find_first_bit(map) _sched_find_first_bit(map) #else -# define set_cpus_allowed(p, new_mask) do { } while (0) +#define sched_find_first_bit(map) find_first_bit(map, MAX_PRIO) #endif /* @@ -280,6 +305,8 @@ extern struct user_struct root_user; #define INIT_USER (&root_user) +typedef struct prio_array prio_array_t; + struct task_struct { /* * offsets of these are hardcoded elsewhere - touch with care @@ -297,34 +324,25 @@ int lock_depth; /* Lock depth */ -/* - * offset 32 begins here on 32-bit platforms. We keep - * all fields in a single cacheline that are needed for - * the goodness() loop in schedule(). - */ - long counter; - long nice; - unsigned long policy; - struct mm_struct *mm; - int processor; /* - * cpus_runnable is ~0 if the process is not running on any - * CPU. It's (1 << cpu) if it's running on a CPU. This mask - * is updated under the runqueue lock. - * - * To determine whether a process might run on a CPU, this - * mask is AND-ed with cpus_allowed. - */ - unsigned long cpus_runnable, cpus_allowed; - /* - * (only the 'next' pointer fits into the cacheline, but - * that's just fine.) + * offset 32 begins here on 32-bit platforms. */ + unsigned int cpu; + int prio, static_prio; struct list_head run_list; - unsigned long sleep_time; + prio_array_t *array; + + unsigned long sleep_avg; + unsigned long sleep_timestamp; + + unsigned long policy; + unsigned long cpus_allowed; + unsigned int time_slice; + + task_t *next_task, *prev_task; + + struct mm_struct *mm, *active_mm; - struct task_struct *next_task, *prev_task; - struct mm_struct *active_mm; struct list_head local_pages; unsigned int allocation_order, nr_local_pages; @@ -348,12 +366,12 @@ * older sibling, respectively. (p->father can be replaced with * p->p_pptr->pid) */ - struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr; + task_t *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr; struct list_head thread_group; /* PID hash table linkage. 
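The 100 + 40 split above gives 140 priority slots per runqueue; how nice values land in the SCHED_OTHER range is easiest to see with the conventional O(1)-scheduler mapping, sketched below. The EX_* macros restate that convention and are an assumption, not code quoted from the patch.

/* Sketch, not patch code: the usual nice <-> static priority mapping that
 * the definitions above imply (MAX_RT_PRIO = 100, MAX_PRIO = 140). */
#define EX_MAX_RT_PRIO		100
#define EX_MAX_PRIO		(EX_MAX_RT_PRIO + 40)
#define EX_NICE_TO_PRIO(nice)	(EX_MAX_RT_PRIO + (nice) + 20)
#define EX_PRIO_TO_NICE(prio)	((prio) - EX_MAX_RT_PRIO - 20)

/* EX_NICE_TO_PRIO(-20) == 100, EX_NICE_TO_PRIO(0) == 120 (the MAX_PRIO-20
 * used for INIT_TASK above), EX_NICE_TO_PRIO(+19) == 139 == EX_MAX_PRIO-1.
 * RT priorities 0..99 sit below all of these; each runqueue keeps one list
 * per slot plus a 140-bit bitmap (padded to 160 bits, i.e. five 32-bit
 * words), which sched_find_first_bit() scans to pick the next task. */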
*/ - struct task_struct *pidhash_next; - struct task_struct **pidhash_pprev; + task_t *pidhash_next; + task_t **pidhash_pprev; wait_queue_head_t wait_chldexit; /* for wait4() */ struct completion *vfork_done; /* for vfork() */ @@ -432,8 +450,8 @@ #define PF_MEMDIE 0x00001000 /* Killed for out-of-memory */ #define PF_FREE_PAGES 0x00002000 /* per process page freeing */ #define PF_NOIO 0x00004000 /* avoid generating further I/O */ - #define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */ +#define PF_FSTRANS 0x00200000 /* inside a filesystem transaction */ /* * Ptrace flags @@ -453,10 +471,16 @@ */ #define _STK_LIM (8*1024*1024) -#define DEF_COUNTER (10*HZ/100) /* 100 ms time slice */ -#define MAX_COUNTER (20*HZ/100) -#define DEF_NICE (0) +#if CONFIG_SMP +extern void set_cpus_allowed(task_t *p, unsigned long new_mask); +#else +#define set_cpus_allowed(p, new_mask) do { } while (0) +#endif +extern void set_user_nice(task_t *p, long nice); +extern int task_prio(task_t *p); +extern int task_nice(task_t *p); +extern int idle_cpu(int cpu); extern void yield(void); /* @@ -476,14 +500,14 @@ addr_limit: KERNEL_DS, \ exec_domain: &default_exec_domain, \ lock_depth: -1, \ - counter: DEF_COUNTER, \ - nice: DEF_NICE, \ + prio: MAX_PRIO-20, \ + static_prio: MAX_PRIO-20, \ policy: SCHED_OTHER, \ + cpus_allowed: ~0UL, \ mm: NULL, \ active_mm: &init_mm, \ - cpus_runnable: ~0UL, \ - cpus_allowed: ~0UL, \ run_list: LIST_HEAD_INIT(tsk.run_list), \ + time_slice: HZ, \ next_task: &tsk, \ prev_task: &tsk, \ p_opptr: &tsk, \ @@ -517,24 +541,24 @@ #endif union task_union { - struct task_struct task; + task_t task; unsigned long stack[INIT_TASK_SIZE/sizeof(long)]; }; extern union task_union init_task_union; extern struct mm_struct init_mm; -extern struct task_struct *init_tasks[NR_CPUS]; +extern task_t *init_tasks[NR_CPUS]; /* PID hashing. (shouldnt this be dynamic?) */ #define PIDHASH_SZ (4096 >> 2) -extern struct task_struct *pidhash[PIDHASH_SZ]; +extern task_t *pidhash[PIDHASH_SZ]; #define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1)) -static inline void hash_pid(struct task_struct *p) +static inline void hash_pid(task_t *p) { - struct task_struct **htable = &pidhash[pid_hashfn(p->pid)]; + task_t **htable = &pidhash[pid_hashfn(p->pid)]; if((p->pidhash_next = *htable) != NULL) (*htable)->pidhash_pprev = &p->pidhash_next; @@ -542,16 +566,16 @@ p->pidhash_pprev = htable; } -static inline void unhash_pid(struct task_struct *p) +static inline void unhash_pid(task_t *p) { if(p->pidhash_next) p->pidhash_next->pidhash_pprev = p->pidhash_pprev; *p->pidhash_pprev = p->pidhash_next; } -static inline struct task_struct *find_task_by_pid(int pid) +static inline task_t *find_task_by_pid(int pid) { - struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)]; + task_t *p, **htable = &pidhash[pid_hashfn(pid)]; for(p = *htable; p && p->pid != pid; p = p->pidhash_next) ; @@ -559,19 +583,6 @@ return p; } -#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL) - -static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu) -{ - tsk->processor = cpu; - tsk->cpus_runnable = 1UL << cpu; -} - -static inline void task_release_cpu(struct task_struct *tsk) -{ - tsk->cpus_runnable = ~0UL; -} - /* per-UID process charging. 
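Since find_task_by_pid() above walks the hash without taking any lock itself, a lookup is only meaningful under tasklist_lock. A minimal hypothetical caller, for illustration only:

/* Illustrative only, not part of the patch. */
static int example_parent_of(int pid)
{
	task_t *p;
	int ppid = -1;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p)
		ppid = p->p_pptr->pid;	/* p is only stable while the lock is held */
	read_unlock(&tasklist_lock);
	return ppid;
}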
*/ extern struct user_struct * alloc_uid(uid_t); extern void free_uid(struct user_struct *); @@ -598,47 +609,51 @@ extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q)); extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout)); -extern int FASTCALL(wake_up_process(struct task_struct * tsk)); +extern int FASTCALL(wake_up_process(task_t * tsk)); +extern void FASTCALL(wake_up_forked_process(task_t * tsk)); +extern void FASTCALL(sched_exit(task_t * p)); #define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) #define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) #define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0) -#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) -#define wake_up_sync_nr(x, nr) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) #define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) #define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr) #define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0) -#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1) -#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr) +#ifdef CONFIG_SMP +#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1) +#else +#define wake_up_interruptible_sync(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) +#endif + asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru); extern int in_group_p(gid_t); extern int in_egroup_p(gid_t); extern void proc_caches_init(void); -extern void flush_signals(struct task_struct *); -extern void flush_signal_handlers(struct task_struct *); +extern void flush_signals(task_t *); +extern void flush_signal_handlers(task_t *); extern void sig_exit(int, int, struct siginfo *); extern int dequeue_signal(sigset_t *, siginfo_t *); extern void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask); extern void unblock_all_signals(void); -extern int send_sig_info(int, struct siginfo *, struct task_struct *); -extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int send_sig_info(int, struct siginfo *, task_t *); +extern int force_sig_info(int, struct siginfo *, task_t *); extern int kill_pg_info(int, struct siginfo *, pid_t); extern int kill_sl_info(int, struct siginfo *, pid_t); extern int kill_proc_info(int, struct siginfo *, pid_t); -extern void notify_parent(struct task_struct *, int); -extern void do_notify_parent(struct task_struct *, int); -extern void force_sig(int, struct task_struct *); -extern int send_sig(int, struct task_struct *, int); +extern void notify_parent(task_t *, int); +extern void do_notify_parent(task_t *, int); +extern void force_sig(int, task_t *); +extern int send_sig(int, task_t *, int); extern int kill_pg(pid_t, int, int); extern int kill_sl(pid_t, int, int); extern int kill_proc(pid_t, int, int); extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *); extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long); -static inline int signal_pending(struct task_struct *p) +static inline int signal_pending(task_t *p) { return (p->sigpending != 0); } @@ -677,7 +692,7 @@ This is required every time the blocked sigset_t changes. All callers should have t->sigmask_lock. 
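A hypothetical sketch of the locking rule stated in the comment above for recalc_sigpending(): ->blocked is changed only under sigmask_lock, and the pending flag is recomputed immediately afterwards.

/* Illustrative only, not part of the patch. */
static void example_block_sigterm(task_t *t)
{
	spin_lock_irq(&t->sigmask_lock);
	sigaddset(&t->blocked, SIGTERM);
	recalc_sigpending(t);		/* refresh t->sigpending */
	spin_unlock_irq(&t->sigmask_lock);
}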
*/ -static inline void recalc_sigpending(struct task_struct *t) +static inline void recalc_sigpending(task_t *t) { t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked); } @@ -784,16 +799,19 @@ extern int expand_fdset(struct files_struct *, int nr); extern void free_fdset(fd_set *, int); -extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); +extern int unshare_files(void); + +extern int copy_thread(int, unsigned long, unsigned long, unsigned long, task_t *, struct pt_regs *); extern void flush_thread(void); extern void exit_thread(void); -extern void exit_mm(struct task_struct *); -extern void exit_files(struct task_struct *); -extern void exit_sighand(struct task_struct *); +extern void exit_mm(task_t *); +extern void exit_files(task_t *); +extern void exit_sighand(task_t *); extern void reparent_to_init(void); extern void daemonize(void); +extern task_t *child_reaper; extern int do_execve(char *, char **, char **, struct pt_regs *); extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long); @@ -802,6 +820,9 @@ extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); +extern void wait_task_inactive(task_t * p); +extern void kick_if_running(task_t * p); + extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); #define __wait_event(wq, condition) \ @@ -885,27 +906,12 @@ for (task = next_thread(current) ; task != current ; task = next_thread(task)) #define next_thread(p) \ - list_entry((p)->thread_group.next, struct task_struct, thread_group) + list_entry((p)->thread_group.next, task_t, thread_group) #define thread_group_leader(p) (p->pid == p->tgid) -static inline void del_from_runqueue(struct task_struct * p) -{ - nr_running--; - p->sleep_time = jiffies; - list_del(&p->run_list); - p->run_list.next = NULL; -} - -static inline int task_on_runqueue(struct task_struct *p) +static inline void unhash_process(task_t *p) { - return (p->run_list.next != NULL); -} - -static inline void unhash_process(struct task_struct *p) -{ - if (task_on_runqueue(p)) - out_of_line_bug(); write_lock_irq(&tasklist_lock); nr_threads--; unhash_pid(p); @@ -915,12 +921,12 @@ } /* Protects ->fs, ->files, ->mm, and synchronises with wait4(). Nests inside tasklist_lock */ -static inline void task_lock(struct task_struct *p) +static inline void task_lock(task_t *p) { spin_lock(&p->alloc_lock); } -static inline void task_unlock(struct task_struct *p) +static inline void task_unlock(task_t *p) { spin_unlock(&p->alloc_lock); } @@ -944,9 +950,29 @@ return res; } +static inline void set_need_resched(void) +{ + current->need_resched = 1; +} + +static inline void clear_need_resched(void) +{ + current->need_resched = 0; +} + +static inline void set_tsk_need_resched(struct task_struct *tsk) +{ + tsk->need_resched = 1; +} + +static inline void clear_tsk_need_resched(struct task_struct *tsk) +{ + tsk->need_resched = 0; +} + static inline int need_resched(void) { - return (unlikely(current->need_resched)); + return unlikely(current->need_resched); } extern void __cond_resched(void); @@ -956,5 +982,34 @@ __cond_resched(); } +/* + * Wrappers for p->cpu access. No-op on UP. 
+ */ +#ifdef CONFIG_SMP + +static inline unsigned int task_cpu(struct task_struct *p) +{ + return p->cpu; +} + +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) +{ + p->cpu = cpu; +} + +#else + +static inline unsigned int task_cpu(struct task_struct *p) +{ + return 0; +} + +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) +{ +} + +#endif /* CONFIG_SMP */ + #endif /* __KERNEL__ */ + #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/seq_file.h linux.22-ac2/include/linux/seq_file.h --- linux.vanilla/include/linux/seq_file.h 2002-08-03 16:08:32.000000000 +0100 +++ linux.22-ac2/include/linux/seq_file.h 2003-09-01 13:54:21.000000000 +0100 @@ -2,7 +2,13 @@ #define _LINUX_SEQ_FILE_H #ifdef __KERNEL__ +#include +#include +#include + struct seq_operations; +struct file; +struct inode; struct seq_file { char *buf; @@ -52,5 +58,7 @@ int seq_printf(struct seq_file *, const char *, ...) __attribute__ ((format (printf,2,3))); +int single_open(struct file *, int (*)(struct seq_file *, void *), void *); +int single_release(struct inode *, struct file *); #endif #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/serialP.h linux.22-ac2/include/linux/serialP.h --- linux.vanilla/include/linux/serialP.h 2002-08-03 16:08:32.000000000 +0100 +++ linux.22-ac2/include/linux/serialP.h 2003-09-01 13:54:30.000000000 +0100 @@ -83,6 +83,7 @@ long pgrp; /* pgrp of opening process */ struct circ_buf xmit; spinlock_t xmit_lock; + spinlock_t irq_spinlock; u8 *iomem_base; u16 iomem_reg_shift; int io_type; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/smp_balance.h linux.22-ac2/include/linux/smp_balance.h --- linux.vanilla/include/linux/smp_balance.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/smp_balance.h 2003-06-29 16:09:26.000000000 +0100 @@ -0,0 +1,10 @@ +#ifndef _LINUX_SMP_BALANCE_H +#define _LINUX_SMP_BALANCE_H + +#ifdef ARCH_HAS_SMP_BALANCE +#include +#else +#define arch_load_balance(x,y) 0 +#endif + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/smp.h linux.22-ac2/include/linux/smp.h --- linux.vanilla/include/linux/smp.h 2001-11-22 19:46:19.000000000 +0000 +++ linux.22-ac2/include/linux/smp.h 2003-09-01 13:54:21.000000000 +0100 @@ -84,8 +84,16 @@ #define kernel_lock() #define cpu_logical_map(cpu) 0 #define cpu_number_map(cpu) 0 +#define cpu_online(cpu) ({ BUG_ON((cpu) != 0); 1; }) #define smp_call_function(func,info,retry,wait) ({ 0; }) #define cpu_online_map 1 - +static inline void smp_send_reschedule(int cpu) { } +static inline void smp_send_reschedule_all(void) { } #endif + +/* + * Common definitions: + */ +#define cpu() smp_processor_id() + #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/sonypi.h linux.22-ac2/include/linux/sonypi.h --- linux.vanilla/include/linux/sonypi.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/sonypi.h 2003-09-01 13:54:30.000000000 +0100 @@ -94,6 +94,8 @@ #define SONYPI_EVENT_MEMORYSTICK_INSERT 54 #define SONYPI_EVENT_MEMORYSTICK_EJECT 55 #define SONYPI_EVENT_ANYBUTTON_RELEASED 56 +#define SONYPI_EVENT_BATTERY_INSERT 57 +#define SONYPI_EVENT_BATTERY_REMOVE 58 /* get/set brightness */ #define SONYPI_IOCGBRT _IOR('v', 0, __u8) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/sysctl.h linux.22-ac2/include/linux/sysctl.h --- 
linux.vanilla/include/linux/sysctl.h 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/include/linux/sysctl.h 2003-09-01 13:54:21.000000000 +0100 @@ -127,6 +127,7 @@ KERN_CORE_PATTERN=56, /* string: pattern for core-files */ KERN_PPC_L3CR=57, /* l3cr register on PPC */ KERN_EXCEPTION_TRACE=58, /* boolean: exception trace */ + KERN_CORE_SETUID=59, /* int: set to allow core dumps of setuid apps */ }; @@ -146,6 +147,8 @@ VM_MAX_MAP_COUNT=11, /* int: Maximum number of active map areas */ VM_MIN_READAHEAD=12, /* Min file readahead */ VM_MAX_READAHEAD=13, /* Max file readahead */ + VM_OVERCOMMIT_RATIO=16, /* percent of RAM to allow overcommit in */ + VM_PAGEBUF=22, /* struct: Control pagebuf parameters */ }; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/tty.h linux.22-ac2/include/linux/tty.h --- linux.vanilla/include/linux/tty.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/tty.h 2003-09-01 13:54:21.000000000 +0100 @@ -265,7 +265,7 @@ int session; kdev_t device; unsigned long flags; - int count; + atomic_t count; struct winsize winsize; unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1; unsigned char low_latency:1, warned:1; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/usbdevice_fs.h linux.22-ac2/include/linux/usbdevice_fs.h --- linux.vanilla/include/linux/usbdevice_fs.h 2001-11-22 19:49:52.000000000 +0000 +++ linux.22-ac2/include/linux/usbdevice_fs.h 2003-09-01 13:54:31.000000000 +0100 @@ -142,6 +142,8 @@ #define USBDEVFS_HUB_PORTINFO _IOR('U', 19, struct usbdevfs_hub_portinfo) #define USBDEVFS_RESET _IO('U', 20) #define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int) +#define USBDEVFS_DISCONNECT _IO('U', 22) +#define USBDEVFS_CONNECT _IO('U', 23) /* --------------------------------------------------------------------- */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/usb.h linux.22-ac2/include/linux/usb.h --- linux.vanilla/include/linux/usb.h 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/include/linux/usb.h 2003-09-01 13:54:30.000000000 +0100 @@ -865,6 +865,7 @@ struct usb_device *children[USB_MAXCHILDREN]; }; +extern int usb_ifnum_to_ifpos(struct usb_device *dev, unsigned ifnum); extern struct usb_interface *usb_ifnum_to_if(struct usb_device *dev, unsigned ifnum); extern struct usb_endpoint_descriptor *usb_epnum_to_ep_desc(struct usb_device *dev, unsigned epnum); @@ -873,6 +874,7 @@ extern void usb_scan_devices(void); /* used these for multi-interface device registration */ +extern int usb_find_interface_driver_for_ifnum(struct usb_device *dev, unsigned ifnum); extern void usb_driver_claim_interface(struct usb_driver *driver, struct usb_interface *iface, void* priv); extern int usb_interface_claimed(struct usb_interface *iface); extern void usb_driver_release_interface(struct usb_driver *driver, struct usb_interface *iface); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/vblankdev.h linux.22-ac2/include/linux/vblankdev.h --- linux.vanilla/include/linux/vblankdev.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/vblankdev.h 2003-09-01 13:54:31.000000000 +0100 @@ -0,0 +1,18 @@ +#ifndef _LINUX_VBLANKDEV_H +#define _LINUX_VBLANKDEV_H + +#include +#include + +#define VBLANK_MINOR 99 /* Temporary, for testing */ + +struct vblank_bind +{ + u8 domain; + u8 bus; + u8 devfn; +}; + +#define VBLIOC_BIND _IOW('B', 1, struct vblank_bind) + +#endif diff -u --new-file 
--recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/vmalloc.h linux.22-ac2/include/linux/vmalloc.h --- linux.vanilla/include/linux/vmalloc.h 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/include/linux/vmalloc.h 2003-09-01 13:54:21.000000000 +0100 @@ -29,6 +29,7 @@ extern void vmfree_area_pages(unsigned long address, unsigned long size); extern int vmalloc_area_pages(unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot); +extern void *vcalloc(unsigned long nmemb, unsigned long elem_size); /* * Allocate any pages diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/wait.h linux.22-ac2/include/linux/wait.h --- linux.vanilla/include/linux/wait.h 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/include/linux/wait.h 2003-09-01 13:54:21.000000000 +0100 @@ -58,6 +58,7 @@ # define wq_read_unlock read_unlock # define wq_write_lock_irq write_lock_irq # define wq_write_lock_irqsave write_lock_irqsave +# define wq_write_unlock_irq write_unlock_irq # define wq_write_unlock_irqrestore write_unlock_irqrestore # define wq_write_unlock write_unlock #else @@ -70,6 +71,7 @@ # define wq_read_unlock_irqrestore spin_unlock_irqrestore # define wq_write_lock_irq spin_lock_irq # define wq_write_lock_irqsave spin_lock_irqsave +# define wq_write_unlock_irq spin_unlock_irq # define wq_write_unlock_irqrestore spin_unlock_irqrestore # define wq_write_unlock spin_unlock #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/linux/wm97xx.h linux.22-ac2/include/linux/wm97xx.h --- linux.vanilla/include/linux/wm97xx.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/linux/wm97xx.h 2003-09-01 13:54:31.000000000 +0100 @@ -0,0 +1,96 @@ + +/* + * Register bits for Wolfson WM97xx series of codecs + */ + +#ifndef _WM97XX_H_ +#define _WM97XX_H_ + +#include /* AC97 control layer */ + +/* + * WM97xx AC97 Touchscreen registers + */ +#define AC97_WM97XX_DIGITISER1 0x76 +#define AC97_WM97XX_DIGITISER2 0x78 +#define AC97_WM97XX_DIGITISER_RD 0x7a + +/* + * WM97xx register bits + */ +#define WM97XX_POLL 0x8000 /* initiate a polling measurement */ +#define WM97XX_ADCSEL_X 0x1000 /* x coord measurement */ +#define WM97XX_ADCSEL_Y 0x2000 /* y coord measurement */ +#define WM97XX_ADCSEL_PRES 0x3000 /* pressure measurement */ +#define WM97XX_COO 0x0800 /* enable coordinate mode */ +#define WM97XX_CTC 0x0400 /* enable continuous mode */ +#define WM97XX_CM_RATE_93 0x0000 /* 93.75Hz continuous rate */ +#define WM97XX_CM_RATE_187 0x0100 /* 187.5Hz continuous rate */ +#define WM97XX_CM_RATE_375 0x0200 /* 375Hz continuous rate */ +#define WM97XX_CM_RATE_750 0x0300 /* 750Hz continuous rate */ +#define WM97XX_CM_RATE_8K 0x00f0 /* 8kHz continuous rate */ +#define WM97XX_CM_RATE_12K 0x01f0 /* 12kHz continuous rate */ +#define WM97XX_CM_RATE_24K 0x02f0 /* 24kHz continuous rate */ +#define WM97XX_CM_RATE_48K 0x03f0 /* 48kHz continuous rate */ +#define WM97XX_DELAY(i) ((i << 4) & 0x00f0) /* sample delay times */ +#define WM97XX_SLEN 0x0008 /* slot read back enable */ +#define WM97XX_SLT(i) ((i - 5) & 0x7) /* touchpanel slot selection (5-11) */ +#define WM97XX_PRP_DETW 0x4000 /* pen detect on, digitiser off, wake up */ +#define WM97XX_PRP_DET 0x8000 /* pen detect on, digitiser off, no wake up */ +#define WM97XX_PRP_DET_DIG 0xc000 /* pen detect on, digitiser on */ +#define WM97XX_RPR 0x2000 /* wake up on pen down */ +#define WM97XX_PEN_DOWN 0x8000 /* pen is down */ + +/* WM9712 Bits */ +#define WM9712_45W 0x1000 /* set for 
5-wire touchscreen */ +#define WM9712_PDEN 0x0800 /* measure only when pen down */ +#define WM9712_WAIT 0x0200 /* wait until adc is read before next sample */ +#define WM9712_PIL 0x0100 /* current used for pressure measurement. set 400uA else 200uA */ +#define WM9712_MASK_HI 0x0040 /* hi on mask pin (47) stops conversions */ +#define WM9712_MASK_EDGE 0x0080 /* rising/falling edge on pin delays sample */ +#define WM9712_MASK_SYNC 0x00c0 /* rising/falling edge on mask initiates sample */ +#define WM9712_RPU(i) (i&0x3f) /* internal pull up on pen detect (64k / rpu) */ +#define WM9712_ADCSEL_COMP1 0x4000 /* COMP1/AUX1 measurement (pin29) */ +#define WM9712_ADCSEL_COMP2 0x5000 /* COMP2/AUX2 measurement (pin30) */ +#define WM9712_ADCSEL_BMON 0x6000 /* BMON/AUX3 measurement (pin31) */ +#define WM9712_ADCSEL_WIPER 0x7000 /* WIPER/AUX4 measurement (pin12) */ +#define WM9712_PD(i) (0x1 << i) /* power management */ + +/* WM9712 Registers */ +#define AC97_WM9712_POWER 0x24 +#define AC97_WM9712_REV 0x58 + +/* WM9705 Bits */ +#define WM9705_PDEN 0x1000 /* measure only when pen is down */ +#define WM9705_PINV 0x0800 /* inverts sense of pen down output */ +#define WM9705_BSEN 0x0400 /* BUSY flag enable, pin47 is 1 when busy */ +#define WM9705_BINV 0x0200 /* invert BUSY (pin47) output */ +#define WM9705_WAIT 0x0100 /* wait until adc is read before next sample */ +#define WM9705_PIL 0x0080 /* current used for pressure measurement. set 400uA else 200uA */ +#define WM9705_PHIZ 0x0040 /* set PHONE and PCBEEP inputs to high impedance */ +#define WM9705_MASK_HI 0x0010 /* hi on mask stops conversions */ +#define WM9705_MASK_EDGE 0x0020 /* rising/falling edge on pin delays sample */ +#define WM9705_MASK_SYNC 0x0030 /* rising/falling edge on mask initiates sample */ +#define WM9705_PDD(i) (i & 0x000f) /* pen detect comparator threshold */ +#define WM9705_ADCSEL_BMON 0x4000 /* BMON measurement */ +#define WM9705_ADCSEL_AUX 0x5000 /* AUX measurement */ +#define WM9705_ADCSEL_PHONE 0x6000 /* PHONE measurement */ +#define WM9705_ADCSEL_PCBEEP 0x7000 /* PCBEEP measurement */ + +/* AUX ADC ID's */ +#define TS_COMP1 0x0 +#define TS_COMP2 0x1 +#define TS_BMON 0x2 +#define TS_WIPER 0x3 + +/* ID numbers */ +#define WM97XX_ID1 0x574d +#define WM9712_ID2 0x4c12 +#define WM9705_ID2 0x4c05 + +#define AC97_LINK_FRAME 21 /* time in uS for AC97 link frame */ + +void register_touchscreen_codec(struct ac97_codec *codec); +void unregister_touchscreen_codec(struct ac97_codec *codec); + +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/include/net/edp2.h linux.22-ac2/include/net/edp2.h --- linux.vanilla/include/net/edp2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/include/net/edp2.h 2003-06-29 16:09:28.000000000 +0100 @@ -0,0 +1,131 @@ +/* + * The Coraid EDPv2 protocol definitions + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
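Going back to the WM97xx digitiser bits defined a little earlier: a measurement is started by writing one composed command word, roughly as in this sketch. The codec_write/codec_read accessor pointers are assumed to be the standard ac97_codec ones; treat the routine as illustrative, not as patch code.

/* Illustrative only, not part of the patch. */
static void example_poll_x(struct ac97_codec *codec)
{
	u16 cmd = WM97XX_POLL | WM97XX_ADCSEL_X | WM97XX_DELAY(3);

	codec->codec_write(codec, AC97_WM97XX_DIGITISER1, cmd);
	/* ...wait for WM97XX_POLL to clear, then read the sample: */
	/* sample = codec->codec_read(codec, AC97_WM97XX_DIGITISER_RD); */
}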
+ */ + +struct edp2 +{ + /* This structure follows the DIX ethernet header */ + u8 flag_err; +#define EDP_F_RESPONSE 0x80 +#define EDP_F_ERROR 0x40 +#define EDP_F_ERRMASK 0x0F + u8 function; + u32 tag __attribute((packed)); +}; + +struct edp2_ata_fid0 +{ + u8 flag_ver; +#define EDP_ATA_WRITE 0x80 /* I/O from host */ +#define EDP_ATA_LBA48 0x40 /* Command is LBA48 */ +#define EDP_ATA_DBIT 0x10 /* Dbit state in LBA48 */ + u8 err_feature; + u8 sector; + u8 cmd_status; + u8 lba0; + u8 lba1; + u8 lba2; + u8 lba3; + u8 lba4; + u8 lba5; + u8 res0; + u8 res1; + u32 data[0]; +}; + +#define EDPT2_ATA_BADPARAM 0 +#define EDPT2_ATA_DISKFAIL 1 + +struct edp2_ata_fid1 +{ + u32 count; /* Outstanding buffer limit */ + u32 firmware; /* Firmware revision */ + u16 aoe; /* Protocol supported */ + u8 shad; /* Shelf ident */ + u8 slad; /* Slot ident */ + u32 data[0]; /* Config string */ +}; + +struct edp2_ata_fid2 +{ + u32 tx; + u32 rx; + u32 tx_error; + u32 rx_error; + u32 rx_trunc; + u32 rx_over; + u32 rx_crc; + u32 rx_short; + u32 rx_align; + u32 rx_violation; + u32 tx_carrier; + u32 tx_under; + u32 tx_retrans; + u32 tx_late; + u32 tx_heartbeat; + u32 tx_defer; + u32 tx_retry; +}; + +struct edp2_ata_fid3 +{ + u8 cmd; +#define EDP_ATA_FID3_CMD_CMP 0 +#define EDP_ATA_FID3_CMD_NCMP 1 +#define EDP_ATA_FID3_CLAIM 2 +#define EDP_ATA_FID3_FORCE 3 + + u8 res1, res2, res3; + u32 data[0]; +}; + + +#define MAX_QUEUED 128 + +/* + * Used to hold together the edp2 device database + */ + +struct edp2_device +{ + struct edp2_device *next; + + /* For upper layer */ + void *edp2_upper; + int users; + + /* Location */ + struct net_device *dev; + u8 shelf; + u8 slot; + u8 mac[6]; + + /* Properties */ + u32 queue; + u32 revision; + u8 protocol; + int dead:1; + + /* Time info */ + unsigned long last_ident; + + /* Protocol queue */ + int count; + u16 tag; /* bits 23-8 of the tag */ + struct sk_buff *skb[MAX_QUEUED]; + struct timer_list timer; +}; + +struct edp2_cb /* Holds our data on the queue copy of the skb */ +{ + int (*completion)(struct edp2_device *, struct edp2 *edp, unsigned long data, struct sk_buff *skb); + unsigned long data; + unsigned long timeout; + int count; +}; + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/init/main.c linux.22-ac2/init/main.c --- linux.vanilla/init/main.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/init/main.c 2003-08-08 15:50:53.000000000 +0100 @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -64,6 +65,10 @@ #include #endif +#ifdef CONFIG_PNPBIOS +#include +#endif + #ifdef CONFIG_IRDA extern int irda_proto_init(void); extern int irda_device_init(void); @@ -110,6 +115,10 @@ #if defined(CONFIG_SYSVIPC) extern void ipc_init(void); #endif + +#ifdef CONFIG_PARISC +extern void parisc_init(void); +#endif /* * Boot command-line arguments @@ -292,8 +301,6 @@ extern void setup_arch(char **); extern void cpu_idle(void); -unsigned long wait_init_idle; - #ifndef CONFIG_SMP #ifdef CONFIG_X86_LOCAL_APIC @@ -302,34 +309,24 @@ APIC_init_uniprocessor(); } #else -#define smp_init() do { } while (0) +#define smp_init() do { } while (0) #endif #else - /* Called by boot processor to activate the rest. */ static void __init smp_init(void) { /* Get other processors into their bootup holding patterns. */ smp_boot_cpus(); - wait_init_idle = cpu_online_map; - clear_bit(current->processor, &wait_init_idle); /* Don't wait on me! 
*/ smp_threads_ready=1; smp_commence(); - - /* Wait for the other cpus to set up their idle processes */ - printk("Waiting on wait_init_idle (map = 0x%lx)\n", wait_init_idle); - while (wait_init_idle) { - cpu_relax(); - barrier(); - } - printk("All processors have done init_idle\n"); } #endif + /* * We need to finalize in a non-__init function or else race conditions * between the root thread and the init thread may cause start_kernel to @@ -341,9 +338,8 @@ { kernel_thread(init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL); unlock_kernel(); - current->need_resched = 1; - cpu_idle(); -} + cpu_idle(); +} /* * Activate the first processor. @@ -424,18 +420,25 @@ #ifdef CONFIG_PROC_FS proc_root_init(); #endif +#ifdef CONFIG_PARISC + parisc_init(); +#endif #if defined(CONFIG_SYSVIPC) ipc_init(); #endif check_bugs(); + printk("POSIX conformance testing by UNIFIX\n"); - /* - * We count on the initial thread going ok - * Like idlers init is an unlocked kernel thread, which will - * make syscalls (and thus be locked). + init_idle(current, smp_processor_id()); + /* + * We count on the initial thread going ok + * Like idlers init is an unlocked kernel thread, which will + * make syscalls (and thus be locked). */ smp_init(); + + /* Do the rest non-__init'ed, we're now alive */ rest_init(); } @@ -464,6 +467,10 @@ */ static void __init do_basic_setup(void) { + /* Start the per-CPU migration threads */ +#if CONFIG_SMP + migration_init(); +#endif /* * Tell the world that we're going to be the grim @@ -525,6 +532,9 @@ #ifdef CONFIG_ISAPNP isapnp_init(); #endif +#ifdef CONFIG_PNPBIOS + pnpbios_init(); +#endif #ifdef CONFIG_TC tc_init(); #endif @@ -548,6 +558,7 @@ static int init(void * unused) { + struct files_struct *files; lock_kernel(); do_basic_setup(); @@ -560,7 +571,17 @@ */ free_initmem(); unlock_kernel(); - + + /* + * Right now we are a thread sharing with a ton of kernel + * stuff. We don't want to end up in user space in that state + */ + + files = current->files; + if(unshare_files()) + panic("unshare"); + put_files_struct(files); + if (open("/dev/console", O_RDWR, 0) < 0) printk("Warning: unable to open an initial console.\n"); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/ipc/shm.c linux.22-ac2/ipc/shm.c --- linux.vanilla/ipc/shm.c 2002-08-03 16:08:32.000000000 +0100 +++ linux.22-ac2/ipc/shm.c 2003-06-29 16:09:50.000000000 +0100 @@ -680,7 +680,7 @@ shmdnext = shmd->vm_next; if (shmd->vm_ops == &shm_vm_ops && shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) { - do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start); + do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start, 1); retval = 0; } } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/acct.c linux.22-ac2/kernel/acct.c --- linux.vanilla/kernel/acct.c 2002-08-03 16:08:32.000000000 +0100 +++ linux.22-ac2/kernel/acct.c 2003-06-29 16:09:23.000000000 +0100 @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -299,8 +300,10 @@ ac.ac_etime = encode_comp_t(jiffies - current->start_time); ac.ac_utime = encode_comp_t(current->times.tms_utime); ac.ac_stime = encode_comp_t(current->times.tms_stime); - ac.ac_uid = current->uid; - ac.ac_gid = current->gid; + ac.ac_uid = fs_high2lowuid(current->uid); + ac.ac_gid = fs_high2lowgid(current->gid); + ac.ac_uid32 = current->uid; + ac.ac_gid32 = current->gid; ac.ac_tty = (current->tty) ? 
kdev_t_to_nr(current->tty->device) : 0; ac.ac_flag = 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/capability.c linux.22-ac2/kernel/capability.c --- linux.vanilla/kernel/capability.c 2000-06-24 05:06:37.000000000 +0100 +++ linux.22-ac2/kernel/capability.c 2003-06-29 16:09:23.000000000 +0100 @@ -8,6 +8,8 @@ #include #include +unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */ + kernel_cap_t cap_bset = CAP_INIT_EFF_SET; /* Note: never hold tasklist_lock while spinning for this one */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/cpufreq.c linux.22-ac2/kernel/cpufreq.c --- linux.vanilla/kernel/cpufreq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/kernel/cpufreq.c 2003-08-28 22:20:06.000000000 +0100 @@ -0,0 +1,722 @@ +/* + * linux/kernel/cpufreq.c + * + * Copyright (C) 2001 Russell King + * (C) 2002 - 2003 Dominik Brodowski + * + * $Id: cpufreq.c,v 1.59 2003/01/20 17:31:48 db Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +/** + * The "cpufreq driver" - the arch- or hardware-dependend low + * level driver of CPUFreq support, and its spinlock. This lock + * also protects the cpufreq_cpu_data array. + */ +static struct cpufreq_driver *cpufreq_driver; +static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; +static spinlock_t cpufreq_driver_lock = SPIN_LOCK_UNLOCKED; + +/* internal prototype */ +static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); + + +/** + * Two notifier lists: the "policy" list is involved in the + * validation process for a new CPU frequency policy; the + * "transition" list for kernel code that needs to handle + * changes to devices when the CPU clock speed changes. + * The mutex locks both lists. + */ +static struct notifier_block *cpufreq_policy_notifier_list; +static struct notifier_block *cpufreq_transition_notifier_list; +static DECLARE_RWSEM (cpufreq_notifier_rwsem); + + +static LIST_HEAD(cpufreq_governor_list); +static DECLARE_MUTEX (cpufreq_governor_sem); + +/* + * backport info: + * we don't have a kobj we can use for ref-counting, so use a + * "unsigned int policy->use_count" and an "unload_sem" [idea from + * Pat Mochel's struct driver unload_sem] for proper reference counting. 
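The use_count/unload_sem scheme described in the backport note above boils down to the usual get/lock/operate/unlock/put discipline that cpufreq_driver_target() and friends follow further down. Condensed into a hypothetical helper, for illustration only:

/* Illustrative only, not part of the patch. */
static int example_get_cur_khz(unsigned int cpu, unsigned int *khz)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return -EINVAL;
	down(&policy->lock);
	*khz = policy->cur;
	up(&policy->lock);
	cpufreq_cpu_put(policy);	/* the last put releases unload_sem */
	return 0;
}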
+ */ + +static struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu) +{ + struct cpufreq_policy *data; + unsigned long flags; + + if (cpu >= NR_CPUS) + goto err_out; + + /* get the cpufreq driver */ + spin_lock_irqsave(&cpufreq_driver_lock, flags); + + if (!cpufreq_driver) + goto err_out_unlock; + + /* get the CPU */ + data = cpufreq_cpu_data[cpu]; + + if (!data) + goto err_out_unlock; + + if (!data->use_count) + goto err_out_unlock; + + data->use_count += 1; + + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + + return data; + + err_out_unlock: + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + err_out: + return NULL; +} + +static void cpufreq_cpu_put(struct cpufreq_policy *data) +{ + unsigned long flags; + + spin_lock_irqsave(&cpufreq_driver_lock, flags); + data->use_count -= 1; + if (!data->use_count) { + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + up(&data->unload_sem); + return; + } + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); +} + +/********************************************************************* + * SYSFS INTERFACE * + *********************************************************************/ + +/** + * cpufreq_parse_governor - parse a governor string + */ +int cpufreq_parse_governor (char *str_governor, unsigned int *policy, + struct cpufreq_governor **governor) +{ + if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { + *policy = CPUFREQ_POLICY_PERFORMANCE; + return 0; + } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { + *policy = CPUFREQ_POLICY_POWERSAVE; + return 0; + } else { + struct cpufreq_governor *t; + down(&cpufreq_governor_sem); + if (!cpufreq_driver || !cpufreq_driver->target) + goto out; + list_for_each_entry(t, &cpufreq_governor_list, governor_list) { + if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) { + *governor = t; + *policy = CPUFREQ_POLICY_GOVERNOR; + up(&cpufreq_governor_sem); + return 0; + } + } + out: + up(&cpufreq_governor_sem); + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cpufreq_parse_governor); + + +/* backport info: + * all the sysfs stuff is missing -- of course + */ + +/** + * cpufreq_add_dev - add a CPU device + * + * Adds the cpufreq interface for a CPU device. + */ +static int cpufreq_add_dev (unsigned int cpu) +{ + int ret = 0; + struct cpufreq_policy new_policy; + struct cpufreq_policy *policy; + unsigned long flags; + + policy = kmalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!policy) + return -ENOMEM; + memset(policy, 0, sizeof(struct cpufreq_policy)); + + policy->cpu = cpu; + policy->use_count = 1; + init_MUTEX_LOCKED(&policy->lock); + init_MUTEX_LOCKED(&policy->unload_sem); + + /* call driver. From then on the cpufreq must be able + * to accept all calls to ->verify and ->setpolicy for this CPU + */ + ret = cpufreq_driver->init(policy); + if (ret) + goto err_out; + + memcpy(&new_policy, policy, sizeof(struct cpufreq_policy)); + + spin_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_cpu_data[cpu] = policy; + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + + up(&policy->lock); + + /* set default policy */ + ret = cpufreq_set_policy(&new_policy); + if (ret) + goto err_out_unregister; + + return 0; + + + err_out_unregister: + spin_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_cpu_data[cpu] = NULL; + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + + err_out: + kfree(policy); + return ret; +} + + +/** + * cpufreq_remove_dev - remove a CPU device + * + * Removes the cpufreq interface for a CPU device. 
+ */ +static int cpufreq_remove_dev (unsigned int cpu) +{ + unsigned long flags; + struct cpufreq_policy *data; + + spin_lock_irqsave(&cpufreq_driver_lock, flags); + data = cpufreq_cpu_data[cpu]; + if (!data) { + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + return -EINVAL; + } + cpufreq_cpu_data[cpu] = NULL; + + data->use_count -= 1; + if (!data->use_count) { + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + up(&data->unload_sem); + } else { + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + /* this will sleep until data->use_count gets to zero */ + down(&data->unload_sem); + up(&data->unload_sem); + } + + if (cpufreq_driver->target) + __cpufreq_governor(data, CPUFREQ_GOV_STOP); + + if (cpufreq_driver->exit) + cpufreq_driver->exit(data); + + kfree(data); + + return 0; +} + + +/********************************************************************* + * NOTIFIER LISTS INTERFACE * + *********************************************************************/ + +/** + * cpufreq_register_notifier - register a driver with cpufreq + * @nb: notifier function to register + * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER + * + * Add a driver to one of two lists: either a list of drivers that + * are notified about clock rate changes (once before and once after + * the transition), or a list of drivers that are notified about + * changes in cpufreq policy. + * + * This function may sleep, and has the same return conditions as + * notifier_chain_register. + */ +int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) +{ + int ret; + + down_write(&cpufreq_notifier_rwsem); + switch (list) { + case CPUFREQ_TRANSITION_NOTIFIER: + ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb); + break; + case CPUFREQ_POLICY_NOTIFIER: + ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb); + break; + default: + ret = -EINVAL; + } + up_write(&cpufreq_notifier_rwsem); + + return ret; +} +EXPORT_SYMBOL(cpufreq_register_notifier); + + +/** + * cpufreq_unregister_notifier - unregister a driver with cpufreq + * @nb: notifier block to be unregistered + * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER + * + * Remove a driver from the CPU frequency notifier list. + * + * This function may sleep, and has the same return conditions as + * notifier_chain_unregister. 
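A typical client of the transition list registered through the function above is a driver whose timing constants depend on the CPU clock. The sketch below is illustrative: example_loops is a hypothetical driver-private value, and cpufreq_scale() is used the same way as in adjust_jiffies() further down.

/* Illustrative only, not part of the patch. */
static unsigned long example_loops;	/* hypothetical calibrated delay */

static int example_cpufreq_notify(struct notifier_block *nb,
				  unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		example_loops = cpufreq_scale(example_loops,
					      freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	notifier_call:	example_cpufreq_notify,
};

/* once, at driver init:
 *	cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */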
+ */ +int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) +{ + int ret; + + down_write(&cpufreq_notifier_rwsem); + switch (list) { + case CPUFREQ_TRANSITION_NOTIFIER: + ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb); + break; + case CPUFREQ_POLICY_NOTIFIER: + ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb); + break; + default: + ret = -EINVAL; + } + up_write(&cpufreq_notifier_rwsem); + + return ret; +} +EXPORT_SYMBOL(cpufreq_unregister_notifier); + + +/********************************************************************* + * GOVERNORS * + *********************************************************************/ + + +int __cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + return cpufreq_driver->target(policy, target_freq, relation); +} +EXPORT_SYMBOL_GPL(__cpufreq_driver_target); + + +int cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int ret; + + policy = cpufreq_cpu_get(policy->cpu); + if (!policy) + return -EINVAL; + + down(&policy->lock); + + ret = __cpufreq_driver_target(policy, target_freq, relation); + + up(&policy->lock); + + cpufreq_cpu_put(policy); + + return ret; +} +EXPORT_SYMBOL_GPL(cpufreq_driver_target); + + +static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) +{ + int ret = 0; + + switch (policy->policy) { + case CPUFREQ_POLICY_POWERSAVE: + if ((event == CPUFREQ_GOV_LIMITS) || (event == CPUFREQ_GOV_START)) { + ret = __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); + } + break; + case CPUFREQ_POLICY_PERFORMANCE: + if ((event == CPUFREQ_GOV_LIMITS) || (event == CPUFREQ_GOV_START)) { + ret = __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); + } + break; + case CPUFREQ_POLICY_GOVERNOR: + ret = policy->governor->governor(policy, event); + break; + default: + ret = -EINVAL; + } + + return ret; +} + + +int cpufreq_governor(unsigned int cpu, unsigned int event) +{ + int ret = 0; + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + + if (!policy) + return -EINVAL; + + down(&policy->lock); + ret = __cpufreq_governor(policy, event); + up(&policy->lock); + + cpufreq_cpu_put(policy); + + return ret; +} +EXPORT_SYMBOL_GPL(cpufreq_governor); + + +int cpufreq_register_governor(struct cpufreq_governor *governor) +{ + struct cpufreq_governor *t; + + if (!governor) + return -EINVAL; + + if (!strnicmp(governor->name,"powersave",CPUFREQ_NAME_LEN)) + return -EBUSY; + if (!strnicmp(governor->name,"performance",CPUFREQ_NAME_LEN)) + return -EBUSY; + + down(&cpufreq_governor_sem); + + list_for_each_entry(t, &cpufreq_governor_list, governor_list) { + if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) { + up(&cpufreq_governor_sem); + return -EBUSY; + } + } + list_add(&governor->governor_list, &cpufreq_governor_list); + + up(&cpufreq_governor_sem); + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_register_governor); + + +void cpufreq_unregister_governor(struct cpufreq_governor *governor) +{ + /* backport info: + * As the module usage count isn't assured in 2.4., check for removal + * of running cpufreq governor + */ + unsigned int i; + + if (!governor) + return; + + down(&cpufreq_governor_sem); + + for (i=0; ilock); + + if (policy->policy != CPUFREQ_POLICY_GOVERNOR) + goto unlock_done; + if (policy->governor != governor) + goto unlock_done; + + /* stop old one, start performance [always present] */ + __cpufreq_governor(policy, 
CPUFREQ_GOV_STOP); + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + __cpufreq_governor(policy, CPUFREQ_GOV_START); + + unlock_done: + up(&policy->lock); + done: + cpufreq_cpu_put(policy); + } + list_del(&governor->governor_list); + up(&cpufreq_governor_sem); + return; +} +EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); + + + +/********************************************************************* + * POLICY INTERFACE * + *********************************************************************/ + +/** + * cpufreq_get_policy - get the current cpufreq_policy + * @policy: struct cpufreq_policy into which the current cpufreq_policy is written + * + * Reads the current cpufreq policy. + */ +int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) +{ + struct cpufreq_policy *cpu_policy; + if (!policy) + return -EINVAL; + + cpu_policy = cpufreq_cpu_get(cpu); + if (!cpu_policy) + return -EINVAL; + + down(&cpu_policy->lock); + memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); + up(&cpu_policy->lock); + + cpufreq_cpu_put(cpu_policy); + + return 0; +} +EXPORT_SYMBOL(cpufreq_get_policy); + + +/** + * cpufreq_set_policy - set a new CPUFreq policy + * @policy: policy to be set. + * + * Sets a new CPU frequency and voltage scaling policy. + */ +int cpufreq_set_policy(struct cpufreq_policy *policy) +{ + int ret = 0; + struct cpufreq_policy *data; + + if (!policy) + return -EINVAL; + + data = cpufreq_cpu_get(policy->cpu); + if (!data) + return -EINVAL; + + /* lock this CPU */ + down(&data->lock); + + memcpy(&policy->cpuinfo, + &data->cpuinfo, + sizeof(struct cpufreq_cpuinfo)); + + /* verify the cpu speed can be set within this limit */ + ret = cpufreq_driver->verify(policy); + if (ret) + goto error_out; + + down_read(&cpufreq_notifier_rwsem); + + /* adjust if necessary - all reasons */ + notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST, + policy); + + /* adjust if necessary - hardware incompatibility*/ + notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE, + policy); + + /* verify the cpu speed can be set within this limit, + which might be different to the first one */ + ret = cpufreq_driver->verify(policy); + if (ret) { + up_read(&cpufreq_notifier_rwsem); + goto error_out; + } + + /* notification of the new policy */ + notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY, + policy); + + up_read(&cpufreq_notifier_rwsem); + + data->min = policy->min; + data->max = policy->max; + + if (cpufreq_driver->setpolicy) { + data->policy = policy->policy; + ret = cpufreq_driver->setpolicy(policy); + } else { + if ((policy->policy != data->policy) || + ((policy->policy == CPUFREQ_POLICY_GOVERNOR) && (policy->governor != data->governor))) { + /* save old, working values */ + unsigned int old_pol = data->policy; + struct cpufreq_governor *old_gov = data->governor; + + /* end old governor */ + __cpufreq_governor(data, CPUFREQ_GOV_STOP); + + /* start new governor */ + data->policy = policy->policy; + data->governor = policy->governor; + if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { + /* new governor failed, so re-start old one */ + data->policy = old_pol; + data->governor = old_gov; + __cpufreq_governor(data, CPUFREQ_GOV_START); + } + /* might be a policy change, too, so fall through */ + } + __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); + } + + error_out: + up(&data->lock); + cpufreq_cpu_put(data); + + return ret; +} +EXPORT_SYMBOL(cpufreq_set_policy); + + + +/********************************************************************* + * EXTERNALLY 
AFFECTING FREQUENCY CHANGES * + *********************************************************************/ + +/** + * adjust_jiffies - adjust the system "loops_per_jiffy" + * + * This function alters the system "loops_per_jiffy" for the clock + * speed change. Note that loops_per_jiffy cannot be updated on SMP + * systems as each CPU might be scaled differently. So, use the arch + * per-CPU loops_per_jiffy value wherever possible. + */ +#ifndef CONFIG_SMP +static unsigned long l_p_j_ref = 0; +static unsigned int l_p_j_ref_freq = 0; + +static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) +{ + if (!l_p_j_ref_freq) { + l_p_j_ref = loops_per_jiffy; + l_p_j_ref_freq = ci->old; + } + if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || + (val == CPUFREQ_POSTCHANGE && ci->old > ci->new)) + loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); +} +#else +#define adjust_jiffies(x...) do {} while (0) +#endif + + +/** + * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition + * + * This function calls the transition notifiers and the "adjust_jiffies" function. It is called + * twice on all CPU frequency changes that have external effects. + */ +void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) +{ + down_read(&cpufreq_notifier_rwsem); + switch (state) { + case CPUFREQ_PRECHANGE: + notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs); + adjust_jiffies(CPUFREQ_PRECHANGE, freqs); + break; + case CPUFREQ_POSTCHANGE: + adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); + notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); + cpufreq_cpu_data[freqs->cpu]->cur = freqs->new; + break; + } + up_read(&cpufreq_notifier_rwsem); +} +EXPORT_SYMBOL_GPL(cpufreq_notify_transition); + + + +/********************************************************************* + * REGISTER / UNREGISTER CPUFREQ DRIVER * + *********************************************************************/ + +/** + * cpufreq_register_driver - register a CPU Frequency driver + * @driver_data: A struct cpufreq_driver containing the values# + * submitted by the CPU Frequency driver. + * + * Registers a CPU Frequency driver to this core code. This code + * returns zero on success, -EBUSY when another driver got here first + * (and isn't unregistered in the meantime). + * + */ +int cpufreq_register_driver(struct cpufreq_driver *driver_data) +{ + unsigned long flags; + unsigned int i; + + if (!driver_data || !driver_data->verify || !driver_data->init || + ((!driver_data->setpolicy) && (!driver_data->target))) + return -EINVAL; + + spin_lock_irqsave(&cpufreq_driver_lock, flags); + if (cpufreq_driver) { + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + return -EBUSY; + } + cpufreq_driver = driver_data; + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + + for (i=0; iuser->processes); - free_uid(p->user); - unhash_process(p); - - release_thread(p); - current->cmin_flt += p->min_flt + p->cmin_flt; - current->cmaj_flt += p->maj_flt + p->cmaj_flt; - current->cnswap += p->nswap + p->cnswap; - /* - * Potentially available timeslices are retrieved - * here - this way the parent does not get penalized - * for creating too many processes. - * - * (this cannot be used to artificially 'generate' - * timeslices, because any timeslice recovered here - * was given away by the parent in the first place.) 
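Looking back at cpufreq_register_driver() above: the smallest useful low-level driver supplies ->init, ->verify and one of ->setpolicy/->target. A hypothetical skeleton, limited to the fields the core visibly calls, is sketched here for illustration only.

/* Illustrative only, not part of the patch.  All example_* symbols are
 * hypothetical. */
static int example_verify(struct cpufreq_policy *policy)
{
	/* clamp policy->min/max to what the hardware can actually do */
	return 0;
}

static int example_setpolicy(struct cpufreq_policy *policy)
{
	/* program the highest or lowest speed within policy->min..max,
	 * depending on policy->policy */
	return 0;
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* fill in policy->cpuinfo and the initial limits for this CPU */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	verify:		example_verify,
	setpolicy:	example_setpolicy,
	init:		example_cpu_init,
};

/* at module init:  cpufreq_register_driver(&example_cpufreq_driver);  */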
- */ - current->counter += p->counter; - if (current->counter >= MAX_COUNTER) - current->counter = MAX_COUNTER; - p->pid = 0; - free_task_struct(p); - } else { - printk("task releasing itself\n"); - } + atomic_dec(&p->user->processes); + free_uid(p->user); + unhash_process(p); + + release_thread(p); + current->cmin_flt += p->min_flt + p->cmin_flt; + current->cmaj_flt += p->maj_flt + p->cmaj_flt; + current->cnswap += p->nswap + p->cnswap; + sched_exit(p); + p->pid = 0; + free_task_struct(p); } /* @@ -150,6 +123,79 @@ return retval; } +/** + * reparent_to_init() - Reparent the calling kernel thread to the init task. + * + * If a kernel thread is launched as a result of a system call, or if + * it ever exits, it should generally reparent itself to init so that + * it is correctly cleaned up on exit. + * + * The various task state such as scheduling policy and priority may have + * been inherited from a user process, so we reset them to sane values here. + * + * NOTE that reparent_to_init() gives the caller full capabilities. + */ +void reparent_to_init(void) +{ + write_lock_irq(&tasklist_lock); + + /* Reparent to init */ + REMOVE_LINKS(current); + current->p_pptr = child_reaper; + current->p_opptr = child_reaper; + SET_LINKS(current); + + /* Set the exit signal to SIGCHLD so we signal init on exit */ + current->exit_signal = SIGCHLD; + + current->ptrace = 0; + if ((current->policy == SCHED_OTHER) && (task_nice(current) < 0)) + set_user_nice(current, 0); + /* cpus_allowed? */ + /* rt_priority? */ + /* signals? */ + current->cap_effective = CAP_INIT_EFF_SET; + current->cap_inheritable = CAP_INIT_INH_SET; + current->cap_permitted = CAP_FULL_SET; + current->keep_capabilities = 0; + memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim))); + current->user = INIT_USER; + + write_unlock_irq(&tasklist_lock); +} + +/* + * Put all the gunge required to become a kernel thread without + * attached user resources in one place where it belongs. + */ + +void daemonize(void) +{ + struct fs_struct *fs; + + + /* + * If we were started as result of loading a module, close all of the + * user space pages. We don't need them, and if we didn't close them + * they would be locked into memory. + */ + exit_mm(current); + + current->session = 1; + current->pgrp = 1; + current->tty = NULL; + + /* Become as one with the init task */ + + exit_fs(current); /* current->fs->count--; */ + fs = init_task.fs; + current->fs = fs; + atomic_inc(&fs->count); + exit_files(current); + current->files = init_task.files; + atomic_inc(¤t->files->count); +} + /* * When we die, we re-parent all our children. * Try to give them to another thread in our thread diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/fork.c linux.22-ac2/kernel/fork.c --- linux.vanilla/kernel/fork.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/kernel/fork.c 2003-07-14 12:54:06.000000000 +0100 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -31,7 +32,6 @@ /* The idle threads do not count.. */ int nr_threads; -int nr_running; int max_threads; unsigned long total_forks; /* Handle normal Linux uptimes. 
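The reparent_to_init() and daemonize() helpers added to kernel/exit.c above are meant to be called by kernel threads that were spawned on behalf of a user process. A minimal sketch of the usual 2.4-era calling pattern follows; the thread function, its name and the clone flags are illustrative only and not part of this patch, and whether a given driver also calls reparent_to_init() is the caller's choice.

#include <linux/sched.h>
#include <linux/string.h>

static int my_kthread(void *unused)
{
        /* Shed inherited user-space state (mm, files, fs, tty, session). */
        daemonize();
        /* Optionally also reparent to init and reset priority/capabilities,
         * as the new helper above does. */
        reparent_to_init();
        strcpy(current->comm, "my_kthread");

        for (;;) {
                /* ... periodic work goes here ... */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
                if (signal_pending(current))
                        break;
        }
        return 0;
}

/* started from module/init code, for example:
 *      kernel_thread(my_kthread, NULL, CLONE_FS | CLONE_FILES);
 */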
*/ @@ -39,6 +39,8 @@ struct task_struct *pidhash[PIDHASH_SZ]; +rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */ + void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) { unsigned long flags; @@ -114,8 +116,10 @@ last_pid = 300; next_safe = PID_MAX; } - if(unlikely(last_pid == beginpid)) + if(unlikely(last_pid == beginpid)) { + next_safe = 0; goto nomorepids; + } goto repeat; } if(p->pid > last_pid && next_safe > p->pid) @@ -144,6 +148,7 @@ { struct vm_area_struct * mpnt, *tmp, **pprev; int retval; + unsigned long charge = 0; flush_cache_mm(current->mm); mm->locked_vm = 0; @@ -172,6 +177,12 @@ retval = -ENOMEM; if(mpnt->vm_flags & VM_DONTCOPY) continue; + if(mpnt->vm_flags & VM_ACCOUNT) { + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; + if(!vm_enough_memory(len)) + goto fail_nomem; + charge += len; + } tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!tmp) goto fail_nomem; @@ -215,10 +226,12 @@ } retval = 0; build_mmap_rb(mm); - -fail_nomem: +out: flush_tlb_mm(current->mm); return retval; +fail_nomem: + vm_unacct_memory(charge); + goto out; } spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; @@ -636,6 +649,7 @@ struct pt_regs *regs, unsigned long stack_size) { int retval; + unsigned long flags; struct task_struct *p; struct completion vfork; @@ -696,8 +710,7 @@ if (p->pid == 0 && current->pid != 0) goto bad_fork_cleanup; - p->run_list.next = NULL; - p->run_list.prev = NULL; + INIT_LIST_HEAD(&p->run_list); p->p_cptr = NULL; init_waitqueue_head(&p->wait_chldexit); @@ -723,14 +736,15 @@ #ifdef CONFIG_SMP { int i; - p->cpus_runnable = ~0UL; - p->processor = current->processor; + /* ?? should we just memset this ?? */ for(i = 0; i < smp_num_cpus; i++) - p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0; + p->per_cpu_utime[cpu_logical_map(i)] = + p->per_cpu_stime[cpu_logical_map(i)] = 0; spin_lock_init(&p->sigmask_lock); } #endif + p->array = NULL; p->lock_depth = -1; /* -1 = no lock */ p->start_time = jiffies; @@ -764,15 +778,27 @@ p->pdeath_signal = 0; /* - * "share" dynamic priority between parent and child, thus the - * total amount of dynamic priorities in the system doesn't change, - * more scheduling fairness. This is only important in the first - * timeslice, on the long run the scheduling behaviour is unchanged. - */ - p->counter = (current->counter + 1) >> 1; - current->counter >>= 1; - if (!current->counter) - current->need_resched = 1; + * Share the timeslice between parent and child, thus the + * total amount of pending timeslices in the system doesnt change, + * resulting in more scheduling fairness. + */ + __save_flags(flags); + __cli(); + if (!current->time_slice) + BUG(); + p->time_slice = (current->time_slice + 1) >> 1; + current->time_slice >>= 1; + if (!current->time_slice) { + /* + * This case is rare, it happens when the parent has only + * a single jiffy left from its timeslice. Taking the + * runqueue lock is not a problem. + */ + current->time_slice = 1; + scheduler_tick(0,0); + } + p->sleep_timestamp = jiffies; + __restore_flags(flags); /* * Ok, add it to the run-queues and make it @@ -808,11 +834,16 @@ if (p->ptrace & PT_PTRACED) send_sig(SIGSTOP, p, 1); - - wake_up_process(p); /* do this last */ + wake_up_forked_process(p); /* do this last */ ++total_forks; if (clone_flags & CLONE_VFORK) wait_for_completion(&vfork); + else + /* + * Let the child process run first, to avoid most of the + * COW overhead when the child exec()s afterwards. 
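The fork-path change above replaces the old counter halving with an explicit split of the parent's remaining time_slice: the child gets half (rounded up), the parent keeps the other half, and a parent that is down to its last jiffy keeps that single jiffy and is accounted via scheduler_tick(). A small stand-alone sketch of just that arithmetic (the helper below is illustrative, not part of the patch):

#include <stdio.h>

/* Mirrors the time_slice split done at fork time in the hunk above. */
static void split_timeslice(unsigned int parent_ticks)
{
        unsigned int child = (parent_ticks + 1) >> 1;
        unsigned int parent = parent_ticks >> 1;

        if (parent == 0)
                parent = 1;     /* the rare single-jiffy case handled above */

        printf("parent had %2u ticks -> child gets %2u, parent keeps %2u\n",
               parent_ticks, child, parent);
}

int main(void)
{
        split_timeslice(31);    /* child 16, parent 15 */
        split_timeslice(2);     /* child 1,  parent 1  */
        split_timeslice(1);     /* child 1,  parent 1 (after the fixup) */
        return 0;
}

Because slices are split rather than refilled, the total amount of pending timeslices in the system is unchanged by fork, which is exactly the fairness property the comment above is after.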
+ */ + current->need_resched = 1; fork_out: return retval; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/kmod.c linux.22-ac2/kernel/kmod.c --- linux.vanilla/kernel/kmod.c 2002-08-03 16:08:32.000000000 +0100 +++ linux.22-ac2/kernel/kmod.c 2003-06-29 16:09:23.000000000 +0100 @@ -119,15 +119,8 @@ if (curtask->files->fd[i]) close(i); } - /* Drop the "current user" thing */ - { - struct user_struct *user = curtask->user; - curtask->user = INIT_USER; - atomic_inc(&INIT_USER->__count); - atomic_inc(&INIT_USER->processes); - atomic_dec(&user->processes); - free_uid(user); - } + /* Become root */ + set_user(0, 0); /* Give kmod all effective privileges.. */ curtask->euid = curtask->fsuid = 0; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/ksyms.c linux.22-ac2/kernel/ksyms.c --- linux.vanilla/kernel/ksyms.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/kernel/ksyms.c 2003-07-24 22:51:59.000000000 +0100 @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -68,6 +69,7 @@ extern spinlock_t dma_spin_lock; extern int panic_timeout; + #ifdef CONFIG_MODVERSIONS const struct module_symbol __export_Using_Versions __attribute__((section("__ksymtab"))) = { @@ -91,6 +93,7 @@ EXPORT_SYMBOL(exit_files); EXPORT_SYMBOL(exit_fs); EXPORT_SYMBOL(exit_sighand); +EXPORT_SYMBOL(unshare_files); /* internal kernel memory management */ EXPORT_SYMBOL(_alloc_pages); @@ -112,6 +115,7 @@ EXPORT_SYMBOL(kfree); EXPORT_SYMBOL(vfree); EXPORT_SYMBOL(__vmalloc); +EXPORT_SYMBOL(vcalloc); EXPORT_SYMBOL(vmap); EXPORT_SYMBOL(vmalloc_to_page); EXPORT_SYMBOL(mem_map); @@ -143,13 +147,15 @@ EXPORT_SYMBOL(fget); EXPORT_SYMBOL(igrab); EXPORT_SYMBOL(iunique); -EXPORT_SYMBOL(iget4); +EXPORT_SYMBOL(iget4_locked); +EXPORT_SYMBOL(unlock_new_inode); EXPORT_SYMBOL(iput); EXPORT_SYMBOL(inode_init_once); EXPORT_SYMBOL(force_delete); EXPORT_SYMBOL(follow_up); EXPORT_SYMBOL(follow_down); EXPORT_SYMBOL(lookup_mnt); +EXPORT_SYMBOL(path_lookup); EXPORT_SYMBOL(path_init); EXPORT_SYMBOL(path_walk); EXPORT_SYMBOL(path_release); @@ -225,6 +231,7 @@ EXPORT_SYMBOL(generic_file_read); EXPORT_SYMBOL(do_generic_file_read); EXPORT_SYMBOL(generic_file_write); +EXPORT_SYMBOL(do_generic_file_write); EXPORT_SYMBOL(generic_file_mmap); EXPORT_SYMBOL(generic_ro_fops); EXPORT_SYMBOL(generic_buffer_fdatasync); @@ -249,6 +256,7 @@ EXPORT_SYMBOL(find_inode_number); EXPORT_SYMBOL(is_subdir); EXPORT_SYMBOL(get_unused_fd); +EXPORT_SYMBOL(put_unused_fd); EXPORT_SYMBOL(vfs_create); EXPORT_SYMBOL(vfs_mkdir); EXPORT_SYMBOL(vfs_mknod); @@ -270,7 +278,6 @@ EXPORT_SYMBOL(grab_cache_page_nowait); EXPORT_SYMBOL(read_cache_page); EXPORT_SYMBOL(set_page_dirty); -EXPORT_SYMBOL(mark_page_accessed); EXPORT_SYMBOL(vfs_readlink); EXPORT_SYMBOL(vfs_follow_link); EXPORT_SYMBOL(page_readlink); @@ -449,7 +456,9 @@ /* process management */ EXPORT_SYMBOL(complete_and_exit); EXPORT_SYMBOL(__wake_up); -EXPORT_SYMBOL(__wake_up_sync); +#if CONFIG_SMP +EXPORT_SYMBOL_GPL(__wake_up_sync); /* internal use only */ +#endif EXPORT_SYMBOL(wake_up_process); EXPORT_SYMBOL(sleep_on); EXPORT_SYMBOL(sleep_on_timeout); @@ -457,11 +466,14 @@ EXPORT_SYMBOL(interruptible_sleep_on_timeout); EXPORT_SYMBOL(schedule); EXPORT_SYMBOL(schedule_timeout); -#if CONFIG_SMP -EXPORT_SYMBOL(set_cpus_allowed); -#endif EXPORT_SYMBOL(yield); EXPORT_SYMBOL(__cond_resched); +EXPORT_SYMBOL(set_user_nice); +EXPORT_SYMBOL(task_nice); +EXPORT_SYMBOL_GPL(idle_cpu); +#ifdef CONFIG_SMP +EXPORT_SYMBOL_GPL(set_cpus_allowed); +#endif 
EXPORT_SYMBOL(jiffies); EXPORT_SYMBOL(xtime); EXPORT_SYMBOL(do_gettimeofday); @@ -472,7 +484,7 @@ #endif EXPORT_SYMBOL(kstat); -EXPORT_SYMBOL(nr_running); +EXPORT_SYMBOL(nr_context_switches); /* misc */ EXPORT_SYMBOL(panic); @@ -513,6 +525,8 @@ EXPORT_SYMBOL(seq_release); EXPORT_SYMBOL(seq_read); EXPORT_SYMBOL(seq_lseek); +EXPORT_SYMBOL(single_open); +EXPORT_SYMBOL(single_release); /* Program loader interfaces */ EXPORT_SYMBOL(setup_arg_pages); @@ -592,7 +606,6 @@ EXPORT_SYMBOL(tasklist_lock); EXPORT_SYMBOL(pidhash); -EXPORT_SYMBOL(unshare_files); /* debug */ EXPORT_SYMBOL(dump_stack); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/Makefile linux.22-ac2/kernel/Makefile --- linux.vanilla/kernel/Makefile 2001-09-17 05:22:40.000000000 +0100 +++ linux.22-ac2/kernel/Makefile 2003-06-29 16:09:23.000000000 +0100 @@ -9,7 +9,7 @@ O_TARGET := kernel.o -export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o printk.o +export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o printk.o cpufreq.o obj-y = sched.o dma.o fork.o exec_domain.o panic.o printk.o \ module.o exit.o itimer.o info.o time.o softirq.o resource.o \ @@ -19,6 +19,8 @@ obj-$(CONFIG_UID16) += uid16.o obj-$(CONFIG_MODULES) += ksyms.o obj-$(CONFIG_PM) += pm.o +obj-$(CONFIG_CPU_FREQ) += cpufreq.o +obj-$(CONFIG_IKCONFIG) += configs.o ifneq ($(CONFIG_IA64),y) # According to Alan Modra , the -fno-omit-frame-pointer is @@ -26,7 +28,21 @@ # me. I suspect most platforms don't need this, but until we know that for sure # I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k # to get a correct value for the wait-channel (WCHAN in ps). --davidm +# +# Some gcc's are building so that O(1) scheduler is triple faulting if we +# build -O2. (Turns out to be a CPU issue. 
Update your microcode if you hit it) +# CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer endif include $(TOPDIR)/Rules.make + +configs.o: $(TOPDIR)/scripts/mkconfigs configs.c + $(CC) $(CFLAGS) $(CFLAGS_KERNEL) -DEXPORT_SYMTAB -c -o configs.o configs.c + +$(TOPDIR)/scripts/mkconfigs: $(TOPDIR)/scripts/mkconfigs.c + $(HOSTCC) $(HOSTCFLAGS) -o $(TOPDIR)/scripts/mkconfigs $(TOPDIR)/scripts/mkconfigs.c + +configs.c: $(TOPDIR)/.config $(TOPDIR)/scripts/mkconfigs + $(TOPDIR)/scripts/mkconfigs $(TOPDIR)/.config configs.c + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/module.c linux.22-ac2/kernel/module.c --- linux.vanilla/kernel/module.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/kernel/module.c 2003-09-01 13:12:39.000000000 +0100 @@ -40,6 +40,9 @@ extern const char __start___kallsyms[] __attribute__ ((weak)); extern const char __stop___kallsyms[] __attribute__ ((weak)); + +extern int tainted; + struct module kernel_module = { size_of_struct: sizeof(struct module), @@ -311,6 +314,12 @@ error = -EEXIST; goto err1; } + + /* the sii6512 module by accident had an incorrect MODULE_LICENSE; fix that */ + if (strstr(name, "sii6512")) { + tainted |= 1; + printk(KERN_ERR "Loading proprietary sii6512 software raid module; tainting kernel\n"); + } if ((mod = (struct module *)module_map(size)) == NULL) { error = -ENOMEM; goto err1; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/panic.c linux.22-ac2/kernel/panic.c --- linux.vanilla/kernel/panic.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/kernel/panic.c 2003-06-29 16:09:23.000000000 +0100 @@ -16,6 +16,8 @@ #include #include #include +#include +#include asmlinkage void sys_sync(void); /* it's really int */ @@ -28,9 +30,155 @@ panic_timeout = simple_strtoul(str, NULL, 0); return 1; } - __setup("panic=", panic_setup); + +#if (defined(CONFIG_X86) && defined(CONFIG_VT)) || defined(CONFIG_PC_KEYB) +#define do_blink(x) pckbd_blink(x) +#else +#define do_blink(x) 0 +#endif + +#ifdef CONFIG_PANIC_MORSE + +static int blink_setting = 1; + +static const unsigned char morsetable[] = { + 0122, 0, 0310, 0, 0, 0163, /* "#$%&' */ + 055, 0155, 0, 0, 0163, 0141, 0152, 0051, /* ()*+,-./ */ + 077, 076, 074, 070, 060, 040, 041, 043, 047, 057, /* 0-9 */ + 0107, 0125, 0, 0061, 0, 0114, 0, /* :;<=>?@ */ + 006, 021, 025, 011, 002, 024, 013, 020, 004, /* A-I */ + 036, 015, 022, 007, 005, 017, 026, 033, 012, /* J-R */ + 010, 003, 014, 030, 016, 031, 035, 023, /* S-Z */ + 0, 0, 0, 0, 0154 /* [\]^_ */ +}; + +__inline__ unsigned char tomorse(char c) { + if (c >= 'a' && c <= 'z') + c = c - 'a' + 'A'; + if (c >= '"' && c <= '_') { + return morsetable[c - '"']; + } else + return 0; +} + + +#define DITLEN (HZ / 5) +#define DAHLEN 3 * DITLEN +#define SPACELEN 7 * DITLEN + +#define FREQ 844 + +/* Tell the user who may be running in X and not see the console that we have + panic'ed. This is to distinguish panics from "real" lockups. + Could in theory send the panic message as morse, but that is left as an + exercise for the reader. + And now it's done! LED and speaker morse code by Andrew Rodland + , with improvements based on suggestions from + linux@horizon.com and a host of others. 
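The morsetable[] encoding above packs each character into a single byte: bits are consumed least-significant first, a 1 bit is a dah and a 0 bit a dit, and the highest set bit only terminates the pattern (panic_blink() keeps shifting while the value is greater than 1). A stand-alone decoder using the same convention, handy for checking table entries (this program is illustrative and not part of the patch):

#include <stdio.h>

/* Same bit-walk as panic_blink(): LSB first, stop at the terminator bit. */
static void print_morse(unsigned char code)
{
        while (code > 1) {
                putchar((code & 1) ? '-' : '.');
                code >>= 1;
        }
        putchar('\n');
}

int main(void)
{
        print_morse(010);       /* table entry for 'S': ... */
        print_morse(017);       /* table entry for 'O': --- */
        print_morse(006);       /* table entry for 'A': .-  */
        return 0;
}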
+*/ + +void panic_blink(char *buf) +{ + static unsigned long next_jiffie = 0; + static char * bufpos = 0; + static unsigned char morse = 0; + static char state = 1; + + if (!blink_setting) + return; + + if (!buf) + buf="Panic lost?"; + + + if (bufpos && time_after (next_jiffie, jiffies)) { + return; /* Waiting for something. */ + } + + if (state) { /* Coming off of a blink. */ + if (blink_setting & 0x01) + do_blink(0); + + state = 0; + + if(morse > 1) { /* Not done yet, just a one-dit pause. */ + next_jiffie = jiffies + DITLEN; + } else { /* Get a new char, and figure out how much space. */ + + if(!bufpos) + bufpos = (char *)buf; /* First time through */ + + if(!*bufpos) { + bufpos = (char *)buf; /* Repeating */ + next_jiffie = jiffies + SPACELEN; + } else { + /* Inter-letter space */ + next_jiffie = jiffies + DAHLEN; + } + + if (! (morse = tomorse(*bufpos) )) { + next_jiffie = jiffies + SPACELEN; + state = 1; /* And get us back here */ + } + bufpos ++; + } + } else { /* Starting a new blink. We have valid code in morse. */ + int len; + + len = (morse & 001) ? DAHLEN : DITLEN; + + if (blink_setting & 0x02) + kd_mksound(FREQ, len); + + next_jiffie = jiffies + len; + + if (blink_setting & 0x01) + do_blink(1); + state = 1; + morse >>= 1; + } +} + +#else /* CONFIG_PANIC_MORSE */ + +static int blink_setting = HZ / 2; /* Over here, it's jiffies between blinks. */ + +/* This is the "original" 2.4-ac panic_blink, rewritten to use my + * sorta-arch-independent do_blink stuff. + */ +void panic_blink(char *buf) { + static char state = 0; + static unsigned long next_jiffie = 0; + + if (!blink_setting) + return; + + if (jiffies >= next_jiffie) { + state ^= 1; + do_blink(state); + next_jiffie = jiffies + blink_setting; + } + + return; +} + +#endif /* CONFIG_PANIC_MORSE */ + +static int __init panicblink_setup(char *str) +{ + int par; + if (get_option(&str,&par)) + blink_setting = par; + return 1; +} + +/* panicblink=0 disables the blinking as it caused problems with some console + switches. */ +__setup("panicblink=", panicblink_setup); + + /** * panic - halt the system * @fmt: The text string to print @@ -96,10 +244,7 @@ #endif sti(); for(;;) { -#if defined(CONFIG_X86) && defined(CONFIG_VT) - extern void panic_blink(void); - panic_blink(); -#endif + panic_blink(buf); CHECK_EMERGENCY_SYNC } } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/printk.c linux.22-ac2/kernel/printk.c --- linux.vanilla/kernel/printk.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/kernel/printk.c 2003-06-29 16:09:23.000000000 +0100 @@ -26,6 +26,7 @@ #include #include /* For in_interrupt() */ #include +#include #include @@ -529,6 +530,7 @@ if (must_wake_klogd && !oops_in_progress) wake_up_interruptible(&log_wait); } +EXPORT_SYMBOL(release_console_sem); /** console_conditional_schedule - yield the CPU if required * diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/ptrace.c linux.22-ac2/kernel/ptrace.c --- linux.vanilla/kernel/ptrace.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/kernel/ptrace.c 2003-08-28 17:09:23.000000000 +0100 @@ -32,20 +32,7 @@ if (child->state != TASK_STOPPED) return -ESRCH; #ifdef CONFIG_SMP - /* Make sure the child gets off its CPU.. 
*/ - for (;;) { - task_lock(child); - if (!task_has_cpu(child)) - break; - task_unlock(child); - do { - if (child->state != TASK_STOPPED) - return -ESRCH; - barrier(); - cpu_relax(); - } while (task_has_cpu(child)); - } - task_unlock(child); + wait_task_inactive(child); #endif } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/sched.c linux.22-ac2/kernel/sched.c --- linux.vanilla/kernel/sched.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/kernel/sched.c 2003-07-31 14:29:58.000000000 +0100 @@ -1,760 +1,1052 @@ /* - * linux/kernel/sched.c + * kernel/sched.c * * Kernel scheduler and related syscalls * - * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1991-2002 Linus Torvalds * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and * make semaphores SMP safe * 1998-11-19 Implemented schedule_timeout() and related stuff * by Andrea Arcangeli - * 1998-12-28 Implemented better SMP scheduling by Ingo Molnar + * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: + * hybrid priority-list and round-robin design with + * an array-switch method of distributing timeslices + * and per-CPU runqueues. Additional code by Davide + * Libenzi, Robert Love, and Rusty Russell. */ -/* - * 'sched.c' is the main kernel file. It contains scheduling primitives - * (sleep_on, wakeup, schedule etc) as well as a number of simple system - * call functions (type getpid()), which just extract a field from - * current-task - */ - -#include #include +#include #include +#include +#include #include -#include +#include #include -#include #include -#include -#include +#include -#include -#include +/* + * Convert user-nice values [ -20 ... 0 ... 19 ] + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], + * and back. + */ +#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) +#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) +#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) -extern void timer_bh(void); -extern void tqueue_bh(void); -extern void immediate_bh(void); +/* + * 'User priority' is the nice value converted to something we + * can work with better when scaling various scheduler parameters, + * it's a [ 0 ... 39 ] range. + */ +#define USER_PRIO(p) ((p)-MAX_RT_PRIO) +#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) +#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) /* - * scheduler variables - */ + * These are the 'tuning knobs' of the scheduler: + * + * Minimum timeslice is 10 msecs, default timeslice is 150 msecs, + * maximum timeslice is 300 msecs. Timeslices get refilled after + * they expire. + */ +#define MIN_TIMESLICE ( 10 * HZ / 1000) +#define MAX_TIMESLICE (300 * HZ / 1000) +#define CHILD_PENALTY 95 +#define PARENT_PENALTY 100 +#define EXIT_WEIGHT 3 +#define PRIO_BONUS_RATIO 25 +#define INTERACTIVE_DELTA 2 +#define MAX_SLEEP_AVG (2*HZ) +#define STARVATION_LIMIT (2*HZ) -unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */ +/* + * If a task is 'interactive' then we reinsert it in the active + * array after it has expired its current timeslice. (it will not + * continue to run immediately, it will still roundrobin with + * other interactive tasks.) + * + * This part scales the interactivity limit depending on niceness. + * + * We scale it linearly, offset by the INTERACTIVE_DELTA delta. 
+ * Here are a few examples of different nice levels: + * + * TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0] + * TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0] + * TASK_INTERACTIVE( 0): [1,1,1,1,0,0,0,0,0,0,0] + * TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0] + * TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0] + * + * (the X axis represents the possible -5 ... 0 ... +5 dynamic + * priority range a task can explore, a value of '1' means the + * task is rated interactive.) + * + * Ie. nice +19 tasks can never get 'interactive' enough to be + * reinserted into the active array. And only heavily CPU-hog nice -20 + * tasks will be expired. Default nice 0 tasks are somewhere between, + * it takes some effort for them to get interactive, but it's not + * too hard. + */ + +#define SCALE(v1,v1_max,v2_max) \ + (v1) * (v2_max) / (v1_max) + +#define DELTA(p) \ + (SCALE(TASK_NICE(p), 40, MAX_USER_PRIO*PRIO_BONUS_RATIO/100) + \ + INTERACTIVE_DELTA) -extern void mem_use(void); +#define TASK_INTERACTIVE(p) \ + ((p)->prio <= (p)->static_prio - DELTA(p)) /* - * Scheduling quanta. + * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ] + * to time slice values. * - * NOTE! The unix "nice" value influences how long a process - * gets. The nice value ranges from -20 to +19, where a -20 - * is a "high-priority" task, and a "+10" is a low-priority - * task. + * The higher a thread's priority, the bigger timeslices + * it gets during one round of execution. But even the lowest + * priority thread gets MIN_TIMESLICE worth of execution time. * - * We want the time-slice to be around 50ms or so, so this - * calculation depends on the value of HZ. + * task_timeslice() is the interface that is used by the scheduler. */ -#if HZ < 200 -#define TICK_SCALE(x) ((x) >> 2) -#elif HZ < 400 -#define TICK_SCALE(x) ((x) >> 1) -#elif HZ < 800 -#define TICK_SCALE(x) (x) -#elif HZ < 1600 -#define TICK_SCALE(x) ((x) << 1) -#else -#define TICK_SCALE(x) ((x) << 2) -#endif - -#define NICE_TO_TICKS(nice) (TICK_SCALE(20-(nice))+1) +#define BASE_TIMESLICE(p) (MIN_TIMESLICE + \ + ((MAX_TIMESLICE - MIN_TIMESLICE) * \ + (MAX_PRIO-1-(p)->static_prio)/(MAX_USER_PRIO - 1))) +static inline unsigned int task_timeslice(task_t *p) +{ + return BASE_TIMESLICE(p); +} /* - * Init task must be ok at boot for the ix86 as we will check its signals - * via the SMP irq return path. + * These are the runqueue data structures: */ - -struct task_struct * init_tasks[NR_CPUS] = {&init_task, }; + +#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long)) + +typedef struct runqueue runqueue_t; + +struct prio_array { + int nr_active; + unsigned long bitmap[BITMAP_SIZE]; + struct list_head queue[MAX_PRIO]; +}; /* - * The tasklist_lock protects the linked list of processes. - * - * The runqueue_lock locks the parts that actually access - * and change the run-queues, and have to be interrupt-safe. - * - * If both locks are to be concurrently held, the runqueue_lock - * nests inside the tasklist_lock. + * This is the main, per-CPU runqueue data structure. * - * task->alloc_lock nests inside tasklist_lock. + * Locking rule: those places that want to lock multiple runqueues + * (such as the load balancing or the process migration code), lock + * acquire operations must be ordered by ascending &runqueue. 
+ */ +struct runqueue { + spinlock_t lock; + unsigned long nr_running, nr_switches, expired_timestamp, + nr_uninterruptible; + task_t *curr, *idle; + prio_array_t *active, *expired, arrays[2]; + int prev_nr_running[NR_CPUS]; + task_t *migration_thread; + struct list_head migration_queue; +} ____cacheline_aligned; + +static struct runqueue runqueues[NR_CPUS] __cacheline_aligned; + +#define cpu_rq(cpu) (runqueues + (cpu)) +#define this_rq() cpu_rq(smp_processor_id()) +#define task_rq(p) cpu_rq(task_cpu(p)) +#define cpu_curr(cpu) (cpu_rq(cpu)->curr) +#define rt_task(p) ((p)->prio < MAX_RT_PRIO) + +/* + * Default context-switch locking: */ -spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */ -rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */ +#ifndef prepare_arch_schedule +# define prepare_arch_schedule(prev) do { } while(0) +# define finish_arch_schedule(prev) do { } while(0) +# define prepare_arch_switch(rq) do { } while(0) +# define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock) +#endif -static LIST_HEAD(runqueue_head); /* - * We align per-CPU scheduling data on cacheline boundaries, - * to prevent cacheline ping-pong. + * task_rq_lock - lock the runqueue a given task resides on and disable + * interrupts. Note the ordering: we can safely lookup the task_rq without + * explicitly disabling preemption. */ -static union { - struct schedule_data { - struct task_struct * curr; - cycles_t last_schedule; - } schedule_data; - char __pad [SMP_CACHE_BYTES]; -} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}}; +static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) +{ + struct runqueue *rq; -#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr -#define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule +repeat_lock_task: + local_irq_save(*flags); + rq = task_rq(p); + spin_lock(&rq->lock); + if (unlikely(rq != task_rq(p))) { + spin_unlock_irqrestore(&rq->lock, *flags); + goto repeat_lock_task; + } + return rq; +} -struct kernel_stat kstat; -extern struct task_struct *child_reaper; +static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) +{ + spin_unlock_irqrestore(&rq->lock, *flags); +} -#ifdef CONFIG_SMP +/* + * rq_lock - lock a given runqueue and disable interrupts. + */ +static inline runqueue_t *this_rq_lock(void) +{ + runqueue_t *rq; -#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)]) -#define can_schedule(p,cpu) \ - ((p)->cpus_runnable & (p)->cpus_allowed & (1UL << cpu)) + local_irq_disable(); + rq = this_rq(); + spin_lock(&rq->lock); -#else + return rq; +} -#define idle_task(cpu) (&init_task) -#define can_schedule(p,cpu) (1) +static inline void rq_unlock(runqueue_t *rq) +{ + spin_unlock_irq(&rq->lock); +} -#endif +/* + * Adding/removing a task to/from a priority array: + */ +static inline void dequeue_task(struct task_struct *p, prio_array_t *array) +{ + array->nr_active--; + list_del(&p->run_list); + if (list_empty(array->queue + p->prio)) + __clear_bit(p->prio, array->bitmap); +} -void scheduling_functions_start_here(void) { } +static inline void enqueue_task(struct task_struct *p, prio_array_t *array) +{ + list_add_tail(&p->run_list, array->queue + p->prio); + __set_bit(p->prio, array->bitmap); + array->nr_active++; + p->array = array; +} /* - * This is the function that decides how desirable a process is.. 
- * You can weigh different processes against each other depending - * on what CPU they've run on lately etc to try to handle cache - * and TLB miss penalties. + * effective_prio - return the priority that is based on the static + * priority but is modified by bonuses/penalties. + * + * We scale the actual sleep average [0 .... MAX_SLEEP_AVG] + * into the -5 ... 0 ... +5 bonus/penalty range. + * + * We use 25% of the full 0...39 priority range so that: * - * Return values: - * -1000: never select this - * 0: out of time, recalculate counters (but it might still be - * selected) - * +ve: "goodness" value (the larger, the better) - * +1000: realtime process, select this. + * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs. + * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks. + * + * Both properties are important to certain workloads. */ - -static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm) +static inline int effective_prio(task_t *p) { - int weight; + int bonus, prio; - /* - * select the current process after every other - * runnable process, but before the idle thread. - * Also, dont trigger a counter recalculation. - */ - weight = -1; - if (p->policy & SCHED_YIELD) - goto out; + bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*p->sleep_avg/MAX_SLEEP_AVG/100 - + MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2; - /* - * Non-RT process - normal case first. - */ - if (p->policy == SCHED_OTHER) { + prio = p->static_prio - bonus; + if (prio < MAX_RT_PRIO) + prio = MAX_RT_PRIO; + if (prio > MAX_PRIO-1) + prio = MAX_PRIO-1; + return prio; +} + +/* + * activate_task - move a task to the runqueue. + * + * Also update all the scheduling statistics stuff. (sleep average + * calculation, priority modifiers, etc.) + */ +static inline void activate_task(task_t *p, runqueue_t *rq) +{ + unsigned long sleep_time = jiffies - p->sleep_timestamp; + prio_array_t *array = rq->active; + + if (!rt_task(p) && sleep_time) { /* - * Give the process a first-approximation goodness value - * according to the number of clock-ticks it has left. - * - * Don't do any other calculations if the time slice is - * over.. + * This code gives a bonus to interactive tasks. We update + * an 'average sleep time' value here, based on + * sleep_timestamp. The more time a task spends sleeping, + * the higher the average gets - and the higher the priority + * boost gets as well. */ - weight = p->counter; - if (!weight) - goto out; - -#ifdef CONFIG_SMP - /* Give a largish advantage to the same processor... */ - /* (this is equivalent to penalizing other processors) */ - if (p->processor == this_cpu) - weight += PROC_CHANGE_PENALTY; -#endif - - /* .. and a slight advantage to the current MM */ - if (p->mm == this_mm || !p->mm) - weight += 1; - weight += 20 - p->nice; - goto out; + p->sleep_avg += sleep_time; + if (p->sleep_avg > MAX_SLEEP_AVG) + p->sleep_avg = MAX_SLEEP_AVG; + p->prio = effective_prio(p); } - - /* - * Realtime process, select the first one on the - * runqueue (taking priorities within processes - * into account). - */ - weight = 1000 + p->rt_priority; -out: - return weight; + enqueue_task(p, array); + rq->nr_running++; } /* - * the 'goodness value' of replacing a process on a given CPU. - * positive value means 'replace', zero or negative means 'dont'. + * deactivate_task - remove a task from the runqueue. 
*/ -static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu) +static inline void deactivate_task(struct task_struct *p, runqueue_t *rq) { - return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm); + rq->nr_running--; + if (p->state == TASK_UNINTERRUPTIBLE) + rq->nr_uninterruptible++; + dequeue_task(p, p->array); + p->array = NULL; } /* - * This is ugly, but reschedule_idle() is very timing-critical. - * We are called with the runqueue spinlock held and we must - * not claim the tasklist_lock. + * resched_task - mark a task 'to be rescheduled now'. + * + * On UP this means the setting of the need_resched flag, on SMP it + * might also involve a cross-CPU call to trigger the scheduler on + * the target CPU. */ -static FASTCALL(void reschedule_idle(struct task_struct * p)); - -static void reschedule_idle(struct task_struct * p) +static inline void resched_task(task_t *p) { #ifdef CONFIG_SMP - int this_cpu = smp_processor_id(); - struct task_struct *tsk, *target_tsk; - int cpu, best_cpu, i, max_prio; - cycles_t oldest_idle; - - /* - * shortcut if the woken up task's last CPU is - * idle now. - */ - best_cpu = p->processor; - if (can_schedule(p, best_cpu)) { - tsk = idle_task(best_cpu); - if (cpu_curr(best_cpu) == tsk) { - int need_resched; -send_now_idle: - /* - * If need_resched == -1 then we can skip sending - * the IPI altogether, tsk->need_resched is - * actively watched by the idle thread. - */ - need_resched = tsk->need_resched; - tsk->need_resched = 1; - if ((best_cpu != this_cpu) && !need_resched) - smp_send_reschedule(best_cpu); - return; - } - } - - /* - * We know that the preferred CPU has a cache-affine current - * process, lets try to find a new idle CPU for the woken-up - * process. Select the least recently active idle CPU. (that - * one will have the least active cache context.) Also find - * the executing process which has the least priority. - */ - oldest_idle = (cycles_t) -1; - target_tsk = NULL; - max_prio = 0; + int need_resched; - for (i = 0; i < smp_num_cpus; i++) { - cpu = cpu_logical_map(i); - if (!can_schedule(p, cpu)) - continue; - tsk = cpu_curr(cpu); - /* - * We use the first available idle CPU. This creates - * a priority list between idle CPUs, but this is not - * a problem. - */ - if (tsk == idle_task(cpu)) { -#if defined(__i386__) && defined(CONFIG_SMP) - /* - * Check if two siblings are idle in the same - * physical package. Use them if found. 
- */ - if (smp_num_siblings == 2) { - if (cpu_curr(cpu_sibling_map[cpu]) == - idle_task(cpu_sibling_map[cpu])) { - oldest_idle = last_schedule(cpu); - target_tsk = tsk; - break; - } - - } -#endif - if (last_schedule(cpu) < oldest_idle) { - oldest_idle = last_schedule(cpu); - target_tsk = tsk; - } - } else { - if (oldest_idle == (cycles_t)-1) { - int prio = preemption_goodness(tsk, p, cpu); - - if (prio > max_prio) { - max_prio = prio; - target_tsk = tsk; - } - } - } - } - tsk = target_tsk; - if (tsk) { - if (oldest_idle != (cycles_t)-1) { - best_cpu = tsk->processor; - goto send_now_idle; - } - tsk->need_resched = 1; - if (tsk->processor != this_cpu) - smp_send_reschedule(tsk->processor); - } - return; - - -#else /* UP */ - int this_cpu = smp_processor_id(); - struct task_struct *tsk; - - tsk = cpu_curr(this_cpu); - if (preemption_goodness(tsk, p, this_cpu) > 0) - tsk->need_resched = 1; + need_resched = p->need_resched; + wmb(); + set_tsk_need_resched(p); + if (!need_resched && (task_cpu(p) != smp_processor_id())) + smp_send_reschedule(task_cpu(p)); +#else + set_tsk_need_resched(p); #endif } +#ifdef CONFIG_SMP + /* - * Careful! + * wait_task_inactive - wait for a thread to unschedule. * - * This has to add the process to the _end_ of the - * run-queue, not the beginning. The goodness value will - * determine whether this process will run next. This is - * important to get SCHED_FIFO and SCHED_RR right, where - * a process that is either pre-empted or its time slice - * has expired, should be moved to the tail of the run - * queue for its priority - Bhavesh Davda + * The caller must ensure that the task *will* unschedule sometime soon, + * else this function might spin for a *long* time. */ -static inline void add_to_runqueue(struct task_struct * p) +void wait_task_inactive(task_t * p) { - list_add_tail(&p->run_list, &runqueue_head); - nr_running++; + unsigned long flags; + runqueue_t *rq; + +repeat: + rq = task_rq(p); + if (unlikely(rq->curr == p)) { + cpu_relax(); + goto repeat; + } + rq = task_rq_lock(p, &flags); + if (unlikely(rq->curr == p)) { + task_rq_unlock(rq, &flags); + goto repeat; + } + task_rq_unlock(rq, &flags); } -static inline void move_last_runqueue(struct task_struct * p) +/* + * kick_if_running - kick the remote CPU if the task is running currently. + * + * This code is used by the signal code to signal tasks + * which are in user-mode, as quickly as possible. + * + * (Note that we do this lockless - if the task does anything + * while the message is in flight then it will notice the + * sigpending condition anyway.) + */ +void kick_if_running(task_t * p) { - list_del(&p->run_list); - list_add_tail(&p->run_list, &runqueue_head); + if (p == task_rq(p)->curr) + resched_task(p); } +#endif -/* - * Wake up a process. Put it on the run-queue if it's not - * already there. The "current" process is always on the - * run-queue (except when the actual re-schedule is in - * progress), and as such you're allowed to do the simpler - * "current->state = TASK_RUNNING" to mark yourself runnable - * without the overhead of this. +/*** + * try_to_wake_up - wake up a thread + * @p: the to-be-woken-up thread + * @sync: do a synchronous wakeup? + * + * Put it on the run-queue if it's not already there. The "current" + * thread is always on the run-queue (except when the actual + * re-schedule is in progress), and as such you're allowed to do + * the simpler "current->state = TASK_RUNNING" to mark yourself + * runnable without the overhead of this. 
+ * + * returns failure only if the task is already active. */ -static inline int try_to_wake_up(struct task_struct * p, int synchronous) +static int try_to_wake_up(task_t * p, int sync) { unsigned long flags; int success = 0; + long old_state; + runqueue_t *rq; - /* - * We want the common case fall through straight, thus the goto. - */ - spin_lock_irqsave(&runqueue_lock, flags); +repeat_lock_task: + rq = task_rq_lock(p, &flags); + old_state = p->state; + if (!p->array) { + /* + * Fast-migrate the task if it's not running or runnable + * currently. Do not violate hard affinity. + */ + if (unlikely(sync && (rq->curr != p) && + (task_cpu(p) != smp_processor_id()) && + (p->cpus_allowed & (1UL << smp_processor_id())))) { + + set_task_cpu(p, smp_processor_id()); + task_rq_unlock(rq, &flags); + goto repeat_lock_task; + } + if (old_state == TASK_UNINTERRUPTIBLE) + rq->nr_uninterruptible--; + activate_task(p, rq); + if (p->prio < rq->curr->prio) + resched_task(rq->curr); + success = 1; + } p->state = TASK_RUNNING; - if (task_on_runqueue(p)) - goto out; - add_to_runqueue(p); - if (!synchronous || !(p->cpus_allowed & (1UL << smp_processor_id()))) - reschedule_idle(p); - success = 1; -out: - spin_unlock_irqrestore(&runqueue_lock, flags); + task_rq_unlock(rq, &flags); + return success; } -inline int wake_up_process(struct task_struct * p) +int wake_up_process(task_t * p) { return try_to_wake_up(p, 0); } -static void process_timeout(unsigned long __data) +/* + * wake_up_forked_process - wake up a freshly forked process. + * + * This function will do some initial scheduler statistics housekeeping + * that must be done for every newly created process. + */ +void wake_up_forked_process(task_t * p) { - struct task_struct * p = (struct task_struct *) __data; + runqueue_t *rq = this_rq_lock(); - wake_up_process(p); + p->state = TASK_RUNNING; + if (!rt_task(p)) { + /* + * We decrease the sleep average of forking parents + * and children as well, to keep max-interactive tasks + * from forking tasks that are max-interactive. + */ + current->sleep_avg = current->sleep_avg * PARENT_PENALTY / 100; + p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100; + p->prio = effective_prio(p); + } + set_task_cpu(p, smp_processor_id()); + activate_task(p, rq); + + rq_unlock(rq); +} + +/* + * Potentially available exiting-child timeslices are + * retrieved here - this way the parent does not get + * penalized for creating too many threads. + * + * (this cannot be used to 'generate' timeslices + * artificially, because any timeslice recovered here + * was given away by the parent in the first place.) + */ +void sched_exit(task_t * p) +{ + __cli(); + current->time_slice += p->time_slice; + if (unlikely(current->time_slice > MAX_TIMESLICE)) + current->time_slice = MAX_TIMESLICE; + __sti(); + /* + * If the child was a (relative-) CPU hog then decrease + * the sleep_avg of the parent as well. + */ + if (p->sleep_avg < current->sleep_avg) + current->sleep_avg = (current->sleep_avg * EXIT_WEIGHT + + p->sleep_avg) / (EXIT_WEIGHT + 1); } /** - * schedule_timeout - sleep until timeout - * @timeout: timeout value in jiffies - * - * Make the current task sleep until @timeout jiffies have - * elapsed. The routine will return immediately unless - * the current task state has been set (see set_current_state()). - * - * You can set the task state as follows - - * - * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to - * pass before the routine returns. 
The routine will return 0 + * schedule_tail - first thing a freshly forked thread must call. * - * %TASK_INTERRUPTIBLE - the routine may return early if a signal is - * delivered to the current task. In this case the remaining time - * in jiffies will be returned, or 0 if the timer expired in time - * - * The current task state is guaranteed to be TASK_RUNNING when this - * routine returns. - * - * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule - * the CPU away without a bound on the timeout. In this case the return - * value will be %MAX_SCHEDULE_TIMEOUT. - * - * In all cases the return value is guaranteed to be non-negative. + * @prev: the thread we just switched away from. */ -signed long schedule_timeout(signed long timeout) +#if CONFIG_SMP +asmlinkage void schedule_tail(task_t *prev) { - struct timer_list timer; - unsigned long expire; - - switch (timeout) - { - case MAX_SCHEDULE_TIMEOUT: - /* - * These two special cases are useful to be comfortable - * in the caller. Nothing more. We could take - * MAX_SCHEDULE_TIMEOUT from one of the negative value - * but I' d like to return a valid offset (>=0) to allow - * the caller to do everything it want with the retval. - */ - schedule(); - goto out; - default: - /* - * Another bit of PARANOID. Note that the retval will be - * 0 since no piece of kernel is supposed to do a check - * for a negative retval of schedule_timeout() (since it - * should never happens anyway). You just have the printk() - * that will tell you if something is gone wrong and where. - */ - if (timeout < 0) - { - printk(KERN_ERR "schedule_timeout: wrong timeout " - "value %lx from %p\n", timeout, - __builtin_return_address(0)); - current->state = TASK_RUNNING; - goto out; - } - } + finish_arch_switch(this_rq()); + finish_arch_schedule(prev); +} +#endif - expire = timeout + jiffies; +/* + * context_switch - switch to the new MM and the new + * thread's register state. + */ +static inline task_t * context_switch(task_t *prev, task_t *next) +{ + struct mm_struct *mm = next->mm; + struct mm_struct *oldmm = prev->active_mm; - init_timer(&timer); - timer.expires = expire; - timer.data = (unsigned long) current; - timer.function = process_timeout; + if (unlikely(!mm)) { + next->active_mm = oldmm; + atomic_inc(&oldmm->mm_count); + enter_lazy_tlb(oldmm, next, smp_processor_id()); + } else + switch_mm(oldmm, mm, next, smp_processor_id()); - add_timer(&timer); - schedule(); - del_timer_sync(&timer); + if (unlikely(!prev->mm)) { + prev->active_mm = NULL; + mmdrop(oldmm); + } - timeout = expire - jiffies; + /* Here we just switch the register state and the stack. */ + switch_to(prev, next, prev); - out: - return timeout < 0 ? 0 : timeout; + return prev; } /* - * schedule_tail() is getting called from the fork return path. This - * cleans up all remaining scheduler things, without impacting the - * common case. + * nr_running, nr_uninterruptible and nr_context_switches: + * + * externally visible scheduler statistics: current number of runnable + * threads, current number of uninterruptible-sleeping threads, total + * number of context switches performed since bootup. */ -static inline void __schedule_tail(struct task_struct *prev) +unsigned long nr_running(void) { -#ifdef CONFIG_SMP - int policy; + unsigned long i, sum = 0; - /* - * prev->policy can be written from here only before `prev' - * can be scheduled (before setting prev->cpus_runnable to ~0UL). 
- * Of course it must also be read before allowing prev - * to be rescheduled, but since the write depends on the read - * to complete, wmb() is enough. (the spin_lock() acquired - * before setting cpus_runnable is not enough because the spin_lock() - * common code semantics allows code outside the critical section - * to enter inside the critical section) - */ - policy = prev->policy; - prev->policy = policy & ~SCHED_YIELD; - wmb(); + for (i = 0; i < smp_num_cpus; i++) + sum += cpu_rq(cpu_logical_map(i))->nr_running; - /* - * fast path falls through. We have to clear cpus_runnable before - * checking prev->state to avoid a wakeup race. Protect against - * the task exiting early. - */ - task_lock(prev); - task_release_cpu(prev); - mb(); - if (prev->state == TASK_RUNNING) - goto needs_resched; - -out_unlock: - task_unlock(prev); /* Synchronise here with release_task() if prev is TASK_ZOMBIE */ - return; + return sum; +} - /* - * Slow path - we 'push' the previous process and - * reschedule_idle() will attempt to find a new - * processor for it. (but it might preempt the - * current process as well.) We must take the runqueue - * lock and re-check prev->state to be correct. It might - * still happen that this process has a preemption - * 'in progress' already - but this is not a problem and - * might happen in other circumstances as well. - */ -needs_resched: - { - unsigned long flags; +unsigned long nr_uninterruptible(void) +{ + unsigned long i, sum = 0; - /* - * Avoid taking the runqueue lock in cases where - * no preemption-check is necessery: - */ - if ((prev == idle_task(smp_processor_id())) || - (policy & SCHED_YIELD)) - goto out_unlock; + for (i = 0; i < smp_num_cpus; i++) + sum += cpu_rq(cpu_logical_map(i))->nr_uninterruptible; - spin_lock_irqsave(&runqueue_lock, flags); - if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev)) - reschedule_idle(prev); - spin_unlock_irqrestore(&runqueue_lock, flags); - goto out_unlock; - } -#else - prev->policy &= ~SCHED_YIELD; -#endif /* CONFIG_SMP */ + return sum; } -asmlinkage void schedule_tail(struct task_struct *prev) +unsigned long nr_context_switches(void) { - __schedule_tail(prev); + unsigned long i, sum = 0; + + for (i = 0; i < smp_num_cpus; i++) + sum += cpu_rq(cpu_logical_map(i))->nr_switches; + + return sum; } /* - * 'schedule()' is the scheduler function. It's a very simple and nice - * scheduler: it's not perfect, but certainly works for most things. + * double_rq_lock - safely lock two runqueues * - * The goto is "interesting". - * - * NOTE!! Task 0 is the 'idle' task, which gets called when no other - * tasks can run. It can not be killed, and it cannot sleep. The 'state' - * information in task[0] is never used. + * Note this does not disable interrupts like task_rq_lock, + * you need to do so manually before calling. 
*/ -asmlinkage void schedule(void) +static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) { - struct schedule_data * sched_data; - struct task_struct *prev, *next, *p; - struct list_head *tmp; - int this_cpu, c; - - - spin_lock_prefetch(&runqueue_lock); + if (rq1 == rq2) + spin_lock(&rq1->lock); + else { + if (rq1 < rq2) { + spin_lock(&rq1->lock); + spin_lock(&rq2->lock); + } else { + spin_lock(&rq2->lock); + spin_lock(&rq1->lock); + } + } +} - BUG_ON(!current->active_mm); -need_resched_back: - prev = current; - this_cpu = prev->processor; +/* + * double_rq_unlock - safely unlock two runqueues + * + * Note this does not restore interrupts like task_rq_unlock, + * you need to do so manually after calling. + */ +static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) +{ + spin_unlock(&rq1->lock); + if (rq1 != rq2) + spin_unlock(&rq2->lock); +} - if (unlikely(in_interrupt())) { - printk("Scheduling in interrupt\n"); - BUG(); +#if CONFIG_SMP +/* + * double_lock_balance - lock the busiest runqueue + * + * this_rq is locked already. Recalculate nr_running if we have to + * drop the runqueue lock. + */ +static inline unsigned int double_lock_balance(runqueue_t *this_rq, + runqueue_t *busiest, int this_cpu, int idle, unsigned int nr_running) +{ + if (unlikely(!spin_trylock(&busiest->lock))) { + if (busiest < this_rq) { + spin_unlock(&this_rq->lock); + spin_lock(&busiest->lock); + spin_lock(&this_rq->lock); + /* Need to recalculate nr_running */ + if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu])) + nr_running = this_rq->nr_running; + else + nr_running = this_rq->prev_nr_running[this_cpu]; + } else + spin_lock(&busiest->lock); } + return nr_running; +} + +#include - release_kernel_lock(prev, this_cpu); +/* + * Current runqueue is empty, or rebalance tick: if there is an + * imbalance (current runqueue is too short) then pull from + * busiest runqueue(s). + * + * We call this with the current runqueue locked, + * irqs disabled. 
*/ - sched_data = & aligned_data[this_cpu].schedule_data; + if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu])) + nr_running = this_rq->nr_running; + else + nr_running = this_rq->prev_nr_running[this_cpu]; - spin_lock_irq(&runqueue_lock); + busiest = NULL; + max_load = 1; + for (i = 0; i < smp_num_cpus; i++) { + int logical = cpu_logical_map(i); - /* move an exhausted RR process to be last.. */ - if (unlikely(prev->policy == SCHED_RR)) - if (!prev->counter) { - prev->counter = NICE_TO_TICKS(prev->nice); - move_last_runqueue(prev); + rq_src = cpu_rq(logical); + if (idle || (rq_src->nr_running < this_rq->prev_nr_running[logical])) + load = rq_src->nr_running; + else + load = this_rq->prev_nr_running[logical]; + this_rq->prev_nr_running[logical] = rq_src->nr_running; + + if ((load > max_load) && (rq_src != this_rq)) { + busiest = rq_src; + max_load = load; } - - switch (prev->state) { - case TASK_INTERRUPTIBLE: - if (signal_pending(prev)) { - prev->state = TASK_RUNNING; - break; - } - default: - del_from_runqueue(prev); - case TASK_RUNNING:; } - prev->need_resched = 0; + if (likely(!busiest)) + return; + + imbalance = (max_load - nr_running) / 2; + + /* It needs an at least ~25% imbalance to trigger balancing. */ + if (!idle && (imbalance < (max_load + 3)/4)) + return; + + nr_running = double_lock_balance(this_rq, busiest, this_cpu, idle, nr_running); /* - * this is the scheduler proper: + * Make sure nothing changed since we checked the + * runqueue length. */ + if (busiest->nr_running <= nr_running + 1) + goto out_unlock; -repeat_schedule: /* - * Default process to select.. + * We first consider expired tasks. Those will likely not be + * executed in the near future, and they are most likely to + * be cache-cold, thus switching CPUs has the least effect + * on them. */ - next = idle_task(this_cpu); - c = -1000; - list_for_each(tmp, &runqueue_head) { - p = list_entry(tmp, struct task_struct, run_list); - if (can_schedule(p, this_cpu)) { - int weight = goodness(p, this_cpu, prev->active_mm); - if (weight > c) - c = weight, next = p; + if (busiest->expired->nr_active) + array = busiest->expired; + else + array = busiest->active; + +new_array: + /* Start searching at priority 0: */ + idx = 0; +skip_bitmap: + if (!idx) + idx = sched_find_first_bit(array->bitmap); + else + idx = find_next_bit(array->bitmap, MAX_PRIO, idx); + if (idx == MAX_PRIO) { + if (array == busiest->expired) { + array = busiest->active; + goto new_array; } + goto out_unlock; } - /* Do we need to re-calculate counters? */ - if (unlikely(!c)) { - struct task_struct *p; - - spin_unlock_irq(&runqueue_lock); - read_lock(&tasklist_lock); - for_each_task(p) - p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice); - read_unlock(&tasklist_lock); - spin_lock_irq(&runqueue_lock); - goto repeat_schedule; + head = array->queue + idx; + curr = head->prev; +skip_queue: + tmp = list_entry(curr, task_t, run_list); + + /* + * We do not migrate tasks that are: + * 1) running (obviously), or + * 2) cannot be migrated to this CPU due to cpus_allowed, or + * 3) are cache-hot on their current CPU. 
+ */ + +#define CAN_MIGRATE_TASK(p,rq,this_cpu) \ + ((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \ + ((p) != (rq)->curr) && \ + ((p)->cpus_allowed & (1UL << (this_cpu)))) + + if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) { + curr = curr->next; + if (curr != head) + goto skip_queue; + idx++; + goto skip_bitmap; + } + next = tmp; + /* + * take the task out of the other runqueue and + * put it into this one: + */ + dequeue_task(next, array); + busiest->nr_running--; + set_task_cpu(next, this_cpu); + this_rq->nr_running++; + enqueue_task(next, this_rq->active); + if (next->prio < current->prio) + set_need_resched(); + if (!idle && --imbalance) { + if (array == busiest->expired) { + array = busiest->active; + goto new_array; + } } +out_unlock: + spin_unlock(&busiest->lock); +} - /* - * from this point on nothing can prevent us from - * switching to the next task, save this fact in - * sched_data. - */ - sched_data->curr = next; - task_set_cpu(next, this_cpu); - spin_unlock_irq(&runqueue_lock); +/* + * One of the idle_cpu_tick() and busy_cpu_tick() functions will + * get called every timer tick, on every CPU. Our balancing action + * frequency and balancing agressivity depends on whether the CPU is + * idle or not. + * + * busy-rebalance every 250 msecs. idle-rebalance every 1 msec. (or on + * systems with HZ=100, every 10 msecs.) + */ +#define BUSY_REBALANCE_TICK (HZ/4 ?: 1) +#define IDLE_REBALANCE_TICK (HZ/1000 ?: 1) + +static inline void idle_tick(void) +{ + if (jiffies % IDLE_REBALANCE_TICK) + return; + spin_lock(&this_rq()->lock); + load_balance(this_rq(), 1); + spin_unlock(&this_rq()->lock); +} - if (unlikely(prev == next)) { - /* We won't go through the normal tail, so do this by hand */ - prev->policy &= ~SCHED_YIELD; - goto same_process; - } +#endif -#ifdef CONFIG_SMP - /* - * maintain the per-process 'last schedule' value. - * (this has to be recalculated even if we reschedule to - * the same process) Currently this is only used on SMP, - * and it's approximate, so we do not have to maintain - * it while holding the runqueue spinlock. - */ - sched_data->last_schedule = get_cycles(); +/* + * We place interactive tasks back into the active array, if possible. + * + * To guarantee that this does not starve expired tasks we ignore the + * interactivity of a task if the first expired task had to wait more + * than a 'reasonable' amount of time. This deadline timeout is + * load-dependent, as the frequency of array switched decreases with + * increasing number of running tasks: + */ +#define EXPIRED_STARVING(rq) \ + ((rq)->expired_timestamp && \ + (jiffies - (rq)->expired_timestamp >= \ + STARVATION_LIMIT * ((rq)->nr_running) + 1)) - /* - * We drop the scheduler lock early (it's a global spinlock), - * thus we have to lock the previous process from getting - * rescheduled during switch_to(). - */ +/* + * This function gets called by the timer code, with HZ frequency. + * We call it with interrupts disabled. 
+ */ +void scheduler_tick(int user_tick, int system) +{ + int cpu = smp_processor_id(); + runqueue_t *rq = this_rq(); + task_t *p = current; -#endif /* CONFIG_SMP */ + if (p == rq->idle) { + if (local_bh_count(cpu) || local_irq_count(cpu) > 1) + kstat.per_cpu_system[cpu] += system; +#if CONFIG_SMP + idle_tick(); +#endif + return; + } + if (TASK_NICE(p) > 0) + kstat.per_cpu_nice[cpu] += user_tick; + else + kstat.per_cpu_user[cpu] += user_tick; + kstat.per_cpu_system[cpu] += system; - kstat.context_swtch++; - /* - * there are 3 processes which are affected by a context switch: - * - * prev == .... ==> (last => next) - * - * It's the 'much more previous' 'prev' that is on next's stack, - * but prev is set to (the just run) 'last' process by switch_to(). - * This might sound slightly confusing but makes tons of sense. - */ - prepare_to_switch(); - { - struct mm_struct *mm = next->mm; - struct mm_struct *oldmm = prev->active_mm; - if (!mm) { - BUG_ON(next->active_mm); - next->active_mm = oldmm; - atomic_inc(&oldmm->mm_count); - enter_lazy_tlb(oldmm, next, this_cpu); - } else { - BUG_ON(next->active_mm != mm); - switch_mm(oldmm, mm, next, this_cpu); + /* Task might have expired already, but not scheduled off yet */ + if (p->array != rq->active) { + set_tsk_need_resched(p); + return; + } + spin_lock(&rq->lock); + if (unlikely(rt_task(p))) { + /* + * RR tasks need a special form of timeslice management. + * FIFO tasks have no timeslices. + */ + if ((p->policy == SCHED_RR) && !--p->time_slice) { + p->time_slice = task_timeslice(p); + set_tsk_need_resched(p); + + /* put it at the end of the queue: */ + dequeue_task(p, rq->active); + enqueue_task(p, rq->active); } + goto out; + } + /* + * The task was running during this tick - update the + * time slice counter and the sleep average. Note: we + * do not update a process's priority until it either + * goes to sleep or uses up its timeslice. This makes + * it possible for interactive tasks to use up their + * timeslices at their highest priority levels. + */ + if (p->sleep_avg) + p->sleep_avg--; + if (!--p->time_slice) { + dequeue_task(p, rq->active); + set_tsk_need_resched(p); + p->prio = effective_prio(p); + p->time_slice = task_timeslice(p); + + if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) { + if (!rq->expired_timestamp) + rq->expired_timestamp = jiffies; + enqueue_task(p, rq->expired); + } else + enqueue_task(p, rq->active); + } +out: +#if CONFIG_SMP + if (!(jiffies % BUSY_REBALANCE_TICK)) + load_balance(rq, 0); +#endif + spin_unlock(&rq->lock); +} + +void scheduling_functions_start_here(void) { } + +/* + * schedule() is the main scheduler function. 
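
A toy model of the end-of-timeslice path in scheduler_tick() above. TASK_INTERACTIVE() and task_timeslice() are defined elsewhere in the patch, so they appear here only as parameters; the model only shows when a task is moved to the expired array versus reinserted into the active one:

#include <stdio.h>

/*
 * Returns 1 if the task goes to the expired array, 0 if it stays active.
 * 'interactive' stands in for TASK_INTERACTIVE(p), 'starving' for
 * EXPIRED_STARVING(rq).
 */
static int tick(int *time_slice, int new_slice, int interactive, int starving)
{
        if (--(*time_slice) > 0)
                return 0;                       /* slice left, nothing to do */
        *time_slice = new_slice;                /* recharge the timeslice */
        if (!interactive || starving)
                return 1;                       /* requeued on the expired array */
        return 0;                               /* interactive bonus: stays active */
}

int main(void)
{
        int slice = 3;

        for (int t = 1; t <= 4; t++)
                printf("tick %d: expired=%d, slice left=%d\n",
                       t, tick(&slice, 3, /*interactive*/0, /*starving*/0), slice);
        return 0;
}
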
+ */ +asmlinkage void schedule(void) +{ + task_t *prev, *next; + runqueue_t *rq; + prio_array_t *array; + struct list_head *queue; + int idx; + + if (unlikely(in_interrupt())) + BUG(); + +need_resched: + prev = current; + rq = this_rq(); + + release_kernel_lock(prev, smp_processor_id()); + prepare_arch_schedule(prev); + prev->sleep_timestamp = jiffies; + spin_lock_irq(&rq->lock); - if (!prev->mm) { - prev->active_mm = NULL; - mmdrop(oldmm); + switch (prev->state) { + case TASK_INTERRUPTIBLE: + if (unlikely(signal_pending(prev))) { + prev->state = TASK_RUNNING; + break; } + default: + deactivate_task(prev, rq); + case TASK_RUNNING: + ; + } +#if CONFIG_SMP +pick_next_task: +#endif + if (unlikely(!rq->nr_running)) { +#if CONFIG_SMP + load_balance(rq, 1); + if (rq->nr_running) + goto pick_next_task; +#endif + next = rq->idle; + rq->expired_timestamp = 0; + goto switch_tasks; } - /* - * This just switches the register state and the - * stack. - */ - switch_to(prev, next, prev); - __schedule_tail(prev); + array = rq->active; + if (unlikely(!array->nr_active)) { + /* + * Switch the active and expired arrays. + */ + rq->active = rq->expired; + rq->expired = array; + array = rq->active; + rq->expired_timestamp = 0; + } + + idx = sched_find_first_bit(array->bitmap); + queue = array->queue + idx; + next = list_entry(queue->next, task_t, run_list); + +switch_tasks: + prefetch(next); + clear_tsk_need_resched(prev); + + if (likely(prev != next)) { + rq->nr_switches++; + rq->curr = next; + + prepare_arch_switch(rq); + prev = context_switch(prev, next); + barrier(); + rq = this_rq(); + finish_arch_switch(rq); + } else + spin_unlock_irq(&rq->lock); + finish_arch_schedule(prev); -same_process: reacquire_kernel_lock(current); - if (current->need_resched) - goto need_resched_back; - return; + if (need_resched()) + goto need_resched; } /* - * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything - * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the - * non-exclusive tasks and one exclusive task. + * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just + * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve + * number) then we wake all the non-exclusive tasks and one exclusive task. * * There are circumstances in which we can try to wake a task which has already - * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns zero - * in this (rare) case, and we handle it by contonuing to scan the queue. + * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns + * zero in this (rare) case, and we handle it by continuing to scan the queue. 
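
schedule() above finds the next task with a single bitmap search instead of walking a runqueue. The sketch below reimplements that lookup in userspace, including the delimiter bit that sched_init() sets at MAX_PRIO (140 in the O(1) scheduler, not spelled out in this hunk) so an empty bitmap still yields a well-defined answer:

#include <stdio.h>
#include <string.h>

#define MAX_PRIO      140
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define BITMAP_WORDS  (MAX_PRIO / BITS_PER_LONG + 1)

static unsigned long bitmap[BITMAP_WORDS];

static void set_bit_(int nr)   { bitmap[nr / BITS_PER_LONG] |=  1UL << (nr % BITS_PER_LONG); }
static void clear_bit_(int nr) { bitmap[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG)); }

/* Find the lowest set bit: a handful of word tests, independent of how
 * many tasks are queued - this is what makes pick-next O(1). */
static int find_first_bit_(void)
{
        for (int w = 0; w < BITMAP_WORDS; w++)
                if (bitmap[w])
                        return w * BITS_PER_LONG + __builtin_ctzl(bitmap[w]);
        return MAX_PRIO;
}

int main(void)
{
        memset(bitmap, 0, sizeof(bitmap));
        set_bit_(MAX_PRIO);             /* delimiter bit, as in sched_init() */
        set_bit_(120);                  /* pretend two priority queues are non-empty */
        set_bit_(75);

        printf("highest-priority non-empty queue: %d\n", find_first_bit_()); /* 75 */
        clear_bit_(75);
        printf("after it empties:                 %d\n", find_first_bit_()); /* 120 */
        clear_bit_(120);
        printf("nothing runnable:                 %d\n", find_first_bit_()); /* 140 */
        return 0;
}
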
*/ -static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode, - int nr_exclusive, const int sync) +static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync) { struct list_head *tmp; - struct task_struct *p; - - CHECK_MAGIC_WQHEAD(q); - WQ_CHECK_LIST_HEAD(&q->task_list); - - list_for_each(tmp,&q->task_list) { - unsigned int state; - wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); + unsigned int state; + wait_queue_t *curr; + task_t *p; - CHECK_MAGIC(curr->__magic); + list_for_each(tmp, &q->task_list) { + curr = list_entry(tmp, wait_queue_t, task_list); p = curr->task; state = p->state; - if (state & mode) { - WQ_NOTE_WAKER(curr); - if (try_to_wake_up(p, sync) && (curr->flags&WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) + if ((state & mode) && try_to_wake_up(p, sync) && + ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)) break; - } } } -void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr) +/** + * __wake_up - wake up threads blocked on a waitqueue. + * @q: the waitqueue + * @mode: which threads + * @nr_exclusive: how many wake-one or wake-many threads to wake up + */ +void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { - if (q) { - unsigned long flags; - wq_read_lock_irqsave(&q->lock, flags); - __wake_up_common(q, mode, nr, 0); - wq_read_unlock_irqrestore(&q->lock, flags); - } + unsigned long flags; + + if (unlikely(!q)) + return; + + spin_lock_irqsave(&q->lock, flags); + __wake_up_common(q, mode, nr_exclusive, 0); + spin_unlock_irqrestore(&q->lock, flags); } -void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr) +#if CONFIG_SMP + +/** + * __wake_up - sync- wake up threads blocked on a waitqueue. + * @q: the waitqueue + * @mode: which threads + * @nr_exclusive: how many wake-one or wake-many threads to wake up + * + * The sync wakeup differs that the waker knows that it will schedule + * away soon, so while the target thread will be woken up, it will not + * be migrated to another CPU - ie. the two threads are 'synchronized' + * with each other. This can prevent needless bouncing between CPUs. 
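
The new wakeup loop wakes every matching waiter but stops once nr_exclusive exclusive waiters have been woken, so nr_exclusive == 0 means "wake everybody". A minimal model of that counting (it assumes every try_to_wake_up() succeeds and ignores the state/mode check):

#include <stdio.h>

#define WQ_FLAG_EXCLUSIVE 0x01

struct waiter {
        const char *name;
        int flags;              /* WQ_FLAG_EXCLUSIVE or 0 */
};

/* Walk the queue in order; stop after nr_exclusive exclusive wakeups. */
static void wake_up_model(struct waiter *q, int n, int nr_exclusive)
{
        for (int i = 0; i < n; i++) {
                printf("  waking %s\n", q[i].name);
                if ((q[i].flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
}

int main(void)
{
        struct waiter q[] = {
                { "reader-1", 0 },
                { "reader-2", 0 },
                { "worker-1", WQ_FLAG_EXCLUSIVE },
                { "worker-2", WQ_FLAG_EXCLUSIVE },
        };

        printf("wake-all (nr_exclusive == 0):\n");
        wake_up_model(q, 4, 0);
        printf("wake-one (nr_exclusive == 1):\n");
        wake_up_model(q, 4, 1);
        return 0;
}
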
+ */ +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { - if (q) { - unsigned long flags; - wq_read_lock_irqsave(&q->lock, flags); - __wake_up_common(q, mode, nr, 1); - wq_read_unlock_irqrestore(&q->lock, flags); - } + unsigned long flags; + + if (unlikely(!q)) + return; + + spin_lock_irqsave(&q->lock, flags); + if (likely(nr_exclusive)) + __wake_up_common(q, mode, nr_exclusive, 1); + else + __wake_up_common(q, mode, nr_exclusive, 0); + spin_unlock_irqrestore(&q->lock, flags); } +#endif + void complete(struct completion *x) { unsigned long flags; @@ -790,15 +1082,15 @@ wait_queue_t wait; \ init_waitqueue_entry(&wait, current); -#define SLEEP_ON_HEAD \ - wq_write_lock_irqsave(&q->lock,flags); \ +#define SLEEP_ON_HEAD \ + spin_lock_irqsave(&q->lock,flags); \ __add_wait_queue(q, &wait); \ - wq_write_unlock(&q->lock); + spin_unlock(&q->lock); #define SLEEP_ON_TAIL \ - wq_write_lock_irq(&q->lock); \ + spin_lock_irq(&q->lock); \ __remove_wait_queue(q, &wait); \ - wq_write_unlock_irqrestore(&q->lock,flags); + spin_unlock_irqrestore(&q->lock, flags); void interruptible_sleep_on(wait_queue_head_t *q) { @@ -850,55 +1142,53 @@ void scheduling_functions_end_here(void) { } -#if CONFIG_SMP -/** - * set_cpus_allowed() - change a given task's processor affinity - * @p: task to bind - * @new_mask: bitmask of allowed processors - * - * Upon return, the task is running on a legal processor. Note the caller - * must have a valid reference to the task: it must not exit() prematurely. - * This call can sleep; do not hold locks on call. - */ -void set_cpus_allowed(struct task_struct *p, unsigned long new_mask) +void set_user_nice(task_t *p, long nice) { - new_mask &= cpu_online_map; - BUG_ON(!new_mask); - - p->cpus_allowed = new_mask; + unsigned long flags; + prio_array_t *array; + runqueue_t *rq; + if (TASK_NICE(p) == nice || nice < -20 || nice > 19) + return; /* - * If the task is on a no-longer-allowed processor, we need to move - * it. If the task is not current, then set need_resched and send - * its processor an IPI to reschedule. - */ - if (!(p->cpus_runnable & p->cpus_allowed)) { - if (p != current) { - p->need_resched = 1; - smp_send_reschedule(p->processor); - } + * We have to be careful, if called from sys_setpriority(), + * the task might be in the middle of scheduling on another CPU. + */ + rq = task_rq_lock(p, &flags); + if (rt_task(p)) { + p->static_prio = NICE_TO_PRIO(nice); + goto out_unlock; + } + array = p->array; + if (array) + dequeue_task(p, array); + p->static_prio = NICE_TO_PRIO(nice); + p->prio = NICE_TO_PRIO(nice); + if (array) { + enqueue_task(p, array); /* - * Wait until we are on a legal processor. If the task is - * current, then we should be on a legal processor the next - * time we reschedule. Otherwise, we need to wait for the IPI. + * If the task is running and lowered its priority, + * or increased its priority then reschedule its CPU: */ - while (!(p->cpus_runnable & p->cpus_allowed)) - schedule(); + if ((NICE_TO_PRIO(nice) < p->static_prio) || (p == rq->curr)) + resched_task(rq->curr); } +out_unlock: + task_rq_unlock(rq, &flags); } -#endif /* CONFIG_SMP */ #ifndef __alpha__ -/* - * This has been replaced by sys_setpriority. Maybe it should be - * moved into the arch dependent tree for those ports that require - * it for backward compatibility? +/** + * sys_nice - change the priority of the current process. + * @increment: priority increment + * + * sys_setpriority is a more generic, but much slower function that + * does similar things. 
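
From userspace, both nice(2) and setpriority(2) now funnel into set_user_nice() (see the kernel/sys.c hunk later in this patch). A small demo of the visible behaviour; lowering the nice value back down is expected to fail without CAP_SYS_NICE:

#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>

int main(void)
{
        errno = 0;
        printf("current nice value: %d\n", getpriority(PRIO_PROCESS, 0));

        /* Raising nice (lowering priority) never needs privilege. */
        if (nice(5) == -1 && errno)
                perror("nice");
        printf("after nice(5):      %d\n", getpriority(PRIO_PROCESS, 0));

        /* Going back down does. */
        if (setpriority(PRIO_PROCESS, 0, 0) == -1)
                perror("setpriority(0) (expected without CAP_SYS_NICE)");
        else
                printf("after setpriority:  %d\n", getpriority(PRIO_PROCESS, 0));
        return 0;
}
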
*/ - asmlinkage long sys_nice(int increment) { - long newprio; + long nice; /* * Setpriority might change our priority at the same moment. @@ -914,34 +1204,69 @@ if (increment > 40) increment = 40; - newprio = current->nice + increment; - if (newprio < -20) - newprio = -20; - if (newprio > 19) - newprio = 19; - current->nice = newprio; + nice = PRIO_TO_NICE(current->static_prio) + increment; + if (nice < -20) + nice = -20; + if (nice > 19) + nice = 19; + set_user_nice(current, nice); return 0; } #endif -static inline struct task_struct *find_process_by_pid(pid_t pid) +/** + * task_prio - return the priority value of a given task. + * @p: the task in question. + * + * This is the priority value as seen by users in /proc. + * RT tasks are offset by -200. Normal tasks are centered + * around 0, value goes from -16 to +15. + */ +int task_prio(task_t *p) +{ + return p->prio - MAX_USER_RT_PRIO; +} + +/** + * task_nice - return the nice value of a given task. + * @p: the task in question. + */ +int task_nice(task_t *p) +{ + return TASK_NICE(p); +} + +/** + * idle_cpu - is a given cpu idle currently? + * @cpu: the processor in question. + */ +inline int idle_cpu(int cpu) { - struct task_struct *tsk = current; + return cpu_curr(cpu) == cpu_rq(cpu)->idle; +} - if (pid) - tsk = find_task_by_pid(pid); - return tsk; +/** + * find_process_by_pid - find a process with a matching PID value. + * @pid: the pid in question. + */ +static inline task_t *find_process_by_pid(pid_t pid) +{ + return pid ? find_task_by_pid(pid) : current; } -static int setscheduler(pid_t pid, int policy, - struct sched_param *param) +/* + * setscheduler - change the scheduling policy and/or RT priority of a thread. + */ +static int setscheduler(pid_t pid, int policy, struct sched_param *param) { struct sched_param lp; - struct task_struct *p; - int retval; + int retval = -EINVAL; + prio_array_t *array; + unsigned long flags; + runqueue_t *rq; + task_t *p; - retval = -EINVAL; if (!param || pid < 0) goto out_nounlock; @@ -953,14 +1278,19 @@ * We play safe to avoid deadlocks. */ read_lock_irq(&tasklist_lock); - spin_lock(&runqueue_lock); p = find_process_by_pid(pid); retval = -ESRCH; if (!p) - goto out_unlock; - + goto out_unlock_tasklist; + + /* + * To be able to change p->policy safely, the apropriate + * runqueue lock must be held. + */ + rq = task_rq_lock(p, &flags); + if (policy < 0) policy = p->policy; else { @@ -969,56 +1299,78 @@ policy != SCHED_OTHER) goto out_unlock; } - + /* - * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid - * priority for SCHED_OTHER is 0. + * Valid priorities for SCHED_FIFO and SCHED_RR are + * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_OTHER is 0. 
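
The policy and priority checks in setscheduler() map directly onto the POSIX calls. A small demo; switching to SCHED_FIFO is expected to fail with EPERM for an unprivileged user, exactly as the CAP_SYS_NICE check above requires:

#include <stdio.h>
#include <sched.h>

int main(void)
{
        struct sched_param sp;
        int max = sched_get_priority_max(SCHED_FIFO);

        printf("SCHED_FIFO priority range: %d..%d\n",
               sched_get_priority_min(SCHED_FIFO), max);

        sp.sched_priority = max;
        if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
                perror("sched_setscheduler(SCHED_FIFO)");
                return 1;
        }
        printf("now running SCHED_FIFO at priority %d\n", sp.sched_priority);

        /* Drop back to the normal time-sharing class. */
        sp.sched_priority = 0;
        sched_setscheduler(0, SCHED_OTHER, &sp);
        return 0;
}
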
*/ retval = -EINVAL; - if (lp.sched_priority < 0 || lp.sched_priority > 99) + if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1) goto out_unlock; if ((policy == SCHED_OTHER) != (lp.sched_priority == 0)) goto out_unlock; retval = -EPERM; - if ((policy == SCHED_FIFO || policy == SCHED_RR) && + if ((policy == SCHED_FIFO || policy == SCHED_RR) && !capable(CAP_SYS_NICE)) goto out_unlock; if ((current->euid != p->euid) && (current->euid != p->uid) && !capable(CAP_SYS_NICE)) goto out_unlock; + array = p->array; + if (array) + deactivate_task(p, task_rq(p)); retval = 0; p->policy = policy; p->rt_priority = lp.sched_priority; - - current->need_resched = 1; + if (policy != SCHED_OTHER) + p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority; + else + p->prio = p->static_prio; + if (array) + activate_task(p, task_rq(p)); out_unlock: - spin_unlock(&runqueue_lock); + task_rq_unlock(rq, &flags); +out_unlock_tasklist: read_unlock_irq(&tasklist_lock); out_nounlock: return retval; } -asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, +/** + * sys_sched_setscheduler - set/change the scheduler policy and RT priority + * @pid: the pid in question. + * @policy: new policy + * @param: structure containing the new RT priority. + */ +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, struct sched_param *param) { return setscheduler(pid, policy, param); } +/** + * sys_sched_setparam - set/change the RT priority of a thread + * @pid: the pid in question. + * @param: structure containing the new RT priority. + */ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param) { return setscheduler(pid, -1, param); } +/** + * sys_sched_getscheduler - get the policy (scheduling class) of a thread + * @pid: the pid in question. + */ asmlinkage long sys_sched_getscheduler(pid_t pid) { - struct task_struct *p; - int retval; + int retval = -EINVAL; + task_t *p; - retval = -EINVAL; if (pid < 0) goto out_nounlock; @@ -1026,20 +1378,24 @@ read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (p) - retval = p->policy & ~SCHED_YIELD; + retval = p->policy; read_unlock(&tasklist_lock); out_nounlock: return retval; } +/** + * sys_sched_getparam - get the RT priority of a thread + * @pid: the pid in question. + * @param: sched_param structure containing the RT priority. + */ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param) { - struct task_struct *p; struct sched_param lp; - int retval; + int retval = -EINVAL; + task_t *p; - retval = -EINVAL; if (!param || pid < 0) goto out_nounlock; @@ -1064,44 +1420,125 @@ return retval; } -asmlinkage long sys_sched_yield(void) +/** + * sys_sched_setaffinity - set the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to the new cpu mask + */ +asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len, + unsigned long *user_mask_ptr) { + unsigned long new_mask; + int retval; + task_t *p; + + if (len < sizeof(new_mask)) + return -EINVAL; + + if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) + return -EFAULT; + + new_mask &= cpu_online_map; + if (!new_mask) + return -EINVAL; + + read_lock(&tasklist_lock); + + p = find_process_by_pid(pid); + if (!p) { + read_unlock(&tasklist_lock); + return -ESRCH; + } + /* - * Trick. sched_yield() first counts the number of truly - * 'pending' runnable processes, then returns if it's - * only the current processes. (This test does not have - * to be atomic.) 
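
sys_sched_setaffinity() and sys_sched_getaffinity() are new in this patch; the raw syscalls take a plain unsigned long bitmask, and getaffinity returns the mask length on success. The demo below uses the later glibc wrappers built around cpu_set_t, which hide that detail but exercise the same syscalls:

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>
#include <unistd.h>

int main(void)
{
        cpu_set_t mask;

        /* Restrict ourselves to CPU 0, then read the mask back. */
        CPU_ZERO(&mask);
        CPU_SET(0, &mask);
        if (sched_setaffinity(0, sizeof(mask), &mask) == -1) {
                perror("sched_setaffinity");
                return 1;
        }

        CPU_ZERO(&mask);
        if (sched_getaffinity(0, sizeof(mask), &mask) == -1) {
                perror("sched_getaffinity");
                return 1;
        }
        printf("allowed CPUs:");
        for (int cpu = 0; cpu < CPU_SETSIZE; cpu++)
                if (CPU_ISSET(cpu, &mask))
                        printf(" %d", cpu);
        printf("\n");
        return 0;
}
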
In threaded applications this optimization - * gets triggered quite often. + * It is not safe to call set_cpus_allowed with the + * tasklist_lock held. We will bump the task_struct's + * usage count and then drop tasklist_lock. */ + get_task_struct(p); + read_unlock(&tasklist_lock); - int nr_pending = nr_running; + retval = -EPERM; + if ((current->euid != p->euid) && (current->euid != p->uid) && + !capable(CAP_SYS_NICE)) + goto out_unlock; -#if CONFIG_SMP - int i; + retval = 0; + set_cpus_allowed(p, new_mask); - // Subtract non-idle processes running on other CPUs. - for (i = 0; i < smp_num_cpus; i++) { - int cpu = cpu_logical_map(i); - if (aligned_data[cpu].schedule_data.curr != idle_task(cpu)) - nr_pending--; - } -#else - // on UP this process is on the runqueue as well - nr_pending--; -#endif - if (nr_pending) { - /* - * This process can only be rescheduled by us, - * so this is safe without any locking. - */ - if (current->policy == SCHED_OTHER) - current->policy |= SCHED_YIELD; - current->need_resched = 1; - - spin_lock_irq(&runqueue_lock); - move_last_runqueue(current); - spin_unlock_irq(&runqueue_lock); +out_unlock: + free_task_struct(p); + return retval; +} + +/** + * sys_sched_getaffinity - get the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to hold the current cpu mask + */ +asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long *user_mask_ptr) +{ + unsigned int real_len; + unsigned long mask; + int retval; + task_t *p; + + real_len = sizeof(mask); + if (len < real_len) + return -EINVAL; + + read_lock(&tasklist_lock); + + retval = -ESRCH; + p = find_process_by_pid(pid); + if (!p) + goto out_unlock; + + retval = 0; + mask = p->cpus_allowed & cpu_online_map; + +out_unlock: + read_unlock(&tasklist_lock); + if (retval) + return retval; + if (copy_to_user(user_mask_ptr, &mask, real_len)) + return -EFAULT; + return real_len; +} + +/** + * sys_sched_yield - yield the current processor to other threads. + * + * this function yields the current CPU by moving the calling thread + * to the expired array. If there are no other threads running on this + * CPU then this function will return. + */ +asmlinkage long sys_sched_yield(void) +{ + runqueue_t *rq = this_rq_lock(); + prio_array_t *array = current->array; + + /* + * We implement yielding by moving the task into the expired + * queue. + * + * (special rule: RT tasks will just roundrobin in the active + * array.) + */ + if (likely(!rt_task(current))) { + dequeue_task(current, array); + enqueue_task(current, rq->expired); + } else { + list_del(¤t->run_list); + list_add_tail(¤t->run_list, array->queue + current->prio); } + spin_unlock(&rq->lock); + + schedule(); + return 0; } @@ -1115,15 +1552,29 @@ { set_current_state(TASK_RUNNING); sys_sched_yield(); - schedule(); } +/** + * _cond_resched - conditionally reschedule + * + * Helper function called if cond_resched inline decides we have + * exceeded the timeslice at this point. We give up the processor + * having made sure we will get it back + */ + void __cond_resched(void) { set_current_state(TASK_RUNNING); schedule(); } +/** + * sys_sched_get_priority_max - return maximum RT priority. + * @policy: scheduling class. + * + * this syscall returns the maximum rt_priority that can be used + * by a given scheduling class. 
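
With the new sys_sched_yield() a SCHED_OTHER caller is parked in the expired array, so it genuinely steps aside until the arrays switch, rather than just moving to the back of the old single runqueue; RT tasks only round-robin within the active array. A trivial way to poke at the call from userspace and get a feel for its cost:

#include <stdio.h>
#include <sched.h>
#include <sys/time.h>

int main(void)
{
        const int loops = 100000;
        struct timeval t0, t1;

        gettimeofday(&t0, NULL);
        for (int i = 0; i < loops; i++)
                sched_yield();
        gettimeofday(&t1, NULL);

        double us = (t1.tv_sec - t0.tv_sec) * 1e6 + (t1.tv_usec - t0.tv_usec);
        printf("%d sched_yield() calls: %.0f us total, %.2f us each\n",
               loops, us, us / loops);
        return 0;
}
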
+ */ asmlinkage long sys_sched_get_priority_max(int policy) { int ret = -EINVAL; @@ -1131,7 +1582,7 @@ switch (policy) { case SCHED_FIFO: case SCHED_RR: - ret = 99; + ret = MAX_USER_RT_PRIO-1; break; case SCHED_OTHER: ret = 0; @@ -1140,6 +1591,13 @@ return ret; } +/** + * sys_sched_get_priority_min - return minimum RT priority. + * @policy: scheduling class. + * + * this syscall returns the minimum rt_priority that can be used + * by a given scheduling class. + */ asmlinkage long sys_sched_get_priority_min(int policy) { int ret = -EINVAL; @@ -1155,11 +1613,19 @@ return ret; } +/** + * sys_sched_rr_get_interval - return the default timeslice of a process. + * @pid: pid of the process. + * @interval: userspace pointer to the timeslice value. + * + * this syscall writes the default timeslice value of a given process + * into the user-space timespec buffer. A value of '0' means infinity. + */ asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval) { - struct timespec t; - struct task_struct *p; int retval = -EINVAL; + struct timespec t; + task_t *p; if (pid < 0) goto out_nounlock; @@ -1168,8 +1634,8 @@ read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (p) - jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : NICE_TO_TICKS(p->nice), - &t); + jiffies_to_timespec(p->policy & SCHED_FIFO ? + 0 : task_timeslice(p), &t); read_unlock(&tasklist_lock); if (p) retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; @@ -1177,14 +1643,14 @@ return retval; } -static void show_task(struct task_struct * p) +static void show_task(task_t * p) { unsigned long free = 0; int state; static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" }; printk("%-13.13s ", p->comm); - state = p->state ? ffz(~p->state) + 1 : 0; + state = p->state ? __ffs(p->state) + 1 : 0; if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *)) printk(stat_nam[state]); else @@ -1225,7 +1691,7 @@ printk(" (NOTLB)\n"); { - extern void show_trace_task(struct task_struct *tsk); + extern void show_trace_task(task_t *tsk); show_trace_task(p); } } @@ -1247,7 +1713,7 @@ void show_state(void) { - struct task_struct *p; + task_t *p; #if (BITS_PER_LONG == 32) printk("\n" @@ -1270,128 +1736,241 @@ read_unlock(&tasklist_lock); } -/** - * reparent_to_init() - Reparent the calling kernel thread to the init task. - * - * If a kernel thread is launched as a result of a system call, or if - * it ever exits, it should generally reparent itself to init so that - * it is correctly cleaned up on exit. - * - * The various task state such as scheduling policy and priority may have - * been inherited fro a user process, so we reset them to sane values here. - * - * NOTE that reparent_to_init() gives the caller full capabilities. 
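
The priority ranges and the reported timeslice are easy to inspect from userspace:

#include <stdio.h>
#include <sched.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        printf("SCHED_OTHER priority %d..%d\n",
               sched_get_priority_min(SCHED_OTHER), sched_get_priority_max(SCHED_OTHER));
        printf("SCHED_FIFO  priority %d..%d\n",
               sched_get_priority_min(SCHED_FIFO), sched_get_priority_max(SCHED_FIFO));
        printf("SCHED_RR    priority %d..%d\n",
               sched_get_priority_min(SCHED_RR), sched_get_priority_max(SCHED_RR));

        /* With this patch the kernel reports task_timeslice(p) here. */
        if (sched_rr_get_interval(0, &ts) == 0)
                printf("default timeslice of this process: %ld.%09ld s\n",
                       (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
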
- */ -void reparent_to_init(void) +void __init init_idle(task_t *idle, int cpu) { - struct task_struct *this_task = current; + runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle)); + unsigned long flags; + + __save_flags(flags); + __cli(); + double_rq_lock(idle_rq, rq); + + idle_rq->curr = idle_rq->idle = idle; + deactivate_task(idle, rq); + idle->array = NULL; + idle->prio = MAX_PRIO; + idle->state = TASK_RUNNING; + set_task_cpu(idle, cpu); + double_rq_unlock(idle_rq, rq); + set_tsk_need_resched(idle); + __restore_flags(flags); +} - write_lock_irq(&tasklist_lock); +extern void init_timervecs(void); +extern void timer_bh(void); +extern void tqueue_bh(void); +extern void immediate_bh(void); - /* Reparent to init */ - REMOVE_LINKS(this_task); - this_task->p_pptr = child_reaper; - this_task->p_opptr = child_reaper; - SET_LINKS(this_task); +void __init sched_init(void) +{ + runqueue_t *rq; + int i, j, k; - /* Set the exit signal to SIGCHLD so we signal init on exit */ - this_task->exit_signal = SIGCHLD; + for (i = 0; i < NR_CPUS; i++) { + prio_array_t *array; - /* We also take the runqueue_lock while altering task fields - * which affect scheduling decisions */ - spin_lock(&runqueue_lock); + rq = cpu_rq(i); + rq->active = rq->arrays; + rq->expired = rq->arrays + 1; + spin_lock_init(&rq->lock); + INIT_LIST_HEAD(&rq->migration_queue); + + for (j = 0; j < 2; j++) { + array = rq->arrays + j; + for (k = 0; k < MAX_PRIO; k++) { + INIT_LIST_HEAD(array->queue + k); + __clear_bit(k, array->bitmap); + } + // delimiter for bitsearch + __set_bit(MAX_PRIO, array->bitmap); + } + } + /* + * We have to do a little magic to get the first + * process right in SMP mode. + */ + rq = this_rq(); + rq->curr = current; + rq->idle = current; + wake_up_process(current); - this_task->ptrace = 0; - this_task->nice = DEF_NICE; - this_task->policy = SCHED_OTHER; - /* cpus_allowed? */ - /* rt_priority? */ - /* signals? */ - this_task->cap_effective = CAP_INIT_EFF_SET; - this_task->cap_inheritable = CAP_INIT_INH_SET; - this_task->cap_permitted = CAP_FULL_SET; - this_task->keep_capabilities = 0; - memcpy(this_task->rlim, init_task.rlim, sizeof(*(this_task->rlim))); - this_task->user = INIT_USER; + init_timervecs(); + init_bh(TIMER_BH, timer_bh); + init_bh(TQUEUE_BH, tqueue_bh); + init_bh(IMMEDIATE_BH, immediate_bh); - spin_unlock(&runqueue_lock); - write_unlock_irq(&tasklist_lock); + /* + * The boot idle thread does lazy MMU switching as well: + */ + atomic_inc(&init_mm.mm_count); + enter_lazy_tlb(&init_mm, current, smp_processor_id()); } +#if CONFIG_SMP + /* - * Put all the gunge required to become a kernel thread without - * attached user resources in one place where it belongs. - */ + * This is how migration works: + * + * 1) we queue a migration_req_t structure in the source CPU's + * runqueue and wake up that CPU's migration thread. + * 2) we down() the locked semaphore => thread blocks. + * 3) migration thread wakes up (implicitly it forces the migrated + * thread off the CPU) + * 4) it gets the migration request and checks whether the migrated + * task is still in the wrong runqueue. + * 5) if it's in the wrong runqueue then the migration thread removes + * it and puts it into the right queue. + * 6) migration thread up()s the semaphore. + * 7) we wake up and the migration is done. + */ + +typedef struct { + struct list_head list; + task_t *task; + struct semaphore sem; +} migration_req_t; -void daemonize(void) +/* + * Change a given task's CPU affinity. 
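
The seven-step migration handshake described above can be modelled in userspace with a condition variable standing in for "wake up the migration thread" and a POSIX semaphore playing the role of req->sem:

#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>

struct migration_req {
        int task_id;
        int dest_cpu;
        sem_t done;                             /* plays the role of req->sem */
};

static struct migration_req *pending;           /* one-slot "migration queue" */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  queue_wake = PTHREAD_COND_INITIALIZER;

static void *migration_thread(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&queue_lock);
        while (!pending)                        /* wait for a request */
                pthread_cond_wait(&queue_wake, &queue_lock);
        printf("migration thread: moving task %d to CPU %d\n",
               pending->task_id, pending->dest_cpu);
        sem_post(&pending->done);               /* step 6: up() the semaphore */
        pending = NULL;
        pthread_mutex_unlock(&queue_lock);
        return NULL;
}

int main(void)
{
        pthread_t tid;
        struct migration_req req = { .task_id = 42, .dest_cpu = 1 };

        sem_init(&req.done, 0, 0);              /* step 2: a locked semaphore */
        pthread_create(&tid, NULL, migration_thread, NULL);

        pthread_mutex_lock(&queue_lock);        /* steps 1-2: queue request, wake thread */
        pending = &req;
        pthread_cond_signal(&queue_wake);
        pthread_mutex_unlock(&queue_lock);

        sem_wait(&req.done);                    /* step 7: wake up, migration done */
        printf("requester: task %d migrated\n", req.task_id);
        pthread_join(tid, NULL);
        return 0;
}
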
Migrate the process to a + * proper CPU and schedule it away if the CPU it's executing on + * is removed from the allowed bitmask. + * + * NOTE: the caller must have a valid reference to the task, the + * task must not exit() & deallocate itself prematurely. The + * call is not atomic; no spinlocks may be held. + */ +void set_cpus_allowed(task_t *p, unsigned long new_mask) { - struct fs_struct *fs; + unsigned long flags; + migration_req_t req; + runqueue_t *rq; + new_mask &= cpu_online_map; + if (!new_mask) + BUG(); + rq = task_rq_lock(p, &flags); + p->cpus_allowed = new_mask; /* - * If we were started as result of loading a module, close all of the - * user space pages. We don't need them, and if we didn't close them - * they would be locked into memory. + * Can the task run on the task's current CPU? If not then + * migrate the process off to a proper CPU. */ - exit_mm(current); - - current->session = 1; - current->pgrp = 1; - current->tty = NULL; - - /* Become as one with the init task */ + if (new_mask & (1UL << task_cpu(p))) { + task_rq_unlock(rq, &flags); + goto out; + } + /* + * If the task is not on a runqueue (and not running), then + * it is sufficient to simply update the task's cpu field. + */ + if (!p->array && (p != rq->curr)) { + set_task_cpu(p, __ffs(p->cpus_allowed)); + task_rq_unlock(rq, &flags); + goto out; + } + init_MUTEX_LOCKED(&req.sem); + req.task = p; + list_add(&req.list, &rq->migration_queue); + task_rq_unlock(rq, &flags); + wake_up_process(rq->migration_thread); - exit_fs(current); /* current->fs->count--; */ - fs = init_task.fs; - current->fs = fs; - atomic_inc(&fs->count); - exit_files(current); - current->files = init_task.files; - atomic_inc(¤t->files->count); + down(&req.sem); +out: + ; } -extern unsigned long wait_init_idle; - -void __init init_idle(void) +/* + * migration_thread - this is a highprio system thread that performs + * thread migration by 'pulling' threads into the target runqueue. + */ +static int migration_thread(void * bind_cpu) { - struct schedule_data * sched_data; - sched_data = &aligned_data[smp_processor_id()].schedule_data; + struct sched_param param = { sched_priority: MAX_RT_PRIO-1 }; + int cpu = cpu_logical_map((int) (long) bind_cpu); + runqueue_t *rq; + int ret; - if (current != &init_task && task_on_runqueue(current)) { - printk("UGH! (%d:%d) was on the runqueue, removing.\n", - smp_processor_id(), current->pid); - del_from_runqueue(current); + daemonize(); + sigfillset(¤t->blocked); + set_fs(KERNEL_DS); + /* + * The first migration thread is started on CPU #0. This one can + * migrate the other migration threads to their destination CPUs. + */ + if (cpu != 0) { + while (!cpu_rq(cpu_logical_map(0))->migration_thread) + yield(); + set_cpus_allowed(current, 1UL << cpu); } - sched_data->curr = current; - sched_data->last_schedule = get_cycles(); - clear_bit(current->processor, &wait_init_idle); -} + printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id()); + ret = setscheduler(0, SCHED_FIFO, ¶m); -extern void init_timervecs (void); + rq = this_rq(); + rq->migration_thread = current; -void __init sched_init(void) -{ - /* - * We have to do a little magic to get the first - * process right in SMP mode. 
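
set_cpus_allowed() and the migration thread both end up taking two runqueue locks (task_rq_lock() here, double_rq_lock() in the thread's loop). The bodies of double_rq_lock()/double_lock_balance() are not in this hunk; the sketch below only shows the usual deadlock-avoidance rule such helpers follow, namely acquiring the two locks in one global order, here by ascending address:

#include <stdio.h>
#include <pthread.h>

struct runq { pthread_mutex_t lock; int nr_running; };

/* Always lock the lower-addressed runqueue first to avoid ABBA deadlock. */
static void double_rq_lock(struct runq *a, struct runq *b)
{
        if (a == b) {
                pthread_mutex_lock(&a->lock);
        } else if (a < b) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}

static void double_rq_unlock(struct runq *a, struct runq *b)
{
        pthread_mutex_unlock(&a->lock);
        if (a != b)
                pthread_mutex_unlock(&b->lock);
}

int main(void)
{
        struct runq rq0 = { PTHREAD_MUTEX_INITIALIZER, 3 };
        struct runq rq1 = { PTHREAD_MUTEX_INITIALIZER, 1 };

        double_rq_lock(&rq0, &rq1);     /* same order no matter who calls it */
        rq0.nr_running--;               /* "move" one task across */
        rq1.nr_running++;
        double_rq_unlock(&rq0, &rq1);
        printf("rq0=%d rq1=%d\n", rq0.nr_running, rq1.nr_running);
        return 0;
}
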
- */ - int cpu = smp_processor_id(); - int nr; + sprintf(current->comm, "migration_CPU%d", smp_processor_id()); - init_task.processor = cpu; + for (;;) { + runqueue_t *rq_src, *rq_dest; + struct list_head *head; + int cpu_src, cpu_dest; + migration_req_t *req; + unsigned long flags; + task_t *p; + + spin_lock_irqsave(&rq->lock, flags); + head = &rq->migration_queue; + current->state = TASK_INTERRUPTIBLE; + if (list_empty(head)) { + spin_unlock_irqrestore(&rq->lock, flags); + schedule(); + continue; + } + req = list_entry(head->next, migration_req_t, list); + list_del_init(head->next); + spin_unlock_irqrestore(&rq->lock, flags); + + p = req->task; + cpu_dest = __ffs(p->cpus_allowed); + rq_dest = cpu_rq(cpu_dest); +repeat: + cpu_src = task_cpu(p); + rq_src = cpu_rq(cpu_src); + + local_irq_save(flags); + double_rq_lock(rq_src, rq_dest); + if (task_cpu(p) != cpu_src) { + double_rq_unlock(rq_src, rq_dest); + local_irq_restore(flags); + goto repeat; + } + if (rq_src == rq) { + set_task_cpu(p, cpu_dest); + if (p->array) { + deactivate_task(p, rq_src); + activate_task(p, rq_dest); + } + } + double_rq_unlock(rq_src, rq_dest); + local_irq_restore(flags); - for(nr = 0; nr < PIDHASH_SZ; nr++) - pidhash[nr] = NULL; + up(&req->sem); + } +} - init_timervecs(); +void __init migration_init(void) +{ + int cpu; - init_bh(TIMER_BH, timer_bh); - init_bh(TQUEUE_BH, tqueue_bh); - init_bh(IMMEDIATE_BH, immediate_bh); + current->cpus_allowed = 1UL << cpu_logical_map(0); + for (cpu = 0; cpu < smp_num_cpus; cpu++) + if (kernel_thread(migration_thread, (void *) (long) cpu, + CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0) + BUG(); + current->cpus_allowed = -1L; - /* - * The boot idle thread does lazy MMU switching as well: - */ - atomic_inc(&init_mm.mm_count); - enter_lazy_tlb(&init_mm, current, cpu); + for (cpu = 0; cpu < smp_num_cpus; cpu++) + while (!cpu_rq(cpu_logical_map(cpu))->migration_thread) + schedule_timeout(2); } +#endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/signal.c linux.22-ac2/kernel/signal.c --- linux.vanilla/kernel/signal.c 2003-06-14 00:11:42.000000000 +0100 +++ linux.22-ac2/kernel/signal.c 2003-06-29 16:09:23.000000000 +0100 @@ -492,7 +492,7 @@ * No need to set need_resched since signal event passing * goes through ->blocked */ -static inline void signal_wake_up(struct task_struct *t) +inline void signal_wake_up(struct task_struct *t) { t->sigpending = 1; @@ -507,12 +507,9 @@ * process of changing - but no harm is done by that * other than doing an extra (lightweight) IPI interrupt. 
*/ - spin_lock(&runqueue_lock); - if (task_has_cpu(t) && t->processor != smp_processor_id()) - smp_send_reschedule(t->processor); - spin_unlock(&runqueue_lock); -#endif /* CONFIG_SMP */ - + if ((t->state == TASK_RUNNING) && (t->cpu != cpu())) + kick_if_running(t); +#endif if (t->state & TASK_INTERRUPTIBLE) { wake_up_process(t); return; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/softirq.c linux.22-ac2/kernel/softirq.c --- linux.vanilla/kernel/softirq.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/kernel/softirq.c 2003-06-29 16:09:23.000000000 +0100 @@ -364,15 +364,15 @@ int cpu = cpu_logical_map(bind_cpu); daemonize(); - current->nice = 19; + set_user_nice(current, 19); sigfillset(¤t->blocked); /* Migrate to the right CPU */ - current->cpus_allowed = 1UL << cpu; - while (smp_processor_id() != cpu) - schedule(); + set_cpus_allowed(current, 1UL << cpu); + if (cpu() != cpu) + BUG(); - sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu); + sprintf(current->comm, "ksoftirqd/%d", bind_cpu); __set_current_state(TASK_INTERRUPTIBLE); mb(); @@ -395,7 +395,7 @@ } } -static __init int spawn_ksoftirqd(void) +__init int spawn_ksoftirqd(void) { int cpu; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/sys.c linux.22-ac2/kernel/sys.c --- linux.vanilla/kernel/sys.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/kernel/sys.c 2003-07-14 12:54:13.000000000 +0100 @@ -4,6 +4,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ +#include #include #include #include @@ -239,10 +240,10 @@ } if (error == -ESRCH) error = 0; - if (niceval < p->nice && !capable(CAP_SYS_NICE)) + if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) error = -EACCES; else - p->nice = niceval; + set_user_nice(p, niceval); } read_unlock(&tasklist_lock); @@ -268,7 +269,7 @@ long niceval; if (!proc_sel(p, which, who)) continue; - niceval = 20 - p->nice; + niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; } @@ -509,9 +510,10 @@ } } -static int set_user(uid_t new_ruid, int dumpclear) +int set_user(uid_t new_ruid, int dumpclear) { struct user_struct *new_user, *old_user; + struct task_struct *this_task = current; /* What if a process setreuid()'s and this brings the * new uid over his NPROC rlimit? 
We can check this now @@ -521,17 +523,16 @@ new_user = alloc_uid(new_ruid); if (!new_user) return -EAGAIN; - old_user = current->user; - atomic_dec(&old_user->processes); + old_user = this_task->user; atomic_inc(&new_user->processes); + atomic_dec(&old_user->processes); - if(dumpclear) - { - current->mm->dumpable = 0; + if (dumpclear && this_task->mm) { + this_task->mm->dumpable = 0; wmb(); } - current->uid = new_ruid; - current->user = new_user; + this_task->uid = new_ruid; + this_task->user = new_user; free_uid(old_user); return 0; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/sysctl.c linux.22-ac2/kernel/sysctl.c --- linux.vanilla/kernel/sysctl.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/kernel/sysctl.c 2003-07-14 12:54:22.000000000 +0100 @@ -44,11 +44,13 @@ extern int C_A_D; extern int bdf_prm[], bdflush_min[], bdflush_max[]; extern int sysctl_overcommit_memory; +extern int sysctl_overcommit_ratio; extern int max_threads; extern atomic_t nr_queued_signals; extern int max_queued_signals; extern int sysrq_enabled; extern int core_uses_pid; +extern int core_setuid_ok; extern char core_pattern[]; extern int cad_pid; @@ -82,6 +84,11 @@ extern int stop_a_enabled; #endif +#ifdef __hppa__ +extern int pwrsw_enabled; +extern int unaligned_enabled; +#endif + #ifdef CONFIG_ARCH_S390 #ifdef CONFIG_MATHEMU extern int sysctl_ieee_emulation_warnings; @@ -176,6 +183,8 @@ 0644, NULL, &proc_dointvec}, {KERN_CORE_USES_PID, "core_uses_pid", &core_uses_pid, sizeof(int), 0644, NULL, &proc_dointvec}, + {KERN_CORE_USES_PID, "core_setuid_ok", &core_setuid_ok, sizeof(int), + 0644, NULL, &proc_dointvec}, {KERN_CORE_PATTERN, "core_pattern", core_pattern, 64, 0644, NULL, &proc_dostring, &sysctl_string}, {KERN_TAINTED, "tainted", &tainted, sizeof(int), @@ -192,6 +201,12 @@ {KERN_SPARC_STOP_A, "stop-a", &stop_a_enabled, sizeof (int), 0644, NULL, &proc_dointvec}, #endif +#ifdef __hppa__ + {KERN_HPPA_PWRSW, "soft-power", &pwrsw_enabled, sizeof (int), + 0644, NULL, &proc_dointvec}, + {KERN_HPPA_UNALIGNED, "unaligned-trap", &unaligned_enabled, sizeof (int), + 0644, NULL, &proc_dointvec}, +#endif #ifdef CONFIG_PPC32 {KERN_PPC_ZEROPAGED, "zero-paged", &zero_paged_on, sizeof(int), 0644, NULL, &proc_dointvec}, @@ -278,6 +293,8 @@ &bdflush_min, &bdflush_max}, {VM_OVERCOMMIT_MEMORY, "overcommit_memory", &sysctl_overcommit_memory, sizeof(sysctl_overcommit_memory), 0644, NULL, &proc_dointvec}, + {VM_OVERCOMMIT_RATIO, "overcommit_ratio", &sysctl_overcommit_ratio, + sizeof(sysctl_overcommit_ratio), 0644, NULL, &proc_dointvec}, {VM_PAGERDAEMON, "kswapd", &pager_daemon, sizeof(pager_daemon_t), 0644, NULL, &proc_dointvec}, {VM_PGT_CACHE, "pagetable_cache", diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/kernel/timer.c linux.22-ac2/kernel/timer.c --- linux.vanilla/kernel/timer.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/kernel/timer.c 2003-06-29 16:09:23.000000000 +0100 @@ -25,6 +25,8 @@ #include +struct kernel_stat kstat; + /* * Timekeeping variables */ @@ -598,25 +600,7 @@ int cpu = smp_processor_id(), system = user_tick ^ 1; update_one_process(p, user_tick, system, cpu); - if (p->pid) { - if (--p->counter <= 0) { - p->counter = 0; - /* - * SCHED_FIFO is priority preemption, so this is - * not the place to decide whether to reschedule a - * SCHED_FIFO task or not - Bhavesh Davda - */ - if (p->policy != SCHED_FIFO) { - p->need_resched = 1; - } - } - if (p->nice > 0) - kstat.per_cpu_nice[cpu] += user_tick; - else - kstat.per_cpu_user[cpu] += 
user_tick; - kstat.per_cpu_system[cpu] += system; - } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1) - kstat.per_cpu_system[cpu] += system; + scheduler_tick(user_tick, system); } /* @@ -624,17 +608,7 @@ */ static unsigned long count_active_tasks(void) { - struct task_struct *p; - unsigned long nr = 0; - - read_lock(&tasklist_lock); - for_each_task(p) { - if ((p->state == TASK_RUNNING || - (p->state & TASK_UNINTERRUPTIBLE))) - nr += FIXED_1; - } - read_unlock(&tasklist_lock); - return nr; + return (nr_running() + nr_uninterruptible()) * FIXED_1; } /* @@ -827,6 +801,89 @@ #endif +static void process_timeout(unsigned long __data) +{ + wake_up_process((task_t *)__data); +} + +/** + * schedule_timeout - sleep until timeout + * @timeout: timeout value in jiffies + * + * Make the current task sleep until @timeout jiffies have + * elapsed. The routine will return immediately unless + * the current task state has been set (see set_current_state()). + * + * You can set the task state as follows - + * + * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to + * pass before the routine returns. The routine will return 0 + * + * %TASK_INTERRUPTIBLE - the routine may return early if a signal is + * delivered to the current task. In this case the remaining time + * in jiffies will be returned, or 0 if the timer expired in time + * + * The current task state is guaranteed to be TASK_RUNNING when this + * routine returns. + * + * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule + * the CPU away without a bound on the timeout. In this case the return + * value will be %MAX_SCHEDULE_TIMEOUT. + * + * In all cases the return value is guaranteed to be non-negative. + */ +signed long schedule_timeout(signed long timeout) +{ + struct timer_list timer; + unsigned long expire; + + switch (timeout) + { + case MAX_SCHEDULE_TIMEOUT: + /* + * These two special cases are useful to be comfortable + * in the caller. Nothing more. We could take + * MAX_SCHEDULE_TIMEOUT from one of the negative value + * but I' d like to return a valid offset (>=0) to allow + * the caller to do everything it want with the retval. + */ + schedule(); + goto out; + default: + /* + * Another bit of PARANOID. Note that the retval will be + * 0 since no piece of kernel is supposed to do a check + * for a negative retval of schedule_timeout() (since it + * should never happens anyway). You just have the printk() + * that will tell you if something is gone wrong and where. + */ + if (timeout < 0) + { + printk(KERN_ERR "schedule_timeout: wrong timeout " + "value %lx from %p\n", timeout, + __builtin_return_address(0)); + current->state = TASK_RUNNING; + goto out; + } + } + + expire = timeout + jiffies; + + init_timer(&timer); + timer.expires = expire; + timer.data = (unsigned long) current; + timer.function = process_timeout; + + add_timer(&timer); + schedule(); + del_timer_sync(&timer); + + timeout = expire - jiffies; + + out: + return timeout < 0 ? 
0 : timeout; +} + /* Thread ID - the internal kernel "pid" */ asmlinkage long sys_gettid(void) { @@ -873,4 +930,3 @@ } return 0; } - diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/lib/Config.in linux.22-ac2/lib/Config.in --- linux.vanilla/lib/Config.in 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/lib/Config.in 2003-08-08 15:15:11.000000000 +0100 @@ -41,4 +41,9 @@ fi fi +if [ "$CONFIG_EXPERIMENTAL" = "y" -a \ + "$CONFIG_HOTPLUG" = "y" ]; then + tristate 'Hotplug firmware loading support (EXPERIMENTAL)' CONFIG_FW_LOADER +fi + endmenu diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/lib/firmware_class.c linux.22-ac2/lib/firmware_class.c --- linux.vanilla/lib/firmware_class.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/lib/firmware_class.c 2003-07-31 14:09:15.000000000 +0100 @@ -0,0 +1,557 @@ +/* + * firmware_class.c - Multi purpose firmware loading support + * + * Copyright (c) 2003 Manuel Estrada Sainz + * + * Please see Documentation/firmware_class/ for more information. + * + */ +/* + * Based on kernel/kmod.c and drivers/usb/usb.c + */ +/* + kernel/kmod.c + Kirk Petersen + + Reorganized not to be a daemon by Adam Richter, with guidance + from Greg Zornetzer. + + Modified to avoid chroot and file sharing problems. + Mikael Pettersson + + Limit the concurrent number of kmod modprobes to catch loops from + "modprobe needs a service that is in a module". + Keith Owens December 1999 + + Unblock all signals when we exec a usermode process. + Shuu Yamaguchi December 2000 +*/ +/* + * drivers/usb/usb.c + * + * (C) Copyright Linus Torvalds 1999 + * (C) Copyright Johannes Erdfelt 1999-2001 + * (C) Copyright Andreas Gal 1999 + * (C) Copyright Gregory P. Smith 1999 + * (C) Copyright Deti Fliegl 1999 (new USB architecture) + * (C) Copyright Randy Dunlap 2000 + * (C) Copyright David Brownell 2000 (kernel hotplug, usb_device_id) + * (C) Copyright Yggdrasil Computing, Inc. 2000 + * (usb_device_id matching changes by Adam J. Richter) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "linux/firmware.h" + +MODULE_AUTHOR("Manuel Estrada Sainz "); +MODULE_DESCRIPTION("Multi purpose firmware loading support"); +MODULE_LICENSE("GPL"); + +#define err(format, arg...) \ + printk(KERN_ERR "%s:%s: " format "\n",__FILE__, __FUNCTION__ , ## arg) +#define warn(format, arg...) \ + printk(KERN_WARNING "%s:%s: " format "\n",__FILE__, __FUNCTION__ , ## arg) +#define dbg(format, arg...) 
\ + printk(KERN_DEBUG "%s:%s: " format "\n",__FILE__, __FUNCTION__ , ## arg) + +static int loading_timeout = 10; /* In seconds */ +static struct proc_dir_entry *proc_dir_timeout; +static struct proc_dir_entry *proc_dir; + +static int +call_helper(char *verb, const char *name, const char *device) +{ + char *argv[3], **envp, *buf, *scratch; + int i = 0; + + int retval = 0; + + if (!hotplug_path[0]) + return -ENOENT; + if (in_interrupt()) { + err("in_interrupt"); + return -EFAULT; + } + if (!current->fs->root) { + warn("call_policy %s -- no FS yet", verb); + return -EPERM; + } + + if (!(envp = (char **) kmalloc(20 * sizeof (char *), GFP_KERNEL))) { + err("unable to allocate envp"); + return -ENOMEM; + } + if (!(buf = kmalloc(256, GFP_KERNEL))) { + kfree(envp); + err("unable to allocate buf"); + return -ENOMEM; + } + + /* only one standardized param to hotplug command: type */ + argv[0] = hotplug_path; + argv[1] = "firmware"; + argv[2] = 0; + + /* minimal command environment */ + envp[i++] = "HOME=/"; + envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + +#ifdef DEBUG + /* hint that policy agent should enter no-stdout debug mode */ + envp[i++] = "DEBUG=kernel"; +#endif + scratch = buf; + + if (device) { + envp[i++] = scratch; + scratch += snprintf(scratch, FIRMWARE_NAME_MAX+25, + "DEVPATH=/driver/firmware/%s", device) + 1; + } + + envp[i++] = scratch; + scratch += sprintf(scratch, "ACTION=%s", verb) + 1; + + envp[i++] = scratch; + scratch += snprintf(scratch, FIRMWARE_NAME_MAX, + "FIRMWARE=%s", name) + 1; + + envp[i++] = 0; + + dbg("firmware: %s %s %s", argv[0], argv[1], verb); + + retval = call_usermodehelper(argv[0], argv, envp); + if (retval) { + printk("call_usermodehelper return %d\n", retval); + } + + kfree(buf); + kfree(envp); + return retval; +} + +struct firmware_priv { + struct completion completion; + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *attr_data; + struct proc_dir_entry *attr_loading; + struct firmware *fw; + int loading; + int abort; + int alloc_size; + struct timer_list timeout; +}; + +static int +firmware_timeout_show(char *buf, char **start, off_t off, + int count, int *eof, void *data) +{ + return sprintf(buf, "%d\n", loading_timeout); +} + +/** + * firmware_timeout_store: + * Description: + * Sets the number of seconds to wait for the firmware. Once + * this expires an error will be return to the driver and no + * firmware will be provided. + * + * Note: zero means 'wait for ever' + * + **/ +static int +firmware_timeout_store(struct file *file, const char *buf, + unsigned long count, void *data) +{ + loading_timeout = simple_strtol(buf, NULL, 10); + return count; +} + +static ssize_t +firmware_loading_show(char *buf, char **start, off_t off, + int count, int *eof, void *data) +{ + struct firmware_priv *fw_priv = data; + return sprintf(buf, "%d\n", fw_priv->loading); +} + +/** + * firmware_loading_store: - loading control file + * Description: + * The relevant values are: + * + * 1: Start a load, discarding any previous partial load. + * 0: Conclude the load and handle the data to the driver code. + * -1: Conclude the load with an error and discard any written data. 
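
The loading/data files described above are what the userspace half of the protocol drives. A minimal sketch of a /sbin/hotplug "firmware" agent: announce the upload with "1", stream the image into data, then conclude with "0" (or abort with "-1"). DEVPATH and FIRMWARE come from the environment set up by call_helper() above; /lib/firmware as the image directory is only an example location:

#include <stdio.h>
#include <stdlib.h>

static int echo_to(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        const char *devpath = getenv("DEVPATH");        /* /driver/firmware/<device> */
        const char *fw_name = getenv("FIRMWARE");
        char loading[512], data[512], image[512], buf[4096];
        size_t n;
        FILE *src, *dst;

        if (!devpath || !fw_name)
                return 1;
        snprintf(loading, sizeof(loading), "/proc%s/loading", devpath);
        snprintf(data, sizeof(data), "/proc%s/data", devpath);
        snprintf(image, sizeof(image), "/lib/firmware/%s", fw_name);

        src = fopen(image, "rb");
        if (!src || echo_to(loading, "1")) {
                echo_to(loading, "-1");                 /* tell the kernel we failed */
                return 1;
        }
        dst = fopen(data, "wb");
        if (!dst) {
                echo_to(loading, "-1");
                return 1;
        }
        while ((n = fread(buf, 1, sizeof(buf), src)) > 0)
                fwrite(buf, 1, n, dst);
        fclose(src);
        fclose(dst);
        return echo_to(loading, "0");                   /* hand the image to the driver */
}
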
+ **/ +static ssize_t +firmware_loading_store(struct file *file, const char *buf, + unsigned long count, void *data) +{ + struct firmware_priv *fw_priv = data; + int prev_loading = fw_priv->loading; + + fw_priv->loading = simple_strtol(buf, NULL, 10); + + switch (fw_priv->loading) { + case -1: + fw_priv->abort = 1; + wmb(); + complete(&fw_priv->completion); + break; + case 1: + kfree(fw_priv->fw->data); + fw_priv->fw->data = NULL; + fw_priv->fw->size = 0; + fw_priv->alloc_size = 0; + break; + case 0: + if (prev_loading == 1) + complete(&fw_priv->completion); + break; + } + + return count; +} + +static ssize_t +firmware_data_read(char *buffer, char **start, off_t offset, + int count, int *eof, void *data) +{ + struct firmware_priv *fw_priv = data; + struct firmware *fw = fw_priv->fw; + + if (offset > fw->size) + return 0; + if (offset + count > fw->size) + count = fw->size - offset; + + memcpy(buffer, fw->data + offset, count); + *start = (void*)count; + return count; +} +static int +fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) +{ + u8 *new_data; + int new_size; + + if (min_size <= fw_priv->alloc_size) + return 0; + if((min_size % PAGE_SIZE) == 0) + new_size = min_size; + else + new_size = (min_size + PAGE_SIZE) & PAGE_MASK; + new_data = vmalloc(new_size); + if (!new_data) { + printk(KERN_ERR "%s: unable to alloc buffer\n", __FUNCTION__); + /* Make sure that we don't keep incomplete data */ + fw_priv->abort = 1; + return -ENOMEM; + } + fw_priv->alloc_size = new_size; + if (fw_priv->fw->data) { + memcpy(new_data, fw_priv->fw->data, fw_priv->fw->size); + vfree(fw_priv->fw->data); + } + fw_priv->fw->data = new_data; + BUG_ON(min_size > fw_priv->alloc_size); + return 0; +} + +/** + * firmware_data_write: + * + * Description: + * + * Data written to the 'data' attribute will be later handled to + * the driver as a firmware image. 
+ **/ +static ssize_t +firmware_data_write(struct file *file, const char *buffer, + unsigned long count, void *data) +{ + struct firmware_priv *fw_priv = data; + struct firmware *fw = fw_priv->fw; + int offset = file->f_pos; + int retval; + + retval = fw_realloc_buffer(fw_priv, offset + count); + if (retval) { + printk("%s: retval:%d\n", __FUNCTION__, retval); + return retval; + } + + memcpy(fw->data + offset, buffer, count); + + fw->size = max_t(size_t, offset + count, fw->size); + file->f_pos += count; + return count; +} + +static void +firmware_class_timeout(u_long data) +{ + struct firmware_priv *fw_priv = (struct firmware_priv *) data; + fw_priv->abort = 1; + wmb(); + complete(&fw_priv->completion); +} +static int +fw_setup_class_device(struct firmware_priv **fw_priv_p, + const char *fw_name, const char *device) +{ + int retval; + struct firmware_priv *fw_priv = kmalloc(sizeof (struct firmware_priv), + GFP_KERNEL); + *fw_priv_p = fw_priv; + if (!fw_priv) { + retval = -ENOMEM; + goto out; + } + memset(fw_priv, 0, sizeof (*fw_priv)); + + init_completion(&fw_priv->completion); + + fw_priv->timeout.function = firmware_class_timeout; + fw_priv->timeout.data = (u_long) fw_priv; + init_timer(&fw_priv->timeout); + + retval = -EAGAIN; + fw_priv->proc_dir = create_proc_entry(device, 0644 | S_IFDIR, proc_dir); + if (!fw_priv->proc_dir) + goto err_free_fw_priv; + + fw_priv->attr_data = create_proc_entry("data", 0644 | S_IFREG, + fw_priv->proc_dir); + if (!fw_priv->attr_data) + goto err_remove_dir; + + fw_priv->attr_data->read_proc = firmware_data_read; + fw_priv->attr_data->write_proc = firmware_data_write; + fw_priv->attr_data->data = fw_priv; + + fw_priv->attr_loading = create_proc_entry("loading", 0644 | S_IFREG, + fw_priv->proc_dir); + if (!fw_priv->attr_loading) + goto err_remove_data; + + fw_priv->attr_loading->read_proc = firmware_loading_show; + fw_priv->attr_loading->write_proc = firmware_loading_store; + fw_priv->attr_loading->data = fw_priv; + + retval = 0; + fw_priv->fw = kmalloc(sizeof (struct firmware), GFP_KERNEL); + if (!fw_priv->fw) { + printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n", + __FUNCTION__); + retval = -ENOMEM; + goto err_remove_loading; + } + memset(fw_priv->fw, 0, sizeof (*fw_priv->fw)); + + goto out; + +err_remove_loading: + remove_proc_entry("loading", fw_priv->proc_dir); +err_remove_data: + remove_proc_entry("data", fw_priv->proc_dir); +err_remove_dir: + remove_proc_entry(device, proc_dir); +err_free_fw_priv: + kfree(fw_priv); +out: + return retval; +} +static void +fw_remove_class_device(struct firmware_priv *fw_priv) +{ + remove_proc_entry("loading", fw_priv->proc_dir); + remove_proc_entry("data", fw_priv->proc_dir); + remove_proc_entry(fw_priv->proc_dir->name, proc_dir); +} + +/** + * request_firmware: - request firmware to hotplug and wait for it + * Description: + * @firmware will be used to return a firmware image by the name + * of @name for device @device. + * + * Should be called from user context where sleeping is allowed. + * + * @name will be use as $FIRMWARE in the hotplug environment and + * should be distinctive enough not to be confused with any other + * firmware image for this or any other device. 
+ **/ +int +request_firmware(const struct firmware **firmware, const char *name, + const char *device) +{ + struct firmware_priv *fw_priv; + int retval; + + if (!firmware) { + retval = -EINVAL; + goto out; + } + *firmware = NULL; + + retval = fw_setup_class_device(&fw_priv, name, device); + if (retval) + goto out; + + retval = call_helper("add", name, device); + if (retval) + goto out; + if (loading_timeout) { + fw_priv->timeout.expires = jiffies + loading_timeout * HZ; + add_timer(&fw_priv->timeout); + } + + wait_for_completion(&fw_priv->completion); + + del_timer(&fw_priv->timeout); + fw_remove_class_device(fw_priv); + + if (fw_priv->fw->size && !fw_priv->abort) { + *firmware = fw_priv->fw; + } else { + retval = -ENOENT; + vfree(fw_priv->fw->data); + kfree(fw_priv->fw); + } +out: + kfree(fw_priv); + return retval; +} + +void +release_firmware(const struct firmware *fw) +{ + if (fw) { + vfree(fw->data); + kfree(fw); + } +} + +/** + * register_firmware: - provide a firmware image for later usage + * + * Description: + * Make sure that @data will be available by requesting firmware @name. + * + * Note: This will not be possible until some kind of persistence + * is available. + **/ +void +register_firmware(const char *name, const u8 *data, size_t size) +{ + /* This is meaningless without firmware caching, so until we + * decide if firmware caching is reasonable just leave it as a + * noop */ +} + +/* Async support */ +struct firmware_work { + struct tq_struct work; + struct module *module; + const char *name; + const char *device; + void *context; + void (*cont)(const struct firmware *fw, void *context); +}; + +static void +request_firmware_work_func(void *arg) +{ + struct firmware_work *fw_work = arg; + const struct firmware *fw; + if (!arg) + return; + request_firmware(&fw, fw_work->name, fw_work->device); + fw_work->cont(fw, fw_work->context); + release_firmware(fw); + __MOD_DEC_USE_COUNT(fw_work->module); + kfree(fw_work); +} + +/** + * request_firmware_nowait: + * + * Description: + * Asynchronous variant of request_firmware() for contexts where + * it is not possible to sleep. + * + * @cont will be called asynchronously when the firmware request is over. + * + * @context will be passed over to @cont. + * + * @fw may be %NULL if firmware request fails. 
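
For completeness, this is roughly how a driver consumes the interface. It is a sketch only, with a hypothetical device and firmware name, and it builds only inside a tree carrying this patch:

/* Hypothetical consumer of the API added above. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>

static int example_load_firmware_sync(void)
{
        const struct firmware *fw;
        int err;

        /* Sleeps until the hotplug agent uploads the image or the timeout hits. */
        err = request_firmware(&fw, "example-fw.bin", "example0");
        if (err)
                return err;
        printk(KERN_INFO "example0: got %u bytes of firmware\n",
               (unsigned int)fw->size);
        /* ... program the device from fw->data here ... */
        release_firmware(fw);
        return 0;
}

static void example_fw_cont(const struct firmware *fw, void *context)
{
        if (!fw) {
                printk(KERN_ERR "example0: firmware load failed\n");
                return;
        }
        /* ... program the device from fw->data / fw->size; the framework
         *     releases fw after this callback returns ... */
}

static int example_load_firmware_async(void)
{
        /* Contexts that cannot sleep use the nowait variant; the callback
         * runs later from the task queue. */
        return request_firmware_nowait(THIS_MODULE, "example-fw.bin", "example0",
                                       NULL, example_fw_cont);
}
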
+ * + **/ +int +request_firmware_nowait( + struct module *module, + const char *name, const char *device, void *context, + void (*cont)(const struct firmware *fw, void *context)) +{ + struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work), + GFP_ATOMIC); + if (!fw_work) + return -ENOMEM; + if (!try_inc_mod_count(module)) { + kfree(fw_work); + return -EFAULT; + } + + *fw_work = (struct firmware_work) { + .module = module, + .name = name, + .device = device, + .context = context, + .cont = cont, + }; + INIT_TQUEUE(&fw_work->work, request_firmware_work_func, fw_work); + + schedule_task(&fw_work->work); + return 0; +} + +static int __init +firmware_class_init(void) +{ + proc_dir = create_proc_entry("driver/firmware", 0755 | S_IFDIR, NULL); + if (!proc_dir) + return -EAGAIN; + proc_dir_timeout = create_proc_entry("timeout", + 0644 | S_IFREG, proc_dir); + if (!proc_dir_timeout) { + remove_proc_entry("driver/firmware", NULL); + return -EAGAIN; + } + proc_dir_timeout->read_proc = firmware_timeout_show; + proc_dir_timeout->write_proc = firmware_timeout_store; + return 0; +} +static void __exit +firmware_class_exit(void) +{ + remove_proc_entry("timeout", proc_dir); + remove_proc_entry("driver/firmware", NULL); +} + +module_init(firmware_class_init); +module_exit(firmware_class_exit); + +EXPORT_SYMBOL(release_firmware); +EXPORT_SYMBOL(request_firmware); +EXPORT_SYMBOL(request_firmware_nowait); +EXPORT_SYMBOL(register_firmware); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/lib/Makefile linux.22-ac2/lib/Makefile --- linux.vanilla/lib/Makefile 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/lib/Makefile 2003-07-31 14:09:15.000000000 +0100 @@ -9,11 +9,12 @@ L_TARGET := lib.a export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o \ - rbtree.o crc32.o + rbtree.o crc32.o firmware_class.o obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o \ bust_spinlocks.o rbtree.o dump_stack.o +obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/MAINTAINERS linux.22-ac2/MAINTAINERS --- linux.vanilla/MAINTAINERS 2003-08-28 16:45:26.000000000 +0100 +++ linux.22-ac2/MAINTAINERS 2003-08-28 22:16:50.000000000 +0100 @@ -1,4 +1,4 @@ - + List of maintainers and how to submit kernel changes Please try to follow the guidelines below. This will make things @@ -554,6 +554,13 @@ W: http://www.debian.org/~dz/i8k/ S: Maintained +DEVICE-MAPPER +P: Joe Thornber +M: thornber@sistina.com +L: dm@uk.sistina.com +W: http://www.sistina.com/lvm +S: Maintained + DEVICE NUMBER REGISTRY P: H. 
Peter Anvin M: hpa@zytor.com @@ -620,8 +627,6 @@ S: Maintained DRM DRIVERS -P: Rik Faith -M: faith@valinux.com L: dri-devel@lists.sourceforge.net S: Supported @@ -825,6 +830,14 @@ W: http://www.linuxia64.org/ S: Maintained +MARVELL YUKON / SYSKONNECT DRIVER +P: Mirko Lindner +M: mlindner@syskonnect.de +P: Ralph Roesler +M: rroesler@syskonnect.de +W: http://www.syskonnect.com +S: Supported + SN-IA64 (Itanium) SUB-PLATFORM P: John Hesterberg M: jh@sgi.com @@ -944,6 +957,11 @@ L: linux-mips@linux-mips.org S: Maintained +IOC4 IDE DRIVER +P: Aniket Malatpure +M: aniket@sgi.com +S: Maintained + IP MASQUERADING: P: Juanjo Ciarlante M: jjciarla@raiz.uncu.edu.ar @@ -1026,8 +1044,6 @@ KERNEL BUILD (Makefile, Rules.make, scripts/*) P: Keith Owens M: kaos@ocs.com.au -P: Michael Elizabeth Chastain -M: mec@shout.net L: kbuild-devel@lists.sourceforge.net W: http://kbuild.sourceforge.net S: Maintained @@ -1745,12 +1761,6 @@ L: linux-video@atrey.karlin.mff.cuni.cz S: Maintained -SYSKONNECT DRIVER -P: Mirko Lindner -M: mlindner@syskonnect.de -W: http://www.syskonnect.com -S: Supported - SYSV FILESYSTEM P: Christoph Hellwig M: hch@infradead.org @@ -2052,10 +2062,9 @@ S: Maintained VFAT FILESYSTEM: -P: Gordon Chaffee -M: chaffee@cs.berkeley.edu +P: OGAWA Hirofumi +M: hirofumi@mail.parknet.co.jp L: linux-kernel@vger.kernel.org -W: http://bmrc.berkeley.edu/people/chaffee S: Maintained VIA 82Cxxx AUDIO DRIVER @@ -2099,12 +2108,26 @@ L: linux-scsi@vger.kernel.org S: Maintained +WOLFSON WM97xx AC97 CODECS & TOUCHSCREEN DRIVERS +P: Liam Girdwood +M: liam.girdwood@wolfsonmicro.com +M: linux@wolfsonmicro.com +S: Supported + X.25 NETWORK LAYER P: Henner Eisen M: eis@baty.hanse.de L: linux-x25@vger.kernel.org S: Maintained +XFS FILESYSTEM +P: Silicon Graphics Inc +M: owner-xfs@oss.sgi.com +M: lord@sgi.com +L: linux-xfs@oss.sgi.com +W: http://oss.sgi.com/projects/xfs +S: Supported + X86 3-LEVEL PAGING (PAE) SUPPORT P: Ingo Molnar M: mingo@redhat.com diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Makefile linux.22-ac2/Makefile --- linux.vanilla/Makefile 2003-08-28 16:45:26.000000000 +0100 +++ linux.22-ac2/Makefile 2003-09-01 13:43:29.000000000 +0100 @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 4 SUBLEVEL = 22 -EXTRAVERSION = +EXTRAVERSION = -ac2 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) @@ -135,12 +135,14 @@ DRIVERS-m := DRIVERS- := -DRIVERS-$(CONFIG_ACPI_BOOT) += drivers/acpi/acpi.o +DRIVERS-$(CONFIG_ACPI) += drivers/acpi/acpi.o +DRIVERS-$(CONFIG_CPU_FREQ) += drivers/cpufreq/cpufreq.o DRIVERS-$(CONFIG_PARPORT) += drivers/parport/driver.o DRIVERS-y += drivers/char/char.o \ drivers/block/block.o \ drivers/misc/misc.o \ drivers/net/net.o + DRIVERS-$(CONFIG_AGP) += drivers/char/agp/agp.o DRIVERS-$(CONFIG_DRM_NEW) += drivers/char/drm/drm.o DRIVERS-$(CONFIG_DRM_OLD) += drivers/char/drm-4.0/drm.o @@ -174,7 +176,7 @@ DRIVERS-$(CONFIG_FC4) += drivers/fc4/fc4.a DRIVERS-$(CONFIG_PPC32) += drivers/macintosh/macintosh.o DRIVERS-$(CONFIG_MAC) += drivers/macintosh/macintosh.o -DRIVERS-$(CONFIG_ISAPNP) += drivers/pnp/pnp.o +DRIVERS-$(CONFIG_PNP) += drivers/pnp/pnp.o DRIVERS-$(CONFIG_SGI_IP22) += drivers/sgi/sgi.a DRIVERS-$(CONFIG_VT) += drivers/video/video.o DRIVERS-$(CONFIG_PARIDE) += drivers/block/paride/paride.a @@ -203,6 +205,7 @@ kernel/ksyms.lst include/linux/compile.h \ vmlinux System.map \ .tmp* \ + scripts/mkconfigs kernel/configs.c kernel/configs.o \ drivers/char/consolemap_deftbl.c drivers/video/promcon_tbl.c \ drivers/char/conmakehash \ drivers/char/drm/*-mod.c \ @@ -230,6 
+233,7 @@ # files removed with 'make mrproper' MRPROPER_FILES = \ include/linux/autoconf.h include/linux/version.h \ + tmp* \ lib/crc32table.h lib/gen_crc32table \ drivers/net/hamradio/soundmodem/sm_tbl_{afsk1200,afsk2666,fsk9600}.h \ drivers/net/hamradio/soundmodem/sm_tbl_{hapn4800,psk4800}.h \ @@ -248,6 +252,7 @@ include/asm \ .hdepend scripts/mkdep scripts/split-include scripts/docproc \ $(TOPDIR)/include/linux/modversions.h \ + scripts/mkconfigs kernel/configs.c kernel/configs.o \ kernel.spec # directories removed with 'make mrproper' @@ -321,7 +326,7 @@ linuxsubdirs: $(patsubst %, _dir_%, $(SUBDIRS)) -$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/version.h include/config/MARKER +$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/version.h tmp_include_depends $(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" -C $(patsubst _dir_%, %, $@) $(TOPDIR)/include/linux/version.h: include/linux/version.h @@ -361,13 +366,13 @@ comma := , -init/version.o: init/version.c include/linux/compile.h include/config/MARKER +init/version.o: init/version.c include/linux/compile.h tmp_include_depends $(CC) $(CFLAGS) $(CFLAGS_KERNEL) -DUTS_MACHINE='"$(ARCH)"' -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o init/version.o init/version.c -init/main.o: init/main.c include/config/MARKER +init/main.o: init/main.c tmp_include_depends $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $< -init/do_mounts.o: init/do_mounts.c include/config/MARKER +init/do_mounts.o: init/do_mounts.c tmp_include_depends $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $< fs lib mm ipc kernel drivers net: dummy @@ -394,7 +399,7 @@ modules: $(patsubst %, _mod_%, $(SUBDIRS)) .PHONY: $(patsubst %, _mod_%, $(SUBDIRS)) -$(patsubst %, _mod_%, $(SUBDIRS)) : include/linux/version.h include/config/MARKER +$(patsubst %, _mod_%, $(SUBDIRS)) : include/linux/version.h tmp_include_depends $(MAKE) -C $(patsubst _mod_%, %, $@) CFLAGS="$(CFLAGS) $(MODFLAGS)" MAKING_MODULES=1 modules .PHONY: modules_install @@ -500,6 +505,13 @@ endif scripts/mkdep -- `find $(FINDHPATH) \( -name SCCS -o -name .svn \) -prune -o -follow -name \*.h ! 
-name modversions.h -print` > .hdepend scripts/mkdep -- init/*.c > .depend + (find $(TOPDIR) \( -name .depend -o -name .hdepend \) -print | xargs $(AWK) -f scripts/include_deps) > tmp_include_depends + sed -ne 's/^\([^ ].*\):.*/ \1 \\/p' tmp_include_depends > tmp_include_depends_1 + (echo ""; echo "all: \\"; cat tmp_include_depends_1; echo "") >> tmp_include_depends + rm tmp_include_depends_1 + +tmp_include_depends: include/config/MARKER dummy + $(MAKE) -r -f tmp_include_depends all ifdef CONFIG_MODVERSIONS MODVERFILE := $(TOPDIR)/include/linux/modversions.h diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/filemap.c linux.22-ac2/mm/filemap.c --- linux.vanilla/mm/filemap.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/mm/filemap.c 2003-08-08 13:42:54.000000000 +0100 @@ -962,6 +962,7 @@ spin_unlock(&pagecache_lock); return page; } +EXPORT_SYMBOL_GPL(find_trylock_page); /* * Must be called with the pagecache lock held, @@ -1338,6 +1339,8 @@ SetPageReferenced(page); } +EXPORT_SYMBOL(mark_page_accessed); + /* * This is a generic file read routine, and uses the * inode->i_op->readpage() function for the actual low-level @@ -1636,7 +1639,7 @@ if (retval) break; - retval = do_call_directIO(rw, filp, iobuf, (offset+progress) >> blocksize_bits, blocksize); + retval = do_call_directIO(rw, filp, iobuf, (offset+progress) >> blocksize_bits, blocksize); if (rw == READ && retval > 0) mark_dirty_kiobuf(iobuf, retval); @@ -3007,7 +3010,7 @@ } /* FIXME: this is for backwards compatibility with 2.4 */ - if (!S_ISBLK(inode->i_mode) && file->f_flags & O_APPEND) + if (!S_ISBLK(inode->i_mode) && (file->f_flags & O_APPEND)) *ppos = pos = inode->i_size; /* @@ -3238,7 +3241,7 @@ if (err != 0 || count == 0) goto out; - if (!file->f_flags & O_DIRECT) + if (!(file->f_flags & O_DIRECT)) BUG(); remove_suid(inode); @@ -3259,7 +3262,7 @@ * Sync the fs metadata but not the minor inode changes and * of course not the data as we did direct DMA for the IO. */ - if (written >= 0 && file->f_flags & O_SYNC) + if (written >= 0 && (file->f_flags & O_SYNC)) status = generic_osync_inode(inode, OSYNC_METADATA); err = written ? 
written : status; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/highmem.c linux.22-ac2/mm/highmem.c --- linux.vanilla/mm/highmem.c 2003-06-14 00:11:43.000000000 +0100 +++ linux.22-ac2/mm/highmem.c 2003-06-29 16:09:24.000000000 +0100 @@ -362,6 +362,7 @@ /* we need to wait I/O completion */ run_task_queue(&tq_disk); + __set_current_state(TASK_RUNNING); yield(); goto repeat_alloc; } @@ -398,6 +399,7 @@ /* we need to wait I/O completion */ run_task_queue(&tq_disk); + __set_current_state(TASK_RUNNING); yield(); goto repeat_alloc; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/Makefile linux.22-ac2/mm/Makefile --- linux.vanilla/mm/Makefile 2002-08-03 16:08:33.000000000 +0100 +++ linux.22-ac2/mm/Makefile 2003-06-29 16:09:24.000000000 +0100 @@ -9,12 +9,12 @@ O_TARGET := mm.o -export-objs := shmem.o filemap.o memory.o page_alloc.o +export-objs := shmem.o filemap.o memory.o page_alloc.o mempool.o obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \ vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \ page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \ - shmem.o + shmem.o mempool.o obj-$(CONFIG_HIGHMEM) += highmem.o diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/mempool.c linux.22-ac2/mm/mempool.c --- linux.vanilla/mm/mempool.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/mm/mempool.c 2003-06-29 16:09:24.000000000 +0100 @@ -0,0 +1,299 @@ +/* + * linux/mm/mempool.c + * + * memory buffer pool support. Such pools are mostly used + * for guaranteed, deadlock-free memory allocations during + * extreme VM load. + * + * started by Ingo Molnar, Copyright (C) 2001 + */ + +#include +#include +#include +#include + +struct mempool_s { + spinlock_t lock; + int min_nr; /* nr of elements at *elements */ + int curr_nr; /* Current nr of elements at *elements */ + void **elements; + + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +static void add_element(mempool_t *pool, void *element) +{ + BUG_ON(pool->curr_nr >= pool->min_nr); + pool->elements[pool->curr_nr++] = element; +} + +static void *remove_element(mempool_t *pool) +{ + BUG_ON(pool->curr_nr <= 0); + return pool->elements[--pool->curr_nr]; +} + +static void free_pool(mempool_t *pool) +{ + while (pool->curr_nr) { + void *element = remove_element(pool); + pool->free(element, pool->pool_data); + } + kfree(pool->elements); + kfree(pool); +} + +/** + * mempool_create - create a memory pool + * @min_nr: the minimum number of elements guaranteed to be + * allocated for this pool. + * @alloc_fn: user-defined element-allocation function. + * @free_fn: user-defined element-freeing function. + * @pool_data: optional private data available to the user-defined functions. + * + * this function creates and allocates a guaranteed size, preallocated + * memory pool. The pool can be used from the mempool_alloc and mempool_free + * functions. This function might sleep. Both the alloc_fn() and the free_fn() + * functions might sleep - as long as the mempool_alloc function is not called + * from IRQ contexts. 
+ */ +mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data) +{ + mempool_t *pool; + + pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return NULL; + memset(pool, 0, sizeof(*pool)); + pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL); + if (!pool->elements) { + kfree(pool); + return NULL; + } + spin_lock_init(&pool->lock); + pool->min_nr = min_nr; + pool->pool_data = pool_data; + init_waitqueue_head(&pool->wait); + pool->alloc = alloc_fn; + pool->free = free_fn; + + /* + * First pre-allocate the guaranteed number of buffers. + */ + while (pool->curr_nr < pool->min_nr) { + void *element; + + element = pool->alloc(GFP_KERNEL, pool->pool_data); + if (unlikely(!element)) { + free_pool(pool); + return NULL; + } + add_element(pool, element); + } + return pool; +} + +/** + * mempool_resize - resize an existing memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * @new_min_nr: the new minimum number of elements guaranteed to be + * allocated for this pool. + * @gfp_mask: the usual allocation bitmask. + * + * This function shrinks/grows the pool. In the case of growing, + * it cannot be guaranteed that the pool will be grown to the new + * size immediately, but new mempool_free() calls will refill it. + * + * Note, the caller must guarantee that no mempool_destroy is called + * while this function is running. mempool_alloc() & mempool_free() + * might be called (eg. from IRQ contexts) while this function executes. + */ +int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask) +{ + void *element; + void **new_elements; + unsigned long flags; + + BUG_ON(new_min_nr <= 0); + + spin_lock_irqsave(&pool->lock, flags); + if (new_min_nr < pool->min_nr) { + while (pool->curr_nr > new_min_nr) { + element = remove_element(pool); + spin_unlock_irqrestore(&pool->lock, flags); + pool->free(element, pool->pool_data); + spin_lock_irqsave(&pool->lock, flags); + } + pool->min_nr = new_min_nr; + goto out_unlock; + } + spin_unlock_irqrestore(&pool->lock, flags); + + /* Grow the pool */ + new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask); + if (!new_elements) + return -ENOMEM; + + spin_lock_irqsave(&pool->lock, flags); + memcpy(new_elements, pool->elements, + pool->curr_nr * sizeof(*new_elements)); + kfree(pool->elements); + pool->elements = new_elements; + pool->min_nr = new_min_nr; + + while (pool->curr_nr < pool->min_nr) { + spin_unlock_irqrestore(&pool->lock, flags); + element = pool->alloc(gfp_mask, pool->pool_data); + if (!element) + goto out; + spin_lock_irqsave(&pool->lock, flags); + if (pool->curr_nr < pool->min_nr) + add_element(pool, element); + else + kfree(element); /* Raced */ + } +out_unlock: + spin_unlock_irqrestore(&pool->lock, flags); +out: + return 0; +} + +/** + * mempool_destroy - deallocate a memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * + * this function only sleeps if the free_fn() function sleeps. The caller + * has to guarantee that all elements have been returned to the pool (ie: + * freed) prior to calling mempool_destroy(). + */ +void mempool_destroy(mempool_t *pool) +{ + if (pool->curr_nr != pool->min_nr) + BUG(); /* There were outstanding elements */ + free_pool(pool); +} + +/** + * mempool_alloc - allocate an element from a specific memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * @gfp_mask: the usual allocation bitmask. 
+ * + * this function only sleeps if the alloc_fn function sleeps or + * returns NULL. Note that due to preallocation, this function + * *never* fails when called from process contexts. (it might + * fail if called from an IRQ context.) + */ +void * mempool_alloc(mempool_t *pool, int gfp_mask) +{ + void *element; + unsigned long flags; + int curr_nr; + DECLARE_WAITQUEUE(wait, current); + int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO); + +repeat_alloc: + element = pool->alloc(gfp_nowait, pool->pool_data); + if (likely(element != NULL)) + return element; + + /* + * If the pool is less than 50% full then try harder + * to allocate an element: + */ + if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) { + element = pool->alloc(gfp_mask, pool->pool_data); + if (likely(element != NULL)) + return element; + } + + /* + * Kick the VM at this point. + */ + wakeup_bdflush(); + + spin_lock_irqsave(&pool->lock, flags); + if (likely(pool->curr_nr)) { + element = remove_element(pool); + spin_unlock_irqrestore(&pool->lock, flags); + return element; + } + spin_unlock_irqrestore(&pool->lock, flags); + + /* We must not sleep in the GFP_ATOMIC case */ + if (gfp_mask == gfp_nowait) + return NULL; + + run_task_queue(&tq_disk); + + add_wait_queue_exclusive(&pool->wait, &wait); + set_task_state(current, TASK_UNINTERRUPTIBLE); + + spin_lock_irqsave(&pool->lock, flags); + curr_nr = pool->curr_nr; + spin_unlock_irqrestore(&pool->lock, flags); + + if (!curr_nr) + schedule(); + + current->state = TASK_RUNNING; + remove_wait_queue(&pool->wait, &wait); + + goto repeat_alloc; +} + +/** + * mempool_free - return an element to the pool. + * @element: pool element pointer. + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * + * this function only sleeps if the free_fn() function sleeps. + */ +void mempool_free(void *element, mempool_t *pool) +{ + unsigned long flags; + + if (pool->curr_nr < pool->min_nr) { + spin_lock_irqsave(&pool->lock, flags); + if (pool->curr_nr < pool->min_nr) { + add_element(pool, element); + spin_unlock_irqrestore(&pool->lock, flags); + wake_up(&pool->wait); + return; + } + spin_unlock_irqrestore(&pool->lock, flags); + } + pool->free(element, pool->pool_data); +} + +/* + * A commonly used alloc and free fn. 
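Since mempool is entirely new to the 2.4 tree, a usage sketch may be worth more than the kernel-doc alone. The pattern below pairs mempool_create() with the slab helpers defined just after this point; the io_rec names are invented for illustration.

/*
 * Illustration only, not part of the patch.  "io_rec" is an invented
 * element type; mempool_alloc_slab/mempool_free_slab are the helpers
 * defined immediately below.
 */
struct io_rec {
	struct list_head list;		/* whatever the driver needs */
};

static kmem_cache_t *io_rec_cache;
static mempool_t *io_rec_pool;

static int io_rec_pool_init(void)
{
	io_rec_cache = kmem_cache_create("io_rec", sizeof(struct io_rec),
					 0, 0, NULL, NULL);
	if (!io_rec_cache)
		return -ENOMEM;
	/* keep 32 objects in reserve so I/O can always make progress */
	io_rec_pool = mempool_create(32, mempool_alloc_slab,
				     mempool_free_slab, io_rec_cache);
	if (!io_rec_pool) {
		kmem_cache_destroy(io_rec_cache);
		return -ENOMEM;
	}
	return 0;
}

static void io_rec_pool_exit(void)
{
	mempool_destroy(io_rec_pool);	/* all elements must be back first */
	kmem_cache_destroy(io_rec_cache);
}

/*
 * In process context this never returns NULL: it dips into the
 * reserve and, if that is empty, sleeps on pool->wait until
 * mempool_free() refills it.
 */
static struct io_rec *io_rec_get(void)
{
	return mempool_alloc(io_rec_pool, GFP_NOIO);
}

static void io_rec_put(struct io_rec *rec)
{
	mempool_free(rec, io_rec_pool);
}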
+ */ +void *mempool_alloc_slab(int gfp_mask, void *pool_data) +{ + kmem_cache_t *mem = (kmem_cache_t *) pool_data; + return kmem_cache_alloc(mem, gfp_mask); +} + +void mempool_free_slab(void *element, void *pool_data) +{ + kmem_cache_t *mem = (kmem_cache_t *) pool_data; + kmem_cache_free(mem, element); +} + + +EXPORT_SYMBOL(mempool_create); +EXPORT_SYMBOL(mempool_resize); +EXPORT_SYMBOL(mempool_destroy); +EXPORT_SYMBOL(mempool_alloc); +EXPORT_SYMBOL(mempool_free); +EXPORT_SYMBOL(mempool_alloc_slab); +EXPORT_SYMBOL(mempool_free_slab); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/mlock.c linux.22-ac2/mm/mlock.c --- linux.vanilla/mm/mlock.c 2001-09-17 23:30:23.000000000 +0100 +++ linux.22-ac2/mm/mlock.c 2003-06-29 16:09:24.000000000 +0100 @@ -198,6 +198,7 @@ unsigned long lock_limit; int error = -ENOMEM; + vm_validate_enough("entering sys_mlock"); down_write(¤t->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; @@ -220,6 +221,7 @@ error = do_mlock(start, len, 1); out: up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_mlock"); return error; } @@ -227,11 +229,13 @@ { int ret; + vm_validate_enough("entering sys_munlock"); down_write(¤t->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; ret = do_mlock(start, len, 0); up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_munlock"); return ret; } @@ -268,6 +272,8 @@ unsigned long lock_limit; int ret = -EINVAL; + vm_validate_enough("entering sys_mlockall"); + down_write(¤t->mm->mmap_sem); if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE))) goto out; @@ -287,15 +293,18 @@ ret = do_mlockall(flags); out: up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_mlockall"); return ret; } asmlinkage long sys_munlockall(void) { int ret; + vm_validate_enough("entering sys_munlockall"); down_write(¤t->mm->mmap_sem); ret = do_mlockall(0); up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_munlockall"); return ret; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/mmap.c linux.22-ac2/mm/mmap.c --- linux.vanilla/mm/mmap.c 2003-06-14 00:11:43.000000000 +0100 +++ linux.22-ac2/mm/mmap.c 2003-06-29 16:09:24.000000000 +0100 @@ -1,8 +1,25 @@ /* * linux/mm/mmap.c - * * Written by obz. + * + * Address space accounting code + * (c) Copyright 2002 Red Hat Inc, All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + #include #include #include @@ -44,8 +61,10 @@ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 }; -int sysctl_overcommit_memory; +int sysctl_overcommit_memory = 0; /* default is heuristic overcommit */ +int sysctl_overcommit_ratio = 50; /* default is 50% */ int max_map_count = DEFAULT_MAX_MAP_COUNT; +atomic_t vm_committed_space = ATOMIC_INIT(0); /* Check that a process has enough memory to allocate a * new virtual mapping. @@ -55,42 +74,107 @@ /* Stupid algorithm to decide if we have enough memory: while * simple, it hopefully works in most obvious cases.. Easy to * fool it, but this should catch most mistakes. - */ - /* 23/11/98 NJC: Somewhat less stupid version of algorithm, + * + * 23/11/98 NJC: Somewhat less stupid version of algorithm, * which tries to do "TheRightThing". Instead of using half of * (buffers+cache), use the minimum values. Allow an extra 2% * of num_physpages for safety margin. + * + * 2002/02/26 Alan Cox: Added two new modes that do real accounting */ + unsigned long free, allowed; + struct sysinfo i; - unsigned long free; + atomic_add(pages, &vm_committed_space); /* Sometimes we want to use more memory than we have. */ - if (sysctl_overcommit_memory) - return 1; + if (sysctl_overcommit_memory == 1) + return 1; + + if (sysctl_overcommit_memory == 0) + { + /* The page cache contains buffer pages these days.. */ + free = atomic_read(&page_cache_size); + free += nr_free_pages(); + free += nr_swap_pages; + + /* + * This double-counts: the nrpages are both in the page-cache + * and in the swapper space. At the same time, this compensates + * for the swap-space over-allocation (ie "nr_swap_pages" being + * too small. + */ + free += swapper_space.nrpages; + + /* + * The code below doesn't account for free space in the inode + * and dentry slab cache, slab cache fragmentation, inodes and + * dentries which will become freeable under VM load, etc. + * Lets just hope all these (complex) factors balance out... + */ + free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT; + free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT; + + if(free > pages) + return 1; + atomic_sub(pages, &vm_committed_space); + return 0; + } - /* The page cache contains buffer pages these days.. */ - free = atomic_read(&page_cache_size); - free += nr_free_pages(); - free += nr_swap_pages; + /* FIXME - need to add arch hooks to get the bits we need + without the higher overhead crap */ + si_meminfo(&i); + allowed = i.totalram * sysctl_overcommit_ratio / 100; + allowed += total_swap_pages; + + if(atomic_read(&vm_committed_space) < allowed) + return 1; - /* - * This double-counts: the nrpages are both in the page-cache - * and in the swapper space. At the same time, this compensates - * for the swap-space over-allocation (ie "nr_swap_pages" being - * too small. - */ - free += swapper_space.nrpages; + atomic_sub(pages, &vm_committed_space); + return 0; +} - /* - * The code below doesn't account for free space in the inode - * and dentry slab cache, slab cache fragmentation, inodes and - * dentries which will become freeable under VM load, etc. - * Lets just hope all these (complex) factors balance out... 
- */ - free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT; - free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT; +void vm_unacct_memory(long pages) +{ + atomic_sub(pages, &vm_committed_space); +} - return free > pages; +/* + * Don't even bother telling me the locking is wrong - its a test + * routine and uniprocessor is quite sufficient.. + * + * To enable this debugging you must tweak the #if below, and build + * with no SYS5 shared memory (thats not validated yet) and non SMP + */ + +void vm_validate_enough(char *x) +{ +#if 0 + unsigned long count = 0UL; + struct mm_struct *mm; + struct vm_area_struct *vma; + struct list_head *mmp; + unsigned long flags; + + spin_lock_irqsave(&mmlist_lock, flags); + + list_for_each(mmp, &init_mm.mmlist) + { + mm = list_entry(mmp, struct mm_struct, mmlist); + for(vma = mm->mmap; vma!=NULL; vma=vma->vm_next) + { + if(vma->vm_flags & VM_ACCOUNT) + count += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + } + } + if(count != atomic_read(&vm_committed_space)) + { + printk("MM crappo accounting %s: %lu %ld.\n", + x, count, atomic_read(&vm_committed_space)); + atomic_set(&vm_committed_space, count); + } + spin_unlock_irqrestore(&mmlist_lock, flags); +#endif } /* Remove one vm structure from the inode's i_mapping address space. */ @@ -161,12 +245,13 @@ /* Always allow shrinking brk. */ if (brk <= mm->brk) { - if (!do_munmap(mm, newbrk, oldbrk-newbrk)) + if (!do_munmap(mm, newbrk, oldbrk-newbrk, 1)) goto set_brk; goto out; } /* Check against rlimit.. */ + /* FIXME: - this seems to be checked in do_brk.. */ rlim = current->rlim[RLIMIT_DATA].rlim_cur; if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) goto out; @@ -175,10 +260,6 @@ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) goto out; - /* Check if we have enough memory.. */ - if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) - goto out; - /* Ok, looks good - let it rip. */ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) goto out; @@ -399,7 +480,9 @@ int correct_wcount = 0; int error; rb_node_t ** rb_link, * rb_parent; + unsigned long charged = 0; + vm_validate_enough("entering do_mmap_pgoff"); if (file && (!file->f_op || !file->f_op->mmap)) return -ENODEV; @@ -484,7 +567,7 @@ munmap_back: vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); if (vma && vma->vm_start < addr + len) { - if (do_munmap(mm, addr, len)) + if (do_munmap(mm, addr, len, 1)) return -ENOMEM; goto munmap_back; } @@ -494,11 +577,15 @@ > current->rlim[RLIMIT_AS].rlim_cur) return -ENOMEM; - /* Private writable mapping? Check memory availability.. */ - if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE && - !(flags & MAP_NORESERVE) && - !vm_enough_memory(len >> PAGE_SHIFT)) - return -ENOMEM; + if (!(flags & MAP_NORESERVE) || sysctl_overcommit_memory > 1) { + if ((vm_flags & (VM_SHARED|VM_WRITE)) == VM_WRITE) { + /* Private writable mapping: check memory availability */ + charged = len >> PAGE_SHIFT; + if (!vm_enough_memory(charged)) + return -ENOMEM; + vm_flags |= VM_ACCOUNT; + } + } /* Can we just expand an old anonymous mapping? */ if (!file && !(vm_flags & VM_SHARED) && rb_parent) @@ -510,8 +597,9 @@ * not unmapped, but the maps are removed from the list. */ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + error = -ENOMEM; if (!vma) - return -ENOMEM; + goto unacct_error; vma->vm_mm = mm; vma->vm_start = addr; @@ -563,8 +651,7 @@ * to update the tree pointers. 
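The rewritten vm_enough_memory() above now implements three policies: 1 means always succeed, 0 keeps the old free-memory heuristic, and any other value enforces a hard commit limit derived from sysctl_overcommit_ratio plus swap, with every charge tracked in vm_committed_space and tagged on the vma via VM_ACCOUNT so it can be returned on unmap. A worked example of the strict limit:

/*
 * Worked example, not part of the patch: with 131072 pages of RAM
 * (512MB at 4KB pages), 262144 pages of swap (1GB) and the default
 * sysctl_overcommit_ratio of 50, the strict mode allows
 *
 *	allowed = 131072 * 50 / 100 + 262144 = 327680 pages  (~1.25GB)
 *
 * of committed address space.  Once vm_committed_space reaches that
 * figure, vm_enough_memory() backs its speculative atomic_add() out
 * again and the caller reports -ENOMEM instead of risking a later
 * OOM kill.
 */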
*/ addr = vma->vm_start; - stale_vma = find_vma_prepare(mm, addr, &prev, - &rb_link, &rb_parent); + stale_vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); /* * Make sure the lowlevel driver did its job right. */ @@ -585,6 +672,7 @@ mm->locked_vm += len >> PAGE_SHIFT; make_pages_present(addr, addr + len); } + vm_validate_enough("out from do_mmap_pgoff"); return addr; unmap_and_free_vma: @@ -597,6 +685,10 @@ zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start); free_vma: kmem_cache_free(vm_area_cachep, vma); +unacct_error: + if(charged) + vm_unacct_memory(charged); + vm_validate_enough("error path from do_mmap_pgoff"); return error; } @@ -739,6 +831,96 @@ return NULL; } +/* vma is the first one with address < vma->vm_end, + * and even address < vma->vm_start. Have to extend vma. */ + +#ifdef ARCH_STACK_GROWSUP +static inline int expand_stack(struct vm_area_struct * vma, unsigned long address) +{ + unsigned long grow; + + if (!(vma->vm_flags & VM_GROWSUP)) + return -EFAULT; + + vm_validate_enough("entering expand_stack"); + + /* + * vma->vm_start/vm_end cannot change under us because the caller is required + * to hold the mmap_sem in write mode. We need to get the spinlock only + * before relocating the vma range ourself. + */ + spin_lock(&vma->vm_mm->page_table_lock); + + address += 4 + PAGE_SIZE - 1; + address &= PAGE_MASK; + grow = (address - vma->vm_end) >> PAGE_SHIFT; + + /* Overcommit.. */ + if(!vm_enough_memory(grow)) { + spin_unlock(&vma->vm_mm->page_table_lock); + return -ENOMEM; + } + + if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur || + ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) + { + spin_unlock(&vma->vm_mm->page_table_lock); + vm_unacct_memory(grow); + vm_validate_enough("exiting expand_stack - FAIL"); + return -ENOMEM; + } + vma->vm_end = address; + vma->vm_mm->total_vm += grow; + if (vma->vm_flags & VM_LOCKED) + vma->vm_mm->locked_vm += grow; + vm_validate_enough("exiting expand_stack"); + return 0; +} +#else + +int expand_stack(struct vm_area_struct * vma, unsigned long address) +{ + unsigned long grow; + + if (!(vma->vm_flags & VM_GROWSDOWN)) + return -EFAULT; + + vm_validate_enough("entering expand_stack"); + + /* + * vma->vm_start/vm_end cannot change under us because the caller is required + * to hold the mmap_sem in write mode. We need to get the spinlock only + * before relocating the vma range ourself. + */ + address &= PAGE_MASK; + spin_lock(&vma->vm_mm->page_table_lock); + grow = (vma->vm_start - address) >> PAGE_SHIFT; + + /* Overcommit.. 
*/ + if(!vm_enough_memory(grow)) { + spin_unlock(&vma->vm_mm->page_table_lock); + return -ENOMEM; + } + + if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur || + ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) { + spin_unlock(&vma->vm_mm->page_table_lock); + vm_unacct_memory(grow); + vm_validate_enough("exiting expand_stack - FAIL"); + return -ENOMEM; + } + vma->vm_start = address; + vma->vm_pgoff -= grow; + vma->vm_mm->total_vm += grow; + if (vma->vm_flags & VM_LOCKED) + vma->vm_mm->locked_vm += grow; + spin_unlock(&vma->vm_mm->page_table_lock); + vm_validate_enough("exiting expand_stack"); + return 0; +} + +#endif + struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr) { struct vm_area_struct * vma; @@ -786,7 +968,7 @@ */ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm, struct vm_area_struct *area, unsigned long addr, size_t len, - struct vm_area_struct *extra) + struct vm_area_struct *extra, int acct) { struct vm_area_struct *mpnt; unsigned long end = addr + len; @@ -794,6 +976,8 @@ area->vm_mm->total_vm -= len >> PAGE_SHIFT; if (area->vm_flags & VM_LOCKED) area->vm_mm->locked_vm -= len >> PAGE_SHIFT; + if (acct && (area->vm_flags & VM_ACCOUNT)) + vm_unacct_memory(len >> PAGE_SHIFT); /* Unmapping the whole area. */ if (addr == area->vm_start && end == area->vm_end) { @@ -921,10 +1105,12 @@ * work. This now handles partial unmappings. * Jeremy Fitzhardine */ -int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) +int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct) { struct vm_area_struct *mpnt, *prev, **npp, *free, *extra; + if(acct) vm_validate_enough("entering do_munmap"); + if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr) return -EINVAL; @@ -991,6 +1177,7 @@ (file = mpnt->vm_file) != NULL) { atomic_dec(&file->f_dentry->d_inode->i_writecount); } + remove_shared_vm_struct(mpnt); mm->map_count--; @@ -999,7 +1186,7 @@ /* * Fix the mapping, and free the old area if it wasn't reused. */ - extra = unmap_fixup(mm, mpnt, st, size, extra); + extra = unmap_fixup(mm, mpnt, st, size, extra, acct); if (file) atomic_inc(&file->f_dentry->d_inode->i_writecount); } @@ -1010,6 +1197,7 @@ kmem_cache_free(vm_area_cachep, extra); free_pgtables(mm, prev, addr, addr+len); + if(acct) vm_validate_enough("exit -ok- do_munmap"); return 0; } @@ -1020,7 +1208,7 @@ struct mm_struct *mm = current->mm; down_write(&mm->mmap_sem); - ret = do_munmap(mm, addr, len); + ret = do_munmap(mm, addr, len, 1); up_write(&mm->mmap_sem); return ret; } @@ -1037,6 +1225,9 @@ unsigned long flags; rb_node_t ** rb_link, * rb_parent; + vm_validate_enough("entering do_brk"); + + len = PAGE_ALIGN(len); if (!len) return addr; @@ -1057,7 +1248,7 @@ munmap_back: vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); if (vma && vma->vm_start < addr + len) { - if (do_munmap(mm, addr, len)) + if (do_munmap(mm, addr, len, 1)) return -ENOMEM; goto munmap_back; } @@ -1073,7 +1264,7 @@ if (!vm_enough_memory(len >> PAGE_SHIFT)) return -ENOMEM; - flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags; + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; /* Can we just expand an old anonymous mapping? 
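Note that do_munmap() has grown an acct argument so each caller states whether the unmapped range should be credited back to vm_committed_space; most callers pass 1, while move_vma() later in this patch passes 0 because the charge is carried over to the relocated vma. A minimal sketch of the new in-kernel calling convention, with example_unmap() invented for the purpose:

/* Illustration only, not part of the patch. */
static int example_unmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
	int err;

	down_write(&mm->mmap_sem);
	/* acct=1: pages charged to a VM_ACCOUNT vma are given back */
	err = do_munmap(mm, addr, len, 1);
	up_write(&mm->mmap_sem);
	return err;
}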
*/ if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags)) @@ -1084,8 +1275,11 @@ */ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!vma) + { + /* We accounted this address space - undo it */ + vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; - + } vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; @@ -1104,6 +1298,9 @@ mm->locked_vm += len >> PAGE_SHIFT; make_pages_present(addr, addr + len); } + + vm_validate_enough("exiting do_brk"); + return addr; } @@ -1145,6 +1342,10 @@ unsigned long end = mpnt->vm_end; unsigned long size = end - start; + /* If the VMA has been charged for, account for its removal */ + if (mpnt->vm_flags & VM_ACCOUNT) + vm_unacct_memory(size >> PAGE_SHIFT); + if (mpnt->vm_ops) { if (mpnt->vm_ops->close) mpnt->vm_ops->close(mpnt); @@ -1163,8 +1364,9 @@ BUG(); clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD); - flush_tlb_mm(mm); + vm_validate_enough("exiting exit_mmap"); + } /* Insert vm structure into process list sorted by address diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/mprotect.c linux.22-ac2/mm/mprotect.c --- linux.vanilla/mm/mprotect.c 2002-08-03 16:08:33.000000000 +0100 +++ linux.22-ac2/mm/mprotect.c 2003-06-29 16:09:24.000000000 +0100 @@ -2,6 +2,23 @@ * linux/mm/mprotect.c * * (C) Copyright 1994 Linus Torvalds + * + * Address space accounting code + * (c) Copyright 2002 Red Hat Inc, All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include @@ -241,11 +258,28 @@ { pgprot_t newprot; int error; + unsigned long charged = 0; if (newflags == vma->vm_flags) { *pprev = vma; return 0; } + + /* + * If we make a private mapping writable we increase our commit; + * but (without finer accounting) cannot reduce our commit if we + * make it unwritable again. + * + * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting + * a MAP_NORESERVE private mapping to writable will now reserve. 
+ */ + if ((newflags & VM_WRITE) && + !(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) { + charged = (end - start) >> PAGE_SHIFT; + if (!vm_enough_memory(charged)) + return -ENOMEM; + newflags |= VM_ACCOUNT; + } newprot = protection_map[newflags & 0xf]; if (start == vma->vm_start) { if (end == vma->vm_end) @@ -256,10 +290,10 @@ error = mprotect_fixup_end(vma, pprev, start, newflags, newprot); else error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot); - - if (error) + if (error) { + vm_unacct_memory(charged); return error; - + } change_protection(start, end, newprot); return 0; } @@ -270,6 +304,8 @@ struct vm_area_struct * vma, * next, * prev; int error = -EINVAL; + vm_validate_enough("entering mprotect"); + if (start & ~PAGE_MASK) return -EINVAL; len = PAGE_ALIGN(len); @@ -333,5 +369,6 @@ } out: up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting mprotect"); return error; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/mremap.c linux.22-ac2/mm/mremap.c --- linux.vanilla/mm/mremap.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/mm/mremap.c 2003-07-06 18:49:56.000000000 +0100 @@ -2,6 +2,23 @@ * linux/mm/remap.c * * (C) Copyright 1996 Linus Torvalds + * + * Address space accounting code + * (c) Copyright 2002 Red Hat Inc, All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include @@ -13,8 +30,6 @@ #include #include -extern int vm_enough_memory(long pages); - static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr) { pgd_t * pgd; @@ -130,6 +145,7 @@ struct vm_area_struct * new_vma, * next, * prev; int allocated_vma; + new_vma = NULL; next = find_vma_prev(mm, new_addr, &prev); if (next) { @@ -183,7 +199,7 @@ *new_vma = *vma; new_vma->vm_start = new_addr; new_vma->vm_end = new_addr+new_len; - new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT; + new_vma->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; new_vma->vm_raend = 0; if (new_vma->vm_file) get_file(new_vma->vm_file); @@ -191,9 +207,10 @@ new_vma->vm_ops->open(new_vma); insert_vm_struct(current->mm, new_vma); } - - do_munmap(current->mm, addr, old_len); - + + /* The old VMA has been accounted for, don't double account */ + do_munmap(current->mm, addr, old_len, 0); + current->mm->total_vm += new_len >> PAGE_SHIFT; if (vm_locked) { current->mm->locked_vm += new_len >> PAGE_SHIFT; @@ -222,6 +239,7 @@ { struct vm_area_struct *vma; unsigned long ret = -EINVAL; + unsigned long charged = 0; if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) goto out; @@ -251,16 +269,17 @@ if ((addr <= new_addr) && (addr+old_len) > new_addr) goto out; - do_munmap(current->mm, new_addr, new_len); + do_munmap(current->mm, new_addr, new_len, 1); } /* * Always allow a shrinking remap: that just unmaps * the unnecessary pages.. 
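The mprotect_fixup() change above is visible from userspace: a private mapping is charged against the commit limit only once it becomes writable, so the protection upgrade itself can now fail. A small userspace illustration, with make_writable() invented for the example:

/* Illustration only (userspace), not part of the patch. */
#include <sys/mman.h>

static int make_writable(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return -1;
	/*
	 * The read-only private mapping was not charged.  Adding
	 * PROT_WRITE charges len >> PAGE_SHIFT pages, tags the vma
	 * with VM_ACCOUNT, and fails with ENOMEM if
	 * vm_enough_memory() refuses the charge.
	 */
	return mprotect(p, len, PROT_READ | PROT_WRITE);
}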
+ * do_munmap does all the needed commit accounting */ ret = addr; if (old_len >= new_len) { - do_munmap(current->mm, addr+new_len, old_len - new_len); + do_munmap(current->mm, addr+new_len, old_len - new_len, 1); if (!(flags & MREMAP_FIXED) || (new_addr == addr)) goto out; } @@ -290,11 +309,12 @@ if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len) > current->rlim[RLIMIT_AS].rlim_cur) goto out; - /* Private writable mapping? Check memory availability.. */ - if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE && - !(flags & MAP_NORESERVE) && - !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT)) - goto out; + + if (vma->vm_flags & VM_ACCOUNT) { + charged = (new_len - old_len) >> PAGE_SHIFT; + if(!vm_enough_memory(charged)) + goto out_nc; + } /* old_len exactly to the end of the area.. * And we're not relocating the area. @@ -318,6 +338,7 @@ addr + new_len); } ret = addr; + vm_validate_enough("mremap path1"); goto out; } } @@ -341,6 +362,12 @@ ret = move_vma(vma, addr, old_len, new_len, new_addr); } out: + if(ret & ~PAGE_MASK) + { + vm_unacct_memory(charged); + vm_validate_enough("mremap error path"); + } +out_nc: return ret; } @@ -350,8 +377,10 @@ { unsigned long ret; + vm_validate_enough("entry to mremap"); down_write(¤t->mm->mmap_sem); ret = do_mremap(addr, old_len, new_len, flags, new_addr); up_write(¤t->mm->mmap_sem); + vm_validate_enough("exit from mremap"); return ret; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/oom_kill.c linux.22-ac2/mm/oom_kill.c --- linux.vanilla/mm/oom_kill.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/mm/oom_kill.c 2003-07-14 12:55:14.000000000 +0100 @@ -86,7 +86,7 @@ * Niced processes are most likely less important, so double * their badness points. */ - if (p->nice > 0) + if (task_nice(p) > 0) points *= 2; /* @@ -150,7 +150,7 @@ * all the memory it needs. That way it should be able to * exit() and clear out its resources quickly... */ - p->counter = 5 * HZ; + p->time_slice = HZ; p->flags |= PF_MEMALLOC | PF_MEMDIE; /* This process has hardware access, be more careful. */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/shmem.c linux.22-ac2/mm/shmem.c --- linux.vanilla/mm/shmem.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/mm/shmem.c 2003-07-06 18:58:02.000000000 +0100 @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -376,10 +377,20 @@ { struct inode *inode = dentry->d_inode; struct page *page = NULL; + long change = 0; int error; - if (attr->ia_valid & ATTR_SIZE) { - if (attr->ia_size < inode->i_size) { + if ((attr->ia_valid & ATTR_SIZE) && (attr->ia_size <= SHMEM_MAX_BYTES)) { + /* + * Account swap file usage based on new file size, + * but just let vmtruncate fail on out-of-range sizes. 
+ */ + change = VM_ACCT(attr->ia_size) - VM_ACCT(inode->i_size); + if (change > 0) { + if (!vm_enough_memory(change)) + return -ENOMEM; + } else if (attr->ia_size < inode->i_size) { + vm_unacct_memory(-change); /* * If truncating down to a partial page, then * if that page is already allocated, hold it @@ -400,6 +411,8 @@ error = inode_setattr(inode, attr); if (page) page_cache_release(page); + if (error) + vm_unacct_memory(change); return error; } @@ -412,6 +425,7 @@ spin_lock(&shmem_ilock); list_del(&info->list); spin_unlock(&shmem_ilock); + vm_unacct_memory(VM_ACCT(inode->i_size)); inode->i_size = 0; shmem_truncate(inode); } @@ -928,6 +942,7 @@ loff_t pos; unsigned long written; int err; + loff_t maxpos; if ((ssize_t) count < 0) return -EINVAL; @@ -944,6 +959,15 @@ if (err || !count) goto out; + maxpos = inode->i_size; + if (maxpos < pos + count) { + maxpos = pos + count; + if (!vm_enough_memory(VM_ACCT(maxpos) - VM_ACCT(inode->i_size))) { + err = -ENOMEM; + goto out; + } + } + remove_suid(inode); inode->i_ctime = inode->i_mtime = CURRENT_TIME; @@ -952,12 +976,15 @@ unsigned long bytes, index, offset; char *kaddr; int left; + int deactivate = 1; offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ index = pos >> PAGE_CACHE_SHIFT; bytes = PAGE_CACHE_SIZE - offset; - if (bytes > count) + if (bytes > count) { bytes = count; + deactivate = 0; + } /* * We don't hold page lock across copy from user - @@ -982,6 +1009,12 @@ flush_dcache_page(page); SetPageDirty(page); SetPageReferenced(page); +#ifdef PG_inactive_dirty + if (deactivate) + deactivate_page(page); + else + mark_page_accessed(page); +#endif page_cache_release(page); if (left) { @@ -995,6 +1028,10 @@ *ppos = pos; if (written) err = written; + + /* Short writes give back address space */ + if (inode->i_size != maxpos) + vm_unacct_memory(VM_ACCT(maxpos) - VM_ACCT(inode->i_size)); out: up(&inode->i_sem); return err; @@ -1293,8 +1330,13 @@ memcpy(info, symname, len); inode->i_op = &shmem_symlink_inline_operations; } else { + if (!vm_enough_memory(VM_ACCT(1))) { + iput(inode); + return -ENOMEM; + } error = shmem_getpage(inode, 0, &page, SGP_WRITE); if (error) { + vm_unacct_memory(VM_ACCT(1)); iput(inode); return error; } @@ -1611,7 +1653,6 @@ struct inode *inode; struct dentry *dentry, *root; struct qstr this; - int vm_enough_memory(long pages); if (IS_ERR(shm_mnt)) return (void *)shm_mnt; @@ -1622,13 +1663,14 @@ if (!vm_enough_memory(VM_ACCT(size))) return ERR_PTR(-ENOMEM); + error = -ENOMEM; this.name = name; this.len = strlen(name); this.hash = 0; /* will go */ root = shm_mnt->mnt_root; dentry = d_alloc(root, &this); if (!dentry) - return ERR_PTR(-ENOMEM); + goto put_memory; error = -ENFILE; file = get_empty_filp(); @@ -1653,6 +1695,8 @@ put_filp(file); put_dentry: dput(dentry); +put_memory: + vm_unacct_memory(VM_ACCT(size)); return ERR_PTR(error); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/slab.c linux.22-ac2/mm/slab.c --- linux.vanilla/mm/slab.c 2003-06-14 00:11:43.000000000 +0100 +++ linux.22-ac2/mm/slab.c 2003-06-29 16:09:24.000000000 +0100 @@ -297,8 +297,9 @@ #define RED_MAGIC2 0x170FC2A5UL /* when obj is inactive */ /* ...and for poisoning */ -#define POISON_BYTE 0x5a /* byte value for poisoning */ -#define POISON_END 0xa5 /* end-byte of poisoning */ +#define POISON_BEFORE 0x5a /* for use-uninitialised poisoning */ +#define POISON_AFTER 0x6b /* for use-after-free poisoning */ +#define POISON_END 0xa5 /* end-byte of poisoning */ #endif @@ -522,14 +523,15 @@ } #if DEBUG -static inline void 
kmem_poison_obj (kmem_cache_t *cachep, void *addr) +static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr, + unsigned char val) { int size = cachep->objsize; if (cachep->flags & SLAB_RED_ZONE) { addr += BYTES_PER_WORD; size -= 2*BYTES_PER_WORD; } - memset(addr, POISON_BYTE, size); + memset(addr, val, size); *(unsigned char *)(addr+size-1) = POISON_END; } @@ -1083,7 +1085,7 @@ objp -= BYTES_PER_WORD; if (cachep->flags & SLAB_POISON) /* need to poison the objs */ - kmem_poison_obj(cachep, objp); + kmem_poison_obj(cachep, objp, POISON_BEFORE); if (cachep->flags & SLAB_RED_ZONE) { if (*((unsigned long*)(objp)) != RED_MAGIC1) BUG(); @@ -1443,7 +1445,7 @@ BUG(); } if (cachep->flags & SLAB_POISON) - kmem_poison_obj(cachep, objp); + kmem_poison_obj(cachep, objp, POISON_AFTER); if (kmem_extra_free_checks(cachep, slabp, objp)) return; #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/swapfile.c linux.22-ac2/mm/swapfile.c --- linux.vanilla/mm/swapfile.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/mm/swapfile.c 2003-07-09 13:35:27.000000000 +0100 @@ -722,23 +722,32 @@ struct swap_info_struct * p = NULL; unsigned short *swap_map; struct nameidata nd; + struct file *victim; + struct address_space *mapping; int i, type, prev; int err; + char *tmp; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - err = user_path_walk(specialfile, &nd); - if (err) + tmp = getname(specialfile); + if(IS_ERR(tmp)) + return PTR_ERR(tmp); + + victim = filp_open(tmp, O_RDWR, 0); + err = PTR_ERR(victim); + if (IS_ERR(victim)) goto out; + mapping = victim->f_dentry->d_inode->i_mapping; lock_kernel(); prev = -1; swap_list_lock(); for (type = swap_list.head; type >= 0; type = swap_info[type].next) { p = swap_info + type; if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) { - if (p->swap_file == nd.dentry) + if (p->swap_file->d_inode->i_mapping==mapping) break; } prev = type; @@ -746,7 +755,7 @@ err = -EINVAL; if (type < 0) { swap_list_unlock(); - goto out_dput; + goto out_dput_no_nd; } if (prev < 0) { @@ -784,7 +793,6 @@ } if (p->swap_device) blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP); - path_release(&nd); swap_list_lock(); swap_device_lock(p); @@ -803,9 +811,12 @@ err = 0; out_dput: - unlock_kernel(); path_release(&nd); +out_dput_no_nd: + unlock_kernel(); + filp_close(victim, NULL); out: + putname(tmp); return err; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/mm/vmalloc.c linux.22-ac2/mm/vmalloc.c --- linux.vanilla/mm/vmalloc.c 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/mm/vmalloc.c 2003-06-29 16:09:24.000000000 +0100 @@ -374,3 +374,22 @@ read_unlock(&vmlist_lock); return buf - buf_start; } + +void *vcalloc(unsigned long nmemb, unsigned long elem_size) +{ + unsigned long size; + void *addr; + + /* + * Check that we're not going to overflow. 
+ */ + if (nmemb > (ULONG_MAX / elem_size)) + return NULL; + + size = nmemb * elem_size; + addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + + return addr; +} diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/appletalk/ddp.c linux.22-ac2/net/appletalk/ddp.c --- linux.vanilla/net/appletalk/ddp.c 2001-09-10 15:57:00.000000000 +0100 +++ linux.22-ac2/net/appletalk/ddp.c 2003-08-29 12:34:21.000000000 +0100 @@ -978,6 +978,23 @@ if (copy_from_user(&rt, arg, sizeof(rt))) return -EFAULT; + + if(rt.rt_dev) + { + char *colon; + struct net_device *dev; + char devname[IFNAMSIZ]; + + if (copy_from_user(devname, rt.rt_dev, IFNAMSIZ-1)) + return -EFAULT; + devname[IFNAMSIZ-1] = 0; + colon = strchr(devname, ':'); + if (colon) + *colon = 0; + dev = __dev_get_by_name(devname); + if(dev == NULL) + return -ENODEV; + } switch (cmd) { case SIOCDELRT: @@ -987,13 +1004,6 @@ &rt.rt_dst)->sat_addr); case SIOCADDRT: - /* FIXME: the name of the device is still in user - * space, isn't it? */ - if (rt.rt_dev) { - dev = __dev_get_by_name(rt.rt_dev); - if (!dev) - return -ENODEV; - } return atrtr_create(&rt, dev); } return -EINVAL; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/atm/common.c linux.22-ac2/net/atm/common.c --- linux.vanilla/net/atm/common.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/atm/common.c 2003-08-28 22:09:20.000000000 +0100 @@ -672,7 +672,8 @@ } if (try_atm_clip_ops()) { ret_val = atm_clip_ops->clip_create(arg); - __MOD_DEC_USE_COUNT(atm_clip_ops->owner); + if (atm_clip_ops->owner) + __MOD_DEC_USE_COUNT(atm_clip_ops->owner); } else ret_val = -ENOSYS; goto done; @@ -687,7 +688,8 @@ #endif if (try_atm_clip_ops()) { error = atm_clip_ops->atm_init_atmarp(vcc); - __MOD_DEC_USE_COUNT(atm_clip_ops->owner); + if (atm_clip_ops->owner) + __MOD_DEC_USE_COUNT(atm_clip_ops->owner); if (!error) sock->state = SS_CONNECTED; ret_val = error; @@ -701,7 +703,8 @@ } if (try_atm_clip_ops()) { ret_val = atm_clip_ops->clip_mkip(vcc, arg); - __MOD_DEC_USE_COUNT(atm_clip_ops->owner); + if (atm_clip_ops->owner) + __MOD_DEC_USE_COUNT(atm_clip_ops->owner); } else ret_val = -ENOSYS; goto done; @@ -712,7 +715,8 @@ } if (try_atm_clip_ops()) { ret_val = atm_clip_ops->clip_setentry(vcc, arg); - __MOD_DEC_USE_COUNT(atm_clip_ops->owner); + if (atm_clip_ops->owner) + __MOD_DEC_USE_COUNT(atm_clip_ops->owner); } else ret_val = -ENOSYS; goto done; @@ -723,7 +727,8 @@ } if (try_atm_clip_ops()) { ret_val = atm_clip_ops->clip_encap(vcc, arg); - __MOD_DEC_USE_COUNT(atm_clip_ops->owner); + if (atm_clip_ops->owner) + __MOD_DEC_USE_COUNT(atm_clip_ops->owner); } else ret_val = -ENOSYS; goto done; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/atm/proc.c linux.22-ac2/net/atm/proc.c --- linux.vanilla/net/atm/proc.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/atm/proc.c 2003-08-28 22:09:20.000000000 +0100 @@ -358,7 +358,7 @@ spin_unlock_irqrestore(&dev->lock, flags); spin_unlock(&atm_dev_lock); #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) - if (clip_info) + if (clip_info && atm_clip_ops->owner) __MOD_DEC_USE_COUNT(atm_clip_ops->owner); #endif return strlen(buf); @@ -367,8 +367,8 @@ } spin_unlock(&atm_dev_lock); #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) - if (clip_info) - __MOD_DEC_USE_COUNT(atm_clip_ops->owner); + if (clip_info && atm_clip_ops->owner) + __MOD_DEC_USE_COUNT(atm_clip_ops->owner); #endif return 0; } @@ -458,7 +458,8 @@ if (--count) continue; 
atmarp_info(n->dev,entry,NULL,buf); read_unlock_bh(&clip_tbl_hook->lock); - __MOD_DEC_USE_COUNT(atm_clip_ops->owner); + if (atm_clip_ops->owner) + __MOD_DEC_USE_COUNT(atm_clip_ops->owner); return strlen(buf); } for (vcc = entry->vccs; vcc; @@ -466,12 +467,14 @@ if (--count) continue; atmarp_info(n->dev,entry,vcc,buf); read_unlock_bh(&clip_tbl_hook->lock); - __MOD_DEC_USE_COUNT(atm_clip_ops->owner); + if (atm_clip_ops->owner) + __MOD_DEC_USE_COUNT(atm_clip_ops->owner); return strlen(buf); } } read_unlock_bh(&clip_tbl_hook->lock); - __MOD_DEC_USE_COUNT(atm_clip_ops->owner); + if (atm_clip_ops->owner) + __MOD_DEC_USE_COUNT(atm_clip_ops->owner); return 0; } #endif diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/bluetooth/bnep/core.c linux.22-ac2/net/bluetooth/bnep/core.c --- linux.vanilla/net/bluetooth/bnep/core.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/bluetooth/bnep/core.c 2003-07-22 18:34:25.000000000 +0100 @@ -460,8 +460,6 @@ sigfillset(¤t->blocked); flush_signals(current); - current->nice = -15; - set_fs(KERNEL_DS); init_waitqueue_entry(&wait, current); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/bluetooth/cmtp/core.c linux.22-ac2/net/bluetooth/cmtp/core.c --- linux.vanilla/net/bluetooth/cmtp/core.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/bluetooth/cmtp/core.c 2003-07-22 18:34:25.000000000 +0100 @@ -298,7 +298,7 @@ sigfillset(¤t->blocked); flush_signals(current); - current->nice = -15; +// current->nice = -15; set_fs(KERNEL_DS); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/bridge/br_forward.c linux.22-ac2/net/bridge/br_forward.c --- linux.vanilla/net/bridge/br_forward.c 2002-08-03 16:08:33.000000000 +0100 +++ linux.22-ac2/net/bridge/br_forward.c 2003-09-09 18:59:18.000000000 +0100 @@ -59,6 +59,7 @@ indev = skb->dev; skb->dev = to->dev; + skb->ip_summed = CHECKSUM_NONE; NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev, __br_forward_finish); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/Config.in linux.22-ac2/net/Config.in --- linux.vanilla/net/Config.in 2003-08-28 16:45:46.000000000 +0100 +++ linux.22-ac2/net/Config.in 2003-07-30 21:19:42.000000000 +0100 @@ -67,6 +67,7 @@ dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25 + tristate 'EDP2 ATA Packet Layer (EXPERIMENTAL)' CONFIG_EDP2 tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC bool 'Frame Diverter (EXPERIMENTAL)' CONFIG_NET_DIVERT diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/edp2/edp2.c linux.22-ac2/net/edp2/edp2.c --- linux.vanilla/net/edp2/edp2.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/net/edp2/edp2.c 2003-06-29 16:09:50.000000000 +0100 @@ -0,0 +1,598 @@ +/* + * Copyright 2003 Red Hat Inc. All Rights Reserved. + * + * Initial experimental implementation of the EDP2 protocol layer + * + * The contents of this file are subject to the Open Software License + * version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is + * included herein by reference. 
+ * + * Alternatively, the contents of this file may be used under the + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void edp2_dev_timeout(unsigned long data); +static struct edp2_device *edp_devices; +spinlock_t edp2_device_database = SPIN_LOCK_UNLOCKED; + +EXPORT_SYMBOL_GPL(edp2_device_database); + +/** + * edp2_dev_find - find EDP2 device + * @mac: MAC address + * + * Each EDP2 device has a unique MAC address. We key on this + * for searches. We don't key on interface because we want to + * handle multipathing eventually + * + * The caller must hold the edp2_device_database lock. + * + * Returns: + * device pointer, or NULL - not found + */ + +struct edp2_device *edp2_dev_find(u8 *mac) +{ + struct edp2_device *dev = edp_devices; + + while(dev != NULL) + { + if(memcmp(dev->mac, mac, 6) == 0) + return dev; + dev = dev->next; + } + return NULL; +} + +EXPORT_SYMBOL_GPL(edp2_dev_find); + +/** + * edp2_dev_alloc - Allocate EDP2 device + * @mac: MAC address + * + * Add a new EDP2 device to the database. The basic fields are + * filled in and the device is added. The caller must hold the + * edp2_device_database_lock + * + * Returns: + * device pointer or NULL - out of memory + */ + +struct edp2_device *edp2_dev_alloc(u8 *mac) +{ + struct edp2_device *dev = kmalloc(sizeof(struct edp2_device), GFP_ATOMIC); + if(dev == NULL) + return NULL; + memset(dev, 0, sizeof(struct edp2_device)); + memcpy(dev->mac, mac, 6); + init_timer(&dev->timer); + dev->timer.data = (unsigned long)dev; + dev->timer.function = edp2_dev_timeout; + dev->users = 0; + dev->next = edp_devices; + edp_devices = dev; + return dev; +} + +EXPORT_SYMBOL_GPL(edp2_dev_alloc); + +/** + * edp2_dev_free - Free an EDP2 device + * @dev: device to free + * + * Release an unused EDP2 device. This layer does not + * check usage counts, the caller must handle that. + * The caller must hold the edp2_device_database_lock + */ + +void edp2_dev_free(struct edp2_device *dev) +{ + struct edp2_device **p = &edp_devices; + + while(*p != NULL) + { + if(*p == dev) + { + *p = dev->next; + if(dev->dev) + dev_put(dev->dev); + kfree(dev); + return; + } + p = &((*p)->next); + } + BUG(); +} + +EXPORT_SYMBOL_GPL(edp2_dev_free); + +/** + * edp2_dev_recover - hunt for a lost device + * @dev: The EDP2 device which vanished + * + * Called when an EDP2 device that is in use vanishes. We + * will eventually use this to do discovery for it on other + * links in case it is multipathed + */ + +static void edp2_dev_recover(struct edp2_device *dev) +{ +} + +/** + * edp2_dev_flush - flush down devices + * @dev: network device that is down + * + * Mark all the devices that are unreachable as dead + * for the moment. They aren't neccessarily defunct + * as they may be multipathed and if so they will ident + * for us later. Unused devices are freed. 
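The find/alloc/free primitives above are exported (EXPORT_SYMBOL_GPL) for use by a disk layer; the expected lookup-or-create pattern under the database lock looks roughly like the sketch below, where edp2_dev_get() itself is an invented wrapper.

/*
 * Illustration only, not part of the patch: find a target by MAC,
 * creating an entry on first contact, and take a reference on it.
 */
static struct edp2_device *edp2_dev_get(u8 *mac)
{
	struct edp2_device *dev;
	unsigned long flags;

	spin_lock_irqsave(&edp2_device_database, flags);
	dev = edp2_dev_find(mac);
	if (dev == NULL)
		dev = edp2_dev_alloc(mac);	/* GFP_ATOMIC, may fail */
	if (dev != NULL)
		dev->users++;	/* edp2_dev_free() only once this is 0 again */
	spin_unlock_irqrestore(&edp2_device_database, flags);
	return dev;
}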
+ */ + +static void edp2_dev_flush(struct net_device *dev) +{ + struct edp2_device **p = &edp_devices; + + while(*p != NULL) + { + if((*p)->dev == dev) + { + struct edp2_device *d = *p; + d->dead = 1; + dev_put(d->dev); + d->dev = NULL; + if(d->users == 0) + { + *p = d->next; + kfree(d); + continue; + } + else /* Live disk vanished, hunt for it */ + edp2_dev_recover(d); + } + p = &((*p)->next); + } +} + +/** + * edp2_device_recover_complete - device returns + * @edp: edp device + * + * An EDP device marked as dead has been heard from. At the + * moment we do minimal housekeeping here but more is likely + * to be added + */ + +static void edp2_device_recover_complete(struct edp2_device *edp) +{ + edp->dead = 0; +} + +/* + * EDP2 protocol handlers + */ + +/** + * edp2_ident_reply - EDP ident protocol handler + * @edp: EDP2 header + * @skb: Buffer + * + * An EDP2 ident message has been sent to us. These occur when + * we probe and also when a device is activated. We use the ident + * replies to keep the EDP2 device database current. + */ + +static void edp2_ident_reply(struct edp2_device *dev, struct edp2 *edp, struct sk_buff *skb) +{ + /* Ident indicates volume found or maybe a new volume */ + struct edp2_ata_fid1 *ident; + + ident = (struct edp2_ata_fid1 *)skb_pull(skb, sizeof(struct edp2_ata_fid1)); + if(ident == NULL) + return; + + if(dev) + { + /* FIXME: can't handle count change on active device */ + dev->queue = ntohl(ident->count); + dev->revision = ntohl(ident->firmware); + dev->protocol = ntohs(ident->aoe)&15; + dev->shelf = ntohs(ident->shad); + dev->slot = ntohs(ident->slad); + dev->last_ident = jiffies; + } + else + { + dev = edp2_dev_alloc(skb->mac.raw+6); + if(dev != NULL) + { + dev_hold(skb->dev); + dev->dev = skb->dev; + dev->last_ident = jiffies; + dev->queue = ntohl(ident->count); + if(dev->queue > MAX_QUEUED) + dev->queue = MAX_QUEUED; + dev->revision = ntohl(ident->firmware); + dev->protocol = ntohs(ident->aoe)&15; + dev->shelf = ntohs(ident->shad); + dev->slot = ntohs(ident->slad); + /* Report the new device to the disk layer */ + /* edp2_disk_new(dev, skb); */ + } + } +} + +/** + * edp2_callback - EDP2 disk functions + * @dev: EDP2 device + * @edp: EDP2 header + * @skb: buffer + * + * An EDP2 response has arrived. There are several kinds + * of request and reply frames for our drivers. We use the + * tag to find the matching transmit and then the callback + * to tidy up. + */ + +static void edp2_callback(struct edp2_device *dev, struct edp2 *edp, struct sk_buff *skb) +{ + struct sk_buff *txskb; + struct edp2 *txedp; + struct edp2_cb *cb; + int slot = edp->tag & (MAX_QUEUED-1); + + /* Use the tag to find the tx frame */ + txskb = dev->skb[slot]; + if(txskb == NULL) + return; + /* Check the tag */ + txedp = (struct edp2 *)(skb->data+14); + if(txedp->tag != edp->tag) + return; + /* Clean up before callback in case we free the last slot */ + dev->skb[slot] = NULL; + dev->count--; + /* Ok it is our reply */ + cb = (struct edp2_cb *)txskb->cb; + cb->completion(dev, edp, cb->data, skb); + kfree_skb(txskb); + if(dev->count == 0) + del_timer(&dev->timer); +} + +/** + * edp2_rcv - EDP2 packet handler + * @skb: buffer + * @dev: device received on + * @pt: protocol + * + * Called whenever the networking layer receives an EDP2 type frame. + * We perform the basic validation for an EDP initiator and then + * hand the frame on for processing. 
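edp2_callback() above recovers the transmit slot from the low tag bits, checks the full tag against the saved frame and then calls the completion hook stored in the skb control block. A completion handler therefore has the shape sketched here; example_done() is illustrative, only the signature is taken from edp2_alloc_skb() further down in this file, and a NULL reply means the retransmit path gave up on the request:

static int example_done(struct edp2_device *dev, struct edp2 *reply,
			unsigned long data, struct sk_buff *skb)
{
	if (reply == NULL) {
		/* edp2_resend_timeout() ran out of retries */
		printk(KERN_WARNING "edp2: request %lu timed out\n", data);
		return -ETIMEDOUT;
	}
	/* reply->tag still carries the slot number in its low bits */
	printk(KERN_DEBUG "edp2: slot %d completed\n",
	       (int)(reply->tag & (MAX_QUEUED - 1)));
	return 0;
}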
+ */ + +static int edp2_rcv(struct sk_buff *skb, struct net_device *netdev, struct packet_type *pt) +{ + struct edp2 *edp; + struct edp2_device *dev; + + if(skb->pkt_type == PACKET_OTHERHOST) + goto drop; + + edp = (struct edp2 *)skb_pull(skb, 6); + if(edp == NULL) + goto drop; + + /* We are an initiator */ + if(!(edp->flag_err & EDP_F_RESPONSE)) + goto drop; + + + spin_lock(&edp2_device_database); + dev = edp2_dev_find(skb->mac.raw + 6); + if(dev != NULL) + { + if(dev->dead) + edp2_device_recover_complete(dev); + if(dev->dev != skb->dev) + { + /* A volume moved interface */ + dev_put(dev->dev); + dev_hold(skb->dev); + dev->dev = skb->dev; + } + } + switch(edp->function) + { + case 0: /* EDP ATA */ + case 3: /* Claim reply */ + edp2_callback(dev, edp, skb); + break; + case 1: /* EDP ident reply or beacon */ + edp2_ident_reply(dev, edp, skb); + break; + case 2: /* Statistics reply */ + default:; + /* Unknown type */ + } + spin_unlock(&edp2_device_database); +drop: + kfree_skb(skb); + return NET_RX_SUCCESS; +}
+ +/** + * edp2_resend_frame - edp2 retry helper + * @dev: device to talk to + * @slot: slot to resend + * + * Retransmit an EDP packet. This is also called to kick off + * the initial transmission event. Caller must hold the edp2 + * protocol lock. + */ + +static void edp2_resend_frame(struct edp2_device *dev, int slot) +{ + struct sk_buff *skb; + if(dev->dead) + return; + skb = skb_clone(dev->skb[slot], GFP_ATOMIC); + if(skb == NULL) + return; + /* Fill in sending interface */ + memcpy(skb->data, dev->dev->dev_addr, 6); + /* Fire at will */ + skb->dev = dev->dev; + skb->nh.raw = skb->data + 14; + skb->mac.raw = skb->data; + skb->protocol = htons(ETH_P_EDP2); + dev_queue_xmit(skb); +}
+ +/** + * edp2_resend_timeout - EDP2 failed + * @dev: Device that timed out + * @slot: Slot ID of timed out frame + * + * Clean up after a timeout failure. We call the callback with + * NULL replies to indicate a failure. Possibly we should have two + * callback functions ? + */ + +static void edp2_resend_timeout(struct edp2_device *dev, int slot) +{ + struct sk_buff *skb = dev->skb[slot]; + struct edp2_cb *cb; + dev->skb[slot] = NULL; + cb = (struct edp2_cb *)skb->cb; + cb->completion(dev, NULL, cb->data, NULL); + kfree_skb(skb); +}
+ +/** + * edp2_dev_timeout - EDP2 timer event + * @data: The EDP2 device that timed out + * + * Walk the EDP2 list for this device and handle retransmits. + * The current slot array is just to get it going, we need a real + * linked chain to walk for this and other purposes. + * + * FIXME: locking + */ + +static void edp2_dev_timeout(unsigned long data) +{ + struct edp2_device *dev = (struct edp2_device *)data; + struct edp2_cb *cb; + unsigned long next = jiffies + 1000*HZ; + int found = 0; + int i; + + for(i=0; i < MAX_QUEUED; i++) + { + if(dev->skb[i]) + { + cb = (struct edp2_cb *)(dev->skb[i]->cb); + if(time_after(jiffies, cb->timeout)) + { + /* Need to do better adaption ! */ + cb->timeout = jiffies + (1 << cb->count)*HZ; + cb->count++; + if(cb->count == 10) + edp2_resend_timeout(dev, i); + else + { + found=1; + edp2_resend_frame(dev, i); + } + } + else + { + found = 1; + if(time_before(cb->timeout, next)) + next = cb->timeout; + } + } + } + if(!found) + return; + mod_timer(&dev->timer, next); +}
+ +/** + * edp2_queue_xmit - send an EDP2 frame + * @dev: device to talk to + * @edp: edp header in packet + * @skb: packet to send + * + * Send a prepared EDP frame out to a device. The EDP frame + * has its timers set and is queued for retransmit in case + * of problems.
We also use the low tag bits ourselves. + * + * The caller has completed the edp2_cb control block and + * must hold the edp2 protocol lock. + */ + +int edp2_queue_xmit(struct edp2_device *dev, struct edp2 *edp, struct sk_buff *skb) +{ + int next ; + edp->tag &= ~0xFF; /* Low byte is reserved for protocol muncher */ + + /* FIXME: better algorithm given the fact completion is + generally ordered */ + for(next = 0; next < MAX_QUEUED; next++) + if(dev->skb[next] == NULL) + break; + if(dev->skb[next]) + BUG(); + dev->skb[next] = skb; + edp->tag |= next; /* So we can find the skb fast */ + dev->count++; + /* FIXME: need to know if the timer is live before adjusting + but must do this race free of the expire path */ + mod_timer(&dev->timer, jiffies + 2 * HZ); + edp2_resend_frame(dev, next); + return next; +} + +EXPORT_SYMBOL_GPL(edp2_queue_xmit); + +/** + * edp2_alloc_skb - alloc an EDP2 frame + * @dev: Device we will talk to + * @len: length of frame (in full) + * @function: EDP function + * @callback: Callback handler on completion/error + * @data: data for callback + * @timeout: retransmit timeout + * + * Allocate and build the basis of an EDP2 frame + * We hand up a frame with everything built below the + * application layer. + * + * The caller must hold the edp2 protocol lock + */ + +struct sk_buff *edp2_alloc_skb(struct edp2_device *dev, int len, int function, + int (*callback)(struct edp2_device *, struct edp2 *, unsigned long, struct sk_buff *), + unsigned long data, unsigned long timeout) +{ + struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC); + struct edp2_cb *cb = (struct edp2_cb *)skb->cb; + struct edp2 *edp; + u8 *hdr; + if(skb == NULL) + return NULL; + + /* MAC header */ + hdr = skb_put(skb, 14); + memcpy(hdr+6, dev->mac, 6); + hdr[12] = 0x88; + hdr[13] = 0xA2; + + edp = (struct edp2 *)skb_put(skb, sizeof(struct edp2)); + edp->flag_err = 0; + edp->function = function; + edp->tag = dev->tag << 8; /* Kernel tags are 0:devtag:slot */ + dev->tag++; /* Wraps at 65536 */ + + /* So we can handle the completion path nicely */ + cb->completion = callback; + cb->data = data; + cb->timeout = timeout; + cb->count = 0; + + return skb; +} + +EXPORT_SYMBOL_GPL(edp2_alloc_skb); + +/** + * edp2_device_event - device change notifier + * @this: event block + * @event: event id + * @ptr: data for event + * + * Listen to the network layer events and watch for devices going + * down. We need to release any devices that vanish so that the + * refcount drops and the device can be flushed. + */ + +static int edp2_device_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + unsigned long flags; + if(event == NETDEV_DOWN) + { + spin_lock_irqsave(&edp2_device_database, flags); + edp2_dev_flush(ptr); + spin_unlock_irqrestore(&edp2_device_database, flags); + } + return NOTIFY_DONE; +} + +static struct notifier_block edp2_notifier = { + notifier_call: edp2_device_event +}; + +static struct packet_type edp2_dix_packet_type = { + type: __constant_htons(ETH_P_EDP2), + func: edp2_rcv, + data: (void *) 1, /* Shared skb is ok */ +}; + +/** + * edp2_proto_init - set up the EDP2 protocol handlers + * + * This function sets up the protocol layer for EDP2. It assumes + * that the caller is ready to receive packets and events so + * requires the EDP block layer data structures are initialised. 
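Putting edp2_alloc_skb() and edp2_queue_xmit() together, a caller wanting to poke a shelf for an ident would do something like the sketch below; edp2_send_ident() is illustrative and not part of the patch, example_done() is the hypothetical handler sketched earlier, the function code 1 is assumed from the dispatch in edp2_rcv(), and the caller is assumed to hold the edp2 protocol lock as both kerneldoc comments require. Once queued, edp2_dev_timeout() retransmits with an exponential backoff (roughly 1, 2, 4, ... seconds) and fails the request back to its completion handler with a NULL reply after the tenth expiry.

static int edp2_send_ident(struct edp2_device *dev)
{
	struct sk_buff *skb;
	struct edp2 *edp;

	/* 128 bytes leaves room for the 14 byte MAC header, the EDP2
	   header and any function specific payload added with skb_put() */
	skb = edp2_alloc_skb(dev, 128, 1 /* ident */, example_done,
			     0, jiffies + HZ);
	if (skb == NULL)
		return -ENOMEM;
	edp = (struct edp2 *)(skb->data + 14);
	edp2_queue_xmit(dev, edp, skb);
	return 0;
}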
+ */ + +static __init int edp2_proto_init(void) +{ + printk(KERN_INFO "Coraid EDPv2 Protocol Layer v0.01\n"); + printk(KERN_INFO " (c) Copyright 2003 Red Hat.\n"); + dev_add_pack(&edp2_dix_packet_type); + register_netdevice_notifier(&edp2_notifier); + return 0; +} + +/** + * edp2_proto_exit - cease listening to networking for EDP2 + * + * Disconnect EDP2 from the network layer and cease listening to + * any device events + */ + +static void __exit edp2_proto_exit(void) +{ + /* FIXME: module locking, device flushing */ + unregister_netdevice_notifier(&edp2_notifier); + dev_remove_pack(&edp2_dix_packet_type); +} + +module_init(edp2_proto_init); +module_exit(edp2_proto_exit); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/edp2/Makefile linux.22-ac2/net/edp2/Makefile --- linux.vanilla/net/edp2/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/net/edp2/Makefile 2003-06-29 16:09:50.000000000 +0100 @@ -0,0 +1,15 @@ +# +# Makefile for the Linux EDP2 layer. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! The CFLAGS definition is now in the main makefile... + +export-objs = edp2.o + +obj-$(CONFIG_EDP2) := edp2.o + +include $(TOPDIR)/Rules.make + diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/ipv4/netfilter/ip_nat_tftp.c linux.22-ac2/net/ipv4/netfilter/ip_nat_tftp.c --- linux.vanilla/net/ipv4/netfilter/ip_nat_tftp.c 2003-06-14 00:11:43.000000000 +0100 +++ linux.22-ac2/net/ipv4/netfilter/ip_nat_tftp.c 2003-08-13 14:54:49.000000000 +0100 @@ -153,7 +153,7 @@ static int __init init(void) { - int i, ret; + int i, ret = 0; char *tmpname; if (!ports[0]) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/ipv4/netfilter/ipt_ah.c linux.22-ac2/net/ipv4/netfilter/ipt_ah.c --- linux.vanilla/net/ipv4/netfilter/ipt_ah.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/net/ipv4/netfilter/ipt_ah.c 2003-08-13 14:53:44.000000000 +0100 @@ -15,7 +15,11 @@ #endif struct ahhdr { + __u8 nexthdr; + __u8 hdrlen; + __u16 reserved; __u32 spi; + __u32 seq_no; }; /* Returns 1 if the spi is matched by the range, 0 otherwise */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/ipv4/netfilter/ipt_esp.c linux.22-ac2/net/ipv4/netfilter/ipt_esp.c --- linux.vanilla/net/ipv4/netfilter/ipt_esp.c 2002-02-25 19:38:14.000000000 +0000 +++ linux.22-ac2/net/ipv4/netfilter/ipt_esp.c 2003-08-13 14:53:44.000000000 +0100 @@ -16,6 +16,7 @@ struct esphdr { __u32 spi; + __u32 seq_no; }; /* Returns 1 if the spi is matched by the range, 0 otherwise */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/ipv4/netfilter/ipt_LOG.c linux.22-ac2/net/ipv4/netfilter/ipt_LOG.c --- linux.vanilla/net/ipv4/netfilter/ipt_LOG.c 2002-02-25 19:38:14.000000000 +0000 +++ linux.22-ac2/net/ipv4/netfilter/ipt_LOG.c 2003-08-13 14:53:44.000000000 +0100 @@ -3,15 +3,14 @@ */ #include #include -#include #include +#include #include #include #include -#include - -struct in_device; #include + +#include #include #if 0 @@ -20,10 +19,20 @@ #define DEBUGP(format, args...) 
#endif +/* FIXME: move to ip.h like in 2.5 */ +struct ahhdr { + __u8 nexthdr; + __u8 hdrlen; + __u16 reserved; + __u32 spi; + __u32 seq_no; +}; + struct esphdr { __u32 spi; -}; /* FIXME evil kludge */ - + __u32 seq_no; +}; + /* Use lock to serialize, so printks don't overlap */ static spinlock_t log_lock = SPIN_LOCK_UNLOCKED; @@ -58,7 +67,8 @@ printk("FRAG:%u ", ntohs(iph->frag_off) & IP_OFFSET); if ((info->logflags & IPT_LOG_IPOPT) - && iph->ihl * 4 != sizeof(struct iphdr)) { + && iph->ihl * 4 != sizeof(struct iphdr) + && iph->ihl * 4 >= datalen) { unsigned int i; /* Max length: 127 "OPT (" 15*4*2chars ") " */ @@ -230,13 +240,30 @@ break; } /* Max Length */ - case IPPROTO_AH: + case IPPROTO_AH: { + struct ahhdr *ah = protoh; + + /* Max length: 9 "PROTO=AH " */ + printk("PROTO=AH "); + + if (ntohs(iph->frag_off) & IP_OFFSET) + break; + + /* Max length: 25 "INCOMPLETE [65535 bytes] " */ + if (datalen < sizeof (*ah)) { + printk("INCOMPLETE [%u bytes] ", datalen); + break; + } + + /* Length: 15 "SPI=0xF1234567 " */ + printk("SPI=0x%x ", ntohl(ah->spi) ); + break; + } case IPPROTO_ESP: { struct esphdr *esph = protoh; - int esp= (iph->protocol==IPPROTO_ESP); /* Max length: 10 "PROTO=ESP " */ - printk("PROTO=%s ",esp? "ESP" : "AH"); + printk("PROTO=ESP "); if (ntohs(iph->frag_off) & IP_OFFSET) break; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/af_irda.c linux.22-ac2/net/irda/af_irda.c --- linux.vanilla/net/irda/af_irda.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/net/irda/af_irda.c 2003-08-28 22:35:10.000000000 +0100 @@ -98,7 +98,7 @@ struct sock *sk; int err; - IRDA_DEBUG(3, __FUNCTION__ "()\n"); + IRDA_DEBUG(3, "%s()\n", __FUNCTION__); self = (struct irda_sock *) instance; ASSERT(self != NULL, return -1;); @@ -108,7 +108,7 @@ err = sock_queue_rcv_skb(sk, skb); if (err) { - IRDA_DEBUG(1, __FUNCTION__ "(), error: no more mem!\n"); + IRDA_DEBUG(1, "%s(), error: no more mem!\n", __FUNCTION__); self->rx_flow = FLOW_STOP; /* When we return error, TTP will need to requeue the skb */ @@ -132,7 +132,7 @@ self = (struct irda_sock *) instance; - IRDA_DEBUG(2, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); /* Don't care about it, but let's not leak it */ if(skb) @@ -194,7 +194,7 @@ self = (struct irda_sock *) instance; - IRDA_DEBUG(2, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); sk = self->sk; if (sk == NULL) @@ -210,14 +210,14 @@ switch (sk->type) { case SOCK_STREAM: if (max_sdu_size != 0) { - ERROR(__FUNCTION__ "(), max_sdu_size must be 0\n"); + ERROR("%s(), max_sdu_size must be 0\n", __FUNCTION__); return; } self->max_data_size = irttp_get_max_seg_size(self->tsap); break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { - ERROR(__FUNCTION__ "(), max_sdu_size cannot be 0\n"); + ERROR("%s(), max_sdu_size cannot be 0\n", __FUNCTION__); return; } self->max_data_size = max_sdu_size; @@ -226,7 +226,7 @@ self->max_data_size = irttp_get_max_seg_size(self->tsap); }; - IRDA_DEBUG(2, __FUNCTION__ "(), max_data_size=%d\n", + IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __FUNCTION__, self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); @@ -253,7 +253,7 @@ self = (struct irda_sock *) instance; - IRDA_DEBUG(2, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); sk = self->sk; if (sk == NULL) @@ -269,14 +269,14 @@ switch (sk->type) { case SOCK_STREAM: if (max_sdu_size != 0) { - ERROR(__FUNCTION__ "(), max_sdu_size must be 0\n"); + ERROR("%s(), max_sdu_size 
must be 0\n", __FUNCTION__); return; } self->max_data_size = irttp_get_max_seg_size(self->tsap); break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { - ERROR(__FUNCTION__ "(), max_sdu_size cannot be 0\n"); + ERROR("%s(), max_sdu_size cannot be 0\n", __FUNCTION__); return; } self->max_data_size = max_sdu_size; @@ -285,7 +285,7 @@ self->max_data_size = irttp_get_max_seg_size(self->tsap); }; - IRDA_DEBUG(2, __FUNCTION__ "(), max_data_size=%d\n", + IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __FUNCTION__, self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); @@ -304,13 +304,13 @@ { struct sk_buff *skb; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); skb = dev_alloc_skb(64); if (skb == NULL) { - IRDA_DEBUG(0, __FUNCTION__ "() Unable to allocate sk_buff!\n"); + IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", __FUNCTION__); return; } @@ -331,7 +331,7 @@ struct irda_sock *self; struct sock *sk; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); self = (struct irda_sock *) instance; ASSERT(self != NULL, return;); @@ -341,17 +341,17 @@ switch (flow) { case FLOW_STOP: - IRDA_DEBUG(1, __FUNCTION__ "(), IrTTP wants us to slow down\n"); + IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", __FUNCTION__); self->tx_flow = flow; break; case FLOW_START: self->tx_flow = flow; - IRDA_DEBUG(1, __FUNCTION__ - "(), IrTTP wants us to start again\n"); + IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", + __FUNCTION__); wake_up_interruptible(sk->sleep); break; default: - IRDA_DEBUG( 0, __FUNCTION__ "(), Unknown flow command!\n"); + IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __FUNCTION__); /* Unknown flow command, better stop */ self->tx_flow = flow; break; @@ -373,11 +373,11 @@ self = (struct irda_sock *) priv; if (!self) { - WARNING(__FUNCTION__ "(), lost myself!\n"); + WARNING("%s(), lost myself!\n", __FUNCTION__); return; } - IRDA_DEBUG(2, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); /* We probably don't need to make any more queries */ iriap_close(self->iriap); @@ -385,7 +385,7 @@ /* Check if request succeeded */ if (result != IAS_SUCCESS) { - IRDA_DEBUG(1, __FUNCTION__ "(), IAS query failed! (%d)\n", + IRDA_DEBUG(1, "%s(), IAS query failed! 
(%d)\n", __FUNCTION__, result); self->errno = result; /* We really need it later */ @@ -419,11 +419,11 @@ { struct irda_sock *self; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); self = (struct irda_sock *) priv; if (!self) { - WARNING(__FUNCTION__ "(), lost myself!\n"); + WARNING("%s(), lost myself!\n", __FUNCTION__); return; } @@ -456,7 +456,7 @@ { struct irda_sock *self; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); self = (struct irda_sock *) priv; ASSERT(self != NULL, return;); @@ -481,7 +481,7 @@ notify_t notify; if (self->tsap) { - WARNING(__FUNCTION__ "(), busy!\n"); + WARNING("%s(), busy!\n", __FUNCTION__); return -EBUSY; } @@ -499,7 +499,7 @@ self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT, ¬ify); if (self->tsap == NULL) { - IRDA_DEBUG( 0, __FUNCTION__ "(), Unable to allocate TSAP!\n"); + IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n", __FUNCTION__); return -ENOMEM; } /* Remember which TSAP selector we actually got */ @@ -520,7 +520,7 @@ notify_t notify; if (self->lsap) { - WARNING(__FUNCTION__ "(), busy!\n"); + WARNING("%s(), busy!\n", __FUNCTION__); return -EBUSY; } @@ -532,7 +532,7 @@ self->lsap = irlmp_open_lsap(LSAP_CONNLESS, ¬ify, pid); if (self->lsap == NULL) { - IRDA_DEBUG( 0, __FUNCTION__ "(), Unable to allocate LSAP!\n"); + IRDA_DEBUG(0, "%s(), Unable to allocate LSAP!\n", __FUNCTION__); return -ENOMEM; } @@ -553,12 +553,12 @@ */ static int irda_find_lsap_sel(struct irda_sock *self, char *name) { - IRDA_DEBUG(2, __FUNCTION__ "(%p, %s)\n", self, name); + IRDA_DEBUG(2, "%s(%p, %s)\n", __FUNCTION__, self, name); ASSERT(self != NULL, return -1;); if (self->iriap) { - WARNING(__FUNCTION__ "(), busy with a previous query\n"); + WARNING("%s(), busy with a previous query\n", __FUNCTION__); return -EBUSY; } @@ -591,7 +591,7 @@ /* Get the remote TSAP selector */ switch (self->ias_result->type) { case IAS_INTEGER: - IRDA_DEBUG(4, __FUNCTION__ "() int=%d\n", + IRDA_DEBUG(4, "%s() int=%d\n", __FUNCTION__, self->ias_result->t.integer); if (self->ias_result->t.integer != -1) @@ -601,7 +601,7 @@ break; default: self->dtsap_sel = 0; - IRDA_DEBUG(0, __FUNCTION__ "(), bad type!\n"); + IRDA_DEBUG(0, "%s(), bad type!\n", __FUNCTION__); break; } if (self->ias_result) @@ -639,7 +639,7 @@ __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ __u8 dtsap_sel = 0x0; /* TSAP associated with it */ - IRDA_DEBUG(2, __FUNCTION__ "(), name=%s\n", name); + IRDA_DEBUG(2, "%s(), name=%s\n", __FUNCTION__, name); ASSERT(self != NULL, return -1;); @@ -661,7 +661,7 @@ /* Try the address in the log */ self->daddr = discoveries[i].daddr; self->saddr = 0x0; - IRDA_DEBUG(1, __FUNCTION__ "(), trying daddr = %08x\n", + IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", __FUNCTION__, self->daddr); /* Query remote LM-IAS for this service */ @@ -670,9 +670,9 @@ case 0: /* We found the requested service */ if(daddr != DEV_ADDR_ANY) { - IRDA_DEBUG(1, __FUNCTION__ - "(), discovered service ''%s'' in two different devices !!!\n", - name); + IRDA_DEBUG(1, "%s(), discovered service ''%s'' " + "in two different devices !!!\n", + __FUNCTION__, name); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return(-ENOTUNIQ); @@ -686,8 +686,8 @@ break; default: /* Something bad did happen :-( */ - IRDA_DEBUG(0, __FUNCTION__ - "(), unexpected IAS query failure\n"); + IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", + __FUNCTION__); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return(-EHOSTUNREACH); @@ -699,9 +699,8 @@ /* Check out what we 
found */ if(daddr == DEV_ADDR_ANY) { - IRDA_DEBUG(1, __FUNCTION__ - "(), cannot discover service ''%s'' in any device !!!\n", - name); + IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any " + "device !!!\n", __FUNCTION__, name); self->daddr = DEV_ADDR_ANY; return(-EADDRNOTAVAIL); } @@ -711,9 +710,8 @@ self->saddr = 0x0; self->dtsap_sel = dtsap_sel; - IRDA_DEBUG(1, __FUNCTION__ - "(), discovered requested service ''%s'' at address %08x\n", - name, self->daddr); + IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at " + "address %08x\n", __FUNCTION__, name, self->daddr); return 0; } @@ -744,8 +742,8 @@ saddr.sir_addr = self->saddr; } - IRDA_DEBUG(1, __FUNCTION__ "(), tsap_sel = %#x\n", saddr.sir_lsap_sel); - IRDA_DEBUG(1, __FUNCTION__ "(), addr = %08x\n", saddr.sir_addr); + IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __FUNCTION__, saddr.sir_lsap_sel); + IRDA_DEBUG(1, "%s(), addr = %08x\n", __FUNCTION__, saddr.sir_addr); /* uaddr_len come to us uninitialised */ *uaddr_len = sizeof (struct sockaddr_irda); @@ -764,7 +762,7 @@ { struct sock *sk = sock->sk; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if ((sk->type != SOCK_STREAM) && (sk->type != SOCK_SEQPACKET) && (sk->type != SOCK_DGRAM)) @@ -796,7 +794,7 @@ self = sk->protinfo.irda; ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); if (addr_len != sizeof(struct sockaddr_irda)) return -EINVAL; @@ -806,8 +804,8 @@ if ((sk->type == SOCK_DGRAM) && (sk->protocol == IRDAPROTO_ULTRA)) { self->pid = addr->sir_lsap_sel; if (self->pid & 0x80) { - IRDA_DEBUG(0, __FUNCTION__ - "(), extension in PID not supp!\n"); + IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", + __FUNCTION__); return -EOPNOTSUPP; } err = irda_open_lsap(self, self->pid); @@ -852,7 +850,7 @@ struct sk_buff *skb; int err; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); self = sk->protinfo.irda; ASSERT(self != NULL, return -1;); @@ -898,7 +896,7 @@ /* Now attach up the new socket */ new->tsap = irttp_dup(self->tsap, new); if (!new->tsap) { - IRDA_DEBUG(0, __FUNCTION__ "(), dup failed!\n"); + IRDA_DEBUG(0, "%s(), dup failed!\n", __FUNCTION__); return -1; } @@ -959,7 +957,7 @@ self = sk->protinfo.irda; - IRDA_DEBUG(2, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); /* Don't allow connect for Ultra sockets */ if ((sk->type == SOCK_DGRAM) && (sk->protocol == IRDAPROTO_ULTRA)) @@ -989,19 +987,19 @@ /* Try to find one suitable */ err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); if (err) { - IRDA_DEBUG(0, __FUNCTION__ - "(), auto-connect failed!\n"); + IRDA_DEBUG(0, "%s(), auto-connect failed!\n", + __FUNCTION__); return err; } } else { /* Use the one provided by the user */ self->daddr = addr->sir_addr; - IRDA_DEBUG(1, __FUNCTION__ "(), daddr = %08x\n", self->daddr); + IRDA_DEBUG(1, "%s(), daddr = %08x\n", __FUNCTION__, self->daddr); /* Query remote LM-IAS */ err = irda_find_lsap_sel(self, addr->sir_name); if (err) { - IRDA_DEBUG(0, __FUNCTION__ "(), connect failed!\n"); + IRDA_DEBUG(0, "%s(), connect failed!\n", __FUNCTION__); return err; } } @@ -1019,7 +1017,7 @@ self->saddr, self->daddr, NULL, self->max_sdu_size_rx, NULL); if (err) { - IRDA_DEBUG(0, __FUNCTION__ "(), connect failed!\n"); + IRDA_DEBUG(0, "%s(), connect failed!\n", __FUNCTION__); return err; } @@ -1064,7 +1062,7 @@ struct sock *sk; struct irda_sock *self; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", 
__FUNCTION__); /* Check for valid socket type */ switch (sock->type) { @@ -1088,7 +1086,7 @@ } memset(self, 0, sizeof(struct irda_sock)); - IRDA_DEBUG(2, __FUNCTION__ "() : self is %p\n", self); + IRDA_DEBUG(2, "%s() : self is %p\n", __FUNCTION__, self); init_waitqueue_head(&self->query_wait); @@ -1122,7 +1120,7 @@ self->max_sdu_size_rx = TTP_SAR_UNBOUND; break; default: - ERROR(__FUNCTION__ "(), protocol not supported!\n"); + ERROR("%s(), protocol not supported!\n", __FUNCTION__); return -ESOCKTNOSUPPORT; } break; @@ -1151,7 +1149,7 @@ */ void irda_destroy_socket(struct irda_sock *self) { - IRDA_DEBUG(2, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); ASSERT(self != NULL, return;); @@ -1197,7 +1195,7 @@ { struct sock *sk = sock->sk; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if (sk == NULL) return 0; @@ -1266,7 +1264,7 @@ unsigned char *asmptr; int err; - IRDA_DEBUG(4, __FUNCTION__ "(), len=%d\n", len); + IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len); /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR)) @@ -1285,7 +1283,7 @@ /* Check if IrTTP is wants us to slow down */ while (self->tx_flow == FLOW_STOP) { - IRDA_DEBUG(2, __FUNCTION__ "(), IrTTP is busy, going to sleep!\n"); + IRDA_DEBUG(2, "%s(), IrTTP is busy, going to sleep!\n", __FUNCTION__); interruptible_sleep_on(sk->sleep); /* Check if we are still connected */ @@ -1298,9 +1296,8 @@ /* Check that we don't send out to big frames */ if (len > self->max_data_size) { - IRDA_DEBUG(2, __FUNCTION__ - "(), Chopping frame from %d to %d bytes!\n", len, - self->max_data_size); + IRDA_DEBUG(2, "%s(), Chopping frame from %d to %d bytes!\n", + __FUNCTION__, len, self->max_data_size); len = self->max_data_size; } @@ -1320,7 +1317,7 @@ */ err = irttp_data_request(self->tsap, skb); if (err) { - IRDA_DEBUG(0, __FUNCTION__ "(), err=%d\n", err); + IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); return err; } /* Tell client how much data we actually sent */ @@ -1341,7 +1338,7 @@ struct sk_buff *skb; int copied, err; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); self = sk->protinfo.irda; ASSERT(self != NULL, return -1;); @@ -1355,9 +1352,8 @@ copied = skb->len; if (copied > size) { - IRDA_DEBUG(2, __FUNCTION__ - "(), Received truncated frame (%d < %d)!\n", - copied, size); + IRDA_DEBUG(2, "%s(), Received truncated frame (%d < %d)!\n", + __FUNCTION__, copied, size); copied = size; msg->msg_flags |= MSG_TRUNC; } @@ -1373,7 +1369,7 @@ */ if (self->rx_flow == FLOW_STOP) { if ((atomic_read(&sk->rmem_alloc) << 2) <= sk->rcvbuf) { - IRDA_DEBUG(2, __FUNCTION__ "(), Starting IrTTP\n"); + IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__); self->rx_flow = FLOW_START; irttp_flow_request(self->tsap, FLOW_START); } @@ -1412,7 +1408,7 @@ int copied = 0; int target = 1; - IRDA_DEBUG(3, __FUNCTION__ "()\n"); + IRDA_DEBUG(3, "%s()\n", __FUNCTION__); self = sk->protinfo.irda; ASSERT(self != NULL, return -1;); @@ -1472,14 +1468,14 @@ /* put the skb back if we didn't use it up.. 
*/ if (skb->len) { - IRDA_DEBUG(1, __FUNCTION__ "(), back on q!\n"); + IRDA_DEBUG(1, "%s(), back on q!\n", __FUNCTION__); skb_queue_head(&sk->receive_queue, skb); break; } kfree_skb(skb); } else { - IRDA_DEBUG(0, __FUNCTION__ "() questionable!?\n"); + IRDA_DEBUG(0, "%s() questionable!?\n", __FUNCTION__); /* put message back and return */ skb_queue_head(&sk->receive_queue, skb); @@ -1495,7 +1491,7 @@ */ if (self->rx_flow == FLOW_STOP) { if ((atomic_read(&sk->rmem_alloc) << 2) <= sk->rcvbuf) { - IRDA_DEBUG(2, __FUNCTION__ "(), Starting IrTTP\n"); + IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__); self->rx_flow = FLOW_START; irttp_flow_request(self->tsap, FLOW_START); } @@ -1520,7 +1516,7 @@ unsigned char *asmptr; int err; - IRDA_DEBUG(4, __FUNCTION__ "(), len=%d\n", len); + IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len); if (msg->msg_flags & ~MSG_DONTWAIT) return -EINVAL; @@ -1541,9 +1537,8 @@ * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { - IRDA_DEBUG(0, __FUNCTION__ "(), Warning to much data! " - "Chopping frame from %d to %d bytes!\n", len, - self->max_data_size); + IRDA_DEBUG(0, "%s(), Warning to much data! Chopping frame from " + "%d to %d bytes!\n", __FUNCTION__, len, self->max_data_size); len = self->max_data_size; } @@ -1554,7 +1549,7 @@ skb_reserve(skb, self->max_header_size); - IRDA_DEBUG(4, __FUNCTION__ "(), appending user data\n"); + IRDA_DEBUG(4, "%s(), appending user data\n", __FUNCTION__); asmptr = skb->h.raw = skb_put(skb, len); memcpy_fromiovec(asmptr, msg->msg_iov, len); @@ -1564,7 +1559,7 @@ */ err = irttp_udata_request(self->tsap, skb); if (err) { - IRDA_DEBUG(0, __FUNCTION__ "(), err=%d\n", err); + IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); return err; } return len; @@ -1586,7 +1581,7 @@ unsigned char *asmptr; int err; - IRDA_DEBUG(4, __FUNCTION__ "(), len=%d\n", len); + IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len); if (msg->msg_flags & ~MSG_DONTWAIT) return -EINVAL; @@ -1604,9 +1599,8 @@ * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { - IRDA_DEBUG(0, __FUNCTION__ "(), Warning to much data! " - "Chopping frame from %d to %d bytes!\n", len, - self->max_data_size); + IRDA_DEBUG(0, "%s(), Warning to much data! 
Chopping frame from " + "%d to %d bytes!\n", __FUNCTION__, len, self->max_data_size); len = self->max_data_size; } @@ -1617,13 +1611,13 @@ skb_reserve(skb, self->max_header_size); - IRDA_DEBUG(4, __FUNCTION__ "(), appending user data\n"); + IRDA_DEBUG(4, "%s(), appending user data\n", __FUNCTION__); asmptr = skb->h.raw = skb_put(skb, len); memcpy_fromiovec(asmptr, msg->msg_iov, len); err = irlmp_connless_data_request(self->lsap, skb); if (err) { - IRDA_DEBUG(0, __FUNCTION__ "(), err=%d\n", err); + IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); return err; } return len; @@ -1644,7 +1638,7 @@ self = sk->protinfo.irda; ASSERT(self != NULL, return -1;); - IRDA_DEBUG(1, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(1, "%s(%p)\n", __FUNCTION__, self); sk->state = TCP_CLOSE; sk->shutdown |= SEND_SHUTDOWN; @@ -1682,7 +1676,7 @@ unsigned int mask; struct irda_sock *self; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); self = sk->protinfo.irda; poll_wait(file, sk->sleep, wait); @@ -1692,7 +1686,7 @@ if (sk->err) mask |= POLLERR; if (sk->shutdown & RCV_SHUTDOWN) { - IRDA_DEBUG(0, __FUNCTION__ "(), POLLHUP\n"); + IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__); mask |= POLLHUP; } @@ -1706,7 +1700,7 @@ switch (sk->type) { case SOCK_STREAM: if (sk->state == TCP_CLOSE) { - IRDA_DEBUG(0, __FUNCTION__ "(), POLLHUP\n"); + IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__); mask |= POLLHUP; } @@ -1745,7 +1739,7 @@ { struct sock *sk = sock->sk; - IRDA_DEBUG(4, __FUNCTION__ "(), cmd=%#x\n", cmd); + IRDA_DEBUG(4, "%s(), cmd=%#x\n", __FUNCTION__, cmd); switch (cmd) { case TIOCOUTQ: { @@ -1792,7 +1786,7 @@ case SIOCSIFMETRIC: return -EINVAL; default: - IRDA_DEBUG(1, __FUNCTION__ "(), doing device ioctl!\n"); + IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __FUNCTION__); return dev_ioctl(cmd, (void *) arg); } @@ -1819,7 +1813,7 @@ self = sk->protinfo.irda; ASSERT(self != NULL, return -1;); - IRDA_DEBUG(2, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; @@ -1981,8 +1975,8 @@ /* Check is the user space own the object */ if(ias_attr->value->owner != IAS_USER_ATTR) { - IRDA_DEBUG(1, __FUNCTION__ - "(), attempting to delete a kernel attribute\n"); + IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", + __FUNCTION__); kfree(ias_opt); return -EPERM; } @@ -2000,13 +1994,11 @@ /* Only possible for a seqpacket service (TTP with SAR) */ if (sk->type != SOCK_SEQPACKET) { - IRDA_DEBUG(2, __FUNCTION__ - "(), setting max_sdu_size = %d\n", opt); + IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n", __FUNCTION__, opt); self->max_sdu_size_rx = opt; } else { - WARNING(__FUNCTION__ - "(), not allowed to set MAXSDUSIZE for this " - "socket type!\n"); + WARNING("%s(), not allowed to set MAXSDUSIZE for this " + "socket type!\n", __FUNCTION__); return -ENOPROTOOPT; } break; @@ -2123,7 +2115,7 @@ self = sk->protinfo.irda; - IRDA_DEBUG(2, __FUNCTION__ "(%p)\n", self); + IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; @@ -2283,8 +2275,7 @@ /* Check that we can proceed with IAP */ if (self->iriap) { - WARNING(__FUNCTION__ - "(), busy with a previous query\n"); + WARNING("%s(), busy with a previous query\n", __FUNCTION__); kfree(ias_opt); return -EBUSY; } @@ -2365,8 +2356,7 @@ /* Wait until a node is discovered */ if (!self->cachediscovery) { - IRDA_DEBUG(1, __FUNCTION__ - "(), nothing discovered yet, going to sleep...\n"); + IRDA_DEBUG(1, "%s(), nothing discovered yet, going to 
sleep...\n", __FUNCTION__); /* Set watchdog timer to expire in ms. */ self->watchdog.function = irda_discovery_timeout; @@ -2381,12 +2371,10 @@ if(timer_pending(&(self->watchdog))) del_timer(&(self->watchdog)); - IRDA_DEBUG(1, __FUNCTION__ - "(), ...waking up !\n"); + IRDA_DEBUG(1, "%s(), ...waking up !\n", __FUNCTION__); } else - IRDA_DEBUG(1, __FUNCTION__ - "(), found immediately !\n"); + IRDA_DEBUG(1, "%s(), found immediately !\n", __FUNCTION__); /* Tell IrLMP that we have been notified */ irlmp_update_client(self->ckey, self->mask, NULL, NULL, NULL); @@ -2527,11 +2515,11 @@ switch (event) { case NETDEV_UP: - IRDA_DEBUG(3, __FUNCTION__ "(), NETDEV_UP\n"); + IRDA_DEBUG(3, "%s(), NETDEV_UP\n", __FUNCTION__); /* irda_dev_device_up(dev); */ break; case NETDEV_DOWN: - IRDA_DEBUG(3, __FUNCTION__ "(), NETDEV_DOWN\n"); + IRDA_DEBUG(3, "%s(), NETDEV_DOWN\n", __FUNCTION__); /* irda_kill_by_device(dev); */ /* irda_rt_device_down(dev); */ /* irda_dev_device_down(dev); */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/ircomm/ircomm_core.c linux.22-ac2/net/irda/ircomm/ircomm_core.c --- linux.vanilla/net/irda/ircomm/ircomm_core.c 2003-06-14 00:11:43.000000000 +0100 +++ linux.22-ac2/net/irda/ircomm/ircomm_core.c 2003-08-28 22:35:37.000000000 +0100 @@ -63,7 +63,7 @@ { ircomm = hashbin_new(HB_LOCAL); if (ircomm == NULL) { - ERROR(__FUNCTION__ "(), can't allocate hashbin!\n"); + ERROR("%s(), can't allocate hashbin!\n", __FUNCTION__); return -ENOMEM; } @@ -79,7 +79,7 @@ #ifdef MODULE void ircomm_cleanup(void) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); hashbin_delete(ircomm, (FREE_FUNC) __ircomm_close); @@ -100,7 +100,7 @@ struct ircomm_cb *self = NULL; int ret; - IRDA_DEBUG(2, __FUNCTION__ "(), service_type=0x%02x\n", + IRDA_DEBUG(2, "%s(), service_type=0x%02x\n", __FUNCTION__, service_type); ASSERT(ircomm != NULL, return NULL;); @@ -144,7 +144,7 @@ */ static int __ircomm_close(struct ircomm_cb *self) { - IRDA_DEBUG(2, __FUNCTION__"()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* Disconnect link if any */ ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL); @@ -180,7 +180,7 @@ ASSERT(self != NULL, return -EIO;); ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;); - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); entry = hashbin_remove(ircomm, self->line, NULL); @@ -203,7 +203,7 @@ struct ircomm_info info; int ret; - IRDA_DEBUG(2 , __FUNCTION__"()\n"); + IRDA_DEBUG(2 , "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRCOMM_MAGIC, return -1;); @@ -230,7 +230,7 @@ { int clen = 0; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* Check if the packet contains data on the control channel */ if (skb->len > 0) @@ -246,7 +246,7 @@ info->qos, info->max_data_size, info->max_header_size, skb); else { - IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n"); + IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__); dev_kfree_skb(skb); } } @@ -264,7 +264,7 @@ ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRCOMM_MAGIC, return -1;); - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL); @@ -280,7 +280,7 @@ void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(4, __FUNCTION__"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); if (self->notify.connect_confirm ) 
self->notify.connect_confirm(self->notify.instance, @@ -288,7 +288,7 @@ info->max_data_size, info->max_header_size, skb); else { - IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n"); + IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__); dev_kfree_skb(skb); } } @@ -303,7 +303,7 @@ { int ret; - IRDA_DEBUG(4, __FUNCTION__"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -EFAULT;); ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); @@ -322,14 +322,14 @@ */ void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(skb->len > 0, return;); if (self->notify.data_indication) self->notify.data_indication(self->notify.instance, self, skb); else { - IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n"); + IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__); dev_kfree_skb(skb); } } @@ -362,8 +362,8 @@ if (skb->len) ircomm_data_indication(self, skb); else { - IRDA_DEBUG(4, __FUNCTION__ - "(), data was control info only!\n"); + IRDA_DEBUG(4, "%s(), data was control info only!\n", + __FUNCTION__); dev_kfree_skb(skb); } } @@ -378,7 +378,7 @@ { int ret; - IRDA_DEBUG(2, __FUNCTION__"()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -EFAULT;); ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); @@ -400,7 +400,7 @@ { struct sk_buff *ctrl_skb; - IRDA_DEBUG(2, __FUNCTION__"()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ctrl_skb = skb_clone(skb, GFP_ATOMIC); if (!ctrl_skb) @@ -414,7 +414,7 @@ self->notify.udata_indication(self->notify.instance, self, ctrl_skb); else { - IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n"); + IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__); dev_kfree_skb(skb); } } @@ -430,7 +430,7 @@ struct ircomm_info info; int ret; - IRDA_DEBUG(2, __FUNCTION__"()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRCOMM_MAGIC, return -1;); @@ -449,7 +449,7 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(info != NULL, return;); @@ -457,7 +457,7 @@ self->notify.disconnect_indication(self->notify.instance, self, info->reason, skb); } else { - IRDA_DEBUG(0, __FUNCTION__ "(), missing handler\n"); + IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__); dev_kfree_skb(skb); } } @@ -470,7 +470,7 @@ */ void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_MAGIC, return;); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/ircomm/ircomm_event.c linux.22-ac2/net/irda/ircomm/ircomm_event.c --- linux.vanilla/net/irda/ircomm/ircomm_event.c 2001-03-02 19:12:12.000000000 +0000 +++ linux.22-ac2/net/irda/ircomm/ircomm_event.c 2003-08-28 22:35:37.000000000 +0100 @@ -107,7 +107,7 @@ ircomm_connect_indication(self, skb, info); break; default: - IRDA_DEBUG(4, __FUNCTION__"(), unknown event: %s\n", + IRDA_DEBUG(4, "%s(), unknown event: %s\n", __FUNCTION__, ircomm_event[event]); if (skb) dev_kfree_skb(skb); @@ -139,7 +139,7 @@ ircomm_disconnect_indication(self, skb, info); break; default: - IRDA_DEBUG(0, __FUNCTION__"(), unknown event: %s\n", + IRDA_DEBUG(0, "%s(), unknown event: %s\n", __FUNCTION__, ircomm_event[event]); if (skb) dev_kfree_skb(skb); @@ -174,7 
+174,7 @@ ircomm_disconnect_indication(self, skb, info); break; default: - IRDA_DEBUG(0, __FUNCTION__ "(), unknown event = %s\n", + IRDA_DEBUG(0, "%s(), unknown event = %s\n", __FUNCTION__, ircomm_event[event]); if (skb) dev_kfree_skb(skb); @@ -218,7 +218,7 @@ ret = self->issue.disconnect_request(self, skb, info); break; default: - IRDA_DEBUG(0, __FUNCTION__ "(), unknown event = %s\n", + IRDA_DEBUG(0, "%s(), unknown event = %s\n", __FUNCTION__, ircomm_event[event]); if (skb) dev_kfree_skb(skb); @@ -236,7 +236,7 @@ int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(4, __FUNCTION__": state=%s, event=%s\n", + IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __FUNCTION__, ircomm_state[self->state], ircomm_event[event]); return (*state[self->state])(self, event, skb, info); @@ -252,6 +252,6 @@ { self->state = state; - IRDA_DEBUG(4, __FUNCTION__": next state=%s, service type=%d\n", + IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __FUNCTION__, ircomm_state[self->state], self->service_type); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/ircomm/ircomm_lmp.c linux.22-ac2/net/irda/ircomm/ircomm_lmp.c --- linux.vanilla/net/irda/ircomm/ircomm_lmp.c 2002-02-25 19:38:14.000000000 +0000 +++ linux.22-ac2/net/irda/ircomm/ircomm_lmp.c 2003-08-28 22:35:37.000000000 +0100 @@ -49,7 +49,7 @@ { notify_t notify; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); /* Register callbacks */ irda_notify_init(¬ify); @@ -62,7 +62,7 @@ self->lsap = irlmp_open_lsap(LSAP_ANY, ¬ify, 0); if (!self->lsap) { - IRDA_DEBUG(0,__FUNCTION__"failed to allocate tsap\n"); + IRDA_DEBUG(0,"%s failed to allocate tsap\n", __FUNCTION__); return -1; } self->slsap_sel = self->lsap->slsap_sel; @@ -90,7 +90,7 @@ { int ret = 0; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); ret = irlmp_connect_request(self->lsap, info->dlsap_sel, info->saddr, info->daddr, NULL, userdata); @@ -108,7 +108,7 @@ struct sk_buff *skb; int ret; - IRDA_DEBUG(0, __FUNCTION__"()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); /* Any userdata supplied? 
*/ if (userdata == NULL) { @@ -139,7 +139,7 @@ struct sk_buff *skb; int ret; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); if (!userdata) { skb = dev_alloc_skb(64); @@ -172,13 +172,13 @@ cb = (struct irda_skb_cb *) skb->cb; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); line = cb->line; self = (struct ircomm_cb *) hashbin_find(ircomm, line, NULL); if (!self) { - IRDA_DEBUG(2, __FUNCTION__ "(), didn't find myself\n"); + IRDA_DEBUG(2, "%s(), didn't find myself\n", __FUNCTION__); return; } @@ -188,7 +188,7 @@ self->pkt_count--; if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) { - IRDA_DEBUG(2, __FUNCTION__ "(), asking TTY to start again!\n"); + IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __FUNCTION__); self->flow_status = FLOW_START; if (self->notify.flow_indication) self->notify.flow_indication(self->notify.instance, @@ -214,12 +214,12 @@ cb->line = self->line; - IRDA_DEBUG(4, __FUNCTION__"(), sending frame\n"); + IRDA_DEBUG(4, "%s(), sending frame\n", __FUNCTION__); skb->destructor = ircomm_lmp_flow_control; if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) { - IRDA_DEBUG(2, __FUNCTION__ "(), asking TTY to slow down!\n"); + IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __FUNCTION__); self->flow_status = FLOW_STOP; if (self->notify.flow_indication) self->notify.flow_indication(self->notify.instance, @@ -227,7 +227,7 @@ } ret = irlmp_data_request(self->lsap, skb); if (ret) { - ERROR(__FUNCTION__ "(), failed\n"); + ERROR("%s(), failed\n", __FUNCTION__); dev_kfree_skb(skb); } @@ -245,7 +245,7 @@ { struct ircomm_cb *self = (struct ircomm_cb *) instance; - IRDA_DEBUG(4, __FUNCTION__"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRCOMM_MAGIC, return -1;); @@ -272,7 +272,7 @@ struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(0, __FUNCTION__"()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_MAGIC, return;); @@ -302,7 +302,7 @@ struct ircomm_cb *self = (struct ircomm_cb *)instance; struct ircomm_info info; - IRDA_DEBUG(0, __FUNCTION__"()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_MAGIC, return;); @@ -329,7 +329,7 @@ struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(0, __FUNCTION__"()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_MAGIC, return;); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/ircomm/ircomm_param.c linux.22-ac2/net/irda/ircomm/ircomm_param.c --- linux.vanilla/net/irda/ircomm/ircomm_param.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/irda/ircomm/ircomm_param.c 2003-08-28 22:35:37.000000000 +0100 @@ -118,7 +118,7 @@ struct sk_buff *skb; int count; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); @@ -152,7 +152,7 @@ count = irda_param_insert(self, pi, skb->tail, skb_tailroom(skb), &ircomm_param_info); if (count < 0) { - WARNING(__FUNCTION__ "(), no room for parameter!\n"); + WARNING("%s(), no room for parameter!\n", __FUNCTION__); restore_flags(flags); return -1; } @@ -160,7 +160,7 @@ restore_flags(flags); - IRDA_DEBUG(2, __FUNCTION__ "(), skb->len=%d\n", skb->len); + IRDA_DEBUG(2, "%s(), 
skb->len=%d\n", __FUNCTION__, skb->len); if (flush) { /* ircomm_tty_do_softint will take care of the rest */ @@ -195,11 +195,11 @@ /* Find all common service types */ service_type &= self->service_type; if (!service_type) { - IRDA_DEBUG(2, __FUNCTION__ - "(), No common service type to use!\n"); + IRDA_DEBUG(2, "%s(), No common service type to use!\n", + __FUNCTION__); return -1; } - IRDA_DEBUG(0, __FUNCTION__ "(), services in common=%02x\n", + IRDA_DEBUG(0, "%s(), services in common=%02x\n", __FUNCTION__, service_type); /* @@ -214,7 +214,7 @@ else if (service_type & IRCOMM_3_WIRE_RAW) self->settings.service_type = IRCOMM_3_WIRE_RAW; - IRDA_DEBUG(0, __FUNCTION__ "(), resulting service type=0x%02x\n", + IRDA_DEBUG(0, "%s(), resulting service type=0x%02x\n", __FUNCTION__, self->settings.service_type); /* @@ -257,7 +257,7 @@ else { self->settings.port_type = (__u8) param->pv.i; - IRDA_DEBUG(0, __FUNCTION__ "(), port type=%d\n", + IRDA_DEBUG(0, "%s(), port type=%d\n", __FUNCTION__, self->settings.port_type); } return 0; @@ -277,9 +277,9 @@ ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); if (get) { - IRDA_DEBUG(0, __FUNCTION__ "(), not imp!\n"); + IRDA_DEBUG(0, "%s(), not imp!\n", __FUNCTION__); } else { - IRDA_DEBUG(0, __FUNCTION__ "(), port-name=%s\n", param->pv.c); + IRDA_DEBUG(0, "%s(), port-name=%s\n", __FUNCTION__, param->pv.c); strncpy(self->settings.port_name, param->pv.c, 32); } @@ -304,7 +304,7 @@ else self->settings.data_rate = param->pv.i; - IRDA_DEBUG(2, __FUNCTION__ "(), data rate = %d\n", param->pv.i); + IRDA_DEBUG(2, "%s(), data rate = %d\n", __FUNCTION__, param->pv.i); return 0; } @@ -350,7 +350,7 @@ else self->settings.flow_control = (__u8) param->pv.i; - IRDA_DEBUG(1, __FUNCTION__ "(), flow control = 0x%02x\n", (__u8) param->pv.i); + IRDA_DEBUG(1, "%s(), flow control = 0x%02x\n", __FUNCTION__, (__u8) param->pv.i); return 0; } @@ -376,7 +376,7 @@ self->settings.xonxoff[1] = (__u16) param->pv.i >> 8; } - IRDA_DEBUG(0, __FUNCTION__ "(), XON/XOFF = 0x%02x,0x%02x\n", + IRDA_DEBUG(0, "%s(), XON/XOFF = 0x%02x,0x%02x\n", __FUNCTION__, param->pv.i & 0xff, param->pv.i >> 8); return 0; @@ -403,7 +403,7 @@ self->settings.enqack[1] = (__u16) param->pv.i >> 8; } - IRDA_DEBUG(0, __FUNCTION__ "(), ENQ/ACK = 0x%02x,0x%02x\n", + IRDA_DEBUG(0, "%s(), ENQ/ACK = 0x%02x,0x%02x\n", __FUNCTION__, param->pv.i & 0xff, param->pv.i >> 8); return 0; @@ -418,7 +418,7 @@ static int ircomm_param_line_status(void *instance, irda_param_t *param, int get) { - IRDA_DEBUG(2, __FUNCTION__ "(), not impl.\n"); + IRDA_DEBUG(2, "%s(), not impl.\n", __FUNCTION__); return 0; } @@ -477,7 +477,7 @@ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; __u8 dce; - IRDA_DEBUG(1, __FUNCTION__ "(), dce = 0x%02x\n", (__u8) param->pv.i); + IRDA_DEBUG(1, "%s(), dce = 0x%02x\n", __FUNCTION__, (__u8) param->pv.i); dce = (__u8) param->pv.i; @@ -489,7 +489,7 @@ /* Check if any of the settings have changed */ if (dce & 0x0f) { if (dce & IRCOMM_DELTA_CTS) { - IRDA_DEBUG(2, __FUNCTION__ "(), CTS \n"); + IRDA_DEBUG(2, "%s(), CTS \n", __FUNCTION__); } } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/ircomm/ircomm_ttp.c linux.22-ac2/net/irda/ircomm/ircomm_ttp.c --- linux.vanilla/net/irda/ircomm/ircomm_ttp.c 2001-03-02 19:12:12.000000000 +0000 +++ linux.22-ac2/net/irda/ircomm/ircomm_ttp.c 2003-08-28 22:35:37.000000000 +0100 @@ -49,7 +49,7 @@ { notify_t notify; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); /* Register callbacks */ irda_notify_init(¬ify); 
@@ -64,7 +64,7 @@ self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, ¬ify); if (!self->tsap) { - IRDA_DEBUG(0, __FUNCTION__"failed to allocate tsap\n"); + IRDA_DEBUG(0, "%s failed to allocate tsap\n", __FUNCTION__); return -1; } self->slsap_sel = self->tsap->stsap_sel; @@ -92,7 +92,7 @@ { int ret = 0; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ret = irttp_connect_request(self->tsap, info->dlsap_sel, info->saddr, info->daddr, NULL, @@ -110,7 +110,7 @@ { int ret; - IRDA_DEBUG(4, __FUNCTION__"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ret = irttp_connect_response(self->tsap, TTP_SAR_DISABLE, skb); @@ -133,7 +133,7 @@ ASSERT(skb != NULL, return -1;); - IRDA_DEBUG(2, __FUNCTION__"(), clen=%d\n", clen); + IRDA_DEBUG(2, "%s(), clen=%d\n", __FUNCTION__, clen); /* * Insert clen field, currently we either send data only, or control @@ -146,7 +146,7 @@ ret = irttp_data_request(self->tsap, skb); if (ret) { - ERROR(__FUNCTION__ "(), failed\n"); + ERROR("%s(), failed\n", __FUNCTION__); dev_kfree_skb(skb); } @@ -164,7 +164,7 @@ { struct ircomm_cb *self = (struct ircomm_cb *) instance; - IRDA_DEBUG(4, __FUNCTION__"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRCOMM_MAGIC, return -1;); @@ -184,7 +184,7 @@ struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(4, __FUNCTION__"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_MAGIC, return;); @@ -192,7 +192,7 @@ ASSERT(qos != NULL, return;); if (max_sdu_size != TTP_SAR_DISABLE) { - ERROR(__FUNCTION__ "(), SAR not allowed for IrCOMM!\n"); + ERROR("%s(), SAR not allowed for IrCOMM!\n", __FUNCTION__); dev_kfree_skb(skb); return; } @@ -221,7 +221,7 @@ struct ircomm_cb *self = (struct ircomm_cb *)instance; struct ircomm_info info; - IRDA_DEBUG(4, __FUNCTION__"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_MAGIC, return;); @@ -229,7 +229,7 @@ ASSERT(qos != NULL, return;); if (max_sdu_size != TTP_SAR_DISABLE) { - ERROR(__FUNCTION__ "(), SAR not allowed for IrCOMM!\n"); + ERROR("%s(), SAR not allowed for IrCOMM!\n", __FUNCTION__); dev_kfree_skb(skb); return; } @@ -272,7 +272,7 @@ struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(2, __FUNCTION__"()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_MAGIC, return;); @@ -292,7 +292,7 @@ { struct ircomm_cb *self = (struct ircomm_cb *) instance; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_MAGIC, return;); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/ircomm/ircomm_tty_attach.c linux.22-ac2/net/irda/ircomm/ircomm_tty_attach.c --- linux.vanilla/net/irda/ircomm/ircomm_tty_attach.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/irda/ircomm/ircomm_tty_attach.c 2003-08-28 22:35:37.000000000 +0100 @@ -126,14 +126,14 @@ */ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) { - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* Check if somebody has already connected to us */ if (ircomm_is_connected(self->ircomm)) { - IRDA_DEBUG(0, __FUNCTION__ "(), already connected!\n"); + IRDA_DEBUG(0, "%s(), already 
connected!\n", __FUNCTION__); return 0; } @@ -144,7 +144,7 @@ /* Check if somebody has already connected to us */ if (ircomm_is_connected(self->ircomm)) { - IRDA_DEBUG(0, __FUNCTION__ "(), already connected!\n"); + IRDA_DEBUG(0, "%s(), already connected!\n", __FUNCTION__); return 0; } @@ -161,7 +161,7 @@ */ void ircomm_tty_detach_cable(struct ircomm_tty_cb *self) { - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -203,7 +203,7 @@ __u8 oct_seq[6]; __u16 hints; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -258,16 +258,16 @@ * Set default values, but only if the application for some reason * haven't set them already */ - IRDA_DEBUG(2, __FUNCTION__ "(), data-rate = %d\n", + IRDA_DEBUG(2, "%s(), data-rate = %d\n", __FUNCTION__, self->settings.data_rate); if (!self->settings.data_rate) self->settings.data_rate = 9600; - IRDA_DEBUG(2, __FUNCTION__ "(), data-format = %d\n", + IRDA_DEBUG(2, "%s(), data-format = %d\n", __FUNCTION__, self->settings.data_format); if (!self->settings.data_format) self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */ - IRDA_DEBUG(2, __FUNCTION__ "(), flow-control = %d\n", + IRDA_DEBUG(2, "%s(), flow-control = %d\n", __FUNCTION__, self->settings.flow_control); /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/ @@ -312,7 +312,7 @@ struct ircomm_tty_cb *self; struct ircomm_tty_info info; - IRDA_DEBUG(2, __FUNCTION__"()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* Important note : * We need to drop all passive discoveries. @@ -354,7 +354,7 @@ { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -384,7 +384,7 @@ { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv; - IRDA_DEBUG(2, __FUNCTION__"()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -395,13 +395,13 @@ /* Check if request succeeded */ if (result != IAS_SUCCESS) { - IRDA_DEBUG(4, __FUNCTION__ "(), got NULL value!\n"); + IRDA_DEBUG(4, "%s(), got NULL value!\n", __FUNCTION__); return; } switch (value->type) { case IAS_OCT_SEQ: - IRDA_DEBUG(2, __FUNCTION__"(), got octet sequence\n"); + IRDA_DEBUG(2, "%s(), got octet sequence\n", __FUNCTION__); irda_param_extract_all(self, value->t.oct_seq, value->len, &ircomm_param_info); @@ -411,21 +411,21 @@ break; case IAS_INTEGER: /* Got LSAP selector */ - IRDA_DEBUG(2, __FUNCTION__"(), got lsapsel = %d\n", + IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __FUNCTION__, value->t.integer); if (value->t.integer == -1) { - IRDA_DEBUG(0, __FUNCTION__"(), invalid value!\n"); + IRDA_DEBUG(0, "%s(), invalid value!\n", __FUNCTION__); } else self->dlsap_sel = value->t.integer; ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL); break; case IAS_MISSING: - IRDA_DEBUG(0, __FUNCTION__"(), got IAS_MISSING\n"); + IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __FUNCTION__); break; default: - IRDA_DEBUG(0, __FUNCTION__"(), got unknown type!\n"); + IRDA_DEBUG(0, "%s(), got unknown type!\n", __FUNCTION__); break; } irias_delete_value(value); @@ -445,7 +445,7 @@ { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, 
"%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -476,7 +476,7 @@ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; int clen; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -505,7 +505,7 @@ */ void ircomm_tty_link_established(struct ircomm_tty_cb *self) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -580,7 +580,7 @@ { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -601,7 +601,7 @@ { int ret = 0; - IRDA_DEBUG(2, __FUNCTION__": state=%s, event=%s\n", + IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__, ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_ATTACH_CABLE: @@ -616,8 +616,8 @@ self->saddr = info->saddr; if (self->iriap) { - WARNING(__FUNCTION__ - "(), busy with a previous query\n"); + WARNING("%s(), busy with a previous query\n", + __FUNCTION__); return -EBUSY; } @@ -645,7 +645,7 @@ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, __FUNCTION__"(), unknown event: %s\n", + IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__, ircomm_tty_event[event]); return -EINVAL; } @@ -665,7 +665,7 @@ { int ret = 0; - IRDA_DEBUG(2, __FUNCTION__": state=%s, event=%s\n", + IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__, ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { @@ -674,8 +674,7 @@ self->saddr = info->saddr; if (self->iriap) { - WARNING(__FUNCTION__ - "(), busy with a previous query\n"); + WARNING("%s(), busy with a previous query\n", __FUNCTION__); return -EBUSY; } @@ -717,7 +716,7 @@ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, __FUNCTION__"(), unknown event: %s\n", + IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__, ircomm_tty_event[event]); return -EINVAL; } @@ -737,14 +736,14 @@ { int ret = 0; - IRDA_DEBUG(2, __FUNCTION__": state=%s, event=%s\n", + IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__, ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_GOT_PARAMETERS: if (self->iriap) { - WARNING(__FUNCTION__ - "(), busy with a previous query\n"); + WARNING("%s(), busy with a previous query\n", + __FUNCTION__); return -EBUSY; } @@ -774,7 +773,7 @@ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, __FUNCTION__"(), unknown event: %s\n", + IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__, ircomm_tty_event[event]); return -EINVAL; } @@ -794,7 +793,7 @@ { int ret = 0; - IRDA_DEBUG(2, __FUNCTION__": state=%s, event=%s\n", + IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__, ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { @@ -822,7 +821,7 @@ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, __FUNCTION__"(), unknown event: %s\n", + IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__, ircomm_tty_event[event]); return -EINVAL; } @@ -842,7 +841,7 @@ { int ret = 0; - IRDA_DEBUG(2, __FUNCTION__": state=%s, event=%s\n", + IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__, ircomm_tty_state[self->state], ircomm_tty_event[event]); switch 
(event) { @@ -874,7 +873,7 @@ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, __FUNCTION__"(), unknown event: %s\n", + IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__, ircomm_tty_event[event]); return -EINVAL; } @@ -911,13 +910,13 @@ self->settings.dce = IRCOMM_DELTA_CD; ircomm_tty_check_modem_status(self); } else { - IRDA_DEBUG(0, __FUNCTION__ "(), hanging up!\n"); + IRDA_DEBUG(0, "%s(), hanging up!\n", __FUNCTION__); if (self->tty) tty_hangup(self->tty); } break; default: - IRDA_DEBUG(2, __FUNCTION__"(), unknown event: %s\n", + IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__, ircomm_tty_event[event]); return -EINVAL; } @@ -936,7 +935,7 @@ ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); - IRDA_DEBUG(2, __FUNCTION__": state=%s, event=%s\n", + IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__, ircomm_tty_state[self->state], ircomm_tty_event[event]); return (*state[self->state])(self, event, skb, info); @@ -955,7 +954,7 @@ self->state = state; - IRDA_DEBUG(2, __FUNCTION__": next state=%s, service type=%d\n", + IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __FUNCTION__, ircomm_tty_state[self->state], self->service_type); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/ircomm/ircomm_tty.c linux.22-ac2/net/irda/ircomm/ircomm_tty.c --- linux.vanilla/net/irda/ircomm/ircomm_tty.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/irda/ircomm/ircomm_tty.c 2003-06-29 16:34:15.000000000 +0100 @@ -526,7 +526,7 @@ return; } - if ((tty->count == 1) && (self->open_count != 1)) { + if ((atomic_read(&tty->count) == 1) && (self->open_count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/ircomm/ircomm_tty_ioctl.c linux.22-ac2/net/irda/ircomm/ircomm_tty_ioctl.c --- linux.vanilla/net/irda/ircomm/ircomm_tty_ioctl.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/net/irda/ircomm/ircomm_tty_ioctl.c 2003-08-28 22:35:37.000000000 +0100 @@ -59,7 +59,7 @@ unsigned cflag, cval; int baud; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if (!self->tty || !self->tty->termios || !self->ircomm) return; @@ -96,7 +96,7 @@ self->settings.flow_control |= IRCOMM_RTS_CTS_IN; /* This got me. Bummer. Jean II */ if (self->service_type == IRCOMM_3_WIRE_RAW) - WARNING(__FUNCTION__ "(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n"); + WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __FUNCTION__); } else { self->flags &= ~ASYNC_CTS_FLOW; self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN; @@ -152,7 +152,7 @@ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned int cflag = tty->termios->c_cflag; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if ((cflag == old_termios->c_cflag) && (RELEVANT_IFLAG(tty->termios->c_iflag) == @@ -201,7 +201,7 @@ { unsigned int result; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); result = ((self->settings.dte & IRCOMM_RTS) ? TIOCM_RTS : 0) | ((self->settings.dte & IRCOMM_DTR) ? 
TIOCM_DTR : 0) @@ -225,7 +225,7 @@ unsigned int arg; __u8 old_rts, old_dtr; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); @@ -287,7 +287,7 @@ if (!retinfo) return -EFAULT; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); memset(&info, 0, sizeof(info)); info.line = self->line; @@ -323,7 +323,7 @@ struct serial_struct new_serial; struct ircomm_tty_cb old_state, *state; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) return -EFAULT; @@ -397,7 +397,7 @@ struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; int ret = 0; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && @@ -426,7 +426,7 @@ break; case TIOCGICOUNT: - IRDA_DEBUG(0, __FUNCTION__ "(), TIOCGICOUNT not impl!\n"); + IRDA_DEBUG(0, "%s(), TIOCGICOUNT not impl!\n", __FUNCTION__); #if 0 save_flags(flags); cli(); cnow = driver->icount; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irda_device.c linux.22-ac2/net/irda/irda_device.c --- linux.vanilla/net/irda/irda_device.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/irda/irda_device.c 2003-08-28 22:37:44.000000000 +0100 @@ -174,7 +174,7 @@ void irda_device_cleanup(void) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete); hashbin_delete(dongles, NULL); @@ -190,7 +190,7 @@ { struct irlap_cb *self; - IRDA_DEBUG(4, __FUNCTION__ "(%s)\n", status ? "TRUE" : "FALSE"); + IRDA_DEBUG(4, "%s(%s)\n", __FUNCTION__, status ? "TRUE" : "FALSE"); self = (struct irlap_cb *) dev->atalk_ptr; @@ -215,11 +215,11 @@ struct if_irda_req req; int ret; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if (!dev->do_ioctl) { - ERROR(__FUNCTION__ "(), do_ioctl not impl. by " - "device driver\n"); + ERROR("%s(), do_ioctl not impl. by device driver\n", + __FUNCTION__); return -1; } @@ -236,11 +236,11 @@ struct if_irda_req req; int ret; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if (!dev->do_ioctl) { - ERROR(__FUNCTION__ "(), do_ioctl not impl. by " - "device driver\n"); + ERROR("%s(), do_ioctl not impl. by device driver\n", + __FUNCTION__); return -1; } @@ -262,11 +262,11 @@ struct if_irda_req req; int ret; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if (!dev->do_ioctl) { - ERROR(__FUNCTION__ "(), do_ioctl not impl. by " - "device driver\n"); + ERROR("%s(), do_ioctl not impl. 
by device driver\n", + __FUNCTION__); return -1; } @@ -279,7 +279,7 @@ void irda_task_next_state(struct irda_task *task, IRDA_TASK_STATE state) { - IRDA_DEBUG(2, __FUNCTION__ "(), state = %s\n", task_state[state]); + IRDA_DEBUG(2, "%s(), state = %s\n", __FUNCTION__, task_state[state]); task->state = state; } @@ -313,7 +313,7 @@ int count = 0; int timeout; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(task != NULL, return -1;); ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;); @@ -322,14 +322,14 @@ do { timeout = task->function(task); if (count++ > 100) { - ERROR(__FUNCTION__ "(), error in task handler!\n"); + ERROR("%s(), error in task handler!\n", __FUNCTION__); irda_task_delete(task); return TRUE; } } while ((timeout == 0) && (task->state != IRDA_TASK_DONE)); if (timeout < 0) { - ERROR(__FUNCTION__ "(), Error executing task!\n"); + ERROR("%s(), Error executing task!\n", __FUNCTION__); irda_task_delete(task); return TRUE; } @@ -361,8 +361,8 @@ irda_task_timer_expired); finished = FALSE; } else { - IRDA_DEBUG(0, __FUNCTION__ - "(), not finished, and no timeout!\n"); + IRDA_DEBUG(0, "%s(), not finished, and no timeout!\n", + __FUNCTION__); finished = FALSE; } @@ -391,7 +391,7 @@ struct irda_task *task; int ret; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); task = kmalloc(sizeof(struct irda_task), GFP_ATOMIC); if (!task) @@ -428,7 +428,7 @@ { struct irda_task *task; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); task = (struct irda_task *) data; @@ -547,7 +547,7 @@ { /* Check if this dongle has been registred before */ if (hashbin_find(dongles, new->type, NULL)) { - MESSAGE(__FUNCTION__ "(), Dongle already registered\n"); + MESSAGE("%s(), Dongle already registered\n", __FUNCTION__); return 0; } @@ -569,7 +569,7 @@ node = hashbin_remove(dongles, dongle->type, NULL); if (!node) { - ERROR(__FUNCTION__ "(), dongle not found!\n"); + ERROR("%s(), dongle not found!\n", __FUNCTION__); return; } } @@ -586,11 +586,11 @@ struct if_irda_req req; int ret; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); if (!dev->do_ioctl) { - ERROR(__FUNCTION__ "(), set_raw_mode not impl. by " - "device driver\n"); + ERROR("%s(), set_raw_mode not impl. 
by device driver\n", + __FUNCTION__); return -1; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irias_object.c linux.22-ac2/net/irda/irias_object.c --- linux.vanilla/net/irda/irias_object.c 2001-10-05 02:41:09.000000000 +0100 +++ linux.22-ac2/net/irda/irias_object.c 2003-08-28 22:37:44.000000000 +0100 @@ -59,7 +59,7 @@ /* Allocate new string */ new_str = kmalloc(len + 1, GFP_ATOMIC); if (new_str == NULL) { - WARNING(__FUNCTION__"(), Unable to kmalloc!\n"); + WARNING("%s(), Unable to kmalloc!\n", __FUNCTION__); return NULL; } @@ -80,12 +80,12 @@ { struct ias_object *obj; - IRDA_DEBUG( 4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); obj = (struct ias_object *) kmalloc(sizeof(struct ias_object), GFP_ATOMIC); if (obj == NULL) { - IRDA_DEBUG(0, __FUNCTION__ "(), Unable to allocate object!\n"); + IRDA_DEBUG(0, "%s(), Unable to allocate object!\n", __FUNCTION__); return NULL; } memset(obj, 0, sizeof( struct ias_object)); @@ -272,7 +272,7 @@ /* Find object */ obj = hashbin_find(objects, 0, obj_name); if (obj == NULL) { - WARNING(__FUNCTION__ "(), Unable to find object: %s\n", + WARNING("%s(), Unable to find object: %s\n", __FUNCTION__, obj_name); return -1; } @@ -280,14 +280,14 @@ /* Find attribute */ attrib = hashbin_find(obj->attribs, 0, attrib_name); if (attrib == NULL) { - WARNING(__FUNCTION__ "(), Unable to find attribute: %s\n", + WARNING("%s(), Unable to find attribute: %s\n", __FUNCTION__, attrib_name); return -1; } if ( attrib->value->type != new_value->type) { - IRDA_DEBUG( 0, __FUNCTION__ - "(), changing value type not allowed!\n"); + IRDA_DEBUG(0, "%s(), changing value type not allowed!\n", + __FUNCTION__); return -1; } @@ -319,7 +319,7 @@ attrib = (struct ias_attrib *) kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { - WARNING(__FUNCTION__ "(), Unable to allocate attribute!\n"); + WARNING("%s(), Unable to allocate attribute!\n", __FUNCTION__); return; } memset(attrib, 0, sizeof( struct ias_attrib)); @@ -354,8 +354,7 @@ attrib = (struct ias_attrib *) kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { - WARNING(__FUNCTION__ - "(), Unable to allocate attribute!\n"); + WARNING("%s(), Unable to allocate attribute!\n", __FUNCTION__); return; } memset(attrib, 0, sizeof( struct ias_attrib)); @@ -388,7 +387,7 @@ attrib = (struct ias_attrib *) kmalloc(sizeof( struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { - WARNING(__FUNCTION__ "(), Unable to allocate attribute!\n"); + WARNING("%s(), Unable to allocate attribute!\n", __FUNCTION__); return; } memset(attrib, 0, sizeof( struct ias_attrib)); @@ -413,7 +412,7 @@ value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { - WARNING(__FUNCTION__ "(), Unable to kmalloc!\n"); + WARNING("%s(), Unable to kmalloc!\n", __FUNCTION__); return NULL; } memset(value, 0, sizeof(struct ias_value)); @@ -438,7 +437,7 @@ value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { - WARNING(__FUNCTION__ "(), Unable to kmalloc!\n"); + WARNING("%s(), Unable to kmalloc!\n", __FUNCTION__); return NULL; } memset( value, 0, sizeof( struct ias_value)); @@ -465,7 +464,7 @@ value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { - WARNING(__FUNCTION__ "(), Unable to kmalloc!\n"); + WARNING("%s(), Unable to kmalloc!\n", __FUNCTION__); return NULL; } memset(value, 0, sizeof(struct ias_value)); @@ -478,7 +477,7 @@ value->t.oct_seq = kmalloc(len, GFP_ATOMIC); if (value->t.oct_seq == NULL){ - WARNING(__FUNCTION__"(), Unable 
to kmalloc!\n"); + WARNING("%s(), Unable to kmalloc!\n", __FUNCTION__); kfree(value); return NULL; } @@ -492,7 +491,7 @@ value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { - WARNING(__FUNCTION__ "(), Unable to kmalloc!\n"); + WARNING("%s(), Unable to kmalloc!\n", __FUNCTION__); return NULL; } memset(value, 0, sizeof(struct ias_value)); @@ -511,7 +510,7 @@ */ void irias_delete_value(struct ias_value *value) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(value != NULL, return;); @@ -531,7 +530,7 @@ kfree(value->t.oct_seq); break; default: - IRDA_DEBUG(0, __FUNCTION__ "(), Unknown value type!\n"); + IRDA_DEBUG(0, "%s(), Unknown value type!\n", __FUNCTION__); break; } kfree(value); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlan/irlan_client.c linux.22-ac2/net/irda/irlan/irlan_client.c --- linux.vanilla/net/irda/irlan/irlan_client.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/net/irda/irlan/irlan_client.c 2003-08-28 22:36:03.000000000 +0100 @@ -71,7 +71,7 @@ { struct irlan_cb *self = (struct irlan_cb *) data; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -90,7 +90,7 @@ void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); irda_start_timer(&self->client.kick_timer, timeout, (void *) self, irlan_client_kick_timer_expired); @@ -104,7 +104,7 @@ */ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) { - IRDA_DEBUG(1, __FUNCTION__ "()\n"); + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -116,7 +116,7 @@ if ((self->client.state != IRLAN_IDLE) || (self->provider.access_type == ACCESS_DIRECT)) { - IRDA_DEBUG(0, __FUNCTION__ "(), already awake!\n"); + IRDA_DEBUG(0, "%s(), already awake!\n", __FUNCTION__); return; } @@ -125,7 +125,7 @@ self->daddr = daddr; if (self->disconnect_reason == LM_USER_REQUEST) { - IRDA_DEBUG(0, __FUNCTION__ "(), still stopped by user\n"); + IRDA_DEBUG(0, "%s(), still stopped by user\n", __FUNCTION__); return; } @@ -152,7 +152,7 @@ struct irlan_cb *self; __u32 saddr, daddr; - IRDA_DEBUG(1, __FUNCTION__"()\n"); + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); ASSERT(irlan != NULL, return;); ASSERT(discovery != NULL, return;); @@ -174,7 +174,7 @@ if (self) { ASSERT(self->magic == IRLAN_MAGIC, return;); - IRDA_DEBUG(1, __FUNCTION__ "(), Found instance (%08x)!\n", + IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __FUNCTION__, daddr); irlan_client_wakeup(self, saddr, daddr); @@ -192,7 +192,7 @@ { struct irlan_cb *self; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); self = (struct irlan_cb *) instance; @@ -203,7 +203,7 @@ irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb); /* Ready for a new command */ - IRDA_DEBUG(2, __FUNCTION__ "(), clearing tx_busy\n"); + IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __FUNCTION__); self->client.tx_busy = FALSE; /* Check if we have some queued commands waiting to be sent */ @@ -220,7 +220,7 @@ struct tsap_cb *tsap; struct sk_buff *skb; - IRDA_DEBUG(4, __FUNCTION__ "(), reason=%d\n", reason); + IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__, reason); self = (struct irlan_cb *) instance; tsap = (struct tsap_cb *) sap; @@ -252,7 +252,7 @@ struct tsap_cb *tsap; notify_t notify; - IRDA_DEBUG(4, __FUNCTION__ 
"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -272,7 +272,7 @@ tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, ¬ify); if (!tsap) { - IRDA_DEBUG(2, __FUNCTION__ "(), Got no tsap!\n"); + IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__); return; } self->client.tsap_ctrl = tsap; @@ -292,7 +292,7 @@ { struct irlan_cb *self; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); self = (struct irlan_cb *) instance; @@ -318,7 +318,7 @@ struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -361,13 +361,13 @@ ASSERT(skb != NULL, return;); - IRDA_DEBUG(4, __FUNCTION__ "() skb->len=%d\n", (int) skb->len); + IRDA_DEBUG(4, "%s() skb->len=%d\n", __FUNCTION__, (int) skb->len); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); if (!skb) { - ERROR( __FUNCTION__ "(), Got NULL skb!\n"); + ERROR("%s(), Got NULL skb!\n", __FUNCTION__); return; } frame = skb->data; @@ -392,7 +392,7 @@ /* How many parameters? */ count = frame[1]; - IRDA_DEBUG(4, __FUNCTION__ "(), got %d parameters\n", count); + IRDA_DEBUG(4, "%s(), got %d parameters\n", __FUNCTION__, count); ptr = frame+2; @@ -400,7 +400,7 @@ for (i=0; imagic == IRLAN_MAGIC, return;); @@ -462,7 +462,7 @@ else if (strcmp(value, "HOSTED") == 0) self->client.access_type = ACCESS_HOSTED; else { - IRDA_DEBUG(2, __FUNCTION__ "(), unknown access type!\n"); + IRDA_DEBUG(2, "%s(), unknown access type!\n", __FUNCTION__); } } /* IRLAN version */ @@ -484,14 +484,14 @@ memcpy(&tmp_cpu, value, 2); /* Align value */ le16_to_cpus(&tmp_cpu); /* Convert to host order */ self->client.recv_arb_val = tmp_cpu; - IRDA_DEBUG(2, __FUNCTION__ "(), receive arb val=%d\n", + IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __FUNCTION__, self->client.recv_arb_val); } if (strcmp(param, "MAX_FRAME") == 0) { memcpy(&tmp_cpu, value, 2); /* Align value */ le16_to_cpus(&tmp_cpu); /* Convert to host order */ self->client.max_frame = tmp_cpu; - IRDA_DEBUG(4, __FUNCTION__ "(), max frame=%d\n", + IRDA_DEBUG(4, "%s(), max frame=%d\n", __FUNCTION__, self->client.max_frame); } @@ -526,7 +526,7 @@ { struct irlan_cb *self; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(priv != NULL, return;); @@ -539,7 +539,7 @@ /* Check if request succeeded */ if (result != IAS_SUCCESS) { - IRDA_DEBUG(2, __FUNCTION__ "(), got NULL value!\n"); + IRDA_DEBUG(2, "%s(), got NULL value!\n", __FUNCTION__); irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); return; @@ -557,7 +557,7 @@ irias_delete_value(value); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), unknown type!\n"); + IRDA_DEBUG(2, "%s(), unknown type!\n", __FUNCTION__); break; } irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlan/irlan_client_event.c linux.22-ac2/net/irda/irlan/irlan_client_event.c --- linux.vanilla/net/irda/irlan/irlan_client_event.c 2000-11-28 02:07:31.000000000 +0000 +++ linux.22-ac2/net/irda/irlan/irlan_client_event.c 2003-08-28 22:36:03.000000000 +0100 @@ -92,7 +92,7 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRLAN_MAGIC, 
return -1;); @@ -100,8 +100,7 @@ switch (event) { case IRLAN_DISCOVERY_INDICATION: if (self->client.iriap) { - WARNING(__FUNCTION__ - "(), busy with a previous query\n"); + WARNING("%s(), busy with a previous query\n", __FUNCTION__); return -EBUSY; } @@ -114,10 +113,10 @@ "IrLAN", "IrDA:TinyTP:LsapSel"); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n"); + IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__); break; default: - IRDA_DEBUG(4, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(4, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -136,7 +135,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -154,7 +153,7 @@ irlan_next_client_state(self, IRLAN_CONN); break; case IRLAN_IAS_PROVIDER_NOT_AVAIL: - IRDA_DEBUG(2, __FUNCTION__ "(), IAS_PROVIDER_NOT_AVAIL\n"); + IRDA_DEBUG(2, "%s(), IAS_PROVIDER_NOT_AVAIL\n", __FUNCTION__); irlan_next_client_state(self, IRLAN_IDLE); /* Give the client a kick! */ @@ -167,10 +166,10 @@ irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n"); + IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__); break; default: - IRDA_DEBUG(2, __FUNCTION__"(), Unknown event %d\n", event); + IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -189,7 +188,7 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); @@ -204,10 +203,10 @@ irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n"); + IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -224,7 +223,7 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); @@ -244,10 +243,10 @@ irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n"); + IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -266,7 +265,7 @@ static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); @@ -281,10 +280,10 @@ irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n"); + IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -305,7 +304,7 @@ { struct qos_info qos; - IRDA_DEBUG(4, __FUNCTION__ 
"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); @@ -344,7 +343,7 @@ irlan_next_client_state(self, IRLAN_DATA); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), unknown access type!\n"); + IRDA_DEBUG(2, "%s(), unknown access type!\n", __FUNCTION__); break; } break; @@ -353,10 +352,10 @@ irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n"); + IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } @@ -376,7 +375,7 @@ static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); @@ -390,10 +389,10 @@ irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n"); + IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -407,7 +406,7 @@ { struct qos_info qos; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); @@ -429,7 +428,7 @@ } else if (self->client.recv_arb_val > self->provider.send_arb_val) { - IRDA_DEBUG(2, __FUNCTION__ "(), lost the battle :-(\n"); + IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __FUNCTION__); } break; case IRLAN_DATA_CONNECT_INDICATION: @@ -440,10 +439,10 @@ irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n"); + IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -462,7 +461,7 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -476,7 +475,7 @@ irlan_next_client_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -494,7 +493,7 @@ static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if (skb) dev_kfree_skb(skb); @@ -511,7 +510,7 @@ static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); if (skb) dev_kfree_skb(skb); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlan/irlan_common.c linux.22-ac2/net/irda/irlan/irlan_common.c --- linux.vanilla/net/irda/irlan/irlan_common.c 2002-02-25 19:38:14.000000000 +0000 +++ linux.22-ac2/net/irda/irlan/irlan_common.c 2003-08-28 22:36:03.000000000 +0100 @@ -122,7 +122,7 @@ struct irlan_cb *new; __u16 hints; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); /* Allocate master structure 
*/ irlan = hashbin_new(HB_LOCAL); if (irlan == NULL) { @@ -133,7 +133,7 @@ create_proc_info_entry("irlan", 0, proc_irda, irlan_proc_read); #endif /* CONFIG_PROC_FS */ - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); hints = irlmp_service_to_hint(S_LAN); /* Register with IrLMP as a client */ @@ -157,7 +157,7 @@ void irlan_cleanup(void) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); irlmp_unregister_client(ckey); irlmp_unregister_service(skey); @@ -181,7 +181,7 @@ { int i=0; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); /* Check if we should call the device eth or irlan */ if (!eth) { @@ -192,7 +192,7 @@ } if (register_netdev(&self->dev) != 0) { - IRDA_DEBUG(2, __FUNCTION__ "(), register_netdev() failed!\n"); + IRDA_DEBUG(2, "%s(), register_netdev() failed!\n", __FUNCTION__); return -1; } return 0; @@ -208,7 +208,7 @@ { struct irlan_cb *self; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(irlan != NULL, return NULL;); /* @@ -264,7 +264,7 @@ { struct sk_buff *skb; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -301,7 +301,7 @@ struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); self = (struct irlan_cb *) instance; tsap = (struct tsap_cb *) sap; @@ -390,7 +390,7 @@ struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(0, __FUNCTION__ "(), reason=%d\n", reason); + IRDA_DEBUG(0, "%s(), reason=%d\n", __FUNCTION__, reason); self = (struct irlan_cb *) instance; tsap = (struct tsap_cb *) sap; @@ -409,22 +409,22 @@ switch (reason) { case LM_USER_REQUEST: /* User request */ - IRDA_DEBUG(2, __FUNCTION__ "(), User requested\n"); + IRDA_DEBUG(2, "%s(), User requested\n", __FUNCTION__); break; case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */ - IRDA_DEBUG(2, __FUNCTION__ "(), Unexpected IrLAP disconnect\n"); + IRDA_DEBUG(2, "%s(), Unexpected IrLAP disconnect\n", __FUNCTION__); break; case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */ - IRDA_DEBUG(2, __FUNCTION__ "(), IrLAP connect failed\n"); + IRDA_DEBUG(2, "%s(), IrLAP connect failed\n", __FUNCTION__); break; case LM_LAP_RESET: /* IrLAP reset */ - IRDA_DEBUG(2, __FUNCTION__ "(), IrLAP reset\n"); + IRDA_DEBUG(2, "%s(), IrLAP reset\n", __FUNCTION__); break; case LM_INIT_DISCONNECT: - IRDA_DEBUG(2, __FUNCTION__ "(), IrLMP connect failed\n"); + IRDA_DEBUG(2, "%s(), IrLMP connect failed\n", __FUNCTION__); break; default: - ERROR(__FUNCTION__ "(), Unknown disconnect reason\n"); + ERROR("%s(), Unknown disconnect reason\n", __FUNCTION__); break; } @@ -446,7 +446,7 @@ struct tsap_cb *tsap; notify_t notify; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -468,7 +468,7 @@ tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); if (!tsap) { - IRDA_DEBUG(2, __FUNCTION__ "(), Got no tsap!\n"); + IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__); return; } self->tsap_data = tsap; @@ -482,7 +482,7 @@ void irlan_close_tsaps(struct irlan_cb *self) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -572,7 +572,7 @@ { struct sk_buff *skb; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", 
__FUNCTION__); if (irda_lock(&self->client.tx_busy) == FALSE) return -EBUSY; @@ -591,7 +591,7 @@ dev_kfree_skb(skb); return -1; } - IRDA_DEBUG(2, __FUNCTION__ "(), sending ...\n"); + IRDA_DEBUG(2, "%s(), sending ...\n", __FUNCTION__); return irttp_data_request(self->client.tsap_ctrl, skb); } @@ -604,7 +604,7 @@ */ void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* Queue command */ skb_queue_tail(&self->client.txq, skb); @@ -624,7 +624,7 @@ struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -656,7 +656,7 @@ struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -688,7 +688,7 @@ struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -727,7 +727,7 @@ struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -765,7 +765,7 @@ struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -804,7 +804,7 @@ struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -844,7 +844,7 @@ struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -879,7 +879,7 @@ struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -964,7 +964,7 @@ int n=0; if (skb == NULL) { - IRDA_DEBUG(2, __FUNCTION__ "(), Got NULL skb\n"); + IRDA_DEBUG(2, "%s(), Got NULL skb\n", __FUNCTION__); return 0; } @@ -981,7 +981,7 @@ ASSERT(value_len > 0, return 0;); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown parameter type!\n"); + IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __FUNCTION__); return 0; break; } @@ -991,7 +991,7 @@ /* Make space for data */ if (skb_tailroom(skb) < (param_len+value_len+3)) { - IRDA_DEBUG(2, __FUNCTION__ "(), No more space at end of skb\n"); + IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __FUNCTION__); return 0; } skb_put(skb, param_len+value_len+3); @@ -1038,13 +1038,13 @@ __u16 val_len; int n=0; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); /* get length of parameter name (1 byte) */ name_len = buf[n++]; if (name_len > 254) { - IRDA_DEBUG(2, __FUNCTION__ "(), name_len > 254\n"); + IRDA_DEBUG(2, "%s(), name_len > 254\n", __FUNCTION__); return -RSP_INVALID_COMMAND_FORMAT; } @@ -1061,7 +1061,7 @@ le16_to_cpus(&val_len); n+=2; if (val_len > 1016) { - IRDA_DEBUG(2, __FUNCTION__ "(), parameter length to long\n"); + IRDA_DEBUG(2, "%s(), parameter length to long\n", __FUNCTION__); return -RSP_INVALID_COMMAND_FORMAT; } *len = val_len; diff -u --new-file --recursive --exclude-from /usr/src/exclude 
linux.vanilla/net/irda/irlan/irlan_eth.c linux.22-ac2/net/irda/irlan/irlan_eth.c --- linux.vanilla/net/irda/irlan/irlan_eth.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/irda/irlan/irlan_eth.c 2003-08-28 22:36:03.000000000 +0100 @@ -51,7 +51,7 @@ { struct irlan_cb *self; - IRDA_DEBUG(2, __FUNCTION__"()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(dev != NULL, return -1;); @@ -109,7 +109,7 @@ { struct irlan_cb *self; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(dev != NULL, return -1;); @@ -143,7 +143,7 @@ struct irlan_cb *self = (struct irlan_cb *) dev->priv; struct sk_buff *skb; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* Stop device */ netif_stop_queue(dev); @@ -354,14 +354,14 @@ self = dev->priv; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); /* Check if data channel has been connected yet */ if (self->client.state != IRLAN_DATA) { - IRDA_DEBUG(1, __FUNCTION__ "(), delaying!\n"); + IRDA_DEBUG(1, "%s(), delaying!\n", __FUNCTION__); return; } @@ -371,20 +371,20 @@ } else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) { /* Disable promiscuous mode, use normal mode. */ - IRDA_DEBUG(4, __FUNCTION__ "(), Setting multicast filter\n"); + IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__); /* hardware_set_filter(NULL); */ irlan_set_multicast_filter(self, TRUE); } else if (dev->mc_count) { - IRDA_DEBUG(4, __FUNCTION__ "(), Setting multicast filter\n"); + IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__); /* Walk the address list, and load the filter */ /* hardware_set_filter(dev->mc_list); */ irlan_set_multicast_filter(self, TRUE); } else { - IRDA_DEBUG(4, __FUNCTION__ "(), Clearing multicast filter\n"); + IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __FUNCTION__); irlan_set_multicast_filter(self, FALSE); } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlan/irlan_event.c linux.22-ac2/net/irda/irlan/irlan_event.c --- linux.vanilla/net/irda/irlan/irlan_event.c 1999-11-03 01:07:55.000000000 +0000 +++ linux.22-ac2/net/irda/irlan/irlan_event.c 2003-08-28 22:36:03.000000000 +0100 @@ -40,7 +40,7 @@ void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) { - IRDA_DEBUG(2, __FUNCTION__"(), %s\n", irlan_state[state]); + IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__, irlan_state[state]); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -50,7 +50,7 @@ void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state) { - IRDA_DEBUG(2, __FUNCTION__"(), %s\n", irlan_state[state]); + IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__, irlan_state[state]); ASSERT(self != NULL, return;); ASSERT(self->magic == IRLAN_MAGIC, return;); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlan/irlan_filter.c linux.22-ac2/net/irda/irlan/irlan_filter.c --- linux.vanilla/net/irda/irlan/irlan_filter.c 1999-11-03 01:07:55.000000000 +0000 +++ linux.22-ac2/net/irda/irlan/irlan_filter.c 2003-08-28 22:36:03.000000000 +0100 @@ -143,7 +143,7 @@ { __u8 *bytes; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); bytes = value; @@ -156,7 +156,7 @@ * This is experimental!! DB. 
*/ if (strcmp(param, "MODE") == 0) { - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); self->use_udata = TRUE; return; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlan/irlan_provider.c linux.22-ac2/net/irda/irlan/irlan_provider.c --- linux.vanilla/net/irda/irlan/irlan_provider.c 2001-03-02 19:12:12.000000000 +0000 +++ linux.22-ac2/net/irda/irlan/irlan_provider.c 2003-08-28 22:36:03.000000000 +0100 @@ -70,7 +70,7 @@ struct irlan_cb *self; __u8 code; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); self = (struct irlan_cb *) instance; @@ -99,15 +99,15 @@ irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb); break; case CMD_RECONNECT_DATA_CHAN: - IRDA_DEBUG(2, __FUNCTION__"(), Got RECONNECT_DATA_CHAN command\n"); - IRDA_DEBUG(2, __FUNCTION__"(), NOT IMPLEMENTED\n"); + IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __FUNCTION__); + IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __FUNCTION__); break; case CMD_CLOSE_DATA_CHAN: IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n"); - IRDA_DEBUG(2, __FUNCTION__"(), NOT IMPLEMENTED\n"); + IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __FUNCTION__); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown command!\n"); + IRDA_DEBUG(2, "%s(), Unknown command!\n", __FUNCTION__); break; } return 0; @@ -129,7 +129,7 @@ struct tsap_cb *tsap; __u32 saddr, daddr; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); self = (struct irlan_cb *) instance; tsap = (struct tsap_cb *) sap; @@ -182,7 +182,7 @@ struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(4, __FUNCTION__ "(), reason=%d\n", reason); + IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__, reason); self = (struct irlan_cb *) instance; tsap = (struct tsap_cb *) sap; @@ -236,7 +236,7 @@ ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;); - IRDA_DEBUG(4, __FUNCTION__ "(), skb->len=%d\n", (int)skb->len); + IRDA_DEBUG(4, "%s(), skb->len=%d\n", __FUNCTION__, (int)skb->len); ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;); ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;); @@ -266,7 +266,7 @@ for (i=0; imagic == IRLAN_MAGIC, return;); @@ -316,7 +316,7 @@ irlan_insert_string_param(skb, "MEDIA", "802.5"); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), unknown media type!\n"); + IRDA_DEBUG(2, "%s(), unknown media type!\n", __FUNCTION__); break; } irlan_insert_short_param(skb, "IRLAN_VER", 0x0101); @@ -340,7 +340,7 @@ irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED"); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown access type\n"); + IRDA_DEBUG(2, "%s(), Unknown access type\n", __FUNCTION__); break; } irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee); @@ -361,7 +361,7 @@ handle_filter_request(self, skb); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown command!\n"); + IRDA_DEBUG(2, "%s(), Unknown command!\n", __FUNCTION__); break; } @@ -379,7 +379,7 @@ struct tsap_cb *tsap; notify_t notify; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -400,7 +400,7 @@ tsap = irttp_open_tsap(LSAP_ANY, 1, &notify); if (!tsap) { - IRDA_DEBUG(2, __FUNCTION__ "(), Got no tsap!\n"); + IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__); return -1; } self->provider.tsap_ctrl = tsap; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlan/irlan_provider_event.c 
linux.22-ac2/net/irda/irlan/irlan_provider_event.c --- linux.vanilla/net/irda/irlan/irlan_provider_event.c 1999-11-03 01:07:55.000000000 +0000 +++ linux.22-ac2/net/irda/irlan/irlan_provider_event.c 2003-08-28 22:36:03.000000000 +0100 @@ -72,7 +72,7 @@ static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); @@ -82,7 +82,7 @@ irlan_next_provider_state( self, IRLAN_INFO); break; default: - IRDA_DEBUG(4, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(4, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -101,7 +101,7 @@ { int ret; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); @@ -147,7 +147,7 @@ irlan_next_provider_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -166,7 +166,7 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); @@ -186,7 +186,7 @@ irlan_next_provider_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) @@ -205,7 +205,7 @@ static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -221,7 +221,7 @@ irlan_next_provider_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); break; } if (skb) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlap_event.c linux.22-ac2/net/irda/irlap_event.c --- linux.vanilla/net/irda/irlap_event.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/irda/irlap_event.c 2003-08-28 22:36:26.000000000 +0100 @@ -217,7 +217,7 @@ } else self->fast_RR = FALSE; - IRDA_DEBUG(3, __FUNCTION__ "(), timeout=%d (%ld)\n", timeout, jiffies); + IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __FUNCTION__, timeout, jiffies); #endif /* CONFIG_IRDA_FAST_RR */ if (timeout == 0) @@ -241,7 +241,7 @@ if (!self || self->magic != LAP_MAGIC) return; - IRDA_DEBUG(3, __FUNCTION__ "(), event = %s, state = %s\n", + IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __FUNCTION__, irlap_event[event], irlap_state[self->state]); ret = (*state[self->state])(self, event, skb, info); @@ -259,7 +259,7 @@ * try to disconnect link if we send any data frames, since * that will change the state away form XMIT */ - IRDA_DEBUG(2, __FUNCTION__ "() : queue len = %d\n", + IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__, skb_queue_len(&self->txq)); if (skb_queue_len(&self->txq)) { @@ -353,8 +353,8 @@ /* Note : this will never happen, because we test * media busy in irlap_connect_request() and * postpone the event... 
- Jean II */ - IRDA_DEBUG(0, __FUNCTION__ - "(), CONNECT_REQUEST: media busy!\n"); + IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n", + __FUNCTION__); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); @@ -380,15 +380,15 @@ irlap_connect_indication(self, skb); } else { - IRDA_DEBUG(0, __FUNCTION__ "(), SNRM frame does not " - "contain an I field!\n"); + IRDA_DEBUG(0, "%s(), SNRM frame does not " + "contain an I field!\n", __FUNCTION__); } break; case DISCOVERY_REQUEST: ASSERT(info != NULL, return -1;); if (self->media_busy) { - IRDA_DEBUG(0, __FUNCTION__ "(), media busy!\n"); + IRDA_DEBUG(0, "%s(), media busy!\n", __FUNCTION__); /* irlap->log.condition = MEDIA_BUSY; */ /* This will make IrLMP try again */ @@ -450,7 +450,7 @@ * log (and post an event). * Jean II */ - IRDA_DEBUG(1, __FUNCTION__ "(), Receiving final discovery request, missed the discovery slots :-(\n"); + IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __FUNCTION__); /* Last discovery request -> in the log */ irlap_discovery_indication(self, info->discovery); @@ -526,8 +526,8 @@ case RECV_UI_FRAME: /* Only accept broadcast frames in NDM mode */ if (info->caddr != CBROADCAST) { - IRDA_DEBUG(0, __FUNCTION__ - "(), not a broadcast frame!\n"); + IRDA_DEBUG(0, "%s(), not a broadcast frame!\n", + __FUNCTION__); } else irlap_unitdata_indication(self, skb); break; @@ -543,10 +543,10 @@ irlap_send_test_frame(self, CBROADCAST, info->daddr, skb); break; case RECV_TEST_RSP: - IRDA_DEBUG(0, __FUNCTION__ "() not implemented!\n"); + IRDA_DEBUG(0, "%s() not implemented!\n", __FUNCTION__); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %s\n", + IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, irlap_event[event]); ret = -1; @@ -574,13 +574,13 @@ ASSERT(info != NULL, return -1;); ASSERT(info->discovery != NULL, return -1;); - IRDA_DEBUG(4, __FUNCTION__ "(), daddr=%08x\n", + IRDA_DEBUG(4, "%s(), daddr=%08x\n", __FUNCTION__, info->discovery->daddr); if (!self->discovery_log) { - WARNING(__FUNCTION__ "(), discovery log is gone! " + WARNING("%s(), discovery log is gone! " "maybe the discovery timeout has been set to " - "short?\n"); + "short?\n", __FUNCTION__); break; } hashbin_insert(self->discovery_log, @@ -605,7 +605,7 @@ ASSERT(info != NULL, return -1;); - IRDA_DEBUG(1, __FUNCTION__ "(), Receiving discovery request (s = %d) while performing discovery :-(\n", info->s); + IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __FUNCTION__, info->s); /* Last discovery request ? */ if (info->s == 0xff) @@ -619,9 +619,8 @@ * timing requirements. 
*/ if (irda_device_is_receiving(self->netdev) && !self->add_wait) { - IRDA_DEBUG(2, __FUNCTION__ - "(), device is slow to answer, " - "waiting some more!\n"); + IRDA_DEBUG(2, "%s(), device is slow to answer, " + "waiting some more!\n", __FUNCTION__); irlap_start_slot_timer(self, MSECS_TO_JIFFIES(10)); self->add_wait = TRUE; return ret; @@ -657,7 +656,7 @@ } break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %s\n", + IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, irlap_event[event]); ret = -1; @@ -679,15 +678,15 @@ discovery_t *discovery_rsp; int ret=0; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case QUERY_TIMER_EXPIRED: - IRDA_DEBUG(2, __FUNCTION__ "(), QUERY_TIMER_EXPIRED <%ld>\n", - jiffies); + IRDA_DEBUG(2, "%s(), QUERY_TIMER_EXPIRED <%ld>\n", + __FUNCTION__, jiffies); irlap_next_state(self, LAP_NDM); break; case RECV_DISCOVERY_XID_CMD: @@ -715,8 +714,8 @@ } break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, %s\n", event, - irlap_event[event]); + IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, + event, irlap_event[event]); ret = -1; break; @@ -736,7 +735,7 @@ { int ret = 0; - IRDA_DEBUG(4, __FUNCTION__ "(), event=%s\n", irlap_event[ event]); + IRDA_DEBUG(4, "%s(), event=%s\n", __FUNCTION__, irlap_event[event]); ASSERT(self != NULL, return -1;); ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -796,20 +795,20 @@ break; case RECV_DISCOVERY_XID_CMD: - IRDA_DEBUG(3, __FUNCTION__ - "(), event RECV_DISCOVER_XID_CMD!\n"); + IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n", + __FUNCTION__); irlap_next_state(self, LAP_NDM); break; case DISCONNECT_REQUEST: - IRDA_DEBUG(0, __FUNCTION__ "(), Disconnect request!\n"); + IRDA_DEBUG(0, "%s(), Disconnect request!\n", __FUNCTION__); irlap_send_dm_frame(self); irlap_next_state( self, LAP_NDM); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, %s\n", event, - irlap_event[event]); + IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, + event, irlap_event[event]); ret = -1; break; @@ -830,7 +829,7 @@ { int ret = 0; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -859,7 +858,7 @@ self->retry_count++; break; case RECV_SNRM_CMD: - IRDA_DEBUG(4, __FUNCTION__ "(), SNRM battle!\n"); + IRDA_DEBUG(4, "%s(), SNRM battle!\n", __FUNCTION__); ASSERT(skb != NULL, return 0;); ASSERT(info != NULL, return 0;); @@ -940,8 +939,8 @@ irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, %s\n", event, - irlap_event[event]); + IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, + event, irlap_event[event]); ret = -1; break; @@ -958,7 +957,7 @@ static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { - IRDA_DEBUG( 0, __FUNCTION__ "(), Unknown event\n"); + IRDA_DEBUG(0, "%s(), Unknown event\n", __FUNCTION__); return -1; } @@ -989,9 +988,8 @@ * speed and turn-around-time. 
*/ if (skb->len > self->bytes_left) { - IRDA_DEBUG(4, __FUNCTION__ - "(), Not allowed to transmit more " - "bytes!\n"); + IRDA_DEBUG(4, "%s(), Not allowed to transmit more " + "bytes!\n", __FUNCTION__); skb_queue_head(&self->txq, skb_get(skb)); /* * We should switch state to LAP_NRM_P, but @@ -1029,8 +1027,8 @@ self->fast_RR = FALSE; #endif /* CONFIG_IRDA_FAST_RR */ } else { - IRDA_DEBUG(4, __FUNCTION__ - "(), Unable to send! remote busy?\n"); + IRDA_DEBUG(4, "%s(), Unable to send! remote busy?\n", + __FUNCTION__); skb_queue_head(&self->txq, skb_get(skb)); /* @@ -1041,7 +1039,7 @@ } break; case POLL_TIMER_EXPIRED: - IRDA_DEBUG(3, __FUNCTION__ "(), POLL_TIMER_EXPIRED (%ld)\n", + IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED (%ld)\n", __FUNCTION__, jiffies); irlap_send_rr_frame(self, CMD_FRAME); /* Return to NRM properly - Jean II */ @@ -1067,7 +1065,7 @@ * when we return... - Jean II */ break; default: - IRDA_DEBUG(0, __FUNCTION__ "(), Unknown event %s\n", + IRDA_DEBUG(0, "%s(), Unknown event %s\n", __FUNCTION__, irlap_event[event]); ret = -EINVAL; @@ -1086,7 +1084,7 @@ { int ret = 0; - IRDA_DEBUG(1, __FUNCTION__ "()\n"); + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1121,7 +1119,7 @@ } break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d\n", event); + IRDA_DEBUG(1, "%s(), Unknown event %d\n", __FUNCTION__, event); ret = -1; break; @@ -1234,8 +1232,8 @@ /* Keep state */ irlap_next_state(self, LAP_NRM_P); } else { - IRDA_DEBUG(4, __FUNCTION__ - "(), missing or duplicate frame!\n"); + IRDA_DEBUG(4, "%s(), missing or duplicate frame!\n", + __FUNCTION__); /* Update Nr received */ irlap_update_nr_received(self, info->nr); @@ -1299,8 +1297,8 @@ if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_UNEXPECTED)) { - IRDA_DEBUG(4, __FUNCTION__ - "(), unexpected nr and ns!\n"); + IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n", + __FUNCTION__); if (info->pf) { /* Resend rejected frames */ irlap_resend_rejected_frames(self, CMD_FRAME); @@ -1339,10 +1337,9 @@ } break; } - IRDA_DEBUG(1, __FUNCTION__ "(), Not implemented!\n"); - IRDA_DEBUG(1, __FUNCTION__ - "(), event=%s, ns_status=%d, nr_status=%d\n", - irlap_event[ event], ns_status, nr_status); + IRDA_DEBUG(1, "%s(), Not implemented!\n", __FUNCTION__); + IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n", + __FUNCTION__, irlap_event[event], ns_status, nr_status); break; case RECV_UI_FRAME: /* Poll bit cleared? 
*/ @@ -1353,7 +1350,7 @@ del_timer(&self->final_timer); irlap_data_indication(self, skb, TRUE); irlap_next_state(self, LAP_XMIT_P); - printk(__FUNCTION__ "(): RECV_UI_FRAME: next state %s\n", irlap_state[self->state]); + printk("%s(): RECV_UI_FRAME: next state %s\n", __FUNCTION__, irlap_state[self->state]); irlap_start_poll_timer(self, self->poll_timeout); } break; @@ -1406,8 +1403,8 @@ irlap_next_state(self, LAP_NRM_P); } else if (ret == NR_INVALID) { - IRDA_DEBUG(1, __FUNCTION__ "(), Received RR with " - "invalid nr !\n"); + IRDA_DEBUG(1, "%s(), Received RR with invalid nr !\n", + __FUNCTION__); del_timer(&self->final_timer); irlap_next_state(self, LAP_RESET_WAIT); @@ -1507,7 +1504,7 @@ irlap_start_final_timer(self, self->final_timeout); break; case RECV_RD_RSP: - IRDA_DEBUG(1, __FUNCTION__ "(), RECV_RD_RSP\n"); + IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __FUNCTION__); irlap_flush_all_queues(self); irlap_next_state(self, LAP_XMIT_P); @@ -1515,7 +1512,7 @@ irlap_disconnect_request(self); break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %s\n", + IRDA_DEBUG(1, "%s(), Unknown event %s\n", __FUNCTION__, irlap_event[event]); ret = -1; @@ -1536,7 +1533,7 @@ { int ret = 0; - IRDA_DEBUG(3, __FUNCTION__ "(), event = %s\n", irlap_event[event]); + IRDA_DEBUG(3, "%s(), event = %s\n", __FUNCTION__, irlap_event[event]); ASSERT(self != NULL, return -1;); ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1562,7 +1559,7 @@ irlap_next_state( self, LAP_PCLOSE); break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %s\n", + IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, irlap_event[event]); ret = -1; @@ -1583,7 +1580,7 @@ { int ret = 0; - IRDA_DEBUG(3, __FUNCTION__ "(), event = %s\n", irlap_event[event]); + IRDA_DEBUG(3, "%s(), event = %s\n", __FUNCTION__, irlap_event[event]); ASSERT(self != NULL, return -1;); ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1641,7 +1638,7 @@ * state */ if (!info) { - IRDA_DEBUG(3, __FUNCTION__ "(), RECV_SNRM_CMD\n"); + IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __FUNCTION__); irlap_initiate_connection_state(self); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_ua_response_frame(self, &self->qos_rx); @@ -1649,12 +1646,12 @@ irlap_start_wd_timer(self, self->wd_timeout); irlap_next_state(self, LAP_NDM); } else { - IRDA_DEBUG(0, __FUNCTION__ - "(), SNRM frame contained an I field!\n"); + IRDA_DEBUG(0, "%s(), SNRM frame contained an I field!\n", + __FUNCTION__); } break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %s\n", + IRDA_DEBUG(1, "%s(), Unknown event %s\n", __FUNCTION__, irlap_event[event]); ret = -1; @@ -1675,7 +1672,7 @@ { int ret = 0; - IRDA_DEBUG(4, __FUNCTION__ "(), event=%s\n", irlap_event[event]); + IRDA_DEBUG(4, "%s(), event=%s\n", __FUNCTION__, irlap_event[event]); ASSERT(self != NULL, return -ENODEV;); ASSERT(self->magic == LAP_MAGIC, return -EBADR;); @@ -1731,7 +1728,7 @@ ret = -EPROTO; } } else { - IRDA_DEBUG(2, __FUNCTION__ "(), Unable to send!\n"); + IRDA_DEBUG(2, "%s(), Unable to send!\n", __FUNCTION__); skb_queue_head(&self->txq, skb_get(skb)); ret = -EPROTO; } @@ -1747,7 +1744,7 @@ * when we return... 
- Jean II */ break; default: - IRDA_DEBUG(2, __FUNCTION__ "(), Unknown event %s\n", + IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, irlap_event[event]); ret = -EINVAL; @@ -1770,7 +1767,7 @@ int nr_status; int ret = 0; - IRDA_DEBUG(4, __FUNCTION__ "(), event=%s\n", irlap_event[ event]); + IRDA_DEBUG(4, "%s(), event=%s\n", __FUNCTION__, irlap_event[ event]); ASSERT(self != NULL, return -1;); ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1778,8 +1775,8 @@ switch (event) { case RECV_I_CMD: /* Optimize for the common case */ /* FIXME: must check for remote_busy below */ - IRDA_DEBUG(4, __FUNCTION__ "(), event=%s nr=%d, vs=%d, ns=%d, " - "vr=%d, pf=%d\n", irlap_event[event], info->nr, + IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, vr=%d, pf=%d\n", + __FUNCTION__, irlap_event[event], info->nr, self->vs, info->ns, self->vr, info->pf); self->retry_count = 0; @@ -2011,21 +2008,21 @@ /* Keep state */ irlap_next_state(self, LAP_NRM_S); } else { - IRDA_DEBUG(1, __FUNCTION__ - "(), invalid nr not implemented!\n"); + IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n", + __FUNCTION__); } break; case RECV_SNRM_CMD: /* SNRM frame is not allowed to contain an I-field */ if (!info) { del_timer(&self->wd_timer); - IRDA_DEBUG(1, __FUNCTION__ "(), received SNRM cmd\n"); + IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __FUNCTION__); irlap_next_state(self, LAP_RESET_CHECK); irlap_reset_indication(self); } else { - IRDA_DEBUG(0, __FUNCTION__ - "(), SNRM frame contained an I-field!\n"); + IRDA_DEBUG(0, "%s(), SNRM frame contained an I-field!\n", + __FUNCTION__); } break; @@ -2057,7 +2054,7 @@ * which explain why we use (self->N2 / 2) here !!! * Jean II */ - IRDA_DEBUG(1, __FUNCTION__ "(), retry_count = %d\n", + IRDA_DEBUG(1, "%s(), retry_count = %d\n", __FUNCTION__, self->retry_count); if (self->retry_count < (self->N2 / 2)) { @@ -2110,7 +2107,7 @@ irlap_send_test_frame(self, self->caddr, info->daddr, skb); break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, (%s)\n", + IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __FUNCTION__, event, irlap_event[event]); ret = -EINVAL; @@ -2130,7 +2127,7 @@ { int ret = 0; - IRDA_DEBUG(1, __FUNCTION__ "()\n"); + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -ENODEV;); ASSERT(self->magic == LAP_MAGIC, return -EBADR;); @@ -2168,7 +2165,7 @@ irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, (%s)\n", + IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __FUNCTION__, event, irlap_event[event]); ret = -EINVAL; @@ -2184,7 +2181,7 @@ { int ret = 0; - IRDA_DEBUG(1, __FUNCTION__ "(), event=%s\n", irlap_event[event]); + IRDA_DEBUG(1, "%s(), event=%s\n", __FUNCTION__, irlap_event[event]); ASSERT(self != NULL, return -ENODEV;); ASSERT(self->magic == LAP_MAGIC, return -EBADR;); @@ -2205,7 +2202,7 @@ irlap_next_state(self, LAP_SCLOSE); break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown event %d, (%s)\n", + IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __FUNCTION__, event, irlap_event[event]); ret = -EINVAL; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlap_frame.c linux.22-ac2/net/irda/irlap_frame.c --- linux.vanilla/net/irda/irlap_frame.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/irda/irlap_frame.c 2003-08-28 22:36:26.000000000 +0100 @@ -167,8 +167,8 @@ /* Check if the new connection address is valid */ if ((info->caddr == 0x00) || (info->caddr == 0xfe)) { - IRDA_DEBUG(3, __FUNCTION__ - "(), invalid 
connection address!\n"); + IRDA_DEBUG(3, "%s(), invalid connection address!\n", + __FUNCTION__); return; } @@ -178,7 +178,7 @@ /* Only accept if addressed directly to us */ if (info->saddr != self->saddr) { - IRDA_DEBUG(2, __FUNCTION__ "(), not addressed to us!\n"); + IRDA_DEBUG(2, "%s(), not addressed to us!\n", __FUNCTION__); return; } irlap_do_event(self, RECV_SNRM_CMD, skb, info); @@ -200,7 +200,7 @@ struct ua_frame *frame; int ret; - IRDA_DEBUG(2, __FUNCTION__ "() <%ld>\n", jiffies); + IRDA_DEBUG(2, "%s() <%ld>\n", __FUNCTION__, jiffies); ASSERT(self != NULL, return;); ASSERT(self->magic == LAP_MAGIC, return;); @@ -275,7 +275,7 @@ struct sk_buff *skb = NULL; __u8 *frame; - IRDA_DEBUG(3, __FUNCTION__ "()\n"); + IRDA_DEBUG(3, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == LAP_MAGIC, return;); @@ -306,7 +306,7 @@ __u32 bcast = BROADCAST; __u8 *info; - IRDA_DEBUG(4, __FUNCTION__ "(), s=%d, S=%d, command=%d\n", s, S, + IRDA_DEBUG(4, "%s(), s=%d, S=%d, command=%d\n", __FUNCTION__, s, S, command); ASSERT(self != NULL, return;); @@ -398,7 +398,7 @@ __u8 *discovery_info; char *text; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == LAP_MAGIC, return;); @@ -410,13 +410,13 @@ /* Make sure frame is addressed to us */ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { - IRDA_DEBUG(0, __FUNCTION__ - "(), frame is not addressed to us!\n"); + IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", + __FUNCTION__); return; } if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { - WARNING(__FUNCTION__ "(), kmalloc failed!\n"); + WARNING("%s(), kmalloc failed!\n", __FUNCTION__); return; } memset(discovery, 0, sizeof(discovery_t)); @@ -425,7 +425,7 @@ discovery->saddr = self->saddr; discovery->timestamp = jiffies; - IRDA_DEBUG(4, __FUNCTION__ "(), daddr=%08x\n", discovery->daddr); + IRDA_DEBUG(4, "%s(), daddr=%08x\n", __FUNCTION__, discovery->daddr); discovery_info = skb_pull(skb, sizeof(struct xid_frame)); @@ -476,8 +476,8 @@ /* Make sure frame is addressed to us */ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { - IRDA_DEBUG(0, __FUNCTION__ - "(), frame is not addressed to us!\n"); + IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", + __FUNCTION__); return; } @@ -509,7 +509,7 @@ if (info->s == 0xff) { /* Check if things are sane at this point... 
*/ if((discovery_info == NULL) || (skb->len < 3)) { - ERROR(__FUNCTION__ "(), discovery frame to short!\n"); + ERROR("%s(), discovery frame to short!\n", __FUNCTION__); return; } @@ -518,7 +518,7 @@ */ discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC); if (!discovery) { - WARNING(__FUNCTION__ "(), unable to malloc!\n"); + WARNING("%s(), unable to malloc!\n", __FUNCTION__); return; } @@ -642,7 +642,7 @@ frame[2] = 0; - IRDA_DEBUG(4, __FUNCTION__ "(), vr=%d, %ld\n",self->vr, jiffies); + IRDA_DEBUG(4, "%s(), vr=%d, %ld\n", __FUNCTION__, self->vr, jiffies); irlap_queue_xmit(self, skb); } @@ -658,7 +658,7 @@ { info->nr = skb->data[1] >> 5; - IRDA_DEBUG(4, __FUNCTION__ "(), nr=%d, %ld\n", info->nr, jiffies); + IRDA_DEBUG(4, "%s(), nr=%d, %ld\n", __FUNCTION__, info->nr, jiffies); if (command) irlap_do_event(self, RECV_RNR_CMD, skb, info); @@ -669,7 +669,7 @@ static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); info->nr = skb->data[1] >> 5; @@ -683,7 +683,7 @@ static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); info->nr = skb->data[1] >> 5; @@ -697,7 +697,7 @@ static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* Check if this is a command or a response frame */ if (command) @@ -754,7 +754,7 @@ irlap_send_i_frame( self, tx_skb, CMD_FRAME); } else { - IRDA_DEBUG(4, __FUNCTION__ "(), sending unreliable frame\n"); + IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__); irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); self->window -= 1; } @@ -803,7 +803,7 @@ irlap_send_i_frame(self, tx_skb, CMD_FRAME); } else { - IRDA_DEBUG(4, __FUNCTION__ "(), sending unreliable frame\n"); + IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__); if (self->ack_required) { irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); @@ -952,7 +952,7 @@ /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ tx_skb = skb_copy(skb, GFP_ATOMIC); if (!tx_skb) { - IRDA_DEBUG(0, __FUNCTION__ "(), unable to copy\n"); + IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); return; } /* Unlink tx_skb from list */ @@ -987,7 +987,7 @@ */ while (skb_queue_len( &self->txq) > 0) { - IRDA_DEBUG(0, __FUNCTION__ "(), sending additional frames!\n"); + IRDA_DEBUG(0, "%s(), sending additional frames!\n", __FUNCTION__); if ((skb_queue_len( &self->txq) > 0) && (self->window > 0)) { skb = skb_dequeue( &self->txq); @@ -1032,7 +1032,7 @@ /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ tx_skb = skb_copy(skb, GFP_ATOMIC); if (!tx_skb) { - IRDA_DEBUG(0, __FUNCTION__ "(), unable to copy\n"); + IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); return; } /* Unlink tx_skb from list */ @@ -1058,7 +1058,7 @@ void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb, __u8 caddr, int command) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == LAP_MAGIC, return;); @@ -1118,7 +1118,7 @@ static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) { - IRDA_DEBUG( 4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); info->pf = skb->data[1] & PF_BIT; /* 
Final bit */ @@ -1137,7 +1137,7 @@ __u8 *frame; int w, x, y, z; - IRDA_DEBUG(0, __FUNCTION__ "()\n"); + IRDA_DEBUG(0, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == LAP_MAGIC, return;); @@ -1226,15 +1226,15 @@ { struct test_frame *frame; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); frame = (struct test_frame *) skb->data; /* Broadcast frames must carry saddr and daddr fields */ if (info->caddr == CBROADCAST) { if (skb->len < sizeof(struct test_frame)) { - IRDA_DEBUG(0, __FUNCTION__ - "() test frame to short!\n"); + IRDA_DEBUG(0, "%s() test frame to short!\n", + __FUNCTION__); return; } @@ -1281,7 +1281,7 @@ /* Check if frame is large enough for parsing */ if (skb->len < 2) { - ERROR(__FUNCTION__ "(), frame to short!\n"); + ERROR("%s(), frame to short!\n", __FUNCTION__); dev_kfree_skb(skb); return -1; } @@ -1296,7 +1296,7 @@ /* First we check if this frame has a valid connection address */ if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) { - IRDA_DEBUG(0, __FUNCTION__ "(), wrong connection address!\n"); + IRDA_DEBUG(0, "%s(), wrong connection address!\n", __FUNCTION__); goto out; } /* @@ -1330,9 +1330,8 @@ irlap_recv_srej_frame(self, skb, &info, command); break; default: - WARNING(__FUNCTION__ - "() Unknown S-frame %02x received!\n", - info.control); + WARNING("%s() Unknown S-frame %02x received!\n", + __FUNCTION__, info.control); break; } goto out; @@ -1369,7 +1368,7 @@ irlap_recv_ui_frame(self, skb, &info); break; default: - WARNING(__FUNCTION__ "(), Unknown frame %02x received!\n", + WARNING("%s(), Unknown frame %02x received!\n", __FUNCTION__, info.control); break; } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlmp.c linux.22-ac2/net/irda/irlmp.c --- linux.vanilla/net/irda/irlmp.c 2003-06-14 00:11:43.000000000 +0100 +++ linux.22-ac2/net/irda/irlmp.c 2003-08-28 22:36:51.000000000 +0100 @@ -1241,7 +1241,7 @@ /* Get the number of lsap. That's the only safe way to know * that we have looped around... - Jean II */ lsap_todo = HASHBIN_GET_SIZE(self->lsaps); - IRDA_DEBUG(4, __FUNCTION__ "() : %d lsaps to scan\n", lsap_todo); + IRDA_DEBUG(4, "%s() : %d lsaps to scan\n", __FUNCTION__, lsap_todo); /* Poll lsap in order until the queue is full or until we * tried them all. @@ -1255,7 +1255,7 @@ /* Note that if there is only one LSAP on the LAP * (most common case), self->flow_next is always NULL, * so we always avoid this loop. - Jean II */ - IRDA_DEBUG(4, __FUNCTION__ "() : searching my LSAP\n"); + IRDA_DEBUG(4, "%s() : searching my LSAP\n", __FUNCTION__); /* We look again in hashbins, because the lsap * might have gone away... - Jean II */ @@ -1274,14 +1274,14 @@ /* Next time, we will get the next one (or the first one) */ self->flow_next = (struct lsap_cb *) hashbin_get_next(self->lsaps); - IRDA_DEBUG(4, __FUNCTION__ "() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap)); + IRDA_DEBUG(4, "%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", __FUNCTION__, curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap)); /* Inform lsap user that it can send one more packet. 
*/ if (curr->notify.flow_indication != NULL) curr->notify.flow_indication(curr->notify.instance, curr, flow); else - IRDA_DEBUG(1, __FUNCTION__ "(), no handler\n"); + IRDA_DEBUG(1, "%s(), no handler\n", __FUNCTION__); } } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irlmp_frame.c linux.22-ac2/net/irda/irlmp_frame.c --- linux.vanilla/net/irda/irlmp_frame.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/net/irda/irlmp_frame.c 2003-08-28 22:36:51.000000000 +0100 @@ -45,7 +45,7 @@ skb->data[1] = slsap; if (expedited) { - IRDA_DEBUG(4, __FUNCTION__ "(), sending expedited data\n"); + IRDA_DEBUG(4, "%s(), sending expedited data\n", __FUNCTION__); irlap_data_request(self->irlap, skb, TRUE); } else irlap_data_request(self->irlap, skb, FALSE); @@ -61,7 +61,7 @@ { __u8 *frame; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == LMP_LAP_MAGIC, return;); @@ -96,7 +96,7 @@ __u8 dlsap_sel; /* Destination LSAP address */ __u8 *fp; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == LMP_LAP_MAGIC, return;); @@ -116,8 +116,8 @@ * it in a different way than other established connections. */ if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) { - IRDA_DEBUG(3, __FUNCTION__ "(), incoming connection, " - "source LSAP=%d, dest LSAP=%d\n", + IRDA_DEBUG(3, "%s(), incoming connection, " + "source LSAP=%d, dest LSAP=%d\n", __FUNCTION__, slsap_sel, dlsap_sel); /* Try to find LSAP among the unconnected LSAPs */ @@ -126,7 +126,7 @@ /* Maybe LSAP was already connected, so try one more time */ if (!lsap) { - IRDA_DEBUG(1, __FUNCTION__ "(), incoming connection for LSAP already connected\n"); + IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __FUNCTION__); lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0, self->lsaps); } @@ -136,14 +136,13 @@ if (lsap == NULL) { IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n"); - IRDA_DEBUG(2, __FUNCTION__ - "(), slsap_sel = %02x, dlsap_sel = %02x\n", slsap_sel, - dlsap_sel); + IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n", + __FUNCTION__, slsap_sel, dlsap_sel); if (fp[0] & CONTROL_BIT) { - IRDA_DEBUG(2, __FUNCTION__ - "(), received control frame %02x\n", fp[2]); + IRDA_DEBUG(2, "%s(), received control frame %02x\n", + __FUNCTION__, fp[2]); } else { - IRDA_DEBUG(2, __FUNCTION__ "(), received data frame\n"); + IRDA_DEBUG(2, "%s(), received data frame\n", __FUNCTION__); } dev_kfree_skb(skb); return; @@ -162,8 +161,8 @@ irlmp_do_lsap_event(lsap, LM_CONNECT_CONFIRM, skb); break; case DISCONNECT: - IRDA_DEBUG(4, __FUNCTION__ - "(), Disconnect indication!\n"); + IRDA_DEBUG(4, "%s(), Disconnect indication!\n", + __FUNCTION__); irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION, skb); break; @@ -176,8 +175,8 @@ dev_kfree_skb(skb); break; default: - IRDA_DEBUG(0, __FUNCTION__ - "(), Unknown control frame %02x\n", fp[2]); + IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n", + __FUNCTION__, fp[2]); dev_kfree_skb(skb); break; } @@ -211,7 +210,7 @@ __u8 pid; /* Protocol identifier */ __u8 *fp; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == LMP_LAP_MAGIC, return;); @@ -228,7 +227,7 @@ pid = fp[2]; if (pid & 0x80) { - IRDA_DEBUG(0, __FUNCTION__ "(), extension in PID not supp!\n"); + IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __FUNCTION__); 
dev_kfree_skb(skb); return; @@ -236,7 +235,7 @@ /* Check if frame is addressed to the connectionless LSAP */ if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) { - IRDA_DEBUG(0, __FUNCTION__ "(), dropping frame!\n"); + IRDA_DEBUG(0, "%s(), dropping frame!\n", __FUNCTION__); dev_kfree_skb(skb); return; @@ -258,7 +257,7 @@ if (lsap) irlmp_connless_data_indication(lsap, skb); else { - IRDA_DEBUG(0, __FUNCTION__ "(), found no matching LSAP!\n"); + IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __FUNCTION__); dev_kfree_skb(skb); } } @@ -275,7 +274,7 @@ LAP_REASON reason, struct sk_buff *userdata) { - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(lap != NULL, return;); ASSERT(lap->magic == LMP_LAP_MAGIC, return;); @@ -303,7 +302,7 @@ __u32 daddr, struct qos_info *qos, struct sk_buff *skb) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); /* Copy QoS settings for this session */ self->qos = qos; @@ -324,7 +323,7 @@ void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos, struct sk_buff *userdata) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == LMP_LAP_MAGIC, return;); @@ -391,7 +390,7 @@ */ void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log) { - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == LMP_LAP_MAGIC, return;); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irnet/irnet.h linux.22-ac2/net/irda/irnet/irnet.h --- linux.vanilla/net/irda/irnet/irnet.h 2003-06-14 00:11:43.000000000 +0100 +++ linux.22-ac2/net/irda/irnet/irnet.h 2003-09-01 13:54:21.000000000 +0100 @@ -322,29 +322,29 @@ * compiler will optimise away the if() in all cases. */ /* All error messages (will show up in the normal logs) */ -#define DERROR(dbg, args...) \ - {if(DEBUG_##dbg) \ - printk(KERN_INFO "irnet: " __FUNCTION__ "(): " args);} +#define DERROR(dbg, format, args...) \ + {if(DEBUG_##dbg) \ + printk(KERN_INFO "irnet: %s(): " format, __FUNCTION__ , ##args);} /* Normal debug message (will show up in /var/log/debug) */ -#define DEBUG(dbg, args...) \ - {if(DEBUG_##dbg) \ - printk(KERN_DEBUG "irnet: " __FUNCTION__ "(): " args);} +#define DEBUG(dbg, format, args...) \ + {if(DEBUG_##dbg) \ + printk(KERN_DEBUG "irnet: %s(): " format, __FUNCTION__ , ##args);} /* Entering a function (trace) */ -#define DENTER(dbg, args...) \ - {if(DEBUG_##dbg) \ - printk(KERN_DEBUG "irnet: ->" __FUNCTION__ args);} +#define DENTER(dbg, format, args...) \ + {if(DEBUG_##dbg) \ + printk(KERN_DEBUG "irnet: -> %s" format, __FUNCTION__ , ##args);} /* Entering and exiting a function in one go (trace) */ -#define DPASS(dbg, args...) \ - {if(DEBUG_##dbg) \ - printk(KERN_DEBUG "irnet: <>" __FUNCTION__ args);} +#define DPASS(dbg, format, args...) \ + {if(DEBUG_##dbg) \ + printk(KERN_DEBUG "irnet: <>%s" format, __FUNCTION__ , ##args);} /* Exiting a function (trace) */ -#define DEXIT(dbg, args...) \ - {if(DEBUG_##dbg) \ - printk(KERN_DEBUG "irnet: <-" __FUNCTION__ "()" args);} +#define DEXIT(dbg, format, args...) \ + {if(DEBUG_##dbg) \ + printk(KERN_DEBUG "irnet: <-%s()" format, __FUNCTION__ , ##args);} /* Exit a function with debug */ #define DRETURN(ret, dbg, args...) 
\ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irqueue.c linux.22-ac2/net/irda/irqueue.c --- linux.vanilla/net/irda/irqueue.c 2001-07-04 19:50:38.000000000 +0100 +++ linux.22-ac2/net/irda/irqueue.c 2003-08-28 22:37:44.000000000 +0100 @@ -154,7 +154,7 @@ unsigned long flags = 0; int bin; - IRDA_DEBUG( 4, __FUNCTION__"()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT( hashbin != NULL, return;); ASSERT( hashbin->magic == HB_MAGIC, return;); @@ -308,7 +308,7 @@ unsigned long flags = 0; irda_queue_t* entry; - IRDA_DEBUG( 4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT( hashbin != NULL, return NULL;); ASSERT( hashbin->magic == HB_MAGIC, return NULL;); @@ -407,7 +407,7 @@ int bin; __u32 hashv; - IRDA_DEBUG( 4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT( hashbin != NULL, return NULL;); ASSERT( hashbin->magic == HB_MAGIC, return NULL;); @@ -553,7 +553,7 @@ */ static void __enqueue_last( irda_queue_t **queue, irda_queue_t* element) { - IRDA_DEBUG( 4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); /* * Check if queue is empty. @@ -596,7 +596,7 @@ void enqueue_first(irda_queue_t **queue, irda_queue_t* element) { - IRDA_DEBUG( 4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); /* * Check if queue is empty. diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/irttp.c linux.22-ac2/net/irda/irttp.c --- linux.vanilla/net/irda/irttp.c 2003-06-14 00:11:43.000000000 +0100 +++ linux.22-ac2/net/irda/irttp.c 2003-08-28 22:37:44.000000000 +0100 @@ -94,7 +94,7 @@ irttp->tsaps = hashbin_new(HB_LOCAL); if (!irttp->tsaps) { - ERROR(__FUNCTION__ "(), can't allocate IrTTP hashbin!\n"); + ERROR("%s(), can't allocate IrTTP hashbin!\n", __FUNCTION__); return -ENOMEM; } @@ -165,7 +165,7 @@ if (!self || self->magic != TTP_TSAP_MAGIC) return; - IRDA_DEBUG(4, __FUNCTION__ "(instance=%p)\n", self); + IRDA_DEBUG(4, "%s(instance=%p)\n", __FUNCTION__, self); /* Try to make some progress, especially on Tx side - Jean II */ irttp_run_rx_queue(self); @@ -206,7 +206,7 @@ { struct sk_buff* skb; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == TTP_TSAP_MAGIC, return;); @@ -239,7 +239,7 @@ ASSERT(self != NULL, return NULL;); ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;); - IRDA_DEBUG(2, __FUNCTION__ "(), self->rx_sdu_size=%d\n", + IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __FUNCTION__, self->rx_sdu_size); skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size); @@ -262,9 +262,9 @@ dev_kfree_skb(frag); } - IRDA_DEBUG(2, __FUNCTION__ "(), frame len=%d\n", n); + IRDA_DEBUG(2, "%s(), frame len=%d\n", __FUNCTION__, n); - IRDA_DEBUG(2, __FUNCTION__ "(), rx_sdu_size=%d\n", self->rx_sdu_size); + IRDA_DEBUG(2, "%s(), rx_sdu_size=%d\n", __FUNCTION__, self->rx_sdu_size); ASSERT(n <= self->rx_sdu_size, return NULL;); /* Set the new length */ @@ -287,7 +287,7 @@ struct sk_buff *frag; __u8 *frame; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == TTP_TSAP_MAGIC, return;); @@ -297,7 +297,7 @@ * Split frame into a number of segments */ while (skb->len > self->max_seg_size) { - IRDA_DEBUG(2, __FUNCTION__ "(), fragmenting ...\n"); + IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__); /* Make new segment */ frag = dev_alloc_skb(self->max_seg_size+self->max_header_size); @@ -321,7 +321,7 @@ skb_queue_tail(&self->tx_queue, 
frag); } /* Queue what is left of the original skb */ - IRDA_DEBUG(2, __FUNCTION__ "(), queuing last segment\n"); + IRDA_DEBUG(2, "%s(), queuing last segment\n", __FUNCTION__); frame = skb_push(skb, TTP_HEADER); frame[0] = 0x00; /* Clear more bit */ @@ -352,7 +352,7 @@ else self->tx_max_sdu_size = param->pv.i; - IRDA_DEBUG(1, __FUNCTION__ "(), MaxSduSize=%d\n", param->pv.i); + IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __FUNCTION__, param->pv.i); return 0; } @@ -380,13 +380,13 @@ * JeanII */ if((stsap_sel != LSAP_ANY) && ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) { - IRDA_DEBUG(0, __FUNCTION__ "(), invalid tsap!\n"); + IRDA_DEBUG(0, "%s(), invalid tsap!\n", __FUNCTION__); return NULL; } self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); if (self == NULL) { - IRDA_DEBUG(0, __FUNCTION__ "(), unable to kmalloc!\n"); + IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); return NULL; } memset(self, 0, sizeof(struct tsap_cb)); @@ -421,7 +421,7 @@ */ lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); if (lsap == NULL) { - WARNING(__FUNCTION__ "(), unable to allocate LSAP!!\n"); + WARNING("%s(), unable to allocate LSAP!!\n", __FUNCTION__); return NULL; } @@ -431,7 +431,7 @@ * the stsap_sel we have might not be valid anymore */ self->stsap_sel = lsap->slsap_sel; - IRDA_DEBUG(4, __FUNCTION__ "(), stsap_sel=%02x\n", self->stsap_sel); + IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __FUNCTION__, self->stsap_sel); self->notify = *notify; self->lsap = lsap; @@ -488,7 +488,7 @@ { struct tsap_cb *tsap; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return -1;); ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); @@ -497,7 +497,7 @@ if (self->connected) { /* Check if disconnect is not pending */ if (!test_bit(0, &self->disconnect_pend)) { - WARNING(__FUNCTION__ "(), TSAP still connected!\n"); + WARNING("%s(), TSAP still connected!\n", __FUNCTION__); irttp_disconnect_request(self, NULL, P_NORMAL); } self->close_pend = TRUE; @@ -533,16 +533,16 @@ ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); ASSERT(skb != NULL, return -1;); - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); /* Check that nothing bad happens */ if ((skb->len == 0) || (!self->connected)) { - IRDA_DEBUG(1, __FUNCTION__ "(), No data, or not connected\n"); + IRDA_DEBUG(1, "%s(), No data, or not connected\n", __FUNCTION__); return -1; } if (skb->len > self->max_seg_size) { - IRDA_DEBUG(1, __FUNCTION__ "(), UData is to large for IrLAP!\n"); + IRDA_DEBUG(1, "%s(), UData is to large for IrLAP!\n", __FUNCTION__); return -1; } @@ -566,12 +566,12 @@ ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); ASSERT(skb != NULL, return -1;); - IRDA_DEBUG(2, __FUNCTION__ " : queue len = %d\n", + IRDA_DEBUG(2, "%s : queue len = %d\n", __FUNCTION__, skb_queue_len(&self->tx_queue)); /* Check that nothing bad happens */ if ((skb->len == 0) || (!self->connected)) { - WARNING(__FUNCTION__ "(), No data, or not connected\n"); + WARNING("%s(), No data, or not connected\n", __FUNCTION__); return -ENOTCONN; } @@ -580,8 +580,8 @@ * inside an IrLAP frame */ if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) { - ERROR(__FUNCTION__ - "(), SAR disabled, and data is to large for IrLAP!\n"); + ERROR("%s(), SAR disabled, and data is to large for IrLAP!\n", + __FUNCTION__); return -EMSGSIZE; } @@ -593,8 +593,8 @@ (self->tx_max_sdu_size != TTP_SAR_UNBOUND) && (skb->len > self->tx_max_sdu_size)) { - ERROR(__FUNCTION__ "(), SAR enabled, " - "but data is larger than TxMaxSduSize!\n"); + 
ERROR("%s(), SAR enabled, but data is larger " + "than TxMaxSduSize!\n", __FUNCTION__); return -EMSGSIZE; } /* @@ -665,7 +665,7 @@ unsigned long flags; int n; - IRDA_DEBUG(2, __FUNCTION__ "() : send_credit = %d, queue_len = %d\n", + IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n", __FUNCTION__, self->send_credit, skb_queue_len(&self->tx_queue)); /* Get exclusive access to the tx queue, otherwise don't touch it */ @@ -773,7 +773,7 @@ ASSERT(self != NULL, return;); ASSERT(self->magic == TTP_TSAP_MAGIC, return;); - IRDA_DEBUG(4, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n", + IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__, self->send_credit, self->avail_credit, self->remote_credit); /* Give credit to peer */ @@ -821,7 +821,7 @@ { struct tsap_cb *self; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); self = (struct tsap_cb *) instance; @@ -933,7 +933,7 @@ { struct tsap_cb *self; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); self = (struct tsap_cb *) instance; @@ -947,7 +947,7 @@ self->notify.status_indication(self->notify.instance, link, lock); else - IRDA_DEBUG(2, __FUNCTION__ "(), no handler\n"); + IRDA_DEBUG(2, "%s(), no handler\n", __FUNCTION__); } /* @@ -965,7 +965,7 @@ ASSERT(self != NULL, return;); ASSERT(self->magic == TTP_TSAP_MAGIC, return;); - IRDA_DEBUG(4, __FUNCTION__ "(instance=%p)\n", self); + IRDA_DEBUG(4, "%s(instance=%p)\n", __FUNCTION__, self); /* We are "polled" directly from LAP, and the LAP want to fill * its Tx window. We want to do our best to send it data, so that @@ -1003,18 +1003,18 @@ */ void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow) { - IRDA_DEBUG(1, __FUNCTION__ "()\n"); + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); ASSERT(self != NULL, return;); ASSERT(self->magic == TTP_TSAP_MAGIC, return;); switch (flow) { case FLOW_STOP: - IRDA_DEBUG(1, __FUNCTION__ "(), flow stop\n"); + IRDA_DEBUG(1, "%s(), flow stop\n", __FUNCTION__); self->rx_sdu_busy = TRUE; break; case FLOW_START: - IRDA_DEBUG(1, __FUNCTION__ "(), flow start\n"); + IRDA_DEBUG(1, "%s(), flow start\n", __FUNCTION__); self->rx_sdu_busy = FALSE; /* Client say he can accept more data, try to free our @@ -1023,7 +1023,7 @@ break; default: - IRDA_DEBUG(1, __FUNCTION__ "(), Unknown flow command!\n"); + IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __FUNCTION__); } } @@ -1042,7 +1042,7 @@ __u8 *frame; __u8 n; - IRDA_DEBUG(4, __FUNCTION__ "(), max_sdu_size=%d\n", max_sdu_size); + IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __FUNCTION__, max_sdu_size); ASSERT(self != NULL, return -EBADR;); ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;); @@ -1134,7 +1134,7 @@ __u8 plen; __u8 n; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); self = (struct tsap_cb *) instance; @@ -1158,7 +1158,7 @@ n = skb->data[0] & 0x7f; - IRDA_DEBUG(4, __FUNCTION__ "(), Initial send_credit=%d\n", n); + IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __FUNCTION__, n); self->send_credit = n; self->tx_max_sdu_size = 0; @@ -1178,8 +1178,8 @@ /* Any errors in the parameter list? 
*/ if (ret < 0) { - WARNING(__FUNCTION__ - "(), error extracting parameters\n"); + WARNING("%s(), error extracting parameters\n", + __FUNCTION__); dev_kfree_skb(skb); /* Do not accept this connection attempt */ @@ -1189,10 +1189,10 @@ skb_pull(skb, IRDA_MIN(skb->len, plen+1)); } - IRDA_DEBUG(4, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n", + IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__, self->send_credit, self->avail_credit, self->remote_credit); - IRDA_DEBUG(2, __FUNCTION__ "(), MaxSduSize=%d\n", self->tx_max_sdu_size); + IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __FUNCTION__, self->tx_max_sdu_size); if (self->notify.connect_confirm) { self->notify.connect_confirm(self->notify.instance, self, qos, @@ -1229,7 +1229,7 @@ self->max_seg_size = max_seg_size - TTP_HEADER;; self->max_header_size = max_header_size+TTP_HEADER; - IRDA_DEBUG(4, __FUNCTION__ "(), TSAP sel=%02x\n", self->stsap_sel); + IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __FUNCTION__, self->stsap_sel); /* Need to update dtsap_sel if its equal to LSAP_ANY */ self->dtsap_sel = lsap->dlsap_sel; @@ -1253,8 +1253,8 @@ /* Any errors in the parameter list? */ if (ret < 0) { - WARNING(__FUNCTION__ - "(), error extracting parameters\n"); + WARNING("%s(), error extracting parameters\n", + __FUNCTION__); dev_kfree_skb(skb); /* Do not accept this connection attempt */ @@ -1291,7 +1291,7 @@ ASSERT(self != NULL, return -1;); ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); - IRDA_DEBUG(4, __FUNCTION__ "(), Source TSAP selector=%02x\n", + IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __FUNCTION__, self->stsap_sel); /* Any userdata supplied? */ @@ -1369,15 +1369,15 @@ { struct tsap_cb *new; - IRDA_DEBUG(1, __FUNCTION__ "()\n"); + IRDA_DEBUG(1, "%s()\n", __FUNCTION__); if (!hashbin_find(irttp->tsaps, (int) orig, NULL)) { - IRDA_DEBUG(0, __FUNCTION__ "(), unable to find TSAP\n"); + IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __FUNCTION__); return NULL; } new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); if (!new) { - IRDA_DEBUG(0, __FUNCTION__ "(), unable to kmalloc\n"); + IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__); return NULL; } /* Dup */ @@ -1415,7 +1415,7 @@ /* Already disconnected? */ if (!self->connected) { - IRDA_DEBUG(4, __FUNCTION__ "(), already disconnected!\n"); + IRDA_DEBUG(4, "%s(), already disconnected!\n", __FUNCTION__); if (userdata) dev_kfree_skb(userdata); return -1; @@ -1427,7 +1427,7 @@ * for following a disconnect_indication() (i.e. net_bh). * Jean II */ if(test_and_set_bit(0, &self->disconnect_pend)) { - IRDA_DEBUG(0, __FUNCTION__ "(), disconnect already pending\n"); + IRDA_DEBUG(0, "%s(), disconnect already pending\n", __FUNCTION__); if (userdata) dev_kfree_skb(userdata); @@ -1446,7 +1446,7 @@ * disconnecting right now since the data will * not have any usable connection to be sent on */ - IRDA_DEBUG(1, __FUNCTION__ "High priority!!()\n" ); + IRDA_DEBUG(1, "%s(): High priority!!()\n", __FUNCTION__); irttp_flush_queues(self); } else if (priority == P_NORMAL) { /* @@ -1467,7 +1467,7 @@ * be sent at the LMP level (so even if the peer has its Tx queue * full of data). 
- Jean II */ - IRDA_DEBUG(1, __FUNCTION__ "(), Disconnecting ...\n"); + IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __FUNCTION__); self->connected = FALSE; if (!userdata) { @@ -1501,7 +1501,7 @@ { struct tsap_cb *self; - IRDA_DEBUG(4, __FUNCTION__ "()\n"); + IRDA_DEBUG(4, "%s()\n", __FUNCTION__); self = (struct tsap_cb *) instance; @@ -1561,7 +1561,7 @@ * give an error back */ if (err == -ENOMEM) { - IRDA_DEBUG(0, __FUNCTION__ "() requeueing skb!\n"); + IRDA_DEBUG(0, "%s() requeueing skb!\n", __FUNCTION__); /* Make sure we take a break */ self->rx_sdu_busy = TRUE; @@ -1586,7 +1586,7 @@ struct sk_buff *skb; int more = 0; - IRDA_DEBUG(2, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n", + IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__, self->send_credit, self->avail_credit, self->remote_credit); /* Get exclusive access to the rx queue, otherwise don't touch it */ @@ -1626,7 +1626,7 @@ * limits of the maximum size of the rx_sdu */ if (self->rx_sdu_size <= self->rx_max_sdu_size) { - IRDA_DEBUG(4, __FUNCTION__ "(), queueing frag\n"); + IRDA_DEBUG(4, "%s(), queueing frag\n", __FUNCTION__); skb_queue_tail(&self->rx_fragments, skb); } else { /* Free the part of the SDU that is too big */ @@ -1656,7 +1656,7 @@ /* Now we can deliver the reassembled skb */ irttp_do_data_indication(self, skb); } else { - IRDA_DEBUG(1, __FUNCTION__ "(), Truncated frame\n"); + IRDA_DEBUG(1, "%s(), Truncated frame\n", __FUNCTION__); /* Free the part of the SDU that is too big */ dev_kfree_skb(skb); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/parameters.c linux.22-ac2/net/irda/parameters.c --- linux.vanilla/net/irda/parameters.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/irda/parameters.c 2003-08-28 22:37:44.000000000 +0100 @@ -150,22 +150,22 @@ */ if (p.pl == 0) { if (p.pv.i < 0xff) { - IRDA_DEBUG(2, __FUNCTION__ "(), using 1 byte\n"); + IRDA_DEBUG(2, "%s(), using 1 byte\n", __FUNCTION__); p.pl = 1; } else if (p.pv.i < 0xffff) { - IRDA_DEBUG(2, __FUNCTION__ "(), using 2 bytes\n"); + IRDA_DEBUG(2, "%s(), using 2 bytes\n", __FUNCTION__); p.pl = 2; } else { - IRDA_DEBUG(2, __FUNCTION__ "(), using 4 bytes\n"); + IRDA_DEBUG(2, "%s(), using 4 bytes\n", __FUNCTION__); p.pl = 4; /* Default length */ } } /* Check if buffer is long enough for insertion */ if (len < (2+p.pl)) { - WARNING(__FUNCTION__ "(), buffer to short for insertion!\n"); + WARNING("%s(), buffer to short for insertion!\n", __FUNCTION__); return -1; } - IRDA_DEBUG(2, __FUNCTION__ "(), pi=%#x, pl=%d, pi=%d\n", p.pi, p.pl, p.pv.i); + IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __FUNCTION__, p.pi, p.pl, p.pv.i); switch (p.pl) { case 1: n += irda_param_pack(buf, "bbb", p.pi, p.pl, (__u8) p.pv.i); @@ -186,7 +186,7 @@ break; default: - WARNING(__FUNCTION__ "() length %d not supported\n", p.pl); + WARNING("%s() length %d not supported\n", __FUNCTION__, p.pl); /* Skip parameter */ return -1; } @@ -215,8 +215,8 @@ /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { - WARNING(__FUNCTION__ "(), buffer to short for parsing! " - "Need %d bytes, but len is only %d\n", p.pl, len); + WARNING("%s(), buffer to short for parsing! Need %d bytes, " + "but len is only %d\n", __FUNCTION__, p.pl, len); return -1; } @@ -226,8 +226,8 @@ * PV_INTEGER means that the handler is flexible. */ if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) { - ERROR(__FUNCTION__ "(), invalid parameter length! " - "Expected %d bytes, but value had %d bytes!\n", + ERROR("%s(), invalid parameter length! 
Expected %d bytes, " + "but value had %d bytes!\n", __FUNCTION__, type & PV_MASK, p.pl); /* Most parameters are bit/byte fields or little endian, @@ -265,13 +265,13 @@ le32_to_cpus(&p.pv.i); break; default: - WARNING(__FUNCTION__ "() length %d not supported\n", p.pl); + WARNING("%s() length %d not supported\n", __FUNCTION__, p.pl); /* Skip parameter */ return p.pl+2; } - IRDA_DEBUG(2, __FUNCTION__ "(), pi=%#x, pl=%d, pi=%d\n", p.pi, p.pl, p.pv.i); + IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __FUNCTION__, p.pi, p.pl, p.pv.i); /* Call handler for this parameter */ err = (*func)(self, &p, PV_PUT); if (err < 0) @@ -293,17 +293,17 @@ irda_param_t p; int err; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); p.pi = pi; /* In case handler needs to know */ p.pl = buf[1]; /* Extract lenght of value */ - IRDA_DEBUG(2, __FUNCTION__ "(), pi=%#x, pl=%d\n", p.pi, p.pl); + IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __FUNCTION__, p.pi, p.pl); /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { - WARNING(__FUNCTION__ "(), buffer to short for parsing! " - "Need %d bytes, but len is only %d\n", p.pl, len); + WARNING("%s(), buffer to short for parsing! Need %d bytes, " + "but len is only %d\n", __FUNCTION__, p.pl, len); return -1; } @@ -311,7 +311,7 @@ * checked that the buffer is long enough */ strncpy(str, buf+2, p.pl); - IRDA_DEBUG(2, __FUNCTION__ "(), str=0x%02x 0x%02x\n", (__u8) str[0], + IRDA_DEBUG(2, "%s(), str=0x%02x 0x%02x\n", __FUNCTION__, (__u8) str[0], (__u8) str[1]); /* Null terminate string */ @@ -343,12 +343,12 @@ /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { - WARNING(__FUNCTION__ "(), buffer to short for parsing! " - "Need %d bytes, but len is only %d\n", p.pl, len); + WARNING("%s(), buffer to short for parsing! 
Need %d bytes, " + "but len is only %d\n", __FUNCTION__, p.pl, len); return -1; } - IRDA_DEBUG(0, __FUNCTION__ "(), not impl\n"); + IRDA_DEBUG(0, "%s(), not impl\n", __FUNCTION__); return p.pl+2; /* Extracted pl+2 bytes */ } @@ -474,8 +474,8 @@ if ((pi_major > info->len-1) || (pi_minor > info->tables[pi_major].len-1)) { - IRDA_DEBUG(0, __FUNCTION__ - "(), no handler for parameter=0x%02x\n", pi); + IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", + __FUNCTION__, pi); /* Skip this parameter */ return -1; @@ -489,7 +489,7 @@ /* Check if handler has been implemented */ if (!pi_minor_info->func) { - MESSAGE(__FUNCTION__"(), no handler for pi=%#x\n", pi); + MESSAGE("%s(), no handler for pi=%#x\n", __FUNCTION__, pi); /* Skip this parameter */ return -1; } @@ -526,8 +526,8 @@ if ((pi_major > info->len-1) || (pi_minor > info->tables[pi_major].len-1)) { - IRDA_DEBUG(0, __FUNCTION__ "(), no handler for parameter=0x%02x\n", - buf[0]); + IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", + __FUNCTION__, buf[0]); /* Skip this parameter */ return 2 + buf[n + 1]; /* Continue */ @@ -539,12 +539,12 @@ /* Find expected data type for this parameter identifier (pi)*/ type = pi_minor_info->type; - IRDA_DEBUG(3, __FUNCTION__ "(), pi=[%d,%d], type=%d\n", + IRDA_DEBUG(3, "%s(), pi=[%d,%d], type=%d\n", __FUNCTION__, pi_major, pi_minor, type); /* Check if handler has been implemented */ if (!pi_minor_info->func) { - MESSAGE(__FUNCTION__"(), no handler for pi=%#x\n", buf[n]); + MESSAGE("%s(), no handler for pi=%#x\n", __FUNCTION__, buf[n]); /* Skip this parameter */ return 2 + buf[n + 1]; /* Continue */ } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/qos.c linux.22-ac2/net/irda/qos.c --- linux.vanilla/net/irda/qos.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/irda/qos.c 2003-08-28 22:37:44.000000000 +0100 @@ -348,7 +348,7 @@ __u32 line_capacity; int index; - IRDA_DEBUG(2, __FUNCTION__ "()\n"); + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); /* * Make sure the mintt is sensible. 
@@ -374,9 +374,8 @@ if ((qos->baud_rate.value < 115200) && (qos->max_turn_time.value < 500)) { - IRDA_DEBUG(0, __FUNCTION__ - "(), adjusting max turn time from %d to 500 ms\n", - qos->max_turn_time.value); + IRDA_DEBUG(0, "%s(), adjusting max turn time from %d to 500 ms\n", + __FUNCTION__, qos->max_turn_time.value); qos->max_turn_time.value = 500; } @@ -391,8 +390,7 @@ #ifdef CONFIG_IRDA_DYNAMIC_WINDOW while ((qos->data_size.value > line_capacity) && (index > 0)) { qos->data_size.value = data_sizes[index--]; - IRDA_DEBUG(2, __FUNCTION__ - "(), reducing data size to %d\n", + IRDA_DEBUG(2, "%s(), reducing data size to %d\n", __FUNCTION__, qos->data_size.value); } #else /* Use method described in section 6.6.11 of IrLAP */ @@ -402,16 +400,14 @@ /* Must be able to send at least one frame */ if (qos->window_size.value > 1) { qos->window_size.value--; - IRDA_DEBUG(2, __FUNCTION__ - "(), reducing window size to %d\n", - qos->window_size.value); + IRDA_DEBUG(2, "%s(), reducing window size to %d\n", + __FUNCTION__, qos->window_size.value); } else if (index > 1) { qos->data_size.value = data_sizes[index--]; - IRDA_DEBUG(2, __FUNCTION__ - "(), reducing data size to %d\n", - qos->data_size.value); + IRDA_DEBUG(2, "%s(), reducing data size to %d\n", + __FUNCTION__, qos->data_size.value); } else { - WARNING(__FUNCTION__ "(), nothing more we can do!\n"); + WARNING("%s(), nothing more we can do!\n", __FUNCTION__); } } #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ @@ -545,7 +541,7 @@ if (get) { param->pv.i = self->qos_rx.baud_rate.bits; - IRDA_DEBUG(2, __FUNCTION__ "(), baud rate = 0x%02x\n", + IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n", __FUNCTION__, param->pv.i); } else { /* @@ -718,7 +714,7 @@ __u32 line_capacity; int i,j; - IRDA_DEBUG(2, __FUNCTION__ "(), speed=%d, max_turn_time=%d\n", + IRDA_DEBUG(2, "%s(), speed=%d, max_turn_time=%d\n", __FUNCTION__, speed, max_turn_time); i = value_index(speed, baud_rates, 10); @@ -729,7 +725,7 @@ line_capacity = max_line_capacities[i][j]; - IRDA_DEBUG(2, __FUNCTION__ "(), line capacity=%d bytes\n", + IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n", __FUNCTION__, line_capacity); return line_capacity; @@ -743,7 +739,7 @@ irlap_min_turn_time_in_bytes(qos->baud_rate.value, qos->min_turn_time.value); - IRDA_DEBUG(2, __FUNCTION__ "(), requested line capacity=%d\n", + IRDA_DEBUG(2, "%s(), requested line capacity=%d\n", __FUNCTION__, line_capacity); return line_capacity; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/irda/wrapper.c linux.22-ac2/net/irda/wrapper.c --- linux.vanilla/net/irda/wrapper.c 2001-05-01 00:26:09.000000000 +0100 +++ linux.22-ac2/net/irda/wrapper.c 2003-08-28 22:37:44.000000000 +0100 @@ -93,16 +93,16 @@ * Nothing to worry about, but we set the default number of * BOF's */ - IRDA_DEBUG(1, __FUNCTION__ "(), wrong magic in skb!\n"); + IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __FUNCTION__); xbofs = 10; } else xbofs = cb->xbofs + cb->xbofs_delay; - IRDA_DEBUG(4, __FUNCTION__ "(), xbofs=%d\n", xbofs); + IRDA_DEBUG(4, "%s(), xbofs=%d\n", __FUNCTION__, xbofs); /* Check that we never use more than 115 + 48 xbofs */ if (xbofs > 163) { - IRDA_DEBUG(0, __FUNCTION__ "(), too many xbofs (%d)\n", xbofs); + IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __FUNCTION__, xbofs); xbofs = 163; } @@ -265,7 +265,7 @@ case EOF: /* Abort frame */ rx_buff->state = OUTSIDE_FRAME; - IRDA_DEBUG(1, __FUNCTION__ "(), abort frame\n"); + IRDA_DEBUG(1, "%s(), abort frame\n", __FUNCTION__); stats->rx_errors++; stats->rx_frame_errors++; break; @@ -289,13 
+289,13 @@ { switch (byte) { case BOF: /* New frame? */ - IRDA_DEBUG(1, __FUNCTION__ - "(), Discarding incomplete frame\n"); + IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n", + __FUNCTION__); rx_buff->state = BEGIN_FRAME; irda_device_set_media_busy(dev, TRUE); break; case CE: - WARNING(__FUNCTION__ "(), state not defined\n"); + WARNING("%s(), state not defined\n", __FUNCTION__); break; case EOF: /* Abort frame */ rx_buff->state = OUTSIDE_FRAME; @@ -311,7 +311,7 @@ rx_buff->fcs = irda_fcs(rx_buff->fcs, byte); rx_buff->state = INSIDE_FRAME; } else { - IRDA_DEBUG(1, __FUNCTION__ "(), rx buffer overflow\n"); + IRDA_DEBUG(1, "%s(), rx buffer overflow\n", __FUNCTION__); rx_buff->state = OUTSIDE_FRAME; } break; @@ -332,8 +332,7 @@ switch (byte) { case BOF: /* New frame? */ - IRDA_DEBUG(1, __FUNCTION__ - "(), Discarding incomplete frame\n"); + IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n", __FUNCTION__); rx_buff->state = BEGIN_FRAME; irda_device_set_media_busy(dev, TRUE); break; @@ -354,7 +353,7 @@ /* Wrong CRC, discard frame! */ irda_device_set_media_busy(dev, TRUE); - IRDA_DEBUG(1, __FUNCTION__ "(), crc error\n"); + IRDA_DEBUG(1, "%s(), crc error\n", __FUNCTION__); stats->rx_errors++; stats->rx_crc_errors++; } @@ -364,8 +363,8 @@ rx_buff->data[rx_buff->len++] = byte; rx_buff->fcs = irda_fcs(rx_buff->fcs, byte); } else { - IRDA_DEBUG(1, __FUNCTION__ - "(), Rx buffer overflow, aborting\n"); + IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", + __FUNCTION__); rx_buff->state = OUTSIDE_FRAME; } break; diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/Makefile linux.22-ac2/net/Makefile --- linux.vanilla/net/Makefile 2002-08-03 16:08:33.000000000 +0100 +++ linux.22-ac2/net/Makefile 2003-06-29 16:09:45.000000000 +0100 @@ -7,7 +7,7 @@ O_TARGET := network.o -mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched core +mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched core edp2 export-objs := netsyms.o subdir-y := core ethernet @@ -44,8 +44,8 @@ subdir-$(CONFIG_ATM) += atm subdir-$(CONFIG_DECNET) += decnet subdir-$(CONFIG_ECONET) += econet -subdir-$(CONFIG_VLAN_8021Q) += 8021q - +subdir-$(CONFIG_VLAN_8021Q) += 8021q +subdir-$(CONFIG_EDP2) += edp2 obj-y := socket.o $(join $(subdir-y), $(patsubst %,/%.o,$(notdir $(subdir-y)))) ifeq ($(CONFIG_NET),y) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/netsyms.c linux.22-ac2/net/netsyms.c --- linux.vanilla/net/netsyms.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/netsyms.c 2003-07-22 18:34:39.000000000 +0100 @@ -85,7 +85,7 @@ extern void destroy_8023_client(struct datalink_proto *); #endif -#ifdef CONFIG_ATALK_MODULE +#if defined(CONFIG_ATALK_MODULE) || defined(CONFIG_FILTER) #include #endif @@ -96,7 +96,6 @@ /* Skbuff symbols. 
*/ EXPORT_SYMBOL(skb_over_panic); EXPORT_SYMBOL(skb_under_panic); -EXPORT_SYMBOL(skb_pad); /* Socket layer registration */ EXPORT_SYMBOL(sock_register); @@ -473,6 +472,7 @@ #endif /* CONFIG_INET */ #ifdef CONFIG_TR +EXPORT_SYMBOL(tr_source_route); EXPORT_SYMBOL(tr_type_trans); #endif @@ -493,6 +493,7 @@ EXPORT_SYMBOL(__dev_get_by_index); EXPORT_SYMBOL(dev_get_by_name); EXPORT_SYMBOL(__dev_get_by_name); +EXPORT_SYMBOL(dev_getbyhwaddr); EXPORT_SYMBOL(netdev_finish_unregister); EXPORT_SYMBOL(netdev_set_master); EXPORT_SYMBOL(eth_type_trans); @@ -506,6 +507,7 @@ EXPORT_SYMBOL(__kfree_skb); EXPORT_SYMBOL(skb_clone); EXPORT_SYMBOL(skb_copy); +EXPORT_SYMBOL(skb_pad); EXPORT_SYMBOL(netif_rx); EXPORT_SYMBOL(netif_receive_skb); EXPORT_SYMBOL(dev_add_pack); diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/sunrpc/svcsock.c linux.22-ac2/net/sunrpc/svcsock.c --- linux.vanilla/net/sunrpc/svcsock.c 2003-08-28 16:45:47.000000000 +0100 +++ linux.22-ac2/net/sunrpc/svcsock.c 2003-07-30 21:19:42.000000000 +0100 @@ -826,7 +826,7 @@ goto error; svsk->sk_tcplen += len; if (len < want) { - dprintk("svc: short recvfrom while reading record length (%d of %ld)\n", + dprintk("svc: short recvfrom while reading record length (%d of %lu)\n", len, want); svc_sock_received(svsk); return -EAGAIN; /* record header not complete */ diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/sunrpc/timer.c linux.22-ac2/net/sunrpc/timer.c --- linux.vanilla/net/sunrpc/timer.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/net/sunrpc/timer.c 2003-06-29 16:09:47.000000000 +0100 @@ -8,7 +8,7 @@ #define RPC_RTO_MAX (60*HZ) #define RPC_RTO_INIT (HZ/5) -#define RPC_RTO_MIN (2) +#define RPC_RTO_MIN (HZ/30) void rpc_init_rtt(struct rpc_rtt *rt, long timeo) diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/net/unix/af_unix.c linux.22-ac2/net/unix/af_unix.c --- linux.vanilla/net/unix/af_unix.c 2002-11-29 21:27:26.000000000 +0000 +++ linux.22-ac2/net/unix/af_unix.c 2003-06-29 16:09:45.000000000 +0100 @@ -1707,6 +1707,39 @@ return err; } +static unsigned int dgram_poll(struct file * file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk; + unsigned int mask; + unix_socket *other; + + poll_wait(file, sk->sleep, wait); + mask = 0; + + /* exceptional events? */ + if (sk->err || !skb_queue_empty(&sk->error_queue)) + mask |= POLLERR; + if (sk->shutdown == SHUTDOWN_MASK) + mask |= POLLHUP; + + /* readable? */ + if (!skb_queue_empty(&sk->receive_queue) || + (sk->shutdown & RCV_SHUTDOWN)) + mask |= POLLIN | POLLRDNORM; + + /* writable? 
*/ + other = unix_peer_get(sk); + if (sock_writeable(sk) && + (other == NULL || + skb_queue_len(&other->receive_queue) <= other->max_ack_backlog)) + mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + else + set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags); + + return mask; +} + static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; @@ -1836,7 +1869,7 @@ socketpair: unix_socketpair, accept: sock_no_accept, getname: unix_getname, - poll: datagram_poll, + poll: dgram_poll, ioctl: unix_ioctl, listen: sock_no_listen, shutdown: unix_shutdown, diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/Rules.make linux.22-ac2/Rules.make --- linux.vanilla/Rules.make 2002-08-03 16:08:19.000000000 +0100 +++ linux.22-ac2/Rules.make 2003-06-29 16:09:45.000000000 +0100 @@ -291,10 +291,6 @@ include .depend endif -ifneq ($(wildcard $(TOPDIR)/.hdepend),) -include $(TOPDIR)/.hdepend -endif - # # Find files whose flags have changed and force recompilation. # For safety, this works in the converse direction: diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/scripts/Configure linux.22-ac2/scripts/Configure --- linux.vanilla/scripts/Configure 2003-06-14 00:11:43.000000000 +0100 +++ linux.22-ac2/scripts/Configure 2003-07-07 16:06:31.000000000 +0100 @@ -350,7 +350,7 @@ def=${old:-$3} while :; do readln "$1 ($2) [$def] " "$def" "$old" - if expr "$ans" : '[0-9]*$' > /dev/null; then + if expr "$ans" : '[+-]\?[0-9]*$' > /dev/null; then define_int "$2" "$ans" break else diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/scripts/include_deps linux.22-ac2/scripts/include_deps --- linux.vanilla/scripts/include_deps 1970-01-01 01:00:00.000000000 +0100 +++ linux.22-ac2/scripts/include_deps 2003-06-29 16:10:44.000000000 +0100 @@ -0,0 +1,15 @@ +# Read the .depend files, extract the dependencies for .h targets, convert +# relative names to absolute and write the result to stdout. It is part of +# building the global .h dependency graph for kbuild 2.4. 
KAO + +/^[^ ]/ { copy = 0; fn = "/error/"; } +/^[^ ][^ ]*\.h:/ { copy = 1; fn = FILENAME; sub(/\.depend/, "", fn); } +!copy { next; } + { + indent = $0; sub(/[^ ].*/, "", indent); + if ($1 != "" && $1 !~ /^[@$\/\\]/) { $1 = fn $1 }; + if ($2 != "" && $2 !~ /^[@$\/\\]/) { $2 = fn $2 }; + $1 = $1; # ensure $0 is rebuilt + $0 = indent $0; + print; + } diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/scripts/mkdep.c linux.22-ac2/scripts/mkdep.c --- linux.vanilla/scripts/mkdep.c 2002-08-03 16:08:33.000000000 +0100 +++ linux.22-ac2/scripts/mkdep.c 2003-06-29 16:10:44.000000000 +0100 @@ -48,6 +48,8 @@ char __depname[512] = "\n\t@touch "; #define depname (__depname+9) int hasdep; +char cwd[PATH_MAX]; +int lcwd; struct path_struct { int len; @@ -202,8 +204,28 @@ memcpy(path->buffer+path->len, name, len); path->buffer[path->len+len] = '\0'; if (access(path->buffer, F_OK) == 0) { + int l = lcwd + strlen(path->buffer); + int need_wildcard = 0; + char name2[l+2], *p; + if (path->buffer[0] == '/') { + memcpy(name2, path->buffer, l+1); + } + else { + need_wildcard = 1; + memcpy(name2, cwd, lcwd); + name2[lcwd] = '/'; + memcpy(name2+lcwd+1, path->buffer, path->len+len+1); + } + while ((p = strstr(name2, "/../"))) { + *p = '\0'; + strcpy(strrchr(name2, '/'), p+3); + } do_depname(); - printf(" \\\n %s", path->buffer); + if (need_wildcard) { + printf(" \\\n $(wildcard %s)", name2); + } else { + printf(" \\\n %s", name2); + } return; } } @@ -585,6 +607,12 @@ return 1; } + if (!getcwd(cwd, sizeof(cwd))) { + fprintf(stderr, "mkdep: getcwd() failed %m\n"); + return 1; + } + lcwd = strlen(cwd); + add_path("."); /* for #include "..." */ while (++argv, --argc > 0) { diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.vanilla/scripts/tkgen.c linux.22-ac2/scripts/tkgen.c --- linux.vanilla/scripts/tkgen.c 2002-08-03 16:08:33.000000000 +0100 +++ linux.22-ac2/scripts/tkgen.c 2003-06-29 16:10:44.000000000 +0100 @@ -625,6 +625,7 @@ if ( ! vartable[i].global_written ) { global( vartable[i].name ); + vartable[i].global_written = 1; } printf( "\t" ); } @@ -699,6 +700,19 @@ } /* + * Generate global declarations for the dependency chain (e.g. CONSTANT_M). + */ + for ( tmp = cfg->depend; tmp; tmp = tmp->next ) + { + int i = get_varnum( tmp->name ); + if ( ! vartable[i].global_written ) + { + global( vartable[i].name ); + vartable[i].global_written = 1; + } + } + + /* * Generate indentation. */ printf( "\t" );
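
The bulk of the IrDA changes in this part of the patch (irlap_event.c, irlap_frame.c, irlmp.c, irlmp_frame.c, irqueue.c, irttp.c, parameters.c, qos.c, wrapper.c) are one mechanical conversion: the old code pasted __FUNCTION__ directly onto adjacent string literals, which only works while the compiler expands it as a string literal; newer gcc treats __FUNCTION__ like the C99 __func__ identifier, so the concatenation no longer compiles. The fix is to add a "%s" conversion to the format and pass __FUNCTION__ as an ordinary argument. A minimal user-space sketch of the same idiom (DBG and debug_level are invented stand-ins for IRDA_DEBUG and its verbosity switch, not IrDA names):

    #include <stdio.h>

    static int debug_level = 4;     /* invented verbosity switch */

    /* Hypothetical debug macro in the spirit of IRDA_DEBUG: print only
     * when the message level is within the configured verbosity. */
    #define DBG(level, fmt, args...) \
            do { if ((level) <= debug_level) printf(fmt, ## args); } while (0)

    static void demo(int nr)
    {
            /* Old style, rejected by current gcc because __FUNCTION__ is
             * no longer a string literal:
             *      DBG(4, __FUNCTION__ "(), nr=%d\n", nr);               */

            /* New style, as used throughout the patch: */
            DBG(4, "%s(), nr=%d\n", __FUNCTION__, nr);
    }

    int main(void)
    {
            demo(3);
            return 0;
    }

The same substitution appears in the WARNING, ERROR, MESSAGE and printk call sites of these files.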
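
The irnet.h hunk applies the same idea to macros that forward a variable argument list: the rewritten DERROR/DEBUG/DENTER/DPASS/DEXIT macros take an explicit format parameter and splice the rest through the GNU ", ## args" extension, which also swallows the comma when a caller passes no extra arguments. A small stand-alone sketch of that pattern (DDEBUG, DEBUG_DEMO and connect_peer are invented for illustration, not IrNET names):

    #include <stdio.h>

    #define DEBUG_DEMO 1            /* invented debug switch */

    /* GNU named-variadic macro: "## args" drops the trailing comma when
     * the caller supplies only a format string. */
    #define DDEBUG(dbg, format, args...) \
            do { if (DEBUG_##dbg) \
                    printf("demo: %s(): " format, __FUNCTION__ , ## args); \
            } while (0)

    static void connect_peer(int fd)
    {
            DDEBUG(DEMO, "fd=%d\n", fd);    /* with extra arguments */
            DDEBUG(DEMO, "done\n");         /* format only: ## eats the comma */
    }

    int main(void)
    {
            connect_peer(42);
            return 0;
    }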
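
The irttp.c context around irttp_fragment_skb shows the usual segmentation-and-reassembly shape: while the SDU exceeds max_seg_size, peel off a segment, prepend a TTP header, and queue it; the final remainder is queued with the "more" bit cleared so the receiver knows reassembly is complete. A generic sketch of that loop (MAX_SEG and MORE_BIT are invented; this is not the real TTP header layout):

    #include <stdio.h>
    #include <string.h>

    #define MAX_SEG  5              /* stand-in for max_seg_size */
    #define MORE_BIT 0x80           /* invented flag, not the real TTP bit */

    /* Emit the payload in segments of at most MAX_SEG bytes; every segment
     * except the last carries MORE_BIT, mirroring the "clear more bit on
     * the final fragment" step visible in irttp_fragment_skb(). */
    static void fragment(const unsigned char *data, size_t len)
    {
            while (len > 0) {
                    size_t n = len > MAX_SEG ? MAX_SEG : len;
                    unsigned int hdr = (len > MAX_SEG) ? MORE_BIT : 0x00;

                    printf("segment: hdr=0x%02x payload=%.*s\n",
                           hdr, (int)n, data);
                    data += n;
                    len -= n;
            }
    }

    int main(void)
    {
            const char *sdu = "hello, fragmented world";

            fragment((const unsigned char *)sdu, strlen(sdu));
            return 0;
    }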
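
The net/sunrpc/timer.c change replaces a fixed 2-jiffy floor on the estimated round-trip timeout with HZ/30. Two jiffies is a different amount of real time depending on the tick rate (20 ms at HZ=100, under 2 ms at HZ=1024), while HZ/30 keeps the floor near 33 ms everywhere. A trivial check of that arithmetic (show() is just for illustration):

    #include <stdio.h>

    /* Minimum RPC retransmit timeout in milliseconds for a given tick
     * rate, before (2 jiffies) and after (HZ/30 jiffies) the patch. */
    static void show(int hz)
    {
            int old_floor = 2;
            int new_floor = hz / 30;

            printf("HZ=%4d: old floor = %2d ms, new floor = %2d ms\n",
                   hz, old_floor * 1000 / hz, new_floor * 1000 / hz);
    }

    int main(void)
    {
            show(100);      /* i386 default in 2.4 */
            show(1024);     /* e.g. Alpha */
            return 0;
    }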
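
The af_unix.c addition gives AF_UNIX datagram sockets their own poll routine: besides the usual error, hangup and readable checks, dgram_poll only reports POLLOUT when the socket is writable and the connected peer's receive queue is under its backlog limit, so select/poll now reflect receiver-side backpressure instead of always claiming writability. A user-space illustration of the call pattern on a datagram socketpair (what a full peer queue reports depends on the kernel in use; this only demonstrates the interface):

    #include <poll.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fds[2];
            struct pollfd pfd;

            if (socketpair(AF_UNIX, SOCK_DGRAM, 0, fds) < 0) {
                    perror("socketpair");
                    return 1;
            }

            /* Ask whether fds[0] is writable/readable; with the patched
             * dgram_poll the POLLOUT answer also reflects how full the
             * peer's receive queue is. */
            pfd.fd = fds[0];
            pfd.events = POLLIN | POLLOUT;
            if (poll(&pfd, 1, 0) < 0) {
                    perror("poll");
                    return 1;
            }
            printf("writable=%d readable=%d\n",
                   !!(pfd.revents & POLLOUT), !!(pfd.revents & POLLIN));

            send(fds[1], "ping", 4, 0);             /* make fds[0] readable */
            poll(&pfd, 1, 0);
            printf("after send: writable=%d readable=%d\n",
                   !!(pfd.revents & POLLOUT), !!(pfd.revents & POLLIN));
            return 0;
    }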
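
The scripts/mkdep.c change rewrites every dependency it emits as an absolute path, prefixing the current directory for relative includes and folding "/../" components away, and wraps the relative ones in $(wildcard ...), presumably so a header that later disappears does not leave make with an unmakeable dependency. It pairs with the new scripts/include_deps awk script, which folds the per-directory .depend files into a global header dependency graph. A small sketch of just the path-folding step (fold_dotdot is an invented name; the loop mirrors the strstr/strrchr logic in the patch but uses memmove because source and destination overlap):

    #include <stdio.h>
    #include <string.h>

    /* Collapse "/../" components in place, the way the patched mkdep.c
     * does before printing a dependency: "a/b/../c" becomes "a/c". */
    static void fold_dotdot(char *path)
    {
            char *p, *slash;

            while ((p = strstr(path, "/../")) != NULL) {
                    *p = '\0';
                    slash = strrchr(path, '/');
                    if (slash == NULL)
                            break;              /* malformed input; give up */
                    memmove(slash, p + 3, strlen(p + 3) + 1);
            }
    }

    int main(void)
    {
            char buf[] = "/usr/src/linux/net/irda/../../include/net/irda/irda.h";

            fold_dotdot(buf);
            printf("%s\n", buf);    /* /usr/src/linux/include/net/irda/irda.h */
            return 0;
    }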